Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def analyze_portfolio_mamba_backtest(df, window_years=5, test_years=1, n_portfolios=10):
    """Run a rolling-window MAMBA backtest and build up to ``n_portfolios`` portfolios.

    The date axis is partitioned into equally spaced train/test windows.
    For each window, a MAMBA model is fit per asset-return column to estimate
    expected returns, a mean-variance portfolio is optimized against the
    training covariance, and risk metrics are computed on the held-out
    test period.

    Parameters
    ----------
    df : pandas.DataFrame
        Data with a ``TRADEDATE`` column, the factor features listed in
        ``independent_vars`` below, and one return column per asset.
        Assumes dates are monthly, so "months" == distinct trade dates.
    window_years : int, default 5
        Training window length in years.
    test_years : int, default 1
        Out-of-sample test window length in years.
    n_portfolios : int, default 10
        Maximum number of rolling windows (portfolios) to generate.

    Returns
    -------
    pandas.DataFrame
        One row per completed window: train/test date bounds, the risk
        metrics from ``compute_risk_metrics``, and ``Weight_<asset>``
        columns with the optimal weights.

    Raises
    ------
    ValueError
        If ``n_portfolios`` is not positive or the data does not span
        enough distinct dates for a single train+test window.
    """
    import pandas as pd
    import numpy as np
    import torch
    from torch import nn, optim
    from sklearn.preprocessing import StandardScaler

    # Factor features used as model inputs: contemporaneous levels, lags,
    # rolling stats, pairwise interactions, polynomial terms and seasonal
    # sine/cosine encodings.
    independent_vars = ["MARKET_RETURN_ADJ", "SMB", "HML", "MOM",
        'MARKET_RETURN_ADJ_lag1', 'SMB_lag1', 'HML_lag1', 'MOM_lag1',
        'MARKET_RETURN_ADJ_lag2', 'SMB_lag2', 'HML_lag2', 'MOM_lag2',
        'MARKET_RETURN_ADJ_rollmean_3', 'MARKET_RETURN_ADJ_rollstd_3',
        'SMB_rollmean_3', 'SMB_rollstd_3', 'HML_rollmean_3', 'HML_rollstd_3',
        'MOM_rollmean_3', 'MOM_rollstd_3', 'MARKET_RETURN_ADJ_rollmean_6',
        'MARKET_RETURN_ADJ_rollstd_6', 'SMB_rollmean_6', 'SMB_rollstd_6',
        'HML_rollmean_6', 'HML_rollstd_6', 'MOM_rollmean_6', 'MOM_rollstd_6',
        'MARKET_RETURN_ADJ_x_SMB', 'MARKET_RETURN_ADJ_x_HML',
        'MARKET_RETURN_ADJ_x_MOM', 'SMB_x_HML', 'SMB_x_MOM', 'HML_x_MOM',
        'MARKET_RETURN_ADJ_squared', 'MARKET_RETURN_ADJ_cubed',
        'SMB_squared', 'SMB_cubed', 'HML_squared', 'HML_cubed',
        'MOM_squared', 'MOM_cubed', 'Month_sin', 'Month_cos', 'Quarter_sin', 'Quarter_cos']
    # Columns never treated as asset-return targets.
    exclude = set(independent_vars + ["TRADEDATE", 'Year', 'Month', 'Quarter'])

    if n_portfolios <= 0:
        raise ValueError("n_portfolios must be a positive integer")

    df = df.copy()
    df['TRADEDATE'] = pd.to_datetime(df['TRADEDATE'])
    df = df.sort_values('TRADEDATE')
    tradedates = df['TRADEDATE'].drop_duplicates().sort_values().tolist()

    window_months = window_years * 12
    test_months = test_years * 12
    total_months = window_months + test_months
    max_possible_starts = len(tradedates) - total_months + 1
    if max_possible_starts <= 0:
        # Previously this fell through to negative indexing / a broken range;
        # fail loudly with an actionable message instead.
        raise ValueError(
            f"Need at least {total_months} distinct trade dates for one "
            f"train+test window, got {len(tradedates)}"
        )
    # BUG FIX: integer division yielded step == 0 when
    # n_portfolios > max_possible_starts, and range(0, x, 0) raises
    # ValueError.  Clamp to at least 1 so every feasible start is tried.
    step = max(1, max_possible_starts // n_portfolios)

    # Targets are every non-excluded column; invariant across windows, so
    # compute once (was recomputed inside the loop).
    target_cols = [col for col in df.columns if col not in exclude]

    results = []
    for i in range(0, max_possible_starts, step):
        if len(results) >= n_portfolios:
            break
        # Window bounds, inclusive on both ends.
        train_start = tradedates[i]
        train_end = tradedates[i + window_months - 1]
        test_start = tradedates[i + window_months]
        test_end = tradedates[i + window_months + test_months - 1]
        train_df = df[(df['TRADEDATE'] >= train_start) & (df['TRADEDATE'] <= train_end)]
        test_df = df[(df['TRADEDATE'] >= test_start) & (df['TRADEDATE'] <= test_end)]

        # Fit one MAMBA model per asset to estimate its expected return
        # over the training window.
        expected_returns = {}
        for target in target_cols:
            df_temp = pd.concat([train_df[independent_vars], train_df[target]], axis=1).dropna()
            if df_temp.empty:
                continue  # asset has no complete rows in this window
            X = df_temp[independent_vars]
            y = df_temp[target]
            scaler = StandardScaler()
            X_scaled = scaler.fit_transform(X)
            # unsqueeze(1) adds a length-1 sequence dimension: (N, 1, F).
            X_tensor = torch.tensor(X_scaled, dtype=torch.float32, device=device).unsqueeze(1)
            y_tensor = torch.tensor(y.values.reshape(-1, 1), dtype=torch.float32, device=device)
            model = MambaTabularModel(input_dim=X_tensor.shape[-1]).to(device)
            # torch.compile is skipped on MPS, where support is incomplete.
            if hasattr(torch, "compile") and device.type != "mps":
                model = torch.compile(model)
            optimizer = optim.Adam(model.parameters(), lr=0.05)
            loss_fn = nn.MSELoss()
            for _ in range(50):
                model.train()
                optimizer.zero_grad()
                # Collapse the sequence dimension; assumes the result
                # broadcasts against y_tensor's (N, 1) shape -- confirm
                # against MambaTabularModel's output shape.
                output = model(X_tensor).mean(dim=1)
                loss = loss_fn(output, y_tensor)
                loss.backward()
                optimizer.step()
            model.eval()
            with torch.no_grad():
                predictions = model(X_tensor).cpu().numpy()
            # Overall mean equals mean over dim=1 followed by mean, so this
            # matches the training-time reduction.
            expected_returns[target] = predictions.mean()
            # NOTE: the original also cached each model in a dict that was
            # never read; dropped so models can be freed per target.
        if not expected_returns:
            continue  # no asset had usable training data in this window

        # Mean-variance optimization on training-window statistics.
        all_stocks = list(expected_returns.keys())
        mu = np.array([expected_returns[s] for s in all_stocks])
        train_returns_df = train_df[all_stocks].dropna()
        cov_matrix = train_returns_df.cov().values
        optimal_weights = optimize_portfolio(mu, cov_matrix)

        # Evaluate out of sample.
        test_returns_df = test_df[all_stocks].dropna()
        if test_returns_df.empty:
            continue
        portfolio_returns = backtest_portfolio(test_returns_df, optimal_weights, all_stocks)
        portfolio_beta = 1.0  # placeholder beta; risk metrics use it as-is
        risk_metrics = compute_risk_metrics(portfolio_returns, portfolio_beta)

        result_row = {
            'Train Start': train_start,
            'Train End': train_end,
            'Test Start': test_start,
            'Test End': test_end,
        }
        result_row.update(risk_metrics)
        for stock, weight in zip(all_stocks, optimal_weights):
            result_row[f'Weight_{stock}'] = weight
        results.append(result_row)
    return pd.DataFrame(results)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement