bbstrader 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -0,0 +1,681 @@
+ """
+ Trading Strategies module
+ """
+
+
+ import numpy as np
+ import seaborn as sns
+ import pandas as pd
+ import time
+ from arch import arch_model
+ import matplotlib.pyplot as plt
+ from statsmodels.graphics.tsaplots import plot_acf
+ from statsmodels.stats.diagnostic import acorr_ljungbox
+ from filterpy.kalman import KalmanFilter
+ from scipy.optimize import minimize
+ from bbstrader.tseries import (
+     load_and_prepare_data, fit_best_arima,
+     fit_garch, predict_next_return, get_prediction
+ )
+ sns.set_theme()
+
+ __all__ = [
+     "ArimaGarchStrategy",
+     "KLFStrategy",
+     "SMAStrategy",
+     "OrnsteinUhlenbeck"
+ ]
+
+ class ArimaGarchStrategy():
+     """
+     This class implements a trading strategy
+     that combines `ARIMA (AutoRegressive Integrated Moving Average)`
+     and `GARCH (Generalized Autoregressive Conditional Heteroskedasticity)` models
+     to predict future returns based on historical price data.
+     It inherits from an abstract `Strategy` class and implements `calculate_signals()`.
+
+     The strategy is implemented in the following steps:
+     1. Data Preparation: Load and prepare the historical price data.
+     2. Modeling: Fit the ARIMA model to the data and then fit the GARCH model to the residuals.
+     3. Prediction: Predict the next return using the ARIMA model and the next volatility using the GARCH model.
+     4. Trading Strategy: Execute the trading strategy based on the predictions.
+     5. Vectorized Backtesting: Backtest the trading strategy using the historical data.
+
+     Example:
+     >>> import yfinance as yf
+     >>> from bbstrader.strategies import ArimaGarchStrategy
+     >>> from bbstrader.tseries import load_and_prepare_data
+
+     >>> if __name__ == '__main__':
+     >>>     # ARIMA+GARCH SPY vectorized backtest
+     >>>     k = 252
+     >>>     data = yf.download("SPY", start="2004-01-02", end="2015-12-31")
+     >>>     arch = ArimaGarchStrategy("SPY", data, k=k)
+     >>>     df = load_and_prepare_data(data)
+     >>>     arch.show_arima_garch_results(df['diff_log_return'].values[-k:])
+     >>>     arch.backtest_strategy()
+     """
+
+     def __init__(self, symbol, data, k: int = 252):
+         """
+         Initializes the ArimaGarchStrategy class.
+
+         Args:
+             symbol (str): The ticker symbol for the financial instrument.
+             data (pd.DataFrame): The raw dataset containing at least the 'Close' prices.
+             k (int): The window size for rolling prediction in backtesting.
+         """
+         self.symbol = symbol
+         self.data = self.load_and_prepare_data(data)
+         self.k = k
+
+     # Step 1: Data Preparation
+     def load_and_prepare_data(self, df):
+         """
+         Prepares the dataset by calculating logarithmic returns
+         and differencing if necessary.
+
+         Args:
+             df (pd.DataFrame): The raw dataset containing at least the 'Close' prices.
+
+         Returns:
+             pd.DataFrame: The dataset with additional columns
+             for log returns and differenced log returns.
+         """
+         return load_and_prepare_data(df)
+
+     # Step 2: Modeling (ARIMA + GARCH)
+     def fit_best_arima(self, window_data):
+         """
+         Fits the ARIMA model to the provided window of data,
+         selecting the best model based on AIC.
+
+         Args:
+             window_data (np.array): The dataset for a specific window period.
+
+         Returns:
+             ARIMA model: The best fitted ARIMA model based on AIC.
+         """
+         return fit_best_arima(window_data)
+
+     def fit_garch(self, window_data):
+         """
+         Fits the GARCH model to the residuals of the best ARIMA model.
+
+         Args:
+             window_data (np.array): The dataset for a specific window period.
+
+         Returns:
+             tuple: Contains the ARIMA result and GARCH result.
+         """
+         return fit_garch(window_data)
+
+     def show_arima_garch_results(self, window_data, acf=True, test_resid=True):
+         """
+         Displays the ARIMA and GARCH model results, including plotting the
+         ACF of residuals and conducting Box-Pierce and Ljung-Box tests.
+
+         Args:
+             window_data (np.array): The dataset for a specific window period.
+             acf (bool, optional): If True, plot the ACF of residuals. Defaults to True.
+
+             test_resid (bool, optional):
+                 If True, conduct Box-Pierce and Ljung-Box tests on residuals. Defaults to True.
+         """
+         arima_result = self.fit_best_arima(window_data)
+         resid = np.asarray(arima_result.resid)
+         resid = resid[~(np.isnan(resid) | np.isinf(resid))]
+         garch_model = arch_model(resid, p=1, q=1, rescale=False)
+         garch_result = garch_model.fit(disp='off')
+         residuals = garch_result.resid
+
+         # Plot the ACF of the residuals
+         if acf:
+             fig = plt.figure(figsize=(12, 8))
+             # Plot the ACF of ARIMA residuals
+             ax1 = fig.add_subplot(211, ylabel='ACF')
+             plot_acf(resid, alpha=0.05, ax=ax1, title='ACF of ARIMA Residuals')
+             ax1.set_xlabel('Lags')
+             ax1.grid(True)
+
+             # Plot the ACF of GARCH residuals on the same axes
+             ax2 = fig.add_subplot(212, ylabel='ACF')
+             plot_acf(residuals, alpha=0.05, ax=ax2,
+                      title='ACF of GARCH Residuals')
+             ax2.set_xlabel('Lags')
+             ax2.grid(True)
+
+             # Plot the figure
+             plt.tight_layout()
+             plt.show()
+
+         # Conduct Box-Pierce and Ljung-Box tests on the residuals
+         if test_resid:
+             print(arima_result.summary())
+             print(garch_result.summary())
+             bp_test = acorr_ljungbox(resid, return_df=True)
+             print("Box-Pierce and Ljung-Box Tests Results for ARIMA:\n", bp_test)
+
+     # Step 3: Prediction
+     def predict_next_return(self, arima_result, garch_result):
+         """
+         Predicts the next return using the ARIMA model
+         and the next volatility using the GARCH model.
+
+         Args:
+             arima_result (ARIMA model): The ARIMA model result.
+             garch_result (GARCH model): The GARCH model result.
+
+         Returns:
+             float: The predicted next return.
+         """
+         return predict_next_return(arima_result, garch_result)
+
+     def get_prediction(self, window_data):
+         """
+         Generates a prediction for the next return based on a window of data.
+
+         Args:
+             window_data (np.array): The dataset for a specific window period.
+
+         Returns:
+             float: The predicted next return.
+         """
+         return get_prediction(window_data)
+
+     def calculate_signals(self, window_data):
+         """
+         Calculates the trading signal based on the prediction.
+
+         Args:
+             window_data (np.array): The dataset for a specific window period.
+
+         Returns:
+             str: The trading signal ('LONG', 'SHORT', or None).
+         """
+         prediction = self.get_prediction(window_data)
+         if prediction > 0:
+             signal = "LONG"
+         elif prediction < 0:
+             signal = "SHORT"
+         else:
+             signal = None
+         return signal
+
+     # Step 4: Trading Strategy
+
+     def execute_trading_strategy(self, predictions):
+         """
+         Executes the trading strategy based on a list
+         of predictions, determining positions to take.
+
+         Args:
+             predictions (list): A list of predicted returns.
+
+         Returns:
+             list: A list of positions (1 for 'LONG', -1 for 'SHORT', 0 for 'HOLD').
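+
+         Example (illustrative sketch, assuming ``strategy`` is an
+         ``ArimaGarchStrategy`` instance and the predictions are made up):
+
+         >>> strategy.execute_trading_strategy([0.002, -0.001, 0.0, 0.003])
+         [1, -1, -1, 1]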
+         """
+         positions = []  # Long if 1, Short if -1
+         previous_position = 0  # Initial position
+         for prediction in predictions:
+             if prediction > 0:
+                 current_position = 1  # Long
+             elif prediction < 0:
+                 current_position = -1  # Short
+             else:
+                 current_position = previous_position  # Hold previous position
+             positions.append(current_position)
+             previous_position = current_position
+
+         return positions
+
+     # Step 5: Vectorized Backtesting
+     def generate_predictions(self):
+         """
+         Generator that yields predictions one by one.
+         """
+         data = self.data
+         window_size = self.k
+         for i in range(window_size, len(data)):
+             print(
+                 f"Processing window {i - window_size + 1}/{len(data) - window_size}...")
+             window_data = data['diff_log_return'].iloc[i-window_size:i]
+             next_return = self.get_prediction(window_data)
+             yield next_return
+
+     def backtest_strategy(self):
+         """
+         Performs a backtest of the strategy over
+         the entire dataset, plotting cumulative returns.
+         """
+         data = self.data
+         window_size = self.k
+         print(
+             f"Starting backtesting for {self.symbol}\n"
+             f"Window size {window_size}.\n"
+             f"Total iterations: {len(data) - window_size}.\n")
+         predictions_generator = self.generate_predictions()
+
+         positions = self.execute_trading_strategy(predictions_generator)
+
+         # Each position is applied to the following period's log return
+         strategy_returns = np.array(
+             positions[:-1]) * data['log_return'].iloc[window_size+1:].values
+         buy_and_hold = data['log_return'].iloc[window_size+1:].values
+         buy_and_hold_returns = np.cumsum(buy_and_hold)
+         cumulative_returns = np.cumsum(strategy_returns)
+         dates = data.index[window_size+1:]
+         self.plot_cumulative_returns(
+             cumulative_returns, buy_and_hold_returns, dates)
+
+         print("\nBacktesting completed!")
+
+     # Plot the cumulative returns
+     def plot_cumulative_returns(self, strategy_returns, buy_and_hold_returns, dates):
+         """
+         Plots the cumulative returns of the ARIMA+GARCH strategy against
+         a buy-and-hold strategy.
+
+         Args:
+             strategy_returns (np.array): Cumulative returns from the strategy.
+             buy_and_hold_returns (np.array): Cumulative returns from a buy-and-hold strategy.
+             dates (pd.Index): The dates corresponding to the returns.
+         """
+         plt.figure(figsize=(14, 7))
+         plt.plot(dates, strategy_returns, label='ARIMA+GARCH', color='blue')
+         plt.plot(dates, buy_and_hold_returns, label='Buy & Hold', color='red')
+         plt.xlabel('Time')
+         plt.ylabel('Cumulative Returns')
+         plt.title(f'ARIMA+GARCH Strategy vs. Buy & Hold ({self.symbol})')
+         plt.legend()
+         plt.grid(True)
+         plt.show()
+
+
+ class KLFStrategy():
+     """
+     Implements a trading strategy based on the Kalman Filter,
+     a recursive algorithm used for estimating the state of a linear dynamic system
+     from a series of noisy measurements. It's designed to process market data,
+     estimate dynamic parameters such as the slope and intercept of price relationships,
+     and generate trading signals based on those estimates.
+
+     You can learn more here https://en.wikipedia.org/wiki/Kalman_filter
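+
+     Example (illustrative sketch; the tickers and prices are hypothetical and
+     only the constructor and ``calculate_signals`` defined in this class are used):
+
+     >>> import numpy as np
+     >>> from bbstrader.strategies import KLFStrategy
+     >>> pair = KLFStrategy(("SPY", "IWM"), delta=1e-4, vt=1e-3)
+     >>> signals = pair.calculate_signals(np.array([100.0, 200.0]))
+     >>> # e.g. {'IWM': 'EXIT', 'SPY': 'EXIT'}, depending on the filter state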
+     """
+
+     def __init__(self, tickers: list | tuple, **kwargs):
+         """
+         Initializes the Kalman Filter strategy.
+
+         Args:
+             tickers :
+                 A list or tuple of ticker symbols representing financial instruments.
+
+             kwargs : Keyword arguments for additional parameters,
+                 specifically `delta` and `vt`
+         """
+         self.tickers = tickers
+         assert self.tickers is not None
+         self.latest_prices = np.array([-1.0, -1.0])
+
+         self.delta = kwargs.get("delta", 1e-4)
+         self.wt = self.delta/(1-self.delta) * np.eye(2)
+         self.vt = kwargs.get("vt", 1e-3)
+         self.theta = np.zeros(2)
+         self.P = np.zeros((2, 2))
+         self.R = None
+         self.kf = self._init_kalman()
+
+     def _init_kalman(self):
+         """
+         Initializes and returns a Kalman Filter configured
+         for the trading strategy. The filter is set up with initial
+         state and covariance, state transition matrix, process noise
+         and measurement noise covariances.
+         """
+         kf = KalmanFilter(dim_x=2, dim_z=1)
+         kf.x = np.zeros((2, 1))  # Initial state
+         kf.P = self.P  # Initial covariance
+         kf.F = np.eye(2)  # State transition matrix
+         kf.Q = self.wt  # Process noise covariance
+         kf.R = 1.  # Scalar measurement noise covariance
+
+         return kf
+
+     def calc_slope_intercep(self, prices: np.ndarray):
+         """
+         Calculates and returns the slope and intercept
+         of the relationship between the provided prices using the Kalman Filter.
+         This method updates the filter with the latest prices and returns
+         the estimated slope and intercept.
+
+         Args:
+             prices : A numpy array of prices for two financial instruments.
+
+         Returns:
+             A tuple containing the slope and intercept of the relationship.
+         """
+         kf = self.kf
+         kf.H = np.array([[prices[1], 1.0]])
+         kf.predict()
+         kf.update(prices[0])
+         slope = kf.x.copy().flatten()[0]
+         intercept = kf.x.copy().flatten()[1]
+
+         return slope, intercept
+
+     def calculate_xy_signals(self, et, std):
+         """
+         Generates trading signals based on the forecast error
+         and standard deviation of the predictions. It returns signals for exiting,
+         going long, or shorting positions based on the comparison of
+         the forecast error with the standard deviation.
+
+         Args:
+             et : The forecast error.
+             std : The standard deviation of the predictions.
+
+         Returns:
+             A tuple containing the trading signals for the two financial instruments.
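+
+         Example (illustrative, assuming ``strategy`` is a ``KLFStrategy``
+         instance; scalar inputs are shown for clarity):
+
+         >>> strategy.calculate_xy_signals(2.5, 1.0)
+         ('SHORT', 'LONG')
+         >>> strategy.calculate_xy_signals(0.2, 1.0)
+         ('EXIT', 'EXIT')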
+         """
+         y_signal = None
+         x_signal = None
+
+         # Forecast error within one standard deviation of zero: exit positions
+         if et >= -std and et <= std:
+             y_signal = "EXIT"
+             x_signal = "EXIT"
+
+         if et <= -std:
+             y_signal = "LONG"
+             x_signal = "SHORT"
+
+         if et >= std:
+             y_signal = "SHORT"
+             x_signal = "LONG"
+
+         return y_signal, x_signal
+
+     def calculate_signals(self, prices: np.ndarray):
+         """
+         Calculates trading signals based on the latest prices
+         and the Kalman Filter's estimates. It updates the filter's state
+         with the latest prices, computes the slope and intercept
+         and generates trading signals based on the forecast error
+         and prediction standard deviation.
+
+         Args:
+             prices : A numpy array of prices for two financial instruments.
+
+         Returns:
+             A dictionary containing trading signals for the two financial instruments.
+         """
+
+         self.latest_prices[0] = prices[0]
+         self.latest_prices[1] = prices[1]
+
+         if all(self.latest_prices > -1.0):
+             slope, intercept = self.calc_slope_intercep(self.latest_prices)
+
+             self.theta[0] = slope
+             self.theta[1] = intercept
+
+             # Create the observation matrix of the latest prices
+             # of Y and the intercept value (1.0) as well as the
+             # scalar value of the latest price from X
+             F = np.asarray([self.latest_prices[0], 1.0]).reshape((1, 2))
+             y = self.latest_prices[1]
+
+             # The prior value of the states {\theta_t} is
+             # distributed as a multivariate Gaussian with
+             # mean a_t and variance-covariance {R_t}
+             if self.R is not None:
+                 self.R = self.C + self.wt
+             else:
+                 self.R = np.zeros((2, 2))
+
+             # Calculate the Kalman Filter update
+             # ---------------------------------
+             # Calculate prediction of new observation
+             # as well as forecast error of that prediction
+             yhat = F.dot(self.theta)
+             et = y - yhat
+
+             # {Q_t} is the variance of the prediction of
+             # observations and hence sqrt_Qt is the
+             # standard deviation of the predictions
+             Qt = F.dot(self.R).dot(F.T) + self.vt
+             sqrt_Qt = np.sqrt(Qt)
+
+             # The posterior value of the states {\theta_t} is
+             # distributed as a multivariate Gaussian with mean
+             # {m_t} and variance-covariance {C_t}
+             At = self.R.dot(F.T) / Qt
+             self.theta = self.theta + At.flatten() * et
+             self.C = self.R - At * F.dot(self.R)
+
+             y_signal, x_signal = self.calculate_xy_signals(et, sqrt_Qt)
+
+             return {
+                 self.tickers[1]: y_signal,
+                 self.tickers[0]: x_signal
+             }
+
+
+ class SMAStrategy():
+
+     """
+     Carries out a basic Moving Average Crossover strategy with a
+     short/long simple moving average. Default short/long
+     windows are 50/200 periods respectively, and a Hidden Markov Model is used
+     as a risk management system for filtering signals.
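+
+     Example (illustrative sketch; the price series below is synthetic):
+
+     >>> import numpy as np
+     >>> from bbstrader.strategies import SMAStrategy
+     >>> sma = SMAStrategy(short_window=50, long_window=200)
+     >>> prices = np.linspace(100, 120, 250)  # any series with at least 200 points
+     >>> sma.calculate_signals(prices)
+     'LONG'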
+     """
+
+     def __init__(
+         self, **kwargs
+     ):
+         self.short_window = kwargs.get("short_window", 50)
+         self.long_window = kwargs.get("long_window", 200)
+
+     def get_data(self, prices):
+         assert len(prices) >= self.long_window
+         short_sma = np.mean(prices[-self.short_window:])
+         long_sma = np.mean(prices[-self.long_window:])
+         return short_sma, long_sma
+
+     def create_signal(self, prices):
+         signal = None
+         data = self.get_data(prices)
+         short_sma, long_sma = data
+         if short_sma > long_sma:
+             signal = 'LONG'
+         elif short_sma < long_sma:
+             signal = 'SHORT'
+         return signal
+
+     def calculate_signals(self, prices):
+         return self.create_signal(prices)
+
+
+ class OrnsteinUhlenbeck():
+     """
+     The Ornstein-Uhlenbeck process is a mathematical model
+     used to describe the behavior of a mean-reverting stochastic process.
+     We use it to model the price dynamics of an asset that tends
+     to revert to a long-term mean.
+
+     We estimate the drift (θ), volatility (σ), and long-term mean (μ)
+     based on historical price data; then we simulate the OU process
+     using the estimated parameters.
+
+     https://en.wikipedia.org/wiki/Ornstein%E2%80%93Uhlenbeck_process
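+
+     Example (illustrative sketch; the price series below is synthetic and the
+     arguments to ``calculate_signals`` are arbitrary):
+
+     >>> import numpy as np
+     >>> from bbstrader.strategies import OrnsteinUhlenbeck
+     >>> prices = 100 + np.cumsum(np.random.normal(0, 1, 500))
+     >>> ou = OrnsteinUhlenbeck(prices, returns=True, timeframe="D1")
+     >>> signal = ou.calculate_signals(rts=ou.returns, p=20, n=10, th=1)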
+     """
+
+     def __init__(
+         self, prices: np.ndarray,
+         returns: bool = True, timeframe: str = "D1"
+     ):
+         """
+         Initializes the OrnsteinUhlenbeck instance.
+
+         Args:
+             prices (np.ndarray) : Historical close prices.
+
+             returns (bool) : Indicates whether to model the percentage
+                 returns computed from `prices` (True) or the raw series (False).
+
+             timeframe (str) : The time frame for the historical prices
+                 (1m, 5m, 15m, 30m, 1h, 4h, D1)
+         """
+         self.prices = prices
+         if returns:
+             series = pd.Series(self.prices)
+             self.returns = series.pct_change().dropna().values
+         else:
+             self.returns = self.prices
+
+         time_frame_mapping = {
+             '1m':  1 / (24 * 60),   # 1 minute intervals
+             '5m':  5 / (24 * 60),   # 5 minute intervals
+             '15m': 15 / (24 * 60),  # 15 minute intervals
+             '30m': 30 / (24 * 60),  # 30 minute intervals
+             '1h':  1 / 24,          # 1 hour intervals
+             '4h':  4 / 24,          # 4 hour intervals
+             'D1':  1,               # Daily intervals
+         }
+         if timeframe not in time_frame_mapping:
+             raise ValueError("Unsupported time frame")
+         self.tf = time_frame_mapping[timeframe]
+
+         params = self.estimate_parameters()
+         self.mu_hat = params[0]     # Mean (μ)
+         self.theta_hat = params[1]  # Drift (θ)
+         self.sigma_hat = params[2]  # Volatility (σ)
+         print(f'Estimated μ: {self.mu_hat}')
+         print(f'Estimated θ: {self.theta_hat}')
+         print(f'Estimated σ: {self.sigma_hat}')
+         time.sleep(1)
+
+     def ornstein_uhlenbeck(self, mu, theta, sigma, dt, X0, n):
+         """
+         Simulates the Ornstein-Uhlenbeck process.
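+
+         The path is generated with an Euler-Maruyama discretization of the OU
+         stochastic differential equation (a sketch of the update performed in
+         the loop below):
+
+             x[t] = x[t-1] + theta * (mu - x[t-1]) * dt + sigma * dW
+
+         where dW is drawn from a normal distribution with mean 0 and variance dt.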
+
+         Args:
+             mu (float): Estimated long-term mean.
+             theta (float): Estimated drift.
+             sigma (float): Estimated volatility.
+             dt (float): Time step.
+             X0 (float): Initial value.
+             n (int): Number of time steps.
+
+         Returns:
+             np.ndarray : Simulated process.
+         """
+         x = np.zeros(n)
+         x[0] = X0
+         for t in range(1, n):
+             dW = np.random.normal(loc=0, scale=np.sqrt(dt))
+             # O-U process differential equation
+             x[t] = x[t-1] + (theta * (mu - x[t-1]) * dt) + (sigma * dW)
+             # dW is a Wiener process increment
+             # (theta * (mu - x[t-1]) * dt) represents the mean-reverting tendency
+             # (sigma * dW) represents the random volatility
+         return x
+
+     def estimate_parameters(self):
+         """
+         Estimates the mean-reverting parameters (μ, θ, σ)
+         using the negative log-likelihood.
+
+         Returns:
+             Tuple: Estimated μ, θ, and σ.
+         """
+         initial_guess = [0, 0.1, np.std(self.returns)]
+         result = minimize(
+             self._neg_log_likelihood, initial_guess, args=(self.returns,)
+         )
+         mu, theta, sigma = result.x
+         return mu, theta, sigma
+
+     def _neg_log_likelihood(self, params, returns):
+         """
+         Calculates the negative log-likelihood for parameter estimation.
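+
+         The quantity minimized is a Gaussian least-squares style objective built
+         from the residuals between the observed returns and a single simulated OU
+         path (see the body below):
+
+             0.5 * sum(residuals**2) / sigma**2 + 0.5 * n * log(2 * pi * sigma**2)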
+
+         Args:
+             params (list): List of parameters [mu, theta, sigma].
+             returns (np.ndarray): Historical returns.
+
+         Returns:
+             float: Negative log-likelihood.
+         """
+         mu, theta, sigma = params
+         dt = self.tf
+         n = len(returns)
+         ou_simulated = self.ornstein_uhlenbeck(
+             mu, theta, sigma, dt, 0, n + 1
+         )
+         residuals = ou_simulated[1:n + 1] - returns
+         neg_ll = 0.5 * np.sum(
+             residuals**2
+         ) / sigma**2 + 0.5 * n * np.log(2 * np.pi * sigma**2)
+         return neg_ll
+
+     def simulate_process(self, rts=None, n=100, p=None):
+         """
+         Simulates the OU process multiple times.
+
+         Args:
+             rts (np.ndarray): Historical returns (defaults to the returns
+                 computed at initialization).
+             n (int): Number of simulations to perform.
+             p (int): Number of time steps per simulation.
+
+         Returns:
+             np.ndarray: 2D array of simulated paths, one row per simulation.
+         """
+         if rts is not None:
+             returns = rts
+         else:
+             returns = self.returns
+         if p is not None:
+             T = p
+         else:
+             T = len(returns)
+         dt = self.tf
+
+         dW_matrix = np.random.normal(
+             loc=0, scale=np.sqrt(dt), size=(n, T)
+         )
+         simulations_matrix = np.zeros((n, T))
+         simulations_matrix[:, 0] = returns[-1]
+
+         for t in range(1, T):
+             simulations_matrix[:, t] = (
+                 simulations_matrix[:, t-1] +
+                 self.theta_hat * (
+                     self.mu_hat - simulations_matrix[:, t-1]) * dt +
+                 self.sigma_hat * dW_matrix[:, t]
+             )
+         return simulations_matrix
+
+     def calculate_signals(self, rts, p, n=10, th=1):
+         """
+         Calculates the trading signal based on the simulated processes.
+
+         Args:
+             rts (np.ndarray): Historical returns.
+             p (int): Number of time steps.
+             n (int): Number of simulations to perform.
+             th (int): Threshold, in multiples of the estimated σ, applied to the
+                 deviation of the simulated mean from the long-term mean μ.
+
+         Returns:
+             str: The trading signal ('LONG', 'SHORT', or None).
+         """
+         simulations_matrix = self.simulate_process(rts=rts, n=n, p=p)
+         last_values = simulations_matrix[:, -1]
+         mean = last_values.mean()
+         deviation = mean - self.mu_hat
+         if deviation < -self.sigma_hat * th:
+             signal = "LONG"
+         elif deviation > self.sigma_hat * th:
+             signal = "SHORT"
+         else:
+             signal = None
+         return signal
@@ -0,0 +1,4 @@
+ """
+ Trading strategies execution module
+ """
+ from bbstrader.trading.run import *