voly 0.0.142__py3-none-any.whl → 0.0.144__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
voly/client.py CHANGED
@@ -343,30 +343,11 @@ class VolyClient:
                        df_hist: pd.DataFrame,
                        domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
                        return_domain: str = 'log_moneyness',
-                       method: str = 'garch',
+                       method: str = 'arch_returns',
+                       model_type: str = 'garch',
+                       distribution: str = 'normal',
                        **kwargs) -> Dict[str, Any]:
-        """
-        Generate historical density surface from historical price data.
-
-        Parameters:
-        model_results: DataFrame with model parameters and maturities
-        df_hist: DataFrame with historical price data
-        domain_params: Tuple of (min, max, num_points) for x-domain
-        return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
-        method: Method to use for HD estimation ('hist_returns' or 'garch')
-        **kwargs: Additional parameters for specific methods:
-            For 'garch' method:
-                n_fits: Number of sliding windows (default: 400)
-                simulations: Number of Monte Carlo simulations (default: 5000)
-                window_length: Length of sliding windows (default: 365)
-                variate_parameters: Whether to vary GARCH parameters (default: True)
-                bandwidth: KDE bandwidth (default: 'silverman')
-            For 'hist_returns' method:
-                bandwidth: KDE bandwidth (default: 'silverman')
 
-        Returns:
-            Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
-        """
         logger.info(f"Calculating historical density surface using {method} method")
 
         return get_hd_surface(
@@ -375,5 +356,7 @@ class VolyClient:
             domain_params=domain_params,
             return_domain=return_domain,
             method=method,
+            model_type=model_type,
+            distribution=distribution,
             **kwargs
         )
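Net effect on the client: the default method string is renamed from 'garch' to 'arch_returns', the docstring moves out of the client, and two new keyword arguments, model_type and distribution, are threaded through to get_hd_surface. A minimal usage sketch of the new signature, with synthetic stand-ins for the inputs (only the parameter names and defaults come from this diff; the DataFrames are illustrative):

    import numpy as np
    import pandas as pd
    from voly.core.hd import get_hd_surface

    # Illustrative price history: hourly candles with a 'close' column.
    df_hist = pd.DataFrame(
        {'close': 30000 * np.exp(np.cumsum(np.random.normal(0, 0.01, 5000)))},
        index=pd.date_range('2023-01-01', periods=5000, freq='h'),
    )
    model_results = ...  # DataFrame with 's', 't', 'r' columns, from voly's fit step

    hd = get_hd_surface(
        model_results=model_results,
        df_hist=df_hist,
        method='arch_returns',     # 0.0.142 called this 'garch'
        model_type='egarch',       # new: 'garch' or 'egarch'
        distribution='studentst',  # new: 'normal', 'studentst', or 'skewstudent'
        window_length='30d',       # now a '7d'/'30d'-style string, no longer an int
    )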
voly/core/hd.py CHANGED
@@ -16,6 +16,7 @@ from voly.formulas import iv, get_domain
 from voly.models import SVIModel
 from voly.core.fit import fit_model
 from arch import arch_model
+from arch.univariate import GARCH, EGARCH
 
 
 @catch_exception
@@ -79,68 +80,151 @@ def get_historical_data(currency, lookback_days, granularity, exchange_name):
 
 
 @catch_exception
-def fit_garch_model(log_returns, n_fits=400, window_length=365):
+def parse_window_length(window_length, df_hist):
     """
-    Fit a GARCH(1,1) model to log returns.
+    Parse window length from string format (e.g., '7d', '30d') to number of data points.
+
+    Parameters:
+    -----------
+    window_length : str
+        Window length in days, formatted as '7d', '30d', etc.
+    df_hist : pd.DataFrame
+        Historical data DataFrame with datetime index.
+
+    Returns:
+    --------
+    int
+        Number of data points corresponding to the window length.
+    """
+    if not window_length.endswith('d'):
+        raise VolyError("window_length should be in format '7d', '30d', etc.")
+
+    # Extract number of days
+    days = int(window_length[:-1])
+
+    # Calculate time delta between consecutive data points
+    if len(df_hist) > 1:
+        avg_delta = (df_hist.index[-1] - df_hist.index[0]) / (len(df_hist) - 1)
+        # Convert to days and get points per day
+        days_per_point = avg_delta.total_seconds() / (24 * 60 * 60)
+        # Calculate number of points for the window
+        n_points = int(days / days_per_point)
+        return max(n_points, 10)  # Ensure at least 10 points
+    else:
+        raise VolyError("Not enough data points in df_hist to calculate granularity.")
+
+
+@catch_exception
+def fit_volatility_model(log_returns, df_hist, model_type='garch', distribution='normal', window_length='30d',
+                         n_fits=400):
+    """
+    Fit a volatility model (GARCH or EGARCH) to log returns.
 
     Args:
         log_returns: Array of log returns
+        df_hist: DataFrame with historical price data
+        model_type: Type of volatility model ('garch' or 'egarch')
+        distribution: Distribution type ('normal', 'studentst', or 'skewstudent')
+        window_length: Length of each window as a string (e.g., '30d')
         n_fits: Number of sliding windows
-        window_length: Length of each window
 
     Returns:
-        Dict with GARCH parameters and processes
+        Dict with model parameters and processes
     """
+    # Parse window length
+    window_points = parse_window_length(window_length, df_hist)
 
-    if len(log_returns) < window_length + n_fits:
-        raise VolyError(f"Not enough data points. Need at least {window_length + n_fits}, got {len(log_returns)}")
+    if len(log_returns) < window_points + n_fits:
+        raise VolyError(f"Not enough data points. Need at least {window_points + n_fits}, got {len(log_returns)}")
 
     # Adjust window sizes if necessary
     n_fits = min(n_fits, len(log_returns) // 3)
-    window_length = min(window_length, len(log_returns) // 3)
+    window_points = min(window_points, len(log_returns) // 3)
 
-    start = window_length + n_fits
+    start = window_points + n_fits
     end = n_fits
 
-    parameters = np.zeros((n_fits, 4))  # [mu, omega, alpha, beta]
+    # Different number of parameters based on model type and distribution
+    if model_type.lower() == 'garch':
+        if distribution.lower() == 'normal':
+            n_params = 4  # mu, omega, alpha, beta
+        elif distribution.lower() == 'studentst':
+            n_params = 5  # mu, omega, alpha, beta, nu
+        else:  # skewstudent
+            n_params = 6  # mu, omega, alpha, beta, nu, lambda (skew)
+    else:  # egarch
+        if distribution.lower() == 'normal':
+            n_params = 5  # mu, omega, alpha, gamma, beta
+        elif distribution.lower() == 'studentst':
+            n_params = 6  # mu, omega, alpha, gamma, beta, nu
+        else:  # skewstudent
+            n_params = 7  # mu, omega, alpha, gamma, beta, nu, lambda (skew)
+
+    parameters = np.zeros((n_fits, n_params))
     z_process = []
 
-    logger.info(f"Fitting GARCH model with {n_fits} windows...")
+    logger.info(f"Fitting {model_type.upper()} model with {distribution} distribution using {n_fits} windows...")
 
     for i in range(n_fits):
         window = log_returns[end - i - 1:start - i - 1]
         data = window - np.mean(window)
 
-        model = arch_model(data, vol='GARCH', p=1, q=1)
         try:
-            GARCH_fit = model.fit(disp='off')
-
-            mu, omega, alpha, beta = [
-                GARCH_fit.params["mu"],
-                GARCH_fit.params["omega"],
-                GARCH_fit.params["alpha[1]"],
-                GARCH_fit.params["beta[1]"],
-            ]
-            parameters[i, :] = [mu, omega, alpha, beta]
-
-            # Calculate sigma2 and innovations for last observation
-            if i == 0:
-                sigma2_tm1 = omega / (1 - alpha - beta)
-            else:
-                e_tm1 = data.tolist()[-2]
-                sigma2_tm1 = omega + alpha * e_tm1 ** 2 + beta * sigma2_tm1
-
-            e_t = data.tolist()[-1]
-            sigma2_t = omega + alpha * data.tolist()[-2] ** 2 + beta * sigma2_tm1
-            z_t = e_t / np.sqrt(sigma2_t)
+            # Configure model based on type and distribution
+            if model_type.lower() == 'garch':
+                model = arch_model(data, vol='GARCH', p=1, q=1, dist=distribution.lower())
+            else:  # egarch
+                model = arch_model(data, vol='EGARCH', p=1, o=1, q=1, dist=distribution.lower())
+
+            fit_result = model.fit(disp='off')
+
+            # Extract parameters based on model type and distribution
+            params_dict = fit_result.params.to_dict()
+
+            if model_type.lower() == 'garch':
+                mu = params_dict.get("mu", 0)
+                omega = params_dict.get("omega", 0)
+                alpha = params_dict.get("alpha[1]", 0)
+                beta = params_dict.get("beta[1]", 0)
+
+                if distribution.lower() == 'normal':
+                    parameters[i, :] = [mu, omega, alpha, beta]
+                elif distribution.lower() == 'studentst':
+                    nu = params_dict.get("nu", 0)
+                    parameters[i, :] = [mu, omega, alpha, beta, nu]
+                else:  # skewstudent
+                    nu = params_dict.get("nu", 0)
+                    lam = params_dict.get("lambda", 0)
+                    parameters[i, :] = [mu, omega, alpha, beta, nu, lam]
+            else:  # egarch
+                mu = params_dict.get("mu", 0)
+                omega = params_dict.get("omega", 0)
+                alpha = params_dict.get("alpha[1]", 0)
+                gamma = params_dict.get("gamma[1]", 0)
+                beta = params_dict.get("beta[1]", 0)
+
+                if distribution.lower() == 'normal':
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta]
+                elif distribution.lower() == 'studentst':
+                    nu = params_dict.get("nu", 0)
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta, nu]
+                else:  # skewstudent
+                    nu = params_dict.get("nu", 0)
+                    lam = params_dict.get("lambda", 0)
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta, nu, lam]
+
+            # Get last innovation
+            residuals = fit_result.resid
+            conditional_vol = fit_result.conditional_volatility
+            z_t = residuals[-1] / conditional_vol[-1]
             z_process.append(z_t)
 
         except Exception as e:
-            logger.warning(f"GARCH fit failed for window {i}: {str(e)}")
+            logger.warning(f"Model fit failed for window {i}: {str(e)}")
 
     # Clean up any failed fits
     if len(z_process) < n_fits / 2:
-        raise VolyError("Too many GARCH fits failed. Check your data.")
+        raise VolyError("Too many model fits failed. Check your data.")
 
     avg_params = np.mean(parameters, axis=0)
     std_params = np.std(parameters, axis=0)
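The new parse_window_length helper converts the day-count string into a point count by inferring granularity from the index spacing, so the n_fits sliding windows work at any candle frequency. A self-contained sketch of the same arithmetic, assuming hourly data:

    import pandas as pd

    idx = pd.date_range('2024-01-01', periods=2000, freq='h')
    avg_delta = (idx[-1] - idx[0]) / (len(idx) - 1)               # 1 hour between points
    days_per_point = avg_delta.total_seconds() / (24 * 60 * 60)   # = 1/24 day
    n_points = max(int(30 / days_per_point), 10)                  # '30d' -> 720 hourly points
    print(n_points)  # 720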
@@ -149,17 +233,38 @@ def fit_garch_model(log_returns, n_fits=400, window_length=365):
         'parameters': parameters,
         'avg_params': avg_params,
         'std_params': std_params,
-        'z_process': np.array(z_process)
+        'z_process': np.array(z_process),
+        'model_type': model_type,
+        'distribution': distribution,
+        'param_names': get_param_names(model_type, distribution)
     }
 
 
+def get_param_names(model_type, distribution):
+    """Get parameter names based on model type and distribution."""
+    if model_type.lower() == 'garch':
+        if distribution.lower() == 'normal':
+            return ['mu', 'omega', 'alpha', 'beta']
+        elif distribution.lower() == 'studentst':
+            return ['mu', 'omega', 'alpha', 'beta', 'nu']
+        else:  # skewstudent
+            return ['mu', 'omega', 'alpha', 'beta', 'nu', 'lambda']
+    else:  # egarch
+        if distribution.lower() == 'normal':
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta']
+        elif distribution.lower() == 'studentst':
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta', 'nu']
+        else:  # skewstudent
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta', 'nu', 'lambda']
+
+
 @catch_exception
-def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_parameters=True):
+def simulate_volatility_paths(vol_model, horizon, simulations=5000, variate_parameters=True):
     """
-    Simulate future paths using a fitted GARCH model.
+    Simulate future paths using a fitted volatility model.
 
     Args:
-        garch_model: Dict with GARCH model parameters
+        vol_model: Dict with volatility model parameters
         horizon: Number of steps to simulate
         simulations: Number of paths to simulate
         variate_parameters: Whether to vary parameters between simulations
@@ -167,21 +272,34 @@ def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_paramet
     Returns:
         Array of simulated log returns
     """
-    parameters = garch_model['parameters']
-    z_process = garch_model['z_process']
+    parameters = vol_model['parameters']
+    z_process = vol_model['z_process']
+    model_type = vol_model['model_type']
+    distribution = vol_model['distribution']
+    param_names = vol_model['param_names']
 
     # Use mean parameters as starting point
-    pars = garch_model['avg_params'].copy()  # [mu, omega, alpha, beta]
-    bounds = garch_model['std_params'].copy()
-
-    mu, omega, alpha, beta = pars
-    logger.info(f"GARCH parameters: mu={mu:.6f}, omega={omega:.6f}, alpha={alpha:.6f}, beta={beta:.6f}")
+    pars = vol_model['avg_params'].copy()
+    bounds = vol_model['std_params'].copy()
+
+    # Log parameters
+    param_str = ", ".join([f"{name}={par:.6f}" for name, par in zip(param_names, pars)])
+    logger.info(f"{model_type.upper()} parameters: {param_str}")
+
+    # Create KDE for innovations based on distribution
+    if distribution.lower() == 'normal':
+        # Use standard normal for normal distribution
+        def sample_innovation(size=1):
+            return np.random.normal(0, 1, size=size)
+    else:
+        # Use KDE for non-normal distributions to capture empirical distribution
+        kde = stats.gaussian_kde(z_process, bw_method='silverman')  # original code doesn't include bw_method
+        z_range = np.linspace(min(z_process), max(z_process), 1000)
+        z_prob = kde(z_range)
+        z_prob = z_prob / np.sum(z_prob)
 
-    # Create KDE for innovations
-    kde = stats.gaussian_kde(z_process)
-    z_range = np.linspace(min(z_process), max(z_process), 1000)
-    z_prob = kde(z_range)
-    z_prob = z_prob / np.sum(z_prob)
+        def sample_innovation(size=1):
+            return np.random.choice(z_range, size=size, p=z_prob)
 
     # Simulate paths
     simulated_returns = np.zeros(simulations)
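For the non-normal distributions the simulator no longer draws innovations from a parametric law; it discretizes a Gaussian KDE of the standardized residuals and samples from that grid. A condensed sketch of the scheme (the synthetic residuals below stand in for the fitted z_process):

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    z_process = rng.standard_t(df=5, size=400)   # stand-in for model residuals

    kde = stats.gaussian_kde(z_process, bw_method='silverman')
    z_range = np.linspace(z_process.min(), z_process.max(), 1000)
    z_prob = kde(z_range)
    z_prob = z_prob / np.sum(z_prob)             # normalize to a discrete pmf

    draws = np.random.choice(z_range, size=5, p=z_prob)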
@@ -196,24 +314,63 @@ def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_paramet
             for j, (par, bound) in enumerate(zip(pars, bounds)):
                 var = bound ** 2 / len(parameters)
                 new_par = np.random.normal(par, var)
-                if j >= 1 and new_par <= 0:  # Ensure omega, alpha, beta are positive
+                # Ensure omega is positive, betas are between 0 and 1, etc.
+                if j >= 1 and new_par <= 0:
                     new_par = 0.01
                 new_pars.append(new_par)
-            mu, omega, alpha, beta = new_pars
+            sim_pars = new_pars
+        else:
+            sim_pars = pars.copy()
+
+        # Initialize variables based on model type
+        if model_type.lower() == 'garch':
+            if distribution.lower() == 'normal':
+                mu, omega, alpha, beta = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+            elif distribution.lower() == 'studentst':
+                mu, omega, alpha, beta, nu = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+            else:  # skewstudent
+                mu, omega, alpha, beta, nu, lam = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+        else:  # egarch
+            if distribution.lower() == 'normal':
+                mu, omega, alpha, gamma, beta = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
+            elif distribution.lower() == 'studentst':
+                mu, omega, alpha, gamma, beta, nu = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
+            else:  # skewstudent
+                mu, omega, alpha, gamma, beta, nu, lam = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
 
-        # Initial values
-        sigma2 = omega / (1 - alpha - beta)
         returns_sum = 0
 
         # Simulate path
         for _ in range(horizon):
-            # Sample from innovation distribution
-            z = np.random.choice(z_range, p=z_prob)
-
-            # Calculate return and update volatility
-            e = z * np.sqrt(sigma2)
-            returns_sum += e + mu
-            sigma2 = omega + alpha * e ** 2 + beta * sigma2
+            # Sample innovation
+            z = sample_innovation()
+
+            # Update volatility and returns based on model type
+            if model_type.lower() == 'garch':
+                # Calculate return
+                e = z * np.sqrt(sigma2)
+                returns_sum += e + mu
+
+                # Update GARCH volatility
+                sigma2 = omega + alpha * e ** 2 + beta * sigma2
+            else:  # egarch
+                # Calculate return
+                e = z * np.sqrt(sigma2)
+                returns_sum += e + mu
+
+                # Update EGARCH volatility
+                abs_z = abs(z)
+                log_sigma2 = omega + beta * log_sigma2 + alpha * (abs_z - np.sqrt(2 / np.pi)) + gamma * z
+                sigma2 = np.exp(log_sigma2)
 
         simulated_returns[i] = returns_sum
 
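Within each simulated path, variance is seeded at its unconditional level and stepped forward once per day of horizon: GARCH(1,1) updates sigma2 directly, while EGARCH(1,1) updates log-variance with the |z| - sqrt(2/pi) asymmetry term and the leverage coefficient gamma. Both one-step updates in isolation, with illustrative parameter values:

    import numpy as np

    mu, omega, alpha, beta, gamma = 0.0, 0.05, 0.08, 0.90, -0.05
    z = np.random.normal()                  # sampled innovation

    # GARCH(1,1): sigma2_t = omega + alpha * e_{t-1}**2 + beta * sigma2_{t-1}
    sigma2 = omega / (1 - alpha - beta)     # unconditional variance as seed
    e = z * np.sqrt(sigma2)
    sigma2 = omega + alpha * e ** 2 + beta * sigma2

    # EGARCH(1,1): log sigma2_t = omega + beta * log sigma2_{t-1}
    #              + alpha * (|z| - sqrt(2/pi)) + gamma * z
    log_sigma2 = omega / (1 - beta)         # unconditional log-variance as seed
    e = z * np.sqrt(np.exp(log_sigma2))
    log_sigma2 = omega + beta * log_sigma2 + alpha * (abs(z) - np.sqrt(2 / np.pi)) + gamma * z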
@@ -224,7 +381,9 @@ def get_hd_surface(model_results: pd.DataFrame,
                    df_hist: pd.DataFrame,
                    domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
                    return_domain: str = 'log_moneyness',
-                   method: str = 'garch',
+                   method: str = 'arch_returns',
+                   model_type: str = 'garch',
+                   distribution: str = 'normal',
                    **kwargs) -> Dict[str, Any]:
     """
     Generate historical density surface from historical price data.
@@ -234,13 +393,15 @@ def get_hd_surface(model_results: pd.DataFrame,
         df_hist: DataFrame with historical price data
         domain_params: Tuple of (min, max, num_points) for x-domain
         return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
-        method: Method to use for HD estimation ('hist_returns' or 'garch')
+        method: Method to use for HD estimation ('hist_returns' or 'arch_returns')
+        model_type: Type of volatility model to use ('garch' or 'egarch')
+        distribution: Distribution to use ('normal', 'studentst', or 'skewstudent')
         **kwargs: Additional parameters for specific methods:
-            For 'garch' method:
+            For volatility models ('garch'/'egarch' method):
                 n_fits: Number of sliding windows (default: 400)
                 simulations: Number of Monte Carlo simulations (default: 5000)
-                window_length: Length of sliding windows (default: 365)
-                variate_parameters: Whether to vary GARCH parameters (default: True)
+                window_length: Length of sliding windows as string (default: '30d')
+                variate_parameters: Whether to vary parameters (default: True)
                 bandwidth: KDE bandwidth (default: 'silverman')
             For 'hist_returns' method:
                 bandwidth: KDE bandwidth (default: 'silverman')
@@ -248,7 +409,6 @@ def get_hd_surface(model_results: pd.DataFrame,
     Returns:
         Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
     """
-
     # Check if required columns are present
     required_columns = ['s', 't', 'r']
     missing_columns = [col for col in required_columns if col not in model_results.columns]
@@ -263,28 +423,46 @@ def get_hd_surface(model_results: pd.DataFrame,
     else:
         raise VolyError("Cannot determine granularity from df_hist.")
 
+    # Validate model_type and distribution
+    valid_model_types = ['garch', 'egarch']
+    valid_distributions = ['normal', 'studentst', 'skewstudent']
+
+    if model_type.lower() not in valid_model_types:
+        raise VolyError(f"Invalid model_type: {model_type}. Must be one of {valid_model_types}")
+
+    if distribution.lower() not in valid_distributions:
+        raise VolyError(f"Invalid distribution: {distribution}. Must be one of {valid_distributions}")
+
     # Get method-specific parameters
-    if method == 'garch':
+    if method == 'arch_returns':
         n_fits = kwargs.get('n_fits', 400)
         simulations = kwargs.get('simulations', 5000)
-        window_length = kwargs.get('window_length', 365)
+        window_length = kwargs.get('window_length', '30d')
         variate_parameters = kwargs.get('variate_parameters', True)
        bandwidth = kwargs.get('bandwidth', 'silverman')
-        logger.info(f"Using GARCH method with {n_fits} fits, {simulations} simulations")
+        logger.info(
+            f"Using {model_type.upper()} method with {distribution} distribution, {n_fits} fits, {simulations} simulations")
     elif method == 'hist_returns':
         bandwidth = kwargs.get('bandwidth', 'silverman')
         logger.info(f"Using returns-based KDE method with bandwidth {bandwidth}")
     else:
-        raise VolyError(f"Unknown method: {method}. Use 'hist_returns' or 'garch'.")
+        raise VolyError(f"Unknown method: {method}. Use 'hist_returns', 'arch_returns'.")
 
     # Calculate log returns from price history
     log_returns = np.log(df_hist['close'] / df_hist['close'].shift(1)) * 100
     log_returns = log_returns.dropna().values
 
-    # Fit GARCH model once if using garch method
-    garch_model = None
-    if method == 'garch':
-        garch_model = fit_garch_model(log_returns, n_fits, window_length)
+    # Fit volatility model once if using garch/egarch method
+    vol_model = None
+    if method == 'arch_returns':
+        vol_model = fit_volatility_model(
+            log_returns,
+            df_hist,
+            model_type=model_type,
+            distribution=distribution,
+            window_length=window_length,
+            n_fits=n_fits
+        )
 
     pdf_surface = {}
     cdf_surface = {}
@@ -311,7 +489,7 @@ def get_hd_surface(model_results: pd.DataFrame,
         logger.info(f"Processing HD for maturity {i} (t={t:.4f} years, {tau_days_float:.2f} days)")
 
         if method == 'hist_returns':
-            # Standard returns-based method (your existing implementation)
+            # Standard returns-based method
             # Filter historical data for this maturity's lookback period
             start_date = pd.Timestamp.now() - pd.Timedelta(days=int(t * 365.25))
             maturity_hist = df_hist[df_hist.index >= start_date].copy()
@@ -341,16 +519,16 @@ def get_hd_surface(model_results: pd.DataFrame,
             f = stats.gaussian_kde(adj_returns, bw_method=bandwidth)
             pdf_values = f(LM)
 
-        elif method == 'garch':
-            # GARCH-based method
-            if garch_model is None:
-                logger.warning(f"GARCH model fitting failed, skipping maturity {i}")
+        elif method == 'arch_returns':
+            # Volatility model-based method
+            if vol_model is None:
+                logger.warning(f"Volatility model fitting failed, skipping maturity {i}")
                 continue
 
-            # Simulate paths with the GARCH model
+            # Simulate paths with the volatility model
             horizon = max(1, int(tau_days_float))
-            simulated_returns, simulated_mu = simulate_garch_paths(
-                garch_model,
+            simulated_returns, simulated_mu = simulate_volatility_paths(
+                vol_model,
                 horizon,
                 simulations,
                 variate_parameters
@@ -377,15 +555,16 @@ def get_hd_surface(model_results: pd.DataFrame,
             kde = stats.gaussian_kde(simulated_moneyness, bw_method=bandwidth)
             pdf_values = kde(M)
 
-            # Include GARCH params in moments
-            avg_params = garch_model['avg_params']
-            model_params = {
-                'mu': avg_params[0],
-                'omega': avg_params[1],
-                'alpha': avg_params[2],
-                'beta': avg_params[3],
-                'persistence': avg_params[2] + avg_params[3]
-            }
+            # Include volatility model params in moments
+            avg_params = vol_model['avg_params']
+            param_names = vol_model['param_names']
+            model_params = {name: value for name, value in zip(param_names, avg_params)}
+            model_params['model_type'] = model_type
+            model_params['distribution'] = distribution
+
+            # Add persistence for GARCH-type models
+            if model_type.lower() == 'garch':
+                model_params['persistence'] = model_params.get('alpha', 0) + model_params.get('beta', 0)
         else:
             continue  # Skip this maturity if method is invalid
 
@@ -406,7 +585,7 @@ def get_hd_surface(model_results: pd.DataFrame,
             pdf_m = pdf_lm / M
             pdf_k = pdf_lm / K
             pdf_r = pdf_lm / (1 + R)
-        else:  # 'garch'
+        else:  # volatility models
             pdf_m = pdf_values
             pdf_lm = pdf_m * M
             pdf_k = pdf_lm / K
@@ -420,19 +599,19 @@ def get_hd_surface(model_results: pd.DataFrame,
         if return_domain == 'log_moneyness':
             x = LM
             pdf = pdf_lm
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'moneyness':
             x = M
             pdf = pdf_m
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'returns':
             x = R
             pdf = pdf_r
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'strikes':
             x = K
             pdf = pdf_k
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         else:
             raise VolyError(f"Unsupported return_domain: {return_domain}")
 
@@ -445,7 +624,8 @@ def get_hd_surface(model_results: pd.DataFrame,
     # Create DataFrame with moments
     moments = pd.DataFrame(all_moments).T
 
-    logger.info(f"Historical density calculation complete using {method} method")
+    logger.info(
+        f"Historical density calculation complete using {method} method with {model_type} model and {distribution} distribution")
 
     return {
         'pdf_surface': pdf_surface,
{voly-0.0.142.dist-info → voly-0.0.144.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: voly
-Version: 0.0.142
+Version: 0.0.144
 Summary: Options & volatility research package
 Author-email: Manu de Cara <manu.de.cara@gmail.com>
 License: MIT
{voly-0.0.142.dist-info → voly-0.0.144.dist-info}/RECORD RENAMED
@@ -1,5 +1,5 @@
 voly/__init__.py,sha256=8xyDk7rFCn_MOD5hxuv5cxxKZvBVRiSIM7TgaMPpwpw,211
-voly/client.py,sha256=Lj3YY6P1VBQD5C_psPh2pSxCMVvFjRBybrMrs4e9qXI,14249
+voly/client.py,sha256=UzgvwLf5SsNkvv3-Zdzej5nJ4pxoS-UCP8LjFMh3LFo,13229
 voly/exceptions.py,sha256=PBsbn1vNMvKcCJwwJ4lBO6glD85jo1h2qiEmD7ArAjs,92
 voly/formulas.py,sha256=G_soRiPwQlHy6milOAj6TdmBWr-fNZpMvm0joXAMZ90,10767
 voly/models.py,sha256=o-pHujGfr5Gn8ItckMzLI4Q8yaX9FQaV8UjCxv2zgTY,3364
@@ -7,13 +7,13 @@ voly/core/__init__.py,sha256=bu6fS2I1Pj9fPPnl-zY3L7NqrZSY5Zy6NY2uMUvdhKs,183
 voly/core/charts.py,sha256=E21OZB5lTY4YL2flgaFJ6s5g3_ExtAQT2zryZZxLPyM,12735
 voly/core/data.py,sha256=pDeuYhP0GX4RbtlqByvsE3rfHcIkix0BU5MLW8sKIeI,8935
 voly/core/fit.py,sha256=Tb9eeG7e_2dQTcqt6aqEwFrZdy6jR9rSNqe6tzOdVhQ,9245
-voly/core/hd.py,sha256=K2X0isAchumuRPcc5RSEkMOR5sOeb_I3twwqAZYYL1A,16809
+voly/core/hd.py,sha256=fIV97Xe5P-Aj_oGFDla5dZxAvfdk8Cjnb7DRCWnbSOY,25251
 voly/core/interpolate.py,sha256=JkK172-FXyhesW3hY4pEeuJWG3Bugq7QZXbeKoRpLuo,5305
 voly/core/rnd.py,sha256=masjK4WrVb925gPGboD8iDAaEN7FY7S4OHYthHPtA3o,13613
 voly/utils/__init__.py,sha256=E05mWatyC-PDOsCxQV1p5Xi1IgpOomxrNURyCx_gB-w,200
 voly/utils/logger.py,sha256=4-_2bVJmq17Q0d7Rd2mPg1AeR8gxv6EPvcmBDMFWcSM,1744
-voly-0.0.142.dist-info/licenses/LICENSE,sha256=wcHIVbE12jfcBOai_wqBKY6xvNQU5E909xL1zZNq_2Q,1065
-voly-0.0.142.dist-info/METADATA,sha256=BLz0X2tkaiZqfL5cYXcihLfbrZra3AQRrRXPXrNkS5w,4115
-voly-0.0.142.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
-voly-0.0.142.dist-info/top_level.txt,sha256=ZfLw2sSxF-LrKAkgGjOmeTcw6_gD-30zvtdEY5W4B7c,5
-voly-0.0.142.dist-info/RECORD,,
+voly-0.0.144.dist-info/licenses/LICENSE,sha256=wcHIVbE12jfcBOai_wqBKY6xvNQU5E909xL1zZNq_2Q,1065
+voly-0.0.144.dist-info/METADATA,sha256=tU78Lvzik5rL9RhVKuW8jG7nMCqG2R5JN14-yOjY07s,4115
+voly-0.0.144.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
+voly-0.0.144.dist-info/top_level.txt,sha256=ZfLw2sSxF-LrKAkgGjOmeTcw6_gD-30zvtdEY5W4B7c,5
+voly-0.0.144.dist-info/RECORD,,
Remaining files (LICENSE, WHEEL, top_level.txt under the renamed dist-info directory) have identical hashes and are unchanged.