voly 0.0.142__py3-none-any.whl → 0.0.143__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
voly/client.py CHANGED
@@ -343,30 +343,11 @@ class VolyClient:
                    df_hist: pd.DataFrame,
                    domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
                    return_domain: str = 'log_moneyness',
-                   method: str = 'garch',
+                   method: str = 'arch_returns',
+                   model_type: str = 'garch',
+                   distribution: str = 'normal',
                    **kwargs) -> Dict[str, Any]:
-        """
-        Generate historical density surface from historical price data.
-
-        Parameters:
-        model_results: DataFrame with model parameters and maturities
-        df_hist: DataFrame with historical price data
-        domain_params: Tuple of (min, max, num_points) for x-domain
-        return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
-        method: Method to use for HD estimation ('hist_returns' or 'garch')
-        **kwargs: Additional parameters for specific methods:
-            For 'garch' method:
-                n_fits: Number of sliding windows (default: 400)
-                simulations: Number of Monte Carlo simulations (default: 5000)
-                window_length: Length of sliding windows (default: 365)
-                variate_parameters: Whether to vary GARCH parameters (default: True)
-                bandwidth: KDE bandwidth (default: 'silverman')
-            For 'hist_returns' method:
-                bandwidth: KDE bandwidth (default: 'silverman')
 
-        Returns:
-            Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
-        """
 
         logger.info(f"Calculating historical density surface using {method} method")
 
         return get_hd_surface(
@@ -375,5 +356,7 @@ class VolyClient:
             domain_params=domain_params,
             return_domain=return_domain,
             method=method,
+            model_type=model_type,
+            distribution=distribution,
             **kwargs
         )
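For orientation, here is a minimal sketch of calling the updated API. The inputs below are stand-ins (in real use model_results comes from voly's fit_model and df_hist from an exchange feed, and the full surface computation may need model columns beyond the 's'/'t'/'r' that get_hd_surface checks for); only the keyword names and defaults are taken from this diff.

    import numpy as np
    import pandas as pd
    from voly.core.hd import get_hd_surface

    # Stand-in price history: ~120 days of synthetic hourly closes
    idx = pd.date_range('2024-01-01', periods=24 * 120, freq='h')
    close = 50_000 * np.exp(np.cumsum(np.random.normal(0, 0.005, len(idx))))
    df_hist = pd.DataFrame({'close': close}, index=idx)

    # Stand-in model_results carrying the 's', 't', 'r' columns get_hd_surface requires
    model_results = pd.DataFrame({'s': [50_000.0], 't': [30 / 365.25], 'r': [0.0]},
                                 index=['30d'])

    hd = get_hd_surface(
        model_results=model_results,
        df_hist=df_hist,
        method='arch_returns',     # renamed from 'garch' in 0.0.142
        model_type='egarch',       # new in 0.0.143: 'garch' or 'egarch'
        distribution='studentst',  # new in 0.0.143: 'normal', 'studentst', 'skewstudent'
        window_length='30d',       # now a string; previously an integer point count
    )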
voly/core/hd.py CHANGED
@@ -16,6 +16,7 @@ from voly.formulas import iv, get_domain
 from voly.models import SVIModel
 from voly.core.fit import fit_model
 from arch import arch_model
+from arch.univariate import GARCH, EGARCH
 
 
 @catch_exception
@@ -79,68 +80,152 @@ def get_historical_data(currency, lookback_days, granularity, exchange_name):
 
 
 @catch_exception
-def fit_garch_model(log_returns, n_fits=400, window_length=365):
+def parse_window_length(window_length, df_hist):
     """
-    Fit a GARCH(1,1) model to log returns.
+    Parse window length from string format (e.g., '7d', '30d') to number of data points.
+
+    Parameters:
+    -----------
+    window_length : str
+        Window length in days, formatted as '7d', '30d', etc.
+    df_hist : pd.DataFrame
+        Historical data DataFrame with datetime index.
+
+    Returns:
+    --------
+    int
+        Number of data points corresponding to the window length.
+    """
+    if not window_length.endswith('d'):
+        raise VolyError("window_length should be in format '7d', '30d', etc.")
+
+    # Extract number of days
+    days = int(window_length[:-1])
+
+    # Calculate time delta between consecutive data points
+    if len(df_hist) > 1:
+        avg_delta = (df_hist.index[-1] - df_hist.index[0]) / (len(df_hist) - 1)
+        # Convert to days and get points per day
+        days_per_point = avg_delta.total_seconds() / (24 * 60 * 60)
+        # Calculate number of points for the window
+        n_points = int(days / days_per_point)
+        return max(n_points, 10)  # Ensure at least 10 points
+    else:
+        raise VolyError("Not enough data points in df_hist to calculate granularity.")
+
+
+@catch_exception
+def fit_volatility_model(log_returns, df_hist, model_type='garch', distribution='normal', window_length='30d',
+                         n_fits=400):
+    """
+    Fit a volatility model (GARCH or EGARCH) to log returns.
 
     Args:
         log_returns: Array of log returns
+        df_hist: DataFrame with historical price data
+        model_type: Type of volatility model ('garch' or 'egarch')
+        distribution: Distribution type ('normal', 'studentst', or 'skewstudent')
+        window_length: Length of each window as a string (e.g., '30d')
         n_fits: Number of sliding windows
-        window_length: Length of each window
 
     Returns:
-        Dict with GARCH parameters and processes
+        Dict with model parameters and processes
     """
+    # Parse window length
+    window_points = parse_window_length(window_length, df_hist)
 
-    if len(log_returns) < window_length + n_fits:
-        raise VolyError(f"Not enough data points. Need at least {window_length + n_fits}, got {len(log_returns)}")
+    if len(log_returns) < window_points + n_fits:
+        raise VolyError(f"Not enough data points. Need at least {window_points + n_fits}, got {len(log_returns)}")
 
     # Adjust window sizes if necessary
     n_fits = min(n_fits, len(log_returns) // 3)
-    window_length = min(window_length, len(log_returns) // 3)
+    window_points = min(window_points, len(log_returns) // 3)
 
-    start = window_length + n_fits
+    start = window_points + n_fits
     end = n_fits
 
-    parameters = np.zeros((n_fits, 4))  # [mu, omega, alpha, beta]
+    # Different number of parameters based on model type and distribution
+    if model_type.lower() == 'garch':
+        if distribution.lower() == 'normal':
+            n_params = 4  # mu, omega, alpha, beta
+        elif distribution.lower() == 'studentst':
+            n_params = 5  # mu, omega, alpha, beta, nu
+        else:  # skewstudent
+            n_params = 6  # mu, omega, alpha, beta, nu, lambda (skew)
+    else:  # egarch
+        if distribution.lower() == 'normal':
+            n_params = 5  # mu, omega, alpha, gamma, beta
+        elif distribution.lower() == 'studentst':
+            n_params = 6  # mu, omega, alpha, gamma, beta, nu
+        else:  # skewstudent
+            n_params = 7  # mu, omega, alpha, gamma, beta, nu, lambda (skew)
+
+    parameters = np.zeros((n_fits, n_params))
     z_process = []
 
-    logger.info(f"Fitting GARCH model with {n_fits} windows...")
+    logger.info(f"Fitting {model_type.upper()} model with {distribution} distribution using {n_fits} windows...")
 
     for i in range(n_fits):
         window = log_returns[end - i - 1:start - i - 1]
         data = window - np.mean(window)
 
-        model = arch_model(data, vol='GARCH', p=1, q=1)
         try:
-            GARCH_fit = model.fit(disp='off')
-
-            mu, omega, alpha, beta = [
-                GARCH_fit.params["mu"],
-                GARCH_fit.params["omega"],
-                GARCH_fit.params["alpha[1]"],
-                GARCH_fit.params["beta[1]"],
-            ]
-            parameters[i, :] = [mu, omega, alpha, beta]
-
-            # Calculate sigma2 and innovations for last observation
-            if i == 0:
-                sigma2_tm1 = omega / (1 - alpha - beta)
-            else:
-                e_tm1 = data.tolist()[-2]
-                sigma2_tm1 = omega + alpha * e_tm1 ** 2 + beta * sigma2_tm1
-
-            e_t = data.tolist()[-1]
-            sigma2_t = omega + alpha * data.tolist()[-2] ** 2 + beta * sigma2_tm1
-            z_t = e_t / np.sqrt(sigma2_t)
+            # Configure model based on type and distribution
+            if model_type.lower() == 'garch':
+                vol_model = GARCH(p=1, q=1)
+            else:  # egarch
+                vol_model = EGARCH(p=1, o=1, q=1)
+
+            model = arch_model(data, vol=vol_model, dist=distribution.lower())
+            fit_result = model.fit(disp='off')
+
+            # Extract parameters based on model type and distribution
+            params_dict = fit_result.params.to_dict()
+
+            if model_type.lower() == 'garch':
+                mu = params_dict.get("mu", 0)
+                omega = params_dict.get("omega", 0)
+                alpha = params_dict.get("alpha[1]", 0)
+                beta = params_dict.get("beta[1]", 0)
+
+                if distribution.lower() == 'normal':
+                    parameters[i, :] = [mu, omega, alpha, beta]
+                elif distribution.lower() == 'studentst':
+                    nu = params_dict.get("nu", 0)
+                    parameters[i, :] = [mu, omega, alpha, beta, nu]
+                else:  # skewstudent
+                    nu = params_dict.get("nu", 0)
+                    lam = params_dict.get("lambda", 0)
+                    parameters[i, :] = [mu, omega, alpha, beta, nu, lam]
+            else:  # egarch
+                mu = params_dict.get("mu", 0)
+                omega = params_dict.get("omega", 0)
+                alpha = params_dict.get("alpha[1]", 0)
+                gamma = params_dict.get("gamma[1]", 0)
+                beta = params_dict.get("beta[1]", 0)
+
+                if distribution.lower() == 'normal':
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta]
+                elif distribution.lower() == 'studentst':
+                    nu = params_dict.get("nu", 0)
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta, nu]
+                else:  # skewstudent
+                    nu = params_dict.get("nu", 0)
+                    lam = params_dict.get("lambda", 0)
+                    parameters[i, :] = [mu, omega, alpha, gamma, beta, nu, lam]
+
+            # Get last innovation
+            residuals = fit_result.resid
+            conditional_vol = fit_result.conditional_volatility
+            z_t = residuals[-1] / conditional_vol[-1]
             z_process.append(z_t)
 
         except Exception as e:
-            logger.warning(f"GARCH fit failed for window {i}: {str(e)}")
+            logger.warning(f"Model fit failed for window {i}: {str(e)}")
 
     # Clean up any failed fits
     if len(z_process) < n_fits / 2:
-        raise VolyError("Too many GARCH fits failed. Check your data.")
+        raise VolyError("Too many model fits failed. Check your data.")
 
     avg_params = np.mean(parameters, axis=0)
     std_params = np.std(parameters, axis=0)
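The window arithmetic above converts a string like '30d' into a point count from the average spacing of df_hist's datetime index. A standalone worked example of the same arithmetic, on a synthetic hourly index (illustrative values only):

    import pandas as pd

    idx = pd.date_range('2024-01-01', periods=24 * 90, freq='h')  # 90 days of hourly bars
    avg_delta = (idx[-1] - idx[0]) / (len(idx) - 1)               # exactly 1 hour here
    days_per_point = avg_delta.total_seconds() / (24 * 60 * 60)   # 1/24 of a day
    n_points = int(30 / days_per_point)                           # '30d' -> 720 points
    print(n_points)  # 720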
@@ -149,17 +234,38 @@ def fit_garch_model(log_returns, n_fits=400, window_length=365):
         'parameters': parameters,
         'avg_params': avg_params,
         'std_params': std_params,
-        'z_process': np.array(z_process)
+        'z_process': np.array(z_process),
+        'model_type': model_type,
+        'distribution': distribution,
+        'param_names': get_param_names(model_type, distribution)
     }
 
 
+def get_param_names(model_type, distribution):
+    """Get parameter names based on model type and distribution."""
+    if model_type.lower() == 'garch':
+        if distribution.lower() == 'normal':
+            return ['mu', 'omega', 'alpha', 'beta']
+        elif distribution.lower() == 'studentst':
+            return ['mu', 'omega', 'alpha', 'beta', 'nu']
+        else:  # skewstudent
+            return ['mu', 'omega', 'alpha', 'beta', 'nu', 'lambda']
+    else:  # egarch
+        if distribution.lower() == 'normal':
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta']
+        elif distribution.lower() == 'studentst':
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta', 'nu']
+        else:  # skewstudent
+            return ['mu', 'omega', 'alpha', 'gamma', 'beta', 'nu', 'lambda']
+
+
 @catch_exception
-def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_parameters=True):
+def simulate_volatility_paths(vol_model, horizon, simulations=5000, variate_parameters=True):
     """
-    Simulate future paths using a fitted GARCH model.
+    Simulate future paths using a fitted volatility model.
 
     Args:
-        garch_model: Dict with GARCH model parameters
+        vol_model: Dict with volatility model parameters
         horizon: Number of steps to simulate
         simulations: Number of paths to simulate
         variate_parameters: Whether to vary parameters between simulations
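Since the parameters array is positional, param_names documents what each column means. A quick check of the mapping (assuming the installed 0.0.143 package exposes get_param_names at module level, as the diff shows):

    from voly.core.hd import get_param_names

    print(get_param_names('garch', 'normal'))
    # ['mu', 'omega', 'alpha', 'beta']
    print(get_param_names('egarch', 'skewstudent'))
    # ['mu', 'omega', 'alpha', 'gamma', 'beta', 'nu', 'lambda']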
@@ -167,21 +273,34 @@ def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_parameters=True):
     Returns:
         Array of simulated log returns
     """
-    parameters = garch_model['parameters']
-    z_process = garch_model['z_process']
+    parameters = vol_model['parameters']
+    z_process = vol_model['z_process']
+    model_type = vol_model['model_type']
+    distribution = vol_model['distribution']
+    param_names = vol_model['param_names']
 
     # Use mean parameters as starting point
-    pars = garch_model['avg_params'].copy()  # [mu, omega, alpha, beta]
-    bounds = garch_model['std_params'].copy()
-
-    mu, omega, alpha, beta = pars
-    logger.info(f"GARCH parameters: mu={mu:.6f}, omega={omega:.6f}, alpha={alpha:.6f}, beta={beta:.6f}")
+    pars = vol_model['avg_params'].copy()
+    bounds = vol_model['std_params'].copy()
+
+    # Log parameters
+    param_str = ", ".join([f"{name}={par:.6f}" for name, par in zip(param_names, pars)])
+    logger.info(f"{model_type.upper()} parameters: {param_str}")
+
+    # Create KDE for innovations based on distribution
+    if distribution.lower() == 'normal':
+        # Use standard normal for normal distribution
+        def sample_innovation(size=1):
+            return np.random.normal(0, 1, size=size)
+    else:
+        # Use KDE for non-normal distributions to capture empirical distribution
+        kde = stats.gaussian_kde(z_process, bw_method='silverman')  # original code doesn't include bw_method
+        z_range = np.linspace(min(z_process), max(z_process), 1000)
+        z_prob = kde(z_range)
+        z_prob = z_prob / np.sum(z_prob)
 
-    # Create KDE for innovations
-    kde = stats.gaussian_kde(z_process)
-    z_range = np.linspace(min(z_process), max(z_process), 1000)
-    z_prob = kde(z_range)
-    z_prob = z_prob / np.sum(z_prob)
+        def sample_innovation(size=1):
+            return np.random.choice(z_range, size=size, p=z_prob)
 
     # Simulate paths
     simulated_returns = np.zeros(simulations)
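The non-normal branch draws innovations by discretizing the KDE onto a 1000-point grid and sampling from the normalized probabilities. A standalone sketch of that sampler with synthetic residuals (all names local to this snippet):

    import numpy as np
    from scipy import stats

    rng = np.random.default_rng(0)
    z_process = rng.standard_t(df=5, size=400)       # stand-in for standardized fit residuals

    kde = stats.gaussian_kde(z_process, bw_method='silverman')
    z_range = np.linspace(z_process.min(), z_process.max(), 1000)
    z_prob = kde(z_range)
    z_prob = z_prob / z_prob.sum()                   # normalize grid densities to a discrete pmf

    draws = np.random.choice(z_range, size=10_000, p=z_prob)
    print(draws.mean(), draws.std())                 # roughly 0 and the empirical std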
@@ -196,24 +315,63 @@ def simulate_garch_paths(garch_model, horizon, simulations=5000, variate_parameters=True):
             for j, (par, bound) in enumerate(zip(pars, bounds)):
                 var = bound ** 2 / len(parameters)
                 new_par = np.random.normal(par, var)
-                if j >= 1 and new_par <= 0:  # Ensure omega, alpha, beta are positive
+                # Ensure omega is positive, betas are between 0 and 1, etc.
+                if j >= 1 and new_par <= 0:
                     new_par = 0.01
                 new_pars.append(new_par)
-            mu, omega, alpha, beta = new_pars
+            sim_pars = new_pars
+        else:
+            sim_pars = pars.copy()
+
+        # Initialize variables based on model type
+        if model_type.lower() == 'garch':
+            if distribution.lower() == 'normal':
+                mu, omega, alpha, beta = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+            elif distribution.lower() == 'studentst':
+                mu, omega, alpha, beta, nu = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+            else:  # skewstudent
+                mu, omega, alpha, beta, nu, lam = sim_pars
+                sigma2 = omega / (1 - alpha - beta)
+        else:  # egarch
+            if distribution.lower() == 'normal':
+                mu, omega, alpha, gamma, beta = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
+            elif distribution.lower() == 'studentst':
+                mu, omega, alpha, gamma, beta, nu = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
+            else:  # skewstudent
+                mu, omega, alpha, gamma, beta, nu, lam = sim_pars
+                log_sigma2 = omega / (1 - beta)
+                sigma2 = np.exp(log_sigma2)
 
-        # Initial values
-        sigma2 = omega / (1 - alpha - beta)
         returns_sum = 0
 
         # Simulate path
         for _ in range(horizon):
-            # Sample from innovation distribution
-            z = np.random.choice(z_range, p=z_prob)
-
-            # Calculate return and update volatility
-            e = z * np.sqrt(sigma2)
-            returns_sum += e + mu
-            sigma2 = omega + alpha * e ** 2 + beta * sigma2
+            # Sample innovation
+            z = sample_innovation()
+
+            # Update volatility and returns based on model type
+            if model_type.lower() == 'garch':
+                # Calculate return
+                e = z * np.sqrt(sigma2)
+                returns_sum += e + mu
+
+                # Update GARCH volatility
+                sigma2 = omega + alpha * e ** 2 + beta * sigma2
+            else:  # egarch
+                # Calculate return
+                e = z * np.sqrt(sigma2)
+                returns_sum += e + mu
+
+                # Update EGARCH volatility
+                abs_z = abs(z)
+                log_sigma2 = omega + beta * log_sigma2 + alpha * (abs_z - np.sqrt(2 / np.pi)) + gamma * z
+                sigma2 = np.exp(log_sigma2)
 
         simulated_returns[i] = returns_sum
 
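The two branches implement the GARCH(1,1) update sigma2_t = omega + alpha * e_{t-1}^2 + beta * sigma2_{t-1} and the EGARCH(1,1,1) update on log-variance. A one-step numeric sketch of both recursions, with made-up (not fitted) parameter values:

    import numpy as np

    omega, alpha, beta, gamma = 0.02, 0.08, 0.90, -0.05
    z = -1.0                                    # sampled innovation

    # GARCH(1,1): start at the unconditional variance omega / (1 - alpha - beta)
    sigma2 = omega / (1 - alpha - beta)         # 1.0 with these values
    e = z * np.sqrt(sigma2)
    sigma2_next = omega + alpha * e ** 2 + beta * sigma2
    print(sigma2_next)                          # 1.0: a one-std shock keeps variance at its long-run level

    # EGARCH(1,1,1): recursion on log-variance; negative gamma raises it for down moves
    log_sigma2 = omega / (1 - beta)
    log_sigma2_next = omega + beta * log_sigma2 + alpha * (abs(z) - np.sqrt(2 / np.pi)) + gamma * z
    print(np.exp(log_sigma2_next))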
@@ -224,7 +382,9 @@ def get_hd_surface(model_results: pd.DataFrame,
                    df_hist: pd.DataFrame,
                    domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
                    return_domain: str = 'log_moneyness',
-                   method: str = 'garch',
+                   method: str = 'arch_returns',
+                   model_type: str = 'garch',
+                   distribution: str = 'normal',
                    **kwargs) -> Dict[str, Any]:
     """
     Generate historical density surface from historical price data.
@@ -234,13 +394,15 @@ def get_hd_surface(model_results: pd.DataFrame,
     df_hist: DataFrame with historical price data
     domain_params: Tuple of (min, max, num_points) for x-domain
     return_domain: Domain for x-axis values ('log_moneyness', 'moneyness', 'returns', 'strikes')
-    method: Method to use for HD estimation ('hist_returns' or 'garch')
+    method: Method to use for HD estimation ('hist_returns' or 'arch_returns')
+    model_type: Type of volatility model to use ('garch' or 'egarch')
+    distribution: Distribution to use ('normal', 'studentst', or 'skewstudent')
     **kwargs: Additional parameters for specific methods:
-        For 'garch' method:
+        For volatility models ('garch'/'egarch' method):
             n_fits: Number of sliding windows (default: 400)
             simulations: Number of Monte Carlo simulations (default: 5000)
-            window_length: Length of sliding windows (default: 365)
-            variate_parameters: Whether to vary GARCH parameters (default: True)
+            window_length: Length of sliding windows as string (default: '30d')
+            variate_parameters: Whether to vary parameters (default: True)
             bandwidth: KDE bandwidth (default: 'silverman')
         For 'hist_returns' method:
             bandwidth: KDE bandwidth (default: 'silverman')
@@ -248,7 +410,6 @@ def get_hd_surface(model_results: pd.DataFrame,
     Returns:
         Dictionary containing pdf_surface, cdf_surface, x_surface, and moments
     """
-
     # Check if required columns are present
     required_columns = ['s', 't', 'r']
     missing_columns = [col for col in required_columns if col not in model_results.columns]
@@ -263,28 +424,46 @@ def get_hd_surface(model_results: pd.DataFrame,
     else:
         raise VolyError("Cannot determine granularity from df_hist.")
 
+    # Validate model_type and distribution
+    valid_model_types = ['garch', 'egarch']
+    valid_distributions = ['normal', 'studentst', 'skewstudent']
+
+    if model_type.lower() not in valid_model_types:
+        raise VolyError(f"Invalid model_type: {model_type}. Must be one of {valid_model_types}")
+
+    if distribution.lower() not in valid_distributions:
+        raise VolyError(f"Invalid distribution: {distribution}. Must be one of {valid_distributions}")
+
     # Get method-specific parameters
-    if method == 'garch':
+    if method == 'arch_returns':
         n_fits = kwargs.get('n_fits', 400)
         simulations = kwargs.get('simulations', 5000)
-        window_length = kwargs.get('window_length', 365)
+        window_length = kwargs.get('window_length', '30d')
         variate_parameters = kwargs.get('variate_parameters', True)
         bandwidth = kwargs.get('bandwidth', 'silverman')
-        logger.info(f"Using GARCH method with {n_fits} fits, {simulations} simulations")
+        logger.info(
+            f"Using {model_type.upper()} method with {distribution} distribution, {n_fits} fits, {simulations} simulations")
     elif method == 'hist_returns':
         bandwidth = kwargs.get('bandwidth', 'silverman')
         logger.info(f"Using returns-based KDE method with bandwidth {bandwidth}")
     else:
-        raise VolyError(f"Unknown method: {method}. Use 'hist_returns' or 'garch'.")
+        raise VolyError(f"Unknown method: {method}. Use 'hist_returns', 'arch_returns'.")
 
     # Calculate log returns from price history
     log_returns = np.log(df_hist['close'] / df_hist['close'].shift(1)) * 100
     log_returns = log_returns.dropna().values
 
-    # Fit GARCH model once if using garch method
-    garch_model = None
-    if method == 'garch':
-        garch_model = fit_garch_model(log_returns, n_fits, window_length)
+    # Fit volatility model once if using garch/egarch method
+    vol_model = None
+    if method == 'arch_returns':
+        vol_model = fit_volatility_model(
+            log_returns,
+            df_hist,
+            model_type=model_type,
+            distribution=distribution,
+            window_length=window_length,
+            n_fits=n_fits
+        )
 
     pdf_surface = {}
     cdf_surface = {}
@@ -311,7 +490,7 @@ def get_hd_surface(model_results: pd.DataFrame,
         logger.info(f"Processing HD for maturity {i} (t={t:.4f} years, {tau_days_float:.2f} days)")
 
         if method == 'hist_returns':
-            # Standard returns-based method (your existing implementation)
+            # Standard returns-based method
             # Filter historical data for this maturity's lookback period
             start_date = pd.Timestamp.now() - pd.Timedelta(days=int(t * 365.25))
             maturity_hist = df_hist[df_hist.index >= start_date].copy()
@@ -341,16 +520,16 @@ def get_hd_surface(model_results: pd.DataFrame,
             f = stats.gaussian_kde(adj_returns, bw_method=bandwidth)
             pdf_values = f(LM)
 
-        elif method == 'garch':
-            # GARCH-based method
-            if garch_model is None:
-                logger.warning(f"GARCH model fitting failed, skipping maturity {i}")
+        elif method == 'arch_returns':
+            # Volatility model-based method
+            if vol_model is None:
+                logger.warning(f"Volatility model fitting failed, skipping maturity {i}")
                 continue
 
-            # Simulate paths with the GARCH model
+            # Simulate paths with the volatility model
             horizon = max(1, int(tau_days_float))
-            simulated_returns, simulated_mu = simulate_garch_paths(
-                garch_model,
+            simulated_returns, simulated_mu = simulate_volatility_paths(
+                vol_model,
                 horizon,
                 simulations,
                 variate_parameters
@@ -377,15 +556,16 @@ def get_hd_surface(model_results: pd.DataFrame,
             kde = stats.gaussian_kde(simulated_moneyness, bw_method=bandwidth)
             pdf_values = kde(M)
 
-            # Include GARCH params in moments
-            avg_params = garch_model['avg_params']
-            model_params = {
-                'mu': avg_params[0],
-                'omega': avg_params[1],
-                'alpha': avg_params[2],
-                'beta': avg_params[3],
-                'persistence': avg_params[2] + avg_params[3]
-            }
+            # Include volatility model params in moments
+            avg_params = vol_model['avg_params']
+            param_names = vol_model['param_names']
+            model_params = {name: value for name, value in zip(param_names, avg_params)}
+            model_params['model_type'] = model_type
+            model_params['distribution'] = distribution
+
+            # Add persistence for GARCH-type models
+            if model_type.lower() == 'garch':
+                model_params['persistence'] = model_params.get('alpha', 0) + model_params.get('beta', 0)
         else:
             continue  # Skip this maturity if method is invalid
 
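model_params now mirrors whatever parameters the chosen model exposes, with persistence = alpha + beta appended for GARCH. A tiny sketch of the dict assembly with illustrative numbers:

    # Illustrative values, mimicking avg_params from a garch/studentst fit
    param_names = ['mu', 'omega', 'alpha', 'beta', 'nu']
    avg_params = [0.01, 0.02, 0.08, 0.90, 5.2]
    model_params = {name: value for name, value in zip(param_names, avg_params)}
    model_params['persistence'] = model_params['alpha'] + model_params['beta']
    print(model_params['persistence'])  # 0.98 -> volatility shocks decay slowly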
@@ -406,7 +586,7 @@ def get_hd_surface(model_results: pd.DataFrame,
             pdf_m = pdf_lm / M
             pdf_k = pdf_lm / K
             pdf_r = pdf_lm / (1 + R)
-        else:  # 'garch'
+        else:  # volatility models
             pdf_m = pdf_values
             pdf_lm = pdf_m * M
             pdf_k = pdf_lm / K
@@ -420,19 +600,19 @@ def get_hd_surface(model_results: pd.DataFrame,
         if return_domain == 'log_moneyness':
             x = LM
             pdf = pdf_lm
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'moneyness':
             x = M
             pdf = pdf_m
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'returns':
             x = R
             pdf = pdf_r
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         elif return_domain == 'strikes':
             x = K
             pdf = pdf_k
-            moments = get_all_moments(x, pdf, model_params if method == 'garch' else None)
+            moments = get_all_moments(x, pdf, model_params if method in ['garch', 'egarch'] else None)
         else:
             raise VolyError(f"Unsupported return_domain: {return_domain}")
 
@@ -445,7 +625,8 @@ def get_hd_surface(model_results: pd.DataFrame,
     # Create DataFrame with moments
     moments = pd.DataFrame(all_moments).T
 
-    logger.info(f"Historical density calculation complete using {method} method")
+    logger.info(
+        f"Historical density calculation complete using {method} method with {model_type} model and {distribution} distribution")
 
     return {
         'pdf_surface': pdf_surface,
voly-0.0.142.dist-info/METADATA → voly-0.0.143.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: voly
-Version: 0.0.142
+Version: 0.0.143
 Summary: Options & volatility research package
 Author-email: Manu de Cara <manu.de.cara@gmail.com>
 License: MIT
voly-0.0.142.dist-info/RECORD → voly-0.0.143.dist-info/RECORD RENAMED
@@ -1,5 +1,5 @@
 voly/__init__.py,sha256=8xyDk7rFCn_MOD5hxuv5cxxKZvBVRiSIM7TgaMPpwpw,211
-voly/client.py,sha256=Lj3YY6P1VBQD5C_psPh2pSxCMVvFjRBybrMrs4e9qXI,14249
+voly/client.py,sha256=UzgvwLf5SsNkvv3-Zdzej5nJ4pxoS-UCP8LjFMh3LFo,13229
 voly/exceptions.py,sha256=PBsbn1vNMvKcCJwwJ4lBO6glD85jo1h2qiEmD7ArAjs,92
 voly/formulas.py,sha256=G_soRiPwQlHy6milOAj6TdmBWr-fNZpMvm0joXAMZ90,10767
 voly/models.py,sha256=o-pHujGfr5Gn8ItckMzLI4Q8yaX9FQaV8UjCxv2zgTY,3364
@@ -7,13 +7,13 @@ voly/core/__init__.py,sha256=bu6fS2I1Pj9fPPnl-zY3L7NqrZSY5Zy6NY2uMUvdhKs,183
 voly/core/charts.py,sha256=E21OZB5lTY4YL2flgaFJ6s5g3_ExtAQT2zryZZxLPyM,12735
 voly/core/data.py,sha256=pDeuYhP0GX4RbtlqByvsE3rfHcIkix0BU5MLW8sKIeI,8935
 voly/core/fit.py,sha256=Tb9eeG7e_2dQTcqt6aqEwFrZdy6jR9rSNqe6tzOdVhQ,9245
-voly/core/hd.py,sha256=K2X0isAchumuRPcc5RSEkMOR5sOeb_I3twwqAZYYL1A,16809
+voly/core/hd.py,sha256=7b5kIIOz_g8uAM_Bsqx8treo1wA3mx1HiEsTNfwpw1Q,25236
 voly/core/interpolate.py,sha256=JkK172-FXyhesW3hY4pEeuJWG3Bugq7QZXbeKoRpLuo,5305
 voly/core/rnd.py,sha256=masjK4WrVb925gPGboD8iDAaEN7FY7S4OHYthHPtA3o,13613
 voly/utils/__init__.py,sha256=E05mWatyC-PDOsCxQV1p5Xi1IgpOomxrNURyCx_gB-w,200
 voly/utils/logger.py,sha256=4-_2bVJmq17Q0d7Rd2mPg1AeR8gxv6EPvcmBDMFWcSM,1744
-voly-0.0.142.dist-info/licenses/LICENSE,sha256=wcHIVbE12jfcBOai_wqBKY6xvNQU5E909xL1zZNq_2Q,1065
-voly-0.0.142.dist-info/METADATA,sha256=BLz0X2tkaiZqfL5cYXcihLfbrZra3AQRrRXPXrNkS5w,4115
-voly-0.0.142.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
-voly-0.0.142.dist-info/top_level.txt,sha256=ZfLw2sSxF-LrKAkgGjOmeTcw6_gD-30zvtdEY5W4B7c,5
-voly-0.0.142.dist-info/RECORD,,
+voly-0.0.143.dist-info/licenses/LICENSE,sha256=wcHIVbE12jfcBOai_wqBKY6xvNQU5E909xL1zZNq_2Q,1065
+voly-0.0.143.dist-info/METADATA,sha256=citPOTKDdkxXG6cgQAxHWRH9hz09lyve8l8BASS-wVU,4115
+voly-0.0.143.dist-info/WHEEL,sha256=DK49LOLCYiurdXXOXwGJm6U4DkHkg4lcxjhqwRa0CP4,91
+voly-0.0.143.dist-info/top_level.txt,sha256=ZfLw2sSxF-LrKAkgGjOmeTcw6_gD-30zvtdEY5W4B7c,5
+voly-0.0.143.dist-info/RECORD,,