voly 0.0.151__tar.gz → 0.0.153__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {voly-0.0.151/src/voly.egg-info → voly-0.0.153}/PKG-INFO +1 -1
  2. {voly-0.0.151 → voly-0.0.153}/pyproject.toml +2 -2
  3. {voly-0.0.151 → voly-0.0.153}/src/voly/client.py +42 -27
  4. voly-0.0.153/src/voly/core/hd.py +249 -0
  5. {voly-0.0.151 → voly-0.0.153}/src/voly/core/rnd.py +34 -157
  6. voly-0.0.153/src/voly/utils/density.py +155 -0
  7. {voly-0.0.151 → voly-0.0.153/src/voly.egg-info}/PKG-INFO +1 -1
  8. {voly-0.0.151 → voly-0.0.153}/src/voly.egg-info/SOURCES.txt +1 -0
  9. voly-0.0.151/src/voly/core/hd.py +0 -887
  10. {voly-0.0.151 → voly-0.0.153}/LICENSE +0 -0
  11. {voly-0.0.151 → voly-0.0.153}/README.md +0 -0
  12. {voly-0.0.151 → voly-0.0.153}/setup.cfg +0 -0
  13. {voly-0.0.151 → voly-0.0.153}/setup.py +0 -0
  14. {voly-0.0.151 → voly-0.0.153}/src/voly/__init__.py +0 -0
  15. {voly-0.0.151 → voly-0.0.153}/src/voly/core/__init__.py +0 -0
  16. {voly-0.0.151 → voly-0.0.153}/src/voly/core/charts.py +0 -0
  17. {voly-0.0.151 → voly-0.0.153}/src/voly/core/data.py +0 -0
  18. {voly-0.0.151 → voly-0.0.153}/src/voly/core/fit.py +0 -0
  19. {voly-0.0.151 → voly-0.0.153}/src/voly/core/interpolate.py +0 -0
  20. {voly-0.0.151 → voly-0.0.153}/src/voly/exceptions.py +0 -0
  21. {voly-0.0.151 → voly-0.0.153}/src/voly/formulas.py +0 -0
  22. {voly-0.0.151 → voly-0.0.153}/src/voly/models.py +0 -0
  23. {voly-0.0.151 → voly-0.0.153}/src/voly/utils/__init__.py +0 -0
  24. {voly-0.0.151 → voly-0.0.153}/src/voly/utils/logger.py +0 -0
  25. {voly-0.0.151 → voly-0.0.153}/src/voly.egg-info/dependency_links.txt +0 -0
  26. {voly-0.0.151 → voly-0.0.153}/src/voly.egg-info/requires.txt +0 -0
  27. {voly-0.0.151 → voly-0.0.153}/src/voly.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: voly
-Version: 0.0.151
+Version: 0.0.153
 Summary: Options & volatility research package
 Author-email: Manu de Cara <manu.de.cara@gmail.com>
 License: MIT
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "voly"
-version = "0.0.151"
+version = "0.0.153"
 description = "Options & volatility research package"
 readme = "README.md"
 authors = [
@@ -60,7 +60,7 @@ line_length = 100
 multi_line_output = 3
 
 [tool.mypy]
-python_version = "0.0.151"
+python_version = "0.0.153"
 warn_return_any = true
 warn_unused_configs = true
 disallow_untyped_defs = true
@@ -23,8 +23,7 @@ from voly.core.rnd import get_rnd_surface
 from voly.core.hd import get_historical_data, get_hd_surface
 from voly.core.interpolate import interpolate_model
 from voly.core.charts import (
-    plot_all_smiles, plot_raw_parameters, plot_jw_parameters, plot_fit_performance, plot_3d_surface,
-    plot_fit_performance
+    plot_all_smiles, plot_raw_parameters, plot_jw_parameters, plot_fit_performance, plot_3d_surface
 )
 
 
@@ -156,10 +155,6 @@ class VolyClient:
           option_type: str = 'call') -> float:
         return iv(option_price, s, K, r, t, option_type)
 
-    @staticmethod
-    def pdf_to_calls(pdf_K: np.ndarray, s: float, K: np.ndarray, r: float, t: float):
-        return pdf_to_calls(pdf_K, s, K, r, t)
-
     # -------------------------------------------------------------------------
     # Model Fitting
     # -------------------------------------------------------------------------
@@ -307,8 +302,19 @@ class VolyClient:
     def get_rnd_surface(model_results: pd.DataFrame,
                         domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
                         return_domain: str = 'log_moneyness',
-                        method: str = 'rookley') -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray], Dict[str, np.ndarray], pd.DataFrame]:
+                        method: str = 'rookley') -> Dict[str, Any]:
+        """
+        Generate risk-neutral density surface from volatility surface parameters.
 
+        Parameters:
+        - model_results: DataFrame from fit_model() or interpolate_model()
+        - domain_params: Tuple of (min_log_moneyness, max_log_moneyness, num_points)
+        - return_domain: Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
+        - method: Method for RND estimation ('rookley' or 'breeden')
+
+        Returns:
+        - Dictionary with pdf_surface, cdf_surface, x_surface, and moments
+        """
         logger.info("Calculating RND surface")
 
         return get_rnd_surface(
@@ -326,41 +332,50 @@ class VolyClient:
     def get_historical_data(currency: str = 'BTC',
                             lookback_days: str = '90d',
                             granularity: str = '15m',
-                            exchange_name: str = 'binance'):
+                            exchange_name: str = 'binance') -> pd.DataFrame:
+        """
+        Fetch historical OHLCV data for a cryptocurrency.
 
-        # Generate the surface
-        df_hist = get_historical_data(
+        Parameters:
+        - currency: The cryptocurrency to fetch data for (e.g., 'BTC', 'ETH')
+        - lookback_days: The lookback period in days, formatted as '90d', '30d', etc.
+        - granularity: The time interval for data points (e.g., '15m', '1h', '1d')
+        - exchange_name: The exchange to fetch data from (default: 'binance')
+
+        Returns:
+        - Historical price data with OHLCV columns and datetime index
+        """
+        logger.info(f"Fetching historical {currency} data from {exchange_name}...")
+
+        return get_historical_data(
             currency=currency,
             lookback_days=lookback_days,
             granularity=granularity,
             exchange_name=exchange_name
         )
 
-        return df_hist
-
     @staticmethod
     def get_hd_surface(model_results: pd.DataFrame,
                        df_hist: pd.DataFrame,
                        domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
-                       return_domain: str = 'log_moneyness',
-                       method: str = 'garch',
-                       distribution: str = 'normal',
-                       window_length: str = '30d',
-                       n_fits: int = 400,
-                       simulations: int = 5000,
-                       bandwidth: str = 'silverman') -> Dict[str, Any]:
+                       return_domain: str = 'log_moneyness') -> Dict[str, Any]:
+        """
+        Generate historical density surface using normal distributions.
 
-        logger.info(f"Calculating historical density surface using {method} method")
+        Parameters:
+        - model_results: DataFrame with model parameters and maturities
+        - df_hist: DataFrame with historical price data
+        - domain_params: Tuple of (min_log_moneyness, max_log_moneyness, num_points)
+        - return_domain: Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
+
+        Returns:
+        - Dictionary with pdf_surface, cdf_surface, x_surface, and moments
+        """
+        logger.info("Calculating HD surface")
 
         return get_hd_surface(
             model_results=model_results,
             df_hist=df_hist,
             domain_params=domain_params,
-            return_domain=return_domain,
-            method=method,
-            distribution=distribution,
-            window_length=window_length,
-            n_fits=n_fits,
-            simulations=simulations,
-            bandwidth=bandwidth
+            return_domain=return_domain
         )
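Taken together, the client.py hunks above simplify the public API: `get_historical_data` now returns the OHLCV DataFrame directly, `pdf_to_calls` is removed, and `get_hd_surface` drops the GARCH-related parameters (`method`, `distribution`, `window_length`, `n_fits`, `simulations`, `bandwidth`) in favour of a normal-distribution-only estimator. A minimal usage sketch against the new signatures; the `VolyClient()` construction, the import path, and the `fit_model()` step are assumptions not shown in this diff:

```python
# Sketch of the 0.0.153 client API. Only the method signatures visible in the
# hunks above are taken as given; VolyClient() and fit_model() are assumed.
from voly.client import VolyClient  # assumed import path

voly = VolyClient()

# Now returns the OHLCV DataFrame directly (previously assigned to df_hist and returned)
df_hist = voly.get_historical_data(currency='BTC', lookback_days='90d',
                                   granularity='15m', exchange_name='binance')

# model_results would come from fit_model() / interpolate_model(); not shown in this diff
model_results = ...

# Only domain_params and return_domain remain; the garch/simulation knobs are gone
hd = voly.get_hd_surface(model_results=model_results,
                         df_hist=df_hist,
                         domain_params=(-1.5, 1.5, 1000),
                         return_domain='log_moneyness')

pdf_surface, moments = hd['pdf_surface'], hd['moments']
```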
@@ -0,0 +1,249 @@
+"""
+This module handles calculating historical densities from time series of prices
+and converting them to probability distributions.
+"""
+
+import ccxt
+import numpy as np
+import pandas as pd
+import datetime as dt
+from typing import Dict, Tuple, Any, Optional, List
+from scipy import stats
+from voly.utils.logger import logger, catch_exception
+from voly.exceptions import VolyError
+from voly.core.rnd import get_all_moments
+from voly.utils.density import prepare_domains, normalize_density, transform_to_domains, select_domain_results
+
+
+@catch_exception
+def get_historical_data(currency: str,
+                        lookback_days: str,
+                        granularity: str,
+                        exchange_name: str) -> pd.DataFrame:
+    """
+    Fetch historical OHLCV data for a cryptocurrency.
+
+    Parameters:
+    -----------
+    currency : str
+        The cryptocurrency to fetch data for (e.g., 'BTC', 'ETH')
+    lookback_days : str
+        The lookback period in days, formatted as '90d', '30d', etc.
+    granularity : str
+        The time interval for data points (e.g., '15m', '1h', '1d')
+    exchange_name : str
+        The exchange to fetch data from (default: 'binance')
+
+    Returns:
+    --------
+    pd.DataFrame
+        Historical price data with OHLCV columns and datetime index
+    """
+    # Validate inputs
+    if not lookback_days.endswith('d'):
+        raise VolyError("lookback_days should be in format '90d', '30d', etc.")
+
+    try:
+        # Get the exchange class from ccxt
+        exchange_class = getattr(ccxt, exchange_name.lower())
+        exchange = exchange_class({'enableRateLimit': True})
+    except (AttributeError, TypeError):
+        raise VolyError(f"Exchange '{exchange_name}' not found in ccxt. Please check the exchange name.")
+
+    # Form the trading pair symbol
+    symbol = f"{currency}/USDT"
+
+    # Convert lookback_days to timestamp
+    days_ago = int(lookback_days[:-1])
+    date_start = (dt.datetime.now() - dt.timedelta(days=days_ago)).strftime('%Y-%m-%d %H:%M:%S')
+    from_ts = exchange.parse8601(date_start)
+
+    ohlcv_list = []
+    ohlcv = exchange.fetch_ohlcv(symbol, granularity, since=from_ts, limit=1000)
+    ohlcv_list.append(ohlcv)
+
+    while True:
+        from_ts = ohlcv[-1][0]
+        new_ohlcv = exchange.fetch_ohlcv(symbol, granularity, since=from_ts, limit=1000)
+        if len(new_ohlcv) <= 1:  # No new data or just one overlapping candle
+            break
+        ohlcv.extend(new_ohlcv[1:])  # Skip the first one to avoid duplicates
+        if len(new_ohlcv) != 1000:
+            break
+
+    # Convert to DataFrame
+    df_hist = pd.DataFrame(ohlcv, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
+    df_hist['date'] = pd.to_datetime(df_hist['date'], unit='ms')
+    df_hist.set_index('date', inplace=True)
+    df_hist = df_hist.sort_index(ascending=True)
+    df_hist = df_hist[~df_hist.index.duplicated(keep='last')].sort_index()
+
+    logger.info(f"Data fetched successfully: {len(df_hist)} rows from {df_hist.index[0]} to {df_hist.index[-1]}")
+
+    return df_hist
+
+
+@catch_exception
+def calculate_normal_hd(df_hist: pd.DataFrame,
+                        t: float,
+                        r: float,
+                        n_periods: int,
+                        domains: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
+    """
+    Calculate historical density using a normal distribution based on historical returns.
+
+    Parameters:
+    -----------
+    df_hist : pd.DataFrame
+        Historical price data
+    t : float
+        Time to maturity in years
+    r : float
+        Risk-free rate
+    n_periods : int
+        Number of periods to scale returns
+    domains : Dict[str, np.ndarray]
+        Domain arrays
+
+    Returns:
+    --------
+    Dict[str, np.ndarray]
+        Dictionary of PDFs in different domains
+    """
+    # Extract log-moneyness domain
+    LM = domains['log_moneyness']
+    dx = domains['dx']
+
+    # Calculate log returns
+    returns = np.log(df_hist['close'] / df_hist['close'].shift(1)).dropna().values
+
+    # Filter historical data based on n_periods
+    if len(returns) < n_periods:
+        logger.warning(f"Not enough historical data, using all {len(returns)} points available")
+        dte_returns = returns
+    else:
+        dte_returns = returns[-n_periods:]
+
+    # Calculate scaled parameters for normal distribution
+    mu_scaled = np.mean(dte_returns) * np.sqrt(n_periods)
+    sigma_scaled = np.std(dte_returns) * np.sqrt(n_periods)
+
+    # Apply Girsanov adjustment to shift to risk-neutral measure
+    expected_risk_neutral_mean = (r - 0.5 * sigma_scaled ** 2) * np.sqrt(t)
+    adjustment = mu_scaled - expected_risk_neutral_mean
+    mu_rn = mu_scaled - adjustment
+
+    # Calculate PDF using normal distribution in log-moneyness domain
+    pdf_lm = stats.norm.pdf(LM, loc=mu_rn, scale=sigma_scaled)
+
+    # Normalize the PDF
+    pdf_lm = normalize_density(pdf_lm, dx)
+
+    # Transform to other domains
+    pdfs = transform_to_domains(pdf_lm, domains)
+
+    return pdfs
+
+
+@catch_exception
+def get_hd_surface(model_results: pd.DataFrame,
+                   df_hist: pd.DataFrame,
+                   domain_params: Tuple[float, float, int] = (-1.5, 1.5, 1000),
+                   return_domain: str = 'log_moneyness') -> Dict[str, Any]:
+    """
+    Generate historical density surface using normal distributions.
+
+    Parameters:
+    -----------
+    model_results : pd.DataFrame
+        DataFrame with model parameters and maturities
+    df_hist : pd.DataFrame
+        DataFrame with historical price data
+    domain_params : Tuple[float, float, int]
+        (min_log_moneyness, max_log_moneyness, num_points)
+    return_domain : str
+        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
+
+    Returns:
+    --------
+    Dict[str, Any]
+        Dictionary with pdf_surface, cdf_surface, x_surface, and moments
+    """
+    # Validate inputs
+    required_columns = ['s', 't', 'r']
+    missing_columns = [col for col in required_columns if col not in model_results.columns]
+    if missing_columns:
+        raise VolyError(f"Required columns missing in model_results: {missing_columns}")
+
+    if len(df_hist) < 2:
+        raise VolyError("Not enough data points in df_hist")
+
+    # Validate return domain
+    valid_domains = ['log_moneyness', 'moneyness', 'returns', 'strikes']
+    if return_domain not in valid_domains:
+        raise VolyError(f"Invalid return_domain: {return_domain}. Must be one of {valid_domains}")
+
+    # Determine granularity from data (minutes between data points)
+    time_diff = (df_hist.index[1] - df_hist.index[0]).total_seconds() / 60
+    minutes_per_period = max(1, int(time_diff))
+
+    # Initialize result containers
+    pdf_surface = {}
+    cdf_surface = {}
+    x_surface = {}
+    all_moments = {}
+
+    # Process each maturity
+    for i in model_results.index:
+        try:
+            # Get parameters for this maturity
+            s = model_results.loc[i, 's']  # Spot price
+            t = model_results.loc[i, 't']  # Time to maturity in years
+            r = model_results.loc[i, 'r']  # Risk-free rate
+
+            # Calculate relevant periods for this maturity
+            dte = t * 365.25  # Days to expiry
+            n_periods = max(1, int(dte * 24 * 60 / minutes_per_period))
+
+            # Prepare domains
+            domains = prepare_domains(domain_params, s)
+
+            # Calculate density
+            pdfs = calculate_normal_hd(
+                df_hist=df_hist,
+                t=t,
+                r=r,
+                n_periods=n_periods,
+                domains=domains
+            )
+
+            # Select results for the requested domain
+            pdf, cdf, x = select_domain_results(pdfs, domains, return_domain)
+
+            # Calculate moments
+            moments = get_all_moments(x, pdf)
+
+            # Store results
+            pdf_surface[i] = pdf
+            cdf_surface[i] = cdf
+            x_surface[i] = x
+            all_moments[i] = moments
+
+        except Exception as e:
+            logger.warning(f"Failed to calculate HD for maturity {i}: {str(e)}")
+
+    # Check if we have any valid results
+    if not pdf_surface:
+        raise VolyError("No valid densities could be calculated. Check your input data.")
+
+    # Create DataFrame with moments
+    moments = pd.DataFrame(all_moments).T
+
+    logger.info("Historical density calculation complete using normal distribution")
+
+    return {
+        'pdf_surface': pdf_surface,
+        'cdf_surface': cdf_surface,
+        'x_surface': x_surface,
+        'moments': moments
+    }
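The heart of the new hd.py is `calculate_normal_hd`: per-candle log returns are scaled to the option horizon, the mean is shifted to the risk-neutral drift, and a normal PDF is evaluated on the log-moneyness grid. A standalone sketch of that arithmetic with synthetic numbers (nothing here is taken from the package beyond the formulas visible above):

```python
# Standalone sketch of the scaling in calculate_normal_hd above, with synthetic
# inputs. It reproduces the code's behaviour rather than prescribing it.
import numpy as np
from scipy import stats

r, t = 0.05, 30 / 365.25                 # risk-free rate, 30-day maturity in years
minutes_per_period = 15                  # 15-minute candles
n_periods = max(1, int(t * 365.25 * 24 * 60 / minutes_per_period))  # = 2880 periods

rng = np.random.default_rng(0)
returns = rng.normal(0, 0.001, size=10_000)       # placeholder per-candle log returns
dte_returns = returns[-n_periods:]

mu_scaled = np.mean(dte_returns) * np.sqrt(n_periods)
sigma_scaled = np.std(dte_returns) * np.sqrt(n_periods)

# Girsanov-style shift: after subtracting the adjustment, the mean collapses to
# the risk-neutral drift (r - 0.5 * sigma^2) * sqrt(t), exactly as in the code above.
mu_rn = (r - 0.5 * sigma_scaled ** 2) * np.sqrt(t)

lm_grid = np.linspace(-1.5, 1.5, 1000)            # log-moneyness grid from domain_params
pdf_lm = stats.norm.pdf(lm_grid, loc=mu_rn, scale=sigma_scaled)
pdf_lm /= np.sum(pdf_lm * (lm_grid[1] - lm_grid[0]))   # normalize, as normalize_density does
```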
@@ -9,154 +9,9 @@ from typing import Dict, List, Tuple, Optional, Union, Any, Callable
 from voly.utils.logger import logger, catch_exception
 from voly.exceptions import VolyError
 from voly.models import SVIModel
-from voly.formulas import bs, d1, d2, get_domain
+from voly.formulas import bs, d1, d2
 from scipy import stats
-
-
-@catch_exception
-def _prepare_domains(domain_params, s, r, o, t):
-    """
-    Calculate domain arrays for different representations (log_moneyness, moneyness, etc.).
-
-    Parameters:
-    -----------
-    domain_params : tuple
-        (min_log_moneyness, max_log_moneyness, num_points)
-    s : float
-        Spot price
-    r : float
-        Risk-free rate
-    o : ndarray
-        Implied volatility array
-    t : float
-        Time to expiry in years
-
-    Returns:
-    --------
-    dict
-        Dictionary containing arrays for different domains
-    """
-    domains = {}
-    domains['log_moneyness'] = get_domain(domain_params, s, r, o, t, 'log_moneyness')
-    domains['moneyness'] = get_domain(domain_params, s, r, o, t, 'moneyness')
-    domains['returns'] = get_domain(domain_params, s, r, o, t, 'returns')
-    domains['strikes'] = get_domain(domain_params, s, r, o, t, 'strikes')
-    domains['delta'] = get_domain(domain_params, s, r, o, t, 'delta')
-
-    # Precompute differentials for integration
-    domains['dx'] = domains['log_moneyness'][1] - domains['log_moneyness'][0]
-
-    return domains
-
-
-@catch_exception
-def _normalize_density(pdf_values, dx):
-    """
-    Normalize a probability density function to integrate to 1.
-
-    Parameters:
-    -----------
-    pdf_values : ndarray
-        Array of PDF values
-    dx : float
-        Grid spacing
-
-    Returns:
-    --------
-    ndarray
-        Normalized PDF values
-    """
-    total_area = np.sum(pdf_values * dx)
-    if total_area <= 0:
-        logger.warning("PDF area is negative or zero, using absolute values")
-        total_area = np.sum(np.abs(pdf_values) * dx)
-
-    return pdf_values / total_area
-
-
-@catch_exception
-def _transform_to_domains(rnd_k, domains):
-    """
-    Transform density from strike domain to other domains.
-
-    Parameters:
-    -----------
-    rnd_k : ndarray
-        PDF in strike domain
-    domains : dict
-        Domain arrays
-
-    Returns:
-    --------
-    dict
-        Dictionary of PDFs in different domains
-    """
-    LM = domains['log_moneyness']
-    M = domains['moneyness']
-    K = domains['strikes']
-    R = domains['returns']
-    dx = domains['dx']
-
-    # Calculate PDF in different domains
-    rnd_lm = rnd_k * K  # Convert to log-moneyness domain
-    pdf_lm = _normalize_density(rnd_lm, dx)
-
-    # Transform to other domains
-    pdf_k = pdf_lm / K
-    pdf_m = pdf_k * domains['strikes'][0]  # s = K[0] * M[0]
-    pdf_r = pdf_lm / (1 + R)
-
-    # For delta domain, need special handling due to non-monotonicity
-    pdf_d1 = stats.norm.pdf(d1(domains['strikes'][0], K, 0, domains['delta'][0], 1, option_type='call'))
-    dd_dK = pdf_d1 / (domains['delta'][0] * np.sqrt(1) * K)
-    pdf_d = pdf_k / dd_dK
-
-    # Calculate CDF
-    cdf = np.cumsum(pdf_lm * dx)
-    cdf = np.minimum(cdf / cdf[-1], 1.0)  # Ensure max value is 1
-
-    return {
-        'log_moneyness': pdf_lm,
-        'moneyness': pdf_m,
-        'returns': pdf_r,
-        'strikes': pdf_k,
-        'delta': pdf_d,
-        'cdf': cdf
-    }
-
-
-@catch_exception
-def _select_domain_results(pdfs, domains, return_domain):
-    """
-    Select results for the requested domain.
-
-    Parameters:
-    -----------
-    pdfs : dict
-        PDFs in different domains
-    domains : dict
-        Domain arrays
-    return_domain : str
-        Requested domain
-
-    Returns:
-    --------
-    tuple
-        (pdf, cdf, x, moments)
-    """
-    if return_domain == 'delta':
-        # Special handling for delta domain due to potential non-monotonicity
-        D = domains['delta']
-        pdf_d = pdfs['delta']
-        sort_idx = np.argsort(D)
-        x = D[sort_idx]
-        pdf = pdf_d[sort_idx]
-    else:
-        x = domains[return_domain]
-        pdf = pdfs[return_domain]
-
-    moments = get_all_moments(x, pdf)
-    return pdf, pdfs['cdf'], x, moments
+from voly.utils.density import prepare_domains, normalize_density, transform_to_domains, select_domain_results
 
 
 @catch_exception
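The helpers deleted above now live in the new `src/voly/utils/density.py` (+155 lines), whose contents are not shown in this diff. From the call sites, the shared interface is `prepare_domains(domain_params, s)`, `normalize_density(pdf, dx)`, `transform_to_domains(pdf_lm, domains)`, and `select_domain_results(pdfs, domains, return_domain)` returning `(pdf, cdf, x)`. A hypothetical sketch consistent with those call sites and with the old private helpers; it is an illustration, not the published file:

```python
# Hypothetical reconstruction of the voly.utils.density interface, inferred from
# the deleted rnd.py helpers and the new call sites above. Not the actual source.
import numpy as np
from typing import Dict, Tuple


def prepare_domains(domain_params: Tuple[float, float, int], s: float) -> Dict[str, np.ndarray]:
    # Assumed grid construction: moneyness, strikes and returns derived from the log-moneyness grid
    lm = np.linspace(domain_params[0], domain_params[1], domain_params[2])
    m = np.exp(lm)
    return {'log_moneyness': lm, 'moneyness': m, 'returns': m - 1,
            'strikes': s * m, 'dx': lm[1] - lm[0]}


def normalize_density(pdf: np.ndarray, dx: float) -> np.ndarray:
    # Same logic as the deleted _normalize_density
    area = np.sum(pdf * dx)
    if area <= 0:
        area = np.sum(np.abs(pdf) * dx)
    return pdf / area


def transform_to_domains(pdf_lm: np.ndarray, domains: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
    # Takes a normalized log-moneyness PDF (the callers now convert rnd_k to pdf_lm themselves)
    K, R, dx = domains['strikes'], domains['returns'], domains['dx']
    pdf_k = pdf_lm / K
    cdf = np.cumsum(pdf_lm * dx)
    cdf = np.minimum(cdf / cdf[-1], 1.0)
    return {'log_moneyness': pdf_lm, 'strikes': pdf_k,
            'moneyness': pdf_k * K[0], 'returns': pdf_lm / (1 + R), 'cdf': cdf}


def select_domain_results(pdfs, domains, return_domain):
    # Returns (pdf, cdf, x); moments are now computed by the caller
    return pdfs[return_domain], pdfs['cdf'], domains[return_domain]
```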
@@ -177,7 +32,7 @@ def breeden(domain_params, s, r, o, t, return_domain):
     t : float
         Time to expiry in years
     return_domain : str
-        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes', 'delta')
+        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
 
     Returns:
     --------
@@ -185,8 +40,9 @@ def breeden(domain_params, s, r, o, t, return_domain):
         (pdf, cdf, x, moments)
     """
     # Prepare domain arrays
-    domains = _prepare_domains(domain_params, s, r, o, t)
+    domains = prepare_domains(domain_params, s)
     K = domains['strikes']
+    dx = domains['dx']
 
     # Calculate option prices and derivatives
     c = bs(s, K, r, o, t, option_type='call')
@@ -196,11 +52,21 @@ def breeden(domain_params, s, r, o, t, return_domain):
     # Calculate RND in strike domain and apply discount factor
     rnd_k = np.maximum(np.exp(r * t) * c2, 0)
 
+    # Transform to log-moneyness domain first
+    LM = domains['log_moneyness']
+    rnd_lm = rnd_k * K  # Convert to log-moneyness domain
+    pdf_lm = normalize_density(rnd_lm, dx)
+
     # Transform to other domains
-    pdfs = _transform_to_domains(rnd_k, domains)
+    pdfs = transform_to_domains(pdf_lm, domains)
 
     # Return results for requested domain
-    return _select_domain_results(pdfs, domains, return_domain)
+    pdf, cdf, x = select_domain_results(pdfs, domains, return_domain)
+
+    # Calculate moments
+    moments = get_all_moments(x, pdf)
+
+    return pdf, cdf, x, moments
 
 
 @catch_exception
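The `breeden` hunks above leave the Breeden-Litzenberger estimator itself untouched and only change the post-processing: the risk-neutral density in strikes is the discounted second derivative of the call price curve, q(K) = exp(r t) * d2C/dK2, which is what `rnd_k = np.maximum(np.exp(r * t) * c2, 0)` computes before the new conversion to a normalized log-moneyness PDF. A minimal numerical illustration under flat Black-Scholes volatility (the `bs_call` helper is local to this example, not the package's `bs`):

```python
# Numerical Breeden-Litzenberger illustration with a flat-vol call curve.
import numpy as np
from scipy import stats

def bs_call(s, K, r, sigma, t):
    # Plain Black-Scholes call price, local to this example
    d1 = (np.log(s / K) + (r + 0.5 * sigma ** 2) * t) / (sigma * np.sqrt(t))
    d2 = d1 - sigma * np.sqrt(t)
    return s * stats.norm.cdf(d1) - K * np.exp(-r * t) * stats.norm.cdf(d2)

s, r, sigma, t = 100.0, 0.05, 0.6, 30 / 365
K = s * np.exp(np.linspace(-1.5, 1.5, 1000))   # strike grid built from a log-moneyness grid
c = bs_call(s, K, r, sigma, t)

# Breeden-Litzenberger: q(K) = exp(r t) * d2C/dK2, floored at zero as in the code above
c2 = np.gradient(np.gradient(c, K), K)
rnd_k = np.maximum(np.exp(r * t) * c2, 0)
# With flat vol this recovers (up to discretization error) the lognormal density of S_T
```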
@@ -221,7 +87,7 @@ def rookley(domain_params, s, r, o, t, return_domain):
     t : float
         Time to expiry in years
     return_domain : str
-        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes', 'delta')
+        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
 
     Returns:
     --------
@@ -229,9 +95,10 @@ def rookley(domain_params, s, r, o, t, return_domain):
         (pdf, cdf, x, moments)
     """
     # Prepare domain arrays
-    domains = _prepare_domains(domain_params, s, r, o, t)
+    domains = prepare_domains(domain_params, s)
     M = domains['moneyness']
     K = domains['strikes']
+    dx = domains['dx']
 
     # Calculate volatility derivatives with respect to moneyness
     o1 = np.gradient(o, M)
@@ -287,11 +154,21 @@ def rookley(domain_params, s, r, o, t, return_domain):
     # Calculate RND in strike domain and apply discount factor
     rnd_k = np.maximum(ert * s * dd_c_K, 0)
 
+    # Transform to log-moneyness domain first
+    LM = domains['log_moneyness']
+    rnd_lm = rnd_k * K  # Convert to log-moneyness domain
+    pdf_lm = normalize_density(rnd_lm, dx)
+
     # Transform to other domains
-    pdfs = _transform_to_domains(rnd_k, domains)
+    pdfs = transform_to_domains(pdf_lm, domains)
 
     # Return results for requested domain
-    return _select_domain_results(pdfs, domains, return_domain)
+    pdf, cdf, x = select_domain_results(pdfs, domains, return_domain)
+
+    # Calculate moments
+    moments = get_all_moments(x, pdf)
+
+    return pdf, cdf, x, moments
 
 
 @catch_exception
@@ -433,7 +310,7 @@ def get_rnd_surface(model_results: pd.DataFrame,
     domain_params : tuple
         (min_log_moneyness, max_log_moneyness, num_points)
     return_domain : str
-        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes', 'delta')
+        Domain for results ('log_moneyness', 'moneyness', 'returns', 'strikes')
     method : str
         Method for RND estimation ('rookley' or 'breeden')
 
@@ -453,7 +330,7 @@ def get_rnd_surface(model_results: pd.DataFrame,
         raise VolyError(f"Invalid method: {method}. Must be 'rookley' or 'breeden'")
 
     # Validate return_domain
-    valid_domains = ['log_moneyness', 'moneyness', 'returns', 'strikes', 'delta']
+    valid_domains = ['log_moneyness', 'moneyness', 'returns', 'strikes']
     if return_domain not in valid_domains:
         raise VolyError(f"Invalid return_domain: {return_domain}. Must be one of {valid_domains}")