voly-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
voly/core/data.py ADDED
@@ -0,0 +1,312 @@
+ """
+ Data fetching and processing module for the Voly package.
+
+ This module handles fetching options data from exchanges and processing
+ it into a standardized format for further analysis.
+ """
+
+ import os
+ import asyncio
+ import websockets
+ import json
+ import pandas as pd
+ import requests
+ import time
+ import datetime
+ import re
+ import numpy as np
+ from typing import List, Dict, Any, Optional, Union
+ from voly.utils.logger import logger, catch_exception
+ from voly.exceptions import DataError, ExchangeError, ConnectionError
+
+
+ async def subscribe_channels(ws, channels):
+     """Helper function to subscribe to a list of channels"""
+     await ws.send(json.dumps({
+         "jsonrpc": "2.0",
+         "method": "public/subscribe",
+         "id": 42,
+         "params": {"channels": channels}
+     }))
+     await ws.recv()  # Skip confirmation
+
+
+ async def unsubscribe_channels(ws, channels):
+     """Helper function to unsubscribe from a list of channels"""
+     await ws.send(json.dumps({
+         "jsonrpc": "2.0",
+         "method": "public/unsubscribe",
+         "id": 43,
+         "params": {"channels": channels}
+     }))
+     await ws.recv()  # Skip confirmation
+
+
+ @catch_exception
+ async def process_batch(ws, batch: List[str], batch_num: int, total_batches: int) -> List[Dict[str, Any]]:
+     """Process a batch of instruments and return their data"""
+     batch_start = time.time()
+
+     # Create channel subscriptions
+     ticker_channels = [f"ticker.{instr}.100ms" for instr in batch]
+     book_channels = [f"book.{instr}.100ms" for instr in batch]
+     channels = ticker_channels + book_channels
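+     # Example channel names (with an illustrative instrument name):
+     #   "ticker.BTC-28MAR25-100000-C.100ms" and "book.BTC-28MAR25-100000-C.100ms"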
+
+     # Subscribe to channels
+     await subscribe_channels(ws, channels)
+
+     # Process batch responses
+     data_count = 0
+     needed_responses = len(batch) * 2  # Ticker and book for each instrument
+     instrument_data = {}
+
+     while data_count < needed_responses:
+         try:
+             response = await ws.recv()
+             data = json.loads(response)
+
+             if 'params' in data and 'data' in data['params'] and 'channel' in data['params']:
+                 channel = data['params']['channel']
+                 parts = channel.split('.')
+
+                 if len(parts) >= 2:
+                     channel_type = parts[0]  # 'ticker' or 'book'
+                     instr_name = parts[1]
+
+                     if instr_name in batch:
+                         if instr_name not in instrument_data:
+                             instrument_data[instr_name] = {}
+
+                         if channel_type not in instrument_data[instr_name]:
+                             instrument_data[instr_name][channel_type] = data['params']['data']
+                             data_count += 1
+
+         except Exception as e:
+             logger.error(f"Error in batch {batch_num}: {e}")
+             break
+
+     # Unsubscribe from channels
+     await unsubscribe_channels(ws, channels)
+
+     # Process data for this batch
+     batch_results = []
+     for instr_name, channels_data in instrument_data.items():
+         row = {"instrument_name": instr_name}
+
+         # Merge ticker data
+         if 'ticker' in channels_data:
+             ticker = channels_data['ticker']
+             # Add basic fields
+             for k, v in ticker.items():
+                 if k not in ['stats', 'greeks']:
+                     row[k] = v
+
+             # Flatten stats and greeks
+             for nested_key in ['stats', 'greeks']:
+                 if nested_key in ticker and isinstance(ticker[nested_key], dict):
+                     for k, v in ticker[nested_key].items():
+                         row[k] = v
+
+         # Merge book data
+         if 'book' in channels_data:
+             book = channels_data['book']
+             # Add book fields that don't conflict with ticker
+             for k, v in book.items():
+                 if k not in row and k not in ['bids', 'asks']:
+                     row[k] = v
+
+             # Store raw bids and asks
+             if 'bids' in book:
+                 row['bids'] = book['bids']
+             if 'asks' in book:
+                 row['asks'] = book['asks']
+
+         batch_results.append(row)
+
+     batch_time = time.time() - batch_start
+     logger.info(
+         f"Batch {batch_num}/{total_batches} completed in {batch_time:.2f}s - {len(batch_results)} instruments processed")
+
+     return batch_results
+
+
+ @catch_exception
+ async def get_deribit_data(currency: str = "BTC") -> pd.DataFrame:
+     """
+     Get options data with ticker and order book information from Deribit.
+
+     Parameters:
+     currency (str): Currency to fetch options for (default: "BTC")
+
+     Returns:
+     pandas.DataFrame: DataFrame with ticker and book data
+     """
+     total_start = time.time()
+
+     # Get active options instruments
+     logger.info(f"Fetching {currency} options...")
+     try:
+         response = requests.get(
+             "https://www.deribit.com/api/v2/public/get_instruments",
+             params={"currency": currency, "kind": "option", "expired": "false"}
+         )
+         response.raise_for_status()  # Raise exception for non-200 status codes
+     except requests.RequestException as e:
+         raise ConnectionError(f"Failed to connect to Deribit API: {str(e)}")
+
+     try:
+         instruments = [i['instrument_name'] for i in response.json()['result']]
+     except (KeyError, json.JSONDecodeError) as e:
+         raise DataError(f"Failed to parse Deribit API response: {str(e)}")
+
+     total_instruments = len(instruments)
+     logger.info(f"Found {total_instruments} active {currency} options")
+
+     # Calculate batches
+     total_batches = (total_instruments + 100 - 1) // 100
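+     # (Ceiling division: e.g. 945 instruments -> (945 + 99) // 100 = 10 batches of up to 100.)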
+
+     # Collect data
+     all_data = []
+
+     try:
+         async with websockets.connect('wss://www.deribit.com/ws/api/v2') as ws:
+             for i in range(0, total_instruments, 100):
+                 batch_num = i // 100 + 1
+                 batch = instruments[i:i + 100]
+
+                 batch_results = await process_batch(ws, batch, batch_num, total_batches)
+                 all_data.extend(batch_results)
+     except (websockets.exceptions.WebSocketException, ConnectionError) as e:
+         raise ConnectionError(f"WebSocket connection error: {str(e)}")
+
+     total_time = time.time() - total_start
+     logger.info(f"Total processing time: {total_time:.2f}s - {len(all_data)} instruments processed")
+
+     if not all_data:
+         raise DataError("No data collected from Deribit")
+
+     return pd.DataFrame(all_data)
+
+
+ @catch_exception
+ def process_option_chain(df: pd.DataFrame, currency: str, min_dte: float = 2.0) -> pd.DataFrame:
+     """
+     Process raw option chain data into a standardized format.
+
+     Parameters:
+     df (pd.DataFrame): Raw option chain data
+     currency (str): Currency code (e.g., 'BTC', 'ETH')
+     min_dte (float): Minimum days to expiry to include
+
+     Returns:
+     pd.DataFrame: Processed option chain data
+     """
+     logger.info(f"Processing option chain data for {currency}...")
+
+     # Extract instrument details
+     # Format is typically BTC-DDMMMYY-STRIKE-C/P or ETH-DDMMMYY-STRIKE-C/P
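+     # e.g. "BTC-28MAR25-100000-C" (illustrative) parses to day=28, month='MAR', year=2025,
+     # strike=100000.0, option_type='C', maturity_name='28MAR25'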
+     def extract_details(instrument_name):
+         pattern = f"{currency}-([\\d]{{1,2}})([A-Z]{{3}})(\\d{{2}})-([\\d]+)-([CP])"
+         match = re.match(pattern, instrument_name)
+         if match:
+             day = int(match.group(1))
+             month_str = match.group(2)
+             year = 2000 + int(match.group(3))
+             strike = float(match.group(4))
+             option_type = match.group(5)
+
+             month_dict = {'JAN': 1, 'FEB': 2, 'MAR': 3, 'APR': 4, 'MAY': 5, 'JUN': 6,
+                           'JUL': 7, 'AUG': 8, 'SEP': 9, 'OCT': 10, 'NOV': 11, 'DEC': 12}
+             month = month_dict.get(month_str)
+
+             maturity_name = f"{day}{month_str}{str(year)[-2:]}"
+
+             return {'day': day, 'month': month, 'year': year,
+                     'strike': strike, 'option_type': option_type,
+                     'maturity_name': maturity_name}
+         return None
+
+     # Apply extraction to create new columns
+     logger.info(f"Extracting option details from instrument names...")
+     df['details'] = df['instrument_name'].apply(lambda x: extract_details(x))
+     df['strike'] = df['details'].apply(lambda x: x['strike'] if x else None)
+     df['option_type'] = df['details'].apply(lambda x: x['option_type'] if x else None)
+     df['maturity_name'] = df['details'].apply(lambda x: x['maturity_name'] if x else None)
+
+     # Create expiry date at 8:00 AM UTC
+     df['expiry_date'] = df['details'].apply(
+         lambda x: datetime.datetime(x['year'], x['month'], x['day'], 8, 0, 0) if x else None
+     )
+
+     # Get reference time from the ticker timestamp (ms since epoch); use UTC to match the 08:00 UTC expiries
+     reference_time = datetime.datetime.fromtimestamp(df['timestamp'].iloc[0] / 1000, tz=datetime.timezone.utc).replace(tzinfo=None)
+     logger.info(f"Reference time: {reference_time}")
+
+     # Calculate days to expiry (DTE)
+     df['dte'] = (df['expiry_date'] - reference_time).dt.total_seconds() / (24 * 60 * 60)
+
+     # Calculate time to expiry in years
+     df['yte'] = df['dte'] / 365.25
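+     # (e.g. an option expiring in 30 days has dte = 30.0 and yte = 30 / 365.25 ≈ 0.0821)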
+
+     # Convert implied volatilities from percentages to decimals (zero bid/ask IVs are treated as missing)
+     df['mark_iv'] = df['mark_iv'] / 100
+     df['bid_iv'] = df['bid_iv'].replace({0: np.nan}) / 100
+     df['ask_iv'] = df['ask_iv'].replace({0: np.nan}) / 100
+
+     # Calculate log-moneyness
+     df['log_moneyness'] = np.log(df['underlying_price'] / df['strike'])
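+     # (e.g. underlying_price = 100000 and strike = 120000 gives log_moneyness = ln(100000 / 120000) ≈ -0.182)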
+
+     # Remove rows with missing implied volatility
+     original_rows = len(df)
+     df = df.dropna(subset=['mark_iv', 'log_moneyness', 'yte'])
+     logger.info(f"Removed {original_rows - len(df)} rows with missing data")
+
+     # Filter options with DTE > min_dte
+     if min_dte > 0:
+         original_count = len(df)
+         df = df[df['dte'] > min_dte]
+         logger.info(f"Filtered out {original_count - len(df)} options with DTE <= {min_dte}")
+
+     # Group by time to expiry and ensure we have enough data points for each expiry
+     expiry_counts = df.groupby('yte').size()
+     valid_expiries = expiry_counts[expiry_counts >= 5].index
+     df = df[df['yte'].isin(valid_expiries)]
+     logger.info(f"Filtered to {len(df)} options with at least 5 strikes per expiry")
+
+     # Report on the maturities we're working with
+     maturities = df.groupby(['maturity_name', 'yte']).size().reset_index(name='num_strikes')
+     for _, row in maturities.iterrows():
+         logger.info(f"Maturity: {row['maturity_name']}, YTE: {row['yte']:.4f}, Strikes: {row['num_strikes']}")
+
+     return df
+
+
+ @catch_exception
+ async def fetch_option_chain(exchange: str = 'deribit',
+                              currency: str = 'BTC',
+                              depth: bool = False) -> pd.DataFrame:
+     """
+     Fetch option chain data from the specified exchange.
+
+     Parameters:
+     exchange (str): Exchange to fetch data from (currently only 'deribit' is supported)
+     currency (str): Currency to fetch options for (e.g., 'BTC', 'ETH')
+     depth (bool): Whether to include full order book depth. If False, only top-of-book fields are kept.
+
+     Returns:
+     pd.DataFrame: Processed option chain data
+     """
+     if exchange.lower() != 'deribit':
+         raise ExchangeError(f"Exchange '{exchange}' is not supported. Currently only 'deribit' is available.")
+
+     # Get raw data
+     raw_data = await get_deribit_data(currency=currency)
+
+     # Process data
+     processed_data = process_option_chain(raw_data, currency)
+
+     # Remove order book depth if not needed
+     if not depth and 'bids' in processed_data.columns and 'asks' in processed_data.columns:
+         processed_data = processed_data.drop(columns=['bids', 'asks'])
+
+     return processed_data
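A minimal usage sketch (illustrative, not part of the packaged code), assuming fetch_option_chain is imported directly from voly.core.data:

    import asyncio
    from voly.core.data import fetch_option_chain

    # Fetch and process the live BTC option chain from Deribit (top-of-book only)
    option_chain = asyncio.run(fetch_option_chain(exchange='deribit', currency='BTC', depth=False))
    print(option_chain[['instrument_name', 'strike', 'option_type', 'dte', 'mark_iv']].head())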
voly/core/fit.py ADDED
@@ -0,0 +1,331 @@
+ """
+ Model fitting and calibration module for the Voly package.
+
+ This module handles fitting volatility models to market data and
+ calculating fitting statistics.
+ """
+
+ import numpy as np
+ import pandas as pd
+ from typing import List, Tuple, Dict, Optional, Union, Any
+ from scipy.optimize import least_squares
+ from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
+ from voly.utils.logger import logger, catch_exception
+ from voly.exceptions import ModelError, ValidationError
+ from voly.models import SVIModel
+
+
+ @catch_exception
+ def calculate_residuals(params: List[float],
+                         time_to_expiry: float,
+                         market_data: pd.DataFrame,
+                         model: Any = SVIModel) -> np.ndarray:
+     """
+     Calculate the residuals between market and model implied volatilities.
+
+     Parameters:
+     - params: Model parameters (e.g., SVI parameters [a, b, sigma, rho, m])
+     - time_to_expiry: The time to expiry in years
+     - market_data: DataFrame with market data
+     - model: Model class to use (default: SVIModel)
+
+     Returns:
+     - Array of residuals
+     """
+     # Filter market data for the specific time to expiry
+     specific_expiry_data = market_data[market_data['yte'] == time_to_expiry]
+
+     # Calculate the total implied variance using the model for filtered data
+     w_model = np.array([model.svi(x, *params) for x in specific_expiry_data['log_moneyness']])
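+     # (For reference: assuming SVIModel.svi implements the standard raw-SVI total variance,
+     #  this is w(k) = a + b * (rho * (k - m) + sqrt((k - m)**2 + sigma**2)) for log-moneyness k.)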
+
+     # Extract the actual market implied volatilities
+     iv_actual = specific_expiry_data['mark_iv'].values
+
+     # Calculate residuals between market implied volatilities and model predictions
+     residuals = iv_actual - np.sqrt(w_model / time_to_expiry)
+
+     return residuals
+
+
+ @catch_exception
+ def optimize_svi_parameters(market_data: pd.DataFrame,
+                             initial_params: Optional[List[float]] = None,
+                             param_bounds: Optional[Tuple] = None) -> Dict[str, Dict[str, Any]]:
+     """
+     Optimize SVI parameters for all unique expiries in the market data.
+
+     Parameters:
+     - market_data: DataFrame with market data
+     - initial_params: Initial guess for SVI parameters (default: from SVIModel)
+     - param_bounds: Bounds for parameters (default: from SVIModel)
+
+     Returns:
+     - Dictionary of optimization results by maturity name
+     """
+     results = {}
+     unique_expiries = sorted(market_data['yte'].unique())
+
+     # Use defaults if not provided
+     if initial_params is None:
+         initial_params = SVIModel.DEFAULT_INITIAL_PARAMS
+
+     if param_bounds is None:
+         param_bounds = SVIModel.DEFAULT_PARAM_BOUNDS
+
+     for yte in unique_expiries:
+         # Get maturity name for reporting
+         expiry_data = market_data[market_data['yte'] == yte]
+         maturity_name = expiry_data['maturity_name'].iloc[0]
+         dte_value = expiry_data['dte'].iloc[0]
+
+         logger.info(f"Optimizing for {maturity_name} (DTE: {dte_value:.1f}, YTE: {yte:.4f})...")
+
+         # Optimize SVI parameters
+         try:
+             result = least_squares(
+                 calculate_residuals,
+                 initial_params,
+                 args=(yte, market_data, SVIModel),
+                 bounds=param_bounds,
+                 max_nfev=1000
+             )
+         except Exception as e:
+             raise ModelError(f"Optimization failed for {maturity_name}: {str(e)}")
+
+         # Store results with maturity name as key
+         results[maturity_name] = {
+             'params': result.x,
+             'success': result.success,
+             'cost': result.cost,
+             'optimality': result.optimality,
+             'message': result.message,
+             'yte': yte,
+             'dte': dte_value
+         }
+
+         if result.success:
+             logger.info(f'Optimization for {maturity_name} (DTE: {dte_value:.1f}): SUCCESS')
+         else:
+             logger.warning(f'Optimization for {maturity_name} (DTE: {dte_value:.1f}): FAILED')
+
+         logger.info('------------------------------------------')
+
+     return results
+
+
+ @catch_exception
+ def create_parameters_matrix(optimization_results: Dict[str, Dict[str, Any]]) -> Tuple[pd.DataFrame, pd.DataFrame]:
+     """
+     Create matrices of optimized parameters for each expiry.
+     Uses maturity names as column names.
+
+     Parameters:
+     - optimization_results: Dictionary of optimization results by maturity name
+
+     Returns:
+     - Tuple of DataFrames with optimized parameters:
+       1. Raw SVI parameters (a, b, sigma, rho, m)
+       2. Jump-Wing parameters (nu, psi, p, c, nu_tilde)
+     """
+     # Get maturity names in order by DTE
+     maturity_names = sorted(optimization_results.keys(),
+                             key=lambda x: optimization_results[x]['dte'])
+
+     # Create DataFrame for raw parameters with maturity names as columns
+     raw_param_matrix = pd.DataFrame(
+         columns=maturity_names,
+         index=SVIModel.PARAM_NAMES
+     )
+
+     # Create DataFrame for JW parameters
+     jw_param_matrix = pd.DataFrame(
+         columns=maturity_names,
+         index=SVIModel.JW_PARAM_NAMES
+     )
+
+     # Store YTE and DTE values for reference
+     yte_values = {}
+     dte_values = {}
+
+     # Fill the matrices with optimized parameters
+     for maturity_name in maturity_names:
+         result = optimization_results[maturity_name]
+
+         # Extract raw SVI parameters
+         a, b, sigma, rho, m = result['params']
+         raw_param_matrix[maturity_name] = [a, b, sigma, rho, m]
+
+         # Get time to expiry
+         yte = result['yte']
+         yte_values[maturity_name] = yte
+         dte_values[maturity_name] = result['dte']
+
+         # Calculate JW parameters
+         nu, psi, p, c, nu_tilde = SVIModel.svi_jw_params(a, b, sigma, rho, m, yte)
+         jw_param_matrix[maturity_name] = [nu, psi, p, c, nu_tilde]
+
+     # Store YTE and DTE as attributes in both DataFrames for reference
+     attrs = {
+         'yte_values': yte_values,
+         'dte_values': dte_values
+     }
+
+     raw_param_matrix.attrs.update(attrs)
+     jw_param_matrix.attrs.update(attrs)
+
+     return raw_param_matrix, jw_param_matrix
+
+
+ @catch_exception
+ def generate_implied_volatility_surface(
+         param_matrix: pd.DataFrame,
+         moneyness_range: Tuple[float, float] = (-2, 2),
+         num_points: int = 500
+ ) -> Tuple[np.ndarray, Dict[float, np.ndarray]]:
+     """
+     Generate implied volatility surface using optimized SVI parameters.
+
+     Parameters:
+     - param_matrix: Matrix of optimized SVI parameters with maturity names as columns
+     - moneyness_range: (min, max) range for the log-moneyness grid
+     - num_points: Number of points for the log-moneyness grid
+
+     Returns:
+     - Log-moneyness grid and implied volatility surface (keyed by YTE)
+     """
+     # Generate log-moneyness grid
+     min_m, max_m = moneyness_range
+     moneyness_values = np.linspace(min_m, max_m, num=num_points)
+     implied_volatility_surface = {}
+
+     # Get YTE values from the parameter matrix attributes
+     yte_values = param_matrix.attrs['yte_values']
+
+     # Generate implied volatility for each expiry
+     for maturity_name, yte in yte_values.items():
+         svi_params = param_matrix[maturity_name].values
+         w_svi = [SVIModel.svi(x, *svi_params) for x in moneyness_values]
+         implied_volatility_surface[yte] = np.sqrt(np.array(w_svi) / yte)
+
+     return moneyness_values, implied_volatility_surface
+
+
+ @catch_exception
+ def calculate_fit_statistics(market_data: pd.DataFrame, param_matrix: pd.DataFrame) -> pd.DataFrame:
+     """
+     Calculate fitting accuracy statistics for each expiry.
+
+     Parameters:
+     - market_data: DataFrame with market data
+     - param_matrix: Matrix of optimized SVI parameters with maturity names as columns
+
+     Returns:
+     - DataFrame with fitting statistics
+     """
+     # Get YTE values from the parameter matrix attributes
+     yte_values = param_matrix.attrs['yte_values']
+     dte_values = param_matrix.attrs['dte_values']
+
+     # Initialize lists for statistics
+     maturity_name_list = []
+     dte_list = []
+     yte_list = []
+     rmse_list = []
+     mae_list = []
+     r2_list = []
+     max_error_list = []
+     num_points_list = []
+
+     # Calculate statistics for each expiry
+     for maturity_name, yte in yte_values.items():
+         # Filter market data for the specific expiry
+         expiry_data = market_data[market_data['yte'] == yte]
+         dte_value = dte_values[maturity_name]
+
+         # Calculate SVI model predictions
+         svi_params = param_matrix[maturity_name].values
+         w_svi = np.array([SVIModel.svi(x, *svi_params) for x in expiry_data['log_moneyness']])
+         iv_model = np.sqrt(w_svi / yte)
+
+         # Get actual market implied volatilities
+         iv_market = expiry_data['mark_iv'].values
+
+         # Calculate statistics
+         rmse = np.sqrt(mean_squared_error(iv_market, iv_model))
+         mae = mean_absolute_error(iv_market, iv_model)
+         r2 = r2_score(iv_market, iv_model)
+         max_error = np.max(np.abs(iv_market - iv_model))
+         num_points = len(expiry_data)
+
+         # Append to lists
+         maturity_name_list.append(maturity_name)
+         dte_list.append(dte_value)
+         yte_list.append(yte)
+         rmse_list.append(rmse)
+         mae_list.append(mae)
+         r2_list.append(r2)
+         max_error_list.append(max_error)
+         num_points_list.append(num_points)
+
+     # Create DataFrame with statistics
+     stats_df = pd.DataFrame({
+         'Maturity': maturity_name_list,
+         'DTE': dte_list,
+         'YTE': yte_list,
+         'RMSE': rmse_list,
+         'MAE': mae_list,
+         'R²': r2_list,
+         'Max Error': max_error_list,
+         'Number of Points': num_points_list
+     })
+
+     return stats_df
+
+
+ @catch_exception
+ def fit_model(market_data: pd.DataFrame,
+               model_name: str = 'svi',
+               moneyness_range: Tuple[float, float] = (-2, 2),
+               num_points: int = 500) -> Dict[str, Any]:
+     """
+     Fit a volatility model to market data.
+
+     Parameters:
+     - market_data: DataFrame with market data
+     - model_name: Type of model to fit (default: 'svi')
+     - moneyness_range: (min, max) range for the log-moneyness grid
+     - num_points: Number of points for the log-moneyness grid
+
+     Returns:
+     - Dictionary with fitting results
+     """
+     if model_name.lower() != 'svi':
+         raise ValidationError(f"Model type '{model_name}' is not supported. Currently only 'svi' is available.")
+
+     # Step 1: Optimize model parameters
+     optimization_results = optimize_svi_parameters(market_data)
+
+     # Step 2: Create parameter matrices
+     raw_param_matrix, jw_param_matrix = create_parameters_matrix(optimization_results)
+
+     # Step 3: Generate implied volatility surface
+     moneyness_grid, iv_surface = generate_implied_volatility_surface(
+         raw_param_matrix, moneyness_range, num_points
+     )
+
+     # Step 4: Calculate fitting statistics
+     stats_df = calculate_fit_statistics(market_data, raw_param_matrix)
+
+     # Step 5: Get unique expiries in sorted order (in years)
+     unique_expiries_years = np.array(sorted(market_data['yte'].unique()))
+
+     # Return all results in a dictionary
+     return {
+         'optimization_results': optimization_results,
+         'raw_param_matrix': raw_param_matrix,
+         'jw_param_matrix': jw_param_matrix,
+         'moneyness_grid': moneyness_grid,
+         'iv_surface': iv_surface,
+         'stats_df': stats_df,
+         'unique_expiries': unique_expiries_years,
+     }
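A minimal end-to-end sketch (illustrative, not part of the packaged code), assuming the DataFrame produced by fetch_option_chain in voly/core/data.py is passed straight to fit_model:

    from voly.core.fit import fit_model

    # option_chain: DataFrame returned by fetch_option_chain (see voly/core/data.py)
    results = fit_model(option_chain, model_name='svi', moneyness_range=(-2, 2), num_points=500)
    print(results['stats_df'])          # per-expiry RMSE, MAE, R² and max error
    print(results['raw_param_matrix'])  # SVI parameters (a, b, sigma, rho, m) per maturity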