aponyx 0.1.0__py3-none-any.whl

aponyx/data/fetch.py ADDED
@@ -0,0 +1,410 @@
+ """
+ Unified data fetching interface with provider abstraction.
+
+ Fetch functions handle data acquisition from any source (file, Bloomberg, API)
+ with automatic validation and optional caching.
+ """
+
+ import logging
+
+ import pandas as pd
+
+ from ..config import DATA_DIR, CACHE_ENABLED, CACHE_TTL_DAYS, DEFAULT_DATA_SOURCES
+ from ..persistence.registry import DataRegistry, REGISTRY_PATH
+ from .cache import get_cached_data, save_to_cache
+ from .sources import DataSource, FileSource, BloombergSource, resolve_provider
+ from .providers.file import fetch_from_file
+ from .providers.bloomberg import fetch_from_bloomberg
+ from .validation import validate_cdx_schema, validate_vix_schema, validate_etf_schema
+
+ logger = logging.getLogger(__name__)
+
+
+ def _get_provider_fetch_function(source: DataSource):
+     """
+     Get fetch function for data source.
+
+     Parameters
+     ----------
+     source : DataSource
+         Data source configuration.
+
+     Returns
+     -------
+     Callable
+         Provider fetch function.
+     """
+     provider_type = resolve_provider(source)
+
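+     # resolve_provider derives a provider key from the source; unrecognized keys fall through to the ValueError below.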
+     if provider_type == "file":
+         return fetch_from_file
+     elif provider_type == "bloomberg":
+         return fetch_from_bloomberg
+     else:
+         raise ValueError(f"Unsupported provider: {provider_type}")
+
+
+ def fetch_cdx(
+     source: DataSource | None = None,
+     index_name: str | None = None,
+     tenor: str | None = None,
+     start_date: str | None = None,
+     end_date: str | None = None,
+     use_cache: bool = CACHE_ENABLED,
+     force_refresh: bool = False,
+ ) -> pd.DataFrame:
+     """
+     Fetch CDX index spread data from configured source.
+
+     Parameters
+     ----------
+     source : DataSource or None
+         Data source. If None, uses default from config.
+     index_name : str or None
+         Filter to specific index (e.g., "CDX_IG", "CDX_HY").
+     tenor : str or None
+         Filter to specific tenor (e.g., "5Y", "10Y").
+     start_date : str or None
+         Start date in YYYY-MM-DD format.
+     end_date : str or None
+         End date in YYYY-MM-DD format.
+     use_cache : bool, default CACHE_ENABLED
+         Whether to use cache.
+     force_refresh : bool, default False
+         Force fetch from source, bypassing cache.
+
+     Returns
+     -------
+     pd.DataFrame
+         Validated CDX data with DatetimeIndex and columns:
+         - spread: CDX spread in basis points
+         - index: Index identifier (if present)
+         - tenor: Tenor identifier (if present)
+
+     Examples
+     --------
+     >>> from aponyx.data import fetch_cdx, FileSource
+     >>> df = fetch_cdx(FileSource("data/raw/cdx.parquet"), tenor="5Y")
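+     >>> # With source omitted, the configured default is used (ValueError if none is set):
+     >>> df = fetch_cdx(tenor="5Y")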
+     """
+     source = source or DEFAULT_DATA_SOURCES.get("cdx")
+     if source is None:
+         raise ValueError("No source provided and no default configured for CDX")
+
+     instrument = "cdx"
+     cache_dir = DATA_DIR / "cache"
+
+     # Check cache first
+     if use_cache and not force_refresh:
+         cached = get_cached_data(
+             source,
+             instrument,
+             cache_dir,
+             start_date=start_date,
+             end_date=end_date,
+             ttl_days=CACHE_TTL_DAYS.get(instrument),
+             index_name=index_name,
+             tenor=tenor,
+         )
+         if cached is not None:
+             df = cached
+             # Apply filters if needed
+             if index_name is not None and "index" in df.columns:
+                 df = df[df["index"] == index_name]
+             if tenor is not None and "tenor" in df.columns:
+                 df = df[df["tenor"] == tenor]
+             return df
+
+     # Fetch from source
+     logger.info("Fetching CDX from %s", resolve_provider(source))
+     fetch_fn = _get_provider_fetch_function(source)
+
+     if isinstance(source, FileSource):
+         df = fetch_fn(
+             file_path=source.path,
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     elif isinstance(source, BloombergSource):
+         # Construct Bloomberg ticker from filters
+         ticker = _build_cdx_ticker(index_name, tenor)
+         df = fetch_fn(
+             ticker=ticker,
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     else:
+         raise ValueError(f"Unsupported source type: {type(source)}")
+
+     # Validate schema
+     df = validate_cdx_schema(df)
+
+     # Apply filters
+     if index_name is not None:
+         if "index" not in df.columns:
+             raise ValueError("Cannot filter by index_name: 'index' column not found")
+         df = df[df["index"] == index_name]
+         logger.debug("Filtered to index=%s: %d rows", index_name, len(df))
+
+     if tenor is not None:
+         if "tenor" not in df.columns:
+             raise ValueError("Cannot filter by tenor: 'tenor' column not found")
+         df = df[df["tenor"] == tenor]
+         logger.debug("Filtered to tenor=%s: %d rows", tenor, len(df))
+
+     # Cache if enabled
+     if use_cache:
+         registry = DataRegistry(REGISTRY_PATH, DATA_DIR)
+         save_to_cache(
+             df,
+             source,
+             instrument,
+             cache_dir,
+             registry=registry,
+             start_date=start_date,
+             end_date=end_date,
+             index_name=index_name,
+             tenor=tenor,
+         )
+
+     logger.info("Fetched CDX data: %d rows, %s to %s", len(df), df.index.min(), df.index.max())
+     return df
+
+
+ def fetch_vix(
+     source: DataSource | None = None,
+     start_date: str | None = None,
+     end_date: str | None = None,
+     use_cache: bool = CACHE_ENABLED,
+     force_refresh: bool = False,
+ ) -> pd.DataFrame:
+     """
+     Fetch VIX volatility index data from configured source.
+
+     Parameters
+     ----------
+     source : DataSource or None
+         Data source. If None, uses default from config.
+     start_date : str or None
+         Start date in YYYY-MM-DD format.
+     end_date : str or None
+         End date in YYYY-MM-DD format.
+     use_cache : bool, default CACHE_ENABLED
+         Whether to use cache.
+     force_refresh : bool, default False
+         Force fetch from source, bypassing cache.
+
+     Returns
+     -------
+     pd.DataFrame
+         Validated VIX data with DatetimeIndex and columns:
+         - close: VIX closing level
+
+     Examples
+     --------
+     >>> from aponyx.data import fetch_vix, FileSource
+     >>> df = fetch_vix(FileSource("data/raw/vix.parquet"))
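+     >>> # With source omitted, the configured default is used (ValueError if none is set):
+     >>> df = fetch_vix(start_date="2024-01-01")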
+     """
+     source = source or DEFAULT_DATA_SOURCES.get("vix")
+     if source is None:
+         raise ValueError("No source provided and no default configured for VIX")
+
+     instrument = "vix"
+     cache_dir = DATA_DIR / "cache"
+
+     # Check cache first
+     if use_cache and not force_refresh:
+         cached = get_cached_data(
+             source,
+             instrument,
+             cache_dir,
+             start_date=start_date,
+             end_date=end_date,
+             ttl_days=CACHE_TTL_DAYS.get(instrument),
+         )
+         if cached is not None:
+             return cached
+
+     # Fetch from source
+     logger.info("Fetching VIX from %s", resolve_provider(source))
+     fetch_fn = _get_provider_fetch_function(source)
+
+     if isinstance(source, FileSource):
+         df = fetch_fn(
+             file_path=source.path,
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     elif isinstance(source, BloombergSource):
+         df = fetch_fn(
+             ticker="VIX Index",
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     else:
+         raise ValueError(f"Unsupported source type: {type(source)}")
+
+     # Validate schema
+     df = validate_vix_schema(df)
+
+     # Cache if enabled
+     if use_cache:
+         registry = DataRegistry(REGISTRY_PATH, DATA_DIR)
+         save_to_cache(
+             df,
+             source,
+             instrument,
+             cache_dir,
+             registry=registry,
+             start_date=start_date,
+             end_date=end_date,
+         )
+
+     logger.info("Fetched VIX data: %d rows, %s to %s", len(df), df.index.min(), df.index.max())
+     return df
+
+
+ def fetch_etf(
+     source: DataSource | None = None,
+     ticker: str | None = None,
+     start_date: str | None = None,
+     end_date: str | None = None,
+     use_cache: bool = CACHE_ENABLED,
+     force_refresh: bool = False,
+ ) -> pd.DataFrame:
+     """
+     Fetch credit ETF price data from configured source.
+
+     Parameters
+     ----------
+     source : DataSource or None
+         Data source. If None, uses default from config.
+     ticker : str or None
+         Filter to specific ticker (e.g., "HYG", "LQD").
+     start_date : str or None
+         Start date in YYYY-MM-DD format.
+     end_date : str or None
+         End date in YYYY-MM-DD format.
+     use_cache : bool, default CACHE_ENABLED
+         Whether to use cache.
+     force_refresh : bool, default False
+         Force fetch from source, bypassing cache.
+
+     Returns
+     -------
+     pd.DataFrame
+         Validated ETF data with DatetimeIndex and columns:
+         - close: Closing price
+         - ticker: ETF ticker symbol (if present)
+
+     Examples
+     --------
+     >>> from aponyx.data import fetch_etf, FileSource
+     >>> df = fetch_etf(FileSource("data/raw/etf.parquet"), ticker="HYG")
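+     >>> # With source omitted, the configured default is used (ValueError if none is set):
+     >>> df = fetch_etf(ticker="LQD")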
+     """
+     source = source or DEFAULT_DATA_SOURCES.get("etf")
+     if source is None:
+         raise ValueError("No source provided and no default configured for ETF")
+
+     instrument = "etf"
+     cache_dir = DATA_DIR / "cache"
+
+     # Check cache first
+     if use_cache and not force_refresh:
+         cached = get_cached_data(
+             source,
+             instrument,
+             cache_dir,
+             start_date=start_date,
+             end_date=end_date,
+             ttl_days=CACHE_TTL_DAYS.get(instrument),
+             ticker=ticker,
+         )
+         if cached is not None:
+             df = cached
+             if ticker is not None and "ticker" in df.columns:
+                 df = df[df["ticker"] == ticker]
+             return df
+
+     # Fetch from source
+     logger.info("Fetching ETF from %s", resolve_provider(source))
+     fetch_fn = _get_provider_fetch_function(source)
+
+     if isinstance(source, FileSource):
+         df = fetch_fn(
+             file_path=source.path,
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     elif isinstance(source, BloombergSource):
+         if ticker is None:
+             raise ValueError("ticker required for Bloomberg ETF fetch")
+         df = fetch_fn(
+             ticker=f"{ticker} US Equity",
+             instrument=instrument,
+             start_date=start_date,
+             end_date=end_date,
+         )
+     else:
+         raise ValueError(f"Unsupported source type: {type(source)}")
+
+     # Validate schema
+     df = validate_etf_schema(df)
+
+     # Apply ticker filter
+     if ticker is not None:
+         if "ticker" not in df.columns:
+             raise ValueError("Cannot filter by ticker: 'ticker' column not found")
+         df = df[df["ticker"] == ticker]
+         logger.debug("Filtered to ticker=%s: %d rows", ticker, len(df))
+
+     # Cache if enabled
+     if use_cache:
+         registry = DataRegistry(REGISTRY_PATH, DATA_DIR)
+         save_to_cache(
+             df,
+             source,
+             instrument,
+             cache_dir,
+             registry=registry,
+             start_date=start_date,
+             end_date=end_date,
+             ticker=ticker,
+         )
+
+     logger.info("Fetched ETF data: %d rows, %s to %s", len(df), df.index.min(), df.index.max())
+     return df
+
+
+ def _build_cdx_ticker(index_name: str | None, tenor: str | None) -> str:
+     """
+     Construct Bloomberg ticker from CDX index and tenor.
+
+     Parameters
+     ----------
+     index_name : str or None
+         Index name (e.g., "CDX_IG", "CDX_HY").
+     tenor : str or None
+         Tenor (e.g., "5Y", "10Y").
+
+     Returns
+     -------
+     str
+         Bloomberg ticker.
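+
+     Examples
+     --------
+     >>> _build_cdx_ticker("CDX_IG", "5Y")
+     'CDX.NA.IG.5Y Index'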
+     """
+     if index_name is None or tenor is None:
+         raise ValueError("index_name and tenor required for Bloomberg CDX fetch")
+
+     # Example: index_name="CDX_IG", tenor="5Y" -> "CDX.NA.IG.5Y Index"
+     parts = index_name.split("_")
+     if len(parts) >= 2:
+         index_type = parts[1]  # IG, HY, XO
+         ticker = f"CDX.NA.{index_type}.{tenor} Index"
+     else:
+         ticker = f"{index_name}.{tenor} Index"
+
+     return ticker
aponyx/data/providers/__init__.py ADDED
@@ -0,0 +1,13 @@
+ """
+ Data provider implementations for different sources.
+
+ Providers handle the specifics of fetching data from files, Bloomberg, APIs, etc.
+ """
+
+ from .file import fetch_from_file
+ from .bloomberg import fetch_from_bloomberg
+
+ __all__ = [
+     "fetch_from_file",
+     "fetch_from_bloomberg",
+ ]
aponyx/data/providers/bloomberg.py ADDED
@@ -0,0 +1,269 @@
+ """
+ Bloomberg Terminal/API data provider.
+
+ Fetches market data using Bloomberg's Python API via xbbg wrapper.
+ Requires active Bloomberg Terminal session.
+ """
+
+ import logging
+ from datetime import datetime, timedelta
+ from typing import Any
+
+ import pandas as pd
+
+ logger = logging.getLogger(__name__)
+
+
+ # Bloomberg field mappings for different instrument types
+ BLOOMBERG_FIELDS = {
+     "cdx": ["PX_LAST"],  # CDX spread only
+     "vix": ["PX_LAST"],  # VIX close only
+     "etf": ["PX_LAST"],  # ETF close only
+ }
+
+ # Mapping from Bloomberg field names to schema column names
+ FIELD_MAPPING = {
+     "cdx": {
+         "PX_LAST": "spread",
+     },
+     "vix": {
+         "PX_LAST": "close",
+     },
+     "etf": {
+         "PX_LAST": "close",
+     },
+ }
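+
+ # Note: requesting an additional field means extending both BLOOMBERG_FIELDS and FIELD_MAPPING.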
+
+
+ def fetch_from_bloomberg(
+     ticker: str,
+     instrument: str,
+     start_date: str | None = None,
+     end_date: str | None = None,
+     **params: Any,
+ ) -> pd.DataFrame:
+     """
+     Fetch historical data from Bloomberg Terminal via xbbg wrapper.
+
+     Parameters
+     ----------
+     ticker : str
+         Bloomberg ticker (e.g., 'CDX.NA.IG.5Y Index', 'VIX Index', 'HYG US Equity').
+     instrument : str
+         Instrument type for field mapping ('cdx', 'vix', 'etf').
+     start_date : str or None, default None
+         Start date in YYYY-MM-DD format. Defaults to 5 years ago.
+     end_date : str or None, default None
+         End date in YYYY-MM-DD format. Defaults to today.
+     **params : Any
+         Additional Bloomberg request parameters passed to xbbg.
+
+     Returns
+     -------
+     pd.DataFrame
+         Historical data with DatetimeIndex and schema-compatible columns.
+
+     Raises
+     ------
+     ImportError
+         If xbbg is not installed.
+     ValueError
+         If ticker format is invalid or instrument type is unknown.
+     RuntimeError
+         If Bloomberg request fails or returns empty data.
+
+     Notes
+     -----
+     Requires active Bloomberg Terminal session. Connection is handled
+     automatically by xbbg wrapper.
+
+     Returned DataFrame columns are mapped to project schemas:
+     - CDX: spread, index, tenor
+     - VIX: close
+     - ETF: close, ticker
+
+     Example tickers:
+     - CDX: 'CDX.NA.IG.5Y Index'
+     - VIX: 'VIX Index'
+     - ETFs: 'HYG US Equity', 'LQD US Equity'
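+
+     Examples
+     --------
+     A usage sketch; requires a live Terminal session, so it is skipped under doctest:
+
+     >>> df = fetch_from_bloomberg("VIX Index", "vix", start_date="2024-01-01")  # doctest: +SKIP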
+     """
+     # Validate instrument type
+     if instrument not in BLOOMBERG_FIELDS:
+         raise ValueError(
+             f"Unknown instrument type: {instrument}. "
+             f"Must be one of {list(BLOOMBERG_FIELDS.keys())}"
+         )
+
+     # Default to 5-year lookback if dates not provided
+     if end_date is None:
+         end_date = datetime.now().strftime("%Y-%m-%d")
+     if start_date is None:
+         start_dt = datetime.now() - timedelta(days=5 * 365)
+         start_date = start_dt.strftime("%Y-%m-%d")
+
+     # Convert dates to Bloomberg format (YYYYMMDD)
+     bbg_start = start_date.replace("-", "")
+     bbg_end = end_date.replace("-", "")
+
+     logger.info(
+         "Fetching %s from Bloomberg: ticker=%s, dates=%s to %s",
+         instrument,
+         ticker,
+         start_date,
+         end_date,
+     )
+
+     # Import xbbg wrapper
+     try:
+         from xbbg import blp
+     except ImportError as e:
+         raise ImportError(
+             "xbbg not installed. "
+             "Install with: uv sync --extra bloomberg"
+         ) from e
+
+     # Fetch historical data using xbbg
+     fields = BLOOMBERG_FIELDS[instrument]
+     try:
+         df = blp.bdh(
+             tickers=ticker,
+             flds=fields,
+             start_date=bbg_start,
+             end_date=bbg_end,
+             **params,
+         )
+     except Exception as e:
+         logger.error("Bloomberg request failed: %s", str(e))
+         raise RuntimeError(f"Failed to fetch data from Bloomberg: {e}") from e
+
+     # Check if response is empty
+     if df is None or df.empty:
+         raise RuntimeError(
+             f"Bloomberg returned empty data for {ticker}. "
+             "Check ticker format and data availability."
+         )
+
+     logger.debug("Fetched %d rows from Bloomberg", len(df))
+
+     # Map Bloomberg field names to schema columns
+     df = _map_bloomberg_fields(df, instrument, ticker)
+
+     # Add metadata columns (index, tenor, ticker)
+     df = _add_metadata_columns(df, instrument, ticker)
+
+     logger.info("Successfully fetched %d rows with columns: %s", len(df), list(df.columns))
+
+     return df
+
+
+ def _map_bloomberg_fields(
+     df: pd.DataFrame,
+     instrument: str,
+     ticker: str,
+ ) -> pd.DataFrame:
+     """
+     Map Bloomberg field names to schema-expected column names.
+
+     Parameters
+     ----------
+     df : pd.DataFrame
+         Raw DataFrame from xbbg with Bloomberg field names.
+     instrument : str
+         Instrument type for field mapping.
+     ticker : str
+         Bloomberg ticker (used for multi-ticker responses).
+
+     Returns
+     -------
+     pd.DataFrame
+         DataFrame with renamed columns matching project schemas.
+
+     Notes
+     -----
+     xbbg returns multi-index columns for multiple tickers: (ticker, field).
+     For single ticker requests, we flatten to just field names.
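+
+     Examples
+     --------
+     The flattening in isolation (pandas only, no Bloomberg needed):
+
+     >>> cols = pd.MultiIndex.from_tuples([("VIX Index", "PX_LAST")])
+     >>> list(cols.get_level_values(1))
+     ['PX_LAST']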
+     """
+     # Handle xbbg multi-index columns: (ticker, field)
+     if isinstance(df.columns, pd.MultiIndex):
+         # Flatten by taking second level (field names)
+         df.columns = df.columns.get_level_values(1)
+
+     # Rename columns according to mapping
+     field_map = FIELD_MAPPING[instrument]
+     df = df.rename(columns=field_map)
+
+     logger.debug("Mapped fields: %s -> %s", list(field_map.keys()), list(field_map.values()))
+
+     return df
+
+
+ def _add_metadata_columns(
+     df: pd.DataFrame,
+     instrument: str,
+     ticker: str,
+ ) -> pd.DataFrame:
+     """
+     Add metadata columns required by schemas.
+
+     Parameters
+     ----------
+     df : pd.DataFrame
+         DataFrame with mapped field columns.
+     instrument : str
+         Instrument type ('cdx', 'vix', 'etf').
+     ticker : str
+         Bloomberg ticker string to parse for metadata.
+
+     Returns
+     -------
+     pd.DataFrame
+         DataFrame with added metadata columns.
+
+     Raises
+     ------
+     ValueError
+         If ticker format cannot be parsed.
+
+     Notes
+     -----
+     Extracts metadata from ticker strings:
+     - CDX: 'CDX.NA.IG.5Y Index' -> index='CDX_IG', tenor='5Y'
+     - ETF: 'HYG US Equity' -> ticker='HYG'
+     - VIX: No metadata needed
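+
+     Examples
+     --------
+     The CDX ticker parsing in isolation:
+
+     >>> parts = "CDX.NA.IG.5Y Index".split(".")
+     >>> parts[2], parts[3].split()[0]
+     ('IG', '5Y')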
+     """
+     if instrument == "cdx":
+         # Parse CDX ticker: 'CDX.NA.IG.5Y Index' or 'CDX.NA.HY.5Y Index'
+         parts = ticker.split(".")
+         if len(parts) < 4 or not ticker.endswith(" Index"):
+             raise ValueError(
+                 f"Invalid CDX ticker format: {ticker}. "
+                 "Expected format: 'CDX.NA.{IG|HY|XO}.{tenor} Index'"
+             )
+
+         index_type = parts[2]  # IG, HY, XO
+         tenor_part = parts[3].split()[0]  # '5Y' from '5Y Index'
+
+         df["index"] = f"CDX_{index_type}"
+         df["tenor"] = tenor_part
+
+         logger.debug("Added CDX metadata: index=%s, tenor=%s", df["index"].iloc[0], df["tenor"].iloc[0])
+
+     elif instrument == "etf":
+         # Parse ETF ticker: 'HYG US Equity' or 'LQD US Equity'
+         parts = ticker.split()
+         if len(parts) < 2 or parts[-1] != "Equity":
+             raise ValueError(
+                 f"Invalid ETF ticker format: {ticker}. "
+                 "Expected format: '{ticker} US Equity'"
+             )
+
+         etf_ticker = parts[0]
+         df["ticker"] = etf_ticker
+
+         logger.debug("Added ETF metadata: ticker=%s", etf_ticker)
+
+     # VIX doesn't need metadata columns
+     elif instrument == "vix":
+         pass
+
+     return df