zipline_polygon_bundle 0.1.7__py3-none-any.whl → 0.2.0.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zipline_polygon_bundle/__init__.py +31 -1
- zipline_polygon_bundle/adjustments.py +34 -0
- zipline_polygon_bundle/bundle.py +183 -34
- zipline_polygon_bundle/concat_all_aggs.py +18 -53
- zipline_polygon_bundle/concat_all_aggs_partitioned.py +6 -6
- zipline_polygon_bundle/config.py +132 -26
- zipline_polygon_bundle/nyse_all_hours_calendar.py +25 -0
- zipline_polygon_bundle/polygon_file_reader.py +1 -1
- zipline_polygon_bundle/process_all_aggs.py +2 -2
- zipline_polygon_bundle/quotes.py +101 -0
- zipline_polygon_bundle/tickers_and_names.py +5 -38
- zipline_polygon_bundle/trades.py +944 -0
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dev1.dist-info}/METADATA +6 -3
- zipline_polygon_bundle-0.2.0.dev1.dist-info/RECORD +17 -0
- zipline_polygon_bundle-0.1.7.dist-info/RECORD +0 -14
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dev1.dist-info}/LICENSE +0 -0
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dev1.dist-info}/WHEEL +0 -0
zipline_polygon_bundle/config.py
CHANGED
@@ -1,8 +1,34 @@
-from exchange_calendars.calendar_helpers import Date, parse_date
-from …
+from exchange_calendars.calendar_helpers import Date, parse_date
+from exchange_calendars.calendar_utils import get_calendar
+
+from .nyse_all_hours_calendar import NYSE_ALL_HOURS
+
+from typing import Iterator, Tuple
 
-import os
 import pandas as pd
+from pyarrow.fs import LocalFileSystem
+import os
+import re
+import fnmatch
+
+
+PARTITION_COLUMN_NAME = "part"
+PARTITION_KEY_LENGTH = 2
+
+
+def to_partition_key(s: str) -> str:
+    """
+    Partition key is low cardinality and must be filesystem-safe.
+    The reason for partitioning is to keep the parquet files from getting too big.
+    10 years of minute aggs for US stocks is 83GB gzipped. A single parquet would be 62GB on disk.
+    Currently the first two characters so files stay under 1GB. Weird characters are replaced with "A".
+    """
+    k = (s + "A")[0:PARTITION_KEY_LENGTH].upper()
+    if k.isalpha():
+        return k
+    # Replace non-alpha characters with "A".
+    k = "".join([c if c.isalpha() else "A" for c in k])
+    return k
 
 
 class PolygonConfig:
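Note: the new to_partition_key helper upcases the first two characters (padding with "A") and replaces anything non-alphabetic; expected behavior, as an illustration rather than part of the diff:

to_partition_key("AAPL")   # -> "AA" (first two characters, already alphabetic)
to_partition_key("BRK.A")  # -> "BR"
to_partition_key("A")      # -> "AA" (padded with "A" to reach two characters)
to_partition_key("3M")     # -> "AM" ("3" is non-alphabetic, replaced with "A")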
@@ -10,33 +36,35 @@ class PolygonConfig:
         self,
         environ: dict,
         calendar_name: str,
-
-
+        start_date: Date,
+        end_date: Date,
         agg_time: str = "day",
+        custom_aggs_format: str = "{config.agg_timedelta.seconds}sec_aggs",
     ):
-        if agg_time not in ["minute", "day"]:
-            raise ValueError(f"agg_time must be 'minute' or 'day', got '{agg_time}'")
         self.calendar_name = calendar_name
+        self.start_date = start_date
+        self.end_date = end_date
         self.start_timestamp = (
-            parse_date(…
-            if …
+            parse_date(start_date, calendar=self.calendar)
+            if start_date
            else self.calendar.first_session
         )
         self.end_timestamp = (
-            parse_date(…
-            if …
+            parse_date(end_date, calendar=self.calendar)
+            if end_date
            else self.calendar.last_session
         )
         self.max_workers = None
         if environ.get("POLYGON_MAX_WORKERS", "").strip() != "":
             self.max_workers = int(environ.get("POLYGON_MAX_WORKERS"))
         self.api_key = environ.get("POLYGON_API_KEY")
+        self.filesystem = LocalFileSystem()
         self.data_dir = environ.get("POLYGON_DATA_DIR", "data/files.polygon.io")
         self.cik_cusip_mapping_csv_path = environ.get(
             "CIK_CUSIP_MAPS_CSV", os.path.join(self.data_dir, "cik-cusip-maps.csv")
         )
-        self.asset_subdir = environ.get("POLYGON_ASSET_SUBDIR", "us_stocks_sip")
         self.market = environ.get("POLYGON_MARKET", "stocks")
+        self.asset_subdir = environ.get("POLYGON_ASSET_SUBDIR", "us_stocks_sip")
         self.tickers_dir = environ.get(
             "POLYGON_TICKERS_DIR",
             os.path.join(os.path.join(self.data_dir, "tickers"), self.asset_subdir),
@@ -51,25 +79,55 @@ class PolygonConfig:
         self.flat_files_dir = environ.get(
             "POLYGON_FLAT_FILES_DIR", os.path.join(self.data_dir, "flatfiles")
         )
-
-        self.…
+        # TODO: Restore non-recusive option. Always recursive for now.
+        self.csv_paths_pattern = environ.get(
+            # "POLYGON_FLAT_FILES_CSV_PATTERN", "**/*.csv.gz"
+            "POLYGON_FLAT_FILES_CSV_PATTERN",
+            "*.csv.gz",
+        )
         self.asset_files_dir = os.path.join(self.flat_files_dir, self.asset_subdir)
         self.minute_aggs_dir = os.path.join(self.asset_files_dir, "minute_aggs_v1")
         self.day_aggs_dir = os.path.join(self.asset_files_dir, "day_aggs_v1")
-        self.…
-
-
+        self.trades_dir = os.path.join(self.asset_files_dir, "trades_v1")
+        self.quotes_dir = os.path.join(self.asset_files_dir, "quotes_v1")
+
         # TODO: The "by ticker" files are temporary/intermediate and should/could be in the zipline data dir.
         self.minute_by_ticker_dir = os.path.join(
             self.asset_files_dir, "minute_by_ticker_v1"
         )
         self.day_by_ticker_dir = os.path.join(self.asset_files_dir, "day_by_ticker_v1")
-
-
-
-
+
+        if bool(re.match(r"^\d", agg_time)):
+            self.agg_timedelta = pd.to_timedelta(agg_time)
+            self.custom_asset_files_dir = environ.get(
+                "CUSTOM_ASSET_FILES_DIR", self.asset_files_dir
+            )
+            self.custom_aggs_dir = os.path.join(
+                self.custom_asset_files_dir, custom_aggs_format.format(config=self)
+            )
+            self.custom_aggs_by_ticker_dir = os.path.join(
+                self.custom_asset_files_dir,
+                (custom_aggs_format + "_by_ticker").format(config=self),
+            )
+            self.aggs_dir = self.custom_aggs_dir
+            self.by_ticker_dir = self.custom_aggs_by_ticker_dir
+        elif agg_time == "minute":
+            self.agg_timedelta = pd.to_timedelta("1minute")
+            self.aggs_dir = self.minute_aggs_dir
+            self.by_ticker_dir = self.minute_by_ticker_dir
+        elif agg_time == "day":
+            self.agg_timedelta = pd.to_timedelta("1day")
+            self.aggs_dir = self.day_aggs_dir
+            self.by_ticker_dir = self.day_by_ticker_dir
+        else:
+            raise ValueError(
+                f"agg_time must be 'minute', 'day', or a timedelta string; got '{agg_time=}'"
+            )
+        self.agg_time = agg_time
+
+        self.arrow_format = environ.get(
+            "POLYGON_ARROW_FORMAT", "parquet" if self.agg_time == "day" else "hive"
         )
-        self.arrow_format = environ.get("POLYGON_ARROW_FORMAT", "parquet" if self.agg_time == "day" else "hive")
         # self.by_ticker_hive_dir = os.path.join(
         #     self.by_ticker_dir,
         #     f"{self.agg_time}_{self.start_timestamp.date().isoformat()}_{self.end_timestamp.date().isoformat()}.hive",
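Note: an agg_time that starts with a digit now selects the custom-aggregates branch, with the directory name derived from the parsed timedelta. A quick illustration (assumes pandas timedelta parsing; not part of the diff):

import pandas as pd

agg_timedelta = pd.to_timedelta("15min")  # agg_time="15min" matches r"^\d"
print(agg_timedelta.seconds)  # 900
# The default custom_aggs_format "{config.agg_timedelta.seconds}sec_aggs"
# therefore names the directory "900sec_aggs" under custom_asset_files_dir.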
@@ -78,7 +136,10 @@ class PolygonConfig:
 
     @property
     def calendar(self):
-        …
+        # If you don't give a start date you'll only get 20 years from today.
+        if self.calendar_name in [NYSE_ALL_HOURS, "us_futures", "CMES", "XNYS", "NYSE"]:
+            return get_calendar(self.calendar_name, side="right", start=pd.Timestamp("1990-01-01"))
+        return get_calendar(self.calendar_name, side="right")
 
     def ticker_file_path(self, date: pd.Timestamp):
         ticker_year_dir = os.path.join(
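Note: exchange_calendars only materializes sessions roughly 20 years back from today by default, so without the start= override a bundle starting in 2003 could fall outside the calendar's bounds. A sketch of the difference (not part of the diff):

import pandas as pd
from exchange_calendars import get_calendar

default_cal = get_calendar("XNYS", side="right")
print(default_cal.first_session)  # roughly 20 years before today

long_cal = get_calendar("XNYS", side="right", start=pd.Timestamp("1990-01-01"))
print(long_cal.first_session)  # 1990-01-02, the first XNYS session on/after start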
@@ -88,14 +149,22 @@ class PolygonConfig:
         return os.path.join(
             ticker_year_dir, f"tickers_{date.date().isoformat()}.parquet"
         )
-
+
     def file_path_to_name(self, path: str):
+        # TODO: Use csv_paths_pattern to remove the suffixes
         return os.path.basename(path).removesuffix(".gz").removesuffix(".csv")
 
-
+    @property
+    def by_ticker_aggs_arrow_dir(self):
+        # TODO: Don't split these up by ingestion range.  They're already time indexed.
+        # Only reason to separate them is if we're worried about (or want) data being different across ingestions.
+        # This scattering is really slow and is usually gonna be redundant.
+        # This wasn't a problem when start/end dates were the calendar bounds when omitted.
+        # Can't just drop this because concat_all_aggs_from_csv will skip if it exists.
         return os.path.join(
             self.by_ticker_dir,
-            f"{self.…
+            f"{self.start_timestamp.date().isoformat()}_{self.end_timestamp.date().isoformat()}.arrow",
+            # "aggs.arrow",
         )
 
     def api_cache_path(
@@ -107,6 +176,43 @@ class PolygonConfig:
             self.cache_dir, f"{start_str}_{end_str}/{filename}{extension}"
         )
 
+    def csv_paths(self) -> Iterator[str]:
+        for root, dirnames, filenames in os.walk(self.aggs_dir, topdown=True):
+            if dirnames:
+                dirnames[:] = sorted(dirnames)
+            # Filter out filenames that don't match the pattern.
+            filenames = fnmatch.filter(filenames, self.csv_paths_pattern)
+            if filenames:
+                for filename in sorted(filenames):
+                    yield os.path.join(root, filename)
+
+    def find_first_and_last_aggs(self, aggs_dir, file_pattern) -> Tuple[str | None, str | None]:
+        # Find the path to the lexically first and last paths in aggs_dir that matches csv_paths_pattern.
+        # Would like to use Path.walk(top_down=True) but it is only availble in Python 3.12+.
+        # This needs to be efficient because it is called on every init, even though we only need it for ingest.
+        # But we can't call it in ingest because the writer initializes and writes the metadata before it is called.
+        paths = []
+        for root, dirnames, filenames in os.walk(aggs_dir, topdown=True):
+            if dirnames:
+                # We only want first and last in each directory.
+                sorted_dirs = sorted(dirnames)
+                dirnames[:] = (
+                    [sorted_dirs[0], sorted_dirs[-1]]
+                    if len(sorted_dirs) > 1
+                    else sorted_dirs
+                )
+            # Filter out filenames that don't match the pattern.
+            filenames = fnmatch.filter(filenames, file_pattern)
+            if filenames:
+                filenames = sorted(filenames)
+                paths.append(os.path.join(root, filenames[0]))
+                if len(filenames) > 1:
+                    paths.append(os.path.join(root, filenames[-1]))
+        if not paths:
+            return None, None
+        paths = sorted(paths)
+        return self.file_path_to_name(paths[0]), self.file_path_to_name(paths[-1])
+
 
 if __name__ == "__main__":
     config = PolygonConfig(os.environ, "XNYS", "2003-10-01", "2023-01-01")
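Note: both csv_paths and find_first_and_last_aggs rely on the os.walk(topdown=True) idiom, where assigning to dirnames[:] in place prunes and orders the traversal before descent. A standalone sketch with a hypothetical directory (not part of the diff):

import os

for root, dirnames, filenames in os.walk("flatfiles/us_stocks_sip", topdown=True):
    if dirnames:
        sorted_dirs = sorted(dirnames)
        # Descend only into the lexically first and last subdirectories,
        # e.g. the earliest and latest year/month of flat files.
        dirnames[:] = [sorted_dirs[0], sorted_dirs[-1]] if len(sorted_dirs) > 1 else sorted_dirs
    print(root, filenames)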
zipline_polygon_bundle/nyse_all_hours_calendar.py
ADDED
@@ -0,0 +1,25 @@
+import datetime
+from exchange_calendars.calendar_utils import get_calendar_names, register_calendar_type
+from exchange_calendars.exchange_calendar_xnys import XNYSExchangeCalendar
+
+
+NYSE_ALL_HOURS = "NYSE_ALL_HOURS"
+
+
+class USExtendedHoursExchangeCalendar(XNYSExchangeCalendar):
+    """
+    A calendar for extended hours which runs from 4 AM to 8 PM.
+    """
+
+    name = NYSE_ALL_HOURS
+
+    open_times = ((None, datetime.time(4)),)
+
+    close_times = ((None, datetime.time(20)),)
+
+    regular_early_close = datetime.time(13)
+
+
+def register_nyse_all_hours_calendar():
+    if NYSE_ALL_HOURS not in get_calendar_names():
+        register_calendar_type(NYSE_ALL_HOURS, USExtendedHoursExchangeCalendar)
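Note: the calendar type is registered by name and then resolved like any built-in calendar; a usage sketch (not part of the diff):

import pandas as pd
from exchange_calendars import get_calendar
from zipline_polygon_bundle.nyse_all_hours_calendar import (
    NYSE_ALL_HOURS,
    register_nyse_all_hours_calendar,
)

register_nyse_all_hours_calendar()
cal = get_calendar(NYSE_ALL_HOURS, side="right", start=pd.Timestamp("1990-01-01"))
session = cal.sessions_in_range("2020-10-07", "2020-10-07")[0]
print(cal.session_open(session), cal.session_close(session))  # 4 AM / 8 PM Eastern, in UTC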
zipline_polygon_bundle/polygon_file_reader.py
CHANGED
@@ -99,6 +99,6 @@ def process_all_csv_to_parquet(
 if __name__ == "__main__":
     # os.environ["POLYGON_DATA_DIR"] = "/Volumes/Oahu/Mirror/files.polygon.io"
     config = PolygonConfig(
-        environ=os.environ, calendar_name="XNYS",
+        environ=os.environ, calendar_name="XNYS", start_date=None, end_date=None
     )
     process_all_csv_to_parquet(config.aggs_dir)
zipline_polygon_bundle/process_all_aggs.py
CHANGED
@@ -74,8 +74,8 @@ if __name__ == "__main__":
     config = PolygonConfig(
         environ=os.environ,
         calendar_name="XNYS",
-
-
+        start_date="2020-10-07",
+        end_date="2020-10-15",
     )
     print(f"{config.aggs_dir=}")
     max_ticker_lens = apply_to_all_aggs(
zipline_polygon_bundle/quotes.py
ADDED
@@ -0,0 +1,101 @@
+from .config import PolygonConfig
+from .trades import cast_strings_to_list
+
+import os
+
+import pyarrow as pa
+from pyarrow import dataset as pa_ds
+from pyarrow import compute as pa_compute
+from pyarrow import fs as pa_fs
+from fsspec.implementations.arrow import ArrowFSWrapper
+from pyarrow import csv as pa_csv
+
+
+def quotes_schema(raw: bool = False) -> pa.Schema:
+    # There is some problem reading the timestamps as timestamps so we have to read as integer then change the schema.
+    # I make the timestamp timezone-aware because that's how Unix timestamps work and it may help avoid mistakes.
+    # timestamp_type = pa.timestamp("ns", tz="UTC")
+    timestamp_type = pa.int64() if raw else pa.timestamp("ns", tz="UTC")
+
+    # Polygon price scale is 4 decimal places (i.e. hundredths of a penny), but we'll use 10 because we have precision to spare.
+    # price_type = pa.decimal128(precision=38, scale=10)
+    # 64bit float a little overkill but avoids any plausible truncation error.
+    price_type = pa.float64()
+
+    # ticker: string
+    # ask_exchange: int64
+    # ask_price: double
+    # ask_size: int64
+    # bid_exchange: int64
+    # bid_price: double
+    # bid_size: int64
+    # conditions: string
+    # indicators: int64
+    # participant_timestamp: int64
+    # sequence_number: int64
+    # sip_timestamp: int64
+    # tape: int64
+    # trf_timestamp: int64
+
+    return pa.schema(
+        [
+            pa.field("ticker", pa.string(), nullable=False),
+            pa.field("ask_exchange", pa.int8(), nullable=False),
+            pa.field("ask_price", price_type, nullable=False),
+            pa.field("ask_size", pa.int64(), nullable=False),
+            pa.field("bid_exchange", pa.int8(), nullable=False),
+            pa.field("bid_price", price_type, nullable=False),
+            pa.field("bid_size", pa.int64(), nullable=False),
+            pa.field("conditions", pa.string(), nullable=False),
+            pa.field("indicators", pa.string(), nullable=False),
+            pa.field("participant_timestamp", timestamp_type, nullable=False),
+            pa.field("sequence_number", pa.int64(), nullable=False),
+            pa.field("sip_timestamp", timestamp_type, nullable=False),
+            pa.field("tape", pa.int8(), nullable=False),
+            pa.field("trf_timestamp", timestamp_type, nullable=False),
+        ]
+    )
+
+
+def quotes_dataset(config: PolygonConfig) -> pa_ds.Dataset:
+    """
+    Create a pyarrow dataset from the quotes files.
+    """
+
+    # https://arrow.apache.org/docs/python/filesystems.html#using-arrow-filesystems-with-fsspec
+    # https://filesystem-spec.readthedocs.io/en/latest/_modules/fsspec/spec.html#AbstractFileSystem.glob.
+    fsspec = ArrowFSWrapper(config.filesystem)
+
+    # We sort by path because they have the year and month in the dir names and the date in the filename.
+    paths = sorted(
+        fsspec.glob(os.path.join(config.quotes_dir, config.csv_paths_pattern))
+    )
+
+    return pa_ds.FileSystemDataset.from_paths(paths,
+                                              format=pa_ds.CsvFileFormat(),
+                                              schema=quotes_schema(raw=True),
+                                              filesystem=config.filesystem)
+
+
+def cast_strings_to_list(string_array, separator=",", default="0", value_type=pa.uint8()):
+    """Cast a PyArrow StringArray of comma-separated numbers to a ListArray of values."""
+
+    # Create a mask to identify empty strings
+    is_empty = pa_compute.equal(pa_compute.utf8_trim_whitespace(string_array), "")
+
+    # Use replace_with_mask to replace empty strings with the default ("0")
+    filled_column = pa_compute.replace_with_mask(string_array, is_empty, pa.scalar(default))
+
+    # Split the strings by comma
+    split_array = pa_compute.split_pattern(filled_column, pattern=separator)
+
+    # Cast each element in the resulting lists to integers
+    int_list_array = pa_compute.cast(split_array, pa.list_(value_type))
+
+    return int_list_array
+
+
+def cast_quotes(quotes):
+    quotes = quotes.cast(quotes_schema())
+    condition_values = cast_strings_to_list(quotes.column("conditions").combine_chunks())
+    return quotes.append_column('condition_values', condition_values)
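Note: a small illustration of what cast_strings_to_list does to Polygon's comma-separated conditions column, mirroring the steps above (the example data is hypothetical; not part of the diff):

import pyarrow as pa
import pyarrow.compute as pa_compute

conditions = pa.array(["12,37", "", "2"])  # raw conditions strings from the CSV

# Empty strings become the default "0", then each string splits on ","
# and casts to a list of uint8.
is_empty = pa_compute.equal(pa_compute.utf8_trim_whitespace(conditions), "")
filled = pa_compute.replace_with_mask(conditions, is_empty, pa.scalar("0"))
split = pa_compute.split_pattern(filled, pattern=",")
print(pa_compute.cast(split, pa.list_(pa.uint8())))
# -> [[12, 37], [0], [2]]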
zipline_polygon_bundle/tickers_and_names.py
CHANGED
@@ -3,6 +3,7 @@ from .config import PolygonConfig
 import datetime
 import os
 import pandas as pd
+import csv
 import polygon
 import logging
 from concurrent.futures import ProcessPoolExecutor
@@ -47,7 +48,10 @@ class PolygonAssets:
         active: bool = True,
     ):
         response = self.polygon_client.list_tickers(
-            market=self.config.market,
+            market=self.config.market,
+            active=active,
+            date=date.date().isoformat(),
+            limit=500,
         )
         tickers_df = pd.DataFrame(list(response))
         # The currency info is for crypto. The source_feed is always NA.
@@ -383,40 +387,3 @@ def get_ticker_universe(config: PolygonConfig, fetch_missing: bool = False):
     merged_tickers = pd.read_parquet(parquet_path)
     merged_tickers.info()
     return merged_tickers
-
-
-# Initialize ticker files in __main__. Use CLI args to specify start and end dates.
-if __name__ == "__main__":
-    import argparse
-
-    parser = argparse.ArgumentParser(description="Initialize ticker files.")
-    parser.add_argument(
-        "--start-date",
-        type=str,
-        help="Start date in ISO format (YYYY-MM-DD)",
-        default="2014-05-01",
-    )
-    parser.add_argument(
-        "--end-date",
-        type=str,
-        help="End date in ISO format (YYYY-MM-DD)",
-        default="2024-04-01",
-    )
-    args = parser.parse_args()
-
-    start_date = (
-        datetime.datetime.strptime(args.start_date, "%Y-%m-%d").date()
-        if args.start_date
-        else datetime.date.today()
-    )
-    end_date = (
-        datetime.datetime.strptime(args.end_date, "%Y-%m-%d").date()
-        if args.end_date
-        else datetime.date.today()
-    )
-
-    all_tickers = load_all_tickers(start_date, end_date, fetch_missing=True)
-    merged_tickers = merge_tickers(all_tickers)
-    merged_tickers.to_csv(f"data/tickers/us_tickers_{start_date}-{end_date}.csv")
-    ticker_names = ticker_names_from_merged_tickers(merged_tickers)
-    print(ticker_names)
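Note: the list_tickers change pins the reference query to a specific as-of date and page size. A hedged sketch of the call (client setup is illustrative; the bundle builds it from PolygonConfig):

import pandas as pd
import polygon

client = polygon.RESTClient(api_key="YOUR_POLYGON_API_KEY")

# list_tickers returns a paginated iterator; active= and date= scope the
# ticker universe to its composition on that day, limit= sets the page size.
response = client.list_tickers(
    market="stocks",
    active=True,
    date="2020-10-07",
    limit=500,
)
tickers_df = pd.DataFrame(list(response))
print(tickers_df.head())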