zipline_polygon_bundle-0.1.6-py3-none-any.whl

This diff shows the contents of a publicly available package version as released to a supported registry. It is provided for informational purposes only and reflects the package files exactly as they appear in the public registry.
--- /dev/null
+++ zipline_polygon_bundle/__init__.py
@@ -0,0 +1,22 @@
+from .bundle import (
+    register_polygon_equities_bundle,
+    symbol_to_upper,
+    polygon_equities_bundle_day,
+    polygon_equities_bundle_minute,
+)
+
+from .config import PolygonConfig
+from .concat_all_aggs import concat_all_aggs_from_csv, generate_csv_agg_tables
+from .adjustments import load_splits, load_dividends
+
+__all__ = [
+    "register_polygon_equities_bundle",
+    "symbol_to_upper",
+    "polygon_equities_bundle_day",
+    "polygon_equities_bundle_minute",
+    "PolygonConfig",
+    "concat_all_aggs_from_csv",
+    "generate_csv_agg_tables",
+    "load_splits",
+    "load_dividends",
+]
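
The `__init__.py` above re-exports the bundle registration helpers, the shared `PolygonConfig`, the aggregate-concatenation utilities, and the corporate-actions loaders, so everything is importable from the top-level package. A brief, hypothetical sketch of how the exports might be pulled in from a zipline `extension.py` (the registration call's arguments are an assumption, not taken from this release):

    # extension.py (hypothetical usage sketch)
    from zipline_polygon_bundle import register_polygon_equities_bundle

    # Register the Polygon equities bundle under an assumed name; see the
    # package README for the parameters this version actually accepts.
    register_polygon_equities_bundle("polygon")
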
--- /dev/null
+++ zipline_polygon_bundle/adjustments.py
@@ -0,0 +1,151 @@
+from .config import PolygonConfig
+
+import polygon
+
+import datetime
+import logging
+import os
+import pandas as pd
+from urllib3 import HTTPResponse
+
+
+def load_polygon_splits(
+    config: PolygonConfig, first_start_end: datetime.date, last_end_date: datetime.date
+) -> pd.DataFrame:
+    # N.B. If the schema changes then the filename should change. We're on v3 now.
+    splits_path = config.api_cache_path(
+        start_date=first_start_end, end_date=last_end_date, filename="list_splits"
+    )
+    expected_split_count = (last_end_date - first_start_end).days * 3
+    if not os.path.exists(splits_path):
+        client = polygon.RESTClient(api_key=config.api_key)
+        splits = client.list_splits(
+            limit=1000,
+            execution_date_gte=first_start_end,
+            execution_date_lt=last_end_date + datetime.timedelta(days=1),
+        )
+        if isinstance(splits, HTTPResponse):
+            raise ValueError(f"Polygon.list_splits bad HTTPResponse: {splits}")
+        splits = pd.DataFrame(splits)
+        print(f"Got {len(splits)=} from Polygon list_splits.")
+        os.makedirs(os.path.dirname(splits_path), exist_ok=True)
+        splits.to_parquet(splits_path)
+        if len(splits) < expected_split_count:
+            logging.warning(
+                f"Only got {len(splits)=} from Polygon list_splits (expected {expected_split_count=}). "
+                "This is probably fine if your historical range is short."
+            )
+    # We will always load from the file to avoid any chance of weird errors.
+    if os.path.exists(splits_path):
+        splits = pd.read_parquet(splits_path)
+        print(f"Loaded {len(splits)=} from {splits_path}")
+        if len(splits) < expected_split_count:
+            logging.warning(
+                f"Only got {len(splits)=} from Polygon list_splits (expected {expected_split_count=}). "
+                "This is probably fine if your historical range is short."
+            )
+        return splits
+    raise ValueError(f"Failed to load splits from {splits_path}")
+
+
+def load_splits(
+    config: PolygonConfig,
+    first_start_end: datetime.date,
+    last_end_date: datetime.date,
+    ticker_to_sid: dict[str, int],
+) -> pd.DataFrame:
+    splits = load_polygon_splits(config, first_start_end, last_end_date)
+    splits["sid"] = splits["ticker"].apply(lambda t: ticker_to_sid.get(t, pd.NA))
+    splits.dropna(inplace=True)
+    splits["sid"] = splits["sid"].astype("int64")
+    splits["execution_date"] = pd.to_datetime(splits["execution_date"])
+    splits.rename(columns={"execution_date": "effective_date"}, inplace=True)
+    # The ratio must be a float, and some split_to/split_from values are not integers anyway.
+    splits["split_from"] = splits["split_from"].astype(float)
+    splits["split_to"] = splits["split_to"].astype(float)
+    splits["ratio"] = splits["split_from"] / splits["split_to"]
+    splits.drop(columns=["ticker", "split_from", "split_to"], inplace=True)
+    return splits
+
+
+def load_polygon_dividends(
+    config: PolygonConfig, first_start_date: datetime.date, last_end_date: datetime.date
+) -> pd.DataFrame:
+    # N.B. If the schema changes then the filename should change. We're on v3 now.
+    dividends_path = config.api_cache_path(
+        start_date=first_start_date, end_date=last_end_date, filename="list_dividends"
+    )
+    if not os.path.exists(dividends_path):
+        client = polygon.RESTClient(api_key=config.api_key)
+        dividends = client.list_dividends(
+            limit=1000,
+            record_date_gte=first_start_date,
+            pay_date_lt=last_end_date + datetime.timedelta(days=1),
+        )
+        if isinstance(dividends, HTTPResponse):
+            raise ValueError(f"Polygon.list_dividends bad HTTPResponse: {dividends}")
+        dividends = pd.DataFrame(dividends)
+        os.makedirs(os.path.dirname(dividends_path), exist_ok=True)
+        dividends.to_parquet(dividends_path)
+        print(f"Wrote {len(dividends)=} from Polygon list_dividends to {dividends_path=}")
+        # if len(dividends) < 10000:
+        #     logging.error(f"Only got {len(dividends)=} from Polygon list_dividends.")
+    # We will always load from the file to avoid any chance of weird errors.
+    if os.path.exists(dividends_path):
+        dividends = pd.read_parquet(dividends_path)
+        # print(f"Loaded {len(dividends)=} from {dividends_path}")
+        # if len(dividends) < 10000:
+        #     logging.error(f"Only found {len(dividends)=} at {dividends_path}")
+        return dividends
+    raise ValueError(f"Failed to load dividends from {dividends_path}")
+
+
+def load_chunked_polygon_dividends(
+    config: PolygonConfig, first_start_end: datetime.date, last_end_date: datetime.date
+) -> pd.DataFrame:
+    dividends_list = []
+    next_start_end = first_start_end
+    while next_start_end < last_end_date:
+        # We want at most a month of dividends at a time. They should end on the last day of the month.
+        # So the next_end_date is the day before the first day of the next month.
+        first_of_next_month = datetime.date(
+            next_start_end.year + (next_start_end.month // 12),
+            (next_start_end.month % 12) + 1,
+            1,
+        )
+        next_end_date = first_of_next_month - datetime.timedelta(days=1)
+        if next_end_date > last_end_date:
+            next_end_date = last_end_date
+        dividends_list.append(load_polygon_dividends(
+            config, next_start_end, next_end_date
+        ))
+        next_start_end = next_end_date + datetime.timedelta(days=1)
+    return pd.concat(dividends_list)
+
+
+def load_dividends(
+    config: PolygonConfig,
+    first_start_end: datetime.date,
+    last_end_date: datetime.date,
+    ticker_to_sid: dict[str, int],
+) -> pd.DataFrame:
+    dividends = load_chunked_polygon_dividends(config, first_start_end, last_end_date)
+    dividends["sid"] = dividends["ticker"].apply(lambda t: ticker_to_sid.get(t, pd.NA))
+    dividends.dropna(how="any", inplace=True)
+    dividends["sid"] = dividends["sid"].astype("int64")
+    dividends["declaration_date"] = pd.to_datetime(dividends["declaration_date"])
+    dividends["ex_dividend_date"] = pd.to_datetime(dividends["ex_dividend_date"])
+    dividends["record_date"] = pd.to_datetime(dividends["record_date"])
+    dividends["pay_date"] = pd.to_datetime(dividends["pay_date"])
+    dividends.rename(
+        columns={
+            "cash_amount": "amount",
+            "declaration_date": "declared_date",
+            "ex_dividend_date": "ex_date",
+        },
+        inplace=True,
+    )
+    dividends.drop(
+        columns=["ticker", "frequency", "currency", "dividend_type"], inplace=True
+    )
+    return dividends
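
The two public entry points, `load_splits` and `load_dividends`, cache Polygon's corporate-actions listings as Parquet, map tickers to zipline sids, and rename columns into the layout zipline's adjustment writer expects (`sid`/`effective_date`/`ratio` for splits; `sid`/`ex_date`/`declared_date`/`record_date`/`pay_date`/`amount` for dividends). A minimal, hypothetical calling sketch, assuming a `PolygonConfig` instance named `config` built elsewhere and a ticker-to-sid mapping produced while writing asset metadata:

    # Hypothetical usage sketch; `config` and the sid mapping are assumptions,
    # not taken from this release.
    import datetime

    ticker_to_sid = {"AAPL": 0, "MSFT": 1}  # normally built during asset writing
    first_day = datetime.date(2023, 1, 3)
    last_day = datetime.date(2023, 12, 29)

    splits_df = load_splits(config, first_day, last_day, ticker_to_sid)
    dividends_df = load_dividends(config, first_day, last_day, ticker_to_sid)
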