zipline_polygon_bundle 0.1.7__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- zipline_polygon_bundle/__init__.py +33 -5
- zipline_polygon_bundle/adjustments.py +60 -31
- zipline_polygon_bundle/bundle.py +202 -208
- zipline_polygon_bundle/compute_signals.py +261 -0
- zipline_polygon_bundle/concat_all_aggs.py +140 -70
- zipline_polygon_bundle/concat_all_aggs_partitioned.py +6 -6
- zipline_polygon_bundle/config.py +167 -36
- zipline_polygon_bundle/nyse_all_hours_calendar.py +25 -0
- zipline_polygon_bundle/polygon_file_reader.py +1 -1
- zipline_polygon_bundle/process_all_aggs.py +2 -2
- zipline_polygon_bundle/quotes.py +101 -0
- zipline_polygon_bundle/tickers_and_names.py +5 -38
- zipline_polygon_bundle/trades.py +533 -0
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dist-info}/METADATA +10 -5
- zipline_polygon_bundle-0.2.0.dist-info/RECORD +18 -0
- zipline_polygon_bundle-0.1.7.dist-info/RECORD +0 -14
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dist-info}/LICENSE +0 -0
- {zipline_polygon_bundle-0.1.7.dist-info → zipline_polygon_bundle-0.2.0.dist-info}/WHEEL +0 -0
zipline_polygon_bundle/compute_signals.py (new file)
@@ -0,0 +1,261 @@
+from .config import PolygonConfig
+from .trades import custom_aggs_schema, custom_aggs_partitioning
+
+import datetime
+import numpy as np
+import pyarrow as pa
+import pyarrow.compute as pa_compute
+import pyarrow.dataset as pa_ds
+import pandas_ta as ta
+import pandas as pd
+
+
+def calculate_mfi(typical_price: pd.Series, money_flow: pd.Series, period: int):
+    mf_sign = np.where(typical_price > np.roll(typical_price, shift=1), 1, -1)
+    signed_mf = money_flow * mf_sign
+
+    # Calculate gain and loss using vectorized operations
+    positive_mf = np.maximum(signed_mf, 0)
+    negative_mf = np.maximum(-signed_mf, 0)
+
+    mf_avg_gain = (
+        np.convolve(positive_mf, np.ones(period), mode="full")[: len(positive_mf)]
+        / period
+    )
+    mf_avg_loss = (
+        np.convolve(negative_mf, np.ones(period), mode="full")[: len(negative_mf)]
+        / period
+    )
+
+    epsilon = 1e-10  # Small epsilon value to avoid division by zero
+    mfi = 100 - (100 / (1 + mf_avg_gain / (mf_avg_loss + epsilon)))
+    return mfi
+
+
+# https://github.com/twopirllc/pandas-ta/blob/main/pandas_ta/momentum/stoch.py
+# https://github.com/twopirllc/pandas-ta/blob/development/pandas_ta/momentum/stoch.py
+# `k` vs `fast_k` arg names.
+# https://github.com/twopirllc/pandas-ta/issues/726
+# Results affected by values outside range
+# https://github.com/twopirllc/pandas-ta/issues/535
+
+
+def calculate_stoch(
+    high: pd.Series,
+    low: pd.Series,
+    close: pd.Series,
+    k: int = 14,
+    d: int = 3,
+    smooth_k: int = 3,
+    mamode: str = "sma",
+):
+    """Indicator: Stochastic Oscillator (STOCH)"""
+    lowest_low = low.rolling(k).min()
+    highest_high = high.rolling(k).max()
+
+    stoch = 100 * (close - lowest_low)
+    stoch /= ta.utils.non_zero_range(highest_high, lowest_low)
+
+    stoch_k = ta.overlap.ma(
+        mamode, stoch.loc[stoch.first_valid_index() :,], length=smooth_k
+    )
+    stoch_d = (
+        ta.overlap.ma(mamode, stoch_k.loc[stoch_k.first_valid_index() :,], length=d)
+        if stoch_k is not None
+        else None
+    )
+    # Histogram
+    stoch_h = stoch_k - stoch_d if stoch_d is not None else None
+
+    return stoch_k, stoch_d, stoch_h
+
+
+def compute_per_ticker_signals(df: pd.DataFrame, period: int = 14) -> pd.DataFrame:
+    df = df.set_index("window_start").sort_index()
+    session_index = pd.date_range(
+        start=df.index[0], end=df.index[-1], freq=pd.Timedelta(seconds=60)
+    )
+    df = df.reindex(session_index)
+    df.index.rename("window_start", inplace=True)
+
+    # df["minute_of_day"] = (df.index.hour * 60) + df.index.minute
+    # df["day_of_week"] = df.index.day_of_week
+
+    df.transactions = df.transactions.fillna(0)
+    df.volume = df.volume.fillna(0)
+    df.total = df.total.fillna(0)
+    df.close = df.close.ffill()
+    close = df.close
+    df.vwap = df.vwap.fillna(close)
+    df.high = df.high.fillna(close)
+    df.low = df.low.fillna(close)
+    df.open = df.open.fillna(close)
+    price_open = df.open
+    high = df.high
+    low = df.low
+    vwap = df.vwap
+    # volume = df.volume
+    total = df.total
+    next_close = close.shift()
+
+    # TODO: Odometer rollover signal. Relative difference to nearest power of 10.
+    # Something about log10 being a whole number? When is $50 the rollover vs $100 or $10?
+
+    # "True (Typical?) Price" which I think is an approximation of VWAP.
+    # Trouble with both is that if there are no trades in a bar we get NaN.
+    # That then means we get NaN for averages for the next period-1 bars too.
+    # Question is whether to ffill the price for these calculations.
+    df["TP"] = (high + low + close) / 3
+
+    # Gain/loss in this bar.
+    df["ret1bar"] = close.div(price_open).sub(1)
+
+    for t in range(2, period):
+        df[f"ret{t}bar"] = close.div(price_open.shift(t - 1)).sub(1)
+
+    # Average True Range (ATR)
+    true_range = pd.concat(
+        [high.sub(low), high.sub(next_close).abs(), low.sub(next_close).abs()], axis=1
+    ).max(1)
+    # Normalized ATR (NATR) or Average of Normalized TR.
+    # Choice of NATR operations ordering discussion: https://www.macroption.com/normalized-atr/
+    # He doesn't talk about VWAP but I think that is a better normalizing price for a bar.
+    # atr = true_range.ewm(span=period).mean()
+    # df["natr_c"] = atr / close
+    # df["antr_c"] = (true_range / close).ewm(span=period).mean()
+    # df["natr_v"] = atr / vwap
+    # df["antr_v"] = (true_range / vwap).ewm(span=period).mean()
+    df["NATR"] = (true_range / vwap).ewm(span=period).mean()
+
+    # True Price as HLC average VS VWAP.
+    # VWAP is better I think but is quite different than standard CCI.
+    # Three ways to compute CCI, all give the same value using TP.
+    # tp = (high + low + close) / 3
+    # df['SMA'] = ta.sma(tp, length=period)
+    # df['sma_r'] = tp.rolling(period).mean()
+    # df['MAD'] = ta.mad(tp, length=period)
+    # # Series.mad deprecated. mad = (s - s.mean()).abs().mean()
+    # df['mad_r'] = tp.rolling(period).apply(lambda x: (pd.Series(x) - pd.Series(x).mean()).abs().mean())
+
+    # df['cci_r'] = (tp - df['sma_r']) / (0.015 * df['mad_r'])
+    # df['CCI'] = (tp - df['SMA']) / (0.015 * df['MAD'])
+    # df['cci_ta'] = ta.cci(high=high, low=low, close=close, length=period)
+
+    df["taCCI"] = ta.cci(high=high, low=low, close=close, length=period)
+
+    # https://gist.github.com/quantra-go-algo/1b37bfb74d69148f0dfbdb5a2c7bdb25
+    # https://medium.com/@huzaifazahoor654/how-to-calculate-cci-in-python-a-step-by-step-guide-9a3f61698be6
+    sma = pd.Series(ta.sma(vwap, length=period))
+    mad = pd.Series(ta.mad(vwap, length=period))
+    df["CCI"] = (vwap - sma) / (0.015 * mad)
+
+    # df['MFI'] = calculate_mfi(high=high, low=low, close=close, volume=volume, period=period)
+    df["MFI"] = calculate_mfi(typical_price=vwap, money_flow=total, period=period)
+
+    # We use Stochastic (rather than MACD because we need a ticker independent indicator.
+    # IOW a percentage price oscillator (PPO) rather than absolute price oscillator (APO).
+    # https://www.alpharithms.com/moving-average-convergence-divergence-macd-031217/
+    # We're using 14/3 currently rather than the usual 26/12 popular for MACD though.
+    stoch_k, stoch_d, stoch_h = calculate_stoch(high, low, close, k=period)
+    df["STOCHk"] = stoch_k
+    df["STOCHd"] = stoch_d
+    df["STOCHh"] = stoch_h
+
+    return df
+
+
+def iterate_all_aggs_tables(
+    config: PolygonConfig,
+    valid_tickers: pa.Array,
+):
+    schedule = config.calendar.trading_index(
+        start=config.start_timestamp, end=config.end_timestamp, period="1D"
+    )
+    for timestamp in schedule:
+        date = timestamp.to_pydatetime().date()
+        aggs_ds = pa_ds.dataset(
+            config.aggs_dir,
+            format="parquet",
+            schema=custom_aggs_schema(tz=config.calendar.tz.key),
+            partitioning=custom_aggs_partitioning(),
+        )
+        date_filter_expr = (
+            (pa_compute.field("year") == date.year)
+            & (pa_compute.field("month") == date.month)
+            & (pa_compute.field("date") == date)
+        )
+        # print(f"{date_filter_expr=}")
+        for fragment in aggs_ds.get_fragments(filter=date_filter_expr):
+            session_filter = (
+                (pa_compute.field("window_start") >= start_dt)
+                & (pa_compute.field("window_start") < end_dt)
+                & pa_compute.is_in(pa_compute.field("ticker"), valid_tickers)
+            )
+            # Sorting table doesn't seem to avoid needing to sort the df. Maybe use_threads=False on to_pandas would help?
+            # table = fragment.to_table(filter=session_filter).sort_by([('ticker', 'ascending'), ('window_start', 'descending')])
+            table = fragment.to_table(filter=session_filter)
+            if table.num_rows > 0:
+                metadata = (
+                    dict(table.schema.metadata) if table.schema.metadata else dict()
+                )
+                metadata["date"] = date.isoformat()
+                table = table.replace_schema_metadata(metadata)
+                yield table
+
+
+# def iterate_all_aggs_with_signals(config: PolygonConfig):
+#     for table in iterate_all_aggs_tables(config):
+#         df = table.to_pandas()
+#         df = df.groupby("ticker").apply(
+#             compute_per_ticker_signals, include_groups=False
+#         )
+#         yield pa.Table.from_pandas(df)
+
+
+def file_visitor(written_file):
+    print(f"{written_file.path=}")
+
+
+def compute_signals_for_all_aggs(
+    from_config: PolygonConfig,
+    to_config: PolygonConfig,
+    valid_tickers: pa.Array,
+    overwrite: bool = False,
+) -> str:
+    if overwrite:
+        print("WARNING: overwrite not implemented/ignored.")
+
+    # Need a different aggs_dir for the signals because schema is different.
+    print(f"{to_config.aggs_dir=}")
+
+    for aggs_table in iterate_all_aggs_tables(from_config, valid_tickers):
+        metadata = aggs_table.schema.metadata
+        date = datetime.date.fromisoformat(metadata[b"date"].decode("utf-8"))
+        print(f"{date=}")
+        df = aggs_table.to_pandas()
+        df = df.groupby("ticker").apply(
+            compute_per_ticker_signals, include_groups=False
+        )
+        table = pa.Table.from_pandas(df)
+        if table.num_rows > 0:
+            table = table.replace_schema_metadata(metadata)
+            table = table.append_column("date", pa.array(np.full(len(table), date)))
+            table = table.append_column(
+                "year", pa.array(np.full(len(table), date.year), type=pa.uint16())
+            )
+            table = table.append_column(
+                "month", pa.array(np.full(len(table), date.month), type=pa.uint8())
+            )
+            table = table.sort_by(
+                [("ticker", "ascending"), ("window_start", "ascending")]
+            )
+            pa_ds.write_dataset(
+                table,
+                filesystem=to_config.filesystem,
+                base_dir=to_config.aggs_dir,
+                partitioning=custom_aggs_partitioning(),
+                format="parquet",
+                existing_data_behavior="overwrite_or_ignore",
+                file_visitor=file_visitor,
+            )
+    return to_config.aggs_dir
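For orientation, a minimal smoke test of the new calculate_mfi helper. It assumes the 0.2.0 wheel (and its pandas_ta dependency) is importable as shown; the input series are synthetic values invented for this sketch, not package test data.

```python
import numpy as np
import pandas as pd

from zipline_polygon_bundle.compute_signals import calculate_mfi

# Synthetic minute-bar inputs (hypothetical values, just for the smoke test).
rng = np.random.default_rng(0)
n = 100
typical_price = pd.Series(100.0 + rng.standard_normal(n).cumsum())
money_flow = typical_price * rng.integers(1_000, 10_000, size=n)

mfi = calculate_mfi(typical_price=typical_price, money_flow=money_flow, period=14)
assert len(mfi) == n
# Like RSI, the money flow index is bounded to [0, 100).
assert np.all((mfi >= 0) & (mfi < 100))
```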
zipline_polygon_bundle/concat_all_aggs.py
@@ -1,43 +1,43 @@
-from .config import PolygonConfig
+from .config import PolygonConfig, PARTITION_COLUMN_NAME, to_partition_key

 import shutil
-from typing import Iterator, Tuple
+from typing import Iterator, Tuple, Union

 import argparse
-import glob
 import os
+import datetime

 import pyarrow as pa
-
-
+import pyarrow.compute as pa_compute
+import pyarrow.csv as pa_csv
+import pyarrow.dataset as pa_ds
+import pyarrow.fs as pa_fs

 import pandas as pd


-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    return k
+# def get_by_ticker_dates(config: PolygonConfig, schema) -> set[datetime.date]:
+#     file_info = config.filesystem.get_file_info(config.by_ticker_dir)
+#     if file_info.type == pa_fs.FileType.NotFound:
+#         return set()
+#     partitioning = None
+#     if PARTITION_COLUMN_NAME in schema.names:
+#         partitioning = pa_ds.partitioning(
+#             pa.schema([(PARTITION_COLUMN_NAME, pa.string())]), flavor="hive"
+#         )
+#     by_ticker_aggs_ds = pa_ds.dataset(config.by_ticker_aggs_arrow_dir, schema=schema, partitioning=partitioning)
+#     return set(
+#         [
+#             pa_ds.get_partition_keys(fragment.partition_expression).get("date")
+#             for fragment in by_ticker_aggs_ds.get_fragments()
+#         ]
+#     )


 def generate_tables_from_csv_files(
-
+    config: PolygonConfig,
     schema: pa.Schema,
-
-    limit_timestamp: pd.Timestamp,
+    overwrite: bool = False,
 ) -> Iterator[pa.Table]:
     empty_table = schema.empty_table()
     # TODO: Find which column(s) need to be cast to int64 from the schema.
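The commented-out get_by_ticker_dates above reads hive partition keys back from an existing by-ticker dataset. A standalone sketch of that same pattern follows, using toy data and a stand-in partition column name; it assumes a pyarrow version where pyarrow.dataset.get_partition_keys is public, as the commented code does.

```python
import tempfile

import pyarrow as pa
import pyarrow.dataset as pa_ds

PARTITION_COLUMN_NAME = "part"  # stand-in for the package's config constant

table = pa.table({"ticker": ["AAPL", "ZZZZ"], PARTITION_COLUMN_NAME: ["A", "Z"]})
partitioning = pa_ds.partitioning(
    pa.schema([(PARTITION_COLUMN_NAME, pa.string())]), flavor="hive"
)

with tempfile.TemporaryDirectory() as base_dir:
    pa_ds.write_dataset(table, base_dir, format="parquet", partitioning=partitioning)
    dataset = pa_ds.dataset(base_dir, format="parquet", partitioning=partitioning)
    # One fragment per hive partition; read the partition key back from its expression.
    keys = {
        pa_ds.get_partition_keys(frag.partition_expression)[PARTITION_COLUMN_NAME]
        for frag in dataset.get_fragments()
    }
    print(keys)  # {'A', 'Z'}
```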
@@ -48,16 +48,35 @@ def generate_tables_from_csv_files(
     )
     csv_schema = empty_table.schema

+    existing_by_ticker_dates = set()
+    if not overwrite:
+        # print("Getting existing by_ticker_dates")
+        # existing_by_ticker_dates = get_by_ticker_dates(config, schema)
+        print(f"{len(existing_by_ticker_dates)=}")
+
+    schedule = config.calendar.trading_index(
+        start=config.start_timestamp, end=config.end_timestamp, period="1D"
+    )
+    start_timestamp = config.start_timestamp.tz_localize(config.calendar.tz.key)
+    limit_timestamp = (config.end_timestamp + pd.Timedelta(days=1)).tz_localize(
+        config.calendar.tz.key)
+    # print(f"{start_timestamp=} {limit_timestamp=} {config.calendar.tz=} {schedule[:2]=} {schedule[-2:]=}")
+
     tables_read_count = 0
     skipped_table_count = 0
-    for
+    for timestamp in schedule:
+        date: datetime.date = timestamp.tz_localize(config.calendar.tz.key).to_pydatetime().date()
+        # print(f"{date=} {timestamp=}")
+        if date in existing_by_ticker_dates:
+            continue
+        csv_path = config.date_to_csv_file_path(date)
         convert_options = pa_csv.ConvertOptions(
             column_types=csv_schema,
             strings_can_be_null=False,
             quoted_strings_can_be_null=False,
         )

-        table =
+        table = pa_csv.read_csv(csv_path, convert_options=convert_options)
         tables_read_count += 1
         table = table.set_column(
             table.column_names.index("window_start"),
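The new per-session loop above replaces the old glob over CSV paths: sessions come from the calendar's trading_index, and each session maps to one flat-file path via config.date_to_csv_file_path. A rough standalone sketch of that iteration, assuming config.calendar is an exchange_calendars calendar (which the trading_index/tz usage suggests); the path format shown is hypothetical, the real one lives in config.py.

```python
import datetime

import exchange_calendars as xcals

calendar = xcals.get_calendar("XNYS")
schedule = calendar.trading_index(start="2020-10-07", end="2020-10-09", period="1D")
for timestamp in schedule:
    date: datetime.date = timestamp.to_pydatetime().date()
    # Hypothetical flat-file layout; the real path comes from PolygonConfig.date_to_csv_file_path().
    csv_path = f"{date:%Y/%m}/{date:%Y-%m-%d}.csv.gz"
    print(date, csv_path)
```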
@@ -75,10 +94,10 @@ def generate_tables_from_csv_files(
             ),
         )
         expr = (
-
+            pa_compute.field("window_start")
             >= pa.scalar(start_timestamp, type=schema.field("window_start").type)
         ) & (
-
+            pa_compute.field("window_start")
             < pa.scalar(
                 limit_timestamp,
                 type=schema.field("window_start").type,
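The window_start range filter above is a pyarrow compute expression applied when the CSV-derived table is scanned. A self-contained illustration of the same pattern on toy data (the column names mirror the aggregates schema; the values are invented):

```python
import pandas as pd
import pyarrow as pa
import pyarrow.compute as pa_compute
import pyarrow.dataset as pa_ds

ts_type = pa.timestamp("ns", tz="UTC")
table = pa.table({
    "ticker": ["AAPL", "AAPL", "MSFT"],
    "window_start": pa.array(
        pd.to_datetime(
            ["2020-10-07 13:30", "2020-10-08 13:30", "2020-10-08 13:31"], utc=True
        ),
        type=ts_type,
    ),
})

start_timestamp = pd.Timestamp("2020-10-08", tz="UTC")
limit_timestamp = pd.Timestamp("2020-10-09", tz="UTC")
expr = (
    pa_compute.field("window_start") >= pa.scalar(start_timestamp, type=ts_type)
) & (
    pa_compute.field("window_start") < pa.scalar(limit_timestamp, type=ts_type)
)

# Wrap the table in an in-memory dataset so the expression is applied at scan time.
print(pa_ds.dataset(table).to_table(filter=expr).num_rows)  # 2
```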
@@ -95,32 +114,86 @@ def generate_tables_from_csv_files(
             skipped_table_count += 1
             continue

+        if PARTITION_COLUMN_NAME in schema.names:
+            print(f"{date=}")
         yield table
     print(f"{tables_read_count=} {skipped_table_count=}")


+# def generate_tables_from_csv_files(
+#     paths: Iterator[Union[str, os.PathLike]],
+#     schema: pa.Schema,
+#     start_timestamp: pd.Timestamp,
+#     limit_timestamp: pd.Timestamp,
+# ) -> Iterator[pa.Table]:
+#     empty_table = schema.empty_table()
+#     # TODO: Find which column(s) need to be cast to int64 from the schema.
+#     empty_table = empty_table.set_column(
+#         empty_table.column_names.index("window_start"),
+#         "window_start",
+#         empty_table.column("window_start").cast(pa.int64()),
+#     )
+#     csv_schema = empty_table.schema
+
+#     tables_read_count = 0
+#     skipped_table_count = 0
+#     for path in paths:
+#         convert_options = pa_csv.ConvertOptions(
+#             column_types=csv_schema,
+#             strings_can_be_null=False,
+#             quoted_strings_can_be_null=False,
+#         )
+
+#         table = pa_csv.read_csv(path, convert_options=convert_options)
+#         tables_read_count += 1
+#         table = table.set_column(
+#             table.column_names.index("window_start"),
+#             "window_start",
+#             table.column("window_start").cast(schema.field("window_start").type),
+#         )
+#         if PARTITION_COLUMN_NAME in schema.names:
+#             table = table.append_column(
+#                 PARTITION_COLUMN_NAME,
+#                 pa.array(
+#                     [
+#                         to_partition_key(ticker)
+#                         for ticker in table.column("ticker").to_pylist()
+#                     ]
+#                 ),
+#             )
+#         expr = (
+#             pa_compute.field("window_start")
+#             >= pa.scalar(start_timestamp, type=schema.field("window_start").type)
+#         ) & (
+#             pa_compute.field("window_start")
+#             < pa.scalar(
+#                 limit_timestamp,
+#                 type=schema.field("window_start").type,
+#             )
+#         )
+#         table = table.filter(expr)
+
+#         # TODO: Also check that these rows are within range for this file's date (not just the whole session).
+#         # And if we're doing that (figuring date for each file), we can just skip reading the file.
+#         # Might able to do a single comparison using compute.days_between.
+#         # https://arrow.apache.org/docs/python/generated/pyarrow.compute.days_between.html
+
+#         if table.num_rows == 0:
+#             skipped_table_count += 1
+#             continue
+
+#         yield table
+#     print(f"{tables_read_count=} {skipped_table_count=}")
+
+
 def generate_csv_agg_tables(
-    config: PolygonConfig,
-) -> Tuple[
+    config: PolygonConfig, overwrite: bool = False
+) -> Tuple[pa.Schema, Iterator[pa.Table]]:
     """zipline does bundle ingestion one ticker at a time."""
-    # We sort by path because they have the year and month in the dir names and the date in the filename.
-    paths = sorted(
-        list(
-            glob.glob(
-                os.path.join(config.aggs_dir, config.csv_paths_pattern),
-                recursive="**" in config.csv_paths_pattern,
-            )
-        )
-    )
-
-    print(f"{len(paths)=}")
-    if len(paths) > 0:
-        print(f"{paths[0]=}")
-        print(f"{paths[-1]=}")

     # Polygon Aggregate flatfile timestamps are in nanoseconds (like trades), not milliseconds as the docs say.
     # I make the timestamp timezone-aware because that's how Unix timestamps work and it may help avoid mistakes.
-    timestamp_type = pa.timestamp("ns", tz=
+    timestamp_type = pa.timestamp("ns", tz='UTC')

     # But we can't use the timestamp type in the schema here because it's not supported by the CSV reader.
     # So we'll use int64 and cast it after reading the CSV file.
@@ -155,13 +228,11 @@ def generate_csv_agg_tables(
     )

     return (
-        paths,
         polygon_aggs_schema,
         generate_tables_from_csv_files(
-
+            config,
             schema=polygon_aggs_schema,
-
-            limit_timestamp=config.end_timestamp + pd.to_timedelta(1, unit="day"),
+            overwrite=overwrite,
         ),
     )

@@ -176,18 +247,17 @@ def concat_all_aggs_from_csv(
     config: PolygonConfig,
     overwrite: bool = False,
 ) -> str:
-
-
-
-
-
-
-
-
-
-
-
-    return by_ticker_aggs_arrow_dir
+    schema, tables = generate_csv_agg_tables(config, overwrite=overwrite)
+
+    by_ticker_aggs_arrow_dir = config.by_ticker_aggs_arrow_dir
+    # if os.path.exists(by_ticker_aggs_arrow_dir):
+    #     if overwrite:
+    #         print(f"Removing {by_ticker_aggs_arrow_dir=}")
+    #         shutil.rmtree(by_ticker_aggs_arrow_dir)
+    #     else:
+    #         # TODO: Validate the existing data.
+    #         print(f"Found existing {by_ticker_aggs_arrow_dir=}")
+    #         return by_ticker_aggs_arrow_dir

     partitioning = None
     if PARTITION_COLUMN_NAME in schema.names:
@@ -195,7 +265,7 @@ def concat_all_aggs_from_csv(
             pa.schema([(PARTITION_COLUMN_NAME, pa.string())]), flavor="hive"
         )

-
+    print(f"Scattering aggregates by ticker to {by_ticker_aggs_arrow_dir=}")
     pa_ds.write_dataset(
         generate_batches_from_tables(tables),
         schema=schema,
@@ -204,7 +274,7 @@ def concat_all_aggs_from_csv(
         format="parquet",
         existing_data_behavior="overwrite_or_ignore",
     )
-    print(f"
+    print(f"Scattered aggregates by ticker to {by_ticker_aggs_arrow_dir=}")
     return by_ticker_aggs_arrow_dir


@@ -212,10 +282,10 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--calendar_name", default="XNYS")

-    parser.add_argument("--
-    parser.add_argument("--
-    # parser.add_argument("--
-    # parser.add_argument("--
+    parser.add_argument("--start_date", default="2014-06-16")
+    parser.add_argument("--end_date", default="2024-09-06")
+    # parser.add_argument("--start_date", default="2020-01-01")
+    # parser.add_argument("--end_date", default="2020-12-31")

     parser.add_argument("--agg_time", default="day")

@@ -235,8 +305,8 @@ if __name__ == "__main__":
     config = PolygonConfig(
         environ=os.environ,
         calendar_name=args.calendar_name,
-
-
+        start_date=args.start_date,
+        end_date=args.end_date,
         agg_time=args.agg_time,
     )

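Putting the changed __main__ block into library form, a minimal sketch of driving the 0.2.0 API with the new start_date/end_date parameters. It assumes the environment variables PolygonConfig reads for the Polygon flat-file locations are set; the dates here are arbitrary.

```python
import os

from zipline_polygon_bundle.config import PolygonConfig
from zipline_polygon_bundle.concat_all_aggs import concat_all_aggs_from_csv

config = PolygonConfig(
    environ=os.environ,
    calendar_name="XNYS",
    start_date="2020-01-01",
    end_date="2020-12-31",
    agg_time="day",
)

# Scatter the daily flat files into the by-ticker Arrow dataset and report where it landed.
by_ticker_aggs_arrow_dir = concat_all_aggs_from_csv(config, overwrite=False)
print(by_ticker_aggs_arrow_dir)
```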
zipline_polygon_bundle/concat_all_aggs_partitioned.py
@@ -138,10 +138,10 @@ if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--calendar_name", default="XNYS")

-    parser.add_argument("--
-    parser.add_argument("--
-    # parser.add_argument("--
-    # parser.add_argument("--
+    parser.add_argument("--start_date", default="2014-06-16")
+    parser.add_argument("--end_date", default="2024-09-06")
+    # parser.add_argument("--start_date", default="2020-10-07")
+    # parser.add_argument("--end_date", default="2020-10-15")
     # parser.add_argument("--aggs_pattern", default="2020/10/**/*.csv.gz")
     parser.add_argument("--aggs_pattern", default="**/*.csv.gz")

@@ -163,8 +163,8 @@ if __name__ == "__main__":
     config = PolygonConfig(
         environ=os.environ,
         calendar_name=args.calendar_name,
-
-
+        start_date=args.start_date,
+        end_date=args.end_date,
     )

     concat_all_aggs_from_csv(