lumibot 4.1.3__py3-none-any.whl → 4.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lumibot might be problematic.
- lumibot/backtesting/__init__.py +19 -5
- lumibot/backtesting/backtesting_broker.py +98 -18
- lumibot/backtesting/databento_backtesting.py +5 -686
- lumibot/backtesting/databento_backtesting_pandas.py +738 -0
- lumibot/backtesting/databento_backtesting_polars.py +860 -546
- lumibot/backtesting/fix_debug.py +37 -0
- lumibot/backtesting/thetadata_backtesting.py +9 -355
- lumibot/backtesting/thetadata_backtesting_pandas.py +1178 -0
- lumibot/brokers/alpaca.py +8 -1
- lumibot/brokers/schwab.py +12 -2
- lumibot/credentials.py +13 -0
- lumibot/data_sources/__init__.py +5 -8
- lumibot/data_sources/data_source.py +6 -2
- lumibot/data_sources/data_source_backtesting.py +30 -0
- lumibot/data_sources/databento_data.py +5 -390
- lumibot/data_sources/databento_data_pandas.py +440 -0
- lumibot/data_sources/databento_data_polars.py +15 -9
- lumibot/data_sources/pandas_data.py +30 -17
- lumibot/data_sources/polars_data.py +986 -0
- lumibot/data_sources/polars_mixin.py +472 -96
- lumibot/data_sources/polygon_data_polars.py +5 -0
- lumibot/data_sources/yahoo_data.py +9 -2
- lumibot/data_sources/yahoo_data_polars.py +5 -0
- lumibot/entities/__init__.py +15 -0
- lumibot/entities/asset.py +5 -28
- lumibot/entities/bars.py +89 -20
- lumibot/entities/data.py +29 -6
- lumibot/entities/data_polars.py +668 -0
- lumibot/entities/position.py +38 -4
- lumibot/strategies/_strategy.py +2 -1
- lumibot/strategies/strategy.py +61 -49
- lumibot/tools/backtest_cache.py +284 -0
- lumibot/tools/databento_helper.py +35 -35
- lumibot/tools/databento_helper_polars.py +738 -775
- lumibot/tools/futures_roll.py +251 -0
- lumibot/tools/indicators.py +135 -104
- lumibot/tools/polars_utils.py +142 -0
- lumibot/tools/thetadata_helper.py +1068 -134
- {lumibot-4.1.3.dist-info → lumibot-4.2.0.dist-info}/METADATA +9 -1
- {lumibot-4.1.3.dist-info → lumibot-4.2.0.dist-info}/RECORD +71 -147
- tests/backtest/test_databento.py +37 -6
- tests/backtest/test_databento_comprehensive_trading.py +8 -4
- tests/backtest/test_databento_parity.py +4 -2
- tests/backtest/test_debug_avg_fill_price.py +1 -1
- tests/backtest/test_example_strategies.py +11 -1
- tests/backtest/test_futures_edge_cases.py +3 -3
- tests/backtest/test_futures_single_trade.py +2 -2
- tests/backtest/test_futures_ultra_simple.py +2 -2
- tests/backtest/test_polars_lru_eviction.py +470 -0
- tests/backtest/test_yahoo.py +42 -0
- tests/test_asset.py +4 -4
- tests/test_backtest_cache_manager.py +149 -0
- tests/test_backtesting_data_source_env.py +6 -0
- tests/test_continuous_futures_resolution.py +60 -48
- tests/test_data_polars_parity.py +160 -0
- tests/test_databento_asset_validation.py +23 -5
- tests/test_databento_backtesting.py +1 -1
- tests/test_databento_backtesting_polars.py +312 -192
- tests/test_databento_data.py +220 -463
- tests/test_databento_live.py +10 -10
- tests/test_futures_roll.py +38 -0
- tests/test_indicator_subplots.py +101 -0
- tests/test_market_infinite_loop_bug.py +77 -3
- tests/test_polars_resample.py +67 -0
- tests/test_polygon_helper.py +46 -0
- tests/test_thetadata_backwards_compat.py +97 -0
- tests/test_thetadata_helper.py +222 -23
- tests/test_thetadata_pandas_verification.py +186 -0
- lumibot/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/__pycache__/constants.cpython-312.pyc +0 -0
- lumibot/__pycache__/credentials.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/alpaca_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/alpha_vantage_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/backtesting_broker.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/ccxt_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/databento_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/interactive_brokers_rest_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/pandas_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/polygon_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/thetadata_backtesting.cpython-312.pyc +0 -0
- lumibot/backtesting/__pycache__/yahoo_backtesting.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/alpaca.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/bitunix.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/broker.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/ccxt.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/example_broker.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/interactive_brokers.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/interactive_brokers_rest.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/projectx.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/schwab.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/tradier.cpython-312.pyc +0 -0
- lumibot/brokers/__pycache__/tradovate.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/alpaca_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/alpha_vantage_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/bitunix_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/ccxt_backtesting_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/ccxt_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/data_source.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/data_source_backtesting.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/databento_data_polars_backtesting.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/databento_data_polars_live.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/example_broker_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/exceptions.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/interactive_brokers_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/interactive_brokers_rest_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/pandas_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/polars_mixin.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/polygon_data_polars.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/projectx_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/schwab_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/tradier_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/tradovate_data.cpython-312.pyc +0 -0
- lumibot/data_sources/__pycache__/yahoo_data_polars.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/asset.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/bar.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/bars.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/chains.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/data.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/dataline.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/order.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/position.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/quote.cpython-312.pyc +0 -0
- lumibot/entities/__pycache__/trading_fee.cpython-312.pyc +0 -0
- lumibot/example_strategies/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/example_strategies/__pycache__/test_broker_functions.cpython-312-pytest-8.4.1.pyc +0 -0
- lumibot/strategies/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/strategies/__pycache__/_strategy.cpython-312.pyc +0 -0
- lumibot/strategies/__pycache__/strategy.cpython-312.pyc +0 -0
- lumibot/strategies/__pycache__/strategy_executor.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/alpaca_helpers.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/bitunix_helpers.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/black_scholes.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/ccxt_data_store.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/databento_helper.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/databento_helper_polars.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/debugers.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/decorators.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/helpers.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/indicators.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/lumibot_logger.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/pandas.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/polygon_helper.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/polygon_helper_async.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/polygon_helper_polars_optimized.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/projectx_helpers.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/schwab_helper.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/thetadata_helper.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/types.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/yahoo_helper.cpython-312.pyc +0 -0
- lumibot/tools/__pycache__/yahoo_helper_polars_optimized.cpython-312.pyc +0 -0
- lumibot/traders/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/traders/__pycache__/trader.cpython-312.pyc +0 -0
- lumibot/trading_builtins/__pycache__/__init__.cpython-312.pyc +0 -0
- lumibot/trading_builtins/__pycache__/custom_stream.cpython-312.pyc +0 -0
- lumibot/trading_builtins/__pycache__/safe_list.cpython-312.pyc +0 -0
- {lumibot-4.1.3.dist-info → lumibot-4.2.0.dist-info}/WHEEL +0 -0
- {lumibot-4.1.3.dist-info → lumibot-4.2.0.dist-info}/licenses/LICENSE +0 -0
- {lumibot-4.1.3.dist-info → lumibot-4.2.0.dist-info}/top_level.txt +0 -0
tests/backtest/test_polars_lru_eviction.py
NEW
@@ -0,0 +1,470 @@
+"""
+Test LRU eviction functionality for Polars data sources.
+
+This test verifies that:
+1. Memory limits are enforced via LRU eviction
+2. Two-tier eviction works (aggregated cache first, then data store)
+3. LRU order is maintained (oldest unused items evicted first)
+4. Memory calculation is accurate
+5. Multiple symbols are handled correctly under memory pressure
+"""
+
+import pytest
+from datetime import datetime, timedelta
+import polars as pl
+
+from lumibot.entities import Asset
+from lumibot.entities.data_polars import DataPolars
+from lumibot.data_sources.polars_data import PolarsData
+
+
+class TestLRUEviction:
+    """Test suite for LRU eviction functionality"""
+
+    def test_memory_limit_configuration(self):
+        """Test that memory limit is configured correctly"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Default should be 1GB
+        assert polars_data.MAX_STORAGE_BYTES == 1_000_000_000
+
+    def test_eviction_from_aggregated_cache_first(self):
+        """Test that eviction happens from aggregated cache first.
+
+        This test verifies the two-tier eviction priority:
+        1. Aggregated cache is evicted first (less critical)
+        2. Data store is evicted only if aggregated cache eviction isn't enough
+        """
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Set memory limit high enough that evicting aggregated cache is sufficient
+        # Each asset: ~48KB data + ~10KB aggregated = ~58KB total
+        # 5 assets = ~290KB total
+        # Set limit to 250KB so aggregated cache eviction (50KB) is enough
+        polars_data.MAX_STORAGE_BYTES = 250_000  # 250KB
+
+        # Create 1-minute test data for 5 assets
+        assets = [Asset(f"TEST{i}", "stock") for i in range(5)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=1000),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Aggregate all 5 assets to create aggregated cache entries
+        for asset in assets:
+            polars_data._get_or_aggregate_bars(asset, quote, 100, "minute", "5 minutes")
+
+        assert len(polars_data._aggregated_cache) == 5
+        original_data_store_size = len(polars_data._data_store)
+
+        # CRITICAL: Set _trim_iteration_count = 0 to actually trigger enforcement
+        # (Production code only enforces when _trim_iteration_count == 0)
+        polars_data._trim_iteration_count = 0
+
+        # Force memory limit enforcement
+        polars_data._enforce_memory_limits()
+
+        # Aggregated cache should be partially/fully evicted
+        # Data store should still have all 5 assets (evicting agg cache was enough)
+        assert len(polars_data._aggregated_cache) < 5, "Aggregated cache should have been evicted"
+        assert len(polars_data._data_store) == original_data_store_size, \
+            f"Data store should be untouched (expected {original_data_store_size}, got {len(polars_data._data_store)})"
+
+    def test_eviction_from_data_store_when_aggregated_empty(self):
+        """Test that eviction happens from data_store when aggregated cache is empty"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Set a very low memory limit
+        polars_data.MAX_STORAGE_BYTES = 50_000  # 50KB
+
+        # Create 1-minute test data for 10 assets (no aggregated cache)
+        assets = [Asset(f"TEST{i}", "stock") for i in range(10)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=1000),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # No aggregated cache entries
+        assert len(polars_data._aggregated_cache) == 0
+
+        # Set _trim_iteration_count = 0 to trigger enforcement
+        polars_data._trim_iteration_count = 0
+
+        # Force memory limit enforcement
+        polars_data._enforce_memory_limits()
+
+        # Data store should have been evicted
+        assert len(polars_data._data_store) < 10
+
+    def test_lru_order_maintained(self):
+        """Test that LRU order is maintained - oldest unused items evicted first"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Create 5 assets
+        assets = [Asset(f"TEST{i}", "stock") for i in range(5)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=1000),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Aggregate all 5 to create cache entries in order
+        for asset in assets:
+            polars_data._get_or_aggregate_bars(asset, quote, 100, "minute", "5 minutes")
+
+        # Access TEST0, TEST1, TEST2 again (should move to end)
+        polars_data._get_or_aggregate_bars(assets[0], quote, 100, "minute", "5 minutes")
+        polars_data._get_or_aggregate_bars(assets[1], quote, 100, "minute", "5 minutes")
+        polars_data._get_or_aggregate_bars(assets[2], quote, 100, "minute", "5 minutes")
+
+        # Order should now be: TEST3, TEST4, TEST0, TEST1, TEST2 (least to most recent)
+        keys = list(polars_data._aggregated_cache.keys())
+        assert keys[0][0] == assets[3]  # TEST3 is oldest
+        assert keys[1][0] == assets[4]  # TEST4 is second oldest
+        assert keys[-3][0] == assets[0]  # TEST0 is third newest
+        assert keys[-2][0] == assets[1]  # TEST1 is second newest
+        assert keys[-1][0] == assets[2]  # TEST2 is newest
+
+    def test_memory_calculation_accuracy(self):
+        """Test that memory calculation is accurate using polars estimated_size()"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Create test data
+        asset = Asset("TEST", "stock")
+        quote = Asset("USD", "forex")
+        dates = pl.datetime_range(
+            start_date,
+            start_date + timedelta(minutes=1000),
+            interval="1m",
+            eager=True
+        )
+        df = pl.DataFrame({
+            "datetime": dates,
+            "open": [100.0] * len(dates),
+            "high": [101.0] * len(dates),
+            "low": [99.0] * len(dates),
+            "close": [100.5] * len(dates),
+            "volume": [1000.0] * len(dates),
+        })
+
+        data = DataPolars(asset, df=df, timestep="minute", quote=None)
+        polars_data._data_store[(asset, quote)] = data
+
+        # Calculate memory manually
+        expected_size = df.estimated_size()
+
+        # Should be non-zero
+        assert expected_size > 0
+
+        # Create aggregated cache entry
+        polars_data._get_or_aggregate_bars(asset, quote, 100, "minute", "5 minutes")
+
+        # Calculate total memory
+        total_memory = 0
+        for data in polars_data._data_store.values():
+            if hasattr(data, 'polars_df'):
+                total_memory += data.polars_df.estimated_size()
+
+        for agg_df in polars_data._aggregated_cache.values():
+            if agg_df is not None:
+                total_memory += agg_df.estimated_size()
+
+        # Should be larger than original df
+        assert total_memory > expected_size
+
+    def test_two_tier_eviction(self):
+        """Test that two-tier eviction works: aggregated first, then data store"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Set memory limit to force eviction
+        polars_data.MAX_STORAGE_BYTES = 80_000  # 80KB
+
+        # Create 5 assets
+        assets = [Asset(f"TEST{i}", "stock") for i in range(5)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=1000),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Create aggregated cache entries for all 5
+        for asset in assets:
+            polars_data._get_or_aggregate_bars(asset, quote, 100, "minute", "5 minutes")
+
+        initial_data_store_size = len(polars_data._data_store)
+        initial_agg_cache_size = len(polars_data._aggregated_cache)
+
+        assert initial_data_store_size == 5
+        assert initial_agg_cache_size == 5
+
+        # Set _trim_iteration_count = 0 to trigger enforcement
+        polars_data._trim_iteration_count = 0
+
+        # Force eviction
+        polars_data._enforce_memory_limits()
+
+        # Aggregated cache should be evicted first
+        after_eviction_agg_size = len(polars_data._aggregated_cache)
+        after_eviction_data_size = len(polars_data._data_store)
+
+        # Either aggregated cache was reduced, or if that wasn't enough, data store was reduced
+        assert after_eviction_agg_size < initial_agg_cache_size or \
+            after_eviction_data_size < initial_data_store_size
+
+    def test_multiple_symbols_under_pressure(self):
+        """Test handling of multiple symbols under memory pressure"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Very low memory limit
+        polars_data.MAX_STORAGE_BYTES = 30_000  # 30KB
+
+        # Create 20 assets (more than can fit in memory)
+        assets = [Asset(f"TEST{i}", "stock") for i in range(20)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=500),  # Smaller dataset
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Set _trim_iteration_count = 0 to trigger enforcement
+        polars_data._trim_iteration_count = 0
+
+        # Enforce limits after each addition
+        polars_data._enforce_memory_limits()
+
+        # Should have evicted some items
+        final_size = len(polars_data._data_store)
+        assert final_size < 20
+        assert final_size > 0  # Should keep at least some data
+
+    def test_no_eviction_under_limit(self):
+        """Test that no eviction happens when under memory limit"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Very high memory limit (default 1GB should be fine)
+        # Create just 2 small assets
+        assets = [Asset(f"TEST{i}", "stock") for i in range(2)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=100),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Aggregate both
+        for asset in assets:
+            polars_data._get_or_aggregate_bars(asset, quote, 50, "minute", "5 minutes")
+
+        # Record sizes
+        data_store_size = len(polars_data._data_store)
+        agg_cache_size = len(polars_data._aggregated_cache)
+
+        # Set _trim_iteration_count = 0 to trigger enforcement
+        polars_data._trim_iteration_count = 0
+
+        # Force enforcement
+        polars_data._enforce_memory_limits()
+
+        # Nothing should be evicted
+        assert len(polars_data._data_store) == data_store_size
+        assert len(polars_data._aggregated_cache) == agg_cache_size
+
+    def test_eviction_updates_lru_order(self):
+        """Test that eviction correctly updates LRU order"""
+        start_date = datetime(2024, 1, 1)
+        end_date = datetime(2024, 1, 31)
+
+        polars_data = PolarsData(
+            datetime_start=start_date,
+            datetime_end=end_date,
+            pandas_data=None
+        )
+
+        # Set memory limit
+        polars_data.MAX_STORAGE_BYTES = 60_000  # 60KB
+
+        # Create 5 assets
+        assets = [Asset(f"TEST{i}", "stock") for i in range(5)]
+        quote = Asset("USD", "forex")
+
+        for asset in assets:
+            dates = pl.datetime_range(
+                start_date,
+                start_date + timedelta(minutes=1000),
+                interval="1m",
+                eager=True
+            )
+            df = pl.DataFrame({
+                "datetime": dates,
+                "open": [100.0] * len(dates),
+                "high": [101.0] * len(dates),
+                "low": [99.0] * len(dates),
+                "close": [100.5] * len(dates),
+                "volume": [1000.0] * len(dates),
+            })
+            data = DataPolars(asset, df=df, timestep="minute", quote=None)
+            polars_data._data_store[(asset, quote)] = data
+
+        # Aggregate all 5 in order: 0, 1, 2, 3, 4
+        for asset in assets:
+            polars_data._get_or_aggregate_bars(asset, quote, 100, "minute", "5 minutes")
+
+        # Access 4 again (should move to end)
+        polars_data._get_or_aggregate_bars(assets[4], quote, 100, "minute", "5 minutes")
+
+        # Set _trim_iteration_count = 0 to trigger enforcement
+        polars_data._trim_iteration_count = 0
+
+        # Force eviction
+        polars_data._enforce_memory_limits()
+
+        # After eviction, if TEST4 is still in cache, it should be at the end
+        if (assets[4], quote, "5 minutes") in polars_data._aggregated_cache:
+            keys = list(polars_data._aggregated_cache.keys())
+            # TEST4 should be last (most recent)
+            assert keys[-1][0] == assets[4]
+
+
+if __name__ == "__main__":
+    pytest.main([__file__, "-v", "-s"])
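The new test file above pins down a two-tier LRU policy: aggregated (resampled) frames are evicted before raw bars, recency is tracked by access order, a floor of at least one raw entry is kept, and memory is measured with polars' DataFrame.estimated_size(). Below is a minimal sketch of that policy reconstructed from the assertions; the class and method names (TwoTierLRU, touch, enforce_memory_limits) are illustrative assumptions, not PolarsData's actual internals.

from collections import OrderedDict


class TwoTierLRU:
    def __init__(self, max_bytes: int):
        self.max_bytes = max_bytes
        # Insertion order doubles as recency order: oldest entries sit at the front.
        self.data_store = OrderedDict()        # (asset, quote) -> raw polars DataFrame
        self.aggregated_cache = OrderedDict()  # (asset, quote, timestep) -> resampled DataFrame

    def touch(self, key):
        # On every aggregated-cache hit, move the key to the end so it is evicted last,
        # matching test_lru_order_maintained.
        self.aggregated_cache.move_to_end(key)

    def total_bytes(self) -> int:
        # Memory accounting via polars' estimated_size(), as test_memory_calculation_accuracy checks.
        return sum(df.estimated_size() for df in self.data_store.values()) + sum(
            df.estimated_size() for df in self.aggregated_cache.values()
        )

    def enforce_memory_limits(self):
        # Tier 1: drop aggregated frames first, oldest first; they can be re-derived from raw bars.
        while self.total_bytes() > self.max_bytes and self.aggregated_cache:
            self.aggregated_cache.popitem(last=False)
        # Tier 2: only if that was not enough, evict raw data, oldest first,
        # always keeping at least one entry (the tests assert final_size > 0).
        while self.total_bytes() > self.max_bytes and len(self.data_store) > 1:
            self.data_store.popitem(last=False)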
tests/backtest/test_yahoo.py
CHANGED
@@ -1,4 +1,6 @@
 import datetime
+import pandas as pd
+import pytz
 import pytest
 
 from lumibot.backtesting import BacktestingBroker, YahooDataBacktesting
@@ -28,6 +30,45 @@ class YahooPriceTest(Strategy):
 
 class TestYahooBacktestFull:
 
+    def test_yahoo_no_future_bars_before_open(self, monkeypatch):
+        tz = pytz.timezone('America/New_York')
+        asset = 'SPY'
+        index = pd.DatetimeIndex([
+            tz.localize(datetime.datetime(2023, 10, 31, 16, 0)),
+            tz.localize(datetime.datetime(2023, 11, 1, 16, 0)),
+        ])
+
+        frame = pd.DataFrame(
+            {
+                'Open': [416.18, 419.20],
+                'High': [416.50, 420.10],
+                'Low': [415.80, 418.90],
+                'Close': [418.53, 419.54],
+                'Volume': [1_000_000, 1_100_000],
+                'Dividends': [0.0, 0.0],
+                'Stock Splits': [0.0, 0.0],
+            },
+            index=index,
+        )
+
+        monkeypatch.setattr(
+            'lumibot.tools.YahooHelper.get_symbol_data',
+            lambda *args, **kwargs: frame,
+        )
+
+        data_source = YahooDataBacktesting(
+            datetime_start=datetime.datetime(2023, 10, 30),
+            datetime_end=datetime.datetime(2023, 11, 2),
+        )
+        data_source._datetime = tz.localize(datetime.datetime(2023, 11, 1, 8, 45))
+
+        price = data_source.get_last_price(asset, timestep='day')
+        assert round(price, 2) == 416.18
+
+        bars = data_source.get_historical_prices(asset, 1, timestep='day')
+        # The bar timestamp must be strictly before the current backtest clock to avoid lookahead.
+        assert bars.df.index[-1] < data_source._datetime
+
     def test_yahoo_last_price(self):
         """
         Test the YahooDataBacktesting class by running a backtest and checking that the strategy object is returned
@@ -62,3 +103,4 @@ class TestYahooBacktestFull:
         last_price = round(last_price, 2)
 
         assert last_price == 416.18  # This is the correct price for 2023-11-01 (the open price)
+
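The regression test above fixes a lookahead invariant: with the backtest clock at 08:45, before the open, neither get_last_price nor get_historical_prices may surface that day's not-yet-complete daily bar. A minimal sketch of the kind of guard the assertion implies; this is an assumption about the shape of the fix, not lumibot's actual code.

import pandas as pd


def visible_bars(df: pd.DataFrame, now: pd.Timestamp) -> pd.DataFrame:
    # Keep only bars whose timestamp is strictly earlier than the backtest clock,
    # so a strategy running pre-open never sees the current day's bar.
    return df[df.index < now]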
tests/test_asset.py
CHANGED
@@ -124,12 +124,12 @@ def test_resolve_continuous_futures_contract_year_digits():
     assert four_digit == "MNQZ2025"
 
 
-def
-    """Verify contracts roll
+def test_resolve_continuous_futures_contract_rolls_on_rule_date():
+    """Verify contracts roll eight business days before the third Friday."""
     asset = Asset(symbol="MNQ", asset_type=Asset.AssetType.CONT_FUTURE)
 
-    before_roll = datetime.datetime(2025, 9,
-    after_roll = datetime.datetime(2025, 9,
+    before_roll = datetime.datetime(2025, 9, 8)
+    after_roll = datetime.datetime(2025, 9, 10)
 
     contract_before = asset.resolve_continuous_futures_contract(reference_date=before_roll)
     contract_after = asset.resolve_continuous_futures_contract(reference_date=after_roll)
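The updated test's docstring states the roll rule: contracts roll eight business days before the third Friday. A small self-contained sketch of that date arithmetic (the helper names here are hypothetical, not the Asset implementation): for September 2025 the third Friday is the 19th and eight business days earlier is the 9th, which sits between the test's before_roll (September 8) and after_roll (September 10) dates.

import datetime


def third_friday(year: int, month: int) -> datetime.date:
    first = datetime.date(year, month, 1)
    # weekday(): Monday=0 ... Friday=4; step to the first Friday, then ahead two weeks.
    first_friday = first + datetime.timedelta(days=(4 - first.weekday()) % 7)
    return first_friday + datetime.timedelta(weeks=2)


def roll_date(year: int, month: int, business_days: int = 8) -> datetime.date:
    d = third_friday(year, month)
    remaining = business_days
    while remaining > 0:
        d -= datetime.timedelta(days=1)
        if d.weekday() < 5:  # skip Saturdays and Sundays
            remaining -= 1
    return d


print(roll_date(2025, 9))  # 2025-09-09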
tests/test_backtest_cache_manager.py
NEW
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Dict, Tuple
+
+import pytest
+
+from lumibot.tools import backtest_cache
+from lumibot.tools.backtest_cache import (
+    BacktestCacheManager,
+    BacktestCacheSettings,
+    CacheMode,
+    reset_backtest_cache_manager,
+)
+
+
+@pytest.fixture(autouse=True)
+def reset_manager():
+    reset_backtest_cache_manager(for_testing=True)
+    yield
+    reset_backtest_cache_manager(for_testing=True)
+
+
+def test_settings_from_env_disabled_when_backend_not_s3():
+    config = {
+        "backend": "local",
+        "mode": "disabled",
+    }
+    assert BacktestCacheSettings.from_env(config) is None
+
+
+def test_settings_from_env_requires_bucket():
+    config = {
+        "backend": "s3",
+        "mode": "readwrite",
+    }
+    with pytest.raises(ValueError):
+        BacktestCacheSettings.from_env(config)
+
+
+class StubS3Client:
+    def __init__(self, objects: Dict[Tuple[str, str], bytes] | None = None):
+        self.objects = objects or {}
+        self.uploads: Dict[Tuple[str, str], bytes] = {}
+
+    def download_file(self, bucket: str, key: str, destination: str) -> None:
+        lookup = (bucket, key)
+        if lookup not in self.objects:
+            raise FileNotFoundError(f"{bucket}/{key} missing")
+        Path(destination).write_bytes(self.objects[lookup])
+
+    def upload_file(self, source: str, bucket: str, key: str) -> None:
+        self.uploads[(bucket, key)] = Path(source).read_bytes()
+
+
+def _build_settings(prefix: str = "prod/cache") -> BacktestCacheSettings:
+    return BacktestCacheSettings(
+        backend="s3",
+        mode=CacheMode.S3_READWRITE,
+        bucket="test-bucket",
+        prefix=prefix,
+        region="us-east-1",
+        version="v3",
+    )
+
+
+def test_remote_key_uses_relative_cache_path(tmp_path, monkeypatch):
+    cache_root = tmp_path / "cache"
+    cache_root.mkdir()
+    local_file = cache_root / "thetadata" / "bars" / "spy.parquet"
+    local_file.parent.mkdir(parents=True, exist_ok=True)
+
+    monkeypatch.setattr(backtest_cache, "LUMIBOT_CACHE_FOLDER", cache_root)
+
+    settings = _build_settings(prefix="stage/cache")
+    manager = BacktestCacheManager(settings, client_factory=lambda settings: StubS3Client())
+
+    remote_key = manager.remote_key_for(local_file)
+    assert remote_key == "stage/cache/v3/thetadata/bars/spy.parquet"
+
+
+def test_ensure_local_file_downloads_from_s3(tmp_path, monkeypatch):
+    cache_root = tmp_path / "cache"
+    cache_root.mkdir()
+    local_file = cache_root / "thetadata" / "bars" / "spy.parquet"
+
+    monkeypatch.setattr(backtest_cache, "LUMIBOT_CACHE_FOLDER", cache_root)
+
+    remote_key = "stage/cache/v3/thetadata/bars/spy.parquet"
+    objects = {("test-bucket", remote_key): b"cached-data"}
+
+    stub = StubS3Client(objects)
+    manager = BacktestCacheManager(_build_settings(prefix="stage/cache"), client_factory=lambda s: stub)
+
+    fetched = manager.ensure_local_file(local_file)
+    assert fetched is True
+    assert local_file.exists()
+    assert local_file.read_bytes() == b"cached-data"
+
+
+def test_ensure_local_file_handles_missing_remote(tmp_path, monkeypatch):
+    cache_root = tmp_path / "cache"
+    cache_root.mkdir()
+    local_file = cache_root / "thetadata" / "bars" / "spy.parquet"
+
+    monkeypatch.setattr(backtest_cache, "LUMIBOT_CACHE_FOLDER", cache_root)
+
+    stub = StubS3Client()
+    manager = BacktestCacheManager(_build_settings(prefix="stage/cache"), client_factory=lambda s: stub)
+
+    fetched = manager.ensure_local_file(local_file)
+    assert fetched is False
+    assert not local_file.exists()
+
+
+def test_on_local_update_uploads_file(tmp_path, monkeypatch):
+    cache_root = tmp_path / "cache"
+    cache_root.mkdir()
+    local_file = cache_root / "thetadata" / "bars" / "spy.parquet"
+    local_file.parent.mkdir(parents=True, exist_ok=True)
+    local_file.write_bytes(b"new-data")
+
+    monkeypatch.setattr(backtest_cache, "LUMIBOT_CACHE_FOLDER", cache_root)
+
+    remote_key = "stage/cache/v3/thetadata/bars/spy.parquet"
+    stub = StubS3Client({("test-bucket", remote_key): b"old"})
+    manager = BacktestCacheManager(_build_settings(prefix="stage/cache"), client_factory=lambda s: stub)
+
+    uploaded = manager.on_local_update(local_file)
+    assert uploaded is True
+    assert stub.uploads[("test-bucket", remote_key)] == b"new-data"
+
+
+def test_manager_disabled_skip_upload(tmp_path, monkeypatch):
+    cache_root = tmp_path / "cache"
+    cache_root.mkdir()
+    local_file = cache_root / "foo.parquet"
+    local_file.write_bytes(b"noop")
+
+    monkeypatch.setattr(backtest_cache, "LUMIBOT_CACHE_FOLDER", cache_root)
+
+    disabled_settings = BacktestCacheSettings(
+        backend="local",
+        mode=CacheMode.DISABLED,
+    )
+    manager = BacktestCacheManager(disabled_settings)
+
+    assert manager.ensure_local_file(local_file) is False
+    assert manager.on_local_update(local_file) is False