tsagentkit-1.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. tsagentkit/__init__.py +126 -0
  2. tsagentkit/anomaly/__init__.py +130 -0
  3. tsagentkit/backtest/__init__.py +48 -0
  4. tsagentkit/backtest/engine.py +788 -0
  5. tsagentkit/backtest/metrics.py +244 -0
  6. tsagentkit/backtest/report.py +342 -0
  7. tsagentkit/calibration/__init__.py +136 -0
  8. tsagentkit/contracts/__init__.py +133 -0
  9. tsagentkit/contracts/errors.py +275 -0
  10. tsagentkit/contracts/results.py +418 -0
  11. tsagentkit/contracts/schema.py +44 -0
  12. tsagentkit/contracts/task_spec.py +300 -0
  13. tsagentkit/covariates/__init__.py +340 -0
  14. tsagentkit/eval/__init__.py +285 -0
  15. tsagentkit/features/__init__.py +20 -0
  16. tsagentkit/features/covariates.py +328 -0
  17. tsagentkit/features/extra/__init__.py +5 -0
  18. tsagentkit/features/extra/native.py +179 -0
  19. tsagentkit/features/factory.py +187 -0
  20. tsagentkit/features/matrix.py +159 -0
  21. tsagentkit/features/tsfeatures_adapter.py +115 -0
  22. tsagentkit/features/versioning.py +203 -0
  23. tsagentkit/hierarchy/__init__.py +39 -0
  24. tsagentkit/hierarchy/aggregation.py +62 -0
  25. tsagentkit/hierarchy/evaluator.py +400 -0
  26. tsagentkit/hierarchy/reconciliation.py +232 -0
  27. tsagentkit/hierarchy/structure.py +453 -0
  28. tsagentkit/models/__init__.py +182 -0
  29. tsagentkit/models/adapters/__init__.py +83 -0
  30. tsagentkit/models/adapters/base.py +321 -0
  31. tsagentkit/models/adapters/chronos.py +387 -0
  32. tsagentkit/models/adapters/moirai.py +256 -0
  33. tsagentkit/models/adapters/registry.py +171 -0
  34. tsagentkit/models/adapters/timesfm.py +440 -0
  35. tsagentkit/models/baselines.py +207 -0
  36. tsagentkit/models/sktime.py +307 -0
  37. tsagentkit/monitoring/__init__.py +51 -0
  38. tsagentkit/monitoring/alerts.py +302 -0
  39. tsagentkit/monitoring/coverage.py +203 -0
  40. tsagentkit/monitoring/drift.py +330 -0
  41. tsagentkit/monitoring/report.py +214 -0
  42. tsagentkit/monitoring/stability.py +275 -0
  43. tsagentkit/monitoring/triggers.py +423 -0
  44. tsagentkit/qa/__init__.py +347 -0
  45. tsagentkit/router/__init__.py +37 -0
  46. tsagentkit/router/bucketing.py +489 -0
  47. tsagentkit/router/fallback.py +132 -0
  48. tsagentkit/router/plan.py +23 -0
  49. tsagentkit/router/router.py +271 -0
  50. tsagentkit/series/__init__.py +26 -0
  51. tsagentkit/series/alignment.py +206 -0
  52. tsagentkit/series/dataset.py +449 -0
  53. tsagentkit/series/sparsity.py +261 -0
  54. tsagentkit/series/validation.py +393 -0
  55. tsagentkit/serving/__init__.py +39 -0
  56. tsagentkit/serving/orchestration.py +943 -0
  57. tsagentkit/serving/packaging.py +73 -0
  58. tsagentkit/serving/provenance.py +317 -0
  59. tsagentkit/serving/tsfm_cache.py +214 -0
  60. tsagentkit/skill/README.md +135 -0
  61. tsagentkit/skill/__init__.py +8 -0
  62. tsagentkit/skill/recipes.md +429 -0
  63. tsagentkit/skill/tool_map.md +21 -0
  64. tsagentkit/time/__init__.py +134 -0
  65. tsagentkit/utils/__init__.py +20 -0
  66. tsagentkit/utils/quantiles.py +83 -0
  67. tsagentkit/utils/signature.py +47 -0
  68. tsagentkit/utils/temporal.py +41 -0
  69. tsagentkit-1.0.2.dist-info/METADATA +371 -0
  70. tsagentkit-1.0.2.dist-info/RECORD +72 -0
  71. tsagentkit-1.0.2.dist-info/WHEEL +4 -0
  72. tsagentkit-1.0.2.dist-info/licenses/LICENSE +201 -0
@@ -0,0 +1,135 @@ tsagentkit/skill/README.md
# tsagentkit Skill Documentation

## What
Agent-facing quick reference for using tsagentkit correctly, including guardrails,
core modules, and the end-to-end workflow.

## When
Use this when writing or reviewing forecasting scripts that must comply with
tsagentkit's temporal integrity and leakage prevention rules.

## Inputs
- `data`: pandas DataFrame with `unique_id`, `ds`, `y`
- `task_spec`: `TaskSpec` (h, freq, optional quantiles/covariate policy)
- Optional: `repair_strategy` passed to `run_forecast` or `run_qa`
- Optional: custom `fit_func` (fit(dataset, plan)), `predict_func` (predict(dataset, artifact, spec)),
  `monitoring_config`, `calibrator_spec`, `anomaly_spec`

## Workflow
1. `validate_contract` to enforce schema and ordering.
2. `run_qa` (repairs optional in non-strict modes).
3. Build `TSDataset`.
4. `make_plan` selects model and fallback ladder.
5. `rolling_backtest` (standard/strict).
6. `fit` then `predict`.
7. `package_run` returns `RunArtifact` with provenance (see the sketch below).

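These seven steps compose into a short script. The sketch below strings them together using the entry points documented on this page; the top-level imports of `run_qa` and `package_run` and the arguments passed to them are assumptions here and should be checked against the `qa` and `serving` modules before use.

```python
# Minimal sketch of steps 1-7. The run_qa/package_run import locations and
# keyword arguments are assumptions -- verify against tsagentkit.qa / tsagentkit.serving.
import pandas as pd
from tsagentkit import (
    TaskSpec, validate_contract, build_dataset, make_plan, rolling_backtest,
    run_qa, package_run,  # assumed to be re-exported at the package top level
)
from tsagentkit.models import fit, predict

df = pd.DataFrame({
    "unique_id": ["A"] * 30,
    "ds": pd.date_range("2024-01-01", periods=30, freq="D"),
    "y": [float(i) for i in range(30)],
})
spec = TaskSpec(h=7, freq="D", quantiles=[0.1, 0.5, 0.9])

report = validate_contract(df)                    # 1. schema + ordering
if not report.valid:
    report.raise_if_errors()
df = run_qa(df, spec)                             # 2. QA / optional repairs (assumed signature)
dataset = build_dataset(df, spec)                 # 3. immutable dataset
plan = make_plan(dataset, spec)                   # 4. model + fallback ladder
bt = rolling_backtest(dataset=dataset, spec=spec, plan=plan,
                      fit_func=fit, predict_func=predict)  # 5. temporal backtest
artifact = fit(dataset, plan)                     # 6. final fit...
forecast = predict(dataset, artifact, spec)       #    ...and prediction
run = package_run(forecast, bt, plan, spec)       # 7. RunArtifact with provenance (assumed args)
```
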
---

## Guardrails (Critical)

| Guardrail | Error Code | Prevention |
|---|---|---|
| No random splits | `E_SPLIT_RANDOM_FORBIDDEN` | Data must be sorted by (`unique_id`, `ds`) |
| No covariate leakage | `E_COVARIATE_LEAKAGE` | Future covariates cannot be used |
| Temporal ordering | `E_CONTRACT_UNSORTED` | Dates must increase within each series |

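The first and third guardrails reduce to sorting by (`unique_id`, `ds`) before validation. A minimal sketch follows; the sample frame is illustrative, and the validation pattern matches the contracts section below.

```python
# Sort within each series by timestamp so E_CONTRACT_UNSORTED /
# E_SPLIT_RANDOM_FORBIDDEN are never triggered, then validate.
import pandas as pd
from tsagentkit import validate_contract

df = pd.DataFrame({
    "unique_id": ["A", "A", "B", "B"],
    "ds": pd.to_datetime(["2024-01-02", "2024-01-01", "2024-01-02", "2024-01-01"]),
    "y": [2.0, 1.0, 4.0, 3.0],
})
df = df.sort_values(["unique_id", "ds"]).reset_index(drop=True)

report = validate_contract(df)
if not report.valid:
    report.raise_if_errors()
```
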
---

## Module Reference

### contracts
**What**: Validates input data schema and types.

**When**: Before any forecasting operation.

**Inputs**:
- `unique_id` (str), `ds` (datetime), `y` (numeric)

**Workflow**:
```python
from tsagentkit import validate_contract, TaskSpec

report = validate_contract(df)
if not report.valid:
    report.raise_if_errors()

spec = TaskSpec(
    h=7,
    freq="D",
    quantiles=[0.1, 0.5, 0.9],
)

repair_strategy = {
    "winsorize": {"window": 30, "lower_q": 0.01, "upper_q": 0.99},
    "missing_method": "ffill",
    "outlier_z": 3.0,
}
```

### series
**What**: Immutable dataset container with sparsity profiling.

**When**: After validation, before modeling.

**Inputs**: DataFrame + TaskSpec.

**Workflow**:
```python
from tsagentkit import TSDataset, build_dataset

dataset = build_dataset(df, spec)
print(dataset.n_series)
print(dataset.date_range)
```

### router
**What**: Selects models and fallbacks based on data characteristics.

**When**: After building dataset.

**Inputs**: `TSDataset`, `TaskSpec`.

**Workflow**:
```python
from tsagentkit import make_plan

plan = make_plan(dataset, spec)
print(plan.candidate_models)
```

### backtest
**What**: Rolling window validation without random splits.

**When**: Standard/strict mode before final training.

**Inputs**: Dataset, plan, fit/predict functions.

**Workflow**:
```python
from tsagentkit import rolling_backtest

report = rolling_backtest(
    dataset=dataset,
    spec=spec,
    plan=plan,
    fit_func=fit_func,
    predict_func=predict_func,
)
print(report.summary())
```

### serving
**What**: Full pipeline execution with provenance.

**When**: End-to-end forecasting runs.

**Inputs**: DataFrame + TaskSpec (+ optional configs).

**Workflow**:
```python
from tsagentkit import run_forecast

result = run_forecast(df, spec, mode="standard")
print(result.summary())
```
@@ -0,0 +1,8 @@ tsagentkit/skill/__init__.py
"""Skill module for tsagentkit.

Provides documentation and recipes specifically designed for AI agents
using tsagentkit for time series forecasting tasks.
"""

# This module contains documentation only
# No code exports - see README.md and recipes.md
@@ -0,0 +1,429 @@ tsagentkit/skill/recipes.md
# tsagentkit Recipes

## What
Runnable, end-to-end examples for common forecasting scenarios.

## When
Use these as templates when building scripts or demos.

## Inputs
- `data`: pandas DataFrame with `unique_id`, `ds`, `y`
- `task_spec`: `TaskSpec`

## Workflow
- Pick a recipe, generate or load data, define `TaskSpec`, call `run_forecast`.

## Recipe 1: Retail Daily Sales

**Scenario**: Daily sales forecasting for multiple retail stores with seasonal patterns.

```python
import pandas as pd
import numpy as np
from tsagentkit import TaskSpec, run_forecast

# Generate sample retail data
def generate_retail_data(n_stores=3, n_days=90) -> pd.DataFrame:
    """Generate synthetic retail daily sales data."""
    np.random.seed(42)

    records = []
    for store_id in range(n_stores):
        base_sales = 1000 + store_id * 200
        trend = np.linspace(0, 50, n_days)
        seasonality = 100 * np.sin(2 * np.pi * np.arange(n_days) / 7)  # Weekly
        noise = np.random.normal(0, 50, n_days)

        sales = base_sales + trend + seasonality + noise
        sales = np.maximum(sales, 0)  # No negative sales

        for date, sale in zip(
            pd.date_range("2024-01-01", periods=n_days, freq="D"),
            sales,
        ):
            records.append({
                "unique_id": f"store_{store_id}",
                "ds": date,
                "y": float(sale),
            })

    return pd.DataFrame(records)

# Load data
df = generate_retail_data()
print(f"Data shape: {df.shape}")
print(f"Series: {df['unique_id'].unique()}")

# Define forecasting task
spec = TaskSpec(
    h=14,                        # Forecast 2 weeks ahead
    freq="D",                    # Daily frequency
    quantiles=[0.1, 0.5, 0.9],   # Include prediction intervals
)

# Run forecast (quick mode for demo)
result = run_forecast(df, spec, mode="quick")

# Review results
print("\n=== Forecast ===")
print(result.forecast.head())

print("\n=== Model Used ===")
print(result.model_name)

print("\n=== Provenance ===")
print(f"Data signature: {result.provenance['data_signature']}")
print(f"Timestamp: {result.provenance['timestamp']}")

print("\n=== Summary ===")
print(result.summary())
```

## Recipe 2: Industrial Hourly Metrics

**Scenario**: Hourly equipment sensor readings with irregular gaps.

```python
import pandas as pd
import numpy as np
from tsagentkit import TaskSpec, run_forecast, TSDataset

# Generate hourly sensor data with gaps
def generate_sensor_data(n_sensors=2, hours=168) -> pd.DataFrame:  # 1 week
    """Generate sensor data with some missing hours."""
    np.random.seed(123)

    records = []
    for sensor_id in range(n_sensors):
        # Base signal with daily pattern
        base = 50 + sensor_id * 10
        daily_pattern = 10 * np.sin(2 * np.pi * np.arange(hours) / 24)
        noise = np.random.normal(0, 2, hours)
        values = base + daily_pattern + noise

        # Random gaps (5% missing)
        gap_indices = np.random.choice(hours, size=int(hours * 0.05), replace=False)

        for hour, value in enumerate(values):
            if hour not in gap_indices:
                records.append({
                    "unique_id": f"sensor_{sensor_id}",
                    "ds": pd.Timestamp("2024-01-01") + pd.Timedelta(hours=hour),
                    "y": float(value),
                })

    return pd.DataFrame(records)

# Load data
df = generate_sensor_data()

# Analyze sparsity
print("=== Sparsity Analysis ===")
dataset = TSDataset.from_dataframe(df, TaskSpec(h=24, freq="H"))
profile = dataset.sparsity_profile
for uid, metrics in profile.series_profiles.items():
    print(f"{uid}: {metrics['classification']} "
          f"(gaps: {metrics.get('gap_ratio', 0):.2%})")

# Forecast next 24 hours
spec = TaskSpec(h=24, freq="H")
result = run_forecast(df, spec, mode="standard")

print("\n=== Results ===")
print(result.summary())
```

## Recipe 3: Intermittent Demand

**Scenario**: Spare parts demand with many zero values (intermittent demand).

```python
import pandas as pd
import numpy as np
from tsagentkit import TaskSpec, run_forecast

# Generate intermittent demand data
def generate_intermittent_data(n_parts=3, n_weeks=52) -> pd.DataFrame:
    """Generate intermittent demand (many zeros, occasional spikes)."""
    np.random.seed(456)

    records = []
    for part_id in range(n_parts):
        for week in range(n_weeks):
            # 70% chance of zero demand
            if np.random.random() < 0.7:
                demand = 0.0
            else:
                # Occasional demand spike
                demand = float(np.random.poisson(5) + 1)

            records.append({
                "unique_id": f"part_{part_id}",
                "ds": pd.Timestamp("2024-01-01") + pd.Timedelta(weeks=week),
                "y": demand,
            })

    return pd.DataFrame(records)

# Load data
df = generate_intermittent_data()

# Check zero ratio
for uid in df["unique_id"].unique():
    series = df[df["unique_id"] == uid]
    zero_ratio = (series["y"] == 0).mean()
    print(f"{uid}: {zero_ratio:.1%} zeros")

# Create task
spec = TaskSpec(h=4, freq="W")  # Weekly, 4 weeks ahead

# Run forecast
result = run_forecast(df, spec, mode="standard")

print("\n=== Forecast ===")
print(result.forecast)

print("\n=== Model Selected ===")
print(result.model_name)
print("(Intermittent series use appropriate models)")
```

## Recipe 4: Custom Model Integration

**Scenario**: Using a custom model with tsagentkit's pipeline.

```python
import pandas as pd
from tsagentkit import TaskSpec, run_forecast
from tsagentkit.contracts import ModelArtifact
from tsagentkit.series import TSDataset

# Define custom naive model
class NaiveModel:
    """Simple naive forecast model."""

    def __init__(self, season_length: int = 1):
        self.season_length = season_length
        self.last_values = {}

    def fit(self, df: pd.DataFrame) -> "NaiveModel":
        """Fit by storing last value per series."""
        for uid in df["unique_id"].unique():
            series = df[df["unique_id"] == uid].sort_values("ds")
            self.last_values[uid] = series["y"].iloc[-self.season_length:].values
        return self

    def predict(self, horizon: int, last_dates: dict[str, pd.Timestamp]) -> pd.DataFrame:
        """Generate naive forecast."""
        predictions = []
        for uid, values in self.last_values.items():
            for h in range(1, horizon + 1):
                # Cycle through last values
                idx = (h - 1) % len(values)
                predictions.append({
                    "unique_id": uid,
                    "ds": last_dates[uid] + pd.Timedelta(days=h),
                    "yhat": values[idx],
                })
        return pd.DataFrame(predictions)

# Custom fit function
def custom_fit(dataset: TSDataset, plan):
    """Fit custom model."""
    season_length = dataset.task_spec.season_length or 1
    model = NaiveModel(season_length=season_length)
    model.fit(dataset.df)

    return ModelArtifact(
        model=model,
        model_name="CustomNaive",
        config={"season_length": season_length},
    )

# Custom predict function
def custom_predict(dataset: TSDataset, model: ModelArtifact, spec: TaskSpec):
    """Generate predictions."""
    naive_model = model.model
    horizon = spec.horizon
    last_dates = dataset.df.groupby("unique_id")["ds"].max().to_dict()
    return naive_model.predict(horizon, last_dates)

# Generate data
df = pd.DataFrame({
    "unique_id": ["A"] * 30 + ["B"] * 30,
    "ds": list(pd.date_range("2024-01-01", periods=30, freq="D")) * 2,
    "y": list(range(30)) * 2,
})

# Run with custom model
spec = TaskSpec(h=7, freq="D")
result = run_forecast(
    df, spec,
    mode="standard",
    fit_func=custom_fit,
    predict_func=custom_predict,
)

print("=== Custom Model Results ===")
print(result.summary())
```

## Recipe 5: Backtest Analysis

**Scenario**: Detailed backtest analysis to evaluate model performance.

```python
import pandas as pd
import numpy as np
from tsagentkit import TaskSpec, rolling_backtest
from tsagentkit.models import fit, predict
from tsagentkit.router import make_plan
from tsagentkit.series import TSDataset

# Generate data with trend
df = pd.DataFrame({
    "unique_id": ["A"] * 60,
    "ds": pd.date_range("2024-01-01", periods=60, freq="D"),
    "y": np.linspace(100, 200, 60) + np.random.normal(0, 5, 60),
})

# Create dataset and plan
spec = TaskSpec(h=7, freq="D")
dataset = TSDataset.from_dataframe(df, spec)
plan = make_plan(dataset, spec)

# Run detailed backtest
report = rolling_backtest(
    dataset=dataset,
    spec=spec,
    plan=plan,
    fit_func=fit,
    predict_func=predict,
    n_windows=5,
    window_strategy="expanding",
)

# Analyze results
print("=== Aggregate Metrics ===")
for metric, value in sorted(report.aggregate_metrics.items()):
    print(f"{metric}: {value:.4f}")

print("\n=== Per-Series Performance ===")
for uid, metrics in report.series_metrics.items():
    print(f"{uid}: WAPE={metrics.metrics.get('wape', 0):.2%}, "
          f"windows={metrics.num_windows}")

print("\n=== Window Results ===")
for window in report.window_results:
    print(f"Window {window.window_index}: "
          f"train={window.train_start} to {window.train_end}, "
          f"test={window.test_start} to {window.test_end}")

print("\n=== Report Summary ===")
print(report.summary())
```

## Recipe 6: Error Handling

**Scenario**: Proper error handling and recovery.

```python
import pandas as pd
from tsagentkit import TaskSpec, run_forecast, validate_contract
from tsagentkit.contracts import (
    TSAgentKitError,
    EContractMissingColumn,
    ESplitRandomForbidden,
)

# Example 1: Handle missing columns
def safe_forecast_with_validation(df, spec):
    """Run forecast with proper validation."""
    # Validate first
    validation = validate_contract(df)
    if not validation.valid:
        print("Validation failed:")
        for error in validation.errors:
            print(f" - {error['code']}: {error['message']}")

        # Hint at the most common fix
        if any(e['code'] == EContractMissingColumn.error_code for e in validation.errors):
            print("Hint: Ensure DataFrame has 'unique_id', 'ds', and 'y' columns")
        return None

    return run_forecast(df, spec)

# Example 2: Handle shuffled data
def safe_forecast_sorted(df, spec):
    """Ensure data is sorted before forecasting."""
    # Sort data to prevent E_SPLIT_RANDOM_FORBIDDEN
    df = df.sort_values(["unique_id", "ds"]).reset_index(drop=True)
    return run_forecast(df, spec)

# Example 3: Comprehensive error handling
def robust_forecast(df, spec):
    """Run forecast with comprehensive error handling."""
    try:
        result = run_forecast(df, spec, mode="standard")
        print("Success!")
        return result

    except EContractMissingColumn as e:
        print(f"Data error: {e.message}")
        print(f"Available columns: {e.context.get('available', [])}")

    except ESplitRandomForbidden as e:
        print(f"Ordering error: {e.message}")
        print(f"Suggestion: {e.context.get('suggestion', '')}")
        # Try to fix
        df_sorted = df.sort_values(["unique_id", "ds"]).reset_index(drop=True)
        print("Retrying with sorted data...")
        return robust_forecast(df_sorted, spec)

    except TSAgentKitError as e:
        print(f"tsagentkit error ({e.error_code}): {e.message}")

    except Exception as e:
        print(f"Unexpected error: {type(e).__name__}: {e}")

    return None

# Test with various problematic inputs
print("=== Test 1: Valid Data ===")
df_valid = pd.DataFrame({
    "unique_id": ["A", "A", "B", "B"],
    "ds": pd.date_range("2024-01-01", periods=4, freq="D"),
    "y": [1.0, 2.0, 3.0, 4.0],
})
spec = TaskSpec(h=2, freq="D")
result = robust_forecast(df_valid, spec)

print("\n=== Test 2: Missing Column ===")
df_missing = pd.DataFrame({"x": [1, 2, 3]})
robust_forecast(df_missing, spec)

print("\n=== Test 3: Shuffled Data ===")
df_shuffled = df_valid.sample(frac=1).reset_index(drop=True)
robust_forecast(df_shuffled, spec)
```

## Running the Recipes

All recipes are self-contained and can be run directly:

```bash
# Save a recipe to a file and run it
python recipe1_retail_daily.py

# Or execute the file contents inline
python -c "$(cat recipe1_retail_daily.py)"
```

## Next Steps

- See `README.md` for detailed module documentation
- Check `docs/PRD.md` for technical requirements
- Review test files in `tests/` for more examples
@@ -0,0 +1,21 @@ tsagentkit/skill/tool_map.md
# tsagentkit Tool Map

## What
Quick mapping of common tasks to the recommended tsagentkit entry points.

## When
Use this to choose the correct module or function when building workflows.

## Inputs
- `data`: pandas DataFrame with `unique_id`, `ds`, `y`
- `task_spec`: `TaskSpec`
- Optional: `fit_func` (fit(dataset, plan)), `predict_func` (predict(dataset, artifact, spec)), `monitoring_config`

## Workflow
- Validate schema and ordering: `validate_contract`
- Run QA checks/repairs: `run_qa`
- Build dataset: `TSDataset.from_dataframe` or `build_dataset`
- Plan model routing: `make_plan`
- Run backtest: `rolling_backtest`
- Fit and predict: `fit` + `predict`
- Full pipeline: `run_forecast` (see the sketch below)
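
For the common case, the whole mapping collapses into the single serving entry point. A minimal sketch using only calls shown in the recipes; the sample frame is illustrative.

```python
# Quick path: run_forecast wraps the validate -> QA -> dataset -> plan ->
# backtest -> fit/predict pipeline described in the README.
import pandas as pd
from tsagentkit import TaskSpec, run_forecast

df = pd.DataFrame({
    "unique_id": ["A"] * 30,
    "ds": pd.date_range("2024-01-01", periods=30, freq="D"),
    "y": [float(i) for i in range(30)],
})
result = run_forecast(df, TaskSpec(h=7, freq="D"), mode="standard")
print(result.summary())
```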