jerry-thomas 0.2.0__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jerry_thomas-0.3.0/PKG-INFO +502 -0
- jerry_thomas-0.3.0/README.md +483 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/pyproject.toml +1 -1
- jerry_thomas-0.3.0/src/datapipeline/analysis/vector/collector.py +275 -0
- jerry_thomas-0.3.0/src/datapipeline/analysis/vector/matrix.py +527 -0
- jerry_thomas-0.3.0/src/datapipeline/analysis/vector/report.py +317 -0
- jerry_thomas-0.3.0/src/datapipeline/analysis/vector_analyzer.py +5 -0
- jerry_thomas-0.3.0/src/datapipeline/build/__init__.py +6 -0
- jerry_thomas-0.3.0/src/datapipeline/build/state.py +52 -0
- jerry_thomas-0.3.0/src/datapipeline/build/tasks.py +186 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/app.py +125 -56
- jerry_thomas-0.3.0/src/datapipeline/cli/commands/build.py +39 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/domain.py +1 -1
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/filter.py +1 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/inspect.py +77 -26
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/link.py +11 -12
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/plugin.py +1 -1
- jerry_thomas-0.3.0/src/datapipeline/cli/commands/run.py +274 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/source.py +3 -3
- jerry_thomas-0.3.0/src/datapipeline/cli/commands/writers.py +138 -0
- jerry_thomas-0.3.0/src/datapipeline/cli/visuals/__init__.py +14 -0
- jerry_thomas-0.2.0/src/datapipeline/cli/visuals.py → jerry_thomas-0.3.0/src/datapipeline/cli/visuals/labels.py +35 -24
- jerry_thomas-0.3.0/src/datapipeline/cli/visuals/sources.py +138 -0
- jerry_thomas-0.3.0/src/datapipeline/config/build.py +64 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/config/dataset/dataset.py +1 -2
- jerry_thomas-0.3.0/src/datapipeline/config/dataset/loader.py +19 -0
- jerry_thomas-0.3.0/src/datapipeline/config/postprocess.py +14 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/config/project.py +13 -1
- jerry_thomas-0.3.0/src/datapipeline/config/run.py +116 -0
- jerry_thomas-0.3.0/src/datapipeline/config/split.py +35 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/domain/vector.py +0 -9
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/filters/filters.py +1 -1
- jerry_thomas-0.3.0/src/datapipeline/integrations/ml/__init__.py +16 -0
- jerry_thomas-0.3.0/src/datapipeline/integrations/ml/adapter.py +120 -0
- jerry_thomas-0.3.0/src/datapipeline/integrations/ml/pandas_support.py +46 -0
- jerry_thomas-0.3.0/src/datapipeline/integrations/ml/rows.py +82 -0
- jerry_thomas-0.3.0/src/datapipeline/integrations/ml/torch_support.py +94 -0
- jerry_thomas-0.3.0/src/datapipeline/pipeline/context.py +69 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/pipeline/pipelines.py +21 -23
- jerry_thomas-0.3.0/src/datapipeline/pipeline/split.py +171 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/pipeline/stages.py +54 -15
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/pipeline/utils/keygen.py +2 -2
- jerry_thomas-0.3.0/src/datapipeline/pipeline/utils/transform_utils.py +96 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/plugins.py +1 -1
- jerry_thomas-0.3.0/src/datapipeline/runtime.py +73 -0
- jerry_thomas-0.3.0/src/datapipeline/services/artifacts.py +96 -0
- jerry_thomas-0.3.0/src/datapipeline/services/bootstrap/__init__.py +12 -0
- jerry_thomas-0.3.0/src/datapipeline/services/bootstrap/config.py +141 -0
- jerry_thomas-0.3.0/src/datapipeline/services/bootstrap/core.py +186 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/constants.py +5 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/entrypoints.py +1 -1
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/factories.py +5 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/paths.py +1 -1
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/project_paths.py +21 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/domain.py +1 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/filter.py +1 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/mappers.py +1 -1
- jerry_thomas-0.3.0/src/datapipeline/services/scaffold/plugin.py +49 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/source.py +2 -4
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/generator.py +6 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/loader.py +0 -3
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/synthetic.py +1 -1
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/synthetic/time/loader.py +10 -2
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/README.md +96 -0
- jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/config/contracts/time_hour_sin.yaml → jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/contracts/time_hour_sin.synthetic.yaml +3 -3
- jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/config/contracts/time_linear.yaml → jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/contracts/time_linear.synthetic.yaml +3 -3
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/build.yaml +9 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/dataset.yaml +14 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/postprocess.yaml +13 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/project.yaml +20 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/runs/run_test.yaml +10 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/runs/run_train.yaml +10 -0
- jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/runs/run_val.yaml +10 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/plugin_skeleton/pyproject.toml +2 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/dto.py.j2 +2 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/mapper.py.j2 +5 -3
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/parser.py.j2 +1 -0
- jerry_thomas-0.3.0/src/datapipeline/transforms/feature/scaler.py +157 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/filter.py +5 -2
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/stream/fill.py +3 -25
- jerry_thomas-0.3.0/src/datapipeline/transforms/utils.py +26 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/vector.py +62 -78
- jerry_thomas-0.3.0/src/datapipeline/transforms/vector_utils.py +36 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/utils/load.py +2 -2
- jerry_thomas-0.3.0/src/datapipeline/utils/pickle_model.py +30 -0
- jerry_thomas-0.3.0/src/datapipeline/utils/placeholders.py +35 -0
- jerry_thomas-0.3.0/src/jerry_thomas.egg-info/PKG-INFO +502 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/jerry_thomas.egg-info/SOURCES.txt +39 -9
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/tests/test_config_pipeline.py +2 -8
- jerry_thomas-0.3.0/tests/test_regression_vectors.py +249 -0
- jerry_thomas-0.3.0/tests/test_run_config.py +99 -0
- jerry_thomas-0.3.0/tests/test_scaffold_plugin.py +26 -0
- jerry_thomas-0.3.0/tests/test_split_stage.py +46 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/tests/test_transforms.py +70 -34
- jerry_thomas-0.3.0/tests/test_vector_analyzer.py +19 -0
- jerry_thomas-0.2.0/PKG-INFO +0 -402
- jerry_thomas-0.2.0/README.md +0 -383
- jerry_thomas-0.2.0/src/datapipeline/analysis/vector_analyzer.py +0 -696
- jerry_thomas-0.2.0/src/datapipeline/cli/commands/run.py +0 -150
- jerry_thomas-0.2.0/src/datapipeline/cli/visual_source.py +0 -32
- jerry_thomas-0.2.0/src/datapipeline/common/geo.py +0 -13
- jerry_thomas-0.2.0/src/datapipeline/config/dataset/loader.py +0 -99
- jerry_thomas-0.2.0/src/datapipeline/integrations/ml.py +0 -319
- jerry_thomas-0.2.0/src/datapipeline/pipeline/utils/transform_utils.py +0 -55
- jerry_thomas-0.2.0/src/datapipeline/registries/registries.py +0 -15
- jerry_thomas-0.2.0/src/datapipeline/services/bootstrap.py +0 -191
- jerry_thomas-0.2.0/src/datapipeline/services/scaffold/plugin.py +0 -23
- jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/README.md +0 -51
- jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/dataset.yaml +0 -29
- jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/config/datasets/default/project.yaml +0 -8
- jerry_thomas-0.2.0/src/datapipeline/transforms/feature/scaler.py +0 -92
- jerry_thomas-0.2.0/src/datapipeline/transforms/utils.py +0 -10
- jerry_thomas-0.2.0/src/datapipeline/transforms/vector_utils.py +0 -84
- jerry_thomas-0.2.0/src/datapipeline/utils/__init__.py +0 -0
- jerry_thomas-0.2.0/src/jerry_thomas.egg-info/PKG-INFO +0 -402
- jerry_thomas-0.2.0/tests/test_regression_vectors.py +0 -162
- jerry_thomas-0.2.0/tests/test_vector_analyzer.py +0 -19
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/LICENSE +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/setup.cfg +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/analysis/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/cli/commands/list_.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/common → jerry_thomas-0.3.0/src/datapipeline/config}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/config/catalog.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/config/dataset/feature.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/config/dataset/normalize.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/config → jerry_thomas-0.3.0/src/datapipeline/domain}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/domain/feature.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/domain/record.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/integrations/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/mappers/noop.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/mappers/synthetic/time.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/parsers/identity.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/domain → jerry_thomas-0.3.0/src/datapipeline/pipeline}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/pipeline/utils/memory_sort.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/pipeline/utils/ordering.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/registries/registry.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/services/scaffold/templates.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/pipeline → jerry_thomas-0.3.0/src/datapipeline/sources}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/composed_loader.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/decoders.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/factory.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/base.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/parser.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/models/source.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/sources → jerry_thomas-0.3.0/src/datapipeline/sources/synthetic}/__init__.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/sources/synthetic → jerry_thomas-0.3.0/src/datapipeline/sources/synthetic/time}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/synthetic/time/parser.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/sources/transports.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/plugin_skeleton/config/sources/time_ticks.yaml +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/sources/synthetic/time → jerry_thomas-0.3.0/src/datapipeline/templates/plugin_skeleton/src/{{PACKAGE_NAME}}}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/filter.py.j2 +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/loader_synthetic.py.j2 +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/parser_custom.py.j2 +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/record.py.j2 +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/templates/stubs/source.yaml.j2 +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/debug/identity.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/debug/lint.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/feature/model.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/record/floor_time.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/record/lag.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/sequence.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/stream/ensure_ticks.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/transforms/stream/granularity.py +0 -0
- {jerry_thomas-0.2.0/src/datapipeline/templates/plugin_skeleton/src/{{PACKAGE_NAME}} → jerry_thomas-0.3.0/src/datapipeline/utils}/__init__.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/utils/paths.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/datapipeline/utils/time.py +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/jerry_thomas.egg-info/dependency_links.txt +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/jerry_thomas.egg-info/entry_points.txt +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/jerry_thomas.egg-info/requires.txt +0 -0
- {jerry_thomas-0.2.0 → jerry_thomas-0.3.0}/src/jerry_thomas.egg-info/top_level.txt +0 -0
|
@@ -0,0 +1,502 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: jerry-thomas
|
|
3
|
+
Version: 0.3.0
|
|
4
|
+
Summary: Jerry-Thomas: a stream-first, plugin-friendly data pipeline (mixology-themed CLI)
|
|
5
|
+
Author: Anders Skott Lind
|
|
6
|
+
License: MIT
|
|
7
|
+
Requires-Python: >=3.10
|
|
8
|
+
Description-Content-Type: text/markdown
|
|
9
|
+
License-File: LICENSE
|
|
10
|
+
Requires-Dist: numpy<3.0,>=1.24
|
|
11
|
+
Requires-Dist: pydantic>=2.0
|
|
12
|
+
Requires-Dist: PyYAML>=5.4
|
|
13
|
+
Requires-Dist: tqdm>=4.0
|
|
14
|
+
Requires-Dist: jinja2>=3.0
|
|
15
|
+
Provides-Extra: ml
|
|
16
|
+
Requires-Dist: pandas>=2.0; extra == "ml"
|
|
17
|
+
Requires-Dist: torch>=2.0; extra == "ml"
|
|
18
|
+
Dynamic: license-file
|
|
19
|
+
|
|
20
|
+
# Datapipeline Runtime
|
|
21
|
+
|
|
22
|
+
Jerry Thomas is a time-series-first data pipeline runtime. It turns declarative
|
|
23
|
+
YAML projects into iterators that stream records, engineered features, and
|
|
24
|
+
model-ready vectors. The CLI lets you preview every stage, build deterministic
|
|
25
|
+
artifacts, inspect coverage, and scaffold plugins for custom loaders, parsers,
|
|
26
|
+
transforms, and filters.
|
|
27
|
+
|
|
28
|
+
> **Core assumptions**
|
|
29
|
+
>
|
|
30
|
+
> - Every record carries a timezone-aware `time` attribute and a numeric
|
|
31
|
+
> `value`.
|
|
32
|
+
> - Grouping is purely temporal. Dimensional splits belong in `partition_by`.
|
|
33
|
+
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## Why You Might Use It
|
|
37
|
+
|
|
38
|
+
- Materialize canonical time-series datasets from disparate sources.
|
|
39
|
+
- Preview and debug each stage of the pipeline without writing ad-hoc scripts.
|
|
40
|
+
- Enforce coverage/quality gates and publish artifacts (expected IDs, scaler
|
|
41
|
+
stats) for downstream ML teams.
|
|
42
|
+
- Extend the runtime with entry-point driven plugins for domain-specific I/O or
|
|
43
|
+
feature engineering.
|
|
44
|
+
- Consume vectors directly from Python via iterators, Pandas DataFrames, or
|
|
45
|
+
`torch.utils.data.Dataset`.
|
|
46
|
+
|
|
47
|
+
---
|
|
48
|
+
|
|
49
|
+
## Quick Start
|
|
50
|
+
|
|
51
|
+
```bash
|
|
52
|
+
# 1. Install in editable mode (with optional dev extras for testing).
|
|
53
|
+
pip install -e .[dev]
|
|
54
|
+
|
|
55
|
+
# 2. Bootstrap a project (scaffolds configs, plugin package, and templates).
|
|
56
|
+
jerry plugin init --name my_datapipeline --out .
|
|
57
|
+
|
|
58
|
+
# 3. Create a source & domain scaffold, then declare a canonical stream.
|
|
59
|
+
jerry source add --provider demo --dataset weather --transport fs --format csv
|
|
60
|
+
jerry domain add --domain weather
|
|
61
|
+
# (edit config/contracts/<alias>.yaml to point at your mapper and policies)
|
|
62
|
+
|
|
63
|
+
# 4. Configure dataset/postprocess/build files in config/datasets/<name>/.
|
|
64
|
+
# Then preview the pipeline and serve a few vectors:
|
|
65
|
+
jerry serve --project config/datasets/default/project.yaml --stage 2 --limit 5
|
|
66
|
+
jerry serve --project config/datasets/default/project.yaml --output print --limit 3
|
|
67
|
+
|
|
68
|
+
# 5. Inspect coverage and build artifacts:
|
|
69
|
+
jerry inspect report --project config/datasets/default/project.yaml
|
|
70
|
+
jerry build --project config/datasets/default/project.yaml
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
The skeleton project in `src/datapipeline/templates/plugin_skeleton/` mirrors the
|
|
74
|
+
paths expected by the CLI. Copy it or run `jerry plugin init` to get a ready-made
|
|
75
|
+
layout with `config/`, `src/<package>/`, and entry-point stubs.
|
|
76
|
+
|
|
77
|
+
---
|
|
78
|
+
|
|
79
|
+
## Pipeline Architecture
|
|
80
|
+
|
|
81
|
+
```text
|
|
82
|
+
raw source ──▶ canonical stream ──▶ record stage ──▶ feature stage ──▶ vector stage
|
|
83
|
+
```
|
|
84
|
+
|
|
85
|
+
1. **Raw sources** pair a loader with a parser. Loaders fetch bytes (file system,
|
|
86
|
+
HTTP, synthetic generators). Parsers turn those bytes into typed DTOs.
|
|
87
|
+
Register them via entry points (`loaders`, `parsers`) and declaratively wire
|
|
88
|
+
them in `config/sources/*.yaml`.
|
|
89
|
+
2. **Canonical streams** decorate raw sources with mappers and per-stream
|
|
90
|
+
policies. Contract files under `config/contracts/` define record transforms,
|
|
91
|
+
feature transforms, sort hints, and partitioning.
|
|
92
|
+
3. **Record stage** applies canonical policies to DTOs, turning them into
|
|
93
|
+
`TemporalRecord` instances (tz-aware timestamp + numeric value).
|
|
94
|
+
4. **Feature stage** wraps records into `FeatureRecord`s, handles per-feature
|
|
95
|
+
sorting, optional scaling, and sequence windows (`FeatureRecordSequence`).
|
|
96
|
+
5. **Vector stage** merges all feature streams, buckets them using `group_by`
|
|
97
|
+
cadence (e.g., `1h`), and emits `(group_key, Vector)` pairs ready for
|
|
98
|
+
downstream consumers.
|
|
99
|
+
|
|
100
|
+
The runtime (`src/datapipeline/runtime.py`) hosts registries for sources,
|
|
101
|
+
transforms, artifacts, and postprocess rules. The CLI constructs lightweight
|
|
102
|
+
`PipelineContext` objects to build iterators without mutating global state.
|
|
103
|
+
|
|
104
|
+
---
|
|
105
|
+
|
|
106
|
+
## Configuration Files
|
|
107
|
+
|
|
108
|
+
All project configuration lives under `config/datasets/<name>/` by default.
|
|
109
|
+
|
|
110
|
+
### `project.yaml`
|
|
111
|
+
|
|
112
|
+
```yaml
|
|
113
|
+
version: 1
|
|
114
|
+
name: default
|
|
115
|
+
paths:
|
|
116
|
+
streams: ../../contracts
|
|
117
|
+
sources: ../../sources
|
|
118
|
+
dataset: dataset.yaml
|
|
119
|
+
postprocess: postprocess.yaml
|
|
120
|
+
artifacts: ../../build/datasets/${project_name}
|
|
121
|
+
build: build.yaml
|
|
122
|
+
run: run.yaml
|
|
123
|
+
globals:
|
|
124
|
+
start_time: 2021-01-01T00:00:00Z
|
|
125
|
+
end_time: 2023-01-03T23:00:00Z
|
|
126
|
+
split:
|
|
127
|
+
mode: hash # hash | time
|
|
128
|
+
key: group # group | feature:<id>
|
|
129
|
+
seed: 42
|
|
130
|
+
ratios: { train: 0.8, val: 0.1, test: 0.1 }
|
|
131
|
+
```
|
|
132
|
+
|
|
133
|
+
- `name` provides a stable identifier you can reuse inside config files via `${project_name}`.
|
|
134
|
+
- `paths.*` are resolved relative to the project file unless absolute; they also support `${var}` interpolation.
|
|
135
|
+
- `globals` provide values for `${var}` interpolation across YAML files. Datetime
|
|
136
|
+
values are normalized to strict UTC `YYYY-MM-DDTHH:MM:SSZ`.
|
|
137
|
+
- `split` config defines how labels are assigned; the active label is selected by `run.yaml` or CLI `--keep`.
|
|
138
|
+
- `paths.run` may point to a single file (default) or a directory. When it is a directory,
|
|
139
|
+
every `*.yaml` file inside is treated as a run config; `jerry serve` executes them
|
|
140
|
+
sequentially in alphabetical order unless you pass `--run <name>` (filename stem).
|
|
141
|
+
- Label names are free-form: match whatever keys you declare in `split.ratios` (hash) or `split.labels` (time).
|
|
142
|
+
|
|
143
|
+
### `run.yaml`
|
|
144
|
+
|
|
145
|
+
```yaml
|
|
146
|
+
version: 1
|
|
147
|
+
keep: train # set to any label defined in globals.split (null disables filtering)
|
|
148
|
+
output: print # override to 'stream' or a .pt path for binary dumps
|
|
149
|
+
limit: 100 # cap vectors per serve run (null = unlimited)
|
|
150
|
+
include_targets: false
|
|
151
|
+
throttle_ms: null # sleep between vectors (milliseconds)
|
|
152
|
+
log_level: INFO # DEBUG=progress bars, INFO=spinner, WARNING=quiet (null inherits CLI)
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
- `keep` selects the currently served split. This file is referenced by `project.paths.run`.
|
|
156
|
+
- `output`, `limit`, `include_targets`, `throttle_ms`, and `log_level` provide defaults for `jerry serve`; CLI flags still win per invocation.
|
|
157
|
+
- Override `keep` (and other fields) per invocation via `jerry serve ... --keep val` etc.
|
|
158
|
+
- To manage multiple runs, point `project.paths.run` at a folder (e.g., `config/datasets/default/runs/`)
|
|
159
|
+
and drop additional `*.yaml` files there. `jerry serve` will run each file in order; pass
|
|
160
|
+
`--run train` to execute only `runs/train.yaml`.
|
|
161
|
+
|
|
162
|
+
### `config/sources/<alias>.yaml`
|
|
163
|
+
|
|
164
|
+
Each file defines a loader/parser pair exposed under `<alias>` (also the
|
|
165
|
+
`source_id` the rest of the pipeline references).
|
|
166
|
+
|
|
167
|
+
```yaml
|
|
168
|
+
source_id: demo_weather
|
|
169
|
+
loader:
|
|
170
|
+
entrypoint: demo.csv_loader
|
|
171
|
+
args:
|
|
172
|
+
path: data/weather.csv
|
|
173
|
+
parser:
|
|
174
|
+
entrypoint: demo.weather_parser
|
|
175
|
+
args:
|
|
176
|
+
timezone: UTC
|
|
177
|
+
```
|
|
178
|
+
|
|
179
|
+
### `config/contracts/<alias>.yaml`
|
|
180
|
+
|
|
181
|
+
Canonical stream contracts describe how the runtime should map and prepare a
|
|
182
|
+
source. `alias` normally matches the source alias; use folders to organize by
|
|
183
|
+
domain.
|
|
184
|
+
|
|
185
|
+
```yaml
|
|
186
|
+
source_id: demo_weather
|
|
187
|
+
stream_id: demo_weather
|
|
188
|
+
|
|
189
|
+
mapper:
|
|
190
|
+
entrypoint: weather.domain.mapper
|
|
191
|
+
args: {}
|
|
192
|
+
|
|
193
|
+
partition_by: station
|
|
194
|
+
sort_batch_size: 50000
|
|
195
|
+
|
|
196
|
+
record:
|
|
197
|
+
- filter: { operator: ge, field: time, comparand: "${start_time}" }
|
|
198
|
+
- filter: { operator: lt, field: time, comparand: "${end_time}" }
|
|
199
|
+
- floor_time: { resolution: 10m }
|
|
200
|
+
|
|
201
|
+
stream:
|
|
202
|
+
- ensure_ticks: { tick: 10m }
|
|
203
|
+
- granularity: { mode: mean }
|
|
204
|
+
- fill: { statistic: median, window: 6, min_samples: 2 }
|
|
205
|
+
|
|
206
|
+
debug:
|
|
207
|
+
- lint: { mode: warn, tick: 10m }
|
|
208
|
+
```
|
|
209
|
+
|
|
210
|
+
- `record`: ordered record-level transforms (filters, floor/lag, custom
|
|
211
|
+
transforms registered under the `record` entry-point group).
|
|
212
|
+
- `stream`: transforms applied after feature wrapping, still per base feature.
|
|
213
|
+
- `debug`: instrumentation-only transforms (linters, assertions).
|
|
214
|
+
- `partition_by`: optional keys used to suffix feature IDs (e.g., `temp__station=XYZ`).
|
|
215
|
+
- `sort_batch_size`: chunk size used by the in-memory sorter when normalizing
|
|
216
|
+
order before stream transforms.
|
|
217
|
+
|
|
218
|
+
### `dataset.yaml`
|
|
219
|
+
|
|
220
|
+
Defines which canonical streams become features/targets and the vector bucketing.
|
|
221
|
+
|
|
222
|
+
```yaml
|
|
223
|
+
group_by: 1h
|
|
224
|
+
|
|
225
|
+
features:
|
|
226
|
+
- id: temp_c
|
|
227
|
+
record_stream: demo_weather
|
|
228
|
+
scale: true
|
|
229
|
+
sequence: { size: 6, stride: 1, tick: 10m }
|
|
230
|
+
|
|
231
|
+
targets:
|
|
232
|
+
- id: precip
|
|
233
|
+
record_stream: demo_weather
|
|
234
|
+
```
|
|
235
|
+
|
|
236
|
+
- `group_by` controls the cadence for vector partitioning (accepts `Xm|min|Xh`
|
|
237
|
+
— minutes or hours).
|
|
238
|
+
- `scale: true` inserts the standard scaler feature transform (requires scaler
|
|
239
|
+
stats artifact or inline statistics).
|
|
240
|
+
- `sequence` emits `FeatureRecordSequence` windows (size, stride, optional
|
|
241
|
+
cadence enforcement via `tick`).
|
|
242
|
+
|
|
243
|
+
### `postprocess.yaml`
|
|
244
|
+
|
|
245
|
+
Project-scoped vector transforms that run after assembly and before serving.
|
|
246
|
+
|
|
247
|
+
```yaml
|
|
248
|
+
- drop_missing:
|
|
249
|
+
required: [temp_c__station=001]
|
|
250
|
+
min_coverage: 0.95
|
|
251
|
+
- fill_constant: { value: 0.0 }
|
|
252
|
+
- fill_history:
|
|
253
|
+
statistic: median
|
|
254
|
+
window: 48
|
|
255
|
+
min_samples: 6
|
|
256
|
+
- fill_horizontal:
|
|
257
|
+
statistic: mean
|
|
258
|
+
min_samples: 2
|
|
259
|
+
```
|
|
260
|
+
|
|
261
|
+
- Vector transforms rely on artifacts (expected IDs, scaler stats) to decide
|
|
262
|
+
which features should be present.
|
|
263
|
+
- When no transforms are configured the stream passes through unchanged.
|
|
264
|
+
|
|
265
|
+
### `build.yaml`
|
|
266
|
+
|
|
267
|
+
Declares which artifacts the build step should materialize.
|
|
268
|
+
|
|
269
|
+
```yaml
|
|
270
|
+
version: 1
|
|
271
|
+
partitioned_ids:
|
|
272
|
+
output: expected.txt
|
|
273
|
+
include_targets: false
|
|
274
|
+
scaler:
|
|
275
|
+
enabled: true
|
|
276
|
+
output: scaler.pkl
|
|
277
|
+
include_targets: false
|
|
278
|
+
split_label: train
|
|
279
|
+
```
|
|
280
|
+
|
|
281
|
+
- `expected.txt` lists every fully partitioned feature ID observed in the latest
|
|
282
|
+
run (used by vector postprocess transforms).
|
|
283
|
+
- `scaler.pkl` is a pickled standard scaler fitted on the requested split.
|
|
284
|
+
|
|
285
|
+
---
|
|
286
|
+
|
|
287
|
+
## CLI Reference
|
|
288
|
+
|
|
289
|
+
All commands live under the `jerry` entry point (`src/datapipeline/cli/app.py`).
|
|
290
|
+
Pass `--help` on any command for flags.
|
|
291
|
+
|
|
292
|
+
### Preview Stages
|
|
293
|
+
|
|
294
|
+
- `jerry serve --project <project.yaml> --stage <0-7> --limit N [--log-level LEVEL]`
|
|
295
|
+
- Stage 0: raw DTOs
|
|
296
|
+
- Stage 1: domain `TemporalRecord`s
|
|
297
|
+
- Stage 2: record transforms applied
|
|
298
|
+
- Stage 3: feature records (before sort/regularization)
|
|
299
|
+
- Stage 4: feature regularization (post stream transforms)
|
|
300
|
+
- Stage 5: feature transforms/sequence outputs
|
|
301
|
+
- Stage 6: vectors assembled (no postprocess)
|
|
302
|
+
- Stage 7: vectors + postprocess transforms
|
|
303
|
+
- Use `--log-level DEBUG` for progress bars, `--log-level INFO` for spinner + prints, or the default (`WARNING`) for minimal output.
|
|
304
|
+
- `jerry serve --project <project.yaml> --output print|stream|path.pt|path.csv|path.jsonl.gz --limit N [--include-targets] [--log-level LEVEL] [--run name]`
|
|
305
|
+
- Applies postprocess transforms and optional dataset split before emitting.
|
|
306
|
+
- Set `--log-level DEBUG` (or set `run.yaml` -> `log_level: DEBUG`) to reuse the tqdm progress bars when previewing stages.
|
|
307
|
+
- When `project.paths.run` is a directory, add `--run val` (filename stem) to target a single config; otherwise every run file is executed sequentially.
|
|
308
|
+
- Argument precedence: CLI flags > run.yaml > built‑in defaults.
|
|
309
|
+
|
|
310
|
+
### Build & Quality
|
|
311
|
+
|
|
312
|
+
- `jerry inspect report --project <project.yaml> [--threshold 0.95] [--include-targets]`
|
|
313
|
+
- Prints coverage summary (keep/below lists) and writes `coverage.json` under
|
|
314
|
+
the artifacts directory.
|
|
315
|
+
- Add `--matrix csv|html` to persist an availability matrix.
|
|
316
|
+
- `jerry inspect partitions --project <project.yaml> [--include-targets]`
|
|
317
|
+
- Writes discovered partition suffixes to `partitions.json`.
|
|
318
|
+
- `jerry inspect expected --project <project.yaml> [--include-targets]`
|
|
319
|
+
- Writes the full set of observed feature IDs to `expected.txt`.
|
|
320
|
+
- `jerry build --project <project.yaml> [--force]`
|
|
321
|
+
- Regenerates artifacts declared in `build.yaml` if configuration hash changed.
|
|
322
|
+
|
|
323
|
+
### Scaffolding & Reference
|
|
324
|
+
|
|
325
|
+
- `jerry plugin init --name <package> --out <dir>`
|
|
326
|
+
- Generates a plugin project (pyproject, package skeleton, config templates).
|
|
327
|
+
- `jerry source add --provider <name> --dataset <slug> --transport fs|url|synthetic --format csv|json|json-lines`
|
|
328
|
+
- Creates loader/parser stubs, updates entry points, and drops a matching
|
|
329
|
+
source YAML.
|
|
330
|
+
- `jerry domain add --domain <name>`
|
|
331
|
+
- Adds a `domains/<name>/` package with a `model.py` stub.
|
|
332
|
+
- `jerry filter create --name <identifier>`
|
|
333
|
+
- Scaffolds an entry-point-ready filter (helpful for custom record predicates).
|
|
334
|
+
- `jerry list sources|domains`
|
|
335
|
+
- Introspect configured source aliases or domain packages.
|
|
336
|
+
|
|
337
|
+
---
|
|
338
|
+
|
|
339
|
+
## Transform & Filter Library
|
|
340
|
+
|
|
341
|
+
### Record Filters (`config/contracts[].record`)
|
|
342
|
+
|
|
343
|
+
- Binary comparisons: `eq`, `ne`, `lt`, `le`, `gt`, `ge` (timezone-aware for ISO
|
|
344
|
+
or datetime literals).
|
|
345
|
+
- Membership: `in`, `nin`.
|
|
346
|
+
```yaml
|
|
347
|
+
- filter: { operator: ge, field: time, comparand: "${start_time}" }
|
|
348
|
+
- filter: { operator: in, field: station, comparand: [a, b, c] }
|
|
349
|
+
```
|
|
350
|
+
|
|
351
|
+
### Record Transforms
|
|
352
|
+
|
|
353
|
+
- `floor_time`: snap timestamps down to the nearest resolution (`10m`, `1h`, …).
|
|
354
|
+
- `lag`: add lagged copies of records (see `src/datapipeline/transforms/record/lag.py` for options).
|
|
355
|
+
|
|
356
|
+
### Stream (Feature) Transforms
|
|
357
|
+
|
|
358
|
+
- `ensure_ticks`: backfill missing ticks with `value=None` records to enforce a
|
|
359
|
+
strict cadence.
|
|
360
|
+
- `granularity`: merge duplicate timestamps using `first|last|mean|median`.
|
|
361
|
+
- `fill`: rolling statistic-based imputation within each feature stream.
|
|
362
|
+
- Custom transforms can be registered under the `stream` entry-point group.
|
|
363
|
+
|
|
364
|
+
### Feature Transforms
|
|
365
|
+
|
|
366
|
+
- `scale`: wraps `StandardScalerTransform`. Reads statistics from the build
|
|
367
|
+
artifact or accepts inline `statistics`.
|
|
368
|
+
```yaml
|
|
369
|
+
scale:
|
|
370
|
+
with_mean: true
|
|
371
|
+
with_std: true
|
|
372
|
+
statistics:
|
|
373
|
+
temp_c__station=001: { mean: 10.3, std: 2.1 }
|
|
374
|
+
```
|
|
375
|
+
|
|
376
|
+
### Sequence Transforms
|
|
377
|
+
|
|
378
|
+
- `sequence`: sliding window generator (`size`, `stride`, optional `tick` to
|
|
379
|
+
enforce gaps). Emits `FeatureRecordSequence` payloads with `.records`.
|
|
380
|
+
|
|
381
|
+
### Vector (Postprocess) Transforms
|
|
382
|
+
|
|
383
|
+
- `drop_missing`: drop vectors that lack required IDs or fall below the coverage ratio.
|
|
384
|
+
- `fill_constant`: seed absent IDs with a constant.
|
|
385
|
+
- `fill_history`: impute using rolling statistics from prior vectors.
|
|
386
|
+
- `fill_horizontal`: aggregate sibling partitions at the same timestamp.
|
|
387
|
+
|
|
388
|
+
All transforms share a consistent entry-point signature and accept their config
|
|
389
|
+
dict as keyword arguments. Register new ones in `pyproject.toml` under the
|
|
390
|
+
appropriate group (`record`, `stream`, `feature`, `sequence`, `vector`,
|
|
391
|
+
`filters`, `debug`).
|
|
392
|
+
|
|
393
|
+
---
|
|
394
|
+
|
|
395
|
+
## Artifacts & Postprocess
|
|
396
|
+
|
|
397
|
+
- `expected.txt`: newline-delimited full feature IDs. Required by drop/fill
|
|
398
|
+
transforms to know the target feature universe.
|
|
399
|
+
- `scaler.pkl`: pickled standard scaler fitted on the configured split. Loaded
|
|
400
|
+
lazily by feature transforms at runtime.
|
|
401
|
+
- Build state is tracked in `artifacts/build/state.json`; config hashes avoid
|
|
402
|
+
redundant runs.
|
|
403
|
+
|
|
404
|
+
If a postprocess transform needs an artifact and it is missing, the runtime will
|
|
405
|
+
raise a descriptive error suggesting `jerry build`.
|
|
406
|
+
|
|
407
|
+
---
|
|
408
|
+
|
|
409
|
+
## Splitting & Serving
|
|
410
|
+
|
|
411
|
+
If `project.globals.split` is present, `jerry serve` filters vectors at the
|
|
412
|
+
end of the pipeline:
|
|
413
|
+
|
|
414
|
+
- `mode: hash` – deterministic entity hash using either the group key or a
|
|
415
|
+
specified feature ID.
|
|
416
|
+
- `mode: time` – boundary-based slicing using timestamp labels.
|
|
417
|
+
- `run.keep` (or CLI `--keep`) selects the active slice; use any label name defined in your split config.
|
|
418
|
+
|
|
419
|
+
The split configuration never mutates stored artifacts; it is only applied when
|
|
420
|
+
serving vectors (either via CLI or the Python integrations).
|
|
421
|
+
|
|
422
|
+
---
|
|
423
|
+
|
|
424
|
+
## Python Integrations
|
|
425
|
+
|
|
426
|
+
`datapipeline.integrations.ml` demonstrates how to reuse the runtime from
|
|
427
|
+
application code:
|
|
428
|
+
|
|
429
|
+
- `VectorAdapter.from_project(project_yaml)` – bootstrap once, then stream
|
|
430
|
+
vectors or row dicts.
|
|
431
|
+
- `stream_vectors(project_yaml, limit=...)` – iterator matching `jerry serve`.
|
|
432
|
+
- `iter_vector_rows` / `collect_vector_rows` – handy for Pandas or custom sinks.
|
|
433
|
+
- `dataframe_from_vectors` – eager helper that returns a Pandas DataFrame
|
|
434
|
+
(requires `pandas`).
|
|
435
|
+
- `torch_dataset` – builds a `torch.utils.data.Dataset` that yields tensors. See
|
|
436
|
+
`examples/minimal_project/run_torch.py` for usage.
|
|
437
|
+
|
|
438
|
+
---
|
|
439
|
+
|
|
440
|
+
## Extending the Runtime
|
|
441
|
+
|
|
442
|
+
### Entry Points
|
|
443
|
+
|
|
444
|
+
Register custom components in your plugin’s `pyproject.toml`:
|
|
445
|
+
|
|
446
|
+
```toml
|
|
447
|
+
[project.entry-points."datapipeline.loaders"]
|
|
448
|
+
demo.csv_loader = "my_datapipeline.loaders.csv:CsvLoader"
|
|
449
|
+
|
|
450
|
+
[project.entry-points."datapipeline.parsers"]
|
|
451
|
+
demo.weather_parser = "my_datapipeline.parsers.weather:WeatherParser"
|
|
452
|
+
|
|
453
|
+
[project.entry-points."datapipeline.mappers"]
|
|
454
|
+
weather.domain.mapper = "my_datapipeline.mappers.weather:DomainMapper"
|
|
455
|
+
|
|
456
|
+
[project.entry-points."datapipeline.stream"]
|
|
457
|
+
weather.fill = "my_datapipeline.transforms.weather:CustomFill"
|
|
458
|
+
```
|
|
459
|
+
|
|
460
|
+
Loader, parser, mapper, and transform classes should provide a callable
|
|
461
|
+
interface (usually `__call__`) matching the runtime expectations. Refer to the
|
|
462
|
+
built-in implementations in `src/datapipeline/sources/`, `src/datapipeline/transforms/`,
|
|
463
|
+
and `src/datapipeline/filters/`.
|
|
464
|
+
|
|
465
|
+
### Scaffolding Helpers
|
|
466
|
+
|
|
467
|
+
- `datapipeline.services.scaffold.plugin.scaffold_plugin` – invoked by
|
|
468
|
+
`jerry plugin init`.
|
|
469
|
+
- `datapipeline.services.scaffold.source.create_source` – writes loader/parser
|
|
470
|
+
stubs and updates entry points.
|
|
471
|
+
- `datapipeline.services.scaffold.domain.create_domain` – domain DTO skeleton.
|
|
472
|
+
- `datapipeline.services.scaffold.filter.create_filter` – custom filter stub.
|
|
473
|
+
- `datapipeline.services.scaffold.mappers.attach_source_to_domain` – helper for
|
|
474
|
+
programmatically wiring sources to domain mappers and emitting stream
|
|
475
|
+
contracts (useful in custom automation or tests).
|
|
476
|
+
|
|
477
|
+
---
|
|
478
|
+
|
|
479
|
+
## Development Workflow
|
|
480
|
+
|
|
481
|
+
- Install dependencies: `pip install -e .[dev]`.
|
|
482
|
+
- Run tests: `pytest`.
|
|
483
|
+
- When iterating on configs, use `jerry serve --stage <n>` to peek into problematic
|
|
484
|
+
stages.
|
|
485
|
+
- After tuning transforms, refresh artifacts: `jerry build`.
|
|
486
|
+
- Use `jerry inspect report --include-targets` to ensure targets meet coverage
|
|
487
|
+
gates before handing vectors to downstream consumers.
|
|
488
|
+
|
|
489
|
+
---
|
|
490
|
+
|
|
491
|
+
## Additional Resources
|
|
492
|
+
|
|
493
|
+
- `src/datapipeline/analysis/vector_analyzer.py` – quality metrics collected by
|
|
494
|
+
the inspect commands.
|
|
495
|
+
- `src/datapipeline/pipeline/` – pure functions that wire each stage.
|
|
496
|
+
- `src/datapipeline/services/bootstrap/` – runtime initialization and
|
|
497
|
+
registry population (see `core.py`).
|
|
498
|
+
- `examples/minimal_project/` – runnable demo showing config layout and Torch
|
|
499
|
+
integration.
|
|
500
|
+
|
|
501
|
+
Happy shipping! Build, inspect, and serve consistent time-series features with
|
|
502
|
+
confidence.
|