pynnlf-0.2.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. pynnlf-0.2.2/LICENSE +21 -0
  2. pynnlf-0.2.2/PKG-INFO +168 -0
  3. pynnlf-0.2.2/README.md +70 -0
  4. pynnlf-0.2.2/pyproject.toml +127 -0
  5. pynnlf-0.2.2/setup.cfg +4 -0
  6. pynnlf-0.2.2/src/pynnlf/__about__.py +1 -0
  7. pynnlf-0.2.2/src/pynnlf/__init__.py +5 -0
  8. pynnlf-0.2.2/src/pynnlf/api.py +17 -0
  9. pynnlf-0.2.2/src/pynnlf/discovery.py +63 -0
  10. pynnlf-0.2.2/src/pynnlf/engine.py +1238 -0
  11. pynnlf-0.2.2/src/pynnlf/hyperparams.py +38 -0
  12. pynnlf-0.2.2/src/pynnlf/model_utils.py +186 -0
  13. pynnlf-0.2.2/src/pynnlf/runner.py +108 -0
  14. pynnlf-0.2.2/src/pynnlf/scaffold/README_WORKSPACE.md +0 -0
  15. pynnlf-0.2.2/src/pynnlf/scaffold/data/README_data.md +40 -0
  16. pynnlf-0.2.2/src/pynnlf/scaffold/data/ds0_test.csv +4081 -0
  17. pynnlf-0.2.2/src/pynnlf/scaffold/models/README_models.md +61 -0
  18. pynnlf-0.2.2/src/pynnlf/scaffold/models/hyperparameters.yaml +264 -0
  19. pynnlf-0.2.2/src/pynnlf/scaffold/models/m10_rf.py +65 -0
  20. pynnlf-0.2.2/src/pynnlf/scaffold/models/m11_svr.py +53 -0
  21. pynnlf-0.2.2/src/pynnlf/scaffold/models/m12_rnn.py +152 -0
  22. pynnlf-0.2.2/src/pynnlf/scaffold/models/m13_lstm.py +208 -0
  23. pynnlf-0.2.2/src/pynnlf/scaffold/models/m14_gru.py +139 -0
  24. pynnlf-0.2.2/src/pynnlf/scaffold/models/m15_transformer.py +138 -0
  25. pynnlf-0.2.2/src/pynnlf/scaffold/models/m16_prophet.py +216 -0
  26. pynnlf-0.2.2/src/pynnlf/scaffold/models/m17_xgb.py +66 -0
  27. pynnlf-0.2.2/src/pynnlf/scaffold/models/m18_nbeats.py +107 -0
  28. pynnlf-0.2.2/src/pynnlf/scaffold/models/m1_naive.py +49 -0
  29. pynnlf-0.2.2/src/pynnlf/scaffold/models/m2_snaive.py +49 -0
  30. pynnlf-0.2.2/src/pynnlf/scaffold/models/m3_ets.py +133 -0
  31. pynnlf-0.2.2/src/pynnlf/scaffold/models/m4_arima.py +123 -0
  32. pynnlf-0.2.2/src/pynnlf/scaffold/models/m5_sarima.py +128 -0
  33. pynnlf-0.2.2/src/pynnlf/scaffold/models/m6_lr.py +76 -0
  34. pynnlf-0.2.2/src/pynnlf/scaffold/models/m7_ann.py +148 -0
  35. pynnlf-0.2.2/src/pynnlf/scaffold/models/m8_dnn.py +141 -0
  36. pynnlf-0.2.2/src/pynnlf/scaffold/models/m9_rt.py +74 -0
  37. pynnlf-0.2.2/src/pynnlf/scaffold/models/mXX_template.py +68 -0
  38. pynnlf-0.2.2/src/pynnlf/scaffold/specs/batch.yaml +4 -0
  39. pynnlf-0.2.2/src/pynnlf/scaffold/specs/experiment.yaml +4 -0
  40. pynnlf-0.2.2/src/pynnlf/scaffold/specs/pynnlf_config.yaml +69 -0
  41. pynnlf-0.2.2/src/pynnlf/scaffold/specs/testing_benchmark.csv +613 -0
  42. pynnlf-0.2.2/src/pynnlf/scaffold/specs/testing_benchmark_metadata.md +12 -0
  43. pynnlf-0.2.2/src/pynnlf/scaffold/specs/tests_ci.yaml +8 -0
  44. pynnlf-0.2.2/src/pynnlf/scaffold/specs/tests_full.yaml +23 -0
  45. pynnlf-0.2.2/src/pynnlf/tests_runner.py +211 -0
  46. pynnlf-0.2.2/src/pynnlf/tools/strip_notebook_artifacts.py +32 -0
  47. pynnlf-0.2.2/src/pynnlf/workspace.py +63 -0
  48. pynnlf-0.2.2/src/pynnlf/yamlio.py +28 -0
  49. pynnlf-0.2.2/src/pynnlf.egg-info/PKG-INFO +168 -0
  50. pynnlf-0.2.2/src/pynnlf.egg-info/SOURCES.txt +51 -0
  51. pynnlf-0.2.2/src/pynnlf.egg-info/dependency_links.txt +1 -0
  52. pynnlf-0.2.2/src/pynnlf.egg-info/requires.txt +94 -0
  53. pynnlf-0.2.2/src/pynnlf.egg-info/top_level.txt +1 -0
pynnlf-0.2.2/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 M. Syahman Samhan
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
pynnlf-0.2.2/PKG-INFO ADDED
@@ -0,0 +1,168 @@
+ Metadata-Version: 2.4
+ Name: pynnlf
+ Version: 0.2.2
+ Summary: Python Network Net Load Forecasting (PyNNLF): reproducible evaluation of net load forecasting models.
+ License: MIT
+ Requires-Python: >=3.12
+ Description-Content-Type: text/markdown
+ Requires-Dist: attrs>=24.2.0
+ Requires-Dist: certifi>=2025.6.15
+ Requires-Dist: charset-normalizer>=3.4.2
+ Requires-Dist: click>=8.2.1
+ Requires-Dist: cmdstanpy>=1.2.5
+ Requires-Dist: colorama>=0.4.6
+ Requires-Dist: cftime>=1.6.4.post1
+ Requires-Dist: contourpy>=1.3.0
+ Requires-Dist: cycler>=0.12.1
+ Requires-Dist: DateTime>=5.5
+ Requires-Dist: debugpy>=1.8.5
+ Requires-Dist: decorator>=5.1.1
+ Requires-Dist: dill>=0.3.9
+ Requires-Dist: et-xmlfile>=2.0.0
+ Requires-Dist: filelock>=3.16.1
+ Requires-Dist: fonttools>=4.54.0
+ Requires-Dist: fsspec>=2024.12.0
+ Requires-Dist: holidays>=0.72
+ Requires-Dist: idna>=3.10
+ Requires-Dist: importlib_resources>=6.5.2
+ Requires-Dist: Jinja2>=3.1.5
+ Requires-Dist: joblib>=1.4.2
+ Requires-Dist: kiwisolver>=1.4.7
+ Requires-Dist: MarkupSafe>=3.0.2
+ Requires-Dist: matplotlib>=3.9.2
+ Requires-Dist: mpmath>=1.3.0
+ Requires-Dist: nbformat>=5.10.4
+ Requires-Dist: nest-asyncio>=1.6.0
+ Requires-Dist: netCDF4>=1.7.2
+ Requires-Dist: networkx>=3.4.2
+ Requires-Dist: numpy>=2.1.1
+ Requires-Dist: openpyxl>=3.1.5
+ Requires-Dist: packaging>=24.1
+ Requires-Dist: pandas>=2.2.3
+ Requires-Dist: patsy>=1.0.1
+ Requires-Dist: pillow>=10.4.0
+ Requires-Dist: platformdirs>=4.3.6
+ Requires-Dist: prophet>=1.1.6
+ Requires-Dist: psutil>=6.0.0
+ Requires-Dist: PyYAML>=6.0.2
+ Requires-Dist: pyyaml_env_tag>=1.1
+ Requires-Dist: pyzmq>=26.2.0
+ Requires-Dist: requests>=2.32.4
+ Requires-Dist: scikit-learn>=1.6.0
+ Requires-Dist: scipy>=1.14.1
+ Requires-Dist: shapely>=2.1.1
+ Requires-Dist: six>=1.16.0
+ Requires-Dist: stanio>=0.5.1
+ Requires-Dist: statsmodels>=0.14.4
+ Requires-Dist: sympy>=1.13.1
+ Requires-Dist: threadpoolctl>=3.5.0
+ Requires-Dist: torch>=2.5.1
+ Requires-Dist: tornado>=6.4.1
+ Requires-Dist: tqdm>=4.67.1
+ Requires-Dist: typing_extensions>=4.12.2
+ Requires-Dist: tzdata>=2024.1
+ Requires-Dist: urllib3>=2.5.0
+ Requires-Dist: wcwidth>=0.2.13
+ Requires-Dist: xgboost>=3.0.0
+ Requires-Dist: zope.interface>=7.0.3
+ Requires-Dist: pywin32>=306; platform_system == "Windows"
+ Provides-Extra: notebook
+ Requires-Dist: ipykernel>=6.29.5; extra == "notebook"
+ Requires-Dist: ipython>=8.27.0; extra == "notebook"
+ Requires-Dist: jupyter_client>=8.6.3; extra == "notebook"
+ Requires-Dist: jupyter_core>=5.7.2; extra == "notebook"
+ Requires-Dist: matplotlib-inline>=0.1.7; extra == "notebook"
+ Requires-Dist: prompt_toolkit>=3.0.47; extra == "notebook"
+ Requires-Dist: Pygments>=2.18.0; extra == "notebook"
+ Requires-Dist: stack-data>=0.6.3; extra == "notebook"
+ Requires-Dist: traitlets>=5.14.3; extra == "notebook"
+ Provides-Extra: docs
+ Requires-Dist: babel>=2.17.0; extra == "docs"
+ Requires-Dist: backrefs>=5.9; extra == "docs"
+ Requires-Dist: ghp-import>=2.1.0; extra == "docs"
+ Requires-Dist: griffe>=1.14.0; extra == "docs"
+ Requires-Dist: Markdown>=3.9; extra == "docs"
+ Requires-Dist: mergedeep>=1.3.4; extra == "docs"
+ Requires-Dist: mkdocs>=1.6.1; extra == "docs"
+ Requires-Dist: mkdocs-autorefs>=1.4.3; extra == "docs"
+ Requires-Dist: mkdocs-get-deps>=0.2.0; extra == "docs"
+ Requires-Dist: mkdocs-material>=9.6.20; extra == "docs"
+ Requires-Dist: mkdocs-material-extensions>=1.3.1; extra == "docs"
+ Requires-Dist: mkdocstrings>=0.30.0; extra == "docs"
+ Requires-Dist: mkdocstrings-python>=1.18.2; extra == "docs"
+ Requires-Dist: paginate>=0.5.7; extra == "docs"
+ Requires-Dist: pathspec>=0.12.1; extra == "docs"
+ Requires-Dist: pymdown-extensions>=10.16.1; extra == "docs"
+ Requires-Dist: PyYAML>=6.0.2; extra == "docs"
+ Requires-Dist: watchdog>=6.0.0; extra == "docs"
+
+ # Welcome to PyNNLF
100
+ PyNNLF (Python for Network Net Load Forecast) is a tool to evaluate net load forecasting model performance in a reliable and reproducible way.
101
+
102
+ This tool evaluates net load forecasting models aiming to make new net load forecasting research more reliable and reproducible. It includes a library of public net load datasets and common forecasting models, including simple benchmark models. Users input the forecast problem and model specification, and the tool outputs evaluation results.
103
+ It also allows users to add datasets, models, and modify hyperparameters. Researchers claiming a new or superior model can compare their model with existing ones on public datasets. The target audience includes researchers working in academia or industry focusing on evaluating and optimizing net load forecasting models.
104
+
105
+ A visual illustration of the tool's workflow is shown below.
106
+
107
+ ![Home Illustration](./docs/img/home_illustration.png)
108
+
109
+ # Input
110
+ 1. **Forecast Target**: Dataset and forecast horizon defined in the YAML spec at example_project/specs/experiment.yaml.
111
+ 2. **Model Specification**: Model and hyperparameters defined in the YAML spec at example_project/specs/experiment.yaml.
112
+
113
+ # Output
114
+ 1. a1_experiment_result.csv – Contains accuracy (cross-validated test n-RMSE), stability (accuracy standard deviation), and training time.
115
+ 2. a2_hyperparameter.csv – Lists the hyperparameters used for each model.
116
+ 3. a3_cross_validation_result.csv – Detailed results for each cross-validation split.
117
+ 4. cv_plots/ – Folder with plots including:
118
+ - Observation vs forecast (time plot)
119
+ - Observation vs forecast (scatter plot)
120
+ - Residual time plot
121
+ - Residual histogram
122
+ 5. cv_test/ and cv_train/ – Folders containing time series of observations, forecasts, and residuals for each cross-validation split.
123
+
124
+ # Tool Output Naming Convention
125
+ Format:
126
+ [experiment_no]_[experiment_date]_[dataset]_[forecast_horizon]_[model]_[hyperparameter]
127
+
128
+ Example:
129
+ E00001_250915_ds0_fh30_m6_lr_hp1
130
+
131
+ # Installation Instruction
132
+ 1. Install the package:
133
+ On macOS, use `python3`/`pip3` if `python`/`pip` are not available.
134
+ ```bash
135
+ python -m pip install pynnlf
136
+ ```
137
+
138
+ # How to Use The Tool
139
+ 1. Initialize a workspace in any directory you want (example name: my_project). By default, only the sample dataset (ds0) is included. You can choose to download all datasets if needed:
140
+ On macOS, use `python3 -c` if `python -c` is not available.
141
+ ```bash
142
+ python -c "import pynnlf; pynnlf.init('my_project')"
143
+ ```
144
+ 2. Set up your experiment in example_project/specs/experiment.yaml.
145
+ 3. Run the experiment:
146
+ ```bash
147
+ python -c "import pynnlf; pynnlf.run_experiment('my_project/specs/experiment.yaml')"
148
+ ```
149
+ 4. View results under example_project/experiment_result.
150
+
151
+ # CI
152
+ CI (Continuous Integration) is automated testing that runs on code changes. CI is available to run smoke tests on 3 models and check whether results fall within the standard benchmark.
153
+
154
+ # Output
155
+ The tool will output the evaluation result in example_project/experiment_result as one folder.
156
+
157
+ # Full Documentation
158
+ Detailed documentation including examples, testing, detailed guide, API reference, features & limitations, etc. can be seen here. [PyNNLF Documentation](https://mssamhan31.github.io/PyNNLF/)
159
+
160
+ # Acknowledgements
161
+ This project is part of Samhan's PhD study, supported by the University International Postgraduate Award (UIPA) Scholarship from UNSW, the Industry Collaboration Project Scholarship from Ausgrid, and the RACE for 2030 Scholarship. We also acknowledge Solcast and the Australian Bureau of Meteorology (BOM) for providing access to historical weather datasets for this research. We further acknowledge the use of Python libraries including Pandas, NumPy, PyTorch, Scikit-learn, XGBoost, Prophet, Statsmodels, and Matplotlib. Finally, we thank the reviewers and editor of the Journal of Open Source Software for their valuable feedback and guidance.
162
+
163
+ The authors declare that they have no competing financial, personal, or professional interests related to this work.
164
+
165
+ # Contributors
166
+ - **M. Syahman Samhan** (m.samhan@unsw.edu.au): Lead developer and researcher. Responsible for conceptualization, implementation, documentation, and experimentation.
167
+ - **Anna Bruce**: Supervisor. Provided guidance on research direction and methodology.
168
+ - **Baran Yildiz**: Supervisor. Provided guidance on research direction and methodology.
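To make the output naming convention above concrete, here is a small sketch that splits a result-folder name back into its documented fields. It is illustrative only: the two-digit date parts, the `hpN` suffix, and the `parse_result_name` helper are assumptions inferred from the single example `E00001_250915_ds0_fh30_m6_lr_hp1`, not part of the package.

```python
import re

# Hedged sketch: parse a PyNNLF result-folder name into its parts.
# The pattern is inferred from the documented example and may need
# adjusting for other runs.
PATTERN = re.compile(
    r"^(?P<experiment_no>E\d+)"
    r"_(?P<experiment_date>\d{6})"      # e.g. 250915
    r"_(?P<dataset>ds\d+)"
    r"_(?P<forecast_horizon>fh\d+)"
    r"_(?P<model>m\d+(?:_\w+)?)"        # model id plus short name, e.g. m6_lr
    r"_(?P<hyperparameter>hp\d+)$"
)

def parse_result_name(name: str) -> dict:
    """Return the naming-convention fields of an experiment result folder."""
    match = PATTERN.match(name)
    if match is None:
        raise ValueError(f"Unrecognised result folder name: {name}")
    return match.groupdict()

print(parse_result_name("E00001_250915_ds0_fh30_m6_lr_hp1"))
# {'experiment_no': 'E00001', 'experiment_date': '250915', 'dataset': 'ds0',
#  'forecast_horizon': 'fh30', 'model': 'm6_lr', 'hyperparameter': 'hp1'}
```

A regex is used rather than a plain `str.split("_")` because the model field (e.g. `m6_lr`) itself contains an underscore.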
pynnlf-0.2.2/README.md ADDED
@@ -0,0 +1,70 @@
+ # Welcome to PyNNLF
+ PyNNLF (Python for Network Net Load Forecast) is a tool for evaluating net load forecasting model performance in a reliable and reproducible way.
+
+ The tool evaluates net load forecasting models with the aim of making new net load forecasting research more reliable and reproducible. It includes a library of public net load datasets and common forecasting models, including simple benchmark models. Users supply the forecast problem and a model specification, and the tool outputs evaluation results.
+ Users can also add datasets, add models, and modify hyperparameters. Researchers proposing a new or superior model can compare it with existing models on public datasets. The target audience is researchers in academia or industry who evaluate and optimize net load forecasting models.
+
+ A visual illustration of the tool's workflow is shown below.
+
+ ![Home Illustration](./docs/img/home_illustration.png)
+
+ # Input
+ 1. **Forecast Target**: Dataset and forecast horizon, defined in the YAML spec at my_project/specs/experiment.yaml.
+ 2. **Model Specification**: Model and hyperparameters, defined in the YAML spec at my_project/specs/experiment.yaml.
+
+ # Output
+ 1. a1_experiment_result.csv – Accuracy (cross-validated test n-RMSE), stability (standard deviation of the accuracy), and training time.
+ 2. a2_hyperparameter.csv – The hyperparameters used for each model.
+ 3. a3_cross_validation_result.csv – Detailed results for each cross-validation split.
+ 4. cv_plots/ – Folder of plots, including:
+    - Observation vs forecast (time plot)
+    - Observation vs forecast (scatter plot)
+    - Residual time plot
+    - Residual histogram
+ 5. cv_test/ and cv_train/ – Folders containing time series of observations, forecasts, and residuals for each cross-validation split.
+
+ # Tool Output Naming Convention
+ Format:
+ [experiment_no]_[experiment_date]_[dataset]_[forecast_horizon]_[model]_[hyperparameter]
+
+ Example:
+ E00001_250915_ds0_fh30_m6_lr_hp1
+
+ # Installation Instructions
+ 1. Install the package:
+    (On macOS, use `python3`/`pip3` if `python`/`pip` are not available.)
+ ```bash
+ python -m pip install pynnlf
+ ```
+
+ # How to Use the Tool
+ 1. Initialize a workspace in any directory you want (example name: my_project). By default, only the sample dataset (ds0) is included; you can download all datasets if needed:
+    (On macOS, use `python3 -c` if `python -c` is not available.)
+ ```bash
+ python -c "import pynnlf; pynnlf.init('my_project')"
+ ```
+ 2. Set up your experiment in my_project/specs/experiment.yaml.
+ 3. Run the experiment:
+ ```bash
+ python -c "import pynnlf; pynnlf.run_experiment('my_project/specs/experiment.yaml')"
+ ```
+ 4. View results under my_project/experiment_result.
+
+ # CI
+ CI (Continuous Integration) is automated testing that runs on code changes. The CI workflow runs smoke tests on three models and checks whether the results fall within the standard benchmark.
+
+ # Output Location
+ The tool writes the evaluation results to my_project/experiment_result, with one folder per experiment.
+
+ # Full Documentation
+ Detailed documentation, including examples, testing, a detailed guide, the API reference, and features & limitations, is available in the [PyNNLF Documentation](https://mssamhan31.github.io/PyNNLF/).
+
+ # Acknowledgements
+ This project is part of Samhan's PhD study, supported by the University International Postgraduate Award (UIPA) Scholarship from UNSW, the Industry Collaboration Project Scholarship from Ausgrid, and the RACE for 2030 Scholarship. We also acknowledge Solcast and the Australian Bureau of Meteorology (BOM) for providing access to historical weather datasets for this research. We further acknowledge the use of Python libraries including Pandas, NumPy, PyTorch, Scikit-learn, XGBoost, Prophet, Statsmodels, and Matplotlib. Finally, we thank the reviewers and editor of the Journal of Open Source Software for their valuable feedback and guidance.
+
+ The authors declare that they have no competing financial, personal, or professional interests related to this work.
+
+ # Contributors
+ - **M. Syahman Samhan** (m.samhan@unsw.edu.au): Lead developer and researcher. Responsible for conceptualization, implementation, documentation, and experimentation.
+ - **Anna Bruce**: Supervisor. Provided guidance on research direction and methodology.
+ - **Baran Yildiz**: Supervisor. Provided guidance on research direction and methodology.
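For readers who prefer to drive the workflow from Python rather than the shell, the sketch below strings together the README steps above. The `pynnlf.init` and `pynnlf.run_experiment` calls are exactly those shown in the README; the final step, which reads `a1_experiment_result.csv` from the newest folder under `my_project/experiment_result`, assumes the documented one-folder-per-experiment layout.

```python
# Minimal end-to-end sketch of the documented workflow, run from Python.
from pathlib import Path
import pandas as pd
import pynnlf

pynnlf.init("my_project")                                  # step 1: scaffold the workspace
# step 2: edit my_project/specs/experiment.yaml to choose dataset, horizon, model
pynnlf.run_experiment("my_project/specs/experiment.yaml")  # step 3: run it

# step 4: inspect the latest result folder (assumed layout: one folder per run,
# named E00001_..., so lexical sort puts the newest experiment number last)
result_dirs = sorted(Path("my_project/experiment_result").iterdir())
summary = pd.read_csv(result_dirs[-1] / "a1_experiment_result.csv")
print(summary)
```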
pynnlf-0.2.2/pyproject.toml ADDED
@@ -0,0 +1,127 @@
+ [build-system]
+ requires = ["setuptools>=77.0.3"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "pynnlf"
+ dynamic = ["version"]
+ description = "Python Network Net Load Forecasting (PyNNLF): reproducible evaluation of net load forecasting models."
+ readme = "README.md"
+ requires-python = ">=3.12"
+ license = { text = "MIT" }
+
+ # --- Runtime dependencies (minimum versions for compatibility) ---
+ dependencies = [
+     "attrs>=24.2.0",
+     "certifi>=2025.6.15",
+     "charset-normalizer>=3.4.2",
+     "click>=8.2.1",
+     "cmdstanpy>=1.2.5",
+     "colorama>=0.4.6",
+     "cftime>=1.6.4.post1",
+     "contourpy>=1.3.0",
+     "cycler>=0.12.1",
+     "DateTime>=5.5",
+     "debugpy>=1.8.5",
+     "decorator>=5.1.1",
+     "dill>=0.3.9",
+     "et-xmlfile>=2.0.0",
+     "filelock>=3.16.1",
+     "fonttools>=4.54.0",
+     "fsspec>=2024.12.0",
+     "holidays>=0.72",
+     "idna>=3.10",
+     "importlib_resources>=6.5.2",
+     "Jinja2>=3.1.5",
+     "joblib>=1.4.2",
+     "kiwisolver>=1.4.7",
+     "MarkupSafe>=3.0.2",
+     "matplotlib>=3.9.2",
+     "mpmath>=1.3.0",
+     "nbformat>=5.10.4",
+     "nest-asyncio>=1.6.0",
+     "netCDF4>=1.7.2",
+     "networkx>=3.4.2",
+     "numpy>=2.1.1",
+     "openpyxl>=3.1.5",
+     "packaging>=24.1",
+     "pandas>=2.2.3",
+     "patsy>=1.0.1",
+     "pillow>=10.4.0",
+     "platformdirs>=4.3.6",
+     "prophet>=1.1.6",
+     "psutil>=6.0.0",
+     "PyYAML>=6.0.2",
+     "pyyaml_env_tag>=1.1",
+     "pyzmq>=26.2.0",
+     "requests>=2.32.4",
+     "scikit-learn>=1.6.0",
+     "scipy>=1.14.1",
+     "shapely>=2.1.1",
+     "six>=1.16.0",
+     "stanio>=0.5.1",
+     "statsmodels>=0.14.4",
+     "sympy>=1.13.1",
+     "threadpoolctl>=3.5.0",
+     "torch>=2.5.1",
+     "tornado>=6.4.1",
+     "tqdm>=4.67.1",
+     "typing_extensions>=4.12.2",
+     "tzdata>=2024.1",
+     "urllib3>=2.5.0",
+     "wcwidth>=0.2.13",
+     "xgboost>=3.0.0",
+     "zope.interface>=7.0.3",
+
+     # Windows-only:
+     "pywin32>=306; platform_system=='Windows'",
+ ]
+
+ [project.optional-dependencies]
+ # Keep notebook tooling optional (since notebooks won't be "the software" anymore)
+ notebook = [
+     "ipykernel>=6.29.5",
+     "ipython>=8.27.0",
+     "jupyter_client>=8.6.3",
+     "jupyter_core>=5.7.2",
+     "matplotlib-inline>=0.1.7",
+     "prompt_toolkit>=3.0.47",
+     "Pygments>=2.18.0",
+     "stack-data>=0.6.3",
+     "traitlets>=5.14.3",
+ ]
+
+ # Keep docs tooling optional
+ docs = [
+     "babel>=2.17.0",
+     "backrefs>=5.9",
+     "ghp-import>=2.1.0",
+     "griffe>=1.14.0",
+     "Markdown>=3.9",
+     "mergedeep>=1.3.4",
+     "mkdocs>=1.6.1",
+     "mkdocs-autorefs>=1.4.3",
+     "mkdocs-get-deps>=0.2.0",
+     "mkdocs-material>=9.6.20",
+     "mkdocs-material-extensions>=1.3.1",
+     "mkdocstrings>=0.30.0",
+     "mkdocstrings-python>=1.18.2",
+     "paginate>=0.5.7",
+     "pathspec>=0.12.1",
+     "pymdown-extensions>=10.16.1",
+     "PyYAML>=6.0.2",
+     "watchdog>=6.0.0",
+ ]
+
+ [tool.setuptools]
+ package-dir = {"" = "src"}
+ license-files = []
+
+ [tool.setuptools.dynamic]
+ version = { attr = "pynnlf.__about__.__version__" }
+
+ [tool.setuptools.packages.find]
+ where = ["src"]
+
+ [tool.setuptools.package-data]
+ pynnlf = ["scaffold/**/*"]
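One note on the `[tool.setuptools.package-data]` entry above: shipping `scaffold/**/*` as package data means the sample dataset, model files, and spec templates travel inside the installed package. The sketch below shows how that bundled data could be listed with the standard library's `importlib.resources`; it only illustrates the packaging choice, and `pynnlf.init()` remains the documented way to create a workspace.

```python
# Illustrative sketch: list the scaffold files shipped as pynnlf package data.
# This demonstrates the [tool.setuptools.package-data] setting only; it does
# not reflect how pynnlf.workspace.init() itself uses these files.
from importlib.resources import files

scaffold = files("pynnlf") / "scaffold"
for entry in scaffold.iterdir():
    print(entry.name)  # e.g. data, models, specs, README_WORKSPACE.md
```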
pynnlf-0.2.2/setup.cfg ADDED
@@ -0,0 +1,4 @@
+ [egg_info]
+ tag_build = 
+ tag_date = 0
+
pynnlf-0.2.2/src/pynnlf/__about__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.2.2"
pynnlf-0.2.2/src/pynnlf/__init__.py ADDED
@@ -0,0 +1,5 @@
+ from .__about__ import __version__
+ from .api import run_experiment, run_experiment_batch, run_tests
+ from .workspace import init
+
+ __all__ = ["__version__", "init", "run_experiment", "run_experiment_batch", "run_tests"]
pynnlf-0.2.2/src/pynnlf/api.py ADDED
@@ -0,0 +1,17 @@
+ from __future__ import annotations
+ from pathlib import Path
+ from typing import Optional, Literal, Union
+ from .runner import run_single, run_batch
+ from .tests_runner import run_tests
+
+ PathLike = Union[str, Path]
+ TestMode = Literal["smoke", "full"]
+
+
+ def run_experiment(spec_path: str | Path) -> None:
+     """Run a single experiment from <workspace>/specs/experiment.yaml."""
+     run_single(spec_path)
+
+ def run_experiment_batch(spec_path: str | Path) -> None:
+     """Run batch experiments from <workspace>/specs/batch.yaml."""
+     run_batch(spec_path)
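The wrappers above keep the public API thin. A minimal usage sketch for the batch entry point, assuming a workspace named `my_project` created with `pynnlf.init()` and a batch spec in the location the docstring names:

```python
import pynnlf

# Run every experiment listed in the batch spec; the path follows the
# docstring convention <workspace>/specs/batch.yaml.
pynnlf.run_experiment_batch("my_project/specs/batch.yaml")
```

`run_tests` is also re-exported at package level (see `__init__.py` above); its arguments are defined in `tests_runner.py`, which is not shown here.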
pynnlf-0.2.2/src/pynnlf/discovery.py ADDED
@@ -0,0 +1,63 @@
+ #!/usr/bin/env python
+ # coding: utf-8
+
+ from pathlib import Path
+
+ def discover_unique_file(directory: Path, prefix: str, suffix: str):
+     """
+     Discover a unique file in a directory by prefix and suffix.
+
+     Example:
+         prefix="m19" suffix=".py" matches: m19_*.py
+         prefix="ds19" suffix=".csv" matches: ds19_*.csv
+
+     Args:
+         directory (Path): Directory to search.
+         prefix (str): Required filename prefix (e.g. "m19" or "ds19").
+         suffix (str): Required suffix (e.g. ".py" or ".csv").
+
+     Returns:
+         Path: The unique matching file path.
+
+     Raises:
+         FileNotFoundError: If no match is found.
+         ValueError: If more than one match is found.
+     """
+     directory = Path(directory)
+     matches = sorted(directory.glob(f"{prefix}_*{suffix}"))
+     if len(matches) == 0:
+         # also allow exact name without underscore: ds19.csv / m19.py
+         exact = directory / f"{prefix}{suffix}"
+         if exact.exists():
+             return exact
+         raise FileNotFoundError(f"No file found for '{prefix}' in {directory}")
+     if len(matches) > 1:
+         raise ValueError(f"Multiple matches for '{prefix}' in {directory}: {[m.name for m in matches]}")
+     return matches[0]
+
+ def discover_model_name(models_dir: Path, model_id: str) -> str:
+     """
+     Discover the model file and return model_name (file stem).
+
+     Args:
+         models_dir (Path): <workspace>/models
+         model_id (str): model ID from spec, e.g. "m6" or "m19"
+
+     Returns:
+         str: model_name file stem, e.g. "m6_lr" or "m19_my_model"
+     """
+     p = discover_unique_file(models_dir, model_id, ".py")
+     return p.stem
+
+ def discover_dataset_path(data_dir: Path, dataset_id: str) -> Path:
+     """
+     Discover the dataset file path from a dataset ID.
+
+     Args:
+         data_dir (Path): <workspace>/data
+         dataset_id (str): dataset ID from spec, e.g. "ds0" or "ds19"
+
+     Returns:
+         Path: dataset file path.
+     """
+     return discover_unique_file(data_dir, dataset_id, ".csv")
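Because the discovery helpers are self-contained, a quick usage sketch is straightforward. The `models/` and `data/` layout mirrors the docstrings above; the file names are invented for the example.

```python
# Usage sketch for the discovery helpers, run against a throwaway directory
# laid out like a workspace (models/ and data/ subfolders per the docstrings).
import tempfile
from pathlib import Path

from pynnlf.discovery import discover_model_name, discover_dataset_path

with tempfile.TemporaryDirectory() as tmp:
    ws = Path(tmp)
    (ws / "models").mkdir()
    (ws / "data").mkdir()
    (ws / "models" / "m6_lr.py").touch()       # hypothetical model file
    (ws / "data" / "ds0_test.csv").touch()     # hypothetical dataset file

    print(discover_model_name(ws / "models", "m6"))        # -> "m6_lr"
    print(discover_dataset_path(ws / "data", "ds0").name)  # -> "ds0_test.csv"
```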