pynnlf 0.2.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. pynnlf/__about__.py +1 -0
  2. pynnlf/__init__.py +5 -0
  3. pynnlf/api.py +17 -0
  4. pynnlf/discovery.py +63 -0
  5. pynnlf/engine.py +1238 -0
  6. pynnlf/hyperparams.py +38 -0
  7. pynnlf/model_utils.py +186 -0
  8. pynnlf/runner.py +108 -0
  9. pynnlf/scaffold/README_WORKSPACE.md +0 -0
  10. pynnlf/scaffold/data/README_data.md +40 -0
  11. pynnlf/scaffold/data/ds0_test.csv +4081 -0
  12. pynnlf/scaffold/models/README_models.md +61 -0
  13. pynnlf/scaffold/models/hyperparameters.yaml +264 -0
  14. pynnlf/scaffold/models/m10_rf.py +65 -0
  15. pynnlf/scaffold/models/m11_svr.py +53 -0
  16. pynnlf/scaffold/models/m12_rnn.py +152 -0
  17. pynnlf/scaffold/models/m13_lstm.py +208 -0
  18. pynnlf/scaffold/models/m14_gru.py +139 -0
  19. pynnlf/scaffold/models/m15_transformer.py +138 -0
  20. pynnlf/scaffold/models/m16_prophet.py +216 -0
  21. pynnlf/scaffold/models/m17_xgb.py +66 -0
  22. pynnlf/scaffold/models/m18_nbeats.py +107 -0
  23. pynnlf/scaffold/models/m1_naive.py +49 -0
  24. pynnlf/scaffold/models/m2_snaive.py +49 -0
  25. pynnlf/scaffold/models/m3_ets.py +133 -0
  26. pynnlf/scaffold/models/m4_arima.py +123 -0
  27. pynnlf/scaffold/models/m5_sarima.py +128 -0
  28. pynnlf/scaffold/models/m6_lr.py +76 -0
  29. pynnlf/scaffold/models/m7_ann.py +148 -0
  30. pynnlf/scaffold/models/m8_dnn.py +141 -0
  31. pynnlf/scaffold/models/m9_rt.py +74 -0
  32. pynnlf/scaffold/models/mXX_template.py +68 -0
  33. pynnlf/scaffold/specs/batch.yaml +4 -0
  34. pynnlf/scaffold/specs/experiment.yaml +4 -0
  35. pynnlf/scaffold/specs/pynnlf_config.yaml +69 -0
  36. pynnlf/scaffold/specs/testing_benchmark.csv +613 -0
  37. pynnlf/scaffold/specs/testing_benchmark_metadata.md +12 -0
  38. pynnlf/scaffold/specs/tests_ci.yaml +8 -0
  39. pynnlf/scaffold/specs/tests_full.yaml +23 -0
  40. pynnlf/tests_runner.py +211 -0
  41. pynnlf/tools/strip_notebook_artifacts.py +32 -0
  42. pynnlf/workspace.py +63 -0
  43. pynnlf/yamlio.py +28 -0
  44. pynnlf-0.2.2.dist-info/METADATA +168 -0
  45. pynnlf-0.2.2.dist-info/RECORD +47 -0
  46. pynnlf-0.2.2.dist-info/WHEEL +5 -0
  47. pynnlf-0.2.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,23 @@
1
+ datasets: [ds0]
2
+ forecast_horizons: [fh1]
3
+ model_and_hp:
4
+ - [m1, hp1]
5
+ - [m2, hp2]
6
+ - [m3, hp1]
7
+ - [m4, hp1]
8
+ - [m5, hp1]
9
+ - [m6, hp1]
10
+ - [m7, hp1]
11
+ - [m8, hp1]
12
+ - [m9, hp3]
13
+ - [m10, hp1]
14
+ - [m11, hp1]
15
+ - [m12, hp1]
16
+ - [m13, hp1]
17
+ - [m14, hp1]
18
+ - [m15, hp1]
19
+ - [m16, hp1]
20
+ - [m17, hp1]
21
+ - [m18, hp1]
22
+ benchmark_csv: specs/testing_benchmark.csv
23
+ mode: warn
pynnlf/tests_runner.py ADDED
@@ -0,0 +1,211 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ from pathlib import Path
5
+ from datetime import datetime
6
+ import pandas as pd
7
+
8
+ from .yamlio import load_yaml
9
+ from .runner import run_batch
10
+ from .engine import compute_exp_no # or keep local helper if you moved it
11
+ import re
12
+
13
+
14
def _workspace_root_from_spec(spec_path: Path) -> Path:
    """
    Infer the workspace root from a spec path.

    Args:
        spec_path (Path): <workspace>/specs/tests_*.yaml

    Returns:
        Path: workspace root
    """
    # specs/ sits directly under the workspace root, so the root is two
    # levels above the spec file.
    return spec_path.parent.parent
25
+
26
+
27
def _find_latest_experiment_dir(output_dir: Path) -> Path:
    """
    Find the most recently modified experiment directory matching the
    E00001_* naming pattern.

    Args:
        output_dir (Path): <workspace>/experiment_result

    Returns:
        Path: latest experiment directory

    Raises:
        FileNotFoundError: if no matching directory exists.
    """
    exp_pat = re.compile(r"^E(\d{5})_")
    candidates = [d for d in output_dir.iterdir() if d.is_dir() and exp_pat.match(d.name)]
    if not candidates:
        raise FileNotFoundError(f"No experiment folders found in {output_dir}")
    # Newest by modification time wins.
    return max(candidates, key=lambda d: d.stat().st_mtime)
46
+
47
+
48
def _load_benchmark_csv(workspace_root: Path, benchmark_rel: str) -> pd.DataFrame:
    """
    Load the benchmark CSV and index it by metric_id.

    Args:
        workspace_root (Path): workspace root
        benchmark_rel (str): e.g. "specs/testing_benchmark.csv"

    Returns:
        pd.DataFrame: indexed by metric_id with min/max columns
    """
    bench = pd.read_csv(workspace_root / benchmark_rel)
    first_col = bench.columns[0]
    if first_col != "metric_id":
        # Tolerate an unnamed / differently named first column by treating
        # it as the metric id.
        bench = bench.rename(columns={first_col: "metric_id"})
    return bench.set_index("metric_id")
64
+
65
+
66
def _write_report(report_path: Path, report_rows: list[dict]) -> None:
    """
    Write the test report CSV (indexed by metric_id).

    If no rows matched the benchmark, an empty report with the expected
    headers is still written (no crash) so downstream tooling always finds
    a file.

    Args:
        report_path (Path): output file path
        report_rows (list[dict]): rows with keys matching the report columns

    Returns:
        None

    Raises:
        KeyError: if non-empty rows are missing the 'metric_id' key.
    """
    # Single source of truth for the report schema (was previously duplicated
    # and the `expected_cols` variable was dead in the non-empty path).
    expected_cols = ["metric_id", "min acceptable value", "max acceptable value", "value, test", "test_result"]

    if report_rows:
        out = pd.DataFrame(report_rows)
        if "metric_id" not in out.columns:
            raise KeyError(f"'metric_id' missing in report rows. Keys seen: {list(out.columns)}")
    else:
        # Nothing matched: empty frame with headers only.
        out = pd.DataFrame(columns=expected_cols)

    out = out.set_index("metric_id")
    report_path.parent.mkdir(parents=True, exist_ok=True)
    out.to_csv(report_path, index=True)
95
+
96
+
97
def run_tests(spec_path: str | Path) -> Path:
    """
    Run regression tests described by specs/tests_ci.yaml or specs/tests_full.yaml.

    Pure warning mode:
    - Always completes and writes a report.
    - Never raises / never fails CI.

    Args:
        spec_path (str | Path): path to <workspace>/specs/tests_*.yaml

    Returns:
        Path: path to generated report CSV
    """
    spec_path = Path(spec_path)
    ws = _workspace_root_from_spec(spec_path)

    tests = load_yaml(spec_path)
    cfg = load_yaml(ws / "specs" / "pynnlf_config.yaml")

    output_dir = ws / cfg["paths"]["output_dir"]

    # 1) Run experiments (reuse batch runner).
    # Build a temporary batch spec and write it to a temp yaml in specs/.
    batch_spec = {
        "datasets": tests["datasets"],
        "forecast_horizons": tests["forecast_horizons"],
        "model_and_hp": tests["model_and_hp"],
    }
    tmp_batch = ws / "specs" / "_tmp_tests_batch.yaml"
    tmp_batch.write_text(
        "\n".join([
            f"datasets: {batch_spec['datasets']}",
            f"forecast_horizons: {batch_spec['forecast_horizons']}",
            "model_and_hp:",
            *[f" - [{m}, {hp}]" for m, hp in batch_spec["model_and_hp"]],
        ]),
        encoding="utf-8"
    )

    # Snapshot experiment folders that already exist so that only folders
    # created by THIS run are compared against the benchmark. (Previously
    # all folders were rescanned, re-reporting stale experiments from
    # earlier runs.)
    pat = re.compile(r"^E(\d{5})_")
    pre_existing: set[str] = set()
    if output_dir.exists():
        pre_existing = {p.name for p in output_dir.iterdir()
                        if p.is_dir() and pat.match(p.name)}

    try:
        run_batch(tmp_batch)
    finally:
        # Always remove the temp spec, even if run_batch raises.
        try:
            tmp_batch.unlink()
        except Exception:
            pass

    # 2) Load benchmark
    bench = _load_benchmark_csv(ws, tests["benchmark_csv"])

    # 3) For each newly created experiment folder, read a3 and compare metrics.
    exp_dirs = sorted(
        [p for p in output_dir.iterdir()
         if p.is_dir() and pat.match(p.name) and p.name not in pre_existing],
        key=lambda x: x.stat().st_mtime,
    )

    # Tolerance is loop-invariant; default 0.005 = half-step of 2dp rounding.
    eps = float(tests.get("tolerance", 0.005))

    report_rows = []

    for exp_dir in exp_dirs:
        # a3 filename pattern: <E00001>_a3_cross_validation_result.csv
        # detected by suffix.
        a3_files = list(exp_dir.glob("*_a3_cross_validation_result.csv"))
        if not a3_files:
            continue

        a3 = pd.read_csv(a3_files[0], index_col=0)
        # Both summary rows are required for the comparison below.
        if "mean" not in a3.index or "stddev" not in a3.index:
            continue

        # Folder pattern: E00001_YYMMDD_ds0_fh30_<model_name...>_<hp_no>
        parts = exp_dir.name.split("_")
        model_name = "_".join(parts[4:-1])  # tokens between fhXX and hpXX

        # Compare each metric column for the mean and stddev rows.
        for col in a3.columns:
            for stat in ("mean", "stddev"):
                metric_id = f"{model_name}_{col}_{stat}"
                if metric_id not in bench.index:
                    continue

                v = float(a3.loc[stat, col])
                vmin = float(bench.loc[metric_id, "min acceptable value"])
                vmax = float(bench.loc[metric_id, "max acceptable value"])

                passed = (vmin - eps) <= v <= (vmax + eps)
                report_rows.append({
                    "metric_id": metric_id,
                    "min acceptable value": vmin,
                    "max acceptable value": vmax,
                    "value, test": v,
                    "test_result": "pass" if passed else "fail",
                })

    # 4) Write report (pure warning — never raise on failures).
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    report_path = output_dir / "Archive" / "Testing Result" / f"test_result_{ts}.csv"

    if not report_rows:
        print("[PyNNLF tests] WARNING: No metrics matched benchmark CSV keys. Report will be empty.")

    _write_report(report_path, report_rows)

    # Print summary, but do not raise.
    n_fail = sum(1 for r in report_rows if r["test_result"] == "fail")
    n_pass = sum(1 for r in report_rows if r["test_result"] == "pass")
    print(f"[PyNNLF tests] pass={n_pass} fail={n_fail} report={report_path}")

    return report_path
@@ -0,0 +1,32 @@
1
+ from pathlib import Path
2
+ import re
3
+
4
# Regexes matching lines that Jupyter/IPython leave behind in exported code.
PATTERNS = [
    r"^\s*get_ipython\(",  # ipython magic runner
    r"^\s*%(\w+)",  # %pip, %run, etc
    r"^\s*!",  # shell escapes
    r"^\s*#\s*In\[\d*\]:\s*$",  # cell markers
]

def strip_file(p: Path) -> int:
    """Remove notebook-artifact lines from *p* in place.

    Args:
        p (Path): python file to clean.

    Returns:
        int: number of lines removed.
    """
    original = p.read_text(encoding="utf-8", errors="ignore").splitlines(True)
    kept = [ln for ln in original
            if not any(re.search(pat, ln) for pat in PATTERNS)]
    removed = len(original) - len(kept)
    # Only rewrite the file when something actually changed.
    if removed:
        p.write_text("".join(kept), encoding="utf-8")
    return removed
22
+
23
def main(root: str | Path = "src") -> None:
    """Strip notebook artifacts from every .py file under *root*.

    Args:
        root (str | Path): directory scanned recursively; defaults to "src"
            (previously hard-coded, now a parameter for reuse elsewhere).

    Returns:
        None. Prints a one-line summary of lines removed / files scanned.
    """
    py_files = list(Path(root).rglob("*.py"))
    total = sum(strip_file(f) for f in py_files)
    print(f"Removed {total} notebook artifact lines across {len(py_files)} files.")

if __name__ == "__main__":
    main()
pynnlf/workspace.py ADDED
@@ -0,0 +1,63 @@
1
+ from __future__ import annotations
2
+
3
+ from importlib import resources
4
+ from pathlib import Path
5
+ import shutil
6
+ import urllib.request
7
+ import yaml
8
+
9
def init(
    workspace_dir: str | Path,
    *,
    download_data: bool = False,
    all_data: bool = False,
    datasets: list[str] | None = None,
    base_url_override: str | None = None,
) -> Path:
    """
    Create a user workspace by copying the bundled scaffold.

    Optionally download datasets from the configured online source
    (plain HTTP download, no checksum verification).

    Args:
        workspace_dir (str | Path): target directory to create the workspace
        download_data (bool): whether to download datasets from online source
        all_data (bool): if True, download all datasets except ds0
        datasets (list[str] | None): list of dataset IDs to download (e.g., ["ds15"])
        base_url_override (str | None): override base_url in config if provided

    Returns:
        Path: workspace directory path

    Raises:
        KeyError: if a requested dataset id is not in the config's dataset map.
    """
    ws = Path(workspace_dir)
    ws.mkdir(parents=True, exist_ok=True)

    # Scaffold files ship inside the installed package.
    # NOTE(review): copytree/copy2 on a Traversable assumes the package is
    # installed unpacked on the filesystem (not zip-imported) — confirm, or
    # wrap with importlib.resources.as_file for zip safety.
    scaffold = resources.files("pynnlf").joinpath("scaffold")

    for item in ("models", "data", "specs", "README_WORKSPACE.md"):
        src = scaffold.joinpath(item)
        dst = ws / item
        if src.is_dir():
            # dirs_exist_ok makes re-running init on an existing workspace safe.
            shutil.copytree(src, dst, dirs_exist_ok=True)
        else:
            dst.parent.mkdir(parents=True, exist_ok=True)
            shutil.copy2(src, dst)

    # safe_load avoids executing arbitrary YAML tags from the config file.
    cfg = yaml.safe_load((ws / "specs" / "pynnlf_config.yaml").read_text(encoding="utf-8"))
    out_dir = ws / cfg["paths"]["output_dir"]
    out_dir.mkdir(parents=True, exist_ok=True)

    if download_data:
        base_url = base_url_override or cfg["dataset_download"]["base_url"]
        ds_map: dict[str, str] = cfg["datasets"]  # dataset id -> file name

        # ds0 already ships with the scaffold, so "all" excludes it.
        to_get = [k for k in ds_map.keys() if k != "ds0"] if all_data else (datasets or [])
        for ds_id in to_get:
            if ds_id not in ds_map:
                raise KeyError(f"Unknown dataset id: {ds_id}")
            fname = ds_map[ds_id]
            url = base_url.rstrip("/") + "/" + fname
            dest = ws / cfg["paths"]["data_dir"] / fname
            dest.parent.mkdir(parents=True, exist_ok=True)
            urllib.request.urlretrieve(url, dest)

    return ws
pynnlf/yamlio.py ADDED
@@ -0,0 +1,28 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ from pathlib import Path
5
+ from typing import Any, Union
6
+ import yaml
7
+
8
# Accepted path-argument types for this module's helpers.
PathLike = Union[str, Path]

def load_yaml(path: PathLike) -> dict[str, Any]:
    """
    Read a YAML file and return its contents as a dict (via safe_load).

    Args:
        path (str | Path): Path to a .yaml/.yml file.

    Returns:
        dict: Parsed YAML content ({} for an empty file).

    Raises:
        FileNotFoundError: if the file does not exist.
        TypeError: if the YAML root is not a mapping.
    """
    yaml_path = Path(path)
    if not yaml_path.exists():
        raise FileNotFoundError(yaml_path)
    parsed = yaml.safe_load(yaml_path.read_text(encoding="utf-8"))
    if parsed is None:
        # Empty file or comments only: treat as an empty mapping.
        return {}
    if not isinstance(parsed, dict):
        raise TypeError(f"YAML root must be a mapping/dict. Got: {type(parsed)}")
    return parsed
@@ -0,0 +1,168 @@
1
+ Metadata-Version: 2.4
2
+ Name: pynnlf
3
+ Version: 0.2.2
4
+ Summary: Python Network Net Load Forecasting (PyNNLF): reproducible evaluation of net load forecasting models.
5
+ License: MIT
6
+ Requires-Python: >=3.12
7
+ Description-Content-Type: text/markdown
8
+ Requires-Dist: attrs>=24.2.0
9
+ Requires-Dist: certifi>=2025.6.15
10
+ Requires-Dist: charset-normalizer>=3.4.2
11
+ Requires-Dist: click>=8.2.1
12
+ Requires-Dist: cmdstanpy>=1.2.5
13
+ Requires-Dist: colorama>=0.4.6
14
+ Requires-Dist: cftime>=1.6.4.post1
15
+ Requires-Dist: contourpy>=1.3.0
16
+ Requires-Dist: cycler>=0.12.1
17
+ Requires-Dist: DateTime>=5.5
18
+ Requires-Dist: debugpy>=1.8.5
19
+ Requires-Dist: decorator>=5.1.1
20
+ Requires-Dist: dill>=0.3.9
21
+ Requires-Dist: et-xmlfile>=2.0.0
22
+ Requires-Dist: filelock>=3.16.1
23
+ Requires-Dist: fonttools>=4.54.0
24
+ Requires-Dist: fsspec>=2024.12.0
25
+ Requires-Dist: holidays>=0.72
26
+ Requires-Dist: idna>=3.10
27
+ Requires-Dist: importlib_resources>=6.5.2
28
+ Requires-Dist: Jinja2>=3.1.5
29
+ Requires-Dist: joblib>=1.4.2
30
+ Requires-Dist: kiwisolver>=1.4.7
31
+ Requires-Dist: MarkupSafe>=3.0.2
32
+ Requires-Dist: matplotlib>=3.9.2
33
+ Requires-Dist: mpmath>=1.3.0
34
+ Requires-Dist: nbformat>=5.10.4
35
+ Requires-Dist: nest-asyncio>=1.6.0
36
+ Requires-Dist: netCDF4>=1.7.2
37
+ Requires-Dist: networkx>=3.4.2
38
+ Requires-Dist: numpy>=2.1.1
39
+ Requires-Dist: openpyxl>=3.1.5
40
+ Requires-Dist: packaging>=24.1
41
+ Requires-Dist: pandas>=2.2.3
42
+ Requires-Dist: patsy>=1.0.1
43
+ Requires-Dist: pillow>=10.4.0
44
+ Requires-Dist: platformdirs>=4.3.6
45
+ Requires-Dist: prophet>=1.1.6
46
+ Requires-Dist: psutil>=6.0.0
47
+ Requires-Dist: PyYAML>=6.0.2
48
+ Requires-Dist: pyyaml_env_tag>=1.1
49
+ Requires-Dist: pyzmq>=26.2.0
50
+ Requires-Dist: requests>=2.32.4
51
+ Requires-Dist: scikit-learn>=1.6.0
52
+ Requires-Dist: scipy>=1.14.1
53
+ Requires-Dist: shapely>=2.1.1
54
+ Requires-Dist: six>=1.16.0
55
+ Requires-Dist: stanio>=0.5.1
56
+ Requires-Dist: statsmodels>=0.14.4
57
+ Requires-Dist: sympy>=1.13.1
58
+ Requires-Dist: threadpoolctl>=3.5.0
59
+ Requires-Dist: torch>=2.5.1
60
+ Requires-Dist: tornado>=6.4.1
61
+ Requires-Dist: tqdm>=4.67.1
62
+ Requires-Dist: typing_extensions>=4.12.2
63
+ Requires-Dist: tzdata>=2024.1
64
+ Requires-Dist: urllib3>=2.5.0
65
+ Requires-Dist: wcwidth>=0.2.13
66
+ Requires-Dist: xgboost>=3.0.0
67
+ Requires-Dist: zope.interface>=7.0.3
68
+ Requires-Dist: pywin32>=306; platform_system == "Windows"
69
+ Provides-Extra: notebook
70
+ Requires-Dist: ipykernel>=6.29.5; extra == "notebook"
71
+ Requires-Dist: ipython>=8.27.0; extra == "notebook"
72
+ Requires-Dist: jupyter_client>=8.6.3; extra == "notebook"
73
+ Requires-Dist: jupyter_core>=5.7.2; extra == "notebook"
74
+ Requires-Dist: matplotlib-inline>=0.1.7; extra == "notebook"
75
+ Requires-Dist: prompt_toolkit>=3.0.47; extra == "notebook"
76
+ Requires-Dist: Pygments>=2.18.0; extra == "notebook"
77
+ Requires-Dist: stack-data>=0.6.3; extra == "notebook"
78
+ Requires-Dist: traitlets>=5.14.3; extra == "notebook"
79
+ Provides-Extra: docs
80
+ Requires-Dist: babel>=2.17.0; extra == "docs"
81
+ Requires-Dist: backrefs>=5.9; extra == "docs"
82
+ Requires-Dist: ghp-import>=2.1.0; extra == "docs"
83
+ Requires-Dist: griffe>=1.14.0; extra == "docs"
84
+ Requires-Dist: Markdown>=3.9; extra == "docs"
85
+ Requires-Dist: mergedeep>=1.3.4; extra == "docs"
86
+ Requires-Dist: mkdocs>=1.6.1; extra == "docs"
87
+ Requires-Dist: mkdocs-autorefs>=1.4.3; extra == "docs"
88
+ Requires-Dist: mkdocs-get-deps>=0.2.0; extra == "docs"
89
+ Requires-Dist: mkdocs-material>=9.6.20; extra == "docs"
90
+ Requires-Dist: mkdocs-material-extensions>=1.3.1; extra == "docs"
91
+ Requires-Dist: mkdocstrings>=0.30.0; extra == "docs"
92
+ Requires-Dist: mkdocstrings-python>=1.18.2; extra == "docs"
93
+ Requires-Dist: paginate>=0.5.7; extra == "docs"
94
+ Requires-Dist: pathspec>=0.12.1; extra == "docs"
95
+ Requires-Dist: pymdown-extensions>=10.16.1; extra == "docs"
96
+ Requires-Dist: PyYAML>=6.0.2; extra == "docs"
97
+ Requires-Dist: watchdog>=6.0.0; extra == "docs"
98
+
99
+ # Welcome to PyNNLF
100
+ PyNNLF (Python Network Net Load Forecasting) is a tool to evaluate net load forecasting model performance in a reliable and reproducible way.
101
+
102
+ This tool evaluates net load forecasting models aiming to make new net load forecasting research more reliable and reproducible. It includes a library of public net load datasets and common forecasting models, including simple benchmark models. Users input the forecast problem and model specification, and the tool outputs evaluation results.
103
+ It also allows users to add datasets, models, and modify hyperparameters. Researchers claiming a new or superior model can compare their model with existing ones on public datasets. The target audience includes researchers working in academia or industry focusing on evaluating and optimizing net load forecasting models.
104
+
105
+ A visual illustration of the tool's workflow is shown below.
106
+
107
+ ![Home Illustration](./docs/img/home_illustration.png)
108
+
109
+ # Input
110
+ 1. **Forecast Target**: Dataset and forecast horizon defined in the YAML spec at example_project/specs/experiment.yaml.
111
+ 2. **Model Specification**: Model and hyperparameters defined in the YAML spec at example_project/specs/experiment.yaml.
112
+
113
+ # Output
114
+ 1. a1_experiment_result.csv – Contains accuracy (cross-validated test n-RMSE), stability (accuracy standard deviation), and training time.
115
+ 2. a2_hyperparameter.csv – Lists the hyperparameters used for each model.
116
+ 3. a3_cross_validation_result.csv – Detailed results for each cross-validation split.
117
+ 4. cv_plots/ – Folder with plots including:
118
+ - Observation vs forecast (time plot)
119
+ - Observation vs forecast (scatter plot)
120
+ - Residual time plot
121
+ - Residual histogram
122
+ 5. cv_test/ and cv_train/ – Folders containing time series of observations, forecasts, and residuals for each cross-validation split.
123
+
124
+ # Tool Output Naming Convention
125
+ Format:
126
+ [experiment_no]_[experiment_date]_[dataset]_[forecast_horizon]_[model]_[hyperparameter]
127
+
128
+ Example:
129
+ E00001_250915_ds0_fh30_m6_lr_hp1
130
+
131
+ # Installation Instruction
132
+ 1. Install the package:
133
+ On macOS, use `python3`/`pip3` if `python`/`pip` are not available.
134
+ ```bash
135
+ python -m pip install pynnlf
136
+ ```
137
+
138
+ # How to Use The Tool
139
+ 1. Initialize a workspace in any directory you want (example name: my_project). By default, only the sample dataset (ds0) is included. You can choose to download all datasets if needed:
140
+ On macOS, use `python3 -c` if `python -c` is not available.
141
+ ```bash
142
+ python -c "import pynnlf; pynnlf.init('my_project')"
143
+ ```
144
+ 2. Set up your experiment in my_project/specs/experiment.yaml.
145
+ 3. Run the experiment:
146
+ ```bash
147
+ python -c "import pynnlf; pynnlf.run_experiment('my_project/specs/experiment.yaml')"
148
+ ```
149
+ 4. View results under my_project/experiment_result.
150
+
151
+ # CI
152
+ CI (Continuous Integration) is automated testing that runs on code changes. CI is available to run smoke tests on 3 models and check whether results fall within the standard benchmark.
153
+
154
+ # Output
155
+ The tool will output the evaluation result in my_project/experiment_result as one folder.
156
+
157
+ # Full Documentation
158
+ Detailed documentation including examples, testing, detailed guide, API reference, features & limitations, etc. can be seen here. [PyNNLF Documentation](https://mssamhan31.github.io/PyNNLF/)
159
+
160
+ # Acknowledgements
161
+ This project is part of Samhan's PhD study, supported by the University International Postgraduate Award (UIPA) Scholarship from UNSW, the Industry Collaboration Project Scholarship from Ausgrid, and the RACE for 2030 Scholarship. We also acknowledge Solcast and the Australian Bureau of Meteorology (BOM) for providing access to historical weather datasets for this research. We further acknowledge the use of Python libraries including Pandas, NumPy, PyTorch, Scikit-learn, XGBoost, Prophet, Statsmodels, and Matplotlib. Finally, we thank the reviewers and editor of the Journal of Open Source Software for their valuable feedback and guidance.
162
+
163
+ The authors declare that they have no competing financial, personal, or professional interests related to this work.
164
+
165
+ # Contributors
166
+ - **M. Syahman Samhan** (m.samhan@unsw.edu.au): Lead developer and researcher. Responsible for conceptualization, implementation, documentation, and experimentation.
167
+ - **Anna Bruce**: Supervisor. Provided guidance on research direction and methodology.
168
+ - **Baran Yildiz**: Supervisor. Provided guidance on research direction and methodology.
@@ -0,0 +1,47 @@
1
+ pynnlf/__about__.py,sha256=anBcPGggCP0f-o9w0IsZYV49rl6HvJbHcuDN39x3mxw,23
2
+ pynnlf/__init__.py,sha256=q9cKeNYBkRea8-uyT3VLiP_sYK0qaYUQFUSqV9AkX-c,221
3
+ pynnlf/api.py,sha256=3owuiVkhw3cj2h1p8IhlrRxklN9JVaE_B7WCu_iuJnk,565
4
+ pynnlf/discovery.py,sha256=MrwUue3VK0xmypyX1ZZPhr3Ibb4jir2iInuJiFtQmgg,2057
5
+ pynnlf/engine.py,sha256=G10umld8KvKYItHCc3u2n4K-sgTk3n1vtfxfIQkMZNg,49308
6
+ pynnlf/hyperparams.py,sha256=TULpIY_IdTaTEjGHIa6bplrejaKeaeJLyACcqAY6wdY,1177
7
+ pynnlf/model_utils.py,sha256=AvzGd5jxPNiVbjRDuzi--AjfNuMYpLqvvfNxgRJuONg,6079
8
+ pynnlf/runner.py,sha256=Ol4MtA93tl24Jzbf-NX3ECDNDbgERup21MM3nJ9TVt8,3422
9
+ pynnlf/tests_runner.py,sha256=aF_0w0LFzJ3g135K1EoIqBlPvp4u_EMnPiL5456d9xM,7176
10
+ pynnlf/workspace.py,sha256=QixfD06BM47N2CP9v7UAgC3_75FuoinFYuKHto1bNyE,2394
11
+ pynnlf/yamlio.py,sha256=Zk8Jwe5ih6kero1m4uRWD5f1K6ZiiKrRpts5YczuraY,700
12
+ pynnlf/scaffold/README_WORKSPACE.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
13
+ pynnlf/scaffold/data/README_data.md,sha256=ZjkCgo5JnOZsY1sCgNiPchORPkCtUtU3L4mwoGR5tkE,1117
14
+ pynnlf/scaffold/data/ds0_test.csv,sha256=a-7T1RXDPfmdJ8Ie8TwuVVJlxiOr84jv_s2rniQUUFY,127117
15
+ pynnlf/scaffold/models/README_models.md,sha256=m9TZinylShhRorz-f1Om1Dyw9r2aqzWHRY_nixNgyPY,1569
16
+ pynnlf/scaffold/models/hyperparameters.yaml,sha256=x0-nZxs9_qVuCOMMabTFSZXNSbgrMqrtWj21YREKu-A,4282
17
+ pynnlf/scaffold/models/m10_rf.py,sha256=HlrqzBPi6_lVZBNlI_9FfLOTOs2rIm-rBR_CUgPEwM4,2190
18
+ pynnlf/scaffold/models/m11_svr.py,sha256=01QA1n8w_2q0wBWlpWaOlAud8NUxWw2CX8xwPkwiPls,1657
19
+ pynnlf/scaffold/models/m12_rnn.py,sha256=7rDs3RQ4TrNP9wTGUQEy5aB-siDOV02xEd8oPQ5vTrE,6023
20
+ pynnlf/scaffold/models/m13_lstm.py,sha256=m3j422aLp_niaVYubU552vSHnSDU027lJWpZSt04KDw,10405
21
+ pynnlf/scaffold/models/m14_gru.py,sha256=aCYmo2u1zycYBsleqrL55h7LS8Du700GtiNmHafJP58,5681
22
+ pynnlf/scaffold/models/m15_transformer.py,sha256=rTwxfQaZg5ERCcAj0kXSygN9cTJcpF7I8qLwjngajVE,6279
23
+ pynnlf/scaffold/models/m16_prophet.py,sha256=O4-cS_6XZyqysZ-sf_3b6IggRpOoi-RNJr33k2LKzfE,9222
24
+ pynnlf/scaffold/models/m17_xgb.py,sha256=9CScEVgIvX5_OXnLuH7NyTQNVUaCR46dYK-bKsNWUsY,1983
25
+ pynnlf/scaffold/models/m18_nbeats.py,sha256=z3ibngjkxayo5qPKktSwJbnCunp47rVhfraJDTez5sY,3926
26
+ pynnlf/scaffold/models/m1_naive.py,sha256=ncaK0laGuTQ1apANq9RbGiXtjZ5ckfiSxEoYHyGVvKg,1466
27
+ pynnlf/scaffold/models/m2_snaive.py,sha256=YKmohLzUC2T0hW67YttV3AfoDPWQ-BHtERpH9Fj7LG8,1452
28
+ pynnlf/scaffold/models/m3_ets.py,sha256=CTDREM9-OdD3i2I3kh29R4inXqpPBaiUILL87w-tBCI,5349
29
+ pynnlf/scaffold/models/m4_arima.py,sha256=We5EWJ5hA-IIbD-pvbpSdAqY3zAvwbzn_X6kLCJFRRo,4938
30
+ pynnlf/scaffold/models/m5_sarima.py,sha256=MSvs3HxYrStbKeuLkw9p9dhTt7hPuNkL1PwUH8N9m4k,5221
31
+ pynnlf/scaffold/models/m6_lr.py,sha256=pZKQ3fdLqo_TwfZHvDjPf_48gyrpWS8KeIfAIC_o44g,2420
32
+ pynnlf/scaffold/models/m7_ann.py,sha256=a94_eCqm-6qOY_sro-Kb2A7FjlsQ4J23i0iaXEDKXUc,4686
33
+ pynnlf/scaffold/models/m8_dnn.py,sha256=YZmreqF5xGiG8yD63Dq3o_evlAEYb_RdqozGgMXnZvE,4998
34
+ pynnlf/scaffold/models/m9_rt.py,sha256=us86ODVfn4F0t3SzPWbcwKNgDx38dlD96_bk9tNJ3jo,2435
35
+ pynnlf/scaffold/models/mXX_template.py,sha256=vaBxAsVtmhPbizNvzXX7Ca1Stfp9khrE3gc3UV46wqg,2603
36
+ pynnlf/scaffold/specs/batch.yaml,sha256=L2tAkCtSaaDV3vP4upj3smotZKL0o621ZxyZDfZfjXI,71
37
+ pynnlf/scaffold/specs/experiment.yaml,sha256=BjaBc1L8WvLF6Tp9ddFGenzrXsaaXZcnuJ5iD4oZybo,67
38
+ pynnlf/scaffold/specs/pynnlf_config.yaml,sha256=ke0L5wPJxzQ7xO9XTuJqXuU7Zq_EtkJcN_6JYU-xVwo,1409
39
+ pynnlf/scaffold/specs/testing_benchmark.csv,sha256=6EJMefgXXgGNjWs36Pr4QGhN4EvtHV2QuxrL56vwdX4,43980
40
+ pynnlf/scaffold/specs/testing_benchmark_metadata.md,sha256=6pTK9zfcwGVfbfhhwmVsYh-CK3N3G--j-rnaxsepERI,551
41
+ pynnlf/scaffold/specs/tests_ci.yaml,sha256=WqofSuvVubTkFu_FSv32jHO99cManF0BT4PDhIPJEOw,191
42
+ pynnlf/scaffold/specs/tests_full.yaml,sha256=v7ONNWcIyDTeU6AV9i0UAip0KX-KgszpeRzVOjzUim0,391
43
+ pynnlf/tools/strip_notebook_artifacts.py,sha256=K93LP6haH5TP9v7kMacDmSdJmHH3IjITlocOx_Ucrnk,926
44
+ pynnlf-0.2.2.dist-info/METADATA,sha256=zHQGrmafzh2Ckqdu0P4ewuarJOumAKVGuE3Ia4DIl1E,8217
45
+ pynnlf-0.2.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
46
+ pynnlf-0.2.2.dist-info/top_level.txt,sha256=3fCgUvjEPm_Pi4jIVZ4mCPCPhhqb2Si-ViVEcS-h70Q,7
47
+ pynnlf-0.2.2.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (80.10.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1 @@
1
+ pynnlf