fameio 3.2.0__tar.gz → 3.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fameio-3.2.0 → fameio-3.3.0}/CHANGELOG.md +9 -0
- {fameio-3.2.0 → fameio-3.3.0}/PKG-INFO +33 -23
- {fameio-3.2.0 → fameio-3.3.0}/README.md +32 -22
- {fameio-3.2.0 → fameio-3.3.0}/pyproject.toml +2 -1
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/convert_results.py +4 -6
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/make_config.py +3 -5
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/options.py +6 -4
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/parser.py +53 -29
- fameio-3.3.0/src/fameio/cli/reformat.py +58 -0
- fameio-3.3.0/src/fameio/input/__init__.py +19 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/loader/__init__.py +4 -6
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/loader/controller.py +11 -16
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/loader/loader.py +11 -9
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/metadata.py +26 -29
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/resolver.py +4 -6
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/agent.py +18 -16
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/attribute.py +85 -31
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/contract.py +23 -28
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/exception.py +3 -6
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/fameiofactory.py +7 -12
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/generalproperties.py +7 -8
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/scenario.py +14 -18
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/stringset.py +5 -6
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/schema/agenttype.py +8 -10
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/schema/attribute.py +30 -36
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/schema/java_packages.py +6 -7
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/schema/schema.py +9 -11
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/validator.py +178 -41
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/writer.py +20 -29
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/logs.py +28 -19
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/agent_type.py +14 -16
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/conversion.py +9 -12
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/csv_writer.py +33 -23
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/data_transformer.py +11 -11
- fameio-3.3.0/src/fameio/output/execution_dao.py +170 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/input_dao.py +16 -19
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/output_dao.py +7 -7
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/reader.py +8 -10
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/yaml_writer.py +2 -3
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/__init__.py +15 -4
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/convert_results.py +18 -17
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/exception.py +1 -1
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/make_config.py +3 -4
- fameio-3.3.0/src/fameio/scripts/reformat.py +71 -0
- fameio-3.3.0/src/fameio/scripts/reformat.py.license +3 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/series.py +78 -47
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/time.py +15 -18
- fameio-3.3.0/src/fameio/tools.py +68 -0
- fameio-3.2.0/src/fameio/input/__init__.py +0 -19
- fameio-3.2.0/src/fameio/tools.py +0 -30
- {fameio-3.2.0 → fameio-3.3.0}/LICENSE.txt +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/LICENSES/Apache-2.0.txt +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/LICENSES/CC-BY-4.0.txt +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/LICENSES/CC0-1.0.txt +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/__init__.py +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/__init__.py +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/scenario/__init__.py +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/schema/__init__.py +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/output/__init__.py +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/__init__.py.license +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/convert_results.py.license +0 -0
- {fameio-3.2.0 → fameio-3.3.0}/src/fameio/scripts/make_config.py.license +0 -0
{fameio-3.2.0 → fameio-3.3.0}/CHANGELOG.md

@@ -2,6 +2,15 @@

 SPDX-License-Identifier: CC0-1.0 -->

+## [3.3.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.3.0) - 2025-5-09
+### Changed
+- Expose static methods to read, convert, and write time series #245 (@dlr-cjs)
+- Improve docstrings of SchemaValidator !219 (@dlr-cjs)
+
+### Added
+- Add command-line script to reformat time series #246 (@dlr-cjs)
+- Read execution metadata from protobuf file #193 (@dlr-cjs)
+
 ## [3.2.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.2.0) - 2025-04-22
 ### Changed
 - Suppress detailed Exception traceback in console #239 (@dlr_fn, @dlr-cjs)
{fameio-3.2.0 → fameio-3.3.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: fameio
-Version: 3.2.0
+Version: 3.3.0
 Summary: Tools for input preparation and output digestion of FAME models
 License: Apache-2.0
 Keywords: FAME,fameio,agent-based modelling,energy systems

@@ -47,8 +47,7 @@ SPDX-License-Identifier: Apache-2.0 -->
 *Tools for input preparation and output digestion of FAME models*

 FAME-Io compiles input for FAME models in protobuf format and extracts model outputs to human-readable files.
-Please visit the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/home) to get an explanation of FAME and its
-components.
+Please visit the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/home) to get an explanation of FAME and its components.

 # Installation


@@ -56,30 +55,26 @@ We recommend installing `fameio` using PyPI:

 pip install fameio

-You may also use `pipx`. For detailed information please refer to the
-official `pipx` [documentation](https://github.com/pypa/pipx).
+You may also use `pipx`. For detailed information please refer to the official `pipx` [documentation](https://github.com/pypa/pipx).

 pipx install fameio

-`fameio` is currently developed and tested for Python 3.
+`fameio` is currently developed and tested for Python 3.9 or higher.
 See the `pyproject.toml` for a complete listing of dependencies.

 # Usage

-FAME-Io currently offers two main scripts `makeFameRunConfig` and `convertFameResults
-
+FAME-Io currently offers two main scripts `makeFameRunConfig` and `convertFameResults`, plus a helper script `reformatTimeSeries`
+All are automatically installed with the package.
 The first one creates a protobuf file for FAME applications using YAML definition files and CSV files.
-The
+The second one reads output files from FAME applications in protobuf format and converts them to CSV files.
+The third script reformats time series CSV files to FAME format.

-You may use the [example data](https://gitlab.com/dlr-ve/esy/amiris/examples) provided for
-the [AMIRIS](https://gitlab.com/dlr-ve/esy/amiris/amiris) model which can be used to simulate electricity markets
-in [Germany](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Germany2019), [Austria](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Austria2019),
-and a simple [proof-of-concept model](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Simple).
+You may use the [example data](https://gitlab.com/dlr-ve/esy/amiris/examples) provided for the [AMIRIS](https://gitlab.com/dlr-ve/esy/amiris/amiris) model which can be used to simulate electricity markets in [Germany](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Germany2019), [Austria](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Austria2019), and a simple [proof-of-concept model](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Simple).

 ## Make a FAME run configuration

-Digests configuration files in YAML format, combines them with CSV data files and creates a single input file for FAME
-applications in protobuf format.
+Digests configuration files in YAML format, combines them with CSV data files and creates a single input file for FAME applications in protobuf format.
 Call structure:

 makeFameRunConfig -f <path/to/scenario.yaml>
@@ -644,10 +639,8 @@ TIME_SERIES inputs are not directly fed into the Scenario YAML file.
 Instead, TIME_SERIES reference a CSV file that can be stored some place else.
 These CSV files follow a specific structure:

-* They should contain exactly two columns - any other columns are ignored.
-
-* The first column must be a time stamp in form `YYYY-MM-DD_hh:mm:ss` or
-a [FAME-Timestamp](https://gitlab.com/fame-framework/wiki/-/wikis/architecture/decisions/TimeStamp) integer value.
+* They should contain exactly two columns - any other columns are ignored. A warning is raised if more than two non-empty columns are detected.
+* The first column must be a time stamp in form `YYYY-MM-DD_hh:mm:ss` or a [FAME-Timestamp](https://gitlab.com/fame-framework/wiki/-/wikis/architecture/decisions/TimeStamp) integer value.
 * The second column must be a numerical value (either integer or floating-point)
 * The separator of the two columns is a semicolon
 * The data must **not** have headers, except for comments marked with `#`

@@ -663,11 +656,10 @@ Exemplary content of a valid CSV file:
 2016-01-01_00:00:00;42 # optional comment on this particular data point
 2017-01-01_00:00:00;0.1

-Please refer also to the detailed article about `TimeStamps` in
-the
-For large CSV files (with more than 20,000 rows) we recommend using the integer representation of FAME-Timestamps in the
-first column (instead of text representation) to improve conversion speed.
+Please refer also to the detailed article about `TimeStamps` in the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/TimeStamp).
+For large CSV files (with more than 20,000 rows) we recommend using the integer representation of FAME-Timestamps in the first column (instead of text representation) to improve conversion speed.
 A warning will be raised for very large files (exceeding 50,000 rows) that require time stamp conversion.
+Use `reformatTimeSeries` to convert one or multiple timeseries CSV files into FAME format to improve conversion speed and avoid this warning.

 ### Split and join multiple YAML files

@@ -936,6 +928,24 @@ run_config = handle_args(my_arg_string, my_defaults)
 convert_results(run_config)
 ```

+## Reformat time series
+
+Takes CSV time series files and reformats them into FAME time format.
+This improves speed of run configuration creation but also reduces readability of the CSV files' content.
+Thus, we recommend to apply this reformatting only for CSV time series files with more than 20,000 lines.
+
+Call structure:
+
+reformatTimeSeries -fp <file_or_file_pattern*.csv>
+
+You may also specify any of the following arguments:
+
+| Command | Action |
+|-------------------------------|--------|
+| `-l` or `--log` <option> | Sets the logging level. Default is `WARNING`. Options are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`. |
+| `-lf` or `--logfile` <file> | Sets the logging file. Default is `None`. If `None` is provided, all logs get only printed to the console. |
+| `--replace` or `--no-replace` | If `--replace` is specified, existing csv files are replaced with the new content. Otherwise and by default, new files are created extending the original file name by "_reformatted". |
+
 ## Cite FAME-Io

 If you use FAME-Io for academic work, please cite as follows.
{fameio-3.2.0 → fameio-3.3.0}/README.md

@@ -15,8 +15,7 @@ SPDX-License-Identifier: Apache-2.0 -->
 *Tools for input preparation and output digestion of FAME models*

 FAME-Io compiles input for FAME models in protobuf format and extracts model outputs to human-readable files.
-Please visit the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/home) to get an explanation of FAME and its
-components.
+Please visit the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/home) to get an explanation of FAME and its components.

 # Installation


@@ -24,30 +23,26 @@ We recommend installing `fameio` using PyPI:

 pip install fameio

-You may also use `pipx`. For detailed information please refer to the
-official `pipx` [documentation](https://github.com/pypa/pipx).
+You may also use `pipx`. For detailed information please refer to the official `pipx` [documentation](https://github.com/pypa/pipx).

 pipx install fameio

-`fameio` is currently developed and tested for Python 3.
+`fameio` is currently developed and tested for Python 3.9 or higher.
 See the `pyproject.toml` for a complete listing of dependencies.

 # Usage

-FAME-Io currently offers two main scripts `makeFameRunConfig` and `convertFameResults
-
+FAME-Io currently offers two main scripts `makeFameRunConfig` and `convertFameResults`, plus a helper script `reformatTimeSeries`
+All are automatically installed with the package.
 The first one creates a protobuf file for FAME applications using YAML definition files and CSV files.
-The
+The second one reads output files from FAME applications in protobuf format and converts them to CSV files.
+The third script reformats time series CSV files to FAME format.

-You may use the [example data](https://gitlab.com/dlr-ve/esy/amiris/examples) provided for
-the [AMIRIS](https://gitlab.com/dlr-ve/esy/amiris/amiris) model which can be used to simulate electricity markets
-in [Germany](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Germany2019), [Austria](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Austria2019),
-and a simple [proof-of-concept model](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Simple).
+You may use the [example data](https://gitlab.com/dlr-ve/esy/amiris/examples) provided for the [AMIRIS](https://gitlab.com/dlr-ve/esy/amiris/amiris) model which can be used to simulate electricity markets in [Germany](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Germany2019), [Austria](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Austria2019), and a simple [proof-of-concept model](https://gitlab.com/dlr-ve/esy/amiris/examples/-/tree/main/Simple).

 ## Make a FAME run configuration

-Digests configuration files in YAML format, combines them with CSV data files and creates a single input file for FAME
-applications in protobuf format.
+Digests configuration files in YAML format, combines them with CSV data files and creates a single input file for FAME applications in protobuf format.
 Call structure:

 makeFameRunConfig -f <path/to/scenario.yaml>
@@ -612,10 +607,8 @@ TIME_SERIES inputs are not directly fed into the Scenario YAML file.
 Instead, TIME_SERIES reference a CSV file that can be stored some place else.
 These CSV files follow a specific structure:

-* They should contain exactly two columns - any other columns are ignored.
-
-* The first column must be a time stamp in form `YYYY-MM-DD_hh:mm:ss` or
-a [FAME-Timestamp](https://gitlab.com/fame-framework/wiki/-/wikis/architecture/decisions/TimeStamp) integer value.
+* They should contain exactly two columns - any other columns are ignored. A warning is raised if more than two non-empty columns are detected.
+* The first column must be a time stamp in form `YYYY-MM-DD_hh:mm:ss` or a [FAME-Timestamp](https://gitlab.com/fame-framework/wiki/-/wikis/architecture/decisions/TimeStamp) integer value.
 * The second column must be a numerical value (either integer or floating-point)
 * The separator of the two columns is a semicolon
 * The data must **not** have headers, except for comments marked with `#`

@@ -631,11 +624,10 @@ Exemplary content of a valid CSV file:
 2016-01-01_00:00:00;42 # optional comment on this particular data point
 2017-01-01_00:00:00;0.1

-Please refer also to the detailed article about `TimeStamps` in
-the
-For large CSV files (with more than 20,000 rows) we recommend using the integer representation of FAME-Timestamps in the
-first column (instead of text representation) to improve conversion speed.
+Please refer also to the detailed article about `TimeStamps` in the [FAME-Wiki](https://gitlab.com/fame-framework/wiki/-/wikis/TimeStamp).
+For large CSV files (with more than 20,000 rows) we recommend using the integer representation of FAME-Timestamps in the first column (instead of text representation) to improve conversion speed.
 A warning will be raised for very large files (exceeding 50,000 rows) that require time stamp conversion.
+Use `reformatTimeSeries` to convert one or multiple timeseries CSV files into FAME format to improve conversion speed and avoid this warning.

 ### Split and join multiple YAML files

@@ -904,6 +896,24 @@ run_config = handle_args(my_arg_string, my_defaults)
 convert_results(run_config)
 ```

+## Reformat time series
+
+Takes CSV time series files and reformats them into FAME time format.
+This improves speed of run configuration creation but also reduces readability of the CSV files' content.
+Thus, we recommend to apply this reformatting only for CSV time series files with more than 20,000 lines.
+
+Call structure:
+
+reformatTimeSeries -fp <file_or_file_pattern*.csv>
+
+You may also specify any of the following arguments:
+
+| Command | Action |
+|-------------------------------|--------|
+| `-l` or `--log` <option> | Sets the logging level. Default is `WARNING`. Options are `DEBUG`, `INFO`, `WARNING`, `ERROR`, `CRITICAL`. |
+| `-lf` or `--logfile` <file> | Sets the logging file. Default is `None`. If `None` is provided, all logs get only printed to the console. |
+| `--replace` or `--no-replace` | If `--replace` is specified, existing csv files are replaced with the new content. Otherwise and by default, new files are created extending the original file name by "_reformatted". |
+
 ## Cite FAME-Io

 If you use FAME-Io for academic work, please cite as follows.
{fameio-3.2.0 → fameio-3.3.0}/pyproject.toml

@@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api"

 [project]
 name = "fameio"
-version = "3.2.0"
+version = "3.3.0"
 description = "Tools for input preparation and output digestion of FAME models"
 license = "Apache-2.0"
 readme = "README.md"

@@ -39,6 +39,7 @@ repository = "https://gitlab.com/fame-framework/fame-io"
 [project.scripts]
 makeFameRunConfig = "fameio.scripts:makeFameRunConfig"
 convertFameResults = "fameio.scripts:convertFameResults"
+reformatTimeSeries = "fameio.scripts:reformatTimeSeries"

 [tool.poetry]
 classifiers = [
{fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/convert_results.py

@@ -26,8 +26,8 @@ CLI_DEFAULTS = {
     Options.FILE: None,
     Options.LOG_LEVEL: "WARN",
     Options.LOG_FILE: None,
-    Options.AGENT_LIST: None,
     Options.OUTPUT: None,
+    Options.AGENT_LIST: None,
     Options.SINGLE_AGENT_EXPORT: False,
     Options.MEMORY_SAVING: False,
     Options.RESOLVE_COMPLEX_FIELD: ResolveOptions.SPLIT,

@@ -41,8 +41,7 @@ _OUTFILE_PATH_HELP = "Provide path to folder to store output .csv files"


 def handle_args(args: list[str], defaults: dict[Options, Any] | None = None) -> dict[Options, Any]:
-    """
-    Handles command line arguments and returns `run_config` for convert_results script
+    """Handles command line arguments and returns `run_config` for convert_results script.

     Args:
         args: list of (command line) arguments, e.g., ['-f', 'my_file']; arg values take precedence over defaults

@@ -57,8 +56,7 @@ def handle_args(args: list[str], defaults: dict[Options, Any] | None = None) ->


 def _prepare_parser(defaults: dict[Options, Any] | None) -> argparse.ArgumentParser:
-    """
-    Creates a parser with given defaults to handle `make_config` configuration arguments
+    """Creates a parser with given defaults to handle `make_config` configuration arguments.

     Returns:
         new parser using given defaults for its arguments; if a default is not specified, hard-coded defaults are used

@@ -82,5 +80,5 @@ def _prepare_parser(defaults: dict[Options, Any] | None) -> argparse.ArgumentPar


 def _get_default(defaults: dict, option: Options) -> Any:
-    """Returns default for given `option` or its cli default"""
+    """Returns default for given `option` or its cli default."""
     return defaults.get(option, CLI_DEFAULTS[option])
{fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/make_config.py

@@ -34,8 +34,7 @@ _ENCODING_HELP = (


 def handle_args(args: list[str], defaults: dict[Options, Any] | None = None) -> dict[Options, Any]:
-    """
-    Converts given `arguments` and returns a configuration for the make_config script
+    """Converts given `args` and returns a configuration for the make_config script.

     Args:
         args: list of (command line) arguments, e.g., ['-f', 'my_file']; arg values take precedence over defaults

@@ -50,8 +49,7 @@ def handle_args(args: list[str], defaults: dict[Options, Any] | None = None) ->


 def _prepare_parser(defaults: dict[Options, Any] | None) -> argparse.ArgumentParser:
-    """
-    Creates a parser with given defaults to handle `make_config` configuration arguments
+    """Creates a parser with given defaults to handle `make_config` configuration arguments.

     Returns:
         new parser using given defaults for its arguments; if a default is not specified, hard-coded defaults are used

@@ -67,5 +65,5 @@ def _prepare_parser(defaults: dict[Options, Any] | None) -> argparse.ArgumentPar


 def _get_default(defaults: dict, option: Options) -> Any:
-    """Returns default for given `option` or, if missing, its cli default"""
+    """Returns default for given `option` or, if missing, its cli default."""
     return defaults.get(option, CLI_DEFAULTS[option])
{fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/options.py

@@ -6,7 +6,7 @@ from enum import Enum, auto


 class ParsableEnum(Enum):
-    """Extend this to create an enum that can be parsed with argparse"""
+    """Extend this to create an enum that can be parsed with argparse."""

     @classmethod
     def instantiate(cls, name: str) -> Enum:

@@ -20,7 +20,7 @@ class ParsableEnum(Enum):


 class Options(Enum):
-    """Specifies command line configuration options"""
+    """Specifies command line configuration options."""

     FILE = auto()
     LOG_LEVEL = auto()

@@ -34,10 +34,12 @@ class Options(Enum):
     TIME_MERGING = auto()
     INPUT_RECOVERY = auto()
     INPUT_ENCODING = auto()
+    FILE_PATTERN = auto()
+    REPLACE = auto()


 class TimeOptions(ParsableEnum, Enum):
-    """Specifies options for conversion of time in output"""
+    """Specifies options for conversion of time in output."""

     INT = auto()
     UTC = auto()

@@ -45,7 +47,7 @@ class TimeOptions(ParsableEnum, Enum):


 class ResolveOptions(ParsableEnum, Enum):
-    """Specifies options for resolving complex fields in output files"""
+    """Specifies options for resolving complex fields in output files."""

     IGNORE = auto()
     SPLIT = auto()
{fameio-3.2.0 → fameio-3.3.0}/src/fameio/cli/parser.py

@@ -26,13 +26,15 @@ _OPTION_ARGUMENT_NAME: dict[str, Options] = {
     "input_recovery": Options.INPUT_RECOVERY,
     "complex_column": Options.RESOLVE_COMPLEX_FIELD,
     "merge_times": Options.TIME_MERGING,
+    "file_pattern": Options.FILE_PATTERN,
+    "replace": Options.REPLACE,
 }


 def add_file_argument(parser: ArgumentParser, default: Path | None, help_text: str) -> None:
-    """
-
-    If a default is not specified, the argument is required (optional otherwise)
+    """Adds 'file' argument to the provided `parser` with the provided `help_text`.
+
+    If a default is not specified, the argument is required (optional otherwise).

     Args:
         parser: to add the argument to

@@ -46,24 +48,24 @@ def add_file_argument(parser: ArgumentParser, default: Path | None, help_text: s


 def add_select_agents_argument(parser: ArgumentParser, default_value: list[str] | None) -> None:
-    """Adds optional repeatable string argument 'agent' to given `parser`"""
+    """Adds optional repeatable string argument 'agent' to given `parser`."""
     help_text = f"Provide list of agents to extract (default={default_value})"
     parser.add_argument("-a", "--agents", nargs="*", type=str, default=default_value, help=help_text)


 def add_logfile_argument(parser: ArgumentParser, default_value: Path | None) -> None:
-    """Adds optional argument 'logfile' to given `parser`"""
+    """Adds optional argument 'logfile' to given `parser`."""
     help_text = f"provide logging file (default={default_value})"
     parser.add_argument("-lf", "--logfile", type=Path, default=default_value, help=help_text)


 def add_output_argument(parser: ArgumentParser, default_value, help_text: str) -> None:
-    """Adds optional argument 'output' to given `parser` using the given `help_text` and `default_value`"""
+    """Adds optional argument 'output' to given `parser` using the given `help_text` and `default_value`."""
     parser.add_argument("-o", "--output", type=Path, default=default_value, help=help_text)


 def add_log_level_argument(parser: ArgumentParser, default_value: str) -> None:
-    """Adds optional argument 'log' to given `parser`"""
+    """Adds optional argument 'log' to given `parser`."""
     help_text = f"choose logging level (default={default_value})"
     # noinspection PyTypeChecker
     parser.add_argument(

@@ -82,7 +84,7 @@ def add_encoding_argument(parser: ArgumentParser, default_value: str | None, hel


 def add_single_export_argument(parser: ArgumentParser, default_value: bool) -> None:
-    """Adds optional repeatable string argument 'agent' to given `parser`"""
+    """Adds optional repeatable string argument 'agent' to given `parser`."""
     help_text = f"Enable export of single agents (default={default_value})"
     parser.add_argument(
         "-se",

@@ -94,7 +96,7 @@ def add_single_export_argument(parser: ArgumentParser, default_value: bool) -> N


 def add_memory_saving_argument(parser: ArgumentParser, default_value: bool) -> None:
-    """Adds optional bool argument to given `parser` to enable memory saving mode"""
+    """Adds optional bool argument to given `parser` to enable memory saving mode."""
     help_text = f"Reduces memory usage profile at the cost of runtime (default={default_value})"
     parser.add_argument(
         "-m",

@@ -106,7 +108,7 @@ def add_memory_saving_argument(parser: ArgumentParser, default_value: bool) -> N


 def add_resolve_complex_argument(parser: ArgumentParser, default_value: ResolveOptions | str):
-    """Instructs given `parser` how to deal with complex field outputs"""
+    """Instructs given `parser` how to deal with complex field outputs."""
     default_value = default_value if isinstance(default_value, ResolveOptions) else ResolveOptions[default_value]
     help_text = f"How to deal with complex index columns? (default={default_value.name})"
     parser.add_argument(

@@ -120,7 +122,7 @@ def add_resolve_complex_argument(parser: ArgumentParser, default_value: ResolveO


 def add_time_argument(parser: ArgumentParser, default_value: TimeOptions | str) -> None:
-    """Adds optional argument to given `parser` to define conversion of TimeSteps"""
+    """Adds optional argument to given `parser` to define conversion of TimeSteps."""
     default_value = default_value if isinstance(default_value, TimeOptions) else TimeOptions[default_value]
     help_text = f"Apply conversion of time steps to given format (default={default_value.name})"
     parser.add_argument(

@@ -134,7 +136,7 @@ def add_time_argument(parser: ArgumentParser, default_value: TimeOptions | str)


 def add_merge_time_argument(parser: ArgumentParser, defaults: list[int] | None = None) -> None:
-    """Adds optional three-fold argument for merging of TimeSteps to given `parser`"""
+    """Adds optional three-fold argument for merging of TimeSteps to given `parser`."""
     if defaults is None:
         defaults = []
     if (
@@ -152,21 +154,43 @@ def add_merge_time_argument(parser: ArgumentParser, defaults: list[int] | None =


 def add_inputs_recovery_argument(parser: ArgumentParser, default_value: bool) -> None:
-    """Adds optional bool argument to given `parser` to recover inputs"""
-
-
-    help_text = f"If --(no-)input-recovery is specified, (no) inputs will be recovered (default={default_str})"
-    parser.add_argument(
-        f"--{arg_name}",
-        action=BooleanOptionalAction,
-        default=default_value,
-        help=help_text,
-    )
+    """Adds optional bool argument to given `parser` to recover inputs."""
+    description = "(no) inputs will be recovered"
+    _add_optional_boolean_argument(parser, default_value, "input-recovery", description)


-def update_default_config(overrides: dict[Options, Any] | None, defaults: dict[Options, Any]) -> dict[Options, Any]:
+def _add_optional_boolean_argument(parser: ArgumentParser, default: bool, arg_name: str, description: str) -> None:
+    """Adds optional boolean argument to parser.
+
+    Argument named `arg_name` is added to given `parser` overwriting the provided default.
+    Help from argument `description` is added as help text.
+
+    Args:
+        parser: to add the argument to
+        default: of the argument
+        arg_name: long name of the argument, no short name allowed; will be prepended with 'no-' for negation
+        description: to create the help text from: "If --(no-)<arg_name> is specified, <description> (default=X)'
     """
-    Returns `defaults` with updated fields received from `overrides`
+    default_str = "--" + ("no-" if not default else "") + arg_name
+    help_text = f"If --(no-){arg_name} is specified, {description} (default={default_str})"
+    parser.add_argument(f"--{arg_name}", action=BooleanOptionalAction, default=default, help=help_text)
+
+
+def add_file_pattern_argument(parser: ArgumentParser, default_value: str | None) -> None:
+    """Adds argument to given `parser` to specify a file pattern; if no default provided, the argument is mandatory."""
+    help_text = f"Path to csv file(s) that are to be converted (default='{default_value}')"
+    required = not bool(default_value)
+    parser.add_argument("--file-pattern", "-fp", required=required, type=str, default=default_value, help=help_text)
+
+
+def add_replace_argument(parser: ArgumentParser, default_value: bool) -> None:
+    """Adds optional bool argument to given `parser` to replace converted files."""
+    description = "original files will (not) be replaced"
+    _add_optional_boolean_argument(parser, default_value, "replace", description)
+
+
+def update_default_config(overrides: dict[Options, Any] | None, defaults: dict[Options, Any]) -> dict[Options, Any]:
+    """Returns `defaults` with updated fields received from `overrides`.

     Args:
         overrides: updates to be applied to `defaults`

@@ -183,8 +207,7 @@ def update_default_config(overrides: dict[Options, Any] | None, defaults: dict[O


 def map_namespace_to_options_dict(parsed: Namespace) -> dict[Options, Any]:
-    """
-    Maps given parsing results to their corresponding configuration option
+    """Maps given parsing results to their corresponding configuration option.

     Args:
         parsed: result of a parsing

@@ -196,9 +219,10 @@ def map_namespace_to_options_dict(parsed: Namespace) -> dict[Options, Any]:


 def _map_namespace_to_options(parsed: Namespace, names_to_options: dict[str, Options]) -> dict[Options, Any]:
-    """
-
-
+    """Maps given parsing results to their corresponding configuration option.
+
+    Elements that cannot be mapped are ignored.
+    If a configuration option has inner elements, these will be also read and added as inner dictionary.

     Args:
         parsed: result of a parsing
fameio-3.3.0/src/fameio/cli/reformat.py

@@ -0,0 +1,58 @@
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+import argparse
+from typing import Any
+
+from fameio.cli.options import Options
+from fameio.cli.parser import (
+    map_namespace_to_options_dict,
+    add_logfile_argument,
+    add_log_level_argument,
+    add_file_pattern_argument,
+    add_replace_argument,
+)
+
+CLI_DEFAULTS = {
+    Options.LOG_LEVEL: "WARN",
+    Options.LOG_FILE: None,
+    Options.FILE_PATTERN: None,
+    Options.REPLACE: False,
+}
+
+
+def handle_args(args: list[str], defaults: dict[Options, Any] | None = None) -> dict[Options, Any]:
+    """Converts given `args` and returns a configuration for the transform script.
+
+    Args:
+        args: list of (command line) arguments, e.g., ['-fp', 'my_file']; arg values take precedence over defaults
+        defaults: optional default values used for unspecified parameters; missing defaults are replaced by CLI defaults
+
+    Returns:
+        final configuration compiled from (given) `defaults` and given `args`
+    """
+    parser = _prepare_parser(defaults)
+    parsed = parser.parse_args(args)
+    return map_namespace_to_options_dict(parsed)
+
+
+def _prepare_parser(defaults: dict[Options, Any] | None) -> argparse.ArgumentParser:
+    """Creates a parser with given defaults to handle `reformat` configuration arguments.
+
+    Returns:
+        new parser using given defaults for its arguments; if a default is not specified, hard-coded defaults are used
+    """
+    defaults = defaults if (defaults is not None) else {}
+    parser = argparse.ArgumentParser()
+    add_log_level_argument(parser, _get_default(defaults, Options.LOG_LEVEL))
+    add_logfile_argument(parser, _get_default(defaults, Options.LOG_FILE))
+    add_file_pattern_argument(parser, _get_default(defaults, Options.FILE_PATTERN))
+    add_replace_argument(parser, _get_default(defaults, Options.REPLACE))
+    return parser
+
+
+def _get_default(defaults: dict, option: Options) -> Any:
+    """Returns default for given `option` or its cli default."""
+    return defaults.get(option, CLI_DEFAULTS[option])
fameio-3.3.0/src/fameio/input/__init__.py

@@ -0,0 +1,19 @@
+# SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+
+
+class InputError(Exception):
+    """An error that occurred while preparing a fame input file."""
+
+
+class SchemaError(InputError):
+    """An error that occurred while parsing a Schema."""
+
+
+class ScenarioError(InputError):
+    """An error that occurred while parsing a Scenario."""
+
+
+class YamlLoaderError(InputError):
+    """An error that occurred while parsing a YAML file."""
{fameio-3.2.0 → fameio-3.3.0}/src/fameio/input/loader/__init__.py

@@ -32,8 +32,7 @@ FameYamlLoader.add_constructor(FameYamlLoader.INCLUDE_COMMAND, _include_callback


 def load_yaml(yaml_file_path: Path, path_resolver: PathResolver = PathResolver(), encoding: str | None = None) -> dict:
-    """
-    Loads the YAML file from given and returns its content as a dict
+    """Loads the YAML file from given `yaml_file_path` and returns its content as a dict.

     Args:
         yaml_file_path: Path to the YAML file that is to be read

@@ -52,19 +51,18 @@ def load_yaml(yaml_file_path: Path, path_resolver: PathResolver = PathResolver()


 def _update_current_controller(path_resolver: PathResolver, encoding: str | None) -> None:
-    """Updates the current LoaderController to use the given `path_resolver` and `encoding`"""
+    """Updates the current LoaderController to use the given `path_resolver` and `encoding`."""
     __CONTROLLERS[0] = LoaderController(path_resolver, encoding)


 def validate_yaml_file_suffix(yaml_file: Path) -> None:
-    """
-    Ensures that given file has a file suffix compatible with YAML
+    """Ensures that given file has a file suffix compatible with YAML.

     Args:
         yaml_file: that is to be checked for suffix correctness

     Raises:
-        YamlLoaderError: if given file has no YAML-associated file suffix
+        YamlLoaderError: if given file has no YAML-associated file suffix, logged with level "CRITICAL"
     """
     if yaml_file.suffix.lower() not in ALLOWED_SUFFIXES:
         raise log_critical(YamlLoaderError(_ERR_NO_YAML_SUFFIX.format(ALLOWED_SUFFIXES, yaml_file)))