fameio 3.2.0__py3-none-any.whl → 3.4.0__py3-none-any.whl

This diff shows the changes between the two publicly released package versions as they appear in their public registry; it is provided for informational purposes only.
Files changed (53)
  1. fameio/cli/convert_results.py +4 -6
  2. fameio/cli/make_config.py +3 -5
  3. fameio/cli/options.py +6 -4
  4. fameio/cli/parser.py +53 -29
  5. fameio/cli/reformat.py +58 -0
  6. fameio/input/__init__.py +4 -4
  7. fameio/input/loader/__init__.py +4 -6
  8. fameio/input/loader/controller.py +11 -16
  9. fameio/input/loader/loader.py +11 -9
  10. fameio/input/metadata.py +26 -29
  11. fameio/input/resolver.py +4 -6
  12. fameio/input/scenario/agent.py +18 -16
  13. fameio/input/scenario/attribute.py +85 -31
  14. fameio/input/scenario/contract.py +78 -38
  15. fameio/input/scenario/exception.py +3 -6
  16. fameio/input/scenario/fameiofactory.py +7 -12
  17. fameio/input/scenario/generalproperties.py +7 -8
  18. fameio/input/scenario/scenario.py +14 -18
  19. fameio/input/scenario/stringset.py +5 -6
  20. fameio/input/schema/agenttype.py +8 -10
  21. fameio/input/schema/attribute.py +30 -36
  22. fameio/input/schema/java_packages.py +6 -7
  23. fameio/input/schema/schema.py +9 -11
  24. fameio/input/validator.py +178 -41
  25. fameio/input/writer.py +20 -29
  26. fameio/logs.py +28 -19
  27. fameio/output/agent_type.py +14 -16
  28. fameio/output/conversion.py +9 -12
  29. fameio/output/csv_writer.py +33 -23
  30. fameio/output/data_transformer.py +11 -11
  31. fameio/output/execution_dao.py +170 -0
  32. fameio/output/input_dao.py +16 -19
  33. fameio/output/output_dao.py +7 -7
  34. fameio/output/reader.py +8 -10
  35. fameio/output/yaml_writer.py +2 -3
  36. fameio/scripts/__init__.py +15 -4
  37. fameio/scripts/convert_results.py +18 -17
  38. fameio/scripts/exception.py +1 -1
  39. fameio/scripts/make_config.py +3 -4
  40. fameio/scripts/reformat.py +71 -0
  41. fameio/scripts/reformat.py.license +3 -0
  42. fameio/series.py +78 -47
  43. fameio/time.py +56 -18
  44. fameio/tools.py +42 -4
  45. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/METADATA +64 -40
  46. fameio-3.4.0.dist-info/RECORD +60 -0
  47. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/entry_points.txt +1 -0
  48. fameio-3.2.0.dist-info/RECORD +0 -56
  49. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/LICENSE.txt +0 -0
  50. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/LICENSES/Apache-2.0.txt +0 -0
  51. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/LICENSES/CC-BY-4.0.txt +0 -0
  52. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/LICENSES/CC0-1.0.txt +0 -0
  53. {fameio-3.2.0.dist-info → fameio-3.4.0.dist-info}/WHEEL +0 -0
fameio/output/agent_type.py
@@ -10,7 +10,7 @@ from fameio.output import OutputError
 
 
 class AgentType:
-    """Provides information derived from an underlying protobuf AgentType"""
+    """Provides information derived from an underlying protobuf AgentType."""
 
     def __init__(self, agent_type: Output.AgentType) -> None:
         self._agent_type = agent_type
@@ -53,18 +53,17 @@ class AgentType:
 
 
 class AgentTypeError(OutputError):
-    """Indicates an error with the agent types definitions"""
+    """Indicates an error with the agent types definitions."""
 
 
 class AgentTypeLog:
-    """Stores data about collected agent types"""
+    """Stores data about collected agent types."""
 
     _ERR_AGENT_TYPE_MISSING = "Requested AgentType `{}` not found."
     _ERR_DOUBLE_DEFINITION = "Just one definition allowed per AgentType. Found multiple for {}. File might be corrupt."
 
     def __init__(self, _agent_name_filter_list: list[str]) -> None:
-        """
-        Initialises new AgentTypeLog
+        """Initialises new AgentTypeLog.
 
         Args:
             _agent_name_filter_list: list of agent type names that are requested for output data extraction
@@ -76,8 +75,10 @@ class AgentTypeLog:
         self._agents_with_output: list[str] = []
 
     def update_agents(self, new_types: dict[str, Output.AgentType]) -> None:
-        """
-        If any new `agent_types` are provided, checks if they are requested for extraction, and, if so, saves them
+        """Saves `new_types` if they are requested for extraction.
+
+        If any new agent types are provided, checks if they are requested for extraction, and, if so, saves them.
+        Agent types not requested for extraction are ignored.
 
         Args:
             new_types: to be saved (if requested for extraction)
@@ -94,8 +95,7 @@ class AgentTypeLog:
         self._requested_agent_types.update(filtered_types)
 
     def _filter_agents_by_name(self, new_types: dict[str, Output.AgentType]) -> dict[str, Output.AgentType]:
-        """
-        Removes and entries from `new_types` not on `agent_name_filter_list`
+        """Removes and entries from `new_types` not on `agent_name_filter_list`.
 
         Args:
             new_types: to be filtered
@@ -112,8 +112,7 @@ class AgentTypeLog:
         return new_types
 
     def _ensure_no_duplication(self, filtered_types: dict[str, Output.AgentType]) -> None:
-        """
-        Ensures no duplicate agent type definitions occur
+        """Ensures no duplicate agent type definitions occur.
 
         Args:
             filtered_types: to be checked for duplications with already registered types
@@ -126,12 +125,11 @@ class AgentTypeLog:
             raise log_error(AgentTypeError(self._ERR_DOUBLE_DEFINITION.format(agent_name)))
 
     def has_any_agent_type(self) -> bool:
-        """Returns True if any agent type was registered so far present"""
+        """Returns True if any agent type was registered so far present."""
         return len(self._requested_agent_types) > 0
 
     def get_agent_type(self, agent_type_name: str) -> AgentType:
-        """
-        Returns the requested type of agent
+        """Returns the requested type of agent.
 
         Args:
             agent_type_name: requested name of agent type
@@ -147,9 +145,9 @@ class AgentTypeLog:
         return AgentType(self._requested_agent_types[agent_type_name])
 
     def is_requested(self, agent_name: str) -> bool:
-        """Returns True if given agent_name is known and requested"""
+        """Returns True if given agent_name is known and requested."""
         return agent_name in self._requested_agent_types
 
     def get_agents_with_output(self) -> list[str]:
-        """Returns all names of agents that had output"""
+        """Returns all names of agents that had output."""
        return self._agents_with_output
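
The agent_type.py hunks above are docstring-only (Google-style summaries with trailing periods); no signatures change. For orientation, a minimal usage sketch of `AgentTypeLog` based on the signatures shown in this diff (the empty `collected_types` dict stands in for agent types read from a protobuf output file):

```python
from fameio.output.agent_type import AgentTypeLog

# Stand-in for a dict[str, Output.AgentType] read from a protobuf output file
collected_types: dict = {}

type_log = AgentTypeLog(["EnergyExchange"])  # only listed agent types are requested for extraction
type_log.update_agents(collected_types)      # types not on the filter list are ignored
if type_log.has_any_agent_type() and type_log.is_requested("EnergyExchange"):
    agent_type = type_log.get_agent_type("EnergyExchange")  # raises AgentTypeError if unknown
```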
fameio/output/conversion.py
@@ -18,12 +18,11 @@ _ERR_NEGATIVE = "StepsBefore and StepsAfter must be Zero or positive integers"
 
 
 class ConversionError(OutputError):
-    """An error that occurred during conversion of output data"""
+    """An error that occurred during conversion of output data."""
 
 
 def apply_time_merging(data: dict[str | None, pd.DataFrame], config: list[int] | None) -> None:
-    """
-    Applies merging of TimeSteps inplace for given `data`
+    """Applies merging of TimeSteps inplace for given `data`.
 
     Args:
         data: one or multiple DataFrames of time series; depending on the given config, contents might be modified
@@ -46,7 +45,7 @@ def apply_time_merging(data: dict[str | None, pd.DataFrame], config: list[int] |
 def _apply_time_merging(
     dataframes: dict[str | None, pd.DataFrame], offset: int, period: int, first_positive_focal_point: int
 ) -> None:
-    """Applies time merging to `data` based on given `offset`, `period`, and `first_positive_focal_point`"""
+    """Applies time merging to `data` based on given `offset`, `period`, and `first_positive_focal_point`."""
     log().debug("Grouping TimeSteps...")
     for key in dataframes.keys():
         df = dataframes[key]
@@ -57,8 +56,7 @@ def _apply_time_merging(
 
 
 def _merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
-    """
-    Returns `time_step` rounded to its corresponding focal point
+    """Returns `time_step` rounded to its corresponding focal point.
 
     Args:
         time_step: TimeStep to round
@@ -73,8 +71,7 @@ def _merge_time(time_step: int, focal_time: int, offset: int, period: int) -> in
 
 
 def apply_time_option(data: dict[str | None, pd.DataFrame], mode: TimeOptions) -> None:
-    """
-    Applies time option based on given `mode` inplace of given `data`
+    """Applies time option based on given `mode` inplace of given `data`.
 
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` might be modified (depending on mode)
@@ -97,13 +94,13 @@ def apply_time_option(data: dict[str | None, pd.DataFrame], mode: TimeOptions) -
 
 
 def _convert_time_index(data: dict[str | None, pd.DataFrame], datetime_format: str) -> None:
-    """
-    Inplace replacement of `TimeStep` column in MultiIndex of each item of `data` from FAME's time steps` to DateTime
-    in given `date_format`
+    """Replaces (inplace) `TimeStep` column in MultiIndex of each item of `data` to DateTime.
+
+    Format of the resulting DateTime is determined by given `date_format`.
 
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` will be modified
-        datetime_format: used for the conversion
+        datetime_format: determines result of the conversion
 
     Raises:
         TimeConversionError: if time cannot be converted, logged with level "ERROR"
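
The conversion.py hunks also only rework docstrings; `apply_time_merging` and `apply_time_option` keep their signatures. A minimal sketch of the two entry points, assuming the frames are indexed by (AgentId, TimeStep) as produced by the data transformer and assuming `TimeOptions` is importable from fameio.cli.options (both assumptions, not shown in this diff):

```python
import pandas as pd

from fameio.cli.options import TimeOptions  # assumed import location
from fameio.output.conversion import apply_time_merging, apply_time_option

frames = {
    None: pd.DataFrame({"AgentId": [1], "TimeStep": [0], "Value": [42.0]}).set_index(["AgentId", "TimeStep"])
}

apply_time_merging(frames, config=None)          # None skips merging; otherwise TimeSteps are grouped in place
apply_time_option(frames, mode=TimeOptions.UTC)  # rewrites the TimeStep index level in place
```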
fameio/output/csv_writer.py
@@ -15,11 +15,11 @@ from fameio.tools import ensure_path_exists
 
 
 class CsvWriterError(OutputError):
-    """An error occurred during writing a CSV file"""
+    """An error occurred during writing a CSV file."""
 
 
 class CsvWriter:
-    """Writes dataframes to different csv files"""
+    """Writes dataframes to different csv files."""
 
     _ERR_DIR_CREATE = "Could not create directory for output files: '{}'"
     _ERR_FILE_OPEN = "Could not open file for writing: '{}'"
@@ -31,7 +31,8 @@ class CsvWriter:
     CSV_FILE_SUFFIX = ".csv"
 
     def __init__(self, config_output: Path, input_file_path: Path, single_export: bool) -> None:
-        """
+        """Constructs a new CsvWriter.
+
         Raises:
             CsvWriterError: if output folder could not be created, logged with level "ERROR"
         """
@@ -42,7 +43,7 @@ class CsvWriter:
 
     @staticmethod
     def _get_output_folder_name(config_output: Path, input_file_path: Path) -> Path:
-        """Returns name of the output folder derived either from the specified `config_output` or `input_file_path`"""
+        """Returns name of the output folder derived either from the specified `config_output` or `input_file_path`."""
         if config_output:
             output_folder_name: str | Path = config_output
             log().info(CsvWriter._INFO_USING_PATH.format(config_output))
@@ -52,8 +53,7 @@ class CsvWriter:
         return Path(output_folder_name)
 
     def _create_output_folder(self) -> None:
-        """
-        Creates output folder if not yet present
+        """Creates output folder if not yet present.
 
         Raises:
             CsvWriterError: if output folder could not be created, logged with level "ERROR"
@@ -66,8 +66,7 @@ class CsvWriter:
             raise log_error(CsvWriterError(self._ERR_DIR_CREATE.format(self._output_folder))) from e
 
     def write_to_files(self, agent_name: str, data: dict[None | str, pd.DataFrame]) -> None:
-        """
-        Writes `data` for given `agent_name` to .csv file(s)
+        """Writes `data` for given `agent_name` to .csv file(s).
 
         Args:
             agent_name: name of agent whose data are to be written to file(s)
@@ -86,12 +85,11 @@ class CsvWriter:
             identifier = self._get_identifier(agent_name, column_name)
             self._write_data_frame(column_data, identifier)
 
-    def write_time_series_to_disk(self, timeseries_manager: TimeSeriesManager) -> None:
-        """
-        Writes time_series of given `timeseries_manager` to disk
+    def write_all_time_series_to_disk(self, timeseries_manager: TimeSeriesManager) -> None:
+        """Writes time_series of given `timeseries_manager` to disk.
 
         Args:
-            timeseries_manager:
+            timeseries_manager: to provide the time series that are to be written
 
         Raises:
             CsvWriterError: if data could not be written to disk, logged on level "ERROR"
@@ -100,12 +98,24 @@ class CsvWriter:
             if data is not None:
                 target_path = Path(self._output_folder, name)
                 ensure_path_exists(target_path.parent)
-                self._dataframe_to_csv(data, target_path, header=False, index=False, mode="w")
+                self.write_single_time_series_to_disk(data, target_path)
 
     @staticmethod
-    def _dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: str) -> None:
+    def write_single_time_series_to_disk(data: pd.DataFrame, file: Path) -> None:
+        """Writes given timeseries the provided file path.
+
+        Args:
+            data: to be written
+            file: target path of csv file
+
+        Raises:
+            CsvWriterError: if data could not be written to disk, logged on level "ERROR"
         """
-        Write given data to specified CSV file with specified parameters using semicolon separators
+        CsvWriter._dataframe_to_csv(data, file, header=False, index=False, mode="w")
+
+    @staticmethod
+    def _dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: str) -> None:
+        """Write given data to specified CSV file with specified parameters using semicolon separators.
 
         Args:
             data: to be written
@@ -135,9 +145,9 @@ class CsvWriter:
         return identifier
 
     def _write_data_frame(self, data: pd.DataFrame, identifier: str) -> None:
-        """
-        Appends `data` to existing csv file derived from `identifier` without headers,
-        or writes new file with headers instead
+        """Writes `data` to csv file derived from `identifier`.
+
+        Appends data if csv file exists, else writes new file with headers instead.
 
         Args:
             data: to be written to file
@@ -158,23 +168,23 @@ class CsvWriter:
         self._dataframe_to_csv(data, outfile_name, header=header, index=True, mode=mode)
 
     def _has_file(self, identifier: str) -> bool:
-        """Returns True if a file for given `identifier` was already written"""
+        """Returns True if a file for given `identifier` was already written."""
         return identifier in self._files
 
     def pop_all_file_paths(self) -> dict[str, Path]:
-        """Clears all stored file paths and returns their previous identifiers and their paths"""
+        """Clears all stored file paths and returns their previous identifiers and their paths."""
         current_files = self._files
         self._files = {}
         return current_files
 
     def _get_outfile_name(self, identifier: str) -> Path:
-        """Returns file path for given `agent_name` and (optional) `agent_id`"""
+        """Returns file path for given `agent_name` and (optional) `agent_id`."""
         return self._files[identifier]
 
     def _create_outfile_name(self, identifier: str) -> Path:
-        """Returns fully qualified file path based on given `agent_name` and (optional) `agent_id`"""
+        """Returns fully qualified file path based on given `agent_name` and (optional) `agent_id`."""
         return Path(self._output_folder, f"{identifier}{self.CSV_FILE_SUFFIX}")
 
     def _save_outfile_name(self, outfile_name: Path, identifier: str) -> None:
-        """Stores given name for given `agent_name` and (optional) `agent_id`"""
+        """Stores given name for given `agent_name` and (optional) `agent_id`."""
         self._files[identifier] = outfile_name
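
Beyond the docstring cleanup, csv_writer.py carries two interface changes: `write_time_series_to_disk` is renamed to `write_all_time_series_to_disk`, and a new public static method `write_single_time_series_to_disk` writes one DataFrame to a given path (semicolon-separated, no header, no index). A minimal sketch with hypothetical paths; `timeseries_manager` in the comments stands for a `TimeSeriesManager` instance:

```python
from pathlib import Path

import pandas as pd

from fameio.output.csv_writer import CsvWriter

writer = CsvWriter(config_output=Path("./results"), input_file_path=Path("run.pb"), single_export=False)

# fameio 3.2.0: writer.write_time_series_to_disk(timeseries_manager)
# fameio 3.4.0: writer.write_all_time_series_to_disk(timeseries_manager)

# New in 3.4.0: write a single series directly to a chosen file
series = pd.DataFrame({"TimeStep": [0, 3600], "Value": [1.0, 2.0]})
CsvWriter.write_single_time_series_to_disk(series, Path("./results/my_series.csv"))
```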
fameio/output/data_transformer.py
@@ -16,7 +16,7 @@ INDEX = ("AgentId", "TimeStep")
 
 
 class DataTransformer(ABC):
-    """Extracts and provides series data from parsed and processed output files for requested agents"""
+    """Extracts and provides series data from parsed and processed output files for requested agents."""
 
     MODES = {
         ResolveOptions.IGNORE: lambda: DataTransformerIgnore(),  # pylint: disable=unnecessary-lambda
@@ -29,8 +29,8 @@ class DataTransformer(ABC):
         return DataTransformer.MODES[complex_column_mode]()
 
     def extract_agent_data(self, series: list[Output.Series], agent_type: AgentType) -> dict[str | None, pd.DataFrame]:
-        """
-        Returns dict of DataFrame(s) containing all data from given `series` of given `agent_type`.
+        """Returns dict of DataFrame(s) containing all data from given `series` of given `agent_type`.
+
         When ResolveOption is `SPLIT`, the dict maps each complex column's name to the associated DataFrame.
         In any case, the dict maps `None` to a DataFrame with the content of all simple columns.
         """
@@ -56,7 +56,7 @@ class DataTransformer(ABC):
     def _extract_agent_data(
         self, series: list[Output.Series], agent_type: AgentType
     ) -> dict[int, dict[tuple, list[float | None | str]]]:
-        """Returns mapping of (agentId, timeStep) to fixed-length list of all output columns for given `class_name`"""
+        """Returns mapping of (agentId, timeStep) to fixed-length list of all output columns for given `class_name`."""
         container = DataTransformer._create_container(agent_type)
         mask_simple = agent_type.get_simple_column_mask()
         while series:
@@ -66,7 +66,7 @@ class DataTransformer(ABC):
 
     @staticmethod
     def _create_container(agent_type: AgentType) -> dict[int, dict]:
-        """Returns map of complex columns IDs to an empty dict, and one more for the remaining simple columns"""
+        """Returns map of complex columns IDs to an empty dict, and one more for the remaining simple columns."""
         field_ids = agent_type.get_complex_column_ids().union([DataTransformer.SIMPLE_COLUMN_INDEX])
         return {field_id: {} for field_id in field_ids}
 
@@ -76,7 +76,7 @@ class DataTransformer(ABC):
         mask_simple: list[bool],
         container: dict[int, dict[tuple, list[float | None | str]]],
     ) -> None:
-        """Adds data from given `series` to specified `container` dict as list"""
+        """Adds data from given `series` to specified `container` dict as list."""
         empty_list: list = [None] * len(mask_simple)
         for line in series.lines:
             index = (series.agent_id, line.time_step)
@@ -90,24 +90,24 @@ class DataTransformer(ABC):
 
     @staticmethod
     def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], base_index: tuple) -> None:
-        """Stores complex column data"""
+        """Stores complex column data."""
 
     @staticmethod
     def _get_column_map(agent_type: AgentType) -> dict[int, str]:
-        """Returns mapping of simple column IDs to their name for given `agent_type`"""
+        """Returns mapping of simple column IDs to their name for given `agent_type`."""
         return agent_type.get_simple_column_map()
 
 
 class DataTransformerIgnore(DataTransformer):
-    """Ignores complex columns on output"""
+    """Ignores complex columns on output."""
 
 
 class DataTransformerSplit(DataTransformer):
-    """Stores complex data columns split by column type"""
+    """Stores complex data columns split by column type."""
 
     @staticmethod
     def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], base_index: tuple) -> None:
-        """Adds inner data from `column` to given `container` - split by column type"""
+        """Adds inner data from `column` to given `container` - split by column type."""
         for entry in column.entries:
             index = base_index + tuple(entry.index_values)
             container[column.field_id][index] = entry.value
fameio/output/execution_dao.py (new file)
@@ -0,0 +1,170 @@
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
+from typing import Any, Final
+
+from fameprotobuf.data_storage_pb2 import DataStorage
+from fameprotobuf.execution_data_pb2 import ExecutionData
+from google.protobuf import message
+
+from fameio.logs import log_error
+from fameio.output import OutputError
+
+
+class ExecutionDataError(OutputError):
+    """Indicates an error during reconstruction of execution metadata from its protobuf representation."""
+
+
+VERSION_MAP: Final[dict[str, str]] = {
+    "fame_protobuf": "FameProtobuf",
+    "fame_io": "FameIo",
+    "fame_core": "FameCore",
+    "python": "Python",
+    "jvm": "JavaVirtualMachine",
+    "os": "OperatingSystem",
+}
+PROCESS_MAP: Final[dict[str, str]] = {
+    "core_count": "NumberOfCores",
+    "output_interval": "OutputIntervalInTicks",
+    "output_process": "ProcessControllingOutputs",
+}
+STATISTICS_MAP: Final[dict[str, str]] = {
+    "start": "SimulationBeginInRealTime",
+    "duration_in_ms": "SimulationWallTimeInMillis",
+    "tick_count": "SimulatedTicks",
+}
+
+
+class ExecutionDao:
+    """Data access object for execution metadata saved in protobuf."""
+
+    _ERR_MULTIPLE_VERSIONS = "More than two version metadata sections found: File is corrupt."
+    _ERR_MULTIPLE_CONFIGURATIONS = "More than one configuration metadata section found: File is corrupt."
+    _ERR_MULTIPLE_SIMULATIONS = "More than one simulation metadata section found: File is corrupt."
+    _ERR_NO_VERSION = "No version data found: File is either corrupt or was created with fameio version < 3.0."
+
+    KEY_COMPILATION: Final[str] = "InputCompilation"
+    KEY_RUN: Final[str] = "ModelRun"
+    KEY_VERSIONS: Final[str] = "SoftwareVersions"
+    KEY_PROCESSES: Final[str] = "ProcessConfiguration"
+    KEY_STATISTICS: Final[str] = "Statistics"
+
+    def __init__(self) -> None:
+        self._compile_versions: ExecutionData.VersionData | None = None
+        self._run_versions: ExecutionData.VersionData | None = None
+        self._run_configuration: ExecutionData.ProcessConfiguration | None = None
+        self._run_simulation: ExecutionData.Simulation | None = None
+
+    def store_execution_metadata(self, data_storages: list[DataStorage]) -> None:
+        """Scans given data storages for execution metadata.
+
+        If metadata are present, they are extracted for later inspection
+
+        Args:
+            data_storages: to be scanned for execution metadata
+
+        Raises:
+            ExecutionDataError: if more execution sections are found than expected, logged with level "ERROR"
+        """
+        for entry in [storage.execution for storage in data_storages if storage.HasField("execution")]:
+            if entry.HasField("version_data"):
+                self._add_version_data(entry.version_data)
+            if entry.HasField("configuration"):
+                self._add_configuration(entry.configuration)
+            if entry.HasField("simulation"):
+                self._add_simulation(entry.simulation)
+
+    def _add_version_data(self, data: ExecutionData.VersionData) -> None:
+        """Stores given version metadata.
+
+        Args:
+            data: version data saved during compilation (first call), or during model run (second call)
+
+        Raises:
+            ExecutionDataError: if both version data are already set, logged with level "ERROR"
+        """
+        if not self._compile_versions:
+            self._compile_versions = data
+        elif not self._run_versions:
+            self._run_versions = data
+        else:
+            raise log_error(ExecutionDataError(self._ERR_MULTIPLE_VERSIONS))
+
+    def _add_configuration(self, data: ExecutionData.ProcessConfiguration) -> None:
+        """Stores given process configuration metadata.
+
+        Args:
+            data: process configuration data to be saved
+
+        Raises:
+            ExecutionDataError: if process configuration data are already set, logged with level "ERROR"
+        """
+        if not self._run_configuration:
+            self._run_configuration = data
+        else:
+            raise log_error(ExecutionDataError(self._ERR_MULTIPLE_CONFIGURATIONS))
+
+    def _add_simulation(self, data: ExecutionData.Simulation) -> None:
+        """Stores given simulation metadata.
+
+        Args:
+            data: simulation metadata to be stored
+
+        Raises:
+            ExecutionDataError: if simulation metadata are already set, logged with level "ERROR"
+        """
+        if not self._run_simulation:
+            self._run_simulation = data
+        else:
+            raise log_error(ExecutionDataError(self._ERR_MULTIPLE_SIMULATIONS))
+
+    def get_fameio_version(self) -> str:
+        """Gets version of fameio used to create the input data.
+
+        Returns:
+            fameio version that was used to create the input data
+
+        Raises:
+            ExecutionDataError: if fameio version could not be read, logged with level "ERROR"
+        """
+        if self._compile_versions:
+            return self._compile_versions.fame_io
+        raise log_error(ExecutionDataError(self._ERR_NO_VERSION))
+
+    def get_metadata_dict(self) -> dict[str, Any]:
+        """Creates a dictionary from all provided execution metadata.
+
+        Returns:
+            dictionary with all execution metadata currently stored in this DAO
+        """
+        result = {
+            self.KEY_COMPILATION: {self.KEY_VERSIONS: self._get_dict(self._compile_versions, VERSION_MAP)},
+            self.KEY_RUN: {
+                self.KEY_VERSIONS: self._get_dict(self._run_versions, VERSION_MAP),
+                self.KEY_PROCESSES: self._get_dict(self._run_configuration, PROCESS_MAP),
+                self.KEY_STATISTICS: self._get_dict(self._run_simulation, STATISTICS_MAP),
+            },
+        }
+        return result
+
+    @staticmethod
+    def _get_dict(data: message, replacements: dict[str, str]) -> dict[str, str]:
+        """Searches for `replacements.keys()` in provided `data`.
+
+        If key is available, saves the corresponding data item to dict, associated to a name matching the value in `replacements`.
+
+        Args:
+            data: to extract data from
+            replacements: keys to be replaced by their values in the resulting dict
+
+        Returns:
+            a dictionary matching entries from `data` with their new keys as specified under "replacements"
+        """
+        versions = {}
+        if data is not None:
+            for key, replacement in replacements.items():
+                if data.HasField(key):
+                    versions[replacement] = getattr(data, key)
+        return versions
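
execution_dao.py is entirely new in 3.4.0 and provides recovery of execution metadata (software versions, process configuration, run statistics) from protobuf output files. A minimal, self-contained sketch of the intended call sequence; the empty list stands in for the `DataStorage` messages read from a real output file:

```python
from fameio.output.execution_dao import ExecutionDao

dao = ExecutionDao()
dao.store_execution_metadata([])    # normally: the list[DataStorage] read from a .pb output file
metadata = dao.get_metadata_dict()  # {"InputCompilation": {...}, "ModelRun": {...}}, empty sub-dicts here

# With real execution data, the fameio version that compiled the inputs is available as well;
# with no version data stored (as here), this call would raise ExecutionDataError instead:
# version = dao.get_fameio_version()
```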
fameio/output/input_dao.py
@@ -16,11 +16,11 @@ from fameio.series import TimeSeriesManager, TimeSeriesError
 
 
 class InputConversionError(OutputError):
-    """Indicates an error during reconstruction of input from its protobuf representation"""
+    """Indicates an error during reconstruction of input from its protobuf representation."""
 
 
 class InputDao:
-    """Data access object for inputs saved in protobuf"""
+    """Data access object for inputs saved in protobuf."""
 
     _ERR_NO_INPUTS = "No input data found on file."
     _ERR_MULTIPLE_INPUTS = "File corrupt. More than one input section found on file."
@@ -44,8 +44,7 @@ class InputDao:
         self._timeseries_manager: TimeSeriesManager = TimeSeriesManager()
 
     def store_inputs(self, data_storages: list[DataStorage]) -> None:
-        """
-        Extracts and stores Inputs in given DataStorages - if such are present
+        """Extracts and stores Inputs in given DataStorages - if such are present.
 
         Args:
             data_storages: to be scanned for InputData
@@ -53,8 +52,7 @@ class InputDao:
         self._inputs.extend([data_storage.input for data_storage in data_storages if data_storage.HasField("input")])
 
     def recover_inputs(self) -> tuple[TimeSeriesManager, Scenario]:
-        """
-        Recovers inputs to GeneralProperties, Schema, Agents, Contracts, Timeseries
+        """Recovers inputs to GeneralProperties, Schema, Agents, Contracts, Timeseries.
 
         Return:
             recovered timeseries and scenario
@@ -76,11 +74,13 @@ class InputDao:
         return self._timeseries_manager, scenario
 
     def _get_input_data(self) -> InputData:
-        """
-        Check that exactly one previously extracted input data exist, otherwise raises an exception
+        """Check that exactly one previously extracted input data exist, otherwise raises an exception.
+
+        Returns:
+            the previously extracted input data
 
         Raises:
-            InputConversionException: if no or more than one input is present, logged with level "ERROR"
+            InputConversionException: if no input, or more than one input is present, logged with level "ERROR"
         """
         if not self._inputs:
             raise log_error(InputConversionError(self._ERR_NO_INPUTS))
@@ -90,12 +90,12 @@ class InputDao:
 
     @staticmethod
     def _get_schema(input_data: InputData) -> Schema:
-        """Read and return Schema from given `input_data`"""
+        """Read and return Schema from given `input_data`."""
         return Schema.from_string(input_data.schema)
 
     @staticmethod
     def _get_general_properties(input_data: InputData) -> GeneralProperties:
-        """Read and return GeneralProperties from given `input_data`"""
+        """Read and return GeneralProperties from given `input_data`."""
         return GeneralProperties(
             run_id=input_data.run_id,
             simulation_start_time=input_data.simulation.start_time,
@@ -105,7 +105,7 @@ class InputDao:
 
     @staticmethod
     def _get_contracts(input_data: InputData) -> list[Contract]:
-        """Read and return Contracts from given `input_data`"""
+        """Read and return Contracts from given `input_data`."""
         return [
             Contract(
                 sender_id=contract.sender_id,
@@ -120,12 +120,11 @@ class InputDao:
         ]
 
     def _init_timeseries(self, input_data: InputData) -> None:
-        """Read timeseries from given `input_data` and initialise TimeSeriesManager"""
+        """Read timeseries from given `input_data` and initialise TimeSeriesManager."""
         self._timeseries_manager.reconstruct_time_series(list(input_data.time_series))
 
     def _get_agents(self, input_data: InputData, schema: Schema) -> list[Agent]:
-        """
-        Read and return Agents from given `input_data`
+        """Read and return Agents from given `input_data`.
 
         Args:
             input_data: to read agents from
@@ -153,8 +152,7 @@ class InputDao:
         return agents
 
     def _get_attributes(self, fields: list[NestedField], schematics: dict[str, AttributeSpecs]) -> dict[str, Any]:
-        """
-        Read and return all Attributes as Dictionary from given list of fields
+        """Read and return all Attributes as Dictionary from given list of fields.
 
         Args:
             fields: data fields representing attributes
@@ -172,8 +170,7 @@ class InputDao:
         return attributes
 
     def _get_field_value(self, field: NestedField, schematic: AttributeSpecs) -> Any:
-        """
-        Extracts and returns value(s) of given `field`
+        """Extracts and returns value(s) of given `field`.
 
         Args:
             field: to extract the value(s) from
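
The input_dao.py changes above are docstring-only; `InputDao` follows the same DAO pattern as `ExecutionDao`. A minimal sketch, again with an empty stand-in for the protobuf `DataStorage` messages read from a real output file:

```python
from fameio.output.input_dao import InputDao

dao = InputDao()
dao.store_inputs([])  # normally: the list[DataStorage] read from a .pb output file

# With real data, this returns the reconstructed time series manager and scenario;
# with the empty list above it would raise InputConversionError ("No input data found on file."):
# timeseries_manager, scenario = dao.recover_inputs()
```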