fameio 3.5.0__tar.gz → 3.5.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {fameio-3.5.0 → fameio-3.5.2}/CHANGELOG.md +16 -2
  2. {fameio-3.5.0 → fameio-3.5.2}/PKG-INFO +1 -1
  3. {fameio-3.5.0 → fameio-3.5.2}/pyproject.toml +1 -1
  4. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/loader/controller.py +8 -6
  5. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/scenario.py +3 -3
  6. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/agent_type.py +14 -16
  7. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/conversion.py +9 -4
  8. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/csv_writer.py +18 -38
  9. fameio-3.5.2/src/fameio/output/files.py +55 -0
  10. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/input_dao.py +54 -12
  11. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/json_writer.py +1 -2
  12. fameio-3.5.2/src/fameio/scripts/REUSE.toml +6 -0
  13. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/scripts/convert_results.py +32 -30
  14. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/scripts/reformat.py +25 -1
  15. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/series.py +9 -6
  16. fameio-3.5.0/src/fameio/scripts/__init__.py.license +0 -3
  17. fameio-3.5.0/src/fameio/scripts/convert_results.py.license +0 -3
  18. fameio-3.5.0/src/fameio/scripts/make_config.py.license +0 -3
  19. fameio-3.5.0/src/fameio/scripts/reformat.py.license +0 -3
  20. {fameio-3.5.0 → fameio-3.5.2}/LICENSE.txt +0 -0
  21. {fameio-3.5.0 → fameio-3.5.2}/LICENSES/Apache-2.0.txt +0 -0
  22. {fameio-3.5.0 → fameio-3.5.2}/LICENSES/CC-BY-4.0.txt +0 -0
  23. {fameio-3.5.0 → fameio-3.5.2}/LICENSES/CC-BY-ND-4.0.txt +0 -0
  24. {fameio-3.5.0 → fameio-3.5.2}/LICENSES/CC0-1.0.txt +0 -0
  25. {fameio-3.5.0 → fameio-3.5.2}/README.md +0 -0
  26. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/__init__.py +0 -0
  27. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/__init__.py +0 -0
  28. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/convert_results.py +0 -0
  29. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/make_config.py +0 -0
  30. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/options.py +0 -0
  31. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/parser.py +0 -0
  32. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/cli/reformat.py +0 -0
  33. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/__init__.py +0 -0
  34. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/loader/__init__.py +0 -0
  35. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/loader/loader.py +0 -0
  36. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/metadata.py +0 -0
  37. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/resolver.py +0 -0
  38. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/__init__.py +0 -0
  39. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/agent.py +0 -0
  40. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/attribute.py +0 -0
  41. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/contract.py +0 -0
  42. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/exception.py +0 -0
  43. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/fameiofactory.py +0 -0
  44. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/generalproperties.py +0 -0
  45. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/scenario/stringset.py +0 -0
  46. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/schema/__init__.py +0 -0
  47. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/schema/agenttype.py +0 -0
  48. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/schema/attribute.py +0 -0
  49. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/schema/java_packages.py +0 -0
  50. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/schema/schema.py +0 -0
  51. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/validator.py +0 -0
  52. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/input/writer.py +0 -0
  53. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/logs.py +0 -0
  54. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/__init__.py +0 -0
  55. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/data_transformer.py +0 -0
  56. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/execution_dao.py +0 -0
  57. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/__init__.py +0 -0
  58. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/compiler.py +0 -0
  59. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/locator.py +0 -0
  60. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/oeo_template.py +0 -0
  61. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/metadata/template_reader.py +0 -0
  62. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/output_dao.py +0 -0
  63. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/reader.py +0 -0
  64. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/output/yaml_writer.py +0 -0
  65. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/scripts/__init__.py +0 -0
  66. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/scripts/exception.py +0 -0
  67. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/scripts/make_config.py +0 -0
  68. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/time.py +0 -0
  69. {fameio-3.5.0 → fameio-3.5.2}/src/fameio/tools.py +0 -0
--- fameio-3.5.0/CHANGELOG.md
+++ fameio-3.5.2/CHANGELOG.md
@@ -1,7 +1,21 @@
 <!-- SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
 
 SPDX-License-Identifier: Apache-2.0 -->
-## [3.5.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.5.0) - 2025-07-02
+# Changelog
+## [3.5.2](https://gitlab.com/fame-framework/fame-io/-/tags/v3.5.2) - 2025-10-14
+### Fixed
+- Fix conversion of time stamps on input recovery based on user specifications #274 (@dlr_fn)
+
+## [3.5.1](https://gitlab.com/fame-framework/fame-io/-/tags/v3.5.1) - 2025-07-25
+### Fixed
+- Fix crash on result conversion if agent list is provided #266 (@dlr-cjs, @dlr_jk)
+- Fix missing recovery of scenario StringSets #264 (@dlr-cjs)
+- Fix missing recovery of scenario metadata #263 (@dlr-cjs)
+- Fix missing recovery of agent attribute metadata #265 (@dlr-cjs)
+- Fix unexpected abort of file conversion if any file cannot be converted #268 (@dlr-cjs)
+- Correct location of `metadata.json` if output folder is unspecified #262 (@dlr-cjs)
+
+## [3.5.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.5.0) - 2025-07-02
 ### Changed
 - Move most documentation from README to docs folder #200 (@dlr-cjs, @dlr_fn, @LeonardWilleke)
 - Update minimum fameprotobuf dependency to v2.1.0 #256 (@dlr-cjs)
@@ -14,7 +28,7 @@ SPDX-License-Identifier: Apache-2.0 -->
 - Save scenario attribute metadata to protobuf #255 (@dlr-cjs)
 - Create documentation with sphinx #47 (@dlr-cjs, @dlr_fn)
 
-## [3.4.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.4.0) - 2025-05-27
+## [3.4.0](https://gitlab.com/fame-framework/fame-io/-/tags/v3.4.0) - 2025-05-27
 ### Changed
 - Allow nesting of sender or receiver lists in contracts !228 (@dlr-cjs)
 
--- fameio-3.5.0/PKG-INFO
+++ fameio-3.5.2/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: fameio
-Version: 3.5.0
+Version: 3.5.2
 Summary: Tools for input preparation and output digestion of FAME models
 License: Apache-2.0
 Keywords: FAME,fameio,agent-based modelling,energy systems
--- fameio-3.5.0/pyproject.toml
+++ fameio-3.5.2/pyproject.toml
@@ -7,7 +7,7 @@ build-backend = "poetry.core.masonry.api"
 
 [project]
 name = "fameio"
-version = "3.5.0"
+version = "3.5.2"
 description = "Tools for input preparation and output digestion of FAME models"
 license = "Apache-2.0"
 readme = "README.md"
--- fameio-3.5.0/src/fameio/input/loader/controller.py
+++ fameio-3.5.2/src/fameio/input/loader/controller.py
@@ -31,7 +31,7 @@ class LoaderController:
     NODE_SPLIT_STRING: Final[str] = ":"
 
     _ERR_FILE_OPEN_ERROR = "Could not open file: '{}'"
-    _ERR_FILE_LOAD_ERROR = "Could not parse YAML file due to errors in (line:column): ({}:{})"
+    _ERR_FILE_LOAD_ERROR = "Could not parse file '{}' due to error in (line:column): ({}:{})"
     _ERR_NODE_MISSING = "'!include_node [{}, {}]': Cannot find '{}'"
     _ERR_NOT_LIST = "!include can only combine list-like elements from multiple files!"
     _WARN_NOTHING_TO_INCLUDE = "Could not find any files matching this '!include' directive '{}'"
@@ -51,11 +51,11 @@ class LoaderController:
         self._path_resolver = path_resolver
         self._encoding: str | None = encoding
 
-    def load(self, yaml_file_path: Path) -> dict:
+    def load(self, file_path: Path) -> dict:
         """Spawns a new FameYamlLoader, loads the given `yaml_file_path` and returns its content.
 
         Args:
-            yaml_file_path: path to YAML file that is to be loaded
+            file_path: path to YAML file that is to be loaded
 
         Returns:
             dictionary representation of loaded file
@@ -64,14 +64,16 @@ class LoaderController:
             YamlLoaderError: if file could not be read, logged with level "CRITICAL"
         """
         try:
-            with open(yaml_file_path, "r", encoding=self._encoding) as configfile:
+            with open(file_path, "r", encoding=self._encoding) as configfile:
                 try:
                     data = yaml.load(configfile, self._spawn_loader_builder())  # type: ignore[arg-type]
                 except yaml.YAMLError as e:
                     line, column = self._get_problem_position(e)
-                    raise log_critical(YamlLoaderError(self._ERR_FILE_LOAD_ERROR.format(line, column))) from e
+                    raise log_critical(
+                        YamlLoaderError(self._ERR_FILE_LOAD_ERROR.format(file_path, line, column))
+                    ) from e
         except OSError as e:
-            raise log_critical(YamlLoaderError(self._ERR_FILE_OPEN_ERROR.format(yaml_file_path))) from e
+            raise log_critical(YamlLoaderError(self._ERR_FILE_OPEN_ERROR.format(file_path))) from e
         return data
 
     @staticmethod
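
The reworded load error now names the offending file. A minimal sketch of how the new template resolves, assuming a parse failure at line 3, column 7 of a hypothetical scenario.yaml:

```python
# Illustration only; mirrors _ERR_FILE_LOAD_ERROR from controller.py above.
template = "Could not parse file '{}' due to error in (line:column): ({}:{})"
print(template.format("scenario.yaml", 3, 7))
# Could not parse file 'scenario.yaml' due to error in (line:column): (3:7)
```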
--- fameio-3.5.0/src/fameio/input/scenario/scenario.py
+++ fameio-3.5.2/src/fameio/input/scenario/scenario.py
@@ -5,7 +5,7 @@
 
 from __future__ import annotations
 
-from typing import Final, Any
+from typing import Final, Any, Optional
 
 from fameio.input import SchemaError
 from fameio.input.metadata import Metadata
@@ -34,8 +34,8 @@ class Scenario(Metadata):
     _ERR_MULTI_CONTRACT = "Could not create scenario: Definition of Contracts has errors: {}"
     _ERR_CONTRACT = "Could not create scenario: Definition of Contract has errors: {}"
 
-    def __init__(self, schema: Schema, general_props: GeneralProperties) -> None:
-        super().__init__()
+    def __init__(self, schema: Schema, general_props: GeneralProperties, metadata: Optional[dict] = None) -> None:
+        super().__init__({Metadata.KEY_METADATA: metadata})
         self._schema = schema
         self._general_props = general_props
         self._string_sets: dict[str, StringSet] = {}
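
With the widened constructor, a recovered `Scenario` can carry its metadata block again (see the `InputDao` changes below). A sketch of both call forms, where `schema` and `general_props` are assumed to be previously loaded `Schema` and `GeneralProperties` instances:

```python
from fameio.input.scenario import Scenario

# Sketch only: `schema` and `general_props` stand in for real, loaded instances.
recovered = Scenario(schema, general_props, metadata={"author": "dlr"})
legacy = Scenario(schema, general_props)  # metadata defaults to None, as in 3.5.0
```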
--- fameio-3.5.0/src/fameio/output/agent_type.py
+++ fameio-3.5.2/src/fameio/output/agent_type.py
@@ -77,24 +77,22 @@ class AgentTypeLog:
         self._agents_with_output: list[str] = []
 
     def update_agents(self, new_types: dict[str, Output.AgentType]) -> None:
-        """Saves `new_types` if they are requested for extraction.
+        """Saves new types of agents for later inspection.
 
-        If any new agent types are provided, checks if they are requested for extraction, and, if so, saves them.
-        Agent types not requested for extraction are ignored.
+        If any new agent types are provided, registers them as "agents with output"
+        Then, checks if they are requested for extraction, and, if so, saves them as "requested agent types".
 
         Args:
-            new_types: to be saved (if requested for extraction)
+            new_types: to be logged
 
         Raises:
             AgentTypeError: if agent type was already registered, logged with level "ERROR"
         """
-        if not new_types:
-            return
-
-        self._agents_with_output.extend(list(new_types.keys()))
-        filtered_types = self._filter_agents_by_name(new_types)
-        self._ensure_no_duplication(filtered_types)
-        self._requested_agent_types.update(filtered_types)
+        if new_types is not None and len(new_types) > 0:
+            self._agents_with_output.extend(list(new_types.keys()))
+            filtered_types = self._filter_agents_by_name(new_types)
+            self._ensure_no_duplication(filtered_types)
+            self._requested_agent_types.update(filtered_types)
 
     def _filter_agents_by_name(self, new_types: dict[str, Output.AgentType]) -> dict[str, Output.AgentType]:
         """Removes and entries from `new_types` not on `agent_name_filter_list`.
@@ -127,7 +125,7 @@ class AgentTypeLog:
             raise log_error(AgentTypeError(self._ERR_DOUBLE_DEFINITION.format(agent_name)))
 
     def has_any_agent_type(self) -> bool:
-        """Returns True if any agent type was registered so far present."""
+        """Returns True if any agent type was registered so far."""
         return len(self._requested_agent_types) > 0
 
     def get_agent_type(self, agent_type_name: str) -> AgentType:
@@ -155,13 +153,13 @@ class AgentTypeLog:
         return self._agents_with_output
 
     def get_agent_columns(self) -> dict[str, list[str]]:
-        """Returns all agents with output mapped to their simple output columns.
+        """Returns all agents that were not filtered, with their output mapped to their simple output columns.
 
         Raises:
             AgentTypeError: if - somehow - an agent type is not registered but has data, logged with level "ERROR"
         """
         result = {}
-        for agent in self.get_agents_with_output():
-            agent_type = self.get_agent_type(agent)
-            result[agent] = list(agent_type.get_simple_column_map().values())
+        for agent_name in self._requested_agent_types:
+            agent_type = self.get_agent_type(agent_name)
+            result[agent_name] = list(agent_type.get_simple_column_map().values())
         return result
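
The reordered logic accompanies the #266 fix: every incoming type is recorded as an "agent with output" before the name filter is applied, and `get_agent_columns` now iterates only the requested (unfiltered) types. A behavioural sketch, assuming `new_types` maps type names to protobuf `Output.AgentType` messages read from a results file:

```python
# Sketch: new_types = {"Storage": ..., "Exchange": ...} from the output file.
type_log = AgentTypeLog(_agent_name_filter_list=["Storage"])
type_log.update_agents(new_types)
type_log.get_agents_with_output()  # ["Storage", "Exchange"]: everything seen
type_log.has_any_agent_type()      # True: "Storage" passed the filter
type_log.get_agent_columns()       # columns for "Storage" only
```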
--- fameio-3.5.0/src/fameio/output/conversion.py
+++ fameio-3.5.2/src/fameio/output/conversion.py
@@ -12,7 +12,10 @@ import pandas as pd
 from fameio.cli.options import TimeOptions
 from fameio.logs import log_error, log
 from fameio.output import OutputError
-from fameio.time import FameTime, ConversionError as TimeConversionError
+from fameio.series import TimeSeriesManager
+from fameio.time import FameTime, ConversionError as TimeConversionError, DATE_FORMAT as DATETIME_FORMAT_FAME
+
+DATETIME_FORMAT_UTC = "%Y-%m-%d %H:%M:%S"
 
 _ERR_UNIMPLEMENTED = "Time conversion mode '{}' not implemented."
 _ERR_TIME_CONVERSION = "Conversion of timestamps failed."
@@ -86,9 +89,9 @@ def apply_time_option(data: dict[str | None, pd.DataFrame], mode: TimeOptions) -
         if mode == TimeOptions.INT:
             log().debug("No time conversion...")
         elif mode == TimeOptions.UTC:
-            _convert_time_index(data, "%Y-%m-%d %H:%M:%S")
+            _convert_time_index(data, DATETIME_FORMAT_UTC)
         elif mode == TimeOptions.FAME:
-            _convert_time_index(data, "%Y-%m-%d_%H:%M:%S")
+            _convert_time_index(data, DATETIME_FORMAT_FAME)
         else:
             raise log_error(ConversionError(_ERR_UNIMPLEMENTED.format(mode)))
     except TimeConversionError as e:
@@ -111,5 +114,7 @@ def _convert_time_index(data: dict[str | None, pd.DataFrame], datetime_format: s
     for _, df in data.items():
         index_columns = df.index.names
         df.reset_index(inplace=True)
-        df["TimeStep"] = df["TimeStep"].apply(lambda t: FameTime.convert_fame_time_step_to_datetime(t, datetime_format))
+        df[TimeSeriesManager.KEY_ROW_TIME] = df[TimeSeriesManager.KEY_ROW_TIME].apply(
+            lambda t: FameTime.convert_fame_time_step_to_datetime(t, datetime_format)
+        )
         df.set_index(keys=index_columns, inplace=True)
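
A small runnable sketch of `apply_time_option` with the new constants, assuming a result frame indexed by FAME time steps in a `TimeStep` column (the name now shared via `TimeSeriesManager.KEY_ROW_TIME`):

```python
import pandas as pd

from fameio.cli.options import TimeOptions
from fameio.output.conversion import apply_time_option

# One agent's results, indexed by raw FAME time steps:
df = pd.DataFrame({"TimeStep": [0, 3600], "Value": [1.0, 2.0]}).set_index("TimeStep")
apply_time_option({"MyAgent": df}, mode=TimeOptions.UTC)  # converts the index in place
```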
--- fameio-3.5.0/src/fameio/output/csv_writer.py
+++ fameio-3.5.2/src/fameio/output/csv_writer.py
@@ -6,11 +6,14 @@
 from __future__ import annotations
 
 from pathlib import Path
+from typing import Literal
 
 import pandas as pd
 
-from fameio.logs import log, log_error
+from fameio.cli.options import TimeOptions
+from fameio.logs import log_error
 from fameio.output import OutputError
+from fameio.output.conversion import apply_time_option
 from fameio.output.data_transformer import INDEX
 from fameio.series import TimeSeriesManager
 from fameio.tools import ensure_path_exists
@@ -23,49 +26,21 @@ class CsvWriterError(OutputError):
 class CsvWriter:
     """Writes dataframes to different csv files."""
 
-    _ERR_DIR_CREATE = "Could not create directory for output files: '{}'"
     _ERR_FILE_OPEN = "Could not open file for writing: '{}'"
     _ERR_FILE_WRITE = "Could not write to file '{}' due to: {}"
 
-    _INFO_USING_PATH = "Using specified output path: {}"
-    _INFO_USING_DERIVED_PATH = "No output path specified - writing to new local folder: {}"
-
     CSV_FILE_SUFFIX = ".csv"
 
-    def __init__(self, config_output: Path, input_file_path: Path, single_export: bool) -> None:
+    def __init__(self, output_folder: Path, single_export: bool) -> None:
         """Constructs a new CsvWriter.
 
-        Raises:
-            CsvWriterError: if output folder could not be created, logged with level "ERROR"
+        Args:
+            output_folder: to write the output files to
+            single_export: if true, one output file per unique agent is created
         """
         self._single_export = single_export
-        self._output_folder = self._get_output_folder_name(config_output, input_file_path)
+        self._output_folder = output_folder
         self._files: dict[str, Path] = {}
-        self._create_output_folder()
-
-    @staticmethod
-    def _get_output_folder_name(config_output: Path, input_file_path: Path) -> Path:
-        """Returns name of the output folder derived either from the specified `config_output` or `input_file_path`."""
-        if config_output:
-            output_folder_name: str | Path = config_output
-            log().info(CsvWriter._INFO_USING_PATH.format(config_output))
-        else:
-            output_folder_name = input_file_path.stem
-            log().info(CsvWriter._INFO_USING_DERIVED_PATH.format(output_folder_name))
-        return Path(output_folder_name)
-
-    def _create_output_folder(self) -> None:
-        """Creates output folder if not yet present.
-
-        Raises:
-            CsvWriterError: if output folder could not be created, logged with level "ERROR"
-        """
-        log().debug("Creating output folder if required...")
-        if not self._output_folder.is_dir():
-            try:
-                self._output_folder.mkdir(parents=True)
-            except OSError as e:
-                raise log_error(CsvWriterError(self._ERR_DIR_CREATE.format(self._output_folder))) from e
 
     def write_to_files(self, agent_name: str, data: dict[None | str, pd.DataFrame]) -> None:
         """Writes `data` for given `agent_name` to .csv file(s).
@@ -87,11 +62,14 @@
             identifier = self._get_identifier(agent_name, column_name)
             self._write_data_frame(column_data, identifier)
 
-    def write_all_time_series_to_disk(self, timeseries_manager: TimeSeriesManager) -> None:
+    def write_all_time_series_to_disk(
+        self, timeseries_manager: TimeSeriesManager, time_mode: TimeOptions | None = None
+    ) -> None:
         """Writes time_series of given `timeseries_manager` to disk.
 
         Args:
             timeseries_manager: to provide the time series that are to be written
+            time_mode: mode of representing time in series that are to be written
 
         Raises:
             CsvWriterError: if data could not be written to disk, logged on level "ERROR"
@@ -100,6 +78,8 @@
             if data is not None:
                 target_path = Path(self._output_folder, name)
                 ensure_path_exists(target_path.parent)
+                if time_mode:
+                    apply_time_option(data={name: data}, mode=time_mode)
                 self.write_single_time_series_to_disk(data, target_path)
 
     @staticmethod
@@ -113,10 +93,10 @@
         Raises:
             CsvWriterError: if data could not be written to disk, logged on level "ERROR"
         """
-        CsvWriter._dataframe_to_csv(data, file, header=False, index=False, mode="w")
+        CsvWriter._dataframe_to_csv(data, file, header=False, index=True, mode="w")
 
     @staticmethod
-    def _dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: str) -> None:
+    def _dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: Literal["a", "w"]) -> None:
         """Write given data to specified CSV file in UTF8 encoding with specified parameters using semicolon separators.
 
         Args:
@@ -160,7 +140,7 @@
         """
         if self._has_file(identifier):
             outfile_name = self._get_outfile_name(identifier)
-            mode = "a"
+            mode: Literal["a", "w"] = "a"
             header = False
         else:
             outfile_name = self._create_outfile_name(identifier)
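
Folder derivation and creation moved out of `CsvWriter` into the new `fameio.output.files` module (next section), and time-series writing gained an optional conversion pass, part of the #274 fix. A sketch of the new call pattern, where `manager` is assumed to be a `TimeSeriesManager` recovered from a protobuf file:

```python
from pathlib import Path

from fameio.cli.options import TimeOptions
from fameio.output.csv_writer import CsvWriter

# Sketch: `manager` stands in for a populated TimeSeriesManager.
writer = CsvWriter(Path("results/recovered"), single_export=False)
writer.write_all_time_series_to_disk(manager, time_mode=TimeOptions.UTC)
# Omitting time_mode writes raw FAME time steps, as before.
```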
--- /dev/null
+++ fameio-3.5.2/src/fameio/output/files.py
@@ -0,0 +1,55 @@
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
+#
+# SPDX-License-Identifier: Apache-2.0
+"""Finding output folders and files, creating the output folder."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Final, Optional
+
+from fameio.logs import log, log_error
+from fameio.output import OutputError
+
+_ERR_DIR_CREATE = "Could not create directory for output files: '{}'"
+
+_INFO_USING_PATH = "Using specified output path: '{}'"
+_INFO_USING_DERIVED_PATH = "No output path specified - writing to new local folder: '{}'"
+
+_DEBUG_NEW_FOLDER = "Output folder '{}' not present, trying to create it..."
+_DEBUG_EXISTING_FOLDER = "Output folder '{}' already exists..."
+
+RECOVERED_INPUT_PATH: Final[str] = "./recovered"
+RECOVERED_SCENARIO_PATH: Final[str] = "./recovered/scenario.yaml"
+METADATA_FILE_NAME: Final[str] = "metadata.json"
+
+
+class OutputPathError(OutputError):
+    """An error that occurred during creation of the output path."""
+
+
+def get_output_folder_name(config_output: Optional[Path | str], input_file_path: Path) -> Path:
+    """Returns name of the output folder derived either from the specified `config_output` or `input_file_path`."""
+    if config_output:
+        output_folder_name = config_output
+        log().info(_INFO_USING_PATH.format(config_output))
+    else:
+        output_folder_name = input_file_path.stem
+        log().info(_INFO_USING_DERIVED_PATH.format(output_folder_name))
+    return Path(output_folder_name)
+
+
+def create_output_folder(output_path: Path) -> None:
+    """Creates output folder if not yet present.
+
+    Raises:
+        OutputPathError: if output folder could not be created, logged with level "ERROR"
+    """
+    if not output_path.is_dir():
+        log().debug(_DEBUG_NEW_FOLDER.format(output_path))
+        try:
+            output_path.mkdir(parents=True)
+        except OSError as e:
+            raise log_error(OutputPathError(_ERR_DIR_CREATE.format(output_path))) from e
+    else:
+        log().debug(_DEBUG_EXISTING_FOLDER.format(output_path))
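
The derivation rule in one runnable sketch: an explicit output path wins, otherwise the input file's stem names the folder:

```python
from pathlib import Path

from fameio.output.files import create_output_folder, get_output_folder_name

print(get_output_folder_name(Path("out"), Path("run42.pb")))  # out
print(get_output_folder_name(None, Path("run42.pb")))         # run42
create_output_folder(Path("run42"))  # creates the folder, or logs that it exists
```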
--- fameio-3.5.0/src/fameio/output/input_dao.py
+++ fameio-3.5.2/src/fameio/output/input_dao.py
@@ -4,15 +4,15 @@
 """Accessing input content of protobuf messages."""
 
 import ast
-from typing import Any
+from typing import Any, Optional
 
 from fameprotobuf.data_storage_pb2 import DataStorage
 from fameprotobuf.field_pb2 import NestedField
 from fameprotobuf.input_file_pb2 import InputData
 
-from fameio.input.scenario import GeneralProperties, Agent, Contract, Scenario
+from fameio.input.scenario import GeneralProperties, Agent, Contract, Scenario, StringSet, Attribute
 from fameio.input.schema import Schema, AttributeSpecs, AttributeType
-from fameio.logs import log_error
+from fameio.logs import log_error, log
 from fameio.output import OutputError
 from fameio.series import TimeSeriesManager, TimeSeriesError
@@ -28,6 +28,8 @@ class InputDao:
     _ERR_MULTIPLE_INPUTS = "File corrupt. More than one input section found on file."
     _ERR_NO_SCHEMA = "No schema found on file - cannot recover inputs."
     _ERR_SERIES_MISSING = "References time series '{}' was not registered on file."
+    _ERR_SCENARIO_METADATA = "Proceeding without metadata for scenario - could not be extracted due to: {}"
+    _ERR_STRING_SET_METADATA = "Proceeding without metadata for string set '{}' - could not be extracted due to: {}"
 
     _FIELD_NAME_MAP: dict = {
         AttributeType.STRING: "string_values",
@@ -65,7 +67,8 @@
         """
         input_data = self.get_input_data()
         schema = self._get_schema(input_data)
-        scenario = Scenario(schema, self._get_general_properties(input_data))
+        metadata = self._metadata_to_dict(input_data.metadata)
+        scenario = Scenario(schema, self._get_general_properties(input_data), metadata)
         for contract in self._get_contracts(input_data):
             scenario.add_contract(contract)
 
@@ -73,6 +76,9 @@
         for agent in self._get_agents(input_data, schema):
             scenario.add_agent(agent)
 
+        for name, string_set in self._get_string_sets(input_data).items():
+            scenario.add_string_set(name, string_set)
+
         return self._timeseries_manager, scenario
 
     def get_input_data(self) -> InputData:
@@ -95,6 +101,16 @@
         """Read and return Schema from given `input_data`."""
         return Schema.from_string(input_data.schema)
 
+    @staticmethod
+    def _metadata_to_dict(metadata: Optional[str] = None) -> dict:
+        """Convert given metadata `metadata to dict`, proceeds on error but logs given `message`"""
+        if metadata:
+            try:
+                return ast.literal_eval(metadata)
+            except (ValueError, TypeError, SyntaxError, MemoryError, RecursionError) as e:
+                log().error(InputDao._ERR_SCENARIO_METADATA.format(e))
+        return {}
+
     @staticmethod
     def _get_general_properties(input_data: InputData) -> GeneralProperties:
         """Read and return GeneralProperties from given `input_data`."""
@@ -121,6 +137,20 @@
             for contract in input_data.contracts
         ]
 
+    @staticmethod
+    def _get_string_sets(input_data: InputData) -> dict[str, StringSet]:
+        """Read and return StringSets from given `input_data`."""
+        string_sets = {}
+        for dao in input_data.string_sets:
+            values = {
+                entry.name: {StringSet.KEY_METADATA: InputDao._metadata_to_dict(entry.metadata)} for entry in dao.values
+            }
+            metadata = InputDao._metadata_to_dict(dao.metadata)
+            string_sets[dao.name] = StringSet.from_dict(
+                {StringSet.KEY_VALUES: values, StringSet.KEY_METADATA: metadata}
+            )
+        return string_sets
+
     def _init_timeseries(self, input_data: InputData) -> None:
         """Read timeseries from given `input_data` and initialise TimeSeriesManager."""
         self._timeseries_manager.reconstruct_time_series(list(input_data.time_series))
@@ -146,14 +176,14 @@
                 type_name=agent_dao.class_name,
                 metadata=ast.literal_eval(agent_dao.metadata) if agent_dao.metadata else None,
             )
-            attribute_dict = self._get_attributes(
+            attributes_dict = self._get_attributes_dict(
                 list(agent_dao.fields), schema.agent_types[agent_dao.class_name].attributes
             )
-            agent.init_attributes_from_dict(attribute_dict)
+            agent.init_attributes_from_dict(attributes_dict)
             agents.append(agent)
         return agents
 
-    def _get_attributes(self, fields: list[NestedField], schematics: dict[str, AttributeSpecs]) -> dict[str, Any]:
+    def _get_attributes_dict(self, fields: list[NestedField], schematics: dict[str, AttributeSpecs]) -> dict[str, dict]:
         """Read and return all Attributes as Dictionary from given list of fields.
 
         Args:
@@ -161,14 +191,15 @@
             schematics: description of the attributes associated by name
 
         Returns:
-            all recovered attributes and their associated values
+            all recovered attributes and their associated values as dictionary
 
         Raises:
             InputConversionError: if attributes could not be reconstructed, logged with level "ERROR"
         """
-        attributes: dict[str, Any] = {}
+        attributes: dict[str, dict[str, Any]] = {}
         for field in fields:
-            attributes[field.field_name] = self._get_field_value(field, schematics[field.field_name])
+            value = self._get_field_value(field, schematics[field.field_name])
+            attributes[field.field_name] = value if not field.metadata else self._get_field_dict(value, field.metadata)
         return attributes
 
     def _get_field_value(self, field: NestedField, schematic: AttributeSpecs) -> Any:
@@ -192,9 +223,20 @@
             raise log_error(InputConversionError(self._ERR_SERIES_MISSING.format(field.series_id))) from e
         if attribute_type is AttributeType.BLOCK:
             if schematic.is_list:
-                return [self._get_attributes(list(entry.fields), schematic.nested_attributes) for entry in field.fields]
-            return self._get_attributes(list(field.fields), schematic.nested_attributes)
+                return [
+                    self._get_attributes_dict(list(entry.fields), schematic.nested_attributes) for entry in field.fields
+                ]
+            return self._get_attributes_dict(list(field.fields), schematic.nested_attributes)
         value = getattr(field, self._FIELD_NAME_MAP[attribute_type])
         if schematic.is_list:
             return list(value)
         return list(value)[0]
+
+    def _get_field_dict(self, field_value: Any, metadata: str) -> dict[str, Any]:
+        """Returns dict with metadata and `field_value` associated with either singular or plural key, if is list."""
+        result: dict[str, Any] = {Attribute.KEY_METADATA: self._metadata_to_dict(metadata)}
+        if isinstance(field_value, list):
+            result[Attribute.KEY_VALUES] = field_value
+        else:
+            result[Attribute.KEY_VALUE] = field_value
+        return result
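
Metadata reaches the protobuf as a Python-literal string, so recovery (the #263/#264/#265 fixes) relies on `ast.literal_eval` and deliberately proceeds on failure, logging instead of raising. A sketch of the tolerant helper (private, shown for illustration only):

```python
from fameio.output.input_dao import InputDao

InputDao._metadata_to_dict("{'unit': 'MW'}")  # {'unit': 'MW'}
InputDao._metadata_to_dict(None)              # {}
InputDao._metadata_to_dict("not a literal")   # logs the error, returns {}
```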
--- fameio-3.5.0/src/fameio/output/metadata/json_writer.py
+++ fameio-3.5.2/src/fameio/output/metadata/json_writer.py
@@ -8,8 +8,7 @@ from pathlib import Path
 
 from fameio.logs import log, log_error
 from fameio.output import OutputError
-
-METADATA_FILE_NAME = "metadata.json"
+from fameio.output.files import METADATA_FILE_NAME
 
 _ERR_OPEN_FILE = "Could not open file for writing: '{}'"
 _INFO_DESTINATION = "Saving JSON to file to {}"
--- /dev/null
+++ fameio-3.5.2/src/fameio/scripts/REUSE.toml
@@ -0,0 +1,6 @@
+version = 1
+
+[[annotations]]
+path = ["*.py"]
+SPDX-FileCopyrightText = "2025 German Aerospace Center <fame@dlr.de>"
+SPDX-License-Identifier = "Apache-2.0"
--- fameio-3.5.0/src/fameio/scripts/convert_results.py
+++ fameio-3.5.2/src/fameio/scripts/convert_results.py
@@ -3,13 +3,13 @@ from __future__ import annotations
 
 import sys
 from pathlib import Path
-from typing import Any, BinaryIO
+from typing import Any, BinaryIO, Optional
 
 import pandas as pd
 
 from fameio.cli import update_default_config
 from fameio.cli.convert_results import handle_args, CLI_DEFAULTS as DEFAULT_CONFIG
-from fameio.cli.options import Options
+from fameio.cli.options import Options, TimeOptions
 from fameio.input import InputError
 from fameio.logs import fameio_logger, log, log_error, log_critical
 from fameio.output import OutputError
@@ -18,6 +18,12 @@ from fameio.output.conversion import apply_time_option, apply_time_merging
 from fameio.output.csv_writer import CsvWriter
 from fameio.output.data_transformer import DataTransformer, INDEX
 from fameio.output.execution_dao import ExecutionDao
+from fameio.output.files import (
+    get_output_folder_name,
+    create_output_folder,
+    RECOVERED_INPUT_PATH,
+    RECOVERED_SCENARIO_PATH,
+)
 from fameio.output.input_dao import InputDao
 from fameio.output.metadata.compiler import MetadataCompiler
 from fameio.output.metadata.json_writer import data_to_json_file
@@ -69,7 +75,10 @@ def _extract_and_convert_data(config: dict[Options, Any], file_stream: BinaryIO,
         OutputError: if file could not be opened or converted, logged with level "ERROR"
     """
     log().info("Reading and extracting data...")
-    output_writer = CsvWriter(config[Options.OUTPUT], file_path, config[Options.SINGLE_AGENT_EXPORT])
+    output_path = get_output_folder_name(config[Options.OUTPUT], file_path)
+    create_output_folder(output_path)
+
+    output_writer = CsvWriter(output_path, config[Options.SINGLE_AGENT_EXPORT])
     agent_type_log = AgentTypeLog(_agent_name_filter_list=config[Options.AGENT_LIST])
     data_transformer = DataTransformer.build(config[Options.RESOLVE_COMPLEX_FIELD])
     reader = Reader.get_reader(file=file_stream, read_single=config[Options.MEMORY_SAVING])
@@ -90,7 +99,7 @@ def _extract_and_convert_data(config: dict[Options, Any], file_stream: BinaryIO,
             output_writer.write_to_files(agent_name, data_frames)
 
     if config[Options.INPUT_RECOVERY]:
-        _recover_inputs(config, input_dao, execution_dao.get_fameio_version())
+        _recover_inputs(output_path, input_dao, execution_dao.get_fameio_version(), config[Options.TIME])
     if config[Options.MEMORY_SAVING]:
         _memory_saving_apply_conversions(config, output_writer)
 
@@ -100,17 +109,23 @@ def _extract_and_convert_data(config: dict[Options, Any], file_stream: BinaryIO,
         else:
            log().warning(_WARN_OUTPUT_MISSING)
     elif config[Options.METADATA]:
-        write_metadata(config, input_dao, execution_dao, agent_type_log)
+        compiler = MetadataCompiler(
+            input_data=input_dao.get_input_data(),
+            execution_data=execution_dao.get_metadata_dict(),
+            agent_columns=agent_type_log.get_agent_columns(),
+        )
+        write_metadata(output_path, config[Options.METADATA_TEMPLATE], compiler)
     log().info("Data conversion completed.")
 
 
-def _recover_inputs(config: dict[Options, Any], input_dao: InputDao, fameio_version: str) -> None:
+def _recover_inputs(output_path: Path, input_dao: InputDao, fameio_version: str, time_mode: TimeOptions) -> None:
     """Reads scenario configuration from provided `input_dao`.
 
     Args:
-        config: script configuration options
+        output_path: path to output files
         input_dao: to recover the input data from
         fameio_version: version of fameio that was used to create the input data
+        time_mode: mode of representing time in recovered input time series
 
     Raises:
         OutputError: if inputs could not be recovered or saved to files, logged with level "ERROR"
@@ -120,12 +135,10 @@ def _recover_inputs(config: dict[Options, Any], input_dao: InputDao, fameio_vers
         timeseries, scenario = input_dao.recover_inputs()
     except InputError as ex:
         raise log_error(OutputError(_ERR_RECOVER_INPUT.format(fameio_version))) from ex
-    base_path = config[Options.OUTPUT] if config[Options.OUTPUT] is not None else "./"
-    series_writer = CsvWriter(
-        config_output=Path(base_path, "./recovered"), input_file_path=Path("./"), single_export=False
-    )
-    series_writer.write_all_time_series_to_disk(timeseries)
-    data_to_yaml_file(scenario.to_dict(), Path(base_path, "./recovered/scenario.yaml"))
+
+    series_writer = CsvWriter(output_folder=Path(output_path, RECOVERED_INPUT_PATH), single_export=False)
+    series_writer.write_all_time_series_to_disk(timeseries, time_mode)
+    data_to_yaml_file(scenario.to_dict(), Path(output_path, RECOVERED_SCENARIO_PATH))
 
 
 def _memory_saving_apply_conversions(config: dict[Options, Any], output_writer: CsvWriter) -> None:
@@ -149,31 +162,20 @@ def _memory_saving_apply_conversions(config: dict[Options, Any], output_writer:
         output_writer.write_to_files(agent_name, parsed_data)
 
 
-def write_metadata(
-    config: dict[Options, Any], input_dao: InputDao, execution_dao: ExecutionDao, agent_type_log: AgentTypeLog
-):
+def write_metadata(output_path: Path, template_file: Optional[Path], compiler: MetadataCompiler):
     """Reads metadata templates, fills in available metadata, and writes output to a JSON file.
 
     Args:
-        config: to determined metadata template, and output path
-        input_dao: contains input data
-        execution_dao: contains execution metadata
-        agent_type_log: contains log about which agent output was created
+        output_path: path to output folder
+        template_file: path to metadata template (None allowed)
+        compiler: to compile metadata with
 
     Raises:
         OutputError: in case templates could not be read or filled-in, or JSON writing failed, logged with level "ERROR"
     """
-    compiler = MetadataCompiler(
-        input_data=input_dao.get_input_data(),
-        execution_data=execution_dao.get_metadata_dict(),
-        agent_columns=agent_type_log.get_agent_columns(),
-    )
-
-    template_file = config[Options.METADATA_TEMPLATE]
     template = OEO_TEMPLATE if template_file is None else read_template_file(template_file)
-    metadata = compiler.locate_and_replace(template)
-    base_path = config[Options.OUTPUT] if config[Options.OUTPUT] is not None else Path(".")
-    data_to_json_file(metadata, base_path)
+    output_metadata = compiler.locate_and_replace(template)
+    data_to_json_file(output_metadata, output_path)
 
 
 def run(config: dict[Options, Any] | None = None) -> None:
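
Together with the shared constants this fixes #262: when `--output` is absent, `metadata.json` and the recovered inputs now land inside the derived output folder rather than the working directory. A path sketch for an input file named results.pb:

```python
from pathlib import Path

from fameio.output.files import (
    METADATA_FILE_NAME,
    RECOVERED_INPUT_PATH,
    RECOVERED_SCENARIO_PATH,
)

output_path = Path("results")  # derived from "results.pb" if --output is absent
print(Path(output_path, METADATA_FILE_NAME))       # results/metadata.json
print(Path(output_path, RECOVERED_INPUT_PATH))     # results/recovered
print(Path(output_path, RECOVERED_SCENARIO_PATH))  # results/recovered/scenario.yaml
```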
--- fameio-3.5.0/src/fameio/scripts/reformat.py
+++ fameio-3.5.2/src/fameio/scripts/reformat.py
@@ -19,6 +19,7 @@ FILE_NAME_APPENDIX = "_reformatted"
 _ERR_FAIL = "Timeseries reformatting script failed."
 _ERR_NO_FILES = "No file found matching this pattern: '{}'"
 _ERR_FILE_CONVERSION = "Could not reformat file: '{}'"
+_ERR_FILES_FAILED = "Could not reformat these files: '{}'"
 
 
 def reformat_file(file: Path, replace: bool) -> None:
@@ -58,9 +59,32 @@ def run(config: dict[Options, Any] | None = None) -> None:
         raise log_error(ScriptError(_ERR_NO_FILES.format(config[Options.FILE_PATTERN]))) from ex
     if not files:
         raise log_error(ScriptError(_ERR_NO_FILES.format(config[Options.FILE_PATTERN])))
+
+    erroneous_files = reformat_files(files, config[Options.REPLACE])
+    if len(erroneous_files) > 0:
+        log_error(ScriptError(_ERR_FILES_FAILED.format(erroneous_files)))
+    else:
+        log_and_print("All files reformatted.")
+
+
+def reformat_files(files: list[Path], replace: bool) -> list[str]:
+    """Reformats given files and potentially replaces them.
+
+    Args:
+        files: list of files to be reformatted
+        replace: if true, original files are replaced
+
+    Returns:
+        list of files that could not be reformatted
+    """
+    erroneous_files: list[str] = []
     for file in files:
         log_and_print(f"Reformatting file: {file}")
-        reformat_file(file, config[Options.REPLACE])
+        try:
+            reformat_file(file, replace)
+        except ScriptError:
+            erroneous_files.append(file.as_posix())
+    return erroneous_files
 
 
 if __name__ == "__main__":
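
Extracting the loop into `reformat_files` is the #268 fix: one failing file no longer aborts the run; failures are collected and reported at the end. Usage sketch with two hypothetical series files:

```python
from pathlib import Path

from fameio.scripts.reformat import reformat_files

failed = reformat_files([Path("a.csv"), Path("b.csv")], replace=False)
if failed:
    print(f"Could not reformat: {failed}")  # posix paths of the offending files
```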
--- fameio-3.5.0/src/fameio/series.py
+++ fameio-3.5.2/src/fameio/series.py
@@ -8,11 +8,12 @@ import math
 import os
 from enum import Enum, auto
 from pathlib import Path
-from typing import Any
+from typing import Any, Final
 
 import pandas as pd
 from fameprotobuf.input_file_pb2 import InputData
 from google.protobuf.internal.wire_format import INT64_MIN, INT64_MAX
+from pandas.errors import EmptyDataError, ParserError
 
 from fameio.input import InputError
 from fameio.input.resolver import PathResolver
@@ -40,10 +41,11 @@ class Entry(Enum):
 class TimeSeriesManager:
     """Manages matching of timeseries data from files and values to unique ids and vice versa."""
 
+    KEY_ROW_TIME: Final[str] = "TimeStep"
+    KEY_ROW_VALUE: Final[str] = "Value"
+
     _TIMESERIES_RECONSTRUCTION_PATH = "./timeseries/"
     _CONSTANT_IDENTIFIER = "Constant value: {}"
-    _KEY_ROW_TIME = "timeStep"
-    _KEY_ROW_VALUE = "value"
 
     _ERR_FILE_NOT_FOUND = "Cannot find Timeseries file '{}'."
     _ERR_NUMERIC_STRING = " Remove quotes to use a constant numeric value instead of a timeseries file."
@@ -141,7 +143,7 @@ class TimeSeriesManager:
         """
         try:
             return pd.read_csv(file, sep=";", header=None, comment="#")
-        except OSError as e:
+        except (OSError, EmptyDataError, ParserError) as e:
             raise log_error(TimeSeriesError(e)) from e
 
     @staticmethod
@@ -273,7 +275,7 @@ class TimeSeriesManager:
         """Reconstructs and stores time series from given list of `timeseries_dao`."""
         for one_series in timeseries:
             self._id_count += 1
-            reconstructed = {Entry.ID: one_series.series_id}
+            reconstructed: dict[Entry, int | float | None | str | pd.DataFrame] = {Entry.ID: one_series.series_id}
             if len(one_series.values) == 1 or (
                 len(one_series.values) == 2 and one_series.values[0] == one_series.values[1]
             ):
@@ -282,7 +284,8 @@ class TimeSeriesManager:
             else:
                 reconstructed[Entry.NAME] = self._get_cleaned_file_name(one_series.series_name)
                 reconstructed[Entry.DATA] = pd.DataFrame(
-                    {self._KEY_ROW_TIME: list(one_series.time_steps), self._KEY_ROW_VALUE: list(one_series.values)}
+                    data={self.KEY_ROW_VALUE: list(one_series.values)},
+                    index=pd.Index(data=list(one_series.time_steps), name=self.KEY_ROW_TIME),
                )
             self._series_by_id[one_series.series_id] = reconstructed
 
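The now-public constants and the index-based reconstruction line up with the conversion code above; together with `CsvWriter` writing `index=True`, the time column survives the round trip. A sketch of the reconstructed frame shape:

```python
import pandas as pd

from fameio.series import TimeSeriesManager

# A "Value" column indexed by "TimeStep" (previously two plain columns,
# "timeStep"/"value"):
frame = pd.DataFrame(
    data={TimeSeriesManager.KEY_ROW_VALUE: [1.0, 2.0]},
    index=pd.Index(data=[0, 3600], name=TimeSeriesManager.KEY_ROW_TIME),
)
print(frame)
```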
--- fameio-3.5.0/src/fameio/scripts/__init__.py.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
-
-SPDX-License-Identifier: Apache-2.0

--- fameio-3.5.0/src/fameio/scripts/convert_results.py.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
-
-SPDX-License-Identifier: Apache-2.0

--- fameio-3.5.0/src/fameio/scripts/make_config.py.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
-
-SPDX-License-Identifier: Apache-2.0

--- fameio-3.5.0/src/fameio/scripts/reformat.py.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
-
-SPDX-License-Identifier: Apache-2.0