fameio 3.1.0__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
Files changed (56)
  1. fameio/cli/__init__.py +2 -3
  2. fameio/cli/convert_results.py +6 -4
  3. fameio/cli/make_config.py +6 -4
  4. fameio/cli/options.py +3 -3
  5. fameio/cli/parser.py +43 -31
  6. fameio/input/__init__.py +1 -9
  7. fameio/input/loader/__init__.py +9 -7
  8. fameio/input/loader/controller.py +64 -14
  9. fameio/input/loader/loader.py +14 -7
  10. fameio/input/metadata.py +37 -18
  11. fameio/input/resolver.py +5 -4
  12. fameio/input/scenario/__init__.py +7 -8
  13. fameio/input/scenario/agent.py +52 -19
  14. fameio/input/scenario/attribute.py +28 -29
  15. fameio/input/scenario/contract.py +161 -52
  16. fameio/input/scenario/exception.py +45 -22
  17. fameio/input/scenario/fameiofactory.py +63 -7
  18. fameio/input/scenario/generalproperties.py +17 -6
  19. fameio/input/scenario/scenario.py +111 -28
  20. fameio/input/scenario/stringset.py +27 -8
  21. fameio/input/schema/__init__.py +5 -5
  22. fameio/input/schema/agenttype.py +29 -11
  23. fameio/input/schema/attribute.py +174 -84
  24. fameio/input/schema/java_packages.py +8 -5
  25. fameio/input/schema/schema.py +35 -9
  26. fameio/input/validator.py +58 -42
  27. fameio/input/writer.py +139 -41
  28. fameio/logs.py +23 -17
  29. fameio/output/__init__.py +5 -1
  30. fameio/output/agent_type.py +93 -27
  31. fameio/output/conversion.py +48 -30
  32. fameio/output/csv_writer.py +88 -18
  33. fameio/output/data_transformer.py +12 -21
  34. fameio/output/input_dao.py +68 -32
  35. fameio/output/output_dao.py +26 -4
  36. fameio/output/reader.py +61 -18
  37. fameio/output/yaml_writer.py +18 -9
  38. fameio/scripts/__init__.py +9 -2
  39. fameio/scripts/convert_results.py +144 -52
  40. fameio/scripts/convert_results.py.license +1 -1
  41. fameio/scripts/exception.py +7 -0
  42. fameio/scripts/make_config.py +34 -12
  43. fameio/scripts/make_config.py.license +1 -1
  44. fameio/series.py +132 -47
  45. fameio/time.py +88 -37
  46. fameio/tools.py +9 -8
  47. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/METADATA +19 -13
  48. fameio-3.2.0.dist-info/RECORD +56 -0
  49. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/WHEEL +1 -1
  50. CHANGELOG.md +0 -279
  51. fameio-3.1.0.dist-info/RECORD +0 -56
  52. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/LICENSE.txt +0 -0
  53. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/LICENSES/Apache-2.0.txt +0 -0
  54. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/LICENSES/CC-BY-4.0.txt +0 -0
  55. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/LICENSES/CC0-1.0.txt +0 -0
  56. {fameio-3.1.0.dist-info → fameio-3.2.0.dist-info}/entry_points.txt +0 -0
```diff
--- a/fameio/output/agent_type.py
+++ b/fameio/output/agent_type.py
@@ -1,10 +1,13 @@
-# SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
-from typing import Union
+from __future__ import annotations
 
 from fameprotobuf.services_pb2 import Output
 
+from fameio.logs import log_error
+from fameio.output import OutputError
+
 
 class AgentType:
     """Provides information derived from an underlying protobuf AgentType"""
@@ -28,18 +31,17 @@ class AgentType:
 
     def get_simple_column_mask(self) -> list[bool]:
         """Returns list of bool - where an entry is True if the output column with the same index is not complex"""
-        return [True if len(field.index_names) == 0 else False for field in self._agent_type.fields]
+        return [len(field.index_names) == 0 for field in self._agent_type.fields]
 
     def get_complex_column_ids(self) -> set[int]:
         """Returns set of IDs for complex columns, ignoring simple columns"""
-        return set([field.field_id for field in self._agent_type.fields if len(field.index_names) > 0])
+        return {field.field_id for field in self._agent_type.fields if len(field.index_names) > 0}
 
-    def get_column_name_for_id(self, column_index: int) -> Union[str, None]:
+    def get_column_name_for_id(self, column_index: int) -> str | None:
         """Returns name of column by given `column_index` or None, if column is not present"""
         if 0 <= column_index < len(self._agent_type.fields):
             return self._agent_type.fields[column_index].field_name
-        else:
-            return None
+        return None
 
     def get_inner_columns(self, column_index: int) -> tuple[str, ...]:
         """Returns tuple of inner column names for complex column with given `column_index`"""
@@ -50,40 +52,104 @@ class AgentType:
         return self._agent_type.class_name
 
 
+class AgentTypeError(OutputError):
+    """Indicates an error with the agent types definitions"""
+
+
 class AgentTypeLog:
     """Stores data about collected agent types"""
 
     _ERR_AGENT_TYPE_MISSING = "Requested AgentType `{}` not found."
     _ERR_DOUBLE_DEFINITION = "Just one definition allowed per AgentType. Found multiple for {}. File might be corrupt."
 
-    def __init__(self, requested_agents: list[str]) -> None:
-        self._requested_agents = [agent.upper() for agent in requested_agents] if requested_agents else None
-        self._requested_agent_types = {}
+    def __init__(self, _agent_name_filter_list: list[str]) -> None:
+        """
+        Initialises new AgentTypeLog
+
+        Args:
+            _agent_name_filter_list: list of agent type names that are requested for output data extraction
+        """
+        self._agent_name_filter_list: list[str] | None = (
+            [agent.upper() for agent in _agent_name_filter_list] if _agent_name_filter_list else None
+        )
+        self._requested_agent_types: dict[str, AgentType] = {}
+        self._agents_with_output: list[str] = []
 
     def update_agents(self, new_types: dict[str, Output.AgentType]) -> None:
-        """Saves new `agent_types` (if any) contained in given `output` if requested for extraction"""
-        if new_types:
-            if self._requested_agents:
-                new_types = {
-                    agent_name: agent_type
-                    for agent_name, agent_type in new_types.items()
-                    if agent_name.upper() in self._requested_agents
-                }
-            for agent_name in self._requested_agent_types.keys():
-                if agent_name in new_types:
-                    raise Exception(self._ERR_DOUBLE_DEFINITION.format(agent_name))
-            self._requested_agent_types.update(new_types)
+        """
+        If any new `agent_types` are provided, checks if they are requested for extraction, and, if so, saves them
+
+        Args:
+            new_types: to be saved (if requested for extraction)
+
+        Raises:
+            AgentTypeError: if agent type was already registered, logged with level "ERROR"
+        """
+        if not new_types:
+            return
+
+        self._agents_with_output.extend(list(new_types.keys()))
+        filtered_types = self._filter_agents_by_name(new_types)
+        self._ensure_no_duplication(filtered_types)
+        self._requested_agent_types.update(filtered_types)
+
+    def _filter_agents_by_name(self, new_types: dict[str, Output.AgentType]) -> dict[str, Output.AgentType]:
+        """
+        Removes and entries from `new_types` not on `agent_name_filter_list`
+
+        Args:
+            new_types: to be filtered
+
+        Returns:
+            filtered list, or original list if no filter is active
+        """
+        if self._agent_name_filter_list:
+            return {
+                agent_name: agent_type
+                for agent_name, agent_type in new_types.items()
+                if agent_name.upper() in self._agent_name_filter_list
+            }
+        return new_types
+
+    def _ensure_no_duplication(self, filtered_types: dict[str, Output.AgentType]) -> None:
+        """
+        Ensures no duplicate agent type definitions occur
+
+        Args:
+            filtered_types: to be checked for duplications with already registered types
+
+        Raises:
+            AgentTypeError: if duplicate agent type is found, logged with level "ERROR"
+        """
+        for agent_name in self._requested_agent_types:
+            if agent_name in filtered_types:
+                raise log_error(AgentTypeError(self._ERR_DOUBLE_DEFINITION.format(agent_name)))
 
     def has_any_agent_type(self) -> bool:
         """Returns True if any agent type was registered so far present"""
         return len(self._requested_agent_types) > 0
 
-    def get_agent_type(self, agent_name: str) -> AgentType:
-        """Returns `AgentType` of given agent `name`"""
-        if agent_name not in self._requested_agent_types:
-            raise Exception(self._ERR_AGENT_TYPE_MISSING.format(agent_name))
-        return AgentType(self._requested_agent_types[agent_name])
+    def get_agent_type(self, agent_type_name: str) -> AgentType:
+        """
+        Returns the requested type of agent
+
+        Args:
+            agent_type_name: requested name of agent type
+
+        Returns:
+            stored agent type
+
+        Raises:
+            AgentTypeError: if no agent type could be found with that name, logged with level "ERROR"
+        """
+        if agent_type_name not in self._requested_agent_types:
+            raise log_error(AgentTypeError(self._ERR_AGENT_TYPE_MISSING.format(agent_type_name)))
+        return AgentType(self._requested_agent_types[agent_type_name])
 
     def is_requested(self, agent_name: str) -> bool:
         """Returns True if given agent_name is known and requested"""
         return agent_name in self._requested_agent_types
+
+    def get_agents_with_output(self) -> list[str]:
+        """Returns all names of agents that had output"""
+        return self._agents_with_output
```
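The recurring `raise log_error(...)` idiom in these hunks replaces the older `log_error_and_raise` helper and the bare `raise Exception(...)` calls. A minimal sketch of how such an idiom can be wired up, assuming `log_error` logs the exception at level "ERROR" and returns it (the actual `fameio.logs` implementation may differ):

```python
import logging
from typing import TypeVar

E = TypeVar("E", bound=Exception)


def log_error(exception: E) -> E:
    """Assumed behaviour: log the exception at level ERROR, then hand it back to the caller."""
    logging.getLogger("fameio").error(str(exception))
    return exception


class OutputError(Exception):
    """Stand-in for fameio.output.OutputError."""


class AgentTypeError(OutputError):
    """Stand-in for the AgentTypeError added in this release."""


try:
    # logging and raising collapse into a single statement
    raise log_error(AgentTypeError("Requested AgentType `Foo` not found."))
except AgentTypeError as error:
    print(f"caught: {error}")
```

Returning the exception keeps the `raise` visible at the call site, so readers and static analysers still see the branch as terminating.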
```diff
--- a/fameio/output/conversion.py
+++ b/fameio/output/conversion.py
@@ -1,53 +1,62 @@
-# SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
 
 import math
-from typing import Optional
 
 import pandas as pd
 
 from fameio.cli.options import TimeOptions
-from fameio.logs import log_error_and_raise, log
-from fameio.time import ConversionError, FameTime
+from fameio.logs import log_error, log
+from fameio.output import OutputError
+from fameio.time import FameTime, ConversionError as TimeConversionError
 
 _ERR_UNIMPLEMENTED = "Time conversion mode '{}' not implemented."
+_ERR_TIME_CONVERSION = "Conversion of timestamps failed."
 _ERR_NEGATIVE = "StepsBefore and StepsAfter must be Zero or positive integers"
 
 
-def _apply_time_merging(
-    dataframes: dict[Optional[str], pd.DataFrame], offset: int, period: int, first_positive_focal_point: int
-) -> None:
-    """Applies time merging to `data` based on given `offset`, `period`, and `first_positive_focal_point`"""
-    log().debug(f"Grouping TimeSteps...")
-    for key in dataframes.keys():
-        df = dataframes[key]
-        index_columns = df.index.names
-        df.reset_index(inplace=True)
-        df["TimeStep"] = df["TimeStep"].apply(lambda t: merge_time(t, first_positive_focal_point, offset, period))
-        dataframes[key] = df.groupby(by=index_columns).sum()
+class ConversionError(OutputError):
+    """An error that occurred during conversion of output data"""
 
 
-def apply_time_merging(data: dict[Optional[str], pd.DataFrame], config: Optional[list[int]]) -> None:
+def apply_time_merging(data: dict[str | None, pd.DataFrame], config: list[int] | None) -> None:
     """
     Applies merging of TimeSteps inplace for given `data`
 
     Args:
         data: one or multiple DataFrames of time series; depending on the given config, contents might be modified
         config: three integer values defining how to merge data within a range of time steps
+
+    Raises:
+        ConversionError: if parameters are not valid, logged with level "ERROR"
     """
     if not config or all(v == 0 for v in config):
         return
     focal_point, steps_before, steps_after = config
     if steps_before < 0 or steps_after < 0:
-        raise ValueError(_ERR_NEGATIVE)
+        raise log_error(ConversionError(_ERR_NEGATIVE))
 
     period = steps_before + steps_after + 1
     first_positive_focal_point = focal_point % period
     _apply_time_merging(data, offset=steps_before, period=period, first_positive_focal_point=first_positive_focal_point)
 
 
-def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
+def _apply_time_merging(
+    dataframes: dict[str | None, pd.DataFrame], offset: int, period: int, first_positive_focal_point: int
+) -> None:
+    """Applies time merging to `data` based on given `offset`, `period`, and `first_positive_focal_point`"""
+    log().debug("Grouping TimeSteps...")
+    for key in dataframes.keys():
+        df = dataframes[key]
+        index_columns = df.index.names
+        df.reset_index(inplace=True)
+        df["TimeStep"] = df["TimeStep"].apply(lambda t: _merge_time(t, first_positive_focal_point, offset, period))
+        dataframes[key] = df.groupby(by=index_columns).sum()
+
+
+def _merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
     """
     Returns `time_step` rounded to its corresponding focal point
 
@@ -63,25 +72,31 @@ def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int
     return math.floor((time_step + offset - focal_time) / period) * period + focal_time
 
 
-def apply_time_option(data: dict[Optional[str], pd.DataFrame], mode: TimeOptions) -> None:
+def apply_time_option(data: dict[str | None, pd.DataFrame], mode: TimeOptions) -> None:
     """
     Applies time option based on given `mode` inplace of given `data`
 
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` might be modified (depending on mode)
         mode: name of time conversion mode (derived from Enum)
-    """
-    if mode == TimeOptions.INT:
-        log().debug("No time conversion...")
-    elif mode == TimeOptions.UTC:
-        _convert_time_index(data, "%Y-%m-%d %H:%M:%S")
-    elif mode == TimeOptions.FAME:
-        _convert_time_index(data, "%Y-%m-%d_%H:%M:%S")
-    else:
-        log_error_and_raise(ConversionError(_ERR_UNIMPLEMENTED.format(mode)))
-
 
-def _convert_time_index(data: dict[Optional[str], pd.DataFrame], datetime_format: str) -> None:
+    Raises:
+        ConversionError: if provided mode is not implemented , logged with level "ERROR"
+    """
+    try:
+        if mode == TimeOptions.INT:
+            log().debug("No time conversion...")
+        elif mode == TimeOptions.UTC:
+            _convert_time_index(data, "%Y-%m-%d %H:%M:%S")
+        elif mode == TimeOptions.FAME:
+            _convert_time_index(data, "%Y-%m-%d_%H:%M:%S")
+        else:
+            raise log_error(ConversionError(_ERR_UNIMPLEMENTED.format(mode)))
+    except TimeConversionError as e:
+        raise log_error(ConversionError(_ERR_TIME_CONVERSION.format())) from e
+
+
+def _convert_time_index(data: dict[str | None, pd.DataFrame], datetime_format: str) -> None:
     """
     Inplace replacement of `TimeStep` column in MultiIndex of each item of `data` from FAME's time steps` to DateTime
     in given `date_format`
@@ -89,6 +104,9 @@ def _convert_time_index(data: dict[Optional[str], pd.DataFrame], datetime_format
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` will be modified
         datetime_format: used for the conversion
+
+    Raises:
+        TimeConversionError: if time cannot be converted, logged with level "ERROR"
     """
     log().debug(f"Converting TimeStep to format '{datetime_format}'...")
     for _, df in data.items():
```
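The relocated and renamed `_merge_time` keeps its rounding formula unchanged. A quick numeric check of what it computes, with illustrative values not taken from the fameio test suite:

```python
import math


def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
    # same formula as fameio's _merge_time
    return math.floor((time_step + offset - focal_time) / period) * period + focal_time


# config = [focal_point, steps_before, steps_after] = [0, 1, 1]
period = 1 + 1 + 1    # steps_before + steps_after + 1
focal = 0 % period    # first positive focal point
offset = 1            # steps_before
for t in range(-1, 5):
    print(t, "->", merge_time(t, focal, offset, period))
# prints: -1 -> 0, 0 -> 0, 1 -> 0, 2 -> 3, 3 -> 3, 4 -> 3
```

Each window of `period` time steps collapses onto its focal point, after which the `groupby(...).sum()` in `_apply_time_merging` aggregates the rows that now share a `TimeStep`.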
```diff
--- a/fameio/output/csv_writer.py
+++ b/fameio/output/csv_writer.py
@@ -1,36 +1,50 @@
-# SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
+from __future__ import annotations
+
 from pathlib import Path
-from typing import Union
 
 import pandas as pd
 
-from fameio.logs import log
+from fameio.logs import log, log_error
+from fameio.output import OutputError
 from fameio.output.data_transformer import INDEX
 from fameio.series import TimeSeriesManager
 from fameio.tools import ensure_path_exists
 
 
+class CsvWriterError(OutputError):
+    """An error occurred during writing a CSV file"""
+
+
 class CsvWriter:
     """Writes dataframes to different csv files"""
 
+    _ERR_DIR_CREATE = "Could not create directory for output files: '{}'"
+    _ERR_FILE_OPEN = "Could not open file for writing: '{}'"
+    _ERR_FILE_WRITE = "Could not write to file '{}' due to: {}"
+
     _INFO_USING_PATH = "Using specified output path: {}"
     _INFO_USING_DERIVED_PATH = "No output path specified - writing to new local folder: {}"
 
     CSV_FILE_SUFFIX = ".csv"
 
     def __init__(self, config_output: Path, input_file_path: Path, single_export: bool) -> None:
+        """
+        Raises:
+            CsvWriterError: if output folder could not be created, logged with level "ERROR"
+        """
         self._single_export = single_export
         self._output_folder = self._get_output_folder_name(config_output, input_file_path)
-        self._files = {}
+        self._files: dict[str, Path] = {}
         self._create_output_folder()
 
     @staticmethod
     def _get_output_folder_name(config_output: Path, input_file_path: Path) -> Path:
         """Returns name of the output folder derived either from the specified `config_output` or `input_file_path`"""
         if config_output:
-            output_folder_name = config_output
+            output_folder_name: str | Path = config_output
             log().info(CsvWriter._INFO_USING_PATH.format(config_output))
         else:
             output_folder_name = input_file_path.stem
@@ -38,13 +52,30 @@ class CsvWriter:
         return Path(output_folder_name)
 
     def _create_output_folder(self) -> None:
-        """Creates output folder if not yet present"""
+        """
+        Creates output folder if not yet present
+
+        Raises:
+            CsvWriterError: if output folder could not be created, logged with level "ERROR"
+        """
         log().debug("Creating output folder if required...")
         if not self._output_folder.is_dir():
-            self._output_folder.mkdir(parents=True)
+            try:
+                self._output_folder.mkdir(parents=True)
+            except OSError as e:
+                raise log_error(CsvWriterError(self._ERR_DIR_CREATE.format(self._output_folder))) from e
+
+    def write_to_files(self, agent_name: str, data: dict[None | str, pd.DataFrame]) -> None:
+        """
+        Writes `data` for given `agent_name` to .csv file(s)
+
+        Args:
+            agent_name: name of agent whose data are to be written to file(s)
+            data: previously extracted data for that agent that are to be written
 
-    def write_to_files(self, agent_name: str, data: dict[Union[None, str], pd.DataFrame]) -> None:
-        """Writes `data` for given `agent_name` to .csv file(s)"""
+        Raises:
+            CsvWriterError: when file could not be written, logged on level "ERROR"
+        """
         for column_name, column_data in data.items():
             column_data.sort_index(inplace=True)
             if self._single_export:
@@ -56,16 +87,45 @@ class CsvWriter:
                 self._write_data_frame(column_data, identifier)
 
     def write_time_series_to_disk(self, timeseries_manager: TimeSeriesManager) -> None:
-        """Writes time_series of given `timeseries_manager` to disk"""
+        """
+        Writes time_series of given `timeseries_manager` to disk
+
+        Args:
+            timeseries_manager:
+
+        Raises:
+            CsvWriterError: if data could not be written to disk, logged on level "ERROR"
+        """
         for _, name, data in timeseries_manager.get_all_series():
             if data is not None:
                 target_path = Path(self._output_folder, name)
                 ensure_path_exists(target_path.parent)
-                # noinspection PyTypeChecker
-                data.to_csv(path_or_buf=target_path, sep=";", header=None, index=None)
+                self._dataframe_to_csv(data, target_path, header=False, index=False, mode="w")
 
     @staticmethod
-    def _get_identifier(agent_name: str, column_name: str, agent_id: str = None) -> str:
+    def _dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: str) -> None:
+        """
+        Write given data to specified CSV file with specified parameters using semicolon separators
+
+        Args:
+            data: to be written
+            file: target path of csv file
+            header: write column headers
+            index: write index column(s)
+            mode: append to or overwrite file
+
+        Raises:
+            CsvWriterError: if data could not be written to disk, logged on level "ERROR"
+        """
+        try:
+            data.to_csv(path_or_buf=file, sep=";", header=header, index=index, mode=mode)
+        except OSError as e:
+            raise log_error(CsvWriterError(CsvWriter._ERR_FILE_OPEN.format(file))) from e
+        except UnicodeError as e:
+            raise log_error(CsvWriterError(CsvWriter._ERR_FILE_WRITE.format(file, str(e)))) from e
+
+    @staticmethod
+    def _get_identifier(agent_name: str, column_name: str | None = None, agent_id: str | None = None) -> str:
         """Returns unique identifier for given `agent_name` and (optional) `agent_id` and `column_name`"""
         identifier = str(agent_name)
         if column_name:
@@ -78,14 +138,24 @@ class CsvWriter:
         """
         Appends `data` to existing csv file derived from `identifier` without headers,
         or writes new file with headers instead
+
+        Args:
+            data: to be written to file
+            identifier: to derive the file name from
+
+        Raises:
+            CsvWriterError: when file could not be written, logged on level "ERROR"
         """
         if self._has_file(identifier):
             outfile_name = self._get_outfile_name(identifier)
-            data.to_csv(outfile_name, sep=";", index=True, header=False, mode="a")
+            mode = "a"
+            header = False
         else:
             outfile_name = self._create_outfile_name(identifier)
             self._save_outfile_name(outfile_name, identifier)
-            data.to_csv(outfile_name, sep=";", index=True, header=True)
+            mode = "w"
+            header = True
+        self._dataframe_to_csv(data, outfile_name, header=header, index=True, mode=mode)
 
     def _has_file(self, identifier: str) -> bool:
         """Returns True if a file for given `identifier` was already written"""
@@ -97,12 +167,12 @@ class CsvWriter:
         self._files = {}
         return current_files
 
-    def _get_outfile_name(self, identifier: str) -> str:
-        """Returns file name for given `agent_name` and (optional) `agent_id`"""
+    def _get_outfile_name(self, identifier: str) -> Path:
+        """Returns file path for given `agent_name` and (optional) `agent_id`"""
        return self._files[identifier]
 
     def _create_outfile_name(self, identifier: str) -> Path:
-        """Returns fully qualified file name based on given `agent_name` and (optional) `agent_id`"""
+        """Returns fully qualified file path based on given `agent_name` and (optional) `agent_id`"""
         return Path(self._output_folder, f"{identifier}{self.CSV_FILE_SUFFIX}")
 
     def _save_outfile_name(self, outfile_name: Path, identifier: str) -> None:
```
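All `to_csv` calls are now funnelled through `_dataframe_to_csv`, which translates `OSError` and `UnicodeError` into `CsvWriterError`. A standalone sketch of the same write-then-append flow, with an invented file name and `RuntimeError` standing in for `CsvWriterError`:

```python
from pathlib import Path

import pandas as pd


def dataframe_to_csv(data: pd.DataFrame, file: Path, header: bool, index: bool, mode: str) -> None:
    """Semicolon-separated CSV write with the error translation used by the new helper."""
    try:
        data.to_csv(path_or_buf=file, sep=";", header=header, index=index, mode=mode)
    except (OSError, UnicodeError) as e:  # CsvWriterError in fameio itself
        raise RuntimeError(f"Could not write to file '{file}' due to: {e}") from e


frame = pd.DataFrame({"AgentId": [1], "TimeStep": [0], "Value": [0.5]}).set_index(["AgentId", "TimeStep"])
target = Path("MyAgent.csv")  # invented file name
dataframe_to_csv(frame, target, header=True, index=True, mode="w")   # first chunk: new file with headers
dataframe_to_csv(frame, target, header=False, index=True, mode="a")  # later chunks: append without headers
```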
```diff
--- a/fameio/output/data_transformer.py
+++ b/fameio/output/data_transformer.py
@@ -1,11 +1,9 @@
-# SPDX-FileCopyrightText: 2023 German Aerospace Center <fame@dlr.de>
+# SPDX-FileCopyrightText: 2025 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
 from __future__ import annotations
 
 from abc import ABC
-from builtins import staticmethod
-from typing import Union, Optional
 
 import pandas as pd
 from fameprotobuf.services_pb2 import Output
@@ -21,8 +19,8 @@ class DataTransformer(ABC):
     """Extracts and provides series data from parsed and processed output files for requested agents"""
 
     MODES = {
-        ResolveOptions.IGNORE: lambda: DataTransformerIgnore(),
-        ResolveOptions.SPLIT: lambda: DataTransformerSplit(),
+        ResolveOptions.IGNORE: lambda: DataTransformerIgnore(),  # pylint: disable=unnecessary-lambda
+        ResolveOptions.SPLIT: lambda: DataTransformerSplit(),  # pylint: disable=unnecessary-lambda
     }
     SIMPLE_COLUMN_INDEX = -1
 
@@ -30,13 +28,11 @@ class DataTransformer(ABC):
     def build(complex_column_mode: ResolveOptions) -> DataTransformer:
         return DataTransformer.MODES[complex_column_mode]()
 
-    def extract_agent_data(
-        self, series: list[Output.Series], agent_type: AgentType
-    ) -> dict[Optional[str], pd.DataFrame]:
+    def extract_agent_data(self, series: list[Output.Series], agent_type: AgentType) -> dict[str | None, pd.DataFrame]:
         """
         Returns dict of DataFrame(s) containing all data from given `series` of given `agent_type`.
         When ResolveOption is `SPLIT`, the dict maps each complex column's name to the associated DataFrame.
-        In any case, the dict maps `None` to a DataFrame with the content of all simple column / merged columns.
+        In any case, the dict maps `None` to a DataFrame with the content of all simple columns.
         """
         container = self._extract_agent_data(series, agent_type)
         data_frames = {}
@@ -45,7 +41,7 @@ class DataTransformer(ABC):
             column_name = agent_type.get_column_name_for_id(column_id)
             if column_id == DataTransformer.SIMPLE_COLUMN_INDEX:
                 data_frame.rename(columns=self._get_column_map(agent_type), inplace=True)
-                index = INDEX
+                index: tuple[str, ...] = INDEX
                 data_frame = data_frame.loc[:, agent_type.get_simple_column_mask()]
             else:
                 data_frame.rename(columns={0: column_name}, inplace=True)
@@ -59,7 +55,7 @@ class DataTransformer(ABC):
 
     def _extract_agent_data(
         self, series: list[Output.Series], agent_type: AgentType
-    ) -> dict[int, dict[tuple, list[Union[float, None, str]]]]:
+    ) -> dict[int, dict[tuple, list[float | None | str]]]:
         """Returns mapping of (agentId, timeStep) to fixed-length list of all output columns for given `class_name`"""
         container = DataTransformer._create_container(agent_type)
         mask_simple = agent_type.get_simple_column_mask()
@@ -78,7 +74,7 @@ class DataTransformer(ABC):
         self,
         series: Output.Series,
         mask_simple: list[bool],
-        container: dict[int, dict[tuple, list[Union[float, None, str]]]],
+        container: dict[int, dict[tuple, list[float | None | str]]],
     ) -> None:
         """Adds data from given `series` to specified `container` dict as list"""
         empty_list: list = [None] * len(mask_simple)
@@ -89,19 +85,12 @@ class DataTransformer(ABC):
                 if mask_simple[column.field_id]:
                     simple_values[column.field_id] = column.value
                 else:
-                    self._merge_complex_column(column, simple_values)
                     self._store_complex_values(column, container, index)
             container[DataTransformer.SIMPLE_COLUMN_INDEX][index] = simple_values
 
     @staticmethod
-    def _merge_complex_column(column: Output.Series.Line.Column, values: list) -> None:
-        """Does not merge complex column data"""
-        pass
-
-    @staticmethod
-    def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], index: tuple) -> None:
-        """Does not store complex column data"""
-        pass
+    def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], base_index: tuple) -> None:
+        """Stores complex column data"""
 
     @staticmethod
     def _get_column_map(agent_type: AgentType) -> dict[int, str]:
@@ -114,6 +103,8 @@ class DataTransformerIgnore(DataTransformer):
 
 
 class DataTransformerSplit(DataTransformer):
+    """Stores complex data columns split by column type"""
+
     @staticmethod
     def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], base_index: tuple) -> None:
         """Adds inner data from `column` to given `container` - split by column type"""
```