fameio 2.3.1__py3-none-any.whl → 3.0.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
- CHANGELOG.md +24 -0
- fameio/__init__.py +4 -1
- fameio/{source/cli → cli}/__init__.py +2 -0
- fameio/{source/cli → cli}/convert_results.py +8 -8
- fameio/{source/cli → cli}/make_config.py +5 -5
- fameio/{source/cli → cli}/options.py +0 -8
- fameio/{source/cli → cli}/parser.py +26 -83
- fameio/input/__init__.py +27 -0
- fameio/input/loader/__init__.py +68 -0
- fameio/input/loader/controller.py +129 -0
- fameio/input/loader/loader.py +109 -0
- fameio/input/metadata.py +149 -0
- fameio/input/resolver.py +44 -0
- fameio/{source → input}/scenario/__init__.py +1 -2
- fameio/{source → input}/scenario/agent.py +24 -38
- fameio/input/scenario/attribute.py +203 -0
- fameio/{source → input}/scenario/contract.py +50 -61
- fameio/{source → input}/scenario/exception.py +8 -13
- fameio/{source → input}/scenario/fameiofactory.py +6 -6
- fameio/{source → input}/scenario/generalproperties.py +22 -47
- fameio/{source → input}/scenario/scenario.py +34 -31
- fameio/input/scenario/stringset.py +48 -0
- fameio/{source → input}/schema/__init__.py +2 -2
- fameio/input/schema/agenttype.py +125 -0
- fameio/input/schema/attribute.py +268 -0
- fameio/{source → input}/schema/java_packages.py +26 -22
- fameio/{source → input}/schema/schema.py +25 -22
- fameio/{source → input}/validator.py +32 -35
- fameio/{source → input}/writer.py +86 -86
- fameio/{source/logs.py → logs.py} +25 -9
- fameio/{source/results → output}/agent_type.py +21 -22
- fameio/{source/results → output}/conversion.py +34 -31
- fameio/{source/results → output}/csv_writer.py +7 -7
- fameio/{source/results → output}/data_transformer.py +24 -24
- fameio/{source/results → output}/input_dao.py +51 -49
- fameio/{source/results → output}/output_dao.py +16 -17
- fameio/{source/results → output}/reader.py +30 -31
- fameio/{source/results → output}/yaml_writer.py +2 -3
- fameio/scripts/__init__.py +2 -2
- fameio/scripts/convert_results.py +16 -15
- fameio/scripts/make_config.py +9 -9
- fameio/{source/series.py → series.py} +28 -26
- fameio/{source/time.py → time.py} +8 -8
- fameio/{source/tools.py → tools.py} +2 -2
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/METADATA +277 -72
- fameio-3.0.0.dist-info/RECORD +56 -0
- fameio/source/__init__.py +0 -8
- fameio/source/loader.py +0 -181
- fameio/source/metadata.py +0 -32
- fameio/source/path_resolver.py +0 -34
- fameio/source/scenario/attribute.py +0 -130
- fameio/source/scenario/stringset.py +0 -51
- fameio/source/schema/agenttype.py +0 -132
- fameio/source/schema/attribute.py +0 -203
- fameio/source/schema/exception.py +0 -9
- fameio-2.3.1.dist-info/RECORD +0 -55
- /fameio/{source/results → output}/__init__.py +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/LICENSE.txt +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/LICENSES/Apache-2.0.txt +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/LICENSES/CC-BY-4.0.txt +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/LICENSES/CC0-1.0.txt +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/WHEEL +0 -0
- {fameio-2.3.1.dist-info → fameio-3.0.0.dist-info}/entry_points.txt +0 -0
fameio/{source/results → output}/conversion.py

```diff
@@ -1,41 +1,50 @@
-# SPDX-FileCopyrightText:
+# SPDX-FileCopyrightText: 2024 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
 
 import math
-from typing import
+from typing import Optional
 
 import pandas as pd
 
-from fameio.
-from fameio.
-from fameio.
+from fameio.cli.options import TimeOptions
+from fameio.logs import log_error_and_raise, log
+from fameio.time import ConversionError, FameTime
 
 _ERR_UNIMPLEMENTED = "Time conversion mode '{}' not implemented."
+_ERR_NEGATIVE = "StepsBefore and StepsAfter must be Zero or positive integers"
 
 
-def
+def _apply_time_merging(
+    dataframes: dict[Optional[str], pd.DataFrame], offset: int, period: int, first_positive_focal_point: int
+) -> None:
+    """Applies time merging to `data` based on given `offset`, `period`, and `first_positive_focal_point`"""
+    log().debug(f"Grouping TimeSteps...")
+    for key in dataframes.keys():
+        df = dataframes[key]
+        index_columns = df.index.names
+        df.reset_index(inplace=True)
+        df["TimeStep"] = df["TimeStep"].apply(lambda t: merge_time(t, first_positive_focal_point, offset, period))
+        dataframes[key] = df.groupby(by=index_columns).sum()
+
+
+def apply_time_merging(data: dict[Optional[str], pd.DataFrame], config: Optional[list[int]]) -> None:
     """
     Applies merging of TimeSteps inplace for given `data`
 
     Args:
-        data: one or multiple DataFrames of time series;
-        config:
-
-    Returns:
-        Nothing - data is modified inplace
+        data: one or multiple DataFrames of time series; depending on the given config, contents might be modified
+        config: three integer values defining how to merge data within a range of time steps
     """
-    if config:
-
-
-
-
-
-
-
-
-        df["TimeStep"] = df["TimeStep"].apply(lambda t: merge_time(t, first_positive_focal_point, offset, period))
-        data[key] = df.groupby(by=index_columns).sum()
+    if not config or all(v == 0 for v in config):
+        return
+    focal_point, steps_before, steps_after = config
+    if steps_before < 0 or steps_after < 0:
+        raise ValueError(_ERR_NEGATIVE)
+
+    period = steps_before + steps_after + 1
+    first_positive_focal_point = focal_point % period
+    _apply_time_merging(data, offset=steps_before, period=period, first_positive_focal_point=first_positive_focal_point)
 
 
 def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
```
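The reworked merging follows a reset-apply-groupby pattern: it rewrites each DataFrame's `TimeStep` index level and then sums all rows that collapse onto the same focal point. A minimal sketch of that round trip with an invented two-row frame:

```python
import pandas as pd

# Two observations whose TimeSteps merge onto the same focal point get summed:
df = pd.DataFrame({"AgentId": [1, 1], "TimeStep": [2, 2], "Value": [3.0, 4.0]}).set_index(["AgentId", "TimeStep"])
index_columns = df.index.names
df.reset_index(inplace=True)
# (here _apply_time_merging would first map each TimeStep onto its focal point)
merged = df.groupby(by=index_columns).sum()
print(merged.loc[(1, 2), "Value"])  # 7.0
```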
```diff
@@ -54,16 +63,13 @@ def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int
     return math.floor((time_step + offset - focal_time) / period) * period + focal_time
 
 
-def apply_time_option(data:
+def apply_time_option(data: dict[Optional[str], pd.DataFrame], mode: TimeOptions) -> None:
     """
     Applies time option based on given `mode` inplace of given `data`
 
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` might be modified (depending on mode)
         mode: name of time conversion mode (derived from Enum)
-
-    Returns:
-        Nothing - data is modified inplace
     """
     if mode == TimeOptions.INT:
         log().debug("No time conversion...")
```
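Taken together with `apply_time_merging` above, the arithmetic is compact: `period = steps_before + steps_after + 1`, and every time step is pulled onto the focal time of its window. A small worked example with invented values:

```python
import math


def merge_time(time_step: int, focal_time: int, offset: int, period: int) -> int:
    """Maps `time_step` onto the focal time of its merging window (formula as in the hunk above)"""
    return math.floor((time_step + offset - focal_time) / period) * period + focal_time


# A config of [focal_point=2, steps_before=1, steps_after=1] yields period = 3 and
# first_positive_focal_point = 2 % 3 = 2, so steps 1..6 collapse in windows of three:
print([merge_time(t, focal_time=2, offset=1, period=3) for t in range(1, 7)])  # [2, 2, 2, 5, 5, 5]
```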
```diff
@@ -72,10 +78,10 @@ def apply_time_option(data: Dict[Optional[str], pd.DataFrame], mode: TimeOptions
     elif mode == TimeOptions.FAME:
         _convert_time_index(data, "%Y-%m-%d_%H:%M:%S")
     else:
-        log_error_and_raise(
+        log_error_and_raise(ConversionError(_ERR_UNIMPLEMENTED.format(mode)))
 
 
-def _convert_time_index(data:
+def _convert_time_index(data: dict[Optional[str], pd.DataFrame], datetime_format: str) -> None:
     """
     Inplace replacement of `TimeStep` column in MultiIndex of each item of `data` from FAME's time steps` to DateTime
     in given `date_format`
```
```diff
@@ -83,9 +89,6 @@ def _convert_time_index(data: Dict[Optional[str], pd.DataFrame], datetime_format
     Args:
         data: one or multiple DataFrames of time series; column `TimeStep` will be modified
         datetime_format: used for the conversion
-
-    Returns:
-        Nothing - data is modified inplace
     """
     log().debug(f"Converting TimeStep to format '{datetime_format}'...")
     for _, df in data.items():
```
fameio/{source/results → output}/csv_writer.py

```diff
@@ -2,14 +2,14 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 from pathlib import Path
-from typing import
+from typing import Union
 
 import pandas as pd
 
-from fameio.
-from fameio.
-from fameio.
-from fameio.
+from fameio.logs import log
+from fameio.output.data_transformer import INDEX
+from fameio.series import TimeSeriesManager
+from fameio.tools import ensure_path_exists
 
 
 class CsvWriter:
```
```diff
@@ -43,7 +43,7 @@ class CsvWriter:
         if not self._output_folder.is_dir():
             self._output_folder.mkdir(parents=True)
 
-    def write_to_files(self, agent_name: str, data:
+    def write_to_files(self, agent_name: str, data: dict[Union[None, str], pd.DataFrame]) -> None:
         """Writes `data` for given `agent_name` to .csv file(s)"""
         for column_name, column_data in data.items():
             column_data.sort_index(inplace=True)
```
```diff
@@ -91,7 +91,7 @@ class CsvWriter:
         """Returns True if a file for given `identifier` was already written"""
         return identifier in self._files
 
-    def pop_all_file_paths(self) ->
+    def pop_all_file_paths(self) -> dict[str, Path]:
         """Clears all stored file paths and returns their previous identifiers and their paths"""
         current_files = self._files
         self._files = {}
```
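These hunks follow a pattern repeated across the whole release: fameio 3.0 drops `typing.Dict`/`typing.List` annotations in favour of the built-in generics standardised by PEP 585, which require Python 3.9+. A minimal before/after sketch (the function names are illustrative only):

```python
from pathlib import Path
from typing import Dict


def pop_all_file_paths_v2() -> Dict[str, Path]:  # fameio 2.3.1 style: typing.Dict
    return {}


def pop_all_file_paths_v3() -> dict[str, Path]:  # fameio 3.0.0 style: built-in generic
    return {}
```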
fameio/{source/results → output}/data_transformer.py

```diff
@@ -5,14 +5,14 @@ from __future__ import annotations
 
 from abc import ABC
 from builtins import staticmethod
-from typing import
+from typing import Union, Optional
 
 import pandas as pd
-from fameprotobuf.
+from fameprotobuf.services_pb2 import Output
 from pandas import DataFrame
 
-from fameio.
-from fameio.
+from fameio.cli.options import ResolveOptions
+from fameio.output.agent_type import AgentType
 
 INDEX = ("AgentId", "TimeStep")
 
```
```diff
@@ -31,8 +31,8 @@ class DataTransformer(ABC):
         return DataTransformer.MODES[complex_column_mode]()
 
     def extract_agent_data(
-        self, series:
-    ) ->
+        self, series: list[Output.Series], agent_type: AgentType
+    ) -> dict[Optional[str], pd.DataFrame]:
         """
         Returns dict of DataFrame(s) containing all data from given `series` of given `agent_type`.
         When ResolveOption is `SPLIT`, the dict maps each complex column's name to the associated DataFrame.
```
```diff
@@ -58,8 +58,8 @@ class DataTransformer(ABC):
         return data_frames
 
     def _extract_agent_data(
-        self, series:
-    ) ->
+        self, series: list[Output.Series], agent_type: AgentType
+    ) -> dict[int, dict[tuple, list[Union[float, None, str]]]]:
         """Returns mapping of (agentId, timeStep) to fixed-length list of all output columns for given `class_name`"""
         container = DataTransformer._create_container(agent_type)
         mask_simple = agent_type.get_simple_column_mask()
```
```diff
@@ -69,7 +69,7 @@ class DataTransformer(ABC):
         return filled_columns
 
     @staticmethod
-    def _create_container(agent_type: AgentType) ->
+    def _create_container(agent_type: AgentType) -> dict[int, dict]:
         """Returns map of complex columns IDs to an empty dict, and one more for the remaining simple columns"""
         field_ids = agent_type.get_complex_column_ids().union([DataTransformer.SIMPLE_COLUMN_INDEX])
         return {field_id: {} for field_id in field_ids}
```
```diff
@@ -77,34 +77,34 @@ class DataTransformer(ABC):
     def _add_series_data(
         self,
         series: Output.Series,
-        mask_simple:
-        container:
+        mask_simple: list[bool],
+        container: dict[int, dict[tuple, list[Union[float, None, str]]]],
     ) -> None:
         """Adds data from given `series` to specified `container` dict as list"""
-        empty_list = [None] * len(mask_simple)
-        for line in series.
-            index = (series.
+        empty_list: list = [None] * len(mask_simple)
+        for line in series.lines:
+            index = (series.agent_id, line.time_step)
             simple_values = empty_list.copy()
-            for column in line.
-                if mask_simple[column.
-                    simple_values[column.
+            for column in line.columns:
+                if mask_simple[column.field_id]:
+                    simple_values[column.field_id] = column.value
                 else:
                     self._merge_complex_column(column, simple_values)
                     self._store_complex_values(column, container, index)
             container[DataTransformer.SIMPLE_COLUMN_INDEX][index] = simple_values
 
     @staticmethod
-    def _merge_complex_column(column: Output.Series.Line.Column, values:
+    def _merge_complex_column(column: Output.Series.Line.Column, values: list) -> None:
         """Does not merge complex column data"""
         pass
 
     @staticmethod
-    def _store_complex_values(column: Output.Series.Line.Column, container:
+    def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], index: tuple) -> None:
         """Does not store complex column data"""
         pass
 
     @staticmethod
-    def _get_column_map(agent_type: AgentType) ->
+    def _get_column_map(agent_type: AgentType) -> dict[int, str]:
         """Returns mapping of simple column IDs to their name for given `agent_type`"""
         return agent_type.get_simple_column_map()
 
```
```diff
@@ -115,8 +115,8 @@ class DataTransformerIgnore(DataTransformer):
 
 class DataTransformerSplit(DataTransformer):
     @staticmethod
-    def _store_complex_values(column: Output.Series.Line.Column, container:
+    def _store_complex_values(column: Output.Series.Line.Column, container: dict[int, dict], base_index: tuple) -> None:
         """Adds inner data from `column` to given `container` - split by column type"""
-        for entry in column.
-            index = base_index + tuple(entry.
-            container[column.
+        for entry in column.entries:
+            index = base_index + tuple(entry.index_values)
+            container[column.field_id][index] = entry.value
```
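Besides the type annotations, these hunks track the renamed snake_case fields of the regenerated `fameprotobuf` messages (`lines`, `columns`, `field_id`, `entries`, `index_values`). A sketch of what `DataTransformerSplit._store_complex_values` does with them, using a dataclass stand-in instead of the real protobuf message:

```python
from dataclasses import dataclass


@dataclass
class Entry:  # stand-in for Output.Series.Line.Column.Entry
    index_values: list[int]
    value: float


container: dict[int, dict] = {7: {}}  # one inner dict per complex column id
base_index = (42, 100)  # (agent_id, time_step)
for entry in [Entry([0], 1.5), Entry([1], 2.5)]:
    container[7][base_index + tuple(entry.index_values)] = entry.value
print(container)  # {7: {(42, 100, 0): 1.5, (42, 100, 1): 2.5}}
```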
fameio/{source/results → output}/input_dao.py

```diff
@@ -2,20 +2,20 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 import ast
-from typing import
+from typing import Any, Optional
 
-from fameprotobuf.
-from fameprotobuf.
-from fameprotobuf.
+from fameprotobuf.data_storage_pb2 import DataStorage
+from fameprotobuf.field_pb2 import NestedField
+from fameprotobuf.input_file_pb2 import InputData
 
-from fameio.
-from fameio.
-from fameio.
-from fameio.
+from fameio.input.scenario import GeneralProperties, Agent, Contract, Scenario
+from fameio.input.schema import Schema, AttributeSpecs, AttributeType
+from fameio.logs import log
+from fameio.series import TimeSeriesManager
 
 
-class
-    """
+class InputConversionError(Exception):
+    """Indicates an error during reconstruction of input from its protobuf representation"""
 
     pass
 
```
```diff
@@ -26,22 +26,24 @@ class InputDao:
     _ERR_NO_INPUTS = "No input data found on file."
     _ERR_MULTIPLE_INPUTS = "File corrupt. More than one input section found on file."
 
-    _FIELD_NAME_MAP:
-        AttributeType.STRING: "
-        AttributeType.
-        AttributeType.
-        AttributeType.
-        AttributeType.
-        AttributeType.
-        AttributeType.
+    _FIELD_NAME_MAP: dict = {
+        AttributeType.STRING: "string_values",
+        AttributeType.STRING_SET: "string_values",
+        AttributeType.ENUM: "string_values",
+        AttributeType.INTEGER: "int_values",
+        AttributeType.DOUBLE: "double_values",
+        AttributeType.LONG: "long_values",
+        AttributeType.TIME_STAMP: "long_values",
+        AttributeType.TIME_SERIES: "series_id",
+        AttributeType.BLOCK: "fields",
     }
 
     def __init__(self) -> None:
-        self._inputs:
+        self._inputs: list[InputData] = []
         self._timeseries_manager: TimeSeriesManager = TimeSeriesManager()
         self._schema: Optional[Schema] = None
 
-    def store_inputs(self, data_storages:
+    def store_inputs(self, data_storages: list[DataStorage]) -> None:
         """
         Extracts and stores Inputs in given DataStorages - if such are present
 
```
```diff
@@ -50,7 +52,7 @@ class InputDao:
         """
         self._inputs.extend([data_storage.input for data_storage in data_storages if data_storage.HasField("input")])
 
-    def recover_inputs(self) ->
+    def recover_inputs(self) -> tuple[TimeSeriesManager, Scenario]:
         """
         Recovers inputs to GeneralProperties, Schema, Agents, Contracts, Timeseries
 
```
```diff
@@ -81,10 +83,10 @@ class InputDao:
         """
         if not self._inputs:
             log().error(self._ERR_NO_INPUTS)
-            raise
+            raise InputConversionError(self._ERR_NO_INPUTS)
         if len(self._inputs) > 1:
             log().error(self._ERR_MULTIPLE_INPUTS)
-            raise
+            raise InputConversionError(self._ERR_MULTIPLE_INPUTS)
         return self._inputs[0]
 
     @staticmethod
```
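Where 2.3.1 ended these branches with a bare `raise`, 3.0.0 raises the new `InputConversionError`, so callers can handle conversion failures explicitly. A hedged usage sketch, assuming `recover_inputs` is the public entry point that reaches this check:

```python
dao = InputDao()
try:
    timeseries_manager, scenario = dao.recover_inputs()
except InputConversionError:
    # raised e.g. when no input section is found on file (_ERR_NO_INPUTS)
    print("File contains no recoverable input section")
```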
```diff
@@ -96,53 +98,53 @@ class InputDao:
     def _get_general_properties(input_data: InputData) -> GeneralProperties:
         """Read and return GeneralProperties from given `input_data`"""
         return GeneralProperties(
-            run_id=input_data.
-            simulation_start_time=input_data.simulation.
-            simulation_stop_time=input_data.simulation.
-            simulation_random_seed=input_data.simulation.
-            output_process=input_data.output.process,
-            output_interval=input_data.output.interval,
+            run_id=input_data.run_id,
+            simulation_start_time=input_data.simulation.start_time,
+            simulation_stop_time=input_data.simulation.stop_time,
+            simulation_random_seed=input_data.simulation.random_seed,
         )
 
     @staticmethod
-    def _get_contracts(input_data: InputData) ->
+    def _get_contracts(input_data: InputData) -> list[Contract]:
         """Read and return Contracts from given `input_data`"""
         return [
             Contract(
-                sender_id=contract.
-                receiver_id=contract.
-                product_name=contract.
-                delivery_interval=contract.
-                first_delivery_time=contract.
-                expiration_time=contract.
-
+                sender_id=contract.sender_id,
+                receiver_id=contract.receiver_id,
+                product_name=contract.product_name,
+                delivery_interval=contract.delivery_interval_in_steps,
+                first_delivery_time=contract.first_delivery_time,
+                expiration_time=contract.expiration_time,
+                metadata=ast.literal_eval(contract.metadata) if contract.metadata else None,
             )
-            for contract in input_data.
+            for contract in input_data.contracts
         ]
 
     def _init_timeseries(self, input_data: InputData) -> None:
         """Read timeseries from given `input_data` and initialise TimeSeriesManager"""
-        self._timeseries_manager.reconstruct_time_series(list(input_data.
+        self._timeseries_manager.reconstruct_time_series(list(input_data.time_series))
 
-    def _get_agents(self, input_data: InputData) ->
+    def _get_agents(self, input_data: InputData) -> list[Agent]:
         """Read and return Agents from given `input_data`"""
         agents = []
-        for agent_dao in input_data.
+        for agent_dao in input_data.agents:
             agent = Agent(
-                agent_id=agent_dao.id,
+                agent_id=agent_dao.id,
+                type_name=agent_dao.class_name,
+                metadata=ast.literal_eval(agent_dao.metadata) if agent_dao.metadata else None,
             )
             attribute_dict = self._get_attributes(
-                list(agent_dao.
+                list(agent_dao.fields), self._schema.agent_types[agent_dao.class_name].attributes
             )
             agent.init_attributes_from_dict(attribute_dict)
             agents.append(agent)
         return agents
 
-    def _get_attributes(self, fields:
+    def _get_attributes(self, fields: list[NestedField], schematics: dict[str, AttributeSpecs]) -> dict[str, Any]:
         """Read and return Attributes as Dictionary from given list of fields"""
-        attributes:
+        attributes: dict[str, Any] = {}
         for field in fields:
-            attributes[field.
+            attributes[field.field_name] = self._get_field_value(field, schematics[field.field_name])
         return attributes
 
     def _get_field_value(self, field: NestedField, schematic: AttributeSpecs) -> Any:
```
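Two mechanics of this hunk deserve a note: `_get_field_value` picks the protobuf field to read dynamically via `_FIELD_NAME_MAP`, and metadata strings are restored to Python objects with `ast.literal_eval`. A self-contained sketch of both, with stand-ins for the protobuf types:

```python
import ast

# Dynamic field lookup: the AttributeType selects which protobuf field holds
# the value (excerpt of the _FIELD_NAME_MAP shown above).
field_name_map = {"INTEGER": "int_values", "TIME_SERIES": "series_id"}


class FakeField:  # stand-in for a fameprotobuf NestedField message
    int_values = [1, 2, 3]


value = getattr(FakeField(), field_name_map["INTEGER"])  # [1, 2, 3]

# Metadata round-trip: stored as the string rendering of a dict, restored safely:
raw = "{'author': 'dlr'}"
metadata = ast.literal_eval(raw) if raw else None  # {'author': 'dlr'}
```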
```diff
@@ -150,12 +152,12 @@ class InputDao:
         attribute_type: AttributeType = schematic.attr_type
         value = field.__getattribute__(self._FIELD_NAME_MAP[attribute_type])
         if attribute_type is AttributeType.TIME_SERIES:
-            return self._timeseries_manager.get_reconstructed_series_by_id(field.
+            return self._timeseries_manager.get_reconstructed_series_by_id(field.series_id)
         elif attribute_type is AttributeType.BLOCK:
             if schematic.is_list:
-                return [self._get_attributes(list(entry.
+                return [self._get_attributes(list(entry.fields), schematic.nested_attributes) for entry in field.fields]
             else:
-                return self._get_attributes(list(field.
+                return self._get_attributes(list(field.fields), schematic.nested_attributes)
         else:
             if schematic.is_list:
                 return list(value)
```
fameio/{source/results → output}/output_dao.py

```diff
@@ -1,51 +1,50 @@
 # SPDX-FileCopyrightText: 2023 German Aerospace Center <fame@dlr.de>
 #
 # SPDX-License-Identifier: Apache-2.0
-
-from typing import List, Dict, Iterable, Optional
+from typing import Iterable, Optional
 
 import pandas as pd
-from fameprotobuf.
-from fameprotobuf.
+from fameprotobuf.data_storage_pb2 import DataStorage
+from fameprotobuf.services_pb2 import Output
 
-from fameio.
-from fameio.
+from fameio.output.agent_type import AgentTypeLog
+from fameio.output.data_transformer import DataTransformer
 
 
 class OutputDAO:
     """Grants convenient access to content of Output protobuf messages for given DataStorages"""
 
-    def __init__(self, data_storages:
+    def __init__(self, data_storages: list[DataStorage], agent_type_log: AgentTypeLog) -> None:
         self._agent_type_log = agent_type_log
         outputs = self._extract_output_from_data_storages(data_storages)
         self._agent_type_log.update_agents(self._extract_new_agent_types(outputs))
         self._all_series = self._extract_series(outputs)
 
     @staticmethod
-    def _extract_output_from_data_storages(data_storages:
+    def _extract_output_from_data_storages(data_storages: list[DataStorage]) -> list[Output]:
         """Returns list of Outputs extracted from given `data_storages`"""
         if data_storages is None:
             data_storages = []
         return [data_storage.output for data_storage in data_storages if data_storage.HasField("output")]
 
     @staticmethod
-    def _extract_new_agent_types(outputs:
+    def _extract_new_agent_types(outputs: list[Output]) -> dict[str, Output.AgentType]:
         """Returns dict of agent names mapped to its type defined in given `outputs`"""
-        list_of_agent_type_lists = [output.
+        list_of_agent_type_lists = [output.agent_types for output in outputs if len(output.agent_types) > 0]
         list_of_agent_types = [item for sublist in list_of_agent_type_lists for item in sublist]
-        return {item.
+        return {item.class_name: item for item in list_of_agent_types}
 
     @staticmethod
-    def _extract_series(outputs:
+    def _extract_series(outputs: list[Output]) -> dict[str, list[Output.Series]]:
         """Returns series data from associated `outputs` mapped to the className of its agent"""
         list_of_series_lists = [output.series for output in outputs if len(output.series) > 0]
         list_of_series = [series for sublist in list_of_series_lists for series in sublist]
 
         series_per_class_name = {}
         for series in list_of_series:
-            if series.
-                series_per_class_name[series.
-            series_per_class_name[series.
+            if series.class_name not in series_per_class_name:
+                series_per_class_name[series.class_name] = []
+            series_per_class_name[series.class_name].append(series)
         return series_per_class_name
 
     def get_sorted_agents_to_extract(self) -> Iterable[str]:
```
```diff
@@ -54,13 +53,13 @@ class OutputDAO:
         filtered_series = [agent_name for agent_name in all_series if self._agent_type_log.is_requested(agent_name)]
         return iter(filtered_series)
 
-    def _get_agent_names_by_series_count_ascending(self) ->
+    def _get_agent_names_by_series_count_ascending(self) -> list[str]:
         """Returns list of agent type names sorted by their amount of series"""
         length_per_agent_types = {agent_name: len(value) for agent_name, value in self._all_series.items()}
         sorted_dict = sorted(length_per_agent_types.items(), key=lambda item: item[1])
         return [agent_name for agent_name, _ in sorted_dict]
 
-    def get_agent_data(self, agent_name: str, data_transformer: DataTransformer) ->
+    def get_agent_data(self, agent_name: str, data_transformer: DataTransformer) -> dict[Optional[str], pd.DataFrame]:
         """
         Returns DataFrame(s) containing all data of given `agent` - data is removed after the first call
         Depending on the chosen ResolveOption the dict contains one DataFrame for the simple (and merged columns),
```
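Extraction order is decided by series count: agent types with fewer series are handled first, presumably so small results can be written out and released early. A sketch of the sort used by `_get_agent_names_by_series_count_ascending`, with invented data:

```python
all_series = {"Storage": ["s1", "s2", "s3"], "Trader": ["s1"]}
length_per_agent_types = {name: len(series) for name, series in all_series.items()}
sorted_names = [name for name, _ in sorted(length_per_agent_types.items(), key=lambda item: item[1])]
print(sorted_names)  # ['Trader', 'Storage']
```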
fameio/{source/results → output}/reader.py

```diff
@@ -4,14 +4,20 @@
 from __future__ import annotations
 
 import struct
-import typing
 from abc import ABC, abstractmethod
-from typing import IO,
+from typing import IO, final, NoReturn
 
-from fameprotobuf.
+from fameprotobuf.data_storage_pb2 import DataStorage
 from google.protobuf.message import DecodeError
 
-
+import fameio
+from fameio.logs import log, log_critical_and_raise
+
+
+class ProtobufReaderError(Exception):
+    """Indicates an error while reading a protobuf file"""
+
+    pass
 
 
 class Reader(ABC):
```
```diff
@@ -27,16 +33,27 @@ class Reader(ABC):
     _HEADER_LENGTH = 30
     HEADER_ENCODING = "utf-8"
     BYTES_DEFINING_MESSAGE_LENGTH = 4
+
+    _ERR_DEPRECATED_V0 = "Cannot read file: File was created with `FAME-Core` version <1.4 or `fameio` version < 1.6"
+    _ERR_DEPRECATED_V1 = "Cannot read file: File was created with `FAME-Core` version <2.0 or `fameio` version < 3.0"
+
     _READER_HEADERS = {
-
+        None: lambda file, mode: Reader._raise_error(Reader._ERR_DEPRECATED_V0),
+        fameio.FILE_HEADER_V1: lambda file, mode: Reader._raise_error(Reader._ERR_DEPRECATED_V1),
+        fameio.FILE_HEADER_V2: lambda file, mode: ReaderV2(file, mode),
     }
 
+    @staticmethod
+    @final
+    def _raise_error(error_message: str) -> NoReturn:
+        log_critical_and_raise(ProtobufReaderError(error_message))
+
     def __init__(self, file: IO, read_single) -> None:
         self._file = file
         self._read_single = read_single
 
     @abstractmethod
-    def read(self) ->
+    def read(self) -> list[DataStorage]:
         """Reads associated filestream and returns one or multiple DataStorage(s) or empty list"""
 
     @staticmethod
```
```diff
@@ -56,13 +73,9 @@ class Reader(ABC):
             header = file.read(Reader._HEADER_LENGTH).decode(Reader.HEADER_ENCODING)
             return Reader._READER_HEADERS[header](file, read_single)
         except (KeyError, UnicodeDecodeError):
-
-            file.seek(0)
-            if read_single:
-                log().error(Reader._ERR_UNSUPPORTED_MODE)
-            return ReaderV0(file, False)
+            return Reader._READER_HEADERS[None](file, read_single)
 
-    @
+    @final
     def _read_message_length(self) -> int:
         """Returns length of next DataStorage message in file"""
         message_length_byte = self._file.read(self.BYTES_DEFINING_MESSAGE_LENGTH)
```
```diff
@@ -73,7 +86,7 @@ class Reader(ABC):
         message_length_int = struct.unpack(">i", message_length_byte)[0]
         return message_length_int
 
-    @
+    @final
     def _read_data_storage_message(self, message_length: int = None) -> DataStorage:
         """
         Returns given `data_storage` read from current file position and following `message_length` bytes.
```
```diff
@@ -90,7 +103,7 @@ class Reader(ABC):
         return self._parse_to_data_storage(message) if message else None
 
     @staticmethod
-    @
+    @final
     def _parse_to_data_storage(message: bytes) -> DataStorage:
         data_storage = DataStorage()
         try:
```
```diff
@@ -100,24 +113,10 @@ class Reader(ABC):
             return data_storage
 
 
-class
-    """Reader class for
-
-    _WARN_DEPRECATED = "DeprecationWarning: Please consider updating to `FAME-Core>=1.4` and `fameio>=1.6`"
-
-    def __init__(self, file: IO, read_single):
-        super().__init__(file, read_single)
-        log().warning(self._WARN_DEPRECATED)
-
-    def read(self) -> List[DataStorage]:
-        result = self._read_data_storage_message()
-        return [result] if result else []
-
-
-class ReaderV1(Reader):
-    """Reader class for `fame-core>=1.4` output with header of version v001"""
+class ReaderV2(Reader):
+    """Reader class for `fame-core>=2.0` output with header of version v002"""
 
-    def read(self) ->
+    def read(self) -> list[DataStorage]:
         messages = []
         while True:
             message_length = self._read_message_length()
```
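Two mechanics carry these reader hunks: the 30-byte header string selects a reader factory from `_READER_HEADERS` (with `None` as the fallback key, which now raises `ProtobufReaderError` for pre-v2 files instead of falling back to a deprecated reader), and every `DataStorage` message is preceded by a 4-byte big-endian length, unpacked exactly as in `_read_message_length`. A runnable sketch of the length decoding:

```python
import struct

BYTES_DEFINING_MESSAGE_LENGTH = 4
buffer = b"\x00\x00\x01\x00" + b"<protobuf payload>"  # length prefix + message bytes
prefix = buffer[:BYTES_DEFINING_MESSAGE_LENGTH]
message_length = struct.unpack(">i", prefix)[0]  # big-endian signed int
print(message_length)  # 256
```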
fameio/{source/results → output}/yaml_writer.py

```diff
@@ -2,17 +2,16 @@
 #
 # SPDX-License-Identifier: Apache-2.0
 from pathlib import Path
-from typing import Dict
 
 import yaml
 
-from fameio.
+from fameio.logs import log
 
 ERR_WRITE_EXCEPTION = "Failed to save dictionary to YAML file `{}`"
 INFO_DESTINATION = "Saving scenario to file at {}"
 
 
-def data_to_yaml_file(data:
+def data_to_yaml_file(data: dict, file_path: Path) -> None:
     """
     Save the given data to a YAML file at given path
 
```