flood-adapt 0.3.9__py3-none-any.whl → 0.3.11__py3-none-any.whl

This diff shows the contents of two package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
Files changed (100)
  1. flood_adapt/__init__.py +26 -22
  2. flood_adapt/adapter/__init__.py +9 -9
  3. flood_adapt/adapter/fiat_adapter.py +1541 -1541
  4. flood_adapt/adapter/interface/hazard_adapter.py +70 -70
  5. flood_adapt/adapter/interface/impact_adapter.py +36 -36
  6. flood_adapt/adapter/interface/model_adapter.py +89 -89
  7. flood_adapt/adapter/interface/offshore.py +19 -19
  8. flood_adapt/adapter/sfincs_adapter.py +1853 -1848
  9. flood_adapt/adapter/sfincs_offshore.py +187 -193
  10. flood_adapt/config/config.py +248 -248
  11. flood_adapt/config/fiat.py +219 -219
  12. flood_adapt/config/gui.py +331 -331
  13. flood_adapt/config/sfincs.py +481 -336
  14. flood_adapt/config/site.py +129 -129
  15. flood_adapt/database_builder/database_builder.py +2210 -2210
  16. flood_adapt/database_builder/templates/default_units/imperial.toml +9 -9
  17. flood_adapt/database_builder/templates/default_units/metric.toml +9 -9
  18. flood_adapt/database_builder/templates/green_infra_table/green_infra_lookup_table.csv +10 -10
  19. flood_adapt/database_builder/templates/infographics/OSM/config_charts.toml +90 -90
  20. flood_adapt/database_builder/templates/infographics/OSM/config_people.toml +57 -57
  21. flood_adapt/database_builder/templates/infographics/OSM/config_risk_charts.toml +121 -121
  22. flood_adapt/database_builder/templates/infographics/OSM/config_roads.toml +65 -65
  23. flood_adapt/database_builder/templates/infographics/OSM/styles.css +45 -45
  24. flood_adapt/database_builder/templates/infographics/US_NSI/config_charts.toml +126 -126
  25. flood_adapt/database_builder/templates/infographics/US_NSI/config_people.toml +60 -60
  26. flood_adapt/database_builder/templates/infographics/US_NSI/config_risk_charts.toml +121 -121
  27. flood_adapt/database_builder/templates/infographics/US_NSI/config_roads.toml +65 -65
  28. flood_adapt/database_builder/templates/infographics/US_NSI/styles.css +45 -45
  29. flood_adapt/database_builder/templates/infometrics/OSM/metrics_additional_risk_configs.toml +4 -4
  30. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config.toml +143 -143
  31. flood_adapt/database_builder/templates/infometrics/OSM/with_SVI/infographic_metrics_config_risk.toml +153 -153
  32. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config.toml +127 -127
  33. flood_adapt/database_builder/templates/infometrics/OSM/without_SVI/infographic_metrics_config_risk.toml +57 -57
  34. flood_adapt/database_builder/templates/infometrics/US_NSI/metrics_additional_risk_configs.toml +4 -4
  35. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config.toml +191 -191
  36. flood_adapt/database_builder/templates/infometrics/US_NSI/with_SVI/infographic_metrics_config_risk.toml +153 -153
  37. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config.toml +178 -178
  38. flood_adapt/database_builder/templates/infometrics/US_NSI/without_SVI/infographic_metrics_config_risk.toml +57 -57
  39. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config.toml +9 -9
  40. flood_adapt/database_builder/templates/infometrics/mandatory_metrics_config_risk.toml +65 -65
  41. flood_adapt/database_builder/templates/output_layers/bin_colors.toml +5 -5
  42. flood_adapt/database_builder.py +16 -16
  43. flood_adapt/dbs_classes/__init__.py +21 -21
  44. flood_adapt/dbs_classes/database.py +533 -684
  45. flood_adapt/dbs_classes/dbs_benefit.py +77 -76
  46. flood_adapt/dbs_classes/dbs_event.py +61 -59
  47. flood_adapt/dbs_classes/dbs_measure.py +112 -111
  48. flood_adapt/dbs_classes/dbs_projection.py +34 -34
  49. flood_adapt/dbs_classes/dbs_scenario.py +137 -137
  50. flood_adapt/dbs_classes/dbs_static.py +274 -273
  51. flood_adapt/dbs_classes/dbs_strategy.py +130 -129
  52. flood_adapt/dbs_classes/dbs_template.py +279 -278
  53. flood_adapt/dbs_classes/interface/database.py +107 -139
  54. flood_adapt/dbs_classes/interface/element.py +121 -121
  55. flood_adapt/dbs_classes/interface/static.py +47 -47
  56. flood_adapt/flood_adapt.py +1229 -1178
  57. flood_adapt/misc/database_user.py +16 -16
  58. flood_adapt/misc/exceptions.py +22 -0
  59. flood_adapt/misc/log.py +183 -183
  60. flood_adapt/misc/path_builder.py +54 -54
  61. flood_adapt/misc/utils.py +185 -185
  62. flood_adapt/objects/__init__.py +82 -82
  63. flood_adapt/objects/benefits/benefits.py +61 -61
  64. flood_adapt/objects/events/event_factory.py +135 -135
  65. flood_adapt/objects/events/event_set.py +88 -84
  66. flood_adapt/objects/events/events.py +236 -234
  67. flood_adapt/objects/events/historical.py +58 -58
  68. flood_adapt/objects/events/hurricane.py +68 -67
  69. flood_adapt/objects/events/synthetic.py +46 -50
  70. flood_adapt/objects/forcing/__init__.py +92 -92
  71. flood_adapt/objects/forcing/csv.py +68 -68
  72. flood_adapt/objects/forcing/discharge.py +66 -66
  73. flood_adapt/objects/forcing/forcing.py +150 -150
  74. flood_adapt/objects/forcing/forcing_factory.py +182 -182
  75. flood_adapt/objects/forcing/meteo_handler.py +93 -93
  76. flood_adapt/objects/forcing/netcdf.py +40 -40
  77. flood_adapt/objects/forcing/plotting.py +453 -429
  78. flood_adapt/objects/forcing/rainfall.py +98 -98
  79. flood_adapt/objects/forcing/tide_gauge.py +191 -191
  80. flood_adapt/objects/forcing/time_frame.py +90 -90
  81. flood_adapt/objects/forcing/timeseries.py +564 -564
  82. flood_adapt/objects/forcing/unit_system.py +580 -580
  83. flood_adapt/objects/forcing/waterlevels.py +108 -108
  84. flood_adapt/objects/forcing/wind.py +124 -124
  85. flood_adapt/objects/measures/measure_factory.py +92 -92
  86. flood_adapt/objects/measures/measures.py +551 -529
  87. flood_adapt/objects/object_model.py +74 -68
  88. flood_adapt/objects/projections/projections.py +103 -103
  89. flood_adapt/objects/scenarios/scenarios.py +22 -22
  90. flood_adapt/objects/strategies/strategies.py +89 -89
  91. flood_adapt/workflows/benefit_runner.py +579 -554
  92. flood_adapt/workflows/floodmap.py +85 -85
  93. flood_adapt/workflows/impacts_integrator.py +85 -85
  94. flood_adapt/workflows/scenario_runner.py +70 -70
  95. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/LICENSE +674 -674
  96. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/METADATA +867 -865
  97. flood_adapt-0.3.11.dist-info/RECORD +140 -0
  98. flood_adapt-0.3.9.dist-info/RECORD +0 -139
  99. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/WHEEL +0 -0
  100. {flood_adapt-0.3.9.dist-info → flood_adapt-0.3.11.dist-info}/top_level.txt +0 -0
flood_adapt/objects/forcing/timeseries.py
@@ -1,564 +1,564 @@
- import os
- from datetime import timedelta
- from enum import Enum
- from pathlib import Path
- from typing import Generic, Optional, TypeVar
-
- import numpy as np
- import pandas as pd
- import plotly.express as px
- import plotly.graph_objects as go
- import tomli
- import tomli_w
- from pydantic import BaseModel, model_validator
-
- from flood_adapt.misc.path_builder import TopLevelDir, db_path
- from flood_adapt.objects.forcing import unit_system as us
- from flood_adapt.objects.forcing.csv import read_csv
- from flood_adapt.objects.forcing.time_frame import REFERENCE_TIME, TimeFrame
-
- TValueUnitPair = TypeVar("TValueUnitPair", bound=us.ValueUnitPair)
-
-
- class ShapeType(str, Enum):
-     gaussian = "gaussian"
-     block = "block"
-     triangle = "triangle"
-     scs = "scs"
-
-
- class Scstype(str, Enum):
-     type1 = "type_1"
-     type1a = "type_1a"
-     type2 = "type_2"
-     type3 = "type_3"
-
-
- class SyntheticTimeseries(BaseModel):
-     # Required
-     shape_type: ShapeType
-     duration: us.UnitfulTime
-     peak_time: us.UnitfulTime
-
-     # Exactly one of these must be set
-     peak_value: Optional[us.ValueUnitPairs] = None
-     cumulative: Optional[us.ValueUnitPairs] = None
-
-     # Optional
-     fill_value: float = 0.0
-
-     @model_validator(mode="after")
-     def positive_duration(self):
-         if self.duration.value < 0:
-             raise ValueError(
-                 f"Timeseries shape duration must be positive, got {self.duration}"
-             )
-         return self
-
-     @model_validator(mode="after")
-     def either_value_or_cumulative(self):
-         if (self.peak_value is None and self.cumulative is None) or (
-             self.peak_value is not None and self.cumulative is not None
-         ):
-             raise ValueError(
-                 "Exactly one of `peak_value` or `cumulative` must be specified for a SyntheticTimeseries."
-             )
-         return self
-
-     @property
-     def start_time(self) -> us.UnitfulTime:
-         return self.peak_time - self.duration / 2
-
-     @property
-     def end_time(self) -> us.UnitfulTime:
-         return self.peak_time + self.duration / 2
-
-     def calculate_data(
-         self, time_step: timedelta = TimeFrame().time_step
-     ) -> np.ndarray:
-         """Compute the timeseries values as a numpy array sampled at the provided time step."""
-         # @abstractmethod doesn't work nicely with pydantic BaseModel, so we raise instead
-         raise NotImplementedError(
-             "This method should be implemented in subclasses of SyntheticTimeseries."
-         )
-
-     def to_dataframe(
-         self,
-         time_frame: TimeFrame,
-     ) -> pd.DataFrame:
-         """
-         Interpolate the timeseries data to the provided time frame.
-
-         Parameters
-         ----------
-         time_frame : TimeFrame
-             Time frame (start time, end time and time step) that defines the index of the returned dataframe.
-         """
-         return self._to_dataframe(
-             time_frame=time_frame,
-             ts_start_time=self.start_time,
-             ts_end_time=self.end_time,
-             fill_value=self.fill_value,
-         )
-
-     def _to_dataframe(
-         self,
-         time_frame: TimeFrame,
-         ts_start_time: us.UnitfulTime,
-         ts_end_time: us.UnitfulTime,
-         fill_value: float = 0.0,
-     ) -> pd.DataFrame:
-         """
-         Convert timeseries data to a pandas DataFrame that has time as the index and intensity as the column.
-
-         The dataframe time range is from start_time to end_time with the provided time_step.
-         The timeseries data is added to this range by:
-         - interpolating the data to the time_step, and
-         - filling the missing values with `fill_value`.
-
-         Parameters
-         ----------
-         time_frame : TimeFrame
-             The time frame for the data.
-         ts_start_time : us.UnitfulTime
-             The start time of the timeseries data relative to the time_frame start time.
-         ts_end_time : us.UnitfulTime
-             The end time of the timeseries data relative to the time_frame start time.
-         fill_value : float, optional
-             The fill value for missing data. Defaults to 0.0.
-
-         Returns
-         -------
-         pd.DataFrame
-             A pandas DataFrame with time as the index and values as the columns.
-             The data is interpolated to the time_step; values that fall outside of the timeseries data are filled with `fill_value`.
-         """
-         full_df_time_range = pd.date_range(
-             start=time_frame.start_time,
-             end=time_frame.end_time,
-             freq=time_frame.time_step,
-             name="time",
-         )
-
-         data = self.calculate_data(time_step=time_frame.time_step) + fill_value
-
-         n_cols = data.shape[1] if len(data.shape) > 1 else 1
-         ts_time_range = pd.date_range(
-             start=(time_frame.start_time + ts_start_time.to_timedelta()),
-             end=(time_frame.start_time + ts_end_time.to_timedelta()),
-             freq=time_frame.time_step,
-         )
-
-         # If the data contains more than the requested time range (e.g. from reading a csv file),
-         # slice the data to match the expected time range
-         if len(data) > len(ts_time_range):
-             data = data[: len(ts_time_range)]
-
-         df = pd.DataFrame(
-             data, columns=[f"data_{i}" for i in range(n_cols)], index=ts_time_range
-         )
-
-         full_df = df.reindex(
-             index=full_df_time_range,
-             method="nearest",
-             limit=1,
-             fill_value=fill_value,
-         )
-         full_df = full_df.set_index(full_df_time_range)
-         full_df.index = pd.to_datetime(full_df.index)
-         full_df.index.name = "time"
-         return full_df
-
-     @classmethod
-     def load_file(cls, file_path: Path | str | os.PathLike) -> "SyntheticTimeseries":
-         """Load object from file."""
-         with open(file_path, mode="rb") as fp:
-             toml = tomli.load(fp)
-         return cls(**toml)
-
-     def save(self, filepath: Path):
-         """
-         Save Synthetic Timeseries toml.
-
-         Parameters
-         ----------
-         filepath : Path
-             Path to the location where the file will be saved.
-         """
-         with open(filepath, "wb") as f:
-             tomli_w.dump(self.model_dump(exclude_none=True), f)
-
-     @staticmethod
-     def plot(
-         df,
-         xmin: pd.Timestamp,
-         xmax: pd.Timestamp,
-         timeseries_variable: us.ValueUnitPair,
-     ) -> go.Figure:
-         fig = px.line(data_frame=df)
-         fig.update_layout(
-             autosize=False,
-             height=100 * 2,
-             width=280 * 2,
-             margin={"r": 0, "l": 0, "b": 0, "t": 0},
-             font={"size": 10, "color": "black", "family": "Arial"},
-             title_font={"size": 10, "color": "black", "family": "Arial"},
-             legend=None,
-             yaxis_title_font={"size": 10, "color": "black", "family": "Arial"},
-             xaxis_title_font={"size": 10, "color": "black", "family": "Arial"},
-             xaxis_title={"text": "Time"},
-             yaxis_title={"text": f"{timeseries_variable.units}"},
-             showlegend=False,
-             xaxis={"range": [xmin, xmax]},
-         )
-         return fig
-
-     def __eq__(self, other) -> bool:
-         if not isinstance(other, SyntheticTimeseries):
-             raise NotImplementedError(f"Cannot compare Timeseries to {type(other)}")
-
-         # allclose returns True if the following holds element-wise:
-         # absolute(a - b) <= (atol + rtol * absolute(b))
-         return np.allclose(
-             self.calculate_data(),
-             other.calculate_data(),
-             rtol=1e-2,
-         )
-
-
- class ScsTimeseries(SyntheticTimeseries):
-     shape_type: ShapeType = ShapeType.scs
-
-     scs_file_name: str
-     scs_type: Scstype
-
-     def calculate_data(
-         self, time_step: timedelta = TimeFrame().time_step
-     ) -> np.ndarray:
-         _duration = self.duration.convert(us.UnitTypesTime.seconds)
-         _start_time = self.start_time.convert(us.UnitTypesTime.seconds)
-
-         scs_df = pd.read_csv(
-             db_path(top_level_dir=TopLevelDir.static) / "scs" / self.scs_file_name,
-             index_col=0,
-         )[self.scs_type]
-
-         tt = pd.date_range(
-             start=(REFERENCE_TIME + self.start_time.to_timedelta()),
-             end=(REFERENCE_TIME + self.end_time.to_timedelta()),
-             freq=time_step,
-         )
-         tt = (tt - REFERENCE_TIME).total_seconds()
-
-         tt_rain = _start_time + scs_df.index.to_numpy() * _duration
-         rain_series = scs_df.to_numpy()
-         rain_instantaneous = np.diff(rain_series) / np.diff(
-             tt_rain / 3600
-         )  # divide by time in hours to get mm/hour
-
-         # interpolate the instantaneous rain intensity timeseries to tt
-         rain_interp = np.interp(
-             tt,
-             tt_rain,
-             np.concatenate(([0], rain_instantaneous)),
-             left=0,
-             right=0,
-         )
-         rainfall = (
-             rain_interp * self.cumulative.value / np.trapz(rain_interp, tt / 3600)
-         )
-
-         return rainfall
-
-     @model_validator(mode="after")
-     def validate_attrs(self):
-         if self.cumulative is None:
-             raise ValueError("SCS timeseries must have `cumulative` specified.")
-         return self
-
-
- class GaussianTimeseries(SyntheticTimeseries):
-     shape_type: ShapeType = ShapeType.gaussian
-
-     def calculate_data(
-         self, time_step: timedelta = TimeFrame().time_step
-     ) -> np.ndarray:
-         _start = self.start_time.convert(us.UnitTypesTime.hours)
-         _end = self.end_time.convert(us.UnitTypesTime.hours)
-
-         tt = pd.date_range(
-             start=(REFERENCE_TIME + self.start_time.to_timedelta()),
-             end=(REFERENCE_TIME + self.end_time.to_timedelta()),
-             freq=time_step,
-         )
-         tt_hours = (tt - REFERENCE_TIME).total_seconds() / 3600
-
-         mean = (_start + _end) / 2
-         sigma = (_end - _start) / 6
-         gaussian_curve = np.exp(-0.5 * ((tt_hours - mean) / sigma) ** 2)
-
-         if self.cumulative:
-             # Normalize so the integral over the time steps equals 1, then scale by the cumulative total
-             integral_approx = np.trapz(gaussian_curve, tt_hours)
-             normalized_gaussian = gaussian_curve / integral_approx
-             ts = self.cumulative.value * normalized_gaussian.to_numpy()
-         elif self.peak_value:
-             ts = self.peak_value.value * gaussian_curve
-         else:
-             raise ValueError("Either peak_value or cumulative must be specified.")
-
-         return ts
-
-     @model_validator(mode="after")
-     def validate_attrs(self):
-         # either peak_value or cumulative must be set, which is already checked in the parent class: `either_value_or_cumulative`
-         return self
-
-
- class BlockTimeseries(SyntheticTimeseries):
-     shape_type: ShapeType = ShapeType.block
-
-     def calculate_data(
-         self, time_step: timedelta = TimeFrame().time_step
-     ) -> np.ndarray:
-         tt = pd.date_range(
-             start=(REFERENCE_TIME + self.start_time.to_timedelta()),
-             end=(REFERENCE_TIME + self.end_time.to_timedelta()),
-             freq=time_step,
-         )
-         if self.peak_value:
-             height_value = self.peak_value.value
-         elif self.cumulative:
-             area = self.cumulative.value
-             base = self.duration.convert(
-                 us.UnitTypesTime.hours
-             )  # always expect duration in hours
-             height_value = area / base
-
-         ts = np.zeros((len(tt),)) + height_value
-         return ts
-
-     @model_validator(mode="after")
-     def validate_attrs(self):
-         # either peak_value or cumulative must be set, which is already checked in the parent class: `either_value_or_cumulative`
-         return self
-
-
- class TriangleTimeseries(SyntheticTimeseries):
-     shape_type: ShapeType = ShapeType.triangle
-
-     def calculate_data(
-         self, time_step: timedelta = TimeFrame().time_step
-     ) -> np.ndarray:
-         tt = pd.date_range(
-             start=(REFERENCE_TIME + self.start_time.to_timedelta()),
-             end=(REFERENCE_TIME + self.end_time.to_timedelta()),
-             freq=time_step,
-         )
-         tt_seconds = (tt - REFERENCE_TIME).total_seconds()
-         peak_time = self.peak_time.to_timedelta().total_seconds()
-         start_time = self.start_time.to_timedelta().total_seconds()
-
-         if self.peak_value:
-             height_value = self.peak_value.value
-         elif self.cumulative:
-             area = self.cumulative.value
-             base = self.duration.convert(
-                 us.UnitTypesTime.hours
-             )  # always expect duration in hours
-             height_value = (2 * area) / base
-
-         ascending_slope = (
-             height_value
-             / (self.peak_time - self.start_time).to_timedelta().total_seconds()
-         )
-         descending_slope = (
-             -height_value
-             / (self.end_time - self.peak_time).to_timedelta().total_seconds()
-         )
-
-         ts = np.piecewise(
-             tt_seconds,
-             [tt_seconds < peak_time, tt_seconds >= peak_time],
-             [
-                 lambda x: np.maximum(ascending_slope * (x - start_time), 0),
-                 lambda x: np.maximum(
-                     descending_slope * (x - peak_time) + height_value, 0
-                 ),
-                 0,
-             ],
-         )
-         return ts
-
-     @model_validator(mode="after")
-     def validate_attrs(self):
-         # either peak_value or cumulative must be set, which is already checked in the parent class: `either_value_or_cumulative`
-         return self
-
-
- class CSVTimeseries(BaseModel, Generic[TValueUnitPair]):
-     path: Path
-     units: TValueUnitPair
-
-     @model_validator(mode="after")
-     def validate_csv(self):
-         if not self.path.exists():
-             raise ValueError(f"Path {self.path} does not exist.")
-         if not self.path.is_file():
-             raise ValueError(f"Path {self.path} is not a file.")
-         if self.path.suffix != ".csv":
-             raise ValueError(f"Path {self.path} is not a csv file.")
-
-         # Try loading the csv file; read_csv will raise an error if it cannot read the file
-         read_csv(self.path)
-         return self
-
-     @staticmethod
-     def load_file(path: str | Path, units: us.ValueUnitPair):
-         return CSVTimeseries[type(units)](path=Path(path), units=units)
-
-     def to_dataframe(
-         self,
-         time_frame: TimeFrame,
-         fill_value: float = 0,
-     ) -> pd.DataFrame:
-         """
-         Interpolate the timeseries data to the provided time frame.
-
-         Parameters
-         ----------
-         time_frame : TimeFrame
-             Time frame for the data.
-         fill_value : float, optional
-             Value to fill missing data with, by default 0.
-
-         Returns
-         -------
-         pd.DataFrame
-             Interpolated timeseries with datetime index.
-         """
-         file_data = read_csv(self.path)
-
-         # Ensure the requested time range is within the available data
-         start_time = max(time_frame.start_time, file_data.index.min())
-         end_time = min(time_frame.end_time, file_data.index.max())
-
-         df = file_data.loc[start_time:end_time]
-
-         # Generate the complete time range
-         time_range = pd.date_range(
-             start=time_frame.start_time,
-             end=time_frame.end_time,
-             freq=time_frame.time_step,
-         )
-
-         # Reindex and fill missing values with the specified fill_value
-         interpolated_df = (
-             df.reindex(time_range, method="nearest", limit=1)
-             .interpolate(method="linear")
-             .fillna(fill_value)
-         )
-         interpolated_df.index.name = "time"
-         return interpolated_df
-
-     def calculate_data(
-         self,
-         time_step: timedelta = TimeFrame().time_step,
-     ) -> np.ndarray:
-         return read_csv(self.path).to_numpy()
-
-     def read_time_frame(self) -> TimeFrame:
-         """
-         Read the time frame from the file.
-
-         Returns
-         -------
-         TimeFrame
-             Time frame of the data in the file.
-         """
-         file_data = read_csv(self.path)
-         return TimeFrame(
-             start_time=file_data.index.min(),
-             end_time=file_data.index.max(),
-         )
-
-
- class TimeseriesFactory:
-     @staticmethod
-     def from_args(
-         shape_type: ShapeType,
-         duration: us.UnitfulTime,
-         peak_time: us.UnitfulTime,
-         peak_value: Optional[us.ValueUnitPairs] = None,
-         cumulative: Optional[us.ValueUnitPairs] = None,
-         fill_value: float = 0.0,
-         scs_file_name: Optional[str] = None,
-         scs_type: Optional[Scstype] = None,
-     ) -> SyntheticTimeseries:
-         """Create a timeseries object based on the shape type."""
-         match shape_type:
-             case ShapeType.gaussian:
-                 return GaussianTimeseries(
-                     duration=duration,
-                     peak_time=peak_time,
-                     peak_value=peak_value,
-                     cumulative=cumulative,
-                     fill_value=fill_value,
-                 )
-             case ShapeType.block:
-                 return BlockTimeseries(
-                     duration=duration,
-                     peak_time=peak_time,
-                     peak_value=peak_value,
-                     cumulative=cumulative,
-                     fill_value=fill_value,
-                 )
-             case ShapeType.triangle:
-                 return TriangleTimeseries(
-                     duration=duration,
-                     peak_time=peak_time,
-                     peak_value=peak_value,
-                     cumulative=cumulative,
-                     fill_value=fill_value,
-                 )
-             case ShapeType.scs:
-                 if scs_file_name is None or scs_type is None:
-                     from flood_adapt.dbs_classes.database import Database
-
-                     scs_config = Database().site.sfincs.scs
-                     if scs_config is None:
-                         raise ValueError("SCS configuration not found in database.")
-                     scs_file_name = scs_file_name or scs_config.file
-                     scs_type = scs_type or scs_config.type
-
-                 return ScsTimeseries(
-                     duration=duration,
-                     peak_time=peak_time,
-                     peak_value=peak_value,
-                     cumulative=cumulative,
-                     fill_value=fill_value,
-                     scs_file_name=scs_file_name,
-                     scs_type=scs_type,
-                 )
-             case _:
-                 raise ValueError(f"Unknown shape type {shape_type}.")
-
-     @staticmethod
-     def load_file(
-         file_path: Path | str | os.PathLike,
-     ) -> SyntheticTimeseries:
-         """Load object from file."""
-         with open(file_path, mode="rb") as fp:
-             toml = tomli.load(fp)
-         return TimeseriesFactory.from_args(
-             **toml,
-         )
-
-     @staticmethod
-     def from_object(obj: SyntheticTimeseries) -> SyntheticTimeseries:
-         return TimeseriesFactory.from_args(**obj.model_dump(exclude_none=True))
+ (the 564 added lines in 0.3.11 are identical to the 564 removed lines shown above)
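
As a reading aid for the hunk above: the `cumulative` branches of the gaussian, block, and triangle shapes all reduce to elementary area arguments (scale the curve so its time integral equals the cumulative total; a block of area A over base b has height A/b; a triangle has peak height 2A/b). The following is a minimal standalone sketch of those scalings in plain numpy/pandas, using illustrative values assumed here (a 6-hour window on hourly steps with a 30 mm total) rather than flood_adapt's unit-system objects:

import numpy as np
import pandas as pd

# Hypothetical inputs, not taken from the package: 6-hour event, hourly steps, 30 mm total
start_h, end_h, cumulative = 0.0, 6.0, 30.0
duration_h = end_h - start_h

tt = pd.date_range("2020-01-01 00:00", periods=7, freq="1h")
tt_hours = (tt - tt[0]).total_seconds().to_numpy() / 3600

# Gaussian: peak at the window centre, +/- 3 sigma spanning the window,
# then scale so the trapezoidal time integral equals `cumulative`
mean = (start_h + end_h) / 2
sigma = (end_h - start_h) / 6
curve = np.exp(-0.5 * ((tt_hours - mean) / sigma) ** 2)
gaussian_ts = cumulative * curve / np.trapz(curve, tt_hours)

# Block: area = base * height  =>  height = area / base
block_ts = np.full(len(tt), cumulative / duration_h)

# Triangle: area = base * height / 2  =>  peak height = 2 * area / base
triangle_peak = 2 * cumulative / duration_h

print(round(float(np.trapz(gaussian_ts, tt_hours)), 2))  # 30.0 by construction
print(float(np.trapz(block_ts, tt_hours)))               # 30.0
print(triangle_peak)                                      # 10.0

This mirrors the `np.trapz` normalization in GaussianTimeseries.calculate_data and the `height_value` computations in BlockTimeseries and TriangleTimeseries, without depending on `us.UnitfulTime` or `us.ValueUnitPairs`.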