roms-tools 3.1.2__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. roms_tools/__init__.py +3 -0
  2. roms_tools/analysis/cdr_analysis.py +203 -0
  3. roms_tools/analysis/cdr_ensemble.py +198 -0
  4. roms_tools/analysis/roms_output.py +80 -46
  5. roms_tools/data/grids/GLORYS_global_grid.nc +0 -0
  6. roms_tools/download.py +4 -0
  7. roms_tools/plot.py +75 -21
  8. roms_tools/setup/boundary_forcing.py +44 -19
  9. roms_tools/setup/cdr_forcing.py +122 -8
  10. roms_tools/setup/cdr_release.py +161 -8
  11. roms_tools/setup/datasets.py +626 -340
  12. roms_tools/setup/grid.py +138 -137
  13. roms_tools/setup/initial_conditions.py +113 -48
  14. roms_tools/setup/mask.py +63 -7
  15. roms_tools/setup/nesting.py +67 -42
  16. roms_tools/setup/river_forcing.py +45 -19
  17. roms_tools/setup/surface_forcing.py +4 -6
  18. roms_tools/setup/tides.py +1 -2
  19. roms_tools/setup/topography.py +4 -4
  20. roms_tools/setup/utils.py +134 -22
  21. roms_tools/tests/test_analysis/test_cdr_analysis.py +144 -0
  22. roms_tools/tests/test_analysis/test_cdr_ensemble.py +202 -0
  23. roms_tools/tests/test_analysis/test_roms_output.py +61 -3
  24. roms_tools/tests/test_setup/test_boundary_forcing.py +54 -52
  25. roms_tools/tests/test_setup/test_cdr_forcing.py +54 -0
  26. roms_tools/tests/test_setup/test_cdr_release.py +118 -1
  27. roms_tools/tests/test_setup/test_datasets.py +392 -44
  28. roms_tools/tests/test_setup/test_grid.py +222 -115
  29. roms_tools/tests/test_setup/test_initial_conditions.py +94 -41
  30. roms_tools/tests/test_setup/test_surface_forcing.py +2 -1
  31. roms_tools/tests/test_setup/test_utils.py +91 -1
  32. roms_tools/tests/test_setup/utils.py +71 -0
  33. roms_tools/tests/test_tiling/test_join.py +241 -0
  34. roms_tools/tests/test_utils.py +139 -17
  35. roms_tools/tiling/join.py +189 -0
  36. roms_tools/utils.py +131 -99
  37. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/METADATA +12 -2
  38. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/RECORD +41 -33
  39. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/WHEEL +0 -0
  40. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/licenses/LICENSE +0 -0
  41. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/top_level.txt +0 -0
roms_tools/plot.py CHANGED
@@ -1,6 +1,7 @@
 from typing import Any, Literal
 
 import cartopy.crs as ccrs
+import matplotlib.dates as mdates
 import matplotlib.pyplot as plt
 import numpy as np
 import xarray as xr
@@ -9,18 +10,16 @@ from matplotlib.figure import Figure
 
 from roms_tools.regrid import LateralRegridFromROMS, VerticalRegridFromROMS
 from roms_tools.utils import (
-    _generate_coordinate_range,
-    _remove_edge_nans,
+    generate_coordinate_range,
     infer_nominal_horizontal_resolution,
     normalize_longitude,
+    remove_edge_nans,
 )
 from roms_tools.vertical_coordinate import compute_depth_coordinates
 
 LABEL_COLOR = "k"
 LABEL_SZ = 10
 FONT_SZ = 10
-EDGE_POS_START = "start"
-EDGE_POS_END = "end"
 
 
 def _add_gridlines(ax: Axes) -> None:
@@ -497,16 +496,16 @@ def line_plot(
 
 
 def _get_edge(
-    arr: xr.DataArray, dim_name: str, pos: Literal[EDGE_POS_START, EDGE_POS_END]
+    arr: xr.DataArray, dim_name: str, pos: Literal["start", "end"]
 ) -> xr.DataArray:
     """Extract the first ("start") or last ("end") slice along the given dimension."""
-    if pos == EDGE_POS_START:
+    if pos == "start":
         return arr.isel({dim_name: 0})
 
-    if pos == EDGE_POS_END:
+    if pos == "end":
         return arr.isel({dim_name: -1})
 
-    raise ValueError(f"pos must be {EDGE_POS_START} or {EDGE_POS_END}")
+    raise ValueError("pos must be `start` or `end`")
 
 
 def _add_boundary_to_ax(
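For orientation, this refactor simply inlines the `"start"`/`"end"` string literals that the removed `EDGE_POS_*` module constants used to hold. A minimal sketch of what `_get_edge` computes, on a toy array with invented dimension names:

```python
import numpy as np
import xarray as xr

# Toy field on a (eta_rho, xi_rho) grid; names are illustrative only.
arr = xr.DataArray(np.arange(12).reshape(3, 4), dims=["eta_rho", "xi_rho"])

# Equivalent to _get_edge(arr, "eta_rho", "start") / _get_edge(arr, "eta_rho", "end"):
first_row = arr.isel({"eta_rho": 0})
last_row = arr.isel({"eta_rho": -1})
print(first_row.values)  # [0 1 2 3]
print(last_row.values)   # [ 8  9 10 11]
```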
@@ -538,23 +537,23 @@ def _add_boundary_to_ax(
 
     edges = [
         (
-            _get_edge(lon_deg, xi_dim, EDGE_POS_START),
-            _get_edge(lat_deg, xi_dim, EDGE_POS_START),
+            _get_edge(lon_deg, xi_dim, "start"),
+            _get_edge(lat_deg, xi_dim, "start"),
             r"$\eta$",
         ),  # left
         (
-            _get_edge(lon_deg, xi_dim, EDGE_POS_END),
-            _get_edge(lat_deg, xi_dim, EDGE_POS_END),
+            _get_edge(lon_deg, xi_dim, "end"),
+            _get_edge(lat_deg, xi_dim, "end"),
             r"$\eta$",
         ),  # right
         (
-            _get_edge(lon_deg, eta_dim, EDGE_POS_START),
-            _get_edge(lat_deg, eta_dim, EDGE_POS_START),
+            _get_edge(lon_deg, eta_dim, "start"),
+            _get_edge(lat_deg, eta_dim, "start"),
             r"$\xi$",
         ),  # bottom
         (
-            _get_edge(lon_deg, eta_dim, EDGE_POS_END),
-            _get_edge(lat_deg, eta_dim, EDGE_POS_END),
+            _get_edge(lon_deg, eta_dim, "end"),
+            _get_edge(lat_deg, eta_dim, "end"),
             r"$\xi$",
         ),  # top
     ]
@@ -819,7 +818,7 @@ def plot(
     with_dim_names: bool = False,
     ax: Axes | None = None,
     save_path: str | None = None,
-    cmap_name: str | None = "YlOrRd",
+    cmap_name: str = "YlOrRd",
     add_colorbar: bool = True,
 ) -> None:
     """Generate a plot of a 2D or 3D ROMS field for a horizontal or vertical slice.
@@ -1052,7 +1051,7 @@ def plot(
             title = title + f", lat = {lat}°N"
         else:
             resolution = infer_nominal_horizontal_resolution(grid_ds)
-            lats = _generate_coordinate_range(
+            lats = generate_coordinate_range(
                 field.lat.min().values, field.lat.max().values, resolution
             )
             lats = xr.DataArray(lats, dims=["lat"], attrs={"units": "°N"})
@@ -1062,7 +1061,7 @@ def plot(
             title = title + f", lon = {lon}°E"
         else:
             resolution = infer_nominal_horizontal_resolution(grid_ds, lat)
-            lons = _generate_coordinate_range(
+            lons = generate_coordinate_range(
                 field.lon.min().values, field.lon.max().values, resolution
             )
             lons = xr.DataArray(lons, dims=["lon"], attrs={"units": "°E"})
@@ -1079,11 +1078,11 @@ def plot(
         field = field.assign_coords({"layer_depth": layer_depth})
 
     if lat is not None:
-        field, layer_depth = _remove_edge_nans(
+        field, layer_depth = remove_edge_nans(
             field, "lon", layer_depth if "layer_depth" in locals() else None
         )
     if lon is not None:
-        field, layer_depth = _remove_edge_nans(
+        field, layer_depth = remove_edge_nans(
             field, "lat", layer_depth if "layer_depth" in locals() else None
         )
 
@@ -1252,3 +1251,58 @@ def plot_location(
 
     if include_legend:
         ax.legend(loc="center left", bbox_to_anchor=(1.1, 0.5))
+
+
+def plot_uptake_efficiency(ds: xr.Dataset) -> None:
+    """
+    Plot Carbon Dioxide Removal (CDR) uptake efficiency over time.
+
+    This function plots two estimates of uptake efficiency stored in the dataset:
+    1. `cdr_efficiency`, computed from CO2 flux differences.
+    2. `cdr_efficiency_from_delta_diff`, computed from DIC differences.
+
+    The x-axis shows absolute time, formatted as YYYY-MM-DD, and the y-axis shows
+    the uptake efficiency values. The plot includes a legend and grid for clarity.
+
+    Parameters
+    ----------
+    ds : xarray.Dataset
+        Dataset containing the following variables:
+        - "abs_time": array of timestamps (datetime-like)
+        - "cdr_efficiency": uptake efficiency from flux differences
+        - "cdr_efficiency_from_delta_diff": uptake efficiency from DIC differences
+
+    Raises
+    ------
+    ValueError
+        If required variables are missing or empty.
+
+    Returns
+    -------
+    None
+    """
+    required_vars = ["abs_time", "cdr_efficiency", "cdr_efficiency_from_delta_diff"]
+    for var in required_vars:
+        if var not in ds or ds[var].size == 0:
+            raise ValueError(f"Dataset must contain non-empty variable '{var}'.")
+
+    times = ds["abs_time"]
+
+    # Check for monotonically increasing times
+    if not np.all(times[1:] >= times[:-1]):
+        raise ValueError("abs_time must be strictly increasing.")
+
+    fig, ax = plt.subplots(figsize=(10, 4))
+
+    ax.plot(times, ds["cdr_efficiency"], label="from CO2 flux differences", lw=2)
+    ax.plot(
+        times, ds["cdr_efficiency_from_delta_diff"], label="from DIC differences", lw=2
+    )
+    ax.grid()
+    ax.set_title("CDR uptake efficiency")
+    ax.legend()
+
+    # Format x-axis as YYYY-MM-DD
+    ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d"))
+    fig.autofmt_xdate()
+    plt.show()
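A hypothetical usage sketch for the new `plot_uptake_efficiency` helper; the dataset below is synthetic, with made-up efficiency curves, purely to show the expected variables:

```python
import numpy as np
import pandas as pd
import xarray as xr

from roms_tools.plot import plot_uptake_efficiency

# Build a synthetic one-year daily dataset with the three required variables.
times = pd.date_range("2020-01-01", periods=365, freq="D")
eff = 1 - np.exp(-np.arange(365) / 120.0)  # toy saturation curve, not real output

ds = xr.Dataset(
    {
        "abs_time": ("time", times),
        "cdr_efficiency": ("time", eff),
        "cdr_efficiency_from_delta_diff": ("time", 0.97 * eff),
    }
)
plot_uptake_efficiency(ds)  # raises ValueError if a variable is missing or empty
```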
roms_tools/setup/boundary_forcing.py CHANGED
@@ -15,9 +15,9 @@ from roms_tools.plot import line_plot, section_plot
 from roms_tools.regrid import LateralRegridToROMS, VerticalRegridToROMS
 from roms_tools.setup.datasets import (
     CESMBGCDataset,
-    Dataset,
     GLORYSDataset,
     GLORYSDefaultDataset,
+    RawDataSource,
     UnifiedBGCDataset,
 )
 from roms_tools.setup.utils import (
@@ -63,7 +63,7 @@ class BoundaryForcing:
         If no time filtering is desired, set it to None. Default is None.
     boundaries : Dict[str, bool], optional
         Dictionary specifying which boundaries are forced (south, east, north, west). Default is all True.
-    source : Dict[str, Union[str, Path, List[Union[str, Path]]], bool]
+    source : RawDataSource
         Dictionary specifying the source of the boundary forcing data. Keys include:
 
         - "name" (str): Name of the data source (e.g., "GLORYS").
@@ -71,7 +71,9 @@ class BoundaryForcing:
 
           - A single string (with or without wildcards).
           - A single Path object.
-          - A list of strings or Path objects containing multiple files.
+          - A list of strings or Path objects.
+          If omitted, the data will be streamed via the Copernicus Marine Toolkit.
+          Note: streaming is currently not recommended due to performance limitations.
         - "climatology" (bool): Indicates if the data is climatology data. Defaults to False.
 
     type : str
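To make the two documented `source` shapes concrete, a hedged sketch (file paths are placeholders, not real data):

```python
from pathlib import Path

# Local files: "path" may be a string (wildcards allowed), a Path, or a list of either.
source_local = {
    "name": "GLORYS",
    "path": [Path("/data/glorys_jan.nc"), Path("/data/glorys_feb.nc")],
    "climatology": False,
}

# Streaming: omit "path" and the data is fetched via the Copernicus Marine
# Toolkit (noted above as currently not recommended for performance reasons).
source_streaming = {"name": "GLORYS", "climatology": False}
```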
@@ -124,7 +126,7 @@ class BoundaryForcing:
         }
     )
     """Dictionary specifying which boundaries are forced (south, east, north, west)."""
-    source: dict[str, str | Path | list[str | Path]]
+    source: RawDataSource
    """Dictionary specifying the source of the boundary forcing data."""
     type: str = "physics"
     """Specifies the type of forcing data ("physics", "bgc")."""
@@ -157,7 +159,6 @@ class BoundaryForcing:
         if self.apply_2d_horizontal_fill:
             data.choose_subdomain(
                 target_coords,
-                buffer_points=20,  # lateral fill needs good buffer from data margin
             )
             # Enforce double precision to ensure reproducibility
             data.convert_to_float64()
@@ -297,14 +298,12 @@ class BoundaryForcing:
                 zeta_v = zeta_v.isel(**self.bdry_coords["v"][direction])
 
             if not self.apply_2d_horizontal_fill and bdry_data.needs_lateral_fill:
-                logging.info(
-                    f"Applying 1D horizontal fill to {direction}ern boundary."
-                )
-                self._validate_1d_fill(
-                    processed_fields,
-                    direction,
-                    bdry_data.dim_names["depth"],
-                )
+                if not self.bypass_validation:
+                    self._validate_1d_fill(
+                        processed_fields,
+                        direction,
+                        bdry_data.dim_names["depth"],
+                    )
                 for var_name in processed_fields:
                     processed_fields[var_name] = apply_1d_horizontal_fill(
                         processed_fields[var_name]
@@ -435,7 +434,9 @@ class BoundaryForcing:
                 "Sea surface height will NOT be used to adjust depth coordinates."
             )
 
-    def _get_data(self) -> Dataset:
+    def _get_data(
+        self,
+    ) -> GLORYSDataset | GLORYSDefaultDataset | CESMBGCDataset | UnifiedBGCDataset:
         """Determine the correct `Dataset` type and return an instance.
 
         Returns
@@ -444,7 +445,21 @@ class BoundaryForcing:
             The `Dataset` instance
 
         """
-        dataset_map: dict[str, dict[str, dict[str, type[Dataset]]]] = {
+        dataset_map: dict[
+            str,
+            dict[
+                str,
+                dict[
+                    str,
+                    type[
+                        GLORYSDataset
+                        | GLORYSDefaultDataset
+                        | CESMBGCDataset
+                        | UnifiedBGCDataset
+                    ],
+                ],
+            ],
+        ] = {
             "physics": {
                 "GLORYS": {
                     "external": GLORYSDataset,
@@ -471,13 +486,16 @@ class BoundaryForcing:
 
         data_type = dataset_map[self.type][source_name][variant]
 
+        if isinstance(self.source["path"], bool):
+            raise ValueError('source["path"] cannot be a boolean here')
+
         return data_type(
             filename=self.source["path"],
             start_time=self.start_time,
             end_time=self.end_time,
-            climatology=self.source["climatology"],
+            climatology=self.source["climatology"],  # type: ignore[arg-type]
             use_dask=self.use_dask,
-        )  # type: ignore
+        )
 
     def _set_variable_info(self, data):
         """Sets up a dictionary with metadata for variables based on the type of data
@@ -756,6 +774,9 @@ class BoundaryForcing:
         None
             If a boundary is divided by land, a warning is issued. No return value is provided.
         """
+        if not hasattr(self, "_warned_directions"):
+            self._warned_directions = set()
+
         for var_name in processed_fields.keys():
             if self.variable_info[var_name]["validate"]:
                 location = self.variable_info[var_name]["location"]
@@ -778,16 +799,20 @@ class BoundaryForcing:
                 wet_nans = xr.where(da.where(mask).isnull(), 1, 0)
                 # Apply label to find connected components of wet NaNs
                 labeled_array, num_features = label(wet_nans)
+
                 left_margin = labeled_array[0]
                 right_margin = labeled_array[-1]
                 if left_margin != 0:
                     num_features = num_features - 1
                 if right_margin != 0:
                     num_features = num_features - 1
-                if num_features > 0:
+
+                if num_features > 0 and direction not in self._warned_directions:
                     logging.warning(
-                        f"For {var_name}, the {direction}ern boundary is divided by land. It would be safer (but slower) to use `apply_2d_horizontal_fill = True`."
+                        f"The {direction}ern boundary is divided by land. "
+                        "It would be safer (but slower and more memory-intensive) to use `apply_2d_horizontal_fill = True`."
                     )
+                    self._warned_directions.add(direction)
 
     def _validate(self, ds):
         """Validate the dataset for NaN values at the first time step (bry_time=0) for
roms_tools/setup/cdr_forcing.py CHANGED
@@ -2,19 +2,19 @@ import itertools
 import logging
 from collections import Counter
 from collections.abc import Iterator
-from datetime import datetime
+from datetime import datetime, timedelta
 from pathlib import Path
 from typing import Annotated
 
 import matplotlib.gridspec as gridspec
 import matplotlib.pyplot as plt
 import numpy as np
+import pandas as pd
 import xarray as xr
 from pydantic import (
     BaseModel,
     Field,
     RootModel,
-    conlist,
     model_serializer,
     model_validator,
 )
@@ -40,6 +40,7 @@ from roms_tools.setup.utils import (
     from_yaml,
     gc_dist,
     get_target_coords,
+    get_tracer_metadata_dict,
     to_dict,
     validate_names,
     write_to_yaml,
@@ -103,14 +104,16 @@ class ReleaseSimulationManager(BaseModel):
 class ReleaseCollector(RootModel):
     """Collects and validates multiple releases against each other."""
 
-    root: conlist(
-        Annotated[
-            VolumeRelease | TracerPerturbation, Field(discriminator="release_type")
+    root: Annotated[
+        list[
+            Annotated[
+                VolumeRelease | TracerPerturbation, Field(discriminator="release_type")
+            ]
         ],
-        min_length=1,
-    ) = Field(alias="releases")
+        Field(alias="releases", min_length=1),
+    ]
 
-    _release_type: ReleaseType = None
+    _release_type: ReleaseType | None = None
 
     def __iter__(self) -> Iterator[Release]:
         return iter(self.root)
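The `root` rewrite replaces the `conlist(...)` helper with the equivalent, recommended Pydantic v2 form: the list constraint moves into `Annotated[list[...], Field(min_length=1)]`, with the discriminated union annotated on the element type. A self-contained sketch of the same pattern with invented models:

```python
from typing import Annotated, Literal

from pydantic import BaseModel, Field, RootModel, ValidationError

# Hypothetical stand-ins for VolumeRelease / TracerPerturbation.
class Cat(BaseModel):
    kind: Literal["cat"] = "cat"
    name: str

class Dog(BaseModel):
    kind: Literal["dog"] = "dog"
    name: str

class Pets(RootModel):
    # Constrained list via Annotated + Field instead of conlist(..., min_length=1).
    root: Annotated[
        list[Annotated[Cat | Dog, Field(discriminator="kind")]],
        Field(min_length=1),
    ]

Pets.model_validate([{"kind": "cat", "name": "Mia"}])  # OK
try:
    Pets.model_validate([])  # violates min_length=1
except ValidationError as err:
    print(err.error_count(), "validation error")
```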
@@ -126,6 +129,9 @@ class ReleaseCollector(RootModel):
         else:
             raise TypeError(f"Invalid key type: {type(item)}. Must be int or str.")
 
+    def __len__(self):
+        return len(self.root)
+
     @model_validator(mode="before")
     @classmethod
     def unpack_dict(cls, data):
@@ -774,6 +780,62 @@ class CDRForcing(BaseModel):
         fig.subplots_adjust(hspace=0.45)
         fig.suptitle(f"Release distribution for: {release_name}")
 
+    def compute_total_cdr_source(self, dt: float) -> pd.DataFrame:
+        """
+        Compute integrated tracer quantities for all releases and return a DataFrame.
+
+        Parameters
+        ----------
+        dt : float
+            Time step in seconds for reconstructing ROMS time stamps.
+
+        Returns
+        -------
+        pd.DataFrame
+            DataFrame with one row per release and one row of units at the top.
+            Columns 'temp' and 'salt' are excluded from integrated totals.
+        """
+        # Reconstruct ROMS time stamps
+        _, rel_seconds = _reconstruct_roms_time_stamps(
+            self.start_time, self.end_time, dt, self.model_reference_date
+        )
+
+        # Collect accounting results for all releases
+        records = []
+        release_names = []
+        for release in self.releases:
+            result = release._do_accounting(rel_seconds, self.model_reference_date)
+            records.append(result)
+            release_names.append(getattr(release, "name", f"release_{len(records)}"))
+
+        # Build DataFrame: rows = releases, columns = tracer names
+        df = pd.DataFrame(records, index=release_names)
+
+        # Exclude temp and salt from units row and integrated totals
+        integrated_tracers = [col for col in df.columns if col not in ("temp", "salt")]
+
+        # Add a row of units only for integrated tracers
+        tracer_meta = get_tracer_metadata_dict(include_bgc=True, unit_type="integrated")
+        units_row = {
+            col: tracer_meta.get(col, {}).get("units", "") for col in integrated_tracers
+        }
+
+        df_units = pd.DataFrame([units_row], index=["units"])
+
+        # Keep only integrated_tracers columns in df, drop temp and salt
+        df_integrated = df[integrated_tracers]
+
+        # Concatenate units row on top
+        df_final = pd.concat([df_units, df_integrated])
+
+        # Store dt as metadata
+        df_final.attrs["time_step"] = dt
+        df_final.attrs["start_time"] = self.start_time
+        df_final.attrs["end_time"] = self.end_time
+        df_final.attrs["title"] = "Integrated tracer releases"
+
+        return df_final
+
     def save(
         self,
         filepath: str | Path,
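A hypothetical call sketch for the new accounting method, assuming `cdr_forcing` is an already-configured `CDRForcing` instance (the `dt` value is illustrative):

```python
dt = 60.0  # ROMS time step in seconds, illustrative only

df = cdr_forcing.compute_total_cdr_source(dt)

print(df.attrs["title"])        # "Integrated tracer releases"
print(df.loc["units"])          # units row for each integrated tracer
print(df.drop(index="units"))   # one row of integrated totals per release
```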
@@ -1051,3 +1113,55 @@ def _map_3d_gaussian(
     distribution_3d /= distribution_3d.sum()
 
     return distribution_3d
+
+
+def _reconstruct_roms_time_stamps(
+    start_time: datetime,
+    end_time: datetime,
+    dt: float,
+    model_reference_date: datetime,
+) -> tuple[list[datetime], np.ndarray]:
+    """
+    Reconstruct ROMS time stamps between `start_time` and `end_time` with step `dt`.
+
+    Parameters
+    ----------
+    start_time : datetime
+        Beginning of the time series.
+    end_time : datetime
+        End of the time series (inclusive if it falls exactly on a step).
+    dt : float
+        Time step in seconds (can be fractional if needed).
+    model_reference_date : datetime
+        The reference date for ROMS time (elapsed time will be relative to this).
+
+    Returns
+    -------
+    times : list of datetime
+        Sequence of datetimes from `start_time` to `end_time`.
+    rel_days : np.ndarray
+        Array of elapsed times in **seconds** relative to `model_reference_date`.
+
+    Raises
+    ------
+    ValueError
+        If `end_time` is not after `start_time` or if `dt` is not positive.
+    """
+    if end_time <= start_time:
+        raise ValueError("end_time must be after start_time")
+    if dt <= 0:
+        raise ValueError("dt must be positive")
+
+    # Generate absolute times
+    delta = timedelta(seconds=dt)
+    times: list[datetime] = []
+    t = start_time
+    while t <= end_time:
+        times.append(t)
+        t += delta
+
+    # Convert to relative ROMS time (days since model_reference_date)
+    rel_days = convert_to_relative_days(times, model_reference_date)
+    rel_seconds = rel_days * 3600 * 24
+
+    return times, rel_seconds
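The helper above delegates the reference-date conversion to the package's `convert_to_relative_days` before scaling days back to seconds; a standalone sketch of the same arithmetic with that conversion inlined (dates and step are illustrative):

```python
from datetime import datetime, timedelta

import numpy as np

start = datetime(2020, 1, 1)
end = datetime(2020, 1, 1, 1)   # one hour of model time
dt = 1200.0                     # 20-minute step
ref = datetime(2019, 12, 31)    # model_reference_date

# Same stepping loop as _reconstruct_roms_time_stamps.
times = []
t = start
while t <= end:
    times.append(t)
    t += timedelta(seconds=dt)

# Inlined equivalent of convert_to_relative_days(...) * 3600 * 24.
rel_seconds = np.array([(t - ref).total_seconds() for t in times])
print(len(times))        # 4 steps: 00:00, 00:20, 00:40, 01:00
print(rel_seconds[:2])   # [86400. 87600.] seconds since the reference date
```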