roms-tools 3.1.2__py3-none-any.whl → 3.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. roms_tools/__init__.py +3 -0
  2. roms_tools/analysis/cdr_analysis.py +203 -0
  3. roms_tools/analysis/cdr_ensemble.py +198 -0
  4. roms_tools/analysis/roms_output.py +80 -46
  5. roms_tools/data/grids/GLORYS_global_grid.nc +0 -0
  6. roms_tools/download.py +4 -0
  7. roms_tools/plot.py +75 -21
  8. roms_tools/setup/boundary_forcing.py +44 -19
  9. roms_tools/setup/cdr_forcing.py +122 -8
  10. roms_tools/setup/cdr_release.py +161 -8
  11. roms_tools/setup/datasets.py +626 -340
  12. roms_tools/setup/grid.py +138 -137
  13. roms_tools/setup/initial_conditions.py +113 -48
  14. roms_tools/setup/mask.py +63 -7
  15. roms_tools/setup/nesting.py +67 -42
  16. roms_tools/setup/river_forcing.py +45 -19
  17. roms_tools/setup/surface_forcing.py +4 -6
  18. roms_tools/setup/tides.py +1 -2
  19. roms_tools/setup/topography.py +4 -4
  20. roms_tools/setup/utils.py +134 -22
  21. roms_tools/tests/test_analysis/test_cdr_analysis.py +144 -0
  22. roms_tools/tests/test_analysis/test_cdr_ensemble.py +202 -0
  23. roms_tools/tests/test_analysis/test_roms_output.py +61 -3
  24. roms_tools/tests/test_setup/test_boundary_forcing.py +54 -52
  25. roms_tools/tests/test_setup/test_cdr_forcing.py +54 -0
  26. roms_tools/tests/test_setup/test_cdr_release.py +118 -1
  27. roms_tools/tests/test_setup/test_datasets.py +392 -44
  28. roms_tools/tests/test_setup/test_grid.py +222 -115
  29. roms_tools/tests/test_setup/test_initial_conditions.py +94 -41
  30. roms_tools/tests/test_setup/test_surface_forcing.py +2 -1
  31. roms_tools/tests/test_setup/test_utils.py +91 -1
  32. roms_tools/tests/test_setup/utils.py +71 -0
  33. roms_tools/tests/test_tiling/test_join.py +241 -0
  34. roms_tools/tests/test_utils.py +139 -17
  35. roms_tools/tiling/join.py +189 -0
  36. roms_tools/utils.py +131 -99
  37. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/METADATA +12 -2
  38. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/RECORD +41 -33
  39. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/WHEEL +0 -0
  40. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/licenses/LICENSE +0 -0
  41. {roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/top_level.txt +0 -0
roms_tools/tiling/join.py ADDED
@@ -0,0 +1,189 @@
+ from collections.abc import Sequence
+ from pathlib import Path
+ from typing import Literal, cast
+
+ import xarray as xr
+
+ from roms_tools.utils import FilePaths, _path_list_from_input
+
+
+ def open_partitions(files: FilePaths) -> xr.Dataset:
+     """
+     Open partitioned ROMS netCDF files as a single dataset.
+
+     Parameters
+     ----------
+     files: str | List[str | Path]
+         List or wildcard pattern describing files to join,
+         e.g. "roms_rst.20121209133435.*.nc"
+
+     Returns
+     -------
+     xarray.Dataset
+         Dataset containing unified partitioned datasets
+     """
+     filepaths = _path_list_from_input(files)
+     datasets = [xr.open_dataset(p, decode_timedelta=True) for p in sorted(filepaths)]
+     joined = join_datasets(datasets)
+     return joined
+
+
+ def join_netcdf(files: FilePaths, output_path: Path | None = None) -> Path:
+     """
+     Join partitioned NetCDFs into a single dataset.
+
+     Parameters
+     ----------
+     files : str | List[str | Path]
+         List or wildcard pattern describing files to join,
+         e.g. "roms_rst.20121209133435.*.nc"
+
+     output_path : Path, optional
+         If provided, the joined dataset will be saved to this path.
+         Otherwise, the common base of pattern (e.g. roms_rst.20121209133435.nc)
+         will be used.
+
+     Returns
+     -------
+     Path
+         The path of the saved file
+     """
+     filepaths = _path_list_from_input(files)
+     # Determine output path if not provided
+     if output_path is None:
+         # e.g. roms_rst.20120101120000.023.nc -> roms_rst.20120101120000.nc
+         output_path = filepaths[0].with_suffix("").with_suffix(".nc")
+
+     joined = open_partitions(cast(FilePaths, filepaths))
+     joined.to_netcdf(output_path)
+     print(f"Saved joined dataset to: {output_path}")
+
+     return output_path
+
+
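The two functions added above are the public entry points for re-assembling partitioned ROMS output. A minimal usage sketch (the import path mirrors the new module location `roms_tools/tiling/join.py`; the restart file names are hypothetical, and a non-matching pattern raises `FileNotFoundError`):

```python
from roms_tools.tiling.join import join_netcdf, open_partitions

# Open all partitions matching the pattern as one logically joined dataset.
ds = open_partitions("roms_rst.20121209133435.*.nc")

# Join and write a single NetCDF file; by default the partition index is
# dropped from the name, e.g. roms_rst.20121209133435.nc.
out_path = join_netcdf("roms_rst.20121209133435.*.nc")
```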
+ def _find_transitions(dim_sizes: list[int]) -> list[int]:
+     """Finds the indices of all transitions in a list of dimension sizes.
+
+     A transition is a point where the dimension size changes from the previous one.
+     This function is used to determine the number of partitions (e.g., np_eta or np_xi).
+
+     Parameters
+     ----------
+     dim_sizes : list[int]
+         A list of integer sizes for a given dimension across multiple datasets.
+
+     Returns
+     -------
+     List[int]
+         A list of indices where a transition was detected.
+     """
+     transitions: list[int] = []
+     if len(dim_sizes) < 2:
+         return transitions
+
+     for i in range(1, len(dim_sizes)):
+         if dim_sizes[i] != dim_sizes[i - 1]:
+             transitions.append(i)
+     return transitions
+
+
+ def _find_common_dims(
+     direction: Literal["xi", "eta"], datasets: Sequence[xr.Dataset]
+ ) -> list[str]:
+     """Finds all common dimensions along the xi or eta direction amongst a list of Datasets.
+
+     Parameters
+     ----------
+     direction: str ("xi" or "eta")
+         The direction in which to seek a common dimension
+     datasets: Sequence[xr.Dataset]:
+         The datasets in which to look
+
+     Returns
+     -------
+     common_dim: list[str]
+         The dimensions common to all specified datasets along 'direction'
+     """
+     if direction not in ["xi", "eta"]:
+         raise ValueError("'direction' must be 'xi' or 'eta'")
+     dims = []
+     for point in ["rho", "u", "v"]:
+         if all(f"{direction}_{point}" in d.dims for d in datasets):
+             dims.append(f"{direction}_{point}")
+     if not dims:
+         raise ValueError(f"No common point found along direction {direction}")
+     return dims
+
+
+ def _infer_partition_layout_from_datasets(
+     datasets: Sequence[xr.Dataset],
+ ) -> tuple[int, int]:
+     """Infer np_eta, np_xi from datasets."""
+     nd = len(datasets)
+     if nd == 1:
+         return 1, 1
+
+     eta_dims = _find_common_dims("eta", datasets)
+     first_eta_transition = nd
+
+     for eta_dim in eta_dims:
+         dim_sizes = [ds.sizes.get(eta_dim, 0) for ds in datasets]
+         eta_transitions = _find_transitions(dim_sizes)
+         if eta_transitions and (min(eta_transitions) < first_eta_transition):
+             first_eta_transition = min(eta_transitions)
+     if first_eta_transition < nd:
+         np_xi = first_eta_transition
+         np_eta = nd // np_xi
+         return np_xi, np_eta
+     # If we did not successfully find np_xi,np_eta using eta points
+     # then we have a single-column grid:
+
+     return nd, 1
+
+
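The layout inference keys off size changes along the eta dimensions: when partition rows have different heights, the index of the first change in `eta_rho`/`eta_u`/`eta_v` size equals the number of partitions per row (`np_xi`); if every partition has the same eta extent, the fallback treats the files as a single row. A toy illustration with dummy datasets (the sizes are made up for the example, and the helper is internal):

```python
import numpy as np
import xarray as xr

from roms_tools.tiling.join import _infer_partition_layout_from_datasets

# Six dummy partitions arranged as 2 rows (eta) x 3 columns (xi).
# The first row is one point taller, so the eta_rho size changes at
# index 3 -- exactly the number of partitions per row.
eta_sizes = [34, 34, 34, 33, 33, 33]
parts = [
    xr.Dataset({"zeta": (("eta_rho", "xi_rho"), np.zeros((n, 20)))})
    for n in eta_sizes
]
print(_infer_partition_layout_from_datasets(parts))  # (np_xi, np_eta) == (3, 2)
```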
+ def join_datasets(datasets: Sequence[xr.Dataset]) -> xr.Dataset:
+     """Take a sequence of partitioned Datasets and return a joined Dataset."""
+     np_xi, np_eta = _infer_partition_layout_from_datasets(datasets)
+
+     # Arrange into grid
+     grid = [[datasets[j + i * np_xi] for j in range(np_xi)] for i in range(np_eta)]
+
+     # Join each row (along xi_*)
+     rows_joined = []
+     for row in grid:
+         all_vars = set().union(*(ds.data_vars for ds in row))
+         row_dataset = xr.Dataset()
+
+         for varname in all_vars:
+             var_slices = [ds[varname] for ds in row if varname in ds]
+             xi_dims = [dim for dim in var_slices[0].dims if dim.startswith("xi_")]
+
+             if not xi_dims:
+                 row_dataset[varname] = var_slices[0]
+             else:
+                 xi_dim = xi_dims[0]
+                 row_dataset[varname] = xr.concat(
+                     var_slices, dim=xi_dim, combine_attrs="override"
+                 )
+
+         rows_joined.append(row_dataset)
+
+     # Join all rows (along eta_*)
+     final_dataset = xr.Dataset()
+     all_vars = set().union(*(ds.data_vars for ds in rows_joined))
+
+     for varname in all_vars:
+         var_slices = [ds[varname] for ds in rows_joined if varname in ds]
+         eta_dims = [dim for dim in var_slices[0].dims if dim.startswith("eta_")]
+
+         if not eta_dims:
+             final_dataset[varname] = var_slices[0]
+         else:
+             eta_dim = eta_dims[0]
+             final_dataset[varname] = xr.concat(
+                 var_slices, dim=eta_dim, combine_attrs="override"
+             )
+     # Copy attributes from first dataset
+     final_dataset.attrs = datasets[0].attrs
+
+     return final_dataset
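`join_datasets` does not care where the partitions come from: each variable is concatenated along its `xi_*` dimension within a row and then along its `eta_*` dimension across rows. A small self-contained sketch with toy in-memory data (a 1 x 2 layout):

```python
import numpy as np
import xarray as xr

from roms_tools.tiling.join import join_datasets

# Two toy partitions sitting side by side along xi_rho.
left = xr.Dataset({"zeta": (("eta_rho", "xi_rho"), np.zeros((3, 2)))})
right = xr.Dataset({"zeta": (("eta_rho", "xi_rho"), np.ones((3, 2)))})

joined = join_datasets([left, right])
print(joined.sizes)  # eta_rho: 3, xi_rho: 4
```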
roms_tools/utils.py CHANGED
@@ -3,16 +3,51 @@ import logging
  import re
  import textwrap
  import warnings
- from collections.abc import Callable, Iterable
+ from collections.abc import Callable, Iterable, Sequence
+ from dataclasses import dataclass
  from importlib.util import find_spec
  from pathlib import Path
+ from typing import TypeAlias

  import numpy as np
  import xarray as xr
- from attr import dataclass

  from roms_tools.constants import R_EARTH

+ FilePaths: TypeAlias = str | Path | list[Path | str]
+
+
+ def _path_list_from_input(files: FilePaths) -> list[Path]:
+     """Converts a generic user input to a list of Paths.
+
+     Takes a list of strings or paths, or wildcard pattern, and
+     returns a list of pathlib.Path objects
+
+     Parameters
+     ----------
+     files: FilePaths
+         A list of files (str, Path), single path as a str or Path, or a wildcard string
+
+     Returns
+     -------
+     List[Path]
+         A list of pathlib.Paths
+     """
+     if isinstance(files, str):
+         filepaths = sorted(Path(files).parent.glob(Path(files).name))
+         if not filepaths:
+             raise FileNotFoundError(f"No files matched: {files}")
+     elif isinstance(files, Path):
+         filepaths = [
+             files,
+         ]
+     elif isinstance(files, list):
+         filepaths = [Path(f) for f in files]
+     else:
+         raise TypeError("'files' should be str, Path, or List[Path | str]")
+
+     return filepaths
+

  @dataclass
  class FileMatchResult:
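`FilePaths` and `_path_list_from_input` are the plumbing behind the partition-joining API above: a wildcard string, a single `Path`, or a list of strings/Paths all normalise to `list[Path]`. A short sketch (file names are illustrative, and the helper is private):

```python
from pathlib import Path

from roms_tools.utils import _path_list_from_input

# A single Path is wrapped; a list of str/Path is converted element-wise.
_path_list_from_input(Path("roms_rst.20121209133435.0.nc"))
_path_list_from_input(["a.nc", Path("b.nc")])  # -> [Path('a.nc'), Path('b.nc')]

# A plain string is globbed relative to its parent directory and raises
# FileNotFoundError if nothing matches:
# _path_list_from_input("output/roms_rst.20121209133435.*.nc")
```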
@@ -25,56 +60,39 @@ class FileMatchResult:


  def _get_file_matches(
-     filename: str | Path | list[str | Path],
+     filename: str | Path | Sequence[str | Path],
  ) -> FileMatchResult:
      """Filter the filename using an optional wildcard search in the filename.

      Parameters
      ----------
-     filename : str or Path or list of str or Path
+     filename : str | Path | Sequence[str | Path]
          An item to search for matches.
      """
      # Precompile the regex for matching wildcard characters
      wildcard_regex = re.compile(r"[\*\?\[\]]")

-     # Convert Path objects to strings
-     if isinstance(filename, str | Path):
-         filename_str = str(filename)
-     elif isinstance(filename, list):
-         filename_str = [str(f) for f in filename]
+     # Normalize input into a list of strings
+     if isinstance(filename, (str | Path)):
+         filenames: list[str] = [str(filename)]
+     elif isinstance(filename, Sequence):
+         filenames = [str(f) for f in filename]
      else:
-         msg = "filename must be a string, Path, or a list of strings/Paths."
+         msg = "filename must be a string, Path, or a sequence of strings/Paths."
          raise ValueError(msg)

-     # Handle the case when filename is a string
-     contains_wildcard = False
-     matching_files = []
-
-     if isinstance(filename_str, str):
-         contains_wildcard = bool(wildcard_regex.search(filename_str))
-         if contains_wildcard:
-             matching_files = glob.glob(filename_str)
-             if not matching_files:
-                 msg = f"No files found matching the pattern '{filename_str}'."
-                 raise FileNotFoundError(msg)
-         else:
-             matching_files = [filename_str]
-
-     # Handle the case when filename is a list
-     elif isinstance(filename_str, list):
-         # contains_wildcard = any(wildcard_regex.search(f) for f in filename_str)
-         if contains_wildcard := any(wildcard_regex.search(f) for f in filename_str):
-             matching_files = []
-             for f in filename_str:
-                 files = glob.glob(f)
-                 if not files:
-                     msg = f"No files found matching the pattern '{f}'."
-                     raise FileNotFoundError(msg)
-                 matching_files.extend(files)
+     contains_wildcard = any(wildcard_regex.search(f) for f in filenames)
+     matching_files: list[str] = []
+
+     for f in filenames:
+         if wildcard_regex.search(f):
+             files = glob.glob(f)
+             if not files:
+                 raise FileNotFoundError(f"No files found matching the pattern '{f}'.")
+             matching_files.extend(files)
          else:
-             matching_files = filename_str
+             matching_files.append(f)

-     # Sort the matching files
      return FileMatchResult(
          contains_wildcard=contains_wildcard,
          matches=sorted(matching_files),
@@ -132,11 +150,60 @@ def _get_ds_combine_base_params() -> dict[str, str]:
      }


+ def get_dask_chunks(
+     dim_names: dict[str, str], time_chunking: bool = True
+ ) -> dict[str, int]:
+     """Return the default dask chunks for ROMS datasets.
+
+     Parameters
+     ----------
+     dim_names : dict[str, str]
+         Dictionary specifying the names of dimensions in the dataset.
+         - For lat-lon datasets, provide keys "latitude" and "longitude" (and optionally "depth" and "time").
+         - For ROMS datasets, the default ROMS dimensions are assumed ("eta_rho", "xi_rho", "s_rho", etc.).
+     time_chunking : bool, optional
+         Whether to chunk along the time dimension.
+         - True: chunk time dimension with size 1 (useful for processing large time-series data with Dask).
+         - False: do not explicitly chunk time; Dask will use default auto-chunking.
+         Defaults to True.
+
+     Returns
+     -------
+     dict[str, int]
+         The default dask chunks for ROMS datasets.
+     """
+     if "latitude" in dim_names and "longitude" in dim_names:
+         # for lat-lon datasets
+         chunks = {
+             dim_names["latitude"]: -1,
+             dim_names["longitude"]: -1,
+         }
+     else:
+         # For ROMS datasets
+         chunks = {
+             "eta_rho": -1,
+             "eta_v": -1,
+             "xi_rho": -1,
+             "xi_u": -1,
+             "s_rho": -1,
+         }
+
+     if "depth" in dim_names:
+         chunks[dim_names["depth"]] = -1
+     if "time" in dim_names and time_chunking:
+         chunks[dim_names["time"]] = 1
+     if "ntides" in dim_names:
+         chunks[dim_names["ntides"]] = 1
+
+     return chunks
+
+
  def _load_data_dask(
      filenames: list[str],
      dim_names: dict[str, str],
      time_chunking: bool = True,
      decode_times: bool = True,
+     decode_timedelta: bool = True,
      read_zarr: bool = True,
      load_kwargs: dict[str, str] | None = None,
  ) -> xr.Dataset:
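`get_dask_chunks` pulls the chunking policy that used to live inline in `_load_data_dask` (removed in a later hunk) into a reusable helper. For example (the dimension names passed in are illustrative):

```python
from roms_tools.utils import get_dask_chunks

# Lat-lon source dataset: spatial dims unchunked (-1), time in chunks of 1.
get_dask_chunks({"latitude": "lat", "longitude": "lon", "time": "time"})
# -> {"lat": -1, "lon": -1, "time": 1}

# ROMS-style dataset with time chunking disabled (e.g. for restart files).
get_dask_chunks({"time": "ocean_time"}, time_chunking=False)
# -> {"eta_rho": -1, "eta_v": -1, "xi_rho": -1, "xi_u": -1, "s_rho": -1}
```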
@@ -158,6 +225,9 @@
      decode_times: bool, optional
          If True, decode times and timedeltas encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers.
          Defaults to True.
+     decode_timedelta: bool, optional
+         If True, decode timedeltas encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers.
+         Defaults to True.
      read_zarr: bool, optional
          If True, use the zarr engine to read the dataset, and don't use mfdataset.
          Defaults to False.
@@ -175,28 +245,7 @@
          If a list of files is provided but dim_names["time"] is not available or use_dask=False.

      """
-     if "latitude" in dim_names and "longitude" in dim_names:
-         # for lat-lon datasets
-         chunks = {
-             dim_names["latitude"]: -1,
-             dim_names["longitude"]: -1,
-         }
-     else:
-         # For ROMS datasets
-         chunks = {
-             "eta_rho": -1,
-             "eta_v": -1,
-             "xi_rho": -1,
-             "xi_u": -1,
-             "s_rho": -1,
-         }
-
-     if "depth" in dim_names:
-         chunks[dim_names["depth"]] = -1
-     if "time" in dim_names and time_chunking:
-         chunks[dim_names["time"]] = 1
-     if "ntides" in dim_names:
-         chunks[dim_names["ntides"]] = 1
+     chunks = get_dask_chunks(dim_names, time_chunking)

      with warnings.catch_warnings():
          warnings.filterwarnings(
@@ -209,6 +258,7 @@
              return xr.open_zarr(
                  filenames[0],
                  decode_times=decode_times,
+                 decode_timedelta=decode_timedelta,
                  chunks=chunks,
                  consolidated=None,
                  storage_options={"token": "anon"},
@@ -218,7 +268,7 @@
          return xr.open_mfdataset(
              filenames,
              decode_times=decode_times,
-             decode_timedelta=decode_times,
+             decode_timedelta=decode_timedelta,
              chunks=chunks,
              **kwargs,
          )
@@ -237,7 +287,7 @@
      RuntimeError
          If dask is requested but not installed.
      """
-     if use_dask and not _has_dask():
+     if use_dask and not has_dask():
          msg = (
              "Dask is required but not installed. Install it with:\n"
              " • `pip install roms-tools[dask]` or\n"
@@ -310,12 +360,13 @@
          raise ValueError(msg)


- def _load_data(
+ def load_data(
      filename: str | Path | list[str | Path],
-     dim_names: dict[str, str],
-     use_dask: bool,
+     dim_names: dict[str, str] | None = None,
+     use_dask: bool = False,
      time_chunking: bool = True,
      decode_times: bool = True,
+     decode_timedelta: bool = True,
      force_combine_nested: bool = False,
      read_zarr: bool = False,
      ds_loader_fn: Callable[[], xr.Dataset] | None = None,
@@ -324,21 +375,24 @@

      Parameters
      ----------
-     filename : Union[str, Path, List[Union[str, Path]]]
+     filename : str | Path | list[str | Path]
          The path to the data file(s). Can be a single string (with or without wildcards), a single Path object,
          or a list of strings or Path objects containing multiple files.
-     dim_names : Dict[str, str], optional
+     dim_names : dict[str, str], optional
          Dictionary specifying the names of dimensions in the dataset.
          Required only for lat-lon datasets to map dimension names like "latitude" and "longitude".
          For ROMS datasets, this parameter can be omitted, as default ROMS dimensions ("eta_rho", "xi_rho", "s_rho") are assumed.
-     use_dask: bool
+     use_dask: bool, optional
          Indicates whether to use dask for chunking. If True, data is loaded with dask; if False, data is loaded eagerly. Defaults to False.
      time_chunking : bool, optional
          If True and `use_dask=True`, the data will be chunked along the time dimension with a chunk size of 1.
          If False, the data will not be chunked explicitly along the time dimension, but will follow the default auto chunking scheme. This option is useful for ROMS restart files.
          Defaults to True.
      decode_times: bool, optional
-         If True, decode times and timedeltas encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers.
+         If True, decode times encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers.
+         Defaults to True.
+     decode_timedelta: bool, optional
+         If True, decode timedeltas encoded in the standard NetCDF datetime format into datetime objects. Otherwise, leave them encoded as numbers.
          Defaults to True.
      force_combine_nested: bool, optional
          If True, forces the use of nested combination (`combine_nested`) regardless of whether wildcards are used.
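With `_load_data` now public as `load_data` and `dim_names`/`use_dask` made optional, loading ROMS output reduces to passing a filename. A hedged sketch (file names are hypothetical; `use_dask=True` additionally requires dask to be installed):

```python
from roms_tools.utils import load_data

# ROMS output: default ROMS dimension names are assumed, data loaded eagerly.
ds = load_data("roms_avg.20121209133435.nc")

# Lat-lon source data, chunked with dask.
ds = load_data(
    "glorys_*.nc",
    dim_names={"latitude": "latitude", "longitude": "longitude", "time": "time"},
    use_dask=True,
)
```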
@@ -385,6 +439,7 @@
              dim_names,
              time_chunking,
              decode_times,
+             decode_timedelta,
              read_zarr,
              load_kwargs,
          )
@@ -394,7 +449,7 @@
          ds = xr.open_dataset(
              file,
              decode_times=decode_times,
-             decode_timedelta=decode_times,
+             decode_timedelta=decode_timedelta,
              chunks=None,
          )
          ds_list.append(ds)
@@ -629,30 +684,7 @@ def save_datasets(dataset_list, output_filenames, use_dask=False, verbose=True):
      return saved_filenames


- def get_dask_chunks(location, chunk_size):
-     """Returns the appropriate Dask chunking dictionary based on grid location.
-
-     Parameters
-     ----------
-     location : str
-         The grid location, one of "rho", "u", or "v".
-     chunk_size : int
-         The chunk size to apply.
-
-     Returns
-     -------
-     dict
-         Dictionary specifying the chunking strategy.
-     """
-     chunk_mapping = {
-         "rho": {"eta_rho": chunk_size, "xi_rho": chunk_size},
-         "u": {"eta_rho": chunk_size, "xi_u": chunk_size},
-         "v": {"eta_v": chunk_size, "xi_rho": chunk_size},
-     }
-     return chunk_mapping.get(location, {})
-
-
- def _generate_coordinate_range(min_val: float, max_val: float, resolution: float):
+ def generate_coordinate_range(min_val: float, max_val: float, resolution: float):
      """Generate an array of target coordinates (e.g., latitude or longitude) within a
      specified range, with a resolution that is rounded to the nearest value of the form
      `1/n` (or integer).
@@ -714,7 +746,7 @@ def _generate_coordinate_range(min_val: float, max_val: float, resolution: float
      return target.astype(np.float32)


- def _generate_focused_coordinate_range(
+ def generate_focused_coordinate_range(
      center: float,
      sc: float,
      min_val: float,
@@ -800,7 +832,7 @@ def _generate_focused_coordinate_range(
      return centers, faces


- def _remove_edge_nans(
+ def remove_edge_nans(
      field: xr.DataArray, xdim: str, layer_depth: xr.DataArray | None = None
  ) -> tuple[xr.DataArray, xr.DataArray | None]:
      """Remove NaN-only slices at the edges of a specified dimension.
@@ -876,7 +908,7 @@ def _remove_edge_nans(
      return field, layer_depth


- def _has_dask() -> bool:
+ def has_dask() -> bool:
      """Determine if the Dask package is installed.

      Returns
@@ -888,7 +920,7 @@ def _has_dask() -> bool:
      return find_spec("dask") is not None


- def _has_gcsfs() -> bool:
+ def has_gcsfs() -> bool:
      """Determine if the GCSFS package is installed.

      Returns
@@ -900,7 +932,7 @@ def _has_gcsfs() -> bool:
      return find_spec("gcsfs") is not None


- def _has_copernicus() -> bool:
+ def has_copernicus() -> bool:
      """Determine if the Copernicus Marine Toolkit package is installed.

      Returns
@@ -976,7 +1008,7 @@ def infer_nominal_horizontal_resolution(
      return float(resolution_in_degrees)


- def _get_pkg_error_msg(purpose: str, package_name: str, option_name: str) -> str:
+ def get_pkg_error_msg(purpose: str, package_name: str, option_name: str) -> str:
      """Generate an error message indicating how to install an optional dependency.

      Parameters
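The remaining utils.py hunks simply drop the leading underscore from helpers that are now part of the public surface (`generate_coordinate_range`, `generate_focused_coordinate_range`, `remove_edge_nans`, `has_dask`, `has_gcsfs`, `has_copernicus`, `get_pkg_error_msg`). A brief sketch of the new names (argument values are illustrative only):

```python
from roms_tools.utils import generate_coordinate_range, has_dask

if has_dask():  # optional-dependency check, returns a bool
    print("dask is available")

# Coordinate values between 10.0 and 15.0 at roughly 0.25 spacing; the
# resolution is rounded internally to the nearest 1/n value.
lons = generate_coordinate_range(10.0, 15.0, 0.25)
```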
{roms_tools-3.1.2.dist-info → roms_tools-3.2.0.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: roms-tools
- Version: 3.1.2
+ Version: 3.2.0
  Summary: Tools for running and analysing UCLA-ROMS simulations
  Author-email: Nora Loose <nora.loose@gmail.com>, Thomas Nicholas <tom@cworthy.org>, Scott Eilerman <scott.eilerman@cworthy.org>
  License: Apache-2
@@ -42,15 +42,25 @@ Requires-Dist: dask[diagnostics]; extra == "stream"
  Requires-Dist: gcsfs; extra == "stream"
  Requires-Dist: zarr; extra == "stream"
  Requires-Dist: copernicusmarine; extra == "stream"
+ Provides-Extra: notebooks
+ Requires-Dist: dask[diagnostics]; extra == "notebooks"
+ Requires-Dist: gcsfs; extra == "notebooks"
+ Requires-Dist: zarr; extra == "notebooks"
+ Requires-Dist: copernicusmarine; extra == "notebooks"
+ Requires-Dist: gdown; extra == "notebooks"
+ Requires-Dist: ipykernel; extra == "notebooks"
+ Requires-Dist: jupyter; extra == "notebooks"
  Dynamic: license-file

  # ROMS-Tools
+
  [![Conda version](https://img.shields.io/conda/vn/conda-forge/roms-tools.svg)](https://anaconda.org/conda-forge/roms-tools)
  [![PyPI version](https://img.shields.io/pypi/v/roms-tools.svg)](https://pypi.org/project/roms-tools/)
+ [![Run Tests](https://github.com/CWorthy-ocean/roms-tools/actions/workflows/tests.yaml/badge.svg)](https://github.com/CWorthy-ocean/roms-tools/actions/workflows/tests.yaml?query=branch%3Amain)
  [![codecov](https://codecov.io/gh/CWorthy-ocean/roms-tools/graph/badge.svg?token=5S1oNu39xE)](https://codecov.io/gh/CWorthy-ocean/roms-tools)
  [![Documentation Status](https://readthedocs.org/projects/roms-tools/badge/?version=latest)](https://roms-tools.readthedocs.io/en/latest/?badge=latest)
- ![Run Tests](https://github.com/CWorthy-ocean/roms-tools/actions/workflows/tests.yaml/badge.svg)
  ![Supported Python Versions](https://img.shields.io/pypi/pyversions/roms-tools)
+ [![PyPI Downloads](https://static.pepy.tech/personalized-badge/roms-tools?period=total&units=INTERNATIONAL_SYSTEM&left_color=BLACK&right_color=GREEN&left_text=downloads)](https://pepy.tech/projects/roms-tools)

  ## Overview