pycontrails-0.53.0-cp311-cp311-macosx_11_0_arm64.whl → pycontrails-0.54.0-cp311-cp311-macosx_11_0_arm64.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.


@@ -1,79 +1,434 @@
-"""Utilities for working with ECMWF model-level data.
-
-This module requires the following additional dependency:
-
-- `lxml <https://lxml.de/>`_
-"""
+"""Utilities for working with ECMWF model-level data."""
 
+import datetime
 import pathlib
+import warnings
 
+import dask.array
+import numpy as np
+import numpy.typing as npt
 import pandas as pd
+import xarray as xr
 
 from pycontrails.physics import units
-from pycontrails.utils import dependencies
 
 _path_to_static = pathlib.Path(__file__).parent / "static"
 MODEL_LEVELS_PATH = _path_to_static / "model_level_dataframe_v20240418.csv"
 
 
-def pressure_levels_at_model_levels(alt_ft_min: float, alt_ft_max: float) -> list[int]:
+def model_level_reference_pressure(
+    alt_ft_min: float | None = None,
+    alt_ft_max: float | None = None,
+) -> list[int]:
     """Return the pressure levels at each model level assuming a constant surface pressure.
 
-    The pressure levels are rounded to the nearest hPa.
+    This function assumes
+    `137 model levels <https://confluence.ecmwf.int/display/UDOC/L137+model+level+definitions>`_
+    and the constant ICAO ISA surface pressure of 1013.25 hPa.
+
+    The returned pressure levels are rounded to the nearest hPa.
 
     Parameters
     ----------
-    alt_ft_min : float
-        Minimum altitude, [:math:`ft`].
-    alt_ft_max : float
-        Maximum altitude, [:math:`ft`].
+    alt_ft_min : float | None
+        Minimum altitude, [:math:`ft`]. If None, there is no minimum altitude
+        used in filtering the ``MODEL_LEVELS_PATH`` table.
+    alt_ft_max : float | None
+        Maximum altitude, [:math:`ft`]. If None, there is no maximum altitude
+        used in filtering the ``MODEL_LEVELS_PATH`` table.
 
     Returns
     -------
     list[int]
-        List of pressure levels, [:math:`hPa`].
+        List of pressure levels, [:math:`hPa`] between the minimum and maximum altitudes.
+
+    See Also
+    --------
+    model_level_pressure
     """
-    df = pd.read_csv(MODEL_LEVELS_PATH)
-    alt_m_min = units.ft_to_m(alt_ft_min)
-    alt_m_max = units.ft_to_m(alt_ft_max)
-    filt = df["Geometric Altitude [m]"].between(alt_m_min, alt_m_max)
+    usecols = ["n", "Geometric Altitude [m]", "pf [hPa]"]
+    df = pd.read_csv(MODEL_LEVELS_PATH, usecols=usecols, index_col="n")
+
+    filt = df.index >= 1  # exclude degenerate model level 0
+    if alt_ft_min is not None:
+        alt_m_min = units.ft_to_m(alt_ft_min)
+        filt &= df["Geometric Altitude [m]"] >= alt_m_min
+    if alt_ft_max is not None:
+        alt_m_max = units.ft_to_m(alt_ft_max)
+        filt &= df["Geometric Altitude [m]"] <= alt_m_max
+
     return df.loc[filt, "pf [hPa]"].round().astype(int).tolist()
 
 
-def _cache_model_level_dataframe() -> pd.DataFrame:
+def _cache_model_level_dataframe() -> None:
     """Regenerate static model level data file.
 
-    Read the ERA5 model level definitions published by ECMWF
+    Read the ERA5 L137 model level definitions published by ECMWF
     and cache it in a static file for use by this module.
     This should only be used by model developers, and only if ECMWF model
     level definitions change. ``MODEL_LEVEL_PATH`` must be manually
     updated to use newly-cached files.
 
-    Requires the lxml package to be installed.
+    Requires the `lxml <https://lxml.de/>`_ package to be installed.
     """
-    import os
-    from datetime import datetime
 
     url = "https://confluence.ecmwf.int/display/UDOC/L137+model+level+definitions"
-    try:
-        df = pd.read_html(url, na_values="-", index_col="n")[0]
-        today = datetime.now()
-        new_file_path = _path_to_static / f"model_level_dataframe_v{today.strftime('%Y%m%d')}.csv"
-        if os.path.exists(new_file_path):
-            msg = f"Static file already exists at {new_file_path}"
+    df = pd.read_html(url, na_values="-", index_col="n")[0]
+
+    today = datetime.datetime.now()
+    new_file_path = _path_to_static / f"model_level_dataframe_v{today.strftime('%Y%m%d')}.csv"
+    if new_file_path.is_file():
+        msg = f"Static file already exists at {new_file_path}"
+        raise ValueError(msg)
+
+    df.to_csv(new_file_path)
+
+
+def model_level_pressure(sp: xr.DataArray, model_levels: npt.ArrayLike) -> xr.DataArray:
+    r"""Return the pressure levels at each model level given the surface pressure.
+
+    This function assumes
+    `137 model levels <https://confluence.ecmwf.int/display/UDOC/L137+model+level+definitions>`_.
+    Unlike :func:`model_level_reference_pressure`, this function
+    does not assume constant pressure. Instead, it uses the
+    `half-level pressure formula <https://confluence.ecmwf.int/x/JJh0CQ#heading-Pressureonmodellevels>`_
+    :math:`p = a + b \cdot \text{sp}` where :math:`a` and :math:`b` are constants
+    for each model level.
+
+    Parameters
+    ----------
+    sp : xr.DataArray
+        Surface pressure, [:math:`\text{Pa}`]. A warning is issued if the minimum
+        value of ``sp`` is less than 30320.0 Pa. Such low values are unrealistic.
+    model_levels : npt.ArrayLike
+        Target model levels. Expected to be a one-dimensional array of integers between 1 and 137.
+
+    Returns
+    -------
+    xr.DataArray
+        Pressure levels at each model level, [:math:`hPa`]. The shape of the output is
+        the product of the shape of the input and the length of ``model_levels``. In
+        other words, the output will have the dimensions of the input plus a new dimension
+        for ``model_levels``.
+
+        If ``sp`` is not dask-backed, the output will be computed eagerly. In particular,
+        if ``sp`` has a large size and ``model_levels`` is a large range, this function
+        may consume a large amount of memory.
+
+        The ``dtype`` of the output is the same as the ``dtype`` of the ``sp`` parameter.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import xarray as xr
+
+    >>> sp_arr = np.linspace(101325.0, 90000.0, 16).reshape(4, 4)
+    >>> longitude = np.linspace(-180, 180, 4)
+    >>> latitude = np.linspace(-90, 90, 4)
+    >>> sp = xr.DataArray(sp_arr, coords={"longitude": longitude, "latitude": latitude})
+
+    >>> model_levels = [80, 100]
+    >>> model_level_pressure(sp, model_levels)
+    <xarray.DataArray (model_level: 2, longitude: 4, latitude: 4)> Size: 256B
+    array([[[259.75493944, 259.27107504, 258.78721064, 258.30334624],
+            [257.81948184, 257.33561744, 256.85175304, 256.36788864],
+            [255.88402424, 255.40015984, 254.91629544, 254.43243104],
+            [253.94856664, 253.46470224, 252.98083784, 252.49697344]],
+           [[589.67975444, 586.47283154, 583.26590864, 580.05898574],
+            [576.85206284, 573.64513994, 570.43821704, 567.23129414],
+            [564.02437124, 560.81744834, 557.61052544, 554.40360254],
+            [551.19667964, 547.98975674, 544.78283384, 541.57591094]]])
+    Coordinates:
+      * longitude    (longitude) float64 32B -180.0 -60.0 60.0 180.0
+      * latitude     (latitude) float64 32B -90.0 -30.0 30.0 90.0
+      * model_level  (model_level) int64 16B 80 100
+
+    See Also
+    --------
+    model_level_reference_pressure
+    """
+    # When sp is too low, the pressure up the vertical column will not be monotonically
+    # decreasing. The first example of this occurs when sp is close to 30320.0 Pa between
+    # model levels 114 and 115. Issue a warning here to alert the user.
+    if (sp < 30320.0).any():
+        msg = (
+            "The 'sp' parameter appears to be low. The calculated pressure levels will "
+            "not be monotonically decreasing. The 'sp' parameter has units of Pa. "
+            "Most surface pressure data should be in the range of 50000.0 to 105000.0 Pa."
+        )
+        warnings.warn(msg)
+
+    model_levels = np.asarray(model_levels, dtype=int)
+    if not np.all((model_levels >= 1) & (model_levels <= 137)):
+        msg = "model_levels must be integers between 1 and 137"
+        raise ValueError(msg)
+
+    usecols = ["n", "a [Pa]", "b"]
+    df = (
+        pd.read_csv(MODEL_LEVELS_PATH, usecols=usecols)
+        .rename(columns={"n": "model_level", "a [Pa]": "a"})
+        .set_index("model_level")
+    )
+
+    a = df["a"].to_xarray()
+    b = df["b"].to_xarray()
+
+    if "model_level" in sp.dims:
+        sp_model_levels = sp["model_level"]
+        if len(sp_model_levels) != 1:
+            msg = "Found multiple model levels in sp, expected at most one"
+            raise ValueError(msg)
+        if sp_model_levels.item() != 1:
+            msg = f"sp must be at model level 1, found model level {sp_model_levels.item()}"
             raise ValueError(msg)
-        df.to_csv(new_file_path)
-
-    except ImportError as exc:
-        if "lxml" in exc.msg:
-            dependencies.raise_module_not_found_error(
-                "model_level_utils._read_model_level_dataframe function",
-                package_name="lxml",
-                module_not_found_error=exc,
-                extra=(
-                    "Alternatively, if instantiating a model-level ECMWF datalib, you can provide "
-                    "the 'pressure_levels' parameter directly to avoid the need to read the "
-                    "ECMWF model level definitions."
-                ),
-            )
-        raise
+        # Remove the model level dimension to allow automatic broadcasting below
+        sp = sp.squeeze("model_level")
+
+    dtype = sp.dtype
+    a = a.astype(dtype, copy=False)
+    b = b.astype(dtype, copy=False)
+
+    indexer = {"model_level": model_levels}
+    p_half_below = a.sel(indexer) + b.sel(indexer) * sp
+
+    indexer = {"model_level": model_levels - 1}
+    p_half_above = (a.sel(indexer) + b.sel(indexer) * sp).assign_coords(model_level=model_levels)
+
+    p_full = (p_half_above + p_half_below) / 2.0
+    return p_full / 100.0  # Pa -> hPa
+
+
+def searchsorted2d(
+    a: npt.NDArray[np.floating],
+    v: npt.NDArray[np.floating],
+) -> npt.NDArray[np.int64]:
+    """Return the indices where elements in ``v`` would be inserted in ``a`` along its second axis.
+
+    Implementation based on a `StackOverflow answer <https://stackoverflow.com/a/40588862>`_.
+
+    Parameters
+    ----------
+    a : npt.NDArray[np.floating]
+        2D array of shape ``(m, n)`` that is sorted along its second axis. This is not checked.
+    v : npt.NDArray[np.floating]
+        1D array of values of shape ``(k,)`` to insert into the second axis of ``a``.
+        The current implementation could be extended to handle 2D arrays as well.
+
+    Returns
+    -------
+    npt.NDArray[np.int64]
+        2D array of indices where elements in ``v`` would be inserted in ``a`` along its
+        second axis to keep the second axis of ``a`` sorted. The shape of the output is ``(m, k)``.
+
+    Examples
+    --------
+    >>> a = np.array([
+    ...     [ 1., 8., 11., 12.],
+    ...     [ 5., 8., 9., 14.],
+    ...     [ 4., 5., 6., 17.],
+    ... ])
+    >>> v = np.array([3., 7., 10., 13., 15.])
+    >>> searchsorted2d(a, v)
+    array([[1, 1, 2, 4, 4],
+           [0, 1, 3, 3, 4],
+           [0, 3, 3, 3, 3]])
+    """
+    if a.ndim != 2:
+        msg = "The parameter 'a' must be a 2D array"
+        raise ValueError(msg)
+    if v.ndim != 1:
+        msg = "The parameter 'v' must be a 1D array"
+        raise ValueError(msg)
+
+    m, n = a.shape
+
+    offset_scalar = max(np.ptp(a).item(), np.ptp(v).item()) + 1.0
+
+    # IMPORTANT: Keep the dtype as float64 to avoid round-off error
+    # when computing a_scaled and v_scaled
+    # If we used float32 here, the searchsorted output below can be off by 1
+    # or 2 if offset_scalar is large and m is large
+    steps = np.arange(m, dtype=np.float64).reshape(-1, 1)
+    offset = steps * offset_scalar
+    a_scaled = a + offset  # float32 + float64 = float64
+    v_scaled = v + offset  # float32 + float64 = float64
+
+    idx_scaled = np.searchsorted(a_scaled.reshape(-1), v_scaled.reshape(-1)).reshape(v_scaled.shape)
+    return idx_scaled - n * steps.astype(np.int64)
+
+
+def _interp_artifacts(
+    xp: npt.NDArray[np.floating], x: npt.NDArray[np.floating]
+) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.floating], npt.NDArray[np.bool_]]:
+    """Compute the indices and distances for linear interpolation."""
+    idx = searchsorted2d(xp, x)
+    out_of_bounds = (idx == 0) | (idx == xp.shape[1])
+    idx.clip(1, xp.shape[1] - 1, out=idx)
+
+    x0 = np.take_along_axis(xp, idx - 1, axis=1)
+    x1 = np.take_along_axis(xp, idx, axis=1)
+    dist = (x.reshape(1, -1) - x0) / (x1 - x0)
+
+    return idx, dist, out_of_bounds
+
+
+def _interp_on_chunk(ds_chunk: xr.Dataset, target_pl: npt.NDArray[np.floating]) -> xr.Dataset:
+    """Interpolate the data on a chunk to the target pressure levels.
+
+    Parameters
+    ----------
+    ds_chunk : xr.Dataset
+        Chunk of the dataset. The last dimension must be "model_level".
+        The dataset from which ``ds_chunk`` is taken must not split the
+        "model_level" dimension across chunks.
+    target_pl : npt.NDArray[np.floating]
+        Target pressure levels, [:math:`hPa`].
+
+    Returns
+    -------
+    xr.Dataset
+        Interpolated data on the target pressure levels. This has the same
+        dimensions as ``ds_chunk`` except that the "model_level" dimension
+        is replaced with "level". The shape of the "level" dimension is
+        the length of ``target_pl``.
+    """
+    if any(da_chunk.dims[-1] != "model_level" for da_chunk in ds_chunk.values()):
+        msg = "The last dimension of the dataset must be 'model_level'"
+        raise ValueError(msg)
+
+    pl_chunk = ds_chunk["pressure_level"]
+
+    # Put the model_level column in the second dimension
+    # And stack the horizontal dimensions into the first dimension
+    xp = pl_chunk.values.reshape(-1, len(pl_chunk["model_level"]))
+
+    # AFAICT, metview performs linear interpolation in xp and target_pl by default
+    # However, the conversion_from_ml_to_pl.py script in https://confluence.ecmwf.int/x/JJh0CQ
+    # suggests interpolating in the log space. If using consecutive model levels,
+    # the difference between the two methods is negligible. We use the log space
+    # method here for consistency with the ECMWF script. This only changes
+    # the `dist` calculation below.
+    idx, dist, out_of_bounds = _interp_artifacts(np.log(xp), np.log(target_pl))
+
+    shape4d = pl_chunk.shape[:-1] + target_pl.shape
+    idx = idx.reshape(shape4d)
+    dist = dist.reshape(shape4d)
+    out_of_bounds = out_of_bounds.reshape(shape4d)
+
+    interped_dict = {}
+
+    for name, da in ds_chunk.items():
+        if name == "pressure_level":
+            continue
+
+        fp = da.values
+        f0 = np.take_along_axis(fp, idx - 1, axis=-1)
+        f1 = np.take_along_axis(fp, idx, axis=-1)
+        interped = f0 + dist * (f1 - f0)
+        interped[out_of_bounds] = np.nan  # we could extrapolate here like RGI(..., fill_value=None)
+
+        coords = {k: da.coords[k] for k in da.dims[:-1]}
+        coords["level"] = target_pl
+
+        interped_dict[name] = xr.DataArray(
+            interped,
+            dims=tuple(coords),
+            coords=coords,
+            attrs=da.attrs,
+        )
+
+    return xr.Dataset(interped_dict)
+
+
+def _build_template(ds: xr.Dataset, target_pl: npt.NDArray[np.floating]) -> xr.Dataset:
+    """Build the template dataset for the interpolated data."""
+    coords = {k: ds.coords[k] for k in ds.dims if k != "model_level"} | {"level": target_pl}
+
+    dims = tuple(coords)
+    shape = tuple(len(v) for v in coords.values())
+
+    vars = {
+        k: (dims, dask.array.empty(shape=shape, dtype=da.dtype))
+        for k, da in ds.items()
+        if k != "pressure_level"
+    }
+
+    chunks = {k: v for k, v in ds.chunks.items() if k != "model_level"}
+    chunks["level"] = (len(target_pl),)
+
+    return xr.Dataset(data_vars=vars, coords=coords, attrs=ds.attrs).chunk(chunks)
+
+
+def ml_to_pl(
+    ds: xr.Dataset,
+    target_pl: npt.ArrayLike,
+    *,
+    lnsp: xr.DataArray | None = None,
+    sp: xr.DataArray | None = None,
+) -> xr.Dataset:
+    r"""Interpolate L137 model-level meteorology data to pressure levels.
+
+    The implementation here is consistent with ECMWF's
+    `suggested implementation <https://confluence.ecmwf.int/x/JJh0CQ#heading-Step2Interpolatevariablesonmodellevelstocustompressurelevels>`_.
+
+    Parameters
+    ----------
+    ds : xr.Dataset
+        Dataset with model-level meteorology data. Must include a "model_level" dimension
+        which is not split across chunks. The non-"model_level" dimensions must be
+        aligned with the "lnsp" parameter. Can include any number of variables.
+        Any `non-dimension coordinates <https://docs.xarray.dev/en/latest/user-guide/terminology.html#term-Non-dimension-coordinate>`_
+        will be dropped.
+    target_pl : npt.ArrayLike
+        Target pressure levels, [:math:`hPa`].
+    lnsp : xr.DataArray
+        Natural logarithm of surface pressure, [:math:`\ln(\text{Pa})`]. If provided,
+        ``sp`` is ignored. At least one of ``lnsp`` or ``sp`` must be provided.
+    sp : xr.DataArray
+        Surface pressure, [:math:`\text{Pa}`].
+        At least one of ``lnsp`` or ``sp`` must be provided.
+
+    Returns
+    -------
+    xr.Dataset
+        Interpolated data on the target pressure levels. This has the same
+        dimensions as ``ds`` except that the "model_level" dimension
+        is replaced with "level". The shape of the "level" dimension is
+        the length of ``target_pl``. If ``ds`` is dask-backed, the output
+        will be as well. Call ``.compute()`` to compute the result eagerly.
+    """
+    if sp is None:
+        if lnsp is None:
+            msg = "At least one of 'lnsp' or 'sp' must be provided"
+            raise ValueError(msg)
+        sp = dask.array.exp(lnsp)
+
+    model_levels = ds["model_level"]
+    pl = model_level_pressure(sp, model_levels)
+
+    if "pressure_level" in ds:
+        msg = "The dataset must not contain a 'pressure_level' variable"
+        raise ValueError(msg)
+    ds = ds.assign(pressure_level=pl)
+
+    ds = ds.reset_coords(drop=True)  # drop "expver"
+
+    # If there are any variables which do not have the "model_level" dimension,
+    # issue a warning and drop them
+    for name, da in ds.items():
+        if "model_level" not in da.dims:
+            msg = f"Variable '{name}' does not have a 'model_level' dimension"
+            warnings.warn(msg)
+            ds = ds.drop_vars([name])
+
+    # IMPORTANT: model_level must be the last dimension for _interp_on_chunk
+    ds = ds.transpose(..., "model_level")
+
+    # Raise if chunked over model level
+    if ds.chunks and len(ds.chunks["model_level"]) > 1:
+        msg = "The 'model_level' dimension must not be split across chunks"
+        raise ValueError(msg)
+
+    target_pl = np.asarray(target_pl, dtype=sp.dtype)
+    template = _build_template(ds, target_pl)
+    return xr.map_blocks(_interp_on_chunk, ds, (target_pl,), template=template)
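
Note: the heart of this new module is the half-level pressure formula documented in
`model_level_pressure` above: each full model level is assigned the mean of the two
bounding half-level pressures, p_half = a + b * sp. A minimal sketch of that formula,
using made-up (a, b) coefficients in place of the real L137 table shipped in
``MODEL_LEVELS_PATH``:

    import numpy as np

    # Hypothetical (a, b) coefficients for the half levels bounding full level 80.
    # The real values come from the L137 table cached in MODEL_LEVELS_PATH.
    a = {79: 12000.0, 80: 11500.0}  # [Pa] -- placeholder values
    b = {79: 0.04, 80: 0.05}  # [dimensionless] -- placeholder values

    sp = np.array([101325.0, 95000.0])  # surface pressure, [Pa]

    # Half-level pressures above and below the full level: p = a + b * sp
    p_half_above = a[79] + b[79] * sp
    p_half_below = a[80] + b[80] * sp

    # The full-level pressure is the mean of the bounding half levels, converted to hPa
    p_full = (p_half_above + p_half_below) / 2.0 / 100.0
    print(p_full)  # illustrative only; real values depend on the cached table
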
@@ -218,6 +218,17 @@ CloudAreaFraction = MetVariable(
     ),
 )
 
+OzoneMassMixingRatio = MetVariable(
+    short_name="o3",
+    standard_name="mass_fraction_of_ozone_in_air",
+    long_name="Ozone mass mixing ratio",
+    level_type="isobaricInhPa",
+    units="kg kg**-1",
+    ecmwf_id=203,
+    description="This parameter is the mass of ozone per kilogram of air.",
+)
+
+
 PRESSURE_LEVEL_VARIABLES = [
     met_var.AirTemperature,
     met_var.SpecificHumidity,
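
Note: `MetVariable` is a plain dataclass, so the new ozone entry can be constructed and
inspected directly. A small sketch, assuming the import path `pycontrails.core.met_var`
that the surrounding file already uses:

    from pycontrails.core.met_var import MetVariable

    o3 = MetVariable(
        short_name="o3",
        standard_name="mass_fraction_of_ozone_in_air",
        long_name="Ozone mass mixing ratio",
        level_type="isobaricInhPa",
        units="kg kg**-1",
        ecmwf_id=203,
    )
    print(o3.short_name, o3.ecmwf_id)  # o3 203
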
@@ -170,7 +170,6 @@ class Landsat:
         bands: str | Iterable[str] | None = None,
         cachestore: cache.CacheStore | None = None,
     ) -> None:
-
         self.base_url = base_url
         self.bands = _parse_bands(bands)
         _check_band_resolution(self.bands)
@@ -343,7 +342,9 @@ def _read(path: str, meta: str, band: str, processing: str) -> xr.DataArray:
                 else (
                     "nondim"
                     if processing == "reflectance"
-                    else "K" if processing == "brightness_temperature" else "none"
+                    else "K"
+                    if processing == "brightness_temperature"
+                    else "none"
                 )
             ),
             "crs": crs,
@@ -185,7 +185,6 @@ class Sentinel:
         bands: str | Iterable[str] | None = None,
         cachestore: cache.CacheStore | None = None,
     ) -> None:
-
         self.base_url = base_url
         self.granule_id = granule_id
         self.bands = _parse_bands(bands)
@@ -301,7 +301,11 @@ class SyntheticFlight:
             return self._generate_single_flight_no_wind(timestep)  # recursive
 
         result = self.geod.fwd_intermediate(
-            *src, az, npts, m_per_timestep, return_back_azimuth=False  # type: ignore
+            *src,
+            az,
+            npts,
+            m_per_timestep,  # type: ignore
+            return_back_azimuth=False,  # type: ignore
         )
         longitude = np.asarray(result.lons)
         latitude = np.asarray(result.lats)
@@ -550,7 +550,6 @@ class APCEMM(models.Model):
         contrails: list[pd.DataFrame] = []
 
         for _, row in self.source.dataframe.iterrows():
-
             waypoint = row["waypoint"]
 
             # Mark waypoint as skipped if no APCEMM simulation ran
@@ -1545,7 +1545,6 @@ def _process_rad(rad: MetDataset) -> MetDataset:
         )
         raise ValueError(msg) from exc
     if radiation_accumulated:
-
         # Don't assume that radiation data is uniformly spaced in time
         # Instead, infer the appropriate time shift
         time_diff = rad.data["time"].diff("time", label="upper")
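
Note: the context above infers the accumulation period from the data rather than assuming
uniform spacing. `DataArray.diff` with `label="upper"` attaches each spacing to the later
timestamp; a minimal sketch of the idiom with non-uniform times:

    import numpy as np
    import xarray as xr

    time = np.array(
        ["2024-01-01T00", "2024-01-01T01", "2024-01-01T03"], dtype="datetime64[ns]"
    )
    da = xr.DataArray([0.0, 1.0, 2.0], coords={"time": time}, dims="time")

    # Each difference is labeled with its upper (later) time bound, so the
    # 1 h and 2 h gaps are preserved rather than assumed uniform.
    time_diff = da["time"].diff("time", label="upper")
    print(time_diff.values)  # timedelta64 values for 1 hour and 2 hours
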
@@ -322,7 +322,7 @@ class CocipGrid(models.Model):
 
         if met is None:
             # idx is the first index at which self.met.variables["time"].to_numpy() >= time_end
-            idx = np.searchsorted(self.met.indexes["time"].to_numpy(), time_end)
+            idx = np.searchsorted(self.met.indexes["time"].to_numpy(), time_end).item()
             sl = slice(max(0, idx - 1), idx + 1)
             logger.debug("Select met slice %s", sl)
             met = MetDataset(self.met.data.isel(time=sl), copy=False)
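
Note: the only change in these four CocipGrid hunks is the trailing `.item()`.
`np.searchsorted` returns a NumPy integer scalar, and `.item()` converts it to a built-in
`int` before it is used in `slice` arithmetic. A quick illustration:

    import numpy as np

    times = np.array(["2024-01-01", "2024-01-02"], dtype="datetime64[ns]")
    idx = np.searchsorted(times, np.datetime64("2024-01-01T12"))

    print(type(idx))         # a NumPy integer scalar type
    print(type(idx.item()))  # <class 'int'>

    sl = slice(max(0, idx.item() - 1), idx.item() + 1)
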
@@ -331,7 +331,7 @@ class CocipGrid(models.Model):
             current_times = met.indexes["time"].to_numpy()
             all_times = self.met.indexes["time"].to_numpy()
             # idx is the first index at which all_times >= time_end
-            idx = np.searchsorted(all_times, time_end)
+            idx = np.searchsorted(all_times, time_end).item()
             sl = slice(max(0, idx - 1), idx + 1)
 
             # case 1: cannot re-use end of current met as start of new met
@@ -353,7 +353,7 @@ class CocipGrid(models.Model):
 
         if rad is None:
             # idx is the first index at which self.rad.variables["time"].to_numpy() >= time_end
-            idx = np.searchsorted(self.rad.indexes["time"].to_numpy(), time_end)
+            idx = np.searchsorted(self.rad.indexes["time"].to_numpy(), time_end).item()
             sl = slice(max(0, idx - 1), idx + 1)
             logger.debug("Select rad slice %s", sl)
             rad = MetDataset(self.rad.data.isel(time=sl), copy=False)
@@ -362,7 +362,7 @@ class CocipGrid(models.Model):
             current_times = rad.indexes["time"].to_numpy()
             all_times = self.rad.indexes["time"].to_numpy()
             # idx is the first index at which all_times >= time_end
-            idx = np.searchsorted(all_times, time_end)
+            idx = np.searchsorted(all_times, time_end).item()
             sl = slice(max(0, idx - 1), idx + 1)
 
             # case 1: cannot re-use end of current rad as start of new rad
@@ -6,6 +6,7 @@ import dataclasses
 from typing import Any, NoReturn, overload
 
 import numpy as np
+import numpy.typing as npt
 
 from pycontrails.core import models
 from pycontrails.core.flight import Flight
@@ -329,7 +330,7 @@ def _perform_interp_for_step(
 def _calc_geometry(
     vector: GeoVectorDataset,
     dz_m: float,
-    dt: np.timedelta64,
+    dt: npt.NDArray[np.timedelta64] | np.timedelta64,
     max_depth: float | None,
 ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
     """Calculate wind-shear-derived geometry of evolved plume."""
@@ -455,7 +456,11 @@ def _evolve_one_step(
     longitude_2 = geo.advect_longitude(longitude, latitude, u_wind, dt)  # type: ignore[arg-type]
     latitude_2 = geo.advect_latitude(latitude, v_wind, dt)  # type: ignore[arg-type]
     level_2 = geo.advect_level(
-        vector.level, vertical_velocity, 0.0, 0.0, dt  # type: ignore[arg-type]
+        vector.level,
+        vertical_velocity,
+        0.0,
+        0.0,
+        dt,  # type: ignore[arg-type]
     )
 
     out = GeoVectorDataset(
  out = GeoVectorDataset(
@@ -475,7 +480,10 @@ def _evolve_one_step(
475
480
 
476
481
  # Attach wind-shear-derived geometry to output vector
477
482
  azimuth_2, width_2, depth_2, sigma_yz_2, area_eff_2 = _calc_geometry(
478
- vector, dz_m, dt, max_depth # type: ignore[arg-type]
483
+ vector,
484
+ dz_m,
485
+ dt, # type: ignore[arg-type]
486
+ max_depth, # type: ignore[arg-type]
479
487
  )
480
488
  out["azimuth"] = azimuth_2
481
489
  out["width"] = width_2
@@ -59,13 +59,13 @@ class ISSR(Model):
59
59
  >>> out1 = model.eval()
60
60
  >>> issr1 = out1["issr"]
61
61
  >>> issr1.proportion # Get proportion of values with ice supersaturation
62
- 0.11414134603859523
62
+ 0.114...
63
63
 
64
64
  >>> # Run with a lower threshold
65
65
  >>> out2 = model.eval(rhi_threshold=0.95)
66
66
  >>> issr2 = out2["issr"]
67
67
  >>> issr2.proportion
68
- 0.146647
68
+ 0.146...
69
69
  """
70
70
 
71
71
  name = "issr"
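
Note: the truncated expected outputs (`0.114...`, `0.146...`) rely on doctest's ELLIPSIS
option, which the test configuration presumably enables; the exact floating-point digits
can differ across platforms. A standalone sketch of the mechanism:

    import doctest

    def proportion() -> float:
        """
        >>> proportion()
        0.114...
        """
        return 0.11414134603859523

    # ELLIPSIS lets "..." match the remaining digits, making the doctest
    # robust to platform-dependent floating-point noise.
    doctest.run_docstring_examples(
        proportion, {"proportion": proportion}, optionflags=doctest.ELLIPSIS
    )
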