pycontrails 0.54.4-cp311-cp311-win_amd64.whl → 0.54.6-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pycontrails might be problematic.

Files changed (38)
  1. pycontrails/_version.py +2 -2
  2. pycontrails/core/aircraft_performance.py +34 -16
  3. pycontrails/core/airports.py +3 -4
  4. pycontrails/core/fleet.py +30 -9
  5. pycontrails/core/flight.py +8 -5
  6. pycontrails/core/flightplan.py +11 -11
  7. pycontrails/core/interpolation.py +7 -4
  8. pycontrails/core/met.py +145 -86
  9. pycontrails/core/met_var.py +62 -0
  10. pycontrails/core/models.py +3 -2
  11. pycontrails/core/rgi_cython.cp311-win_amd64.pyd +0 -0
  12. pycontrails/core/vector.py +97 -74
  13. pycontrails/datalib/_met_utils/metsource.py +1 -1
  14. pycontrails/datalib/ecmwf/era5.py +5 -6
  15. pycontrails/datalib/ecmwf/era5_model_level.py +4 -5
  16. pycontrails/datalib/ecmwf/ifs.py +1 -3
  17. pycontrails/datalib/gfs/gfs.py +1 -3
  18. pycontrails/models/apcemm/apcemm.py +2 -2
  19. pycontrails/models/apcemm/utils.py +1 -1
  20. pycontrails/models/cocip/cocip.py +86 -27
  21. pycontrails/models/cocip/output_formats.py +1 -0
  22. pycontrails/models/cocipgrid/cocip_grid.py +8 -73
  23. pycontrails/models/dry_advection.py +99 -31
  24. pycontrails/models/emissions/emissions.py +2 -2
  25. pycontrails/models/humidity_scaling/humidity_scaling.py +1 -1
  26. pycontrails/models/issr.py +2 -2
  27. pycontrails/models/pcc.py +1 -2
  28. pycontrails/models/ps_model/ps_grid.py +2 -2
  29. pycontrails/models/ps_model/ps_model.py +4 -32
  30. pycontrails/models/ps_model/ps_operational_limits.py +2 -6
  31. pycontrails/models/tau_cirrus.py +13 -6
  32. pycontrails/physics/geo.py +3 -3
  33. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/METADATA +3 -4
  34. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/RECORD +38 -38
  35. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/WHEEL +1 -1
  36. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/LICENSE +0 -0
  37. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/NOTICE +0 -0
  38. {pycontrails-0.54.4.dist-info → pycontrails-0.54.6.dist-info}/top_level.txt +0 -0
@@ -68,12 +68,26 @@ class Cocip(Model):
     -----
     **Inputs**
 
-    The required meteorology variables depend on the data source (e.g. ECMWF, GFS).
+    The required meteorology variables depend on the data source. :class:`Cocip`
+    supports data-source-specific variables from ECMWF models (HRES, ERA5) and the NCEP GFS, plus
+    a generic set of model-agnostic variables.
 
     See :attr:`met_variables` and :attr:`rad_variables` for the list of required variables
     to the ``met`` and ``rad`` parameters, respectively.
     When an item in one of these arrays is a :class:`tuple`, variable keys depend on data source.
 
+    A warning will be raised if meteorology data is from a source not currently supported by
+    a pycontrails datalib. In this case it is the responsibility of the user to ensure that
+    meteorology data is formatted correctly. The warning can be suppressed with a context manager:
+
+    .. code-block:: python
+        :emphasize-lines: 2,3
+
+        import warnings
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=UserWarning, message="Unknown provider")
+            cocip = Cocip(met, rad, ...)
+
     The current list of required variables (labelled by ``"standard_name"``):
 
     .. list-table:: Variable keys for pressure level data
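Note: ``warnings.simplefilter`` does not accept a ``message`` argument, so the suppression shown in the docstring above would raise a ``TypeError`` if executed verbatim. A working sketch of the same suppression (assuming the warning message begins with "Unknown provider", and that ``met`` and ``rad`` are MetDataset objects already prepared by the user) uses ``warnings.filterwarnings`` instead:

    import warnings

    from pycontrails.models.cocip import Cocip

    with warnings.catch_warnings():
        # Ignore only warnings whose message starts with "Unknown provider"
        warnings.filterwarnings("ignore", message="Unknown provider", category=UserWarning)
        cocip = Cocip(met, rad)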
@@ -82,24 +96,31 @@ class Cocip(Model):
         * - Parameter
           - ECMWF
           - GFS
+          - Generic
         * - Air Temperature
           - ``air_temperature``
           - ``air_temperature``
+          - ``air_temperature``
         * - Specific Humidity
           - ``specific_humidity``
           - ``specific_humidity``
+          - ``specific_humidity``
         * - Eastward wind
           - ``eastward_wind``
           - ``eastward_wind``
+          - ``eastward_wind``
         * - Northward wind
           - ``northward_wind``
           - ``northward_wind``
+          - ``northward_wind``
         * - Vertical velocity
           - ``lagrangian_tendency_of_air_pressure``
           - ``lagrangian_tendency_of_air_pressure``
+          - ``lagrangian_tendency_of_air_pressure``
         * - Ice water content
           - ``specific_cloud_ice_water_content``
           - ``ice_water_mixing_ratio``
+          - ``mass_fraction_of_cloud_ice_in_air``
 
     .. list-table:: Variable keys for single-level radiation data
         :header-rows: 1
@@ -107,12 +128,15 @@ class Cocip(Model):
         * - Parameter
           - ECMWF
           - GFS
+          - Generic
         * - Top solar radiation
           - ``top_net_solar_radiation``
           - ``toa_upward_shortwave_flux``
+          - ``toa_net_downward_shortwave_flux``
         * - Top thermal radiation
           - ``top_net_thermal_radiation``
           - ``toa_upward_longwave_flux``
+          - ``toa_outgoing_longwave_flux``
 
     **Modifications**
 
@@ -214,14 +238,26 @@ class Cocip(Model):
         met_var.EastwardWind,
         met_var.NorthwardWind,
         met_var.VerticalVelocity,
-        (ecmwf.SpecificCloudIceWaterContent, gfs.CloudIceWaterMixingRatio),
+        (
+            met_var.MassFractionOfCloudIceInAir,
+            ecmwf.SpecificCloudIceWaterContent,
+            gfs.CloudIceWaterMixingRatio,
+        ),
     )
 
     #: Required single-level top of atmosphere radiation variables.
     #: Variable keys depend on data source (e.g. ECMWF, GFS).
     rad_variables = (
-        (ecmwf.TopNetSolarRadiation, gfs.TOAUpwardShortwaveRadiation),
-        (ecmwf.TopNetThermalRadiation, gfs.TOAUpwardLongwaveRadiation),
+        (
+            met_var.TOANetDownwardShortwaveFlux,
+            ecmwf.TopNetSolarRadiation,
+            gfs.TOAUpwardShortwaveRadiation,
+        ),
+        (
+            met_var.TOAOutgoingLongwaveFlux,
+            ecmwf.TopNetThermalRadiation,
+            gfs.TOAUpwardLongwaveRadiation,
+        ),
     )
 
     #: Minimal set of met variables needed to run the model after pre-processing.
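The generic variable names now listed first in these tuples allow ``Cocip`` to consume meteorology from sources without a dedicated pycontrails datalib, provided the dataset variables are renamed to those standard names first. A minimal sketch, assuming the datasets already carry the ``longitude``, ``latitude``, ``level`` and ``time`` coordinates that MetDataset expects (the short names ``ciwc``, ``tsr`` and ``ttr`` and the file paths are placeholders, not pycontrails requirements):

    import xarray as xr

    from pycontrails import MetDataset

    met_ds = xr.open_dataset("pressure_level_data.nc")  # placeholder path
    met_ds = met_ds.rename({"ciwc": "mass_fraction_of_cloud_ice_in_air"})
    met = MetDataset(met_ds)

    rad_ds = xr.open_dataset("radiation_data.nc")  # placeholder path
    rad_ds = rad_ds.rename(
        {"tsr": "toa_net_downward_shortwave_flux", "ttr": "toa_outgoing_longwave_flux"}
    )
    rad = MetDataset(rad_ds)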
@@ -242,7 +278,11 @@ class Cocip(Model):
     #: Moved Geopotential from :attr:`met_variables` to :attr:`optional_met_variables`
     optional_met_variables = (
         (met_var.Geopotential, met_var.GeopotentialHeight),
-        (ecmwf.CloudAreaFractionInLayer, gfs.TotalCloudCoverIsobaric),
+        (
+            met_var.CloudAreaFractionInAtmosphereLayer,
+            ecmwf.CloudAreaFractionInLayer,
+            gfs.TotalCloudCoverIsobaric,
+        ),
     )
 
     #: Met data is not optional
@@ -391,7 +431,7 @@ class Cocip(Model):
         # which is the positive direction for level
         logger.debug("Downselect met for Cocip initialization")
         level_buffer = 0, self.params["met_level_buffer"][1]
-        met = self.source.downselect_met(self.met, level_buffer=level_buffer, copy=False)
+        met = self.source.downselect_met(self.met, level_buffer=level_buffer)
         met = add_tau_cirrus(met)
 
         # Prepare flight for model
@@ -575,10 +615,12 @@ class Cocip(Model):
         if verbose_outputs:
             interpolate_met(met, self.source, "tau_cirrus", **interp_kwargs)
 
-        # handle ECMWF/GFS ciwc variables
+        # handle ECMWF/GFS/generic ciwc variables
         if (key := "specific_cloud_ice_water_content") in met:  # noqa: SIM114
             interpolate_met(met, self.source, key, **interp_kwargs)
-        elif (key := "ice_water_mixing_ratio") in met:
+        elif (key := "ice_water_mixing_ratio") in met:  # noqa: SIM114
+            interpolate_met(met, self.source, key, **interp_kwargs)
+        elif (key := "mass_fraction_of_cloud_ice_in_air") in met:
             interpolate_met(met, self.source, key, **interp_kwargs)
 
         self.source["rho_air"] = thermo.rho_d(
@@ -976,9 +1018,9 @@ class Cocip(Model):
             for coord in ("longitude", "latitude", "level")
         }
         logger.debug("Downselect met for start of Cocip evolution")
-        met = self._downwash_contrail.downselect_met(self.met, **buffers, copy=False)
+        met = self._downwash_contrail.downselect_met(self.met, **buffers)
         met = add_tau_cirrus(met)
-        rad = self._downwash_contrail.downselect_met(self.rad, **buffers, copy=False)
+        rad = self._downwash_contrail.downselect_met(self.rad, **buffers)
 
         calc_continuous(self._downwash_contrail)
         calc_timestep_geometry(self._downwash_contrail)
@@ -1135,11 +1177,11 @@ class Cocip(Model):
             & (self._downwash_flight["time"] <= lookahead),
             copy=False,
         )
-        vector = GeoVectorDataset(
+        vector = GeoVectorDataset._from_fastpath(
             {
                 key: np.concatenate((latest_contrail[key], future_contrails[key]))
                 for key in ("longitude", "latitude", "level", "time")
-            }
+            },
         )
 
         # compute time buffer to ensure downselection extends to time_end
@@ -1152,7 +1194,7 @@ class Cocip(Model):
             max(np.timedelta64(0, "ns"), time_end - vector["time"].max()),
         )
 
-        return vector.downselect_met(met, **buffers, copy=False)
+        return vector.downselect_met(met, **buffers)
 
     def _create_downwash_contrail(self) -> GeoVectorDataset:
         """Get Contrail representation of downwash flight."""
@@ -1180,7 +1222,7 @@ class Cocip(Model):
             "persistent": self._downwash_flight["persistent_1"],
         }
 
-        contrail = GeoVectorDataset(downwash_contrail_data, copy=True)
+        contrail = GeoVectorDataset._from_fastpath(downwash_contrail_data).copy()
         contrail["formation_time"] = contrail["time"].copy()
         contrail["age"] = contrail["formation_time"] - contrail["time"]
 
@@ -1587,8 +1629,7 @@ def _process_rad(rad: MetDataset) -> MetDataset:
             rad.data["time"].attrs["shift_radiation_time"] = "variable"
             return rad
 
-        else:
-            shift_radiation_time = -np.timedelta64(30, "m")
+        shift_radiation_time = -np.timedelta64(30, "m")
 
     elif dataset == "ERA5" and product == "ensemble":
         shift_radiation_time = -np.timedelta64(90, "m")
@@ -1893,8 +1934,8 @@ def calc_shortwave_radiation(
     Raises
     ------
     ValueError
-        If ``rad`` does not contain ``"toa_upward_shortwave_flux"`` or
-        ``"top_net_solar_radiation"`` variable.
+        If ``rad`` does not contain ``"toa_net_downward_shortwave_flux"``,
+        ``"toa_upward_shortwave_flux"`` or ``"top_net_solar_radiation"`` variable.
 
     Notes
     -----
@@ -1918,6 +1959,13 @@ def calc_shortwave_radiation(
     sdr = geo.solar_direct_radiation(longitude, latitude, time, threshold_cos_sza=0.01)
     vector["sdr"] = sdr
 
+    # Generic contains net downward shortwave flux at TOA (SDR - RSR) in W/m2
+    generic_key = "toa_net_downward_shortwave_flux"
+    if generic_key in rad:
+        tnsr = interpolate_met(rad, vector, generic_key, **interp_kwargs)
+        vector["rsr"] = np.maximum(sdr - tnsr, 0.0)
+        return
+
    # GFS contains RSR (toa_upward_shortwave_flux) variable directly
     gfs_key = "toa_upward_shortwave_flux"
     if gfs_key in rad:
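The generic branch above recovers reflected solar radiation (RSR) from the net flux: the net downward shortwave flux at the top of the atmosphere is SDR - RSR, so RSR = SDR - TNSR, clipped at zero. A small numeric illustration of that relation with made-up values:

    import numpy as np

    sdr = np.array([1000.0, 800.0, 0.0])   # solar direct radiation, W/m2
    tnsr = np.array([700.0, 820.0, 0.0])   # net downward shortwave flux at TOA, W/m2
    rsr = np.maximum(sdr - tnsr, 0.0)      # reflected shortwave: [300., 0., 0.]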
@@ -1926,10 +1974,13 @@ def calc_shortwave_radiation(
 
     ecmwf_key = "top_net_solar_radiation"
     if ecmwf_key not in rad:
-        msg = f"'rad' data must contain either '{gfs_key}' or '{ecmwf_key}' (ECMWF) variable."
+        msg = (
+            f"'rad' data must contain either '{generic_key}' (generic), "
+            f"'{gfs_key}' (GFS), or '{ecmwf_key}' (ECMWF) variable."
+        )
         raise ValueError(msg)
 
-    # ECMWF contains "top_net_solar_radiation" which is SDR - RSR
+    # ECMWF also contains net downward shortwave flux at TOA, but possibly as an accumulation
     tnsr = interpolate_met(rad, vector, ecmwf_key, **interp_kwargs)
     tnsr = _rad_accumulation_to_average_instantaneous(rad, ecmwf_key, tnsr)
     vector.update({ecmwf_key: tnsr})
@@ -1958,14 +2009,20 @@ def calc_outgoing_longwave_radiation(
     Raises
     ------
     ValueError
-        If ``rad`` does not contain a ``"toa_upward_longwave_flux"``
-        or ``"top_net_thermal_radiation"`` variable.
+        If ``rad`` does not contain a ``"toa_outgoing_longwave_flux"``,
+        ``"toa_upward_longwave_flux"`` or ``"top_net_thermal_radiation"`` variable.
     """
 
     if "olr" in vector:
-        return None
+        return
+
+    # Generic contains OLR (toa_outgoing_longwave_flux) directly
+    generic_key = "toa_outgoing_longwave_flux"
+    if generic_key in rad:
+        interpolate_met(rad, vector, generic_key, "olr", **interp_kwargs)
+        return
 
-    # GFS contains OLR (toa_upward_longwave_flux) variable directly
+    # GFS contains OLR (toa_upward_longwave_flux) directly
     gfs_key = "toa_upward_longwave_flux"
     if gfs_key in rad:
         interpolate_met(rad, vector, gfs_key, "olr", **interp_kwargs)
@@ -1974,7 +2031,10 @@ def calc_outgoing_longwave_radiation(
     # ECMWF contains "top_net_thermal_radiation" which is -1 * OLR
     ecmwf_key = "top_net_thermal_radiation"
     if ecmwf_key not in rad:
-        msg = f"'rad' data must contain either '{gfs_key}' or '{ecmwf_key}' (ECMWF) variable."
+        msg = (
+            f"'rad' data must contain either '{generic_key}' (generic), "
+            f"'{gfs_key}' (GFS), or '{ecmwf_key}' (ECMWF) variable."
+        )
         raise ValueError(msg)
 
     tntr = interpolate_met(rad, vector, ecmwf_key, **interp_kwargs)
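On the longwave side, the generic ``toa_outgoing_longwave_flux`` and GFS ``toa_upward_longwave_flux`` hold OLR directly, while the ECMWF ``top_net_thermal_radiation`` is the net downward thermal flux, i.e. -1 times OLR (and possibly an accumulation that is converted to an instantaneous value first). A short numeric sketch of the ECMWF sign convention with made-up values:

    import numpy as np

    tntr = np.array([-240.0, -180.0])  # instantaneous top_net_thermal_radiation, W/m2
    olr = -1.0 * tntr                  # outgoing longwave radiation: [240., 180.]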
@@ -2300,7 +2360,7 @@ def calc_timestep_contrail_evolution(
     level_2 = geo.advect_level(level_1, vertical_velocity_1, rho_air_1, terminal_fall_speed_1, dt)
     altitude_2 = units.pl_to_m(level_2)
 
-    contrail_2 = GeoVectorDataset(
+    contrail_2 = GeoVectorDataset._from_fastpath(
         {
             "waypoint": waypoint_2,
             "flight_id": contrail_1["flight_id"],
@@ -2312,7 +2372,6 @@ def calc_timestep_contrail_evolution(
             "altitude": altitude_2,
             "level": level_2,
         },
-        copy=False,
     )
     intersection = contrail_2.coords_intersect_met(met)
     if not np.any(intersection):
@@ -2259,3 +2259,4 @@ def compare_cocip_with_goes(
         plt.close()
 
         return output_path
+    return None
@@ -11,11 +11,10 @@ from typing import TYPE_CHECKING, Any, NoReturn, TypeVar, overload
 import numpy as np
 import numpy.typing as npt
 import pandas as pd
-import xarray as xr
 
 import pycontrails
 from pycontrails.core import models
-from pycontrails.core.met import MetDataset
+from pycontrails.core.met import MetDataset, maybe_downselect_mds
 from pycontrails.core.vector import GeoVectorDataset, VectorDataset
 from pycontrails.models import humidity_scaling, sac
 from pycontrails.models.cocip import cocip, contrail_properties, wake_vortex, wind_shear
@@ -323,8 +322,8 @@ class CocipGrid(models.Model):
         If ``self.params["downselect_met"]`` is True, the :func:`_downselect_met` has
         already performed a spatial downselection of the met data.
         """
-        met = _maybe_downselect_mds(self.met, met, t0, t1)
-        rad = _maybe_downselect_mds(self.rad, rad, t0, t1)
+        met = maybe_downselect_mds(self.met, met, t0, t1)
+        rad = maybe_downselect_mds(self.rad, rad, t0, t1)
 
         return met, rad
 
@@ -615,7 +614,7 @@ class CocipGrid(models.Model):
         for idx, time in enumerate(times_in_filt):
             # For now, sticking with the convention that every vector should
             # have a constant time value.
-            source_slice = MetDataset(self.source.data.sel(time=[time]))
+            source_slice = MetDataset._from_fastpath(self.source.data.sel(time=[time]))
 
             # Convert the 4D grid to a vector
             vector = source_slice.to_vector()
@@ -1402,7 +1401,7 @@ def simulate_wake_vortex_downwash(
 
     # Experimental segment-free model
     if _is_segment_free_mode(vector):
-        return GeoVectorDataset(data, attrs=vector.attrs, copy=True)
+        return GeoVectorDataset._from_fastpath(data, attrs=vector.attrs).copy()
 
     # Stored in `_generate_new_grid_vectors`
     data["longitude_head"] = vector["longitude_head"]
@@ -1421,7 +1420,7 @@ def simulate_wake_vortex_downwash(
     # segment_length variable.
     data["segment_length"] = np.full_like(data["longitude"], segment_length)
 
-    return GeoVectorDataset(data, attrs=vector.attrs, copy=True)
+    return GeoVectorDataset._from_fastpath(data, attrs=vector.attrs).copy()
 
 
 def find_initial_persistent_contrails(
@@ -2022,7 +2021,7 @@ def advect(
         assert _is_segment_free_mode(contrail)
         assert dt_tail is None
         assert dt_head is None
-        return GeoVectorDataset(data, attrs=contrail.attrs, copy=True)
+        return GeoVectorDataset._from_fastpath(data, attrs=contrail.attrs).copy()
 
     longitude_head = contrail["longitude_head"]
     latitude_head = contrail["latitude_head"]
@@ -2064,7 +2063,7 @@ def advect(
     data["segment_length"] = segment_length_t2
     data["head_tail_dt"] = head_tail_dt_t2
 
-    return GeoVectorDataset(data, attrs=contrail.attrs, copy=True)
+    return GeoVectorDataset._from_fastpath(data, attrs=contrail.attrs).copy()
 
 
 def _aggregate_ef_summary(vector_list: list[VectorDataset]) -> VectorDataset | None:
@@ -2438,7 +2437,6 @@ def _downselect_met(
         longitude_buffer=longitude_buffer,
         level_buffer=level_buffer,
         time_buffer=(t0, t1),
-        copy=False,
     )
 
     rad = source.downselect_met(
@@ -2446,7 +2444,6 @@ def _downselect_met(
         latitude_buffer=latitude_buffer,
         longitude_buffer=longitude_buffer,
         time_buffer=(t0, t1),
-        copy=False,
     )
 
     return met, rad
@@ -2522,65 +2519,3 @@ def _check_end_time(
         f"Include additional time at the end of '{name}' or reduce 'max_age' parameter."
         f"{note}"
     )
-
-
-def _maybe_downselect_mds(
-    big_mds: MetDataset,
-    little_mds: MetDataset | None,
-    t0: np.datetime64,
-    t1: np.datetime64,
-) -> MetDataset:
-    """Possibly downselect ``big_mds`` to cover ``[t0, t1]``.
-
-    This implementation assumes ``t0 <= t1``, but this is not enforced.
-
-    If possible, ``little_mds`` is recycled to avoid re-loading data.
-
-    This function only downselects in the time domain.
-
-    If ``big_mds`` doesn't cover the time range, no error is raised.
-    """
-    if little_mds is not None:
-        little_time = little_mds.indexes["time"].to_numpy()
-        ignore_little = t0 > little_time[-1] or t1 < little_time[0]
-
-    big_time = big_mds.indexes["time"].to_numpy()
-    if little_mds is None or ignore_little:
-        i0 = np.searchsorted(big_time, t0, side="right").item()
-        i0 = max(0, i0 - 1)
-        i1 = np.searchsorted(big_time, t1, side="left").item()
-        i1 = min(i1 + 1, big_time.size)
-        return MetDataset(big_mds.data.isel(time=slice(i0, i1)), copy=False)
-
-    j0 = np.searchsorted(little_time, t0, side="right").item()
-    j0 = max(0, j0 - 1)
-    j1 = np.searchsorted(little_time, t1, side="left").item()
-    j1 = min(j1 + 1, little_time.size)
-
-    little_ds = little_mds.data.isel(time=slice(j0, j1))
-    little_time0 = little_time[j0]
-    little_time1 = little_time[j1 - 1]
-
-    if t0 >= little_time0 and t1 <= little_time1:
-        return MetDataset(little_ds, copy=False)
-
-    ds_concat = []
-    if t0 < little_time0:  # unlikely to encounter this case
-        i0 = np.searchsorted(big_time, t0, side="right").item()
-        i0 = max(0, i0 - 1)
-        i1 = np.searchsorted(big_time, little_time0, side="right").item()
-        i1 = max(i1, i0 + 1)
-        ds_concat.append(big_mds.data.isel(time=slice(i0, i1)))
-
-    ds_concat.append(little_ds)
-
-    if t1 > little_time1:
-        i0 = np.searchsorted(big_time, little_time1, side="left").item()
-        i0 = min(i0 + 1, big_time.size)
-        i1 = np.searchsorted(big_time, t1, side="left").item()
-        i1 = min(i1 + 1, big_time.size)
-        ds_concat.append(big_mds.data.isel(time=slice(i0, i1)))
-
-    # If little_mds is loaded into memory but big_mds is not,
-    # the concat operation below will load the slice of big_mds into memory.
-    return MetDataset(xr.concat(ds_concat, dim="time"), copy=False)
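The helper deleted here reappears as ``maybe_downselect_mds`` in ``pycontrails.core.met`` (see the updated imports elsewhere in this diff). The time bracketing it performs is plain ``numpy.searchsorted`` index arithmetic; a standalone sketch with made-up hourly times, mirroring the removed code:

    import numpy as np

    # Hourly time axis of the "big" dataset
    big_time = np.arange(
        np.datetime64("2022-01-01T00:00"),
        np.datetime64("2022-01-01T12:00"),
        np.timedelta64(1, "h"),
    )

    t0 = np.datetime64("2022-01-01T03:30")
    t1 = np.datetime64("2022-01-01T05:15")

    # Slice that brackets [t0, t1] with one step of padding on each side
    i0 = np.searchsorted(big_time, t0, side="right").item()
    i0 = max(0, i0 - 1)              # step back to the time at or before t0
    i1 = np.searchsorted(big_time, t1, side="left").item()
    i1 = min(i1 + 1, big_time.size)  # include the first time at or after t1
    selected = big_time[i0:i1]       # 03:00, 04:00, 05:00, 06:00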
@@ -3,16 +3,24 @@
 from __future__ import annotations
 
 import dataclasses
+import sys
 from typing import Any, NoReturn, overload
 
+if sys.version_info >= (3, 12):
+    from typing import override
+else:
+    from typing_extensions import override
+
 import numpy as np
 import numpy.typing as npt
+import pandas as pd
 
 from pycontrails.core import models
-from pycontrails.core.met import MetDataset
+from pycontrails.core.met import MetDataset, maybe_downselect_mds
 from pycontrails.core.met_var import (
     AirTemperature,
     EastwardWind,
+    MetVariable,
     NorthwardWind,
     VerticalVelocity,
 )
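The conditional import added above is the usual pattern for using the ``@override`` decorator (PEP 698) on interpreters older than Python 3.12, where it lives in ``typing_extensions`` rather than ``typing``. A minimal standalone illustration of the pattern (not pycontrails code; assumes ``typing_extensions`` is installed on older interpreters):

    import sys

    if sys.version_info >= (3, 12):
        from typing import override
    else:
        from typing_extensions import override


    class Base:
        def downselect_met(self) -> None:
            ...


    class Child(Base):
        @override
        def downselect_met(self) -> None:
            # A type checker flags this if the base class has no such method.
            print("downselecting met")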
@@ -55,6 +63,15 @@ class DryAdvectionParams(models.AdvectionBuffers):
     # If None, only pointwise advection is simulated without wind shear effects.
     azimuth: float | None = 0.0
 
+    #: Add additional intermediate variables to the output vector.
+    #: This includes interpolated met variables and wind-shear-derived geometry.
+    verbose_outputs: bool = False
+
+    #: Whether to include ``source`` points in the output vector. Enabling allows
+    #: the user to view additional data (e.g., interpolated met variables) for
+    #: source points as well as evolved points.
+    include_source_in_output: bool = False
+
 
 class DryAdvection(models.Model):
     """Simulate "dry advection" of an emissions plume with an elliptical cross section.
@@ -89,7 +106,12 @@ class DryAdvection(models.Model):
 
     name = "dry_advection"
     long_name = "Emission plume advection without sedimentation"
-    met_variables = AirTemperature, EastwardWind, NorthwardWind, VerticalVelocity
+    met_variables: tuple[MetVariable, ...] = (
+        AirTemperature,
+        EastwardWind,
+        NorthwardWind,
+        VerticalVelocity,
+    )
     default_params = DryAdvectionParams
 
     met: MetDataset
@@ -127,6 +149,10 @@ class DryAdvection(models.Model):
         self.update_params(params)
         self.set_source(source)
         self.source = self.require_source_type(GeoVectorDataset)
+        self.downselect_met()
+        if not self.source.coords_intersect_met(self.met).any():
+            msg = "No source coordinates intersect met data."
+            raise ValueError(msg)
 
         self.source = self._prepare_source()
 
@@ -137,36 +163,51 @@ class DryAdvection(models.Model):
         sedimentation_rate = self.params["sedimentation_rate"]
         dz_m = self.params["dz_m"]
         max_depth = self.params["max_depth"]
+        verbose_outputs = self.params["verbose_outputs"]
 
         source_time = self.source["time"]
-        t0 = source_time.min()
+        t0 = pd.Timestamp(source_time.min()).floor(pd.Timedelta(dt_integration)).to_numpy()
         t1 = source_time.max()
         timesteps = np.arange(t0 + dt_integration, t1 + dt_integration + max_age, dt_integration)
 
-        vector = GeoVectorDataset()
+        vector2 = GeoVectorDataset()
+        met = None
 
         evolved = []
         for t in timesteps:
             filt = (source_time < t) & (source_time >= t - dt_integration)
-            vector = vector + self.source.filter(filt, copy=False)
-            vector = _evolve_one_step(
-                self.met,
-                vector,
+            vector1 = vector2 + self.source.filter(filt, copy=False)
+
+            t0 = vector1["time"].min()
+            t1 = vector1["time"].max()
+            met = maybe_downselect_mds(self.met, met, t0, t1)
+
+            vector2 = _evolve_one_step(
+                met,
+                vector1,
                 t,
                 sedimentation_rate=sedimentation_rate,
                 dz_m=dz_m,
                 max_depth=max_depth,
+                verbose_outputs=verbose_outputs,
                 **interp_kwargs,
             )
+            evolved.append(vector1)
 
-            filt = (vector["age"] <= max_age) & vector.coords_intersect_met(self.met)
-            vector = vector.filter(filt)
+            filt = (vector2["age"] <= max_age) & vector2.coords_intersect_met(self.met)
+            vector2 = vector2.filter(filt)
 
-            evolved.append(vector)
-            if not vector and np.all(source_time < t):
+            if not vector2 and np.all(source_time < t):
                 break
 
-        return GeoVectorDataset.sum(evolved, fill_value=np.nan)
+        evolved.append(vector2)
+        out = GeoVectorDataset.sum(evolved, fill_value=np.nan)
+
+        if self.params["include_source_in_output"]:
+            return out
+
+        filt = out["age"] > np.timedelta64(0, "ns")
+        return out.filter(filt)
 
     def _prepare_source(self) -> GeoVectorDataset:
         r"""Prepare :attr:`source` vector for advection by wind-shear-derived variables.
@@ -202,7 +243,7 @@ class DryAdvection(models.Model):
                 raise ValueError(
                     "If 'azimuth' is None, then 'width' and 'depth' must also be None."
                 )
-            return GeoVectorDataset(self.source.select(columns, copy=False), copy=False)
+            return GeoVectorDataset._from_fastpath(self.source.select(columns, copy=False).data)
 
         if "azimuth" not in self.source:
             self.source["azimuth"] = np.full_like(self.source["longitude"], azimuth)
@@ -228,7 +269,19 @@ class DryAdvection(models.Model):
             width, depth, sigma_yz=0.0
         )
 
-        return GeoVectorDataset(self.source.select(columns, copy=False), copy=False)
+        return GeoVectorDataset._from_fastpath(self.source.select(columns, copy=False).data)
+
+    @override
+    def downselect_met(self) -> None:
+        if not self.params["downselect_met"]:
+            return
+
+        buffers = {
+            f"{coord}_buffer": self.params[f"met_{coord}_buffer"]
+            for coord in ("longitude", "latitude", "level")
+        }
+        buffers["time_buffer"] = (np.timedelta64(0, "ns"), self.params["max_age"])
+        self.met = self.source.downselect_met(self.met, **buffers)
 
 
 def _perform_interp_for_step(
@@ -329,8 +382,12 @@ def _calc_geometry(
     dz_m: float,
     dt: npt.NDArray[np.timedelta64] | np.timedelta64,
     max_depth: float | None,
+    verbose_outputs: bool,
 ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
-    """Calculate wind-shear-derived geometry of evolved plume."""
+    """Calculate wind-shear-derived geometry of evolved plume.
+
+    This method mutates the input ``vector`` in place.
+    """
 
     u_wind = vector["u_wind"]
     v_wind = vector["v_wind"]
@@ -384,6 +441,11 @@ def _calc_geometry(
         eff_heat_rate=None,
     )
 
+    if verbose_outputs:
+        vector["ds_dz"] = ds_dz
+        vector["dsn_dz"] = dsn_dz
+        vector["dT_dz"] = dT_dz
+
     sigma_yy_2, sigma_zz_2, sigma_yz_2 = contrail_properties.plume_temporal_evolution(
         width,
         depth,
@@ -442,9 +504,13 @@ def _evolve_one_step(
     sedimentation_rate: float,
     dz_m: float,
     max_depth: float | None,
+    verbose_outputs: bool,
     **interp_kwargs: Any,
 ) -> GeoVectorDataset:
-    """Evolve plume geometry by one step."""
+    """Evolve plume geometry by one step.
+
+    This method mutates the input ``vector`` in place.
+    """
 
     _perform_interp_for_step(met, vector, dz_m, **interp_kwargs)
     u_wind = vector["u_wind"]
@@ -459,20 +525,21 @@ def _evolve_one_step(
     level_2 = geo.advect_level(
         vector.level,
         vertical_velocity,
-        0.0,
-        0.0,
-        dt,  # type: ignore[arg-type]
+        rho_air=0.0,
+        terminal_fall_speed=0.0,
+        dt=dt,  # type: ignore[arg-type]
     )
 
-    out = GeoVectorDataset(
-        longitude=longitude_2,
-        latitude=latitude_2,
-        level=level_2,
-        time=np.full(longitude_2.shape, t),
-        copy=False,
+    out = GeoVectorDataset._from_fastpath(
+        {
+            "longitude": longitude_2,
+            "latitude": latitude_2,
+            "level": level_2,
+            "time": np.full(longitude_2.shape, t),
+            "age": vector["age"] + dt,
+            "waypoint": vector["waypoint"],
+        }
     )
-    out["age"] = vector["age"] + dt
-    out["waypoint"] = vector["waypoint"]
 
     azimuth = vector.get("azimuth")
     if azimuth is None:
@@ -482,9 +549,10 @@ def _evolve_one_step(
     # Attach wind-shear-derived geometry to output vector
     azimuth_2, width_2, depth_2, sigma_yz_2, area_eff_2 = _calc_geometry(
         vector,
-        dz_m,
-        dt,  # type: ignore[arg-type]
-        max_depth,  # type: ignore[arg-type]
+        dz_m=dz_m,
+        dt=dt,  # type: ignore[arg-type]
+        max_depth=max_depth,  # type: ignore[arg-type]
+        verbose_outputs=verbose_outputs,
     )
     out["azimuth"] = azimuth_2
     out["width"] = width_2