pycontrails 0.58.0__cp314-cp314-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of pycontrails might be problematic. Click here for more details.
- pycontrails/__init__.py +70 -0
- pycontrails/_version.py +34 -0
- pycontrails/core/__init__.py +30 -0
- pycontrails/core/aircraft_performance.py +679 -0
- pycontrails/core/airports.py +228 -0
- pycontrails/core/cache.py +889 -0
- pycontrails/core/coordinates.py +174 -0
- pycontrails/core/fleet.py +483 -0
- pycontrails/core/flight.py +2185 -0
- pycontrails/core/flightplan.py +228 -0
- pycontrails/core/fuel.py +140 -0
- pycontrails/core/interpolation.py +702 -0
- pycontrails/core/met.py +2931 -0
- pycontrails/core/met_var.py +387 -0
- pycontrails/core/models.py +1321 -0
- pycontrails/core/polygon.py +549 -0
- pycontrails/core/rgi_cython.cpython-314-darwin.so +0 -0
- pycontrails/core/vector.py +2249 -0
- pycontrails/datalib/__init__.py +12 -0
- pycontrails/datalib/_met_utils/metsource.py +746 -0
- pycontrails/datalib/ecmwf/__init__.py +73 -0
- pycontrails/datalib/ecmwf/arco_era5.py +345 -0
- pycontrails/datalib/ecmwf/common.py +114 -0
- pycontrails/datalib/ecmwf/era5.py +554 -0
- pycontrails/datalib/ecmwf/era5_model_level.py +490 -0
- pycontrails/datalib/ecmwf/hres.py +804 -0
- pycontrails/datalib/ecmwf/hres_model_level.py +466 -0
- pycontrails/datalib/ecmwf/ifs.py +287 -0
- pycontrails/datalib/ecmwf/model_levels.py +435 -0
- pycontrails/datalib/ecmwf/static/model_level_dataframe_v20240418.csv +139 -0
- pycontrails/datalib/ecmwf/variables.py +268 -0
- pycontrails/datalib/geo_utils.py +261 -0
- pycontrails/datalib/gfs/__init__.py +28 -0
- pycontrails/datalib/gfs/gfs.py +656 -0
- pycontrails/datalib/gfs/variables.py +104 -0
- pycontrails/datalib/goes.py +757 -0
- pycontrails/datalib/himawari/__init__.py +27 -0
- pycontrails/datalib/himawari/header_struct.py +266 -0
- pycontrails/datalib/himawari/himawari.py +667 -0
- pycontrails/datalib/landsat.py +589 -0
- pycontrails/datalib/leo_utils/__init__.py +5 -0
- pycontrails/datalib/leo_utils/correction.py +266 -0
- pycontrails/datalib/leo_utils/landsat_metadata.py +300 -0
- pycontrails/datalib/leo_utils/search.py +250 -0
- pycontrails/datalib/leo_utils/sentinel_metadata.py +748 -0
- pycontrails/datalib/leo_utils/static/bq_roi_query.sql +6 -0
- pycontrails/datalib/leo_utils/vis.py +59 -0
- pycontrails/datalib/sentinel.py +650 -0
- pycontrails/datalib/spire/__init__.py +5 -0
- pycontrails/datalib/spire/exceptions.py +62 -0
- pycontrails/datalib/spire/spire.py +604 -0
- pycontrails/ext/bada.py +42 -0
- pycontrails/ext/cirium.py +14 -0
- pycontrails/ext/empirical_grid.py +140 -0
- pycontrails/ext/synthetic_flight.py +431 -0
- pycontrails/models/__init__.py +1 -0
- pycontrails/models/accf.py +425 -0
- pycontrails/models/apcemm/__init__.py +8 -0
- pycontrails/models/apcemm/apcemm.py +983 -0
- pycontrails/models/apcemm/inputs.py +226 -0
- pycontrails/models/apcemm/static/apcemm_yaml_template.yaml +183 -0
- pycontrails/models/apcemm/utils.py +437 -0
- pycontrails/models/cocip/__init__.py +29 -0
- pycontrails/models/cocip/cocip.py +2742 -0
- pycontrails/models/cocip/cocip_params.py +305 -0
- pycontrails/models/cocip/cocip_uncertainty.py +291 -0
- pycontrails/models/cocip/contrail_properties.py +1530 -0
- pycontrails/models/cocip/output_formats.py +2270 -0
- pycontrails/models/cocip/radiative_forcing.py +1260 -0
- pycontrails/models/cocip/radiative_heating.py +520 -0
- pycontrails/models/cocip/unterstrasser_wake_vortex.py +508 -0
- pycontrails/models/cocip/wake_vortex.py +396 -0
- pycontrails/models/cocip/wind_shear.py +120 -0
- pycontrails/models/cocipgrid/__init__.py +9 -0
- pycontrails/models/cocipgrid/cocip_grid.py +2552 -0
- pycontrails/models/cocipgrid/cocip_grid_params.py +138 -0
- pycontrails/models/dry_advection.py +602 -0
- pycontrails/models/emissions/__init__.py +21 -0
- pycontrails/models/emissions/black_carbon.py +599 -0
- pycontrails/models/emissions/emissions.py +1353 -0
- pycontrails/models/emissions/ffm2.py +336 -0
- pycontrails/models/emissions/static/default-engine-uids.csv +239 -0
- pycontrails/models/emissions/static/edb-gaseous-v29b-engines.csv +596 -0
- pycontrails/models/emissions/static/edb-nvpm-v29b-engines.csv +215 -0
- pycontrails/models/extended_k15.py +1327 -0
- pycontrails/models/humidity_scaling/__init__.py +37 -0
- pycontrails/models/humidity_scaling/humidity_scaling.py +1075 -0
- pycontrails/models/humidity_scaling/quantiles/era5-model-level-quantiles.pq +0 -0
- pycontrails/models/humidity_scaling/quantiles/era5-pressure-level-quantiles.pq +0 -0
- pycontrails/models/issr.py +210 -0
- pycontrails/models/pcc.py +326 -0
- pycontrails/models/pcr.py +154 -0
- pycontrails/models/ps_model/__init__.py +18 -0
- pycontrails/models/ps_model/ps_aircraft_params.py +381 -0
- pycontrails/models/ps_model/ps_grid.py +701 -0
- pycontrails/models/ps_model/ps_model.py +1000 -0
- pycontrails/models/ps_model/ps_operational_limits.py +525 -0
- pycontrails/models/ps_model/static/ps-aircraft-params-20250328.csv +69 -0
- pycontrails/models/ps_model/static/ps-synonym-list-20250328.csv +104 -0
- pycontrails/models/sac.py +442 -0
- pycontrails/models/tau_cirrus.py +183 -0
- pycontrails/physics/__init__.py +1 -0
- pycontrails/physics/constants.py +117 -0
- pycontrails/physics/geo.py +1138 -0
- pycontrails/physics/jet.py +968 -0
- pycontrails/physics/static/iata-cargo-load-factors-20250221.csv +74 -0
- pycontrails/physics/static/iata-passenger-load-factors-20250221.csv +74 -0
- pycontrails/physics/thermo.py +551 -0
- pycontrails/physics/units.py +472 -0
- pycontrails/py.typed +0 -0
- pycontrails/utils/__init__.py +1 -0
- pycontrails/utils/dependencies.py +66 -0
- pycontrails/utils/iteration.py +13 -0
- pycontrails/utils/json.py +187 -0
- pycontrails/utils/temp.py +50 -0
- pycontrails/utils/types.py +163 -0
- pycontrails-0.58.0.dist-info/METADATA +180 -0
- pycontrails-0.58.0.dist-info/RECORD +122 -0
- pycontrails-0.58.0.dist-info/WHEEL +6 -0
- pycontrails-0.58.0.dist-info/licenses/LICENSE +178 -0
- pycontrails-0.58.0.dist-info/licenses/NOTICE +43 -0
- pycontrails-0.58.0.dist-info/top_level.txt +3 -0
|
@@ -0,0 +1,2552 @@
|
|
|
1
|
+
"""Gridded CoCiP model."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import itertools
|
|
6
|
+
import logging
|
|
7
|
+
import warnings
|
|
8
|
+
from collections.abc import Generator, Iterable, Iterator, Sequence
|
|
9
|
+
from typing import TYPE_CHECKING, Any, NoReturn, TypeVar, overload
|
|
10
|
+
|
|
11
|
+
import numpy as np
|
|
12
|
+
import numpy.typing as npt
|
|
13
|
+
import pandas as pd
|
|
14
|
+
|
|
15
|
+
import pycontrails
|
|
16
|
+
from pycontrails.core import models
|
|
17
|
+
from pycontrails.core.met import MetDataset, maybe_downselect_mds
|
|
18
|
+
from pycontrails.core.vector import GeoVectorDataset, VectorDataset
|
|
19
|
+
from pycontrails.models import extended_k15, humidity_scaling, sac
|
|
20
|
+
from pycontrails.models.cocip import cocip, contrail_properties, wake_vortex, wind_shear
|
|
21
|
+
from pycontrails.models.cocipgrid.cocip_grid_params import CocipGridParams
|
|
22
|
+
from pycontrails.models.emissions import Emissions
|
|
23
|
+
from pycontrails.physics import constants, geo, thermo, units
|
|
24
|
+
from pycontrails.utils import dependencies
|
|
25
|
+
|
|
26
|
+
if TYPE_CHECKING:
|
|
27
|
+
import tqdm
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class CocipGrid(models.Model):
    """Run CoCiP simulation on a grid.

    See :meth:`eval` for a description of model evaluation ``source`` parameters.

    Parameters
    ----------
    met : MetDataset
        CoCiP-specific met data to interpolate against
    rad : MetDataset
        CoCiP-specific radiation data to interpolate against
    params : dict[str, Any] | None, optional
        Override :class:`CocipGridParams` defaults. Most notably, the model is highly
        dependent on the parameter ``dt_integration``. Memory usage is also affected by
        parameters ``met_slice_dt`` and ``target_split_size``.
    **params_kwargs : Any
        Override CocipGridParams defaults with arbitrary keyword arguments.

    Notes
    -----
    - If ``rad`` contains accumulated radiative fluxes, differencing to obtain
      time-averaged fluxes will reduce the time coverage of ``rad`` by half a forecast
      step. A warning will be produced during :meth:`eval` if the time coverage of
      ``rad`` (after differencing) is too short given the model evaluation parameters.
      If this occurs, provide an additional step of radiation data at the start or end
      of ``rad``.

    References
    ----------
    - :cite:`schumannPotentialReduceClimate2011`
    - :cite:`schumannContrailsVisibleAviation2012`

    See Also
    --------
    :class:`CocipGridParams`
    :class:`Cocip`
    :mod:`wake_vortex`
    :mod:`contrail_properties`
    :mod:`radiative_forcing`
    :mod:`humidity_scaling`
    :class:`Emissions`
    :mod:`sac`
    :mod:`tau_cirrus`
    """

    # Attributes assigned outside __init__ (see eval and helper methods); any
    # other instance attributes live on the base class.
    __slots__ = (
        "_target_dtype",
        "contrail",
        "contrail_list",
        "rad",
        "timesteps",
    )

    #: Model identifiers used in output metadata
    name = "contrail_grid"
    long_name = "Gridded Contrail Cirrus Prediction Model"
    default_params = CocipGridParams

    # Reference Cocip as the source of truth for met variables
    met_variables = cocip.Cocip.met_variables
    rad_variables = cocip.Cocip.rad_variables
    processed_met_variables = cocip.Cocip.processed_met_variables
    generic_rad_variables = cocip.Cocip.generic_rad_variables
    ecmwf_rad_variables = cocip.Cocip.ecmwf_rad_variables
    gfs_rad_variables = cocip.Cocip.gfs_rad_variables

    #: Met data is not optional
    met: MetDataset
    met_required = True
    rad: MetDataset

    #: Last evaluated input source
    source: MetDataset | GeoVectorDataset

    #: Artifacts attached when parameter ``verbose_outputs_evolution`` is True
    #: These allow for some additional information and parity with the approach
    #: taken by :class:`Cocip`.
    contrail_list: list[GeoVectorDataset]
    contrail: pd.DataFrame
|
111
|
+
    def __init__(
        self,
        met: MetDataset,
        rad: MetDataset,
        params: dict[str, Any] | None = None,
        **params_kwargs: Any,
    ) -> None:
        super().__init__(met, params=params, **params_kwargs)

        # Preprocess met and rad; tau_cirrus may be computed eagerly here, or
        # deferred (eval() also calls cocip.add_tau_cirrus after met downselection —
        # presumably a no-op if already present; TODO confirm).
        compute_tau_cirrus = self.params["compute_tau_cirrus_in_model_init"]
        self.met, self.rad = cocip.process_met_datasets(met, rad, compute_tau_cirrus)

        # Convenience -- only used in `run_interpolators`
        self.params["_interp_kwargs"] = self.interp_kwargs

        # Fail fast: these Cocip features have no gridded implementation yet
        if self.params["radiative_heating_effects"]:
            msg = "Parameter 'radiative_heating_effects' is not yet implemented in CocipGrid"
            raise NotImplementedError(msg)

        if self.params["unterstrasser_ice_survival_fraction"]:
            msg = "Parameter 'unterstrasser_ice_survival_fraction' is not implemented in CocipGrid"
            raise NotImplementedError(msg)

        # Common dtype of all met variables; arrays created during eval adopt it
        self._target_dtype = np.result_type(*self.met.data.values())
|
136
|
+
    @overload
    def eval(self, source: GeoVectorDataset, **params: Any) -> GeoVectorDataset: ...

    @overload
    def eval(self, source: MetDataset, **params: Any) -> MetDataset: ...

    @overload
    def eval(self, source: None = ..., **params: Any) -> NoReturn: ...

    def eval(
        self, source: GeoVectorDataset | MetDataset | None = None, **params: Any
    ) -> GeoVectorDataset | MetDataset:
        """Run CoCiP simulation on a 4d coordinate grid or arbitrary set of 4d points.

        If the :attr:`params` ``verbose_outputs_evolution`` is True, the model holds
        :attr:`contrail_list` and :attr:`contrail` attributes for viewing intermediate
        artifacts. If ``source`` data is large, these intermediate vectors may consume
        substantial memory.

        .. versionchanged:: 0.25.0

            No longer explicitly support :class:`Flight` as a source. Any flight source
            will be viewed as a :class:`GeoVectorDataset`. In order to evaluate CoCiP
            predictions over a flight trajectory, it is best to use the :class:`Cocip`
            model. It's also possible to pre-compute segment azimuth and true airspeed
            before passing the flight trajectory in here.

        Parameters
        ----------
        source : GeoVectorDataset | MetDataset | None
            Input :class:`GeoVectorDataset` or :class:`MetDataset`. If None,
            a ``NotImplementedError`` is raised. If any subclass of :class:`GeoVectorDataset`
            is passed (e.g., :class:`Flight`), the additional structure is forgotten and
            the model is evaluated as if it were a :class:`GeoVectorDataset`.
            Additional variables may be passed as ``source`` data or attrs. These
            include:

            - ``aircraft_type``: This overrides any value in :attr:`params`. Must be included
              in the source attrs (not data).
            - ``fuel_flow``, ``engine_efficiency``, ``true_airspeed``, ``wingspan``,
              ``aircraft_mass``: These override any value in :attr:`params`.
            - ``azimuth``: This overrides any value in :attr:`params`.
            - ``segment_length``: This overrides any value in :attr:`params`.
        **params : Any
            Overwrite model parameters before eval

        Returns
        -------
        GeoVectorDataset | MetDataset
            CoCiP predictions for each point in ``source``. Output data contains variables
            ``contrail_age`` and ``ef_per_m``. Additional variables specified by the model
            :attr:`params` ``verbose_outputs_formation`` are also included.

        Raises
        ------
        NotImplementedError
            If ``source`` is None

        Notes
        -----
        At a high level, the model is broken down into the following steps:

        - Convert any :class:`MetDataset` ``source`` to :class:`GeoVectorDataset`.
        - Split the ``source`` into chunks of size ``params["target_split_size"]``.
        - For each timestep in :attr:`timesteps`:

          - Generate any new waypoints from the source data. Calculate aircraft performance
            and run the CoCiP downwash routine over the new waypoints.
          - For each "active" contrail (i.e., a contrail that has been initialized but
            has not yet reached its end of life), evolve the contrail forward one step.
            Filter any waypoint that has reached its end of life.

        - Aggregate contrail age and energy forcing predictions to a single
          output variable to return.
        """
        self.update_params(params)
        if source is None:
            # Unclear how to implement this
            # We expect met and rad to contain time slices beyond what is found
            # in the source (we need to evolve contrails forward in time).
            # Perhaps we could use the isel(time=0) slice to construct the source
            # from the met and rad data.
            msg = "CocipGrid.eval() with 'source=None' is not implemented."
            raise NotImplementedError(msg)
        self.set_source(source)

        # Spatially/temporally downselect met and rad around the source, then
        # make sure tau_cirrus is available on the (possibly smaller) met data.
        self.met, self.rad = _downselect_met(self.source, self.met, self.rad, self.params)
        self.met = cocip.add_tau_cirrus(self.met)
        self._check_met_covers_source()

        # Save humidity scaling type to output attrs
        # NOTE(review): this local deliberately shadows the imported
        # `humidity_scaling` module for the remainder of eval
        humidity_scaling = self.params["humidity_scaling"]
        if humidity_scaling is not None:
            for k, v in humidity_scaling.description.items():
                self.source.attrs[f"humidity_scaling_{k}"] = v

        self._parse_verbose_outputs()

        self._set_timesteps()
        pbar = self._init_pbar()

        # Time-sliced working copies of self.met / self.rad, refreshed lazily by
        # _maybe_downselect_met_rad as the integration time advances
        met: MetDataset | None = None
        rad: MetDataset | None = None

        ef_summary: list[VectorDataset] = []
        verbose_dicts: list[dict[str, pd.Series]] = []
        contrail_list: list[GeoVectorDataset] = []
        existing_vectors: Iterator[GeoVectorDataset] = iter(())

        for time_idx, time_end in enumerate(self.timesteps):
            evolved_this_step = []
            ef_summary_this_step = []
            downwash_vectors_this_step = []
            # Initialize contrails from source points falling in this timestep
            for vector in self._generate_new_vectors(time_idx):
                t0 = vector["time"].min()
                met, rad = self._maybe_downselect_met_rad(met, rad, t0, time_end)
                downwash, verbose_dict = _run_downwash(vector, met, rad, self.params)

                if downwash:
                    # T_crit_sac is no longer needed. If verbose_outputs_formation is True,
                    # it's already stored in the verbose_dict data
                    downwash.data.pop("T_crit_sac", None)
                    downwash_vectors_this_step.append(downwash)
                    if self.params["verbose_outputs_evolution"]:
                        contrail_list.append(downwash)

                if self.params["verbose_outputs_formation"] and verbose_dict:
                    verbose_dicts.append(verbose_dict)

                if pbar is not None:
                    pbar.update()

            # Advance every active contrail (survivors from earlier steps plus
            # the freshly initialized downwash vectors) one integration step
            for vector in itertools.chain(existing_vectors, downwash_vectors_this_step):
                t0 = vector["time"].min()
                met, rad = self._maybe_downselect_met_rad(met, rad, t0, time_end)
                contrail, ef = _evolve_vector(
                    vector,
                    met=met,
                    rad=rad,
                    params=self.params,
                    t=time_end,
                )
                if ef:
                    evolved_this_step.append(contrail)
                    ef_summary_this_step.append(ef)
                    if self.params["verbose_outputs_evolution"]:
                        contrail_list.append(contrail)

                if pbar is not None:
                    pbar.update()

            if not evolved_this_step:
                # Nothing survived this step: once all source points are in the
                # past no new contrails can form, so the simulation is done
                if np.all(time_end > self.source_time):
                    break
                continue

            # Re-chunk survivors to keep per-vector work near the target size
            existing_vectors = combine_vectors(evolved_this_step, self.params["target_split_size"])

            summary = VectorDataset.sum(ef_summary_this_step)
            if summary:
                ef_summary.append(summary)

        if pbar is not None:
            logger.debug("Close progress bar")
            pbar.refresh()
            pbar.close()

        self._attach_verbose_outputs_evolution(contrail_list)
        total_ef_summary = _aggregate_ef_summary(ef_summary)
        return self._bundle_results(total_ef_summary, verbose_dicts)
|
306
|
+
def _maybe_downselect_met_rad(
|
|
307
|
+
self,
|
|
308
|
+
met: MetDataset | None,
|
|
309
|
+
rad: MetDataset | None,
|
|
310
|
+
t0: np.datetime64,
|
|
311
|
+
t1: np.datetime64,
|
|
312
|
+
) -> tuple[MetDataset, MetDataset]:
|
|
313
|
+
"""Downselect ``self.met`` and ``self.rad`` if necessary to cover ``[t0, t1]``.
|
|
314
|
+
|
|
315
|
+
This implementation assumes ``t0 <= t1``, but does not enforce this.
|
|
316
|
+
|
|
317
|
+
If the currently used ``met`` and ``rad`` slices do not include the time
|
|
318
|
+
interval ``[t0, t1]``, new slices are selected from the larger ``self.met``
|
|
319
|
+
and ``self.rad`` data. The slicing only occurs in the time domain.
|
|
320
|
+
|
|
321
|
+
Existing slices from ``met`` and ``rad`` will be used when possible to avoid
|
|
322
|
+
losing and re-loading already-loaded met data.
|
|
323
|
+
|
|
324
|
+
If ``self.params["downselect_met"]`` is True, the :func:`_downselect_met` has
|
|
325
|
+
already performed a spatial downselection of the met data.
|
|
326
|
+
"""
|
|
327
|
+
met = maybe_downselect_mds(self.met, met, t0, t1)
|
|
328
|
+
rad = maybe_downselect_mds(self.rad, rad, t0, t1)
|
|
329
|
+
|
|
330
|
+
return met, rad
|
|
331
|
+
|
|
332
|
+
def _attach_verbose_outputs_evolution(self, contrail_list: list[GeoVectorDataset]) -> None:
|
|
333
|
+
"""Attach intermediate artifacts to the model.
|
|
334
|
+
|
|
335
|
+
This method attaches :attr:`contrail_list` and :attr:`contrail` when
|
|
336
|
+
:attr:`params["verbose_outputs_evolution"]` is True.
|
|
337
|
+
|
|
338
|
+
Mirrors implementation in :class:`Cocip`. We could do additional work here
|
|
339
|
+
if this turns out to be useful.
|
|
340
|
+
"""
|
|
341
|
+
if not self.params["verbose_outputs_evolution"]:
|
|
342
|
+
return
|
|
343
|
+
|
|
344
|
+
self.contrail_list = contrail_list # attach raw data
|
|
345
|
+
|
|
346
|
+
if contrail_list:
|
|
347
|
+
# And the contrail DataFrame (pd.concat is expensive here)
|
|
348
|
+
dfs = [contrail.dataframe for contrail in contrail_list]
|
|
349
|
+
dfs = [df.assign(timestep=t_idx) for t_idx, df in enumerate(dfs)]
|
|
350
|
+
self.contrail = pd.concat(dfs)
|
|
351
|
+
else:
|
|
352
|
+
self.contrail = pd.DataFrame()
|
|
353
|
+
|
|
354
|
+
    def _bundle_results(
        self,
        summary: VectorDataset | None,
        verbose_dicts: list[dict[str, pd.Series]],
    ) -> GeoVectorDataset | MetDataset:
        """Gather and massage model outputs for return.

        Builds output ``attrs`` metadata, merges the aggregated ``summary`` and any
        formation-level ``verbose_dicts`` back onto :attr:`source`, and optionally
        derives ATR20 outputs when ``params["compute_atr20"]`` is set.
        """
        max_age = self.params["max_age"]
        dt_integration = self.params["dt_integration"]
        azimuth = self.get_source_param("azimuth")
        segment_length = self.get_source_param("segment_length")
        if segment_length is None:
            # Default nominal segment length of 1 m so per-meter outputs pass through
            segment_length = 1.0

        # Deal with verbose_dicts
        verbose_dict = _concat_verbose_dicts(
            verbose_dicts, self.source.size, self.params["verbose_outputs_formation"]
        )

        # Make metadata in attrs more readable: prefer whole hours/minutes when the
        # coarser unit represents the timedelta exactly
        if max_age.astype("timedelta64[h]") == max_age:
            max_age_str = str(max_age.astype("timedelta64[h]"))
        else:
            max_age_str = str(max_age.astype("timedelta64[m]"))
        if dt_integration.astype("timedelta64[m]") == dt_integration:
            dt_integration_str = str(dt_integration.astype("timedelta64[m]"))
        else:
            dt_integration_str = str(dt_integration.astype("timedelta64[s]"))

        self.transfer_met_source_attrs()
        attrs: dict[str, Any] = {
            "description": self.long_name,
            "max_age": max_age_str,
            "dt_integration": dt_integration_str,
            "aircraft_type": self.get_source_param("aircraft_type"),
            "pycontrails_version": pycontrails.__version__,
            **self.source.attrs,
        }
        if ap_model := self.params["aircraft_performance"]:
            attrs["ap_model"] = type(ap_model).__name__

        # Only record azimuth when it is a scalar; array-valued azimuth is omitted
        if isinstance(azimuth, np.floating | np.integer):
            attrs["azimuth"] = azimuth.item()
        elif isinstance(azimuth, float | int):
            attrs["azimuth"] = azimuth

        if isinstance(self.source, MetDataset):
            self.source = result_to_metdataset(
                result=summary,
                verbose_dict=verbose_dict,
                source=self.source,
                nominal_segment_length=segment_length,
                attrs=attrs,
            )

            if self.params["compute_atr20"]:
                # NOTE: unlike the vector branch below, MetDataset items need an
                # explicit ``.data`` access to get at the underlying arrays
                self.source["global_yearly_mean_rf_per_m"] = (
                    self.source["ef_per_m"].data
                    / constants.surface_area_earth
                    / constants.seconds_per_year
                )
                self.source["atr20_per_m"] = (
                    self.params["global_rf_to_atr20_factor"]
                    * self.source["global_yearly_mean_rf_per_m"].data
                )
        else:
            self.source = result_merge_source(
                result=summary,
                verbose_dict=verbose_dict,
                source=self.source,
                nominal_segment_length=segment_length,
                attrs=attrs,
            )

            if self.params["compute_atr20"]:
                self.source["global_yearly_mean_rf_per_m"] = (
                    self.source["ef_per_m"]
                    / constants.surface_area_earth
                    / constants.seconds_per_year
                )
                self.source["atr20_per_m"] = (
                    self.params["global_rf_to_atr20_factor"]
                    * self.source["global_yearly_mean_rf_per_m"]
                )

        return self.source
|
440
|
+
# ---------------------------
|
|
441
|
+
# Common Methods & Properties
|
|
442
|
+
# ---------------------------
|
|
443
|
+
|
|
444
|
+
@property
|
|
445
|
+
def source_time(self) -> npt.NDArray[np.datetime64]:
|
|
446
|
+
"""Return the time array of the :attr:`source` data."""
|
|
447
|
+
try:
|
|
448
|
+
source = self.source
|
|
449
|
+
except AttributeError as exc:
|
|
450
|
+
msg = "No source set"
|
|
451
|
+
raise AttributeError(msg) from exc
|
|
452
|
+
|
|
453
|
+
if isinstance(source, GeoVectorDataset):
|
|
454
|
+
return source["time"]
|
|
455
|
+
if isinstance(source, MetDataset):
|
|
456
|
+
return source.indexes["time"].values
|
|
457
|
+
|
|
458
|
+
msg = f"Cannot calculate timesteps for {source}"
|
|
459
|
+
raise TypeError(msg)
|
|
460
|
+
|
|
461
|
+
def _set_timesteps(self) -> None:
|
|
462
|
+
"""Set the :attr:`timesteps` based on the ``source`` time range."""
|
|
463
|
+
source_time = self.source_time
|
|
464
|
+
tmin = source_time.min()
|
|
465
|
+
tmax = source_time.max()
|
|
466
|
+
|
|
467
|
+
tmin = pd.to_datetime(tmin)
|
|
468
|
+
tmax = pd.to_datetime(tmax)
|
|
469
|
+
dt = pd.to_timedelta(self.params["dt_integration"])
|
|
470
|
+
|
|
471
|
+
t_start = tmin.ceil(dt)
|
|
472
|
+
t_end = tmax.floor(dt) + self.params["max_age"] + dt
|
|
473
|
+
|
|
474
|
+
# Pass in t_end (as opposed to tmax) to ensure that the met and rad data
|
|
475
|
+
# cover the entire evolution period.
|
|
476
|
+
_check_met_rad_time(self.met, self.rad, tmin, t_end)
|
|
477
|
+
|
|
478
|
+
self.timesteps = np.arange(t_start, t_end, dt)
|
|
479
|
+
|
|
480
|
+
    def _init_pbar(self) -> tqdm.tqdm | None:
        """Initialize a progress bar for model evaluation.

        The total number of steps is estimated in a very crude way. Do not
        rely on the progress bar for accurate estimates of runtime.

        Returns
        -------
        tqdm.tqdm | None
            A progress bar for model evaluation. If ``show_progress`` is False, returns None.
        """

        if not self.params["show_progress"]:
            return None

        try:
            from tqdm.auto import tqdm
        except ModuleNotFoundError as exc:
            # NOTE(review): assumes this helper always raises, so control cannot
            # fall through to the code below -- TODO confirm
            dependencies.raise_module_not_found_error(
                name="CocipGrid._init_pbar method",
                package_name="tqdm",
                module_not_found_error=exc,
                extra="Alternatively, set model parameter 'show_progress=False'.",
            )

        split_size = self.params["target_split_size"]
        if isinstance(self.source, MetDataset):
            # Gridded source: same number of splits at every source time
            n_splits_by_time = self._metdataset_source_n_splits()
            n_splits = len(self.source_time) * n_splits_by_time
        else:
            # Vector source: bucket source points into integration timesteps,
            # then estimate splits per timestep from the pre-SAC split size
            tmp1 = self.source_time[:, None] < self.timesteps[1:]
            tmp2 = self.source_time[:, None] >= self.timesteps[:-1]
            n_points_by_timestep = np.sum(tmp1 & tmp2, axis=0)

            init_split_size = self.params["target_split_size_pre_SAC_boost"] * split_size
            n_splits_by_time = np.ceil(n_points_by_timestep / init_split_size)
            n_splits = np.sum(n_splits_by_time)

        n_init_surv = 0.1 * n_splits  # assume 10% of points survive the downwash
        n_evo_steps = len(self.timesteps) * n_init_surv
        total = n_splits + n_evo_steps

        return tqdm(total=int(total), desc=f"{type(self).__name__} eval")
|
|
524
|
+
def _metdataset_source_n_splits(self) -> int:
|
|
525
|
+
"""Compute the number of splits at a given time for a :class:`MetDataset` source.
|
|
526
|
+
|
|
527
|
+
This method assumes :attr:`source` is a :class:`MetDataset`.
|
|
528
|
+
|
|
529
|
+
Returns
|
|
530
|
+
-------
|
|
531
|
+
int
|
|
532
|
+
The number of splits.
|
|
533
|
+
"""
|
|
534
|
+
if not isinstance(self.source, MetDataset):
|
|
535
|
+
msg = f"Expected source to be a MetDataset, found {type(self.source)}"
|
|
536
|
+
raise TypeError(msg)
|
|
537
|
+
|
|
538
|
+
indexes = self.source.indexes
|
|
539
|
+
grid_size = indexes["longitude"].size * indexes["latitude"].size * indexes["level"].size
|
|
540
|
+
|
|
541
|
+
split_size = int(
|
|
542
|
+
self.params["target_split_size_pre_SAC_boost"] * self.params["target_split_size"]
|
|
543
|
+
)
|
|
544
|
+
return max(grid_size // split_size, 1)
|
|
545
|
+
|
|
546
|
+
def _parse_verbose_outputs(self) -> None:
|
|
547
|
+
"""Confirm param "verbose_outputs" has the expected type for grid and path mode.
|
|
548
|
+
|
|
549
|
+
This function mutates the "verbose_outputs" field on :attr:`params`.
|
|
550
|
+
|
|
551
|
+
Currently, the list of all supported variables for verbose outputs
|
|
552
|
+
is determine by :func:`_supported_verbose_outputs`.
|
|
553
|
+
"""
|
|
554
|
+
if self.params["verbose_outputs"]:
|
|
555
|
+
msg = (
|
|
556
|
+
"Parameter 'verbose_outputs' is no longer supported for grid mode. "
|
|
557
|
+
"Instead, use 'verbose_outputs_formation' and 'verbose_outputs_evolution'."
|
|
558
|
+
)
|
|
559
|
+
raise ValueError(msg)
|
|
560
|
+
vo = self.params["verbose_outputs_formation"]
|
|
561
|
+
supported = _supported_verbose_outputs_formation()
|
|
562
|
+
|
|
563
|
+
# Parse to set of strings
|
|
564
|
+
if isinstance(vo, str):
|
|
565
|
+
vo = {vo}
|
|
566
|
+
elif isinstance(vo, bool):
|
|
567
|
+
vo = supported if vo else set()
|
|
568
|
+
else:
|
|
569
|
+
vo = set(vo)
|
|
570
|
+
|
|
571
|
+
unknown_vars = vo - supported
|
|
572
|
+
if unknown_vars:
|
|
573
|
+
warnings.warn(
|
|
574
|
+
f"Unknown variables in 'verbose_outputs': {unknown_vars}. "
|
|
575
|
+
"Presently, CocipGrid only supports verbose outputs for "
|
|
576
|
+
f"variables {supported}. The unknown variables will be ignored."
|
|
577
|
+
)
|
|
578
|
+
self.params["verbose_outputs_formation"] = vo & supported
|
|
579
|
+
|
|
580
|
+
def _generate_new_vectors(self, time_idx: int) -> Generator[GeoVectorDataset, None, None]:
    """Generate :class:`GeoVectorDataset` instances from :attr:`source`.

    Points are selected from :attr:`source_time` falling in the half-open
    window ending at ``self.timesteps[time_idx]`` (the first window also
    includes any earlier times).

    Parameters
    ----------
    time_idx : int
        The index of the current time slice in :attr:`timesteps`.

    Yields
    ------
    GeoVectorDataset
        Unevolved vectors whose source times fall in the current window.
        When :attr:`source` is a :class:`MetDataset`, each yielded dataset has a
        constant time value.

    Raises
    ------
    RuntimeError
        If the source already contains the reserved variable ``"index"``.
    TypeError
        If :attr:`source` is neither a :class:`MetDataset` nor a
        :class:`GeoVectorDataset`.
    """
    if "index" in self.source:
        # FIXME: We can simply change the internal variable to __index
        msg = "The variable 'index' is used internally. Found in source."
        raise RuntimeError(msg)

    source_time = self.source_time
    t_cur = self.timesteps[time_idx]
    # Half-open time window: the first window sweeps up everything before
    # t_cur; subsequent windows are [t_prev, t_cur).
    if time_idx == 0:
        filt = source_time < t_cur
    else:
        t_prev = self.timesteps[time_idx - 1]
        filt = (source_time >= t_prev) & (source_time < t_cur)

    if not filt.any():
        return

    if isinstance(self.source, MetDataset):
        times_in_filt = source_time[filt]
        filt_start_idx = np.argmax(filt).item()  # needed to ensure globally unique indexes

        n_splits = self._metdataset_source_n_splits()
        for idx, time in enumerate(times_in_filt):
            # For now, sticking with the convention that every vector should
            # have a constant time value.
            source_slice = MetDataset._from_fastpath(self.source.data.sel(time=[time]))

            # Convert the 4D grid to a vector
            vector = source_slice.to_vector()
            vector.update(
                longitude=vector["longitude"].astype(self._target_dtype, copy=False),
                latitude=vector["latitude"].astype(self._target_dtype, copy=False),
                level=vector["level"].astype(self._target_dtype, copy=False),
            )
            # Stride by the number of source times so indexes from different
            # time slices never collide; offset locates this slice globally.
            vector["index"] = source_time.size * np.arange(vector.size) + filt_start_idx + idx

            # Split into chunks
            for subvector in vector.generate_splits(n_splits):
                subvector = self._build_subvector(subvector)
                logger.debug(
                    "Yield new vector at time %s with size %s",
                    time.astype("datetime64[m]"),
                    subvector.size,
                )
                yield subvector

    elif isinstance(self.source, GeoVectorDataset):
        split_size = (
            self.params["target_split_size_pre_SAC_boost"] * self.params["target_split_size"]
        )
        n_splits = max(filt.sum() // split_size, 1)
        # Don't copy here ... we copy when we call `generate_splits`
        vector = self.source.filter(filt, copy=False)
        if vector:
            # Positions in the original source serve as globally unique indexes.
            vector["index"] = np.flatnonzero(filt)

        # Split into chunks
        for subvector in vector.generate_splits(n_splits, copy=True):
            subvector = self._build_subvector(subvector)
            logger.debug("Yield new vector with size %s", subvector.size)
            yield subvector

    else:
        msg = f"Unknown source {self.source}"
        raise TypeError(msg)
|
|
659
|
+
|
|
660
|
+
def _build_subvector(self, vector: GeoVectorDataset) -> GeoVectorDataset:
    """Attach formation-time, nominal, and segment-endpoint keys to ``vector``.

    Mutates ``vector`` in place and returns it.
    """
    # Every freshly generated vector starts life at its own time with zero age.
    vector["formation_time"] = vector["time"]
    vector["age"] = np.full(vector.size, np.timedelta64(0, "ns"))

    # Cache derived coordinates so later pipeline steps avoid recomputation.
    vector["air_pressure"] = vector.air_pressure
    vector["altitude"] = vector.altitude

    # Broadcast scalar "nominal" params onto attrs unless the source already
    # provides them. (A little strange that segment_length is also a nominal.)
    for key in _nominal_params():
        _setdefault_from_params(key, vector, self.params)

    segment_length = self._get_source_param_override("segment_length", vector)
    azimuth = self._get_source_param_override("azimuth", vector)

    # Experimental segment-free mode: both params must be None together.
    if azimuth is None and segment_length is None:
        return vector
    if azimuth is None:
        msg = "Set 'segment_length' to None for experimental segment-free model"
        raise ValueError(msg)
    if segment_length is None:
        msg = "Set 'azimuth' to None for experimental segment-free model"
        raise ValueError(msg)
    if self.params["dsn_dz_factor"]:
        msg = "'dsn_dz_factor' not supported outside of the segment-free mode"
        raise ValueError(msg)

    lons = vector["longitude"]
    lats = vector["latitude"]
    half_length = segment_length / 2.0

    # Place synthetic segment endpoints half a segment ahead of and behind
    # each grid point along the azimuth. These keys should probably not be
    # included in model input ... a warning fires if they get overwritten.
    for suffix, dist in (("head", half_length), ("tail", -half_length)):
        lon_end, lat_end = geo.forward_azimuth(lons=lons, lats=lats, az=azimuth, dist=dist)
        vector[f"longitude_{suffix}"] = lon_end.astype(self._target_dtype, copy=False)
        vector[f"latitude_{suffix}"] = lat_end.astype(self._target_dtype, copy=False)

    return vector
|
|
706
|
+
|
|
707
|
+
def _check_met_covers_source(self) -> None:
    """Ensure that the met and rad data cover the source data.

    See also :func:`_check_met_rad_time` which checks the time coverage
    in more detail.

    Raises
    ------
    AttributeError
        If no source has been set on the model.
    """
    try:
        source = self.source
    except AttributeError as exc:
        msg = "No source set"
        raise AttributeError(msg) from exc

    # Collect source coordinate arrays, handling both source flavors.
    if isinstance(source, MetDataset):
        src_indexes = source.indexes
        coords = {
            "longitude": src_indexes["longitude"].to_numpy(),
            "latitude": src_indexes["latitude"].to_numpy(),
            "level": src_indexes["level"].to_numpy(),
            "time": src_indexes["time"].to_numpy(),
        }
    else:
        coords = {
            "longitude": source["longitude"],
            "latitude": source["latitude"],
            "level": source.level,
            "time": source["time"],
        }

    met_indexes = self.met.indexes
    for name in ("longitude", "latitude", "level", "time"):
        _check_coverage(met_indexes[name].to_numpy(), coords[name], name, "met")

    # rad data is single-level, so "level" is not checked against it.
    rad_indexes = self.rad.indexes
    for name in ("longitude", "latitude", "time"):
        _check_coverage(rad_indexes[name].to_numpy(), coords[name], name, "rad")

    _warn_not_wrap(self.met)
    _warn_not_wrap(self.rad)
|
|
744
|
+
|
|
745
|
+
def _get_source_param_override(self, key: str, vector: GeoVectorDataset) -> Any:
    """Look up ``key`` on ``vector`` data/attrs, falling back to :attr:`params`.

    Thin wrapper around the module-level :func:`_get_source_param_override`,
    binding this model's :attr:`params`.
    """
    return _get_source_param_override(key, vector, self.params)
|
|
747
|
+
|
|
748
|
+
# ------------
|
|
749
|
+
# Constructors
|
|
750
|
+
# ------------
|
|
751
|
+
|
|
752
|
+
@staticmethod
def create_source(
    level: npt.NDArray[np.floating] | list[float] | float,
    time: npt.NDArray[np.datetime64] | list[np.datetime64] | np.datetime64,
    longitude: npt.NDArray[np.floating] | list[float] | None = None,
    latitude: npt.NDArray[np.floating] | list[float] | None = None,
    lon_step: float = 1.0,
    lat_step: float = 1.0,
) -> MetDataset:
    """Shortcut to create a :class:`MetDataset` source from coordinate arrays.

    .. versionchanged:: 0.54.3
        By default, the returned latitude values now extend to the poles.

    Parameters
    ----------
    level : npt.NDArray[np.floating] | list[float] | float
        Pressure levels for gridded cocip.
        To avoid interpolating outside of the passed ``met`` and ``rad`` data,
        this parameter should avoid the extreme values of the ``met`` and
        ``rad`` levels. If ``met`` is already defined, a good choice for
        ``level`` is ``met.data['level'].values[1: -1]``.
    time : npt.NDArray[np.datetime64] | list[np.datetime64] | np.datetime64
        One or more time values for gridded cocip.
    longitude : npt.NDArray[np.floating] | list[float] | None, optional
        Longitude array, by default None. If not specified, a global grid with
        resolution ``lon_step`` is used.
    latitude : npt.NDArray[np.floating] | list[float] | None, optional
        Latitude array, by default None. If not specified, a global grid with
        resolution ``lat_step`` is used.
    lon_step : float, optional
        Longitude resolution, by default 1.0.
        Only used if parameter ``longitude`` not specified.
    lat_step : float, optional
        Latitude resolution, by default 1.0.
        Only used if parameter ``latitude`` not specified.

    Returns
    -------
    MetDataset
        MetDataset that can be used as ``source`` input to
        :meth:`CocipGrid.eval(source=...)`

    See Also
    --------
    :meth:`MetDataset.from_coords`
    """
    lon = np.arange(-180, 180, lon_step, dtype=float) if longitude is None else longitude
    # Stop slightly past 90 so the pole itself is included in the grid.
    lat = np.arange(-90, 90.000001, lat_step, dtype=float) if latitude is None else latitude

    return MetDataset.from_coords(longitude=lon, latitude=lat, level=level, time=time)
|
|
807
|
+
|
|
808
|
+
|
|
809
|
+
################################
|
|
810
|
+
# Functions used by CocipGrid
|
|
811
|
+
################################
|
|
812
|
+
|
|
813
|
+
|
|
814
|
+
def _get_source_param_override(key: str, vector: GeoVectorDataset, params: dict[str, Any]) -> Any:
|
|
815
|
+
"""Mimic logic in :meth:`Models.get_source_param` replacing :attr:`source` with a ``vector``."""
|
|
816
|
+
try:
|
|
817
|
+
return vector[key]
|
|
818
|
+
except KeyError:
|
|
819
|
+
pass
|
|
820
|
+
|
|
821
|
+
try:
|
|
822
|
+
return vector.attrs[key]
|
|
823
|
+
except KeyError:
|
|
824
|
+
pass
|
|
825
|
+
|
|
826
|
+
val = params[key]
|
|
827
|
+
vector.attrs[key] = val
|
|
828
|
+
return val
|
|
829
|
+
|
|
830
|
+
|
|
831
|
+
def _setdefault_from_params(key: str, vector: GeoVectorDataset, params: dict[str, Any]) -> None:
|
|
832
|
+
"""Set a parameter on ``vector`` if it is not already set.
|
|
833
|
+
|
|
834
|
+
This method only sets "scalar" values.
|
|
835
|
+
If ``params[key]`` is None, the parameter is not set.
|
|
836
|
+
If ``params[key]`` is not a scalar, a TypeError is raised.
|
|
837
|
+
"""
|
|
838
|
+
|
|
839
|
+
if key in vector:
|
|
840
|
+
return
|
|
841
|
+
if key in vector.attrs:
|
|
842
|
+
return
|
|
843
|
+
|
|
844
|
+
scalar = params[key]
|
|
845
|
+
if scalar is None:
|
|
846
|
+
return
|
|
847
|
+
|
|
848
|
+
if not isinstance(scalar, int | float):
|
|
849
|
+
msg = (
|
|
850
|
+
f"Parameter {key} must be a scalar. For non-scalar values, directly "
|
|
851
|
+
"set the data on the 'source'."
|
|
852
|
+
)
|
|
853
|
+
raise TypeError(msg)
|
|
854
|
+
vector.attrs[key] = float(scalar)
|
|
855
|
+
|
|
856
|
+
|
|
857
|
+
def _nominal_params() -> Iterable[str]:
|
|
858
|
+
"""Return fields from :class:`CocipGridParams` that override values computed by the model.
|
|
859
|
+
|
|
860
|
+
Each of the fields returned by this method is included in :class:`CocipGridParams`
|
|
861
|
+
with a default value of None. When a non-None scalar value is provided for one of
|
|
862
|
+
these fields and the :attr:`source` data does not provide a value, the scalar value
|
|
863
|
+
is used (and broadcast over :attr:`source`) instead of running the AP or Emissions models.
|
|
864
|
+
|
|
865
|
+
If non-scalar values are desired, they should be provided directly on
|
|
866
|
+
:attr:`source` instead.
|
|
867
|
+
|
|
868
|
+
Returns
|
|
869
|
+
-------
|
|
870
|
+
Iterable[str]
|
|
871
|
+
"""
|
|
872
|
+
return (
|
|
873
|
+
"wingspan",
|
|
874
|
+
"aircraft_mass",
|
|
875
|
+
"true_airspeed",
|
|
876
|
+
"engine_efficiency",
|
|
877
|
+
"fuel_flow",
|
|
878
|
+
)
|
|
879
|
+
|
|
880
|
+
|
|
881
|
+
def run_interpolators(
    vector: GeoVectorDataset,
    met: MetDataset,
    rad: MetDataset | None = None,
    *,
    dz_m: float | None = None,
    humidity_scaling: humidity_scaling.HumidityScaling | None = None,
    keys: Sequence[str] | None = None,
    **interp_kwargs: Any,
) -> GeoVectorDataset:
    """Run interpolators over ``vector``.

    Intersect ``vector`` with DataArrays in met and rad needed for CoCiP. In addition, calculate
    three "lower level" intersections in which the level of the ``vector`` data is decreased
    according to the "dz_m" key in ``params``.

    Modifies ``vector`` in place and returns it.

    This function avoids overwriting existing variables on ``vector``.

    Aim to confine all interpolation to this function

    Parameters
    ----------
    vector : GeoVectorDataset
        Grid points.
    met : MetDataset
        CoCiP met slices. See :class:`CocipGrid`.
    rad : MetDataset | None, optional
        CoCiP rad slices. If ``keys`` is not None, this parameter must be None.
    dz_m : float | None, optional
        Difference in altitude between top and bottom layer for stratification calculations (m).
        Must be specified if ``keys`` is None.
    humidity_scaling : humidity_scaling.HumidityScaling | None, optional
        Specific humidity scaling scheme. Must be specified if ``keys`` is None.
    keys : Sequence[str] | None, optional
        Only run interpolators for select keys from ``met``
    **interp_kwargs : Any
        Interpolation keyword arguments

    Returns
    -------
    GeoVectorDataset
        Parameter ``vector`` with interpolated variables

    Raises
    ------
    TypeError
        If a required parameter is None
    ValueError
        If parameters `keys` and `rad` are both defined
    """
    # Avoid scaling specific humidity twice: record whether interpolation
    # (below) is what introduces it onto the vector.
    humidity_interpolated = "specific_humidity" not in vector

    # Fast path: interpolate only the requested met keys, then scale humidity.
    if keys:
        if rad is not None:
            msg = "The 'keys' override only valid for 'met' input"
            raise ValueError(msg)

        for met_key in keys:
            # NOTE: Changed in v0.43: no longer overwrites existing variables
            models.interpolate_met(met, vector, met_key, **interp_kwargs)

        return _apply_humidity_scaling(vector, humidity_scaling, humidity_interpolated)

    if dz_m is None:
        msg = "Specify 'dz_m'."
        raise TypeError(msg)
    if rad is None:
        msg = "Specify 'rad'."
        raise TypeError(msg)

    # Interpolation at usual level
    # Excluded keys are not needed -- only used to initially compute tau_cirrus
    excluded = {
        "specific_cloud_ice_water_content",
        "ice_water_mixing_ratio",
        "geopotential",
        "geopotential_height",
    }
    for met_key in met:
        if met_key in excluded:
            continue
        models.interpolate_met(met, vector, met_key, **interp_kwargs)

    # calculate radiative properties
    cocip.calc_shortwave_radiation(rad, vector, **interp_kwargs)
    cocip.calc_outgoing_longwave_radiation(rad, vector, **interp_kwargs)

    # Interpolation at lower level: convert the dz_m altitude offset to a
    # pressure level (Pa -> hPa) for the stratification calculations.
    air_temperature = vector["air_temperature"]
    air_pressure = vector.air_pressure
    air_pressure_lower = thermo.pressure_dz(air_temperature, air_pressure, dz_m)
    lower_level = air_pressure_lower / 100.0

    # Advect at lower_level
    for met_key in ("air_temperature", "eastward_wind", "northward_wind"):
        vector_key = f"{met_key}_lower"
        models.interpolate_met(
            met,
            vector,
            met_key,
            vector_key,
            **interp_kwargs,
            level=lower_level,
        )

    # Experimental segment-free model: no head/tail points to interpolate.
    if _is_segment_free_mode(vector):
        return _apply_humidity_scaling(vector, humidity_scaling, humidity_interpolated)

    longitude_head = vector["longitude_head"]
    latitude_head = vector["latitude_head"]
    longitude_tail = vector["longitude_tail"]
    latitude_tail = vector["latitude_tail"]

    # Advect at head and tail
    # NOTE: Not using head_tail_dt here to offset time. We could do this for slightly
    # more accurate interpolation, but we would have to load an additional met time
    # slice at t_{-1}. After t_0, the head_tail_dt offset is not used.
    for met_key in ("eastward_wind", "northward_wind"):
        vector_key = f"{met_key}_head"
        models.interpolate_met(
            met,
            vector,
            met_key,
            vector_key,
            **interp_kwargs,
            longitude=longitude_head,
            latitude=latitude_head,
        )

        vector_key = f"{met_key}_tail"
        models.interpolate_met(
            met,
            vector,
            met_key,
            vector_key,
            **interp_kwargs,
            longitude=longitude_tail,
            latitude=latitude_tail,
        )

    return _apply_humidity_scaling(vector, humidity_scaling, humidity_interpolated)
|
|
1026
|
+
|
|
1027
|
+
|
|
1028
|
+
def _apply_humidity_scaling(
|
|
1029
|
+
vector: GeoVectorDataset,
|
|
1030
|
+
humidity_scaling: humidity_scaling.HumidityScaling | None,
|
|
1031
|
+
humidity_interpolated: bool,
|
|
1032
|
+
) -> GeoVectorDataset:
|
|
1033
|
+
"""Scale specific humidity if it has been added by interpolator.
|
|
1034
|
+
|
|
1035
|
+
Assumes that air_temperature and pressure are available on ``vector``.
|
|
1036
|
+
"""
|
|
1037
|
+
if "specific_humidity" not in vector:
|
|
1038
|
+
return vector
|
|
1039
|
+
|
|
1040
|
+
if humidity_scaling is not None and humidity_interpolated:
|
|
1041
|
+
humidity_scaling.eval(vector, copy_source=False)
|
|
1042
|
+
return vector
|
|
1043
|
+
|
|
1044
|
+
if "rhi" in vector:
|
|
1045
|
+
return vector
|
|
1046
|
+
|
|
1047
|
+
vector["rhi"] = thermo.rhi(
|
|
1048
|
+
vector["specific_humidity"], vector["air_temperature"], vector.air_pressure
|
|
1049
|
+
)
|
|
1050
|
+
|
|
1051
|
+
return vector
|
|
1052
|
+
|
|
1053
|
+
|
|
1054
|
+
def _evolve_vector(
    vector: GeoVectorDataset,
    *,
    met: MetDataset,
    rad: MetDataset,
    params: dict[str, Any],
    t: np.datetime64,
) -> tuple[GeoVectorDataset, VectorDataset]:
    """Evolve ``vector`` to time ``t``.

    Return surviving contrail at end of evolution and aggregate metrics from evolution.

    .. versionchanged:: 0.25.0

        No longer expect ``vector`` to have a constant time variable. Consequently,
        time step handling now mirrors that in :class:`Cocip`. Moreover, this method now
        handles both :class:`GeoVectorDataset` and :class:`MetDataset` vectors derived
        from :attr:`source`.

    Parameters
    ----------
    vector : GeoVectorDataset
        Contrail points that have been initialized and are ready for evolution.
    met : MetDataset
        CoCiP met slices. See :class:`CocipGrid`.
    rad : MetDataset
        CoCiP rad slices. See :class:`CocipGrid`.
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.
    t : np.datetime64
        Time to evolve to.

    Returns
    -------
    contrail : GeoVectorDataset
        Evolved contrail at end of the evolution step.
    ef_summary : VectorDataset
        The ``contrail`` summary statistics. The result of
        ``contrail.select(("index", "age", "ef"), copy=False)``.
    """
    # Per-point time step: vector["time"] need not be constant.
    dt = t - vector["time"]

    if _is_segment_free_mode(vector):
        dt_head = None
        dt_tail = None
    else:
        # Head leads the nominal point in time, tail trails it, each by half
        # of head_tail_dt, so the segment stays centered on the point.
        head_tail_dt = vector["head_tail_dt"]
        half_head_tail_dt = head_tail_dt / 2
        dt_head = dt - half_head_tail_dt  # type: ignore[operator]
        dt_tail = dt + half_head_tail_dt  # type: ignore[operator]

    # After advection, out has time t
    out = advect(vector, dt, dt_head, dt_tail)  # type: ignore[arg-type]

    out = run_interpolators(
        out,
        met,
        rad,
        dz_m=params["dz_m"],
        humidity_scaling=params["humidity_scaling"],
        **params["_interp_kwargs"],
    )
    # NOTE: takes both the pre-step vector and the advected/interpolated one;
    # the step calculation compares state across the two.
    out = calc_evolve_one_step(vector, out, params)
    ef_summary = out.select(("index", "age", "ef"), copy=False)

    return out, ef_summary
|
|
1120
|
+
|
|
1121
|
+
|
|
1122
|
+
def _run_downwash(
    vector: GeoVectorDataset, met: MetDataset, rad: MetDataset, params: dict[str, Any]
) -> tuple[GeoVectorDataset, dict[str, pd.Series]]:
    """Perform calculations involving downwash and initial contrail.

    .. versionchanged:: 0.25.0

        No longer return ``summary_data``. This was previously a vector of zeros,
        and does not give any useful information.

    Parameters
    ----------
    vector : GeoVectorDataset
        Grid values
    met : MetDataset
        CoCiP met slices. See :class:`CocipGrid`.
    rad : MetDataset
        CoCiP rad slices. See :class:`CocipGrid`.
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.

    Returns
    -------
    vector : GeoVectorDataset
        Downwash vector.
    verbose_dict : dict[str, pd.Series]
        Dictionary of verbose outputs. Each series is indexed by the
        vector's "index" variable.
    """
    # All extra variables as required by verbose_outputs are computed on the
    # initial calculations involving the downwash contrail.
    verbose_dict: dict[str, pd.Series] = {}
    verbose_outputs_formation = params["verbose_outputs_formation"]

    # Only temperature and humidity are needed before the SAC filter; the
    # full interpolation happens after filtering (cheaper).
    keys = "air_temperature", "specific_humidity"
    vector = run_interpolators(
        vector,
        met,
        humidity_scaling=params["humidity_scaling"],
        keys=keys,
        **params["_interp_kwargs"],
    )
    calc_emissions(vector, params)

    # Get verbose outputs from emissions. These include fuel_flow, nvpm_ei_n, true_airspeed.
    for key in verbose_outputs_formation:
        if (val := vector.get(key)) is not None:
            verbose_dict[key] = pd.Series(data=val, index=vector["index"])

    # Get verbose outputs from SAC calculation.
    vector, sac_ = find_initial_contrail_regions(vector, params)
    if (key := "sac") in verbose_outputs_formation:
        verbose_dict[key] = sac_
    if (key := "T_crit_sac") in verbose_outputs_formation and (val := vector.get(key)) is not None:
        verbose_dict[key] = pd.Series(data=val, index=vector["index"])

    # Early exit if nothing in vector passes the SAC
    if not vector:
        logger.debug("No vector waypoints satisfy SAC")
        return vector, verbose_dict

    vector = run_interpolators(vector, met, rad, dz_m=params["dz_m"], **params["_interp_kwargs"])
    out = simulate_wake_vortex_downwash(vector, params)

    out = run_interpolators(
        out,
        met,
        rad,
        dz_m=params["dz_m"],
        humidity_scaling=params["humidity_scaling"],
        **params["_interp_kwargs"],
    )
    # NOTE: persistence is judged against both the pre-downwash `vector` and
    # the downwash result `out`.
    out, persistent = find_initial_persistent_contrails(vector, out, params)

    if (key := "persistent") in verbose_outputs_formation:
        verbose_dict[key] = persistent
    if (key := "iwc") in verbose_outputs_formation and (data := out.get(key)) is not None:
        verbose_dict[key] = pd.Series(data=data, index=out["index"])

    return out, verbose_dict
|
|
1201
|
+
|
|
1202
|
+
|
|
1203
|
+
def combine_vectors(
    vectors: list[GeoVectorDataset],
    target_split_size: int,
) -> Generator[GeoVectorDataset, None, None]:
    """Combine vectors until size exceeds ``target_split_size``.

    .. versionchanged:: 0.25.0

        Ignore common end of life constraint previously imposed.

        Change function to return a generator.

    Parameters
    ----------
    vectors : list[GeoVectorDataset]
        Vectors to combine
    target_split_size : int
        Target vector size in combined vectors

    Yields
    ------
    GeoVectorDataset
        Combined vectors.
    """
    # Accumulate consecutive vectors; once the pending group reaches the
    # target size, concatenate and yield it, then start a fresh group.
    pending_start = 0
    pending_size = 0
    for end, vec in enumerate(vectors, start=1):
        pending_size += vec.size
        if pending_size < target_split_size:
            continue
        yield GeoVectorDataset.sum(vectors[pending_start:end])
        pending_start = end
        pending_size = 0

    # If there is anything nontrivial left over, yield it
    if pending_size:
        yield GeoVectorDataset.sum(vectors[pending_start:])
|
|
1241
|
+
|
|
1242
|
+
|
|
1243
|
+
def find_initial_contrail_regions(
    vector: GeoVectorDataset, params: dict[str, Any]
) -> tuple[GeoVectorDataset, pd.Series]:
    """Filter ``vector`` according to the SAC.

    This function also attaches the ``T_crit_sac`` variable to the returned
    GeoVectorDataset instance.

    Parameters
    ----------
    vector : GeoVectorDataset
        Data to apply SAC. Must contain variables
        - "air_temperature"
        - "specific_humidity"

    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`. Must contain keys
        - "fuel"
        - "filter_sac"

    Returns
    -------
    filtered_vector : GeoVectorDataset
        Input parameter ``vector`` filtered according to SAC (if ``param["filter_sac"]``).
    sac_series : pd.Series
        SAC values for each point in input ``vector``. The :class:`pd.Series` is
        indexed by ``vector["index"]``
    """
    air_temperature = vector["air_temperature"]
    specific_humidity = vector["specific_humidity"]
    air_pressure = vector.air_pressure
    engine_efficiency = vector["engine_efficiency"]
    fuel = vector.attrs["fuel"]
    ei_h2o = fuel.ei_h2o
    q_fuel = fuel.q_fuel

    # Schmidt-Appleman criterion: mixing-line slope, liquid saturation
    # temperature, and the critical relative humidity threshold.
    G = sac.slope_mixing_line(specific_humidity, air_pressure, engine_efficiency, ei_h2o, q_fuel)
    t_sat_liq = sac.T_sat_liquid(G)
    rh = thermo.rh(specific_humidity, air_temperature, air_pressure)
    rh_crit = sac.rh_critical_sac(air_temperature, t_sat_liq, G)
    sac_ = sac.sac(rh, rh_crit)

    filt = sac_ == 1.0
    logger.debug(
        "Fraction of grid points satisfying the SAC: %s / %s",
        filt.sum(),
        vector.size,
    )

    if params["filter_sac"]:
        filtered_vector = vector.filter(filt)
    else:
        # NOTE: `filt` is deliberately replaced with an all-True mask here so
        # the T_crit_sac computation below covers every point.
        filt = np.ones(vector.size, dtype=bool)  # needed below in T_crit_sac
        filtered_vector = vector.copy()
        logger.debug("Not filtering SAC")

    # If filtered_vector is already empty, sac.T_critical_sac will raise an error
    # in the Newton approximation
    # So just return the empty vector here
    if not filtered_vector:
        return filtered_vector, pd.Series([], dtype=float)

    # This is only used in `calc_first_contrail`, but we compute it here in order
    # to do everything SAC related at once.
    # It is slightly more performant to compute this AFTER we filter by sac_ == 1,
    # which is why we compute it here
    T_crit_sac = sac.T_critical_sac(t_sat_liq[filt], rh[filt], G[filt])
    filtered_vector["T_crit_sac"] = T_crit_sac
    if params["vpm_activation"]:
        filtered_vector["G"] = G[filt]  # only used in vpm_activation
    return filtered_vector, pd.Series(data=sac_, index=vector["index"])
|
|
1314
|
+
|
|
1315
|
+
|
|
1316
|
+
def simulate_wake_vortex_downwash(
    vector: GeoVectorDataset, params: dict[str, Any]
) -> GeoVectorDataset:
    """Calculate regions of initial contrail formation.

    This function calculates the effective flight downwash, then constructs a
    GeoVectorDataset object consisting of the downwash regions. No filtering
    occurs here; the length of the returned GeoVectorDataset equals the length
    of the parameter ``vector``.

    Of all steps in the gridded cocip pipeline, this one is generally the slowest
    since grid points have not yet been filtered by persistence (only SAC filtering
    has been applied in the CocipGrid pipeline). This function includes abundant
    logging.

    Parameters
    ----------
    vector : GeoVectorDataset
        Grid points from which initial contrail regions are calculated.
        Must already be interpolated against CoCiP met data.
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.

    Returns
    -------
    GeoVectorDataset
        Initial regions of persistent contrail.
    """
    # stored in `calc_emissions`
    true_airspeed = vector["true_airspeed"]

    # NOTE: The `calc_wind_shear` function is run on both the downwash and contrail object.
    # This is the only time it is called with the `is_downwash` flag on
    calc_wind_shear(
        vector,
        dz_m=params["dz_m"],
        is_downwash=True,
        dsn_dz_factor=params["dsn_dz_factor"],
    )

    # Stored in `calc_wind_shear`
    dT_dz = vector["dT_dz"]
    ds_dz = vector["ds_dz"]

    air_pressure = vector.air_pressure
    air_temperature = vector["air_temperature"]
    T_crit_sac = vector["T_crit_sac"]

    # Per-point overrides (from the source) take precedence over model params
    wsee = _get_source_param_override("wind_shear_enhancement_exponent", vector, params)
    wingspan = _get_source_param_override("wingspan", vector, params)
    aircraft_mass = _get_source_param_override("aircraft_mass", vector, params)

    dz_max = wake_vortex.max_downward_displacement(
        wingspan=wingspan,
        true_airspeed=true_airspeed,
        aircraft_mass=aircraft_mass,
        air_temperature=air_temperature,
        dT_dz=dT_dz,
        ds_dz=ds_dz,
        air_pressure=air_pressure,
        effective_vertical_resolution=params["effective_vertical_resolution"],
        wind_shear_enhancement_exponent=wsee,
    )

    width = wake_vortex.initial_contrail_width(wingspan, dz_max)
    iwvd = _get_source_param_override("initial_wake_vortex_depth", vector, params)
    depth = wake_vortex.initial_contrail_depth(dz_max, iwvd)
    # Initially, sigma_yz is set to 0
    # See bottom left paragraph p. 552 Schumann 2012 beginning with:
    # >>> "The contrail model starts from initial values ..."
    sigma_yz = np.zeros_like(width)

    index = vector["index"]
    time = vector["time"]
    longitude = vector["longitude"]
    latitude = vector["latitude"]
    altitude = vector.altitude
    formation_time = vector["formation_time"]
    age = vector["age"]

    # Initial contrail is constructed at a lower altitude
    # (the contrail center sinks by half the wake vortex depth)
    altitude_1 = altitude - 0.5 * depth
    level_1 = units.m_to_pl(altitude_1)
    # NOTE(review): presumably level_1 is in hPa, so this converts to Pa — confirm
    air_pressure_1 = 100.0 * level_1

    data = {
        "index": index,
        "longitude": longitude,
        "latitude": latitude,
        "level": level_1,
        "altitude": altitude_1,
        "air_pressure": air_pressure_1,
        "time": time,
        "formation_time": formation_time,
        "age": age,
        "T_crit_sac": T_crit_sac,
        "width": width,
        "depth": depth,
        "sigma_yz": sigma_yz,
        **_get_uncertainty_params(vector),
    }

    # Experimental segment-free model: no head/tail geometry is carried
    if _is_segment_free_mode(vector):
        return GeoVectorDataset._from_fastpath(data, attrs=vector.attrs).copy()

    # Stored in `_generate_new_grid_vectors`
    data["longitude_head"] = vector["longitude_head"]
    data["latitude_head"] = vector["latitude_head"]
    data["longitude_tail"] = vector["longitude_tail"]
    data["latitude_tail"] = vector["latitude_tail"]
    data["head_tail_dt"] = vector["head_tail_dt"]

    segment_length = _get_source_param_override("segment_length", vector, params)
    if isinstance(segment_length, np.ndarray):
        data["segment_length"] = segment_length
    else:
        # This should be broadcast over the source: subsequent vectors created during
        # evolution always recompute the segment length. GeoVectorDataset.sum will
        # raise an error if the wake vortex GeoVectorDataset does not contain a
        # segment_length variable.
        data["segment_length"] = np.full_like(data["longitude"], segment_length)

    return GeoVectorDataset._from_fastpath(data, attrs=vector.attrs).copy()
|
|
1440
|
+
|
|
1441
|
+
|
|
1442
|
+
def find_initial_persistent_contrails(
    vector: GeoVectorDataset, contrail: GeoVectorDataset, params: dict[str, Any]
) -> tuple[GeoVectorDataset, pd.Series]:
    """Calculate first contrail immediately after downwash calculation.

    This function filters according to :func:`contrail_properties.initial_persistent`.

    The ``_1`` naming convention represents conditions after the wake vortex phase.

    Parameters
    ----------
    vector : GeoVectorDataset
        Data from original grid points after SAC filtering.
    contrail : GeoVectorDataset
        Output of :func:`simulate_wake_vortex_downwash`.
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.

    Returns
    -------
    tuple[GeoVectorDataset, pd.Series]
        The first entry in the tuple holds the first contrail after filtering
        by initially persistent. This GeoVectorDataset instance is equipped with
        all necessary keys to begin main contrail evolution. The second entry is
        the raw output of the :func:`contrail_properties.initial_persistent`
        computation as needed if "persistent" is in the "verbose_outputs"
        parameter. The :class:`pd.Series` is indexed by ``vector["index"]``.
    """
    # Gridpoint data
    # NOTE: In the Cocip implementation, these are variables on sac_flight
    # without the suffix "_1"
    air_pressure = vector.air_pressure
    air_temperature = vector["air_temperature"]
    specific_humidity = vector["specific_humidity"]
    # Fuel consumption per unit distance flown
    fuel_dist = vector["fuel_flow"] / vector["true_airspeed"]
    nvpm_ei_n = vector["nvpm_ei_n"]
    ei_h2o = params["fuel"].ei_h2o

    # Contrail data (post wake vortex)
    T_crit_sac = contrail["T_crit_sac"]
    width = contrail["width"]
    depth = contrail["depth"]
    air_pressure_1 = contrail.air_pressure

    # Initialize initial contrail properties (ice water content, number of ice particles)
    # The logic here is fairly different from the later timesteps
    iwc = contrail_properties.initial_iwc(
        air_temperature=air_temperature,
        specific_humidity=specific_humidity,
        air_pressure=air_pressure,
        fuel_dist=fuel_dist,
        width=width,
        depth=depth,
        ei_h2o=ei_h2o,
    )
    iwc_ad = contrail_properties.iwc_adiabatic_heating(
        air_temperature=air_temperature,
        air_pressure=air_pressure,
        air_pressure_1=air_pressure_1,
    )
    iwc_1 = contrail_properties.iwc_post_wake_vortex(iwc, iwc_ad)

    if params["vpm_activation"]:
        # We can add a Cocip parameter for T_exhaust, vpm_ei_n, and particles
        aei = extended_k15.droplet_apparent_emission_index(
            specific_humidity=specific_humidity,
            T_ambient=air_temperature,
            T_exhaust=vector.attrs.get("T_exhaust", extended_k15.DEFAULT_EXHAUST_T),
            air_pressure=air_pressure,
            nvpm_ei_n=nvpm_ei_n,
            vpm_ei_n=vector.attrs.get("vpm_ei_n", extended_k15.DEFAULT_VPM_EI_N),
            G=vector["G"],
        )
        # No lower bound applied when using the VPM activation scheme
        min_aei = None
    else:
        f_activ = contrail_properties.ice_particle_activation_rate(air_temperature, T_crit_sac)
        aei = nvpm_ei_n * f_activ
        min_aei = params["min_ice_particle_number_nvpm_ei_n"]

    n_ice_per_m_0 = contrail_properties.initial_ice_particle_number(
        aei=aei, fuel_dist=fuel_dist, min_aei=min_aei
    )

    # Fraction of ice particles surviving the wake vortex phase
    f_surv = contrail_properties.ice_particle_survival_fraction(iwc, iwc_1)
    n_ice_per_m_1 = n_ice_per_m_0 * f_surv

    # The logic below corresponds to Cocip._create_downwash_contrail (roughly)
    contrail["iwc"] = iwc_1
    contrail["n_ice_per_m"] = n_ice_per_m_1

    # Check for persistent initial_contrails
    rhi_1 = contrail["rhi"]
    persistent_1 = contrail_properties.initial_persistent(iwc_1=iwc_1, rhi_1=rhi_1)

    logger.debug(
        "Fraction of grid points with persistent initial contrails: %s / %s",
        int(persistent_1.sum()),
        contrail.size,
    )

    # Filter by persistent
    if params["filter_initially_persistent"]:
        persistent_contrail = contrail.filter(persistent_1.astype(bool))
    else:
        persistent_contrail = contrail.copy()
        logger.debug("Not filtering initially persistent")

    # Attach a bunch of other initialization variables
    # (Previously, this was done before filtering. It's computationally more
    # efficient to do it down here)
    calc_thermal_properties(persistent_contrail)
    calc_wind_shear(
        persistent_contrail,
        is_downwash=False,
        dz_m=params["dz_m"],
        dsn_dz_factor=params["dsn_dz_factor"],
    )

    # Per-point overrides (if present on the contrail) win over model params
    effective_vertical_resolution = persistent_contrail.get(
        "effective_vertical_resolution", params["effective_vertical_resolution"]
    )
    wind_shear_enhancement_exponent = persistent_contrail.get(
        "wind_shear_enhancement_exponent", params["wind_shear_enhancement_exponent"]
    )
    sedimentation_impact_factor = persistent_contrail.get(
        "sedimentation_impact_factor", params["sedimentation_impact_factor"]
    )
    cocip.calc_contrail_properties(
        persistent_contrail,
        effective_vertical_resolution=effective_vertical_resolution,
        wind_shear_enhancement_exponent=wind_shear_enhancement_exponent,
        sedimentation_impact_factor=sedimentation_impact_factor,
        radiative_heating_effects=False,  # Not yet supported in CocipGrid
    )

    # assumes "sdr", "rsr", and "olr" are already available on vector
    cocip.calc_radiative_properties(persistent_contrail, params)

    # no EF forcing on first contrail
    persistent_contrail["ef"] = np.zeros_like(persistent_contrail["n_ice_per_m"])

    persistent_series = pd.Series(data=persistent_1, index=contrail["index"])
    return persistent_contrail, persistent_series
|
|
1585
|
+
|
|
1586
|
+
|
|
1587
|
+
def calc_evolve_one_step(
    curr_contrail: GeoVectorDataset,
    next_contrail: GeoVectorDataset,
    params: dict[str, Any],
) -> GeoVectorDataset:
    """Calculate contrail properties of ``next_contrail``.

    This function attaches additional variables to ``next_contrail``, then
    filters by :func:`contrail_properties.contrail_persistent`.

    The ``_t1`` / ``_t2`` suffixes denote quantities at the current and next
    time step respectively.

    Parameters
    ----------
    curr_contrail : GeoVectorDataset
        Existing contrail
    next_contrail : GeoVectorDataset
        Result of advecting existing contrail already interpolated against CoCiP met data
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.

    Returns
    -------
    GeoVectorDataset
        Parameter ``next_contrail`` filtered by persistence.

    Raises
    ------
    NotImplementedError
        If ``params["persistent_buffer"]`` is not None (unsupported in CocipGrid).
    """
    calc_wind_shear(
        next_contrail,
        is_downwash=False,
        dz_m=params["dz_m"],
        dsn_dz_factor=params["dsn_dz_factor"],
    )
    calc_thermal_properties(next_contrail)

    # Current-step state used to evolve the plume
    iwc_t1 = curr_contrail["iwc"]
    specific_humidity_t1 = curr_contrail["specific_humidity"]
    specific_humidity_t2 = next_contrail["specific_humidity"]
    q_sat_t1 = curr_contrail["q_sat"]
    q_sat_t2 = next_contrail["q_sat"]
    plume_mass_per_m_t1 = curr_contrail["plume_mass_per_m"]
    width_t1 = curr_contrail["width"]
    depth_t1 = curr_contrail["depth"]
    sigma_yz_t1 = curr_contrail["sigma_yz"]
    dsn_dz_t1 = curr_contrail["dsn_dz"]
    diffuse_h_t1 = curr_contrail["diffuse_h"]
    diffuse_v_t1 = curr_contrail["diffuse_v"]

    # Segment-free mode logic: use scalar placeholders instead of per-segment arrays
    segment_length_t2: np.ndarray | float
    seg_ratio_t12: np.ndarray | float
    if _is_segment_free_mode(curr_contrail):
        segment_length_t2 = 1.0
        seg_ratio_t12 = 1.0
    else:
        segment_length_t1 = curr_contrail["segment_length"]
        segment_length_t2 = next_contrail["segment_length"]
        seg_ratio_t12 = contrail_properties.segment_length_ratio(
            segment_length_t1, segment_length_t2
        )

    dt = next_contrail["time"] - curr_contrail["time"]

    sigma_yy_t2, sigma_zz_t2, sigma_yz_t2 = contrail_properties.plume_temporal_evolution(
        width_t1=width_t1,
        depth_t1=depth_t1,
        sigma_yz_t1=sigma_yz_t1,
        dsn_dz_t1=dsn_dz_t1,
        diffuse_h_t1=diffuse_h_t1,
        diffuse_v_t1=diffuse_v_t1,
        seg_ratio=seg_ratio_t12,
        dt=dt,
        max_depth=params["max_depth"],
    )

    width_t2, depth_t2 = contrail_properties.new_contrail_dimensions(sigma_yy_t2, sigma_zz_t2)
    next_contrail["sigma_yz"] = sigma_yz_t2
    next_contrail["width"] = width_t2
    next_contrail["depth"] = depth_t2

    area_eff_t2 = contrail_properties.new_effective_area_from_sigma(
        sigma_yy=sigma_yy_t2,
        sigma_zz=sigma_zz_t2,
        sigma_yz=sigma_yz_t2,
    )

    rho_air_t2 = next_contrail["rho_air"]
    plume_mass_per_m_t2 = contrail_properties.plume_mass_per_distance(area_eff_t2, rho_air_t2)
    iwc_t2 = contrail_properties.new_ice_water_content(
        iwc_t1=iwc_t1,
        q_t1=specific_humidity_t1,
        q_t2=specific_humidity_t2,
        q_sat_t1=q_sat_t1,
        q_sat_t2=q_sat_t2,
        mass_plume_t1=plume_mass_per_m_t1,
        mass_plume_t2=plume_mass_per_m_t2,
    )
    next_contrail["iwc"] = iwc_t2

    # Ice particle losses (aggregation and turbulence) from the current step
    n_ice_per_m_t1 = curr_contrail["n_ice_per_m"]
    dn_dt_agg = curr_contrail["dn_dt_agg"]
    dn_dt_turb = curr_contrail["dn_dt_turb"]

    n_ice_per_m_t2 = contrail_properties.new_ice_particle_number(
        n_ice_per_m_t1=n_ice_per_m_t1,
        dn_dt_agg=dn_dt_agg,
        dn_dt_turb=dn_dt_turb,
        seg_ratio=seg_ratio_t12,
        dt=dt,
    )
    next_contrail["n_ice_per_m"] = n_ice_per_m_t2

    cocip.calc_contrail_properties(
        next_contrail,
        params["effective_vertical_resolution"],
        params["wind_shear_enhancement_exponent"],
        params["sedimentation_impact_factor"],
        radiative_heating_effects=False,  # Not yet supported in CocipGrid
    )
    cocip.calc_radiative_properties(next_contrail, params)

    rf_net_t1 = curr_contrail["rf_net"]
    rf_net_t2 = next_contrail["rf_net"]
    ef = contrail_properties.energy_forcing(
        rf_net_t1=rf_net_t1,
        rf_net_t2=rf_net_t2,
        width_t1=width_t1,
        width_t2=width_t2,
        seg_length_t2=segment_length_t2,
        dt=dt,
    )
    # NOTE: This will get masked below if `persistent` is False
    # That is, we are taking a right Riemann sum of a decreasing function, so we are
    # underestimating the truth. With dt small enough, this is fine.
    next_contrail["ef"] = ef

    # NOTE: Only dealing with `next_contrail` here
    latitude = next_contrail["latitude"]
    altitude = next_contrail["altitude"]
    tau_contrail = next_contrail["tau_contrail"]
    n_ice_per_vol = next_contrail["n_ice_per_vol"]
    age = next_contrail["age"]

    # Both tau_contrail and n_ice_per_vol could have nan values
    # These are mostly due to out of bounds interpolation
    # Both are computed in cocip.calc_contrail_properties
    # Interpolation out-of-bounds nan values first appear in tau_contrail,
    # then in n_ice_per_vol at the next time step.
    # We can use something like np.isnan(tau_contrail) to get values that
    # are filled with nan in interpolation.
    persistent = contrail_properties.contrail_persistent(
        latitude=latitude,
        altitude=altitude,
        segment_length=segment_length_t2,  # type: ignore[arg-type]
        age=age,
        tau_contrail=tau_contrail,
        n_ice_per_m3=n_ice_per_vol,
        params=params,
    )

    # Filter by persistent
    logger.debug(
        "Fraction of grid points surviving: %s / %s",
        np.sum(persistent),
        next_contrail.size,
    )
    if params["persistent_buffer"] is not None:
        # See Cocip implementation if we want to support this
        raise NotImplementedError
    return next_contrail.filter(persistent)
|
|
1754
|
+
|
|
1755
|
+
|
|
1756
|
+
def calc_emissions(vector: GeoVectorDataset, params: dict[str, Any]) -> None:
    """Calculate aircraft performance (AP) and emissions data.

    This function mutates the ``vector`` parameter in-place by setting keys:
    - "true_airspeed": computed by the aircraft performance model
    - "engine_efficiency": computed by the aircraft performance model
    - "fuel_flow": computed by the aircraft performance model
    - "nvpm_ei_n": computed by the :class:`Emissions` model
    - "head_tail_dt"

    The ``params`` parameter is also mutated in-place by setting keys:
    - "wingspan": aircraft wingspan
    - "aircraft_mass": mass of aircraft

    Implementation note: Previously, this function computed "fuel_dist" instead of
    "fuel_flow". While "fuel_dist" is the only variable needed in
    :func:`find_initial_persistent_contrails`, "fuel_flow" is needed for verbose
    outputs. Moreover, we are anticipating having "fuel_flow" as a preexisting
    variable on the input ``source``, whereas "fuel_dist" is less common and
    less interpretable. So, we set "fuel_flow" here and then calculate "fuel_dist"
    in :func:`find_initial_persistent_contrails`.

    Parameters
    ----------
    vector : GeoVectorDataset
        Grid points already interpolated against CoCiP met data
    params : dict[str, Any]
        CoCiP model parameters. See :class:`CocipGrid`.

    Raises
    ------
    NotImplementedError
        Aircraft type in `params` not found in EDB
    """
    logger.debug("Process emissions")

    # PART 1: Fuel flow data
    vector.attrs.setdefault("aircraft_type", params["aircraft_type"])

    # Important: If params["engine_uid"] is None (the default value), let Emissions
    # overwrite with the assumed value.
    # Otherwise, set the non-None value on vector here
    if param_engine_uid := params["engine_uid"]:
        vector.attrs.setdefault("engine_uid", param_engine_uid)
    vector.attrs.setdefault("fuel", params["fuel"])

    # Variables the aircraft performance model is expected to supply
    ap_vars = {
        "true_airspeed",
        "engine_efficiency",
        "fuel_flow",
        "aircraft_mass",
        "n_engine",
        "wingspan",
    }

    # Look across both vector.data and vector.attrs
    missing = ap_vars.difference(vector).difference(vector.attrs)

    if missing == {"true_airspeed"}:
        # If we're only missing true_airspeed but mach_number is present,
        # we can still proceed
        mach_number = vector.get_data_or_attr("mach_number", None)
        if mach_number is not None:
            air_temperature = vector["air_temperature"]
            vector["true_airspeed"] = units.mach_number_to_tas(mach_number, air_temperature)
            missing = set()

    if missing:
        ap_model = params["aircraft_performance"]
        if ap_model is None:
            msg = (
                f"Missing variables: {missing} and no aircraft_performance included in "
                "params. Instantiate 'CocipGrid' with an 'aircraft_performance' param. "
                "For example: 'CocipGrid(..., aircraft_performance=PSGrid())'"
            )
            raise ValueError(msg)
        ap_model.eval(vector, copy_source=False)

    # PART 2: True airspeed logic
    # NOTE: This doesn't exactly fit here, but it is closely related to
    # true_airspeed calculations, so it's convenient to get it done now
    # For the purpose of Cocip <> CocipGrid model parity, we attach a
    # "head_tail_dt" variable. This variable is used the first time `advect`
    # is called. It makes a small but noticeable difference in model outputs.
    true_airspeed = vector.get_data_or_attr("true_airspeed")

    if not _is_segment_free_mode(vector):
        segment_length = _get_source_param_override("segment_length", vector, params)
        # Time for the aircraft to traverse one segment, converted
        # from (float) seconds to nanoseconds for timedelta64 precision
        head_tail_dt_s = segment_length / true_airspeed
        head_tail_dt_ns = 1_000_000_000.0 * head_tail_dt_s
        head_tail_dt = head_tail_dt_ns.astype("timedelta64[ns]")
        vector["head_tail_dt"] = head_tail_dt

    # PART 3: Emissions data
    factor = _get_source_param_override("nvpm_ei_n_enhancement_factor", vector, params)
    default_nvpm_ei_n = params["default_nvpm_ei_n"]

    # Early exit: fall back to the (scaled) default nvPM emission index
    if not params["process_emissions"]:
        vector.attrs.setdefault("nvpm_ei_n", factor * default_nvpm_ei_n)
        return

    emissions = Emissions()
    emissions.eval(vector, copy_source=False)
    vector.update(nvpm_ei_n=factor * vector["nvpm_ei_n"])
|
|
1861
|
+
|
|
1862
|
+
|
|
1863
|
+
def calc_wind_shear(
    contrail: GeoVectorDataset,
    dz_m: float,
    *,
    is_downwash: bool,
    dsn_dz_factor: float,
) -> None:
    """Calculate wind shear data.

    This function is used for both `first_contrail` calculation and `evolve_one_step`. The
    data requirements of these two functions is slightly different, and the `is_downwash` flag
    allows for this discrepancy.

    This function modifies the `contrail` parameter in-place by attaching `data` keys:
    - "dT_dz"
    - "ds_dz"
    - "dsn_dz" (attached only if `is_downwash=False`)

    NOTE: This is the only function involving interpolation at a different `level`.

    Parameters
    ----------
    contrail : GeoVectorDataset
        Grid points already interpolated against CoCiP met data
    dz_m : float
        Difference in altitude between top and bottom layer for stratification calculations (m)
    is_downwash : bool
        Only used initially in the `first_contrail` function
    dsn_dz_factor : float
        Experimental parameter for segment-free model.
    """
    # Top-of-layer state
    temperature_top = contrail["air_temperature"]
    pressure_top = contrail.air_pressure
    u_top = contrail["eastward_wind"]
    v_top = contrail["northward_wind"]

    # Bottom-of-layer state, dz_m below the contrail level
    pressure_btm = thermo.pressure_dz(temperature_top, pressure_top, dz_m)
    temperature_btm = contrail["air_temperature_lower"]
    u_btm = contrail["eastward_wind_lower"]
    v_btm = contrail["northward_wind_lower"]

    # Potential temperature gradient and total wind shear over the layer
    contrail["dT_dz"] = thermo.T_potential_gradient(
        temperature_top, pressure_top, temperature_btm, pressure_btm, dz_m
    )
    ds_dz = wind_shear.wind_shear(u_top, u_btm, v_top, v_btm, dz_m)
    contrail["ds_dz"] = ds_dz

    # The downwash step does not require the normal shear component
    if is_downwash:
        return

    # Experimental segment-free mode: scale ds_dz rather than computing dsn_dz
    if _is_segment_free_mode(contrail):
        contrail["dsn_dz"] = dsn_dz_factor * ds_dz
        return

    # This is the only place sin_a / cos_a are needed, so they are used once
    # here rather than stored on the contrail
    sin_a, cos_a = geo.longitudinal_angle(
        lons0=contrail["longitude_tail"],
        lats0=contrail["latitude_tail"],
        lons1=contrail["longitude_head"],
        lats1=contrail["latitude_head"],
    )
    contrail["dsn_dz"] = wind_shear.wind_shear_normal(
        u_wind_top=u_top,
        u_wind_btm=u_btm,
        v_wind_top=v_top,
        v_wind_btm=v_btm,
        cos_a=cos_a,
        sin_a=sin_a,
        dz=dz_m,
    )
|
|
1942
|
+
|
|
1943
|
+
|
|
1944
|
+
def calc_thermal_properties(contrail: GeoVectorDataset) -> None:
    """Calculate contrail thermal properties.

    Modifies parameter `contrail` in place by attaching keys:
    - "q_sat"
    - "rho_air"

    Parameters
    ----------
    contrail : GeoVectorDataset
        Grid points already interpolated against CoCiP met data.
    """
    temperature = contrail["air_temperature"]
    pressure = contrail.air_pressure

    # Saturation specific humidity over ice and dry-air density
    contrail["q_sat"] = thermo.q_sat_ice(temperature, pressure)
    contrail["rho_air"] = thermo.rho_d(temperature, pressure)
|
|
1962
|
+
|
|
1963
|
+
|
|
1964
|
+
def advect(
    contrail: GeoVectorDataset,
    dt: np.timedelta64 | npt.NDArray[np.timedelta64],
    dt_head: np.timedelta64 | None,
    dt_tail: np.timedelta64 | None,
) -> GeoVectorDataset:
    """Form new contrail by advecting existing contrail.

    Parameter ``contrail`` is not modified.

    .. versionchanged:: 0.25.0

        The ``dt_head`` and ``dt_tail`` parameters are no longer optional.
        Set these to ``dt`` to evolve the contrail uniformly over a constant time.
        Set to None for segment-free mode.

    Parameters
    ----------
    contrail : GeoVectorDataset
        Grid points already interpolated against wind data
    dt : np.timedelta64 | npt.NDArray[np.timedelta64]
        Time step for advection
    dt_head : np.timedelta64 | None
        Time step for segment head advection. Use None for segment-free mode.
    dt_tail : np.timedelta64 | None
        Time step for segment tail advection. Use None for segment-free mode.

    Returns
    -------
    GeoVectorDataset
        New contrail instance with keys:
        - "index"
        - "longitude"
        - "latitude"
        - "level"
        - "air_pressure"
        - "altitude",
        - "time"
        - "formation_time"
        - "age"
        - "longitude_head" (only if `is_segment_free=False`)
        - "latitude_head" (only if `is_segment_free=False`)
        - "longitude_tail" (only if `is_segment_free=False`)
        - "latitude_tail" (only if `is_segment_free=False`)
        - "segment_length" (only if `is_segment_free=False`)
        - "head_tail_dt" (only if `is_segment_free=False`)
    """
    longitude = contrail["longitude"]
    latitude = contrail["latitude"]
    level = contrail["level"]
    time = contrail["time"]
    formation_time = contrail["formation_time"]
    age = contrail["age"]
    u_wind = contrail["eastward_wind"]
    v_wind = contrail["northward_wind"]
    vertical_velocity = contrail["lagrangian_tendency_of_air_pressure"]
    rho_air = contrail["rho_air"]
    terminal_fall_speed = contrail["terminal_fall_speed"]

    # Using the _t2 convention for post-advection data
    index_t2 = contrail["index"]
    time_t2 = time + dt
    age_t2 = age + dt

    # Horizontal advection by the interpolated wind field
    longitude_t2, latitude_t2 = geo.advect_horizontal(
        longitude=longitude,
        latitude=latitude,
        u_wind=u_wind,
        v_wind=v_wind,
        dt=dt,
    )
    # Vertical advection combines vertical velocity with particle fall speed
    level_t2 = geo.advect_level(level, vertical_velocity, rho_air, terminal_fall_speed, dt)
    altitude_t2 = units.pl_to_m(level_t2)

    data = {
        "index": index_t2,
        "longitude": longitude_t2,
        "latitude": latitude_t2,
        "level": level_t2,
        "air_pressure": 100.0 * level_t2,  # level is hPa; air_pressure is Pa
        "altitude": altitude_t2,
        "time": time_t2,
        "formation_time": formation_time,
        "age": age_t2,
        **_get_uncertainty_params(contrail),
    }

    # Segment-free mode: no head/tail endpoints to advect, return early
    if dt_tail is None or dt_head is None:
        assert _is_segment_free_mode(contrail)
        assert dt_tail is None
        assert dt_head is None
        return GeoVectorDataset._from_fastpath(data, attrs=contrail.attrs).copy()

    longitude_head = contrail["longitude_head"]
    latitude_head = contrail["latitude_head"]
    longitude_tail = contrail["longitude_tail"]
    latitude_tail = contrail["latitude_tail"]
    u_wind_head = contrail["eastward_wind_head"]
    v_wind_head = contrail["northward_wind_head"]
    u_wind_tail = contrail["eastward_wind_tail"]
    v_wind_tail = contrail["northward_wind_tail"]

    # Advect the segment endpoints with their own winds and time steps
    longitude_head_t2, latitude_head_t2 = geo.advect_horizontal(
        longitude=longitude_head,
        latitude=latitude_head,
        u_wind=u_wind_head,
        v_wind=v_wind_head,
        dt=dt_head,
    )
    longitude_tail_t2, latitude_tail_t2 = geo.advect_horizontal(
        longitude=longitude_tail,
        latitude=latitude_tail,
        u_wind=u_wind_tail,
        v_wind=v_wind_tail,
        dt=dt_tail,
    )

    # Recompute the segment length from the advected endpoints
    segment_length_t2 = geo.haversine(
        lons0=longitude_head_t2,
        lats0=latitude_head_t2,
        lons1=longitude_tail_t2,
        lats1=latitude_tail_t2,
    )

    head_tail_dt_t2 = np.full(contrail.size, np.timedelta64(0, "ns"))  # trivial

    data["longitude_head"] = longitude_head_t2
    data["latitude_head"] = latitude_head_t2
    data["longitude_tail"] = longitude_tail_t2
    data["latitude_tail"] = latitude_tail_t2
    data["segment_length"] = segment_length_t2
    data["head_tail_dt"] = head_tail_dt_t2

    return GeoVectorDataset._from_fastpath(data, attrs=contrail.attrs).copy()
|
|
2098
|
+
|
|
2099
|
+
|
|
2100
|
+
def _aggregate_ef_summary(vector_list: list[VectorDataset]) -> VectorDataset | None:
    """Aggregate EF results after cocip simulation.

    EF values are summed over each vector in ``vector_list``; ages take
    the elementwise maximum. Returns None when ``vector_list`` is empty.

    Parameters
    ----------
    vector_list : list[VectorDataset]
        List of :class:`VectorDataset` objects each containing keys "index", "age", and "ef".

    Returns
    -------
    VectorDataset | None
        Dataset with keys:
        - "index": Used to join to :attr:`CocipGrid.source`
        - "ef": Sum of ef values
        - "age": Contrail age associated to each index
        Only return points with non-zero ef or age.
    """
    if not vector_list:
        return None

    # The union of all vector indices lies in [idx_min, idx_max]
    idx_min = min(v["index"].min() for v in vector_list)
    idx_max = max(v["index"].max() for v in vector_list)
    index = np.arange(idx_min, idx_max + 1)

    # Dtypes of the aggregates follow the first vector
    first = vector_list[0]
    ef = np.zeros(index.shape, dtype=first["ef"].dtype)
    age = np.zeros(index.shape, dtype=first["age"].dtype)

    for v in vector_list:
        offset = v["index"] - idx_min
        ef[offset] += v["ef"]
        age[offset] = np.maximum(age[offset], v["age"])

    # Keep only points with non-zero ef or age
    keep = age.astype(bool) | ef.astype(bool)
    data = {
        "index": index[keep].copy(),
        "ef": ef[keep].copy(),
        "age": age[keep].copy(),
    }
    return VectorDataset(data, copy=False)
|
|
2146
|
+
|
|
2147
|
+
|
|
2148
|
+
def result_to_metdataset(
    result: VectorDataset | None,
    verbose_dict: dict[str, npt.NDArray[np.floating]],
    source: MetDataset,
    nominal_segment_length: float,
    attrs: dict[str, str],
) -> MetDataset:
    """Convert aggregated data in ``result`` to MetDataset.

    Parameters
    ----------
    result : VectorDataset | None
        Aggregated data arising from contrail evolution. Expected to contain keys:
        ``index``, ``age``, ``ef``.
    verbose_dict : dict[str, npt.NDArray[np.floating]]
        Verbose outputs to attach to results.
    source : MetDataset
        :attr:`CocipGrid.source` data on which to attach results.
    nominal_segment_length : float
        Used to normalize energy forcing cumulative sum.
    attrs : dict[str, str]
        Additional global attributes to attach to xr.Dataset.

    Returns
    -------
    MetDataset
        Data with variables ``contrail_age``, ``ef_per_m``, and any other keys
        in ``verbose_dict``.
    """
    logger.debug("Desparsify grid results into 4D numpy array")

    # Flattened size of the full source grid
    shape = tuple(value.size for value in source.coords.values())
    size = np.prod(shape)

    # Age is stored as float hours; ef dtype follows the evolution output
    dtype = result["ef"].dtype if result else np.float32
    contrail_age_1d = np.zeros(size, dtype=np.float32)
    ef_per_m_1d = np.zeros(size, dtype=dtype)

    if result:
        contrail_idx = result["index"]
        # Step 1: Contrail age. Convert from timedelta to float
        contrail_age_1d[contrail_idx] = result["age"] / np.timedelta64(1, "h")
        # Step 2: EF
        ef_per_m_1d[contrail_idx] = result["ef"] / nominal_segment_length

    contrail_age_4d = contrail_age_1d.reshape(shape)
    ef_per_m_4d = ef_per_m_1d.reshape(shape)

    # Step 3: Dataset dims and attrs
    dims = tuple(source.coords)
    local_attrs = _contrail_grid_variable_attrs()

    # Step 4: Dataset core variables
    data_vars = {
        "contrail_age": (dims, contrail_age_4d, local_attrs["contrail_age"]),
        "ef_per_m": (dims, ef_per_m_4d, local_attrs["ef_per_m"]),
    }

    # Step 5: Dataset variables from verbose_dicts
    for k, v in verbose_dict.items():
        data_vars[k] = (dims, v.reshape(shape), local_attrs[k])

    # Update source
    for k, v in data_vars.items():  # type: ignore[assignment]
        source[k] = v
    source.attrs.update(attrs)

    # Return reference to source
    return source
|
|
2217
|
+
|
|
2218
|
+
|
|
2219
|
+
def result_merge_source(
    result: VectorDataset | None,
    verbose_dict: dict[str, npt.NDArray[np.floating]],
    source: GeoVectorDataset,
    nominal_segment_length: float | npt.NDArray[np.floating],
    attrs: dict[str, str],
) -> GeoVectorDataset:
    """Merge ``results`` and ``verbose_dict`` onto ``source``."""

    # Outputs default to zero everywhere; dtypes follow ``result`` when available
    contrail_age = np.zeros(
        source.size, dtype=result["age"].dtype if result else "timedelta64[ns]"
    )
    ef_per_m = np.zeros(source.size, dtype=result["ef"].dtype if result else np.float32)

    # Scatter any evolution results into the dense output arrays
    if result:
        index = result["index"]
        contrail_age[index] = result["age"]

        denominator = (
            nominal_segment_length[index]
            if isinstance(nominal_segment_length, np.ndarray)
            else nominal_segment_length
        )
        ef_per_m[index] = result["ef"] / denominator

    # Attach outputs; verbose keys never overwrite existing source data
    source["contrail_age"] = contrail_age
    source["ef_per_m"] = ef_per_m
    for key, arr in verbose_dict.items():
        source.setdefault(key, arr)
    source.attrs.update(attrs)

    return source
|
|
2253
|
+
|
|
2254
|
+
|
|
2255
|
+
def _concat_verbose_dicts(
|
|
2256
|
+
verbose_dicts: list[dict[str, pd.Series]],
|
|
2257
|
+
source_size: int,
|
|
2258
|
+
verbose_outputs_formation: set[str],
|
|
2259
|
+
) -> dict[str, npt.NDArray[np.floating]]:
|
|
2260
|
+
# Concatenate the values and return
|
|
2261
|
+
ret: dict[str, np.ndarray] = {}
|
|
2262
|
+
for key in verbose_outputs_formation:
|
|
2263
|
+
series_list = [v for d in verbose_dicts if d and (v := d.get(key)) is not None]
|
|
2264
|
+
data = np.concatenate(series_list)
|
|
2265
|
+
index = np.concatenate([s.index for s in series_list])
|
|
2266
|
+
|
|
2267
|
+
# Reindex to source_size. Assuming all verbose_dicts have float dtype
|
|
2268
|
+
out = np.full(source_size, np.nan, dtype=data.dtype)
|
|
2269
|
+
out[index] = data
|
|
2270
|
+
ret[key] = out
|
|
2271
|
+
|
|
2272
|
+
return ret
|
|
2273
|
+
|
|
2274
|
+
|
|
2275
|
+
def _contrail_grid_variable_attrs() -> dict[str, dict[str, str]]:
|
|
2276
|
+
"""Get attributes for each variables in :class:`CocipGrid` gridded output.
|
|
2277
|
+
|
|
2278
|
+
TODO: It might be better for these to live elsewhere (ie, in some `variables.py`).
|
|
2279
|
+
"""
|
|
2280
|
+
return {
|
|
2281
|
+
"contrail_age": {
|
|
2282
|
+
"long_name": "Total age in hours of persistent contrail",
|
|
2283
|
+
"units": "hours",
|
|
2284
|
+
},
|
|
2285
|
+
"ef_per_m": {
|
|
2286
|
+
"long_name": "Energy forcing per meter of flight trajectory",
|
|
2287
|
+
"units": "J / m",
|
|
2288
|
+
},
|
|
2289
|
+
"sac": {"long_name": "Schmidt-Appleman Criterion"},
|
|
2290
|
+
"persistent": {"long_name": "Contrail initially persistent state"},
|
|
2291
|
+
"T_crit_sac": {
|
|
2292
|
+
"long_name": "Schmidt-Appleman critical temperature threshold",
|
|
2293
|
+
"units": "K",
|
|
2294
|
+
},
|
|
2295
|
+
"engine_efficiency": {"long_name": "Engine efficiency"},
|
|
2296
|
+
"true_airspeed": {"long_name": "True airspeed", "units": "m / s"},
|
|
2297
|
+
"aircraft_mass": {"long_name": "Aircraft mass", "units": "kg"},
|
|
2298
|
+
"nvpm_ei_n": {
|
|
2299
|
+
"long_name": "Black carbon emissions index number",
|
|
2300
|
+
"units": "kg^{-1}",
|
|
2301
|
+
},
|
|
2302
|
+
"fuel_flow": {"long_name": "Jet engine fuel flow", "units": "kg / s"},
|
|
2303
|
+
"specific_humidity": {"long_name": "Specific humidity", "units": "kg / kg"},
|
|
2304
|
+
"air_temperature": {"long_name": "Air temperature", "units": "K"},
|
|
2305
|
+
"rhi": {"long_name": "Relative humidity", "units": "dimensionless"},
|
|
2306
|
+
"iwc": {
|
|
2307
|
+
"long_name": "Ice water content after the wake vortex phase",
|
|
2308
|
+
"units": "kg_h2o / kg_air",
|
|
2309
|
+
},
|
|
2310
|
+
"global_yearly_mean_rf_per_m": {
|
|
2311
|
+
"long_name": "Global yearly mean RF per meter of flight trajectory",
|
|
2312
|
+
"units": "W / m**2 / m",
|
|
2313
|
+
},
|
|
2314
|
+
"atr20_per_m": {
|
|
2315
|
+
"long_name": "Average Temperature Response over a 20 year horizon",
|
|
2316
|
+
"units": "K / m",
|
|
2317
|
+
},
|
|
2318
|
+
}
|
|
2319
|
+
|
|
2320
|
+
|
|
2321
|
+
def _supported_verbose_outputs_formation() -> set[str]:
    """Get supported keys for verbose outputs.

    Uses output of :func:`_contrail_grid_variable_attrs` as a source of truth.
    """
    # Everything in the gridded-output attrs is supported except these
    excluded = {
        "contrail_age",
        "ef_per_m",
        "global_yearly_mean_rf_per_m",
        "atr20_per_m",
    }
    return {key for key in _contrail_grid_variable_attrs() if key not in excluded}
|
|
2332
|
+
|
|
2333
|
+
|
|
2334
|
+
def _warn_not_wrap(met: MetDataset) -> None:
|
|
2335
|
+
"""Warn user if parameter met should be wrapped.
|
|
2336
|
+
|
|
2337
|
+
Parameters
|
|
2338
|
+
----------
|
|
2339
|
+
met : MetDataset
|
|
2340
|
+
Met dataset
|
|
2341
|
+
"""
|
|
2342
|
+
if met.is_wrapped:
|
|
2343
|
+
return
|
|
2344
|
+
lon = met.indexes["longitude"]
|
|
2345
|
+
if lon.min() == -180.0 and lon.max() == 179.75:
|
|
2346
|
+
warnings.warn(
|
|
2347
|
+
"The MetDataset `met` not been wrapped. The CocipGrid model may "
|
|
2348
|
+
"perform better if `met.wrap_longitude()` is called first."
|
|
2349
|
+
)
|
|
2350
|
+
|
|
2351
|
+
|
|
2352
|
+
def _get_uncertainty_params(contrail: VectorDataset) -> dict[str, npt.NDArray[np.floating]]:
|
|
2353
|
+
"""Return uncertainty parameters in ``contrail``.
|
|
2354
|
+
|
|
2355
|
+
This function assumes the underlying humidity scaling model is
|
|
2356
|
+
:class:`ConstantHumidityScaling`. This function should get revised if other
|
|
2357
|
+
humidity scaling models are used for uncertainty analysis.
|
|
2358
|
+
|
|
2359
|
+
For each of the keys:
|
|
2360
|
+
- "rhi_adj",
|
|
2361
|
+
- "rhi_boost_exponent",
|
|
2362
|
+
- "sedimentation_impact_factor",
|
|
2363
|
+
- "wind_shear_enhancement_exponent",
|
|
2364
|
+
|
|
2365
|
+
this function checks if key is present in contrail. The data is then
|
|
2366
|
+
bundled and returned as a dictionary.
|
|
2367
|
+
|
|
2368
|
+
Parameters
|
|
2369
|
+
----------
|
|
2370
|
+
contrail : VectorDataset
|
|
2371
|
+
Data from which uncertainty parameters are extracted
|
|
2372
|
+
|
|
2373
|
+
Returns
|
|
2374
|
+
-------
|
|
2375
|
+
dict[str, npt.NDArray[np.floating]]
|
|
2376
|
+
Dictionary of uncertainty parameters.
|
|
2377
|
+
"""
|
|
2378
|
+
keys = (
|
|
2379
|
+
"rhi_adj",
|
|
2380
|
+
"rhi_boost_exponent",
|
|
2381
|
+
"sedimentation_impact_factor",
|
|
2382
|
+
"wind_shear_enhancement_exponent",
|
|
2383
|
+
)
|
|
2384
|
+
return {key: val for key in keys if (val := contrail.get(key)) is not None}
|
|
2385
|
+
|
|
2386
|
+
|
|
2387
|
+
_T = TypeVar("_T", np.float64, np.datetime64)
|
|
2388
|
+
|
|
2389
|
+
|
|
2390
|
+
def _check_coverage(
|
|
2391
|
+
met_array: npt.NDArray[_T], grid_array: npt.NDArray[_T], coord: str, name: str
|
|
2392
|
+
) -> None:
|
|
2393
|
+
"""Warn if the met data does not cover the entire source domain.
|
|
2394
|
+
|
|
2395
|
+
Parameters
|
|
2396
|
+
----------
|
|
2397
|
+
met_array : npt.NDArray[_T]
|
|
2398
|
+
Coordinate on met data
|
|
2399
|
+
grid_array : npt.NDArray[_T]
|
|
2400
|
+
Coordinate on grid data
|
|
2401
|
+
coord : str
|
|
2402
|
+
Name of coordinate. Only used for warning message.
|
|
2403
|
+
name : str
|
|
2404
|
+
Name of met dataset. Only used for warning message.
|
|
2405
|
+
"""
|
|
2406
|
+
if met_array.min() > grid_array.min() or met_array.max() < grid_array.max():
|
|
2407
|
+
warnings.warn(
|
|
2408
|
+
f"Met data '{name}' does not cover the source domain along the {coord} axis. "
|
|
2409
|
+
"This causes some interpolated values to be nan, leading to meaningless results."
|
|
2410
|
+
)
|
|
2411
|
+
|
|
2412
|
+
|
|
2413
|
+
def _downselect_met(
    source: GeoVectorDataset | MetDataset, met: MetDataset, rad: MetDataset, params: dict[str, Any]
) -> tuple[MetDataset, MetDataset]:
    """Downselect met and rad to the bounding box of the source.

    Implementation is nearly identical to the :meth:`Model.downselect_met` method. The
    key difference is that this method uses the "max_age" and "dt_integration" parameters
    to further buffer the bounding box in the time dimension.

    .. versionchanged:: 0.25.0

        Support :class:`MetDataset` ``source`` for use in :class:`CocipGrid`.

    Parameters
    ----------
    source : GeoVectorDataset | MetDataset
        Model source
    met : MetDataset
        Model met
    rad : MetDataset
        Model rad
    params : dict[str, Any]
        Model parameters

    Returns
    -------
    met : MetDataset
        MetDataset with met data copied within the bounding box of ``source``.
    rad : MetDataset
        MetDataset with rad data copied within the bounding box of ``source``.

    See Also
    --------
    :meth:`Model.downselect_met`
    """
    if not params["downselect_met"]:
        logger.debug("Avoiding downselecting met because params['downselect_met'] is False")
        return met, rad

    logger.debug("Downselecting met domain to vector points")

    # Spatial buffers come straight from model parameters
    lon_buffer = params["met_longitude_buffer"]
    lat_buffer = params["met_latitude_buffer"]
    lev_buffer = params["met_level_buffer"]
    raw_time_buffer = params["met_time_buffer"]

    # Extend the end of the time window so the met covers the full contrail
    # evolution (max_age plus one integration step), not just the source times
    t_start = raw_time_buffer[0]
    t_end = raw_time_buffer[1] + params["max_age"] + params["dt_integration"]

    met = source.downselect_met(
        met,
        longitude_buffer=lon_buffer,
        latitude_buffer=lat_buffer,
        level_buffer=lev_buffer,
        time_buffer=(t_start, t_end),
    )

    # rad is surface/TOA data, so no level buffer is applied
    rad = source.downselect_met(
        rad,
        longitude_buffer=lon_buffer,
        latitude_buffer=lat_buffer,
        time_buffer=(t_start, t_end),
    )

    return met, rad
|
|
2481
|
+
|
|
2482
|
+
|
|
2483
|
+
def _is_segment_free_mode(vector: GeoVectorDataset) -> bool:
|
|
2484
|
+
"""Determine if model is run in a segment-free mode."""
|
|
2485
|
+
return "longitude_head" not in vector
|
|
2486
|
+
|
|
2487
|
+
|
|
2488
|
+
def _check_met_rad_time(
    met: MetDataset,
    rad: MetDataset,
    tmin: pd.Timestamp,
    tmax: pd.Timestamp,
) -> None:
    """Warn if meteorology data doesn't cover a required time range.

    Parameters
    ----------
    met : MetDataset
        Meteorology dataset
    rad : MetDataset
        Radiative flux dataset
    tmin : pd.Timestamp
        Start of required time range
    tmax : pd.Timestamp
        End of required time range
    """
    # Check met coverage at both ends of the model time range
    met_time = met.data["time"].values
    _check_start_time(pd.to_datetime(met_time.min()), tmin, "met")
    _check_end_time(pd.to_datetime(met_time.max()), tmax, "met")

    # rad warnings carry an extra hint about accumulated-flux differencing
    rad_time = rad.data["time"].values
    note = "differencing reduces time coverage when providing accumulated radiative fluxes."
    _check_start_time(pd.to_datetime(rad_time.min()), tmin, "rad", note=note)
    _check_end_time(pd.to_datetime(rad_time.max()), tmax, "rad", note=note)
|
|
2519
|
+
|
|
2520
|
+
|
|
2521
|
+
def _check_start_time(
|
|
2522
|
+
met_start: pd.Timestamp,
|
|
2523
|
+
model_start: pd.Timestamp,
|
|
2524
|
+
name: str,
|
|
2525
|
+
*,
|
|
2526
|
+
note: str | None = None,
|
|
2527
|
+
) -> None:
|
|
2528
|
+
if met_start > model_start:
|
|
2529
|
+
note = f" Note: {note}" if note else ""
|
|
2530
|
+
warnings.warn(
|
|
2531
|
+
f"Start time of parameter '{name}' ({met_start}) "
|
|
2532
|
+
f"is after model start time ({model_start}). "
|
|
2533
|
+
f"Include additional time at the start of '{name}'."
|
|
2534
|
+
f"{note}"
|
|
2535
|
+
)
|
|
2536
|
+
|
|
2537
|
+
|
|
2538
|
+
def _check_end_time(
|
|
2539
|
+
met_end: pd.Timestamp,
|
|
2540
|
+
model_end: pd.Timestamp,
|
|
2541
|
+
name: str,
|
|
2542
|
+
*,
|
|
2543
|
+
note: str | None = None,
|
|
2544
|
+
) -> None:
|
|
2545
|
+
if met_end < model_end:
|
|
2546
|
+
note = f" Note: {note}" if note else ""
|
|
2547
|
+
warnings.warn(
|
|
2548
|
+
f"End time of parameter '{name}' ({met_end}) "
|
|
2549
|
+
f"is before model end time ({model_end}). "
|
|
2550
|
+
f"Include additional time at the end of '{name}' or reduce 'max_age' parameter."
|
|
2551
|
+
f"{note}"
|
|
2552
|
+
)
|