cloudnetpy 1.49.9__py3-none-any.whl → 1.87.3__py3-none-any.whl
This diff shows the changes between publicly released versions of the package, as they appear in the supported public registries, and is provided for informational purposes only.
- cloudnetpy/categorize/__init__.py +1 -2
- cloudnetpy/categorize/atmos_utils.py +297 -67
- cloudnetpy/categorize/attenuation.py +31 -0
- cloudnetpy/categorize/attenuations/__init__.py +37 -0
- cloudnetpy/categorize/attenuations/gas_attenuation.py +30 -0
- cloudnetpy/categorize/attenuations/liquid_attenuation.py +84 -0
- cloudnetpy/categorize/attenuations/melting_attenuation.py +78 -0
- cloudnetpy/categorize/attenuations/rain_attenuation.py +84 -0
- cloudnetpy/categorize/categorize.py +332 -156
- cloudnetpy/categorize/classify.py +127 -125
- cloudnetpy/categorize/containers.py +107 -76
- cloudnetpy/categorize/disdrometer.py +40 -0
- cloudnetpy/categorize/droplet.py +23 -21
- cloudnetpy/categorize/falling.py +53 -24
- cloudnetpy/categorize/freezing.py +25 -12
- cloudnetpy/categorize/insects.py +35 -23
- cloudnetpy/categorize/itu.py +243 -0
- cloudnetpy/categorize/lidar.py +36 -41
- cloudnetpy/categorize/melting.py +34 -26
- cloudnetpy/categorize/model.py +84 -37
- cloudnetpy/categorize/mwr.py +18 -14
- cloudnetpy/categorize/radar.py +215 -102
- cloudnetpy/cli.py +578 -0
- cloudnetpy/cloudnetarray.py +43 -89
- cloudnetpy/concat_lib.py +218 -78
- cloudnetpy/constants.py +28 -10
- cloudnetpy/datasource.py +61 -86
- cloudnetpy/exceptions.py +49 -20
- cloudnetpy/instruments/__init__.py +5 -0
- cloudnetpy/instruments/basta.py +29 -12
- cloudnetpy/instruments/bowtie.py +135 -0
- cloudnetpy/instruments/ceilo.py +138 -115
- cloudnetpy/instruments/ceilometer.py +164 -80
- cloudnetpy/instruments/cl61d.py +21 -5
- cloudnetpy/instruments/cloudnet_instrument.py +74 -36
- cloudnetpy/instruments/copernicus.py +108 -30
- cloudnetpy/instruments/da10.py +54 -0
- cloudnetpy/instruments/disdrometer/common.py +126 -223
- cloudnetpy/instruments/disdrometer/parsivel.py +453 -94
- cloudnetpy/instruments/disdrometer/thies.py +254 -87
- cloudnetpy/instruments/fd12p.py +201 -0
- cloudnetpy/instruments/galileo.py +65 -23
- cloudnetpy/instruments/hatpro.py +123 -49
- cloudnetpy/instruments/instruments.py +113 -1
- cloudnetpy/instruments/lufft.py +39 -17
- cloudnetpy/instruments/mira.py +268 -61
- cloudnetpy/instruments/mrr.py +187 -0
- cloudnetpy/instruments/nc_lidar.py +19 -8
- cloudnetpy/instruments/nc_radar.py +109 -55
- cloudnetpy/instruments/pollyxt.py +135 -51
- cloudnetpy/instruments/radiometrics.py +313 -59
- cloudnetpy/instruments/rain_e_h3.py +171 -0
- cloudnetpy/instruments/rpg.py +321 -189
- cloudnetpy/instruments/rpg_reader.py +74 -40
- cloudnetpy/instruments/toa5.py +49 -0
- cloudnetpy/instruments/vaisala.py +95 -343
- cloudnetpy/instruments/weather_station.py +774 -105
- cloudnetpy/metadata.py +90 -19
- cloudnetpy/model_evaluation/file_handler.py +55 -52
- cloudnetpy/model_evaluation/metadata.py +46 -20
- cloudnetpy/model_evaluation/model_metadata.py +1 -1
- cloudnetpy/model_evaluation/plotting/plot_tools.py +32 -37
- cloudnetpy/model_evaluation/plotting/plotting.py +327 -117
- cloudnetpy/model_evaluation/products/advance_methods.py +92 -83
- cloudnetpy/model_evaluation/products/grid_methods.py +88 -63
- cloudnetpy/model_evaluation/products/model_products.py +43 -35
- cloudnetpy/model_evaluation/products/observation_products.py +41 -35
- cloudnetpy/model_evaluation/products/product_resampling.py +17 -7
- cloudnetpy/model_evaluation/products/tools.py +29 -20
- cloudnetpy/model_evaluation/statistics/statistical_methods.py +30 -20
- cloudnetpy/model_evaluation/tests/e2e/conftest.py +3 -3
- cloudnetpy/model_evaluation/tests/e2e/process_cf/main.py +9 -5
- cloudnetpy/model_evaluation/tests/e2e/process_cf/tests.py +15 -14
- cloudnetpy/model_evaluation/tests/e2e/process_iwc/main.py +9 -5
- cloudnetpy/model_evaluation/tests/e2e/process_iwc/tests.py +15 -14
- cloudnetpy/model_evaluation/tests/e2e/process_lwc/main.py +9 -5
- cloudnetpy/model_evaluation/tests/e2e/process_lwc/tests.py +15 -14
- cloudnetpy/model_evaluation/tests/unit/conftest.py +42 -41
- cloudnetpy/model_evaluation/tests/unit/test_advance_methods.py +41 -48
- cloudnetpy/model_evaluation/tests/unit/test_grid_methods.py +216 -194
- cloudnetpy/model_evaluation/tests/unit/test_model_products.py +23 -21
- cloudnetpy/model_evaluation/tests/unit/test_observation_products.py +37 -38
- cloudnetpy/model_evaluation/tests/unit/test_plot_tools.py +43 -40
- cloudnetpy/model_evaluation/tests/unit/test_plotting.py +30 -36
- cloudnetpy/model_evaluation/tests/unit/test_statistical_methods.py +68 -31
- cloudnetpy/model_evaluation/tests/unit/test_tools.py +33 -26
- cloudnetpy/model_evaluation/utils.py +2 -1
- cloudnetpy/output.py +170 -111
- cloudnetpy/plotting/__init__.py +2 -1
- cloudnetpy/plotting/plot_meta.py +562 -822
- cloudnetpy/plotting/plotting.py +1142 -704
- cloudnetpy/products/__init__.py +1 -0
- cloudnetpy/products/classification.py +370 -88
- cloudnetpy/products/der.py +85 -55
- cloudnetpy/products/drizzle.py +77 -34
- cloudnetpy/products/drizzle_error.py +15 -11
- cloudnetpy/products/drizzle_tools.py +79 -59
- cloudnetpy/products/epsilon.py +211 -0
- cloudnetpy/products/ier.py +27 -50
- cloudnetpy/products/iwc.py +55 -48
- cloudnetpy/products/lwc.py +96 -70
- cloudnetpy/products/mwr_tools.py +186 -0
- cloudnetpy/products/product_tools.py +170 -128
- cloudnetpy/utils.py +455 -240
- cloudnetpy/version.py +2 -2
- {cloudnetpy-1.49.9.dist-info → cloudnetpy-1.87.3.dist-info}/METADATA +44 -40
- cloudnetpy-1.87.3.dist-info/RECORD +127 -0
- {cloudnetpy-1.49.9.dist-info → cloudnetpy-1.87.3.dist-info}/WHEEL +1 -1
- cloudnetpy-1.87.3.dist-info/entry_points.txt +2 -0
- docs/source/conf.py +2 -2
- cloudnetpy/categorize/atmos.py +0 -361
- cloudnetpy/products/mwr_multi.py +0 -68
- cloudnetpy/products/mwr_single.py +0 -75
- cloudnetpy-1.49.9.dist-info/RECORD +0 -112
- {cloudnetpy-1.49.9.dist-info → cloudnetpy-1.87.3.dist-info/licenses}/LICENSE +0 -0
- {cloudnetpy-1.49.9.dist-info → cloudnetpy-1.87.3.dist-info}/top_level.txt +0 -0
cloudnetpy/products/drizzle_error.py

@@ -1,4 +1,5 @@
 import numpy as np
+import numpy.typing as npt
 from numpy import ma
 
 from cloudnetpy import utils
@@ -9,7 +10,8 @@ MU_ERROR_SMALL = 0.25
 
 
 def get_drizzle_error(
-    categorize: DrizzleSource,
+    categorize: DrizzleSource,
+    drizzle_parameters: DrizzleSolver,
 ) -> dict:
     """Estimates error and bias for drizzle classification.
 
@@ -29,11 +31,10 @@ def get_drizzle_error(
     z_error = np.full(categorize.z.shape, z_error)
     error_input = z_error, bias_error
     bias_input = _read_input_uncertainty(categorize, "bias")
-
-    return errors
+    return _calc_errors(drizzle_indices, error_input, bias_input)
 
 
-def _get_drizzle_indices(diameter:
+def _get_drizzle_indices(diameter: npt.NDArray) -> dict:
     return {
         "drizzle": diameter > 0,
         "small": np.logical_and(diameter <= 1e-4, diameter > 1e-5),
@@ -100,7 +101,9 @@ def _calc_parameter_biases(bias_input: tuple) -> dict:
 
 
 def _add_supplementary_errors(
-    results: dict,
+    results: dict,
+    drizzle_indices: dict,
+    error_input: tuple,
 ) -> dict:
     def _calc_n_error() -> ma.MaskedArray:
         z_error = error_input[0]
@@ -139,6 +142,7 @@ def _calc_error(
     scale: float,
     weights: tuple,
     error_input: tuple,
+    *,
     add_mu: bool = False,
     add_mu_small: bool = False,
 ) -> ma.MaskedArray:
@@ -151,12 +155,12 @@ def _calc_error(
 
 
 def _stack_errors(
-    error_in:
+    error_in: npt.NDArray,
     drizzle_indices: dict,
-    error_small=None,
-    error_tiny=None,
+    error_small: npt.NDArray | None = None,
+    error_tiny: npt.NDArray | None = None,
 ) -> ma.MaskedArray:
-    def _add_error_component(source:
+    def _add_error_component(source: npt.NDArray, ind: tuple) -> None:
         error[ind] = source[ind]
 
     error = ma.zeros(error_in.shape)
@@ -171,14 +175,14 @@ def _stack_errors(
 COR = 10 / np.log(10)
 
 
-def db2lin(x_in:
+def db2lin(x_in: npt.NDArray) -> ma.MaskedArray:
     x = ma.copy(x_in)
     threshold = 100
     x[x > threshold] = threshold
     return ma.exp(x / COR) - 1
 
 
-def lin2db(x_in) -> ma.MaskedArray:
+def lin2db(x_in: npt.NDArray) -> ma.MaskedArray:
     x = ma.copy(x_in)
     threshold = -0.9
     x[x < threshold] = threshold
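
A note on the conversion helpers above: because COR = 10 / np.log(10), ma.exp(x / COR) equals 10 ** (x / 10), so db2lin and lin2db are the usual decibel conversions shifted by one linear unit, with clamping to keep the exponent and logarithm well behaved. The return statement of lin2db falls outside the hunk; the minimal sketch below assumes it is the algebraic inverse of db2lin.

import numpy as np
from numpy import ma

COR = 10 / np.log(10)


def db2lin(x_in):
    # dB -> linear: exp(x / COR) == 10 ** (x / 10); clamp to avoid overflow.
    x = ma.copy(x_in)
    x[x > 100] = 100
    return ma.exp(x / COR) - 1


def lin2db(x_in):
    # Assumed inverse: linear -> dB of (x + 1); clamp to keep log(x + 1) defined.
    x = ma.copy(x_in)
    x[x < -0.9] = -0.9
    return COR * ma.log(x + 1)


z_db = ma.array([-5.0, 0.0, 10.0])
print(lin2db(db2lin(z_db)))  # recovers [-5.  0. 10.] within float precision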
cloudnetpy/products/drizzle_tools.py

@@ -1,9 +1,12 @@
 import logging
 import os
 from bisect import bisect_left
+from os import PathLike
 
 import netCDF4
 import numpy as np
+import numpy.typing as npt
+from numpy import ma
 from scipy.special import gamma
 
 from cloudnetpy import utils
@@ -20,28 +23,27 @@ class DrizzleSource(DataSource):
 
     Attributes:
         mie (dict): Mie look-up table data.
-        dheight (float): Median difference of height array.
         z (ndarray): 2D radar echo (linear units).
         beta (ndarray): 2D lidar backscatter.
         v (ndarray): 2D doppler velocity.
 
     """
 
-    def __init__(self, categorize_file: str):
+    def __init__(self, categorize_file: str | PathLike) -> None:
         super().__init__(categorize_file)
         self.mie = self._read_mie_lut()
-        self.dheight = utils.mdiff(self.getvar("height"))
         self.z = self._convert_z_units()
         self.beta = self.getvar("beta")
         self.v = self.getvar("v")
+        self.height_agl: npt.NDArray
 
-    def _convert_z_units(self):
+    def _convert_z_units(self) -> npt.NDArray:
         """Converts reflectivity factor to SI units."""
         z = self.getvar("Z") - 180
         z[z > 0.0] = 0.0
         return utils.db2lin(z)
 
-    def _read_mie_lut(self):
+    def _read_mie_lut(self) -> dict:
         """Reads mie scattering look-up table."""
         mie_file = self._get_mie_file()
         with netCDF4.Dataset(mie_file) as nc:
@@ -59,20 +61,25 @@ class DrizzleSource(DataSource):
                 "width": mie[f"lu_width_{band}"][:],
                 "ray": mie[f"lu_mie_ray_{band}"][:],
                 "v": mie[f"lu_v_{band}"][:],
-            }
+            },
         )
         return lut
 
     @staticmethod
-    def _get_mie_file():
+    def _get_mie_file() -> str:
         module_path = os.path.dirname(os.path.abspath(__file__))
-        return "/
+        return f"{module_path}/mie_lu_tables.nc"
 
-    def _get_wl_band(self):
+    def _get_wl_band(self) -> str:
         """Returns string corresponding the radar frequency."""
         radar_frequency = float(self.getvar("radar_frequency"))
         wl_band = utils.get_wl_band(radar_frequency)
-
+        if wl_band == "Ka":
+            return "35"
+        if wl_band == "W":
+            return "94"
+        msg = f"Unsupported band: {wl_band}"
+        raise ValueError(msg)
 
 
 class DrizzleClassification(ProductClassification):
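
The band string returned by the new _get_wl_band feeds the look-up-table keys built in _read_mie_lut (lu_width_35 vs lu_width_94, etc.). An illustrative stand-alone sketch of that selection; the exact frequency ranges are assumptions, since utils.get_wl_band is not shown in this diff:

def lut_band(radar_frequency_ghz: float) -> str:
    # Assumed frequency ranges for the Ka and W bands.
    if 26.0 <= radar_frequency_ghz <= 40.0:
        return "35"
    if 75.0 <= radar_frequency_ghz <= 110.0:
        return "94"
    msg = f"Unsupported frequency: {radar_frequency_ghz} GHz"
    raise ValueError(msg)


band = lut_band(35.5)
key = f"lu_width_{band}"  # -> "lu_width_35", as used in _read_mie_lut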
@@ -91,7 +98,7 @@ class DrizzleClassification(ProductClassification):
 
     """
 
-    def __init__(self, categorize_file: str):
+    def __init__(self, categorize_file: str | PathLike) -> None:
         super().__init__(categorize_file)
         self.is_v_sigma = self._find_v_sigma(categorize_file)
         self.warm_liquid = self._find_warm_liquid()
@@ -100,43 +107,44 @@
         self.cold_rain = self._find_cold_rain()
 
     @staticmethod
-    def _find_v_sigma(cat_file: str):
-        v_sigma = product_tools.
+    def _find_v_sigma(cat_file: str | PathLike) -> npt.NDArray:
+        v_sigma = product_tools.read_nc_field(cat_file, "v_sigma")
         return np.isfinite(v_sigma)
 
-    def _find_warm_liquid(self):
-        return self.category_bits
+    def _find_warm_liquid(self) -> npt.NDArray:
+        return self.category_bits.droplet & ~self.category_bits.freezing
 
-    def _find_drizzle(self):
+    def _find_drizzle(self) -> npt.NDArray:
         return (
             ~utils.transpose(self.is_rain)
-            & self.category_bits
-            & ~self.category_bits
-            & ~self.category_bits
-            & ~self.category_bits
-            & ~self.category_bits
-            & self.quality_bits
-            & self.quality_bits
-            & ~self.quality_bits
-            & ~self.quality_bits
-            & ~self.quality_bits
+            & self.category_bits.falling
+            & ~self.category_bits.droplet
+            & ~self.category_bits.freezing
+            & ~self.category_bits.melting
+            & ~self.category_bits.insect
+            & self.quality_bits.radar
+            & self.quality_bits.lidar
+            & ~self.quality_bits.clutter
+            & ~self.quality_bits.molecular
+            & ~self.quality_bits.attenuated_liquid
+            & ~self.quality_bits.attenuated_rain
             & self.is_v_sigma
         )
 
-    def _find_would_be_drizzle(self):
+    def _find_would_be_drizzle(self) -> npt.NDArray:
         return (
             ~utils.transpose(self.is_rain)
             & self.warm_liquid
-            & self.category_bits
-            & ~self.category_bits
-            & ~self.category_bits
-            & self.quality_bits
-            & ~self.quality_bits
-            & ~self.quality_bits
+            & self.category_bits.falling
+            & ~self.category_bits.melting
+            & ~self.category_bits.insect
+            & self.quality_bits.radar
+            & ~self.quality_bits.clutter
+            & ~self.quality_bits.molecular
         )
 
-    def _find_cold_rain(self):
-        return np.any(self.category_bits
+    def _find_cold_rain(self) -> npt.NDArray:
+        return np.any(self.category_bits.melting, axis=1)
 
 
 class SpectralWidth:
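
In the new version the category and quality bits are accessed as named boolean fields (falling, droplet, radar, ...) rather than by indexing a bit array, and a pixel is classified as drizzle when the right combination of fields holds elementwise. A small stand-alone illustration with made-up boolean arrays (the real fields come from the categorize file):

import numpy as np

# Made-up 2 x 2 fields standing in for category/quality bits.
falling = np.array([[True, True], [True, False]])
droplet = np.array([[False, True], [False, False]])
freezing = np.array([[False, False], [True, False]])
radar = np.array([[True, True], [True, True]])
lidar = np.array([[True, False], [True, True]])

# Same elementwise pattern as _find_drizzle (a subset of its conditions):
drizzle = falling & ~droplet & ~freezing & radar & lidar
print(drizzle)  # [[ True False]
                #  [False False]]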
@@ -155,33 +163,35 @@ class SpectralWidth:
 
     """
 
-    def __init__(self, categorize_file: str):
+    def __init__(self, categorize_file: str | PathLike) -> None:
         self.cat_file = categorize_file
         self.width_ht = self._calculate_spectral_width()
 
-    def _calculate_spectral_width(self):
-        v_sigma = product_tools.
+    def _calculate_spectral_width(self) -> npt.NDArray:
+        v_sigma = product_tools.read_nc_field(self.cat_file, "v_sigma")
         try:
-            width = product_tools.
+            width = product_tools.read_nc_field(self.cat_file, "width")
         except KeyError:
-            width = [0]
-            logging.warning(
+            width = ma.array([0])
+            logging.warning("No spectral width, assuming width = %s", width[0])
         sigma_factor = self._calc_v_sigma_factor()
         return width - sigma_factor * v_sigma
 
-    def _calc_v_sigma_factor(self):
+    def _calc_v_sigma_factor(self) -> npt.NDArray:
         beam_divergence = self._calc_beam_divergence()
         wind = self._calc_horizontal_wind()
         actual_wind = (wind + beam_divergence) ** (2 / 3)
         scaled_wind = (30 * wind + beam_divergence) ** (2 / 3)
         return actual_wind / (scaled_wind - actual_wind)
 
-    def _calc_beam_divergence(self):
+    def _calc_beam_divergence(self) -> npt.NDArray:
         beam_width = 0.5
-        height = product_tools.
-
+        height = product_tools.read_nc_field(self.cat_file, "height")
+        altitude = np.mean(product_tools.read_nc_field(self.cat_file, "altitude"))
+        height_agl = height - altitude
+        return height_agl * np.deg2rad(beam_width)
 
-    def _calc_horizontal_wind(self):
+    def _calc_horizontal_wind(self) -> npt.NDArray:
         """Calculates magnitude of horizontal wind.
 
         Returns:
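
_calc_beam_divergence now converts height to height above ground level before applying the 0.5 degree beam width, so the divergence is height_agl * np.deg2rad(0.5), roughly 8.7 m per kilometre of range. A one-line check:

import numpy as np

height_agl = np.array([500.0, 1000.0, 2000.0])   # m above ground level
beam_divergence = height_agl * np.deg2rad(0.5)   # ~[4.4, 8.7, 17.5] m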
@@ -213,7 +223,7 @@ class DrizzleSolver:
         drizzle_source: DrizzleSource,
         drizzle_class: DrizzleClassification,
         spectral_width: SpectralWidth,
-    ):
+    ) -> None:
         self._data = drizzle_source
         self._drizzle_class = drizzle_class
         self._width_ht = spectral_width.width_ht
@@ -222,7 +232,7 @@ class DrizzleSolver:
         self._beta_z_ratio = self._calc_beta_z_ratio()
         self._solve_drizzle(self._dia_init)
 
-    def _init_variables(self) -> tuple[dict,
+    def _init_variables(self) -> tuple[dict, npt.NDArray]:
         shape = self._data.z.shape
         res = {
             "Do": np.zeros(shape),
@@ -232,22 +242,27 @@ class DrizzleSolver:
         }
         return res, np.zeros(shape)
 
-    def _calc_beta_z_ratio(self) ->
+    def _calc_beta_z_ratio(self) -> npt.NDArray:
         return 2 / np.pi * self._data.beta / self._data.z
 
-    def _find_lut_indices(
+    def _find_lut_indices(
+        self, ind: tuple[int, ...], dia_init: npt.NDArray, n_dia: int, n_widths: int
+    ) -> tuple[int, int]:
         ind_dia = bisect_left(self._data.mie["Do"], dia_init[ind], hi=n_dia - 1)
         ind_width = bisect_left(
-            self._width_lut[:, ind_dia],
+            self._width_lut[:, ind_dia],
+            -self._width_ht[ind],
+            hi=n_widths - 1,
         )
         return ind_width, ind_dia
 
-    def _solve_drizzle(self, dia_init:
+    def _solve_drizzle(self, dia_init: npt.NDArray) -> None:
         drizzle_ind = np.where(self._drizzle_class.drizzle == 1)
         dia_init[drizzle_ind] = self._calc_dia(self._beta_z_ratio[drizzle_ind], k=18.8)
         n_widths, n_dia = self._width_lut.shape[0], len(self._data.mie["Do"])
         max_ite = 10
-
+        path_lengths = utils.path_lengths_from_ground(self._data.height_agl)
+        for ind in zip(*drizzle_ind, strict=True):
             for _ in range(max_ite):
                 lut_ind = self._find_lut_indices(ind, dia_init, n_dia, n_widths)
                 dia = self._calc_dia(
@@ -261,24 +276,27 @@ class DrizzleSolver:
                     break
                 self._dia_init[ind] = dia
             beta_factor = np.exp(
-                2 * self.params["S"][ind] * self._data.beta[ind] *
+                2 * self.params["S"][ind] * self._data.beta[ind] * path_lengths[ind[-1]]
             )
             self.params["beta_corr"][ind[0], (ind[-1] + 1) :] *= beta_factor
 
     def _update_result_tables(
-        self,
-
+        self,
+        ind: tuple,
+        dia: npt.NDArray | float,
+        lut_ind: tuple,
+    ) -> None:
         self.params["Do"][ind] = dia
         self.params["mu"][ind] = self._data.mie["mu"][lut_ind[0]]
         self.params["S"][ind] = self._data.mie["S"][lut_ind]
 
     @staticmethod
     def _calc_dia(
-        beta_z_ratio:
+        beta_z_ratio: npt.NDArray | float,
         mu: float = 0.0,
         ray: float = 1.0,
         k: float = 1.0,
-    ) ->
+    ) -> npt.NDArray | float:
         """Drizzle diameter calculation.
 
         Args:
@@ -299,7 +317,9 @@ class DrizzleSolver:
 
     @staticmethod
     def _is_converged(
-        ind: tuple,
+        ind: tuple,
+        dia: npt.NDArray | float,
+        dia_init: npt.NDArray,
     ) -> bool:
         threshold = 1e-3
         return abs((dia - dia_init[ind]) / dia_init[ind]) < threshold
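
The solver loops over each drizzle pixel for at most max_ite = 10 iterations and stops once the relative change of the diameter estimate drops below 1e-3; _is_converged now receives dia and dia_init explicitly. A minimal sketch of that stopping rule:

THRESHOLD = 1e-3


def is_converged(dia: float, dia_prev: float) -> bool:
    # Relative change of the diameter estimate between two iterations.
    return abs((dia - dia_prev) / dia_prev) < THRESHOLD


print(is_converged(1.0005e-4, 1.0e-4))  # True: 0.05 % change
print(is_converged(1.2e-4, 1.0e-4))     # False: 20 % change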
cloudnetpy/products/epsilon.py

@@ -0,0 +1,211 @@
+from os import PathLike
+from pathlib import Path
+from uuid import UUID
+
+import doppy
+import doppy.netcdf
+import netCDF4
+import numpy as np
+import numpy.typing as npt
+import scipy.constants
+from doppy.product.turbulence import HorizontalWind, Options, Turbulence, VerticalWind
+from scipy.interpolate import LinearNDInterpolator, NearestNDInterpolator
+
+import cloudnetpy
+from cloudnetpy.exceptions import ValidTimeStampError
+from cloudnetpy.output import copy_variables
+from cloudnetpy.utils import get_time, get_uuid
+
+
+def generate_epsilon_from_lidar(
+    doppler_lidar_file: str | PathLike,
+    doppler_lidar_wind_file: str | PathLike,
+    output_file: str | PathLike,
+    uuid: str | UUID | None = None,
+) -> UUID:
+    sliding_window_in_seconds = 3 * 60
+    uuid = get_uuid(uuid)
+    opts = _get_options(doppler_lidar_file)
+    opts.period = sliding_window_in_seconds
+    vert = _vertical_wind_from_doppler_lidar_file(doppler_lidar_file)
+    hori = _horizontal_wind_from_doppler_lidar_file(doppler_lidar_wind_file)
+    turb = Turbulence.from_winds(vert, hori, opts)
+
+    with (
+        netCDF4.Dataset(Path(doppler_lidar_file), "r") as nc_src,
+        doppy.netcdf.Dataset(Path(output_file), format="NETCDF4_CLASSIC") as nc,
+    ):
+        nc.add_dimension("time")
+        nc.add_dimension("height", size=len(turb.height))
+        nc.add_time(
+            name="time",
+            dimensions=("time",),
+            standard_name="time",
+            long_name="Time UTC",
+            data=turb.time,
+            dtype="f8",
+        )
+        nc.add_variable(
+            name="height",
+            dimensions=("height",),
+            units="m",
+            data=turb.height,
+            standard_name=nc_src["height"].standard_name,
+            long_name=nc_src["height"].long_name,
+            dtype="f4",
+        )
+        nc.add_variable(
+            name="epsilon",
+            dimensions=("time", "height"),
+            units="m2 s-3",
+            data=turb.turbulent_kinetic_energy_dissipation_rate,
+            mask=turb.mask,
+            dtype="f4",
+            long_name="Dissipation rate of turbulent kinetic energy",
+        )
+        nc.add_scalar_variable(
+            name="ray_accumulation_time",
+            units="s",
+            long_name="Ray accumulation time",
+            data=opts.ray_accumulation_time,
+            dtype="f4",
+        )
+        nc.add_scalar_variable(
+            name="rolling_window_period",
+            units="s",
+            long_name="Rolling window period",
+            data=opts.period,
+            dtype="f4",
+        )
+
+        nc.add_attribute("file_uuid", str(uuid))
+        nc.add_attribute("cloudnet_file_type", "epsilon-lidar")
+        nc.add_attribute("doppy_version", doppy.__version__)
+        nc.add_attribute("cloudnetpy_version", cloudnetpy.__version__)
+        nc.add_attribute(
+            "title",
+            "Dissipation rate of turbulent kinetic energy (lidar) "
+            f"from {nc_src.location}",
+        )
+
+    copy_attributes_from_src(doppler_lidar_file, output_file)
+
+    with (
+        netCDF4.Dataset(output_file, "r+") as nc_out,
+        netCDF4.Dataset(doppler_lidar_file, "r") as nc_src_stare,
+        netCDF4.Dataset(doppler_lidar_wind_file, "r") as nc_src_wind,
+    ):
+        copy_variables(
+            nc_src_stare, nc_out, ("latitude", "longitude", "altitude", "source")
+        )
+        nc_out.source_file_uuids = f"{nc_src_stare.file_uuid}, {nc_src_wind.file_uuid}"
+        sources = {nc_src_stare.source, nc_src_wind.source}
+        nc_out.source = ", ".join(sources)
+        history = (
+            f"{get_time()} - epsilon-lidar file created using doppy "
+            f"v{doppy.__version__} and cloudnetpy v{cloudnetpy.__version__}\n"
+            f"{nc_src_stare.history}\n"
+            f"{nc_src_wind.history}"
+        )
+        history = "\n".join(
+            line.strip() for line in history.splitlines() if line.strip()
+        )
+        nc_out.history = history
+        nc_out.references = "https://doi.org/10.1175/2010JTECHA1455.1"
+    return uuid
+
+
+def copy_attributes_from_src(src: str | PathLike, trg: str | PathLike) -> None:
+    with netCDF4.Dataset(src, "r") as nc_src, netCDF4.Dataset(trg, "a") as nc_trg:
+        for attr in ("year", "month", "day", "location", "Conventions"):
+            nc_trg.setncattr(attr, nc_src.getncattr(attr))
+
+
+def _horizontal_wind_from_doppler_lidar_file(
+    doppler_lidar_wind_file: str | PathLike,
+) -> HorizontalWind:
+    with netCDF4.Dataset(doppler_lidar_wind_file, "r") as nc:
+        time = _datetime64_from_nc_var(nc["time"])
+        height = np.array(nc["height"][:].data, dtype=np.float64)
+        uwind = np.array(nc["uwind"][:].data, dtype=np.float64)
+        vwind = np.array(nc["vwind"][:].data, dtype=np.float64)
+        umask = np.array(nc["uwind"][:].mask, dtype=np.bool_)
+        vmask = np.array(nc["vwind"][:].mask, dtype=np.bool_)
+        V = np.sqrt(uwind**2 + vwind**2)
+        mask = umask | vmask
+        if np.all(mask):
+            raise ValidTimeStampError
+        t = np.broadcast_to(time[:, None], mask.shape)[~mask]
+        h = np.broadcast_to(height[None, :], mask.shape)[~mask]
+
+        if len(np.unique(t)) < 3 or len(np.unique(h)) < 3:
+            msg = "Not enough unique values for interpolation"
+            raise ValidTimeStampError(msg)
+
+        interp_linear = LinearNDInterpolator(list(zip(t, h, strict=True)), V[~mask])
+        interp_nearest = NearestNDInterpolator(list(zip(t, h, strict=True)), V[~mask])
+        T, H = np.meshgrid(time, height, indexing="ij")
+        V_linear = interp_linear(T, H)
+        V_nearest = interp_nearest(T, H)
+        isnan = np.isnan(V_linear)
+        V_interp = V_linear
+        V_interp[isnan] = V_nearest[isnan]
+        if np.isnan(V_interp).any():
+            msg = "Unexpected nans"
+            raise ValueError(msg)
+        return HorizontalWind(time=time, height=height, V=V_interp)
+
+
+def _get_options(doppler_lidar_file: str | PathLike) -> Options:
+    with netCDF4.Dataset(doppler_lidar_file, "r") as nc:
+        if "ray_accumulation_time" in nc.variables:
+            return Options(ray_accumulation_time=nc["ray_accumulation_time"][:])
+        if "pulses_per_ray" in nc.variables:
+            prf = _infer_pulse_repetition_frequency(
+                np.array(nc["range"][:].data, dtype=np.float64)
+            )
+            return Options(ray_accumulation_time=float(nc["pulses_per_ray"][:] / prf))
+        msg = "Missing ray info"
+        raise ValueError(msg)
+
+
+def _infer_pulse_repetition_frequency(range_: npt.NDArray[np.float64]) -> float:
+    c = scipy.constants.c
+    dist = range_.max() - range_.min()
+    round_trip_time = 2 * dist / c
+
+    T_LOW = 1 / 10_000  # Halo XR instruments operate on lower frequency
+    T_HIGH = 1 / 15_000  # Rest should operate on higher frequency
+    if round_trip_time / T_HIGH < 1:
+        return 15e3
+    if round_trip_time / T_LOW < 1:
+        return 10e3
+    msg = f"Suspiciously large range ({dist}m). Cannot infer pulse repetition rate"
+    raise ValueError(msg)
+
+
+def _vertical_wind_from_doppler_lidar_file(
+    doppler_lidar_file: str | PathLike,
+) -> VerticalWind:
+    with netCDF4.Dataset(doppler_lidar_file, "r") as nc:
+        time = _datetime64_from_nc_var(nc["time"])
+        height = np.array(nc["height"][:].data, dtype=np.float64)
+        w = np.array(nc["v"][:].data, dtype=np.float64)
+        mask = np.array(nc["v"][:].mask, dtype=np.bool_)
+        if isinstance(mask, np.ndarray) and mask.any():
+            w[mask] = np.nan
+
+    return VerticalWind(time=time, height=height, w=w, mask=mask)
+
+
+def _datetime64_from_nc_var(var: netCDF4.Variable) -> npt.NDArray[np.datetime64]:
+    return np.array(
+        netCDF4.num2date(
+            var[:].data,
+            units=var.units,
+            calendar=var.calendar,
+            only_use_cftime_datetimes=False,
+            only_use_python_datetimes=True,
+        ),
+        dtype="datetime64[us]",
+    )
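
A hypothetical call to the new epsilon product; the inputs are assumed to be a Cloudnet Doppler-lidar stare file and the corresponding Doppler-lidar wind file, and the file names below are placeholders:

from cloudnetpy.products.epsilon import generate_epsilon_from_lidar

uuid = generate_epsilon_from_lidar(
    doppler_lidar_file="doppler-lidar.nc",            # placeholder file names
    doppler_lidar_wind_file="doppler-lidar-wind.nc",
    output_file="epsilon-lidar.nc",
)
print(uuid)  # UUID written to the file_uuid attribute of the output file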