disdrodb 0.1.3__py3-none-any.whl → 0.1.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- disdrodb/__init__.py +4 -0
- disdrodb/_version.py +2 -2
- disdrodb/api/checks.py +70 -47
- disdrodb/api/configs.py +0 -2
- disdrodb/api/info.py +3 -3
- disdrodb/api/io.py +48 -8
- disdrodb/api/path.py +116 -133
- disdrodb/api/search.py +12 -3
- disdrodb/cli/disdrodb_create_summary.py +103 -0
- disdrodb/cli/disdrodb_create_summary_station.py +1 -1
- disdrodb/cli/disdrodb_run_l0a_station.py +1 -1
- disdrodb/cli/disdrodb_run_l0b_station.py +2 -2
- disdrodb/cli/disdrodb_run_l0c_station.py +2 -2
- disdrodb/cli/disdrodb_run_l1_station.py +2 -2
- disdrodb/cli/disdrodb_run_l2e_station.py +2 -2
- disdrodb/cli/disdrodb_run_l2m_station.py +2 -2
- disdrodb/data_transfer/download_data.py +123 -7
- disdrodb/issue/writer.py +2 -0
- disdrodb/l0/l0a_processing.py +10 -5
- disdrodb/l0/l0b_nc_processing.py +10 -6
- disdrodb/l0/l0b_processing.py +26 -61
- disdrodb/l0/l0c_processing.py +369 -251
- disdrodb/l0/readers/LPM/ARM/ARM_LPM.py +7 -0
- disdrodb/l0/readers/PARSIVEL2/ARM/ARM_PARSIVEL2.py +4 -0
- disdrodb/l0/readers/PARSIVEL2/CANADA/UQAM_NC.py +69 -0
- disdrodb/l0/readers/PARSIVEL2/MPI/BCO_PARSIVEL2.py +136 -0
- disdrodb/l0/readers/PARSIVEL2/MPI/BOWTIE.py +220 -0
- disdrodb/l0/readers/PARSIVEL2/NASA/LPVEX.py +109 -0
- disdrodb/l0/readers/PARSIVEL2/NETHERLANDS/DELFT_NC.py +3 -0
- disdrodb/l1/fall_velocity.py +46 -0
- disdrodb/l1/processing.py +1 -1
- disdrodb/l2/processing.py +1 -1
- disdrodb/metadata/checks.py +132 -125
- disdrodb/psd/fitting.py +172 -205
- disdrodb/psd/models.py +1 -1
- disdrodb/routines/__init__.py +54 -0
- disdrodb/{l0/routines.py → routines/l0.py} +288 -418
- disdrodb/{l1/routines.py → routines/l1.py} +60 -92
- disdrodb/{l2/routines.py → routines/l2.py} +249 -462
- disdrodb/{routines.py → routines/wrappers.py} +95 -7
- disdrodb/scattering/axis_ratio.py +5 -1
- disdrodb/scattering/permittivity.py +18 -0
- disdrodb/scattering/routines.py +56 -36
- disdrodb/summary/routines.py +110 -34
- disdrodb/utils/archiving.py +434 -0
- disdrodb/utils/cli.py +5 -5
- disdrodb/utils/dask.py +62 -1
- disdrodb/utils/decorators.py +31 -0
- disdrodb/utils/encoding.py +5 -1
- disdrodb/{l2 → utils}/event.py +1 -66
- disdrodb/utils/logger.py +1 -1
- disdrodb/utils/manipulations.py +22 -12
- disdrodb/utils/routines.py +166 -0
- disdrodb/utils/time.py +3 -291
- disdrodb/utils/xarray.py +3 -0
- disdrodb/viz/plots.py +85 -14
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/METADATA +2 -2
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/RECORD +62 -54
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/entry_points.txt +1 -0
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/WHEEL +0 -0
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/licenses/LICENSE +0 -0
- {disdrodb-0.1.3.dist-info → disdrodb-0.1.4.dist-info}/top_level.txt +0 -0
disdrodb/{l2/routines.py → routines/l2.py}

@@ -21,12 +21,9 @@ import datetime
 import json
 import logging
 import os
-import shutil
 import time
 from typing import Optional
 
-import dask
-import numpy as np
 import pandas as pd
 
 from disdrodb.api.checks import check_station_inputs
@@ -34,8 +31,8 @@ from disdrodb.api.create_directories import (
     create_logs_directory,
     create_product_directory,
 )
-from disdrodb.api.info import
-from disdrodb.api.io import
+from disdrodb.api.info import group_filepaths
+from disdrodb.api.io import open_netcdf_files
 from disdrodb.api.path import (
     define_file_folder_path,
     define_l2e_filename,
@@ -51,28 +48,30 @@ from disdrodb.configs import (
     get_product_temporal_resolutions,
 )
 from disdrodb.l1.resampling import resample_dataset
-from disdrodb.l2.event import get_files_partitions, group_timesteps_into_event
 from disdrodb.l2.processing import (
     generate_l2_radar,
     generate_l2e,
     generate_l2m,
 )
 from disdrodb.metadata import read_station_metadata
+from disdrodb.scattering.routines import precompute_scattering_tables
+from disdrodb.utils.archiving import define_temporal_partitions, get_files_partitions
+from disdrodb.utils.dask import execute_tasks_safely
 from disdrodb.utils.decorators import delayed_if_parallel, single_threaded_if_parallel
 from disdrodb.utils.list import flatten_list
 
 # Logger
 from disdrodb.utils.logger import (
-    close_logger,
-    create_logger_file,
     create_product_logs,
-    log_error,
     log_info,
 )
+from disdrodb.utils.routines import (
+    is_possible_product,
+    run_product_generation,
+    try_get_required_filepaths,
+)
 from disdrodb.utils.time import (
     ensure_sample_interval_in_seconds,
-    ensure_sorted_by_time,
-    generate_time_blocks,
     get_resampling_information,
 )
 from disdrodb.utils.writer import write_product
@@ -81,210 +80,13 @@ logger = logging.getLogger(__name__)
 
 
 ####----------------------------------------------------------------------------.
-def identify_events(
-    filepaths,
-    parallel=False,
-    min_drops=5,
-    neighbor_min_size=2,
-    neighbor_time_interval="5MIN",
-    event_max_time_gap="6H",
-    event_min_duration="5MIN",
-    event_min_size=3,
-):
-    """Return a list of rainy events.
-
-    Rainy timesteps are defined when N > min_drops.
-    Any rainy isolated timesteps (based on neighborhood criteria) is removed.
-    Then, consecutive rainy timesteps are grouped into the same event if the time gap between them does not
-    exceed `event_max_time_gap`. Finally, events that do not meet minimum size or duration
-    requirements are filtered out.
-
-    Parameters
-    ----------
-    filepaths: list
-        List of L1C file paths.
-    parallel: bool
-        Whether to load the files in parallel.
-        Set parallel=True only in a multiprocessing environment.
-        The default is False.
-    neighbor_time_interval : str
-        The time interval around a given a timestep defining the neighborhood.
-        Only timesteps that fall within this time interval before or after a timestep are considered neighbors.
-    neighbor_min_size : int, optional
-        The minimum number of neighboring timesteps required within `neighbor_time_interval` for a
-        timestep to be considered non-isolated. Isolated timesteps are removed !
-        - If `neighbor_min_size=0, then no timestep is considered isolated and no filtering occurs.
-        - If `neighbor_min_size=1`, the timestep must have at least one neighbor within `neighbor_time_interval`.
-        - If `neighbor_min_size=2`, the timestep must have at least two timesteps within `neighbor_time_interval`.
-        Defaults to 1.
-    event_max_time_gap: str
-        The maximum time interval between two timesteps to be considered part of the same event.
-        This parameters is used to group timesteps into events !
-    event_min_duration : str
-        The minimum duration an event must span. Events shorter than this duration are discarded.
-    event_min_size : int, optional
-        The minimum number of valid timesteps required for an event. Defaults to 1.
-
-    Returns
-    -------
-    list of dict
-        A list of events, where each event is represented as a dictionary with keys:
-        - "start_time": np.datetime64, start time of the event
-        - "end_time": np.datetime64, end time of the event
-        - "duration": np.timedelta64, duration of the event
-        - "n_timesteps": int, number of valid timesteps in the event
-    """
-    # Open datasets in parallel
-    ds = open_netcdf_files(filepaths, variables=["time", "N"], parallel=parallel)
-    # Sort dataset by time
-    ds = ensure_sorted_by_time(ds)
-    # Define candidate timesteps to group into events
-    idx_valid = ds["N"].data > min_drops
-    timesteps = ds["time"].data[idx_valid]
-    # Define event list
-    event_list = group_timesteps_into_event(
-        timesteps=timesteps,
-        neighbor_min_size=neighbor_min_size,
-        neighbor_time_interval=neighbor_time_interval,
-        event_max_time_gap=event_max_time_gap,
-        event_min_duration=event_min_duration,
-        event_min_size=event_min_size,
-    )
-    return event_list
-
-
-def identify_time_partitions(filepaths: list[str], freq: str) -> list[dict]:
-    """Identify the set of time blocks covered by files.
-
-    The result is a minimal, sorted, and unique set of time partitions.
-
-    Parameters
-    ----------
-    filepaths : list of str
-        Paths to input files from which start and end times will be extracted
-        via `get_start_end_time_from_filepaths`.
-    freq : {'none', 'hour', 'day', 'month', 'quarter', 'season', 'year'}
-        Frequency determining the granularity of candidate blocks.
-        See `generate_time_blocks` for more details.
-
-    Returns
-    -------
-    list of dict
-        A list of dictionaries, each containing:
-
-        - `start_time` (numpy.datetime64[s])
-          Inclusive start of a time block.
-        - `end_time` (numpy.datetime64[s])
-          Inclusive end of a time block.
-
-        Only those blocks that overlap at least one file's interval are returned.
-        The list is sorted by `start_time` and contains no duplicate blocks.
-    """
-    # Define file start time and end time
-    start_times, end_times = get_start_end_time_from_filepaths(filepaths)
-
-    # Define files time coverage
-    start_time, end_time = start_times.min(), end_times.max()
-
-    # Compute candidate time blocks
-    blocks = generate_time_blocks(start_time, end_time, freq=freq)  # end_time non inclusive is correct?
-
-    # Select time blocks with files
-    mask = (blocks[:, 0][:, None] <= end_times) & (blocks[:, 1][:, None] >= start_times)
-    blocks = blocks[mask.any(axis=1)]
-
-    # Ensure sorted unique time blocks
-    order = np.argsort(blocks[:, 0])
-    blocks = np.unique(blocks[order], axis=0)
-
-    # Convert to list of dicts
-    list_time_blocks = [{"start_time": start_time, "end_time": end_time} for start_time, end_time in blocks]
-    return list_time_blocks
-
-
-def is_possible_product(accumulation_interval, sample_interval, rolling):
-    """Assess if production is possible given the requested accumulation interval and source sample_interval."""
-    # Avoid rolling product generation at source sample interval
-    if rolling and accumulation_interval == sample_interval:
-        return False
-    # Avoid product generation if the accumulation_interval is less than the sample interval
-    if accumulation_interval < sample_interval:
-        return False
-    # Avoid producti generation if accumulation_interval is not multiple of sample_interval
-    return accumulation_interval % sample_interval == 0
-
-
-def define_temporal_partitions(filepaths, strategy, parallel, strategy_options):
-    """Define temporal file processing partitions.
-
-    Parameters
-    ----------
-    filepaths : list
-        List of files paths to be processed
-
-    strategy : str
-        Which partitioning strategy to apply:
-
-        - ``'time_block'`` defines fixed time intervals (e.g. monthly) covering input files.
-        - ``'event'`` detect clusters of precipitation ("events").
-
-    parallel : bool
-        If True, parallel data loading is used to identify events.
-
-    strategy_options : dict
-        Dictionary with strategy-specific parameters:
-
-        If ``strategy == 'time_block'``, supported options are:
-
-        - ``freq``: Time unit for blocks. One of {'year', 'season', 'month', 'day'}.
-
-        See identify_time_partitions for more information.
-
-        If ``strategy == 'event'``, supported options are:
-
-        - ``min_drops`` : int
-            Minimum number of drops to consider a timestep.
-        - ``neighbor_min_size`` : int
-            Minimum cluster size for merging neighboring events.
-        - ``neighbor_time_interval`` : str
-            Time window (e.g. "5MIN") to merge adjacent clusters.
-        - ``event_max_time_gap`` : str
-            Maximum allowed gap (e.g. "6H") within a single event.
-        - ``event_min_duration`` : str
-            Minimum total duration (e.g. "5MIN") of an event.
-        - ``event_min_size`` : int
-            Minimum number of records in an event.
-
-        See identify_events for more information.
-
-    Returns
-    -------
-    list
-        A list of dictionaries, each containing:
-
-        - ``start_time`` (numpy.datetime64[s])
-          Inclusive start of an event or time block.
-        - ``end_time`` (numpy.datetime64[s])
-          Inclusive end of an event or time block.
-
-    Notes
-    -----
-    - The ``'event'`` strategy requires loading data into memory to identify clusters.
-    - The ``'time_block'`` strategy can operate on metadata alone, without full data loading.
-    - The ``'event'`` strategy implicitly performs data selection on which files to process !
-    - The ``'time_block'`` strategy does not performs data selection on which files to process !
-    """
-    if strategy not in ["time_block", "event"]:
-        raise ValueError(f"Unknown strategy: {strategy!r}. Must be 'time_block' or 'event'.")
-    if strategy == "event":
-        return identify_events(filepaths, parallel=parallel, **strategy_options)
-
-    return identify_time_partitions(filepaths, **strategy_options)
 
 
 class ProcessingOptions:
     """Define L2 products processing options."""
 
+    # TODO: TO MOVE ELSEWHERE (AFTER L1 REFACTORING !)
+
     def __init__(self, product, filepaths, parallel, temporal_resolutions=None):
         """Define L2 products processing options."""
         import disdrodb
@@ -319,7 +121,7 @@ class ProcessingOptions:
 
             # -------------------------------------------------------------------------.
             # Retrieve product options
-            product_options = dict_product_options[temporal_resolution]
+            product_options = dict_product_options[temporal_resolution].copy()
 
             # Retrieve accumulation_interval and rolling option
             accumulation_interval, rolling = get_resampling_information(temporal_resolution)
@@ -337,7 +139,7 @@ class ProcessingOptions:
 
             # -------------------------------------------------------------------------.
             # Define list of temporal partitions
-            # - [{start_time:
+            # - [{start_time: np.datetime64, end_time: np.datetime64}, ....]
             # - Either strategy: "event" or "time_block" or save_by_time_block"
             # - "event" requires loading data into memory to identify events
             # --> Does some data filtering on what to process !
@@ -362,6 +164,7 @@ class ProcessingOptions:
             # some data after the actual event end_time to ensure that the resampled dataset
             # contains the event_end_time
             # --> get_files_partitions adjust the event end_time to accounts for the required "border" data.
+            # - ATTENTION: get_files_partitions returns start_time and end_time as datetime objects !
             files_partitions = [
                 get_files_partitions(
                     list_partitions=list_partitions,
@@ -410,45 +213,19 @@ class ProcessingOptions:
         return self.dict_folder_partitioning[temporal_resolution]
 
 
-def precompute_scattering_tables(
-    frequency,
-    num_points,
-    diameter_max,
-    canting_angle_std,
-    axis_ratio_model,
-    permittivity_model,
-    water_temperature,
-    elevation_angle,
-    verbose=True,
-):
-    """Precompute the pyTMatrix scattering tables required for radar variables simulations."""
-    from disdrodb.scattering.routines import get_list_simulations_params, load_scatterer
-
-    # Define parameters for all requested simulations
-    list_params = get_list_simulations_params(
-        frequency=frequency,
-        num_points=num_points,
-        diameter_max=diameter_max,
-        canting_angle_std=canting_angle_std,
-        axis_ratio_model=axis_ratio_model,
-        permittivity_model=permittivity_model,
-        water_temperature=water_temperature,
-        elevation_angle=elevation_angle,
-    )
-
-    # Compute require scattering tables
-    for params in list_params:
-        # Initialize scattering table
-        _ = load_scatterer(
-            verbose=verbose,
-            **params,
-        )
-
-
 ####----------------------------------------------------------------------------.
 #### L2E
 
 
+def define_l2e_logs_filename(campaign_name, station_name, start_time, end_time, accumulation_interval, rolling):
+    """Define L2E logs filename."""
+    temporal_resolution = define_temporal_resolution(seconds=accumulation_interval, rolling=rolling)
+    starting_time = pd.to_datetime(start_time).strftime("%Y%m%d%H%M%S")
+    ending_time = pd.to_datetime(end_time).strftime("%Y%m%d%H%M%S")
+    logs_filename = f"L2E.{temporal_resolution}.{campaign_name}.{station_name}.s{starting_time}.e{ending_time}"
+    return logs_filename
+
+
 @delayed_if_parallel
 @single_threaded_if_parallel
 def _generate_l2e(
@@ -457,6 +234,7 @@ def _generate_l2e(
     filepaths,
     data_dir,
     logs_dir,
+    logs_filename,
     folder_partitioning,
     campaign_name,
     station_name,
@@ -469,42 +247,42 @@ def _generate_l2e(
     verbose,
     parallel,  # this is used by the decorator and to initialize correctly the logger !
 ):
-
-    # Define product
+    """Generate the L2E product from the DISDRODB L1 netCDF file."""
+    # Define product
     product = "L2E"
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Define product processing function
+    def core(
+        filepaths,
+        campaign_name,
+        station_name,
+        product_options,
+        # Processing options
+        logger,
+        parallel,
+        verbose,
+        force,
+        # Resampling arguments
+        start_time,
+        end_time,
+        accumulation_interval,
+        rolling,
+        # Archiving arguments
+        data_dir,
+        folder_partitioning,
+    ):
+        """Define L1 product processing."""
+        # Copy to avoid in-place replacement (outside this function)
+        product_options = product_options.copy()
+
+        # Open the dataset over the period of interest
         ds = open_netcdf_files(filepaths, start_time=start_time, end_time=end_time, parallel=False)
+        ds = ds.load()
+        ds.close()
 
-
-
-        # Define sample interval in seconds
+        # Resample dataset  # TODO: in future to perform in L1
+        # - Define sample interval in seconds
         sample_interval = ensure_sample_interval_in_seconds(ds["sample_interval"]).to_numpy().item()
-
         # - Resample dataset
         ds = resample_dataset(
             ds=ds,
@@ -518,71 +296,71 @@ def _generate_l2e(
         radar_enabled = product_options.get("radar_enabled")
         radar_options = product_options.get("radar_options")
 
-
-
-        # - Only if at least 2 timesteps available
-        if ds["time"].size > 2:
-
-            # Compute L2E variables
-            ds = generate_l2e(ds=ds, **l2e_options)
-
-            # Simulate L2M-based radar variables if asked
-            if radar_enabled:
-                ds_radar = generate_l2_radar(ds, parallel=not parallel, **radar_options)
-                ds.update(ds_radar)
-                ds.attrs = ds_radar.attrs.copy()
-
-            # Write netCDF4 dataset
-            if ds["time"].size > 1:
-                # Define filepath
-                filename = define_l2e_filename(
-                    ds,
-                    campaign_name=campaign_name,
-                    station_name=station_name,
-                    sample_interval=accumulation_interval,
-                    rolling=rolling,
-                )
-                folder_path = define_file_folder_path(ds, data_dir=data_dir, folder_partitioning=folder_partitioning)
-                filepath = os.path.join(folder_path, filename)
-                # Write file
-                write_product(ds, filepath=filepath, force=force)
-
-                # Update log
-                log_info(logger=logger, msg=f"{product} creation of {filename} has ended.", verbose=verbose)
-            else:
-                log_info(logger=logger, msg="File not created. Less than one timesteps available.", verbose=verbose)
-        else:
+        # Ensure at least 2 timestep available
+        if ds["time"].size < 2:
             log_info(logger=logger, msg="File not created. Less than two timesteps available.", verbose=verbose)
+            return None
 
-
-
-        if folder_partitioning != "":
-            log_dst_dir = define_file_folder_path(ds, data_dir=logs_dir, folder_partitioning=folder_partitioning)
-            os.makedirs(log_dst_dir, exist_ok=True)
-
-        ##--------------------------------------------------------------------.
-        # Clean environment
-        del ds
+        # Compute L2E variables
+        ds = generate_l2e(ds=ds, **l2e_options)
 
-
+        # Ensure at least 2 timestep available
+        if ds["time"].size < 2:
+            log_info(logger=logger, msg="File not created. Less than two timesteps available.", verbose=verbose)
+            return None
 
-
-
-
-
-
-        log_error(logger, msg, verbose=verbose)
+        # Simulate L2M-based radar variables if asked
+        if radar_enabled:
+            ds_radar = generate_l2_radar(ds, parallel=not parallel, **radar_options)
+            ds.update(ds_radar)
+            ds.attrs = ds_radar.attrs.copy()
 
-
-
+        # Write L2E netCDF4 dataset
+        filename = define_l2e_filename(
+            ds,
+            campaign_name=campaign_name,
+            station_name=station_name,
+            sample_interval=accumulation_interval,
+            rolling=rolling,
+        )
+        folder_path = define_file_folder_path(ds, dir_path=data_dir, folder_partitioning=folder_partitioning)
+        filepath = os.path.join(folder_path, filename)
+        write_product(ds, filepath=filepath, force=force)
 
-
-
-        # Move logger file to correct partitioning directory
-        dst_filepath = os.path.join(log_dst_dir, os.path.basename(logger_filepath))
-        shutil.move(logger_filepath, dst_filepath)
-        logger_filepath = dst_filepath
+        # Return L2E dataset
+        return ds
 
+    # Define product processing function kwargs
+    core_func_kwargs = dict(  # noqa: C408
+        filepaths=filepaths,
+        campaign_name=campaign_name,
+        station_name=station_name,
+        product_options=product_options,
+        # Resampling arguments
+        start_time=start_time,
+        end_time=end_time,
+        accumulation_interval=accumulation_interval,
+        rolling=rolling,
+        # Archiving arguments
+        data_dir=data_dir,
+        folder_partitioning=folder_partitioning,
+        # Processing options
+        parallel=parallel,
+        verbose=verbose,
+        force=force,
+    )
+    # Run product generation
+    logger_filepath = run_product_generation(
+        product=product,
+        logs_dir=logs_dir,
+        logs_filename=logs_filename,
+        parallel=parallel,
+        verbose=verbose,
+        folder_partitioning=folder_partitioning,
+        core_func=core,
+        core_func_kwargs=core_func_kwargs,
+        pass_logger=True,
+    )
     # Return the logger file path
     return logger_filepath
 
@@ -672,33 +450,22 @@ def run_l2e_station(
     log_info(logger=logger, msg=msg, verbose=verbose)
 
     # -------------------------------------------------------------------------.
-    # List
+    # List files to process
+    # - If no data available, print error message and return None
     required_product = get_required_product(product)
-
-
-
-
-
-
-
-
-
-
-        )
-    except Exception as e:
-        print(str(e))  # Case where no file paths available
-        flag_not_available_data = True
-
-    # -------------------------------------------------------------------------.
-    # If no data available, print error message and return None
-    if flag_not_available_data:
-        msg = (
-            f"{product} processing of {data_source} {campaign_name} {station_name} "
-            + f"has not been launched because of missing {required_product} data."
-        )
-        print(msg)
+    filepaths = try_get_required_filepaths(
+        data_archive_dir=data_archive_dir,
+        data_source=data_source,
+        campaign_name=campaign_name,
+        station_name=station_name,
+        product=required_product,
+        # Processing options
+        debugging_mode=debugging_mode,
+    )
+    if filepaths is None:
         return
 
+    # -------------------------------------------------------------------------.
     # Retrieve L2E processing options
     l2e_processing_options = ProcessingOptions(product="L2E", filepaths=filepaths, parallel=parallel)
 
@@ -770,6 +537,14 @@ def run_l2e_station(
                 filepaths=event_info["filepaths"],
                 data_dir=data_dir,
                 logs_dir=logs_dir,
+                logs_filename=define_l2e_logs_filename(
+                    campaign_name=campaign_name,
+                    station_name=station_name,
+                    start_time=event_info["start_time"],
+                    end_time=event_info["end_time"],
+                    rolling=rolling,
+                    accumulation_interval=accumulation_interval,
+                ),
                 folder_partitioning=folder_partitioning,
                 campaign_name=campaign_name,
                 station_name=station_name,
@@ -784,7 +559,7 @@ def run_l2e_station(
             )
             for event_info in files_partitions
         ]
-        list_logs =
+        list_logs = execute_tasks_safely(list_tasks=list_tasks, parallel=parallel, logs_dir=logs_dir)
 
         # -----------------------------------------------------------------.
         # Define product summary logs
@@ -811,6 +586,15 @@ def run_l2e_station(
 
 ####----------------------------------------------------------------------------.
 #### L2M
+def define_l2m_logs_filename(campaign_name, station_name, start_time, end_time, model_name, sample_interval, rolling):
+    """Define L2M logs filename."""
+    temporal_resolution = define_temporal_resolution(seconds=sample_interval, rolling=rolling)
+    starting_time = pd.to_datetime(start_time).strftime("%Y%m%d%H%M%S")
+    ending_time = pd.to_datetime(end_time).strftime("%Y%m%d%H%M%S")
+    logs_filename = (
+        f"L2M_{model_name}.{temporal_resolution}.{campaign_name}.{station_name}.s{starting_time}.e{ending_time}"
+    )
+    return logs_filename
 
 
 @delayed_if_parallel
@@ -821,6 +605,7 @@ def _generate_l2m(
    filepaths,
    data_dir,
    logs_dir,
+   logs_filename,
    folder_partitioning,
    campaign_name,
    station_name,
@@ -834,34 +619,34 @@ def _generate_l2m(
     verbose,
     parallel,  # this is used only to initialize the correct logger !
 ):
-
-    # Define product
+    """Generate the L2M product from a DISDRODB L2E netCDF file."""
+    # Define product
     product = "L2M"
 
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Define product processing function
+    def core(
+        start_time,
+        end_time,
+        filepaths,
+        campaign_name,
+        station_name,
+        # Processing options
+        logger,
+        verbose,
+        force,
+        # Product options
+        product_options,
+        sample_interval,
+        rolling,
+        model_name,
+        # Archiving arguments
+        data_dir,
+        folder_partitioning,
+    ):
+        """Define L1 product processing."""
+        # Copy to avoid in-place replacement (outside this function)
+        product_options = product_options.copy()
 
-    ##------------------------------------------------------------------------
-    ### Core computation
-    try:
         ##------------------------------------------------------------------------.
         # Extract L2M processing options
         l2m_options = product_options.get("product_options")
@@ -887,8 +672,10 @@ def _generate_l2m(
         ]
 
         ##------------------------------------------------------------------------.
-        # Open the
+        # Open the netCDF files
         ds = open_netcdf_files(filepaths, start_time=start_time, end_time=end_time, variables=variables)
+        ds = ds.load()
+        ds.close()
 
         # Produce L2M dataset
         ds = generate_l2m(
@@ -902,54 +689,58 @@ def _generate_l2m(
             ds.update(ds_radar)
             ds.attrs = ds_radar.attrs.copy()  # ds_radar contains already all L2M attrs
 
-        #
-        if ds["time"].size
-
-
-                ds,
-                campaign_name=campaign_name,
-                station_name=station_name,
-                sample_interval=sample_interval,
-                rolling=rolling,
-                model_name=model_name,
-            )
-            folder_path = define_file_folder_path(ds, data_dir=data_dir, folder_partitioning=folder_partitioning)
-            filepath = os.path.join(folder_path, filename)
-            # Write to disk
-            write_product(ds, filepath=filepath, force=force)
-
-            ##--------------------------------------------------------------------.
-            #### - Define logger file final directory
-            if folder_partitioning != "":
-                log_dst_dir = define_file_folder_path(ds, data_dir=logs_dir, folder_partitioning=folder_partitioning)
-                os.makedirs(log_dst_dir, exist_ok=True)
-
-            ##--------------------------------------------------------------------.
-            # Clean environment
-            del ds
-
-            # Log end processing
-            msg = f"{product} creation of {filename} has ended."
-            log_info(logger=logger, msg=msg, verbose=verbose)
-            success_flag = True
-
-    ##--------------------------------------------------------------------.
-    # Otherwise log the error
-    except Exception as e:
-        error_type = str(type(e).__name__)
-        msg = f"{error_type}: {e}"
-        log_error(logger, msg, verbose=verbose)
-
-    # Close the file logger
-    close_logger(logger)
-
-    # Move logger file to correct partitioning directory
-    if success_flag and folder_partitioning != "" and logger_filepath is not None:
-        # Move logger file to correct partitioning directory
-        dst_filepath = os.path.join(log_dst_dir, os.path.basename(logger_filepath))
-        shutil.move(logger_filepath, dst_filepath)
-        logger_filepath = dst_filepath
+        # Ensure at least 2 timestep available
+        if ds["time"].size < 2:
+            log_info(logger=logger, msg="File not created. Less than two timesteps available.", verbose=verbose)
+            return None
 
+        # Write L2M netCDF4 dataset
+        filename = define_l2m_filename(
+            ds,
+            campaign_name=campaign_name,
+            station_name=station_name,
+            sample_interval=sample_interval,
+            rolling=rolling,
+            model_name=model_name,
+        )
+        folder_path = define_file_folder_path(ds, dir_path=data_dir, folder_partitioning=folder_partitioning)
+        filepath = os.path.join(folder_path, filename)
+        write_product(ds, filepath=filepath, force=force)
+
+        # Return L2M dataset
+        return ds
+
+    # Define product processing function kwargs
+    core_func_kwargs = dict(  # noqa: C408
+        filepaths=filepaths,
+        start_time=start_time,
+        end_time=end_time,
+        campaign_name=campaign_name,
+        station_name=station_name,
+        # Processing options
+        verbose=verbose,
+        force=force,
+        # Product options
+        product_options=product_options,
+        sample_interval=sample_interval,
+        rolling=rolling,
+        model_name=model_name,
+        # Archiving arguments
+        data_dir=data_dir,
+        folder_partitioning=folder_partitioning,
+    )
+    # Run product generation
+    logger_filepath = run_product_generation(
+        product=product,
+        logs_dir=logs_dir,
+        logs_filename=logs_filename,
+        parallel=parallel,
+        verbose=verbose,
+        folder_partitioning=folder_partitioning,
+        core_func=core,
+        core_func_kwargs=core_func_kwargs,
+        pass_logger=True,
+    )
     # Return the logger file path
     return logger_filepath
 
@@ -1045,7 +836,6 @@ def run_l2m_station(
     # temporal_resolution = "1MIN"
     # temporal_resolution = "10MIN"
     temporal_resolutions = get_product_temporal_resolutions("L2M")
-    print(temporal_resolutions)
     for temporal_resolution in temporal_resolutions:
 
         # Retrieve accumulation_interval and rolling option
@@ -1062,33 +852,21 @@ def run_l2m_station(
 
         # -----------------------------------------------------------------.
         # List files to process
+        # - If no data available, print error message and try with other L2E accumulation intervals
        required_product = get_required_product(product)
-
-
-
-
-
-
-
-
-
-
-
-
-
-            debugging_mode=debugging_mode,
-        )
-        except Exception as e:
-            print(str(e))  # Case where no file paths available
-            flag_not_available_data = True
-
-        # If no data available, try with other L2E accumulation intervals
-        if flag_not_available_data:
-            msg = (
-                f"{product} processing of {data_source} {campaign_name} {station_name} "
-                + f"has not been launched because of missing {required_product} {temporal_resolution} data."
-            )
-            log_info(logger=logger, msg=msg, verbose=verbose)
+        filepaths = try_get_required_filepaths(
+            data_archive_dir=data_archive_dir,
+            data_source=data_source,
+            campaign_name=campaign_name,
+            station_name=station_name,
+            product=required_product,
+            # Processing options
+            debugging_mode=debugging_mode,
+            # Product options
+            sample_interval=accumulation_interval,
+            rolling=rolling,
+        )
+        if filepaths is None:
             continue
 
         # -------------------------------------------------------------------------.
@@ -1189,6 +967,15 @@ def run_l2m_station(
                     filepaths=event_info["filepaths"],
                     data_dir=data_dir,
                     logs_dir=logs_dir,
+                    logs_filename=define_l2m_logs_filename(
+                        campaign_name=campaign_name,
+                        station_name=station_name,
+                        start_time=event_info["start_time"],
+                        end_time=event_info["end_time"],
+                        model_name=model_name,
+                        sample_interval=accumulation_interval,
+                        rolling=rolling,
+                    ),
                     folder_partitioning=folder_partitioning,
                     campaign_name=campaign_name,
                     station_name=station_name,
@@ -1204,7 +991,7 @@ def run_l2m_station(
                 )
                 for event_info in files_partitions
             ]
-            list_logs =
+            list_logs = execute_tasks_safely(list_tasks=list_tasks, parallel=parallel, logs_dir=logs_dir)
 
             # -----------------------------------------------------------------.
             # Define L2M summary logs