disdrodb 0.1.5__py3-none-any.whl → 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- disdrodb/__init__.py +1 -5
- disdrodb/_version.py +2 -2
- disdrodb/accessor/methods.py +22 -4
- disdrodb/api/checks.py +10 -0
- disdrodb/api/io.py +20 -18
- disdrodb/api/path.py +42 -77
- disdrodb/api/search.py +89 -23
- disdrodb/cli/disdrodb_create_summary.py +1 -1
- disdrodb/cli/disdrodb_run_l0.py +1 -1
- disdrodb/cli/disdrodb_run_l0a.py +1 -1
- disdrodb/cli/disdrodb_run_l0b.py +1 -1
- disdrodb/cli/disdrodb_run_l0c.py +1 -1
- disdrodb/cli/disdrodb_run_l1.py +1 -1
- disdrodb/cli/disdrodb_run_l2e.py +1 -1
- disdrodb/cli/disdrodb_run_l2m.py +1 -1
- disdrodb/configs.py +30 -83
- disdrodb/constants.py +4 -3
- disdrodb/data_transfer/download_data.py +4 -2
- disdrodb/docs.py +2 -2
- disdrodb/etc/products/L1/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/LPM/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/LPM_V0/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PARSIVEL/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PARSIVEL2/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PWS100/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/RD80/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/SWS250/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/global.yaml +6 -0
- disdrodb/etc/products/L2E/10MIN.yaml +1 -12
- disdrodb/etc/products/L2E/global.yaml +1 -1
- disdrodb/etc/products/L2M/MODELS/NGAMMA_GS_R_MAE.yaml +6 -0
- disdrodb/etc/products/L2M/global.yaml +1 -1
- disdrodb/issue/checks.py +2 -2
- disdrodb/l0/check_configs.py +1 -1
- disdrodb/l0/configs/LPM/l0a_encodings.yml +0 -1
- disdrodb/l0/configs/LPM/l0b_cf_attrs.yml +0 -4
- disdrodb/l0/configs/LPM/l0b_encodings.yml +9 -9
- disdrodb/l0/configs/LPM/raw_data_format.yml +11 -11
- disdrodb/l0/configs/LPM_V0/bins_diameter.yml +103 -0
- disdrodb/l0/configs/LPM_V0/bins_velocity.yml +103 -0
- disdrodb/l0/configs/LPM_V0/l0a_encodings.yml +45 -0
- disdrodb/l0/configs/LPM_V0/l0b_cf_attrs.yml +180 -0
- disdrodb/l0/configs/LPM_V0/l0b_encodings.yml +410 -0
- disdrodb/l0/configs/LPM_V0/raw_data_format.yml +474 -0
- disdrodb/l0/configs/PARSIVEL/l0b_encodings.yml +1 -1
- disdrodb/l0/configs/PARSIVEL/raw_data_format.yml +8 -8
- disdrodb/l0/configs/PARSIVEL2/raw_data_format.yml +9 -9
- disdrodb/l0/l0_reader.py +2 -2
- disdrodb/l0/l0a_processing.py +6 -2
- disdrodb/l0/l0b_processing.py +26 -19
- disdrodb/l0/l0c_processing.py +17 -3
- disdrodb/l0/manuals/LPM_V0.pdf +0 -0
- disdrodb/l0/readers/LPM/ITALY/GID_LPM.py +15 -7
- disdrodb/l0/readers/LPM/ITALY/GID_LPM_PI.py +279 -0
- disdrodb/l0/readers/LPM/ITALY/GID_LPM_T.py +276 -0
- disdrodb/l0/readers/LPM/ITALY/GID_LPM_W.py +2 -2
- disdrodb/l0/readers/LPM/NETHERLANDS/DELFT_RWANDA_LPM_NC.py +103 -0
- disdrodb/l0/readers/LPM/NORWAY/HAUKELISETER_LPM.py +216 -0
- disdrodb/l0/readers/LPM/NORWAY/NMBU_LPM.py +208 -0
- disdrodb/l0/readers/LPM/UK/WITHWORTH_LPM.py +219 -0
- disdrodb/l0/readers/LPM/USA/CHARLESTON.py +229 -0
- disdrodb/l0/readers/{LPM → LPM_V0}/BELGIUM/ULIEGE.py +33 -49
- disdrodb/l0/readers/LPM_V0/ITALY/GID_LPM_V0.py +240 -0
- disdrodb/l0/readers/PARSIVEL/BASQUECOUNTRY/EUSKALMET_OTT.py +227 -0
- disdrodb/l0/readers/{PARSIVEL2 → PARSIVEL}/NASA/LPVEX.py +16 -28
- disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/MC3E.py +1 -1
- disdrodb/l0/readers/PARSIVEL/NCAR/VORTEX2_2010_UF.py +3 -3
- disdrodb/l0/readers/PARSIVEL2/BASQUECOUNTRY/EUSKALMET_OTT2.py +232 -0
- disdrodb/l0/readers/PARSIVEL2/DENMARK/EROSION_raw.py +1 -1
- disdrodb/l0/readers/PARSIVEL2/JAPAN/PRECIP.py +155 -0
- disdrodb/l0/readers/PARSIVEL2/MPI/BCO_PARSIVEL2.py +14 -7
- disdrodb/l0/readers/PARSIVEL2/MPI/BOWTIE.py +8 -3
- disdrodb/l0/readers/PARSIVEL2/NASA/APU.py +28 -5
- disdrodb/l0/readers/PARSIVEL2/NCAR/RELAMPAGO_PARSIVEL2.py +1 -1
- disdrodb/l0/readers/PARSIVEL2/{GPM/GCPEX.py → NORWAY/UIB.py} +54 -29
- disdrodb/l0/readers/PARSIVEL2/PHILIPPINES/{PANGASA.py → PAGASA.py} +6 -3
- disdrodb/l0/readers/PARSIVEL2/SPAIN/GRANADA.py +1 -1
- disdrodb/l0/readers/PARSIVEL2/SWEDEN/SMHI.py +189 -0
- disdrodb/l0/readers/{PARSIVEL/GPM/PIERS.py → PARSIVEL2/USA/CSU.py} +62 -29
- disdrodb/l0/readers/PARSIVEL2/USA/{C3WE.py → CW3E.py} +51 -24
- disdrodb/l0/readers/{PARSIVEL/GPM/IFLOODS.py → RD80/BRAZIL/ATTO_RD80.py} +50 -34
- disdrodb/l0/readers/{SW250 → SWS250}/BELGIUM/KMI.py +1 -1
- disdrodb/l1/beard_model.py +45 -1
- disdrodb/l1/fall_velocity.py +1 -6
- disdrodb/l1/filters.py +2 -0
- disdrodb/l1/processing.py +6 -5
- disdrodb/l1/resampling.py +101 -38
- disdrodb/l2/empirical_dsd.py +12 -8
- disdrodb/l2/processing.py +4 -3
- disdrodb/metadata/search.py +3 -4
- disdrodb/routines/l0.py +4 -4
- disdrodb/routines/l1.py +173 -60
- disdrodb/routines/l2.py +121 -269
- disdrodb/routines/options.py +347 -0
- disdrodb/routines/wrappers.py +9 -1
- disdrodb/scattering/axis_ratio.py +3 -0
- disdrodb/scattering/routines.py +1 -1
- disdrodb/summary/routines.py +765 -724
- disdrodb/utils/archiving.py +51 -44
- disdrodb/utils/attrs.py +1 -1
- disdrodb/utils/compression.py +4 -2
- disdrodb/utils/dask.py +35 -15
- disdrodb/utils/dict.py +33 -0
- disdrodb/utils/encoding.py +1 -1
- disdrodb/utils/manipulations.py +7 -1
- disdrodb/utils/routines.py +9 -8
- disdrodb/utils/time.py +9 -1
- disdrodb/viz/__init__.py +0 -13
- disdrodb/viz/plots.py +209 -0
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/METADATA +1 -1
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/RECORD +124 -95
- disdrodb/l0/readers/PARSIVEL/GPM/LPVEX.py +0 -85
- /disdrodb/etc/products/L2M/{GAMMA_GS_ND_MAE.yaml → MODELS/GAMMA_GS_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{GAMMA_ML.yaml → MODELS/GAMMA_ML.yaml} +0 -0
- /disdrodb/etc/products/L2M/{LOGNORMAL_GS_LOG_ND_MAE.yaml → MODELS/LOGNORMAL_GS_LOG_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{LOGNORMAL_GS_ND_MAE.yaml → MODELS/LOGNORMAL_GS_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{LOGNORMAL_ML.yaml → MODELS/LOGNORMAL_ML.yaml} +0 -0
- /disdrodb/etc/products/L2M/{NGAMMA_GS_LOG_ND_MAE.yaml → MODELS/NGAMMA_GS_LOG_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{NGAMMA_GS_ND_MAE.yaml → MODELS/NGAMMA_GS_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{NGAMMA_GS_Z_MAE.yaml → MODELS/NGAMMA_GS_Z_MAE.yaml} +0 -0
- /disdrodb/l0/readers/PARSIVEL2/{GPM → NASA}/NSSTC.py +0 -0
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/WHEEL +0 -0
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/entry_points.txt +0 -0
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/licenses/LICENSE +0 -0
- {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/top_level.txt +0 -0
@@ -88,31 +88,22 @@ def read_txt_file(file, filename, logger):
     df = df["TO_PARSE"].str.split(";", expand=True, n=43)
 
     # Assign column names
-
-        "
-        "
-        "
+    names = [
+        "start_identifier",
+        "sensor_serial_number",
+        "weather_code_synop_4680_5min",
         "weather_code_metar_4678_5min",
         "precipitation_rate_5min",
-        "
+        "weather_code_synop_4680",
         "weather_code_metar_4678",
         "precipitation_rate",
         "precipitation_accumulated",
         "sensor_time",
-
-
-        # "quality_index",
-        # "max_hail_diameter",
-        # "laser_status",
-        "dummy1",
-        "dummy2",
-        # "laser_temperature",
+        "temperature_interior",
+        "laser_temperature",
         "laser_current_average",
         "control_voltage",
         "optical_control_voltage_output",
-        # "current_heating_house",
-        # "current_heating_heads",
-        # "current_heating_carriers",
         "number_particles",
         "number_particles_internal_data",
         "number_particles_min_speed",
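In the rewritten parsing above, the `n=43` cap on `str.split` is what keeps the `;`-joined drop spectrum together: splitting stops after 43 separators, so the 44th column retains the remainder intact. A minimal, self-contained sketch of this pattern on a made-up row (the field values are hypothetical):

    import pandas as pd

    # Made-up row: two scalar fields, then a ';'-joined spectrum
    s = pd.Series(["00;12:30;001;002;003"])
    df = s.str.split(";", expand=True, n=2)
    # The n=2 cap yields 3 columns; the spectrum survives as one string
    print(df.iloc[0].tolist())  # ['00', '12:30', '001;002;003']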
@@ -123,29 +114,27 @@ def read_txt_file(file, filename, logger):
         "number_particles_min_diameter_internal_data",
         "number_particles_no_hydrometeor",
         "number_particles_no_hydrometeor_internal_data",
-
-
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "number_particles_class_9",
-        "number_particles_class_9_internal_data",
+        "number_particles_unknown_classification",
+        "total_gross_volume_unknown_classification",
+        "number_particles_hail",
+        "total_gross_volume_hail",
+        "number_particles_solid_precipitation",
+        "total_gross_volume_solid_precipitation",
+        "number_particles_great_pellet",
+        "total_gross_volume_great_pellet",
+        "number_particles_small_pellet",
+        "total_gross_volume_small_pellet",
+        "number_particles_snowgrain",
+        "total_gross_volume_snowgrain",
+        "number_particles_rain",
+        "total_gross_volume_rain",
+        "number_particles_small_rain",
+        "total_gross_volume_small_rain",
+        "number_particles_drizzle",
+        "total_gross_volume_drizzle",
         "raw_drop_number",
     ]
-    df.columns =
+    df.columns = names
 
     # Deal with case if there are 61 timesteps
     # - Occurs sometimes when the previous hourly file misses timesteps
@@ -163,7 +152,12 @@ def read_txt_file(file, filename, logger):
     start_time = pd.to_datetime(start_time_str, format="%Y%m%d%H")
 
     # - Define timedelta based on sensor_time
+    # --> Add +24h to subsequent times when time resets
     dt = pd.to_timedelta(df["sensor_time"] + ":00").to_numpy().astype("m8[s]")
+    rollover_indices = np.where(np.diff(dt) < np.timedelta64(0, "s"))[0]
+    if rollover_indices.size > 0:
+        for idx in rollover_indices:
+            dt[idx + 1 :] += np.timedelta64(24, "h")
     dt = dt - dt[0]
 
     # - Define approximate time
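The inserted rollover handling addresses hourly files that straddle midnight: `sensor_time` is a wall-clock HH:MM, so the derived timedeltas jump backwards when the clock wraps. A self-contained sketch of the correction, using made-up clock times:

    import numpy as np
    import pandas as pd

    # Made-up sensor clock times straddling midnight
    sensor_time = pd.Series(["23:58", "23:59", "00:00", "00:01"])
    dt = pd.to_timedelta(sensor_time + ":00").to_numpy().astype("m8[s]")

    # A negative step marks the midnight rollover: shift later entries by +24h
    rollover_indices = np.where(np.diff(dt) < np.timedelta64(0, "s"))[0]
    for idx in rollover_indices:
        dt[idx + 1 :] += np.timedelta64(24, "h")

    print(dt - dt[0])  # [0, 60, 120, 180] seconds: monotonic again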
@@ -173,25 +167,15 @@ def read_txt_file(file, filename, logger):
     valid_rows = dt <= np.timedelta64(3540, "s")
     df = df[valid_rows]
 
-    # Drop rows where sample interval is not 60 seconds
-    df = df[df["sample_interval"] == "000060"]
-
     # Drop rows with invalid raw_drop_number
-    # --> 440 value  # 22x20
     # --> 400 here  # 20x20
     df = df[df["raw_drop_number"].astype(str).str.len() == 1599]
 
-    # Deal with old LPM version 20x20 spectrum
-    # - Add 000 in first two velocity bins
-    df["raw_drop_number"] = df["raw_drop_number"] + ";" + ";".join(["000"] * 40)
-
     # Drop columns not agreeing with DISDRODB L0 standards
     columns_to_drop = [
-        "
+        "start_identifier",
+        "sensor_serial_number",
         "sensor_time",
-        "dummy1",
-        "dummy2",
-        "id",
     ]
     df = df.drop(columns=columns_to_drop)
     return df
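The retained `== 1599` check and the removed padding are two views of the same layout arithmetic: a 20x20 spectrum is 400 three-digit counts joined by 399 semicolons, and the dropped code promoted it to the 22x20 layout (440 values) by appending 40 zero bins. A quick check of those numbers:

    # 20x20 spectrum: 400 three-digit counts joined by ';'
    spectrum_20x20 = ";".join(["000"] * 400)
    assert len(spectrum_20x20) == 400 * 3 + 399 == 1599

    # The removed padding appended 40 extra '000' bins: 400 + 40 = 440 = 22 * 20
    padded = spectrum_20x20 + ";" + ";".join(["000"] * 40)
    assert padded.count(";") + 1 == 440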
@@ -0,0 +1,240 @@
+# -----------------------------------------------------------------------------.
+# Copyright (c) 2021-2023 DISDRODB developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# -----------------------------------------------------------------------------.
+"""DISDRODB reader for GID LPM V0 sensor (TC-TO) with incorrect reported time."""
+import numpy as np
+import pandas as pd
+
+from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
+from disdrodb.l0.l0a_processing import read_raw_text_file
+from disdrodb.utils.logger import log_error, log_warning
+
+
+def read_txt_file(file, filename, logger):
+    """Parse a TC-TO LPM hourly file."""
+    #### - Define raw data headers
+    column_names = ["TO_PARSE"]
+
+    ##------------------------------------------------------------------------.
+    #### Define reader options
+    # - For more info: https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
+    reader_kwargs = {}
+
+    # - Define delimiter
+    reader_kwargs["delimiter"] = "\\n"
+
+    # - Avoid first column to become df index !!!
+    reader_kwargs["index_col"] = False
+
+    # Since column names are expected to be passed explicitly, header is set to None
+    reader_kwargs["header"] = None
+
+    # - Number of rows to be skipped at the beginning of the file
+    reader_kwargs["skiprows"] = None
+
+    # - Define behaviour when encountering bad lines
+    reader_kwargs["on_bad_lines"] = "skip"
+
+    # - Define reader engine
+    #   - C engine is faster
+    #   - Python engine is more feature-complete
+    reader_kwargs["engine"] = "python"
+
+    # - Define on-the-fly decompression of on-disk data
+    #   - Available: gzip, bz2, zip
+    reader_kwargs["compression"] = "infer"
+
+    # - Strings to recognize as NA/NaN and replace with standard NA flags
+    #   - Already included: '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN',
+    #                       '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A',
+    #                       'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'
+    reader_kwargs["na_values"] = ["na", "", "error"]
+
+    ##------------------------------------------------------------------------.
+    #### Read the data
+    df = read_raw_text_file(
+        filepath=file,
+        column_names=column_names,
+        reader_kwargs=reader_kwargs,
+        logger=logger,
+    )
+
+    ##------------------------------------------------------------------------.
+    #### Adapt the dataframe to adhere to DISDRODB L0 standards
+    # Raise error if empty file
+    if len(df) == 0:
+        raise ValueError(f"{filename} is empty.")
+
+    # Select only rows with expected number of delimiters
+    df = df[df["TO_PARSE"].str.count(";") == 442]
+
+    # Check there are still valid rows
+    if len(df) == 0:
+        raise ValueError(f"No valid rows in {filename}.")
+
+    # Split by ; delimiter (before raw drop number)
+    df = df["TO_PARSE"].str.split(";", expand=True, n=43)
+
+    # Assign column names
+    names = [
+        "start_identifier",
+        "sensor_serial_number",
+        "weather_code_synop_4680_5min",
+        "weather_code_metar_4678_5min",
+        "precipitation_rate_5min",
+        "weather_code_synop_4680",
+        "weather_code_metar_4678",
+        "precipitation_rate",
+        "precipitation_accumulated",
+        "sensor_time",
+        "temperature_interior",
+        "laser_temperature",
+        "laser_current_average",
+        "control_voltage",
+        "optical_control_voltage_output",
+        "number_particles",
+        "number_particles_internal_data",
+        "number_particles_min_speed",
+        "number_particles_min_speed_internal_data",
+        "number_particles_max_speed",
+        "number_particles_max_speed_internal_data",
+        "number_particles_min_diameter",
+        "number_particles_min_diameter_internal_data",
+        "number_particles_no_hydrometeor",
+        "number_particles_no_hydrometeor_internal_data",
+        "number_particles_unknown_classification",
+        "total_gross_volume_unknown_classification",
+        "number_particles_hail",
+        "total_gross_volume_hail",
+        "number_particles_solid_precipitation",
+        "total_gross_volume_solid_precipitation",
+        "number_particles_great_pellet",
+        "total_gross_volume_great_pellet",
+        "number_particles_small_pellet",
+        "total_gross_volume_small_pellet",
+        "number_particles_snowgrain",
+        "total_gross_volume_snowgrain",
+        "number_particles_rain",
+        "total_gross_volume_rain",
+        "number_particles_small_rain",
+        "total_gross_volume_small_rain",
+        "number_particles_drizzle",
+        "total_gross_volume_drizzle",
+        "raw_drop_number",
+    ]
+    df.columns = names
+
+    # Deal with case if there are 61 timesteps
+    # - Occurs sometimes when the previous hourly file misses timesteps
+    if len(df) == 61:
+        log_warning(logger=logger, msg=f"{filename} contains 61 timesteps. Dropping the first.")
+        df = df.iloc[1:]
+
+    # Raise error if more than 60 timesteps/rows
+    n_rows = len(df)
+    if n_rows > 60:
+        raise ValueError(f"The hourly file contains {n_rows} timesteps.")
+
+    # Infer and define "time" column
+    start_time_str = filename.split(".")[0]  # '2024020200.txt'
+    start_time = pd.to_datetime(start_time_str, format="%Y%m%d%H")
+
+    # - Define timedelta based on sensor_time
+    dt = pd.to_timedelta(df["sensor_time"] + ":00").to_numpy().astype("m8[s]")
+    rollover_indices = np.where(np.diff(dt) < np.timedelta64(0, "s"))[0]
+    if rollover_indices.size > 0:
+        for idx in rollover_indices:
+            dt[idx + 1 :] += np.timedelta64(24, "h")
+    dt = dt - dt[0]
+
+    # - Define approximate time
+    df["time"] = start_time + dt
+
+    # - Keep rows where time increment is between 00 and 59 minutes
+    valid_rows = dt <= np.timedelta64(3540, "s")
+    df = df[valid_rows]
+
+    # Drop rows with invalid raw_drop_number
+    # --> 440 value  # 22x20
+    df = df[df["raw_drop_number"].astype(str).str.len() == 1599]
+
+    # Drop columns not agreeing with DISDRODB L0 standards
+    columns_to_drop = [
+        "sensor_time",
+        "start_identifier",
+        "sensor_serial_number",
+    ]
+    df = df.drop(columns=columns_to_drop)
+    return df
+
+
+@is_documented_by(reader_generic_docstring)
+def reader(
+    filepath,
+    logger=None,
+):
+    """Reader."""
+    import zipfile
+
+    ##------------------------------------------------------------------------.
+    # filename = os.path.basename(filepath)
+    # return read_txt_file(file=filepath, filename=filename, logger=logger)
+
+    # ---------------------------------------------------------------------.
+    #### Iterate over all files (aka timesteps) in the daily zip archive
+    # - Each file contains a single timestep!
+    # list_df = []
+    # with tempfile.TemporaryDirectory() as temp_dir:
+    #     # Extract all files
+    #     unzip_file_on_terminal(filepath, temp_dir)
+
+    #     # Walk through extracted files
+    #     for root, _, files in os.walk(temp_dir):
+    #         for filename in sorted(files):
+    #             if filename.endswith(".txt"):
+    #                 full_path = os.path.join(root, filename)
+    #                 try:
+    #                     df = read_txt_file(file=full_path, filename=filename, logger=logger)
+    #                     if df is not None:
+    #                         list_df.append(df)
+    #                 except Exception as e:
+    #                     msg = f"An error occurred while reading {filename}: {e}"
+    #                     log_error(logger=logger, msg=msg, verbose=True)
+
+    list_df = []
+    with zipfile.ZipFile(filepath, "r") as zip_ref:
+        filenames = sorted(zip_ref.namelist())
+        for filename in filenames:
+            if filename.endswith(".txt"):
+                # Open file
+                with zip_ref.open(filename) as file:
+                    try:
+                        df = read_txt_file(file=file, filename=filename, logger=logger)
+                        if df is not None:
+                            list_df.append(df)
+                    except Exception as e:
+                        msg = f"An error occurred while reading {filename}. The error is: {e}"
+                        log_error(logger=logger, msg=msg, verbose=True)
+
+    # Check the zip file contains at least some non-empty files
+    if len(list_df) == 0:
+        raise ValueError(f"{filepath} contains only empty files!")
+
+    # Concatenate all dataframes into a single one
+    df = pd.concat(list_df)
+
+    # ---------------------------------------------------------------------.
+    return df
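The `str.count(";") == 442` row filter in this new reader is consistent with the field layout above: the 44 named fields are separated by 43 semicolons, and the trailing `raw_drop_number` field carries a 400-value spectrum with 399 internal semicolons (43 + 399 = 442). A quick check of that arithmetic:

    # 44 named fields -> 43 separators between them; the final field
    # (raw_drop_number) holds 400 three-digit values -> 399 internal separators
    n_fields = 44
    n_spectrum_values = 400
    assert (n_fields - 1) + (n_spectrum_values - 1) == 442
    # which also matches the later length check: 400 * 3 + 399 == 1599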
@@ -0,0 +1,227 @@
+# -----------------------------------------------------------------------------.
+# Copyright (c) 2021-2023 DISDRODB developers
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+# -----------------------------------------------------------------------------.
+"""DISDRODB reader for EUSKALMET OTT Parsivel raw data."""
+# import os
+# import tempfile
+# from disdrodb.utils.compression import unzip_file_on_terminal
+
+import numpy as np
+import pandas as pd
+
+from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
+from disdrodb.l0.l0a_processing import read_raw_text_file
+from disdrodb.utils.logger import log_error
+
+COLUMN_DICT = {
+    "01": "rainfall_rate_32bit",
+    "02": "rainfall_accumulated_32bit",
+    "03": "weather_code_synop_4680",
+    "04": "weather_code_synop_4677",
+    "05": "weather_code_metar_4678",  # empty
+    "06": "weather_code_nws",  # empty
+    "07": "reflectivity_32bit",
+    "08": "mor_visibility",
+    "09": "sample_interval",
+    "10": "laser_amplitude",
+    "11": "number_particles",
+    "12": "sensor_temperature",
+    # "13": "sensor_serial_number",
+    # "14": "firmware_iop",
+    # "15": "firmware_dsp",
+    "16": "sensor_heating_current",
+    "17": "sensor_battery_voltage",
+    "18": "sensor_status",
+    # "19": "start_time",
+    # "20": "sensor_time",
+    # "21": "sensor_date",
+    # "22": "station_name",
+    # "23": "station_number",
+    "24": "rainfall_amount_absolute_32bit",
+    "25": "error_code",
+    "30": "rainfall_rate_16bit",
+    "31": "rainfall_rate_12bit",
+    "32": "rainfall_accumulated_16bit",
+    "90": "raw_drop_concentration",
+    "91": "raw_drop_average_velocity",
+    "93": "raw_drop_number",
+}
+
+
+def infill_missing_columns(df):
+    """Infill missing columns with NaN."""
+    columns = set(COLUMN_DICT.values())
+    for c in columns:
+        if c not in df.columns:
+            df[c] = "NaN"
+    return df
+
+
+def read_txt_file(file, filename, logger):
+    """Parse a single txt file within the daily zip file."""
+    ##------------------------------------------------------------------------.
+    #### Define column names
+    column_names = ["TO_PARSE"]
+
+    ##------------------------------------------------------------------------.
+    #### Define reader options
+    reader_kwargs = {}
+    # - Define delimiter
+    reader_kwargs["delimiter"] = "\\n"
+    # - Skip first row as columns names
+    # - Define encoding
+    reader_kwargs["encoding"] = "latin"  # "ISO-8859-1"
+    # - Avoid first column to become df index !!!
+    reader_kwargs["index_col"] = False
+    # - Define behaviour when encountering bad lines
+    reader_kwargs["on_bad_lines"] = "skip"
+    # - Define reader engine
+    #   - C engine is faster
+    #   - Python engine is more feature-complete
+    reader_kwargs["engine"] = "python"
+    # - Define on-the-fly decompression of on-disk data
+    #   - Available: gzip, bz2, zip
+    reader_kwargs["compression"] = "infer"
+    # - Strings to recognize as NA/NaN and replace with standard NA flags
+    #   - Already included: '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN',
+    #                       '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A',
+    #                       'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'
+    reader_kwargs["na_values"] = ["na", "", "error"]
+
+    ##------------------------------------------------------------------------.
+    #### Read the data
+    df = read_raw_text_file(
+        filepath=file,
+        column_names=column_names,
+        reader_kwargs=reader_kwargs,
+        logger=logger,
+    )
+
+    ##------------------------------------------------------------------------.
+    #### Adapt the dataframe to adhere to DISDRODB L0 standards
+    # Empty file, raise error
+    if len(df) == 0:
+        raise ValueError(f"{filename} is empty.")
+
+    # Select rows with valid spectrum
+    # df = df[df["TO_PARSE"].str.count(";") == 1191]  # 1112
+
+    # Raise error if corrupted file
+    if len(df) == 4:
+        raise ValueError(f"{filename} is corrupted.")
+
+    # Extract string
+    string = df["TO_PARSE"].iloc[4]
+
+    # Split into lines
+    decoded_text = string.encode().decode("unicode_escape")
+    decoded_text = decoded_text.replace("'", "").replace('"', "")
+    lines = decoded_text.split()
+
+    # Extract time
+    time_str = lines[0].split(",")[1]
+
+    # Split each line at the first colon
+    data = [line.split(":", 1) for line in lines if ":" in line]
+
+    # Create the DataFrame
+    df = pd.DataFrame(data, columns=["ID", "Value"])
+
+    # Drop rows with invalid IDs
+    valid_id_str = np.char.rjust(np.arange(0, 94).astype(str), width=2, fillchar="0")
+    df = df[df["ID"].astype(str).isin(valid_id_str)]
+
+    # Select only rows with values
+    df = df[df["Value"].apply(lambda x: x is not None)]
+
+    # Reshape dataframe
+    df = df.set_index("ID").T
+
+    # Assign column names
+    df = df.rename(COLUMN_DICT, axis=1)
+
+    # Keep only columns defined in the dictionary
+    df = df.filter(items=list(COLUMN_DICT.values()))
+
+    # Infill missing columns
+    df = infill_missing_columns(df)
+
+    # Add time column as datetime dtype
+    df["time"] = pd.to_datetime(time_str, format="%Y%m%d%H%M%S", errors="coerce")
+
+    # Preprocess the raw spectrum and raw_drop_average_velocity
+    # - Add 0 before every ; if ; not preceded by a digit
+    # - Example: ';;1;;' --> '0;0;1;0;'
+    df["raw_drop_number"] = df["raw_drop_number"].str.replace(r"(?<!\d);", "0;", regex=True)
+    df["raw_drop_average_velocity"] = df["raw_drop_average_velocity"].str.replace(r"(?<!\d);", "0;", regex=True)
+
+    # Return the dataframe adhering to DISDRODB L0 standards
+    return df
+
+
+@is_documented_by(reader_generic_docstring)
+def reader(
+    filepath,
+    logger=None,
+):
+    """Reader."""
+    import zipfile
+
+    # ---------------------------------------------------------------------.
+    #### Iterate over all files (aka timesteps) in the daily zip archive
+    # - Each file contains a single timestep!
+    # list_df = []
+    # with tempfile.TemporaryDirectory() as temp_dir:
+    #     # Extract all files
+    #     unzip_file_on_terminal(filepath, temp_dir)
+
+    #     # Walk through extracted files
+    #     for root, _, files in os.walk(temp_dir):
+    #         for filename in sorted(files):
+    #             if filename.endswith(".txt"):
+    #                 full_path = os.path.join(root, filename)
+    #                 try:
+    #                     df = read_txt_file(file=full_path, filename=filename, logger=logger)
+    #                     if df is not None:
+    #                         list_df.append(df)
+    #                 except Exception as e:
+    #                     msg = f"An error occurred while reading {filename}: {e}"
+    #                     log_error(logger=logger, msg=msg, verbose=True)
+
+    list_df = []
+    with zipfile.ZipFile(filepath, "r") as zip_ref:
+        filenames = sorted(zip_ref.namelist())
+        for filename in filenames:
+            if filename.endswith(".dat"):
+                # Open file
+                with zip_ref.open(filename) as file:
+                    try:
+                        df = read_txt_file(file=file, filename=filename, logger=logger)
+                        if df is not None:
+                            list_df.append(df)
+                    except Exception as e:
+                        msg = f"An error occurred while reading {filename}. The error is: {e}."
+                        log_error(logger=logger, msg=msg, verbose=True)
+
+    # Check the zip file contains at least some non-empty files
+    if len(list_df) == 0:
+        raise ValueError(f"{filepath} contains only empty files!")
+
+    # Concatenate all dataframes into a single one
+    df = pd.concat(list_df)
+
+    # ---------------------------------------------------------------------.
+    return df
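The `(?<!\d);` negative lookbehind in the new EUSKALMET reader matches only semicolons not preceded by a digit, so empty spectrum bins get an explicit zero while populated bins are left alone. A self-contained check reproducing the example from the code comment:

    import pandas as pd

    s = pd.Series([";;1;;"])
    out = s.str.replace(r"(?<!\d);", "0;", regex=True)
    assert out.iloc[0] == "0;0;1;0;"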
@@ -15,21 +15,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 # -----------------------------------------------------------------------------.
-"""This reader allows to read raw data from NASA APU stations.
-
-The reader allows to read raw APU data from the following NASA campaigns:
-
-- HYMEX
-- IFLOODS
-- IPHEX
-- OLYMPEX
-- ICEPOP
-- IMPACTS
-- GCPEX
-- WFF
-
-"""
-
 import pandas as pd
 
 from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
@@ -44,13 +29,13 @@ def reader(
     """Reader."""
     ##------------------------------------------------------------------------.
     #### Define column names
-    column_names = ["
+    column_names = ["TO_PARSE"]
 
     ##------------------------------------------------------------------------.
     #### Define reader options
     reader_kwargs = {}
     # - Define delimiter
-    reader_kwargs["delimiter"] = "
+    reader_kwargs["delimiter"] = "\\n"
     # - Skip first row as columns names
     reader_kwargs["header"] = None
     reader_kwargs["skiprows"] = 0
@@ -84,26 +69,29 @@ def reader(
 
     ##------------------------------------------------------------------------.
     #### Adapt the dataframe to adhere to DISDRODB L0 standards
+    # Remove rows with invalid number of separators
+    df = df[df["TO_PARSE"].str.count(";") == 1]
+    if len(df) == 0:
+        raise ValueError(f"No valid data in {filepath}")
+
+    # Split the columns
+    df = df["TO_PARSE"].str.split(";", n=2, expand=True)
+
+    # Assign column names
+    df.columns = ["time", "TO_BE_SPLITTED"]
+
     # Convert time column to datetime
     df_time = pd.to_datetime(df["time"], format="%Y%m%d%H%M%S", errors="coerce")
 
     # Split the 'TO_BE_SPLITTED' column
     df = df["TO_BE_SPLITTED"].str.split(",", n=3, expand=True)
+    df.columns = ["station_id", "sensor_status", "sensor_temperature", "raw_drop_number"]
 
-    #
-    names = [
-        "station_name",
-        "unknown",
-        "unknown2",
-        "raw_drop_number",
-    ]
-    df.columns = names
-
-    # Add the time column
+    # Add time
     df["time"] = df_time
 
     # Drop columns not agreeing with DISDRODB L0 standards
-    df = df.drop(columns=["
+    df = df.drop(columns=["station_id"])
 
     # Return the dataframe adhering to DISDRODB L0 standards
     return df
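The reworked APU parsing is a two-stage split: the single `;` separates the timestamp from the payload, and `n=3` on the comma split keeps everything after the third comma together as `raw_drop_number`. A minimal sketch on a made-up record (the station id, status, temperature, and spectrum values are hypothetical):

    import pandas as pd

    # Hypothetical record: time;station_id,status,temperature,spectrum
    row = pd.Series(["20140101000000;apu01,00,23.5,0,0,1,2"])
    parts = row.str.split(";", n=2, expand=True)
    payload = parts[1].str.split(",", n=3, expand=True)
    payload.columns = ["station_id", "sensor_status", "sensor_temperature", "raw_drop_number"]
    print(payload["raw_drop_number"].iloc[0])  # '0,0,1,2' remains one field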
@@ -116,7 +116,7 @@ def reader(
         return df
     # ---------------------------------------------------------
     #### Case of 1032 delimiters
-    if n_delimiters == 1033:  # (most of the files)
+    if n_delimiters == 1033:  # (most of the files ... PIERS FORMAT)
         # Select valid rows
         df = df.loc[df["TO_BE_SPLITTED"].str.count(",") == 1033]
         # Get time column