disdrodb 0.1.5__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125) hide show
  1. disdrodb/__init__.py +1 -5
  2. disdrodb/_version.py +2 -2
  3. disdrodb/accessor/methods.py +22 -4
  4. disdrodb/api/checks.py +10 -0
  5. disdrodb/api/io.py +20 -18
  6. disdrodb/api/path.py +42 -77
  7. disdrodb/api/search.py +89 -23
  8. disdrodb/cli/disdrodb_create_summary.py +1 -1
  9. disdrodb/cli/disdrodb_run_l0.py +1 -1
  10. disdrodb/cli/disdrodb_run_l0a.py +1 -1
  11. disdrodb/cli/disdrodb_run_l0b.py +1 -1
  12. disdrodb/cli/disdrodb_run_l0c.py +1 -1
  13. disdrodb/cli/disdrodb_run_l1.py +1 -1
  14. disdrodb/cli/disdrodb_run_l2e.py +1 -1
  15. disdrodb/cli/disdrodb_run_l2m.py +1 -1
  16. disdrodb/configs.py +30 -83
  17. disdrodb/constants.py +4 -3
  18. disdrodb/data_transfer/download_data.py +4 -2
  19. disdrodb/docs.py +2 -2
  20. disdrodb/etc/products/L1/1MIN.yaml +13 -0
  21. disdrodb/etc/products/L1/LPM/1MIN.yaml +13 -0
  22. disdrodb/etc/products/L1/LPM_V0/1MIN.yaml +13 -0
  23. disdrodb/etc/products/L1/PARSIVEL/1MIN.yaml +13 -0
  24. disdrodb/etc/products/L1/PARSIVEL2/1MIN.yaml +13 -0
  25. disdrodb/etc/products/L1/PWS100/1MIN.yaml +13 -0
  26. disdrodb/etc/products/L1/RD80/1MIN.yaml +13 -0
  27. disdrodb/etc/products/L1/SWS250/1MIN.yaml +13 -0
  28. disdrodb/etc/products/L1/global.yaml +6 -0
  29. disdrodb/etc/products/L2E/10MIN.yaml +1 -12
  30. disdrodb/etc/products/L2E/global.yaml +1 -1
  31. disdrodb/etc/products/L2M/MODELS/NGAMMA_GS_R_MAE.yaml +6 -0
  32. disdrodb/etc/products/L2M/global.yaml +1 -1
  33. disdrodb/issue/checks.py +2 -2
  34. disdrodb/l0/check_configs.py +1 -1
  35. disdrodb/l0/configs/LPM/l0a_encodings.yml +0 -1
  36. disdrodb/l0/configs/LPM/l0b_cf_attrs.yml +0 -4
  37. disdrodb/l0/configs/LPM/l0b_encodings.yml +9 -9
  38. disdrodb/l0/configs/LPM/raw_data_format.yml +11 -11
  39. disdrodb/l0/configs/LPM_V0/bins_diameter.yml +103 -0
  40. disdrodb/l0/configs/LPM_V0/bins_velocity.yml +103 -0
  41. disdrodb/l0/configs/LPM_V0/l0a_encodings.yml +45 -0
  42. disdrodb/l0/configs/LPM_V0/l0b_cf_attrs.yml +180 -0
  43. disdrodb/l0/configs/LPM_V0/l0b_encodings.yml +410 -0
  44. disdrodb/l0/configs/LPM_V0/raw_data_format.yml +474 -0
  45. disdrodb/l0/configs/PARSIVEL/l0b_encodings.yml +1 -1
  46. disdrodb/l0/configs/PARSIVEL/raw_data_format.yml +8 -8
  47. disdrodb/l0/configs/PARSIVEL2/raw_data_format.yml +9 -9
  48. disdrodb/l0/l0_reader.py +2 -2
  49. disdrodb/l0/l0a_processing.py +6 -2
  50. disdrodb/l0/l0b_processing.py +26 -19
  51. disdrodb/l0/l0c_processing.py +17 -3
  52. disdrodb/l0/manuals/LPM_V0.pdf +0 -0
  53. disdrodb/l0/readers/LPM/ITALY/GID_LPM.py +15 -7
  54. disdrodb/l0/readers/LPM/ITALY/GID_LPM_PI.py +279 -0
  55. disdrodb/l0/readers/LPM/ITALY/GID_LPM_T.py +276 -0
  56. disdrodb/l0/readers/LPM/ITALY/GID_LPM_W.py +2 -2
  57. disdrodb/l0/readers/LPM/NETHERLANDS/DELFT_RWANDA_LPM_NC.py +103 -0
  58. disdrodb/l0/readers/LPM/NORWAY/HAUKELISETER_LPM.py +216 -0
  59. disdrodb/l0/readers/LPM/NORWAY/NMBU_LPM.py +208 -0
  60. disdrodb/l0/readers/LPM/UK/WITHWORTH_LPM.py +219 -0
  61. disdrodb/l0/readers/LPM/USA/CHARLESTON.py +229 -0
  62. disdrodb/l0/readers/{LPM → LPM_V0}/BELGIUM/ULIEGE.py +33 -49
  63. disdrodb/l0/readers/LPM_V0/ITALY/GID_LPM_V0.py +240 -0
  64. disdrodb/l0/readers/PARSIVEL/BASQUECOUNTRY/EUSKALMET_OTT.py +227 -0
  65. disdrodb/l0/readers/{PARSIVEL2 → PARSIVEL}/NASA/LPVEX.py +16 -28
  66. disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/MC3E.py +1 -1
  67. disdrodb/l0/readers/PARSIVEL/NCAR/VORTEX2_2010_UF.py +3 -3
  68. disdrodb/l0/readers/PARSIVEL2/BASQUECOUNTRY/EUSKALMET_OTT2.py +232 -0
  69. disdrodb/l0/readers/PARSIVEL2/DENMARK/EROSION_raw.py +1 -1
  70. disdrodb/l0/readers/PARSIVEL2/JAPAN/PRECIP.py +155 -0
  71. disdrodb/l0/readers/PARSIVEL2/MPI/BCO_PARSIVEL2.py +14 -7
  72. disdrodb/l0/readers/PARSIVEL2/MPI/BOWTIE.py +8 -3
  73. disdrodb/l0/readers/PARSIVEL2/NASA/APU.py +28 -5
  74. disdrodb/l0/readers/PARSIVEL2/NCAR/RELAMPAGO_PARSIVEL2.py +1 -1
  75. disdrodb/l0/readers/PARSIVEL2/{GPM/GCPEX.py → NORWAY/UIB.py} +54 -29
  76. disdrodb/l0/readers/PARSIVEL2/PHILIPPINES/{PANGASA.py → PAGASA.py} +6 -3
  77. disdrodb/l0/readers/PARSIVEL2/SPAIN/GRANADA.py +1 -1
  78. disdrodb/l0/readers/PARSIVEL2/SWEDEN/SMHI.py +189 -0
  79. disdrodb/l0/readers/{PARSIVEL/GPM/PIERS.py → PARSIVEL2/USA/CSU.py} +62 -29
  80. disdrodb/l0/readers/PARSIVEL2/USA/{C3WE.py → CW3E.py} +51 -24
  81. disdrodb/l0/readers/{PARSIVEL/GPM/IFLOODS.py → RD80/BRAZIL/ATTO_RD80.py} +50 -34
  82. disdrodb/l0/readers/{SW250 → SWS250}/BELGIUM/KMI.py +1 -1
  83. disdrodb/l1/beard_model.py +45 -1
  84. disdrodb/l1/fall_velocity.py +1 -6
  85. disdrodb/l1/filters.py +2 -0
  86. disdrodb/l1/processing.py +6 -5
  87. disdrodb/l1/resampling.py +101 -38
  88. disdrodb/l2/empirical_dsd.py +12 -8
  89. disdrodb/l2/processing.py +4 -3
  90. disdrodb/metadata/search.py +3 -4
  91. disdrodb/routines/l0.py +4 -4
  92. disdrodb/routines/l1.py +173 -60
  93. disdrodb/routines/l2.py +121 -269
  94. disdrodb/routines/options.py +347 -0
  95. disdrodb/routines/wrappers.py +9 -1
  96. disdrodb/scattering/axis_ratio.py +3 -0
  97. disdrodb/scattering/routines.py +1 -1
  98. disdrodb/summary/routines.py +765 -724
  99. disdrodb/utils/archiving.py +51 -44
  100. disdrodb/utils/attrs.py +1 -1
  101. disdrodb/utils/compression.py +4 -2
  102. disdrodb/utils/dask.py +35 -15
  103. disdrodb/utils/dict.py +33 -0
  104. disdrodb/utils/encoding.py +1 -1
  105. disdrodb/utils/manipulations.py +7 -1
  106. disdrodb/utils/routines.py +9 -8
  107. disdrodb/utils/time.py +9 -1
  108. disdrodb/viz/__init__.py +0 -13
  109. disdrodb/viz/plots.py +209 -0
  110. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/METADATA +1 -1
  111. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/RECORD +124 -95
  112. disdrodb/l0/readers/PARSIVEL/GPM/LPVEX.py +0 -85
  113. /disdrodb/etc/products/L2M/{GAMMA_GS_ND_MAE.yaml → MODELS/GAMMA_GS_ND_MAE.yaml} +0 -0
  114. /disdrodb/etc/products/L2M/{GAMMA_ML.yaml → MODELS/GAMMA_ML.yaml} +0 -0
  115. /disdrodb/etc/products/L2M/{LOGNORMAL_GS_LOG_ND_MAE.yaml → MODELS/LOGNORMAL_GS_LOG_ND_MAE.yaml} +0 -0
  116. /disdrodb/etc/products/L2M/{LOGNORMAL_GS_ND_MAE.yaml → MODELS/LOGNORMAL_GS_ND_MAE.yaml} +0 -0
  117. /disdrodb/etc/products/L2M/{LOGNORMAL_ML.yaml → MODELS/LOGNORMAL_ML.yaml} +0 -0
  118. /disdrodb/etc/products/L2M/{NGAMMA_GS_LOG_ND_MAE.yaml → MODELS/NGAMMA_GS_LOG_ND_MAE.yaml} +0 -0
  119. /disdrodb/etc/products/L2M/{NGAMMA_GS_ND_MAE.yaml → MODELS/NGAMMA_GS_ND_MAE.yaml} +0 -0
  120. /disdrodb/etc/products/L2M/{NGAMMA_GS_Z_MAE.yaml → MODELS/NGAMMA_GS_Z_MAE.yaml} +0 -0
  121. /disdrodb/l0/readers/PARSIVEL2/{GPM → NASA}/NSSTC.py +0 -0
  122. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/WHEEL +0 -0
  123. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/entry_points.txt +0 -0
  124. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/licenses/LICENSE +0 -0
  125. {disdrodb-0.1.5.dist-info → disdrodb-0.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,276 @@
1
+ # -----------------------------------------------------------------------------.
2
+ # Copyright (c) 2021-2023 DISDRODB developers
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU General Public License as published by
6
+ # the Free Software Foundation, either version 3 of the License, or
7
+ # (at your option) any later version.
8
+ #
9
+ # This program is distributed in the hope that it will be useful,
10
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
11
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
+ # GNU General Public License for more details.
13
+ #
14
+ # You should have received a copy of the GNU General Public License
15
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
+ # -----------------------------------------------------------------------------.
17
+ """DISDRODB reader for GID LPM sensors not reporting time (TC-MI2, TC-MI3, TC-ER and TC-FI)."""
18
+ import numpy as np
19
+ import pandas as pd
20
+
21
+ from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
22
+ from disdrodb.l0.l0a_processing import read_raw_text_file
23
+ from disdrodb.utils.logger import log_error, log_warning
24
+
25
+
26
def read_txt_file(file, filename, logger):
    """Parse a single LPM hourly text file into a DISDRODB L0A dataframe.

    Parameters
    ----------
    file : str or file-like
        Path or open binary/text handle with the raw hourly LPM records.
    filename : str
        Name of the file. The hour start time is inferred from it
        (e.g. ``2024020200.txt`` -> 2024-02-02 00:00).
    logger : logging.Logger or None
        Logger used to report warnings.

    Returns
    -------
    pandas.DataFrame
        Dataframe with one row per timestep and an inferred ``time`` column.

    Raises
    ------
    ValueError
        If the file has no valid rows or contains more than 60 timesteps.
    """
    #### - Define raw data headers
    # - The whole record is read into a single column and parsed afterwards
    column_names = ["TO_PARSE"]

    ##------------------------------------------------------------------------.
    #### Define reader options
    # - For more info: https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
    reader_kwargs = {}

    # - Define delimiter
    reader_kwargs["delimiter"] = "\\n"

    # - Avoid first column to become df index !!!
    reader_kwargs["index_col"] = False

    # Since column names are expected to be passed explicitly, header is set to None
    reader_kwargs["header"] = None

    # - Number of rows to be skipped at the beginning of the file
    reader_kwargs["skiprows"] = None

    # - Define behaviour when encountering bad lines
    reader_kwargs["on_bad_lines"] = "skip"

    # - Define reader engine
    # - C engine is faster
    # - Python engine is more feature-complete
    reader_kwargs["engine"] = "python"

    # - Define on-the-fly decompression of on-disk data
    # - Available: gzip, bz2, zip
    reader_kwargs["compression"] = "infer"

    # - Strings to recognize as NA/NaN and replace with standard NA flags
    # - Already included: '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN',
    #                     '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A',
    #                     'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'
    reader_kwargs["na_values"] = ["na", "", "error"]

    ##------------------------------------------------------------------------.
    #### Read the data
    df = read_raw_text_file(
        filepath=file,
        column_names=column_names,
        reader_kwargs=reader_kwargs,
        logger=logger,
    )

    ##------------------------------------------------------------------------.
    #### Adapt the dataframe to adhere to DISDRODB L0 standards
    # Count number of delimiters to identify valid rows
    df = df[df["TO_PARSE"].str.count(";") == 520]

    # Check there are still valid rows
    if len(df) == 0:
        raise ValueError(f"No valid rows in {filename}.")

    # Split by ; delimiter (before raw drop number)
    df = df["TO_PARSE"].str.split(";", expand=True, n=79)

    # Assign column names
    column_names = [
        "start_identifier",
        "device_address",
        "sensor_serial_number",
        "sensor_date",
        "sensor_time",
        "weather_code_synop_4677_5min",
        "weather_code_synop_4680_5min",
        "weather_code_metar_4678_5min",
        "precipitation_rate_5min",
        "weather_code_synop_4677",
        "weather_code_synop_4680",
        "weather_code_metar_4678",
        "precipitation_rate",
        "rainfall_rate",
        "snowfall_rate",
        "precipitation_accumulated",
        "mor_visibility",
        "reflectivity",
        "quality_index",
        "max_hail_diameter",
        "laser_status",
        "static_signal_status",
        "laser_temperature_analog_status",
        "laser_temperature_digital_status",
        "laser_current_analog_status",
        "laser_current_digital_status",
        "sensor_voltage_supply_status",
        "current_heating_pane_transmitter_head_status",
        "current_heating_pane_receiver_head_status",
        "temperature_sensor_status",
        "current_heating_voltage_supply_status",
        "current_heating_house_status",
        "current_heating_heads_status",
        "current_heating_carriers_status",
        "control_output_laser_power_status",
        "reserved_status",
        "temperature_interior",
        "laser_temperature",
        "laser_current_average",
        "control_voltage",
        "optical_control_voltage_output",
        "sensor_voltage_supply",
        "current_heating_pane_transmitter_head",
        "current_heating_pane_receiver_head",
        "temperature_ambient",
        "current_heating_voltage_supply",
        "current_heating_house",
        "current_heating_heads",
        "current_heating_carriers",
        "number_particles",
        "number_particles_internal_data",
        "number_particles_min_speed",
        "number_particles_min_speed_internal_data",
        "number_particles_max_speed",
        "number_particles_max_speed_internal_data",
        "number_particles_min_diameter",
        "number_particles_min_diameter_internal_data",
        "number_particles_no_hydrometeor",
        "number_particles_no_hydrometeor_internal_data",
        "number_particles_unknown_classification",
        "number_particles_unknown_classification_internal_data",
        "number_particles_class_1",
        "number_particles_class_1_internal_data",
        "number_particles_class_2",
        "number_particles_class_2_internal_data",
        "number_particles_class_3",
        "number_particles_class_3_internal_data",
        "number_particles_class_4",
        "number_particles_class_4_internal_data",
        "number_particles_class_5",
        "number_particles_class_5_internal_data",
        "number_particles_class_6",
        "number_particles_class_6_internal_data",
        "number_particles_class_7",
        "number_particles_class_7_internal_data",
        "number_particles_class_8",
        "number_particles_class_8_internal_data",
        "number_particles_class_9",
        "number_particles_class_9_internal_data",
        "raw_drop_number",
    ]
    df.columns = column_names

    # Deal with case if there are 61 timesteps
    # - Occurs sometimes when previous hourly file miss timesteps
    if len(df) == 61:
        log_warning(logger=logger, msg=f"{filename} contains 61 timesteps. Dropping the first.")
        df = df.iloc[1:]

    # Raise error if more than 60 timesteps/rows
    n_rows = len(df)
    if n_rows > 60:
        raise ValueError(f"The hourly file contains {n_rows} timesteps.")

    # Infer and define "time" column
    start_time_str = filename.split(".")[0]  # '2024020200.txt'
    start_time = pd.to_datetime(start_time_str, format="%Y%m%d%H")

    # - Define timedelta based on sensor_time
    # --> Add +24h to subsequent times when time resets (midnight rollover)
    dt = pd.to_timedelta(df["sensor_time"]).to_numpy().astype("m8[s]")
    rollover_indices = np.where(np.diff(dt) < np.timedelta64(0, "s"))[0]
    if rollover_indices.size > 0:
        for idx in rollover_indices:
            dt[idx + 1 :] += np.timedelta64(24, "h")
    dt = dt - dt[0]

    # - Define approximate time
    df["time"] = start_time + dt

    # - Keep rows where time increment is between 00 and 59 minutes
    valid_rows = dt <= np.timedelta64(3540, "s")
    df = df[valid_rows]

    # Drop rows with invalid raw_drop_number
    # - 1763 chars is the expected fixed-length spectrum string
    # - NOTE(review): original comment mentions 440 (22x20) vs 400 (20x20) bins — confirm layout
    df = df[df["raw_drop_number"].astype(str).str.len() == 1763]

    # Drop columns not agreeing with DISDRODB L0 standards
    columns_to_drop = [
        "start_identifier",
        "device_address",
        "sensor_serial_number",
        "sensor_date",
        "sensor_time",
    ]
    df = df.drop(columns=columns_to_drop)
    return df
218
+
219
+
220
@is_documented_by(reader_generic_docstring)
def reader(
    filepath,
    logger=None,
):
    """Reader."""
    import zipfile

    # ---------------------------------------------------------------------.
    #### Iterate over all files (aka timesteps) in the daily zip archive
    # - Each .txt member is an hourly file parsed by read_txt_file
    # - Files failing to parse are logged and skipped (best-effort)
    list_df = []
    with zipfile.ZipFile(filepath, "r") as zip_ref:
        filenames = sorted(zip_ref.namelist())
        for filename in filenames:
            if filename.endswith(".txt"):
                # Open file
                with zip_ref.open(filename) as file:
                    try:
                        df = read_txt_file(file=file, filename=filename, logger=logger)
                        if df is not None:
                            list_df.append(df)
                    except Exception as e:
                        msg = f"An error occurred while reading {filename}. The error is: {e}"
                        log_error(logger=logger, msg=msg, verbose=True)

    # Check the zip file contains at least some non-empty files
    if len(list_df) == 0:
        raise ValueError(f"{filepath} contains only empty files!")

    # Concatenate all dataframes into a single one
    df = pd.concat(list_df)

    # ---------------------------------------------------------------------.
    return df
@@ -86,7 +86,7 @@ def reader(
86
86
  df = df["TO_BE_SPLITTED"].str.split(";", expand=True, n=79)
87
87
 
88
88
  # Assign column names
89
- column_names = [
89
+ names = [
90
90
  "start_identifier",
91
91
  "device_address",
92
92
  "sensor_serial_number",
@@ -168,7 +168,7 @@ def reader(
168
168
  "number_particles_class_9_internal_data",
169
169
  "TO_BE_FURTHER_PROCESSED",
170
170
  ]
171
- df.columns = column_names
171
+ df.columns = names
172
172
 
173
173
  # Extract the last variables remained in raw_drop_number
174
174
  df_parsed = df["TO_BE_FURTHER_PROCESSED"].str.rsplit(";", n=5, expand=True)
@@ -0,0 +1,103 @@
1
+ # -----------------------------------------------------------------------------.
2
+ # Copyright (c) 2021-2023 DISDRODB developers
3
+ #
4
+ # This program is free software: you can redistribute it and/or modify
5
+ # it under the terms of the GNU General Public License as published by
6
+ # the Free Software Foundation, either version 3 of the License, or
7
+ # (at your option) any later version.
8
+ #
9
+ # This program is distributed in the hope that it will be useful,
10
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
11
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
+ # GNU General Public License for more details.
13
+ #
14
+ # You should have received a copy of the GNU General Public License
15
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
+ # -----------------------------------------------------------------------------.
17
+ """Reader for RWANDA DELFT Thies LPM sensor in netCDF format."""
18
+
19
+ from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
20
+ from disdrodb.l0.l0b_nc_processing import open_raw_netcdf_file, standardize_raw_dataset
21
+
22
+
23
@is_documented_by(reader_generic_docstring)
def reader(
    filepath,
    logger=None,
):
    """Reader."""
    ##------------------------------------------------------------------------.
    #### Open the netCDF
    ds = open_raw_netcdf_file(filepath=filepath, logger=logger)

    ##------------------------------------------------------------------------.
    #### Adapt the dataset to adhere to DISDRODB L0 standards
    # Cast time to second resolution, drop stale attributes and promote it to coordinate
    ds["time"] = ds["time"].astype("M8[s]")
    for attr in ("comment", "units"):
        ds["time"].attrs.pop(attr, None)
    ds = ds.set_coords("time")

    # Define mapping from raw netCDF names to DISDRODB standard names
    name_mapping = {
        ### Dimensions
        "diameter_classes": "diameter_bin_center",
        "velocity_classes": "velocity_bin_center",
        ### Variables
        "weather_code_synop_4680": "weather_code_synop_4680",
        "weather_code_synop_4677": "weather_code_synop_4677",
        "weather_code_metar_4678": "weather_code_metar_4678",
        "liquid_precip_intensity": "rainfall_rate",
        "solid_precip_intensity": "snowfall_rate",
        "all_precip_intensity": "precipitation_rate",
        "reflectivity": "reflectivity",
        "visibility": "mor_visibility",
        "measurement_quality": "quality_index",
        "maximum_diameter_hail": "max_hail_diameter",
        "status_laser": "laser_status",
        "status_output_laser_power": "control_output_laser_power_status",
        "interior_temperature": "temperature_interior",
        "temperature_of_laser_driver": "laser_temperature",
        "mean_value_laser_current": "laser_current_average",
        "control_voltage": "control_voltage",
        "optical_control_output": "optical_control_voltage_output",
        "voltage_sensor_supply": "sensor_voltage_supply",
        "current_heating_laser_head": "current_heating_pane_transmitter_head",
        "current_heating_receiver_head": "current_heating_pane_receiver_head",
        "ambient_temperature": "temperature_ambient",
        "voltage_heating_supply": "current_heating_voltage_supply",
        "current_heating_housing": "current_heating_house",
        "current_heating_heads": "current_heating_heads",
        "current_heating_carriers": "current_heating_carriers",
        "number_of_all_measured_particles": "number_particles",
        "number_of_particles_slower_than_0.15": "number_particles_min_speed",
        "number_of_particles_faster_than_20": "number_particles_max_speed",
        "number_of_particles_smaller_than_0.15": "number_particles_min_diameter",
        "number_of_particles_with_unknown_classification": "number_particles_unknown_classification",
        "total_volume_gross_particles_unknown_classification": "number_particles_unknown_classification_internal_data",
    }
    # - Per-class particle counters and gross volumes (classes 1 to 9)
    for i in range(1, 10):
        name_mapping[f"number_of_particles_class_{i}"] = f"number_particles_class_{i}"
        name_mapping[f"total_volume_gross_of_class_{i}"] = f"number_particles_class_{i}_internal_data"
    # - Raw spectrum
    name_mapping["raw_data"] = "raw_drop_number"

    # Rename dataset variables and columns and infill missing variables
    ds = standardize_raw_dataset(ds=ds, dict_names=name_mapping, sensor_name="LPM")

    # Return the dataset adhering to DISDRODB L0B standards
    return ds
@@ -0,0 +1,216 @@
1
+ #!/usr/bin/env python3
2
+
3
+ # -----------------------------------------------------------------------------.
4
+ # Copyright (c) 2021-2023 DISDRODB developers
5
+ #
6
+ # This program is free software: you can redistribute it and/or modify
7
+ # it under the terms of the GNU General Public License as published by
8
+ # the Free Software Foundation, either version 3 of the License, or
9
+ # (at your option) any later version.
10
+ #
11
+ # This program is distributed in the hope that it will be useful,
12
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
13
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14
+ # GNU General Public License for more details.
15
+ #
16
+ # You should have received a copy of the GNU General Public License
17
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
18
+ # -----------------------------------------------------------------------------.
19
+ """DISDRODB reader for Haukeliseter Test Site LPM sensors."""
20
+ import numpy as np
21
+ import pandas as pd
22
+
23
+ from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
24
+ from disdrodb.l0.l0a_processing import read_raw_text_file
25
+
26
+
27
@is_documented_by(reader_generic_docstring)
def reader(
    filepath,
    logger=None,
):
    """Reader."""
    ##------------------------------------------------------------------------.
    #### - Define raw data headers
    # - The whole raw record is read into a single column and parsed afterwards
    column_names = ["TO_PARSE"]

    ##------------------------------------------------------------------------.
    #### Define reader options
    # - For more info: https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
    reader_kwargs = {
        # Read one raw record per row
        "delimiter": "\\n",
        # Avoid first column to become df index !!!
        "index_col": False,
        # Define encoding
        "encoding": "ISO-8859-1",
        # Column names are passed explicitly
        "header": None,
        # No rows skipped at the beginning of the file
        "skiprows": None,
        # Skip malformed lines
        "on_bad_lines": "skip",
        # Python engine is more feature-complete than the (faster) C engine
        "engine": "python",
        # On-the-fly decompression of on-disk data (gzip, bz2, zip)
        "compression": "infer",
        # Extra strings to recognize as NA/NaN
        "na_values": ["na", "", "error"],
    }

    ##------------------------------------------------------------------------.
    #### Read the data
    df = read_raw_text_file(
        filepath=filepath,
        column_names=column_names,
        reader_kwargs=reader_kwargs,
        logger=logger,
    )

    ##------------------------------------------------------------------------.
    #### Adapt the dataframe to adhere to DISDRODB L0 standards
    # Raise error if empty file
    if len(df) == 0:
        raise ValueError(f"{filepath} is empty.")

    # Keep only rows with the expected number of ';' delimiters
    df = df[df["TO_PARSE"].str.count(";").isin([520, 521])]

    # Raise error if no data left
    if len(df) == 0:
        raise ValueError(f"No valid data in {filepath}.")

    # Retrieve the most frequent number of delimiters
    unique_counts, occurrences = np.unique(df["TO_PARSE"].str.count(";"), return_counts=True)
    n_delimiters = unique_counts[np.argmax(occurrences)]

    # Records with 521 delimiters carry an extra leading field before the payload
    if n_delimiters == 520:
        maxsplit = 79
        columns_to_drop = ["device_address", "sensor_serial_number"]
    else:  # n_delimiters == 521
        maxsplit = 80
        columns_to_drop = ["start_identifier", "device_address", "sensor_serial_number"]

    # Split by ; delimiter (before raw drop number)
    df = df["TO_PARSE"].str.split(";", expand=True, n=maxsplit)

    # Assign column names
    df.columns = [
        "time",
        *columns_to_drop,
        "sensor_date",
        "sensor_time",
        "weather_code_synop_4677_5min",
        "weather_code_synop_4680_5min",
        "weather_code_metar_4678_5min",
        "precipitation_rate_5min",
        "weather_code_synop_4677",
        "weather_code_synop_4680",
        "weather_code_metar_4678",
        "precipitation_rate",
        "rainfall_rate",
        "snowfall_rate",
        "precipitation_accumulated",
        "mor_visibility",
        "reflectivity",
        "quality_index",
        "max_hail_diameter",
        "laser_status",
        "static_signal_status",
        "laser_temperature_analog_status",
        "laser_temperature_digital_status",
        "laser_current_analog_status",
        "laser_current_digital_status",
        "sensor_voltage_supply_status",
        "current_heating_pane_transmitter_head_status",
        "current_heating_pane_receiver_head_status",
        "temperature_sensor_status",
        "current_heating_voltage_supply_status",
        "current_heating_house_status",
        "current_heating_heads_status",
        "current_heating_carriers_status",
        "control_output_laser_power_status",
        "reserved_status",
        "temperature_interior",
        "laser_temperature",
        "laser_current_average",
        "control_voltage",
        "optical_control_voltage_output",
        "sensor_voltage_supply",
        "current_heating_pane_transmitter_head",
        "current_heating_pane_receiver_head",
        "temperature_ambient",
        "current_heating_voltage_supply",
        "current_heating_house",
        "current_heating_heads",
        "current_heating_carriers",
        "number_particles",
        "number_particles_internal_data",
        "number_particles_min_speed",
        "number_particles_min_speed_internal_data",
        "number_particles_max_speed",
        "number_particles_max_speed_internal_data",
        "number_particles_min_diameter",
        "number_particles_min_diameter_internal_data",
        "number_particles_no_hydrometeor",
        "number_particles_no_hydrometeor_internal_data",
        "number_particles_unknown_classification",
        "number_particles_unknown_classification_internal_data",
        "number_particles_class_1",
        "number_particles_class_1_internal_data",
        "number_particles_class_2",
        "number_particles_class_2_internal_data",
        "number_particles_class_3",
        "number_particles_class_3_internal_data",
        "number_particles_class_4",
        "number_particles_class_4_internal_data",
        "number_particles_class_5",
        "number_particles_class_5_internal_data",
        "number_particles_class_6",
        "number_particles_class_6_internal_data",
        "number_particles_class_7",
        "number_particles_class_7_internal_data",
        "number_particles_class_8",
        "number_particles_class_8_internal_data",
        "number_particles_class_9",
        "number_particles_class_9_internal_data",
        "raw_drop_number",
    ]

    # Remove checksum from raw_drop_number
    df["raw_drop_number"] = df["raw_drop_number"].str.rsplit(";", n=2, expand=True)[0]

    # Define datetime "time" column (the two record variants use different timestamp formats)
    if n_delimiters == 520:
        timestamps = df["time"].str.extract(r"(\d{8}_\d{6})")[0]
        df["time"] = pd.to_datetime(timestamps, format="%Y%m%d_%H%M%S", errors="coerce")
    else:
        timestamps = df["time"].str.extract(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})")[0]
        df["time"] = pd.to_datetime(timestamps, format="%Y-%m-%d %H:%M:%S", errors="coerce")

    # Drop rows with invalid raw_drop_number (expected fixed-length spectrum string)
    df = df[df["raw_drop_number"].astype(str).str.len() == 1759]

    # Drop columns not agreeing with DISDRODB L0 standards
    df = df.drop(columns=[*columns_to_drop, "sensor_date", "sensor_time"])
    return df