disdrodb 0.1.4__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- disdrodb/__init__.py +1 -5
- disdrodb/_version.py +2 -2
- disdrodb/accessor/methods.py +14 -3
- disdrodb/api/checks.py +10 -0
- disdrodb/api/create_directories.py +0 -2
- disdrodb/api/io.py +14 -17
- disdrodb/api/path.py +42 -77
- disdrodb/api/search.py +89 -23
- disdrodb/cli/disdrodb_create_summary.py +11 -1
- disdrodb/cli/disdrodb_create_summary_station.py +10 -0
- disdrodb/cli/disdrodb_run_l0.py +1 -1
- disdrodb/cli/disdrodb_run_l0a.py +1 -1
- disdrodb/cli/disdrodb_run_l0b.py +1 -1
- disdrodb/cli/disdrodb_run_l0c.py +1 -1
- disdrodb/cli/disdrodb_run_l1.py +1 -1
- disdrodb/cli/disdrodb_run_l2e.py +1 -1
- disdrodb/cli/disdrodb_run_l2m.py +1 -1
- disdrodb/configs.py +30 -83
- disdrodb/constants.py +4 -3
- disdrodb/data_transfer/download_data.py +4 -2
- disdrodb/docs.py +2 -2
- disdrodb/etc/products/L1/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/LPM/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PARSIVEL/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PARSIVEL2/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/PWS100/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/RD80/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/SWS250/1MIN.yaml +13 -0
- disdrodb/etc/products/L1/global.yaml +7 -1
- disdrodb/etc/products/L2E/10MIN.yaml +1 -12
- disdrodb/etc/products/L2E/5MIN.yaml +1 -0
- disdrodb/etc/products/L2E/global.yaml +1 -1
- disdrodb/etc/products/L2M/MODELS/GAMMA_GS_ND_MAE.yaml +6 -0
- disdrodb/etc/products/L2M/{GAMMA_ML.yaml → MODELS/GAMMA_ML.yaml} +1 -1
- disdrodb/etc/products/L2M/MODELS/LOGNORMAL_GS_LOG_ND_MAE.yaml +6 -0
- disdrodb/etc/products/L2M/MODELS/LOGNORMAL_GS_ND_MAE.yaml +6 -0
- disdrodb/etc/products/L2M/MODELS/LOGNORMAL_ML.yaml +8 -0
- disdrodb/etc/products/L2M/MODELS/NGAMMA_GS_R_MAE.yaml +6 -0
- disdrodb/etc/products/L2M/global.yaml +11 -3
- disdrodb/l0/check_configs.py +49 -16
- disdrodb/l0/configs/LPM/l0a_encodings.yml +2 -2
- disdrodb/l0/configs/LPM/l0b_cf_attrs.yml +2 -2
- disdrodb/l0/configs/LPM/l0b_encodings.yml +2 -2
- disdrodb/l0/configs/LPM/raw_data_format.yml +2 -2
- disdrodb/l0/configs/PARSIVEL/l0b_encodings.yml +1 -1
- disdrodb/l0/configs/PWS100/l0b_encodings.yml +1 -0
- disdrodb/l0/configs/SWS250/bins_diameter.yml +108 -0
- disdrodb/l0/configs/SWS250/bins_velocity.yml +83 -0
- disdrodb/l0/configs/SWS250/l0a_encodings.yml +18 -0
- disdrodb/l0/configs/SWS250/l0b_cf_attrs.yml +72 -0
- disdrodb/l0/configs/SWS250/l0b_encodings.yml +155 -0
- disdrodb/l0/configs/SWS250/raw_data_format.yml +148 -0
- disdrodb/l0/l0_reader.py +2 -2
- disdrodb/l0/l0b_processing.py +70 -15
- disdrodb/l0/l0c_processing.py +7 -3
- disdrodb/l0/readers/LPM/ARM/ARM_LPM.py +1 -1
- disdrodb/l0/readers/LPM/AUSTRALIA/MELBOURNE_2007_LPM.py +2 -2
- disdrodb/l0/readers/LPM/BELGIUM/ULIEGE.py +256 -0
- disdrodb/l0/readers/LPM/BRAZIL/CHUVA_LPM.py +2 -2
- disdrodb/l0/readers/LPM/BRAZIL/GOAMAZON_LPM.py +2 -2
- disdrodb/l0/readers/LPM/GERMANY/DWD.py +491 -0
- disdrodb/l0/readers/LPM/ITALY/GID_LPM.py +2 -2
- disdrodb/l0/readers/LPM/ITALY/GID_LPM_W.py +2 -2
- disdrodb/l0/readers/LPM/KIT/CHWALA.py +2 -2
- disdrodb/l0/readers/LPM/SLOVENIA/ARSO.py +107 -12
- disdrodb/l0/readers/LPM/SLOVENIA/UL.py +3 -3
- disdrodb/l0/readers/LPM/SWITZERLAND/INNERERIZ_LPM.py +2 -2
- disdrodb/l0/readers/PARSIVEL/BASQUECOUNTRY/EUSKALMET_OTT.py +227 -0
- disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/LPVEX.py +1 -1
- disdrodb/l0/readers/PARSIVEL/NCAR/VORTEX2_2010.py +5 -14
- disdrodb/l0/readers/PARSIVEL/NCAR/VORTEX2_2010_UF.py +8 -17
- disdrodb/l0/readers/PARSIVEL/SLOVENIA/UL.py +117 -8
- disdrodb/l0/readers/PARSIVEL2/BASQUECOUNTRY/EUSKALMET_OTT2.py +232 -0
- disdrodb/l0/readers/PARSIVEL2/BRAZIL/CHUVA_PARSIVEL2.py +10 -14
- disdrodb/l0/readers/PARSIVEL2/BRAZIL/GOAMAZON_PARSIVEL2.py +10 -14
- disdrodb/l0/readers/PARSIVEL2/DENMARK/DTU.py +8 -14
- disdrodb/l0/readers/PARSIVEL2/DENMARK/EROSION_raw.py +382 -0
- disdrodb/l0/readers/PARSIVEL2/FINLAND/FMI_PARSIVEL2.py +4 -0
- disdrodb/l0/readers/PARSIVEL2/FRANCE/OSUG.py +1 -1
- disdrodb/l0/readers/PARSIVEL2/GREECE/NOA.py +127 -0
- disdrodb/l0/readers/PARSIVEL2/ITALY/HYDROX.py +239 -0
- disdrodb/l0/readers/PARSIVEL2/NCAR/FARM_PARSIVEL2.py +5 -11
- disdrodb/l0/readers/PARSIVEL2/NCAR/PERILS_MIPS.py +4 -17
- disdrodb/l0/readers/PARSIVEL2/NCAR/RELAMPAGO_PARSIVEL2.py +5 -14
- disdrodb/l0/readers/PARSIVEL2/NCAR/SNOWIE_PJ.py +10 -13
- disdrodb/l0/readers/PARSIVEL2/NCAR/SNOWIE_SB.py +10 -13
- disdrodb/l0/readers/PARSIVEL2/PHILIPPINES/PAGASA.py +232 -0
- disdrodb/l0/readers/PARSIVEL2/SPAIN/CENER.py +6 -18
- disdrodb/l0/readers/PARSIVEL2/{NASA/LPVEX.py → SPAIN/GRANADA.py} +46 -35
- disdrodb/l0/readers/PARSIVEL2/SWEDEN/SMHI.py +189 -0
- disdrodb/l0/readers/PARSIVEL2/USA/{C3WE.py → CW3E.py} +10 -28
- disdrodb/l0/readers/PWS100/AUSTRIA/HOAL.py +321 -0
- disdrodb/l0/readers/SW250/BELGIUM/KMI.py +239 -0
- disdrodb/l1/beard_model.py +31 -129
- disdrodb/l1/fall_velocity.py +136 -83
- disdrodb/l1/filters.py +25 -28
- disdrodb/l1/processing.py +16 -17
- disdrodb/l1/resampling.py +101 -38
- disdrodb/l1_env/routines.py +46 -17
- disdrodb/l2/empirical_dsd.py +6 -0
- disdrodb/l2/processing.py +6 -5
- disdrodb/metadata/geolocation.py +0 -2
- disdrodb/metadata/search.py +3 -4
- disdrodb/psd/fitting.py +16 -13
- disdrodb/routines/l0.py +2 -2
- disdrodb/routines/l1.py +173 -60
- disdrodb/routines/l2.py +148 -284
- disdrodb/routines/options.py +345 -0
- disdrodb/routines/wrappers.py +14 -1
- disdrodb/scattering/axis_ratio.py +90 -84
- disdrodb/scattering/permittivity.py +6 -0
- disdrodb/summary/routines.py +735 -670
- disdrodb/utils/archiving.py +51 -44
- disdrodb/utils/attrs.py +3 -1
- disdrodb/utils/dask.py +4 -4
- disdrodb/utils/dict.py +33 -0
- disdrodb/utils/encoding.py +6 -1
- disdrodb/utils/routines.py +9 -8
- disdrodb/utils/time.py +11 -3
- disdrodb/viz/__init__.py +0 -13
- disdrodb/viz/plots.py +231 -1
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/METADATA +2 -1
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/RECORD +135 -103
- /disdrodb/etc/products/L2M/{NGAMMA_GS_LOG_ND_MAE.yaml → MODELS/NGAMMA_GS_LOG_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{NGAMMA_GS_ND_MAE.yaml → MODELS/NGAMMA_GS_ND_MAE.yaml} +0 -0
- /disdrodb/etc/products/L2M/{NGAMMA_GS_Z_MAE.yaml → MODELS/NGAMMA_GS_Z_MAE.yaml} +0 -0
- /disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/IFLOODS.py +0 -0
- /disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/MC3E.py +0 -0
- /disdrodb/l0/readers/PARSIVEL/{GPM → NASA}/PIERS.py +0 -0
- /disdrodb/l0/readers/PARSIVEL2/{GPM → NASA}/GCPEX.py +0 -0
- /disdrodb/l0/readers/PARSIVEL2/{GPM → NASA}/NSSTC.py +0 -0
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/WHEEL +0 -0
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/entry_points.txt +0 -0
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/licenses/LICENSE +0 -0
- {disdrodb-0.1.4.dist-info → disdrodb-0.2.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
# -----------------------------------------------------------------------------.
|
|
2
|
+
# Copyright (c) 2021-2023 DISDRODB developers
|
|
3
|
+
#
|
|
4
|
+
# This program is free software: you can redistribute it and/or modify
|
|
5
|
+
# it under the terms of the GNU General Public License as published by
|
|
6
|
+
# the Free Software Foundation, either version 3 of the License, or
|
|
7
|
+
# (at your option) any later version.
|
|
8
|
+
#
|
|
9
|
+
# This program is distributed in the hope that it will be useful,
|
|
10
|
+
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
11
|
+
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
12
|
+
# GNU General Public License for more details.
|
|
13
|
+
#
|
|
14
|
+
# You should have received a copy of the GNU General Public License
|
|
15
|
+
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
16
|
+
# -----------------------------------------------------------------------------.
|
|
17
|
+
"""DISDRODB reader for EUSKALMET OTT Parsivel 2 raw data."""
|
|
18
|
+
# import os
|
|
19
|
+
# import tempfile
|
|
20
|
+
# from disdrodb.utils.compression import unzip_file_on_terminal
|
|
21
|
+
|
|
22
|
+
import numpy as np
|
|
23
|
+
import pandas as pd
|
|
24
|
+
|
|
25
|
+
from disdrodb.l0.l0_reader import is_documented_by, reader_generic_docstring
|
|
26
|
+
from disdrodb.l0.l0a_processing import read_raw_text_file
|
|
27
|
+
from disdrodb.utils.logger import log_error
|
|
28
|
+
|
|
29
|
+
# Mapping from OTT Parsivel2 telegram field IDs (two-digit strings) to
# DISDRODB L0 column names.
# Commented-out entries are telegram fields deliberately not retained
# (sensor identity/firmware fields and time fields parsed separately).
COLUMN_DICT = {
    "01": "rainfall_rate_32bit",
    "02": "rainfall_accumulated_32bit",
    "03": "weather_code_synop_4680",
    "04": "weather_code_synop_4677",
    "05": "weather_code_metar_4678",  # empty
    "06": "weather_code_nws",  # empty
    "07": "reflectivity_32bit",
    "08": "mor_visibility",
    "09": "sample_interval",
    "10": "laser_amplitude",
    "11": "number_particles",
    "12": "sensor_temperature",
    # "13": "sensor_serial_number",
    # "14": "firmware_iop",
    # "15": "firmware_dsp",
    "16": "sensor_heating_current",
    "17": "sensor_battery_voltage",
    "18": "sensor_status",
    # "19": "start_time",
    # "20": "sensor_time",
    # "21": "sensor_date",
    # "22": "station_name",
    # "23": "station_number",
    "24": "rainfall_amount_absolute_32bit",
    "25": "error_code",
    "26": "sensor_temperature_pcb",
    "27": "sensor_temperature_receiver",
    "28": "sensor_temperature_trasmitter",
    "30": "rainfall_rate_16_bit_30",
    "31": "rainfall_rate_16_bit_1200",
    "32": "rainfall_accumulated_16bit",
    "34": "rain_kinetic_energy",
    "35": "snowfall_rate",
    "90": "raw_drop_concentration",
    "91": "raw_drop_average_velocity",
    "93": "raw_drop_number",
}
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def infill_missing_columns(df):
    """Add any expected DISDRODB column missing from ``df``, filled with "NaN".

    The expected columns are the values of ``COLUMN_DICT``. Missing ones are
    created with the string "NaN" (parsed as NA downstream).
    """
    expected_columns = set(COLUMN_DICT.values())
    missing_columns = expected_columns.difference(df.columns)
    for column in missing_columns:
        df[column] = "NaN"
    return df
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def read_txt_file(file, filename, logger):
    """Parse a single data file contained within the daily zip archive.

    Parameters
    ----------
    file : file-like
        Open file handle of the data file inside the zip archive.
    filename : str
        Name of the file within the archive (used in error messages).
    logger : logging.Logger
        Logger passed down to the raw text reader.

    Returns
    -------
    pandas.DataFrame
        Single-timestep dataframe adhering to DISDRODB L0 standards.

    Raises
    ------
    ValueError
        If the file is empty or corrupted.
    """
    ##------------------------------------------------------------------------.
    #### Define column names
    column_names = ["TO_PARSE"]

    ##------------------------------------------------------------------------.
    #### Define reader options
    reader_kwargs = {}
    # - Define delimiter
    # - The literal backslash-n: lines arrive as escaped text and are decoded below
    reader_kwargs["delimiter"] = "\\n"
    # - Skip first row as columns names
    # - Define encoding
    reader_kwargs["encoding"] = "latin"  # "ISO-8859-1"
    # - Avoid first column to become df index !!!
    reader_kwargs["index_col"] = False
    # - Define behaviour when encountering bad lines
    reader_kwargs["on_bad_lines"] = "skip"
    # - Define reader engine
    #   - C engine is faster
    #   - Python engine is more feature-complete
    reader_kwargs["engine"] = "python"
    # - Define on-the-fly decompression of on-disk data
    #   - Available: gzip, bz2, zip
    reader_kwargs["compression"] = "infer"
    # - Strings to recognize as NA/NaN and replace with standard NA flags
    #   - Already included: '#N/A', '#N/A N/A', '#NA', '-1.#IND', '-1.#QNAN',
    #                       '-NaN', '-nan', '1.#IND', '1.#QNAN', '<NA>', 'N/A',
    #                       'NA', 'NULL', 'NaN', 'n/a', 'nan', 'null'
    reader_kwargs["na_values"] = ["na", "", "error"]

    ##------------------------------------------------------------------------.
    #### Read the data
    df = read_raw_text_file(
        filepath=file,
        column_names=column_names,
        reader_kwargs=reader_kwargs,
        logger=logger,
    )

    ##------------------------------------------------------------------------.
    #### Adapt the dataframe to adhere to DISDRODB L0 standards
    # Empty file, raise error
    if len(df) == 0:
        raise ValueError(f"{filename} is empty.")

    # Select rows with valid spectrum
    # df = df[df["TO_PARSE"].str.count(";") == 1191] # 1112

    # Raise error if corrupted file
    if len(df) == 4:
        raise ValueError(f"{filename} is corrupted.")

    # Extract string
    string = df["TO_PARSE"].iloc[4]

    # Split into lines
    decoded_text = string.encode().decode("unicode_escape")
    decoded_text = decoded_text.replace("'", "").replace('"', "")
    lines = decoded_text.split()

    # Extract time
    time_str = lines[0].split(",")[1]

    # Split each line at the first colon
    data = [line.split(":", 1) for line in lines if ":" in line]

    # Create the DataFrame
    df = pd.DataFrame(data, columns=["ID", "Value"])

    # Drop rows with invalid IDs (valid telegram field IDs are "00" ... "93")
    valid_id_str = np.char.rjust(np.arange(0, 94).astype(str), width=2, fillchar="0")
    df = df[df["ID"].astype(str).isin(valid_id_str)]

    # Select only rows with values
    df = df[df["Value"].apply(lambda x: x is not None)]

    # Reshape dataframe (one row per timestep, one column per field ID)
    df = df.set_index("ID").T

    # Assign column names
    df = df.rename(COLUMN_DICT, axis=1)

    # Keep only columns defined in the dictionary
    df = df.filter(items=list(COLUMN_DICT.values()))

    # Infill missing columns
    df = infill_missing_columns(df)

    # Add time column as datetime dtype
    df["time"] = pd.to_datetime(time_str, format="%Y%m%d%H%M%S", errors="coerce")

    # Preprocess the raw spectrum and raw_drop_average_velocity
    # - Add 0 before every ; if ; not preceded by a digit
    # - Example: ';;1;;' --> '0;0;1;0;'
    df["raw_drop_number"] = df["raw_drop_number"].str.replace(r"(?<!\d);", "0;", regex=True)
    df["raw_drop_average_velocity"] = df["raw_drop_average_velocity"].str.replace(r"(?<!\d);", "0;", regex=True)

    # Return the dataframe adhering to DISDRODB L0 standards
    return df
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
@is_documented_by(reader_generic_docstring)
def reader(
    filepath,
    logger=None,
):
    """Reader."""
    import zipfile

    # ---------------------------------------------------------------------.
    #### Iterate over all files (aka timesteps) in the daily zip archive
    # - Each file contains a single timestep !
    list_df = []
    with zipfile.ZipFile(filepath, "r") as zip_ref:
        filenames = sorted(zip_ref.namelist())
        for filename in filenames:
            if filename.endswith(".dat"):
                # Open file and parse it; a single bad file is logged and skipped
                with zip_ref.open(filename) as file:
                    try:
                        df = read_txt_file(file=file, filename=filename, logger=logger)
                        if df is not None:
                            list_df.append(df)
                    except Exception as e:
                        msg = f"An error occurred while reading {filename}. The error is: {e}."
                        log_error(logger=logger, msg=msg, verbose=True)

    # Check the zip file contains at least some non-empty files
    if len(list_df) == 0:
        raise ValueError(f"{filepath} contains only empty files!")

    # Concatenate all dataframes into a single one
    df = pd.concat(list_df)

    # ---------------------------------------------------------------------.
    return df
|
|
@@ -81,20 +81,11 @@ def reader(
|
|
|
81
81
|
valid_id_str = np.char.rjust(np.arange(0, 94).astype(str), width=2, fillchar="0")
|
|
82
82
|
df = df[df["ID"].astype(str).isin(valid_id_str)]
|
|
83
83
|
|
|
84
|
-
# Create the dataframe
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
group_dfs = []
|
|
90
|
-
for _, group in groups:
|
|
91
|
-
group_df = group.set_index("ID").T
|
|
92
|
-
group_dfs.append(group_df)
|
|
93
|
-
|
|
94
|
-
# Merge each timestep dataframe
|
|
95
|
-
# --> Missing columns are infilled by NaN
|
|
96
|
-
df = pd.concat(group_dfs, axis=0)
|
|
97
|
-
df.columns = df.columns.astype(str).str.pad(width=2, side="left", fillchar="0")
|
|
84
|
+
# Create the dataframe where each row corresponds to a timestep
|
|
85
|
+
df["_group"] = (df["ID"].astype(int).diff() <= 0).cumsum()
|
|
86
|
+
df = df.pivot(index="_group", columns="ID") # noqa
|
|
87
|
+
df.columns = df.columns.get_level_values("ID")
|
|
88
|
+
df = df.reset_index(drop=True)
|
|
98
89
|
|
|
99
90
|
# Define available column names
|
|
100
91
|
column_dict = {
|
|
@@ -123,9 +114,14 @@ def reader(
|
|
|
123
114
|
# "23": "station_number",
|
|
124
115
|
"24": "rainfall_amount_absolute_32bit",
|
|
125
116
|
"25": "error_code",
|
|
117
|
+
# "26": "sensor_temperature_pcb",
|
|
118
|
+
# "27": "sensor_temperature_receiver",
|
|
119
|
+
# "28": "sensor_temperature_trasmitter",
|
|
126
120
|
"30": "rainfall_rate_16_bit_30",
|
|
127
121
|
"31": "rainfall_rate_16_bit_1200",
|
|
128
122
|
"32": "rainfall_accumulated_16bit",
|
|
123
|
+
# "34": "rain_kinetic_energy",
|
|
124
|
+
# "35": "snowfall_rate",
|
|
129
125
|
"90": "raw_drop_concentration",
|
|
130
126
|
"91": "raw_drop_average_velocity",
|
|
131
127
|
"93": "raw_drop_number",
|
|
@@ -81,20 +81,11 @@ def reader(
|
|
|
81
81
|
valid_id_str = np.char.rjust(np.arange(0, 94).astype(str), width=2, fillchar="0")
|
|
82
82
|
df = df[df["ID"].astype(str).isin(valid_id_str)]
|
|
83
83
|
|
|
84
|
-
# Create the dataframe
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
group_dfs = []
|
|
90
|
-
for _, group in groups:
|
|
91
|
-
group_df = group.set_index("ID").T
|
|
92
|
-
group_dfs.append(group_df)
|
|
93
|
-
|
|
94
|
-
# Merge each timestep dataframe
|
|
95
|
-
# --> Missing columns are infilled by NaN
|
|
96
|
-
df = pd.concat(group_dfs, axis=0)
|
|
97
|
-
df.columns = df.columns.astype(str).str.pad(width=2, side="left", fillchar="0")
|
|
84
|
+
# Create the dataframe where each row corresponds to a timestep
|
|
85
|
+
df["_group"] = (df["ID"].astype(int).diff() <= 0).cumsum()
|
|
86
|
+
df = df.pivot(index="_group", columns="ID") # noqa
|
|
87
|
+
df.columns = df.columns.get_level_values("ID")
|
|
88
|
+
df = df.reset_index(drop=True)
|
|
98
89
|
|
|
99
90
|
# Assign column names
|
|
100
91
|
column_dict = {
|
|
@@ -123,9 +114,14 @@ def reader(
|
|
|
123
114
|
# "23": "station_number",
|
|
124
115
|
"24": "rainfall_amount_absolute_32bit",
|
|
125
116
|
"25": "error_code",
|
|
117
|
+
# "26": "sensor_temperature_pcb",
|
|
118
|
+
# "27": "sensor_temperature_receiver",
|
|
119
|
+
# "28": "sensor_temperature_trasmitter",
|
|
126
120
|
"30": "rainfall_rate_16_bit_30",
|
|
127
121
|
"31": "rainfall_rate_16_bit_1200",
|
|
128
122
|
"32": "rainfall_accumulated_16bit",
|
|
123
|
+
# "34": "rain_kinetic_energy",
|
|
124
|
+
# "35": "snowfall_rate",
|
|
129
125
|
"90": "raw_drop_concentration",
|
|
130
126
|
"91": "raw_drop_average_velocity",
|
|
131
127
|
"93": "raw_drop_number",
|
|
@@ -81,20 +81,11 @@ def reader(
|
|
|
81
81
|
valid_id_str = np.char.rjust(np.arange(0, 94).astype(str), width=2, fillchar="0")
|
|
82
82
|
df = df[df["ID"].astype(str).isin(valid_id_str)]
|
|
83
83
|
|
|
84
|
-
# Create the dataframe
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
group_dfs = []
|
|
90
|
-
for _, group in groups:
|
|
91
|
-
group_df = group.set_index("ID").T
|
|
92
|
-
group_dfs.append(group_df)
|
|
93
|
-
|
|
94
|
-
# Merge each timestep dataframe
|
|
95
|
-
# --> Missing columns are infilled by NaN
|
|
96
|
-
df = pd.concat(group_dfs, axis=0)
|
|
97
|
-
df.columns = df.columns.astype(str).str.pad(width=2, side="left", fillchar="0")
|
|
84
|
+
# Create the dataframe where each row corresponds to a timestep
|
|
85
|
+
df["_group"] = (df["ID"].astype(int).diff() <= 0).cumsum()
|
|
86
|
+
df = df.pivot(index="_group", columns="ID") # noqa
|
|
87
|
+
df.columns = df.columns.get_level_values("ID")
|
|
88
|
+
df = df.reset_index(drop=True)
|
|
98
89
|
|
|
99
90
|
# Define available column names
|
|
100
91
|
column_dict = {
|
|
@@ -123,6 +114,9 @@ def reader(
|
|
|
123
114
|
# "23": "station_number",
|
|
124
115
|
"24": "rainfall_amount_absolute_32bit",
|
|
125
116
|
"25": "error_code",
|
|
117
|
+
# "26": "sensor_temperature_pcb",
|
|
118
|
+
# "27": "sensor_temperature_receiver",
|
|
119
|
+
# "28": "sensor_temperature_trasmitter",
|
|
126
120
|
# "30": "rainfall_rate_16_bit_30",
|
|
127
121
|
# "31": "rainfall_rate_16_bit_1200",
|
|
128
122
|
# "32": "rainfall_accumulated_16bit",
|