loone-data-prep 0.1.6__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

loone_data_prep/weather_data/get_all.py

@@ -1,6 +1,10 @@
  import sys
  from glob import glob
+ import uuid
+ import os
+ import pandas as pd
  from loone_data_prep.weather_data import weather
+ from loone_data_prep.utils import find_last_date_in_csv, dbhydro_data_is_latest


  D = {
@@ -14,14 +18,133 @@ D = {
  }


- def main(workspace: str, d: dict = D) -> dict:
+ DBKEY_STATIONS = {
+     "16021": "L001",
+     "12515": "L005",
+     "12524": "L006",
+     "13081": "LZ40",
+     "UT736": "L001",
+     "VM675": "L005",
+     "UT743": "L006",
+     "UT748": "LZ40",
+     "16031": "L001",
+     "12518": "L005",
+     "12527": "L006",
+     "16267": "LZ40",
+     "16025": "L001",
+     "12516": "L005",
+     "12525": "L006",
+     "15649": "LZ40",
+     "16024": "L001",
+     "12512": "L005",
+     "12522": "L006",
+     "13080": "LZ40",
+     "16027": "L001",
+     "12514": "L005",
+     "12911": "L006",
+     "13078": "LZ40",
+     "16023": "L001",
+     "12510": "L005",
+     "12520": "L006",
+     "13076": "LZ40",
+ }
+
+ def main(workspace: str, d: dict = D, dbkey_stations: dict = DBKEY_STATIONS) -> dict:
+     """Retrieve all weather data used by LOONE.
+
+     When the dbkey_stations argument is provided, only data newer than what is
+     already on disk is downloaded for the dbkeys in d and dbkey_stations.
+     Otherwise, all data for the dbkeys in d is downloaded.
+
+     Args:
+         workspace (str): Path to the workspace where data will be downloaded.
+         d (dict): Dictionary mapping data type keys to keyword arguments for
+             weather_data.weather.get(). Valid keys are 'RAIN', 'ETPI', 'H2OT',
+             'RADP', 'RADT', 'AIRT', and 'WNDS'.
+         dbkey_stations (dict): Dictionary mapping dbkeys to their station names.
+     """
      missing_files = []
+     failed_downloads = []  # (data type, file name) tuples whose update failed; the existing files are left intact
+
+     # Get the data for each data type
      for name, params in d.items():
-         print(f"Getting {name} for the following dbkeys: {params['dbkeys']}.")
-         weather.get(workspace, name, **params)
+
+         # Get the data for each dbkey individually for this data type
+         for dbkey in params['dbkeys']:
+             # Build the name of the file being downloaded
+             station = dbkey_stations[dbkey]
+             date_units_file, _ = weather._get_file_header_data_units(name)
+             original_file_name = ""
+             if name in ['RADP', 'RADT']:
+                 original_file_name = f"{station}_{name}.csv"
+             else:
+                 original_file_name = f"{station}_{name}_{date_units_file}.csv"
+
+             # Get the date of the latest data in the CSV file
+             date_latest = find_last_date_in_csv(workspace, original_file_name)
+
+             # A file with data for this dbkey does NOT already exist (or some other error occurred)
+             if date_latest is None:
+                 print(f"Getting all {name} data for the following dbkey: {dbkey}.")
+                 weather.get(workspace, name, dbkeys=[dbkey])
+                 continue
+
+             # Check whether the data on disk is already up to date
+             if dbhydro_data_is_latest(date_latest):
+                 print(f'Downloading of new {name} data skipped for dbkey {dbkey}. Data is already up to date.')
+                 continue
+
+             # Temporarily rename the current data file so it isn't overwritten
+             original_file_name_temp = original_file_name.replace(".csv", f"_{uuid.uuid4()}.csv")
+             os.rename(os.path.join(workspace, original_file_name), os.path.join(workspace, original_file_name_temp))
+
+             try:
+                 # Download only the new data
+                 print(f'Downloading new {name} data for dbkey {dbkey} starting from date {date_latest}')
+                 weather.get(workspace, name, dbkeys=[dbkey], date_min=date_latest)
+
+                 # Data failed to download - it's possible the data's end date has been reached
+                 if not os.path.exists(os.path.join(workspace, original_file_name)):
+                     raise Exception(f"It's possible that the data for {name} dbkey {dbkey} has reached its end date.")
+
+                 # Read in the original data and the newly downloaded data
+                 df_original = pd.read_csv(os.path.join(workspace, original_file_name_temp), index_col=0)
+                 df_new = pd.read_csv(os.path.join(workspace, original_file_name), index_col=0)
+
+                 # Merge the new data with the original data
+                 df_merged = pd.concat([df_original, df_new], ignore_index=True)
+
+                 # Write out the merged data
+                 df_merged.to_csv(os.path.join(workspace, original_file_name))
+
+                 # Remove the renamed original data file
+                 os.remove(os.path.join(workspace, original_file_name_temp))
+             except Exception as e:
+                 # Notify of the error
+                 print(f"Error occurred while downloading new weather data: {e}")
+
+                 # Remove the newly downloaded data file if it exists
+                 if os.path.exists(os.path.join(workspace, original_file_name)):
+                     os.remove(os.path.join(workspace, original_file_name))
+
+                 # Rename the original file back to its original name
+                 if os.path.exists(os.path.join(workspace, original_file_name_temp)):
+                     os.rename(os.path.join(workspace, original_file_name_temp), os.path.join(workspace, original_file_name))
+
+                 # Record the failed update
+                 failed_downloads.append((name, original_file_name))
+
+         # Check whether all the files were downloaded
          if len(glob(f"{workspace}/*{name}*.csv")) < len(params["dbkeys"]):
              missing_files.append(True)
              print(f"After various tries, files are still missing for {name}.")
+
+     # Report any files that failed to update
+     if len(failed_downloads) > 0:
+         print(f"Failed to update the following files: {failed_downloads}")
+
+     # Create LAKE_RAINFALL_DATA.csv and LOONE_AVERAGE_ETPI_DATA.csv
+     weather.merge_data(workspace, 'RAIN')
+     weather.merge_data(workspace, 'ETPI')

      if True in missing_files:
          return {"error": "Missing files."}
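
A minimal usage sketch (not part of the diff; the workspace path is hypothetical). With the module-level defaults, main() now performs an incremental update, fetching only data newer than the last date already present in each station's CSV:

    from loone_data_prep.weather_data import get_all

    # d and dbkey_stations default to the module-level D and DBKEY_STATIONS
    result = get_all.main("/path/to/workspace")  # hypothetical workspace directory

    # main() is annotated to return a dict; {"error": "Missing files."}
    # signals that files are still missing after the download attempts
    if result and result.get("error"):
        print(result["error"])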

loone_data_prep/weather_data/weather.py

@@ -3,6 +3,7 @@ from datetime import datetime
  from retry import retry
  from rpy2.robjects import r
  from rpy2.rinterface_lib.embedded import RRuntimeError
+ import pandas as pd


  DEFAULT_DBKEYS = ["16021", "12515", "12524", "13081"]
@@ -20,40 +21,112 @@ def get(
  ) -> None:
      dbkeys_str = "\"" + "\", \"".join(dbkeys) + "\""

-     r(
-         f"""
-         library(dbhydroR)
-         library(dplyr)
+     data_type = param
+     data_units_file = None
+     data_units_header = None
+
+     # Get the units for the file name and column header based on the type of data
+     data_units_file, data_units_header = _get_file_header_data_units(data_type)
+
+     r_str = f"""
+         download_weather_data <- function()
+         {{
+             library(dbhydroR)
+             library(dplyr)

-         dbkeys <- c({dbkeys_str})
+             dbkeys <- c({dbkeys_str})
+             successful_stations <- list()
+
+             for (i in dbkeys)
+             {{
+                 # Retrieve data for the dbkey
+                 data <- get_hydro(dbkey = i, date_min = "{date_min}", date_max = "{date_max}", raw = TRUE)
+
+                 # Give the data.frame the correct column names so it can be cleaned with clean_hydro
+                 column_names <- c("station", "dbkey", "date", "data.value", "qualifer", "revision.date")
+                 colnames(data) <- column_names
+
+                 # Check whether the data.frame has any rows
+                 if (nrow(data) > 0)
+                 {{
+                     # Get the station
+                     station <- data$station[1]
+
+                     # Add type and units columns so the data can be cleaned with clean_hydro
+                     data$type <- "{data_type}"
+                     data$units <- "{data_units_header}"
+
+                     # Clean the data.frame
+                     data <- clean_hydro(data)
+
+                     # Build the filename of the output file
+                     filename <- ""
+
+                     if ("{param}" %in% c("RADP", "RADT"))
+                     {{
+                         filename <- paste(station, "{data_type}", sep = "_")
+                     }}
+                     else
+                     {{
+                         filename <- paste(station, "{data_type}", "{data_units_file}", sep = "_")
+                     }}
+
+                     filename <- paste0(filename, ".csv")
+                     filename <- paste0("{workspace}/", filename)

-         for (i in dbkeys) {{
-             # Retrieve data for the dbkey
-             data <- get_hydro(dbkey = i, date_min = "{date_min}", date_max = "{date_max}")
+                     # Save the data to a CSV file
+                     write.csv(data, file = filename)

-             # Extract the column names excluding the date column
-             column_names <- names(data)[-1]
+                     # Print a message indicating the file has been saved
+                     cat("CSV file", filename, "has been saved.\n")

-             # Generate the filename based on the column names
-             if ("{param}" %in% c("RADP", "RADT")) {{
-                 filename <- paste0("{workspace}/", gsub(" ", "_", sub("_[^_]*$", "", paste(column_names, collapse = "_"))), ".csv")
-             }} else {{
-                 filename <- paste0("{workspace}/", paste(column_names, collapse = "_"), ".csv")
-             }}
-
-             # Save data to a CSV file
-             write.csv(data, file = filename)
-
-             # Print a message indicating the file has been saved
-             cat("CSV file", filename, "has been saved.\n")
+                     # Append the station to the list of successful stations
+                     successful_stations <- c(successful_stations, station)
+                 }}
+                 else
+                 {{
+                     # No data returned; it's possible the dbkey has reached its end date
+                     print(paste("Empty data.frame returned for dbkey", i, "It's possible that the dbkey has reached its end date. Skipping to the next dbkey."))
+                 }}

-             # Add a delay between requests
-             Sys.sleep(2) # Wait for 2 seconds before the next iteration
+                 # Add a delay between requests
+                 Sys.sleep(2) # Wait for 2 seconds before the next iteration
+             }}
+
+             # Return the successful stations to the Python code
+             return(successful_stations)
          }}
          """ # noqa: E501
-     )
+
+     # Define and run the R download function
+     r(r_str)
+     result = r.download_weather_data()
+
+     # Get the stations of the dbkeys whose data were successfully downloaded
+     stations = []
+     for value in result:
+         stations.append(value[0])
+
+     # Reformat the files for the lake stations to the expected layout
+     for station in stations:
+         if station in ["L001", "L005", "L006", "LZ40"]:
+             _reformat_weather_file(workspace, station, data_type, data_units_file, data_units_header)
+
+             # Print a message indicating the file has been reformatted
+             print(f"CSV file {workspace}/{station}_{data_type}_{data_units_file}.csv has been reformatted.")


-     if param == "RAIN":
+ def merge_data(workspace: str, data_type: str):
+     """Merge the per-station data files into LAKE_RAINFALL_DATA.csv or LOONE_AVERAGE_ETPI_DATA.csv.
+
+     Args:
+         workspace (str): The path to the workspace directory.
+         data_type (str): The type of data: 'RAIN' for LAKE_RAINFALL_DATA.csv or 'ETPI' for LOONE_AVERAGE_ETPI_DATA.csv.
+     """
+     # Merge the data files for the different stations (LAKE_RAINFALL_DATA.csv)
+     if data_type == "RAIN":
          r(
              f"""
              L001_RAIN_Inches <- read.csv("{workspace}/L001_RAIN_Inches.csv", colClasses = c("NULL", "character", "numeric"))
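
For orientation, a sketch (not part of the diff; the path and dbkey are chosen for illustration) of driving the rewritten get() directly. It now pulls raw records through dbhydroR's get_hydro(raw = TRUE), cleans them with clean_hydro, writes one CSV per station, and reformats the files for the four lake stations:

    from loone_data_prep.weather_data import weather

    workspace = "/path/to/workspace"  # hypothetical directory
    # Download RAIN data for a single dbkey; writes e.g. L001_RAIN_Inches.csv
    weather.get(workspace, "RAIN", dbkeys=["16021"])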
@@ -79,7 +152,8 @@ def get(
              """ # noqa: E501
          )

-     if param == "ETPI":
+     # Merge the data files for the different stations (LOONE_AVERAGE_ETPI_DATA.csv)
+     if data_type == "ETPI":
          r(
              f"""
              L001_ETPI_Inches <- read.csv("{workspace}/L001_ETPI_Inches.csv", colClasses = c("NULL", "character", "numeric"))
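
merge_data() splits the station-merging step out of get(), so the merged products can be rebuilt without re-downloading. A sketch (workspace path hypothetical):

    from loone_data_prep.weather_data import weather

    workspace = "/path/to/workspace"  # hypothetical directory
    weather.merge_data(workspace, "RAIN")  # writes LAKE_RAINFALL_DATA.csv
    weather.merge_data(workspace, "ETPI")  # writes LOONE_AVERAGE_ETPI_DATA.csv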
@@ -107,6 +181,90 @@
          )


+ def _reformat_weather_file(workspace: str, station: str, data_type: str, data_units_file: str, data_units_header: str) -> None:
+     '''Reformat a dbhydro weather file to the layout expected by the rest of the LOONE scripts.
+
+     This function reads in and writes out a .csv file.
+
+     Args:
+         workspace (str): The path to the workspace directory.
+         station (str): The station name, e.g. L001, L005, L006, LZ40.
+         data_type (str): The type of data, e.g. RAIN, ETPI, H2OT, RADP, RADT, AIRT, WNDS.
+         data_units_file (str): The units used in the file name, e.g. Inches, Degrees Celsius.
+         data_units_header (str): The units used in the column header. Can differ from data_units_file when data_type is RADP or RADT.
+
+     Returns:
+         None
+     '''
+     # Read in the data
+     df = None
+     if data_type in ['RADP', 'RADT']:
+         df = pd.read_csv(f"{workspace}/{station}_{data_type}.csv")
+     else:
+         df = pd.read_csv(f"{workspace}/{station}_{data_type}_{data_units_file}.csv")
+
+     # Remove unneeded columns
+     df.drop(f' _{data_type}_{data_units_header}', axis=1, inplace=True)
+     df.drop('Unnamed: 0', axis=1, inplace=True)
+
+     # Convert the date column to datetime
+     df['date'] = pd.to_datetime(df['date'], format='%d-%b-%Y')
+
+     # Sort the data by date
+     df.sort_values('date', inplace=True)
+
+     # Renumber the index
+     df.reset_index(drop=True, inplace=True)
+
+     # Drop rows that are missing all their values
+     df.dropna(how='all', inplace=True)
+
+     # Write the updated data back to the file
+     if data_type in ['RADP', 'RADT']:
+         df.to_csv(f"{workspace}/{station}_{data_type}.csv")
+     else:
+         df.to_csv(f"{workspace}/{station}_{data_type}_{data_units_file}.csv")
+
+
+ def _get_file_header_data_units(data_type: str) -> tuple[str, str]:
+     """Retrieve the units of measurement for a given environmental data type, for use in file names and column headers.
+
+     This function maps a specified environmental data type to its corresponding units of measurement.
+     These units are used when naming files and in the column headers within those files.
+
+     Args:
+         data_type (str): The type of environmental data. Supported types are "RAIN", "ETPI", "H2OT", "RADP", "RADT", "AIRT", and "WNDS".
+
+     Returns:
+         tuple[str, str]: The unit of measurement for the file name and the unit of measurement for the column header.
+     """
+     # Get the units for the file name and column header based on the type of data
+     if data_type == "RAIN":
+         data_units_file = "Inches"
+         data_units_header = "Inches"
+     elif data_type == "ETPI":
+         data_units_file = "Inches"
+         data_units_header = "Inches"
+     elif data_type == "H2OT":
+         data_units_file = "Degrees Celsius"
+         data_units_header = "Degrees Celsius"
+     elif data_type == "RADP":
+         data_units_file = ""
+         data_units_header = "MICROMOLE/m^2/s"
+     elif data_type == "RADT":
+         data_units_file = ""
+         data_units_header = "kW/m^2"
+     elif data_type == "AIRT":
+         data_units_file = "Degrees Celsius"
+         data_units_header = "Degrees Celsius"
+     elif data_type == "WNDS":
+         data_units_file = "MPH"
+         data_units_header = "MPH"
+
+     return data_units_file, data_units_header


  if __name__ == "__main__":
      args = [sys.argv[1].rstrip("/"), sys.argv[2]]
      if len(sys.argv) >= 4:
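
The units helper can be exercised directly, as in this sketch; note the RADP/RADT cases, where the file name carries no units suffix while the column header still does:

    from loone_data_prep.weather_data.weather import _get_file_header_data_units

    print(_get_file_header_data_units("RAIN"))  # ('Inches', 'Inches')
    print(_get_file_header_data_units("RADT"))  # ('', 'kW/m^2')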

loone_data_prep-0.1.6.dist-info/METADATA → loone_data_prep-0.1.8.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: loone_data_prep
- Version: 0.1.6
+ Version: 0.1.8
  Summary: Prepare data to run the LOONE model.
  Author-email: Osama Tarabih <osamatarabih@usf.edu>
  Maintainer-email: Michael Souffront <msouffront@aquaveo.com>, James Dolinar <jdolinar@aquaveo.com>
@@ -20,6 +20,7 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: rpy2
  Requires-Dist: retry
+ Requires-Dist: numpy <2
  Requires-Dist: pandas
  Requires-Dist: scipy
  Requires-Dist: geoglows ==0.27.1

loone_data_prep-0.1.8.dist-info/RECORD (added)

@@ -0,0 +1,27 @@
+ loone_data_prep/GEOGLOWS_LOONE_DATA_PREP.py,sha256=wstZQwb_e2Z117dhvuLPrqyln6Bpb3ZTL0RfnOTvET4,35456
+ loone_data_prep/LOONE_DATA_PREP.py,sha256=osaLYlrfTwwUGLwXGypy61BOYBlXnoTPDp09O4Am1ZE,67761
+ loone_data_prep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ loone_data_prep/data_analyses_fns.py,sha256=BZ7famrSKoUfExQvZfbl72CyADHLb-zzgdWZ-kLJxcQ,4603
+ loone_data_prep/utils.py,sha256=Jsa08iaD04C-BqK0K5BHgRFZEOqp6f_dcJSjPgcz1zA,31575
+ loone_data_prep/flow_data/S65E_total.py,sha256=szNUfj0EyyyDzuKNhTGAZtWc5owiOpxYS55YTt4u19k,2835
+ loone_data_prep/flow_data/__init__.py,sha256=u7fENFUZsJjyl13Bc9ZE47sHMKmjxtqXhV9t7vDTm7Y,93
+ loone_data_prep/flow_data/forecast_bias_correction.py,sha256=ydoZ0UmDZvsPLHsO7cpCFN9Pmj7w_tKjMDy9RK5EoiM,10146
+ loone_data_prep/flow_data/get_forecast_flows.py,sha256=-nPkscE9UZbRzGZ_dk0zhKiNM2hOINx21HgSeQrFjaU,14462
+ loone_data_prep/flow_data/get_inflows.py,sha256=xKuSyJBdPrpjqMdRiyNDyxwdhYVIgLhiTP0k_1I1uWI,6456
+ loone_data_prep/flow_data/get_outflows.py,sha256=x7aisIkbXoTkcubFQLDghX-P8lztPq-tU0dQzoVRTtQ,5620
+ loone_data_prep/flow_data/hydro.py,sha256=5MwrzSUTCgPgeC_YGhz-en1CbOMp379Qf5zjpJlp-HM,5312
+ loone_data_prep/water_level_data/__init__.py,sha256=rgHDDkwccemsZnwUlw2M0h2ML4KmI89yPscmLoxbEHM,43
+ loone_data_prep/water_level_data/get_all.py,sha256=arPSWpb0XfQm0GKZJmoWhWdLuuNDxtGVX6_6UuD1_Vs,10885
+ loone_data_prep/water_level_data/hydro.py,sha256=PtsNdMXe1Y4e5CzEyLH6nJx_xv8sB90orGcSgxt7nao,3653
+ loone_data_prep/water_quality_data/__init__.py,sha256=PREV_pqo5welPDjgNvkKnRLLVV-uvhKVy6y6R3A2E78,57
+ loone_data_prep/water_quality_data/get_inflows.py,sha256=01wAVJaDSQiamc5qIOf4BIYCBkvW-bdJOpiPbOFAIl4,7295
+ loone_data_prep/water_quality_data/get_lake_wq.py,sha256=gcideLf2oddFVl_vEdhFkXhwhhtI58ZafKWhlpQ23X4,7791
+ loone_data_prep/water_quality_data/wq.py,sha256=sl6G3iDCk6QUzpHTXPHpRZNMBG0-wHuc6zdYbKI4eQk,5077
+ loone_data_prep/weather_data/__init__.py,sha256=TX58EPgGRzEK_LmLze79lC4L7kU_j3yZf5_iC4nOIP4,45
+ loone_data_prep/weather_data/get_all.py,sha256=aCufuxORU51XhXt7LN9wN_V4qtjNt1qRC1UKlI2b3Ko,6918
+ loone_data_prep/weather_data/weather.py,sha256=hvceksrGSnDkCjheBVBuPgY1DrdR0ZAtrFB-K2tYTtk,12043
+ loone_data_prep-0.1.8.dist-info/LICENSE,sha256=rR1QKggtQUbAoYu2SW1ouI5xPqt9g4jvRRpZ0ZfnuqQ,1497
+ loone_data_prep-0.1.8.dist-info/METADATA,sha256=WB5Nk0uuAtv55-zdjaLRZjn9qbMg1H34Yp5Qe2LpKbc,4122
+ loone_data_prep-0.1.8.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ loone_data_prep-0.1.8.dist-info/top_level.txt,sha256=wDyJMJiCO5huTAuNmvxpjFxtvGaq_8Tr4hFFcXf8jLE,16
+ loone_data_prep-0.1.8.dist-info/RECORD,,

loone_data_prep-0.1.6.dist-info/WHEEL → loone_data_prep-0.1.8.dist-info/WHEEL

@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.43.0)
+ Generator: setuptools (75.5.0)
  Root-Is-Purelib: true
  Tag: py3-none-any


loone_data_prep-0.1.6.dist-info/RECORD (removed)

@@ -1,27 +0,0 @@
- loone_data_prep/GEOGLOWS_LOONE_DATA_PREP.py,sha256=loaMvDU1IgLsz7_eHAVJMtk_pgW_CTHiZE43a0_mZZE,35394
- loone_data_prep/LOONE_DATA_PREP.py,sha256=qCZ35vtnT_QutHFs6m9FTUaX9l3BbuRTn6L1ZH0AbC8,59766
- loone_data_prep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- loone_data_prep/data_analyses_fns.py,sha256=BZ7famrSKoUfExQvZfbl72CyADHLb-zzgdWZ-kLJxcQ,4603
- loone_data_prep/utils.py,sha256=2iG7gVsvmpGNMfQ1a9yf1p4en9JNIQ6paXHrXqVtcS4,25112
- loone_data_prep/flow_data/S65E_total.py,sha256=IOFRi0dOfXo-eSTqn0wYCxrEsqhvVsREmA7OD6ZET4w,662
- loone_data_prep/flow_data/__init__.py,sha256=u7fENFUZsJjyl13Bc9ZE47sHMKmjxtqXhV9t7vDTm7Y,93
- loone_data_prep/flow_data/forecast_bias_correction.py,sha256=pABmNWWF96JDfjl3u314ORSskGbWaPgz8ZgM8FdEwvE,3752
- loone_data_prep/flow_data/get_forecast_flows.py,sha256=-nPkscE9UZbRzGZ_dk0zhKiNM2hOINx21HgSeQrFjaU,14462
- loone_data_prep/flow_data/get_inflows.py,sha256=52vewRQ131SCN32bJu6j2OQckM1FjLR-0A6qAvAe958,1552
- loone_data_prep/flow_data/get_outflows.py,sha256=_Vzzlmz5_W7_t2mItS25vrEryzWm75nY6DibId6viJ0,1779
- loone_data_prep/flow_data/hydro.py,sha256=VjLESLSL_TsqMXcMZOxg6UNUAWIW10TsDF8Qajzj1gY,2010
- loone_data_prep/water_level_data/__init__.py,sha256=rgHDDkwccemsZnwUlw2M0h2ML4KmI89yPscmLoxbEHM,43
- loone_data_prep/water_level_data/get_all.py,sha256=kZ0KU5g8iugRerU0Su0eYw6nEze57EOoOBi9_gOTlD4,1318
- loone_data_prep/water_level_data/hydro.py,sha256=BYDDvAkyzFiEUC5JCyI_NBwVwr3BCnTWBun54vxgffk,1257
- loone_data_prep/water_quality_data/__init__.py,sha256=PREV_pqo5welPDjgNvkKnRLLVV-uvhKVy6y6R3A2E78,57
- loone_data_prep/water_quality_data/get_inflows.py,sha256=YVPWmSVOpPqG8LVUMW-82EEGm7brTTmGHZzhk1q5YlE,2243
- loone_data_prep/water_quality_data/get_lake_wq.py,sha256=I6kXWo7WT6hetv1MXAuaYDL9aiCkUownBigHjUgaD4g,2780
- loone_data_prep/water_quality_data/wq.py,sha256=_BjB7v-vk2W8dTZAjrl_fFy11g2844Fj5463mHMO-ag,3094
- loone_data_prep/weather_data/__init__.py,sha256=TX58EPgGRzEK_LmLze79lC4L7kU_j3yZf5_iC4nOIP4,45
- loone_data_prep/weather_data/get_all.py,sha256=KBpFgz1ldAcLjlMaptxeUB4fptFrTgZ0fec0T5IThJk,1174
- loone_data_prep/weather_data/weather.py,sha256=9MAlhp9gAVEjSkobOh9-v--C8Zwz1VeoFmbrlCid_WU,5148
- loone_data_prep-0.1.6.dist-info/LICENSE,sha256=rR1QKggtQUbAoYu2SW1ouI5xPqt9g4jvRRpZ0ZfnuqQ,1497
- loone_data_prep-0.1.6.dist-info/METADATA,sha256=5i9zJh1HBHwQl8vZEjnxTiP3_bOim2oLY4rN3yEwA_8,4098
- loone_data_prep-0.1.6.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- loone_data_prep-0.1.6.dist-info/top_level.txt,sha256=wDyJMJiCO5huTAuNmvxpjFxtvGaq_8Tr4hFFcXf8jLE,16
- loone_data_prep-0.1.6.dist-info/RECORD,,