loone-data-prep 0.1.9__py3-none-any.whl → 1.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- loone_data_prep/GEOGLOWS_LOONE_DATA_PREP.py +252 -228
- loone_data_prep/LOONE_DATA_PREP.py +34 -17
- loone_data_prep/flow_data/forecast_bias_correction.py +52 -34
- loone_data_prep/flow_data/get_forecast_flows.py +131 -88
- loone_data_prep/forecast_scripts/create_forecast_LOWs.py +127 -0
- loone_data_prep/forecast_scripts/forecast_stages.py +40 -0
- loone_data_prep/forecast_scripts/predict_PI.py +51 -0
- loone_data_prep/forecast_scripts/trib_cond.py +84 -0
- loone_data_prep/forecast_scripts/weather_forecast.py +155 -0
- loone_data_prep/utils.py +52 -19
- {loone_data_prep-0.1.9.dist-info → loone_data_prep-1.1.2.dist-info}/METADATA +9 -4
- {loone_data_prep-0.1.9.dist-info → loone_data_prep-1.1.2.dist-info}/RECORD +15 -10
- {loone_data_prep-0.1.9.dist-info → loone_data_prep-1.1.2.dist-info}/WHEEL +1 -1
- {loone_data_prep-0.1.9.dist-info → loone_data_prep-1.1.2.dist-info/licenses}/LICENSE +0 -0
- {loone_data_prep-0.1.9.dist-info → loone_data_prep-1.1.2.dist-info}/top_level.txt +0 -0
@@ -61,53 +61,67 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
     LO_Stg_Sto_SA_df["SA_acres"] = LO_SA # acres

     # Using geoglows data for S65_total, only data from S65E_S (none from S65EX1_S)
-    S65_total = pd.read_csv(f"{input_dir}/
+    S65_total = pd.read_csv(f"{input_dir}/750072741_INFLOW_cmd_geoglows.csv")

-    S71_S = pd.read_csv(f"{input_dir}/
+    S71_S = pd.read_csv(f"{input_dir}/750068601_MATCHED_cmd_geoglows.csv")
     # S72_S = pd.read_csv(f'{input_dir}/S72_S_FLOW_cmd.csv')
-    S84_S = pd.read_csv(f"{input_dir}/
+    S84_S = pd.read_csv(f"{input_dir}/750069782_INFLOW_cmd_geoglows.csv")
     # S127_C = pd.read_csv(f'{input_dir}/S127_C_FLOW_cmd.csv')
     # S127_P = pd.read_csv(f'{input_dir}/S127_P_FLOW_cmd.csv')
-
-
-
-
-
-
+    #THESE ARE BOTH THE SAME INFLOW - CHECK THIS
+    S129_C = pd.read_csv(f"{input_dir}/750053211_INFLOW_cmd_geoglows.csv")
+    S129_P = pd.read_csv(f"{input_dir}/750053211_INFLOW_cmd_geoglows.csv")
+
+    S133_P = pd.read_csv(f"{input_dir}/750035446_INFLOW_cmd_geoglows.csv")
+    #These are both the same inflow - CHECK THIS
+    S135_C = pd.read_csv(f"{input_dir}/750052624_MATCHED_cmd_geoglows.csv")
+    S135_P = pd.read_csv(f"{input_dir}/750052624_MATCHED_cmd_geoglows.csv")
+
+    S154_C = pd.read_csv(f"{input_dir}/750064453_INFLOW_cmd_geoglows.csv")
     # S191_S = pd.read_csv(f'{input_dir}/S191_S_FLOW_cmd.csv')
-
-
-
-
-
+
+    #THIS MATCHES THE INFLOW OF S135_C
+    S308 = pd.read_csv(f"{input_dir}/750052624_MATCHED_cmd_geoglows.csv")
+
+    #I said that these ones shouldn't be included
+    # S351_S = pd.read_csv(f"{input_dir}/S351_S_FLOW_cmd_geoglows.csv")
+    # S352_S = pd.read_csv(f"{input_dir}/S352_S_FLOW_cmd_geoglows.csv")
+    # S354_S = pd.read_csv(f"{input_dir}/S354_S_FLOW_cmd_geoglows.csv")
+
+    FISHP = pd.read_csv(f"{input_dir}/750053213_MATCHED_cmd_geoglows.csv")
     # L8 = pd.read_csv(f'{input_dir}/L8.441_FLOW_cmd_geoglows.csv')
-
-
+
+    #I said that these ones should now be included in the model
+    # S2_P = pd.read_csv(f"{input_dir}/S2_P_FLOW_cmd_geoglows.csv")
+    # S3_P = pd.read_csv(f"{input_dir}/S3_P_FLOW_cmd_geoglows.csv")
     # S4_P = pd.read_csv(f'{input_dir}/S4_P_FLOW_cmd.csv')

-    S77_S = pd.read_csv(f"{input_dir}/
-
+    S77_S = pd.read_csv(f"{input_dir}/750038416_MATCHED_cmd_geoglows.csv")
+
+    #???
+    # INDUST = pd.read_csv(f"{input_dir}/INDUST_FLOW_cmd_geoglows.csv")

     # Read Interpolated TP data
     # Data_Interpolation Python Script is used to interpolate TP data for all inflow stations addressed below!
     S65_total_TP = pd.read_csv(f"{input_dir}/S65E_S_PHOSPHATE_predicted.csv")[
-        ["date", f"ensemble_{ensemble_number}
+        ["date", f"ensemble_{ensemble_number}"]
     ]
-    S71_TP = pd.read_csv(f"{input_dir}/S71_S_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
+    S71_TP = pd.read_csv(f"{input_dir}/S71_S_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
     # S72_TP = pd.read_csv(f'{input_dir}/S72_S_PHOSPHATE_predicted.csv')[['date', f'ensemble_{ensemble_number}_m^3/d']]
-    S84_TP = pd.read_csv(f"{input_dir}/S84_S_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
+    S84_TP = pd.read_csv(f"{input_dir}/S84_S_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
     # S127_TP = pd.read_csv(f'{input_dir}/S127_C_PHOSPHATE_predicted.csv')[['date', f'ensemble_{ensemble_number}_m^3/d']]
-    S133_TP = pd.read_csv(f"{input_dir}/S133_P_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
-    S135_TP = pd.read_csv(f"{input_dir}/S135_C_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
-    S154_TP = pd.read_csv(f"{input_dir}/S154_C_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
+    S133_TP = pd.read_csv(f"{input_dir}/S133_P_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
+    S135_TP = pd.read_csv(f"{input_dir}/S135_C_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
+    S154_TP = pd.read_csv(f"{input_dir}/S154_C_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
     # S191_TP = pd.read_csv(f'{input_dir}/S191_S_PHOSPHATE_predicted.csv')[['date', f'ensemble_{ensemble_number}_m^3/d']]
     # S308_TP = pd.read_csv(f'{input_dir}/water_quality_S308C_PHOSPHATE, TOTAL AS P_Interpolated.csv')[['date', 'Data']]
-    FISHP_TP = pd.read_csv(f"{input_dir}/FISHP_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}
+    FISHP_TP = pd.read_csv(f"{input_dir}/FISHP_PHOSPHATE_predicted.csv")[["date", f"ensemble_{ensemble_number}"]]
     # L8_TP = pd.read_csv(f'{input_dir}/water_quality_CULV10A_PHOSPHATE, TOTAL AS P_Interpolated.csv')[['date', f'ensemble_{ensemble_number}_m^3/d']] # ? Missing
     # S4_TP = pd.read_csv(f'{input_dir}/S4_P_PHOSPHATE_predicted.csv')[['date', f'ensemble_{ensemble_number}_m^3/d']]

     # Set date range for S65 TP
     S65_total_TP = DF_Date_Range(S65_total_TP, M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
+

     # Set Date Range
     Q_names = [
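Note: the new inputs are keyed by GEOGLOWS reach ID rather than station name, and each `*_PHOSPHATE_predicted.csv` is subset to `["date", f"ensemble_{ensemble_number}"]`, i.e. one column per ensemble member. A minimal sketch of the file shape this implies (the column names and values here are illustrative assumptions, not taken from the package):

    import pandas as pd

    # Hypothetical reach CSV: a date column plus one column per ensemble member, flows in m^3/day.
    demo = pd.DataFrame({
        "date": pd.date_range("2024-01-01", periods=3, freq="D"),
        "ensemble_01": [1.0e5, 1.1e5, 0.9e5],
        "ensemble_02": [1.2e5, 1.0e5, 1.3e5],
    })
    ensemble_number = "01"
    subset = demo[["date", f"ensemble_{ensemble_number}"]]  # same pattern as the TP reads above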
@@ -121,14 +135,14 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
         "S135_P_Q",
         "S154_Q", #'S191_Q',
         "S308_Q",
-        "S351_Q",
-
-        "S354_Q",
+        # "S351_Q",
+        # "S352_Q",
+        # "S354_Q",
         "FISHP_Q", #'L8_Q',
-        "S2_P_Q",
-        "S3_P_Q", #'S4_P_Q',
+        # "S2_P_Q",
+        # "S3_P_Q", #'S4_P_Q',
         "S77_Q",
-        "INDUST_Q",
+        # "INDUST_Q",
     ]
     Q_list = {
         "S65_Q": S65_total,
@@ -141,14 +155,14 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
         "S135_P_Q": S135_P,
         "S154_Q": S154_C,
         "S308_Q": S308,
-        "S351_Q": S351_S,
-        "S352_Q": S352_S,
-        "S354_Q": S354_S,
+        # "S351_Q": S351_S,
+        # "S352_Q": S352_S,
+        # "S354_Q": S354_S,
         "FISHP_Q": FISHP, #'L8_Q': L8,
-        "S2_P_Q": S2_P,
-        "S3_P_Q": S3_P,
+        # "S2_P_Q": S2_P,
+        # "S3_P_Q": S3_P,
         "S77_Q": S77_S,
-        "INDUST_Q": INDUST,
+        # "INDUST_Q": INDUST,
     }
     # Identify date range
     date = pd.date_range(start=f"{st_month}/{st_day}/{st_year}", end=f"{end_month}/{end_day}/{end_year}", freq="D")
@@ -170,53 +184,97 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
     for i in range(len(Q_names)):
         x = DF_Date_Range(Q_list[Q_names[i]], st_year, st_month, st_day, end_year, end_month, end_day)
         for column_name in x.columns:
-            if ensemble_number in column_name:
+            if str(ensemble_number) in column_name:
                 geoglows_flow_df[Q_names[i]] = x[column_name]

-    _create_flow_inflow_cqpq(geoglows_flow_df,
-    _create_flow_inflow_cqpq(geoglows_flow_df,
+    _create_flow_inflow_cqpq(geoglows_flow_df, "S129_C_Q", "S129_P_Q", "S129_In")
+    _create_flow_inflow_cqpq(geoglows_flow_df, "S135_C_Q", "S135_P_Q", "S135_In")

-    _create_flow_inflow_q(geoglows_flow_df,
-    _create_flow_inflow_q(geoglows_flow_df,
-    _create_flow_inflow_q(geoglows_flow_df, ensemble_number, "S351_Q", "S351_In")
-
-    _create_flow_inflow_q(geoglows_flow_df, ensemble_number, "S354_Q", "S354_In")
+    _create_flow_inflow_q(geoglows_flow_df, "S308_Q", "S308_In")
+    _create_flow_inflow_q(geoglows_flow_df, "S77_Q", "S77_In")
+    # _create_flow_inflow_q(geoglows_flow_df, ensemble_number, "S351_Q", "S351_In")
+    # _create_flow_inflow_q(geoglows_flow_df, ensemble_number, "S352_Q", "S352_In")
+    # _create_flow_inflow_q(geoglows_flow_df, ensemble_number, "S354_Q", "S354_In")
     # _create_flow_inflow_q(geoglows_flow_df, ensemble_number, 'L8_Q', 'L8_In')

-    _create_flow_outflow_q(geoglows_flow_df,
-    _create_flow_outflow_q(geoglows_flow_df,
-    _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "INDUST_Q", "INDUST_Out")
-    _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S351_Q", "S351_Out")
-    _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S352_Q", "S352_Out")
-    _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S354_Q", "S354_Out")
+    _create_flow_outflow_q(geoglows_flow_df, "S308_Q", "S308_Out")
+    _create_flow_outflow_q(geoglows_flow_df, "S77_Q", "S77_Out")
+    # _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "INDUST_Q", "INDUST_Out")
+    # _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S351_Q", "S351_Out")
+    # _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S352_Q", "S352_Out")
+    # _create_flow_outflow_q(geoglows_flow_df, ensemble_number, "S354_Q", "S354_Out")
     # _create_flow_outflow_q(geoglows_flow_df, ensemble_number, 'L8_Q', 'L8_Out')

-    geoglows_flow_df["Inflows"] = geoglows_flow_df[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    ].sum(
-
-    ) # , 'S4_P_Q']].sum(axis=1)
-
-
-
-
-
+    # geoglows_flow_df["Inflows"] = geoglows_flow_df[
+    # [
+    # "S65_Q",
+    # "S71_Q", #'S72_Q',
+    # "S84_Q", #'S127_In',
+    # "S129_In",
+    # "S133_P_Q",
+    # "S135_In",
+    # "S154_Q", #'S191_Q',
+    # "S308_In",
+    # "S77_In",
+    # "S351_In",
+    # "S352_In",
+    # "S354_In", #'L8_In',
+    # "FISHP_Q",
+    # "S2_P_Q",
+    # "S3_P_Q",
+    # ]
+    # ].sum(
+    # axis=1
+    # ) # , 'S4_P_Q']].sum(axis=1)
+    # my code to get the inflows and sum them:
+
+    #I took out the INDUST_Out because it seems that out model doesn't include it. Double check what INDUST_Out is
+    # INFLOW_IDS = [
+    # 750059718, 750043742, 750035446, 750034865, 750055574, 750053211,
+    # 750050248, 750065049, 750064453, 750049661, 750069195, 750051436,
+    # 750068005, 750063868, 750069782, 750072741
+    # ]
+    # inflow_data = {}
+    # for reach in INFLOW_IDS:
+    # inflow_data[reach] = pd.read_csv(f"{input_dir}/{reach}_INFLOW_cmd_geoglows.csv")
+    # _create_flow_inflow_q(geoglows_flow_df, "S308_Q", f"{reach}_INFLOW")
+
+    # geoglows_flow_df["Netflows"] = geoglows_flow_df["Inflows"] - geoglows_flow_df["INDUST_Out"]
+    # # flow_filter_cols = ["S308_Out", "S77_Out", 'S351_Out', 'S352_Out', 'S354_Out', 'INDUST_Out', 'L8_Out']
+    # flow_filter_cols = ["S308_Out", "S77_Out"]
+
+    # geoglows_flow_df["Outflows"] = geoglows_flow_df[flow_filter_cols].sum(axis=1)
+    #get all 16 inflow ids from geoglows
+    INFLOW_IDS = [
+        750059718, 750043742, 750035446, 750034865, 750055574, 750053211,
+        750050248, 750065049, 750064453, 750049661, 750069195, 750051436,
+        750068005, 750063868, 750069782, 750072741
+    ]
+    OUTFLOW_IDS = [750053809, 750057949]
+    # Ensure the date column exists and is used for geoglows_flow_df
+    # geoglows_flow_df = pd.DataFrame(first_inflow_data["date"], columns=["date"])
+
+    # Loop through all reach IDs to extract the relevant ensemble column
+    for reach in OUTFLOW_IDS:
+        outflow_data = pd.read_csv(f"{input_dir}/{reach}_OUTFLOW_cmd_geoglows.csv")
+
+        for column_name in outflow_data.columns:
+            if str(ensemble_number) in column_name:
+                geoglows_flow_df[reach] = outflow_data[column_name]
+    for reach in INFLOW_IDS:
+        inflow_data = pd.read_csv(f"{input_dir}/{reach}_INFLOW_cmd_geoglows.csv")
+
+        for column_name in inflow_data.columns:
+            if str(ensemble_number) in column_name:
+                geoglows_flow_df[reach] = inflow_data[column_name]
+    #Calculate the netflows by summing the inflows
+    geoglows_flow_df["Inflows"] = geoglows_flow_df[INFLOW_IDS].sum(axis=1)
+    geoglows_flow_df["Outflows"] = geoglows_flow_df[OUTFLOW_IDS].sum(axis=1)
+    # TODO: check if netflows are just the sum of the inflows or if they are the inflows minus the outflows
+    geoglows_flow_df["Netflows"] = geoglows_flow_df["Inflows"] # - geoglows_flow_df["Outflows"]
+    Netflows = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
+    Netflows["Netflows_acft"] = geoglows_flow_df["Netflows"] / 1233.48 # Convert from m^3/d to ac-ft
+    Netflows.to_csv(f"{output_dir}/Netflows_acft_geoglows_{ensemble_number}.csv", index=False)
     TP_names = [
         "S65_TP",
         "S71_TP", #'S72_TP',
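Note: `str(ensemble_number) in column_name` is a substring test, so an ensemble number of "1" would also match columns named `ensemble_10`, `ensemble_11`, and so on; as written, the last matching column wins. It is only safe if the ensemble numbers are zero-padded or each file holds a single matching column. A stricter selection, sketched under the assumption that columns are literally named `ensemble_<n>`:

    import pandas as pd

    x = pd.DataFrame({"date": ["2024-01-01"], "ensemble_1": [1.0], "ensemble_10": [2.0]})
    ensemble_number = "1"

    # The substring test matches both columns; an exact lookup picks only ensemble_1.
    matches = [c for c in x.columns if str(ensemble_number) in c]  # ['ensemble_1', 'ensemble_10']
    exact = x[f"ensemble_{ensemble_number}"]                       # only ensemble_1

The later `/ 1233.48` step is the standard volume conversion: one acre-foot is about 1,233.48 m^3, so it turns m^3/day into acre-feet per day.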
@@ -241,7 +299,7 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
     TP_Loads_In = pd.DataFrame(date, columns=["date"])
     for i in range(len(TP_names)):
         y = DF_Date_Range(TP_list[TP_names[i]], st_year, st_month, st_day, end_year, end_month, end_day)
-        TP_Loads_In[TP_names[i]] = y[f"ensemble_{ensemble_number}
+        TP_Loads_In[TP_names[i]] = y[f"ensemble_{ensemble_number}"]

     # Calculate the total External Loads to Lake Okeechobee
     TP_Loads_In["External_P_Ld_mg"] = TP_Loads_In.sum(axis=1, numeric_only=True)
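Note: `TP_Loads_In.sum(axis=1, numeric_only=True)` totals every numeric column present at that point, which is correct only while the frame holds exactly the per-station load columns. Summing an explicit column list is the defensive equivalent (a sketch, with an abbreviated station list):

    import pandas as pd

    TP_names = ["S65_TP", "S71_TP"]  # abbreviated for illustration
    TP_Loads_In = pd.DataFrame({"date": ["2024-01-01"], "S65_TP": [10.0], "S71_TP": [5.0]})

    # Summing a named list cannot silently pick up numeric columns added later.
    TP_Loads_In["External_P_Ld_mg"] = TP_Loads_In[TP_names].sum(axis=1)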
@@ -262,58 +320,77 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi

     # Create File (INDUST_Outflow_20082023)
     INDUST_Outflows = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
-    INDUST_Outflows["INDUST"] = geoglows_flow_df["INDUST_Out"]
+    # INDUST_Outflows["INDUST"] = geoglows_flow_df["INDUST_Out"]

     # Create File (Netflows_acft)
     # This is also Column (Net Inflow) in File (SFWMM_Daily_Outputs)
-    Netflows = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
-    Netflows["Netflows_acft"] = geoglows_flow_df["Netflows"] / 1233.48 # acft
+    # Netflows = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
+    # Netflows["Netflows_acft"] = geoglows_flow_df["Netflows"] / 1233.48 # acft

     # Create File (TotalQWCA_Obs)
     # This is also Column (RegWCA) in File (SFWMM_Daily_Outputs)
     TotalQWCA = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
-
-    TotalQWCA["
-    TotalQWCA["
-    TotalQWCA["
-
-
-    #
+    # We got rid of these stations
+    # TotalQWCA["S351_Out"] = geoglows_flow_df["S351_Out"] * (35.3147 / 86400) # cmd to cfs
+    # TotalQWCA["S354_Out"] = geoglows_flow_df["S354_Out"] * (35.3147 / 86400)
+    # TotalQWCA["RegWCA_cfs"] = TotalQWCA.sum(axis=1, numeric_only=True) # cfs
+    # TotalQWCA["RegWCA_acft"] = TotalQWCA["RegWCA_cfs"] * 1.9835 # acft
+
+    # Create Column (RegL8C51) in the File (SFWMM_Daily_Outputs)
+    L8C51 = pd.DataFrame(geoglows_flow_df["date"], columns=["date"])
     # L8C51["S352_Out"] = geoglows_flow_df["S352_Out"].values * (35.3147 / 86400) # cmd to cfs
     # L8C51["L8_O_cfs"] = geoglows_flow_df["L8_Out"].values * (35.3147 / 86400) # cmd to cfs
     # L8C51["L8C51_cfs"] = L8C51.sum(axis=1) # cfs
     # L8C51.to_csv(f"{output_dir}/L8C51.csv", index=False)

-    #
-    #
-    #
+    # C43 RO C44 RO
+    # Create Files (C43RO, C43RO_Monthly, C44RO, C44RO_Monthly)
+    # As well as Columns C43Runoff and C44Runoff in File (SFWMM_Daily_Outputs)
     # s79_path = glob(f'{input_dir}/S79_*FLOW*geoglows.csv')[0]
     # s80_path = glob(f'{input_dir}/S80_*FLOW*geoglows.csv')[0]
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    s79_path = f'{input_dir}/750050259_MATCHED_cmd_geoglows.csv'
+    s80_path = f'{input_dir}/750045514_MATCHED_cmd_geoglows.csv'
+    S79 = pd.read_csv(s79_path)
+    S80 = pd.read_csv(s80_path)
+    S79['Q_cmd'] = S79[f'ensemble_{ensemble_number}'] # already in cmd * 0.0283168466 * 86400
+    S80['Q_cmd'] = S80[f'ensemble_{ensemble_number}'] # already in cmd * 0.0283168466 * 86400
+
+    C43RO_df = pd.DataFrame(S79['date'], columns=['date'])
+    C44RO_df = pd.DataFrame(S79['date'], columns=['date'])
+    C43RO = np.zeros(len(C43RO_df.index))
+    C44RO = np.zeros(len(C44RO_df.index))
+    for i in range(len(C44RO_df.index)):
+        if S79['Q_cmd'].iloc[i] - geoglows_flow_df['S77_Out'].iloc[i] + geoglows_flow_df['S77_In'].iloc[i] < 0:
+            C43RO[i] = 0
+        else:
+            C43RO[i] = S79['Q_cmd'].iloc[i] - geoglows_flow_df['S77_Out'].iloc[i] + geoglows_flow_df['S77_In'].iloc[i]
+    for i in range(len(C44RO_df.index)):
+        if S80['Q_cmd'].iloc[i] - geoglows_flow_df['S308_Out'].iloc[i] + geoglows_flow_df['S308_In'].iloc[i] < 0:
+            C44RO[i] = 0
+        else:
+            C44RO[i] = S80['Q_cmd'].iloc[i] - geoglows_flow_df['S308_Out'].iloc[i] + geoglows_flow_df['S308_In'].iloc[i]
+    C43RO_df['C43RO_cmd'] = C43RO
+    C44RO_df['C44RO_cmd'] = C44RO
+    C43RO_df['C43RO'] = C43RO_df['C43RO_cmd']/(0.0283168466 * 86400)
+    C44RO_df['C44RO'] = C44RO_df['C44RO_cmd']/(0.0283168466 * 86400)
+    C43RO_df.to_csv(f'{output_dir}/C43RO_{ensemble_number}.csv', index=False)
+    C44RO_df.to_csv(f'{output_dir}/C44RO_{ensemble_number}.csv', index=False)
+    C43RO_df.index = pd.to_datetime(C43RO_df["date"])
+    C43RO_df = C43RO_df.drop(columns="date")
+
+    C44RO_df.index = pd.to_datetime(C44RO_df["date"])
+    C44RO_df = C44RO_df.drop(columns="date")
+
+    C43Mon = C43RO_df.resample('ME').mean()
+    C44Mon = C44RO_df.resample('ME').mean()
+
+    C43Mon.to_csv(f'{output_dir}/C43RO_Monthly_{ensemble_number}.csv', index=False)
+    C44Mon.to_csv(f'{output_dir}/C44RO_Monthly_{ensemble_number}.csv', index=False)
+    Basin_RO = pd.DataFrame(C44Mon.index, columns=['date'])
+    # Basin_RO['SLTRIB'] = SLTRIBMon['SLTRIB_cfs'].values * 1.9835 # cfs to acft
+    Basin_RO['C44RO'] = C44Mon['C44RO'].values * 86400
+    Basin_RO['C43RO'] = C43Mon['C43RO'].values * 86400
+    Basin_RO.to_csv(f'{output_dir}/Basin_RO_inputs_{ensemble_number}.csv', index=False)

     # # Get monthly C43RO and C44RO from historical run
     # shutil.copyfile(os.path.join(historical_files_src, "C43RO_Monthly.csv"), os.path.join(output_dir, 'C43RO_Monthly.csv'))
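Note: the element-wise loops above compute basin runoff as the S79 (or S80) flow minus the structure outflow plus the structure inflow, floored at zero; `0.0283168466 * 86400` is the cfs-to-m^3/day factor (0.0283168466 m^3 per ft^3 times 86,400 s per day), so dividing by it converts back to cfs. An equivalent vectorized form, sketched on small standalone series and assuming the frames share an aligned index:

    import pandas as pd

    s79_q = pd.Series([5.0e5, 1.0e5, 3.0e5])    # S79 flow, m^3/day
    s77_out = pd.Series([6.0e5, 0.5e5, 1.0e5])  # structure outflow, m^3/day
    s77_in = pd.Series([0.0, 0.2e5, 0.1e5])     # structure inflow, m^3/day

    # Same arithmetic as the C43RO loop, floored at zero in one step.
    c43ro_cmd = (s79_q - s77_out + s77_in).clip(lower=0)
    c43ro_cfs = c43ro_cmd / (0.0283168466 * 86400)  # m^3/day back to cfs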
@@ -362,53 +439,25 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
     # Net_RF = Net_RF.set_index(["date"])
     # Net_RF.index = pd.to_datetime(Net_RF.index, unit="ns")
     # Net_RF_Weekly = Net_RF.resample("W-FRI").sum()
-
-    #
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # PI = pd.DataFrame(S65E_Weekly.index, columns=["date"])
-    # PI_data = pd.read_csv(f"{input_dir}/PI.csv")
-    # PI["PI"] = PI_data.iloc[:, 1]
-
-    # Trib_Cond_Wkly = pd.DataFrame(S65E_Weekly.index, columns=["date"])
-    # Trib_Cond_Wkly["NetRF"] = Net_RF_Weekly["NetRF_In"].values
-    # Trib_Cond_Wkly["NetInf"] = Net_Inflow_Weekly["Net_Inflows"].values
-    # Trib_Cond_Wkly["S65E"] = S65E_Weekly["S65E"].values
-    # Trib_Cond_Wkly["Palmer"] = PI["PI"].values
-    # Trib_Cond_Wkly.to_csv(f"{output_dir}/Trib_cond_wkly_data.csv", index=False)
-
-    # # Wind Speed
-    # # Create File (LOWS)
-    # L001WS = pd.read_csv(f"{input_dir}/L001_WNDS_MPH.csv")
-    # L005WS = pd.read_csv(f"{input_dir}/L005_WNDS_MPH.csv")
-    # L006WS = pd.read_csv(f"{input_dir}/L006_WNDS_MPH.csv")
-    # LZ40WS = pd.read_csv(f"{input_dir}/LZ40_WNDS_MPH.csv")
-    # L001WS = DF_Date_Range(L001WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
-    # L005WS = DF_Date_Range(L005WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
-    # L006WS = DF_Date_Range(L006WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
-    # LZ40WS = DF_Date_Range(LZ40WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
-    # LOWS = pd.DataFrame(L001WS["date"], columns=["date"])
-    # LOWS["L001WS"] = L001WS["L001_WNDS_MPH"]
-    # LOWS["L005WS"] = L005WS["L005_WNDS_MPH"]
-    # LOWS["L006WS"] = L006WS["L006_WNDS_MPH"]
-    # LOWS["LZ40WS"] = LZ40WS["LZ40_WNDS_MPH"]
-    # LOWS["LO_Avg_WS_MPH"] = LOWS.mean(axis=1)
-    # LOWS.to_csv(f"{output_dir}/LOWS.csv", index=False)
+
+    # Wind Speed
+    # Create File (LOWS)
+    L001WS = pd.read_csv(f"{input_dir}/L001_WNDS_MPH_predicted.csv")
+    L005WS = pd.read_csv(f"{input_dir}/L005_WNDS_MPH_predicted.csv")
+    L006WS = pd.read_csv(f"{input_dir}/L006_WNDS_MPH_predicted.csv")
+    LZ40WS = pd.read_csv(f"{input_dir}/LZ40_WNDS_MPH_predicted.csv")
+    L001WS = DF_Date_Range(L001WS, st_year, st_month, st_day, end_year, end_month, end_day)
+    L005WS = DF_Date_Range(L005WS, st_year, st_month, st_day, end_year, end_month, end_day)
+    L006WS = DF_Date_Range(L006WS, st_year, st_month, st_day, end_year, end_month, end_day)
+    LZ40WS = DF_Date_Range(LZ40WS, st_year, st_month, st_day, end_year, end_month, end_day)
+    LOWS = pd.DataFrame(L001WS["date"], columns=["date"])
+    LOWS["L001WS"] = L001WS["L001_WNDS_MPH"]
+    LOWS["L005WS"] = L005WS["L005_WNDS_MPH"]
+    LOWS["L006WS"] = L006WS["L006_WNDS_MPH"]
+    LOWS["LZ40WS"] = LZ40WS["LZ40_WNDS_MPH"]
+    LOWS = LOWS.set_index("date")
+    LOWS["LO_Avg_WS_MPH"] = LOWS.mean(axis=1)
+    LOWS.to_csv(f"{output_dir}/LOWS_predicted.csv")

     # # RFVol acft
     # # Create File (RF_Volume)
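Note: the new `LOWS.set_index("date")` line matters: it moves the non-numeric date column out of the row-wise mean, so `LO_Avg_WS_MPH` averages exactly the four station columns (recent pandas would otherwise raise or need `numeric_only=True`). In miniature, with two illustrative stations:

    import pandas as pd

    LOWS = pd.DataFrame({
        "date": ["2024-01-01", "2024-01-02"],
        "L001WS": [8.0, 9.0],
        "L005WS": [10.0, 11.0],
    }).set_index("date")

    # Row-wise mean over the station columns only, since date is now the index.
    LOWS["LO_Avg_WS_MPH"] = LOWS.mean(axis=1)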
@@ -443,60 +492,34 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi
     # WCA_Stg.to_csv(f"{output_dir}/WCA_Stages_Inputs.csv", index=False)

     # # Predict Water Temp Function of Air Temp
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-    #
-
-
-
-
-
-
-    #
-
-
-
-    #
-
-
-    # LZ40_AirT = DF_Date_Range(LZ40_AirT, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
-
-    # WaterT_pred_df = pd.DataFrame(L001_AirT["date"], columns=["date"])
-
-    # WaterT_pred_df["L001_WaterT_pred"] = 1.862667 + 0.936899 * L001_AirT["L001_AIRT_Degrees Celsius"].values
-    # WaterT_pred_df["L005_WaterT_pred"] = 1.330211 + 0.909713 * L005_AirT["L005_AIRT_Degrees Celsius"].values
-    # WaterT_pred_df["L006_WaterT_pred"] = -0.88564 + 1.01585 * L006_AirT["L006_AIRT_Degrees Celsius"].values
-    # WaterT_pred_df["LZ40_WaterT_pred"] = 0.388231 + 0.980154 * LZ40_AirT["LZ40_AIRT_Degrees Celsius"].values
-    # water_t_pred_filter_cols = ["L001_WaterT_pred", "L005_WaterT_pred", "L006_WaterT_pred", "LZ40_WaterT_pred"]
-    # WaterT_pred_df["WaterT_pred_Mean"] = WaterT_pred_df[water_t_pred_filter_cols].mean(axis=1)
-    # WaterT_pred_df_1 = DF_Date_Range(WaterT_pred_df, St_Yr, St_M, St_D, 2020, 8, 25)
-    # WaterT_pred_df_2 = DF_Date_Range(WaterT_pred_df, 2020, 8, 26, En_Yr, En_M, En_D)
-    # Filled_WaterT_1 = np.zeros(len(WaterT_pred_df_1.index))
-    # Filled_WaterT_2 = np.zeros(len(WaterT_pred_df_2.index))
-    # for i in range(len(Water_Temp_data.index)):
-    # if np.isnan(Water_Temp_data["WaterT_Mean"].iloc[i]):
-    # Filled_WaterT_1[i] = WaterT_pred_df_1["WaterT_pred_Mean"].iloc[i]
-    # else:
-    # Filled_WaterT_1[i] = Water_Temp_data["WaterT_Mean"].iloc[i]
+    # Load and filter air temperature data
+    L001_AirT = pd.read_csv(f'{input_dir}/L001_AIRT_Degrees Celsius_forecast.csv')
+    L001_AirT = DF_Date_Range(L001_AirT, st_year, st_month, st_day, end_year, end_month, end_day)
+
+    L005_AirT = pd.read_csv(f'{input_dir}/L005_AIRT_Degrees Celsius_forecast.csv')
+    L005_AirT = DF_Date_Range(L005_AirT, st_year, st_month, st_day, end_year, end_month, end_day)
+
+    L006_AirT = pd.read_csv(f'{input_dir}/L006_AIRT_Degrees Celsius_forecast.csv')
+    L006_AirT = DF_Date_Range(L006_AirT, st_year, st_month, st_day, end_year, end_month, end_day)
+
+    LZ40_AirT = pd.read_csv(f'{input_dir}/LZ40_AIRT_Degrees Celsius_forecast.csv')
+    LZ40_AirT = DF_Date_Range(LZ40_AirT, st_year, st_month, st_day, end_year, end_month, end_day)
+
+    # Predict water temperatures using regression models
+    WaterT_pred_df = pd.DataFrame(L001_AirT['date'], columns=['date'])
+    WaterT_pred_df['L001_WaterT_pred'] = 1.862667 + 0.936899 * L001_AirT['L001_AIRT_Degrees Celsius'].values
+    WaterT_pred_df['L005_WaterT_pred'] = 1.330211 + 0.909713 * L005_AirT['L005_AIRT_Degrees Celsius'].values
+    WaterT_pred_df['L006_WaterT_pred'] = -0.88564 + 1.01585 * L006_AirT['L006_AIRT_Degrees Celsius'].values
+    WaterT_pred_df['LZ40_WaterT_pred'] = 0.388231 + 0.980154 * LZ40_AirT['LZ40_AIRT_Degrees Celsius'].values
+
+    # Compute average predicted water temperature
+    water_t_pred_filter_cols = ['L001_WaterT_pred', 'L005_WaterT_pred', 'L006_WaterT_pred', 'LZ40_WaterT_pred']
+    WaterT_pred_df['Water_T'] = WaterT_pred_df[water_t_pred_filter_cols].mean(axis=1)
+
+    # Export to CSV
+    WaterT_pred_df[['date', 'Water_T']].to_csv(f'{output_dir}/Filled_WaterT_predicted.csv', index=False)
+

-    # Filled_WaterT_2 = WaterT_pred_df_2["WaterT_pred_Mean"]
-    # Filled_WaterT_1df = pd.DataFrame(WaterT_pred_df_1["date"], columns=["date"])
-    # Filled_WaterT_2df = pd.DataFrame(WaterT_pred_df_2["date"], columns=["date"])
-    # Filled_WaterT_1df["Water_T"] = Filled_WaterT_1
-    # Filled_WaterT_2df["Water_T"] = Filled_WaterT_2
-    # Filled_WaterT = pd.concat([Filled_WaterT_1df, Filled_WaterT_2df]).reset_index(drop=True)
-    # Filled_WaterT.to_csv(f"{output_dir}/Filled_WaterT_20082023.csv", index=False)

     # # TP Observations in Lake
     # L001_TP = pd.read_csv(f"{input_dir}/water_quality_L001_PHOSPHATE, TOTAL AS P.csv")
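Note: each station's water temperature is predicted from air temperature with a fixed linear fit, WaterT = a + b * AirT, using the intercepts and slopes shown above, then averaged across stations. A compact equivalent that loops over a coefficient table instead of repeating the four lines (the coefficients come from the diff; the input frame here is illustrative):

    import pandas as pd

    # (intercept a, slope b) per station, from the regressions above.
    COEFFS = {
        "L001": (1.862667, 0.936899),
        "L005": (1.330211, 0.909713),
        "L006": (-0.88564, 1.01585),
        "LZ40": (0.388231, 0.980154),
    }

    air_t = pd.DataFrame({"L001": [20.0], "L005": [21.0], "L006": [19.5], "LZ40": [20.5]})
    water_t = pd.DataFrame({
        station: a + b * air_t[station] for station, (a, b) in COEFFS.items()
    })
    water_t["Water_T"] = water_t.mean(axis=1)  # lake-wide average prediction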
@@ -569,27 +592,27 @@ def main(input_dir: str, output_dir: str, ensemble_number: str) -> None: # , hi

     # Write Data into csv files
     # write Avg Stage (ft, m) Storage (acft, m3) SA (acres) to csv
-    LO_Stg_Sto_SA_df.to_csv(f"{output_dir}/
+    LO_Stg_Sto_SA_df.to_csv(f"{output_dir}/Average_LO_Storage_3MLag_{ensemble_number}.csv", index=False)
     # Write S65 TP concentrations (mg/L)
-    S65_total_TP.to_csv(f"{output_dir}/
+    S65_total_TP.to_csv(f"{output_dir}/S65_TP_3MLag_{ensemble_number}.csv", index=False)
     # TP External Loads 3 Months Lag (mg)
     TP_Loads_In_3MLag_df.to_csv(f"{output_dir}/LO_External_Loadings_3MLag_{ensemble_number}.csv", index=False)
     # Flow dataframe including Inflows, NetFlows, and Outflows (all in m3/day)
     geoglows_flow_df.to_csv(f"{output_dir}/geoglows_flow_df_ens_{ensemble_number}_predicted.csv", index=False)
     # Inflows (cmd)
-    LO_Inflows_BK.to_csv(f"{output_dir}/
+    LO_Inflows_BK.to_csv(f"{output_dir}/LO_Inflows_BK_forecast_{ensemble_number}.csv", index=False)
     # Outflows (cmd)
-    Outflows_consd.to_csv(f"{output_dir}/
+    Outflows_consd.to_csv(f"{output_dir}/Outflows_consd_{ensemble_number}.csv", index=False)
     # NetFlows (cmd)
     #Netflows.to_csv(f"{output_dir}/Netflows_acft.csv", index=False)
     # # Total flows to WCAs (acft)
-    TotalQWCA.to_csv(f"{output_dir}/TotalQWCA_Obs.csv", index=False)
+    # TotalQWCA.to_csv(f"{output_dir}/TotalQWCA_Obs.csv", index=False)
     # INDUST Outflows (cmd)
-    INDUST_Outflows.to_csv(f"{output_dir}/INDUST_Outflows.csv", index=False)
-
+    # INDUST_Outflows.to_csv(f"{output_dir}/INDUST_Outflows.csv", index=False)

+#Does this code need to take in the ensemble_number? I am getting rid of it for now.
 def _create_flow_inflow_cqpq(
-    df: pd.DataFrame,
+    df: pd.DataFrame, column_cq: str, column_pq: str, column_sum_name: str
 ):
     """Creates the inflow columns for the given column_cq column. For flows with (*_C_Q, *_P_Q). Handles ensembles.

@@ -609,7 +632,8 @@ def _create_flow_inflow_cqpq(
         df[column_sum_name_e] = df[[column_cq_e, column_pq_e]].sum(axis=1)


-
+#Does this code need to take in the ensemble_number? I am getting rid of it for now.
+def _create_flow_inflow_q(df: pd.DataFrame, column_q: str, column_in: str):
     """Creates the inflow columns for the given column_q column. For flows with (*_Q). Handles ensembles.

     Args:
@@ -624,8 +648,8 @@ def _create_flow_inflow_q(df: pd.DataFrame, ensemble_number: str, column_q: str,
         df[column_in_e] = df[column_in_e] * -1
         df[column_in_e] = df[column_in_e].fillna(0)

-
-def _create_flow_outflow_q(df: pd.DataFrame,
+#Does this code need to take in the ensemble_number? I am getting rid of it for now.
+def _create_flow_outflow_q(df: pd.DataFrame, column_q: str, column_out: str):
     """Creates the outflow columns for the given column_q column. For flows with (*_Q). Handles ensembles.

     Args:
@@ -642,4 +666,4 @@ def _create_flow_outflow_q(df: pd.DataFrame, ensemble_number: str, column_q: str


 if __name__ == "__main__":
-    main(sys.argv[1].rstrip("/"), sys.argv[2].rstrip("/"), sys.argv[3]) # , sys.argv[4])
+    main(sys.argv[1].rstrip("/"), sys.argv[2].rstrip("/"), sys.argv[3]) # , sys.argv[4])
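Note: per the `__main__` block, the script still takes three positional arguments, e.g. `python GEOGLOWS_LOONE_DATA_PREP.py <input_dir> <output_dir> <ensemble_number>`; trailing slashes on the two directory paths are stripped, and the commented-out fourth argument (apparently the historical-files source hinted at in the hunk headers) remains unused in 1.1.2.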