rtc-tools 2.7.3__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- rtc_tools-2.7.3.dist-info/METADATA +53 -0
- rtc_tools-2.7.3.dist-info/RECORD +50 -0
- rtc_tools-2.7.3.dist-info/WHEEL +5 -0
- rtc_tools-2.7.3.dist-info/entry_points.txt +3 -0
- rtc_tools-2.7.3.dist-info/licenses/COPYING.LESSER +165 -0
- rtc_tools-2.7.3.dist-info/top_level.txt +1 -0
- rtctools/__init__.py +5 -0
- rtctools/_internal/__init__.py +0 -0
- rtctools/_internal/alias_tools.py +188 -0
- rtctools/_internal/caching.py +25 -0
- rtctools/_internal/casadi_helpers.py +99 -0
- rtctools/_internal/debug_check_helpers.py +41 -0
- rtctools/_version.py +21 -0
- rtctools/data/__init__.py +4 -0
- rtctools/data/csv.py +150 -0
- rtctools/data/interpolation/__init__.py +3 -0
- rtctools/data/interpolation/bspline.py +31 -0
- rtctools/data/interpolation/bspline1d.py +169 -0
- rtctools/data/interpolation/bspline2d.py +54 -0
- rtctools/data/netcdf.py +467 -0
- rtctools/data/pi.py +1236 -0
- rtctools/data/rtc.py +228 -0
- rtctools/data/storage.py +343 -0
- rtctools/optimization/__init__.py +0 -0
- rtctools/optimization/collocated_integrated_optimization_problem.py +3208 -0
- rtctools/optimization/control_tree_mixin.py +221 -0
- rtctools/optimization/csv_lookup_table_mixin.py +462 -0
- rtctools/optimization/csv_mixin.py +300 -0
- rtctools/optimization/goal_programming_mixin.py +769 -0
- rtctools/optimization/goal_programming_mixin_base.py +1094 -0
- rtctools/optimization/homotopy_mixin.py +165 -0
- rtctools/optimization/initial_state_estimation_mixin.py +89 -0
- rtctools/optimization/io_mixin.py +320 -0
- rtctools/optimization/linearization_mixin.py +33 -0
- rtctools/optimization/linearized_order_goal_programming_mixin.py +235 -0
- rtctools/optimization/min_abs_goal_programming_mixin.py +385 -0
- rtctools/optimization/modelica_mixin.py +482 -0
- rtctools/optimization/netcdf_mixin.py +177 -0
- rtctools/optimization/optimization_problem.py +1302 -0
- rtctools/optimization/pi_mixin.py +292 -0
- rtctools/optimization/planning_mixin.py +19 -0
- rtctools/optimization/single_pass_goal_programming_mixin.py +676 -0
- rtctools/optimization/timeseries.py +56 -0
- rtctools/rtctoolsapp.py +131 -0
- rtctools/simulation/__init__.py +0 -0
- rtctools/simulation/csv_mixin.py +171 -0
- rtctools/simulation/io_mixin.py +195 -0
- rtctools/simulation/pi_mixin.py +255 -0
- rtctools/simulation/simulation_problem.py +1293 -0
- rtctools/util.py +241 -0
rtctools/data/netcdf.py
ADDED
@@ -0,0 +1,467 @@
+import os
+from collections import OrderedDict
+from datetime import datetime, timedelta
+from typing import Iterable, List, Union
+
+try:
+    from netCDF4 import Dataset, Variable, chartostring
+except ImportError:
+    raise ImportError("NetCDF4 is required when using NetCDF/NetCDFMixin")
+
+try:
+    from cftime import num2pydate as num2date
+except ImportError:
+    from cftime import num2date
+
+import numpy as np
+
+
+class Stations:
+    def __init__(self, dataset: Dataset, station_variable: Variable):
+        self.__station_variable = station_variable
+
+        station_dimension = station_variable.dimensions[0]
+
+        # todo make this a bit smarter, right now variables like station_name would be forgotten
+        self.__attribute_variables = {}
+        for variable_name in dataset.variables:
+            variable = dataset.variables[variable_name]
+            if variable != station_variable and variable.dimensions == (station_dimension,):
+                self.__attribute_variables[variable_name] = variable
+
+        self.__attributes = OrderedDict()
+        for i in range(station_variable.shape[0]):
+            id = str(chartostring(station_variable[i]))
+
+            values = {}
+            for variable_name in self.__attribute_variables.keys():
+                values[variable_name] = dataset.variables[variable_name][i]
+
+            self.__attributes[id] = values
+
+    @property
+    def station_ids(self) -> Iterable:
+        """
+        :return: An ordered iterable of the station ids (location ids) for which
+            station data is available.
+
+        """
+        return self.__attributes.keys()
+
+    @property
+    def attributes(self) -> OrderedDict:
+        """
+        :return: An OrderedDict containing dicts containing the values for all
+            station attributes of the input dataset.
+        """
+        return self.__attributes
+
+    @property
+    def attribute_variables(self) -> dict:
+        """
+        :return: A dict containing the station attribute variables of the input dataset.
+        """
+        return self.__attribute_variables
+
+
+class ImportDataset:
+    """
+    A class used to open and import the data from a NetCDF file.
+    Uses the NetCDF4 library. Contains various methods for reading the data in the file.
+    """
+
+    def __init__(self, folder: str, basename: str):
+        """
+        :param folder: Folder the file is located in.
+        :param basename: Basename of the file, extension ".nc" will be appended to this
+        """
+
+        self.__ensemble_size = 1
+
+        # Load the content of a NetCDF file into a Dataset.
+        self.__filename = os.path.join(folder, basename + ".nc")
+        self.__dataset = Dataset(self.__filename)
+
+        # Find the number of ensemble members and the time and station id variables
+        self.__time_variable = self.__find_time_variable()
+        if self.__time_variable is None:
+            raise Exception(
+                "No time variable found in file " + self.__filename + ". "
+                "Please ensure the file contains a time variable with standard_name "
+                '"time" and axis "T".'
+            )
+
+        self.__ensemble_member_variable = self.__find_ensemble_member_variable()
+        if self.__ensemble_member_variable:
+            self.__ensemble_size = self.__dataset.dimensions["realization"].size
+
+        self.__station_variable = self.__find_station_variable()
+        if self.__station_variable is None:
+            raise Exception(
+                "No station variable found in file " + self.__filename + ". "
+                'Please ensure the file contains a variable with cf_role "timeseries_id".'
+            )
+
+    def __str__(self):
+        return self.__filename
+
+    def __find_time_variable(self) -> Union[Variable, None]:
+        """
+        Find the variable containing the times in the given Dataset.
+
+        :param dataset: The Dataset to be searched.
+        :return: a netCDF4.Variable object of the time variable (or None if none found)
+        """
+        for variable in self.__dataset.variables.values():
+            if (
+                "standard_name" in variable.ncattrs()
+                and "axis" in variable.ncattrs()
+                and variable.standard_name == "time"
+                and variable.axis == "T"
+            ):
+                return variable
+
+        return None
+
+    def __find_ensemble_member_variable(self) -> Union[Variable, None]:
+        """
+        Find the variable containing the ensemble member index in the given Dataset.
+
+        :param dataset: The Dataset to be searched.
+        :return: a netCDF4.Variable object of the ensemble member index variable (or None
+            if none found)
+        """
+        for variable in self.__dataset.variables.values():
+            if "standard_name" in variable.ncattrs() and variable.standard_name == "realization":
+                return variable
+
+        return None
+
+    def __find_station_variable(self) -> Union[Variable, None]:
+        """
+        Find the variable containing station id's (location id's) in the given Dataset.
+
+        :param dataset: The Dataset to be searched.
+        :return: a netCDF4.Variable object of the station id variable (or None if none found)
+        """
+        for variable in self.__dataset.variables.values():
+            if "cf_role" in variable.ncattrs() and variable.cf_role == "timeseries_id":
+                return variable
+
+        return None
+
+    def read_import_times(self) -> np.ndarray:
+        """
+        Reads the import times in the time variable of the dataset.
+
+        :param time_variable: The time variable containing input times
+        :return: an array containing the input times as datetime objects
+        """
+        time_values = self.__time_variable[:]
+        time_unit = self.__time_variable.units
+        try:
+            time_calendar = self.__time_variable.calendar
+        except AttributeError:
+            time_calendar = "gregorian"
+
+        return num2date(time_values, units=time_unit, calendar=time_calendar)
+
+    def read_station_data(self) -> Stations:
+        return Stations(self.__dataset, self.__station_variable)
+
+    def find_timeseries_variables(self) -> List[str]:
+        """
+        Find the keys of all 2-D or 3-D variables with dimensions {station, time} or {station, time,
+        realization} where station is the dimension of the station_variable, time the dimension of
+        the time_variable and realization the dimension for ensemble_member_index.
+
+        :param dataset: The Dataset to be searched.
+        :param station_variable: The station id variable.
+        :param time_variable: The time variable.
+        :return: a list of strings containing all keys found.
+        """
+        station_dim = self.__station_variable.dimensions[0]
+        time_dim = self.__time_variable.dimensions[0]
+        if self.__ensemble_member_variable is not None:
+            ensemble_dim = self.__ensemble_member_variable.dimensions[0]
+            expected_dims = [
+                (time_dim, station_dim, ensemble_dim),
+                (time_dim, ensemble_dim, station_dim),
+                (station_dim, time_dim, ensemble_dim),
+                (station_dim, ensemble_dim, time_dim),
+                (ensemble_dim, time_dim, station_dim),
+                (ensemble_dim, station_dim, time_dim),
+            ] + [(station_dim, time_dim), (time_dim, station_dim)]
+        else:
+            expected_dims = [(station_dim, time_dim), (time_dim, station_dim)]
+
+        timeseries_variables = []
+        for var_key, variable in self.__dataset.variables.items():
+            if variable.dimensions in expected_dims:
+                timeseries_variables.append(var_key)
+
+        return timeseries_variables
+
+    def read_timeseries_values(
+        self, station_index: int, variable_name: str, ensemble_member: int = 0
+    ) -> np.ndarray:
+        """
+        Reads the specified timeseries from the input file.
+
+        :param station_index: The index of the station for which the values should be read
+        :param variable_name: The name of the variable for which the values should be read
+        :return: an array of values
+        """
+
+        station_dim = self.__station_variable.dimensions[0]
+        timeseries_variable = self.__dataset.variables[variable_name]
+
+        # possibly usefull for in a debugger mode
+        # assert set(timeseries_variable.dimensions)==set(('time', 'station')) \
+        # or set(timeseries_variable.dimensions)==set(('time', 'station', 'realization'))
+
+        if (
+            self.__ensemble_member_variable is not None
+            and "realization" in timeseries_variable.dimensions
+        ):
+            ensemble_member_dim = self.__ensemble_member_variable.dimensions[0]
+            for i in range(3):
+                if timeseries_variable.dimensions[i] == station_dim:
+                    station_arg_Index = i
+                elif timeseries_variable.dimensions[i] == ensemble_member_dim:
+                    ensemble_arg_Index = i
+            time_arg_Index = set(range(3)) - {station_arg_Index, ensemble_arg_Index}
+            time_arg_Index = time_arg_Index.pop()
+            argument = [None] * 3
+            argument[station_arg_Index] = station_index
+            argument[ensemble_arg_Index] = ensemble_member
+            argument[time_arg_Index] = slice(None)
+            values = timeseries_variable[tuple(argument)]
+        else:
+            if timeseries_variable.dimensions[0] == station_dim:
+                values = timeseries_variable[station_index, :]
+            else:
+                values = timeseries_variable[:, station_index]
+
+        # NetCDF4 reads the values as a numpy masked array,
+        # convert to a normal array with nan where mask == True
+        return np.ma.filled(values, np.nan)
+
+    def variable_dimensions(self, variable):
+        return self.__dataset.variables[variable].dimensions
+
+    @property
+    def time_variable(self):
+        return self.__time_variable
+
+    @property
+    def station_variable(self):
+        return self.__station_variable
+
+    @property
+    def ensemble_member_variable(self):
+        return self.__ensemble_member_variable
+
+    @property
+    def ensemble_size(self):
+        """
+        Ensemble size.
+        """
+        return self.__ensemble_size
+
+
+class ExportDataset:
+    """
+    A class used to write data to a NetCDF file. Creates a new file or overwrites an old file. The
+    file metadata will be written upon initialization. Data such as times, station data and
+    timeseries data should be presented to the ExportDataset through the various methods. When all
+    data has been written, the close method must be called to flush the changes from local memory
+    to the actual file on disk.
+    """
+
+    def __init__(self, folder: str, basename: str):
+        """
+        :param folder: Folder the file will be located in.
+        :param basename: Basename of the file, extension ".nc" will be appended to this
+        """
+        # Create the file and open a Dataset to access it
+        self.__filename = os.path.join(folder, basename + ".nc")
+        # use same write format as FEWS
+        self.__dataset = Dataset(self.__filename, mode="w", format="NETCDF3_CLASSIC")
+
+        # write metadata to the file
+        self.__dataset.title = "RTC-Tools Output Data"
+        self.__dataset.institution = "Deltares"
+        self.__dataset.source = "RTC-Tools"
+        self.__dataset.history = "Generated on {}".format(datetime.now())
+        self.__dataset.Conventions = "CF-1.6"
+        self.__dataset.featureType = "timeseries"
+
+        # dimensions are created when writing times and station data, must be created before
+        # writing variables
+        self.__time_dim = None
+        self.__station_dim = None
+        self.__station_id_to_index_mapping = None
+        self.__ensemble_member_index_dim = None
+
+        self.__timeseries_variables = {}
+
+    def __str__(self):
+        return self.__filename
+
+    def write_times(self, times: np.ndarray, forecast_time: float, forecast_date: datetime) -> None:
+        """
+        Writes a time variable to the given dataset.
+
+        :param dataset: The NetCDF4.Dataset object that the times will be written to
+            (must have write permission)
+        :param times: The times that are to be written in seconds.
+        :param forecast_time: The forecast time in seconds corresponding to the forecast date
+        :param forecast_date: The datetime corresponding with time in seconds at the forecast
+            index.
+        """
+
+        # in a NetCDF file times are written with respect to a reference date
+        # the written values for the times may never be negative, so use the earliest time as the
+        # reference date
+        reference_date = forecast_date
+        minimum_time = np.min(times)
+        if minimum_time < 0:
+            times = times - minimum_time
+            reference_date = reference_date - timedelta(seconds=forecast_time - minimum_time)
+
+        self.__time_dim = self.__dataset.createDimension("time", None)
+
+        time_var = self.__dataset.createVariable("time", "f8", ("time",))
+        time_var.standard_name = "time"
+        time_var.units = "seconds since {}".format(reference_date)
+        time_var.axis = "T"
+        time_var[:] = times
+
+    def write_ensemble_data(self, ensemble_size):
+        if ensemble_size > 1:
+            self.__ensemble_member_dim = self.__dataset.createDimension(
+                "realization", ensemble_size
+            )
+            ensemble_member_var = self.__dataset.createVariable(
+                "realization", "i", ("realization",)
+            )
+            ensemble_member_var.standard_name = "realization"
+            ensemble_member_var.long_name = "Index of an ensemble member within an ensemble"
+            ensemble_member_var.units = 1
+
+    def write_station_data(self, stations: Stations, output_station_ids: List[str]) -> None:
+        """
+        Writes the station ids and additional station information to the given dataset.
+
+        :param stations: The stations data read from the input file.
+        :param output_station_ids: The set of station ids for which output will be written. Must be
+            unique.
+        """
+        assert len(set(output_station_ids)) == len(output_station_ids)
+
+        self.__station_dim = self.__dataset.createDimension("station", len(output_station_ids))
+
+        # first write the ids
+        max_id_length = max(len(id) for id in output_station_ids)
+        self.__dataset.createDimension("char_leng_id", max_id_length)
+        station_id_var = self.__dataset.createVariable(
+            "station_id", "c", ("station", "char_leng_id")
+        )
+        station_id_var.long_name = "station identification code"
+        station_id_var.cf_role = "timeseries_id"
+
+        # we must store the index we use for each station id, to be able to write the data at the
+        # correct index later
+        self.__station_id_to_index_mapping = {}
+        for i, id in enumerate(output_station_ids):
+            station_id_var[i, :] = list(id)
+            self.__station_id_to_index_mapping[id] = i
+
+        # now write the stored attributes
+        for var_name, attr_var in stations.attribute_variables.items():
+            variable = self.__dataset.createVariable(var_name, attr_var.datatype, ("station",))
+            # copy all attributes from the original input variable
+            variable.setncatts(attr_var.__dict__)
+
+            for station_id in output_station_ids:
+                if station_id in stations.attributes:
+                    station_index = self.__station_id_to_index_mapping[station_id]
+                    variable[station_index] = stations.attributes[station_id][var_name]
+
+    def create_variables(self, variable_names: List[str], ensemble_size: int) -> None:
+        """
+        Creates variables in the dataset for each of the provided parameter ids.
+        The write_times and write_station_data methods must be called first, to ensure the necessary
+        dimensions have already been created in the output NetCDF file.
+
+        :param variable_names: The parameter ids for which variables must be created. Must be
+            unique.
+        :param ensemble_size: the number of members in the ensemble
+        """
+        assert len(set(variable_names)) == len(variable_names)
+
+        assert self.__time_dim is not None, (
+            "First call write_times to ensure the time dimension has been created."
+        )
+        assert self.__station_dim is not None, (
+            "First call write_station_data to ensure the station dimension has been created"
+        )
+        assert (
+            self.__station_id_to_index_mapping is not None
+        )  # should also be created in write_station_data
+
+        if ensemble_size > 1:
+            assert self.__ensemble_member_dim is not None, (
+                "First call write_ensemble_data to ensure "
+                "the realization dimension has been created"
+            )
+
+            for variable_name in variable_names:
+                self.__dataset.createVariable(
+                    variable_name, "f8", ("time", "station", "realization"), fill_value=np.nan
+                )
+        else:
+            for variable_name in variable_names:
+                self.__dataset.createVariable(
+                    variable_name, "f8", ("time", "station"), fill_value=np.nan
+                )
+
+    def write_output_values(
+        self,
+        station_id: str,
+        variable_name: str,
+        ensemble_member_index: int,
+        values: np.ndarray,
+        ensemble_size: int,
+    ) -> None:
+        """
+        Writes the given data to the dataset. The variable must have already been created through
+        the create_variables method. After all calls to write_output_values, the close method must
+        be called to flush all changes.
+
+        :param station_id: The id of the station the data is written for.
+        :param variable_name: The name of the variable the data is written to (must have already
+            been created).
+        :param ensemble_member_index: The index associated to the ensemble member
+        :param values: The values that are to be written to the file
+        :param ensemble_size: the number of members in the ensemble
+        """
+        assert self.__station_id_to_index_mapping is not None, (
+            "First call write_station_data and create_variables."
+        )
+
+        station_index = self.__station_id_to_index_mapping[station_id]
+        if ensemble_size > 1:
+            self.__dataset.variables[variable_name][:, station_index, ensemble_member_index] = (
+                values
+            )
+        else:
+            self.__dataset.variables[variable_name][:, station_index] = values
+
+    def close(self) -> None:
+        """
+        Closes the NetCDF4 Dataset to ensure all changes made are written to the file.
+        This method must be called after writing all data through the various write method.
+        """
+        self.__dataset.close()
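
For orientation, the sketch below shows how the classes added in this file fit together; it is not part of the package. The folder and basenames ("input", "output", "timeseries_import", "timeseries_export") are illustrative, and the call order on ExportDataset (write_times, write_ensemble_data, write_station_data, create_variables, write_output_values, close) follows the asserts and docstrings above.

import numpy as np

from rtctools.data.netcdf import ExportDataset, ImportDataset

# Read <folder>/timeseries_import.nc (names are illustrative, not fixed by the package).
imported = ImportDataset("input", "timeseries_import")
datetimes = imported.read_import_times()          # datetime objects
stations = imported.read_station_data()
variables = imported.find_timeseries_variables()

# ExportDataset.write_times expects times in seconds relative to the forecast date,
# whereas read_import_times returns datetimes, so convert first.
forecast_date = datetimes[0]
seconds = np.array([(t - forecast_date).total_seconds() for t in datetimes])

exported = ExportDataset("output", "timeseries_export")
exported.write_times(seconds, 0.0, forecast_date)
exported.write_ensemble_data(imported.ensemble_size)
exported.write_station_data(stations, list(stations.station_ids))
exported.create_variables(variables, imported.ensemble_size)

# Copy the first timeseries variable for every station into the export file.
for i, station_id in enumerate(stations.station_ids):
    values = imported.read_timeseries_values(i, variables[0], ensemble_member=0)
    exported.write_output_values(station_id, variables[0], 0, values, imported.ensemble_size)

exported.close()

The explicit close call at the end is what flushes the in-memory Dataset to disk, as noted in the ExportDataset docstring.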