py-ewr 2.2.0__tar.gz → 2.2.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. {py_ewr-2.2.0 → py_ewr-2.2.1}/PKG-INFO +14 -3
  2. {py_ewr-2.2.0 → py_ewr-2.2.1}/README.md +10 -1
  3. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/data_inputs.py +13 -0
  4. py_ewr-2.2.1/py_ewr/io.py +24 -0
  5. py_ewr-2.2.1/py_ewr/model_metadata/iqqm_stations.csv +11 -0
  6. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/scenario_handling.py +107 -0
  7. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr.egg-info/PKG-INFO +14 -3
  8. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr.egg-info/SOURCES.txt +2 -0
  9. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr.egg-info/requires.txt +3 -1
  10. {py_ewr-2.2.0 → py_ewr-2.2.1}/setup.py +4 -2
  11. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_data_inputs.py +15 -1
  12. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_scenario_handling.py +59 -2
  13. {py_ewr-2.2.0 → py_ewr-2.2.1}/LICENSE +0 -0
  14. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/__init__.py +0 -0
  15. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/evaluate_EWRs.py +0 -0
  16. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/model_metadata/SiteID_MDBA.csv +0 -0
  17. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/model_metadata/SiteID_NSW.csv +0 -0
  18. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/observed_handling.py +0 -0
  19. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/parameter_metadata/ewr_calc_config.json +0 -0
  20. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/parameter_metadata/parameter_sheet.csv +0 -0
  21. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr/summarise_results.py +0 -0
  22. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr.egg-info/dependency_links.txt +0 -0
  23. {py_ewr-2.2.0 → py_ewr-2.2.1}/py_ewr.egg-info/top_level.txt +0 -0
  24. {py_ewr-2.2.0 → py_ewr-2.2.1}/pyproject.toml +0 -0
  25. {py_ewr-2.2.0 → py_ewr-2.2.1}/setup.cfg +0 -0
  26. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_evaluate_ewr_rest.py +0 -0
  27. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_evaluate_ewrs.py +0 -0
  28. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_observed_handling.py +0 -0
  29. {py_ewr-2.2.0 → py_ewr-2.2.1}/tests/test_summarise_results.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: py_ewr
3
- Version: 2.2.0
3
+ Version: 2.2.1
4
4
  Summary: Environmental Water Requirement calculator
5
5
  Home-page: https://github.com/MDBAuth/EWR_tool
6
6
  Author: Martin Job
@@ -23,9 +23,11 @@ Requires-Dist: ipython==8.8.0
23
23
  Requires-Dist: ipywidgets==7.7.0
24
24
  Requires-Dist: pandas==2.0.3
25
25
  Requires-Dist: requests==2.25.1
26
- Requires-Dist: tqdm==4.64.0
26
+ Requires-Dist: tqdm>=4.66.1
27
27
  Requires-Dist: mdba-gauge-getter==0.5.1
28
28
  Requires-Dist: cachetools==5.2.0
29
+ Requires-Dist: xarray==2023.01.0
30
+ Requires-Dist: netCDF4==1.6.4
29
31
  Requires-Dist: numpy<2
30
32
 
31
33
  [![CI](https://github.com/MDBAuth/EWR_tool/actions/workflows/test-release.yml/badge.svg)]()
@@ -33,9 +35,10 @@ Requires-Dist: numpy<2
33
35
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
34
36
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
35
37
 
36
- ### **EWR tool version 2.2.0 README**
38
+ ### **EWR tool version 2.2.1 README**
37
39
 
38
40
  ### **Notes on recent version update**
41
+ - Adding new model format handling - 'IQQM - netcdf'
39
42
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
40
43
  - ten thousand year handling - This has been briefly taken offline for this version.
41
44
  - bug fixes: spells of length equal to the minimum required spell length were getting filtered out of the successful events table and successful interevents table, fixed misclassification of some gauges to flow, level, and lake level categories
@@ -222,3 +225,11 @@ NSW:
222
225
 
223
226
  Consult the user manual for instructions on how to run the tool. Please email the above email addresses for a copy of the user manual.
224
227
 
228
+ To disable progress bars, as for example when running remote scripted runs, use
229
+
230
+ ``` python
231
+ import os
232
+ os.environ["TQDM_DISABLE"] = "1"
233
+ ```
234
+ *before* importing py-ewr in your script.
235
+
@@ -3,9 +3,10 @@
3
3
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
4
4
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
5
5
 
6
- ### **EWR tool version 2.2.0 README**
6
+ ### **EWR tool version 2.2.1 README**
7
7
 
8
8
  ### **Notes on recent version update**
9
+ - Adding new model format handling - 'IQQM - netcdf'
9
10
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
10
11
  - ten thousand year handling - This has been briefly taken offline for this version.
11
12
  - bug fixes: spells of length equal to the minimum required spell length were getting filtered out of the successful events table and successful interevents table, fixed misclassification of some gauges to flow, level, and lake level categories
@@ -192,3 +193,11 @@ NSW:
192
193
 
193
194
  Consult the user manual for instructions on how to run the tool. Please email the above email addresses for a copy of the user manual.
194
195
 
196
+ To disable progress bars, as for example when running remote scripted runs, use
197
+
198
+ ``` python
199
+ import os
200
+ os.environ["TQDM_DISABLE"] = "1"
201
+ ```
202
+ *before* importing py-ewr in your script.
203
+
@@ -138,6 +138,19 @@ def get_NSW_codes() -> pd.DataFrame:
138
138
 
139
139
  return metadata
140
140
 
141
+ def get_iqqm_codes() -> dict:
142
+ '''
143
+ Load metadata file for Macquarie containing model nodes
144
+ and gauges they correspond to
145
+
146
+ Returns:
147
+ dict: dict for linking model nodes to gauges
148
+ '''
149
+
150
+ metadf = pd.read_csv( BASE_PATH / 'model_metadata/iqqm_stations.csv', dtype=str)
151
+ metadata = metadf.set_index(metadf.columns[0]).to_dict()[metadf.columns[1]]
152
+ return metadata
153
+
141
154
  def get_level_gauges() -> tuple:
142
155
  '''Returning level gauges with EWRs
143
156
 
@@ -0,0 +1,24 @@
1
+ import xarray as xr
2
+ from pandas import DataFrame as Dataframe
3
+
4
+
5
+ def read_netcdf_as_dataframe(netcdf_path: str) -> Dataframe:
6
+ dataset = xr.open_dataset(netcdf_path, engine='netcdf4')
7
+ df = dataset.to_dataframe()
8
+ dataset.close()
9
+
10
+ return df
11
+
12
+
13
+ def save_dataframe_as_netcdf(df, output_path: str) -> None:
14
+ # Convert DataFrame to Xarray Dataset
15
+ ds = xr.Dataset.from_dataframe(df)
16
+
17
+ # Modify variable names to ensure they are valid for NetCDF
18
+ for var_name in ds.variables:
19
+ new_var_name = var_name.replace(" ", "_") # Replace spaces with underscores
20
+ new_var_name = ''.join(c for c in new_var_name if c.isalnum() or c == "_") # Remove non-alphanumeric characters
21
+ ds = ds.rename({var_name: new_var_name})
22
+
23
+ # Save the modified Xarray Dataset as a NetCDF file
24
+ ds.to_netcdf(output_path)
@@ -0,0 +1,11 @@
1
+ IQQM,gauge
2
+ 229,421023
3
+ 42,421001
4
+ 464,421011
5
+ 240,421019
6
+ 266,421146
7
+ 951,421090
8
+ 487,421022
9
+ 130,421012
10
+ 171,421004
11
+
@@ -8,6 +8,8 @@ import logging
8
8
 
9
9
  import pandas as pd
10
10
  from tqdm import tqdm
11
+ import xarray as xr
12
+ import netCDF4
11
13
 
12
14
  log = logging.getLogger(__name__)
13
15
  log.addHandler(logging.NullHandler())
@@ -15,6 +17,61 @@ log.addHandler(logging.NullHandler())
15
17
 
16
18
  from . import data_inputs, evaluate_EWRs, summarise_results
17
19
  #----------------------------------- Scenario testing handling functions--------------------------#
20
+ def is_valid_netcdf_file(file_path: str) -> bool:
21
+ try:
22
+ with netCDF4.Dataset(file_path, 'r'):
23
+ # If the file opens successfully, it's a valid NetCDF file
24
+ return True
25
+ except Exception as e:
26
+ # If an exception is raised, it's not a valid NetCDF file
27
+ return False
28
+
29
+
30
+ def unpack_netcdf_as_dataframe(netcdf_file: str) -> pd.DataFrame:
31
+ '''Ingesting netCDF files and outputting as dataframes in memory.
32
+ # Example usage:
33
+ # df = unpack_netcdf_as_dataframe('your_file.nc')
34
+
35
+ Args:
36
+ netcdf_file (str): location of netCDF file
37
+
38
+ Returns:
39
+ pd.DataFrame: netCDF file converted to dataframe
40
+ '''
41
+ try:
42
+ # Check if the file is a valid NetCDF file
43
+ if not is_valid_netcdf_file(netcdf_file):
44
+ raise ValueError("Not a valid NetCDF file.")
45
+
46
+ # Open the NetCDF file
47
+ dataset = xr.open_dataset(netcdf_file, engine='netcdf4')
48
+
49
+ # Check if the dataset is empty
50
+ if dataset is None:
51
+ raise ValueError("NetCDF dataset is empty.")
52
+
53
+ # extract the bits we actually can use
54
+ # Some of this needs to move/get cleaned up
55
+ iqqm_dict = data_inputs.get_iqqm_codes()
56
+ # the nodes are ints, but the above is str
57
+ ints_list = list(map(int, list(iqqm_dict)))
58
+
59
+ # Is there any reason to do these in one step?
60
+ dataset = dataset.sel(node=dataset['node'].isin(ints_list))
61
+ dataset = dataset[['Simulated flow']]
62
+
63
+ # Convert to DataFrame
64
+ df = dataset.to_dataframe()
65
+
66
+ # Close the dataset
67
+ dataset.close()
68
+
69
+ return df
70
+ except Exception as e:
71
+ # Handle any exceptions that may occur
72
+ print(f"Error: {str(e)}")
73
+ return None
74
+
18
75
 
19
76
  def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
20
77
  '''Ingesting scenario file locations of model files with all formats (excluding standard timeseries format), seperates the flow data and header data
@@ -280,6 +337,52 @@ def cleaner_standard_timeseries(input_df: pd.DataFrame, ewr_table_path: str = No
280
337
  log.info('Could not identify gauge in column name:', gauge, ', skipping analysis of data in this column.')
281
338
  return df_flow, df_level
282
339
 
340
+ def cleaner_netcdf_werp(input_df: pd.DataFrame, stations: dict) -> pd.DataFrame:
341
+
342
+ '''Ingests dataframe, cleans up into a format matching IQQM csv
343
+
344
+ Args:
345
+ input_df (pd.DataFrame): raw xarray dataframe read-in
346
+
347
+ stations (dict): dict mapping IQQM stations to gauge numbers
348
+
349
+ Returns:
350
+ tuple[pd.DataFrame, pd.DataFrame]: Cleaned flow dataframe; cleaned water level dataframe
351
+
352
+ '''
353
+
354
+ # organise like the rest of the dataframes- make this look just like we've read it in from an IQQM csv
355
+ cleaned_df = input_df.reset_index(level = 'node')
356
+ cleaned_df['node'] = cleaned_df['node'].astype(str)
357
+
358
+ cleaned_df['gauge'] = cleaned_df['node'].map(stations)
359
+ cleaned_df = cleaned_df.drop('node', axis = 1)
360
+
361
+ # drop the values that don't map to a gauge (lots of nodes in iqqm don't)
362
+ # This should be deprecated with the new way of choosing nodes on read-in, but being careful
363
+ cleaned_df = cleaned_df.query('gauge.notna()')
364
+
365
+ # give each gauge its own column- that's what the tool expects
366
+ cleaned_df = cleaned_df.pivot(columns = 'gauge', values = 'Simulated flow')
367
+ cleaned_df.columns.name = None
368
+
369
+ # the csvs return an 'object' type, not a datetime in the index
370
+ # but it gets converted to datetime in cleaner_***, so leave it.
371
+ cleaned_df.index.names = ['Date']
372
+
373
+ # Split gauges into flow and level, allocate to respective dataframe
374
+ flow_gauges = data_inputs.get_gauges('flow gauges')
375
+ level_gauges = data_inputs.get_gauges('level gauges')
376
+ df_flow = pd.DataFrame(index = cleaned_df.index)
377
+ df_level = pd.DataFrame(index = cleaned_df.index)
378
+ for gauge in cleaned_df.columns:
379
+ if gauge in flow_gauges:
380
+ df_flow[gauge] = cleaned_df[gauge].copy(deep=True)
381
+ if gauge in level_gauges:
382
+ df_level[gauge] = cleaned_df[gauge].copy(deep=True)
383
+
384
+ return df_flow, df_level
385
+
283
386
 
284
387
  def cleaner_ten_thousand_year(input_df: pd.DataFrame, ewr_table_path: str = None) -> pd.DataFrame:
285
388
  '''Ingests dataframe, removes junk columns, fixes date, allocates gauges to either flow/level
@@ -455,6 +558,10 @@ class ScenarioHandler:
455
558
  df_clean = cleaner_NSW(data)
456
559
  df_F, df_L = match_NSW_nodes(df_clean, data_inputs.get_NSW_codes())
457
560
 
561
+ elif self.model_format == 'IQQM - netcdf':
562
+ df_unpacked = unpack_netcdf_as_dataframe(scenarios[scenario])
563
+ df_F, df_L = cleaner_netcdf_werp(df_unpacked, data_inputs.get_iqqm_codes())
564
+
458
565
  elif self.model_format == 'ten thousand year':
459
566
  df = pd.read_csv(scenarios[scenario], index_col = 'Date')
460
567
  df_F, df_L = cleaner_ten_thousand_year(df, self.parameter_sheet)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: py_ewr
3
- Version: 2.2.0
3
+ Version: 2.2.1
4
4
  Summary: Environmental Water Requirement calculator
5
5
  Home-page: https://github.com/MDBAuth/EWR_tool
6
6
  Author: Martin Job
@@ -23,9 +23,11 @@ Requires-Dist: ipython==8.8.0
23
23
  Requires-Dist: ipywidgets==7.7.0
24
24
  Requires-Dist: pandas==2.0.3
25
25
  Requires-Dist: requests==2.25.1
26
- Requires-Dist: tqdm==4.64.0
26
+ Requires-Dist: tqdm>=4.66.1
27
27
  Requires-Dist: mdba-gauge-getter==0.5.1
28
28
  Requires-Dist: cachetools==5.2.0
29
+ Requires-Dist: xarray==2023.01.0
30
+ Requires-Dist: netCDF4==1.6.4
29
31
  Requires-Dist: numpy<2
30
32
 
31
33
  [![CI](https://github.com/MDBAuth/EWR_tool/actions/workflows/test-release.yml/badge.svg)]()
@@ -33,9 +35,10 @@ Requires-Dist: numpy<2
33
35
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
34
36
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
35
37
 
36
- ### **EWR tool version 2.2.0 README**
38
+ ### **EWR tool version 2.2.1 README**
37
39
 
38
40
  ### **Notes on recent version update**
41
+ - Adding new model format handling - 'IQQM - netcdf'
39
42
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
40
43
  - ten thousand year handling - This has been briefly taken offline for this version.
41
44
  - bug fixes: spells of length equal to the minimum required spell length were getting filtered out of the successful events table and successful interevents table, fixed misclassification of some gauges to flow, level, and lake level categories
@@ -222,3 +225,11 @@ NSW:
222
225
 
223
226
  Consult the user manual for instructions on how to run the tool. Please email the above email addresses for a copy of the user manual.
224
227
 
228
+ To disable progress bars, as for example when running remote scripted runs, use
229
+
230
+ ``` python
231
+ import os
232
+ os.environ["TQDM_DISABLE"] = "1"
233
+ ```
234
+ *before* importing py-ewr in your script.
235
+
@@ -5,6 +5,7 @@ setup.py
5
5
  py_ewr/__init__.py
6
6
  py_ewr/data_inputs.py
7
7
  py_ewr/evaluate_EWRs.py
8
+ py_ewr/io.py
8
9
  py_ewr/observed_handling.py
9
10
  py_ewr/scenario_handling.py
10
11
  py_ewr/summarise_results.py
@@ -15,6 +16,7 @@ py_ewr.egg-info/requires.txt
15
16
  py_ewr.egg-info/top_level.txt
16
17
  py_ewr/model_metadata/SiteID_MDBA.csv
17
18
  py_ewr/model_metadata/SiteID_NSW.csv
19
+ py_ewr/model_metadata/iqqm_stations.csv
18
20
  py_ewr/parameter_metadata/ewr_calc_config.json
19
21
  py_ewr/parameter_metadata/parameter_sheet.csv
20
22
  tests/test_data_inputs.py
@@ -2,7 +2,9 @@ ipython==8.8.0
2
2
  ipywidgets==7.7.0
3
3
  pandas==2.0.3
4
4
  requests==2.25.1
5
- tqdm==4.64.0
5
+ tqdm>=4.66.1
6
6
  mdba-gauge-getter==0.5.1
7
7
  cachetools==5.2.0
8
+ xarray==2023.01.0
9
+ netCDF4==1.6.4
8
10
  numpy<2
@@ -6,7 +6,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
6
6
 
7
7
  setup(
8
8
  name="py_ewr",
9
- version="2.2.0",
9
+ version="2.2.1",
10
10
  author="Martin Job",
11
11
  author_email="Martin.Job@mdba.gov.au",
12
12
  description="Environmental Water Requirement calculator",
@@ -35,9 +35,11 @@ setup(
35
35
  "ipywidgets==7.7.0",
36
36
  "pandas==2.0.3",
37
37
  "requests==2.25.1",
38
- "tqdm==4.64.0",
38
+ "tqdm>=4.66.1",
39
39
  "mdba-gauge-getter==0.5.1",
40
40
  "cachetools==5.2.0",
41
+ "xarray==2023.01.0",
42
+ "netCDF4==1.6.4",
41
43
  "numpy<2"
42
44
  ],
43
45
  package_data={'': ["model_metadata/*.csv", "parameter_metadata/*.csv","parameter_metadata/*.json"]},
@@ -105,4 +105,18 @@ def test_get_cllmm_gauges():
105
105
  def test_get_scenario_gauges(gauge_results, expected_results):
106
106
  result = data_inputs.get_scenario_gauges(gauge_results)
107
107
  assert sorted(result) == expected_results
108
-
108
+
109
+ def test_get_iqqm_codes():
110
+ result = data_inputs.get_iqqm_codes()
111
+ stations = {
112
+ '229': '421023',
113
+ '42': '421001',
114
+ '464': '421011',
115
+ '240': '421019',
116
+ '266': '421146',
117
+ '951': '421090',
118
+ '487': '421022',
119
+ '130': '421012',
120
+ '171': '421004',
121
+ }
122
+ assert stations == result
@@ -197,6 +197,30 @@ def test_cleaner_MDBA():
197
197
 
198
198
  assert_frame_equal(df_clean, expected_df)
199
199
 
200
+ def test_cleaner_netcdf_werp():
201
+ '''
202
+ 1. check ncdf is unpacked correctly
203
+ '''
204
+ df = scenario_handling.unpack_netcdf_as_dataframe('unit_testing_files/werp_ncdf.nc')
205
+ df_F, df_L = scenario_handling.cleaner_netcdf_werp(df, data_inputs.get_iqqm_codes())
206
+
207
+ # the test ncdf is too big to mock, so check properties
208
+ assert df_F.dtypes.iloc[0] == 'float32'
209
+ assert isinstance(df_F.index, pd.DatetimeIndex)
210
+ assert all(df_F.columns == ['421001', '421004', '421012', '421019', '421022', '421023', '421090', '421146'])
211
+
212
+
213
+ def test_csv_input():
214
+ '''
215
+ 1. check we can feed scenario_handling a csv that looks like gauge data
216
+ '''
217
+
218
+ # Can we use standard time-series to feed csv scenarios?
219
+ ewr_sh_standard = scenario_handling.ScenarioHandler('unit_testing_files/multi_gauge_input_label.csv', 'Standard time-series')
220
+ standardout = ewr_sh_standard.get_ewr_results()
221
+
222
+ assert isinstance(standardout, pd.DataFrame)
223
+
200
224
 
201
225
  def test_build_NSW_columns():
202
226
  '''
@@ -266,6 +290,7 @@ def test_unpack_model_file():
266
290
 
267
291
  # assert_frame_equal(flow, expected_flow)
268
292
 
293
+
269
294
  def test_scenario_handler_class(scenario_handler_expected_detail, scenario_handler_instance):
270
295
 
271
296
  detailed = scenario_handler_instance.pu_ewr_statistics
@@ -325,11 +350,43 @@ def test_get_ewr_results(scenario_handler_instance):
325
350
  assert ewr_results.columns.to_list() == ['Scenario', 'Gauge', 'PlanningUnit', 'EwrCode', 'Multigauge','EventYears',
326
351
  'Frequency', 'TargetFrequency', 'AchievementCount',
327
352
  'AchievementPerYear', 'EventCount', 'EventCountAll', 'EventsPerYear', 'EventsPerYearAll',
328
- 'AverageEventLength', 'ThresholdDays',
353
+ 'AverageEventLength', 'ThresholdDays', #'InterEventExceedingCount',
329
354
  'MaxInterEventYears', 'NoDataDays', 'TotalDays']
330
355
 
356
+
357
+ def test_unpack_netcdf_as_dataframe():
358
+ test_flowcdf = 'unit_testing_files/werp_ncdf.nc'
359
+ result_flow = scenario_handling.unpack_netcdf_as_dataframe(test_flowcdf)
360
+ expected_flow_shape = (16000, 1)
361
+ assert result_flow.shape == expected_flow_shape
362
+
363
+
364
+ def test_unpack_netcdf_as_dataframe_invalid_file():
365
+ test_invalid_file = 'unit_testing_files/NSW_source_res_test_file_header_result.csv'
366
+ try:
367
+ result_df = scenario_handling.unpack_netcdf_as_dataframe(test_invalid_file)
368
+ except ValueError as e:
369
+ assert "Not a valid NetCDF file." in str(e)
370
+
371
+
331
372
  def test_any_cllmm_to_process(gauge_results):
332
373
  result = scenario_handling.any_cllmm_to_process(gauge_results)
333
374
  assert result == True
334
375
 
335
-
376
+ # This *should* likely use something like conftest.scenario_handler_instance, but that seems to be locked to bigmod.
377
+ def test_netcdf_processes():
378
+ # Testing the netcdf format:
379
+ # Input params
380
+ # scenarios = 'unit_testing_files/ex_tasker.nc'
381
+ scenarios = 'unit_testing_files/werp_ncdf.nc'
382
+ model_format = 'IQQM - netcdf'
383
+ # allowance = {'minThreshold': 1.0, 'maxThreshold': 1.0, 'duration': 1.0, 'drawdown': 1.0}
384
+ # climate = 'Standard - 1911 to 2018 climate categorisation'
385
+
386
+ # Pass to the class
387
+
388
+ ewr_sh = scenario_handling.ScenarioHandler(scenarios, model_format)
389
+
390
+ ewr_summary = ewr_sh.get_ewr_results()
391
+
392
+ assert ewr_summary.shape == (202, 19)
File without changes
File without changes
File without changes
File without changes
File without changes