py-ewr 2.3.7__py3-none-any.whl → 2.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -44,7 +44,6 @@ def unpack_netcdf_as_dataframe(netcdf_file: str) -> pd.DataFrame:
 
     # Open the NetCDF file
     dataset = xr.open_dataset(netcdf_file, engine='netcdf4')
-
     # Check if the dataset is empty
     if dataset is None:
         raise ValueError("NetCDF dataset is empty.")
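
A note on the hunk above: xarray's open_dataset raises an exception on a missing or unreadable file rather than returning None, so the emptiness guard is a defensive check. A minimal sketch of the unpack pattern, using a hypothetical file path:

    import xarray as xr

    # Hypothetical IQQM output file
    dataset = xr.open_dataset('iqqm_output.nc', engine='netcdf4')
    # Flatten to a long dataframe indexed by the NetCDF dimensions (e.g. time, node)
    df = dataset.to_dataframe()
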
@@ -363,15 +362,15 @@ def cleaner_netcdf_werp(input_df: pd.DataFrame, stations: dict, ewr_table_path:
     cleaned_df = input_df.reset_index(level = 'node')
     cleaned_df['node'] = cleaned_df['node'].astype(str)
 
-    cleaned_df['gauge'] = cleaned_df['node'].map(stations)
+    cleaned_df['Gauge'] = cleaned_df['node'].map(stations)
     cleaned_df = cleaned_df.drop('node', axis = 1)
 
     # drop the values that don't map to a gauge (lots of nodes in iqqm don't)
     # This should be deprecated with the new way of choosing nodes on read-in, but being careful
-    cleaned_df = cleaned_df.query('gauge.notna()')
+    cleaned_df = cleaned_df.query('Gauge.notna()')
 
     # give each gauge its own column- that's what the tool expects
-    cleaned_df = cleaned_df.pivot(columns = 'gauge', values = 'Simulated flow')
+    cleaned_df = cleaned_df.pivot(columns = 'Gauge', values = 'Simulated flow')
     cleaned_df.columns.name = None
 
     # the csvs return an 'object' type, not a datetime in the index
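
The three changes above rename the working column from 'gauge' to 'Gauge' before the query and pivot. A minimal sketch of what the map-then-pivot step produces, with hypothetical node names and gauge numbers (not taken from the package):

    import pandas as pd

    # Hypothetical long-format IQQM data: one row per node per day
    dates = pd.to_datetime(['2020-07-01', '2020-07-01', '2020-07-02', '2020-07-02'])
    df = pd.DataFrame({'node': ['n1', 'n2', 'n1', 'n2'],
                       'Simulated flow': [10.0, 20.0, 11.0, 21.0]}, index=dates)

    stations = {'n1': '410001'}  # n2 maps to no gauge and is dropped by the query

    df['Gauge'] = df['node'].map(stations)
    df = df.drop('node', axis=1)
    df = df.query('Gauge.notna()')
    # One column per gauge with dates as the index - the shape the tool expects
    wide = df.pivot(columns='Gauge', values='Simulated flow')
    wide.columns.name = None
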
@@ -492,7 +491,7 @@ def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_t
             report.at[gauge, 'level'] = 'Y'
 
     if df_flow.empty and df_level.empty:
-        raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
+        raise ValueError('No relevant gauges and or measurands found in dataset, the ewr tool cannot evaluate this model output file')
 
     # report.to_csv('report_v1.csv')
     return df_flow, df_level, report
@@ -536,7 +535,7 @@ def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_t
     # df_level[gauge] = input_df[col]
 
     # if df_flow.empty and df_level.empty:
-    #     raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
+    #     raise ValueError('No relevant gauges and or measurands found in dataset, the ewr tool cannot evaluate this model output file')
 
     # df_flow.to_csv('existing_flow_mapped.csv')
     # df_level.to_csv('existing_level_mapped.csv')
@@ -574,6 +573,120 @@ def any_cllmm_to_process(gauge_results: dict)->bool:
     cllmm_gauges = data_inputs.get_cllmm_gauges()
     processed_gauges = data_inputs.get_scenario_gauges(gauge_results)
     return any(gauge in processed_gauges for gauge in cllmm_gauges)
+def extract_gauge_from_parenthesis(input_string: str) -> str:
+    '''Takes in a string with the gauge between the parentheses
+
+    Args:
+        input_string (str): string which contains a gauge number between parentheses
+
+    Returns:
+        str: Gauge number as a string
+
+    '''
+
+    # Find positions of the first pair of brackets
+    start = input_string.index("(") + 1
+    end = input_string.index(")")
+    # Extract the substring
+    gauge = input_string[start:end]
+
+    return gauge
+
+def cleaner_res_csv_MDBA(input_df: pd.DataFrame, ewr_table_path: str = None) -> pd.DataFrame:
+    '''
+    Saves the date column as a datetime object,
+    removes the first row,
+    labels the column names with the gauge,
+    and returns the dataframe
+    '''
+
+    cleaned_df = input_df.copy(deep=True)
+    cleaned_df = cleaned_df.set_index('Date')
+    cleaned_df.index = pd.to_datetime(cleaned_df.index, format = '%Y-%m-%d')
+
+    df_flow = pd.DataFrame(index = cleaned_df.index)
+    df_level = pd.DataFrame(index = cleaned_df.index)
+    df_flow.index.name = 'Date'
+    df_level.index.name = 'Date'
+
+    flow_gauges = data_inputs.get_gauges('flow gauges', ewr_table_path=ewr_table_path)
+    level_gauges = data_inputs.get_gauges('level gauges', ewr_table_path=ewr_table_path)
+
+    report = pd.DataFrame(index = list(set(list(flow_gauges) + list(level_gauges))), columns = ['flow', 'level'])
+    report['flow'] = 'N'
+    report['level'] = 'N'
+
+    for gauge in cleaned_df.columns:
+        gauge_only = extract_gauge_from_parenthesis(gauge)
+        gauge_only = str.upper(gauge_only)
+        df_flow[gauge_only] = cleaned_df[gauge].copy(deep=True)
+        report.at[gauge_only, 'flow'] = 'Y'
+
+        if not gauge_only:
+            log.info('Could not identify gauge in column name: %s, skipping analysis of data in this column.', gauge)
+    return df_flow, df_level, report
+
+def unpack_MDBA_res_csv_file(csv_file: str, main_key: str, header_key: str) -> tuple:
+    '''Ingests scenario file locations of model files with all formats (excluding the standard timeseries format), separates the flow data and header data,
+    and returns a dictionary of flow dataframes with their associated header data
+
+    Args:
+        csv_file (str): location of model file
+        main_key (str): unique identifier for the start of the flow data (dependent on the model format type being uploaded)
+        header_key (str): unique identifier for the start of the header data (dependent on the model format type being uploaded)
+
+    Returns:
+        tuple[pd.DataFrame, pd.DataFrame]: flow dataframe; header dataframe
+
+    '''
+    if csv_file[-3:] != 'csv':
+        raise ValueError('''Incorrect file type selected, bigmod format requires a csv file.
+        Rerun the program and try again.''')
+
+    #--------functions for pulling main data-------#
+
+    def mainData_url(url, line, **kwargs):
+        '''Get daily data (excluding the header data); remote file upload'''
+        response = urllib.request.urlopen(url)
+        lines = [l.decode('utf-8') for l in response.readlines()]
+        cr = csv.reader(lines)
+        pos = 0
+        for row in cr:
+            if row[0].startswith(line):
+                headerVal = pos
+                break
+            pos = pos + 1
+        if main_key == 'Date':
+            df = pd.read_csv(url, header=headerVal, skiprows=range(headerVal+1, headerVal+2))
+            df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
+
+        return df, headerVal
+
+    def mainData(file, line, **kwargs):
+        '''Get daily data (excluding the header data); local file upload'''
+        if os.stat(file).st_size == 0:
+            raise ValueError("File is empty")
+        with open(file) as csv_file:
+            csv_reader = csv.reader(csv_file)  # , delimiter=','
+            line_count = 0
+            for row in csv_reader:
+                if row[0].startswith(line):
+                    headerVal = line_count
+                    break
+                line_count = line_count + 1
+        if main_key == 'Date':
+            df = pd.read_csv(file, header=headerVal, skiprows=range(headerVal+1, headerVal+2))
+            df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
+
+        return df, headerVal
+
+
+    if 'http' in csv_file:
+        mainData_df, endLine = mainData_url(csv_file, main_key, sep=",")
+    else:
+        mainData_df, endLine = mainData(csv_file, main_key, sep=",")
+
+    return mainData_df
 
 class ScenarioHandler:
 
@@ -621,7 +734,18 @@ class ScenarioHandler:
                 data = build_MDBA_columns(data, header)
                 df_clean = cleaner_MDBA(data)
                 self.df_clean = df_clean
-                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes(), self.parameter_sheet)
+                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes('Bigmod - MDBA'), self.parameter_sheet)
+
+            elif self.model_format == 'FIRM - MDBA':
+                data, header = unpack_model_file(scenarios[scenario], 'Dy', 'Field')
+                data = build_MDBA_columns(data, header)
+                df_clean = cleaner_MDBA(data)
+                self.df_clean = df_clean
+                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes('FIRM - MDBA'), self.parameter_sheet)
+
+            elif self.model_format == 'res.csv - MDBA':
+                data = unpack_MDBA_res_csv_file(scenarios[scenario], 'Date', 'Field')
+                df_F, df_L, self.report = cleaner_res_csv_MDBA(data, self.parameter_sheet)
 
             elif self.model_format == 'Standard time-series':
                 df = pd.read_csv(scenarios[scenario], index_col = 'Date')
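
The two new model_format branches above route through the functions added earlier: unpack_MDBA_res_csv_file reads the daily block of a bigmod-style res.csv from a local path or URL, and cleaner_res_csv_MDBA keys each column on the gauge ID embedded in the heading (every column lands in the flow dataframe; df_level is returned empty). A hedged sketch of both pieces with invented inputs, assuming ScenarioHandler keeps its existing scenario_file/model_format constructor arguments (not shown in this diff):

    from py_ewr.scenario_handling import ScenarioHandler, extract_gauge_from_parenthesis

    # The helper pulls the gauge ID out of a res.csv heading; note str.index
    # raises ValueError if no parentheses are present
    extract_gauge_from_parenthesis('Flow at some node (410130)')  # -> '410130'

    # 'res.csv - MDBA' and 'FIRM - MDBA' join existing formats such as
    # 'Bigmod - MDBA', 'Standard time-series' and 'IQQM - netcdf'
    handler = ScenarioHandler(scenario_file='model_output.csv',  # hypothetical path
                              model_format='res.csv - MDBA')
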
@@ -631,7 +755,7 @@ class ScenarioHandler:
                 data, header = unpack_model_file(scenarios[scenario], 'Date', 'Field')
                 data = build_NSW_columns(data, header)
                 df_clean = cleaner_NSW(data)
-                df_F, df_L = match_NSW_nodes(df_clean, data_inputs.get_NSW_codes())
+                df_F, df_L = match_NSW_nodes(df_clean, data_inputs.get_NSW_codes(), self.parameter_sheet)
 
             elif self.model_format == 'IQQM - netcdf':
                 df_unpacked = unpack_netcdf_as_dataframe(scenarios[scenario])
@@ -645,7 +769,6 @@ class ScenarioHandler:
 
         all_data = []
         for scenario_file in scenario:
-            print(scenario_file)
             try:
                 data, header = unpack_model_file(scenario_file, 'Dy', 'Field')
                 data = build_MDBA_columns(data, header)
@@ -692,8 +815,8 @@ class ScenarioHandler:
 
         all_events = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=all_events,
-                               left_on=['gauge','pu','ewr'],
-                               selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                               left_on=['Gauge','PlanningUnit','Code'],
+                               selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                'eventDuration', 'eventLength',
                                'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
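
This hunk, and the matching ones below, move the result tables from the old lowercase column names ('gauge', 'pu', 'ewr'; 'ewrCode' in the yearly results) to the parameter-sheet names ('Gauge', 'PlanningUnit', 'Code'). Downstream code written against 2.3.7 output can bridge the rename; a sketch:

    import pandas as pd

    # Map 2.3.7-era column names onto the 2.3.9 names
    legacy_to_new = {'gauge': 'Gauge', 'pu': 'PlanningUnit',
                     'ewr': 'Code', 'ewrCode': 'Code'}

    def upgrade_columns(df: pd.DataFrame) -> pd.DataFrame:
        '''Rename only the legacy columns actually present.'''
        return df.rename(columns={k: v for k, v in legacy_to_new.items()
                                  if k in df.columns})
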
@@ -712,8 +835,8 @@ class ScenarioHandler:
 
         all_events_temp = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=all_events_temp,
-                               left_on=['gauge','pu','ewr'],
-                               selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                               left_on=['Gauge','PlanningUnit','Code'],
+                               selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                'eventDuration', 'eventLength',
                                'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
@@ -740,8 +863,8 @@ class ScenarioHandler:
 
         all_events_temp1 = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=all_events_temp1,
-                               left_on=['gauge','pu','ewr'],
-                               selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                               left_on=['Gauge','PlanningUnit','Code'],
+                               selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                'eventDuration', 'eventLength',
                                'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
@@ -762,8 +885,8 @@ class ScenarioHandler:
 
         all_events_temp2 = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=all_events_temp2,
-                               left_on=['gauge','pu','ewr'],
-                               selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                               left_on=['Gauge','PlanningUnit','Code'],
+                               selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                'eventDuration', 'eventLength',
                                'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
@@ -799,12 +922,12 @@ class ScenarioHandler:
 
         yearly_ewr_results = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=yearly_ewr_results,
-                               left_on=['gauge','pu','ewrCode'],
+                               left_on=['Gauge','PlanningUnit','Code'],
                                selected_columns= ['Year', 'eventYears', 'numAchieved', 'numEvents', 'numEventsAll',
                                'eventLength', 'eventLengthAchieved', 'totalEventDays', 'totalEventDaysAchieved',
                                'maxEventDays', 'maxRollingEvents', 'maxRollingAchievement',
-                               'missingDays', 'totalPossibleDays', 'ewrCode',
-                               'scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'Multigauge'],
+                               'missingDays', 'totalPossibleDays', 'Code',
+                               'scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
 
         # Setting up the dictionary of yearly rolling maximum interevent periods:
@@ -813,8 +936,8 @@ class ScenarioHandler:
 
         all_events_temp = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                left_table=all_events_temp,
-                               left_on=['gauge','pu','ewr'],
-                               selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                               left_on=['Gauge','PlanningUnit','Code'],
+                               selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                'eventDuration', 'eventLength',
                                'Multigauge'],
                                parameter_sheet_path=self.parameter_sheet)
@@ -855,11 +978,11 @@ class ScenarioHandler:
         For each unique ewr/pu/gauge - examines the ewr_results dataframe and if it exists here, then it is set to True in the logging_sheet dataframe.
         Create corresponding column in logging_sheet to log info.
         '''
-        results = self.ewr_results[["PlanningUnit", "Gauge", "EwrCode"]].copy()
+        results = self.ewr_results[["PlanningUnit", 'Gauge', "Code"]].copy()
         results["Analysed?"] = True
-        self.logging_sheet = self.logging_sheet.merge(right = results, left_on=["PlanningUnitName", "Primary Gauge", "Code"], right_on=["PlanningUnit", "Gauge", "EwrCode"], how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = results, left_on=["PlanningUnitName", "Primary Gauge", "Code"], right_on=["PlanningUnit", 'Gauge', "Code"], how="left")
         self.logging_sheet["Analysed?"] = ~self.logging_sheet["Analysed?"].isna()
-        self.logging_sheet["Gauge"] = self.logging_sheet["Gauge_x"].copy()
+        self.logging_sheet['Gauge'] = self.logging_sheet["Gauge_x"].copy()
 
 
     def log_if_node_in_siteID(self):
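
The merge above now joins on 'Code' from both sides, so that key survives unsuffixed, while the non-key 'Gauge' column exists in both frames and comes out as Gauge_x/Gauge_y, which is why the last changed line copies 'Gauge_x' back to 'Gauge'. A minimal reproduction with invented values:

    import pandas as pd

    left = pd.DataFrame({'Primary Gauge': ['410001'], 'Code': ['LF1'], 'Gauge': ['410001']})
    right = pd.DataFrame({'Gauge': ['410001'], 'Code': ['LF1'], 'Analysed?': [True]})

    merged = left.merge(right, left_on=['Primary Gauge', 'Code'],
                        right_on=['Gauge', 'Code'], how='left')
    # 'Gauge' is in both frames but is not a shared key name, so pandas
    # suffixes it: ['Primary Gauge', 'Code', 'Gauge_x', 'Gauge_y', 'Analysed?']
    print(merged.columns.tolist())
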
@@ -882,7 +1005,7 @@ class ScenarioHandler:
         elif self.model_format == 'ten thousand year':
             pass
 
-        self.logging_sheet["node_in_siteID?"] = self.logging_sheet["Gauge"].isin(self.site_id_df["AWRC"].unique())
+        self.logging_sheet["node_in_siteID?"] = self.logging_sheet['Gauge'].isin(self.site_id_df["AWRC"].unique())
 
     def log_if_gauge_in_model_file(self):
         '''
@@ -892,7 +1015,7 @@ class ScenarioHandler:
         site_id_in_model_file = [n for name in self.df_clean.columns for n in name.split("-")]
         self.site_id_df["IN_MODELFILE"] = self.site_id_df["SITEID"].isin(site_id_in_model_file)
         self.gauges_in_model_file = self.site_id_df[self.site_id_df.IN_MODELFILE]["AWRC"]
-        self.logging_sheet["gauge_in_model_file?"] = self.logging_sheet["Gauge"].isin(self.gauges_in_model_file)
+        self.logging_sheet["gauge_in_model_file?"] = self.logging_sheet['Gauge'].isin(self.gauges_in_model_file)
 
     def log_measurand_info(self):
         '''
@@ -901,9 +1024,9 @@ class ScenarioHandler:
         '''
         self.logging_sheet.loc[:, "gaugeANDmeasurand_in_model_file? (Yes/No)"] = False
 
-        for idx, row in self.logging_sheet[["Gauge", "GaugeType"]].drop_duplicates().iterrows():
+        for idx, row in self.logging_sheet[['Gauge', "GaugeType"]].drop_duplicates().iterrows():
 
-            gauge = row["Gauge"]
+            gauge = row['Gauge']
             gauge_type = row["GaugeType"]
 
             if gauge_type == 'F':
@@ -932,16 +1055,16 @@ class ScenarioHandler:
         PU_items = self.logging_sheet.groupby(['PlanningUnitID', 'PlanningUnitName']).size().reset_index().drop([0], axis=1)
         gauge_table = self.logging_sheet[self.logging_sheet['Primary Gauge'] == gauge]
 
-        for PU in set(gauge_table['PlanningUnitID']):
+        for pu in set(gauge_table['PlanningUnitID']):
 
-            PU_table = gauge_table[gauge_table['PlanningUnitID'] == PU]
+            PU_table = gauge_table[gauge_table['PlanningUnitID'] == pu]
             EWR_categories = PU_table['FlowLevelVolume'].values
             EWR_codes = PU_table['Code']
 
-            for cat, EWR in zip(EWR_categories, EWR_codes):
+            for cat, ewr in zip(EWR_categories, EWR_codes):
 
                 ## CUSTOM MULTIGAUGE CHECK
-                item = self.logging_sheet[(self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==EWR) & (self.logging_sheet['PlanningUnitID']==PU)]
+                item = self.logging_sheet[(self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==ewr) & (self.logging_sheet['PlanningUnitID']==pu)]
                 item = item.replace({np.nan: None})
                 mg = item['Multigauge'].to_list()
 
@@ -953,8 +1076,8 @@ class ScenarioHandler:
                     gauge_calc_type = 'multigauge'
                 ####
 
-                ewr_key = f'{EWR}-{gauge_calc_type}-{cat}'
-                self.logging_sheet.loc[((self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==EWR) & (self.logging_sheet['PlanningUnitID']==PU)), "EWR_key"] = ewr_key
+                ewr_key = f'{ewr}-{gauge_calc_type}-{cat}'
+                self.logging_sheet.loc[((self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==ewr) & (self.logging_sheet['PlanningUnitID']==pu)), "EWR_key"] = ewr_key
                 function_name = evaluate_EWRs.find_function(ewr_key, calc_config)
                 ewr_keys_in_parameter_sheet.append(ewr_key)
 
@@ -978,14 +1101,14 @@ class ScenarioHandler:
         # spare_siteID_df = spare_siteID_df.groupby("AWRC").agg({'SITEID': lambda x: list(x)})
         spare_siteID_df = spare_siteID_df.rename(columns={"SITEID": "spare_SITEID"})
 
-        self.logging_sheet = self.logging_sheet.merge(right = spare_siteID_df, left_on=["Gauge"], right_index=True, how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = spare_siteID_df, left_on=['Gauge'], right_index=True, how="left")
 
         ### section to add the used SITEID
         used_siteID_df = self.site_id_df[self.site_id_df.IN_MODELFILE][["AWRC", "SITEID"]]
         used_siteID_df = used_siteID_df.rename(columns={"SITEID": "matched_SITEID"})
         used_siteID_df = used_siteID_df.set_index("AWRC")
 
-        self.logging_sheet = self.logging_sheet.merge(right = used_siteID_df, left_on=["Gauge"], right_index=True, how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = used_siteID_df, left_on=['Gauge'], right_index=True, how="left")
 
         # mark spare_SITEID column of those that dont have more than one SITEID to match with as EXACT MATCHES
         self.logging_sheet.loc[~self.logging_sheet.matched_SITEID.isna() & self.logging_sheet.spare_SITEID.isna(), "spare_SITEID"] = "EXACT_MATCH"
@@ -1002,7 +1125,7 @@ class ScenarioHandler:
         for counter, (idx, row) in enumerate(rows_to_duplicate.iterrows()):
             updated_idx = counter + idx  # update idx to account for all inserted rows
             duplicate_row = logging_sheet.loc[updated_idx, :].copy()
-            duplicate_row["Gauge"] = logging_sheet.loc[updated_idx, "Multigauge"]
+            duplicate_row['Gauge'] = logging_sheet.loc[updated_idx, "Multigauge"]
             logging_sheet = pd.DataFrame(np.insert(logging_sheet.values, updated_idx+1, values=duplicate_row.values, axis=0), columns=logging_sheet.columns)
 
         ## Weirpool
@@ -1012,7 +1135,7 @@ class ScenarioHandler:
         for counter, (idx, row) in enumerate(rows_to_duplicate.iterrows()):
             updated_idx = counter + idx  # update idx to account for all inserted rows
             duplicate_row = logging_sheet.loc[updated_idx, :].copy()
-            duplicate_row["Gauge"] = logging_sheet.loc[updated_idx, "WeirpoolGauge"]
+            duplicate_row['Gauge'] = logging_sheet.loc[updated_idx, "WeirpoolGauge"]
             duplicate_row["GaugeType"] = "L"
             logging_sheet = pd.DataFrame(np.insert(logging_sheet.values, updated_idx+1, values=duplicate_row.values, axis=0), columns=logging_sheet.columns)
 
@@ -1036,7 +1159,7 @@ class ScenarioHandler:
             duplicate_row = logging_sheet.loc[updated_idx, :].copy()
 
             rows_to_insert = pd.DataFrame([duplicate_row] * len(gauge_list))
-            rows_to_insert["Gauge"] = gauge_list
+            rows_to_insert['Gauge'] = gauge_list
 
             logging_sheet.drop(index=updated_idx, axis=0, inplace=True)
 
@@ -1053,7 +1176,7 @@ class ScenarioHandler:
         """
         parameter_sheet = pd.read_csv(self.parameter_sheet)
 
-        self.logging_sheet = parameter_sheet.copy()[["PlanningUnitName", "Code", "Gauge", "GaugeType", 'PlanningUnitID', 'FlowLevelVolume', "Multigauge", "WeirpoolGauge"]]
+        self.logging_sheet = parameter_sheet.copy()[["PlanningUnitName", "Code", 'Gauge', "GaugeType", 'PlanningUnitID', 'FlowLevelVolume', "Multigauge", "WeirpoolGauge"]]
 
         self.logging_sheet = self.create_multi_index(self.logging_sheet)
 
@@ -1064,8 +1187,8 @@ class ScenarioHandler:
         self.log_calc_config_info()
         self.log_siteID_info()
 
-        self.logging_sheet = self.logging_sheet[["PlanningUnitName", "Code", "Primary Gauge", "Gauge", "GaugeType", "is_in_calc_config?", "node_in_siteID?", "gauge_in_model_file?", "gaugeANDmeasurand_in_model_file? (Yes/No)", "matched_SITEID", "spare_SITEID", "Analysed?"]]
+        self.logging_sheet = self.logging_sheet[["PlanningUnitName", "Code", "Primary Gauge", 'Gauge', "GaugeType", "is_in_calc_config?", "node_in_siteID?", "gauge_in_model_file?", "gaugeANDmeasurand_in_model_file? (Yes/No)", "matched_SITEID", "spare_SITEID", "Analysed?"]]
 
         return self.logging_sheet
 
-
+