py-ewr 2.3.6__py3-none-any.whl → 2.3.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -44,7 +44,6 @@ def unpack_netcdf_as_dataframe(netcdf_file: str) -> pd.DataFrame:
 
     # Open the NetCDF file
     dataset = xr.open_dataset(netcdf_file, engine='netcdf4')
-
     # Check if the dataset is empty
     if dataset is None:
         raise ValueError("NetCDF dataset is empty.")
@@ -363,15 +362,15 @@ def cleaner_netcdf_werp(input_df: pd.DataFrame, stations: dict, ewr_table_path:
     cleaned_df = input_df.reset_index(level = 'node')
     cleaned_df['node'] = cleaned_df['node'].astype(str)
 
-    cleaned_df['gauge'] = cleaned_df['node'].map(stations)
+    cleaned_df['Gauge'] = cleaned_df['node'].map(stations)
     cleaned_df = cleaned_df.drop('node', axis = 1)
 
     # drop the values that don't map to a gauge (lots of nodes in iqqm don't)
     # This should be deprecated with the new way of choosing nodes on read-in, but being careful
-    cleaned_df = cleaned_df.query('gauge.notna()')
+    cleaned_df = cleaned_df.query('Gauge.notna()')
 
     # give each gauge its own column- that's what the tool expects
-    cleaned_df = cleaned_df.pivot(columns = 'gauge', values = 'Simulated flow')
+    cleaned_df = cleaned_df.pivot(columns = 'Gauge', values = 'Simulated flow')
     cleaned_df.columns.name = None
 
     # the csvs return an 'object' type, not a datetime in the index
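
The recurring change in this release is a rename of the working columns to match the parameter sheet's casing: 'gauge' becomes 'Gauge' here, with 'pu' → 'PlanningUnit' and 'ewr' → 'Code' in later hunks. A minimal sketch of the map/query/pivot sequence above, with an invented stations mapping (the node names, gauge numbers, and flow values are illustrative only):

    import pandas as pd

    # Hypothetical node-to-gauge mapping; node_9 deliberately has no gauge
    stations = {'node_1': '409025', 'node_2': '412002'}
    idx = pd.MultiIndex.from_product(
        [pd.date_range('2020-07-01', periods=3), ['node_1', 'node_2', 'node_9']],
        names=['time', 'node'])
    df = pd.DataFrame({'Simulated flow': range(9)}, index=idx)

    cleaned = df.reset_index(level='node')
    cleaned['node'] = cleaned['node'].astype(str)
    cleaned['Gauge'] = cleaned['node'].map(stations)   # unmapped nodes become NaN
    cleaned = cleaned.drop('node', axis=1)
    cleaned = cleaned.query('Gauge.notna()')           # may need engine='python' on older pandas
    cleaned = cleaned.pivot(columns='Gauge', values='Simulated flow')
    cleaned.columns.name = None                        # one column per mapped gauge
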
@@ -492,7 +491,7 @@ def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_t
                 report.at[gauge, 'level'] = 'Y'
 
     if df_flow.empty and df_level.empty:
-        raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
+        raise ValueError('No relevant gauges and or measurands found in dataset, the ewr tool cannot evaluate this model output file')
 
     # report.to_csv('report_v1.csv')
     return df_flow, df_level, report
@@ -536,7 +535,7 @@ def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_t
     #             df_level[gauge] = input_df[col]
 
     # if df_flow.empty and df_level.empty:
-    #     raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
+    #     raise ValueError('No relevant gauges and or measurands found in dataset, the ewr tool cannot evaluate this model output file')
 
     # df_flow.to_csv('existing_flow_mapped.csv')
     # df_level.to_csv('existing_level_mapped.csv')
@@ -574,6 +573,120 @@ def any_cllmm_to_process(gauge_results: dict)->bool:
574
573
  cllmm_gauges = data_inputs.get_cllmm_gauges()
575
574
  processed_gauges = data_inputs.get_scenario_gauges(gauge_results)
576
575
  return any(gauge in processed_gauges for gauge in cllmm_gauges)
576
+ def extract_gauge_from_parenthesis(input_string: str) -> str:
577
+ '''Takes in a strings with the gauge inbetween the parenthesis
578
+
579
+ Args:
580
+ input_string (str): string which contains a gauge number between parenthesis
581
+
582
+ Returns:
583
+ str: Gauge number as a string
584
+
585
+ '''
586
+
587
+ # Find positions of the first pair of brackets
588
+ start = input_string.index("(") + 1
589
+ end = input_string.index(")")
590
+ # Extract the substring
591
+ gauge = input_string[start:end]
592
+
593
+ return gauge
594
+
595
+ def cleaner_res_csv_MDBA(input_df:pd.DataFrame, ewr_table_path: str = None) -> pd.DataFrame:
596
+ '''
597
+ Saves date column as a datetime object
598
+ Removes first row
599
+ Labels the column names with the gauge
600
+ Returns dataframe
601
+ '''
602
+
603
+ cleaned_df = input_df.copy(deep=True)
604
+ cleaned_df = cleaned_df.set_index('Date')
605
+ cleaned_df.index = pd.to_datetime(cleaned_df.index, format = '%Y-%m-%d')
606
+
607
+ df_flow = pd.DataFrame(index = cleaned_df.index)
608
+ df_level = pd.DataFrame(index = cleaned_df.index)
609
+ df_flow.index.name = 'Date'
610
+ df_level.index.name = 'Date'
611
+
612
+ flow_gauges = data_inputs.get_gauges('flow gauges', ewr_table_path=ewr_table_path)
613
+ level_gauges = data_inputs.get_gauges('level gauges', ewr_table_path=ewr_table_path)
614
+
615
+ report = pd.DataFrame(index = list(set(list(flow_gauges) + list(level_gauges))), columns = ['flow', 'level'])
616
+ report['flow'] = 'N'
617
+ report['level'] = 'N'
618
+
619
+ for gauge in cleaned_df.columns:
620
+ gauge_only = extract_gauge_from_parenthesis(gauge)
621
+ gauge_only = str.upper(gauge_only)
622
+ df_flow[gauge_only] = cleaned_df[gauge].copy(deep=True)
623
+ report.at[gauge_only, 'flow'] = 'Y'
624
+
625
+ if not gauge_only:
626
+ log.info('Could not identify gauge in column name:', gauge, ', skipping analysis of data in this column.')
627
+ return df_flow, df_level, report
628
+
629
+ def unpack_MDBA_res_csv_file(csv_file: str, main_key: str, header_key: str) -> tuple:
630
+ '''Ingesting scenario file locations of model files with all formats (excluding standard timeseries format), seperates the flow data and header data
631
+ returns a dictionary of flow dataframes with their associated header data
632
+
633
+ Args:
634
+ csv_file (str): location of model file
635
+ main_key (str): unique identifier for the start of the flow data (dependent on model format type being uploaded)
636
+ header_key (str): unique identifier for the start of the header data (dependent on model format type being uploaded)
637
+
638
+ Results:
639
+ tuple[pd.DataFrame, pd.DataFrame]: flow dataframe; header dataframe
640
+
641
+ '''
642
+ if csv_file[-3:] != 'csv':
643
+ raise ValueError('''Incorrect file type selected, bigmod format requires a csv file.
644
+ Rerun the program and try again.''')
645
+
646
+ #--------functions for pulling main data-------#
647
+
648
+ def mainData_url(url, line,**kwargs):
649
+ '''Get daily data (excluding the header data); remote file upload'''
650
+ response = urllib.request.urlopen(url)
651
+ lines = [l.decode('utf-8') for l in response.readlines()]
652
+ cr = csv.reader(lines)
653
+ pos = 0
654
+ for row in cr:
655
+ if row[0].startswith(line):
656
+ headerVal = pos
657
+ break
658
+ pos = pos + 1
659
+ if main_key == 'Date':
660
+ df = pd.read_csv(url, header=headerVal, skiprows=range(headerVal+1, headerVal+2))
661
+ df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
662
+
663
+ return df, headerVal
664
+
665
+ def mainData(file, line,**kwargs):
666
+ '''Get daily data (excluding the header data); local file upload'''
667
+ if os.stat(file).st_size == 0:
668
+ raise ValueError("File is empty")
669
+ with open(file) as csv_file:
670
+ csv_reader = csv.reader(csv_file) #, delimiter=','
671
+ line_count = 0
672
+ for row in csv_reader:
673
+ if row[0].startswith(line):
674
+ headerVal = line_count
675
+ break
676
+ line_count = line_count + 1
677
+ if main_key == 'Date':
678
+ df = pd.read_csv(file, header=headerVal, skiprows=range(headerVal+1, headerVal+2))
679
+ df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
680
+
681
+ return df, headerVal
682
+
683
+
684
+ if 'http' in csv_file:
685
+ mainData_df, endLine = mainData_url(csv_file, main_key, sep=",")
686
+ else:
687
+ mainData_df, endLine = mainData(csv_file, main_key, sep=",")
688
+
689
+ return mainData_df
577
690
 
578
691
  class ScenarioHandler:
579
692
 
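The res.csv path added here hinges on extract_gauge_from_parenthesis, which slices out whatever sits between the first pair of brackets in a column header. An illustrative call (the header text and gauge number are invented, not a real res.csv column):

    # Hypothetical res.csv column header
    extract_gauge_from_parenthesis('Flow at Colignan (414209)')  # -> '414209'

    # With no brackets, str.index raises ValueError before the empty-string
    # guard in cleaner_res_csv_MDBA can fire
    extract_gauge_from_parenthesis('Flow at Colignan')           # ValueError

Note also that cleaner_res_csv_MDBA writes every column into df_flow and returns df_level empty, so res.csv inputs are treated as flow-only.
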
@@ -621,7 +734,18 @@ class ScenarioHandler:
                 data = build_MDBA_columns(data, header)
                 df_clean = cleaner_MDBA(data)
                 self.df_clean = df_clean
-                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes(), self.parameter_sheet)
+                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes('Bigmod - MDBA'), self.parameter_sheet)
+
+            elif self.model_format == 'FIRM - MDBA':
+                data, header = unpack_model_file(scenarios[scenario], 'Dy', 'Field')
+                data = build_MDBA_columns(data, header)
+                df_clean = cleaner_MDBA(data)
+                self.df_clean = df_clean
+                df_F, df_L, self.report = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes('FIRM - MDBA'), self.parameter_sheet)
+
+            elif self.model_format == 'res.csv - MDBA':
+                data = unpack_MDBA_res_csv_file(scenarios[scenario], 'Date', 'Field')
+                df_F, df_L, self.report = cleaner_res_csv_MDBA(data, self.parameter_sheet)
 
             elif self.model_format == 'Standard time-series':
                 df = pd.read_csv(scenarios[scenario], index_col = 'Date')
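
Two new MDBA formats join the dispatch chain above ('FIRM - MDBA' and 'res.csv - MDBA'), and get_MDBA_codes now takes the format name so each branch pulls its own code mapping. A hedged sketch of driving the new res.csv branch (the file path is hypothetical, and the constructor and accessor names are assumed from the py-ewr documentation rather than verified against this release):

    from py_ewr.scenario_handling import ScenarioHandler

    handler = ScenarioHandler(
        scenario_file='outputs/basin_run_res.csv',  # hypothetical path
        model_format='res.csv - MDBA',              # format key added in 2.3.8
    )
    ewr_results = handler.get_ewr_results()         # accessor name assumed
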
@@ -631,7 +755,7 @@ class ScenarioHandler:
                 data, header = unpack_model_file(scenarios[scenario], 'Date', 'Field')
                 data = build_NSW_columns(data, header)
                 df_clean = cleaner_NSW(data)
-                df_F, df_L = match_NSW_nodes(df_clean, data_inputs.get_NSW_codes())
+                df_F, df_L = match_NSW_nodes(df_clean, data_inputs.get_NSW_codes(), self.parameter_sheet)
 
             elif self.model_format == 'IQQM - netcdf':
                 df_unpacked = unpack_netcdf_as_dataframe(scenarios[scenario])
@@ -692,8 +816,8 @@ class ScenarioHandler:
 
         all_events = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=all_events,
-                                left_on=['gauge','pu','ewr'],
-                                selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                                left_on=['Gauge','PlanningUnit','Code'],
+                                selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                 'eventDuration', 'eventLength',
                                 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
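
Every join_ewr_parameters call site in this class switches its join keys from the lower-case trio ('gauge', 'pu', 'ewr'/'ewrCode') to the parameter-sheet casing ('Gauge', 'PlanningUnit', 'Code'); the same substitution repeats in the hunks below. A minimal pandas sketch of a join keyed on the new names (both frames and all values are invented, and the real join_ewr_parameters wraps more logic than a bare merge):

    import pandas as pd

    events = pd.DataFrame({'Gauge': ['409025'], 'PlanningUnit': ['PU-A'],
                           'Code': ['LF1'], 'eventDuration': [12]})
    params = pd.DataFrame({'Gauge': ['409025'], 'PlanningUnit': ['PU-A'],
                           'Code': ['LF1'], 'Multigauge': [None],
                           'State': ['NSW'], 'SWSDLName': ['Murrumbidgee']})

    # Join events to parameter-sheet attributes on the renamed key columns
    joined = events.merge(params, on=['Gauge', 'PlanningUnit', 'Code'], how='left')
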
@@ -712,8 +836,8 @@ class ScenarioHandler:
 
         all_events_temp = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=all_events_temp,
-                                left_on=['gauge','pu','ewr'],
-                                selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                                left_on=['Gauge','PlanningUnit','Code'],
+                                selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                 'eventDuration', 'eventLength',
                                 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
@@ -740,8 +864,8 @@ class ScenarioHandler:
 
         all_events_temp1 = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=all_events_temp1,
-                                left_on=['gauge','pu','ewr'],
-                                selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                                left_on=['Gauge','PlanningUnit','Code'],
+                                selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                 'eventDuration', 'eventLength',
                                 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
@@ -762,8 +886,8 @@ class ScenarioHandler:
 
         all_events_temp2 = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=all_events_temp2,
-                                left_on=['gauge','pu','ewr'],
-                                selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                                left_on=['Gauge','PlanningUnit','Code'],
+                                selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                 'eventDuration', 'eventLength',
                                 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
@@ -799,12 +923,12 @@ class ScenarioHandler:
 
         yearly_ewr_results = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=yearly_ewr_results,
-                                left_on=['gauge','pu','ewrCode'],
+                                left_on=['Gauge','PlanningUnit','Code'],
                                 selected_columns= ['Year', 'eventYears', 'numAchieved', 'numEvents', 'numEventsAll',
                                 'eventLength', 'eventLengthAchieved', 'totalEventDays', 'totalEventDaysAchieved',
                                 'maxEventDays', 'maxRollingEvents', 'maxRollingAchievement',
-                                'missingDays', 'totalPossibleDays', 'ewrCode',
-                                'scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'Multigauge'],
+                                'missingDays', 'totalPossibleDays', 'Code',
+                                'scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
 
         # Setting up the dictionary of yearly rolling maximum interevent periods:
@@ -813,8 +937,8 @@ class ScenarioHandler:
 
         all_events_temp = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge', 'State', 'SWSDLName'],
                                 left_table=all_events_temp,
-                                left_on=['gauge','pu','ewr'],
-                                selected_columns= ['scenario', 'gauge', 'pu', 'State', 'SWSDLName', 'ewr', 'waterYear', 'startDate', 'endDate',
+                                left_on=['Gauge','PlanningUnit','Code'],
+                                selected_columns= ['scenario', 'Gauge', 'PlanningUnit', 'State', 'SWSDLName', 'Code', 'waterYear', 'startDate', 'endDate',
                                 'eventDuration', 'eventLength',
                                 'Multigauge'],
                                 parameter_sheet_path=self.parameter_sheet)
@@ -855,11 +979,11 @@ class ScenarioHandler:
        For each unique ewr/pu/gauge - examines the ewr_results dataframe and if it exists here, then it is set to True in the logging_sheet dataframe.
        Create corresponding column in logging_sheet to log info.
        '''
-        results = self.ewr_results[["PlanningUnit", "Gauge", "EwrCode"]].copy()
+        results = self.ewr_results[["PlanningUnit", 'Gauge', "Code"]].copy()
        results["Analysed?"] = True
-        self.logging_sheet = self.logging_sheet.merge(right = results, left_on=["PlanningUnitName", "Primary Gauge", "Code"], right_on=["PlanningUnit", "Gauge", "EwrCode"], how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = results, left_on=["PlanningUnitName", "Primary Gauge", "Code"], right_on=["PlanningUnit", 'Gauge', "Code"], how="left")
        self.logging_sheet["Analysed?"] = ~self.logging_sheet["Analysed?"].isna()
-        self.logging_sheet["Gauge"] = self.logging_sheet["Gauge_x"].copy()
+        self.logging_sheet['Gauge'] = self.logging_sheet["Gauge_x"].copy()
 
 
     def log_if_node_in_siteID(self):
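
Because the logging sheet and the results frame both carry a 'Gauge' column and it is not a shared key name on both sides of the merge above, pandas suffixes the pair, which is why the code reads the joined gauge back from 'Gauge_x'. A small self-contained illustration (frames and values invented):

    import pandas as pd

    sheet = pd.DataFrame({'PlanningUnitName': ['PU-A'], 'Primary Gauge': ['G1'],
                          'Code': ['EWR1'], 'Gauge': ['G1']})
    results = pd.DataFrame({'PlanningUnit': ['PU-A'], 'Gauge': ['G1'],
                            'Code': ['EWR1'], 'Analysed?': [True]})

    merged = sheet.merge(results,
                         left_on=['PlanningUnitName', 'Primary Gauge', 'Code'],
                         right_on=['PlanningUnit', 'Gauge', 'Code'], how='left')
    # The overlapping non-key 'Gauge' columns come back suffixed
    print([c for c in merged.columns if c.startswith('Gauge')])  # ['Gauge_x', 'Gauge_y']
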
@@ -882,7 +1006,7 @@ class ScenarioHandler:
        elif self.model_format == 'ten thousand year':
            pass
 
-        self.logging_sheet["node_in_siteID?"] = self.logging_sheet["Gauge"].isin(self.site_id_df["AWRC"].unique())
+        self.logging_sheet["node_in_siteID?"] = self.logging_sheet['Gauge'].isin(self.site_id_df["AWRC"].unique())
 
     def log_if_gauge_in_model_file(self):
        '''
888
1012
  '''
@@ -892,7 +1016,7 @@ class ScenarioHandler:
892
1016
  site_id_in_model_file = [n for name in self.df_clean.columns for n in name.split("-")]
893
1017
  self.site_id_df["IN_MODELFILE"] = self.site_id_df["SITEID"].isin(site_id_in_model_file)
894
1018
  self.gauges_in_model_file = self.site_id_df[self.site_id_df.IN_MODELFILE]["AWRC"]
895
- self.logging_sheet["gauge_in_model_file?"] = self.logging_sheet["Gauge"].isin(self.gauges_in_model_file)
1019
+ self.logging_sheet["gauge_in_model_file?"] = self.logging_sheet['Gauge'].isin(self.gauges_in_model_file)
896
1020
 
897
1021
  def log_measurand_info(self):
898
1022
  '''
@@ -901,9 +1025,9 @@ class ScenarioHandler:
901
1025
  '''
902
1026
  self.logging_sheet.loc[:, "gaugeANDmeasurand_in_model_file? (Yes/No)"] = False
903
1027
 
904
- for idx, row in self.logging_sheet[["Gauge", "GaugeType"]].drop_duplicates().iterrows():
1028
+ for idx, row in self.logging_sheet[['Gauge', "GaugeType"]].drop_duplicates().iterrows():
905
1029
 
906
- gauge = row["Gauge"]
1030
+ gauge = row['Gauge']
907
1031
  gauge_type = row["GaugeType"]
908
1032
 
909
1033
  if gauge_type == 'F':
@@ -932,16 +1056,16 @@ class ScenarioHandler:
932
1056
  PU_items = self.logging_sheet.groupby(['PlanningUnitID', 'PlanningUnitName']).size().reset_index().drop([0], axis=1)
933
1057
  gauge_table = self.logging_sheet[self.logging_sheet['Primary Gauge'] == gauge]
934
1058
 
935
- for PU in set(gauge_table['PlanningUnitID']):
1059
+ for pu in set(gauge_table['PlanningUnitID']):
936
1060
 
937
- PU_table = gauge_table[gauge_table['PlanningUnitID'] == PU]
1061
+ PU_table = gauge_table[gauge_table['PlanningUnitID'] == pu]
938
1062
  EWR_categories = PU_table['FlowLevelVolume'].values
939
1063
  EWR_codes = PU_table['Code']
940
1064
 
941
- for cat, EWR in zip(EWR_categories, EWR_codes):
1065
+ for cat, ewr in zip(EWR_categories, EWR_codes):
942
1066
 
943
1067
  ## CUSTOM MULTIGAUGE CHECK
944
- item = self.logging_sheet[(self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==EWR) & (self.logging_sheet['PlanningUnitID']==PU)]
1068
+ item = self.logging_sheet[(self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==ewr) & (self.logging_sheet['PlanningUnitID']==pu)]
945
1069
  item = item.replace({np.nan: None})
946
1070
  mg = item['Multigauge'].to_list()
947
1071
 
@@ -953,8 +1077,8 @@ class ScenarioHandler:
                    gauge_calc_type = 'multigauge'
                ####
 
-                ewr_key = f'{EWR}-{gauge_calc_type}-{cat}'
-                self.logging_sheet.loc[((self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==EWR) & (self.logging_sheet['PlanningUnitID']==PU)), "EWR_key"] = ewr_key
+                ewr_key = f'{ewr}-{gauge_calc_type}-{cat}'
+                self.logging_sheet.loc[((self.logging_sheet['Primary Gauge']==gauge) & (self.logging_sheet['Code']==ewr) & (self.logging_sheet['PlanningUnitID']==pu)), "EWR_key"] = ewr_key
                function_name = evaluate_EWRs.find_function(ewr_key, calc_config)
                ewr_keys_in_parameter_sheet.append(ewr_key)
 
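The rename from the upper-case loop variables (EWR, PU) to ewr/pu leaves the composite lookup key unchanged in shape: EWR code, calculation type, then flow/level/volume category. A quick illustration with invented values ('CF1' and 'F' are placeholders, not taken from a real parameter sheet):

    ewr = 'CF1'                    # hypothetical EWR code
    gauge_calc_type = 'multigauge'
    cat = 'F'                      # FlowLevelVolume category
    ewr_key = f'{ewr}-{gauge_calc_type}-{cat}'
    # -> 'CF1-multigauge-F', the key passed to evaluate_EWRs.find_function
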
@@ -978,14 +1102,14 @@ class ScenarioHandler:
        # spare_siteID_df = spare_siteID_df.groupby("AWRC").agg({'SITEID': lambda x: list(x)})
        spare_siteID_df = spare_siteID_df.rename(columns={"SITEID": "spare_SITEID"})
 
-        self.logging_sheet = self.logging_sheet.merge(right = spare_siteID_df, left_on=["Gauge"], right_index=True, how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = spare_siteID_df, left_on=['Gauge'], right_index=True, how="left")
 
        ### section to add the used SITEID
        used_siteID_df = self.site_id_df[self.site_id_df.IN_MODELFILE][["AWRC", "SITEID"]]
        used_siteID_df = used_siteID_df.rename(columns={"SITEID": "matched_SITEID"})
        used_siteID_df = used_siteID_df.set_index("AWRC")
 
-        self.logging_sheet = self.logging_sheet.merge(right = used_siteID_df, left_on=["Gauge"], right_index=True, how="left")
+        self.logging_sheet = self.logging_sheet.merge(right = used_siteID_df, left_on=['Gauge'], right_index=True, how="left")
 
        # mark spare_SITEID column of those that dont have more than one SITEID to match with as EXACT MATCHES
        self.logging_sheet.loc[~self.logging_sheet.matched_SITEID.isna() & self.logging_sheet.spare_SITEID.isna(), "spare_SITEID"] = "EXACT_MATCH"
@@ -1002,7 +1126,7 @@ class ScenarioHandler:
        for counter, (idx, row) in enumerate(rows_to_duplicate.iterrows()):
            updated_idx = counter + idx # update idx to account for all inserted rows
            duplicate_row = logging_sheet.loc[updated_idx, :].copy()
-            duplicate_row["Gauge"] = logging_sheet.loc[updated_idx, "Multigauge"]
+            duplicate_row['Gauge'] = logging_sheet.loc[updated_idx, "Multigauge"]
            logging_sheet = pd.DataFrame(np.insert(logging_sheet.values, updated_idx+1, values=duplicate_row.values, axis=0), columns=logging_sheet.columns)
 
        ## Weirpool
@@ -1012,7 +1136,7 @@ class ScenarioHandler:
        for counter, (idx, row) in enumerate(rows_to_duplicate.iterrows()):
            updated_idx = counter + idx # update idx to account for all inserted rows
            duplicate_row = logging_sheet.loc[updated_idx, :].copy()
-            duplicate_row["Gauge"] = logging_sheet.loc[updated_idx, "WeirpoolGauge"]
+            duplicate_row['Gauge'] = logging_sheet.loc[updated_idx, "WeirpoolGauge"]
            duplicate_row["GaugeType"] = "L"
            logging_sheet = pd.DataFrame(np.insert(logging_sheet.values, updated_idx+1, values=duplicate_row.values, axis=0), columns=logging_sheet.columns)
 
@@ -1036,7 +1160,7 @@ class ScenarioHandler:
            duplicate_row = logging_sheet.loc[updated_idx, :].copy()
 
            rows_to_insert = pd.DataFrame([duplicate_row] * len(gauge_list))
-            rows_to_insert["Gauge"] = gauge_list
+            rows_to_insert['Gauge'] = gauge_list
 
            logging_sheet.drop(index=updated_idx, axis=0, inplace=True)
 
@@ -1053,7 +1177,7 @@ class ScenarioHandler:
        """
        parameter_sheet = pd.read_csv(self.parameter_sheet)
 
-        self.logging_sheet = parameter_sheet.copy()[["PlanningUnitName", "Code", "Gauge", "GaugeType", 'PlanningUnitID', 'FlowLevelVolume', "Multigauge", "WeirpoolGauge"]]
+        self.logging_sheet = parameter_sheet.copy()[["PlanningUnitName", "Code", 'Gauge', "GaugeType", 'PlanningUnitID', 'FlowLevelVolume', "Multigauge", "WeirpoolGauge"]]
 
        self.logging_sheet = self.create_multi_index(self.logging_sheet)
 
@@ -1064,8 +1188,8 @@ class ScenarioHandler:
        self.log_calc_config_info()
        self.log_siteID_info()
 
-        self.logging_sheet = self.logging_sheet[["PlanningUnitName", "Code", "Primary Gauge", "Gauge", "GaugeType", "is_in_calc_config?", "node_in_siteID?", "gauge_in_model_file?", "gaugeANDmeasurand_in_model_file? (Yes/No)", "matched_SITEID", "spare_SITEID", "Analysed?"]]
+        self.logging_sheet = self.logging_sheet[["PlanningUnitName", "Code", "Primary Gauge", 'Gauge', "GaugeType", "is_in_calc_config?", "node_in_siteID?", "gauge_in_model_file?", "gaugeANDmeasurand_in_model_file? (Yes/No)", "matched_SITEID", "spare_SITEID", "Analysed?"]]
 
        return self.logging_sheet
 
-
+ 