psr-factory 5.0.0b16__py3-none-win_amd64.whl → 5.0.0b67__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
psr/factory/factorylib.py CHANGED
@@ -29,10 +29,16 @@ def initialize():
     global lib
     lib = load_lib()
 
-    lib.psrd_initialize.restype = ctypes.c_int
-    lib.psrd_initialize.argtypes = [ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_check_license.restype = ctypes.c_int
+    lib.psrd_check_license.argtypes = [ctypes.c_void_p]
+    lib.psrd_initialize_basic_data.restype = ctypes.c_int
+    lib.psrd_initialize_basic_data.argtypes = [ctypes.c_void_p]
+    lib.psrd_initialize_study_data.restype = ctypes.c_int
+    lib.psrd_initialize_study_data.argtypes = [ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
     lib.psrd_unload.restype = ctypes.c_int
     lib.psrd_unload.argtypes = [ctypes.c_void_p]
+    lib.psrd_get_constant.restype = ctypes.c_int
+    lib.psrd_get_constant.argtypes = [ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_set_global_setting.restype = ctypes.c_int
     lib.psrd_set_global_setting.argtypes = [ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_get_global_setting.restype = ctypes.c_int
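
The monolithic psrd_initialize entry point is replaced by separate license-check and initialization calls. A minimal sketch of the new startup sequence through ctypes, assuming the trailing c_void_p error/context argument may be passed as NULL:

    import ctypes
    import psr.factory.factorylib as factorylib

    factorylib.initialize()  # loads the DLL and declares the prototypes above
    lib = factorylib.lib

    err = ctypes.c_void_p()  # placeholder for the trailing c_void_p argument
    if lib.psrd_check_license(err) != 0:
        raise RuntimeError("license check failed")
    if lib.psrd_initialize_basic_data(err) != 0:
        raise RuntimeError("basic-data initialization failed")
    # psrd_initialize_study_data additionally takes a string and its length,
    # matching the old psrd_initialize signature.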
@@ -89,6 +95,8 @@ def initialize():
     lib.psrd_study_property_description_count.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_long), ctypes.c_void_p]
     lib.psrd_study_get_property_description.restype = ctypes.c_void_p
     lib.psrd_study_get_property_description.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_study_get_property_description_by_name.restype = ctypes.c_void_p
+    lib.psrd_study_get_property_description_by_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
     lib.psrd_study_set_value.restype = ctypes.c_int
     lib.psrd_study_set_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_study_set_value_at.restype = ctypes.c_int
@@ -99,6 +107,8 @@ def initialize():
     lib.psrd_study_get_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_study_get_value_at.restype = ctypes.c_int
     lib.psrd_study_get_value_at.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+    lib.psrd_study_has_property.restype = ctypes.c_int
+    lib.psrd_study_has_property.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_study_get_as_dict.restype = ctypes.c_void_p
     lib.psrd_study_get_as_dict.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_study_get_table.restype = ctypes.c_int
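
psrd_study_has_property follows the library's out-parameter convention: the int return is a status code and the boolean result lands in the POINTER(ctypes.c_bool) argument. A hedged wrapper sketch (the study handle and trailing context argument are placeholders, not taken from this diff):

    import ctypes

    def study_has_property(lib, study_handle, name: str) -> bool:
        # Encoded name plus its length, matching the c_char_p/c_long pair.
        encoded = name.encode("utf-8")
        result = ctypes.c_bool(False)
        status = lib.psrd_study_has_property(
            study_handle, encoded, len(encoded),
            ctypes.byref(result), None)  # trailing c_void_p assumed optional
        if status != 0:
            raise RuntimeError(f"psrd_study_has_property failed: {status}")
        return result.value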
@@ -121,6 +131,8 @@ def initialize():
     lib.psrd_study_remove.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_study_get_all_objects.restype = ctypes.c_void_p
     lib.psrd_study_get_all_objects.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+    lib.psrd_study_get_key_object_map.restype = ctypes.c_void_p
+    lib.psrd_study_get_key_object_map.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_study_find.restype = ctypes.c_void_p
     lib.psrd_study_find.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
     lib.psrd_study_find_by_id.restype = ctypes.c_void_p
@@ -141,6 +153,10 @@ def initialize():
     lib.psrd_object_get_parent.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_object_get_type.restype = ctypes.c_int
     lib.psrd_object_get_type.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_object_get_key.restype = ctypes.c_int
+    lib.psrd_object_get_key.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_object_set_key.restype = ctypes.c_int
+    lib.psrd_object_set_key.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
     lib.psrd_object_get_code.restype = ctypes.c_int
     lib.psrd_object_get_code.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int), ctypes.c_void_p]
     lib.psrd_object_set_code.restype = ctypes.c_int
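
Objects gain a string key alongside the integer code. For psrd_object_get_key, the c_char_p/c_long pair is most naturally a caller-allocated buffer plus its capacity; a sketch under that assumption:

    import ctypes

    def object_get_key(lib, object_handle, buffer_size: int = 256) -> str:
        # Writable buffer for the key; the capacity convention is assumed.
        buf = ctypes.create_string_buffer(buffer_size)
        status = lib.psrd_object_get_key(object_handle, buf, buffer_size, None)
        if status != 0:
            raise RuntimeError(f"psrd_object_get_key failed: {status}")
        return buf.value.decode("utf-8")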
@@ -157,6 +173,8 @@ def initialize():
     lib.psrd_object_property_description_count.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_long), ctypes.c_void_p]
     lib.psrd_object_get_property_description.restype = ctypes.c_void_p
     lib.psrd_object_get_property_description.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_object_get_property_description_by_name.restype = ctypes.c_void_p
+    lib.psrd_object_get_property_description_by_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
     lib.psrd_object_set_value.restype = ctypes.c_int
     lib.psrd_object_set_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_object_set_value_at.restype = ctypes.c_int
@@ -167,6 +185,8 @@ def initialize():
     lib.psrd_object_get_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_object_get_value_at.restype = ctypes.c_int
     lib.psrd_object_get_value_at.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
+    lib.psrd_object_has_property.restype = ctypes.c_int
+    lib.psrd_object_has_property.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_object_get_as_dict.restype = ctypes.c_void_p
     lib.psrd_object_get_as_dict.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_object_get_table.restype = ctypes.c_int
@@ -273,6 +293,10 @@ def initialize():
     lib.psrd_property_description_get_dimension_size.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.POINTER(ctypes.c_long), ctypes.c_void_p]
     lib.psrd_property_description_get_dimension_name.restype = ctypes.c_int
     lib.psrd_property_description_get_dimension_name.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_property_description_is_reference.restype = ctypes.c_int
+    lib.psrd_property_description_is_reference.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
+    lib.psrd_property_description_is_required.restype = ctypes.c_int
+    lib.psrd_property_description_is_required.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_property_description_is_dynamic.restype = ctypes.c_int
     lib.psrd_property_description_is_dynamic.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_property_description_is_indexed.restype = ctypes.c_int
@@ -297,6 +321,10 @@ def initialize():
     lib.psrd_table_is_indexed.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_table_resize.restype = ctypes.c_int
     lib.psrd_table_resize.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_table_get_as_dict.restype = ctypes.c_void_p
+    lib.psrd_table_get_as_dict.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
+    lib.psrd_table_set_from_dict.restype = ctypes.c_int
+    lib.psrd_table_set_from_dict.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
     lib.psrd_table_configure_index.restype = ctypes.c_int
     lib.psrd_table_configure_index.argtypes = [ctypes.c_void_p, ctypes.c_long, ctypes.c_long, ctypes.c_void_p]
     lib.psrd_table_configure_column.restype = ctypes.c_int
@@ -371,4 +399,6 @@ def initialize():
     lib.psrd_date_iterator_at_end.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_bool), ctypes.c_void_p]
     lib.psrd_date_iterator_get_value.restype = ctypes.c_int
     lib.psrd_date_iterator_get_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p]
+    lib.psrd_convert_output.restype = ctypes.c_int
+    lib.psrd_convert_output.argtypes = [ctypes.c_char_p, ctypes.c_long, ctypes.c_char_p, ctypes.c_long, ctypes.c_void_p, ctypes.c_void_p]
 
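psrd_convert_output takes two string/length pairs, consistent with a source-to-destination conversion of result files (plausible given the new psr.outputs package below, though the exact argument semantics are not documented in this diff). A hedged sketch:

    import ctypes

    def convert_output(lib, src: str, dst: str) -> None:
        # Assumes the two c_char_p/c_long pairs are the input and output
        # paths with their lengths; options and error handles passed as NULL.
        src_b, dst_b = src.encode("utf-8"), dst.encode("utf-8")
        status = lib.psrd_convert_output(src_b, len(src_b), dst_b, len(dst_b), None, None)
        if status != 0:
            raise RuntimeError(f"psrd_convert_output failed: {status}")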
@@ -28,7 +28,7 @@ def create_case01(legacy: bool = False) -> psr.factory.Study:
 
     # Study options
     study.from_dict({
-        "Type": 2,
+        "PARModel": 2,
         "InitialYearOfHydrology": 1996,
         "NumberOfSystems": 1,
         "AggregateInTheOperationPolicy": 0,
@@ -62,6 +62,7 @@ def create_case01(legacy: bool = False) -> psr.factory.Study:
     # Create a demand segment - it's required to add at least
     # an inelastic segment to a demand object.
     segment = psr.factory.create("DemandSegment", study.context)
+    segment.code = 1
     # Set demand and cost data.
     segment.set_at("EnergyPerBlock(:)", "01/2013", 8.928)
     # Add segment to the study.
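
The new segment.code = 1 line suggests newly created objects no longer receive a usable default code and must be numbered explicitly. A sketch of creating several segments under that assumption:

    # Assumption: each DemandSegment needs its own code before being added.
    for code in (1, 2, 3):
        segment = psr.factory.create("DemandSegment", study.context)
        segment.code = code
        segment.set_at("EnergyPerBlock(:)", "01/2013", 8.928)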
@@ -113,7 +114,7 @@ def create_case01(legacy: bool = False) -> psr.factory.Study:
         "InstalledCapacity": 10.0,
         "ThermalType": 0,
         "Type": 0,
-        "NumberOfGeneratingUnits": 1,
+        "NumberOfUnits": 1,
         "NumberOfAlternativeFuels": 0,
         "CodeOfAlternativeFuels(:)": 0,
         "O&MCost": 0.0,
@@ -29,7 +29,7 @@ def create_case21(legacy: bool = False) -> psr.factory.Study:
     study.set("NumberOfSeries", 1)
 
     # Study options
-    study.set("Type", 2)
+    study.set("PARModel", 2)
     study.set("InitialYearOfHydrology", 2016)
     study.set("NumberOfSystems", 1)
     study.set("AggregateInTheOperationPolicy", 1)
@@ -118,7 +118,7 @@ def create_case21(legacy: bool = False) -> psr.factory.Study:
     plant1.set("InstalledCapacity", 12.0)
     plant1.set("ThermalType", 0) # Standard operation mode.
     plant1.set("Type", 0) # It's an existing plant.
-    plant1.set("NumberOfGeneratingUnits", 1)
+    plant1.set("NumberOfUnits", 1)
     plant1.set("NumberOfAlternativeFuels", 0) # No alternative fuels
     plant1.set("CodeOfAlternativeFuels(:)", 0)
     plant1.set("O&MCost", 0.0)
@@ -141,7 +141,7 @@ def create_case21(legacy: bool = False) -> psr.factory.Study:
     plant2.set("InstalledCapacity", 8.0)
     plant2.set("ThermalType", 0)
     plant2.set("Type", 0)
-    plant2.set("NumberOfGeneratingUnits", 1)
+    plant2.set("NumberOfUnits", 1)
     plant2.set("NumberOfAlternativeFuels", 0)
     plant2.set("CodeOfAlternativeFuels(:)", 0)
     plant2.set("O&MCost", 0.0)
@@ -1,5 +1,5 @@
-# PSR Cloud. Copyright (C) PSR, Inc - All Rights Reserved
+# PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
 # Unauthorized copying of this file, via any medium is strictly prohibited
 # Proprietary and confidential
 
-__version__ = "0.3.9"
+from .outputs import *
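
The diff view omits this file's header, but the wildcard import suggests it is the psr.outputs package initializer: the stale __version__ left over from the old PSR Cloud module is dropped, and everything public from the new outputs module (below) is re-exported. Assuming the wildcard export covers those helpers:

    import psr.outputs

    # hypothetical case directory
    available = psr.outputs.get_available_outputs(r"C:\cases\case01")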
psr/outputs/outputs.py ADDED
@@ -0,0 +1,179 @@
+# PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
+# Unauthorized copying of this file, via any medium is strictly prohibited
+# Proprietary and confidential
+
+import csv
+import os
+from pathlib import Path
+from typing import List
+import pandas
+import psr.factory
+
+
+_language_dict_map = {
+    0: 'ENG',
+    1: 'ESP',
+    2: 'POR',
+}
+
+_default_language = 0
+
+
+def __rename_lang_columns(df: pandas.DataFrame) -> pandas.DataFrame:
+    default_language_code = _language_dict_map.get(_default_language, 'ENG')
+    languages = list(_language_dict_map.values())
+    languages.remove(default_language_code)
+    lang_cols = [col for col in df.columns if col.startswith(tuple(languages))]
+    df = df.drop(columns=lang_cols)
+    # Rename the default language column to remove prefix ENG-
+    for column in df.columns:
+        if column.startswith(f"{default_language_code}-"):
+            new_col = column[len(f"{default_language_code}-"):]
+            df = df.rename(columns={column: new_col})
+    return df
+
+
+def get_available_outputs_by_model(tool_path: str) -> pandas.DataFrame:
+    dat_file = os.path.join(tool_path, "indexdat.fmt")
+    if not os.path.exists(dat_file):
+        raise FileNotFoundError(f"Could not find {dat_file}")
+    cls_file = os.path.join(tool_path, "indexcls.fmt")
+    if not os.path.exists(cls_file):
+        raise FileNotFoundError(f"Could not find {cls_file}")
+    typ_file = os.path.join(tool_path, "indextyp.fmt")
+    if not os.path.exists(typ_file):
+        raise FileNotFoundError(f"Could not find {typ_file}")
+
+    dat_df = pandas.read_csv(dat_file, delimiter=',', encoding='latin1', skiprows=1)
+    dat_df = __rename_lang_columns(dat_df)
+    dat_df.drop(columns=["PSRIO"], inplace=True)
+
+    cls_df = pandas.read_csv(cls_file, delimiter=',', encoding='latin1', skiprows=1)
+    cls_df = __rename_lang_columns(cls_df)
+    cls_df.rename(columns={"Name": "ClassName"}, inplace=True)
+    cls_df.drop(columns=["Description", "PSRIO-Class"], inplace=True)
+
+    typ_df = pandas.read_csv(typ_file, delimiter=',', encoding='latin1', skiprows=1)
+    typ_df = __rename_lang_columns(typ_df)
+    typ_df.rename(columns={"Name": "TypeName"}, inplace=True)
+
+    # merge class names and type names
+    dat_df = dat_df.merge(cls_df, how='left', left_on='Class', right_on='!Class')
+    dat_df = dat_df.merge(typ_df, how='left', left_on='Type', right_on='!Type')
+    dat_df.drop(columns=["!Class", "!Type"], inplace=True)
+    dat_df.rename(columns={"!Num": "Number", "TypeName": "Type", "ClassName": "Class"}, inplace=True)
+
+    return dat_df
+
+
+class AvailableOutput:
+    def __init__(self):
+        self.filename = ""
+        self.file_type = ""
+        self.description = ""
+        self.unit = ""
+        self.attribute_class = ""
+        self.case_path = ""
+
+    def load_dataframe(self) -> psr.factory.DataFrame:
+        full_file_name = str(self)
+        return psr.factory.load_dataframe(full_file_name)
+
+    def __str__(self):
+        return os.path.join(self.case_path, f"{self.filename}.{self.file_type}")
+
+    def __repr__(self):
+        return f"AvailableOutput(path='{str(self)}', description='{self.description}', unit='{self.unit}', attribute_class={self.attribute_class})"
+
+
+def get_available_outputs(case_path: str) -> List[AvailableOutput]:
+    indice_grf_path = os.path.join(case_path, "indice.grf")
+    outputs = []
+    with open(indice_grf_path, 'r', encoding='latin1') as f:
+        next(f)  # Skip the two header lines
+        next(f)
+        reader = csv.reader(f, delimiter=',')
+        for row in reader:
+            if len(row) >= 4:
+                output = AvailableOutput()
+                full_file_name = row[0].strip()
+                output.filename, output.file_type = os.path.splitext(full_file_name)
+                output.filename = output.filename.strip()
+                output.file_type = output.file_type.lstrip('.').strip()
+                output.description = row[1].strip()
+                output.unit = row[2].strip()
+                output.attribute_class = row[3].strip()
+                output.case_path = case_path
+                outputs.append(output)
+    return outputs
+
+
+class OutputsDataFrame(pandas.DataFrame):
+    def __setitem__(self, key, value):
+        if isinstance(value, bool):
+            self.loc[key, 'Active'] = value
+        else:
+            super().__setitem__(key, value)
+
+    def save(self, case_path: str) -> None:
+        save(self, case_path)
+
+
+def save(df: pandas.DataFrame, case_path: str) -> None:
+    index_df = load_index_dat(case_path)
+
+    for _, row in df.iterrows():
+        mask = index_df['Num'] == row['Num']
+        if mask.any():
+            index_df.loc[mask, 'YN'] = 1 if row['Active'] else 0
+
+    output_lines = ['Num Graph...........................|...Unid Type Y/N']
+    for _, row in index_df.iterrows():
+        line = f"{row['Num']:>3d} {row['Description']:<33}{row['Unit']:7} {int(row['Type']):>4d} {row['YN']:>4d}"
+        output_lines.append(line)
+
+    index_file = os.path.join(case_path, "index.dat")
+    with open(index_file, 'w', encoding='utf-8') as f:
+        for line in output_lines:
+            f.write(f"{line}\n")
+
+
+def load_index_dat(case_path: str) -> pandas.DataFrame:
+    index_file = os.path.join(case_path, "index.dat")
+    if not os.path.exists(index_file):
+        raise FileNotFoundError(f"Could not find {index_file}")
+
+    widths = [4, 33, 8, 5, 4]
+    names = ['Num', 'Description', 'Unit', 'Type', 'YN']
+
+    return pandas.read_fwf(
+        index_file,
+        widths=widths,
+        names=names,
+        skiprows=1
+    )
+
+
+def load(case_path: str) -> OutputsDataFrame:
+    sddp_path = Path("C:/PSR")
+    sddp_dirs = [d for d in sddp_path.iterdir() if d.name.startswith("Sddp")]
+    if not sddp_dirs:
+        raise FileNotFoundError("Could not find SDDP installation")
+    sddp_path = sorted(sddp_dirs)[-1]
+
+    fmt_file = Path(sddp_path) / "Oper" / "indexdat.fmt"
+    if not fmt_file.exists():
+        raise FileNotFoundError(f"Could not find {fmt_file}")
+
+    fmt_df = pandas.read_csv(fmt_file, delimiter=',', encoding='latin1', skiprows=1)
+    index_df = load_index_dat(case_path)
+
+    outputs_df = OutputsDataFrame()
+    for _, row in fmt_df.iterrows():
+        num = row['!Num']
+        filename = row['Filename']
+        index_row = index_df[index_df['Num'] == num]
+        if not index_row.empty:
+            outputs_df.loc[filename, 'Num'] = num
+            outputs_df.loc[filename, 'Active'] = bool(index_row['YN'].iloc[0])
+
+    return outputs_df
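
A usage sketch tying the module together; the case directory is hypothetical, "gerter" stands in for a typical SDDP output name, and load() requires an SDDP installation under C:/PSR, as hard-coded above:

    import psr.outputs

    case_path = r"C:\cases\case01"  # hypothetical case directory

    # Toggle which outputs SDDP writes, then persist to index.dat.
    outputs_df = psr.outputs.load(case_path)
    outputs_df["gerter"] = True  # bool assignment maps to the 'Active' column
    outputs_df.save(case_path)

    # After a run, list the generated result files from indice.grf.
    for out in psr.outputs.get_available_outputs(case_path):
        print(out.filename, out.unit, out.description)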
@@ -0,0 +1,289 @@
+import datetime as dt
+from typing import List, Dict, Optional, Tuple
+
+import numpy as np
+import pandas
+import pandas as pd
+import psr.factory
+
+HOURS_PER_DAY = 24
+DAYS_PER_WEEK = 7
+HOURS_PER_WEEK = HOURS_PER_DAY * DAYS_PER_WEEK  # 168 hours
+WEEKS_PER_YEAR = 52
+AR_STAGE_SAMPLES = 6
+
+_number_of_days_per_month = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
+
+_g_week_start_date_by_year: Dict[int, List[dt.datetime]] = {}
+
+_week_max_hours = 7 * 24
+
+
+def get_sddp_stages_by_year(year: int) -> List[dt.datetime]:
+    global _g_week_start_date_by_year
+    if year not in _g_week_start_date_by_year:
+        # Fixed calendar dates (not a uniform 7-day grid: the Feb 26 -> Mar 5
+        # gap is 8 days in leap years).
+        _g_week_start_date_by_year[year] = [
+            dt.datetime(year, month, day) for month, day in (
+                (1, 1), (1, 8), (1, 15), (1, 22), (1, 29),
+                (2, 5), (2, 12), (2, 19), (2, 26),
+                (3, 5), (3, 12), (3, 19), (3, 26),
+                (4, 2), (4, 9), (4, 16), (4, 23), (4, 30),
+                (5, 7), (5, 14), (5, 21), (5, 28),
+                (6, 4), (6, 11), (6, 18), (6, 25),
+                (7, 2), (7, 9), (7, 16), (7, 23), (7, 30),
+                (8, 6), (8, 13), (8, 20), (8, 27),
+                (9, 3), (9, 10), (9, 17), (9, 24),
+                (10, 1), (10, 8), (10, 15), (10, 22), (10, 29),
+                (11, 5), (11, 12), (11, 19), (11, 26),
+                (12, 3), (12, 10), (12, 17), (12, 24),
+            )
+        ]
+    return _g_week_start_date_by_year[year]
+
+
+def get_closest_sddp_stage_date(y: int, m: int, d: int, previous_date: bool = True) -> Optional[dt.datetime]:
+    """Get the closest SDDP stage date for a given year, month, and day."""
+    dates = get_sddp_stages_by_year(y)
+    sdat = dt.datetime(y, m, d)
+    offset = 0 if previous_date else 1
+    last_date = dates[-1]
+    if previous_date and sdat >= last_date:
+        return last_date
+    elif not previous_date and sdat >= last_date:
+        dates = get_sddp_stages_by_year(y + 1)
+        return dates[0]
+    else:
+        for index in range(len(dates) - 1):
+            if dates[index] <= sdat < dates[index + 1]:
+                return dates[index + offset]
+    return None
+
+
+def get_sddp_week(y: int, m: int, d: int) -> int:
+    dates = get_sddp_stages_by_year(y)
+    sdat = dt.datetime(y, m, d)
+    if dates[-1] <= sdat <= dt.datetime(y, 12, 31):
+        return WEEKS_PER_YEAR
+    else:
+        for index in range(len(dates) - 1):
+            if dates[index] <= sdat < dates[index + 1]:
+                return index + 1
+    return -1
+
+
+def get_sddp_start_date_and_stage(year: int, month: int, day: int) -> Tuple[dt.datetime, int]:
+    sddp_date = get_closest_sddp_stage_date(year, month, day, previous_date=True)
+    sddp_week = get_sddp_week(sddp_date.year, sddp_date.month, sddp_date.day)
+    return sddp_date, sddp_week
+
+
+def get_hour_block_map_from_study(study: psr.factory.Study) -> pandas.DataFrame:
+    """
+    Extract the HourBlockMap from the study and return it as a DataFrame.
+    """
+    # FixedDurationOfBlocks(block)
+    stage_type = study.get("StageType")
+    hour_block_map_df = study.get_df("HourBlockMap")
+
+    if hour_block_map_df.empty:
+        initial_year = study.get("InitialYear")
+        end_year = initial_year + 10
+        total_blocks = study.get("NumberOfBlocks")
+        block_duration = {}
+        total_duration = 0
+        for block in range(1, total_blocks + 1):
+            # block duration is a percentage of total hours in a stage
+            block_duration[block] = study.get(f"FixedDurationOfBlocks({block})")
+            total_duration += block_duration[block]
+
+        if total_duration > 99.9:
+            # Group stage hours on blocks based on their relative durations
+            # to the total number of hours (total_duration)
+            if stage_type == 1:
+                total_duration = _week_max_hours
+                # weekly stages, fixed stage duration
+                mapping_data = []
+                for year in range(initial_year, end_year):
+                    start_date = pandas.Timestamp(f"{year}-01-01")
+                    for week in range(1, 53):
+                        accumulated_hours = total_duration
+                        for block in range(total_blocks, 0, -1):
+                            current_duration = int(block_duration[block] * total_duration // 100)
+                            if block != 1:
+                                current_hours = accumulated_hours - current_duration
+                            else:
+                                current_hours = 0
+                            for hour in range(current_hours, accumulated_hours):
+                                datetime_point = start_date + pandas.Timedelta(weeks=week - 1, hours=hour)
+                                formatted_datetime = f"{datetime_point.year}/{week:02d} {hour + 1}h"
+                                mapping_data.append({
+                                    'datetime': formatted_datetime,
+                                    'year': datetime_point.year,
+                                    'sddp_stage': week,
+                                    'sddp_block': block,
+                                    'stage_hour': hour + 1
+                                })
+                            accumulated_hours -= current_duration
+                hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
+                # sort dataframe by year, sddp_stage, sddp_block, stage_hour
+                hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
+            elif stage_type == 2:
+                # monthly stages, variable stage duration
+                mapping_data = []
+                for year in range(initial_year, end_year):
+                    for month in range(1, 13):
+                        start_date = pandas.Timestamp(f"{year}-{month:02d}-01")
+                        days_in_month = _number_of_days_per_month[month]
+                        total_duration = days_in_month * HOURS_PER_DAY
+                        accumulated_hours = total_duration
+                        for block in range(total_blocks, 0, -1):
+                            current_duration = int(block_duration[block] * total_duration // 100)
+                            if block != 1:
+                                current_hours = accumulated_hours - current_duration
+                            else:
+                                current_hours = 0
+                            for hour in range(current_hours, accumulated_hours):
+                                datetime_point = start_date + pandas.Timedelta(hours=hour)
+                                formatted_datetime = f"{datetime_point.year}/{datetime_point.month:02d} {hour + 1}h"
+                                mapping_data.append({
+                                    'datetime': formatted_datetime,
+                                    'year': datetime_point.year,
+                                    'sddp_stage': month,
+                                    'sddp_block': block,
+                                    'stage_hour': hour + 1
+                                })
+                            accumulated_hours -= current_duration
+                hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
+                # sort dataframe by year, sddp_stage, sddp_block, stage_hour
+                hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
+
+        else:
+            raise ValueError("Total duration of blocks must be 100% or more.")
+    else:
+        # Format the HourBlockMap dataframe to have year, sddp_stage, sddp_block and stage_hour columns.
+        # Its datetime index is 'YYYY/WW HHHh' for weekly cases, where WW is the week number (1-52) and
+        # HHH is the hour of the week (1-168); for monthly cases it is 'YYYY/MM HHHh', where MM is the
+        # month number (1-12) and HHH is the hour of the month (1-744).
+        hour_block_map_df = hour_block_map_df.reset_index()
+        hour_block_map_df['year'] = hour_block_map_df['datetime'].str.slice(0, 4).astype(int)
+        hour_block_map_df['sddp_stage'] = hour_block_map_df['datetime'].str.slice(5, 7).astype(int)
+        hour_block_map_df['stage_hour'] = hour_block_map_df['datetime'].str.slice(8, -1).astype(int)
+        hour_block_map_df['sddp_block'] = ((hour_block_map_df['stage_hour'] - 1) // 6) + 1
+        hour_block_map_df = hour_block_map_df.set_index('datetime')[['year', 'sddp_stage', 'sddp_block', 'stage_hour']]
+    return hour_block_map_df
+
+
+def remap_hourly_to_stage(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
+                          aggregation_method: str = 'mean') -> pd.DataFrame:
+    """
+    Strategy to map hourly data into weekly/monthly data:
+    - Merge the hourly data dataframe with the study's hour block map dataframe
+    - Aggregate by stage using avg, sum, max, etc.
+    """
+    # create indices before merging
+    if stage_type == 1:
+        # weekly stages
+        hourly_df = hourly_df.copy()
+        hourly_df['year'] = hourly_df.index.year
+        hourly_df['sddp_stage'] = 0
+        hourly_df['stage_hour'] = 0
+        for index in hourly_df.index:
+            stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
+            week = get_sddp_week(index.year, index.month, index.day)
+            hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
+            hourly_df.at[index, 'sddp_stage'] = week
+            hourly_df.at[index, 'stage_hour'] = hour_of_week
+    elif stage_type == 2:
+        # monthly stages
+        hourly_df = hourly_df.copy()
+        hourly_df['year'] = hourly_df.index.year
+        hourly_df['sddp_stage'] = hourly_df.index.month
+        hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
+    else:
+        raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
+    hourly_df = hourly_df.set_index(['year', 'sddp_stage', 'stage_hour'])
+    hour_block_map_df = hour_block_map_df.set_index(['year', 'sddp_stage', 'stage_hour'])
+    merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')
+
+    numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
+    result = merged_df.groupby(['year', 'sddp_stage'])[numeric_cols].agg(aggregation_method).reset_index()
+    result.sort_values(by=['year', 'sddp_stage'], inplace=True)
+    result.set_index(['year', 'sddp_stage'], inplace=True)
+    return result
+
+
+def remap_hourly_to_blocks(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
+                           aggregation_method: str = 'mean') -> pd.DataFrame:
+    """
+    Strategy to map hourly data into per-stage, per-block data:
+    - Merge the hourly data dataframe with the study's hour block map dataframe
+    - Aggregate by stage and block using avg, sum, max, etc.
+    """
+    # create indices before merging
+    if stage_type == 1:
+        # weekly stages
+        hourly_df = hourly_df.copy()
+        hourly_df['year'] = hourly_df.index.year
+        hourly_df['sddp_stage'] = 0
+        hourly_df['stage_hour'] = 0
+        for index in hourly_df.index:
+            stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
+            week = get_sddp_week(index.year, index.month, index.day)
+            hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
+            hourly_df.at[index, 'sddp_stage'] = week
+            hourly_df.at[index, 'stage_hour'] = hour_of_week
+    elif stage_type == 2:
+        # monthly stages
+        hourly_df = hourly_df.copy()
+        hourly_df['year'] = hourly_df.index.year
+        hourly_df['sddp_stage'] = hourly_df.index.month
+        hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
+    else:
+        raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
+    hourly_df = hourly_df.set_index(['year', 'sddp_stage', 'stage_hour'])
+    hour_block_map_df = hour_block_map_df.set_index(['year', 'sddp_stage', 'stage_hour'])
+    merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')
+
+    numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
+    result = merged_df.groupby(['year', 'sddp_stage', 'sddp_block'])[numeric_cols].agg(aggregation_method).reset_index()
+    result.sort_values(by=['year', 'sddp_stage', 'sddp_block'], inplace=True)
+    result.set_index(['year', 'sddp_stage', 'sddp_block'], inplace=True)
+    return result
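
A sketch of how these helpers compose for a weekly study; `study` is assumed to be a psr.factory.Study obtained elsewhere (e.g. from the sample builders above), and the hourly frame is synthetic:

    import pandas as pd

    # Any numeric frame indexed by Timestamp works as hourly input.
    hourly_df = pd.DataFrame(
        {"load_mw": range(24 * 14)},
        index=pd.date_range("2016-01-01", periods=24 * 14, freq="h"),
    )

    stage_type = study.get("StageType")  # 1 = weekly, 2 = monthly
    block_map = get_hour_block_map_from_study(study)
    weekly = remap_hourly_to_stage(hourly_df, block_map, stage_type, "mean")
    by_block = remap_hourly_to_blocks(hourly_df, block_map, stage_type, "sum")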
@@ -23,6 +23,9 @@ def change_cwd(new_dir: Union[str, pathlib.Path]):
 def exec_cmd(cmd: Union[str, List[str]], **kwargs) -> int:
     dry_run = kwargs.get("dry_run", False)
     print_progress = kwargs.get("show_progress", False)
+    env = kwargs.get("env", {})
+    proc_env = os.environ.copy()
+    proc_env.update(env)
 
     if print_progress or dry_run:
         sys.stdout.flush()
@@ -35,7 +38,7 @@ def exec_cmd(cmd: Union[str, List[str]], **kwargs) -> int:
         return_code = 0
     else:
         try:
-            return_code = subprocess.call(cmd, shell=True)
+            return_code = subprocess.call(cmd, shell=True, env=proc_env)
             if return_code > 0:
                 raise RuntimeError(f"Execution error, code {return_code}")
             else:
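
With the new env keyword, callers overlay variables onto a copy of os.environ instead of mutating the parent process environment; for example (command and variable name are hypothetical):

    # Run a tool with one extra environment variable on top of os.environ.
    exec_cmd("sddp.exe case01", show_progress=True,
             env={"PSR_DATA_DIR": r"C:\cases\case01"})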