psr-factory 5.0.0b35-py3-none-win_amd64.whl → 5.0.0b37-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


psr/factory/__init__.py CHANGED
@@ -2,6 +2,6 @@
  # Unauthorized copying of this file, via any medium is strictly prohibited
  # Proprietary and confidential

- __version__ = "5.0.0b35"
+ __version__ = "5.0.0b37"

  from .api import *
psr/factory/factory.dll CHANGED
Binary file
psr/outputs/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
+ # Unauthorized copying of this file, via any medium is strictly prohibited
+ # Proprietary and confidential
+
+ from outputs import *
psr/outputs/outputs.py ADDED
@@ -0,0 +1,179 @@
+ # PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
+ # Unauthorized copying of this file, via any medium is strictly prohibited
+ # Proprietary and confidential
+
+ import csv
+ import os
+ from pathlib import Path
+ from typing import (List)
+ import pandas
+ import psr.factory
+
+
+ _language_dict_map = {
+     0: 'ENG',
+     1: 'ESP',
+     2: 'POR',
+ }
+
+ _default_language = 0
+
+
+ def __rename_lang_columns(df: pandas.DataFrame) -> pandas.DataFrame:
+     default_language_code = _language_dict_map.get(_default_language, 'ENG')
+     languages = list(_language_dict_map.values())
+     languages.remove(default_language_code)
+     lang_cols = [col for col in df.columns if col.startswith(tuple(languages))]
+     df = df.drop(columns=lang_cols)
+     # Rename the default language column to remove prefix ENG-
+     for column in df.columns:
+         if column.startswith(f"{default_language_code}-"):
+             new_col = column[len(f"{default_language_code}-"):]
+             df = df.rename(columns={column: new_col})
+     return df
+
+
+ def get_available_outputs_by_model(tool_path: str) -> pandas.DataFrame:
+     dat_file = os.path.join(tool_path, "indexdat.fmt")
+     if not os.path.exists(dat_file):
+         raise FileNotFoundError(f"Could not find {dat_file}")
+     cls_file = os.path.join(tool_path, "indexcls.fmt")
+     if not os.path.exists(cls_file):
+         raise FileNotFoundError(f"Could not find {cls_file}")
+     typ_file = os.path.join(tool_path, "indextyp.fmt")
+     if not os.path.exists(typ_file):
+         raise FileNotFoundError(f"Could not find {typ_file}")
+
+     dat_df = pandas.read_csv(dat_file, delimiter=',', encoding='latin1', skiprows=1)
+     dat_df = __rename_lang_columns(dat_df)
+     dat_df.drop(columns=["PSRIO"], inplace=True)
+
+     cls_df = pandas.read_csv(cls_file, delimiter=',', encoding='latin1', skiprows=1)
+     cls_df = __rename_lang_columns(cls_df)
+     cls_df.rename(columns={"Name": "ClassName"}, inplace=True)
+     cls_df.drop(columns=["Description", "PSRIO-Class"], inplace=True)
+
+     typ_df = pandas.read_csv(typ_file, delimiter=',', encoding='latin1', skiprows=1)
+     typ_df = __rename_lang_columns(typ_df)
+     typ_df.rename(columns={"Name": "TypeName"}, inplace=True)
+
+     # merge class names and type names
+     dat_df = dat_df.merge(cls_df, how='left', left_on='Class', right_on='!Class')
+     dat_df = dat_df.merge(typ_df, how='left', left_on='Type', right_on='!Type')
+     dat_df.drop(columns=["!Class", "!Type"], inplace=True)
+     dat_df.rename(columns={"!Num": "Number", "TypeName": "Type", "ClassName": "Class"}, inplace=True)
+
+     return dat_df
+
+
+ class AvailableOutput:
+     def __init__(self):
+         self.filename = ""
+         self.file_type = ""
+         self.description = ""
+         self.unit = ""
+         self.attribute_class = ""
+         self.case_path = ""
+     def load_dataframe(self) -> psr.factory.DataFrame:
+         full_file_name = str(self)
+         return psr.factory.load_dataframe(full_file_name)
+
+     def __str__(self):
+         return os.path.join(self.case_path, f"{self.filename}.{self.file_type}")
+
+     def __repr__(self):
+         return f"AvailableOutput(path='{str(self)}', description='{self.description}', unit='{self.unit}', attribute_class={self.attribute_class})"
+
+
+ def get_available_outputs(case_path: str) -> List[AvailableOutput]:
+     indice_grf_path = os.path.join(case_path, "indice.grf")
+     outputs = []
+     with open(indice_grf_path, 'r', encoding='latin1') as f:
+         next(f)  # Skip header
+         next(f)
+         reader = csv.reader(f, delimiter=',')
+         for row in reader:
+             if len(row) >= 4:
+                 output = AvailableOutput()
+                 full_file_name = row[0].strip()
+                 output.filename, output.file_type = os.path.splitext(full_file_name)
+                 output.filename = output.filename.strip()
+                 output.file_type = output.file_type.lstrip('.').strip()
+                 output.description = row[1].strip()
+                 output.unit = row[2].strip()
+                 output.attribute_class = row[3].strip()
+                 output.case_path = case_path
+                 outputs.append(output)
+     return outputs
+
+
+ class OutputsDataFrame(pandas.DataFrame):
+     def __setitem__(self, key, value):
+         if isinstance(value, bool):
+             self.loc[key, 'Active'] = value
+         else:
+             super().__setitem__(key, value)
+
+     def save(self, case_path: str) -> None:
+         save(self, case_path)
+
+
+ def save(df: pandas.DataFrame, case_path: str) -> None:
+     index_df = load_index_dat(case_path)
+
+     for filename, row in df.iterrows():
+         mask = index_df['Num'] == row['Num']
+         if any(mask):
+             index_df.loc[mask, 'YN'] = 1 if row['Active'] else 0
+
+     output_lines = ['Num Graph...........................|...Unid Type Y/N']
+     for _, row in index_df.iterrows():
+         line = f"{row['Num']:>3d} {row['Description']:<33}{row['Unit']:7} {int(row['Type']):>4d} {row['YN']:>4d}"
+         output_lines.append(line)
+
+     index_file = os.path.join(case_path, "index.dat")
+     with open(index_file, 'w', encoding='utf-8') as f:
+         for line in output_lines:
+             f.write(f"{line}\n")
+
+
+ def load_index_dat(case_path: str) -> pandas.DataFrame:
+     index_file = os.path.join(case_path, "index.dat")
+     if not os.path.exists(index_file):
+         raise FileNotFoundError(f"Could not find {index_file}")
+
+     widths = [4, 33, 8, 5, 4]
+     names = ['Num', 'Description', 'Unit', 'Type', 'YN']
+
+     return pandas.read_fwf(
+         index_file,
+         widths=widths,
+         names=names,
+         skiprows=1
+     )
+
+
+ def load(case_path: str) -> OutputsDataFrame:
+     sddp_path = Path("C:/PSR")
+     sddp_dirs = [d for d in sddp_path.iterdir() if d.name.startswith("Sddp")]
+     if not sddp_dirs:
+         raise FileNotFoundError("Could not find SDDP installation")
+     sddp_path = sorted(sddp_dirs)[-1]
+
+     fmt_file = Path(sddp_path) / "Oper" / "indexdat.fmt"
+     if not fmt_file.exists():
+         raise FileNotFoundError(f"Could not find {fmt_file}")
+
+     fmt_df = pandas.read_csv(fmt_file, delimiter=',', encoding='latin1', skiprows=1)
+     index_df = load_index_dat(case_path)
+
+     outputs_df = OutputsDataFrame()
+     for _, row in fmt_df.iterrows():
+         num = row['!Num']
+         filename = row['Filename']
+         index_row = index_df[index_df['Num'] == num]
+         if not index_row.empty:
+             outputs_df.loc[filename, 'Num'] = num
+             outputs_df.loc[filename, 'Active'] = bool(index_row['YN'].iloc[0])
+
+     return outputs_df
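The new module reads a case's indice.grf to list available outputs and round-trips index.dat to toggle which outputs a run produces. Below is a minimal usage sketch, assuming psr.outputs re-exports these helpers as the new psr/outputs/__init__.py suggests; the case path and output name are placeholders, not values from this release.

```python
# Illustrative only: "C:/cases/example" and "gerter" are placeholders.
import psr.outputs as outputs

case_path = "C:/cases/example"

# Outputs advertised by the case's indice.grf (filename, description, unit).
for out in outputs.get_available_outputs(case_path):
    print(out.filename, out.description, out.unit)

# index.dat as an OutputsDataFrame indexed by output filename.
# Note: load() also expects an SDDP installation under C:/PSR to read indexdat.fmt.
df = outputs.load(case_path)
df["gerter"] = True    # assigning a bool routes through __setitem__ and flips 'Active'
df.save(case_path)     # rewrites index.dat with the updated Y/N flags
```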
psr/outputs/resample.py ADDED
@@ -0,0 +1,289 @@
+ import datetime as dt
+ from typing import List, Dict, Optional, Tuple
+
+ import numpy as np
+ import pandas
+ import pandas as pd
+ import psr.factory
+
+ HOURS_PER_DAY = 24
+ DAYS_PER_WEEK = 7
+ HOURS_PER_WEEK = HOURS_PER_DAY * DAYS_PER_WEEK  # 168 hours
+ WEEKS_PER_YEAR = 52
+ AR_STAGE_SAMPLES = 6
+
+ _number_of_days_per_month = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
+
+ _g_week_start_date_by_year: Dict[int, List[dt.datetime]] = {}
+
+ _week_max_hours = 7 * 24
+
+
+ def get_sddp_stages_by_year(year: int) -> List[dt.datetime]:
+     global _g_week_start_date_by_year
+     if year not in _g_week_start_date_by_year:
+         _g_week_start_date_by_year[year] = [
+             dt.datetime(year, 1, 1),
+             dt.datetime(year, 1, 8),
+             dt.datetime(year, 1, 15),
+             dt.datetime(year, 1, 22),
+             dt.datetime(year, 1, 29),
+             dt.datetime(year, 2, 5),
+             dt.datetime(year, 2, 12),
+             dt.datetime(year, 2, 19),
+             dt.datetime(year, 2, 26),
+             dt.datetime(year, 3, 5),
+             dt.datetime(year, 3, 12),
+             dt.datetime(year, 3, 19),
+             dt.datetime(year, 3, 26),
+             dt.datetime(year, 4, 2),
+             dt.datetime(year, 4, 9),
+             dt.datetime(year, 4, 16),
+             dt.datetime(year, 4, 23),
+             dt.datetime(year, 4, 30),
+             dt.datetime(year, 5, 7),
+             dt.datetime(year, 5, 14),
+             dt.datetime(year, 5, 21),
+             dt.datetime(year, 5, 28),
+             dt.datetime(year, 6, 4),
+             dt.datetime(year, 6, 11),
+             dt.datetime(year, 6, 18),
+             dt.datetime(year, 6, 25),
+             dt.datetime(year, 7, 2),
+             dt.datetime(year, 7, 9),
+             dt.datetime(year, 7, 16),
+             dt.datetime(year, 7, 23),
+             dt.datetime(year, 7, 30),
+             dt.datetime(year, 8, 6),
+             dt.datetime(year, 8, 13),
+             dt.datetime(year, 8, 20),
+             dt.datetime(year, 8, 27),
+             dt.datetime(year, 9, 3),
+             dt.datetime(year, 9, 10),
+             dt.datetime(year, 9, 17),
+             dt.datetime(year, 9, 24),
+             dt.datetime(year, 10, 1),
+             dt.datetime(year, 10, 8),
+             dt.datetime(year, 10, 15),
+             dt.datetime(year, 10, 22),
+             dt.datetime(year, 10, 29),
+             dt.datetime(year, 11, 5),
+             dt.datetime(year, 11, 12),
+             dt.datetime(year, 11, 19),
+             dt.datetime(year, 11, 26),
+             dt.datetime(year, 12, 3),
+             dt.datetime(year, 12, 10),
+             dt.datetime(year, 12, 17),
+             dt.datetime(year, 12, 24)
+         ]
+     return _g_week_start_date_by_year[year]
+
+
+ def get_closest_sddp_stage_date(y: int, m: int, d: int, previous_date: bool = True) -> Optional[dt.datetime]:
+     """Get the closest SDDP stage date for a given year, month, and day."""
+     dates = get_sddp_stages_by_year(y)
+     sdat = dt.datetime(y, m, d)
+     offset = 0 if previous_date else 1
+     last_date = dates[-1]
+     if previous_date and sdat >= last_date:
+         return last_date
+     elif not previous_date and sdat >= last_date:
+         dates = get_sddp_stages_by_year(y + 1)
+         return dates[0]
+     else:
+         for index in range(len(dates)-1):
+             if dates[index] <= sdat < dates[index+1]:
+                 return dates[index + offset]
+     return None
+
+
+ def get_sddp_week(y: int, m: int, d: int) -> int:
+     dates = get_sddp_stages_by_year(y)
+     sdat = dt.datetime(y, m, d)
+     if dates[-1] <= sdat <= dt.datetime(y, 12, 31):
+         return WEEKS_PER_YEAR
+     else:
+         for index in range(len(dates)-1):
+             if dates[index] <= sdat < dates[index+1]:
+                 return index + 1
+     return -1
+
+
+ def get_sddp_start_date_and_stage(year, month, day) -> Tuple[dt.datetime, int]:
+     sddp_date = get_closest_sddp_stage_date(year, month, day, previous_date=True)
+     sddp_week = get_sddp_week(sddp_date.year, sddp_date.month, sddp_date.day)
+     return sddp_date, sddp_week
+
+
+ def get_hour_block_map_from_study(study: psr.factory.Study) -> pandas.DataFrame:
+     """
+     Extract the HourBlockMap from the study and return as a DataFrame.
+     """
+     # FixedDurationOfBlocks(block)
+     stage_type = study.get("StageType")
+     hour_block_map_df = study.get_df("HourBlockMap")
+
+     if hour_block_map_df.empty:
+         initial_year = study.get("InitialYear")
+         end_year = initial_year + 10
+         total_blocks = study.get("NumberOfBlocks")
+         block_duration = {}
+         total_duration = 0
+         for block in range(1, total_blocks + 1):
+             # block duration is a percentage of total hours in a stage
+             block_duration[block] = study.get(f"FixedDurationOfBlocks({block})")
+             total_duration += block_duration[block]
+
+         if total_duration > 99.9:
+             # Group stage hours on blocks based on their relative durations to the total number of hours (total_duration)
+             if stage_type == 1:
+                 total_duration = _week_max_hours
+                 # weekly stages, fixed stage duration
+                 mapping_data = []
+                 for year in range(initial_year, end_year):
+                     start_date = pandas.Timestamp(f"{year}-01-01")
+                     for week in range(1, 53):
+                         accumulated_hours = total_duration
+                         for block in range(total_blocks, 0, -1):
+                             current_duration = int(block_duration[block] * total_duration // 100)
+                             if block != 1:
+                                 current_hours = accumulated_hours - current_duration
+                             else:
+                                 current_hours = 0
+                             for hour in range(current_hours, accumulated_hours):
+                                 datetime_point = start_date + pandas.Timedelta(weeks=week - 1, hours=hour)
+                                 formatted_datetime = f"{datetime_point.year}/{week:02d} {hour + 1}h"
+                                 mapping_data.append({
+                                     'datetime': formatted_datetime,
+                                     'year': datetime_point.year,
+                                     'sddp_stage': week,
+                                     'sddp_block': block,
+                                     'stage_hour': hour + 1
+                                 })
+                             accumulated_hours -= current_duration
+                 hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
+                 # sort dataframe by year, sddp_stage, sddp_block, stage_hour
+                 hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
+             elif stage_type == 2:
+                 # monthly stages, variable stage duration
+                 mapping_data = []
+                 for year in range(initial_year, end_year):
+                     for month in range(1, 13):
+                         start_date = pandas.Timestamp(f"{year}-{month:02d}-01")
+                         days_in_month = _number_of_days_per_month[month]
+                         total_duration = days_in_month * HOURS_PER_DAY
+                         accumulated_hours = total_duration
+                         for block in range(total_blocks, 0, -1):
+                             current_duration = int(block_duration[block] * total_duration // 100)
+                             if block != 1:
+                                 current_hours = accumulated_hours - current_duration
+                             else:
+                                 current_hours = 0
+                             for hour in range(current_hours, accumulated_hours):
+                                 datetime_point = start_date + pandas.Timedelta(hours=hour)
+                                 formatted_datetime = f"{datetime_point.year}/{datetime_point.month:02d} {hour + 1}h"
+                                 mapping_data.append({
+                                     'datetime': formatted_datetime,
+                                     'year': datetime_point.year,
+                                     'sddp_stage': month,
+                                     'sddp_block': block,
+                                     'stage_hour': hour + 1
+                                 })
+                             accumulated_hours -= current_duration
+                 hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
+                 # sort dataframe by year, sddp_stage, sddp_block, stage_hour
+                 hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
+
+         else:
+             raise ValueError("Total duration of blocks must be 100% or more.")
+     else:
+         # format HourBlockMap dataframe to have year, sddp_stage, sddp_block, stage_hour columns
+         # its index datetime column is in the following format: 'YYYY/WW HHHh', where WW is the week number (1-52) and HHH is the hour of the week (1-168)
+         # for weekly cases. for monthly cases, it is 'YYYY/MM HHHh', where MM is the month number (1-12) and HHH is the hour of the month (1-744).
+         hour_block_map_df = hour_block_map_df.reset_index()
+         hour_block_map_df['year'] = hour_block_map_df['datetime'].str.slice(0, 4).astype(int)
+         hour_block_map_df['sddp_stage'] = hour_block_map_df['datetime'].str.slice(5, 7).astype(int)
+         hour_block_map_df['stage_hour'] = hour_block_map_df['datetime'].str.slice(8, -1).astype(int)
+         hour_block_map_df['sddp_block'] = ((hour_block_map_df['stage_hour'] - 1) // 6) + 1
+         hour_block_map_df = hour_block_map_df.set_index('datetime')[['year', 'sddp_stage', 'sddp_block', 'stage_hour']]
+     return hour_block_map_df
+
+
+ def remap_hourly_to_stage(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
+                           aggregation_method: str = 'mean') -> pd.DataFrame:
+     """
+     Strategy to map hourly data into weekly/monthly data:
+     - Merge the hourly data dataframe with the Study's hour block map dataframe
+     - Aggregate by stage and/or by block using avg, sum, max, etc
+     """
+     # create indices before merging
+     if stage_type == 1:
+         # weekly stages
+         hourly_df = hourly_df.copy()
+
+         hourly_df['year'] = hourly_df.index.year
+         hourly_df['sddp_stage'] = 0
+         hourly_df['stage_hour'] = 0
+         for irow, (index, row) in enumerate(hourly_df.iterrows()):
+             stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
+             week = get_sddp_week(index.year, index.month, index.day)
+             hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
+             hourly_df.at[row.name, 'sddp_stage'] = week
+             hourly_df.at[row.name, 'stage_hour'] = hour_of_week
+     elif stage_type == 2:
+         # monthly stages
+         hourly_df = hourly_df.copy()
+         hourly_df['year'] = hourly_df.index.year
+         hourly_df['sddp_stage'] = hourly_df.index.month
+         hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
+     else:
+         raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
+     hourly_df = hourly_df.set_index('year,sddp_stage,stage_hour'.split(','))
+     hour_block_map_df = hour_block_map_df.set_index('year,sddp_stage,stage_hour'.split(','))
+     merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')
+
+     numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
+     result = merged_df.groupby(['year', 'sddp_stage'])[numeric_cols].agg(aggregation_method).reset_index()
+     result.sort_values(by=['year', 'sddp_stage'], inplace=True)
+     result.set_index(['year', 'sddp_stage'], inplace=True)
+     return result
+
+
+ def remap_hourly_to_blocks(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
+                            aggregation_method: str = 'mean') -> pd.DataFrame:
+     """
+     Strategy to map hourly data into weekly/monthly data by block:
+     - Merge the hourly data dataframe with the Study's hour block map dataframe
+     - Aggregate by stage and/or by block using avg, sum, max, etc
+     """
+     # create indices before merging
+     if stage_type == 1:
+         # weekly stages
+         hourly_df = hourly_df.copy()
+
+         hourly_df['year'] = hourly_df.index.year
+         hourly_df['sddp_stage'] = 0
+         hourly_df['stage_hour'] = 0
+         for irow, (index, row) in enumerate(hourly_df.iterrows()):
+             stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
+             week = get_sddp_week(index.year, index.month, index.day)
+             hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
+             hourly_df.at[row.name, 'sddp_stage'] = week
+             hourly_df.at[row.name, 'stage_hour'] = hour_of_week
+     elif stage_type == 2:
+         # monthly stages
+         hourly_df = hourly_df.copy()
+         hourly_df['year'] = hourly_df.index.year
+         hourly_df['sddp_stage'] = hourly_df.index.month
+         hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
+     else:
+         raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
+     hourly_df = hourly_df.set_index('year,sddp_stage,stage_hour'.split(','))
+     hour_block_map_df = hour_block_map_df.set_index('year,sddp_stage,stage_hour'.split(','))
+     merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')
+
+     numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
+     result = merged_df.groupby(['year', 'sddp_stage', 'sddp_block'])[numeric_cols].agg(aggregation_method).reset_index()
+     result.sort_values(by=['year', 'sddp_stage', 'sddp_block'], inplace=True)
+     result.set_index(['year', 'sddp_stage', 'sddp_block'], inplace=True)
+     return result
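The resample module maps hourly series onto SDDP's weekly or monthly stage/block structure: build the hour-to-(stage, block) map from the study, merge it with the hourly frame on (year, sddp_stage, stage_hour), then aggregate. A sketch under the assumption that the module is importable as psr.outputs.resample and that `study` is a psr.factory Study loaded elsewhere:

```python
# Illustrative only: `study` must be a psr.factory Study loaded elsewhere,
# with StageType == 1 (weekly stages) for this example.
import pandas as pd
import psr.outputs.resample as resample

# One year of hourly demand indexed by a DatetimeIndex.
idx = pd.date_range("2024-01-01", periods=8760, freq="h")
hourly_df = pd.DataFrame({"demand": range(8760)}, index=idx)

hour_block_map_df = resample.get_hour_block_map_from_study(study)

# Weekly averages, indexed by (year, sddp_stage).
weekly = resample.remap_hourly_to_stage(
    hourly_df, hour_block_map_df, stage_type=1, aggregation_method="mean")

# Per-block averages, indexed by (year, sddp_stage, sddp_block).
by_block = resample.remap_hourly_to_blocks(
    hourly_df, hour_block_map_df, stage_type=1, aggregation_method="mean")
```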
psr_factory-5.0.0b35.dist-info/METADATA → psr_factory-5.0.0b37.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: psr-factory
- Version: 5.0.0b35
+ Version: 5.0.0b37
  Summary: PSR database management module.
  Author-email: "PSR Inc." <psrfactory@psr-inc.com>
  License-Expression: MIT
@@ -28,11 +28,7 @@ Requires-Dist: pandas; extra == "pandas"
  Provides-Extra: polars
  Requires-Dist: polars; extra == "polars"
  Provides-Extra: cloud
- Requires-Dist: zeep; extra == "cloud"
- Requires-Dist: filelock; extra == "cloud"
- Requires-Dist: pefile; extra == "cloud"
- Requires-Dist: boto3; extra == "cloud"
- Requires-Dist: botocore; extra == "cloud"
+ Requires-Dist: psr-cloud; extra == "cloud"
  Provides-Extra: execqueue-client
  Requires-Dist: requests; extra == "execqueue-client"
  Provides-Extra: execqueue-server
@@ -41,19 +37,12 @@ Requires-Dist: Flask; extra == "execqueue-server"
  Requires-Dist: python-ulid; extra == "execqueue-server"
  Requires-Dist: sqlalchemy; extra == "execqueue-server"
  Requires-Dist: python-dotenv; extra == "execqueue-server"
- Requires-Dist: pefile; extra == "execqueue-server"
- Requires-Dist: zeep; extra == "execqueue-server"
- Requires-Dist: filelock; extra == "execqueue-server"
- Requires-Dist: requests; extra == "execqueue-server"
+ Requires-Dist: psr-cloud; extra == "execqueue-server"
  Provides-Extra: all
  Requires-Dist: pandas; extra == "all"
  Requires-Dist: polars; extra == "all"
  Requires-Dist: psutil; extra == "all"
- Requires-Dist: zeep; extra == "all"
- Requires-Dist: filelock; extra == "all"
- Requires-Dist: pefile; extra == "all"
- Requires-Dist: boto3; extra == "all"
- Requires-Dist: botocore; extra == "all"
+ Requires-Dist: psr-cloud; extra == "all"
  Dynamic: license-file

  PSR Factory (version 4.0.40)
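In practical terms, the cloud-related extras no longer pin zeep, filelock, pefile, boto3, and botocore individually: `pip install "psr-factory[cloud]"` (and likewise the `execqueue-server` and `all` extras) now resolves a single `psr-cloud` dependency, which presumably carries those transitive requirements itself.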
psr_factory-5.0.0b35.dist-info/RECORD → psr_factory-5.0.0b37.dist-info/RECORD CHANGED
@@ -16,9 +16,9 @@ psr/execqueue/config.py,sha256=rUOzO5dtTkwWoZlZfk06K9RE94xCx53T1bJ1h5JaDUo,1446
  psr/execqueue/db.py,sha256=sNr_StNEgZZQCKcyCWiB1WrQJIhE9UvLUxPA2tWiXGs,8498
  psr/execqueue/server.py,sha256=LolYERWRt96P_ip4yKU7DsN7M_n9d_pbflbT0ckUV0E,15782
  psr/execqueue/watcher.py,sha256=R1dyXJ-OYn_QjqdItBwbLJZQ2LcbtdHqnRaYkyphi4w,5637
- psr/factory/__init__.py,sha256=IMo21Cp22rUOCVUwDb5X1n0jUeYrZFWeVQo0VrJig9I,219
+ psr/factory/__init__.py,sha256=xVvV3CZT_Z-M0QRD9SbJVZqMd4Gpx4Fv7MpB7f9Ifek,219
  psr/factory/api.py,sha256=QASwrk5SbbAqz63u7EhGoBBqqXOqMnicjL-eJ3gOGe0,104270
- psr/factory/factory.dll,sha256=7VbGrK_2TYm3eNGEyxpn_cmWjpXQRv7MuBhkD2W8LlI,18362192
+ psr/factory/factory.dll,sha256=0P76qLc9iW4NpQ0_bTzGdQTZrAht2W-qkBtT6mbSdBk,18362192
  psr/factory/factory.pmd,sha256=kr5xf2knYu_SJeyCsmoyYVgFwd4-VURi28rno40GIRY,250936
  psr/factory/factory.pmk,sha256=OvpqDnaCc1eeOWGQxogD0Nbg9M0PE1UZPcD65PeePV8,580337
  psr/factory/factorylib.py,sha256=o5Irbw6k-yIOJVUtDu2YYqw2x16P2LmCdouImwSssdw,28290
@@ -27,14 +27,17 @@ psr/factory/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  psr/factory/samples/__init__.py,sha256=xxOch5Fokzjy571a6OHD87FWM17qKgvfcbr8xn-n36I,80
  psr/factory/samples/sddp_case01.py,sha256=eLhtOAS2STl9-H7Nr5VUG4ATO0bVcn-CJtCn3Rf-vpI,5044
  psr/factory/samples/sddp_case21.py,sha256=Eymn3q0uyaGSmZaTr0Yk2q9yf1-enqdoUFXPwuQ1YOk,8807
+ psr/outputs/__init__.py,sha256=Yo-4vz3BMuMQJ-9DqhVx3up6jn2UB3TP9XWGcLrHDEI,192
+ psr/outputs/outputs.py,sha256=gzmKl9Ma6K9gjpTTBc45p-IT7DgEX-PzRqtsQSWfja0,6727
+ psr/outputs/resample.py,sha256=6fVBWOzxu3utkavGS5EIUd0QpSZm6kc-J1Qv8Bk0X6o,14276
  psr/psrfcommon/__init__.py,sha256=WXR560XQllIjtFpWd0jiJEbUAQIyh5-6lwj-42_J95c,200
  psr/psrfcommon/psrfcommon.py,sha256=NABM5ahvyfSizDC9c0Vu9dVK1pD_vOzIGFHL1oz2E1o,1464
  psr/psrfcommon/tempfile.py,sha256=5S13wa2DCLYTUdwbLm_KMBRnDRJ0WDlu8GO2BmZoNdg,3939
  psr/runner/__init__.py,sha256=kI9HDX-B_LMQJUHHylFHas2rNpWfNNa0pZXoIvX_Alw,230
  psr/runner/runner.py,sha256=hCVH62HAZK_M9YUiHQgqCkMevN17utegjfRIw49MdvM,27542
  psr/runner/version.py,sha256=mch2Y8anSXGMn9w72Z78PhSRhOyn55EwaoLAYhY4McE,194
- psr_factory-5.0.0b35.dist-info/licenses/LICENSE.txt,sha256=N6mqZK2Ft3iXGHj-by_MHC_dJo9qwn0URjakEPys3H4,1089
- psr_factory-5.0.0b35.dist-info/METADATA,sha256=JeHb8F1LzhFMM2uWRK8d0uQRdkkb40hjcvtBgJdacSo,3957
- psr_factory-5.0.0b35.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
- psr_factory-5.0.0b35.dist-info/top_level.txt,sha256=Jb393O96WQk3b5D1gMcrZBLKJJgZpzNjTPoldUi00ck,4
- psr_factory-5.0.0b35.dist-info/RECORD,,
+ psr_factory-5.0.0b37.dist-info/licenses/LICENSE.txt,sha256=N6mqZK2Ft3iXGHj-by_MHC_dJo9qwn0URjakEPys3H4,1089
+ psr_factory-5.0.0b37.dist-info/METADATA,sha256=-H1CZp6wnWtOr2veC2PtGGR6aBBs20Tc00eP1iDYs5E,3486
+ psr_factory-5.0.0b37.dist-info/WHEEL,sha256=ZjXRCNaQ9YSypEK2TE0LRB0sy2OVXSszb4Sx1XjM99k,97
+ psr_factory-5.0.0b37.dist-info/top_level.txt,sha256=Jb393O96WQk3b5D1gMcrZBLKJJgZpzNjTPoldUi00ck,4
+ psr_factory-5.0.0b37.dist-info/RECORD,,