psr-factory 5.0.0b69__py3-none-manylinux_2_28_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of psr-factory might be problematic.
- psr/apps/__init__.py +7 -0
- psr/apps/apps.py +225 -0
- psr/apps/version.py +5 -0
- psr/execqueue/client.py +126 -0
- psr/execqueue/config.py +52 -0
- psr/execqueue/db.py +286 -0
- psr/execqueue/server.py +689 -0
- psr/execqueue/watcher.py +146 -0
- psr/factory/__init__.py +7 -0
- psr/factory/api.py +2745 -0
- psr/factory/factory.pmd +7322 -0
- psr/factory/factory.pmk +19461 -0
- psr/factory/factorylib.py +410 -0
- psr/factory/libfactory.so +0 -0
- psr/factory/py.typed +0 -0
- psr/factory/samples/__init__.py +2 -0
- psr/factory/samples/sddp_case01.py +166 -0
- psr/factory/samples/sddp_case21.py +242 -0
- psr/outputs/__init__.py +5 -0
- psr/outputs/outputs.py +179 -0
- psr/outputs/resample.py +289 -0
- psr/psrfcommon/__init__.py +6 -0
- psr/psrfcommon/psrfcommon.py +57 -0
- psr/psrfcommon/tempfile.py +118 -0
- psr/runner/__init__.py +7 -0
- psr/runner/runner.py +743 -0
- psr/runner/version.py +5 -0
- psr_factory-5.0.0b69.dist-info/METADATA +47 -0
- psr_factory-5.0.0b69.dist-info/RECORD +32 -0
- psr_factory-5.0.0b69.dist-info/WHEEL +5 -0
- psr_factory-5.0.0b69.dist-info/licenses/LICENSE.txt +21 -0
- psr_factory-5.0.0b69.dist-info/top_level.txt +1 -0
psr/outputs/resample.py
ADDED
@@ -0,0 +1,289 @@
import datetime as dt
from typing import List, Dict, Optional, Tuple

import numpy as np
import pandas
import pandas as pd

import psr.factory

HOURS_PER_DAY = 24
DAYS_PER_WEEK = 7
HOURS_PER_WEEK = HOURS_PER_DAY * DAYS_PER_WEEK  # 168 hours
WEEKS_PER_YEAR = 52
AR_STAGE_SAMPLES = 6

_number_of_days_per_month = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
                             7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}

_g_week_start_date_by_year: Dict[int, List[dt.datetime]] = {}

_week_max_hours = 7 * 24


def get_sddp_stages_by_year(year: int) -> List[dt.datetime]:
    """Return the 52 fixed SDDP weekly stage start dates for a year.

    Start dates fall every 7 days from January 1st and are the same fixed
    calendar dates in every year, so in leap years the week starting
    February 26 absorbs February 29.
    """
    global _g_week_start_date_by_year
    if year not in _g_week_start_date_by_year:
        _g_week_start_date_by_year[year] = [
            dt.datetime(year, month, day)
            for month, day in (
                (1, 1), (1, 8), (1, 15), (1, 22), (1, 29),
                (2, 5), (2, 12), (2, 19), (2, 26),
                (3, 5), (3, 12), (3, 19), (3, 26),
                (4, 2), (4, 9), (4, 16), (4, 23), (4, 30),
                (5, 7), (5, 14), (5, 21), (5, 28),
                (6, 4), (6, 11), (6, 18), (6, 25),
                (7, 2), (7, 9), (7, 16), (7, 23), (7, 30),
                (8, 6), (8, 13), (8, 20), (8, 27),
                (9, 3), (9, 10), (9, 17), (9, 24),
                (10, 1), (10, 8), (10, 15), (10, 22), (10, 29),
                (11, 5), (11, 12), (11, 19), (11, 26),
                (12, 3), (12, 10), (12, 17), (12, 24),
            )
        ]
    return _g_week_start_date_by_year[year]


def get_closest_sddp_stage_date(y: int, m: int, d: int, previous_date: bool = True) -> Optional[dt.datetime]:
    """Get the closest SDDP stage start date for a given year, month, and day.

    With previous_date=True, returns the start of the stage containing the
    date; otherwise, returns the start of the following stage.
    """
    dates = get_sddp_stages_by_year(y)
    sdat = dt.datetime(y, m, d)
    offset = 0 if previous_date else 1
    last_date = dates[-1]
    if previous_date and sdat >= last_date:
        return last_date
    elif not previous_date and sdat >= last_date:
        # The next stage is the first stage of the following year.
        return get_sddp_stages_by_year(y + 1)[0]
    else:
        for index in range(len(dates) - 1):
            if dates[index] <= sdat < dates[index + 1]:
                return dates[index + offset]
    return None


def get_sddp_week(y: int, m: int, d: int) -> int:
    """Return the 1-based SDDP week (1-52) containing a date, or -1 if not found."""
    dates = get_sddp_stages_by_year(y)
    sdat = dt.datetime(y, m, d)
    if dates[-1] <= sdat <= dt.datetime(y, 12, 31):
        # The last week absorbs the remaining days of the year.
        return WEEKS_PER_YEAR
    for index in range(len(dates) - 1):
        if dates[index] <= sdat < dates[index + 1]:
            return index + 1
    return -1


def get_sddp_start_date_and_stage(year: int, month: int, day: int) -> Tuple[dt.datetime, int]:
    sddp_date = get_closest_sddp_stage_date(year, month, day, previous_date=True)
    sddp_week = get_sddp_week(sddp_date.year, sddp_date.month, sddp_date.day)
    return sddp_date, sddp_week


def get_hour_block_map_from_study(study: psr.factory.Study) -> pandas.DataFrame:
    """
    Extract the HourBlockMap from the study and return it as a DataFrame.

    If the study has no hour-block map, one is built from the fixed block
    durations (FixedDurationOfBlocks(block), each given as a percentage of
    the total hours in a stage).
    """
    stage_type = study.get("StageType")
    hour_block_map_df = study.get_df("HourBlockMap")

    if hour_block_map_df.empty:
        initial_year = study.get("InitialYear")
        end_year = initial_year + 10
        total_blocks = study.get("NumberOfBlocks")
        block_duration = {}
        total_duration = 0
        for block in range(1, total_blocks + 1):
            # Block duration is a percentage of the total hours in a stage.
            block_duration[block] = study.get(f"FixedDurationOfBlocks({block})")
            total_duration += block_duration[block]

        if total_duration > 99.9:
            # Group stage hours into blocks based on their durations relative
            # to the total number of hours in the stage (total_duration).
            if stage_type == 1:
                # Weekly stages, fixed stage duration.
                total_duration = _week_max_hours
                mapping_data = []
                for year in range(initial_year, end_year):
                    start_date = pandas.Timestamp(f"{year}-01-01")
                    for week in range(1, 53):
                        accumulated_hours = total_duration
                        # Walk blocks from last to first, assigning each block
                        # its share of the stage's hours from the end backwards.
                        for block in range(total_blocks, 0, -1):
                            current_duration = int(block_duration[block] * total_duration // 100)
                            if block != 1:
                                current_hours = accumulated_hours - current_duration
                            else:
                                current_hours = 0
                            for hour in range(current_hours, accumulated_hours):
                                datetime_point = start_date + pandas.Timedelta(weeks=week - 1, hours=hour)
                                formatted_datetime = f"{datetime_point.year}/{week:02d} {hour + 1}h"
                                mapping_data.append({
                                    'datetime': formatted_datetime,
                                    'year': datetime_point.year,
                                    'sddp_stage': week,
                                    'sddp_block': block,
                                    'stage_hour': hour + 1
                                })
                            accumulated_hours -= current_duration
                hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
                # Sort dataframe by year, sddp_stage, sddp_block, stage_hour.
                hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
            elif stage_type == 2:
                # Monthly stages, variable stage duration.
                mapping_data = []
                for year in range(initial_year, end_year):
                    for month in range(1, 13):
                        start_date = pandas.Timestamp(f"{year}-{month:02d}-01")
                        days_in_month = _number_of_days_per_month[month]
                        total_duration = days_in_month * HOURS_PER_DAY
                        accumulated_hours = total_duration
                        for block in range(total_blocks, 0, -1):
                            current_duration = int(block_duration[block] * total_duration // 100)
                            if block != 1:
                                current_hours = accumulated_hours - current_duration
                            else:
                                current_hours = 0
                            for hour in range(current_hours, accumulated_hours):
                                datetime_point = start_date + pandas.Timedelta(hours=hour)
                                formatted_datetime = f"{datetime_point.year}/{datetime_point.month:02d} {hour + 1}h"
                                mapping_data.append({
                                    'datetime': formatted_datetime,
                                    'year': datetime_point.year,
                                    'sddp_stage': month,
                                    'sddp_block': block,
                                    'stage_hour': hour + 1
                                })
                            accumulated_hours -= current_duration
                hour_block_map_df = pandas.DataFrame(mapping_data).set_index('datetime')
                # Sort dataframe by year, sddp_stage, sddp_block, stage_hour.
                hour_block_map_df.sort_values(by=['year', 'sddp_stage', 'sddp_block', 'stage_hour'], inplace=True)
        else:
            raise ValueError("The durations of the blocks must sum to at least 100%.")
    else:
        # Format the HourBlockMap dataframe with year, sddp_stage, sddp_block
        # and stage_hour columns. Its datetime index has the format
        # 'YYYY/WW HHHh' for weekly cases, where WW is the week number (1-52)
        # and HHH is the hour of the week (1-168), and 'YYYY/MM HHHh' for
        # monthly cases, where MM is the month number (1-12) and HHH is the
        # hour of the month (1-744).
        hour_block_map_df = hour_block_map_df.reset_index()
        hour_block_map_df['year'] = hour_block_map_df['datetime'].str.slice(0, 4).astype(int)
        hour_block_map_df['sddp_stage'] = hour_block_map_df['datetime'].str.slice(5, 7).astype(int)
        hour_block_map_df['stage_hour'] = hour_block_map_df['datetime'].str.slice(8, -1).astype(int)
        hour_block_map_df['sddp_block'] = ((hour_block_map_df['stage_hour'] - 1) // 6) + 1
        hour_block_map_df = hour_block_map_df.set_index('datetime')[['year', 'sddp_stage', 'sddp_block', 'stage_hour']]
    return hour_block_map_df


def remap_hourly_to_stage(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
                          aggregation_method: str = 'mean') -> pd.DataFrame:
    """
    Map hourly data to per-stage (weekly or monthly) data:
    - Merge the hourly dataframe with the study's hour-block map dataframe.
    - Aggregate by stage using mean, sum, max, etc.
    """
    # Create the (year, sddp_stage, stage_hour) keys before merging.
    if stage_type == 1:
        # Weekly stages.
        hourly_df = hourly_df.copy()

        hourly_df['year'] = hourly_df.index.year
        hourly_df['sddp_stage'] = 0
        hourly_df['stage_hour'] = 0
        for index in hourly_df.index:
            stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
            week = get_sddp_week(index.year, index.month, index.day)
            hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
            hourly_df.at[index, 'sddp_stage'] = week
            hourly_df.at[index, 'stage_hour'] = hour_of_week
    elif stage_type == 2:
        # Monthly stages.
        hourly_df = hourly_df.copy()
        hourly_df['year'] = hourly_df.index.year
        hourly_df['sddp_stage'] = hourly_df.index.month
        hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
    else:
        raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
    hourly_df = hourly_df.set_index(['year', 'sddp_stage', 'stage_hour'])
    hour_block_map_df = hour_block_map_df.set_index(['year', 'sddp_stage', 'stage_hour'])
    merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')

    numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
    result = merged_df.groupby(['year', 'sddp_stage'])[numeric_cols].agg(aggregation_method).reset_index()
    result.sort_values(by=['year', 'sddp_stage'], inplace=True)
    result.set_index(['year', 'sddp_stage'], inplace=True)
    return result


def remap_hourly_to_blocks(hourly_df: pd.DataFrame, hour_block_map_df: pd.DataFrame, stage_type: int,
                           aggregation_method: str = 'mean') -> pd.DataFrame:
    """
    Map hourly data to per-stage, per-block data:
    - Merge the hourly dataframe with the study's hour-block map dataframe.
    - Aggregate by stage and block using mean, sum, max, etc.
    """
    # Create the (year, sddp_stage, stage_hour) keys before merging.
    if stage_type == 1:
        # Weekly stages.
        hourly_df = hourly_df.copy()

        hourly_df['year'] = hourly_df.index.year
        hourly_df['sddp_stage'] = 0
        hourly_df['stage_hour'] = 0
        for index in hourly_df.index:
            stage_start_date = get_closest_sddp_stage_date(index.year, index.month, index.day, previous_date=True)
            week = get_sddp_week(index.year, index.month, index.day)
            hour_of_week = ((index - stage_start_date).days * 24) + index.hour + 1
            hourly_df.at[index, 'sddp_stage'] = week
            hourly_df.at[index, 'stage_hour'] = hour_of_week
    elif stage_type == 2:
        # Monthly stages.
        hourly_df = hourly_df.copy()
        hourly_df['year'] = hourly_df.index.year
        hourly_df['sddp_stage'] = hourly_df.index.month
        hourly_df['stage_hour'] = ((hourly_df.index.day - 1) * 24) + hourly_df.index.hour + 1
    else:
        raise ValueError("Unsupported stage type. Only weekly (1) and monthly (2) are supported.")
    hourly_df = hourly_df.set_index(['year', 'sddp_stage', 'stage_hour'])
    hour_block_map_df = hour_block_map_df.set_index(['year', 'sddp_stage', 'stage_hour'])
    merged_df = pd.merge(hourly_df, hour_block_map_df, left_index=True, right_index=True, how='inner')

    numeric_cols = hourly_df.select_dtypes(include=[np.number]).columns.tolist()
    result = merged_df.groupby(['year', 'sddp_stage', 'sddp_block'])[numeric_cols].agg(aggregation_method).reset_index()
    result.sort_values(by=['year', 'sddp_stage', 'sddp_block'], inplace=True)
    result.set_index(['year', 'sddp_stage', 'sddp_block'], inplace=True)
    return result
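A minimal usage sketch of the helpers above (not part of the package): the hourly load series and the hand-built hour-block map are hypothetical stand-ins; the map would normally come from get_hour_block_map_from_study on a loaded study.

import numpy as np
import pandas as pd

from psr.outputs.resample import (
    get_closest_sddp_stage_date,
    get_sddp_week,
    remap_hourly_to_stage,
)

# Hypothetical hourly series covering the first two SDDP weeks of 2024.
index = pd.date_range("2024-01-01", periods=2 * 168, freq="h")
hourly_df = pd.DataFrame(
    {"load_mw": np.random.default_rng(0).uniform(80.0, 120.0, len(index))},
    index=index,
)

# Week lookups follow the fixed SDDP calendar, not ISO weeks.
assert get_sddp_week(2024, 1, 9) == 2
assert get_closest_sddp_stage_date(2024, 1, 9).day == 8

# Hand-built hour-block map with the columns the remap functions expect
# (a single block per stage, purely for illustration).
hour_block_map_df = pd.DataFrame({
    "year": 2024,
    "sddp_stage": [week for week in (1, 2) for _ in range(168)],
    "stage_hour": list(range(1, 169)) * 2,
    "sddp_block": 1,
})

# One mean value per (year, sddp_stage).
weekly_df = remap_hourly_to_stage(hourly_df, hour_block_map_df,
                                  stage_type=1, aggregation_method="mean")
print(weekly_df)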
psr/psrfcommon/psrfcommon.py
ADDED
@@ -0,0 +1,57 @@
# PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential

import os
import pathlib
import subprocess
import sys
from contextlib import contextmanager
from typing import Union, List


@contextmanager
def change_cwd(new_dir: Union[str, pathlib.Path]):
    """Temporarily change the current working directory, restoring the
    previous one on exit."""
    last_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(last_dir)


def exec_cmd(cmd: Union[str, List[str]], **kwargs) -> int:
    dry_run = kwargs.get("dry_run", False)
    print_progress = kwargs.get("show_progress", False)
    env = kwargs.get("env", {})
    proc_env = os.environ.copy()
    proc_env.update(env)

    if print_progress or dry_run:
        sys.stdout.flush()

    if dry_run:
        # Print the command that would run instead of executing it.
        if isinstance(cmd, list):
            print(" ".join(cmd))
        else:
            print(cmd)
        return_code = 0
    else:
        try:
            return_code = subprocess.call(cmd, shell=True, env=proc_env)
            if return_code > 0:
                raise RuntimeError(f"Execution error, code {return_code}")
            elif print_progress:
                print("Execution success", return_code)
        except OSError as e:
            msg = f"Execution failed: {e}"
            if print_progress:
                print(msg, file=sys.stderr)
            raise RuntimeError(msg)

    if print_progress or dry_run:
        sys.stdout.flush()
    return return_code
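A short usage sketch (not part of the package); the directory and commands are placeholders. exec_cmd raises RuntimeError on a non-zero exit code, and dry_run=True only prints the command instead of running it.

from psr.psrfcommon.psrfcommon import change_cwd, exec_cmd

# Run a command from inside a working directory; the previous directory
# is restored even if the block raises.
with change_cwd("/tmp"):
    exec_cmd("echo hello", dry_run=True, show_progress=True)

# Extra environment variables are merged into a copy of os.environ.
exec_cmd(["echo", "case01"], dry_run=True, env={"PSR_CASE": "case01"})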
psr/psrfcommon/tempfile.py
ADDED
@@ -0,0 +1,118 @@
# PSR Factory. Copyright (C) PSR, Inc - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential

import errno
import io
import os
from random import Random


class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names. Each string is eight characters long. Multiple
    threads can safely use the same instance at the same time.

    _RandomNameSequence is an iterator."""

    # Adapted from Python's tempfile module.

    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"

    @property
    def rng(self):
        # Re-create the RNG after a fork so child processes do not
        # repeat the parent's name sequence.
        cur_pid = os.getpid()
        if cur_pid != getattr(self, "_rng_pid", None):
            self._rng = Random()  # nosec
            self._rng_pid = cur_pid
        return self._rng

    def __iter__(self):
        return self

    def __next__(self):
        c = self.characters
        choose = self.rng.choice
        letters = [choose(c) for dummy in range(8)]
        return "".join(letters)


def _get_tempfile_name(base_path: str, prefix: str):
    """Find a usable temporary file name inside base_path.

    We determine whether a candidate name is usable by trying to create
    and write to a file with that name. If this is successful, the test
    file is deleted. To prevent denial of service, the name of the test
    file is randomized."""
    # Adapted from Python's tempfile module.

    _text_openflags = os.O_RDWR | os.O_CREAT | os.O_EXCL
    if hasattr(os, "O_NOFOLLOW"):
        _text_openflags |= os.O_NOFOLLOW

    _bin_openflags = _text_openflags
    if hasattr(os, "O_BINARY"):
        _bin_openflags |= os.O_BINARY

    namer = _RandomNameSequence()

    if base_path != os.curdir:
        base_path = os.path.abspath(base_path)
    # Try only a few names per directory.
    for seq in range(100):
        name = next(namer)
        filename = os.path.join(base_path, prefix + name)
        try:
            fd = os.open(filename, _bin_openflags, 0o600)
            try:
                try:
                    with io.open(fd, "wb", closefd=False) as fp:
                        fp.write(b"blat")
                finally:
                    os.close(fd)
            finally:
                os.unlink(filename)
            return filename
        except FileExistsError:
            pass
        except PermissionError:
            # This exception is thrown when a directory with the chosen
            # name already exists on Windows.
            if (
                os.name == "nt"
                and os.path.isdir(base_path)
                and os.access(base_path, os.W_OK)
            ):
                continue
            break  # no point trying more names in this directory
        except OSError:
            break  # no point trying more names in this directory
    raise FileNotFoundError(
        errno.ENOENT, "No usable temporary file found in %s" % base_path
    )


class CreateTempFile:
    def __init__(
        self,
        base_path: str,
        prefix: str,
        file_content: str,
        extension: str = ".dat",
        delete_tempfile: bool = True,
    ):
        self.delete_tempfile = delete_tempfile
        # Reserve a unique temporary file name.
        self.temp_file_name = _get_tempfile_name(base_path, prefix) + extension
        self.temp_content = file_content

    def __enter__(self):
        # Write the content and return the (already closed) file object;
        # its .name attribute points at the created file.
        with open(self.temp_file_name, "w", encoding="utf-8-sig") as tempfile:
            tempfile.write(self.temp_content)
        return tempfile

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.delete_tempfile:
            os.remove(self.temp_file_name)
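A short usage sketch (not part of the package); the prefix and content are placeholders. Note that __enter__ returns an already-closed file object, so only its .name attribute should be used inside the block.

import os

from psr.psrfcommon.tempfile import CreateTempFile

# The file is created on __enter__ and, because delete_tempfile defaults
# to True, removed on __exit__.
with CreateTempFile(".", "sddp_", "some input data\n", extension=".dat") as f:
    print(f.name, os.path.getsize(f.name))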