openstef 3.4.61-py3-none-any.whl → 3.4.63-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openstef/feature_engineering/weather_features.py +0 -1
- openstef/metrics/metrics.py +7 -4
- openstef/model/confidence_interval_applicator.py +1 -1
- openstef/model/fallback.py +2 -4
- openstef/model/metamodels/missing_values_handler.py +2 -2
- openstef/model/regressors/custom_regressor.py +2 -2
- openstef/model/regressors/gblinear_quantile.py +1 -1
- openstef/model/serializer.py +2 -3
- openstef/model/standard_deviation_generator.py +1 -1
- openstef/model_selection/model_selection.py +3 -1
- openstef/postprocessing/postprocessing.py +0 -5
- openstef/tasks/calculate_kpi.py +5 -5
- openstef/tasks/create_basecase_forecast.py +4 -4
- openstef/tasks/create_components_forecast.py +4 -6
- openstef/tasks/create_forecast.py +3 -3
- openstef/tasks/create_solar_forecast.py +2 -3
- openstef/tasks/optimize_hyperparameters.py +3 -3
- openstef/tasks/split_forecast.py +3 -4
- openstef/tasks/train_model.py +5 -5
- openstef/validation/validation.py +7 -6
- {openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/METADATA +1 -1
- {openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/RECORD +25 -25
- {openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/LICENSE +0 -0
- {openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/WHEEL +0 -0
- {openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/top_level.txt +0 -0
openstef/feature_engineering/weather_features.py
CHANGED
@@ -397,7 +397,6 @@ def calculate_dni(radiation: pd.Series, pj: PredictionJobDataClass) -> pd.Series
     solar_zenith = solpos.apparent_zenith
 
     # convert radiation (ghi) to right unit (J/m^2 to kWh/m^2)
-    # TODO: check whether unit conversion is necessary
     ghi_forecasted = radiation / 3600
     # convert ghi to dni
     dni_converted = pvlib.irradiance.dni(
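
Note on the removed TODO: as a quick sanity check of the division above (an illustration, not openstef code), dividing an hourly-accumulated radiation value in J/m^2 by 3600 gives Wh/m^2, which is numerically equal to the mean irradiance in W/m^2 over that hour:

    # Illustration only (assumes hourly-accumulated GHI in J/m^2):
    ghi_j_per_m2 = 1_800_000              # 1.8 MJ/m^2 accumulated over one hour
    ghi_wh_per_m2 = ghi_j_per_m2 / 3600   # 500.0 Wh/m^2, i.e. 500 W/m^2 mean irradiance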
openstef/metrics/metrics.py
CHANGED
@@ -9,7 +9,7 @@
 #
 # SPDX-License-Identifier: MIT
 """This module contains all metrics to assess forecast quality."""
-from typing import Callable
+from typing import Callable, Optional, Tuple
 
 import numpy as np
 import pandas as pd
@@ -299,12 +299,15 @@ def skill_score_positive_peaks(
 
 
 def franks_skill_score(
-    realised: pd.Series,
+    realised: pd.Series,
+    forecast: pd.Series,
+    basecase: pd.Series,
+    range_: Optional[float] = None,
 ) -> float:
     """Calculate Franks skill score."""
     # Combine series in one DataFrame
     combined = pd.concat([realised, forecast], axis=1)
-    if range_
+    if not range_:
         range_ = (
             combined[realised.name].max() - combined[realised.name].min()
             if (combined[realised.name].max() - combined[realised.name].min()) != 0
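
With the new signature, `range_` is optional and, per the hunk above, falls back to the min-max range of the realised series when omitted. A hypothetical call (the DataFrame `df` and its column names are illustrative, not from openstef):

    from openstef.metrics.metrics import franks_skill_score

    score = franks_skill_score(
        realised=df["load"],
        forecast=df["forecast"],
        basecase=df["basecase"],
    )  # range_ omitted: derived from the realised min-max range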
@@ -360,7 +363,7 @@ def franks_skill_score_peaks(
 
 def xgb_quantile_eval(
     preds: np.ndarray, dmatrix: xgboost.DMatrix, quantile: float = 0.2
-) ->
+) -> Tuple:
     """Customized evaluational metric that equals to quantile regression loss (also known as pinball loss).
 
     Quantile regression is regression that estimates a specified quantile of target's distribution conditional on given features.
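
For reference, the pinball (quantile) loss that `xgb_quantile_eval` is documented to compute can be sketched in a few lines; this is a standalone illustration, not the openstef implementation:

    import numpy as np

    def pinball_loss(y_true: np.ndarray, y_pred: np.ndarray, quantile: float = 0.2) -> float:
        """Mean pinball loss; weighs under-forecasts by `quantile` and over-forecasts by `1 - quantile`."""
        error = y_true - y_pred
        return float(np.mean(np.maximum(quantile * error, (quantile - 1) * error)))

    pinball_loss(np.array([10.0, 12.0, 9.0]), np.array([9.5, 11.0, 10.0]), quantile=0.2)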
openstef/model/confidence_interval_applicator.py
CHANGED
@@ -137,7 +137,7 @@ class ConfidenceIntervalApplicator:
         # Determine now, rounded on 15 minutes,
         # Rounding helps to prevent fractional t_aheads
         now = (
-            pd.Series(datetime.
+            pd.Series(datetime.now(tz=forecast_copy.index.tzinfo))
             .min()
             .round(f"{minimal_resolution}T")
             .to_pydatetime()
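
The change keeps the round-to-resolution trick but builds "now" in the forecast's own timezone. The same pattern in isolation (a sketch that assumes UTC and that `minimal_resolution` is a number of minutes):

    from datetime import datetime, UTC
    import pandas as pd

    minimal_resolution = 15  # minutes (assumed)
    now = pd.Timestamp(datetime.now(tz=UTC)).round(f"{minimal_resolution}T").to_pydatetime()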
openstef/model/fallback.py
CHANGED
@@ -1,7 +1,7 @@
 # SPDX-FileCopyrightText: 2017-2023 Contributors to the OpenSTEF project <korte.termijn.prognoses@alliander.com> # noqa E501>
 #
 # SPDX-License-Identifier: MPL-2.0
-from datetime import datetime
+from datetime import datetime, UTC
 
 import pandas as pd
 
@@ -43,9 +43,7 @@ def generate_fallback(
 
     # Find most extreme historic day (do not count today as it is incomplete)
     day_with_highest_load_date = (
-        load[load.index
-        .idxmax()
-        .load.date()
+        load[load.index < datetime.now(tz=UTC)].idxmax().load.date()
     )
     # generate datetime range of the day with the highest load
     from_datetime = pd.Timestamp(day_with_highest_load_date, tz=load.index.tz)
openstef/model/metamodels/missing_values_handler.py
CHANGED
@@ -90,7 +90,7 @@ class MissingValuesHandler(BaseEstimator, RegressorMixin, MetaEstimatorMixin):
     def fit(self, x, y):
         """Fit model."""
         _, y = check_X_y(x, y, force_all_finite="allow-nan", y_numeric=True)
-        if
+        if not isinstance(x, pd.DataFrame):
             x = pd.DataFrame(np.asarray(x))
         self.feature_in_names_ = list(x.columns)
         self.n_features_in_ = x.shape[1]
@@ -133,6 +133,6 @@ class MissingValuesHandler(BaseEstimator, RegressorMixin, MetaEstimatorMixin):
             x,
             force_all_finite="allow-nan",
         )
-        if
+        if not isinstance(x, pd.DataFrame):
             x = pd.DataFrame(np.array(x))
         return self.pipeline_.predict(x[self.non_null_columns_])
openstef/model/regressors/custom_regressor.py
CHANGED
@@ -26,9 +26,9 @@ class CustomOpenstfRegressor(OpenstfRegressor):
     def valid_kwargs() -> list[str]:
         ...
 
-    @
+    @staticmethod
     @abstractmethod
-    def objective(
+    def objective() -> Type[RegressorObjective]:
         ...
 
 
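
The restored decorators make `objective` an abstract static method; stacking `@staticmethod` above `@abstractmethod` is the standard way to declare one, as in this generic sketch (not openstef code):

    from abc import ABC, abstractmethod

    class Base(ABC):
        @staticmethod
        @abstractmethod
        def objective() -> type:
            """Subclasses must provide a concrete static implementation."""
            ...

    class Concrete(Base):
        @staticmethod
        def objective() -> type:
            return int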
openstef/model/regressors/gblinear_quantile.py
CHANGED
@@ -306,7 +306,7 @@ class GBLinearQuantileOpenstfRegressor(OpenstfRegressor):
         feature_names = booster.feature_names
 
         # Get importance
-        feature_importance = [score.get(f, 0.0) for f in feature_names]
+        feature_importance = [np.abs(score.get(f, 0.0)) for f in feature_names]
         # Convert to array
         features_importance_array = np.array(feature_importance, dtype=np.float32)
 
openstef/model/serializer.py
CHANGED
@@ -5,7 +5,7 @@ import json
 import logging
 import os
 import shutil
-from datetime import datetime
+from datetime import datetime, UTC
 from json import JSONDecodeError
 from typing import Optional, Union
 from urllib.parse import unquote, urlparse
@@ -283,8 +283,7 @@ class MLflowSerializer:
         """Determines how many days ago a model is trained from the mlflow run."""
         try:
             model_datetime = run.end_time.to_pydatetime()
-
-            model_age_days = (datetime.utcnow() - model_datetime).days
+            model_age_days = (datetime.now(tz=UTC) - model_datetime).days
         except Exception as e:
             self.logger.warning(
                 "Could not get model age. Returning infinite age!", exception=str(e)
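
This hunk shows the pattern applied throughout this release: the timezone-naive `datetime.utcnow()`, which is deprecated since Python 3.12, is replaced by the timezone-aware `datetime.now(tz=UTC)`; note that `datetime.UTC` requires Python 3.11 or newer. A minimal before/after sketch:

    from datetime import datetime, timezone, UTC

    naive = datetime.utcnow()                   # old: no tzinfo attached
    aware = datetime.now(tz=UTC)                # new: tz-aware, UTC
    aware_compat = datetime.now(timezone.utc)   # equivalent spelling on older Python versions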
openstef/model/standard_deviation_generator.py
CHANGED
@@ -69,7 +69,7 @@ class StandardDeviationGenerator:
         # Calculate the error for each predicted point
         error = realised - predicted
         error.index = error.index.hour  # Hour only, remove the rest
-        # For the time starts with 00, 01, 02, etc.
+        # For the time starts with 00, 01, 02, etc.
         for hour in range(24):
            hour_error = error[error.index == hour]
 
openstef/model_selection/model_selection.py
CHANGED
@@ -230,7 +230,9 @@ def split_data_train_validation_test(
     for date_set in [max_dates, min_dates, other_dates]:
         n_days_val = max(1, int(validation_fraction * len(date_set)))
         val_dates += list(
-            np.random.choice(
+            np.random.default_rng().choice(
+                list(date_set), n_days_val, replace=False
+            )
         )
         train_dates += [x for x in date_set if x not in val_dates]
 
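
The sampling switch follows NumPy's recommendation to use the Generator API rather than the legacy global random state. A minimal sketch of the new call (illustrative data):

    import numpy as np

    date_set = ["2024-01-01", "2024-01-02", "2024-01-03", "2024-01-04"]
    rng = np.random.default_rng()   # pass a seed to default_rng() for reproducibility
    val_dates = list(rng.choice(list(date_set), 2, replace=False))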
openstef/postprocessing/postprocessing.py
CHANGED
@@ -239,11 +239,6 @@ def add_prediction_job_properties_to_forecast(
     if forecast_quality is not None:
         forecast["quality"] = forecast_quality
 
-    # TODO rename prediction job typ to type
-    # TODO algtype = model_file_path, perhaps we can find a more logical name
-    # TODO perhaps better to make a forecast its own class!
-    # TODO double check and sync this with make_basecase_forecast (other fields are added)
-    # !!!!! TODO fix the requirement for customer
     forecast["pid"] = pj["id"]
     forecast["customer"] = pj["name"]
     forecast["description"] = pj["description"]
openstef/tasks/calculate_kpi.py
CHANGED
@@ -21,7 +21,7 @@ Example:
 import logging
 
 # Import builtins
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 import numpy as np
@@ -56,8 +56,8 @@ def main(model_type: ModelType = None, config=None, database=None) -> None:
 
     with TaskContext(taskname, config, database) as context:
         # Set start and end time
-
-
+        end_time = datetime.now(tz=UTC)
+        start_time = end_time - timedelta(days=1)
 
         PredictionJobLoop(context, model_type=model_type).map(
             check_kpi_task,
@@ -77,9 +77,9 @@ def check_kpi_task(
 ) -> None:
     # Apply default parameters if none are provided
     if start_time is None:
-        start_time = datetime.
+        start_time = datetime.now(tz=UTC) - timedelta(days=1)
     if end_time is None:
-        end_time = datetime.
+        end_time = datetime.now(tz=UTC)
 
     # Get realised load data
     realised = context.database.get_load_pid(pj["id"], start_time, end_time, "15T")
openstef/tasks/create_basecase_forecast.py
CHANGED
@@ -16,7 +16,7 @@ Example:
     $ python create_basecase_forecast.py
 
 """
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 import pandas as pd
@@ -68,8 +68,8 @@ def create_basecase_forecast_task(
         return
 
     # Define datetime range for input data
-    datetime_start = datetime.
-    datetime_end = datetime.
+    datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
+    datetime_end = datetime.now(tz=UTC) + timedelta(days=t_ahead_days)
 
     # Retrieve input data
     input_data = context.database.get_model_input(
@@ -87,7 +87,7 @@ def create_basecase_forecast_task(
     basecase_forecast = basecase_forecast.loc[
         basecase_forecast.index
         > (
-            pd.to_datetime(datetime.
+            pd.to_datetime(datetime.now(tz=UTC), utc=True)
            + timedelta(minutes=pj.horizon_minutes)
         ),
        :,
openstef/tasks/create_components_forecast.py
CHANGED
@@ -22,7 +22,7 @@ Example:
 
 """
 import logging
-from datetime import datetime, timedelta,
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 import pandas as pd
@@ -76,8 +76,8 @@ def create_components_forecast_task(
        return
 
    # Define datetime range for input data
-    datetime_start = datetime.
-    datetime_end = datetime.
+    datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
+    datetime_end = datetime.now(tz=UTC) + timedelta(days=t_ahead_days)
 
    logger.info(
        "Get predicted load", datetime_start=datetime_start, datetime_end=datetime_end
@@ -120,9 +120,7 @@ def create_components_forecast_task(
    logger.debug("Written forecast to database")
 
    # Check if forecast was complete enough, otherwise raise exception
-    if forecasts.index.max() < datetime.
-        tzinfo=timezone.utc
-    ) + timedelta(hours=30):
+    if forecasts.index.max() < datetime.now(tz=UTC) + timedelta(hours=30):
        # Check which input data is missing the most.
        # Do this by counting the NANs for (load)forecast, radiation and windspeed
        max_index = forecasts.index.max()
openstef/tasks/create_forecast.py
CHANGED
@@ -20,7 +20,7 @@ Example:
     $ python create_forecast.py
 
 """
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 from openstef.data_classes.prediction_job import PredictionJobDataClass
@@ -73,8 +73,8 @@ def create_forecast_task(
     mlflow_tracking_uri = context.config.paths_mlflow_tracking_uri
 
     # Define datetime range for input data
-    datetime_start = datetime.
-    datetime_end = datetime.
+    datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
+    datetime_end = datetime.now(tz=UTC) + timedelta(seconds=pj.horizon_minutes * 60)
 
     # Retrieve input data
     input_data = context.database.get_model_input(
openstef/tasks/create_solar_forecast.py
CHANGED
@@ -12,7 +12,7 @@ Example:
     $ python create_solar_forecast
 
 """
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 import numpy as np
@@ -23,7 +23,6 @@ from openstef import PROJECT_ROOT
 from openstef.tasks.utils.predictionjobloop import PredictionJobLoop
 from openstef.tasks.utils.taskcontext import TaskContext
 
-# TODO move to config
 PV_COEFS_FILEPATH = PROJECT_ROOT / "openstef" / "data" / "pv_single_coefs.csv"
 
 
@@ -231,7 +230,7 @@ def main(config=None, database=None, **kwargs):
     num_prediction_jobs = len(prediction_jobs)
 
     # only make customer = Provincie once an hour
-    utc_now_minute = datetime.
+    utc_now_minute = datetime.now(tz=UTC)().minute
     if utc_now_minute >= 15:
         prediction_jobs = [
             pj for pj in prediction_jobs if str(pj["name"]).startswith("Provincie")
openstef/tasks/optimize_hyperparameters.py
CHANGED
@@ -16,7 +16,7 @@ Example:
     $ python optimize_hyperparameters.py
 
 """
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 from openstef.data_classes.prediction_job import PredictionJobDataClass
@@ -88,8 +88,8 @@ def optimize_hyperparameters_task(
         )
         return
 
-    datetime_start = datetime.
-    datetime_end = datetime.
+    datetime_start = datetime.now(tz=UTC) - timedelta(days=DEFAULT_TRAINING_PERIOD_DAYS)
+    datetime_end = datetime.now(tz=UTC)
 
     input_data = context.database.get_model_input(
         pid=pj["id"],
openstef/tasks/split_forecast.py
CHANGED
@@ -23,7 +23,7 @@ Example:
 
 """
 import logging
-from datetime import datetime
+from datetime import datetime, UTC
 from pathlib import Path
 
 import numpy as np
@@ -93,7 +93,6 @@ def split_forecast_task(
     components, coefdict = find_components(input_split_function)
 
     # Calculate mean absolute error (MAE)
-    # TODO: use a standard metric function for this
     error = components[["load", "Inschatting"]].diff(axis=1).iloc[:, 1]
     mae = error.abs().mean()
     coefdict.update({"MAE": mae})
@@ -183,7 +182,7 @@ def convert_coefdict_to_coefsdf(
         pj["id"],
         input_split_function.index.min().date(),
         input_split_function.index.max().date(),
-        datetime.
+        datetime.now(tz=UTC),
     ]
     coefsdf = pd.DataFrame(
         {"coef_name": list(coefdict.keys()), "coef_value": list(coefdict.values())}
@@ -237,7 +236,7 @@ def find_components(
 
     # Carry out fitting
     # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html # noqa
-    coefs,
+    coefs, _ = scipy.optimize.curve_fit(
         weighted_sum,
         xdata=df.iloc[:, 1:].values.T,
         ydata=load.values,
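
The completed line unpacks only the fitted coefficients from `scipy.optimize.curve_fit`, which returns a `(popt, pcov)` pair. An illustrative fit of a weighted sum of two known profiles (synthetic data, not openstef's `find_components`):

    import numpy as np
    import scipy.optimize

    def weighted_sum(x, a, b):
        # x holds the component profiles row-wise, as in the xdata=...T call above
        return a * x[0] + b * x[1]

    profiles = np.array([[1.0, 2.0, 3.0, 4.0], [1.0, 0.5, 0.25, 0.125]])
    load = 2.0 * profiles[0] + 3.0 * profiles[1]
    coefs, _ = scipy.optimize.curve_fit(weighted_sum, xdata=profiles, ydata=load)
    # coefs is approximately [2.0, 3.0]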
openstef/tasks/train_model.py
CHANGED
@@ -19,7 +19,7 @@ Example:
     $ python model_train.py
 
 """
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from pathlib import Path
 
 import pandas as pd
@@ -123,7 +123,7 @@ def train_model_task(
     )
 
     if datetime_end is None:
-        datetime_end = datetime.
+        datetime_end = datetime.now(tz=UTC)
     if datetime_start is None:
         datetime_start = datetime_end - timedelta(days=training_period_days_to_fetch)
 
@@ -184,9 +184,9 @@ def train_model_task(
             "'save_train_forecasts option was activated.'"
         )
         context.database.write_train_forecasts(pj, data_sets)
-        context.logger.debug(
+        context.logger.debug("Saved Forecasts from trained model on datasets")
     except SkipSaveTrainingForecasts:
-        context.logger.debug(
+        context.logger.debug("Skip saving forecasts")
     except InputDataOngoingZeroFlatlinerError:
         if (
             context.config.known_zero_flatliners
@@ -213,7 +213,7 @@ def main(model_type=None, config=None, database=None):
         model_type = [ml.value for ml in ModelType]
 
     taskname = Path(__file__).name.replace(".py", "")
-    datetime_now = datetime.
+    datetime_now = datetime.now(tz=UTC)
     with TaskContext(taskname, config, database) as context:
         PredictionJobLoop(context, model_type=model_type).map(
             train_model_task, context, datetime_end=datetime_now
openstef/validation/validation.py
CHANGED
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MPL-2.0
 import logging
 import math
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, UTC
 from typing import Union
 
 import numpy as np
@@ -203,7 +203,7 @@ def calc_completeness_features(
     df_copy.drop("horizon", inplace=True, axis=1)
 
     if weights is None:
-        weights = np.array([1] * (
+        weights = np.array([1] * (len(df_copy.columns)))
 
     length_weights = len(weights)
     length_features = len(df_copy.columns)
@@ -243,7 +243,7 @@ def detect_ongoing_zero_flatliner(
 
     """
     # remove all timestamps in the future
-    load = load[load.index
+    load = load[load.index <= datetime.now(tz=UTC)]
     latest_measurement_time = load.dropna().index.max()
     latest_measurements = load[
         latest_measurement_time - timedelta(minutes=duration_threshold_minutes) :
@@ -297,9 +297,10 @@ def calc_completeness_dataframe(
     # timecols: {delay:number of points expected to be missing}
     # number of points expected to be missing = numberOfPointsUpToTwoDaysAhead - numberOfPointsAvailable
     timecols = {
-
-
-
+        column: len(df)
+        - eval(column[2:].replace("min", "/60").replace("d", "*24.0")) / 0.25
+        for column in df.columns
+        if column.startswith("T-")
     }
 
     non_na_count = df.count()
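
The rebuilt dict comprehension derives, for each lag-feature column named like "T-15min" or "T-1d", how many 15-minute points the lag is expected to leave missing. The column-name arithmetic in isolation (an illustration of the expression above, not a separate openstef helper):

    def expected_missing_points(column: str) -> float:
        # "T-15min" -> eval("15/60") = 0.25 h; "T-1d" -> eval("1*24.0") = 24.0 h
        hours = eval(column[2:].replace("min", "/60").replace("d", "*24.0"))
        return hours / 0.25  # number of 15-minute steps covered by the lag

    expected_missing_points("T-15min")  # 1.0
    expected_missing_points("T-1d")     # 96.0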
{openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/RECORD
CHANGED
@@ -31,30 +31,30 @@ openstef/feature_engineering/holiday_features.py,sha256=CbolIP5bfiQkqDct-9TbD828
 openstef/feature_engineering/lag_features.py,sha256=Dr6qS8UhdgEHPZZSe-w6ibtjl_lcbcQohhqdZN9fqEU,5652
 openstef/feature_engineering/missing_values_transformer.py,sha256=o_zCVEOCPn2tWzvlY44XZuDysV0TuxqeVYhilYU54YY,5010
 openstef/feature_engineering/rolling_features.py,sha256=DTcrwJK1jf3-byg9ufiT7I5B-G8TFk4-VcK17AEdtYo,2160
-openstef/feature_engineering/weather_features.py,sha256=
+openstef/feature_engineering/weather_features.py,sha256=KOfrDaHaU-e05vqa4cm7i37XI0bgTQCY-PfLsW5O1cA,15695
 openstef/metrics/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/metrics/figure.py,sha256=KDoezYem9wdS13kUx7M7FOy-4u88Sg3OX1DuhNT6kgQ,9751
-openstef/metrics/metrics.py,sha256=
+openstef/metrics/metrics.py,sha256=qV_EdzjKNiqEGKYUp4DL0KgsnCjTf4P9FqKcccFNF-o,15515
 openstef/metrics/reporter.py,sha256=w1Q6xWoYGmvnjwjXik-Gz7_gnb0lOeJMep-whEV5mNk,7897
 openstef/model/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/model/basecase.py,sha256=caI6Q-8y0ymlxGK9Js_H3Vh0q6ruNHlGD5RG0_kE5M0,2878
-openstef/model/confidence_interval_applicator.py,sha256=
-openstef/model/fallback.py,sha256=
+openstef/model/confidence_interval_applicator.py,sha256=EQoF7DfdQBd_WmbEfU_rC0LMrZV-pot8yv4MFEB4v2E,9758
+openstef/model/fallback.py,sha256=UGS2UYHyNUtAIB4zVk1BFmsmHeQ3Fx9ytv01AQqu3wo,2812
 openstef/model/model_creator.py,sha256=L84A4_HVYL7bRZY0D77j4c4kHRleVGCRXqZZkT9BZY0,6668
 openstef/model/objective.py,sha256=qJdI6GAzv8l5Mxd8G7BIqQnfdJNM7aOlg9DMzMGjWqA,14558
 openstef/model/objective_creator.py,sha256=jqMvdXiVRc9GmOvSijQY0zuyxS07-ezkVXclvoW98g4,2208
-openstef/model/serializer.py,sha256=
-openstef/model/standard_deviation_generator.py,sha256=
+openstef/model/serializer.py,sha256=4ow8hT3fP_h52PZICbECNADWcR_Q1884jSMIWKM-C2I,17181
+openstef/model/standard_deviation_generator.py,sha256=OorRvX2wRScU7f4SIBoiT24yJeeM50sETP3xC6m5IG4,2865
 openstef/model/metamodels/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/model/metamodels/feature_clipper.py,sha256=wDsf4k_2YuC6rrFlwE12LpgOdOwHHWuET2ZrJ_gr4yo,2861
 openstef/model/metamodels/grouped_regressor.py,sha256=yMN_a6TnQSyFaqlB_6Nifq-ydpb5hs6w_b97IaBbHj4,8337
-openstef/model/metamodels/missing_values_handler.py,sha256=
+openstef/model/metamodels/missing_values_handler.py,sha256=glgAlkeubLZFWbD8trTYBik7_qOJi4GCPGl1sSybSkQ,5257
 openstef/model/regressors/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/model/regressors/arima.py,sha256=wt7FVykjSvljpl7vjtliq61SiyjQ7KKtw8PF9x0xf04,7587
-openstef/model/regressors/custom_regressor.py,sha256=
+openstef/model/regressors/custom_regressor.py,sha256=T4JdJ-oTTt1PHQV0DdIEIhALvEEh07WCFlWxl8EFih0,1765
 openstef/model/regressors/dazls.py,sha256=Xt89yFHjkwpIUTkkhPmPZ74F8_tht_XV88INuP5GU2E,3994
 openstef/model/regressors/flatliner.py,sha256=T9u-ukhqFcatQmlgUtBL_G-1b_wQzgdVRq0ac64GnjQ,2789
-openstef/model/regressors/gblinear_quantile.py,sha256=
+openstef/model/regressors/gblinear_quantile.py,sha256=9O6w-4OAq0opOxbOFFxoMWn2gtNUcmrffQy9DdHCS0I,11263
 openstef/model/regressors/lgbm.py,sha256=zCdn1euEdSFxYJzH8XqQFFnb6R4JVUnmineKjX_Gy-g,800
 openstef/model/regressors/linear.py,sha256=uOvZMLGZH_9nXfmS5honCMfyVeyGXP1Cza9A_BdXlVw,3665
 openstef/model/regressors/linear_quantile.py,sha256=sI5cl6_W-hh13mg4Gf09LQ1caZmBy7COc8_5BBJxySQ,10534
@@ -63,7 +63,7 @@ openstef/model/regressors/xgb.py,sha256=SH-UiYJtMbfmRBK6738dU0ZRfYfzNynnikwbxINC
 openstef/model/regressors/xgb_multioutput_quantile.py,sha256=xWzA7tymC_o-F1OS3I7vUKf9zP6RR1ZglEeY4NAgjU0,9146
 openstef/model/regressors/xgb_quantile.py,sha256=PzKIxqN_CnEPFmzXACNuzLSmZSHbooTuiJ5ckJ9vh_E,7805
 openstef/model_selection/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
-openstef/model_selection/model_selection.py,sha256=
+openstef/model_selection/model_selection.py,sha256=ZTykej6aL5TY2oZ5XTZc7fTrTNrgxAUDRqu_rKhIyYg,11233
 openstef/monitoring/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/monitoring/performance_meter.py,sha256=6aCGjJFXFq-7qwaJyBkF3MLqjgVK6FMFVcO-bcLLUb4,2803
 openstef/monitoring/teams.py,sha256=A-tlZeuAgolxFHjgT3gGjraxzW2dmuB-UAOz4xgYNIQ,6668
@@ -76,27 +76,27 @@ openstef/pipeline/train_create_forecast_backtest.py,sha256=-kZqCWal5zYLL0k0Sapks
 openstef/pipeline/train_model.py,sha256=ThZwPo5otikVqVe6NdXkYcxkVFh-kegRVxMsQg1lbFc,19743
 openstef/pipeline/utils.py,sha256=23mB31p19FoGWelLJzxNmqlzGwEr3fCDBEA37V2kpYY,2167
 openstef/postprocessing/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
-openstef/postprocessing/postprocessing.py,sha256=
+openstef/postprocessing/postprocessing.py,sha256=6x_2ZcZaHEKMg_kxBAuKUlA_dDEs-KaO5SgGqGWHK14,8997
 openstef/preprocessing/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/preprocessing/preprocessing.py,sha256=bM_cSSSb2vGTD79RGzUrI6KoELbzlCyJwc7jqQGNEsE,1454
 openstef/tasks/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
-openstef/tasks/calculate_kpi.py,sha256=
-openstef/tasks/create_basecase_forecast.py,sha256=
-openstef/tasks/create_components_forecast.py,sha256=
-openstef/tasks/create_forecast.py,sha256=
-openstef/tasks/create_solar_forecast.py,sha256=
+openstef/tasks/calculate_kpi.py,sha256=tcW_G0JRMA2tBcb8JN5eUbFFV9UcTsqHXQ1x3f-8Biw,11881
+openstef/tasks/create_basecase_forecast.py,sha256=_4Ry7AQmXNAKq19J1qmVyG-94atygXePLxVCejCfGPw,4227
+openstef/tasks/create_components_forecast.py,sha256=8LINqAHt7SnVsQAQMOuve5K-3bLJW-tK_dXTqzlh5Mw,6140
+openstef/tasks/create_forecast.py,sha256=xASSfHehdcxS64--alYoA6oElx_1Sy4S0tfxvWucVRw,6107
+openstef/tasks/create_solar_forecast.py,sha256=HDrJrvTPCM8GS7EQwNr9uJNamf-nH2pu0o4d_xo4w4E,15062
 openstef/tasks/create_wind_forecast.py,sha256=RhshkmNSyFWx4Y6yQn02GzHjWTREbN5A5GAeWv0JpcE,2907
-openstef/tasks/optimize_hyperparameters.py,sha256=
-openstef/tasks/split_forecast.py,sha256=
-openstef/tasks/train_model.py,sha256=
+openstef/tasks/optimize_hyperparameters.py,sha256=3NT0KFgim8wAzWPJ0S-GULM3zoshyj63Ivp-g1_oPDw,4765
+openstef/tasks/split_forecast.py,sha256=X1D3MnnMdAb9wzDWubAJwfMkWpNGdRUPDvPAbJApNhg,9277
+openstef/tasks/train_model.py,sha256=gbKRB3F5qFNfTt0HQnxOjwGS721MEmF110_-FMVlYh4,8527
 openstef/tasks/utils/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/tasks/utils/dependencies.py,sha256=Jy9dtV_G7lTEa5Cdy--wvMxJuAb0adb3R0X4QDjVteM,3077
 openstef/tasks/utils/predictionjobloop.py,sha256=Ysy3zF5lzPMz_asYDKeF5m0qgVT3tCtwSPihqMjnI5Q,9580
 openstef/tasks/utils/taskcontext.py,sha256=L9K14ycwgVxbIVUjH2DIn_QWbnu-OfxcGtQ1K9T6sus,5630
 openstef/validation/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
-openstef/validation/validation.py,sha256=
-openstef-3.4.
-openstef-3.4.
-openstef-3.4.
-openstef-3.4.
-openstef-3.4.
+openstef/validation/validation.py,sha256=DfnT7f29n9AbduJy9I6mXYQSnjt241Pn36Fp9SGehR0,11225
+openstef-3.4.63.dist-info/LICENSE,sha256=7Pm2fWFFHHUG5lDHed1vl5CjzxObIXQglnYsEdtjo_k,14907
+openstef-3.4.63.dist-info/METADATA,sha256=9kJWSv45Y-61ZmpZfIJWRzZYIogkP1lZrxGawsvDsEQ,8305
+openstef-3.4.63.dist-info/WHEEL,sha256=nn6H5-ilmfVryoAQl3ZQ2l8SH5imPWFpm1A5FgEuFV4,91
+openstef-3.4.63.dist-info/top_level.txt,sha256=kD0H4PqrQoncZ957FvqwfBxa89kTrun4Z_RAPs_HhLs,9
+openstef-3.4.63.dist-info/RECORD,,
{openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/LICENSE: file without changes
{openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/WHEEL: file without changes
{openstef-3.4.61.dist-info → openstef-3.4.63.dist-info}/top_level.txt: file without changes