openstef 3.4.61__tar.gz → 3.4.63__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. {openstef-3.4.61 → openstef-3.4.63}/PKG-INFO +1 -1
  2. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/weather_features.py +0 -1
  3. {openstef-3.4.61 → openstef-3.4.63}/openstef/metrics/metrics.py +7 -4
  4. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/confidence_interval_applicator.py +1 -1
  5. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/fallback.py +2 -4
  6. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/metamodels/missing_values_handler.py +2 -2
  7. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/custom_regressor.py +2 -2
  8. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/gblinear_quantile.py +1 -1
  9. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/serializer.py +2 -3
  10. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/standard_deviation_generator.py +1 -1
  11. {openstef-3.4.61 → openstef-3.4.63}/openstef/model_selection/model_selection.py +3 -1
  12. {openstef-3.4.61 → openstef-3.4.63}/openstef/postprocessing/postprocessing.py +0 -5
  13. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/calculate_kpi.py +5 -5
  14. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/create_basecase_forecast.py +4 -4
  15. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/create_components_forecast.py +4 -6
  16. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/create_forecast.py +3 -3
  17. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/create_solar_forecast.py +2 -3
  18. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/optimize_hyperparameters.py +3 -3
  19. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/split_forecast.py +3 -4
  20. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/train_model.py +5 -5
  21. {openstef-3.4.61 → openstef-3.4.63}/openstef/validation/validation.py +7 -6
  22. {openstef-3.4.61 → openstef-3.4.63}/openstef.egg-info/PKG-INFO +1 -1
  23. {openstef-3.4.61 → openstef-3.4.63}/setup.py +1 -1
  24. {openstef-3.4.61 → openstef-3.4.63}/LICENSE +0 -0
  25. {openstef-3.4.61 → openstef-3.4.63}/README.md +0 -0
  26. {openstef-3.4.61 → openstef-3.4.63}/openstef/__init__.py +0 -0
  27. {openstef-3.4.61 → openstef-3.4.63}/openstef/__main__.py +0 -0
  28. {openstef-3.4.61 → openstef-3.4.63}/openstef/app_settings.py +0 -0
  29. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/NL_terrestrial_radiation.csv +0 -0
  30. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/NL_terrestrial_radiation.csv.license +0 -0
  31. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dazls_model_3.4.24/dazls_stored_3.4.24_baseline_model.z +0 -0
  32. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dazls_model_3.4.24/dazls_stored_3.4.24_baseline_model.z.license +0 -0
  33. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dazls_model_3.4.24/dazls_stored_3.4.24_model_card.md +0 -0
  34. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dazls_model_3.4.24/dazls_stored_3.4.24_model_card.md.license +0 -0
  35. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dutch_holidays.csv +0 -0
  36. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/dutch_holidays.csv.license +0 -0
  37. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/pv_single_coefs.csv +0 -0
  38. {openstef-3.4.61 → openstef-3.4.63}/openstef/data/pv_single_coefs.csv.license +0 -0
  39. {openstef-3.4.61 → openstef-3.4.63}/openstef/data_classes/__init__.py +0 -0
  40. {openstef-3.4.61 → openstef-3.4.63}/openstef/data_classes/data_prep.py +0 -0
  41. {openstef-3.4.61 → openstef-3.4.63}/openstef/data_classes/model_specifications.py +0 -0
  42. {openstef-3.4.61 → openstef-3.4.63}/openstef/data_classes/prediction_job.py +0 -0
  43. {openstef-3.4.61 → openstef-3.4.63}/openstef/data_classes/split_function.py +0 -0
  44. {openstef-3.4.61 → openstef-3.4.63}/openstef/enums.py +0 -0
  45. {openstef-3.4.61 → openstef-3.4.63}/openstef/exceptions.py +0 -0
  46. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/__init__.py +0 -0
  47. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/apply_features.py +0 -0
  48. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/bidding_zone_to_country_mapping.py +0 -0
  49. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/cyclic_features.py +0 -0
  50. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/data_preparation.py +0 -0
  51. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/feature_adder.py +0 -0
  52. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/feature_applicator.py +0 -0
  53. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/general.py +0 -0
  54. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/holiday_features.py +0 -0
  55. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/lag_features.py +0 -0
  56. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/missing_values_transformer.py +0 -0
  57. {openstef-3.4.61 → openstef-3.4.63}/openstef/feature_engineering/rolling_features.py +0 -0
  58. {openstef-3.4.61 → openstef-3.4.63}/openstef/metrics/__init__.py +0 -0
  59. {openstef-3.4.61 → openstef-3.4.63}/openstef/metrics/figure.py +0 -0
  60. {openstef-3.4.61 → openstef-3.4.63}/openstef/metrics/reporter.py +0 -0
  61. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/__init__.py +0 -0
  62. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/basecase.py +0 -0
  63. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/metamodels/__init__.py +0 -0
  64. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/metamodels/feature_clipper.py +0 -0
  65. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/metamodels/grouped_regressor.py +0 -0
  66. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/model_creator.py +0 -0
  67. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/objective.py +0 -0
  68. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/objective_creator.py +0 -0
  69. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/__init__.py +0 -0
  70. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/arima.py +0 -0
  71. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/dazls.py +0 -0
  72. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/flatliner.py +0 -0
  73. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/lgbm.py +0 -0
  74. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/linear.py +0 -0
  75. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/linear_quantile.py +0 -0
  76. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/regressor.py +0 -0
  77. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/xgb.py +0 -0
  78. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/xgb_multioutput_quantile.py +0 -0
  79. {openstef-3.4.61 → openstef-3.4.63}/openstef/model/regressors/xgb_quantile.py +0 -0
  80. {openstef-3.4.61 → openstef-3.4.63}/openstef/model_selection/__init__.py +0 -0
  81. {openstef-3.4.61 → openstef-3.4.63}/openstef/monitoring/__init__.py +0 -0
  82. {openstef-3.4.61 → openstef-3.4.63}/openstef/monitoring/performance_meter.py +0 -0
  83. {openstef-3.4.61 → openstef-3.4.63}/openstef/monitoring/teams.py +0 -0
  84. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/__init__.py +0 -0
  85. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/create_basecase_forecast.py +0 -0
  86. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/create_component_forecast.py +0 -0
  87. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/create_forecast.py +0 -0
  88. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/optimize_hyperparameters.py +0 -0
  89. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/train_create_forecast_backtest.py +0 -0
  90. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/train_model.py +0 -0
  91. {openstef-3.4.61 → openstef-3.4.63}/openstef/pipeline/utils.py +0 -0
  92. {openstef-3.4.61 → openstef-3.4.63}/openstef/postprocessing/__init__.py +0 -0
  93. {openstef-3.4.61 → openstef-3.4.63}/openstef/preprocessing/__init__.py +0 -0
  94. {openstef-3.4.61 → openstef-3.4.63}/openstef/preprocessing/preprocessing.py +0 -0
  95. {openstef-3.4.61 → openstef-3.4.63}/openstef/settings.py +0 -0
  96. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/__init__.py +0 -0
  97. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/create_wind_forecast.py +0 -0
  98. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/utils/__init__.py +0 -0
  99. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/utils/dependencies.py +0 -0
  100. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/utils/predictionjobloop.py +0 -0
  101. {openstef-3.4.61 → openstef-3.4.63}/openstef/tasks/utils/taskcontext.py +0 -0
  102. {openstef-3.4.61 → openstef-3.4.63}/openstef/validation/__init__.py +0 -0
  103. {openstef-3.4.61 → openstef-3.4.63}/openstef.egg-info/SOURCES.txt +0 -0
  104. {openstef-3.4.61 → openstef-3.4.63}/openstef.egg-info/dependency_links.txt +0 -0
  105. {openstef-3.4.61 → openstef-3.4.63}/openstef.egg-info/requires.txt +0 -0
  106. {openstef-3.4.61 → openstef-3.4.63}/openstef.egg-info/top_level.txt +0 -0
  107. {openstef-3.4.61 → openstef-3.4.63}/pyproject.toml +0 -0
  108. {openstef-3.4.61 → openstef-3.4.63}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: openstef
3
- Version: 3.4.61
3
+ Version: 3.4.63
4
4
  Summary: Open short term energy forecaster
5
5
  Home-page: https://github.com/OpenSTEF/openstef
6
6
  Author: Alliander N.V
@@ -397,7 +397,6 @@ def calculate_dni(radiation: pd.Series, pj: PredictionJobDataClass) -> pd.Series
397
397
  solar_zenith = solpos.apparent_zenith
398
398
 
399
399
  # convert radiation (ghi) to right unit (J/m^2 to kWh/m^2)
400
- # TODO: check whether unit conversion is necessary
401
400
  ghi_forecasted = radiation / 3600
402
401
  # convert ghi to dni
403
402
  dni_converted = pvlib.irradiance.dni(
@@ -9,7 +9,7 @@
9
9
  #
10
10
  # SPDX-License-Identifier: MIT
11
11
  """This module contains all metrics to assess forecast quality."""
12
- from typing import Callable
12
+ from typing import Callable, Optional, Tuple
13
13
 
14
14
  import numpy as np
15
15
  import pandas as pd
@@ -299,12 +299,15 @@ def skill_score_positive_peaks(
299
299
 
300
300
 
301
301
  def franks_skill_score(
302
- realised: pd.Series, forecast: pd.Series, basecase: pd.Series, range_: float = 1.0
302
+ realised: pd.Series,
303
+ forecast: pd.Series,
304
+ basecase: pd.Series,
305
+ range_: Optional[float] = None,
303
306
  ) -> float:
304
307
  """Calculate Franks skill score."""
305
308
  # Combine series in one DataFrame
306
309
  combined = pd.concat([realised, forecast], axis=1)
307
- if range_ == 1.0:
310
+ if not range_:
308
311
  range_ = (
309
312
  combined[realised.name].max() - combined[realised.name].min()
310
313
  if (combined[realised.name].max() - combined[realised.name].min()) != 0
@@ -360,7 +363,7 @@ def franks_skill_score_peaks(
360
363
 
361
364
  def xgb_quantile_eval(
362
365
  preds: np.ndarray, dmatrix: xgboost.DMatrix, quantile: float = 0.2
363
- ) -> str:
366
+ ) -> Tuple:
364
367
  """Customized evaluation metric equal to the quantile regression loss (also known as pinball loss).
365
368
 
366
369
  Quantile regression is regression that estimates a specified quantile of target's distribution conditional on given features.
@@ -137,7 +137,7 @@ class ConfidenceIntervalApplicator:
137
137
  # Determine now, rounded on 15 minutes,
138
138
  # Rounding helps to prevent fractional t_aheads
139
139
  now = (
140
- pd.Series(datetime.utcnow().replace(tzinfo=forecast_copy.index.tzinfo))
140
+ pd.Series(datetime.now(tz=forecast_copy.index.tzinfo))
141
141
  .min()
142
142
  .round(f"{minimal_resolution}T")
143
143
  .to_pydatetime()
@@ -1,7 +1,7 @@
1
1
  # SPDX-FileCopyrightText: 2017-2023 Contributors to the OpenSTEF project <korte.termijn.prognoses@alliander.com> # noqa E501>
2
2
  #
3
3
  # SPDX-License-Identifier: MPL-2.0
4
- from datetime import datetime
4
+ from datetime import datetime, UTC
5
5
 
6
6
  import pandas as pd
7
7
 
@@ -43,9 +43,7 @@ def generate_fallback(
43
43
 
44
44
  # Find most extreme historic day (do not count today as it is incomplete)
45
45
  day_with_highest_load_date = (
46
- load[load.index.tz_localize(None).date != datetime.utcnow().date()]
47
- .idxmax()
48
- .load.date()
46
+ load[load.index < datetime.now(tz=UTC)].idxmax().load.date()
49
47
  )
50
48
  # generate datetime range of the day with the highest load
51
49
  from_datetime = pd.Timestamp(day_with_highest_load_date, tz=load.index.tz)
@@ -90,7 +90,7 @@ class MissingValuesHandler(BaseEstimator, RegressorMixin, MetaEstimatorMixin):
90
90
  def fit(self, x, y):
91
91
  """Fit model."""
92
92
  _, y = check_X_y(x, y, force_all_finite="allow-nan", y_numeric=True)
93
- if type(x) != pd.DataFrame:
93
+ if not isinstance(x, pd.DataFrame):
94
94
  x = pd.DataFrame(np.asarray(x))
95
95
  self.feature_in_names_ = list(x.columns)
96
96
  self.n_features_in_ = x.shape[1]
@@ -133,6 +133,6 @@ class MissingValuesHandler(BaseEstimator, RegressorMixin, MetaEstimatorMixin):
133
133
  x,
134
134
  force_all_finite="allow-nan",
135
135
  )
136
- if type(x) != pd.DataFrame:
136
+ if not isinstance(x, pd.DataFrame):
137
137
  x = pd.DataFrame(np.array(x))
138
138
  return self.pipeline_.predict(x[self.non_null_columns_])
@@ -26,9 +26,9 @@ class CustomOpenstfRegressor(OpenstfRegressor):
26
26
  def valid_kwargs() -> list[str]:
27
27
  ...
28
28
 
29
- @classmethod
29
+ @staticmethod
30
30
  @abstractmethod
31
- def objective(self) -> Type[RegressorObjective]:
31
+ def objective() -> Type[RegressorObjective]:
32
32
  ...
33
33
 
34
34
 
@@ -306,7 +306,7 @@ class GBLinearQuantileOpenstfRegressor(OpenstfRegressor):
306
306
  feature_names = booster.feature_names
307
307
 
308
308
  # Get importance
309
- feature_importance = [score.get(f, 0.0) for f in feature_names]
309
+ feature_importance = [np.abs(score.get(f, 0.0)) for f in feature_names]
310
310
  # Convert to array
311
311
  features_importance_array = np.array(feature_importance, dtype=np.float32)
312
312
 
@@ -5,7 +5,7 @@ import json
5
5
  import logging
6
6
  import os
7
7
  import shutil
8
- from datetime import datetime
8
+ from datetime import datetime, UTC
9
9
  from json import JSONDecodeError
10
10
  from typing import Optional, Union
11
11
  from urllib.parse import unquote, urlparse
@@ -283,8 +283,7 @@ class MLflowSerializer:
283
283
  """Determines how many days ago a model is trained from the mlflow run."""
284
284
  try:
285
285
  model_datetime = run.end_time.to_pydatetime()
286
- model_datetime = model_datetime.replace(tzinfo=None)
287
- model_age_days = (datetime.utcnow() - model_datetime).days
286
+ model_age_days = (datetime.now(tz=UTC) - model_datetime).days
288
287
  except Exception as e:
289
288
  self.logger.warning(
290
289
  "Could not get model age. Returning infinite age!", exception=str(e)
@@ -69,7 +69,7 @@ class StandardDeviationGenerator:
69
69
  # Calculate the error for each predicted point
70
70
  error = realised - predicted
71
71
  error.index = error.index.hour # Hour only, remove the rest
72
- # For the time starts with 00, 01, 02, etc. TODO (MAKE MORE ELEGANT SOLUTION THAN A LOOP)
72
+ # For the time starts with 00, 01, 02, etc.
73
73
  for hour in range(24):
74
74
  hour_error = error[error.index == hour]
75
75
 
@@ -230,7 +230,9 @@ def split_data_train_validation_test(
230
230
  for date_set in [max_dates, min_dates, other_dates]:
231
231
  n_days_val = max(1, int(validation_fraction * len(date_set)))
232
232
  val_dates += list(
233
- np.random.choice(list(date_set), n_days_val, replace=False)
233
+ np.random.default_rng().choice(
234
+ list(date_set), n_days_val, replace=False
235
+ )
234
236
  )
235
237
  train_dates += [x for x in date_set if x not in val_dates]
236
238
 
@@ -239,11 +239,6 @@ def add_prediction_job_properties_to_forecast(
239
239
  if forecast_quality is not None:
240
240
  forecast["quality"] = forecast_quality
241
241
 
242
- # TODO rename prediction job typ to type
243
- # TODO algtype = model_file_path, perhaps we can find a more logical name
244
- # TODO perhaps better to make a forecast its own class!
245
- # TODO double check and sync this with make_basecase_forecast (other fields are added)
246
- # !!!!! TODO fix the requirement for customer
247
242
  forecast["pid"] = pj["id"]
248
243
  forecast["customer"] = pj["name"]
249
244
  forecast["description"] = pj["description"]
@@ -21,7 +21,7 @@ Example:
21
21
  import logging
22
22
 
23
23
  # Import builtins
24
- from datetime import datetime, timedelta
24
+ from datetime import datetime, timedelta, UTC
25
25
  from pathlib import Path
26
26
 
27
27
  import numpy as np
@@ -56,8 +56,8 @@ def main(model_type: ModelType = None, config=None, database=None) -> None:
56
56
 
57
57
  with TaskContext(taskname, config, database) as context:
58
58
  # Set start and end time
59
- start_time = datetime.utcnow() - timedelta(days=1)
60
- end_time = datetime.utcnow()
59
+ end_time = datetime.now(tz=UTC)
60
+ start_time = end_time - timedelta(days=1)
61
61
 
62
62
  PredictionJobLoop(context, model_type=model_type).map(
63
63
  check_kpi_task,
@@ -77,9 +77,9 @@ def check_kpi_task(
77
77
  ) -> None:
78
78
  # Apply default parameters if none are provided
79
79
  if start_time is None:
80
- start_time = datetime.utcnow() - timedelta(days=1)
80
+ start_time = datetime.now(tz=UTC) - timedelta(days=1)
81
81
  if end_time is None:
82
- end_time = datetime.utcnow()
82
+ end_time = datetime.now(tz=UTC)
83
83
 
84
84
  # Get realised load data
85
85
  realised = context.database.get_load_pid(pj["id"], start_time, end_time, "15T")
@@ -16,7 +16,7 @@ Example:
16
16
  $ python create_basecase_forecast.py
17
17
 
18
18
  """
19
- from datetime import datetime, timedelta
19
+ from datetime import datetime, timedelta, UTC
20
20
  from pathlib import Path
21
21
 
22
22
  import pandas as pd
@@ -68,8 +68,8 @@ def create_basecase_forecast_task(
68
68
  return
69
69
 
70
70
  # Define datetime range for input data
71
- datetime_start = datetime.utcnow() - timedelta(days=t_behind_days)
72
- datetime_end = datetime.utcnow() + timedelta(days=t_ahead_days)
71
+ datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
72
+ datetime_end = datetime.now(tz=UTC) + timedelta(days=t_ahead_days)
73
73
 
74
74
  # Retrieve input data
75
75
  input_data = context.database.get_model_input(
@@ -87,7 +87,7 @@ def create_basecase_forecast_task(
87
87
  basecase_forecast = basecase_forecast.loc[
88
88
  basecase_forecast.index
89
89
  > (
90
- pd.to_datetime(datetime.utcnow(), utc=True)
90
+ pd.to_datetime(datetime.now(tz=UTC), utc=True)
91
91
  + timedelta(minutes=pj.horizon_minutes)
92
92
  ),
93
93
  :,
@@ -22,7 +22,7 @@ Example:
22
22
 
23
23
  """
24
24
  import logging
25
- from datetime import datetime, timedelta, timezone
25
+ from datetime import datetime, timedelta, UTC
26
26
  from pathlib import Path
27
27
 
28
28
  import pandas as pd
@@ -76,8 +76,8 @@ def create_components_forecast_task(
76
76
  return
77
77
 
78
78
  # Define datetime range for input data
79
- datetime_start = datetime.utcnow() - timedelta(days=t_behind_days)
80
- datetime_end = datetime.utcnow() + timedelta(days=t_ahead_days)
79
+ datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
80
+ datetime_end = datetime.now(tz=UTC) + timedelta(days=t_ahead_days)
81
81
 
82
82
  logger.info(
83
83
  "Get predicted load", datetime_start=datetime_start, datetime_end=datetime_end
@@ -120,9 +120,7 @@ def create_components_forecast_task(
120
120
  logger.debug("Written forecast to database")
121
121
 
122
122
  # Check if forecast was complete enough, otherwise raise exception
123
- if forecasts.index.max() < datetime.utcnow().replace(
124
- tzinfo=timezone.utc
125
- ) + timedelta(hours=30):
123
+ if forecasts.index.max() < datetime.now(tz=UTC) + timedelta(hours=30):
126
124
  # Check which input data is missing the most.
127
125
  # Do this by counting the NANs for (load)forecast, radiation and windspeed
128
126
  max_index = forecasts.index.max()
@@ -20,7 +20,7 @@ Example:
20
20
  $ python create_forecast.py
21
21
 
22
22
  """
23
- from datetime import datetime, timedelta
23
+ from datetime import datetime, timedelta, UTC
24
24
  from pathlib import Path
25
25
 
26
26
  from openstef.data_classes.prediction_job import PredictionJobDataClass
@@ -73,8 +73,8 @@ def create_forecast_task(
73
73
  mlflow_tracking_uri = context.config.paths_mlflow_tracking_uri
74
74
 
75
75
  # Define datetime range for input data
76
- datetime_start = datetime.utcnow() - timedelta(days=t_behind_days)
77
- datetime_end = datetime.utcnow() + timedelta(seconds=pj.horizon_minutes * 60)
76
+ datetime_start = datetime.now(tz=UTC) - timedelta(days=t_behind_days)
77
+ datetime_end = datetime.now(tz=UTC) + timedelta(seconds=pj.horizon_minutes * 60)
78
78
 
79
79
  # Retrieve input data
80
80
  input_data = context.database.get_model_input(
@@ -12,7 +12,7 @@ Example:
12
12
  $ python create_solar_forecast
13
13
 
14
14
  """
15
- from datetime import datetime, timedelta
15
+ from datetime import datetime, timedelta, UTC
16
16
  from pathlib import Path
17
17
 
18
18
  import numpy as np
@@ -23,7 +23,6 @@ from openstef import PROJECT_ROOT
23
23
  from openstef.tasks.utils.predictionjobloop import PredictionJobLoop
24
24
  from openstef.tasks.utils.taskcontext import TaskContext
25
25
 
26
- # TODO move to config
27
26
  PV_COEFS_FILEPATH = PROJECT_ROOT / "openstef" / "data" / "pv_single_coefs.csv"
28
27
 
29
28
 
@@ -231,7 +230,7 @@ def main(config=None, database=None, **kwargs):
231
230
  num_prediction_jobs = len(prediction_jobs)
232
231
 
233
232
  # only make customer = Provincie once an hour
234
- utc_now_minute = datetime.utcnow().minute
233
+ utc_now_minute = datetime.now(tz=UTC).minute
235
234
  if utc_now_minute >= 15:
236
235
  prediction_jobs = [
237
236
  pj for pj in prediction_jobs if str(pj["name"]).startswith("Provincie")
@@ -16,7 +16,7 @@ Example:
16
16
  $ python optimize_hyperparameters.py
17
17
 
18
18
  """
19
- from datetime import datetime, timedelta
19
+ from datetime import datetime, timedelta, UTC
20
20
  from pathlib import Path
21
21
 
22
22
  from openstef.data_classes.prediction_job import PredictionJobDataClass
@@ -88,8 +88,8 @@ def optimize_hyperparameters_task(
88
88
  )
89
89
  return
90
90
 
91
- datetime_start = datetime.utcnow() - timedelta(days=DEFAULT_TRAINING_PERIOD_DAYS)
92
- datetime_end = datetime.utcnow()
91
+ datetime_start = datetime.now(tz=UTC) - timedelta(days=DEFAULT_TRAINING_PERIOD_DAYS)
92
+ datetime_end = datetime.now(tz=UTC)
93
93
 
94
94
  input_data = context.database.get_model_input(
95
95
  pid=pj["id"],
@@ -23,7 +23,7 @@ Example:
23
23
 
24
24
  """
25
25
  import logging
26
- from datetime import datetime
26
+ from datetime import datetime, UTC
27
27
  from pathlib import Path
28
28
 
29
29
  import numpy as np
@@ -93,7 +93,6 @@ def split_forecast_task(
93
93
  components, coefdict = find_components(input_split_function)
94
94
 
95
95
  # Calculate mean absolute error (MAE)
96
- # TODO: use a standard metric function for this
97
96
  error = components[["load", "Inschatting"]].diff(axis=1).iloc[:, 1]
98
97
  mae = error.abs().mean()
99
98
  coefdict.update({"MAE": mae})
@@ -183,7 +182,7 @@ def convert_coefdict_to_coefsdf(
183
182
  pj["id"],
184
183
  input_split_function.index.min().date(),
185
184
  input_split_function.index.max().date(),
186
- datetime.utcnow(),
185
+ datetime.now(tz=UTC),
187
186
  ]
188
187
  coefsdf = pd.DataFrame(
189
188
  {"coef_name": list(coefdict.keys()), "coef_value": list(coefdict.values())}
@@ -237,7 +236,7 @@ def find_components(
237
236
 
238
237
  # Carry out fitting
239
238
  # See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html # noqa
240
- coefs, cov = scipy.optimize.curve_fit(
239
+ coefs, _ = scipy.optimize.curve_fit(
241
240
  weighted_sum,
242
241
  xdata=df.iloc[:, 1:].values.T,
243
242
  ydata=load.values,
@@ -19,7 +19,7 @@ Example:
19
19
  $ python model_train.py
20
20
 
21
21
  """
22
- from datetime import datetime, timedelta
22
+ from datetime import datetime, timedelta, UTC
23
23
  from pathlib import Path
24
24
 
25
25
  import pandas as pd
@@ -123,7 +123,7 @@ def train_model_task(
123
123
  )
124
124
 
125
125
  if datetime_end is None:
126
- datetime_end = datetime.utcnow()
126
+ datetime_end = datetime.now(tz=UTC)
127
127
  if datetime_start is None:
128
128
  datetime_start = datetime_end - timedelta(days=training_period_days_to_fetch)
129
129
 
@@ -184,9 +184,9 @@ def train_model_task(
184
184
  "'save_train_forecasts option was activated.'"
185
185
  )
186
186
  context.database.write_train_forecasts(pj, data_sets)
187
- context.logger.debug(f"Saved Forecasts from trained model on datasets")
187
+ context.logger.debug("Saved Forecasts from trained model on datasets")
188
188
  except SkipSaveTrainingForecasts:
189
- context.logger.debug(f"Skip saving forecasts")
189
+ context.logger.debug("Skip saving forecasts")
190
190
  except InputDataOngoingZeroFlatlinerError:
191
191
  if (
192
192
  context.config.known_zero_flatliners
@@ -213,7 +213,7 @@ def main(model_type=None, config=None, database=None):
213
213
  model_type = [ml.value for ml in ModelType]
214
214
 
215
215
  taskname = Path(__file__).name.replace(".py", "")
216
- datetime_now = datetime.utcnow()
216
+ datetime_now = datetime.now(tz=UTC)
217
217
  with TaskContext(taskname, config, database) as context:
218
218
  PredictionJobLoop(context, model_type=model_type).map(
219
219
  train_model_task, context, datetime_end=datetime_now
@@ -3,7 +3,7 @@
3
3
  # SPDX-License-Identifier: MPL-2.0
4
4
  import logging
5
5
  import math
6
- from datetime import datetime, timedelta
6
+ from datetime import datetime, timedelta, UTC
7
7
  from typing import Union
8
8
 
9
9
  import numpy as np
@@ -203,7 +203,7 @@ def calc_completeness_features(
203
203
  df_copy.drop("horizon", inplace=True, axis=1)
204
204
 
205
205
  if weights is None:
206
- weights = np.array([1] * ((len(df_copy.columns))))
206
+ weights = np.array([1] * (len(df_copy.columns)))
207
207
 
208
208
  length_weights = len(weights)
209
209
  length_features = len(df_copy.columns)
@@ -243,7 +243,7 @@ def detect_ongoing_zero_flatliner(
243
243
 
244
244
  """
245
245
  # remove all timestamps in the future
246
- load = load[load.index.tz_localize(None) <= datetime.utcnow()]
246
+ load = load[load.index <= datetime.now(tz=UTC)]
247
247
  latest_measurement_time = load.dropna().index.max()
248
248
  latest_measurements = load[
249
249
  latest_measurement_time - timedelta(minutes=duration_threshold_minutes) :
@@ -297,9 +297,10 @@ def calc_completeness_dataframe(
297
297
  # timecols: {delay:number of points expected to be missing}
298
298
  # number of points expected to be missing = numberOfPointsUpToTwoDaysAhead - numberOfPointsAvailable
299
299
  timecols = {
300
- x: len(df) - eval(x[2:].replace("min", "/60").replace("d", "*24.0")) / 0.25
301
- for x in df.columns
302
- if x[:2] == "T-"
300
+ column: len(df)
301
+ - eval(column[2:].replace("min", "/60").replace("d", "*24.0")) / 0.25
302
+ for column in df.columns
303
+ if column.startswith("T-")
303
304
  }
304
305
 
305
306
  non_na_count = df.count()
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.2
2
2
  Name: openstef
3
- Version: 3.4.61
3
+ Version: 3.4.63
4
4
  Summary: Open short term energy forecaster
5
5
  Home-page: https://github.com/OpenSTEF/openstef
6
6
  Author: Alliander N.V
@@ -29,7 +29,7 @@ def read_long_description_from_readme():
29
29
 
30
30
  setup(
31
31
  name="openstef",
32
- version="3.4.61",
32
+ version="3.4.63",
33
33
  packages=find_packages(include=["openstef", "openstef.*"]),
34
34
  description="Open short term energy forecaster",
35
35
  long_description=read_long_description_from_readme(),
File without changes
File without changes
File without changes
File without changes
File without changes