openstef-3.4.52-py3-none-any.whl → openstef-3.4.53-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openstef/data_classes/prediction_job.py CHANGED
@@ -26,6 +26,7 @@ class PredictionJobDataClass(BaseModel):
             - ``"lgb"``
             - ``"linear"``
             - ``"linear_quantile"``
+            - ``"gblinear_quantile"``
             - ``"xgb_multioutput_quantile"``
             - ``"flatliner"``
 
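For orientation, the new ``"gblinear_quantile"`` string is selected through the prediction job's ``model`` field. A minimal sketch; every field except ``model`` and ``quantiles`` is an illustrative placeholder, not taken from this diff:

from openstef.data_classes.prediction_job import PredictionJobDataClass

# Hypothetical prediction job selecting the new model type.
# The remaining required fields depend on your deployment.
pj = PredictionJobDataClass(
    id=307,
    name="example-forecast",
    model="gblinear_quantile",
    quantiles=[0.1, 0.5, 0.9],
    forecast_type="demand",
    horizon_minutes=2880,
    resolution_minutes=15,
)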
openstef/enums.py CHANGED
@@ -115,6 +115,7 @@ class ModelType(Enum):
     LGB = "lgb"
     LINEAR = "linear"
     LINEAR_QUANTILE = "linear_quantile"
+    GBLINEAR_QUANTILE = "gblinear_quantile"
     ARIMA = "arima"
     FLATLINER = "flatliner"
 
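ModelType is a plain Enum, so the new member round-trips from its string value; a quick sanity check relying only on stdlib Enum behavior:

from openstef.enums import ModelType

assert ModelType("gblinear_quantile") is ModelType.GBLINEAR_QUANTILE
assert ModelType.GBLINEAR_QUANTILE.value == "gblinear_quantile"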
openstef/model/model_creator.py CHANGED
@@ -9,6 +9,7 @@ import structlog
 from openstef.enums import ModelType
 from openstef.model.regressors.arima import ARIMAOpenstfRegressor
 from openstef.model.regressors.custom_regressor import is_custom_type, load_custom_model
+from openstef.model.regressors.gblinear_quantile import GBLinearQuantileOpenstfRegressor
 from openstef.model.regressors.lgbm import LGBMOpenstfRegressor
 from openstef.model.regressors.linear import LinearOpenstfRegressor
 from openstef.model.regressors.linear_quantile import LinearQuantileOpenstfRegressor
@@ -121,6 +122,25 @@ valid_model_kwargs = {
         "weight_floor",
         "no_fill_future_values_features",
     ],
+    ModelType.GBLINEAR_QUANTILE: [
+        "quantiles",
+        "missing_values",
+        "imputation_strategy",
+        "fill_value",
+        "weight_scale_percentile",
+        "weight_exponent",
+        "weight_floor",
+        "no_fill_future_values_features",
+        "clipped_features",
+        "learning_rate",
+        "num_boost_round",
+        "early_stopping_rounds",
+        "reg_alpha",
+        "reg_lambda",
+        "updater",
+        "feature_selector",
+        "top_k",
+    ],
     ModelType.ARIMA: [
         "backtest_max_horizon",
         "order",
@@ -141,6 +161,7 @@ class ModelCreator:
         ModelType.XGB_MULTIOUTPUT_QUANTILE: XGBMultiOutputQuantileOpenstfRegressor,
         ModelType.LINEAR: LinearOpenstfRegressor,
         ModelType.LINEAR_QUANTILE: LinearQuantileOpenstfRegressor,
+        ModelType.GBLINEAR_QUANTILE: GBLinearQuantileOpenstfRegressor,
         ModelType.ARIMA: ARIMAOpenstfRegressor,
         ModelType.FLATLINER: FlatlinerRegressor,
     }
openstef/model/objective_creator.py CHANGED
@@ -28,6 +28,7 @@ class ObjectiveCreator:
         ModelType.XGB_MULTIOUTPUT_QUANTILE: XGBMultioutputQuantileRegressorObjective,
         ModelType.LINEAR: LinearRegressorObjective,
         ModelType.LINEAR_QUANTILE: LinearRegressorObjective,
+        ModelType.GBLINEAR_QUANTILE: LinearRegressorObjective,
         ModelType.ARIMA: ARIMARegressorObjective,
     }
 
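With the registrations above in place, the new regressor is built through the same dispatch path as the existing types, and hyperparameters are filtered against the GBLINEAR_QUANTILE entry in valid_model_kwargs. A sketch, assuming ModelCreator.create_model accepts a model-type string plus keyword arguments as it does for the other model types:

from openstef.model.model_creator import ModelCreator

# Kwargs outside valid_model_kwargs[ModelType.GBLINEAR_QUANTILE]
# are filtered out by the creator rather than raising.
model = ModelCreator.create_model(
    "gblinear_quantile",
    quantiles=(0.1, 0.5, 0.9),
    learning_rate=0.15,
    reg_lambda=0.1,
)
print(type(model).__name__)  # GBLinearQuantileOpenstfRegressor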
openstef/model/regressors/gblinear_quantile.py ADDED
@@ -0,0 +1,334 @@
+# SPDX-FileCopyrightText: 2017-2025 Contributors to the OpenSTEF project <korte.termijn.prognoses@alliander.com> # noqa E501
+#
+# SPDX-License-Identifier: MPL-2.0
+import math
+import re
+from typing import Union, Optional, List
+
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+import xgboost as xgb
+from sklearn.preprocessing import StandardScaler
+from sklearn.utils.validation import check_is_fitted
+
+from openstef.feature_engineering.missing_values_transformer import (
+    MissingValuesTransformer,
+)
+from openstef.model.metamodels.feature_clipper import FeatureClipper
+from openstef.model.regressors.regressor import OpenstfRegressor
+
+DEFAULT_QUANTILES: tuple[float, ...] = (0.9, 0.5, 0.1)
+
+
+class GBLinearQuantileOpenstfRegressor(OpenstfRegressor):
+    is_fitted_: bool = False
+
+    TO_KEEP_FEATURES: List[str] = [
+        "T-7d",
+        "T-1d",
+    ]
+    TO_IGNORE_FEATURES: List[str] = [
+        "Month",
+        "Quarter",
+    ]
+
+    def __init__(
+        self,
+        quantiles: tuple[float, ...] = DEFAULT_QUANTILES,
+        missing_values: Union[int, float, str, None] = np.nan,
+        imputation_strategy: Optional[str] = "mean",
+        fill_value: Optional[Union[str, int, float]] = None,
+        weight_scale_percentile: int = 95,
+        weight_exponent: float = 1,
+        weight_floor: float = 0.1,
+        validation_fraction: float = 0.2,
+        no_fill_future_values_features: Optional[List[str]] = None,
+        clipped_features: Optional[List[str]] = None,
+        learning_rate: float = 0.15,
+        num_boost_round: int = 500,
+        early_stopping_rounds: int = 10,
+        reg_alpha: float = 0.0001,
+        reg_lambda: float = 0.1,
+        updater: str = "shotgun",
+        feature_selector: str = "shuffle",
+        top_k: int = 0,
+    ):
+        super().__init__()
+
+        # Check if quantile 0.5 is present. This is required.
+        if 0.5 not in quantiles:
+            raise ValueError(
+                "Cannot train quantile model as 0.5 is not in requested quantiles!"
+            )
+
+        if clipped_features is None:
+            clipped_features = ["APX"]
+
+        self.quantiles = quantiles
+        self.weight_scale_percentile = weight_scale_percentile
+        self.weight_exponent = weight_exponent
+        self.weight_floor = weight_floor
+        self.imputer_ = MissingValuesTransformer(
+            missing_values=missing_values,
+            imputation_strategy=imputation_strategy,
+            fill_value=fill_value,
+            no_fill_future_values_features=no_fill_future_values_features,
+        )
+        self.x_scaler_ = StandardScaler()
+        self.y_scaler_ = StandardScaler()
+        self.validation_fraction = validation_fraction
+        self.model_: Optional[xgb.Booster] = None
+        self.feature_clipper_ = FeatureClipper(columns=clipped_features)
+
+        self.learning_rate = learning_rate
+        self.num_boost_round = num_boost_round
+        self.early_stopping_rounds = early_stopping_rounds
+        self.reg_alpha = reg_alpha
+        self.reg_lambda = reg_lambda
+        self.updater = updater
+        self.feature_selector = feature_selector
+        self.top_k = top_k
+
+    @property
+    def feature_names(self) -> list:
+        """The names of the features used to train the model."""
+        check_is_fitted(self)
+        return self.imputer_.non_null_feature_names
+
+    @staticmethod
+    def _get_importance_names():
+        return {
+            "gain_importance_name": "total_gain",
+            "weight_importance_name": "weight",
+        }
+
+    @property
+    def can_predict_quantiles(self) -> bool:
+        """Attribute that indicates if the model predicts particular quantiles."""
+        return True
+
+    def _is_feature_ignored(self, feature_name: str) -> bool:
+        """Check if a feature is ignored by the model.
+
+        Args:
+            feature_name: Feature name
+
+        Returns:
+            True if the feature is ignored, False otherwise
+
+        """
+
+        if feature_name in self.TO_KEEP_FEATURES:
+            return False
+
+        return (
+            # Ignore named features
+            feature_name in self.TO_IGNORE_FEATURES
+            or
+            # Ignore holiday features
+            re.match(r"is_", feature_name) is not None
+            or
+            # Ignore lag features
+            re.match(r"T-", feature_name) is not None
+            or
+            # Ignore infeed MFFBAS profiles
+            re.match(r"E\d.*_I", feature_name) is not None
+        )
+
+    def _remove_ignored_features(self, x: pd.DataFrame) -> pd.DataFrame:
+        """Remove ignored features from the input data.
+
+        Args:
+            x: Input data
+
+        Returns:
+            Data without ignored features
+
+        """
+        return x.drop(columns=[c for c in x.columns if self._is_feature_ignored(c)])
+
+    def fit(self, x: pd.DataFrame, y: pd.Series, **kwargs) -> OpenstfRegressor:
+        if not isinstance(y, pd.Series):
+            y = pd.Series(np.asarray(y), name="load")
+
+        x = self._remove_ignored_features(x)
+        self.feature_clipper_.fit(x)
+
+        # Fix nan columns
+        x, y = self.imputer_.fit_transform(x, y)
+        if x.isna().any().any():
+            raise ValueError(
+                "There are nan values in the input data. Set "
+                "imputation_strategy to solve them."
+            )
+
+        # Apply feature scaling
+        x_scaled = self.x_scaler_.fit_transform(x)
+        y_scaled = self.y_scaler_.fit_transform(y.to_frame())[:, 0]
+
+        # Add more focus on extreme / peak values
+        sample_weight = self._calculate_sample_weights(y.values.squeeze())
+
+        # Split the data into training and validation sets
+        x_train, x_val, y_train, y_val, weight_train, weight_val = train_test_split(
+            x_scaled,
+            y_scaled,
+            sample_weight,
+            test_size=self.validation_fraction,
+            random_state=42,
+        )
+
+        # Preserve feature names
+        x_train = pd.DataFrame(x_train, columns=x.columns)
+        x_val = pd.DataFrame(x_val, columns=x.columns)
+
+        dtrain = xgb.DMatrix(x_train, label=y_train, weight=weight_train)
+        dval = xgb.DMatrix(x_val, label=y_val, weight=weight_val)
+
+        xgb_params = {
+            # Use the quantile objective function.
+            "objective": "reg:quantileerror",  # This is pinball loss
+            "booster": "gblinear",
+            "updater": self.updater,
+            "alpha": self.reg_alpha,
+            "lambda": self.reg_lambda,
+            "feature_selector": self.feature_selector,
+            "quantile_alpha": np.array(self.quantiles),
+            "learning_rate": self.learning_rate,
+        }
+
+        if self.top_k > 0:
+            xgb_params["top_k"] = self.top_k
+
+        self.model_ = xgb.train(
+            params=xgb_params,
+            dtrain=dtrain,
+            num_boost_round=self.num_boost_round,
+            early_stopping_rounds=self.early_stopping_rounds,
+            evals=[(dtrain, "train"), (dval, "val")],
+        )
+
+        self._Booster = self.model_
+
+        self.is_fitted_ = True
+
+        self.feature_importances_ = self._get_feature_importances_from_booster(
+            self.model_
+        )
+
+        return self
+
+    def _calculate_sample_weights(self, y: np.ndarray):
+        """Calculate sample weights based on the y values of arbitrary scale.
+
+        The resulting weights are in the range [0,1] and are used to put more emphasis
+        on certain samples. The sample weighting function does:
+
+        * Rescale data to a [-1, 1] range using quantile scaling. With the default
+          weight_scale_percentile of 95, 95% of the data falls within this range.
+        * Calculate the weight by taking the exponent of scaled data.
+            * exponent=0: Results in uniform weights for all samples.
+            * exponent=1: Results in linearly increasing weights for samples that are
+              closer to the extremes.
+            * exponent>1: Results in exponentially increasing weights for samples that
+              are closer to the extremes.
+        * Clip the data to [0, 1] range with weight_floor as the minimum weight.
+            * Weight floor is used to make sure that all the samples are considered.
+
+        """
+        return np.clip(
+            _weight_exp(
+                _scale_percentile(y, percentile=self.weight_scale_percentile),
+                exponent=self.weight_exponent,
+            ),
+            a_min=self.weight_floor,
+            a_max=1,
+        )
+
+    def predict(self, x: pd.DataFrame, quantile: float = 0.5, **kwargs) -> np.ndarray:
+        check_is_fitted(self)
+
+        # Preprocess input data
+        x = self._remove_ignored_features(x)
+        x = self.feature_clipper_.transform(x)
+        x = self.imputer_.transform(x)
+        x_scaled = self.x_scaler_.transform(x)
+
+        # Preserve feature names
+        x_scaled = pd.DataFrame(x_scaled, columns=x.columns)
+
+        d_x_scaled = xgb.DMatrix(x_scaled)
+
+        # Make prediction
+        y_pred = self.model_.predict(d_x_scaled)
+
+        # When multiple quantiles are trained,
+        # we need to select the requested quantile
+        if len(self.quantiles) > 1:
+            # Get index of the quantile value in the quantiles list
+            quantile_index = self.quantiles.index(quantile)
+
+            # Get the quantile prediction
+            y_pred = y_pred[:, quantile_index]
+
+        # Inverse scaling
+        y_pred = self.y_scaler_.inverse_transform(y_pred.reshape(-1, 1))[:, 0]
+
+        return y_pred
+
+    @classmethod
+    def _get_feature_importances_from_booster(cls, booster: xgb.Booster) -> np.ndarray:
+        """Gets feature importances from an XGB booster.
+
+        This is based on the feature_importance_ property defined in:
+        https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py.
+
+        Args:
+            booster: Booster object,
+                most of the time the median model (quantile=0.5) is preferred
+
+        Returns:
+            Ndarray with normalized feature importances.
+
+        """
+        # Get score
+        score = booster.get_score(importance_type="weight")
+
+        if isinstance(next(iter(score.values())), list):
+            num_quantiles = len(next(iter(score.values())))
+
+            # Select middle quantile, assuming odd number of quantiles
+            quantile_index = num_quantiles // 2
+
+            score = {f: score[f][quantile_index] for f in score}
+
+        # Get feature names from booster
+        feature_names = booster.feature_names
+
+        # Get importance
+        feature_importance = [score.get(f, 0.0) for f in feature_names]
+        # Convert to array
+        features_importance_array = np.array(feature_importance, dtype=np.float32)
+
+        total = features_importance_array.sum()  # For normalizing
+        if total == 0:
+            return features_importance_array
+        return features_importance_array / total  # Normalize
+
+    @classmethod
+    def _get_param_names(cls):
+        return [
+            "quantiles",
+        ]
+
+    def __sklearn_is_fitted__(self) -> bool:
+        return self.is_fitted_
+
+
+def _scale_percentile(x: np.ndarray, percentile: int = 95):
+    return np.abs(x / np.percentile(np.abs(x), percentile))
+
+
+def _weight_exp(x: np.ndarray, exponent: float = 1):
+    return np.abs(x) ** exponent
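To tie the new file together — ignored-feature filtering, sample weighting, scaling, and the multi-quantile gblinear booster — here is a hedged end-to-end sketch on synthetic data. Column names and sizes are arbitrary, not from this diff, and it assumes an XGBoost version that supports reg:quantileerror (2.0+):

import numpy as np
import pandas as pd

from openstef.model.regressors.gblinear_quantile import (
    GBLinearQuantileOpenstfRegressor,
    _scale_percentile,
    _weight_exp,
)

rng = np.random.default_rng(42)
n = 500
x = pd.DataFrame(
    {
        # Plain numeric features; names deliberately avoid the ignored
        # patterns (is_*, T-*, E<digit>*_I, Month, Quarter).
        "radiation": rng.normal(size=n),
        "windspeed": rng.normal(size=n),
    }
)
y = pd.Series(
    2.0 * x["radiation"] - x["windspeed"] + rng.normal(scale=0.1, size=n),
    name="load",
)

model = GBLinearQuantileOpenstfRegressor(quantiles=(0.1, 0.5, 0.9))
model.fit(x, y)

# One booster serves all trained quantiles; predict() selects a column.
p10 = model.predict(x, quantile=0.1)
p50 = model.predict(x)  # the median is the default
p90 = model.predict(x, quantile=0.9)

# The sample-weight helpers in isolation: values near the 95th-percentile
# magnitude approach weight 1, while small values are floored at 0.1.
weights = np.clip(
    _weight_exp(_scale_percentile(y.to_numpy(), percentile=95), exponent=1),
    a_min=0.1,
    a_max=1,
)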
openstef-3.4.52.dist-info/METADATA → openstef-3.4.53.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.2
 Name: openstef
-Version: 3.4.52
+Version: 3.4.53
 Summary: Open short term energy forecaster
 Home-page: https://github.com/OpenSTEF/openstef
 Author: Alliander N.V
openstef-3.4.52.dist-info/RECORD → openstef-3.4.53.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
 openstef/__init__.py,sha256=93UM6m0LLQhO69-mSqLuUy73jgs4W7Iuxfo3Lm8c98g,419
 openstef/__main__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/app_settings.py,sha256=EJTDtimctFQQ-3f7ZcOQaRYohpZk3JD6aZBWPFYM2_A,582
-openstef/enums.py,sha256=Wmoag2p7G2cvENA1qt8FcVbAgo-MswXKxmq7vkxHaxs,2680
+openstef/enums.py,sha256=wToJV56CAqgl-UY2DU1Zf12Ft9WsSiUNMrHYLnULOhQ,2724
 openstef/exceptions.py,sha256=U4u2LTcdT6cmzpipT2Jh7kq9nCjT_-6gntn8yjuhGU0,1993
 openstef/settings.py,sha256=nSgkBqFxuqB3w7Rwo60i8j37c5ngDbt6vpjHS6QtJXQ,354
 openstef/data/NL_terrestrial_radiation.csv,sha256=A4kbW56GDzWi4tWUwY2C-4PiOvcKJCwkWQQtdg4ekPE,820246
@@ -17,7 +17,7 @@ openstef/data/dazls_model_3.4.24/dazls_stored_3.4.24_model_card.md.license,sha25
 openstef/data_classes/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/data_classes/data_prep.py,sha256=gRSL7UiHvZis8m8z7VoTCZc0Ccffhef5_hmSyApnqK0,3417
 openstef/data_classes/model_specifications.py,sha256=Uod1W3QzhRqVLb6zvXwxh9wRL3EHCzSvX0oDNd28cFk,1197
-openstef/data_classes/prediction_job.py,sha256=_o5_9HYv6ERTIWlcMpUE-mWwe7dRpaiP83dgNpqpa5Y,5657
+openstef/data_classes/prediction_job.py,sha256=hJNwLkCIIVsG5nzOmu_yY6rqTYzjuMt8FiLi7Aei3n4,5691
 openstef/data_classes/split_function.py,sha256=ljQIQQu1t1Y_CVWGAy25jrM6wG9odIVVQVimrT1n-1s,3358
 openstef/feature_engineering/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/feature_engineering/apply_features.py,sha256=EIxP9fvmnAjFRehQpVGBhZHt35GACL-HhDm65_ktHc0,5121
@@ -39,9 +39,9 @@ openstef/model/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,16
 openstef/model/basecase.py,sha256=caI6Q-8y0ymlxGK9Js_H3Vh0q6ruNHlGD5RG0_kE5M0,2878
 openstef/model/confidence_interval_applicator.py,sha256=Bx0mm4zGKlqopMZ589cVyDN_k6jfuyqtV1FoViXxc2Y,9775
 openstef/model/fallback.py,sha256=VV9ehgnoMZtWzqKk9H1t8wnERFh5CyC4TvDIuRP_ZDI,2861
-openstef/model/model_creator.py,sha256=EHWB5bIq9yYfWJpMWg62K5fns1p3aHFFB7_Izt2VWnA,6018
+openstef/model/model_creator.py,sha256=L84A4_HVYL7bRZY0D77j4c4kHRleVGCRXqZZkT9BZY0,6668
 openstef/model/objective.py,sha256=qJdI6GAzv8l5Mxd8G7BIqQnfdJNM7aOlg9DMzMGjWqA,14558
-openstef/model/objective_creator.py,sha256=cIO-uiCEYHjqYrgZizeFEjjgLHLLwab8le9O8DJOF8I,2145
+openstef/model/objective_creator.py,sha256=jqMvdXiVRc9GmOvSijQY0zuyxS07-ezkVXclvoW98g4,2208
 openstef/model/serializer.py,sha256=IUiiAWvoGVoWzmS-akI6LC7jHRY5Ln_vOCBZy1LnESY,17238
 openstef/model/standard_deviation_generator.py,sha256=Od9bzXi2TLb1v8Nz-VhBMZHSopWH6ssaDe8gYLlqO1I,2911
 openstef/model/metamodels/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
@@ -53,6 +53,7 @@ openstef/model/regressors/arima.py,sha256=wt7FVykjSvljpl7vjtliq61SiyjQ7KKtw8PF9x
 openstef/model/regressors/custom_regressor.py,sha256=Hsmxahc9nfSWD0aEZ6cm4pxW2noQ8B1SujS17_fmxcU,1768
 openstef/model/regressors/dazls.py,sha256=Xt89yFHjkwpIUTkkhPmPZ74F8_tht_XV88INuP5GU2E,3994
 openstef/model/regressors/flatliner.py,sha256=T9u-ukhqFcatQmlgUtBL_G-1b_wQzgdVRq0ac64GnjQ,2789
+openstef/model/regressors/gblinear_quantile.py,sha256=DSRjL_kadBfDKrDEgrOfU1N60grTiAovtcBszBa41TI,11271
 openstef/model/regressors/lgbm.py,sha256=zCdn1euEdSFxYJzH8XqQFFnb6R4JVUnmineKjX_Gy-g,800
 openstef/model/regressors/linear.py,sha256=uOvZMLGZH_9nXfmS5honCMfyVeyGXP1Cza9A_BdXlVw,3665
 openstef/model/regressors/linear_quantile.py,sha256=VAyIhp7GPayqbk8Vj_ONqPLNYuaOvxkFKDrRxx6yGY0,10510
@@ -93,8 +94,8 @@ openstef/tasks/utils/predictionjobloop.py,sha256=Ysy3zF5lzPMz_asYDKeF5m0qgVT3tCt
 openstef/tasks/utils/taskcontext.py,sha256=L9K14ycwgVxbIVUjH2DIn_QWbnu-OfxcGtQ1K9T6sus,5630
 openstef/validation/__init__.py,sha256=bIyGTSA4V5VoOLTwdaiJJAnozmpSzvQooVYlsf8H4eU,163
 openstef/validation/validation.py,sha256=HVgreHvcZvPazfwC3NNE8_3lsMsZEd_42osCAg1_6W4,11128
-openstef-3.4.52.dist-info/LICENSE,sha256=7Pm2fWFFHHUG5lDHed1vl5CjzxObIXQglnYsEdtjo_k,14907
-openstef-3.4.52.dist-info/METADATA,sha256=r8E2LPnPIDJ-AhzrbVmYP3MPYIfx2OOpwIuYC9obtcE,8305
-openstef-3.4.52.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
-openstef-3.4.52.dist-info/top_level.txt,sha256=kD0H4PqrQoncZ957FvqwfBxa89kTrun4Z_RAPs_HhLs,9
-openstef-3.4.52.dist-info/RECORD,,
+openstef-3.4.53.dist-info/LICENSE,sha256=7Pm2fWFFHHUG5lDHed1vl5CjzxObIXQglnYsEdtjo_k,14907
+openstef-3.4.53.dist-info/METADATA,sha256=MCAC7JnwfKToob21A5_a-eFE4CkG7PJ3Wov0BIDJ6Uc,8305
+openstef-3.4.53.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
+openstef-3.4.53.dist-info/top_level.txt,sha256=kD0H4PqrQoncZ957FvqwfBxa89kTrun4Z_RAPs_HhLs,9
+openstef-3.4.53.dist-info/RECORD,,