openenergyid-0.1.21-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -0,0 +1,450 @@
+ """Multi-variable linear regression based on statsmodels
+ and Ordinary Least Squares (ols)."""
+
+ import numpy as np
+ import pandas as pd
+ import statsmodels.formula.api as fm
+ from patsy import LookupFactor, ModelDesc, Term  # pylint: disable=no-name-in-module
+ from statsmodels.sandbox.regression.predstd import wls_prediction_std
+
+ from openenergyid.enums import Granularity
+
+
+ class MultiVariableLinearRegression:
+     """Multi-variable linear regression.
+
+     Based on statsmodels and Ordinary Least Squares (ols).
+
+     Pass a dataframe with the variable to be modelled y (dependent variable)
+     and the possible independent variables x.
+     Specify as string the name of the dependent variable, and optionally pass a list with names of
+     independent variables to try
+     (by default all other columns will be tried as independent variables).
+
+     The analysis is based on a forward-selection approach: starting from a simple model,
+     the model is iteratively refined and verified until no statistically relevant improvements
+     can be obtained.
+     Each model in the iteration loop is stored in the attribute self.list_of_fits.
+     The selected model is self.fit (a pointer to the last element of self.list_of_fits).
+
+     The dataframe can contain daily, weekly, monthly, yearly ... values. Each row is an instance.
+     """
+
+     def __init__(
+         self,
+         data: pd.DataFrame,
+         y: str,
+         p_max: float = 0.05,
+         list_of_x: list[str] = None,
+         confint: float = 0.95,
+         cross_validation: bool = False,
+         allow_negative_predictions: bool = False,
+         granularity: Granularity = None,
+         single_use_exog_prefixes: list[str] = None,
+         exogs__disallow_negative_coefficient: list[str] = None,
+     ):
+         """Parameters
+         ----------
+         data : pd.DataFrame
+             DatetimeIndex and both independent variables (x) and dependent variable (y) as columns
+         y : str
+             Name of the dependent (endogenous) variable to model
+         p_max : float (default=0.05)
+             Acceptable p-value of the t-statistic for estimated parameters
+         list_of_x : list of str (default=None)
+             If None (default), try to build a model with all columns in the dataframe.
+             If a list with column names is given, only try these columns as independent variables
+         confint : float, default=0.95
+             Two-sided confidence interval for predictions.
+         cross_validation : bool, default=False
+             If True, compute the model based on cross-validation (leave one out).
+             Only possible if the dataframe has fewer than 15 entries.
+             Note: this will take much longer to compute!
+         allow_negative_predictions : bool, default=False
+             If True, allow predictions to be negative.
+             For gas consumption or PV production this is not physical,
+             so allow_negative_predictions should be False
+         granularity : Granularity, default=None
+             Granularity of the data. Only used for the output of the model.
+             If None, the granularity is not set.
+         single_use_exog_prefixes : list of str, default=None
+             List of variable prefixes that indicate a variable type that should only be used once.
+             For example, if the list contains "HDD", only one of the columns "HDD1", "HDD2", "HDD3" etc.
+             will be used as an independent variable.
+             Once the best fit using a variable with a given prefix is found, the other variables with the same
+             prefix will not be used as independent variables.
+         exogs__disallow_negative_coefficient : list of str, default=None
+             List of variable names for which the coefficient is not allowed to be negative.
+         """
+         self.data = data.copy()
+         if y not in self.data.columns:
+             raise AssertionError(
+                 f"The dependent variable {y} is not a column in the dataframe",
+             )
+         self.y = y
+
+         self.p_max = p_max
+         self.list_of_x = list_of_x or [x for x in self.data.columns if x != self.y]
+         self.confint = confint
+         self.cross_validation = cross_validation
+         self.allow_negative_predictions = allow_negative_predictions
+         self.granularity = granularity
+         self.single_use_exog_prefixes = single_use_exog_prefixes
+         self.exogs__disallow_negative_coefficient = exogs__disallow_negative_coefficient
+         self._fit = None
+         self._list_of_fits = []
+         self.list_of_cverrors = []
+
+     @property
+     def fit(self) -> fm.ols:
+         """Return the fitted model selected by the analysis.
+
+         Returns
+         -------
+         The fitted model.
+
+         Raises
+         ------
+         UnboundLocalError: If `do_analysis()` has not been run before accessing `fit`.
+         """
+         if self._fit is None:
+             raise UnboundLocalError(
+                 'Run "do_analysis()" first to fit a model to the data.',
+             )
+         else:
+             return self._fit
+
+     @property
+     def list_of_fits(self) -> list[fm.ols]:
+         """Return the list of fits generated by the model.
+
+         Raises
+         ------
+         UnboundLocalError: If the model has not been fitted yet.
+
+         Returns
+         -------
+         list: The list of fits generated by the model.
+         """
+         if not self._list_of_fits:
+             raise UnboundLocalError(
+                 'Run "do_analysis()" first to fit a model to the data.',
+             )
+         else:
+             return self._list_of_fits
+
+     def do_analysis(self):
+         """Find the best model (fit) and create self.list_of_fits and self.fit"""
+         if self.cross_validation:
+             return self._do_analysis_cross_validation()
+         else:
+             return self._do_analysis_no_cross_validation()
+
+     def _do_analysis_no_cross_validation(self):
+         """Find the best model (fit) and create self.list_of_fits and self.fit"""
+         # first model is just the mean
+         response_term = [Term([LookupFactor(self.y)])]
+         model_terms = [Term([])]  # empty term is the intercept
+         all_model_terms_dict = {x: Term([LookupFactor(x)]) for x in self.list_of_x}
+         # ...then add another term for each candidate
+         # model_terms += [Term([LookupFactor(c)]) for c in candidates]
+         model_desc = ModelDesc(response_term, model_terms)
+         self._list_of_fits.append(fm.ols(model_desc, data=self.data).fit())
+         # try to improve the model until no improvements can be found
+
+         while all_model_terms_dict:
+             # try each x and overwrite the best_fit if we find a better one
+             # the first best_fit is the one from the previous round
+             ref_fit = self._list_of_fits[-1]
+             best_fit = self._list_of_fits[-1]
+             best_bic = best_fit.bic
+             for x, term in all_model_terms_dict.items():
+                 # make new_fit, compare with best found so far
+                 model_desc = ModelDesc(
+                     response_term,
+                     ref_fit.model.formula.rhs_termlist + [term],
+                 )
+                 fit = fm.ols(model_desc, data=self.data).fit()
+
+                 # Check if the coefficient of the variable is allowed to be negative
+                 if (
+                     self.exogs__disallow_negative_coefficient is not None
+                     and x in self.exogs__disallow_negative_coefficient
+                     and fit.params[x] < 0
+                 ):
+                     continue
+
+                 if fit.bic < best_bic:
+                     best_bic = fit.bic
+                     best_fit = fit
+                     best_x = x
+             # Sometimes the obtained fit may be better but contain insignificant parameters.
+             # Correct the fit by removing the insignificant parameters and estimating again.
+             best_fit = self._prune(best_fit, p_max=self.p_max)
+
+             # if best_fit does not contain more variables than ref_fit, exit
+             if len(best_fit.model.formula.rhs_termlist) == len(
+                 ref_fit.model.formula.rhs_termlist,
+             ):
+                 break
+
+             self._list_of_fits.append(best_fit)
+             all_model_terms_dict.pop(best_x)
+
+             # Check if `best_x` starts with a prefix that should only be used once.
+             # If so, remove all other variables with the same prefix from the list of candidates.
+             if self.single_use_exog_prefixes:
+                 for prefix in self.single_use_exog_prefixes:
+                     if best_x.startswith(prefix):
+                         all_model_terms_dict = {
+                             k: v
+                             for k, v in all_model_terms_dict.items()
+                             if not k.startswith(prefix)
+                         }
+
+         self._fit = self._list_of_fits[-1]
+
+     def _do_analysis_cross_validation(self):
+         """Find the best model (fit) based on cross-validation (leave one out)"""
+         assert (
+             len(self.data) < 15
+         ), "Cross-validation is not implemented if your sample contains 15 or more datapoints"
+
+         # initialization: first model is the mean, but compute cv correctly.
+         errors = []
+         response_term = [Term([LookupFactor(self.y)])]
+         model_terms = [Term([])]  # empty term is the intercept
+         model_desc = ModelDesc(response_term, model_terms)
+         for i in self.data.index:
+             # make new_fit, compute cross-validation and store error
+             df_ = self.data.drop(i, axis=0)
+             fit = fm.ols(model_desc, data=df_).fit()
+             cross_prediction = self._predict(fit=fit, data=self.data.loc[[i], :])
+             errors.append(cross_prediction["predicted"] - cross_prediction[self.y])
+
+         self._list_of_fits = [fm.ols(model_desc, data=self.data).fit()]
+         self.list_of_cverrors = [np.mean(np.abs(np.array(errors)))]
+
+         # try to improve the model until no improvements can be found
+         all_model_terms_dict = {x: Term([LookupFactor(x)]) for x in self.list_of_x}
+         while all_model_terms_dict:
+             # import pdb;pdb.set_trace()
+             # try each x in all_exog and overwrite if we find a better one
+             # at the end of the iteration (and not earlier), save the best of the iteration
+             better_model_found = False
+             best = dict(fit=self._list_of_fits[-1], cverror=self.list_of_cverrors[-1])
+             for x, term in all_model_terms_dict.items():
+                 model_desc = ModelDesc(
+                     response_term,
+                     self._list_of_fits[-1].model.formula.rhs_termlist + [term],
+                 )
+                 # cross_validation, currently only implemented for monthly data
+                 # compute the mean error for a given formula based on leave-one-out.
+                 errors = []
+                 for i in self.data.index:
+                     # make new_fit, compute cross-validation and store error
+                     df_ = self.data.drop(i, axis=0)
+                     fit = fm.ols(model_desc, data=df_).fit()
+                     cross_prediction = self._predict(
+                         fit=fit,
+                         data=self.data.loc[[i], :],
+                     )
+                     errors.append(
+                         cross_prediction["predicted"] - cross_prediction[self.y],
+                     )
+                 cverror = np.mean(np.abs(np.array(errors)))
+                 # compare the model with the current fit
+                 if cverror < best["cverror"]:
+                     # better model, keep it
+                     # first, re-identify using all the datapoints
+                     best["fit"] = fm.ols(model_desc, data=self.data).fit()
+                     best["cverror"] = cverror
+                     better_model_found = True
+                     best_x = x
+
+             if better_model_found:
+                 self._list_of_fits.append(best["fit"])
+                 self.list_of_cverrors.append(best["cverror"])
+
+             else:
+                 # if we did not find a better model, exit
+                 break
+
+             # next iteration with the found exog removed
+             all_model_terms_dict.pop(best_x)
+
+             # Check if `best_x` starts with a prefix that should only be used once.
+             # If so, remove all other variables with the same prefix from the list of candidates.
+             if self.single_use_exog_prefixes:
+                 for prefix in self.single_use_exog_prefixes:
+                     if best_x.startswith(prefix):
+                         all_model_terms_dict = {
+                             k: v
+                             for k, v in all_model_terms_dict.items()
+                             if not k.startswith(prefix)
+                         }
+
+         self._fit = self._list_of_fits[-1]
+
+     def _prune(self, fit: fm.ols, p_max: float) -> fm.ols:
+         """If the fit contains statistically insignificant parameters, remove them.
+         Returns a pruned fit where all parameters have p-values of the t-statistic below p_max.
+
+         Parameters
+         ----------
+         fit : fm.ols fit object
+             Can contain insignificant parameters
+         p_max : float
+             Maximum allowed probability of the t-statistic
+
+         Returns
+         -------
+         fit : fm.ols fit object
+             Won't contain any insignificant parameters
+
+         """
+
+         def remove_from_model_desc(x: str, model_desc: ModelDesc) -> ModelDesc:
+             """Return a model_desc without x"""
+             rhs_termlist = []
+             for t in model_desc.rhs_termlist:
+                 if not t.factors:
+                     # intercept, add anyway
+                     rhs_termlist.append(t)
+                 elif x != t.factors[0]._varname:  # pylint: disable=protected-access
+                     # this is not the term with x
+                     rhs_termlist.append(t)
+
+             md = ModelDesc(model_desc.lhs_termlist, rhs_termlist)
+             return md
+
+         corrected_model_desc = ModelDesc(
+             fit.model.formula.lhs_termlist[:],
+             fit.model.formula.rhs_termlist[:],
+         )
+         pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist()
+         try:
+             pars_to_prune.remove("Intercept")
+         except ValueError:
+             pass
+         while pars_to_prune:
+             corrected_model_desc = remove_from_model_desc(
+                 pars_to_prune[0],
+                 corrected_model_desc,
+             )
+             fit = fm.ols(corrected_model_desc, data=self.data).fit()
+             pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist()
+             try:
+                 pars_to_prune.remove("Intercept")
+             except ValueError:
+                 pass
+         return fit
+
+     @staticmethod
+     def find_best_rsquared(list_of_fits: list[fm.ols]) -> fm.ols:
+         """Return the best fit, based on rsquared"""
+         res = sorted(list_of_fits, key=lambda x: x.rsquared)
+         return res[-1]
+
+     @staticmethod
+     def find_best_akaike(list_of_fits: list[fm.ols]) -> fm.ols:
+         """Return the best fit, based on the Akaike information criterion (AIC)"""
+         res = sorted(list_of_fits, key=lambda x: x.aic)
+         return res[0]
+
+     @staticmethod
+     def find_best_bic(list_of_fits: list[fm.ols]) -> fm.ols:
+         """Return the best fit, based on the Bayesian information criterion (BIC)"""
+         res = sorted(list_of_fits, key=lambda x: x.bic)
+         return res[0]
+
+     def _predict(self, fit: fm.ols, data: pd.DataFrame) -> pd.DataFrame:
+         """Return a df with predictions and confidence interval
+
+         Notes
+         -----
+         The df will contain the following columns:
+         - 'predicted': the model output
+         - 'interval_u', 'interval_l': upper and lower confidence bounds.
+         The result will depend on the following attributes of self:
+         confint : float (default=0.95)
+             Confidence level for two-sided hypothesis
+         allow_negative_predictions : bool (default=False)
+             If False, correct negative predictions to zero
+             (typically for energy consumption predictions)
+
+         Parameters
+         ----------
+         fit : Statsmodels fit
+         data : pandas DataFrame
+             Data to predict on; must contain the model's independent variables (x)
+
+         Returns
+         -------
+         result : pandas DataFrame
+             Copy of df with additional columns 'predicted', 'interval_u' and 'interval_l'
+         """
+         # Add model results to data as column 'predicted'
+         result = data.copy()
+         if "Intercept" in fit.model.exog_names:
+             result["Intercept"] = 1.0
+         result["predicted"] = fit.predict(result)
+         if not self.allow_negative_predictions:
+             result.loc[result["predicted"] < 0, "predicted"] = 0
+
+         _prstd, interval_l, interval_u = wls_prediction_std(
+             fit,
+             result[fit.model.exog_names],
+             alpha=1 - self.confint,
+         )
+         result["interval_l"] = interval_l
+         result["interval_u"] = interval_u
+
+         if "Intercept" in result:
+             result.drop(labels=["Intercept"], axis=1, inplace=True)
+
+         return result
+
+     def add_prediction(self):
+         """Add predictions and confidence interval to self.data.
+         self.data will contain the following columns:
+         - 'predicted': the model output
+         - 'interval_u', 'interval_l': upper and lower confidence bounds.
+
+         Parameters
+         ----------
+         None, but the result depends on the following attributes of self:
+         confint : float (default=0.95)
+             Confidence level for two-sided hypothesis
+         allow_negative_predictions : bool (default=False)
+             If False, correct negative predictions to zero
+             (typically for energy consumption predictions)
+
+         Returns
+         -------
+         Nothing, adds columns to self.data
+         """
+         self.data = self._predict(fit=self.fit, data=self.data)
+
+     def validate(
+         self, min_rsquared: float = 0.75, max_f_pvalue: float = 0.05, max_pvalues: float = 0.05
+     ) -> bool:
+         """Check whether the model is valid.
+
+         Returns
+         -------
+         bool: True if the model is valid, False otherwise.
+         """
+         if self.fit.rsquared_adj < min_rsquared:
+             return False
+
+         if self.fit.f_pvalue > max_f_pvalue:
+             return False
+
+         param_keys = self.fit.pvalues.keys().tolist()
+         param_keys.remove("Intercept")
+         for k in param_keys:
+             if self.fit.pvalues[k] > max_pvalues:
+                 return False
+
+         return True
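
For orientation, here is a minimal usage sketch of the class above, based only on the constructor and methods shown in this diff. The column names ("gas", "HDD1", "HDD2") and the sample values are hypothetical, and the import path assumes this file is `openenergyid/mvlr/mvlr.py` as listed in the RECORD further down.

```python
# Hypothetical example; column names and values are illustrative only.
import pandas as pd

from openenergyid.mvlr.mvlr import MultiVariableLinearRegression  # path inferred from RECORD

# Monthly gas consumption (y) and two heating-degree-day candidate regressors (x).
data = pd.DataFrame(
    {
        "gas": [120.0, 95.0, 60.0, 30.0, 25.0, 70.0],
        "HDD1": [400.0, 310.0, 200.0, 90.0, 80.0, 230.0],
        "HDD2": [380.0, 300.0, 190.0, 85.0, 75.0, 220.0],
    },
    index=pd.date_range("2023-01-01", periods=6, freq="MS"),
)

mvlr = MultiVariableLinearRegression(
    data=data,
    y="gas",
    single_use_exog_prefixes=["HDD"],  # keep at most one HDD column in the model
)
mvlr.do_analysis()     # forward selection on BIC, pruning insignificant terms
mvlr.add_prediction()  # adds 'predicted', 'interval_l', 'interval_u' to mvlr.data
print(mvlr.fit.summary())
print(mvlr.validate())  # adjusted R², F-test and per-parameter p-value checks
```

In this sketch, `single_use_exog_prefixes=["HDD"]` makes the forward selection retain at most one of the strongly correlated HDD columns, mirroring the behaviour documented in the constructor.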
@@ -0,0 +1,50 @@
+ Metadata-Version: 2.4
+ Name: openenergyid
+ Version: 0.1.21
+ Summary: Open Source Python library for energy analytics and simulations
+ Project-URL: Homepage, https://energyid.eu
+ Project-URL: Repository, https://github.com/EnergieID/OpenEnergyID
+ Project-URL: Bugs Tracker, https://github.com/EnergieID/OpenEnergyID/issues
+ Author-email: Jan Pecinovsky <jan@energieid.be>, Max Helskens <max@energieid.be>
+ Maintainer-email: Jan Pecinovsky <jan@energieid.be>
+ License: MIT License
+
+ Copyright (c) 2023 EnergieID cvba-so
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+ License-File: LICENSE
+ Keywords: analytics,energy,simulation
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Natural Language :: English
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Topic :: Scientific/Engineering
+ Classifier: Topic :: Scientific/Engineering :: Mathematics
+ Classifier: Topic :: Scientific/Engineering :: Physics
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Description-Content-Type: text/markdown
+
+ # OpenEnergyID
+
+ Open Source Python library for energy data analytics and simulations
+
+ [*more info for developers*](DEVELOPERS.md)
@@ -0,0 +1,29 @@
+ openenergyid/__init__.py,sha256=LOByZ5xMdSMme37T7DEKZJ_57qDoXhUNzP7uotgNx7w,193
+ openenergyid/const.py,sha256=D-xUnUyVuLmphClkePgxpFP6z0RDhw_6m7rX0BHBgrw,823
+ openenergyid/enums.py,sha256=jdw4CB1gkisx0re_SesrTEyh_T-UxYp6uieE7iYlHdA,357
+ openenergyid/models.py,sha256=IA6me6dsXH4mjBzewzHcs7U4jev07JX3pCadEcugeEI,5953
+ openenergyid/baseload/__init__.py,sha256=LXgnCnoSB1fyknMvkAv8nbtYczqW8Od_N2v_bp-LYVw,437
+ openenergyid/baseload/analysis.py,sha256=GAma3G3jE5ZZwxV3fL3lBlw3DDncXyAKgpB9XfNMibI,7520
+ openenergyid/baseload/exceptions.py,sha256=uPPQlFmOikp3wuwdVxj3Mx-45TzPkLF86rKMFjT5qB4,250
+ openenergyid/baseload/models.py,sha256=W_WCcdLdkbySH7o5adLE7_txXLZsAVTjJkwzjepwN1Y,917
+ openenergyid/capacity/__init__.py,sha256=1En96HlPV8kd1hOJO9RjRbXNInp5ZSkmjsjp0jfZlcQ,221
+ openenergyid/capacity/main.py,sha256=G6_EtXs1k_W-fxS33pFrCNKajuH81skdI32zp5RX9bI,3674
+ openenergyid/capacity/models.py,sha256=qi0IFyF_QOVleSzN8g0U2Fzqcc9ZDfNKt8oteFLY6Q0,832
+ openenergyid/dyntar/__init__.py,sha256=lUrk7ktS7yAqiafRHFoBE0RvFSI9mzDoO37diwLHuBg,495
+ openenergyid/dyntar/const.py,sha256=eJJV9VfpHlS9vWV47DWQkS3ICIXWhDmG4cU-ofbZJ3Q,1100
+ openenergyid/dyntar/main.py,sha256=i8EkayRicnMhG66cyrxGwUumFx3UGe7KDSImfFqmK04,10638
+ openenergyid/dyntar/models.py,sha256=lI4IjdAFallhsCqbw-EbBPbmk0g2MACgZnmMtTX7Pq0,3452
+ openenergyid/energysharing/__init__.py,sha256=A4JfrUYf-hBCzhUm0qL1GGlNMvpO8OwXJo80dJxFIvw,274
+ openenergyid/energysharing/const.py,sha256=X2zEPtTlsmZ66w6RmLS_h8NmdzObAEi5N6-0yrLN5V4,219
+ openenergyid/energysharing/data_formatting.py,sha256=Kwuhyn6ao_8Brdm9frlA6VzYOqimNYZsRbYwNXnE7yc,2583
+ openenergyid/energysharing/main.py,sha256=QKrtDyAlmKj0qtlqlUMjTJujQeKBK9U1_W80-RZWt-U,4449
+ openenergyid/energysharing/models.py,sha256=-FedTqWqoi7AYrbI4S_pX0bMScrbZxncQ21CXFz2cXM,2526
+ openenergyid/mvlr/__init__.py,sha256=Glrc218oqa8tq_Y2G9LXaSoN4Yba-vsjXUi9r9iPzaY,471
+ openenergyid/mvlr/helpers.py,sha256=Uzbfrj3IpH26wA206KOl0hNucKE-n9guJNC_EROBVKA,983
+ openenergyid/mvlr/main.py,sha256=Daj9UjcX70WETRrKu3QY-1LfMRkKP8Wvu4Ted-Smwzs,1491
+ openenergyid/mvlr/models.py,sha256=XvkViOLlYqi0ffgF3AD4Jvk3yL05gsoKdKgBAsGJ7L4,8581
+ openenergyid/mvlr/mvlr.py,sha256=F7WvWnZQtqUmK1vsguemsn9n8pDDk3tQ1weOlv-bo0c,18626
+ openenergyid-0.1.21.dist-info/METADATA,sha256=V1ysO8oMVbxB2ZTEOdHgXcG8P7erlagleMPPpe_efjc,2478
+ openenergyid-0.1.21.dist-info/WHEEL,sha256=tkmg4JIqwd9H8mL30xA7crRmoStyCtGp0VWshokd1Jc,105
+ openenergyid-0.1.21.dist-info/licenses/LICENSE,sha256=NgRdcNHwyXVCXZ8sJwoTp0DCowThJ9LWWl4xhbV1IUY,1074
+ openenergyid-0.1.21.dist-info/RECORD,,
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py2-none-any
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2023 EnergieID cvba-so
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.