domainiac 8.0.6__tar.gz → 8.0.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: domainiac
3
- Version: 8.0.6
3
+ Version: 8.0.9
4
4
  Summary: Package for working with Energinet data, but with specialized functions used for Enigma.
5
5
  Author: Team Enigma
6
6
  Author-email: gridop-enigma@energinet.dk
@@ -10,4 +10,5 @@ Classifier: Programming Language :: Python :: 3.10
10
10
  Requires-Dist: datamazing (>=5.0.2,<6.0.0)
11
11
  Requires-Dist: pandas (>=2.2.0,<3.0.0)
12
12
  Requires-Dist: scikit-learn (>=1.3.0,<2.0.0)
13
+ Requires-Dist: scipy (>=1.15.3,<2.0.0)
13
14
  Requires-Dist: typeguard (>=4.2.1,<5.0.0)
@@ -0,0 +1,24 @@
1
import numpy as np

# Reference epoch and unit for the numeric encoding used throughout this
# package: datetimes are represented as float seconds since 2020-01-01.
ORIGIN = np.datetime64("2020-01-01")
UNIT = np.timedelta64(1, "s")


def as_array(values):
    """Coerce `values` (scalar or sequence) into a numpy array (copying)."""
    return np.array(values)


def timedelta_to_float(values):
    """Convert timedelta-like values to float seconds."""
    deltas = as_array(values).astype("timedelta64")
    return deltas / UNIT


def float_to_timedelta(values):
    """Convert float seconds to numpy timedeltas."""
    return as_array(values) * UNIT


def datetime_to_float(values):
    """Convert datetime-like values to float seconds since ORIGIN."""
    offsets = as_array(values).astype("datetime64") - ORIGIN
    return timedelta_to_float(offsets)


def float_to_datetime(values):
    """Convert float seconds since ORIGIN back to numpy datetimes."""
    return ORIGIN + float_to_timedelta(values)
@@ -0,0 +1,121 @@
1
+ import numpy as np
2
+ from numpy.typing import NDArray
3
+ from scipy import integrate, interpolate
4
+
5
+ from domainiac.functions.typing import IntegrableRealFunction
6
+
7
+
8
def cumulative_integral(
    f: IntegrableRealFunction,
    x: NDArray[np.float64],
    step_size: np.float64,
) -> NDArray[np.float64]:
    """Given an integrable real function f : R -> R,
    calculate the cumulative integral values

        int_{x0}^{x}f(s)ds

    numerically, using the trapezoid rule with the specified step size.

    Args:
        f (IntegrableRealFunction): Function to integrate.
        x (NDArray[np.float64]): Points to evaluate at.
        step_size (np.float64): Step size used in the quadrature.

    Returns:
        NDArray[np.float64]: Values of the cumulative integral at the
            specified points.
    """
    # dense quadrature grid from x[0] up to and including (at least) x[-1]
    grid = np.arange(start=x[0], stop=x[-1] + step_size, step=step_size)

    # cumulative trapezoid integral on the dense grid, anchored at 0
    cumulative = integrate.cumulative_trapezoid(f(grid), grid, initial=0)

    # NOTE(review): this lookup assumes every entry of x lands exactly on a
    # grid point; floating-point drift in `arange` could shift the match by
    # one — confirm the callers' x are grid-aligned.
    return cumulative[np.searchsorted(grid, x)]
38
+
39
+
40
def midpoint(x: NDArray) -> NDArray:
    """Return the midpoint of each consecutive interval in a point sequence.

    Args:
        x (NDArray): Sequence of points defining the intervals (size N)

    Returns:
        NDArray: Midpoints of the intervals (size N-1)
    """
    # average of each pair of neighbouring points
    return 0.5 * (x[:-1] + x[1:])
51
+
52
+
53
def binned_index_interpolation(
    profile: IntegrableRealFunction,
    x: NDArray[np.float64],
    y_avg: NDArray[np.float64],
    quad_step_size: float,
) -> IntegrableRealFunction:
    """
    Compute binned index interpolation function. The setup here is as follows:
    Suppose you have an unknown function f, but which is known to have the
    form f=i*profile, where i is a function taking values between 0 and 1.
    Given is only the average value of f on a sequence of intervals.

    This method calculates the ratios

        (integral of f on interval i)/(integral of profile on interval i)

    i is then estimated by doing spline interpolation between these ratios.

    Note:
        This method does NOT ensure that the average values are preserved.
        To accomplish this, a different method is required, but it has been
        decided to not go further into this topic, as the preliminary results
        seem promising enough.

    Args:
        profile (IntegrableRealFunction): Profile function
        x (NDArray[np.float64]): Sequence of points defining the intervals (size N)
        y_avg (NDArray[np.float64]): Average value on the intervals (size N-1)
        --- additional settings for solver ---
        quad_step_size (float): Step size used in quadrature of profile function.

    Returns:
        IntegrableRealFunction: Estimated base function.
    """
    x = np.array(x)
    y_avg = np.array(y_avg)

    # cumulative integral of the profile at the interval endpoints
    Yp = cumulative_integral(profile, x, step_size=quad_step_size)

    # average value of the profile on each interval
    yp_avg = (Yp[1:] - Yp[:-1]) / (x[1:] - x[:-1])

    # Ratio of observed averages to profile averages. Where the profile
    # integrates to zero this yields nan/inf, which is explicitly repaired
    # below — so silence the otherwise-expected numpy divide warnings.
    with np.errstate(divide="ignore", invalid="ignore"):
        bin_index = y_avg / yp_avg

    bin_midpoints = midpoint(x)

    # if the index is invalid (due to the profile being zero)
    # we set the index to 1. Since the profile is zero in this
    # case, it will not matter to the final function (though
    # it might have an impact on the values surrounding these
    # points)
    is_valid = ~np.isnan(bin_index) & ~np.isinf(bin_index)
    bin_index[~is_valid] = 1

    # the index might be outside the expected range
    # due to errors in the input data. In this case
    # we clip it
    bin_index = np.clip(bin_index, a_min=0, a_max=1)

    # quadratic spline through the per-bin indices
    # (make_interp_spline with k=2 requires at least 3 bins)
    index = interpolate.make_interp_spline(
        x=bin_midpoints,
        y=bin_index,
        k=2,
    )

    def estimate(x: NDArray[np.float64]) -> NDArray[np.float64]:
        # rescale the profile by the interpolated index
        return index(x) * profile(x)

    return estimate
@@ -0,0 +1,68 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ import pvlib
4
+ from numpy.typing import NDArray
5
+
6
+ from domainiac.functions import conversions, interpolation
7
+ from domainiac.modeling import Coordinate
8
+
9
+
10
def clearsky_irradiance(
    times: NDArray[np.datetime64], coordinate: Coordinate
) -> NDArray[np.float64]:
    """Get the clearsky irradiance (W/m2) for a specific coordinate.

    Args:
        times (NDArray[np.datetime64]): Times (scalar or 1D array)
        coordinate (Coordinate): Coordinate

    Returns:
        NDArray[np.float64]: Global horizontal irradiance ("ghi") values.
    """

    location = pvlib.location.Location(coordinate.latitude, coordinate.longitude)

    # Ensure a 1D array whether the input is a scalar or already a 1D array.
    # Flattening unconditionally (rather than the previous size-based
    # squeeze) also fixes the length-1 array case, which was left as a
    # (1, 1) array and made pd.DatetimeIndex fail on 2-dimensional data.
    array = conversions.as_array([times]).reshape(-1)

    times = pd.DatetimeIndex(array)
    # pvlibs `get_clearsky` will return 0
    # if datetime unit is anything else than
    # nanoseconds (probably due to some
    # internal conversion going wrong)
    times = times.as_unit("ns")

    clearsky = location.get_clearsky(times)

    return clearsky["ghi"].to_numpy()
38
+
39
+
40
def interpolate_irradiance(
    coordinate: Coordinate, times: pd.Series, radiation_avg: pd.Series
):
    """Interpolate solar irradiance (i.e. instantaneous values) using
    binned index interpolation (using the clearsky irradiance as the reference
    profile) between observed values of solar radiation (i.e. binned average
    values).

    Args:
        coordinate (Coordinate): Coordinate
        times (pd.Series): Sequence of times defining the intervals of observations
        radiation_avg (pd.Series): Observed average radiation on each interval

    Returns:
        Callable taking a pd.Series of times and returning the interpolated
        irradiance values at those times.
    """

    def profile(x: NDArray[np.float64]) -> NDArray[np.float64]:
        # evaluate the clearsky reference profile at float-encoded times
        times = conversions.float_to_datetime(x)
        return clearsky_irradiance(times, coordinate)

    # work in the float-seconds encoding expected by the interpolation module
    x = conversions.datetime_to_float(times)
    y_avg = conversions.as_array(radiation_avg)
    # 5-minute step size should be sufficient for evaluating the clearsky integral
    quad_step_size = conversions.timedelta_to_float(pd.Timedelta("PT5M"))

    f = interpolation.binned_index_interpolation(profile, x, y_avg, quad_step_size)

    def estimate(times: pd.Series) -> NDArray[np.float64]:
        # translate times to floats before evaluating the fitted function
        x = conversions.datetime_to_float(times)
        return f(x)

    return estimate
@@ -0,0 +1,8 @@
1
from typing import Callable

import numpy as np
from numpy.typing import NDArray

# Signature shared by the real-valued vector functions in this package:
# maps an array of floats to an array of floats. Used where the function
# is additionally assumed to be integrable.
IntegrableRealFunction = Callable[[NDArray[np.float64]], NDArray[np.float64]]

# Same structural signature, without the integrability assumption.
RealFunction = Callable[[NDArray[np.float64]], NDArray[np.float64]]
@@ -28,6 +28,7 @@ class MasterdataManager:
28
28
    def _get_operational_entities(self, table: str) -> pd.DataFrame:
        """Query `table` for entities that are currently operational.

        A row qualifies when its standing state is "InOperation" AND it has
        no decommission date set.

        Args:
            table (str): Name of the masterdata table to query.

        Returns:
            pd.DataFrame: Matching entities, reindexed 0..N-1.
        """
        filters = {"standing_entity_state": "InOperation"}
        df = self.db.query(table, filters=filters)
        # NOTE(review): presumably "InOperation" rows can still carry a
        # decommission date — confirm; this drops them and resets the index.
        df = df[df["decommission_date_utc"].isna()].reset_index(drop=True)
        return df
32
33
 
33
34
  @typechecked
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "domainiac"
3
- version = "8.0.6"
3
+ version = "8.0.9"
4
4
  description = "Package for working with Energinet data, but with specialized functions used for Enigma."
5
5
  authors = ["Team Enigma <gridop-enigma@energinet.dk>"]
6
6
  packages = [
@@ -13,6 +13,7 @@ pandas = "^2.2.0"
13
13
  datamazing = "^5.0.2"
14
14
  typeguard = "^4.2.1"
15
15
  scikit-learn = "^1.3.0"
16
+ scipy = "^1.15.3"
16
17
 
17
18
  [tool.poetry.dev-dependencies]
18
19
  pytest = "^7"
@@ -20,6 +21,8 @@ pytest-cov = "^3.0.0"
20
21
  pre-commit = "^2.20.0"
21
22
  pytype = "^2023.7"
22
23
  parameterized = "^0.9.0"
24
+ plotly = "^6.0.0"
25
+ pvlib = "^0.13.1"
23
26
 
24
27
  [build-system]
25
28
  requires = ["poetry-core>=1.0.0"]