openenergyid 0.1.31__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openenergyid/__init__.py +8 -0
- openenergyid/abstractsim/__init__.py +5 -0
- openenergyid/abstractsim/abstract.py +102 -0
- openenergyid/baseload/__init__.py +15 -0
- openenergyid/baseload/analysis.py +190 -0
- openenergyid/baseload/exceptions.py +9 -0
- openenergyid/baseload/models.py +32 -0
- openenergyid/capacity/__init__.py +6 -0
- openenergyid/capacity/main.py +103 -0
- openenergyid/capacity/models.py +32 -0
- openenergyid/const.py +29 -0
- openenergyid/dyntar/__init__.py +20 -0
- openenergyid/dyntar/const.py +31 -0
- openenergyid/dyntar/main.py +313 -0
- openenergyid/dyntar/models.py +101 -0
- openenergyid/elia/__init__.py +4 -0
- openenergyid/elia/api.py +91 -0
- openenergyid/elia/const.py +18 -0
- openenergyid/energysharing/__init__.py +12 -0
- openenergyid/energysharing/const.py +8 -0
- openenergyid/energysharing/data_formatting.py +77 -0
- openenergyid/energysharing/main.py +122 -0
- openenergyid/energysharing/models.py +80 -0
- openenergyid/enums.py +16 -0
- openenergyid/models.py +174 -0
- openenergyid/mvlr/__init__.py +19 -0
- openenergyid/mvlr/helpers.py +30 -0
- openenergyid/mvlr/main.py +34 -0
- openenergyid/mvlr/models.py +227 -0
- openenergyid/mvlr/mvlr.py +450 -0
- openenergyid/pvsim/__init__.py +8 -0
- openenergyid/pvsim/abstract.py +60 -0
- openenergyid/pvsim/elia/__init__.py +3 -0
- openenergyid/pvsim/elia/main.py +89 -0
- openenergyid/pvsim/main.py +49 -0
- openenergyid/pvsim/pvlib/__init__.py +11 -0
- openenergyid/pvsim/pvlib/main.py +115 -0
- openenergyid/pvsim/pvlib/models.py +235 -0
- openenergyid/pvsim/pvlib/quickscan.py +99 -0
- openenergyid/pvsim/pvlib/weather.py +91 -0
- openenergyid/sim/__init__.py +5 -0
- openenergyid/sim/main.py +67 -0
- openenergyid/simeval/__init__.py +6 -0
- openenergyid/simeval/main.py +148 -0
- openenergyid/simeval/models.py +162 -0
- openenergyid-0.1.31.dist-info/METADATA +32 -0
- openenergyid-0.1.31.dist-info/RECORD +50 -0
- openenergyid-0.1.31.dist-info/WHEEL +5 -0
- openenergyid-0.1.31.dist-info/licenses/LICENSE +21 -0
- openenergyid-0.1.31.dist-info/top_level.txt +1 -0
openenergyid/abstractsim/abstract.py
ADDED
@@ -0,0 +1,102 @@
from abc import ABC, abstractmethod
from typing import Annotated, Self

import pandas as pd
from aiohttp import ClientSession
from pydantic import BaseModel, Field

from ..simeval.models import ComparisonPayload, EvalPayload, EvaluationOutput


class SimulationInputAbstract(BaseModel):
    """Abstract input parameters for any Simulation."""

    type: str


class SimulationSummary(BaseModel):
    """Summary of a simulation including ex-ante, simulation results, ex-post, and comparisons."""

    ex_ante: Annotated[EvalPayload, Field(description="Ex-ante evaluation results.")]
    simulation_result: Annotated[EvalPayload, Field(description="Simulation results.")]
    ex_post: Annotated[EvalPayload, Field(description="Ex-post evaluation results.")]
    comparison: Annotated[
        ComparisonPayload, Field(description="Comparison between ex-ante and ex-post results.")
    ]

    @classmethod
    def from_simulation(
        cls,
        ex_ante: dict[str, pd.DataFrame | pd.Series],
        simulation_result: dict[str, pd.DataFrame | pd.Series],
        ex_post: dict[str, pd.DataFrame | pd.Series],
        comparison: dict[str, dict[str, pd.DataFrame | pd.Series]],
    ) -> Self:
        """Create a SimulationSummary from simulation data."""
        ea = {
            k: EvaluationOutput.from_pandas(v) if isinstance(v, pd.DataFrame) else v.to_dict()
            for k, v in ex_ante.items()
        }
        sr = {
            k: EvaluationOutput.from_pandas(v) if isinstance(v, pd.DataFrame) else v.to_dict()
            for k, v in simulation_result.items()
        }
        ep = {
            k: EvaluationOutput.from_pandas(v) if isinstance(v, pd.DataFrame) else v.to_dict()
            for k, v in ex_post.items()
        }
        c = {
            k: {
                kk: EvaluationOutput.from_pandas(vv)
                if isinstance(vv, pd.DataFrame)
                else vv.to_dict()
                for kk, vv in v.items()
            }
            for k, v in comparison.items()
        }
        return cls(
            ex_ante=ea,  # type: ignore
            simulation_result=sr,  # type: ignore
            ex_post=ep,  # type: ignore
            comparison=c,  # type: ignore
        )


class Simulator(ABC):
    """An abstract base class for simulators."""

    @property
    @abstractmethod
    def simulation_results(self):
        """The results of the simulation."""
        raise NotImplementedError()

    @abstractmethod
    def simulate(self, **kwargs):
        """Run the simulation and return the results."""
        raise NotImplementedError()

    @abstractmethod
    def result_as_frame(self) -> pd.DataFrame:
        """Convert the simulation results to a DataFrame."""
        raise NotImplementedError()

    @classmethod
    def from_pydantic(cls, input_: SimulationInputAbstract) -> Self:
        """Create an instance of the simulator from Pydantic input data."""
        return cls(**input_.model_dump())

    @abstractmethod
    async def load_resources(self, session: ClientSession) -> None:
        """Asynchronously load any required resources using the provided session."""
        raise NotImplementedError()
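For reference, a minimal sketch of a concrete subclass of the Simulator ABC above. It is not part of the package: the class name, constructor parameters, and the openenergyid.abstractsim import path are assumptions based on the file listing.

import pandas as pd
from aiohttp import ClientSession

# Assumed import path; openenergyid/abstractsim/__init__.py presumably re-exports Simulator.
from openenergyid.abstractsim import Simulator


class ConstantLoadSimulator(Simulator):
    """Hypothetical simulator for a constant load, for illustration only."""

    def __init__(self, watts: float = 100.0):
        self.watts = watts
        self._results: pd.Series | None = None

    @property
    def simulation_results(self):
        return self._results

    def simulate(self, **kwargs):
        # Four made-up quarter-hour timestamps with a flat power value.
        index = pd.date_range("2024-01-01", periods=4, freq="15min", tz="Europe/Brussels")
        self._results = pd.Series(self.watts, index=index, name="power")
        return self._results

    def result_as_frame(self) -> pd.DataFrame:
        return self._results.to_frame()

    async def load_resources(self, session: ClientSession) -> None:
        # A constant profile needs no external resources.
        return None


sim = ConstantLoadSimulator(watts=150.0)
sim.simulate()
print(sim.result_as_frame())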
openenergyid/baseload/__init__.py
ADDED
@@ -0,0 +1,15 @@
"""Baseload analysis package for power consumption data."""

from .analysis import BaseloadAnalyzer
from .exceptions import InsufficientDataError, InvalidDataError
from .models import BaseloadResultSchema, PowerReadingSchema, PowerSeriesSchema

__version__ = "0.1.0"
__all__ = [
    "BaseloadAnalyzer",
    "InsufficientDataError",
    "InvalidDataError",
    "PowerReadingSchema",
    "PowerSeriesSchema",
    "BaseloadResultSchema",
]
openenergyid/baseload/analysis.py
ADDED
@@ -0,0 +1,190 @@
"""Baseload Power Consumption Analysis Module

This module provides tools for analyzing electrical power consumption patterns to identify
and quantify baseload - the continuous background power usage in electrical systems.
It uses quantile-based time-series analysis to detect the consistent minimum power draw
that represents always-on devices and systems.
"""

import polars as pl


class BaseloadAnalyzer:
    """Analyzes power consumption data to determine baseload characteristics.

    The BaseloadAnalyzer helps identify the minimum continuous power consumption in
    an electrical system by analyzing regular energy readings. It uses a statistical
    approach to determine baseload, which represents power used by devices that run
    continuously (like refrigerators, standby electronics, or network equipment).

    The analyzer works by:
    1. Converting 15-minute energy readings to instantaneous power values
    2. Analyzing daily patterns to identify consistent minimum usage
    3. Aggregating results into configurable time periods

    Parameters
    ----------
    quantile : float, default=0.05
        Defines what portion of lowest daily readings to consider as baseload.
        The default 0.05 (5%) corresponds to roughly 72 minutes of lowest
        consumption per day, which helps filter out brief power dips while
        capturing true baseload patterns.

    timezone : str
        Timezone for analysis. All timestamps will be converted to this timezone
        to ensure correct daily boundaries and consistent reporting periods.

    Example Usage
    -------------
    >>> analyzer = BaseloadAnalyzer(timezone="Europe/Brussels", quantile=0.05)
    >>> power_data = analyzer.prepare_power_seriespolars(energy_readings)
    >>> hourly_analysis = analyzer.analyze(power_data, "1h")
    >>> monthly_analysis = analyzer.analyze(power_data, "1mo")
    """

    def __init__(self, timezone: str, quantile: float = 0.05):
        self.quantile = quantile
        self.timezone = timezone

    def prepare_power_seriespolars(self, energy_lf: pl.LazyFrame) -> pl.LazyFrame:
        """Converts energy readings into a power consumption time series.

        Transforms 15-minute energy readings (kilowatt-hours) into instantaneous
        power readings (watts) while handling timezone conversion.

        Parameters
        ----------
        energy_lf : pl.LazyFrame
            Input energy data with columns:
            - timestamp: Datetime with timezone (e.g. "2023-01-01T00:00:00+01:00")
            - total: Energy readings in kilowatt-hours (kWh)

        Returns
        -------
        pl.LazyFrame
            Power series with columns:
            - timestamp: Timezone-adjusted timestamps
            - power: Power readings in watts

        Notes
        -----
        The conversion from kWh/15min to watts uses the formula:
            watts = kWh * 4000
        where:
        - Multiply by 4 to convert from a 15-minute to an hourly rate
        - Multiply by 1000 to convert from kilowatts to watts
        """
        return (
            energy_lf.with_columns(
                [
                    # Convert timezone
                    pl.col("timestamp")
                    .dt.replace_time_zone("UTC")
                    .dt.convert_time_zone(self.timezone)
                    .alias("timestamp"),
                    # Convert to watts and clip negative values
                    (pl.col("total") * 4000).clip(0).alias("power"),
                ]
            )
            .drop("total")
            .sort("timestamp")
        )

    def analyze(
        self, power_lf: pl.LazyFrame, reporting_granularity: str = "1h"
    ) -> tuple[pl.LazyFrame, float]:
        """
        Analyze power consumption data to calculate baseload and total energy metrics.

        Takes power readings (in watts) with 15-minute intervals and calculates:
        - Daily baseload power using a quantile threshold
        - Energy consumption from baseload vs total consumption
        - Average power metrics
        - Global median baseload value for the entire period

        The analysis happens in three steps:
        1. Calculate the daily baseload power level using the configured quantile
        2. Join this daily baseload with the original power readings
        3. Aggregate the combined data into the requested reporting periods

        Parameters
        ----------
        power_lf : pl.LazyFrame
            Power consumption data with columns:
            - timestamp: Datetime in configured timezone
            - power: Power readings in watts

        reporting_granularity : str, default="1h"
            Time period for aggregating results. Must be a valid Polars interval string
            like "1h", "1d", "1mo", etc.

        Returns
        -------
        tuple[pl.LazyFrame, float]
            - Analysis results (pl.LazyFrame) with metrics per reporting period:
                - timestamp: Start of reporting period
                - consumption_due_to_baseload_in_kilowatthour: Baseload energy
                - total_consumption_in_kilowatthour: Total energy
                - consumption_not_due_to_baseload_in_kilowatthour: Non-baseload energy
                - average_daily_baseload_in_watt: Average baseload power level
                - average_power_in_watt: Average total power
                - baseload_ratio: Fraction of energy from baseload
                - consumption_due_to_median_baseload_in_kilowatthour: Idealized consumption using global median baseload
            - global_median_baseload (float): The global median baseload value in watts for the entire period
        """
        # Step 1: Calculate the daily baseload level.
        # Group power readings by day and find the threshold power level that represents baseload.
        daily_baseload = power_lf.group_by_dynamic("timestamp", every="1d").agg(
            pl.col("power").quantile(self.quantile).alias("daily_baseload")
        )
        # Calculate the median of the daily baseload levels over the whole period.
        global_median_baseload = (
            daily_baseload.select(pl.col("daily_baseload").median()).collect().item()
        )

        # Steps 2 & 3: Join the daily baseload level with the original power readings,
        # then aggregate into the requested reporting periods.
        results = (
            # Using an asof join since baseload changes daily but readings are every 15min
            power_lf.join_asof(daily_baseload, on="timestamp")
            # Group into requested reporting periods
            .group_by_dynamic("timestamp", every=reporting_granularity)
            .agg(
                [
                    # Energy calculations:
                    # Each 15min power reading (watts) represents 0.25 hours.
                    # Convert to kWh: watts * 0.25h * (1kW/1000W)
                    (pl.col("daily_baseload").sum() * 0.25 / 1000).alias(
                        "consumption_due_to_baseload_in_kilowatthour"
                    ),
                    (pl.col("power").sum() * 0.25 / 1000).alias(
                        "total_consumption_in_kilowatthour"
                    ),
                    # Average power levels during the period
                    pl.col("daily_baseload").mean().alias("average_daily_baseload_in_watt"),
                    pl.col("power").mean().alias("average_power_in_watt"),
                    # Idealized baseload energy using the global median baseload
                    (pl.len() * 0.25 * global_median_baseload / 1000).alias(
                        "consumption_due_to_median_baseload_in_kilowatthour"
                    ),
                ]
            )
            # Calculate derived metrics
            .with_columns(
                [
                    # Energy consumed above the baseload level
                    (
                        pl.col("total_consumption_in_kilowatthour")
                        - pl.col("consumption_due_to_baseload_in_kilowatthour")
                    ).alias("consumption_not_due_to_baseload_in_kilowatthour"),
                    pl.when(pl.col("total_consumption_in_kilowatthour") != 0)
                    .then(
                        pl.col("consumption_due_to_baseload_in_kilowatthour")
                        / pl.col("total_consumption_in_kilowatthour")
                    )
                    .otherwise(None)
                    .alias("baseload_ratio"),
                ]
            )
        )
        return results, global_median_baseload
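A usage sketch for the analyzer above. The readings and the "Europe/Brussels" timezone are made-up illustration values; the import path assumes the re-exports in openenergyid/baseload/__init__.py.

from datetime import datetime

import polars as pl

from openenergyid.baseload import BaseloadAnalyzer

# One week of made-up quarter-hourly readings: a flat 0.05 kWh per 15 minutes,
# which the prepare step converts to 0.05 * 4000 = 200 W.
timestamps = pl.datetime_range(
    datetime(2023, 1, 1), datetime(2023, 1, 8), interval="15m", eager=True
)
energy_lf = pl.LazyFrame({"timestamp": timestamps, "total": [0.05] * len(timestamps)})

analyzer = BaseloadAnalyzer(timezone="Europe/Brussels", quantile=0.05)
power_lf = analyzer.prepare_power_seriespolars(energy_lf)
results_lf, median_baseload_w = analyzer.analyze(power_lf, reporting_granularity="1d")

print(results_lf.collect())
print(f"Global median baseload: {median_baseload_w:.0f} W")  # 200 W for this flat profile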
openenergyid/baseload/models.py
ADDED
@@ -0,0 +1,32 @@
import pandera.polars as pa
from pandera.engines.polars_engine import DateTime


class PowerReadingSchema(pa.DataFrameModel):
    """Validates input energy readings"""

    timestamp: DateTime = pa.Field()
    total: float = pa.Field(ge=0)

    class Config:
        coerce = True


class PowerSeriesSchema(pa.DataFrameModel):
    """Validates converted power series"""

    timestamp: DateTime = pa.Field()
    power: float = pa.Field(ge=0)


class BaseloadResultSchema(pa.DataFrameModel):
    """Validates analysis results"""

    timestamp: DateTime = pa.Field()
    consumption_due_to_baseload_in_kilowatthour: float = pa.Field(ge=0)
    total_consumption_in_kilowatthour: float = pa.Field(ge=0)
    average_daily_baseload_in_watt: float = pa.Field(ge=0)
    average_power_in_watt: float = pa.Field(ge=0)
    consumption_not_due_to_baseload_in_kilowatthour: float
    baseload_ratio: float = pa.Field(ge=0, le=2)
    consumption_due_to_median_baseload_in_kilowatthour: float = pa.Field(ge=0)
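A validation sketch for these schemas: hypothetical data, assuming pandera's polars integration as imported above. Since PowerReadingSchema sets coerce = True, integer totals are cast to float during validation.

from datetime import datetime

import polars as pl

# Assumed import path; the schemas are re-exported in openenergyid/baseload/__init__.py.
from openenergyid.baseload import PowerReadingSchema

# Two made-up readings.
df = pl.DataFrame(
    {
        "timestamp": [datetime(2023, 1, 1, 0, 0), datetime(2023, 1, 1, 0, 15)],
        "total": [0, 1],
    }
)

# Raises a pandera SchemaError if a column is missing or a total is negative.
validated = PowerReadingSchema.validate(df)
print(validated)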
openenergyid/capacity/main.py
ADDED
@@ -0,0 +1,103 @@
"""Main module for capacity analysis."""

import datetime as dt
import typing

import pandas as pd
import pandera.typing as pdt


class CapacityAnalysis:
    """
    A class for performing capacity analysis on a given dataset.

    Attributes:
        data (pdt.Series): Localized Pandas Series containing power measurements.
        threshold (float): The value above which a peak is considered significant.
        window (str): The window size for grouping data before finding peaks. Defaults to "MS" (month start).
        x_padding (int): The padding to apply on the x-axis for visualization purposes.

    Methods:
        find_peaks(): Identifies peaks in the data based on the specified threshold and window.
        find_peaks_with_surroundings(num_peaks=10): Finds peaks along with their surrounding data points.
    """

    def __init__(
        self,
        data: pdt.Series,
        threshold: float = 2.5,
        window: str = "MS",  # Default to month start
        x_padding: int = 4,
    ):
        """
        Constructs all the necessary attributes for the CapacityAnalysis object.

        Parameters:
            data (pdt.Series): Localized Pandas Series containing power measurements.
            threshold (float): The value above which a peak is considered significant. Defaults to 2.5.
            window (str): The window size for grouping data before finding peaks. Defaults to "MS" (month start).
            x_padding (int): The padding to apply on the x-axis for visualization purposes. Defaults to 4.
        """
        self.data = data
        self.threshold = threshold
        self.window = window
        self.x_padding = x_padding

    def find_peaks(self) -> pd.Series:
        """
        Identifies peaks in the data based on the specified threshold and window.

        Returns:
            pd.Series: A Pandas Series containing the peaks.
        """
        # Group by the specified window (default is month start)
        grouped = self.data.groupby(pd.Grouper(freq=self.window))

        # Find the index (timestamp) of the maximum value in each group
        peak_indices = grouped.idxmax()

        # Get the corresponding peak values and keep only those above the threshold
        peaks = self.data.loc[peak_indices]
        return peaks[peaks > self.threshold]

    def find_peaks_with_surroundings(
        self, num_peaks: int = 10
    ) -> list[tuple[dt.datetime, float, pd.Series]]:
        """
        Finds peaks along with their surrounding data points.

        Parameters:
            num_peaks (int): The number of peaks to find. Defaults to 10.

        Returns:
            list[tuple[dt.datetime, float, pd.Series]]: A list of tuples containing peak time, peak value, and surrounding data.
        """
        peaks = self.data.nlargest(num_peaks * 2)
        peaks = peaks[peaks > self.threshold]
        if peaks.empty:
            return []

        result = []
        window_size = dt.timedelta(minutes=15 * (2 * self.x_padding + 1))

        for peak_time, peak_value in peaks.items():
            peak_time = typing.cast(pd.Timestamp, peak_time)

            # Skip peaks that fall within the window of an already selected peak
            if any(abs(peak_time - prev_peak[0]) < window_size for prev_peak in result):
                continue

            start_time = peak_time - dt.timedelta(minutes=15 * self.x_padding)
            end_time = peak_time + dt.timedelta(minutes=15 * (self.x_padding + 1))
            surrounding_data = self.data[start_time:end_time]

            result.append(
                (
                    peak_time,
                    peak_value,
                    surrounding_data,
                )
            )
            if len(result) == num_peaks:
                break
        return result
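A usage sketch for CapacityAnalysis. The series values are invented; a localized 15-minute pandas index matches the quarter-hour arithmetic in find_peaks_with_surroundings.

import pandas as pd

# Import path per the file listing above.
from openenergyid.capacity.main import CapacityAnalysis

# A made-up localized quarter-hourly power series in kW.
index = pd.date_range("2024-01-01", periods=8, freq="15min", tz="Europe/Brussels")
series = pd.Series([1.0, 1.2, 3.1, 1.1, 0.9, 2.8, 1.0, 4.2], index=index)

analysis = CapacityAnalysis(data=series, threshold=2.5)

print(analysis.find_peaks())  # at most one peak per month, kept only if above 2.5
for peak_time, peak_value, surroundings in analysis.find_peaks_with_surroundings(num_peaks=2):
    print(peak_time, peak_value, len(surroundings))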
openenergyid/capacity/models.py
ADDED
@@ -0,0 +1,32 @@
"""Model for Capacity Analysis."""

import datetime as dt

from pydantic import BaseModel, ConfigDict, Field

from openenergyid.models import TimeSeries


class CapacityInput(BaseModel):
    """Model for capacity input"""

    timezone: str = Field(alias="timeZone")
    series: TimeSeries
    threshold: float = Field(default=2.5, ge=0)


class PeakDetail(BaseModel):
    """Model for peak detail"""

    peak_time: dt.datetime = Field(alias="peakTime")
    peak_value: float = Field(alias="peakValue")
    surrounding_data: TimeSeries = Field(alias="surroundingData")
    model_config = ConfigDict(populate_by_name=True)


class CapacityOutput(BaseModel):
    """Model for capacity output"""

    peaks: TimeSeries
    peak_details: list[PeakDetail] = Field(alias="peakDetails")
    model_config = ConfigDict(populate_by_name=True)
openenergyid/const.py
ADDED
@@ -0,0 +1,29 @@
"""Constants for the Open Energy ID package."""

from typing import Literal

# METRICS

ELECTRICITY_DELIVERED: Literal["electricity_delivered"] = "electricity_delivered"
ELECTRICITY_EXPORTED: Literal["electricity_exported"] = "electricity_exported"
ELECTRICITY_PRODUCED: Literal["electricity_produced"] = "electricity_produced"
ELECTRICITY_CONSUMED: Literal["electricity_consumed"] = "electricity_consumed"
ELECTRICITY_SELF_CONSUMED: Literal["electricity_self_consumed"] = "electricity_self_consumed"

PRICE_DAY_AHEAD: Literal["price_day_ahead"] = "price_day_ahead"
PRICE_IMBALANCE_UPWARD: Literal["price_imbalance_upward"] = "price_imbalance_upward"
PRICE_IMBALANCE_DOWNWARD: Literal["price_imbalance_downward"] = "price_imbalance_downward"
PRICE_ELECTRICITY_DELIVERED: Literal["price_electricity_delivered"] = "price_electricity_delivered"
PRICE_ELECTRICITY_EXPORTED: Literal["price_electricity_exported"] = "price_electricity_exported"

RLP: Literal["RLP"] = "RLP"
SPP: Literal["SPP"] = "SPP"

COST_ELECTRICITY_DELIVERED: Literal["cost_electricity_delivered"] = "cost_electricity_delivered"
EARNINGS_ELECTRICITY_EXPORTED: Literal["earnings_electricity_exported"] = (
    "earnings_electricity_exported"
)
COST_ELECTRICITY_NET: Literal["cost_electricity_net"] = "cost_electricity_net"

RATIO_SELF_CONSUMPTION: Literal["ratio_self_consumption"] = "ratio_self_consumption"
RATIO_SELF_SUFFICIENCY: Literal["ratio_self_sufficiency"] = "ratio_self_sufficiency"
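A short sketch of what the Literal annotations above buy: a type checker can verify that only the exact metric strings are passed around. The Metric alias and describe function here are hypothetical, not part of the package.

from typing import Literal

from openenergyid.const import ELECTRICITY_DELIVERED, ELECTRICITY_EXPORTED

# Hypothetical alias for illustration only.
Metric = Literal["electricity_delivered", "electricity_exported"]


def describe(metric: Metric) -> str:
    """Hypothetical helper: accepts only the two literal metric names."""
    return f"metric: {metric}"


# The constants type-check because their Literal types match the alias;
# an arbitrary or misspelled str would be flagged by static analysis.
print(describe(ELECTRICITY_DELIVERED))
print(describe(ELECTRICITY_EXPORTED))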
openenergyid/dyntar/__init__.py
ADDED
@@ -0,0 +1,20 @@
"""Dynamic Tariff Analysis module."""

from .main import calculate_dyntar_columns, summarize_result
from .models import (
    DynamicTariffAnalysisInput,
    DynamicTariffAnalysisOutput,
    DynamicTariffAnalysisOutputSummary,
    OutputColumns,
    RequiredColumns,
)

__all__ = [
    "calculate_dyntar_columns",
    "DynamicTariffAnalysisInput",
    "DynamicTariffAnalysisOutput",
    "DynamicTariffAnalysisOutputSummary",
    "OutputColumns",
    "RequiredColumns",
    "summarize_result",
]
openenergyid/dyntar/const.py
ADDED
@@ -0,0 +1,31 @@
"""Constants for the dyntar analysis."""

from enum import Enum

ELECTRICITY_DELIVERED_SMR3 = "electricity_delivered_smr3"
ELECTRICITY_EXPORTED_SMR3 = "electricity_exported_smr3"
ELECTRICITY_DELIVERED_SMR2 = "electricity_delivered_smr2"
ELECTRICITY_EXPORTED_SMR2 = "electricity_exported_smr2"

COST_ELECTRICITY_DELIVERED_SMR2 = "cost_electricity_delivered_smr2"
COST_ELECTRICITY_EXPORTED_SMR2 = "cost_electricity_exported_smr2"
COST_ELECTRICITY_DELIVERED_SMR3 = "cost_electricity_delivered_smr3"
COST_ELECTRICITY_EXPORTED_SMR3 = "cost_electricity_exported_smr3"

RLP_WEIGHTED_PRICE_DELIVERED = "rlp_weighted_price_delivered"
SPP_WEIGHTED_PRICE_EXPORTED = "spp_weighted_price_exported"

HEATMAP_DELIVERED = "heatmap_delivered"
HEATMAP_EXPORTED = "heatmap_exported"
HEATMAP_TOTAL = "heatmap_total"

HEATMAP_DELIVERED_DESCRIPTION = "heatmap_delivered_description"
HEATMAP_EXPORTED_DESCRIPTION = "heatmap_exported_description"
HEATMAP_TOTAL_DESCRIPTION = "heatmap_total_description"


class Register(Enum):
    """Register for dynamic tariff analysis."""

    DELIVERY = "delivery"
    EXPORT = "export"