ebm 0.99.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ebm/__init__.py +0 -0
- ebm/__main__.py +152 -0
- ebm/__version__.py +1 -0
- ebm/cmd/__init__.py +0 -0
- ebm/cmd/calibrate.py +83 -0
- ebm/cmd/calibrate_excel_com_io.py +128 -0
- ebm/cmd/heating_systems_by_year.py +18 -0
- ebm/cmd/helpers.py +134 -0
- ebm/cmd/initialize.py +167 -0
- ebm/cmd/migrate.py +92 -0
- ebm/cmd/pipeline.py +227 -0
- ebm/cmd/prepare_main.py +174 -0
- ebm/cmd/result_handler.py +272 -0
- ebm/cmd/run_calculation.py +221 -0
- ebm/data/area.csv +92 -0
- ebm/data/area_new_residential_buildings.csv +3 -0
- ebm/data/area_per_person.csv +12 -0
- ebm/data/building_code_parameters.csv +9 -0
- ebm/data/energy_need_behaviour_factor.csv +6 -0
- ebm/data/energy_need_improvements.csv +7 -0
- ebm/data/energy_need_original_condition.csv +534 -0
- ebm/data/heating_system_efficiencies.csv +13 -0
- ebm/data/heating_system_forecast.csv +9 -0
- ebm/data/heating_system_initial_shares.csv +1113 -0
- ebm/data/holiday_home_energy_consumption.csv +24 -0
- ebm/data/holiday_home_stock.csv +25 -0
- ebm/data/improvement_building_upgrade.csv +9 -0
- ebm/data/new_buildings_residential.csv +32 -0
- ebm/data/population_forecast.csv +51 -0
- ebm/data/s_curve.csv +40 -0
- ebm/energy_consumption.py +307 -0
- ebm/extractors.py +115 -0
- ebm/heating_system_forecast.py +472 -0
- ebm/holiday_home_energy.py +341 -0
- ebm/migrations.py +224 -0
- ebm/model/__init__.py +0 -0
- ebm/model/area.py +403 -0
- ebm/model/bema.py +149 -0
- ebm/model/building_category.py +150 -0
- ebm/model/building_condition.py +78 -0
- ebm/model/calibrate_energy_requirements.py +84 -0
- ebm/model/calibrate_heating_systems.py +180 -0
- ebm/model/column_operations.py +157 -0
- ebm/model/construction.py +827 -0
- ebm/model/data_classes.py +223 -0
- ebm/model/database_manager.py +410 -0
- ebm/model/dataframemodels.py +115 -0
- ebm/model/defaults.py +30 -0
- ebm/model/energy_need.py +6 -0
- ebm/model/energy_need_filter.py +182 -0
- ebm/model/energy_purpose.py +115 -0
- ebm/model/energy_requirement.py +353 -0
- ebm/model/energy_use.py +202 -0
- ebm/model/enums.py +8 -0
- ebm/model/exceptions.py +4 -0
- ebm/model/file_handler.py +388 -0
- ebm/model/filter_scurve_params.py +83 -0
- ebm/model/filter_tek.py +152 -0
- ebm/model/heat_pump.py +53 -0
- ebm/model/heating_systems.py +20 -0
- ebm/model/heating_systems_parameter.py +17 -0
- ebm/model/heating_systems_projection.py +3 -0
- ebm/model/heating_systems_share.py +28 -0
- ebm/model/scurve.py +224 -0
- ebm/model/tek.py +1 -0
- ebm/s_curve.py +515 -0
- ebm/services/__init__.py +0 -0
- ebm/services/calibration_writer.py +262 -0
- ebm/services/console.py +106 -0
- ebm/services/excel_loader.py +66 -0
- ebm/services/files.py +38 -0
- ebm/services/spreadsheet.py +289 -0
- ebm/temp_calc.py +99 -0
- ebm/validators.py +565 -0
- ebm-0.99.3.dist-info/METADATA +217 -0
- ebm-0.99.3.dist-info/RECORD +80 -0
- ebm-0.99.3.dist-info/WHEEL +5 -0
- ebm-0.99.3.dist-info/entry_points.txt +3 -0
- ebm-0.99.3.dist-info/licenses/LICENSE +21 -0
- ebm-0.99.3.dist-info/top_level.txt +1 -0
@@ -0,0 +1,115 @@
|
|
1
|
+
from typing import cast, Optional
|
2
|
+
|
3
|
+
import pandas as pd
|
4
|
+
import pandera as pa
|
5
|
+
from pandera.typing import DataFrame, Series
|
6
|
+
from pandera.typing.common import DataFrameBase
|
7
|
+
|
8
|
+
from ebm.model.column_operations import explode_unique_columns, explode_column_alias
|
9
|
+
from ebm.model.energy_purpose import EnergyPurpose
|
10
|
+
|
11
|
+
|
12
|
+
class EnergyNeedYearlyImprovements(pa.DataFrameModel):
    """Pandera schema for energy-need improvement input rows.

    Each row ties a building_category/building_code/purpose combination to an
    improvement ``value``, applied according to ``function`` between
    ``start_year`` and ``end_year``. Rows are loaded from the
    'energy_need_improvements' input file (see ``_filename``).
    """
    building_category: Series[str]
    building_code: Series[str]
    purpose: Series[str]
    # Improvement magnitude; must be non-negative, coerced to float on validation.
    value: Series[float] = pa.Field(ge=0.0, coerce=True)
    # start_year/end_year may be absent from the input; defaults span 2020-2050.
    start_year: Optional[Series[int]] = pa.Field(coerce=True, default=2020)
    # Name of the improvement function, e.g. "yearly_reduction" or
    # "improvement_at_end_year" (see the transforms elsewhere in this module).
    function: Series[str]
    end_year: Optional[Series[int]] = pa.Field(coerce=True, default=2050)
    # Base name (without extension) of the csv file this model is loaded from.
    _filename = 'energy_need_improvements'

    class Config:
        # No two rows may share all of these column values.
        unique = ['building_category', 'building_code', 'purpose', 'start_year', 'function', 'end_year']
|
24
|
+
|
25
|
+
|
26
|
+
class YearlyReduction(pa.DataFrameModel):
    """Pandera schema for yearly energy-need reduction rates.

    Each row gives the fraction by which the energy need of a
    building_category/building_code/purpose combination improves every year
    between ``start_year`` and ``end_year``.
    """
    building_category: Series[str]
    building_code: Series[str]
    purpose: Series[str]
    start_year: Series[int] = pa.Field(coerce=True, default=2020)
    end_year: Series[int] = pa.Field(coerce=True, default=2050)
    yearly_efficiency_improvement: Series[float] = pa.Field(ge=0.0, coerce=True)

    class Config:
        # Fix: 'function' is not a column of this model (it is dropped by the
        # transform below before validation), so it must not be part of the
        # uniqueness constraint.
        unique = ['building_category', 'building_code', 'purpose', 'start_year', 'end_year']

    @staticmethod
    def from_energy_need_yearly_improvements(
            en_yearly_improvement: DataFrameBase[EnergyNeedYearlyImprovements]|EnergyNeedYearlyImprovements) -> 'DataFrameBase[YearlyReduction]':
        """
        Transforms an EnergyNeedYearlyImprovements DataFrame into a YearlyReduction DataFrame.

        Keeps only rows with function=="yearly_reduction", fills missing
        start_year/end_year columns with the 2020/2050 defaults, explodes
        aliases in the key columns, and renames 'value' to
        'yearly_efficiency_improvement'.

        Parameters
        ----------
        en_yearly_improvement : DataFrame[EnergyNeedYearlyImprovements]

        Returns
        -------
        DataFrameBase[YearlyReduction]

        Raises
        ------
        pa.errors.SchemaError
            When the resulting dataframe fails to validate
        pa.errors.SchemaErrors
            When the resulting dataframe fails to validate (lazy validation)

        """
        unique_columns = ['building_category', 'building_code', 'purpose', 'start_year', 'end_year']

        # Cast so type checkers stop complaining about the datatype; copy so
        # the default-column assignments below do not mutate the caller's frame.
        df = cast(pd.DataFrame, en_yearly_improvement).copy()
        if 'start_year' not in df.columns:
            df['start_year'] = 2020
        if 'end_year' not in df.columns:
            df['end_year'] = 2050
        df = df.query('function=="yearly_reduction"')
        df = explode_unique_columns(df,
                                    unique_columns=unique_columns)

        df = explode_column_alias(df,
                                  column='purpose',
                                  values=[p for p in EnergyPurpose],
                                  alias='default',
                                  de_dup_by=unique_columns)
        df['yearly_efficiency_improvement'] = df['value']
        df = df[['building_category', 'building_code', 'purpose', 'start_year', 'end_year', 'yearly_efficiency_improvement']]
        # drop=True keeps the stray 'index' column out of the validated result.
        df = df.reset_index(drop=True)
        return YearlyReduction.validate(df, lazy=True)
|
81
|
+
|
82
|
+
|
83
|
+
class PolicyImprovement(pa.DataFrameModel):
    """Pandera schema for policy-driven improvements reached by a target year.

    Each row states the total improvement achieved at ``end_year`` for a
    building_category/building_code/purpose combination, starting from
    ``start_year``.
    """
    building_category: Series[str]
    building_code: Series[str]
    purpose: Series[str]
    start_year: Series[int] = pa.Field(ge=0, coerce=True)
    end_year: Series[int] = pa.Field(ge=0, coerce=True)
    # Total improvement share reached at end_year; values >= 2.0 are rejected.
    improvement_at_end_year: Series[float] = pa.Field(ge=0.0, lt=2.0, coerce=True)

    class Config:
        unique = ['building_category', 'building_code', 'purpose', 'start_year', 'end_year']

    @pa.dataframe_check
    def start_year_before_end_year(cls, df: pd.DataFrame) -> Series[bool]:
        # Row-wise check: the improvement period must span at least one year.
        return df.start_year < df.end_year

    @staticmethod
    def from_energy_need_yearly_improvements(
            energy_need_improvements: DataFrameBase[EnergyNeedYearlyImprovements] | EnergyNeedYearlyImprovements) -> 'DataFrameBase[PolicyImprovement]':
        """
        Transforms an EnergyNeedYearlyImprovements DataFrame into a PolicyImprovement DataFrame.

        Keeps only rows with function=="improvement_at_end_year", fills missing
        start_year/end_year columns with the 2020/2050 defaults, explodes
        aliases in the key columns, and copies 'value' into
        'improvement_at_end_year'.

        Parameters
        ----------
        energy_need_improvements : DataFrame[EnergyNeedYearlyImprovements]

        Returns
        -------
        DataFrameBase[PolicyImprovement]

        Raises
        ------
        pa.errors.SchemaError
            When the resulting dataframe fails to validate
        """
        energy_need_improvements = cast(pd.DataFrame, energy_need_improvements)
        # copy() so the column assignments below do not hit a query() view
        # (avoids SettingWithCopyWarning and accidental mutation of the input).
        df = energy_need_improvements.query('function=="improvement_at_end_year"').copy()
        if 'start_year' not in df.columns:
            df['start_year'] = 2020
        if 'end_year' not in df.columns:
            df['end_year'] = 2050
        unique_columns = ('building_category', 'building_code', 'purpose', 'start_year', 'function', 'end_year',)
        df = explode_unique_columns(df, unique_columns=unique_columns)
        df = explode_column_alias(df, column='purpose', values=[p for p in EnergyPurpose], alias='default',
                                  de_dup_by=unique_columns)

        df['improvement_at_end_year'] = df['value']

        return PolicyImprovement.validate(df)
|
ebm/model/defaults.py
ADDED
@@ -0,0 +1,30 @@
|
|
1
|
+
import pandas as pd
|
2
|
+
|
3
|
+
def default_calibrate_heating_rv() -> pd.DataFrame:
    """Build the default heating_rv calibration table.

    Every building group gets a factor of 1.0, i.e. no calibration change.

    Returns
    -------
    pd.DataFrame
    """
    categories = ['non_residential', 'residential']
    return pd.DataFrame({
        'building_category': categories,
        'purpose': ['heating_rv'] * len(categories),
        'heating_rv_factor': [1.0] * len(categories),
    })
|
15
|
+
|
16
|
+
def default_calibrate_energy_consumption() -> pd.DataFrame:
    """
    Build an empty energy-consumption calibration table.

    Returns
    -------
    pd.DataFrame
        Zero-row frame with columns building_category, to, from, factor.
    """
    column_names = ['building_category', 'to', 'from', 'factor']
    return pd.DataFrame({name: [] for name in column_names})
|
ebm/model/energy_need.py
ADDED
@@ -0,0 +1,6 @@
|
|
1
|
+
def transform_total_energy_need(energy_need_kwh_m2, area_forecast):
    """
    Combine area forecast with energy intensity to get total energy need.

    Joins *area_forecast* (re-indexed by building_category, building_code,
    building_condition and year) with *energy_need_kwh_m2* on that index and
    adds an 'energy_requirement' column (kwh_m2 * m2).
    """
    index_columns = ['building_category', 'building_code', 'building_condition', 'year']
    indexed_area = area_forecast.reset_index().set_index(index_columns)
    total_energy_need = indexed_area.merge(energy_need_kwh_m2,
                                           left_index=True,
                                           right_index=True)
    total_energy_need['energy_requirement'] = total_energy_need.kwh_m2 * total_energy_need.m2
    return total_energy_need
|
@@ -0,0 +1,182 @@
|
|
1
|
+
from typing import Optional
|
2
|
+
|
3
|
+
import pandas as pd
|
4
|
+
|
5
|
+
from ebm.model.building_category import BuildingCategory
|
6
|
+
from ebm.model.building_condition import BuildingCondition
|
7
|
+
from ebm.model.column_operations import replace_column_alias
|
8
|
+
from ebm.model.energy_purpose import EnergyPurpose
|
9
|
+
|
10
|
+
|
11
|
+
def filter_original_condition(df: pd.DataFrame, building_category: BuildingCategory|str, tek:str, purpose: str) -> pd.DataFrame:
    """
    Explode and deduplicate *df*, then keep rows matching building_category, tek and purpose.

    Convenience function that does

    ```python

    exploded = explode_dataframe(df)
    de_duped = de_dupe_dataframe(exploded)
    filtered = de_duped[(de_duped.building_category==building_category) & (de_duped.building_code==tek) & (de_duped.purpose == purpose)]

    ```

    Parameters
    ----------
    df : pd.DataFrame
    building_category : BuildingCategory | str
    tek : str
    purpose : str

    Returns
    -------
    pd.DataFrame

    """
    de_duped = de_dupe_dataframe(explode_dataframe(df))
    row_mask = ((de_duped.building_category == building_category)
                & (de_duped.building_code == tek)
                & (de_duped.purpose == purpose))
    return de_duped[row_mask]
|
40
|
+
|
41
|
+
|
42
|
+
def filter_improvement_building_upgrade(df: pd.DataFrame, building_category: BuildingCategory|str, tek:str, purpose: str) -> pd.DataFrame:
    """
    Explode and deduplicate *df*, keep rows matching building_category, tek and
    purpose, and guarantee one row per existing building condition.

    Missing building conditions are filled in with reduction_share 0.0;
    real input rows always win over the zero-valued fillers.

    Parameters
    ----------
    df : pd.DataFrame
    building_category : BuildingCategory | str
    tek : str
    purpose : str

    Returns
    -------
    pd.DataFrame

    """
    key_columns = ['building_category', 'building_code', 'purpose', 'building_condition']

    exploded = explode_dataframe(df)
    de_duped = de_dupe_dataframe(exploded, unique_columns=key_columns)
    row_mask = ((de_duped.building_category == building_category)
                & (de_duped.building_code == tek)
                & (de_duped.purpose == purpose))
    filtered = de_duped[row_mask]

    # One zero-valued fallback row per existing building condition.
    filler_rows = [(building_category, tek, purpose, condition, 0.0)
                   for condition in BuildingCondition.existing_conditions()]
    filler_frame = pd.DataFrame(filler_rows, columns=key_columns + ['reduction_share'])

    combined = pd.concat([filtered, filler_frame])
    # keep='first' prefers real input rows over the fillers appended after them.
    return combined.drop_duplicates(key_columns, keep='first')
|
76
|
+
|
77
|
+
|
78
|
+
def de_dupe_dataframe(df: pd.DataFrame, unique_columns: Optional[list[str]]=None) -> pd.DataFrame:
    """
    Drop duplicate rows from *df* based on a set of key columns.

    Equivalent to ``df.drop_duplicates(unique_columns)``.

    Parameters
    ----------
    df : pd.DataFrame
    unique_columns : list[str], optional
        default= ['building_category', 'building_code', 'purpose']

    Returns
    -------
    pd.DataFrame

    """
    if unique_columns:
        key_columns = unique_columns
    else:
        key_columns = ['building_category', 'building_code', 'purpose']
    return df.drop_duplicates(key_columns)
|
98
|
+
|
99
|
+
|
100
|
+
def explode_dataframe(df: pd.DataFrame, building_code_list:Optional[list[str]]=None) -> pd.DataFrame:
    """
    Explode column aliases for building_category, TEK, purpose in dataframe.

    default in building_category is replaced with all options from BuildingCategory enum
    default in TEK is replaced with all elements in optional building_code_list parameter
    default in purpose is replaced with all options from EnergyPurpose enum

    Rows may combine several values with '+' (e.g. 'TEK10+TEK17'); these are
    split into one row per value. A 'priority' column (count of '+'-combined
    values) and a 'dupe' flag (True for rows sharing the same
    building_category/building_code/purpose key) are added to the result.

    Parameters
    ----------
    df : pd.DataFrame
    building_code_list : list of TEK to replace default, Optional
        default TEK49 PRE_TEK49 PRE_TEK49_RES_1950 TEK69 TEK87 TEK97 TEK07 TEK10 TEK17

    Returns
    -------
    pd.DataFrame

    """
    if not building_code_list:
        building_code_list = 'TEK49 PRE_TEK49 PRE_TEK49_RES_1950 TEK69 TEK87 TEK97 TEK07 TEK10 TEK17 TEK21 TEK01'.split(' ')
    # expand building_category; 'residential'/'non_residential' aliases map to
    # the matching subsets of the BuildingCategory enum.
    df = replace_column_alias(df,
                              column='building_category',
                              values={'default': [b for b in BuildingCategory],
                                      'residential': [b for b in BuildingCategory if b.is_residential()],
                                      'non_residential': [b for b in BuildingCategory if not b.is_residential()]})
    # expand tek
    df = replace_column_alias(df, 'building_code', values=building_code_list, alias='default')

    # expand purpose
    df = replace_column_alias(df, 'purpose', values=[p for p in EnergyPurpose], alias='default')

    # Add priority columns (count of '+'-combined values per key column) and sort
    df['bc_priority'] = df.building_category.apply(lambda x: 0 if '+' not in x else len(x.split('+')))
    df['t_priority'] = df.building_code.apply(lambda x: 0 if '+' not in x else len(x.split('+')))
    df['p_priority'] = df.purpose.apply(lambda x: 0 if '+' not in x else len(x.split('+')))

    # NOTE(review): 'priority' is unconditionally overwritten on the next
    # statement, so this guard appears redundant — confirm the overwrite was
    # not meant to be conditional.
    if not 'priority' in df.columns:
        df['priority'] = 0
    df['priority'] = df.bc_priority + df.t_priority + df.p_priority

    # Explode '+'-combined values into one row each
    df = df.assign(**{'building_category': df['building_category'].str.split('+'), }).explode('building_category')
    df = df.assign(**{'building_code': df['building_code'].str.split('+')}).explode('building_code')
    df = df.assign(**{'purpose': df['purpose'].str.split('+'), }).explode('purpose')
    # dedupe: flag (but keep) rows that now share the same key; lower priority
    # (more specific) rows sort first.
    deduped = df.sort_values(by=['building_category', 'building_code', 'purpose', 'priority'])
    deduped['dupe'] = deduped.duplicated(['building_category', 'building_code', 'purpose'], keep=False)
    return deduped
|
150
|
+
|
151
|
+
|
152
|
+
def main():
    """
    Explode and print all files listed in command line arguments. Default is reading files from input/

    """
    import pathlib
    import sys

    def _load_file(infile):
        # Read a csv and return its exploded rows, duplicates sorted last.
        df = pd.read_csv(infile)
        building_code_list = 'TEK49 PRE_TEK49 TEK69 TEK87 TEK97 TEK07 TEK10 TEK17'.split(' ')

        return explode_dataframe(df, building_code_list=building_code_list).sort_values(
            by=['dupe', 'building_category', 'building_code', 'purpose', 'priority'])

    # Show complete frames: disable row/column truncation in console output.
    pd.set_option('display.max_rows', None)
    pd.set_option('display.max_columns', None)
    pd.set_option('display.width', None)
    if len(sys.argv) < 2:
        files = list(pathlib.Path('input').glob('*.csv'))
    else:
        files = [pathlib.Path(f) for f in sys.argv[1:]]
    for filename in files:
        # Print the file being processed as a markdown-style heading
        # (the original f-string had no placeholder).
        print(f'# {filename}')
        try:
            df = _load_file(filename)
            print(df)
        except KeyError as key_error:
            # Fix: sys.stderr was passed as a positional argument (and printed),
            # instead of being used as the output stream.
            print('KeyError: missing ', str(key_error), file=sys.stderr)


if __name__ == '__main__':
    main()
|
@@ -0,0 +1,115 @@
|
|
1
|
+
import typing
|
2
|
+
|
3
|
+
from enum import StrEnum, unique, auto
|
4
|
+
|
5
|
+
import pandas as pd
|
6
|
+
|
7
|
+
from ebm.model.bema import BUILDING_CATEGORY_ORDER
|
8
|
+
from ebm.model.bema import TEK_ORDER
|
9
|
+
|
10
|
+
|
11
|
+
@unique
|
12
|
+
class EnergyPurpose(StrEnum):
|
13
|
+
HEATING_RV = auto()
|
14
|
+
HEATING_DHW = auto()
|
15
|
+
FANS_AND_PUMPS = auto()
|
16
|
+
LIGHTING = auto()
|
17
|
+
ELECTRICAL_EQUIPMENT = auto()
|
18
|
+
COOLING = auto()
|
19
|
+
|
20
|
+
@classmethod
|
21
|
+
def _missing_(cls, value: str):
|
22
|
+
"""
|
23
|
+
Attempts to create an enum member from a given value by normalizing the string.
|
24
|
+
|
25
|
+
This method is called when a value is not found in the enumeration. It converts the input value
|
26
|
+
to lowercase, replaces spaces and hyphens with underscores, and then checks if this transformed
|
27
|
+
value matches the value of any existing enum member.
|
28
|
+
|
29
|
+
Parameters
|
30
|
+
----------
|
31
|
+
value : str
|
32
|
+
The input value to convert and check against existing enum members.
|
33
|
+
|
34
|
+
Returns
|
35
|
+
-------
|
36
|
+
Enum member
|
37
|
+
The corresponding enum member if a match is found.
|
38
|
+
|
39
|
+
Raises
|
40
|
+
------
|
41
|
+
ValueError
|
42
|
+
If no matching enum member is found.
|
43
|
+
"""
|
44
|
+
value = value.lower().replace(' ', '_').replace('-', '_')
|
45
|
+
for member in cls:
|
46
|
+
if member.value == value:
|
47
|
+
return member
|
48
|
+
return ValueError(f'Invalid purpose given: {value}')
|
49
|
+
|
50
|
+
def __repr__(self):
|
51
|
+
return f'{self.__class__.__name__}.{self.name}'
|
52
|
+
|
53
|
+
@classmethod
|
54
|
+
def other(cls) -> typing.Iterable['EnergyPurpose']:
|
55
|
+
return [cls.LIGHTING, cls.ELECTRICAL_EQUIPMENT, cls.FANS_AND_PUMPS]
|
56
|
+
|
57
|
+
@classmethod
|
58
|
+
def heating(cls) -> typing.Iterable['EnergyPurpose']:
|
59
|
+
return [cls.HEATING_RV, cls.HEATING_DHW]
|
60
|
+
|
61
|
+
@classmethod
|
62
|
+
def cooling(cls) -> typing.Iterable['EnergyPurpose']:
|
63
|
+
return [cls.COOLING]
|
64
|
+
|
65
|
+
|
66
|
+
def group_energy_use_kwh_by_building_group_purpose_year_wide(energy_use_kwh: pd.DataFrame) -> pd.DataFrame:
    """
    Aggregate energy use in kWh to GWh per building group, purpose and year (wide format).

    Rows are bucketed into the building groups 'house', 'apartment_block' and
    'non_residential', summed, then pivoted so each year becomes a column.

    Parameters
    ----------
    energy_use_kwh : pd.DataFrame
        Must carry building_category, building_condition, building_code,
        purpose, heating_systems, load and year (as index or columns) plus a
        'kwh' value column.

    Returns
    -------
    pd.DataFrame
        One row per (building_category, purpose) with a 'U' unit column
        ('GWh') and one column per year 2020-2050.
    """
    df = (energy_use_kwh
          .copy()
          .reset_index()
          .set_index(['building_category', 'building_condition', 'building_code', 'purpose', 'heating_systems', 'load', 'year'])
          .sort_index())

    # kWh -> GWh
    df.loc[:, 'GWh'] = df.loc[:, 'kwh'] / 1_000_000
    # NOTE(review): building_code/building_condition are index *levels* here,
    # so this adds two constant columns with the same names — confirm intent.
    df.loc[:, ('building_code', 'building_condition')] = ('all', 'all')

    # Residential categories keep their own group; everything else is lumped
    # together as non_residential.
    df['building_group'] = 'non_residential'
    df.loc['house', 'building_group'] = 'house'
    df.loc['apartment_block', 'building_group'] = 'apartment_block'

    summed = df.groupby(by=['building_group', 'purpose', 'year']).sum().reset_index()
    summed = summed[['building_group', 'purpose', 'year', 'GWh']]

    # Pivot to wide format: one column per year.
    hz = summed.pivot(columns=['year'], index=['building_group', 'purpose'], values=['GWh']).reset_index()
    # Sort rows by the model's canonical category order, then a fixed purpose order.
    hz = hz.sort_values(by=['building_group', 'purpose'],
                        key=lambda x: x.map(BUILDING_CATEGORY_ORDER) if x.name == 'building_group' else x.map(
                            TEK_ORDER) if x.name == 'building_code' else x.map(
                            {'heating_rv': 1, 'heating_dhw': 2, 'fans_and_pumps': 3, 'lighting': 4,
                             'electrical_equipment': 5, 'cooling': 6}) if x.name == 'purpose' else x)

    # Insert the unit column and flatten the pivoted MultiIndex columns.
    # NOTE(review): assumes the pivot produced exactly the years 2020-2050.
    hz.insert(2, 'U', 'GWh')
    hz.columns = ['building_group', 'purpose', 'U'] + [y for y in range(2020, 2051)]

    return hz.rename(columns={'building_group': 'building_category'})
|
94
|
+
|
95
|
+
|
96
|
+
def group_energy_use_by_year_category_building_code_purpose(energy_use_kwh: pd.DataFrame) -> pd.DataFrame:
    """
    Aggregate energy use to GWh per year, building category, building code and purpose.

    Parameters
    ----------
    energy_use_kwh : pd.DataFrame
        Must carry building_category, building_condition, building_code,
        purpose, heating_systems, load and year (as index or columns) plus
        'm2' and 'kwh_m2' value columns.

    Returns
    -------
    pd.DataFrame
        Long-format frame with columns year, building_category, building_code,
        purpose and 'energy_use [GWh]', sorted by the model's canonical order.
    """
    df = (energy_use_kwh.copy().reset_index()
          .set_index(['building_category', 'building_condition', 'building_code', 'purpose', 'heating_systems', 'load', 'year'])
          .sort_index())

    # Energy use per row: area (m2) times intensity (kWh/m2), scaled to GWh.
    df.loc[:, 'GWh'] = (df['m2'] * df['kwh_m2']) / 1_000_000

    df = df.reset_index().groupby(by=['year', 'building_category', 'building_code', 'purpose'], as_index=False).sum()
    df = df[['year', 'building_category', 'building_code', 'purpose', 'GWh']]
    # Sort by the model's canonical orderings. Fix: the original key function
    # tested x.name == 'building_category' twice; the second (TEK_ORDER) branch
    # was unreachable dead code and has been removed — behavior is unchanged.
    df = df.sort_values(by=['year', 'building_category', 'building_code', 'purpose'],
                        key=lambda x: x.map(BUILDING_CATEGORY_ORDER) if x.name == 'building_category' else x.map(
                            TEK_ORDER) if x.name == 'building_code' else x.map(
                            {'heating_rv': 1, 'heating_dhw': 2, 'fans_and_pumps': 3, 'lighting': 4,
                             'electrical_equipment': 5, 'cooling': 6}) if x.name == 'purpose' else x)

    df = df.rename(columns={'GWh': 'energy_use [GWh]'})

    df.reset_index(inplace=True, drop=True)
    return df
|