ebm 0.99.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ebm/__init__.py +0 -0
- ebm/__main__.py +152 -0
- ebm/__version__.py +1 -0
- ebm/cmd/__init__.py +0 -0
- ebm/cmd/calibrate.py +83 -0
- ebm/cmd/calibrate_excel_com_io.py +128 -0
- ebm/cmd/heating_systems_by_year.py +18 -0
- ebm/cmd/helpers.py +134 -0
- ebm/cmd/initialize.py +167 -0
- ebm/cmd/migrate.py +92 -0
- ebm/cmd/pipeline.py +227 -0
- ebm/cmd/prepare_main.py +174 -0
- ebm/cmd/result_handler.py +272 -0
- ebm/cmd/run_calculation.py +221 -0
- ebm/data/area.csv +92 -0
- ebm/data/area_new_residential_buildings.csv +3 -0
- ebm/data/area_per_person.csv +12 -0
- ebm/data/building_code_parameters.csv +9 -0
- ebm/data/energy_need_behaviour_factor.csv +6 -0
- ebm/data/energy_need_improvements.csv +7 -0
- ebm/data/energy_need_original_condition.csv +534 -0
- ebm/data/heating_system_efficiencies.csv +13 -0
- ebm/data/heating_system_forecast.csv +9 -0
- ebm/data/heating_system_initial_shares.csv +1113 -0
- ebm/data/holiday_home_energy_consumption.csv +24 -0
- ebm/data/holiday_home_stock.csv +25 -0
- ebm/data/improvement_building_upgrade.csv +9 -0
- ebm/data/new_buildings_residential.csv +32 -0
- ebm/data/population_forecast.csv +51 -0
- ebm/data/s_curve.csv +40 -0
- ebm/energy_consumption.py +307 -0
- ebm/extractors.py +115 -0
- ebm/heating_system_forecast.py +472 -0
- ebm/holiday_home_energy.py +341 -0
- ebm/migrations.py +224 -0
- ebm/model/__init__.py +0 -0
- ebm/model/area.py +403 -0
- ebm/model/bema.py +149 -0
- ebm/model/building_category.py +150 -0
- ebm/model/building_condition.py +78 -0
- ebm/model/calibrate_energy_requirements.py +84 -0
- ebm/model/calibrate_heating_systems.py +180 -0
- ebm/model/column_operations.py +157 -0
- ebm/model/construction.py +827 -0
- ebm/model/data_classes.py +223 -0
- ebm/model/database_manager.py +410 -0
- ebm/model/dataframemodels.py +115 -0
- ebm/model/defaults.py +30 -0
- ebm/model/energy_need.py +6 -0
- ebm/model/energy_need_filter.py +182 -0
- ebm/model/energy_purpose.py +115 -0
- ebm/model/energy_requirement.py +353 -0
- ebm/model/energy_use.py +202 -0
- ebm/model/enums.py +8 -0
- ebm/model/exceptions.py +4 -0
- ebm/model/file_handler.py +388 -0
- ebm/model/filter_scurve_params.py +83 -0
- ebm/model/filter_tek.py +152 -0
- ebm/model/heat_pump.py +53 -0
- ebm/model/heating_systems.py +20 -0
- ebm/model/heating_systems_parameter.py +17 -0
- ebm/model/heating_systems_projection.py +3 -0
- ebm/model/heating_systems_share.py +28 -0
- ebm/model/scurve.py +224 -0
- ebm/model/tek.py +1 -0
- ebm/s_curve.py +515 -0
- ebm/services/__init__.py +0 -0
- ebm/services/calibration_writer.py +262 -0
- ebm/services/console.py +106 -0
- ebm/services/excel_loader.py +66 -0
- ebm/services/files.py +38 -0
- ebm/services/spreadsheet.py +289 -0
- ebm/temp_calc.py +99 -0
- ebm/validators.py +565 -0
- ebm-0.99.3.dist-info/METADATA +217 -0
- ebm-0.99.3.dist-info/RECORD +80 -0
- ebm-0.99.3.dist-info/WHEEL +5 -0
- ebm-0.99.3.dist-info/entry_points.txt +3 -0
- ebm-0.99.3.dist-info/licenses/LICENSE +21 -0
- ebm-0.99.3.dist-info/top_level.txt +1 -0
ebm/model/file_handler.py
ADDED
@@ -0,0 +1,388 @@
import os
import pathlib
import shutil
import typing

import pandas as pd
from loguru import logger
from pandera.errors import SchemaErrors, SchemaError

import ebm.validators as validators
from ebm.model.defaults import default_calibrate_heating_rv, default_calibrate_energy_consumption


class FileHandler:
    """
    Handles file operations.
    """

    # Filenames
    BUILDING_CONDITIONS = 'building_conditions.csv'
    BUILDING_CODE_PARAMS = 'building_code_parameters.csv'
    S_CURVE = 's_curve.csv'
    POPULATION_FORECAST = 'population_forecast.csv'
    NEW_BUILDINGS_RESIDENTIAL = 'new_buildings_residential.csv'
    AREA_NEW_RESIDENTIAL_BUILDINGS = 'area_new_residential_buildings.csv'
    AREA = 'area.csv'
    BEHAVIOUR_FACTOR = 'energy_need_behaviour_factor.csv'
    ENERGY_NEED_ORIGINAL_CONDITION = 'energy_need_original_condition.csv'
    IMPROVEMENT_BUILDING_UPGRADE = 'improvement_building_upgrade.csv'
    ENERGY_NEED_YEARLY_IMPROVEMENTS = 'energy_need_improvements.csv'
    HOLIDAY_HOME_STOCK = 'holiday_home_stock.csv'
    HOLIDAY_HOME_ENERGY_CONSUMPTION = 'holiday_home_energy_consumption.csv'
    AREA_PER_PERSON = 'area_per_person.csv'
    HEATING_SYSTEM_INITIAL_SHARES = 'heating_system_initial_shares.csv'
    HEATING_SYSTEM_EFFICIENCIES = 'heating_system_efficiencies.csv'
    HEATING_SYSTEM_FORECAST = 'heating_system_forecast.csv'
    CALIBRATE_ENERGY_REQUIREMENT = 'calibrate_heating_rv.xlsx'
    CALIBRATE_ENERGY_CONSUMPTION = 'calibrate_energy_consumption.xlsx'

    input_directory: pathlib.Path

    def __init__(self, directory: typing.Union[str, pathlib.Path, None] = None):
        """
        Constructor for FileHandler. Sets FileHandler.input_directory.

        Parameters
        ----------
        directory : str | pathlib.Path | None
            When directory is None the constructor attempts to read the directory location from the
            environment variable EBM_INPUT_DIRECTORY.
        """
        if directory is None:
            # Use 'input' as fallback when EBM_INPUT_DIRECTORY is not set in the environment.
            directory = os.environ.get('EBM_INPUT_DIRECTORY', 'input')

        self.input_directory = directory if isinstance(directory, pathlib.Path) else pathlib.Path(directory)
        self.files_to_check = [self.BUILDING_CODE_PARAMS, self.S_CURVE, self.POPULATION_FORECAST,
                               self.NEW_BUILDINGS_RESIDENTIAL, self.AREA_NEW_RESIDENTIAL_BUILDINGS,
                               self.AREA, self.BEHAVIOUR_FACTOR, self.ENERGY_NEED_ORIGINAL_CONDITION,
                               self.IMPROVEMENT_BUILDING_UPGRADE, self.ENERGY_NEED_YEARLY_IMPROVEMENTS,
                               self.HOLIDAY_HOME_ENERGY_CONSUMPTION, self.HOLIDAY_HOME_STOCK,
                               self.AREA_PER_PERSON, self.HEATING_SYSTEM_INITIAL_SHARES,
                               self.HEATING_SYSTEM_EFFICIENCIES, self.HEATING_SYSTEM_FORECAST]

    def __repr__(self):
        return f'FileHandler(input_directory="{self.input_directory}")'

    def __str__(self):
        return repr(self)

    @staticmethod
    def default_data_directory() -> pathlib.Path:
        """
        Returns the path to the ebm default data. Used when content is needed for a new input directory.
        Not to be confused with FileHandler.input_directory.

        Returns
        -------
        pathlib.Path

        See Also
        --------
        create_missing_input_files
        """
        return pathlib.Path(__file__).parent.parent / 'data'

    def get_file(self, file_name: str) -> pd.DataFrame:
        """
        Finds and returns a file by searching in the folder defined by self.input_directory.

        Parameters:
        - file_name (str): Name of the file to retrieve.

        Returns:
        - file_df (pd.DataFrame): DataFrame containing file data.
        """
        logger.debug(f'get_file {file_name}')
        file_path: pathlib.Path = pathlib.Path(self.input_directory) / file_name
        logger.debug(f'{file_path=}')

        try:
            if file_path.suffix == '.xlsx':
                file_df = pd.read_excel(file_path)
            elif file_path.suffix == '.csv':
                file_df = pd.read_csv(file_path)
            else:
                msg = f'{file_name} is not of type xlsx or csv'
                logger.error(msg)
                raise ValueError(msg)
            return file_df
        except FileNotFoundError as ex:
            logger.exception(ex)
            logger.debug(f'Current directory is {os.getcwd()}')
            logger.error(f'Unable to open {file_path}. File not found.')
            raise
        except PermissionError as ex:
            logger.exception(ex)
            logger.error(f'Unable to open {file_path}. Permission denied.')
            raise
        except IOError as ex:
            logger.exception(ex)
            logger.error(f'Unable to open {file_path}. Unable to read file.')
            raise

    def get_building_code(self) -> pd.DataFrame:
        """
        Get TEK parameters DataFrame.

        Returns:
        - building_code_params (pd.DataFrame): DataFrame containing TEK parameters.
        """
        building_code_params = self.get_file(self.BUILDING_CODE_PARAMS)
        return building_code_params

    def get_s_curve(self) -> pd.DataFrame:
        """
        Get S-curve parameters DataFrame.

        Returns:
        - scurve_params (pd.DataFrame): DataFrame containing S-curve parameters.
        """
        scurve_params = self.get_file(self.S_CURVE)
        return scurve_params

    def get_construction_population(self) -> pd.DataFrame:
        """
        Get population and household size DataFrame from a file.

        Returns:
        - construction_population (pd.DataFrame): DataFrame containing population numbers
          with columns: year, population, household_size
        """
        return self.get_file(self.POPULATION_FORECAST)

    def get_population(self) -> pd.DataFrame:
        """
        Loads population data from population_forecast.csv with household_size as float64.

        Should probably be merged with get_construction_population.

        Returns
        -------
        population : pd.DataFrame
            DataFrame with population data.
        """
        file_path = self.input_directory / self.POPULATION_FORECAST
        logger.debug(f'{file_path=}')
        return pd.read_csv(file_path, dtype={"household_size": "float64"})

    def get_construction_building_category_share(self) -> pd.DataFrame:
        """
        Get building category share by year DataFrame from a file.

        The numbers can be used in conjunction with the number of households to calculate the total
        number of buildings of category house and apartment block.

        Returns:
        - construction_building_category_share (pd.DataFrame): DataFrame with columns
          "year", "Andel nye småhus", "Andel nye leiligheter", "Areal nye småhus", "Areal nye leiligheter"
        """
        return self.get_file(self.NEW_BUILDINGS_RESIDENTIAL)

    def get_building_category_area(self) -> pd.DataFrame:
        """
        Get area of new residential buildings DataFrame from a file.

        Returns:
        - building_category_area (pd.DataFrame): DataFrame with columns
          "area", "type of building", "2010", "2011", ...
        """
        file_path = self.input_directory / self.AREA_NEW_RESIDENTIAL_BUILDINGS
        logger.debug(f'{file_path=}')
        return pd.read_csv(file_path, index_col=0, header=0)

    def get_area_parameters(self) -> pd.DataFrame:
        """
        Get dataframe with area parameters.

        Returns:
        - area_parameters (pd.DataFrame): DataFrame containing total area (m^2) per
          building category and TEK.
        """
        return self.get_file(self.AREA)

    def get_energy_req_original_condition(self) -> pd.DataFrame:
        """
        Get dataframe with energy requirement (kWh/m^2) for floor area in original condition.

        Returns
        -------
        pd.DataFrame
            DataFrame containing energy requirement (kWh/m^2) for floor area in original condition,
            per building category and purpose.
        """
        return self.get_file(self.ENERGY_NEED_ORIGINAL_CONDITION)

    def get_energy_req_reduction_per_condition(self) -> pd.DataFrame:
        """
        Get dataframe with shares for reducing the energy requirement of the different building conditions.

        Returns
        -------
        pd.DataFrame
            DataFrame containing energy requirement reduction shares for the different building conditions,
            per building category, TEK and purpose.
        """
        return self.get_file(self.IMPROVEMENT_BUILDING_UPGRADE)

    def get_energy_need_yearly_improvements(self) -> pd.DataFrame:
        """
        Get dataframe with yearly efficiency rates for energy requirement improvements.

        Returns
        -------
        pd.DataFrame
            DataFrame containing yearly efficiency rates (%) for energy requirement improvements,
            per building category, TEK and purpose.
        """
        return self.get_file(self.ENERGY_NEED_YEARLY_IMPROVEMENTS)

    def get_holiday_home_energy_consumption(self) -> pd.DataFrame:
        return self.get_file(self.HOLIDAY_HOME_ENERGY_CONSUMPTION)

    def get_holiday_home_by_year(self) -> pd.DataFrame:
        return self.get_file(self.HOLIDAY_HOME_STOCK)

    def get_area_per_person(self):
        return self.get_file(self.AREA_PER_PERSON)

    def get_calibrate_heating_rv(self) -> pd.DataFrame:
        calibrate_heating_rv = self.input_directory / self.CALIBRATE_ENERGY_REQUIREMENT
        if calibrate_heating_rv.is_file():
            return self.get_file(calibrate_heating_rv.name)
        return default_calibrate_heating_rv()

    def get_calibrate_heating_systems(self) -> pd.DataFrame:
        calibrate_energy_consumption = self.input_directory / self.CALIBRATE_ENERGY_CONSUMPTION
        if calibrate_energy_consumption.is_file():
            return self.get_file(calibrate_energy_consumption.name)
        return default_calibrate_energy_consumption()

    def get_heating_systems_shares_start_year(self) -> pd.DataFrame:
        """
        Load heating_system_initial_shares.csv into a DataFrame.
        """
        return self.get_file(self.HEATING_SYSTEM_INITIAL_SHARES)

    def get_heating_system_efficiencies(self) -> pd.DataFrame:
        """Load heating_system_efficiencies.csv from file into a dataframe.

        Returns
        -------
        heating_system_efficiencies : pd.DataFrame
            pandas DataFrame with heating system efficiencies
        """
        return self.get_file(self.HEATING_SYSTEM_EFFICIENCIES)

    def get_heating_system_forecast(self) -> pd.DataFrame:
        """
        Load heating_system_forecast.csv into a DataFrame.
        """
        return self.get_file(self.HEATING_SYSTEM_FORECAST)

    def _check_is_file(self, filename: str) -> bool:
        """
        Check if the filename is a file in self.input_directory.

        Parameters
        ----------
        filename : str

        Returns
        -------
        file_exists : bool
        """
        return (pathlib.Path(self.input_directory) / filename).is_file()

    def check_for_missing_files(self) -> typing.List[str]:
        """
        Returns a list of required files that are not present in self.input_directory.

        Returns
        -------
        missing_files : List[str]

        Raises
        ------
        FileNotFoundError
            If FileHandler.input_directory is not found.
        NotADirectoryError
            If FileHandler.input_directory is not a directory.
        """
        if not self.input_directory.exists():
            msg = f'{self.input_directory.absolute()} not found'
            logger.error(msg)
            raise FileNotFoundError(msg)
        if not self.input_directory.is_dir():
            raise NotADirectoryError(f'{self.input_directory} is not a directory')

        missing_files = [file for file in self.files_to_check if not self._check_is_file(file)]
        if missing_files:
            plural = 's' if len(missing_files) != 1 else ''
            msg = f'{len(missing_files)} required file{plural} missing from {self.input_directory}'
            logger.error(msg)
            for f in missing_files:
                logger.error(f'Could not find {f}')
        return missing_files

    def create_missing_input_files(self, source_directory: (pathlib.Path | None) = None) -> None:
        """
        Creates any input files missing in self.input_directory. When source_directory is omitted,
        FileHandler.default_data_directory() is used as the source.

        Parameters
        ----------
        source_directory : pathlib.Path, optional
            Optional directory for sourcing files to copy.

        Returns
        -------
        None

        See Also
        --------
        default_data_directory : default source for data files
        """
        source = FileHandler.default_data_directory() if not source_directory else source_directory

        if not source.is_dir():
            raise NotADirectoryError(f'{source} is not a directory')
        if not self.input_directory.is_dir():
            logger.info(f'Creating directory {self.input_directory}')
            self.input_directory.mkdir()
        for file in self.files_to_check:
            logger.debug(f'Create input file {file}')
            self.create_input_file(file, source_directory=source)

    def create_input_file(self, file, source_directory=None):
        source_directory = FileHandler.default_data_directory() if not source_directory else source_directory

        source_file = source_directory / file
        target_file = self.input_directory / file
        if target_file.is_file():
            logger.debug(f'Skipping existing file {target_file}')
        elif not source_file.is_file():
            logger.error(f'Source file {source_file} does not exist!')
        else:
            shutil.copy(source_file, target_file)
            logger.info(f'Creating missing file {target_file}')

    def validate_input_files(self):
        """
        Validates the input files for correct formatting and content using the validators module.

        Raises
        ------
        pandera.errors.SchemaErrors
            If any invalid data or formatting is found when validating files. The validation is lazy,
            meaning multiple errors may be listed in the exception.
        """
        for file_to_validate in self.files_to_check:
            df = self.get_file(file_to_validate)
            validator = getattr(validators, file_to_validate[:-4].lower())

            try:
                validator.validate(df, lazy=True)
            except (SchemaErrors, SchemaError):
                logger.error(f'Got error while validating {file_to_validate}')
                raise
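Usage sketch (illustrative, not part of the package): assuming the wheel's bundled default data, a caller might bootstrap an input directory and load a couple of datasets like this; the directory name 'my_input' is made up.

# Hypothetical usage sketch for FileHandler; only methods defined above are used.
import pathlib
from ebm.model.file_handler import FileHandler

handler = FileHandler(directory=pathlib.Path('my_input'))
handler.create_missing_input_files()          # copy bundled defaults for any missing input files
missing = handler.check_for_missing_files()   # expected to be an empty list afterwards
if not missing:
    handler.validate_input_files()            # pandera validation, raises on schema errors
    area = handler.get_area_parameters()      # area.csv as a DataFrame
    s_curve = handler.get_s_curve()           # s_curve.csv as a DataFrame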
ebm/model/filter_scurve_params.py
ADDED
@@ -0,0 +1,83 @@
import typing

import pandas as pd

from .building_category import BuildingCategory
from .building_condition import BuildingCondition
from .data_classes import ScurveParameters


class FilterScurveParams:
    """
    A utility class for filtering S-curve parameters based on building category and condition.

    This class provides a static method to filter a DataFrame containing S-curve parameters,
    extracting the relevant data for a specific building category and set of conditions.
    """

    COL_BUILDING_CATEGORY = 'building_category'
    COL_BUILDING_CONDITION = 'condition'
    COL_EARLIEST_AGE = 'earliest_age_for_measure'
    COL_AVERAGE_AGE = 'average_age_for_measure'
    COL_LAST_AGE = 'last_age_for_measure'
    COL_RUSH_YEARS = 'rush_period_years'
    COL_RUSH_SHARE = 'rush_share'
    COL_NEVER_SHARE = 'never_share'

    @staticmethod
    def filter(building_category: BuildingCategory,
               scurve_condition_list: typing.List[str],
               scurve_params: pd.DataFrame) -> typing.Dict[str, ScurveParameters]:
        """
        Filters S-curve parameters by building category and condition.

        This method filters a DataFrame containing S-curve parameters to extract data specific to
        the provided building category and conditions listed in `scurve_condition_list`. The filtered
        data is then converted into a dictionary of `ScurveParameters` dataclass instances, each
        representing the S-curve parameters for a particular condition.

        Parameters:
        - building_category (BuildingCategory): The building category for which the S-curve parameters are being filtered.
        - scurve_condition_list (List[str]): A list of conditions (as strings) for which the S-curve parameters are needed.
        - scurve_params (pd.DataFrame): DataFrame containing the S-curve parameters, with columns for building category,
          condition, and various age-related metrics.

        Returns:
        - filtered_scurve_params (Dict[str, ScurveParameters]): A dictionary where the keys are conditions (str) and the
          values are `ScurveParameters` dataclass instances containing the corresponding S-curve parameters for each
          condition.

        Raises:
        - KeyError: If the provided building category is not found in the S-curve parameters DataFrame.
        """
        filtered_scurve_params = {}

        for condition in scurve_condition_list:
            if not scurve_params.building_category.str.contains(building_category).any():
                msg = 'Unknown building_category "{}" encountered when setting up scurve parameters'.format(building_category)
                raise KeyError(msg)

            # Filter dataframe on building category and condition
            scurve_params_filtered = scurve_params[
                (scurve_params[FilterScurveParams.COL_BUILDING_CATEGORY] == building_category) &
                (scurve_params[FilterScurveParams.COL_BUILDING_CONDITION] == condition)]

            # Assuming there is only one row in the filtered DataFrame
            scurve_params_row = scurve_params_filtered.iloc[0]

            # Convert the single row to a dictionary
            scurve_params_dict = scurve_params_row.to_dict()

            # Map the dictionary values to the dataclass attributes
            scurve_parameters = ScurveParameters(
                building_category=scurve_params_dict[FilterScurveParams.COL_BUILDING_CATEGORY],
                condition=scurve_params_dict[FilterScurveParams.COL_BUILDING_CONDITION],
                earliest_age=scurve_params_dict[FilterScurveParams.COL_EARLIEST_AGE],
                average_age=scurve_params_dict[FilterScurveParams.COL_AVERAGE_AGE],
                rush_years=scurve_params_dict[FilterScurveParams.COL_RUSH_YEARS],
                last_age=scurve_params_dict[FilterScurveParams.COL_LAST_AGE],
                rush_share=scurve_params_dict[FilterScurveParams.COL_RUSH_SHARE],
                never_share=scurve_params_dict[FilterScurveParams.COL_NEVER_SHARE],
            )

            filtered_scurve_params[condition] = scurve_parameters

        return filtered_scurve_params
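A minimal usage sketch (not part of the package), assuming BuildingCategory.HOUSE exists and behaves as the string 'house' (the value the other modules use); the condition name and parameter values below are made up, and the columns match the COL_ constants above.

# Hypothetical usage sketch for FilterScurveParams.filter.
import pandas as pd
from ebm.model.building_category import BuildingCategory
from ebm.model.filter_scurve_params import FilterScurveParams

scurve_params = pd.DataFrame([
    {'building_category': 'house', 'condition': 'renovated',
     'earliest_age_for_measure': 10, 'average_age_for_measure': 30,
     'last_age_for_measure': 80, 'rush_period_years': 15,
     'rush_share': 0.6, 'never_share': 0.1},
])

params_by_condition = FilterScurveParams.filter(
    building_category=BuildingCategory.HOUSE,   # assumed enum member comparing equal to 'house'
    scurve_condition_list=['renovated'],        # hypothetical condition name
    scurve_params=scurve_params)
print(params_by_condition['renovated'])         # ScurveParameters for the 'renovated' condition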
ebm/model/filter_tek.py
ADDED
@@ -0,0 +1,152 @@
import typing

import pandas as pd
from loguru import logger

from ebm.model.building_category import BuildingCategory
from ebm.model.data_classes import TEKParameters


class FilterTek:
    """
    Utility class for filtering TEK lists and parameters.
    """

    CATEGORY_APARTMENT = 'apartment_block'
    CATEGORY_HOUSE = 'house'
    COMMERCIAL_BUILDING = 'COM'
    RESIDENTIAL_BUILDING = 'RES'
    PRE_TEK49_APARTMENT = 'PRE_TEK49_RES_1950'
    PRE_TEK49_HOUSE = 'PRE_TEK49_RES_1940'

    @staticmethod
    def get_filtered_list(building_category: BuildingCategory, building_code_list: typing.List[str]) -> typing.List[str]:
        """
        Filters the provided TEK list based on the building category.

        Parameters:
        - building_category (BuildingCategory): The category of the building.
        - building_code_list (List[str]): List of TEK strings to be filtered.

        Returns:
        - filtered_building_code_list (List[str]): Filtered list of TEK strings.
        """
        residential_building_list = [FilterTek.CATEGORY_APARTMENT, FilterTek.CATEGORY_HOUSE]

        if building_category in residential_building_list:
            # Filter out all TEKs associated with commercial buildings
            filtered_building_code_list = [tek for tek in building_code_list if FilterTek.COMMERCIAL_BUILDING not in tek]

            # Further filtering based on the specific residential building category
            if building_category == FilterTek.CATEGORY_APARTMENT:
                filtered_building_code_list = [tek for tek in filtered_building_code_list if tek != FilterTek.PRE_TEK49_HOUSE]
            elif building_category == FilterTek.CATEGORY_HOUSE:
                filtered_building_code_list = [tek for tek in filtered_building_code_list if tek != FilterTek.PRE_TEK49_APARTMENT]

        else:
            # Filter out all TEKs associated with residential buildings
            filtered_building_code_list = [tek for tek in building_code_list if FilterTek.RESIDENTIAL_BUILDING not in tek]

        return filtered_building_code_list

    # This method is only needed if building_code_params are initialized in the Buildings class
    @staticmethod
    def get_filtered_params(building_code_list: typing.List[str],
                            building_code_params: typing.Dict[str, TEKParameters]) -> typing.Dict[str, TEKParameters]:
        """
        Filters the TEK parameters to include only those relevant to the provided TEK list.

        This method takes a dictionary of TEK parameters and filters it to include only
        the parameters for TEKs that are present in the `building_code_list`. This ensures that
        only the relevant TEK parameters are retained for use in subsequent calculations.

        Parameters:
        - building_code_list (List[str]): A list of TEK identifiers to filter by.
        - building_code_params (Dict[str, TEKParameters]): A dictionary where the keys are TEK identifiers
          and the values are TEKParameters objects containing the parameters for each TEK.

        Returns:
        - filtered_building_code_params (Dict[str, TEKParameters]): A dictionary containing only the TEK parameters
          for the TEKs present in the `building_code_list`.
        """
        filtered_building_code_params = {}
        for tek in building_code_list:
            filtered_building_code_params[tek] = building_code_params[tek]

        return filtered_building_code_params

    @staticmethod
    def merge_building_code(df: pd.DataFrame,
                            new_building_code_name: str,
                            old_building_code_names: typing.List[str],
                            aggregates: typing.Dict[str, str] = None) -> pd.DataFrame:
        """
        Merge rows in a DataFrame based on specified building code names and aggregate their values.

        Parameters
        ----------
        df : pd.DataFrame
            The input DataFrame with a MultiIndex.
        new_building_code_name : str
            The new building code name to assign to the merged rows.
        old_building_code_names : typing.List[str]
            A list of building code names to be merged.
        aggregates : typing.Dict[str, str], optional
            A dictionary specifying the aggregation functions for each column.
            If not provided, default aggregations will be used:
            {'building_code': 'max', 'm2': 'first', 'kwh_m2': 'mean', 'energy_requirement': 'sum'}.

        Returns
        -------
        pd.DataFrame
            The DataFrame with the specified building code rows merged and aggregated.
        """
        if not isinstance(df, pd.DataFrame):
            raise ValueError("`df` should be a pandas DataFrame.")
        if not isinstance(old_building_code_names, list):
            raise ValueError("`old_building_code_names` should be a list of strings.")

        # Apply default aggregates if the parameter is empty
        aggregates = aggregates or {'building_code': 'max', 'm2': 'first', 'kwh_m2': 'mean', 'energy_requirement': 'sum'}
        building_code_values = [tek for tek in old_building_code_names if tek in df.index.get_level_values('building_code')]

        if not building_code_values:
            return df

        level_values = df.index.get_level_values('building_category')
        building_categories = [bc for bc in BuildingCategory if bc.is_residential() and bc in level_values]
        if not building_categories:
            return df

        residential = df.loc[
            (building_categories, slice(None), slice(None), slice(None), slice(None))].reset_index()

        building_code_to_merge = residential[residential.building_code.isin(building_code_values)]
        agg_building_code = building_code_to_merge.groupby(by=['building_category',
                                                               'building_condition',
                                                               'purpose',
                                                               'year']).agg(aggregates)
        agg_building_code = agg_building_code.reset_index()

        agg_building_code['building_code'] = new_building_code_name
        rows_to_remove = df.loc[(slice(None), building_code_values, slice(None), slice(None), slice(None))].index
        df = df.drop(rows_to_remove)
        df = pd.concat([df, agg_building_code.set_index(['building_category', 'building_code', 'building_condition', 'year', 'purpose'])])
        df = df.sort_index()

        return df

    @staticmethod
    def remove_building_code_suffix(df: pd.DataFrame, suffix) -> pd.DataFrame:
        # Convert MultiIndex to DataFrame
        index_df = df.index.to_frame(index=False)

        key_name = 'tek' if 'tek' in index_df.keys() else 'building_code'
        # Remove the suffix (for example '_RES') from the building code values
        index_df[key_name] = index_df[key_name].str.replace(suffix, '')

        # Set the modified DataFrame back to MultiIndex
        df.index = pd.MultiIndex.from_frame(index_df)

        return df
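A small usage sketch (not part of the package) for FilterTek.get_filtered_list: plain strings are passed for the building category because the comparisons above are against the string constants; in the package a BuildingCategory member that compares equal to those strings is presumably used. The TEK codes other than the PRE_TEK49_* constants are made up.

# Hypothetical usage sketch for FilterTek.get_filtered_list.
from ebm.model.filter_tek import FilterTek

building_code_list = ['PRE_TEK49_RES_1940', 'PRE_TEK49_RES_1950',
                      'TEK97_RES', 'TEK97_COM', 'TEK17_RES', 'TEK17_COM']

# For houses: commercial ('COM') codes and the pre-TEK49 apartment code are dropped.
house_codes = FilterTek.get_filtered_list('house', building_code_list)
# -> ['PRE_TEK49_RES_1940', 'TEK97_RES', 'TEK17_RES']

# For any non-residential category: all residential ('RES') codes are dropped.
office_codes = FilterTek.get_filtered_list('office', building_code_list)
# -> ['TEK97_COM', 'TEK17_COM']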
ebm/model/heat_pump.py
ADDED
@@ -0,0 +1,53 @@
import pandas as pd

from ebm.energy_consumption import HP_ENERGY_SOURCE, HEAT_PUMP


def air_source_heat_pump(heating_systems_parameters: pd.DataFrame):
    df = heating_systems_parameters.copy()
    el_slice = df[df['heating_system'] == 'HP'].index
    df.loc[el_slice, 'pump_factor'] = df.loc[el_slice, 'load_share'] * df.loc[el_slice, 'heating_system_share']
    df.loc[el_slice, HP_ENERGY_SOURCE] = 'Luft/luft'  # 'Luft/luft' = air-to-air
    df.loc[el_slice, 'purpose'] = 'heating_rv'

    return df.query('heating_system=="HP"')


def district_heating_heat_pump(heating_systems_parameters: pd.DataFrame):
    df = heating_systems_parameters.copy()
    vann_slice = df[df['heating_system'] == 'HP Central heating'].index
    df.loc[vann_slice, 'pump_factor'] = df.loc[vann_slice, 'load_share'] * df.loc[vann_slice, 'heating_system_share']
    df.loc[vann_slice, HP_ENERGY_SOURCE] = 'Vannbåren varme'  # 'Vannbåren varme' = hydronic (water-borne) heat
    df.loc[vann_slice, 'purpose'] = 'heating_rv,heating_dhw'
    df = df.assign(**{'purpose': df['purpose'].str.split(',')}).explode('purpose')

    return df.query('heating_system=="HP Central heating"')


def heat_pump_production(energy_need, air_air, district_heating):
    df_en = energy_need.copy()
    df_hp = pd.concat([air_air, district_heating])

    df = pd.merge(left=df_en,
                  left_on=['building_category', 'building_code', 'purpose', 'year'],
                  right=df_hp,
                  right_on=['building_category', 'building_code', 'purpose', 'year'])

    df[HEAT_PUMP] = df.energy_requirement * df.pump_factor

    return df


def heat_prod_hp(production: pd.DataFrame, group_by: list | None = None) -> pd.DataFrame:
    grouping = ['building_group', 'year'] if not group_by else group_by
    # 'Bolig' = residential, 'Yrkesbygg' = non-residential buildings
    production.loc[production['building_category'].isin(['house', 'apartment_block']), 'building_group'] = 'Bolig'
    production.loc[production['building_group'] != 'Bolig', 'building_group'] = 'Yrkesbygg'
    return production.groupby(by=grouping + ['hp_source']).agg({'RV_HP': 'sum'}) / 1_000_000


def heat_prod_hp_wide(production: pd.DataFrame) -> pd.DataFrame:
    df = heat_prod_hp(production)
    wide = df.reset_index().pivot(columns=['year'], index=['building_group', 'hp_source'], values=['RV_HP']).reset_index()
    wide.columns = ['building_group', 'hp_source'] + [c for c in wide.columns.get_level_values(1)[2:]]
    return wide