emhass 0.13.1__py3-none-any.whl → 0.13.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/command_line.py ADDED
@@ -0,0 +1,1891 @@
1
+ #!/usr/bin/env python3
2
+
3
+ import argparse
4
+ import copy
5
+ import json
6
+ import logging
7
+ import os
8
+ import pathlib
9
+ import pickle
10
+ import re
11
+ import time
12
+ from datetime import UTC, datetime
13
+ from importlib.metadata import version
14
+
15
+ import numpy as np
16
+ import pandas as pd
17
+
18
+ from emhass import utils
19
+ from emhass.forecast import Forecast
20
+ from emhass.machine_learning_forecaster import MLForecaster
21
+ from emhass.machine_learning_regressor import MLRegressor
22
+ from emhass.optimization import Optimization
23
+ from emhass.retrieve_hass import RetrieveHass
24
+
25
+ default_csv_filename = "opt_res_latest.csv"
26
+ default_pkl_suffix = "_mlf.pkl"
27
+ default_metadata_json = "metadata.json"
28
+
29
+
30
+ def retrieve_home_assistant_data(
31
+ set_type: str,
32
+ get_data_from_file: bool,
33
+ retrieve_hass_conf: dict,
34
+ optim_conf: dict,
35
+ rh: RetrieveHass,
36
+ emhass_conf: dict,
37
+ test_df_literal: str,
38
+ ) -> dict:
39
+ """Retrieve data from Home Assistant or file and prepare it for optimization."""
40
+ if get_data_from_file:
41
+ with open(emhass_conf["data_path"] / test_df_literal, "rb") as inp:
42
+ rh.df_final, days_list, var_list, rh.ha_config = pickle.load(inp)
43
+ rh.var_list = var_list
44
+ # Assign variables based on set_type
45
+ retrieve_hass_conf["sensor_power_load_no_var_loads"] = str(var_list[0])
46
+ if optim_conf.get("set_use_pv", True):
47
+ retrieve_hass_conf["sensor_power_photovoltaics"] = str(var_list[1])
48
+ retrieve_hass_conf["sensor_linear_interp"] = [
49
+ retrieve_hass_conf["sensor_power_photovoltaics"],
50
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
51
+ ]
52
+ retrieve_hass_conf["sensor_replace_zero"] = [
53
+ retrieve_hass_conf["sensor_power_photovoltaics"],
54
+ var_list[2],
55
+ ]
56
+ else:
57
+ retrieve_hass_conf["sensor_linear_interp"] = [
58
+ retrieve_hass_conf["sensor_power_load_no_var_loads"]
59
+ ]
60
+ retrieve_hass_conf["sensor_replace_zero"] = []
61
+ else:
62
+ # Determine days_list based on set_type
63
+ if set_type == "perfect-optim" or set_type == "adjust_pv":
64
+ days_list = utils.get_days_list(
65
+ retrieve_hass_conf["historic_days_to_retrieve"]
66
+ )
67
+ elif set_type == "naive-mpc-optim":
68
+ days_list = utils.get_days_list(1)
69
+ else:
70
+ days_list = None # Not needed for dayahead
71
+ var_list = [retrieve_hass_conf["sensor_power_load_no_var_loads"]]
72
+ if optim_conf.get("set_use_pv", True):
73
+ var_list.append(retrieve_hass_conf["sensor_power_photovoltaics"])
74
+ if optim_conf.get("set_use_adjusted_pv", True):
75
+ var_list.append(retrieve_hass_conf["sensor_power_photovoltaics_forecast"])
76
+ if not rh.get_data(
77
+ days_list, var_list, minimal_response=False, significant_changes_only=False
78
+ ):
79
+ return False, None, days_list
80
+ rh.prepare_data(
81
+ retrieve_hass_conf["sensor_power_load_no_var_loads"],
82
+ load_negative=retrieve_hass_conf["load_negative"],
83
+ set_zero_min=retrieve_hass_conf["set_zero_min"],
84
+ var_replace_zero=retrieve_hass_conf["sensor_replace_zero"],
85
+ var_interp=retrieve_hass_conf["sensor_linear_interp"],
86
+ )
87
+ return True, rh.df_final.copy(), days_list
88
+
89
+
90
+ def adjust_pv_forecast(
91
+ logger: logging.Logger,
92
+ fcst: Forecast,
93
+ P_PV_forecast: pd.Series,
94
+ get_data_from_file: bool,
95
+ retrieve_hass_conf: dict,
96
+ optim_conf: dict,
97
+ rh: RetrieveHass,
98
+ emhass_conf: dict,
99
+ test_df_literal: str,
100
+ ) -> pd.Series:
101
+ """
102
+ Adjust the photovoltaic (PV) forecast using historical data and a regression model.
103
+
104
+ This method retrieves historical data, prepares it for model fitting, trains a regression
105
+ model, and adjusts the provided PV forecast based on the trained model.
106
+
107
+ :param logger: Logger object for logging information and errors.
108
+ :type logger: logging.Logger
109
+ :param fcst: Forecast object used for PV forecast adjustment.
110
+ :type fcst: Forecast
111
+ :param P_PV_forecast: The initial PV forecast to be adjusted.
112
+ :type P_PV_forecast: pd.Series
113
+ :param get_data_from_file: Whether to retrieve data from a file instead of Home Assistant.
114
+ :type get_data_from_file: bool
115
+ :param retrieve_hass_conf: Configuration dictionary for retrieving data from Home Assistant.
116
+ :type retrieve_hass_conf: dict
117
+ :param optim_conf: Configuration dictionary for optimization settings.
118
+ :type optim_conf: dict
119
+ :param rh: RetrieveHass object for interacting with Home Assistant.
120
+ :type rh: RetrieveHass
121
+ :param emhass_conf: Configuration dictionary for emhass paths and settings.
122
+ :type emhass_conf: dict
123
+ :param test_df_literal: DataFrame containing test data for debugging purposes.
124
+ :type test_df_literal: pd.DataFrame
125
+ :return: The adjusted PV forecast as a pandas Series.
126
+ :rtype: pd.Series
127
+ """
128
+ logger.info("Adjusting PV forecast, retrieving history data for model fit")
129
+ # Retrieve data from Home Assistant
130
+ success, df_input_data, _ = retrieve_home_assistant_data(
131
+ "adjust_pv",
132
+ get_data_from_file,
133
+ retrieve_hass_conf,
134
+ optim_conf,
135
+ rh,
136
+ emhass_conf,
137
+ test_df_literal,
138
+ )
139
+ if not success:
140
+ return False
141
+ # Call data preparation method
142
+ fcst.adjust_pv_forecast_data_prep(df_input_data)
143
+ # Call the fit method
144
+ fcst.adjust_pv_forecast_fit(
145
+ n_splits=5,
146
+ regression_model=optim_conf["adjusted_pv_regression_model"],
147
+ )
148
+ # Call the predict method
149
+ P_PV_forecast = P_PV_forecast.rename("forecast").to_frame()
150
+ P_PV_forecast = fcst.adjust_pv_forecast_predict(forecasted_pv=P_PV_forecast)
151
+ # Update the PV forecast
152
+ return P_PV_forecast["adjusted_forecast"].rename(None)
153
+
154
+
155
+ def set_input_data_dict(
156
+ emhass_conf: dict,
157
+ costfun: str,
158
+ params: str,
159
+ runtimeparams: str,
160
+ set_type: str,
161
+ logger: logging.Logger,
162
+ get_data_from_file: bool | None = False,
163
+ ) -> dict:
164
+ """
165
+ Set up some of the data needed for the different actions.
166
+
167
+ :param emhass_conf: Dictionary containing the needed emhass paths
168
+ :type emhass_conf: dict
169
+ :param costfun: The type of cost function to use for optimization problem
170
+ :type costfun: str
171
+ :param params: Configuration parameters passed from data/options.json
172
+ :type params: str
173
+ :param runtimeparams: Runtime optimization parameters passed as a dictionary
174
+ :type runtimeparams: str
175
+ :param set_type: Set the type of setup based on following type of optimization
176
+ :type set_type: str
177
+ :param logger: The passed logger object
178
+ :type logger: logging object
179
+ :param get_data_from_file: Use data from saved CSV file (useful for debug)
180
+ :type get_data_from_file: bool, optional
181
+ :return: A dictionnary with multiple data used by the action functions
182
+ :rtype: dict
183
+
184
+ """
185
+ logger.info("Setting up needed data")
186
+
187
+ # check if passed params is a dict
188
+ if (params is not None) and (params != "null"):
189
+ if type(params) is str:
190
+ params = json.loads(params)
191
+ else:
192
+ params = {}
193
+
194
+ # Parsing yaml
195
+ retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
196
+ if type(retrieve_hass_conf) is bool:
197
+ return False
198
+
199
+ # Treat runtimeparams
200
+ params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
201
+ runtimeparams,
202
+ params,
203
+ retrieve_hass_conf,
204
+ optim_conf,
205
+ plant_conf,
206
+ set_type,
207
+ logger,
208
+ emhass_conf,
209
+ )
210
+
211
+ # Define the data retrieve object
212
+ rh = RetrieveHass(
213
+ retrieve_hass_conf["hass_url"],
214
+ retrieve_hass_conf["long_lived_token"],
215
+ retrieve_hass_conf["optimization_time_step"],
216
+ retrieve_hass_conf["time_zone"],
217
+ params,
218
+ emhass_conf,
219
+ logger,
220
+ get_data_from_file=get_data_from_file,
221
+ )
222
+
223
+ # Retrieve basic configuration data from hass
224
+ test_df_literal = "test_df_final.pkl"
225
+ if get_data_from_file:
226
+ with open(emhass_conf["data_path"] / test_df_literal, "rb") as inp:
227
+ _, _, _, rh.ha_config = pickle.load(inp)
228
+ else:
229
+ response = rh.get_ha_config()
230
+ if type(response) is bool:
231
+ return False
232
+
233
+ # Update the params dict using data from the HA configuration
234
+ params = utils.update_params_with_ha_config(
235
+ params,
236
+ rh.ha_config,
237
+ )
238
+
239
+ # Define the forecast and optimization objects
240
+ fcst = Forecast(
241
+ retrieve_hass_conf,
242
+ optim_conf,
243
+ plant_conf,
244
+ params,
245
+ emhass_conf,
246
+ logger,
247
+ get_data_from_file=get_data_from_file,
248
+ )
249
+ opt = Optimization(
250
+ retrieve_hass_conf,
251
+ optim_conf,
252
+ plant_conf,
253
+ fcst.var_load_cost,
254
+ fcst.var_prod_price,
255
+ costfun,
256
+ emhass_conf,
257
+ logger,
258
+ )
259
+
260
+ # Perform setup based on type of action
261
+ if set_type == "perfect-optim":
262
+ # Retrieve data from hass
263
+ success, df_input_data, days_list = retrieve_home_assistant_data(
264
+ set_type,
265
+ get_data_from_file,
266
+ retrieve_hass_conf,
267
+ optim_conf,
268
+ rh,
269
+ emhass_conf,
270
+ test_df_literal,
271
+ )
272
+ if not success:
273
+ return False
274
+ # What we don't need for this type of action
275
+ P_PV_forecast, P_load_forecast, df_input_data_dayahead = None, None, None
276
+ elif set_type == "dayahead-optim":
277
+ # Get PV and load forecasts
278
+ if (
279
+ optim_conf["set_use_pv"]
280
+ or optim_conf.get("weather_forecast_method", None) == "list"
281
+ ):
282
+ df_weather = fcst.get_weather_forecast(
283
+ method=optim_conf["weather_forecast_method"]
284
+ )
285
+ if isinstance(df_weather, bool) and not df_weather:
286
+ return False
287
+ P_PV_forecast = fcst.get_power_from_weather(df_weather)
288
+ # Adjust PV forecast
289
+ if optim_conf["set_use_adjusted_pv"]:
290
+ # Update the PV forecast
291
+ P_PV_forecast = adjust_pv_forecast(
292
+ logger,
293
+ fcst,
294
+ P_PV_forecast,
295
+ get_data_from_file,
296
+ retrieve_hass_conf,
297
+ optim_conf,
298
+ rh,
299
+ emhass_conf,
300
+ test_df_literal,
301
+ )
302
+ else:
303
+ P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
304
+ P_load_forecast = fcst.get_load_forecast(
305
+ method=optim_conf["load_forecast_method"]
306
+ )
307
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
308
+ logger.error(
309
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
310
+ )
311
+ return False
312
+ df_input_data_dayahead = pd.DataFrame(
313
+ np.transpose(np.vstack([P_PV_forecast.values, P_load_forecast.values])),
314
+ index=P_PV_forecast.index,
315
+ columns=["P_PV_forecast", "P_load_forecast"],
316
+ )
317
+ if (
318
+ "optimization_time_step" in retrieve_hass_conf
319
+ and retrieve_hass_conf["optimization_time_step"]
320
+ ):
321
+ if not isinstance(
322
+ retrieve_hass_conf["optimization_time_step"],
323
+ pd._libs.tslibs.timedeltas.Timedelta,
324
+ ):
325
+ optimization_time_step = pd.to_timedelta(
326
+ retrieve_hass_conf["optimization_time_step"], "minute"
327
+ )
328
+ else:
329
+ optimization_time_step = retrieve_hass_conf["optimization_time_step"]
330
+ df_input_data_dayahead = df_input_data_dayahead.asfreq(
331
+ optimization_time_step
332
+ )
333
+ else:
334
+ df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
335
+ params = json.loads(params)
336
+ if (
337
+ "prediction_horizon" in params["passed_data"]
338
+ and params["passed_data"]["prediction_horizon"] is not None
339
+ ):
340
+ prediction_horizon = params["passed_data"]["prediction_horizon"]
341
+ df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
342
+ df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
343
+ prediction_horizon - 1
344
+ ]
345
+ ]
346
+ # What we don't need for this type of action
347
+ df_input_data, days_list = None, None
348
+ elif set_type == "naive-mpc-optim":
349
+ if (
350
+ optim_conf.get("load_forecast_method", None) == "list"
351
+ and optim_conf.get("weather_forecast_method", None) == "list"
352
+ ) or (
353
+ optim_conf.get("load_forecast_method", None) == "list"
354
+ and not (optim_conf["set_use_pv"])
355
+ ):
356
+ days_list = None
357
+ set_mix_forecast = False
358
+ df_input_data = None
359
+ else:
360
+ # Retrieve data from hass
361
+ success, df_input_data, days_list = retrieve_home_assistant_data(
362
+ set_type,
363
+ get_data_from_file,
364
+ retrieve_hass_conf,
365
+ optim_conf,
366
+ rh,
367
+ emhass_conf,
368
+ test_df_literal,
369
+ )
370
+ if not success:
371
+ return False
372
+ set_mix_forecast = True
373
+ # Get PV and load forecasts
374
+ if (
375
+ optim_conf["set_use_pv"]
376
+ or optim_conf.get("weather_forecast_method", None) == "list"
377
+ ):
378
+ df_weather = fcst.get_weather_forecast(
379
+ method=optim_conf["weather_forecast_method"]
380
+ )
381
+ if isinstance(df_weather, bool) and not df_weather:
382
+ return False
383
+ P_PV_forecast = fcst.get_power_from_weather(
384
+ df_weather, set_mix_forecast=set_mix_forecast, df_now=df_input_data
385
+ )
386
+ # Adjust PV forecast
387
+ if optim_conf["set_use_adjusted_pv"]:
388
+ # Update the PV forecast
389
+ P_PV_forecast = adjust_pv_forecast(
390
+ logger,
391
+ fcst,
392
+ P_PV_forecast,
393
+ get_data_from_file,
394
+ retrieve_hass_conf,
395
+ optim_conf,
396
+ rh,
397
+ emhass_conf,
398
+ test_df_literal,
399
+ )
400
+ else:
401
+ P_PV_forecast = pd.Series(0, index=fcst.forecast_dates)
402
+ P_load_forecast = fcst.get_load_forecast(
403
+ method=optim_conf["load_forecast_method"],
404
+ set_mix_forecast=set_mix_forecast,
405
+ df_now=df_input_data,
406
+ )
407
+ if isinstance(P_load_forecast, bool) and not P_load_forecast:
408
+ logger.error(
409
+ "Unable to get sensor power photovoltaics, or sensor power load no var loads. Check HA sensors and their daily data"
410
+ )
411
+ return False
412
+ df_input_data_dayahead = pd.concat([P_PV_forecast, P_load_forecast], axis=1)
413
+ if (
414
+ "optimization_time_step" in retrieve_hass_conf
415
+ and retrieve_hass_conf["optimization_time_step"]
416
+ ):
417
+ if not isinstance(
418
+ retrieve_hass_conf["optimization_time_step"],
419
+ pd._libs.tslibs.timedeltas.Timedelta,
420
+ ):
421
+ optimization_time_step = pd.to_timedelta(
422
+ retrieve_hass_conf["optimization_time_step"], "minute"
423
+ )
424
+ else:
425
+ optimization_time_step = retrieve_hass_conf["optimization_time_step"]
426
+ df_input_data_dayahead = df_input_data_dayahead.asfreq(
427
+ optimization_time_step
428
+ )
429
+ else:
430
+ df_input_data_dayahead = utils.set_df_index_freq(df_input_data_dayahead)
431
+ df_input_data_dayahead.columns = ["P_PV_forecast", "P_load_forecast"]
432
+ params = json.loads(params)
433
+ if (
434
+ "prediction_horizon" in params["passed_data"]
435
+ and params["passed_data"]["prediction_horizon"] is not None
436
+ ):
437
+ prediction_horizon = params["passed_data"]["prediction_horizon"]
438
+ df_input_data_dayahead = copy.deepcopy(df_input_data_dayahead)[
439
+ df_input_data_dayahead.index[0] : df_input_data_dayahead.index[
440
+ prediction_horizon - 1
441
+ ]
442
+ ]
443
+ elif (
444
+ set_type == "forecast-model-fit"
445
+ or set_type == "forecast-model-predict"
446
+ or set_type == "forecast-model-tune"
447
+ ):
448
+ df_input_data_dayahead = None
449
+ P_PV_forecast, P_load_forecast = None, None
450
+ params = json.loads(params)
451
+ # Retrieve data from hass
452
+ days_to_retrieve = params["passed_data"]["historic_days_to_retrieve"]
453
+ model_type = params["passed_data"]["model_type"]
454
+ var_model = params["passed_data"]["var_model"]
455
+ if get_data_from_file:
456
+ days_list = None
457
+ filename = model_type + ".pkl"
458
+ filename_path = emhass_conf["data_path"] / filename
459
+ with open(filename_path, "rb") as inp:
460
+ df_input_data, _, _, _ = pickle.load(inp)
461
+ df_input_data = df_input_data[
462
+ df_input_data.index[-1] - pd.offsets.Day(days_to_retrieve) :
463
+ ]
464
+ else:
465
+ days_list = utils.get_days_list(days_to_retrieve)
466
+ var_list = [var_model]
467
+ if not rh.get_data(days_list, var_list):
468
+ return False
469
+ df_input_data = rh.df_final.copy()
470
+ elif set_type == "regressor-model-fit" or set_type == "regressor-model-predict":
471
+ df_input_data, df_input_data_dayahead = None, None
472
+ P_PV_forecast, P_load_forecast = None, None
473
+ params = json.loads(params)
474
+ days_list = None
475
+ csv_file = params["passed_data"].get("csv_file", None)
476
+ if "features" in params["passed_data"]:
477
+ features = params["passed_data"]["features"]
478
+ if "target" in params["passed_data"]:
479
+ target = params["passed_data"]["target"]
480
+ if "timestamp" in params["passed_data"]:
481
+ timestamp = params["passed_data"]["timestamp"]
482
+ if csv_file:
483
+ if get_data_from_file:
484
+ base_path = emhass_conf["data_path"] # + "/data"
485
+ filename_path = pathlib.Path(base_path) / csv_file
486
+ else:
487
+ filename_path = emhass_conf["data_path"] / csv_file
488
+ if filename_path.is_file():
489
+ df_input_data = pd.read_csv(filename_path, parse_dates=True)
490
+ else:
491
+ logger.error(
492
+ "The CSV file "
493
+ + csv_file
494
+ + " was not found in path: "
495
+ + str(emhass_conf["data_path"])
496
+ )
497
+ return False
498
+ required_columns = []
499
+ required_columns.extend(features)
500
+ required_columns.append(target)
501
+ if timestamp is not None:
502
+ required_columns.append(timestamp)
503
+ if not set(required_columns).issubset(df_input_data.columns):
504
+ logger.error("The cvs file does not contain the required columns.")
505
+ msg = f"CSV file should contain the following columns: {', '.join(required_columns)}"
506
+ logger.error(msg)
507
+ return False
508
+ elif set_type == "publish-data":
509
+ df_input_data, df_input_data_dayahead = None, None
510
+ P_PV_forecast, P_load_forecast = None, None
511
+ days_list = None
512
+ else:
513
+ logger.error(
514
+ "The passed action argument and hence the set_type parameter for setup is not valid",
515
+ )
516
+ df_input_data, df_input_data_dayahead = None, None
517
+ P_PV_forecast, P_load_forecast = None, None
518
+ days_list = None
519
+ # The input data dictionary to return
520
+ input_data_dict = {
521
+ "emhass_conf": emhass_conf,
522
+ "retrieve_hass_conf": retrieve_hass_conf,
523
+ "rh": rh,
524
+ "opt": opt,
525
+ "fcst": fcst,
526
+ "df_input_data": df_input_data,
527
+ "df_input_data_dayahead": df_input_data_dayahead,
528
+ "P_PV_forecast": P_PV_forecast,
529
+ "P_load_forecast": P_load_forecast,
530
+ "costfun": costfun,
531
+ "params": params,
532
+ "days_list": days_list,
533
+ }
534
+ return input_data_dict
535
+
536
+
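The dictionary returned here is the single shared argument for every action function that follows. A minimal, hypothetical calling sketch for a day-ahead run; the emhass_conf paths, cost function and JSON payloads below are placeholders, not values taken from this package:

import json
import logging
import pathlib

from emhass.command_line import dayahead_forecast_optim, set_input_data_dict

logger = logging.getLogger("emhass")
emhass_conf = {
    "config_path": pathlib.Path("/share/config.json"),  # placeholder paths
    "data_path": pathlib.Path("/share/data"),
    "root_path": pathlib.Path("/app/src/emhass"),
}
params = json.dumps({"passed_data": {}})  # normally built upstream from options.json / config
runtimeparams = json.dumps({})            # optional runtime overrides

input_data_dict = set_input_data_dict(
    emhass_conf, "profit", params, runtimeparams, "dayahead-optim", logger
)
if input_data_dict:  # False is returned on any setup failure
    opt_res = dayahead_forecast_optim(input_data_dict, logger)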
537
+ def weather_forecast_cache(
538
+ emhass_conf: dict, params: str, runtimeparams: str, logger: logging.Logger
539
+ ) -> bool:
540
+ """
541
+ Perform a call to the get weather forecast function, intended to save the results to cache.
542
+
543
+ :param emhass_conf: Dictionary containing the needed emhass paths
544
+ :type emhass_conf: dict
545
+ :param params: Configuration parameters passed from data/options.json
546
+ :type params: str
547
+ :param runtimeparams: Runtime optimization parameters passed as a dictionary
548
+ :type runtimeparams: str
549
+ :param logger: The passed logger object
550
+ :type logger: logging object
551
+ :return: A bool for function completion
552
+ :rtype: bool
553
+
554
+ """
555
+ # Parsing yaml
556
+ retrieve_hass_conf, optim_conf, plant_conf = utils.get_yaml_parse(params, logger)
557
+ # Treat runtimeparams
558
+ params, retrieve_hass_conf, optim_conf, plant_conf = utils.treat_runtimeparams(
559
+ runtimeparams,
560
+ params,
561
+ retrieve_hass_conf,
562
+ optim_conf,
563
+ plant_conf,
564
+ "forecast",
565
+ logger,
566
+ emhass_conf,
567
+ )
568
+ # Make sure weather_forecast_cache is true
569
+ if (params is not None) and (params != "null"):
570
+ params = json.loads(params)
571
+ else:
572
+ params = {}
573
+ params["passed_data"]["weather_forecast_cache"] = True
574
+ params = json.dumps(params)
575
+ # Create Forecast object
576
+ fcst = Forecast(
577
+ retrieve_hass_conf, optim_conf, plant_conf, params, emhass_conf, logger
578
+ )
579
+ result = fcst.get_weather_forecast(optim_conf["weather_forecast_method"])
580
+ if isinstance(result, bool) and not result:
581
+ return False
582
+
583
+ return True
584
+
585
+
586
+ def perfect_forecast_optim(
587
+ input_data_dict: dict,
588
+ logger: logging.Logger,
589
+ save_data_to_file: bool | None = True,
590
+ debug: bool | None = False,
591
+ ) -> pd.DataFrame:
592
+ """
593
+ Perform a call to the perfect forecast optimization routine.
594
+
595
+ :param input_data_dict: A dictionary with multiple data used by the action functions
596
+ :type input_data_dict: dict
597
+ :param logger: The passed logger object
598
+ :type logger: logging object
599
+ :param save_data_to_file: Save optimization results to CSV file
600
+ :type save_data_to_file: bool, optional
601
+ :param debug: A debug option useful for unittests
602
+ :type debug: bool, optional
603
+ :return: The output data of the optimization
604
+ :rtype: pd.DataFrame
605
+
606
+ """
607
+ logger.info("Performing perfect forecast optimization")
608
+ # Load cost and prod price forecast
609
+ df_input_data = input_data_dict["fcst"].get_load_cost_forecast(
610
+ input_data_dict["df_input_data"],
611
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
612
+ list_and_perfect=True,
613
+ )
614
+ if isinstance(df_input_data, bool) and not df_input_data:
615
+ return False
616
+ df_input_data = input_data_dict["fcst"].get_prod_price_forecast(
617
+ df_input_data,
618
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
619
+ list_and_perfect=True,
620
+ )
621
+ if isinstance(df_input_data, bool) and not df_input_data:
622
+ return False
623
+ opt_res = input_data_dict["opt"].perform_perfect_forecast_optim(
624
+ df_input_data, input_data_dict["days_list"]
625
+ )
626
+ # Save CSV file for analysis
627
+ if save_data_to_file:
628
+ filename = "opt_res_perfect_optim_" + input_data_dict["costfun"] + ".csv"
629
+ else: # Just save the latest optimization results
630
+ filename = default_csv_filename
631
+ if not debug:
632
+ opt_res.to_csv(
633
+ input_data_dict["emhass_conf"]["data_path"] / filename,
634
+ index_label="timestamp",
635
+ )
636
+ if not isinstance(input_data_dict["params"], dict):
637
+ params = json.loads(input_data_dict["params"])
638
+ else:
639
+ params = input_data_dict["params"]
640
+
641
+ # if continual_publish, save perfect results to data_path/entities json
642
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
643
+ "passed_data"
644
+ ].get("entity_save", False):
645
+ # Trigger the publish function, save entity data and not post to HA
646
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
647
+
648
+ return opt_res
649
+
650
+
651
+ def dayahead_forecast_optim(
652
+ input_data_dict: dict,
653
+ logger: logging.Logger,
654
+ save_data_to_file: bool | None = False,
655
+ debug: bool | None = False,
656
+ ) -> pd.DataFrame:
657
+ """
658
+ Perform a call to the day-ahead optimization routine.
659
+
660
+ :param input_data_dict: A dictionary with multiple data used by the action functions
661
+ :type input_data_dict: dict
662
+ :param logger: The passed logger object
663
+ :type logger: logging object
664
+ :param save_data_to_file: Save optimization results to CSV file
665
+ :type save_data_to_file: bool, optional
666
+ :param debug: A debug option useful for unittests
667
+ :type debug: bool, optional
668
+ :return: The output data of the optimization
669
+ :rtype: pd.DataFrame
670
+
671
+ """
672
+ logger.info("Performing day-ahead forecast optimization")
673
+ # Load cost and prod price forecast
674
+ df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
675
+ input_data_dict["df_input_data_dayahead"],
676
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
677
+ )
678
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
679
+ return False
680
+ df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
681
+ df_input_data_dayahead,
682
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
683
+ )
684
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
685
+ return False
686
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
687
+ df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
688
+ "params"
689
+ ]["passed_data"]["outdoor_temperature_forecast"]
690
+ opt_res_dayahead = input_data_dict["opt"].perform_dayahead_forecast_optim(
691
+ df_input_data_dayahead,
692
+ input_data_dict["P_PV_forecast"],
693
+ input_data_dict["P_load_forecast"],
694
+ )
695
+ # Save CSV file for publish_data
696
+ if save_data_to_file:
697
+ today = datetime.now(UTC).replace(hour=0, minute=0, second=0, microsecond=0)
698
+ filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
699
+ else: # Just save the latest optimization results
700
+ filename = default_csv_filename
701
+ if not debug:
702
+ opt_res_dayahead.to_csv(
703
+ input_data_dict["emhass_conf"]["data_path"] / filename,
704
+ index_label="timestamp",
705
+ )
706
+
707
+ if not isinstance(input_data_dict["params"], dict):
708
+ params = json.loads(input_data_dict["params"])
709
+ else:
710
+ params = input_data_dict["params"]
711
+
712
+ # if continual_publish, save day_ahead results to data_path/entities json
713
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
714
+ "passed_data"
715
+ ].get("entity_save", False):
716
+ # Trigger the publish function, save entity data and not post to HA
717
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
718
+
719
+ return opt_res_dayahead
720
+
721
+
722
+ def naive_mpc_optim(
723
+ input_data_dict: dict,
724
+ logger: logging.Logger,
725
+ save_data_to_file: bool | None = False,
726
+ debug: bool | None = False,
727
+ ) -> pd.DataFrame:
728
+ """
729
+ Perform a call to the naive Model Predictive Controller optimization routine.
730
+
731
+ :param input_data_dict: A dictionary with multiple data used by the action functions
732
+ :type input_data_dict: dict
733
+ :param logger: The passed logger object
734
+ :type logger: logging object
735
+ :param save_data_to_file: Save optimization results to CSV file
736
+ :type save_data_to_file: bool, optional
737
+ :param debug: A debug option useful for unittests
738
+ :type debug: bool, optional
739
+ :return: The output data of the optimization
740
+ :rtype: pd.DataFrame
741
+
742
+ """
743
+ logger.info("Performing naive MPC optimization")
744
+ # Load cost and prod price forecast
745
+ df_input_data_dayahead = input_data_dict["fcst"].get_load_cost_forecast(
746
+ input_data_dict["df_input_data_dayahead"],
747
+ method=input_data_dict["fcst"].optim_conf["load_cost_forecast_method"],
748
+ )
749
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
750
+ return False
751
+ df_input_data_dayahead = input_data_dict["fcst"].get_prod_price_forecast(
752
+ df_input_data_dayahead,
753
+ method=input_data_dict["fcst"].optim_conf["production_price_forecast_method"],
754
+ )
755
+ if isinstance(df_input_data_dayahead, bool) and not df_input_data_dayahead:
756
+ return False
757
+ if "outdoor_temperature_forecast" in input_data_dict["params"]["passed_data"]:
758
+ df_input_data_dayahead["outdoor_temperature_forecast"] = input_data_dict[
759
+ "params"
760
+ ]["passed_data"]["outdoor_temperature_forecast"]
761
+ # The specifics params for the MPC at runtime
762
+ prediction_horizon = input_data_dict["params"]["passed_data"]["prediction_horizon"]
763
+ soc_init = input_data_dict["params"]["passed_data"]["soc_init"]
764
+ soc_final = input_data_dict["params"]["passed_data"]["soc_final"]
765
+ def_total_hours = input_data_dict["params"]["optim_conf"].get(
766
+ "operating_hours_of_each_deferrable_load", None
767
+ )
768
+ def_total_timestep = input_data_dict["params"]["optim_conf"].get(
769
+ "operating_timesteps_of_each_deferrable_load", None
770
+ )
771
+ def_start_timestep = input_data_dict["params"]["optim_conf"][
772
+ "start_timesteps_of_each_deferrable_load"
773
+ ]
774
+ def_end_timestep = input_data_dict["params"]["optim_conf"][
775
+ "end_timesteps_of_each_deferrable_load"
776
+ ]
777
+ opt_res_naive_mpc = input_data_dict["opt"].perform_naive_mpc_optim(
778
+ df_input_data_dayahead,
779
+ input_data_dict["P_PV_forecast"],
780
+ input_data_dict["P_load_forecast"],
781
+ prediction_horizon,
782
+ soc_init,
783
+ soc_final,
784
+ def_total_hours,
785
+ def_total_timestep,
786
+ def_start_timestep,
787
+ def_end_timestep,
788
+ )
789
+ # Save CSV file for publish_data
790
+ if save_data_to_file:
791
+ today = datetime.now(UTC).replace(hour=0, minute=0, second=0, microsecond=0)
792
+ filename = "opt_res_naive_mpc_" + today.strftime("%Y_%m_%d") + ".csv"
793
+ else: # Just save the latest optimization results
794
+ filename = default_csv_filename
795
+ if not debug:
796
+ opt_res_naive_mpc.to_csv(
797
+ input_data_dict["emhass_conf"]["data_path"] / filename,
798
+ index_label="timestamp",
799
+ )
800
+
801
+ if not isinstance(input_data_dict["params"], dict):
802
+ params = json.loads(input_data_dict["params"])
803
+ else:
804
+ params = input_data_dict["params"]
805
+
806
+ # if continual_publish, save mpc results to data_path/entities json
807
+ if input_data_dict["retrieve_hass_conf"].get("continual_publish", False) or params[
808
+ "passed_data"
809
+ ].get("entity_save", False):
810
+ # Trigger the publish function, save entity data and not post to HA
811
+ publish_data(input_data_dict, logger, entity_save=True, dont_post=True)
812
+
813
+ return opt_res_naive_mpc
814
+
815
+
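The MPC-specific values read from params['passed_data'] and params['optim_conf'] above are normally supplied at runtime. A hypothetical runtimeparams payload for a naive-mpc-optim call; the key names mirror the lookups in this function, while the numeric values are purely illustrative:

import json

runtimeparams = json.dumps({
    "prediction_horizon": 10,  # number of timesteps covered by the MPC window
    "soc_init": 0.4,           # battery state of charge at the first timestep
    "soc_final": 0.6,          # battery state of charge targeted at the last timestep
    "operating_hours_of_each_deferrable_load": [2, 1],
    "start_timesteps_of_each_deferrable_load": [0, 0],
    "end_timesteps_of_each_deferrable_load": [0, 0],
})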
816
+ def forecast_model_fit(
817
+ input_data_dict: dict, logger: logging.Logger, debug: bool | None = False
818
+ ) -> tuple[pd.DataFrame, pd.DataFrame, MLForecaster]:
819
+ """Perform a forecast model fit from training data retrieved from Home Assistant.
820
+
821
+ :param input_data_dict: A dictionary with multiple data used by the action functions
822
+ :type input_data_dict: dict
823
+ :param logger: The passed logger object
824
+ :type logger: logging.Logger
825
+ :param debug: True to debug, useful for unit testing, defaults to False
826
+ :type debug: Optional[bool], optional
827
+ :return: The DataFrames containing the forecast results without and with backtest, and the `mlforecaster` object
828
+ :rtype: Tuple[pd.DataFrame, pd.DataFrame, mlforecaster]
829
+ """
830
+ data = copy.deepcopy(input_data_dict["df_input_data"])
831
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
832
+ var_model = input_data_dict["params"]["passed_data"]["var_model"]
833
+ sklearn_model = input_data_dict["params"]["passed_data"]["sklearn_model"]
834
+ num_lags = input_data_dict["params"]["passed_data"]["num_lags"]
835
+ split_date_delta = input_data_dict["params"]["passed_data"]["split_date_delta"]
836
+ perform_backtest = input_data_dict["params"]["passed_data"]["perform_backtest"]
837
+ # The ML forecaster object
838
+ mlf = MLForecaster(
839
+ data,
840
+ model_type,
841
+ var_model,
842
+ sklearn_model,
843
+ num_lags,
844
+ input_data_dict["emhass_conf"],
845
+ logger,
846
+ )
847
+ # Fit the ML model
848
+ df_pred, df_pred_backtest = mlf.fit(
849
+ split_date_delta=split_date_delta, perform_backtest=perform_backtest
850
+ )
851
+ # Save model
852
+ if not debug:
853
+ filename = model_type + default_pkl_suffix
854
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
855
+ with open(filename_path, "wb") as outp:
856
+ pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
857
+ logger.debug("saved model to " + str(filename_path))
858
+ return df_pred, df_pred_backtest, mlf
859
+
860
+
861
+ def forecast_model_predict(
862
+ input_data_dict: dict,
863
+ logger: logging.Logger,
864
+ use_last_window: bool | None = True,
865
+ debug: bool | None = False,
866
+ mlf: MLForecaster | None = None,
867
+ ) -> pd.DataFrame:
868
+ r"""Perform a forecast model predict using a previously trained skforecast model.
869
+
870
+ :param input_data_dict: A dictionary with multiple data used by the action functions
871
+ :type input_data_dict: dict
872
+ :param logger: The passed logger object
873
+ :type logger: logging.Logger
874
+ :param use_last_window: True if the 'last_window' option should be used for the \
875
+ custom machine learning forecast model. The 'last_window=True' means that the data \
876
+ that will be used to generate the new forecast will be freshly retrieved from \
877
+ Home Assistant. This data is needed because the forecast model is an auto-regressive \
878
+ model with lags. If 'False' then the data used during model training is used. Defaults to True
879
+ :type use_last_window: Optional[bool], optional
880
+ :param debug: True to debug, useful for unit testing, defaults to False
881
+ :type debug: Optional[bool], optional
882
+ :param mlf: The 'mlforecaster' object previously trained. This is mainly used for debug \
883
+ and unit testing. In production the actual model will be read from a saved pickle file. Defaults to None
884
+ :type mlf: Optional[mlforecaster], optional
885
+ :return: The DataFrame containing the forecast prediction data
886
+ :rtype: pd.DataFrame
887
+ """
888
+ # Load model
889
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
890
+ filename = model_type + default_pkl_suffix
891
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
892
+ if not debug:
893
+ if filename_path.is_file():
894
+ with open(filename_path, "rb") as inp:
895
+ mlf = pickle.load(inp)
896
+ logger.debug("loaded saved model from " + str(filename_path))
897
+ else:
898
+ logger.error(
899
+ "The ML forecaster file ("
900
+ + str(filename_path)
901
+ + ") was not found, please run a model fit method before this predict method",
902
+ )
903
+ return
904
+ # Make predictions
905
+ if use_last_window:
906
+ data_last_window = copy.deepcopy(input_data_dict["df_input_data"])
907
+ else:
908
+ data_last_window = None
909
+ predictions = mlf.predict(data_last_window)
910
+ # Publish data to a Home Assistant sensor
911
+ model_predict_publish = input_data_dict["params"]["passed_data"][
912
+ "model_predict_publish"
913
+ ]
914
+ model_predict_entity_id = input_data_dict["params"]["passed_data"][
915
+ "model_predict_entity_id"
916
+ ]
917
+ model_predict_device_class = input_data_dict["params"]["passed_data"][
918
+ "model_predict_device_class"
919
+ ]
920
+ model_predict_unit_of_measurement = input_data_dict["params"]["passed_data"][
921
+ "model_predict_unit_of_measurement"
922
+ ]
923
+ model_predict_friendly_name = input_data_dict["params"]["passed_data"][
924
+ "model_predict_friendly_name"
925
+ ]
926
+ publish_prefix = input_data_dict["params"]["passed_data"]["publish_prefix"]
927
+ if model_predict_publish is True:
928
+ # Estimate the current index
929
+ now_precise = datetime.now(
930
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
931
+ ).replace(second=0, microsecond=0)
932
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
933
+ idx_closest = predictions.index.get_indexer(
934
+ [now_precise], method="nearest"
935
+ )[0]
936
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
937
+ idx_closest = predictions.index.get_indexer([now_precise], method="ffill")[
938
+ 0
939
+ ]
940
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
941
+ idx_closest = predictions.index.get_indexer([now_precise], method="bfill")[
942
+ 0
943
+ ]
944
+ if idx_closest == -1:
945
+ idx_closest = predictions.index.get_indexer(
946
+ [now_precise], method="nearest"
947
+ )[0]
948
+ # Publish Load forecast
949
+ input_data_dict["rh"].post_data(
950
+ predictions,
951
+ idx_closest,
952
+ model_predict_entity_id,
953
+ model_predict_device_class,
954
+ model_predict_unit_of_measurement,
955
+ model_predict_friendly_name,
956
+ type_var="mlforecaster",
957
+ publish_prefix=publish_prefix,
958
+ )
959
+ return predictions
960
+
961
+
962
+ def forecast_model_tune(
963
+ input_data_dict: dict,
964
+ logger: logging.Logger,
965
+ debug: bool | None = False,
966
+ mlf: MLForecaster | None = None,
967
+ ) -> tuple[pd.DataFrame, MLForecaster]:
968
+ """Tune a forecast model hyperparameters using bayesian optimization.
969
+
970
+ :param input_data_dict: A dictionary with multiple data used by the action functions
971
+ :type input_data_dict: dict
972
+ :param logger: The passed logger object
973
+ :type logger: logging.Logger
974
+ :param debug: True to debug, useful for unit testing, defaults to False
975
+ :type debug: Optional[bool], optional
976
+ :param mlf: The 'mlforecaster' object previously trained. This is mainly used for debug \
977
+ and unit testing. In production the actual model will be read from a saved pickle file. Defaults to None
978
+ :type mlf: Optional[mlforecaster], optional
979
+ :return: The DataFrame containing the forecast data results using the optimized model
980
+ :rtype: pd.DataFrame
981
+ """
982
+ # Load model
983
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
984
+ filename = model_type + default_pkl_suffix
985
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
986
+ if not debug:
987
+ if filename_path.is_file():
988
+ with open(filename_path, "rb") as inp:
989
+ mlf = pickle.load(inp)
990
+ logger.debug("loaded saved model from " + str(filename_path))
991
+ else:
992
+ logger.error(
993
+ "The ML forecaster file ("
994
+ + str(filename_path)
995
+ + ") was not found, please run a model fit method before this tune method",
996
+ )
997
+ return None, None
998
+ # Tune the model
999
+ df_pred_optim = mlf.tune(debug=debug)
1000
+ # Save model
1001
+ if not debug:
1002
+ filename = model_type + default_pkl_suffix
1003
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
1004
+ with open(filename_path, "wb") as outp:
1005
+ pickle.dump(mlf, outp, pickle.HIGHEST_PROTOCOL)
1006
+ logger.debug("Saved model to " + str(filename_path))
1007
+ return df_pred_optim, mlf
1008
+
1009
+
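forecast_model_fit, forecast_model_predict and forecast_model_tune are designed to be chained on the same pickled model. A hypothetical sequence, assuming input_data_dict was built with set_type='forecast-model-fit' and passed_data carrying model_type, var_model, sklearn_model, num_lags, split_date_delta and perform_backtest:

import logging

from emhass.command_line import (
    forecast_model_fit,
    forecast_model_predict,
    forecast_model_tune,
)

logger = logging.getLogger("emhass")
# Fit the model and persist <model_type>_mlf.pkl into data_path
df_pred, df_pred_backtest, mlf = forecast_model_fit(input_data_dict, logger)
# Predict, using freshly retrieved data as the last window, and optionally publish
predictions = forecast_model_predict(input_data_dict, logger, use_last_window=True)
# Tune the hyperparameters and save the tuned model back to the same pickle
df_pred_optim, mlf_tuned = forecast_model_tune(input_data_dict, logger)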
1010
+ def regressor_model_fit(
1011
+ input_data_dict: dict, logger: logging.Logger, debug: bool | None = False
1012
+ ) -> MLRegressor:
1013
+ """Perform a forecast model fit from training data retrieved from Home Assistant.
1014
+
1015
+ :param input_data_dict: A dictionary with multiple data used by the action functions
1016
+ :type input_data_dict: dict
1017
+ :param logger: The passed logger object
1018
+ :type logger: logging.Logger
1019
+ :param debug: True to debug, useful for unit testing, defaults to False
1020
+ :type debug: Optional[bool], optional
1021
+ """
1022
+ data = copy.deepcopy(input_data_dict["df_input_data"])
1023
+ if "model_type" in input_data_dict["params"]["passed_data"]:
1024
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
1025
+ else:
1026
+ logger.error("parameter: 'model_type' not passed")
1027
+ return False
1028
+ if "regression_model" in input_data_dict["params"]["passed_data"]:
1029
+ regression_model = input_data_dict["params"]["passed_data"]["regression_model"]
1030
+ else:
1031
+ logger.error("parameter: 'regression_model' not passed")
1032
+ return False
1033
+ if "features" in input_data_dict["params"]["passed_data"]:
1034
+ features = input_data_dict["params"]["passed_data"]["features"]
1035
+ else:
1036
+ logger.error("parameter: 'features' not passed")
1037
+ return False
1038
+ if "target" in input_data_dict["params"]["passed_data"]:
1039
+ target = input_data_dict["params"]["passed_data"]["target"]
1040
+ else:
1041
+ logger.error("parameter: 'target' not passed")
1042
+ return False
1043
+ if "timestamp" in input_data_dict["params"]["passed_data"]:
1044
+ timestamp = input_data_dict["params"]["passed_data"]["timestamp"]
1045
+ else:
1046
+ logger.error("parameter: 'timestamp' not passed")
1047
+ return False
1048
+ if "date_features" in input_data_dict["params"]["passed_data"]:
1049
+ date_features = input_data_dict["params"]["passed_data"]["date_features"]
1050
+ else:
1051
+ logger.error("parameter: 'date_features' not passed")
1052
+ return False
1053
+ # The MLRegressor object
1054
+ mlr = MLRegressor(
1055
+ data, model_type, regression_model, features, target, timestamp, logger
1056
+ )
1057
+ # Fit the ML model
1058
+ fit = mlr.fit(date_features=date_features)
1059
+ if not fit:
1060
+ return False
1061
+ # Save model
1062
+ if not debug:
1063
+ filename = model_type + "_mlr.pkl"
1064
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
1065
+ with open(filename_path, "wb") as outp:
1066
+ pickle.dump(mlr, outp, pickle.HIGHEST_PROTOCOL)
1067
+ return mlr
1068
+
1069
+
1070
+ def regressor_model_predict(
1071
+ input_data_dict: dict,
1072
+ logger: logging.Logger,
1073
+ debug: bool | None = False,
1074
+ mlr: MLRegressor | None = None,
1075
+ ) -> np.ndarray:
1076
+ """Perform a prediction from csv file.
1077
+
1078
+ :param input_data_dict: A dictionary with multiple data used by the action functions
1079
+ :type input_data_dict: dict
1080
+ :param logger: The passed logger object
1081
+ :type logger: logging.Logger
1082
+ :param debug: True to debug, useful for unit testing, defaults to False
1083
+ :type debug: Optional[bool], optional
1084
+ """
1085
+ if "model_type" in input_data_dict["params"]["passed_data"]:
1086
+ model_type = input_data_dict["params"]["passed_data"]["model_type"]
1087
+ else:
1088
+ logger.error("parameter: 'model_type' not passed")
1089
+ return False
1090
+ filename = model_type + "_mlr.pkl"
1091
+ filename_path = input_data_dict["emhass_conf"]["data_path"] / filename
1092
+ if not debug:
1093
+ if filename_path.is_file():
1094
+ with open(filename_path, "rb") as inp:
1095
+ mlr = pickle.load(inp)
1096
+ else:
1097
+ logger.error(
1098
+ "The ML forecaster file was not found, please run a model fit method before this predict method",
1099
+ )
1100
+ return False
1101
+ if "new_values" in input_data_dict["params"]["passed_data"]:
1102
+ new_values = input_data_dict["params"]["passed_data"]["new_values"]
1103
+ else:
1104
+ logger.error("parameter: 'new_values' not passed")
1105
+ return False
1106
+ # Predict from csv file
1107
+ prediction = mlr.predict(new_values)
1108
+ mlr_predict_entity_id = input_data_dict["params"]["passed_data"].get(
1109
+ "mlr_predict_entity_id", "sensor.mlr_predict"
1110
+ )
1111
+ mlr_predict_device_class = input_data_dict["params"]["passed_data"].get(
1112
+ "mlr_predict_device_class", "power"
1113
+ )
1114
+ mlr_predict_unit_of_measurement = input_data_dict["params"]["passed_data"].get(
1115
+ "mlr_predict_unit_of_measurement", "W"
1116
+ )
1117
+ mlr_predict_friendly_name = input_data_dict["params"]["passed_data"].get(
1118
+ "mlr_predict_friendly_name", "mlr predictor"
1119
+ )
1120
+ # Publish prediction
1121
+ idx = 0
1122
+ if not debug:
1123
+ input_data_dict["rh"].post_data(
1124
+ prediction,
1125
+ idx,
1126
+ mlr_predict_entity_id,
1127
+ mlr_predict_device_class,
1128
+ mlr_predict_unit_of_measurement,
1129
+ mlr_predict_friendly_name,
1130
+ type_var="mlregressor",
1131
+ )
1132
+ return prediction
1133
+
1134
+
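Both regressor actions are driven entirely by passed_data and a training CSV placed in data_path. A hypothetical runtime payload; the file name, column names and values below are illustrative and would have to match the actual CSV:

import json

runtimeparams = json.dumps({
    "csv_file": "heating_prediction.csv",    # training data located in data_path
    "features": ["degreeday", "solar"],      # feature columns of the CSV
    "target": "hours",                       # target column of the CSV
    "timestamp": "timestamp",                # timestamp column, if any
    "date_features": ["month", "day_of_week"],
    "model_type": "heating_hours_degreeday", # used to name <model_type>_mlr.pkl
    "regression_model": "AdaBoostRegression",
    "new_values": [12.79, 4.766, 1, 2],      # inputs for the predict action
})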
1135
+ def publish_data(
1136
+ input_data_dict: dict,
1137
+ logger: logging.Logger,
1138
+ save_data_to_file: bool | None = False,
1139
+ opt_res_latest: pd.DataFrame | None = None,
1140
+ entity_save: bool | None = False,
1141
+ dont_post: bool | None = False,
1142
+ ) -> pd.DataFrame:
1143
+ """
1144
+ Publish the data obtained from the optimization results.
1145
+
1146
+ :param input_data_dict: A dictionary with multiple data used by the action functions
1147
+ :type input_data_dict: dict
1148
+ :param logger: The passed logger object
1149
+ :type logger: logging object
1150
+ :param save_data_to_file: If True, read the optimization results from the day-ahead CSV file
1151
+ :type save_data_to_file: bool, optional
1152
+ :return: The output data of the optimization, read from a CSV file in the data folder
1153
+ :rtype: pd.DataFrame
1154
+ :param entity_save: Save built entities to data_path/entities
1155
+ :type entity_save: bool, optional
1156
+ :param dont_post: Do not post to Home Assistant. Works with entity_save
1157
+ :type dont_post: bool, optional
1158
+
1159
+ """
1160
+ logger.info("Publishing data to HASS instance")
1161
+ if input_data_dict:
1162
+ if not isinstance(input_data_dict.get("params", {}), dict):
1163
+ params = json.loads(input_data_dict["params"])
1164
+ else:
1165
+ params = input_data_dict.get("params", {})
1166
+
1167
+ # Check if a day ahead optimization has been performed (read CSV file)
1168
+ if save_data_to_file:
1169
+ today = datetime.now(UTC).replace(hour=0, minute=0, second=0, microsecond=0)
1170
+ filename = "opt_res_dayahead_" + today.strftime("%Y_%m_%d") + ".csv"
1171
+ # If publish_prefix is passed, check if there are saved entities in data_path/entities with that prefix and publish them to the results
1172
+ elif params["passed_data"].get("publish_prefix", "") != "" and not dont_post:
1173
+ opt_res_list = []
1174
+ opt_res_list_names = []
1175
+ publish_prefix = params["passed_data"]["publish_prefix"]
1176
+ entity_path = input_data_dict["emhass_conf"]["data_path"] / "entities"
1177
+ # Check if items in entity_path
1178
+ if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
1179
+ # Obtain all files in entity_path
1180
+ entity_path_contents = os.listdir(entity_path)
1181
+ # Confirm the entity path contains at least one file containing publish prefix or publish_prefix='all'
1182
+ if (
1183
+ any(publish_prefix in entity for entity in entity_path_contents)
1184
+ or publish_prefix == "all"
1185
+ ):
1186
+ # Loop through all items in entity path
1187
+ for entity in entity_path_contents:
1188
+ # If publish_prefix is "all" publish all saved entities to Home Assistant
1189
+ # If publish_prefix matches the prefix from saved entities, publish to Home Assistant
1190
+ if entity != default_metadata_json and (
1191
+ publish_prefix in entity or publish_prefix == "all"
1192
+ ):
1193
+ entity_data = publish_json(
1194
+ entity, input_data_dict, entity_path, logger
1195
+ )
1196
+ if not isinstance(entity_data, bool):
1197
+ opt_res_list.append(entity_data)
1198
+ opt_res_list_names.append(entity.replace(".json", ""))
1199
+ else:
1200
+ return False
1201
+ # Build a DataFrame with published entities
1202
+ opt_res = pd.concat(opt_res_list, axis=1)
1203
+ opt_res.columns = opt_res_list_names
1204
+ return opt_res
1205
+ else:
1206
+ logger.warning(
1207
+ "No saved entity json files that match prefix: "
1208
+ + str(publish_prefix)
1209
+ )
1210
+ logger.warning("Falling back to opt_res_latest")
1211
+ else:
1212
+ logger.warning("No saved entity json files in path:" + str(entity_path))
1213
+ logger.warning("Falling back to opt_res_latest")
1214
+ filename = default_csv_filename
1215
+ else:
1216
+ filename = default_csv_filename
1217
+ if opt_res_latest is None:
1218
+ if not os.path.isfile(input_data_dict["emhass_conf"]["data_path"] / filename):
1219
+ logger.error("File not found error, run an optimization task first.")
1220
+ return
1221
+ else:
1222
+ opt_res_latest = pd.read_csv(
1223
+ input_data_dict["emhass_conf"]["data_path"] / filename,
1224
+ index_col="timestamp",
1225
+ )
1226
+ opt_res_latest.index = pd.to_datetime(opt_res_latest.index)
1227
+ opt_res_latest.index.freq = input_data_dict["retrieve_hass_conf"][
1228
+ "optimization_time_step"
1229
+ ]
1230
+ # Estimate the current index
1231
+ now_precise = datetime.now(
1232
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1233
+ ).replace(second=0, microsecond=0)
1234
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
1235
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
1236
+ 0
1237
+ ]
1238
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
1239
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="ffill")[0]
1240
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
1241
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="bfill")[0]
1242
+ if idx_closest == -1:
1243
+ idx_closest = opt_res_latest.index.get_indexer([now_precise], method="nearest")[
1244
+ 0
1245
+ ]
1246
+ # Publish the data
1247
+ publish_prefix = params["passed_data"]["publish_prefix"]
1248
+ # Publish PV forecast
1249
+ custom_pv_forecast_id = params["passed_data"]["custom_pv_forecast_id"]
1250
+ input_data_dict["rh"].post_data(
1251
+ opt_res_latest["P_PV"],
1252
+ idx_closest,
1253
+ custom_pv_forecast_id["entity_id"],
1254
+ "power",
1255
+ custom_pv_forecast_id["unit_of_measurement"],
1256
+ custom_pv_forecast_id["friendly_name"],
1257
+ type_var="power",
1258
+ publish_prefix=publish_prefix,
1259
+ save_entities=entity_save,
1260
+ dont_post=dont_post,
1261
+ )
1262
+ # Publish Load forecast
1263
+ custom_load_forecast_id = params["passed_data"]["custom_load_forecast_id"]
1264
+ input_data_dict["rh"].post_data(
1265
+ opt_res_latest["P_Load"],
1266
+ idx_closest,
1267
+ custom_load_forecast_id["entity_id"],
1268
+ "power",
1269
+ custom_load_forecast_id["unit_of_measurement"],
1270
+ custom_load_forecast_id["friendly_name"],
1271
+ type_var="power",
1272
+ publish_prefix=publish_prefix,
1273
+ save_entities=entity_save,
1274
+ dont_post=dont_post,
1275
+ )
1276
+ cols_published = ["P_PV", "P_Load"]
1277
+ # Publish PV curtailment
1278
+ if input_data_dict["fcst"].plant_conf["compute_curtailment"]:
1279
+ custom_pv_curtailment_id = params["passed_data"]["custom_pv_curtailment_id"]
1280
+ input_data_dict["rh"].post_data(
1281
+ opt_res_latest["P_PV_curtailment"],
1282
+ idx_closest,
1283
+ custom_pv_curtailment_id["entity_id"],
1284
+ "power",
1285
+ custom_pv_curtailment_id["unit_of_measurement"],
1286
+ custom_pv_curtailment_id["friendly_name"],
1287
+ type_var="power",
1288
+ publish_prefix=publish_prefix,
1289
+ save_entities=entity_save,
1290
+ dont_post=dont_post,
1291
+ )
1292
+ cols_published = cols_published + ["P_PV_curtailment"]
1293
+ # Publish P_hybrid_inverter
1294
+ if input_data_dict["fcst"].plant_conf["inverter_is_hybrid"]:
1295
+ custom_hybrid_inverter_id = params["passed_data"]["custom_hybrid_inverter_id"]
1296
+ input_data_dict["rh"].post_data(
1297
+ opt_res_latest["P_hybrid_inverter"],
1298
+ idx_closest,
1299
+ custom_hybrid_inverter_id["entity_id"],
1300
+ "power",
1301
+ custom_hybrid_inverter_id["unit_of_measurement"],
1302
+ custom_hybrid_inverter_id["friendly_name"],
1303
+ type_var="power",
1304
+ publish_prefix=publish_prefix,
1305
+ save_entities=entity_save,
1306
+ dont_post=dont_post,
1307
+ )
1308
+ cols_published = cols_published + ["P_hybrid_inverter"]
1309
+ # Publish deferrable loads
1310
+ custom_deferrable_forecast_id = params["passed_data"][
1311
+ "custom_deferrable_forecast_id"
1312
+ ]
1313
+ for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
1314
+ if f"P_deferrable{k}" not in opt_res_latest.columns:
1315
+ logger.error(
1316
+ f"P_deferrable{k}"
1317
+ + " was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
1318
+ )
1319
+ else:
1320
+ input_data_dict["rh"].post_data(
1321
+ opt_res_latest[f"P_deferrable{k}"],
1322
+ idx_closest,
1323
+ custom_deferrable_forecast_id[k]["entity_id"],
1324
+ "power",
1325
+ custom_deferrable_forecast_id[k]["unit_of_measurement"],
1326
+ custom_deferrable_forecast_id[k]["friendly_name"],
1327
+ type_var="deferrable",
1328
+ publish_prefix=publish_prefix,
1329
+ save_entities=entity_save,
1330
+ dont_post=dont_post,
1331
+ )
1332
+ cols_published = cols_published + [f"P_deferrable{k}"]
1333
+ # Publish thermal model data (predicted temperature)
1334
+ custom_predicted_temperature_id = params["passed_data"][
1335
+ "custom_predicted_temperature_id"
1336
+ ]
1337
+ for k in range(input_data_dict["opt"].optim_conf["number_of_deferrable_loads"]):
1338
+ if "def_load_config" in input_data_dict["opt"].optim_conf.keys():
1339
+ if (
1340
+ "thermal_config"
1341
+ in input_data_dict["opt"].optim_conf["def_load_config"][k]
1342
+ ):
1343
+ input_data_dict["rh"].post_data(
1344
+ opt_res_latest[f"predicted_temp_heater{k}"],
1345
+ idx_closest,
1346
+ custom_predicted_temperature_id[k]["entity_id"],
1347
+ "temperature",
1348
+ custom_predicted_temperature_id[k]["unit_of_measurement"],
1349
+ custom_predicted_temperature_id[k]["friendly_name"],
1350
+ type_var="temperature",
1351
+ publish_prefix=publish_prefix,
1352
+ save_entities=entity_save,
1353
+ dont_post=dont_post,
1354
+ )
1355
+ cols_published = cols_published + [f"predicted_temp_heater{k}"]
1356
+ # Publish battery power
1357
+ if input_data_dict["opt"].optim_conf["set_use_battery"]:
1358
+ if "P_batt" not in opt_res_latest.columns:
1359
+ logger.error(
1360
+ "P_batt was not found in results DataFrame. Optimization task may need to be relaunched or it did not converge to a solution.",
1361
+ )
1362
+ else:
1363
+ custom_batt_forecast_id = params["passed_data"]["custom_batt_forecast_id"]
1364
+ input_data_dict["rh"].post_data(
1365
+ opt_res_latest["P_batt"],
1366
+ idx_closest,
1367
+ custom_batt_forecast_id["entity_id"],
1368
+ "power",
1369
+ custom_batt_forecast_id["unit_of_measurement"],
1370
+ custom_batt_forecast_id["friendly_name"],
1371
+ type_var="batt",
1372
+ publish_prefix=publish_prefix,
1373
+ save_entities=entity_save,
1374
+ dont_post=dont_post,
1375
+ )
1376
+ cols_published = cols_published + ["P_batt"]
1377
+ custom_batt_soc_forecast_id = params["passed_data"][
1378
+ "custom_batt_soc_forecast_id"
1379
+ ]
1380
+ input_data_dict["rh"].post_data(
1381
+ opt_res_latest["SOC_opt"] * 100,
1382
+ idx_closest,
1383
+ custom_batt_soc_forecast_id["entity_id"],
1384
+ "battery",
1385
+ custom_batt_soc_forecast_id["unit_of_measurement"],
1386
+ custom_batt_soc_forecast_id["friendly_name"],
1387
+ type_var="SOC",
1388
+ publish_prefix=publish_prefix,
1389
+ save_entities=entity_save,
1390
+ dont_post=dont_post,
1391
+ )
1392
+ cols_published = cols_published + ["SOC_opt"]
1393
+ # Publish grid power
1394
+ custom_grid_forecast_id = params["passed_data"]["custom_grid_forecast_id"]
1395
+ input_data_dict["rh"].post_data(
1396
+ opt_res_latest["P_grid"],
1397
+ idx_closest,
1398
+ custom_grid_forecast_id["entity_id"],
1399
+ "power",
1400
+ custom_grid_forecast_id["unit_of_measurement"],
1401
+ custom_grid_forecast_id["friendly_name"],
1402
+ type_var="power",
1403
+ publish_prefix=publish_prefix,
1404
+ save_entities=entity_save,
1405
+ dont_post=dont_post,
1406
+ )
1407
+ cols_published = cols_published + ["P_grid"]
1408
+ # Publish total value of cost function
1409
+ custom_cost_fun_id = params["passed_data"]["custom_cost_fun_id"]
1410
+ col_cost_fun = [i for i in opt_res_latest.columns if "cost_fun_" in i]
1411
+ input_data_dict["rh"].post_data(
1412
+ opt_res_latest[col_cost_fun],
1413
+ idx_closest,
1414
+ custom_cost_fun_id["entity_id"],
1415
+ "monetary",
1416
+ custom_cost_fun_id["unit_of_measurement"],
1417
+ custom_cost_fun_id["friendly_name"],
1418
+ type_var="cost_fun",
1419
+ publish_prefix=publish_prefix,
1420
+ save_entities=entity_save,
1421
+ dont_post=dont_post,
1422
+ )
1423
+ # cols_published = cols_published + col_cost_fun
1424
+ # Publish the optimization status
1425
+ custom_cost_fun_id = params["passed_data"]["custom_optim_status_id"]
1426
+ if "optim_status" not in opt_res_latest:
1427
+ opt_res_latest["optim_status"] = "Optimal"
1428
+ logger.warning(
1429
+ "no optim_status in opt_res_latest, run an optimization task first",
1430
+ )
1431
+ else:
1432
+ input_data_dict["rh"].post_data(
1433
+ opt_res_latest["optim_status"],
1434
+ idx_closest,
1435
+ custom_cost_fun_id["entity_id"],
1436
+ "",
1437
+ custom_cost_fun_id["unit_of_measurement"],
1438
+ custom_cost_fun_id["friendly_name"],
1439
+ type_var="optim_status",
1440
+ publish_prefix=publish_prefix,
1441
+ save_entities=entity_save,
1442
+ dont_post=dont_post,
1443
+ )
1444
+ cols_published = cols_published + ["optim_status"]
1445
+ # Publish unit_load_cost
1446
+ custom_unit_load_cost_id = params["passed_data"]["custom_unit_load_cost_id"]
1447
+ input_data_dict["rh"].post_data(
1448
+ opt_res_latest["unit_load_cost"],
1449
+ idx_closest,
1450
+ custom_unit_load_cost_id["entity_id"],
1451
+ "monetary",
1452
+ custom_unit_load_cost_id["unit_of_measurement"],
1453
+ custom_unit_load_cost_id["friendly_name"],
1454
+ type_var="unit_load_cost",
1455
+ publish_prefix=publish_prefix,
1456
+ save_entities=entity_save,
1457
+ dont_post=dont_post,
1458
+ )
1459
+ cols_published = cols_published + ["unit_load_cost"]
1460
+ # Publish unit_prod_price
1461
+ custom_unit_prod_price_id = params["passed_data"]["custom_unit_prod_price_id"]
1462
+ input_data_dict["rh"].post_data(
1463
+ opt_res_latest["unit_prod_price"],
1464
+ idx_closest,
1465
+ custom_unit_prod_price_id["entity_id"],
1466
+ "monetary",
1467
+ custom_unit_prod_price_id["unit_of_measurement"],
1468
+ custom_unit_prod_price_id["friendly_name"],
1469
+ type_var="unit_prod_price",
1470
+ publish_prefix=publish_prefix,
1471
+ save_entities=entity_save,
1472
+ dont_post=dont_post,
1473
+ )
1474
+ cols_published = cols_published + ["unit_prod_price"]
1475
+ # Create a DataFrame summarizing what has been published
1476
+ opt_res = opt_res_latest[cols_published].loc[[opt_res_latest.index[idx_closest]]]
1477
+ return opt_res
1478
+
1479
+
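For context, every custom_*_id descriptor that publish_data() reads from params["passed_data"] above carries the same three fields, which are forwarded to post_data(). A minimal illustrative sketch of one such descriptor; the entity id, unit and friendly name shown are assumptions, not package defaults:

    # Shape of one custom entity descriptor consumed by publish_data()/post_data().
    # All values below are illustrative placeholders.
    custom_batt_forecast_id = {
        "entity_id": "sensor.p_batt_forecast",
        "unit_of_measurement": "W",
        "friendly_name": "Battery Power Forecast",
    }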
1480
+ def continual_publish(
1481
+ input_data_dict: dict, entity_path: pathlib.Path, logger: logging.Logger
1482
+ ):
1483
+ """
1484
+ If continual_publish is true and an entity file is saved in /data_path/entities, continually publish the saved sensors at the freq rate, updating each entity's current state value based on the timestamp
1485
+
1486
+ :param input_data_dict: A dictionary with multiple data used by the action functions
1487
+ :type input_data_dict: dict
1488
+ :param entity_path: Path for entities folder in data_path
1489
+ :type entity_path: Path
1490
+ :param logger: The passed logger object
1491
+ :type logger: logging.Logger
1492
+
1493
+ """
1494
+ logger.info("Continual publish thread service started")
1495
+ freq = input_data_dict["retrieve_hass_conf"].get(
1496
+ "optimization_time_step", pd.to_timedelta(1, "minutes")
1497
+ )
1498
+ entity_path_contents = []
1499
+ while True:
1500
+ # Sleep for x seconds (using current time as a reference for time left)
1501
+ time.sleep(
1502
+ max(
1503
+ 0,
1504
+ freq.total_seconds()
1505
+ - (
1506
+ datetime.now(
1507
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1508
+ ).timestamp()
1509
+ % 60
1510
+ ),
1511
+ )
1512
+ )
1513
+ # Loop through all saved entity files
1514
+ if os.path.exists(entity_path) and len(os.listdir(entity_path)) > 0:
1515
+ entity_path_contents = os.listdir(entity_path)
1516
+ for entity in entity_path_contents:
1517
+ if entity != default_metadata_json:
1518
+ # Call publish_json with entity file, build entity, and publish
1519
+ publish_json(
1520
+ entity,
1521
+ input_data_dict,
1522
+ entity_path,
1523
+ logger,
1524
+ "continual_publish",
1525
+ )
1526
+ # Retrieve entity metadata from file
1527
+ if os.path.isfile(entity_path / default_metadata_json):
1528
+ with open(entity_path / default_metadata_json) as file:
1529
+ metadata = json.load(file)
1530
+ # Check if freq should be shorter
1531
+ if metadata.get("lowest_time_step", None) is not None:
1532
+ freq = pd.to_timedelta(metadata["lowest_time_step"], "minutes")
1533
+ pass
1534
+ # This function should never return
1535
+ return False
1536
+
1537
+
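Because continual_publish() loops forever (the trailing return False is unreachable in normal operation), it is intended to run as a background thread, as its start-up log message suggests. A minimal sketch of launching it, assuming input_data_dict, entity_path and logger have already been built by the surrounding module:

    import threading

    # Daemon thread: continual_publish() never returns, so do not join on it.
    publish_thread = threading.Thread(
        target=continual_publish,
        args=(input_data_dict, entity_path, logger),
        daemon=True,
    )
    publish_thread.start()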
1538
+ def publish_json(
1539
+ entity: dict,
1540
+ input_data_dict: dict,
1541
+ entity_path: pathlib.Path,
1542
+ logger: logging.Logger,
1543
+ reference: str | None = "",
1544
+ ):
1545
+ """
1546
+ Extract saved entity data from .json (in data_path/entities), build the entity and post the results with post_data
1547
+
1548
+ :param entity: json file containing entity data
1549
+ :type entity: dict
1550
+ :param input_data_dict: A dictionary with multiple data used by the action functions
1551
+ :type input_data_dict: dict
1552
+ :param entity_path: Path for entities folder in data_path
1553
+ :type entity_path: Path
1554
+ :param logger: The passed logger object
1555
+ :type logger: logging.Logger
1556
+ :param reference: String for identifying who ran the function
1557
+ :type reference: str, optional
1558
+
1559
+ """
1560
+ # Retrieve entity metadata from file
1561
+ if os.path.isfile(entity_path / default_metadata_json):
1562
+ with open(entity_path / default_metadata_json) as file:
1563
+ metadata = json.load(file)
1564
+ else:
1565
+ logger.error("Unable to locate metadata.json in: " + str(entity_path))
1566
+ return False
1567
+ # Round current timecode (now)
1568
+ now_precise = datetime.now(
1569
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1570
+ ).replace(second=0, microsecond=0)
1571
+ # Retrieve entity data from file
1572
+ entity_data = pd.read_json(entity_path / entity, orient="index")
1573
+ # Remove ".json" from string for entity_id
1574
+ entity_id = entity.replace(".json", "")
1575
+ # Adjust Dataframe from received entity json file
1576
+ entity_data.columns = [metadata[entity_id]["name"]]
1577
+ entity_data.index.name = "timestamp"
1578
+ entity_data.index = pd.to_datetime(entity_data.index).tz_convert(
1579
+ input_data_dict["retrieve_hass_conf"]["time_zone"]
1580
+ )
1581
+ entity_data.index.freq = pd.to_timedelta(
1582
+ int(metadata[entity_id]["optimization_time_step"]), "minutes"
1583
+ )
1584
+ # Calculate the current state value
1585
+ if input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "nearest":
1586
+ idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
1587
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "first":
1588
+ idx_closest = entity_data.index.get_indexer([now_precise], method="ffill")[0]
1589
+ elif input_data_dict["retrieve_hass_conf"]["method_ts_round"] == "last":
1590
+ idx_closest = entity_data.index.get_indexer([now_precise], method="bfill")[0]
1591
+ if idx_closest == -1:
1592
+ idx_closest = entity_data.index.get_indexer([now_precise], method="nearest")[0]
1593
+ # Call post data
1594
+ if reference == "continual_publish":
1595
+ logger.debug("Auto Published sensor:")
1596
+ logger_levels = "DEBUG"
1597
+ else:
1598
+ logger_levels = "INFO"
1599
+ # post/save entity
1600
+ input_data_dict["rh"].post_data(
1601
+ data_df=entity_data[metadata[entity_id]["name"]],
1602
+ idx=idx_closest,
1603
+ entity_id=entity_id,
1604
+ device_class=metadata[entity_id].get("device_class"),
1605
+ unit_of_measurement=metadata[entity_id]["unit_of_measurement"],
1606
+ friendly_name=metadata[entity_id]["friendly_name"],
1607
+ type_var=metadata[entity_id].get("type_var", ""),
1608
+ save_entities=False,
1609
+ logger_levels=logger_levels,
1610
+ )
1611
+ return entity_data[metadata[entity_id]["name"]]
1612
+
1613
+
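The per-entity files that publish_json() loads are plain JSON objects keyed by timestamp and read with orient="index". A minimal sketch of what one looks like once loaded; the timestamps, values and column name are illustrative only:

    import pandas as pd

    # Illustrative content of one saved entity file after loading with orient="index".
    raw = {
        "2024-01-01T00:00:00+00:00": 1200.0,
        "2024-01-01T00:30:00+00:00": 950.0,
    }
    entity_data = pd.DataFrame.from_dict(raw, orient="index", columns=["p_load_forecast"])
    entity_data.index = pd.to_datetime(entity_data.index)
    entity_data.index.name = "timestamp"
    print(entity_data)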
1614
+ def main():
1615
+ r"""Define the main command line entry function.
1616
+
1617
+ This function may take several arguments as inputs. You can type `emhass --help` to see the list of options:
1618
+
1619
+ - action: Set the desired action, options are: perfect-optim, dayahead-optim,
1620
+ naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune, regressor-model-fit, regressor-model-predict
1621
+
1622
+ - config: Define the path to the config.json (or YAML) file
1623
+
1624
+ - costfun: Define the type of cost function, options are: profit, cost, self-consumption
1625
+
1626
+ - log2file: Define if we should log to a file or not
1627
+
1628
+ - params: Configuration parameters passed from data/options.json if using the add-on
1629
+
1630
+ - runtimeparams: Pass runtime optimization parameters as a dictionary
1631
+
1632
+ - debug: Use True for testing purposes
1633
+
1634
+ """
1635
+ # Parsing arguments
1636
+ parser = argparse.ArgumentParser()
1637
+ parser.add_argument(
1638
+ "--action",
1639
+ type=str,
1640
+ help="Set the desired action, options are: perfect-optim, dayahead-optim,\
1641
+ naive-mpc-optim, publish-data, forecast-model-fit, forecast-model-predict, forecast-model-tune, regressor-model-fit, regressor-model-predict",
1642
+ )
1643
+ parser.add_argument(
1644
+ "--config", type=str, help="Define path to the config.json/defaults.json file"
1645
+ )
1646
+ parser.add_argument(
1647
+ "--params",
1648
+ type=str,
1649
+ default=None,
1650
+ help="String of configuration parameters passed",
1651
+ )
1652
+ parser.add_argument(
1653
+ "--data", type=str, help="Define path to the Data files (.csv & .pkl)"
1654
+ )
1655
+ parser.add_argument("--root", type=str, help="Define path to the emhass root")
1656
+ parser.add_argument(
1657
+ "--costfun",
1658
+ type=str,
1659
+ default="profit",
1660
+ help="Define the type of cost function, options are: profit, cost, self-consumption",
1661
+ )
1662
+ parser.add_argument(
1663
+ "--log2file",
1664
+ type=bool,
1665
+ default=False,
1666
+ help="Define if we should log to a file or not",
1667
+ )
1668
+ parser.add_argument(
1669
+ "--secrets",
1670
+ type=str,
1671
+ default=None,
1672
+ help="Define secret parameter file (secrets_emhass.yaml) path",
1673
+ )
1674
+ parser.add_argument(
1675
+ "--runtimeparams",
1676
+ type=str,
1677
+ default=None,
1678
+ help="Pass runtime optimization parameters as a dictionary",
1679
+ )
1680
+ parser.add_argument(
1681
+ "--debug",
1682
+ type=bool,
1683
+ default=False,
1684
+ help="Use True for testing purposes",
1685
+ )
1686
+ args = parser.parse_args()
1687
+
1688
+ # The path to the configuration files
1689
+ if args.config is not None:
1690
+ config_path = pathlib.Path(args.config)
1691
+ else:
1692
+ config_path = pathlib.Path(
1693
+ str(utils.get_root(__file__, num_parent=3) / "config.json")
1694
+ )
1695
+ if args.data is not None:
1696
+ data_path = pathlib.Path(args.data)
1697
+ else:
1698
+ data_path = config_path.parent / "data/"
1699
+ if args.root is not None:
1700
+ root_path = pathlib.Path(args.root)
1701
+ else:
1702
+ root_path = utils.get_root(__file__, num_parent=1)
1703
+ if args.secrets is not None:
1704
+ secrets_path = pathlib.Path(args.secrets)
1705
+ else:
1706
+ secrets_path = pathlib.Path(config_path.parent / "secrets_emhass.yaml")
1707
+
1708
+ associations_path = root_path / "data/associations.csv"
1709
+ defaults_path = root_path / "data/config_defaults.json"
1710
+
1711
+ emhass_conf = {}
1712
+ emhass_conf["config_path"] = config_path
1713
+ emhass_conf["data_path"] = data_path
1714
+ emhass_conf["root_path"] = root_path
1715
+ emhass_conf["associations_path"] = associations_path
1716
+ emhass_conf["defaults_path"] = defaults_path
1717
+ # create logger
1718
+ logger, ch = utils.get_logger(
1719
+ __name__, emhass_conf, save_to_file=bool(args.log2file)
1720
+ )
1721
+
1722
+ # Check paths
1723
+ logger.debug("config path: " + str(config_path))
1724
+ logger.debug("data path: " + str(data_path))
1725
+ logger.debug("root path: " + str(root_path))
1726
+ if not associations_path.exists():
1727
+ logger.error(
1728
+ "Could not find associations.csv file in: " + str(associations_path)
1729
+ )
1730
+ logger.error("Try setting the emhass root path with --root (associations.csv is expected under root/data/)")
1731
+ return False
1732
+ if not config_path.exists():
1733
+ logger.warning("Could not find config.json file in: " + str(config_path))
1734
+ logger.warning("Try setting config file path with --config")
1735
+ if not secrets_path.exists():
1736
+ logger.warning("Could not find secrets file in: " + str(secrets_path))
1737
+ logger.warning("Try setting secrets file path with --secrets")
1738
+ if not os.path.isdir(data_path):
1739
+ logger.error("Could not find data folder in: " + str(data_path))
1740
+ logger.error("Try setting data path with --data")
1741
+ return False
1742
+ if not os.path.isdir(root_path):
1743
+ logger.error("Could not find emhass/src folder in: " + str(root_path))
1744
+ logger.error("Try setting emhass root path with --root")
1745
+ return False
1746
+
1747
+ # Additional argument
1748
+ try:
1749
+ parser.add_argument(
1750
+ "--version",
1751
+ action="version",
1752
+ version="%(prog)s " + version("emhass"),
1753
+ )
1754
+ args = parser.parse_args()
1755
+ except Exception:
1756
+ logger.info(
1757
+ "Version not found for the emhass package, or importlib raised PackageNotFoundError.",
1758
+ )
1759
+
1760
+ # Setup config
1761
+ config = {}
1762
+ # Check if the passed config file is yaml or json, build config accordingly
1763
+ if config_path.exists():
1764
+ config_file_ending = re.findall(r"(?<=\.).*$", str(config_path))
1765
+ if len(config_file_ending) > 0:
1766
+ match config_file_ending[0]:
1767
+ case "json":
1768
+ config = utils.build_config(
1769
+ emhass_conf, logger, defaults_path, config_path
1770
+ )
1771
+ case "yaml":
1772
+ config = utils.build_config(
1773
+ emhass_conf, logger, defaults_path, config_path=config_path
1774
+ )
1775
+ case "yml":
1776
+ config = utils.build_config(
1777
+ emhass_conf, logger, defaults_path, config_path=config_path
1778
+ )
1779
+ # If unable to find the config file, use only config_defaults.json
1780
+ else:
1781
+ logger.warning(
1782
+ "Unable to obtain config.json file, building parameters with only defaults"
1783
+ )
1784
+ config = utils.build_config(emhass_conf, logger, defaults_path)
1785
+ if type(config) is bool and not config:
1786
+ raise Exception("Failed to find default config")
1787
+
1788
+ # Obtain secrets from secrets_emhass.yaml
1789
+ params_secrets = {}
1790
+ emhass_conf, built_secrets = utils.build_secrets(
1791
+ emhass_conf, logger, secrets_path=secrets_path
1792
+ )
1793
+ params_secrets.update(built_secrets)
1794
+
1795
+ # Build params
1796
+ params = utils.build_params(emhass_conf, params_secrets, config, logger)
1797
+ if type(params) is bool:
1798
+ raise Exception("An error has occurred while building parameters")
1799
+ # Add any passed params from args to params
1800
+ if args.params:
1801
+ params.update(json.loads(args.params))
1802
+
1803
+ input_data_dict = set_input_data_dict(
1804
+ emhass_conf,
1805
+ args.costfun,
1806
+ json.dumps(params),
1807
+ args.runtimeparams,
1808
+ args.action,
1809
+ logger,
1810
+ args.debug,
1811
+ )
1812
+ if type(input_data_dict) is bool:
1813
+ raise Exception("An error has occurred while creating action objects")
1814
+
1815
+ # Perform selected action
1816
+ if args.action == "perfect-optim":
1817
+ opt_res = perfect_forecast_optim(input_data_dict, logger, debug=args.debug)
1818
+ elif args.action == "dayahead-optim":
1819
+ opt_res = dayahead_forecast_optim(input_data_dict, logger, debug=args.debug)
1820
+ elif args.action == "naive-mpc-optim":
1821
+ opt_res = naive_mpc_optim(input_data_dict, logger, debug=args.debug)
1822
+ elif args.action == "forecast-model-fit":
1823
+ df_fit_pred, df_fit_pred_backtest, mlf = forecast_model_fit(
1824
+ input_data_dict, logger, debug=args.debug
1825
+ )
1826
+ opt_res = None
1827
+ elif args.action == "forecast-model-predict":
1828
+ if args.debug:
1829
+ _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
1830
+ else:
1831
+ mlf = None
1832
+ df_pred = forecast_model_predict(
1833
+ input_data_dict, logger, debug=args.debug, mlf=mlf
1834
+ )
1835
+ opt_res = None
1836
+ elif args.action == "forecast-model-tune":
1837
+ if args.debug:
1838
+ _, _, mlf = forecast_model_fit(input_data_dict, logger, debug=args.debug)
1839
+ else:
1840
+ mlf = None
1841
+ df_pred_optim, mlf = forecast_model_tune(
1842
+ input_data_dict, logger, debug=args.debug, mlf=mlf
1843
+ )
1844
+ opt_res = None
1845
+ elif args.action == "regressor-model-fit":
1846
+ mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
1847
+ opt_res = None
1848
+ elif args.action == "regressor-model-predict":
1849
+ if args.debug:
1850
+ mlr = regressor_model_fit(input_data_dict, logger, debug=args.debug)
1851
+ else:
1852
+ mlr = None
1853
+ prediction = regressor_model_predict(
1854
+ input_data_dict, logger, debug=args.debug, mlr=mlr
1855
+ )
1856
+ opt_res = None
1857
+ elif args.action == "publish-data":
1858
+ opt_res = publish_data(input_data_dict, logger)
1859
+ else:
1860
+ logger.error("The passed action argument is not valid")
1861
+ logger.error(
1862
+ "Try setting --action: perfect-optim, dayahead-optim, naive-mpc-optim, forecast-model-fit, forecast-model-predict, forecast-model-tune, regressor-model-fit, regressor-model-predict or publish-data"
1863
+ )
1864
+ opt_res = None
1865
+ logger.info(opt_res)
1866
+ # Flush the logger
1867
+ ch.close()
1868
+ logger.removeHandler(ch)
1869
+ if (
1870
+ args.action == "perfect-optim"
1871
+ or args.action == "dayahead-optim"
1872
+ or args.action == "naive-mpc-optim"
1873
+ or args.action == "publish-data"
1874
+ ):
1875
+ return opt_res
1876
+ elif args.action == "forecast-model-fit":
1877
+ return df_fit_pred, df_fit_pred_backtest, mlf
1878
+ elif args.action == "forecast-model-predict":
1879
+ return df_pred
1880
+ elif args.action == "regressor-model-fit":
1881
+ return mlr
1882
+ elif args.action == "regressor-model-predict":
1883
+ return prediction
1884
+ elif args.action == "forecast-model-tune":
1885
+ return df_pred_optim, mlf
1886
+ else:
1887
+ return opt_res
1888
+
1889
+
1890
+ if __name__ == "__main__":
1891
+ main()
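As a usage illustration of the entry point above: main() is exposed through the emhass console script (its docstring mentions emhass --help). A minimal sketch of launching a day-ahead optimization from Python, assuming the package is installed; the --config and --secrets paths are placeholders:

    import subprocess

    # Placeholder paths: point --config and --secrets at your own files.
    subprocess.run(
        [
            "emhass",
            "--action", "dayahead-optim",
            "--config", "/app/config.json",
            "--secrets", "/app/secrets_emhass.yaml",
            "--costfun", "profit",
        ],
        check=True,
    )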