emhass 0.10.6__py3-none-any.whl → 0.15.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
emhass/web_server.py CHANGED
@@ -1,493 +1,895 @@
1
1
  #!/usr/bin/env python3
2
- # -*- coding: utf-8 -*-
3
2
 
4
- from flask import Flask, request, make_response, render_template
5
- from jinja2 import Environment, PackageLoader
6
- from requests import get
7
- from waitress import serve
8
- from importlib.metadata import version, PackageNotFoundError
3
+ import argparse
4
+ import asyncio
5
+ import logging
6
+ import os
7
+ import pickle
8
+ import re
9
+ import threading
10
+ from importlib.metadata import PackageNotFoundError, version
9
11
  from pathlib import Path
10
- import os, json, argparse, pickle, yaml, logging, re, threading
11
- from distutils.util import strtobool
12
-
13
- from emhass.command_line import set_input_data_dict
14
- from emhass.command_line import perfect_forecast_optim, dayahead_forecast_optim, naive_mpc_optim
15
- from emhass.command_line import forecast_model_fit, forecast_model_predict, forecast_model_tune, weather_forecast_cache
16
- from emhass.command_line import regressor_model_fit, regressor_model_predict
17
- from emhass.command_line import publish_data, continual_publish
18
- from emhass.utils import get_injection_dict, get_injection_dict_forecast_model_fit, \
19
- get_injection_dict_forecast_model_tune, build_params
20
-
21
- # Define the Flask instance
22
- app = Flask(__name__)
23
-
24
- # Check logfile for error, anything after string match if provided
25
- def checkFileLog(refString=None):
26
- if (refString is not None):
27
- logArray = grabLog(refString) #grab reduced log array
28
- else:
29
- if ((emhass_conf['data_path'] / 'actionLogs.txt')).exists():
30
- with open(str(emhass_conf['data_path'] / 'actionLogs.txt'), "r") as fp:
31
- logArray = fp.readlines()
32
- for logString in logArray:
33
- if (logString.split(' ', 1)[0] == "ERROR"):
34
- return True
12
+
13
+ import aiofiles
14
+ import jinja2
15
+ import orjson
16
+ import uvicorn
17
+ import yaml
18
+ from markupsafe import Markup
19
+ from quart import Quart, make_response, request
20
+ from quart import logging as log
21
+
22
+ from emhass.command_line import (
23
+ continual_publish,
24
+ dayahead_forecast_optim,
25
+ export_influxdb_to_csv,
26
+ forecast_model_fit,
27
+ forecast_model_predict,
28
+ forecast_model_tune,
29
+ naive_mpc_optim,
30
+ perfect_forecast_optim,
31
+ publish_data,
32
+ regressor_model_fit,
33
+ regressor_model_predict,
34
+ set_input_data_dict,
35
+ weather_forecast_cache,
36
+ )
37
+ from emhass.connection_manager import close_global_connection, get_websocket_client, is_connected
38
+ from emhass.utils import (
39
+ build_config,
40
+ build_legacy_config_params,
41
+ build_params,
42
+ build_secrets,
43
+ get_injection_dict,
44
+ get_injection_dict_forecast_model_fit,
45
+ get_injection_dict_forecast_model_tune,
46
+ param_to_config,
47
+ )
48
+
49
+ app = Quart(__name__)
50
+
51
+ emhass_conf: dict[str, Path] = {}
52
+ entity_path: Path = Path()
53
+ params_secrets: dict[str, str | float] = {}
54
+ continual_publish_thread: list = []
55
+ injection_dict: dict = {}
56
+
57
+ templates = jinja2.Environment(
58
+ autoescape=True,
59
+ loader=jinja2.PackageLoader("emhass", "templates"),
60
+ )
61
+
62
+ action_log_str = "action_logs.txt"
63
+ injection_dict_file = "injection_dict.pkl"
64
+ params_file = "params.pkl"
65
+ error_msg_associations_file = "Unable to obtain associations file"
66
+
67
+
68
+ # Add custom filter for trusted HTML content
69
+ def mark_safe(value):
70
+ """Mark pre-rendered HTML plots as safe (use only for trusted content)"""
71
+ if value is None:
72
+ return ""
73
+ return Markup(value)
74
+
75
+
76
+ templates.filters["mark_safe"] = mark_safe
77
+
78
+
79
+ # Register async startup and shutdown handlers
80
+ @app.before_serving
81
+ async def before_serving():
82
+ """Initialize EMHASS before starting to serve requests."""
83
+ # Initialize the application
84
+ try:
85
+ await initialize()
86
+ app.logger.info("Full initialization completed")
87
+ except Exception as e:
88
+ app.logger.warning(f"Full initialization failed (this is normal in test environments): {e}")
89
+ app.logger.info("Continuing without WebSocket connection...")
90
+ # The initialize() function already sets up all necessary components except WebSocket
91
+ # So we can continue serving requests even if WebSocket connection fails
92
+
93
+
94
+ @app.after_serving
95
+ async def after_serving():
96
+ """Clean up resources after serving."""
97
+ try:
98
+ # Only close WebSocket connection if it was established
99
+ if is_connected():
100
+ await close_global_connection()
101
+ app.logger.info("WebSocket connection closed")
102
+ else:
103
+ app.logger.info("No WebSocket connection to close")
104
+ except Exception as e:
105
+ app.logger.warning(f"WebSocket shutdown failed: {e}")
106
+ app.logger.info("Quart shutdown complete")
107
+
108
+
109
+ async def check_file_log(ref_string: str | None = None) -> bool:
110
+ """
111
+ Check logfile for errors, checking only lines after a string match if one is provided.
112
+
113
+ :param ref_string: String used to narrow the log check to everything after the match (i.e. an action).
114
+ :type ref_string: str
115
+ :return: Boolean return if error was found in logs
116
+ :rtype: bool
117
+
118
+ """
119
+ log_array: list[str] = []
120
+
121
+ if ref_string is not None:
122
+ log_array = await grab_log(
123
+ ref_string
124
+ ) # grab reduced log array (everything after string match)
125
+ else:
126
+ if (emhass_conf["data_path"] / action_log_str).exists():
127
+ async with aiofiles.open(str(emhass_conf["data_path"] / action_log_str)) as fp:
128
+ content = await fp.read()
129
+ log_array = content.splitlines()
130
+ else:
131
+ app.logger.debug("Unable to obtain {action_log_str}")
132
+ return False
133
+
134
+ for log_string in log_array:
135
+ if log_string.split(" ", 1)[0] == "ERROR":
136
+ return True
35
137
  return False
36
138
 
37
- # Find string in logs, append all lines after to return
38
- def grabLog(refString):
39
- isFound = []
139
+
140
+ async def grab_log(ref_string: str | None = None) -> list[str]:
141
+ """
142
+ Find a string in the logs and return all lines after the match as a list.
143
+
144
+ :param ref_string: String used to string match log.
145
+ :type ref_string: str
146
+ :return: List of lines in log after string match.
147
+ :rtype: list
148
+
149
+ """
150
+ is_found = []
40
151
  output = []
41
- if ((emhass_conf['data_path'] / 'actionLogs.txt')).exists():
42
- with open(str(emhass_conf['data_path'] / 'actionLogs.txt'), "r") as fp:
43
- logArray = fp.readlines()
44
- for x in range(len(logArray)-1): #find all matches and log key in isFound
45
- if (re.search(refString,logArray[x])):
46
- isFound.append(x)
47
- if len(isFound) != 0:
48
- for x in range(isFound[-1],len(logArray)): #use isFound to extract last related action logs
49
- output.append(logArray[x])
152
+ if (emhass_conf["data_path"] / action_log_str).exists():
153
+ async with aiofiles.open(str(emhass_conf["data_path"] / action_log_str)) as fp:
154
+ content = await fp.read()
155
+ log_array = content.splitlines()
156
+ # Find all string matches, log key (line Number) in is_found
157
+ for x in range(len(log_array) - 1):
158
+ if re.search(ref_string, log_array[x]):
159
+ is_found.append(x)
160
+ if len(is_found) != 0:
161
+ # Use last item in is_found to extract action logs
162
+ for x in range(is_found[-1], len(log_array)):
163
+ output.append(log_array[x])
50
164
  return output
51
165
 
166
+
52
167
  # Clear the log file
53
- def clearFileLog():
54
- if ((emhass_conf['data_path'] / 'actionLogs.txt')).exists():
55
- with open(str(emhass_conf['data_path'] / 'actionLogs.txt'), "w") as fp:
56
- fp.truncate()
168
+ async def clear_file_log():
169
+ """
170
+ Clear the contents of the log file
171
+
172
+ """
173
+ if (emhass_conf["data_path"] / action_log_str).exists():
174
+ async with aiofiles.open(str(emhass_conf["data_path"] / action_log_str), "w") as fp:
175
+ await fp.write("")
57
176
 
58
177
 
59
- # Initial index page render
60
- @app.route('/')
61
- def index():
178
+ @app.route("/")
179
+ @app.route("/index")
180
+ async def index():
181
+ """
182
+ Render initial index page and serve to web server.
183
+ Appends plot tables saved from previous optimization into index.html, then serves.
184
+ """
62
185
  app.logger.info("EMHASS server online, serving index.html...")
63
- # Load HTML template
64
- file_loader = PackageLoader('emhass', 'templates')
65
- env = Environment(loader=file_loader)
66
- template = env.get_template('index.html')
67
- # Load cache dict
68
- if (emhass_conf['data_path'] / 'injection_dict.pkl').exists():
69
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "rb") as fid:
70
- injection_dict = pickle.load(fid)
186
+
187
+ # Load cached dict (if exists), to present generated plot tables
188
+ if (emhass_conf["data_path"] / injection_dict_file).exists():
189
+ async with aiofiles.open(str(emhass_conf["data_path"] / injection_dict_file), "rb") as fid:
190
+ content = await fid.read()
191
+ injection_dict = pickle.loads(content)
71
192
  else:
72
- app.logger.warning("The data container dictionary is empty... Please launch an optimization task")
73
- injection_dict={}
74
-
75
- # replace {{basename}} in html template html with path root
76
- # basename = request.headers.get("X-Ingress-Path", "")
77
- # return make_response(template.render(injection_dict=injection_dict, basename=basename))
78
-
79
- return make_response(template.render(injection_dict=injection_dict))
80
-
81
-
82
- #get actions
83
- @app.route('/template/<action_name>', methods=['GET'])
84
- def template_action(action_name):
85
- app.logger.info(" >> Sending rendered template table data")
86
- if action_name == 'table-template':
87
- file_loader = PackageLoader('emhass', 'templates')
88
- env = Environment(loader=file_loader)
89
- template = env.get_template('template.html')
90
- if (emhass_conf['data_path'] / 'injection_dict.pkl').exists():
91
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "rb") as fid:
92
- injection_dict = pickle.load(fid)
93
- else:
94
- app.logger.warning("The data container dictionary is empty... Please launch an optimization task")
95
- injection_dict={}
96
- return make_response(template.render(injection_dict=injection_dict))
97
-
98
- #post actions
99
- @app.route('/action/<action_name>', methods=['POST'])
100
- def action_call(action_name):
101
- # Setting up parameters
102
- with open(str(emhass_conf['data_path'] / 'params.pkl'), "rb") as fid:
103
- emhass_conf['config_path'], params = pickle.load(fid)
104
- runtimeparams = request.get_json(force=True)
105
- params = json.dumps(params)
106
- if runtimeparams is not None and runtimeparams != '{}':
107
- app.logger.info("Passed runtime parameters: " + str(runtimeparams))
108
- runtimeparams = json.dumps(runtimeparams)
109
-
110
- # Run action if weather_forecast_cache
111
- if action_name == 'weather-forecast-cache':
112
- ActionStr = " >> Performing weather forecast, try to caching result"
113
- app.logger.info(ActionStr)
114
- weather_forecast_cache(emhass_conf, params, runtimeparams, app.logger)
115
- msg = f'EMHASS >> Weather Forecast has run and results possibly cached... \n'
116
- if not checkFileLog(ActionStr):
117
- return make_response(msg, 201)
118
- return make_response(grabLog(ActionStr), 400)
119
-
120
- ActionStr = " >> Setting input data dict"
121
- app.logger.info(ActionStr)
122
- input_data_dict = set_input_data_dict(emhass_conf, costfun,
123
- params, runtimeparams, action_name, app.logger)
124
- if not input_data_dict:
125
- return make_response(grabLog(ActionStr), 400)
126
-
127
- # If continual_publish is True, start thread with loop function
128
- if len(continual_publish_thread) == 0 and input_data_dict['retrieve_hass_conf'].get("continual_publish",False):
129
- # Start Thread
130
- continualLoop = threading.Thread(name="continual_publish",target=continual_publish,args=[input_data_dict,entity_path,app.logger])
131
- continualLoop.start()
132
- continual_publish_thread.append(continualLoop)
133
-
134
- # run action based on POST request
135
- if action_name == 'publish-data':
136
- ActionStr = " >> Publishing data..."
137
- app.logger.info(ActionStr)
138
- _ = publish_data(input_data_dict, app.logger)
139
- msg = f'EMHASS >> Action publish-data executed... \n'
140
- if not checkFileLog(ActionStr):
141
- return make_response(msg, 201)
142
- return make_response(grabLog(ActionStr), 400)
143
- elif action_name == 'perfect-optim':
144
- ActionStr = " >> Performing perfect optimization..."
145
- app.logger.info(ActionStr)
146
- opt_res = perfect_forecast_optim(input_data_dict, app.logger)
147
- injection_dict = get_injection_dict(opt_res)
148
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
149
- pickle.dump(injection_dict, fid)
150
- msg = f'EMHASS >> Action perfect-optim executed... \n'
151
- if not checkFileLog(ActionStr):
152
- return make_response(msg, 201)
153
- return make_response(grabLog(ActionStr), 400)
154
- elif action_name == 'dayahead-optim':
155
- ActionStr = " >> Performing dayahead optimization..."
156
- app.logger.info(ActionStr)
157
- opt_res = dayahead_forecast_optim(input_data_dict, app.logger)
158
- injection_dict = get_injection_dict(opt_res)
159
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
160
- pickle.dump(injection_dict, fid)
161
- msg = f'EMHASS >> Action dayahead-optim executed... \n'
162
- if not checkFileLog(ActionStr):
163
- return make_response(msg, 201)
164
- return make_response(grabLog(ActionStr), 400)
165
- elif action_name == 'naive-mpc-optim':
166
- ActionStr = " >> Performing naive MPC optimization..."
167
- app.logger.info(ActionStr)
168
- opt_res = naive_mpc_optim(input_data_dict, app.logger)
169
- injection_dict = get_injection_dict(opt_res)
170
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
171
- pickle.dump(injection_dict, fid)
172
- msg = f'EMHASS >> Action naive-mpc-optim executed... \n'
173
- if not checkFileLog(ActionStr):
174
- return make_response(msg, 201)
175
- return make_response(grabLog(ActionStr), 400)
176
- elif action_name == 'forecast-model-fit':
177
- ActionStr = " >> Performing a machine learning forecast model fit..."
178
- app.logger.info(ActionStr)
179
- df_fit_pred, _, mlf = forecast_model_fit(input_data_dict, app.logger)
180
- injection_dict = get_injection_dict_forecast_model_fit(
181
- df_fit_pred, mlf)
182
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
183
- pickle.dump(injection_dict, fid)
184
- msg = f'EMHASS >> Action forecast-model-fit executed... \n'
185
- if not checkFileLog(ActionStr):
186
- return make_response(msg, 201)
187
- return make_response(grabLog(ActionStr), 400)
188
- elif action_name == 'forecast-model-predict':
189
- ActionStr = " >> Performing a machine learning forecast model predict..."
190
- app.logger.info(ActionStr)
191
- df_pred = forecast_model_predict(input_data_dict, app.logger)
192
- if df_pred is None:
193
- return make_response(grabLog(ActionStr), 400)
194
- table1 = df_pred.reset_index().to_html(classes='mystyle', index=False)
193
+ app.logger.info(
194
+ "The data container dictionary is empty... Please launch an optimization task"
195
+ )
195
196
  injection_dict = {}
196
- injection_dict['title'] = '<h2>Custom machine learning forecast model predict</h2>'
197
- injection_dict['subsubtitle0'] = '<h4>Performed a prediction using a pre-trained model</h4>'
198
- injection_dict['table1'] = table1
199
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
200
- pickle.dump(injection_dict, fid)
201
- msg = f'EMHASS >> Action forecast-model-predict executed... \n'
202
- if not checkFileLog(ActionStr):
203
- return make_response(msg, 201)
204
- return make_response(grabLog(ActionStr), 400)
205
- elif action_name == 'forecast-model-tune':
206
- ActionStr = " >> Performing a machine learning forecast model tune..."
207
- app.logger.info(ActionStr)
208
- df_pred_optim, mlf = forecast_model_tune(input_data_dict, app.logger)
209
- if df_pred_optim is None or mlf is None:
210
- return make_response(grabLog(ActionStr), 400)
211
- injection_dict = get_injection_dict_forecast_model_tune(
212
- df_pred_optim, mlf)
213
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "wb") as fid:
214
- pickle.dump(injection_dict, fid)
215
- msg = f'EMHASS >> Action forecast-model-tune executed... \n'
216
- if not checkFileLog(ActionStr):
217
- return make_response(msg, 201)
218
- return make_response(grabLog(ActionStr), 400)
219
- elif action_name == 'regressor-model-fit':
220
- ActionStr = " >> Performing a machine learning regressor fit..."
221
- app.logger.info(ActionStr)
222
- regressor_model_fit(input_data_dict, app.logger)
223
- msg = f'EMHASS >> Action regressor-model-fit executed... \n'
224
- if not checkFileLog(ActionStr):
225
- return make_response(msg, 201)
226
- return make_response(grabLog(ActionStr), 400)
227
- elif action_name == 'regressor-model-predict':
228
- ActionStr = " >> Performing a machine learning regressor predict..."
229
- app.logger.info(ActionStr)
230
- regressor_model_predict(input_data_dict, app.logger)
231
- msg = f'EMHASS >> Action regressor-model-predict executed... \n'
232
- if not checkFileLog(ActionStr):
233
- return make_response(msg, 201)
234
- return make_response(grabLog(ActionStr), 400)
197
+
198
+ template = templates.get_template("index.html")
199
+ return await make_response(template.render(injection_dict=injection_dict))
200
+
201
+
202
+ @app.route("/configuration")
203
+ async def configuration():
204
+ """
205
+ Configuration page actions:
206
+ Render and serve configuration page html
207
+ """
208
+ app.logger.info("serving configuration.html...")
209
+
210
+ # get params
211
+ if (emhass_conf["data_path"] / params_file).exists():
212
+ async with aiofiles.open(str(emhass_conf["data_path"] / params_file), "rb") as fid:
213
+ content = await fid.read()
214
+ emhass_conf["config_path"], params = pickle.loads(content)
235
215
  else:
236
- app.logger.error("ERROR: passed action is not valid")
237
- msg = f'EMHASS >> ERROR: Passed action is not valid... \n'
238
- return make_response(msg, 400)
216
+ # Safe fallback if params.pkl doesn't exist
217
+ params = {}
239
218
 
240
- if __name__ == "__main__":
241
- # Parsing arguments
242
- parser = argparse.ArgumentParser()
243
- parser.add_argument('--url', type=str, help='The URL to your Home Assistant instance, ex the external_url in your hass configuration')
244
- parser.add_argument('--key', type=str, help='Your access key. If using EMHASS in standalone this should be a Long-Lived Access Token')
245
- parser.add_argument('--addon', type=strtobool, default='False', help='Define if we are usinng EMHASS with the add-on or in standalone mode')
246
- parser.add_argument('--no_response', type=strtobool, default='False', help='This is set if json response errors occur')
247
- args = parser.parse_args()
248
-
249
- #Obtain url and key from ENV or ARG (if any)
250
- hass_url = os.getenv("EMHASS_URL", default=args.url)
251
- key = os.getenv("SUPERVISOR_TOKEN", default=args.key)
252
- if hass_url != "http://supervisor/core/api":
253
- key = os.getenv("EMHASS_KEY", key)
254
- #If url or key is None, Set as empty string to reduce NoneType errors bellow
255
- if key is None: key = ""
256
- if hass_url is None: hass_url = ""
257
-
258
- #find env's, not not set defaults
259
- use_options = os.getenv('USE_OPTIONS', default=False)
260
- CONFIG_PATH = os.getenv("CONFIG_PATH", default="/app/config_emhass.yaml")
261
- OPTIONS_PATH = os.getenv('OPTIONS_PATH', default="/app/options.json")
262
- DATA_PATH = os.getenv("DATA_PATH", default="/app/data/")
263
- ROOT_PATH = os.getenv("ROOT_PATH", default=str(Path(__file__).parent))
264
-
265
- #options None by default
266
- options = None
219
+ template = templates.get_template("configuration.html")
220
+ return await make_response(template.render(config=params))
267
221
 
268
- # Define the paths
269
- if args.addon==1:
270
- options_json = Path(OPTIONS_PATH)
271
- # Read options info
272
- if options_json.exists():
273
- with options_json.open('r') as data:
274
- options = json.load(data)
275
- else:
276
- app.logger.error("options.json does not exist")
277
- raise Exception("options.json does not exist in path: "+str(options_json))
222
+
223
+ @app.route("/template", methods=["GET"])
224
+ async def template_action():
225
+ """
226
+ template page actions:
227
+ Render and serve template html
228
+ """
229
+ app.logger.info(" >> Sending rendered template data")
230
+
231
+ if (emhass_conf["data_path"] / injection_dict_file).exists():
232
+ async with aiofiles.open(str(emhass_conf["data_path"] / injection_dict_file), "rb") as fid:
233
+ content = await fid.read()
234
+ injection_dict = pickle.loads(content)
235
+ else:
236
+ app.logger.warning("Unable to obtain plot data from {injection_dict_file}")
237
+ app.logger.warning("Try running an launch an optimization task")
238
+ injection_dict = {}
239
+
240
+ template = templates.get_template("template.html")
241
+ return await make_response(template.render(injection_dict=injection_dict))
242
+
243
+
244
+ @app.route("/get-config", methods=["GET"])
245
+ async def parameter_get():
246
+ """
247
+ Get request action that builds, formats and sends config as json (config.json format)
248
+
249
+ """
250
+ app.logger.debug("Obtaining current saved parameters as config")
251
+ # Build config from all possible sources (inc. legacy yaml config)
252
+ config = await build_config(
253
+ emhass_conf,
254
+ app.logger,
255
+ str(emhass_conf["defaults_path"]),
256
+ str(emhass_conf["config_path"]),
257
+ str(emhass_conf["legacy_config_path"]),
258
+ )
259
+ if type(config) is bool and not config:
260
+ return await make_response(["failed to retrieve default config file"], 500)
261
+ # Format parameters in config with params (converting legacy json parameters from options.json if any)
262
+ params = await build_params(emhass_conf, {}, config, app.logger)
263
+ if type(params) is bool and not params:
264
+ return await make_response([error_msg_associations_file], 500)
265
+ # Convert formatted parameters from params back into config.json format
266
+ return_config = param_to_config(params, app.logger)
267
+ # Send config
268
+ return await make_response(return_config, 201)
269
+
270
+
271
+ # Get default Config
272
+ @app.route("/get-config/defaults", methods=["GET"])
273
+ async def config_get():
274
+ """
275
+ Get request action, retrieves and sends default configuration
276
+
277
+ """
278
+ app.logger.debug("Obtaining default parameters")
279
+ # Build config, passing only default file
280
+ config = await build_config(emhass_conf, app.logger, str(emhass_conf["defaults_path"]))
281
+ if type(config) is bool and not config:
282
+ return await make_response(["failed to retrieve default config file"], 500)
283
+ # Format parameters in config with params
284
+ params = await build_params(emhass_conf, {}, config, app.logger)
285
+ if type(params) is bool and not params:
286
+ return await make_response([error_msg_associations_file], 500)
287
+ # Convert formatted parameters from params back into config.json format
288
+ return_config = param_to_config(params, app.logger)
289
+ # Send params
290
+ return await make_response(return_config, 201)
291
+
292
+
293
+ # Get YAML-to-JSON config
294
+ @app.route("/get-json", methods=["POST"])
295
+ async def json_convert():
296
+ """
297
+ Post request action, receives yaml config (config_emhass.yaml or EMHASS-Add-on config page) and converts to config json format.
298
+
299
+ """
300
+ app.logger.info("Attempting to convert YAML to JSON")
301
+ data = await request.get_data()
302
+ yaml_config = yaml.safe_load(data)
303
+
304
+ # If failed to parse YAML
305
+ if yaml_config is None:
306
+ return await make_response(["failed to Parse YAML from data"], 400)
307
+ # Test YAML is legacy config format (from config_emhass.yaml)
308
+ test_legacy_config = await build_legacy_config_params(emhass_conf, yaml_config, app.logger)
309
+ if test_legacy_config:
310
+ yaml_config = test_legacy_config
311
+ # Format YAML to params (format params. check if params match legacy option.json format)
312
+ params = await build_params(emhass_conf, {}, yaml_config, app.logger)
313
+ if type(params) is bool and not params:
314
+ return await make_response([error_msg_associations_file], 500)
315
+ # Convert formatted parameters from params back into config.json format
316
+ config = param_to_config(params, app.logger)
317
+ # convert json to str
318
+ config = orjson.dumps(config).decode()
319
+
320
+ # Send params
321
+ return await make_response(config, 201)
322
+
323
+
324
+ @app.route("/set-config", methods=["POST"])
325
+ async def parameter_set():
326
+ """
327
+ Receive JSON config and save it to file (config.json and params.pkl)
328
+
329
+ """
330
+ config = {}
331
+ if not emhass_conf["defaults_path"]:
332
+ return await make_response(["Unable to Obtain defaults_path from emhass_conf"], 500)
333
+ if not emhass_conf["config_path"]:
334
+ return await make_response(["Unable to Obtain config_path from emhass_conf"], 500)
335
+
336
+ # Load defaults as a reference point (for sorting) and a base to override
337
+ if (
338
+ os.path.exists(emhass_conf["defaults_path"])
339
+ and Path(emhass_conf["defaults_path"]).is_file()
340
+ ):
341
+ async with aiofiles.open(str(emhass_conf["defaults_path"])) as data:
342
+ content = await data.read()
343
+ config = orjson.loads(content)
278
344
  else:
279
- if use_options:
280
- options_json = Path(OPTIONS_PATH)
281
- # Read options info
282
- if options_json.exists():
283
- with options_json.open('r') as data:
284
- options = json.load(data)
285
- else:
286
- app.logger.error("options.json does not exist")
287
- raise Exception("options.json does not exist in path: "+str(options_json))
345
+ app.logger.warning(
346
+ "Unable to obtain default config. only parameters passed from request will be saved to config.json"
347
+ )
348
+
349
+ # Retrieve sent config json
350
+ request_data = await request.get_json(force=True)
351
+
352
+ # check if data is empty
353
+ if len(request_data) == 0:
354
+ return await make_response(["failed to retrieve config json"], 400)
355
+
356
+ # Format config by converting to params (check if params match the legacy options.json format and convert if so)
357
+ params = await build_params(emhass_conf, params_secrets, request_data, app.logger)
358
+ if type(params) is bool and not params:
359
+ return await make_response([error_msg_associations_file], 500)
360
+
361
+ # Convert formatted parameters from params back into config.json format.
362
+ # Overwrite existing default parameters in config
363
+ config.update(param_to_config(params, app.logger))
364
+
365
+ # Save config to config.json
366
+ if os.path.exists(emhass_conf["config_path"].parent):
367
+ async with aiofiles.open(str(emhass_conf["config_path"]), "w") as f:
368
+ await f.write(orjson.dumps(config, option=orjson.OPT_INDENT_2).decode())
369
+ else:
370
+ return await make_response(["Unable to save config file"], 500)
371
+
372
+ # Save params with updated config
373
+ if os.path.exists(emhass_conf["data_path"]):
374
+ async with aiofiles.open(str(emhass_conf["data_path"] / params_file), "wb") as fid:
375
+ content = pickle.dumps(
376
+ (
377
+ emhass_conf["config_path"],
378
+ await build_params(emhass_conf, params_secrets, config, app.logger),
379
+ )
380
+ )
381
+ await fid.write(content)
382
+ else:
383
+ return await make_response(["Unable to save params file, missing data_path"], 500)
384
+
385
+ app.logger.info("Saved parameters from webserver")
386
+ return await make_response({}, 201)
387
+
388
+
389
+ async def _load_params_and_runtime(request, emhass_conf, logger):
390
+ """
391
+ Loads configuration parameters from pickle and runtime parameters from the request.
392
+ Returns a tuple (params, costfun, runtimeparams) or raises an exception/returns None on failure.
393
+ """
394
+ action_str = " >> Obtaining params: "
395
+ logger.info(action_str)
396
+
397
+ # Load params.pkl
398
+ params = None
399
+ costfun = "profit"
400
+ params_path = emhass_conf["data_path"] / params_file
401
+
402
+ if params_path.exists():
403
+ async with aiofiles.open(str(params_path), "rb") as fid:
404
+ content = await fid.read()
405
+ emhass_conf["config_path"], params = pickle.loads(content)
406
+ # Set local costfun variable
407
+ if params.get("optim_conf") is not None:
408
+ costfun = params["optim_conf"].get("costfun", "profit")
409
+ params = orjson.dumps(params).decode()
410
+ else:
411
+ logger.error("Unable to find params.pkl file")
412
+ return None, None, None
413
+
414
+ # Load runtime params
415
+ try:
416
+ runtimeparams = await request.get_json(force=True)
417
+ if runtimeparams:
418
+ logger.info("Passed runtime parameters: " + str(runtimeparams))
288
419
  else:
289
- options = None
420
+ runtimeparams = {}
421
+ except Exception as e:
422
+ logger.error(f"Error parsing runtime params JSON: {e}")
423
+ logger.error("Check your payload for syntax errors (e.g., use 'false' instead of 'False')")
424
+ runtimeparams = {}
425
+
426
+ runtimeparams = orjson.dumps(runtimeparams).decode()
427
+
428
+ return params, costfun, runtimeparams
429
+
430
+
431
+ async def _handle_action_dispatch(
432
+ action_name, input_data_dict, emhass_conf, params, runtimeparams, logger
433
+ ):
434
+ """
435
+ Dispatches the specific logic based on the action_name.
436
+ Returns (response_msg, status_code).
437
+ """
438
+ # Actions that don't require input_data_dict or have specific flows
439
+ if action_name == "weather-forecast-cache":
440
+ action_str = " >> Performing weather forecast, try to caching result"
441
+ logger.info(action_str)
442
+ await weather_forecast_cache(emhass_conf, params, runtimeparams, logger)
443
+ return "EMHASS >> Weather Forecast has run and results possibly cached... \n", 201
444
+
445
+ if action_name == "export-influxdb-to-csv":
446
+ action_str = " >> Exporting InfluxDB data to CSV..."
447
+ logger.info(action_str)
448
+ success = await export_influxdb_to_csv(None, logger, emhass_conf, params, runtimeparams)
449
+ if success:
450
+ return "EMHASS >> Action export-influxdb-to-csv executed successfully... \n", 201
451
+ return await grab_log(action_str), 400
452
+
453
+ # Actions requiring input_data_dict
454
+ if action_name == "publish-data":
455
+ action_str = " >> Publishing data..."
456
+ logger.info(action_str)
457
+ _ = await publish_data(input_data_dict, logger)
458
+ return "EMHASS >> Action publish-data executed... \n", 201
459
+
460
+ # Mapping for optimization actions to their functions
461
+ optim_actions = {
462
+ "perfect-optim": perfect_forecast_optim,
463
+ "dayahead-optim": dayahead_forecast_optim,
464
+ "naive-mpc-optim": naive_mpc_optim,
465
+ }
466
+
467
+ if action_name in optim_actions:
468
+ action_str = f" >> Performing {action_name}..."
469
+ logger.info(action_str)
470
+ opt_res = await optim_actions[action_name](input_data_dict, logger)
471
+ injection_dict = get_injection_dict(opt_res)
472
+ await _save_injection_dict(injection_dict, emhass_conf["data_path"])
473
+ return f"EMHASS >> Action {action_name} executed... \n", 201
474
+
475
+ # Delegate Machine Learning actions to helper
476
+ ml_response = await _handle_ml_actions(action_name, input_data_dict, emhass_conf, logger)
477
+ if ml_response:
478
+ return ml_response
479
+
480
+ # Fallback for invalid action
481
+ logger.error("ERROR: passed action is not valid")
482
+ return "EMHASS >> ERROR: Passed action is not valid... \n", 400
483
+
484
+
485
+ async def _handle_ml_actions(action_name, input_data_dict, emhass_conf, logger):
486
+ """
487
+ Helper function to handle Machine Learning specific actions.
488
+ Returns (msg, status) if action is handled, otherwise None.
489
+ """
490
+ # forecast-model-fit
491
+ if action_name == "forecast-model-fit":
492
+ action_str = " >> Performing a machine learning forecast model fit..."
493
+ logger.info(action_str)
494
+ df_fit_pred, _, mlf = await forecast_model_fit(input_data_dict, logger)
495
+ injection_dict = get_injection_dict_forecast_model_fit(df_fit_pred, mlf)
496
+ await _save_injection_dict(injection_dict, emhass_conf["data_path"])
497
+ return "EMHASS >> Action forecast-model-fit executed... \n", 201
498
+
499
+ # forecast-model-predict
500
+ if action_name == "forecast-model-predict":
501
+ action_str = " >> Performing a machine learning forecast model predict..."
502
+ logger.info(action_str)
503
+ df_pred = await forecast_model_predict(input_data_dict, logger)
504
+ if df_pred is None:
505
+ return await grab_log(action_str), 400
506
+
507
+ table1 = df_pred.reset_index().to_html(classes="mystyle", index=False)
508
+ injection_dict = {
509
+ "title": "<h2>Custom machine learning forecast model predict</h2>",
510
+ "subsubtitle0": "<h4>Performed a prediction using a pre-trained model</h4>",
511
+ "table1": table1,
512
+ }
513
+ await _save_injection_dict(injection_dict, emhass_conf["data_path"])
514
+ return "EMHASS >> Action forecast-model-predict executed... \n", 201
515
+
516
+ # forecast-model-tune
517
+ if action_name == "forecast-model-tune":
518
+ action_str = " >> Performing a machine learning forecast model tune..."
519
+ logger.info(action_str)
520
+ df_pred_optim, mlf = await forecast_model_tune(input_data_dict, logger)
521
+ if df_pred_optim is None or mlf is None:
522
+ return await grab_log(action_str), 400
523
+
524
+ injection_dict = get_injection_dict_forecast_model_tune(df_pred_optim, mlf)
525
+ await _save_injection_dict(injection_dict, emhass_conf["data_path"])
526
+ return "EMHASS >> Action forecast-model-tune executed... \n", 201
527
+
528
+ # regressor-model-fit
529
+ if action_name == "regressor-model-fit":
530
+ action_str = " >> Performing a machine learning regressor fit..."
531
+ logger.info(action_str)
532
+ await regressor_model_fit(input_data_dict, logger)
533
+ return "EMHASS >> Action regressor-model-fit executed... \n", 201
534
+
535
+ # regressor-model-predict
536
+ if action_name == "regressor-model-predict":
537
+ action_str = " >> Performing a machine learning regressor predict..."
538
+ logger.info(action_str)
539
+ await regressor_model_predict(input_data_dict, logger)
540
+ return "EMHASS >> Action regressor-model-predict executed... \n", 201
541
+
542
+ return None
543
+
544
+
545
+ async def _save_injection_dict(injection_dict, data_path):
546
+ """Helper to save injection dict to pickle."""
547
+ async with aiofiles.open(str(data_path / injection_dict_file), "wb") as fid:
548
+ content = pickle.dumps(injection_dict)
549
+ await fid.write(content)
550
+
551
+
552
+ @app.route("/action/<action_name>", methods=["POST"])
553
+ async def action_call(action_name: str):
554
+ """
555
+ Receive POST action, run action according to the passed slug (action_name)
556
+ """
557
+ global continual_publish_thread
558
+ global injection_dict
559
+
560
+ # Load Parameters
561
+ params, costfun, runtimeparams = await _load_params_and_runtime(
562
+ request, emhass_conf, app.logger
563
+ )
564
+ if params is None:
565
+ return await make_response(await grab_log(" >> Obtaining params: "), 400)
566
+
567
+ # Check for actions that do not need input_data_dict
568
+ if action_name in ["weather-forecast-cache", "export-influxdb-to-csv"]:
569
+ msg, status = await _handle_action_dispatch(
570
+ action_name, None, emhass_conf, params, runtimeparams, app.logger
571
+ )
572
+ if status == 400:
573
+ return await make_response(msg, status)
574
+
575
+ # Check logs for these specific actions
576
+ action_str = f" >> Performing {action_name}..."
577
+ if not await check_file_log(action_str):
578
+ return await make_response(msg, status)
579
+ return await make_response(await grab_log(action_str), 400)
580
+
581
+ # Set Input Data Dict (Common for all other actions)
582
+ action_str = " >> Setting input data dict"
583
+ app.logger.info(action_str)
584
+ input_data_dict = await set_input_data_dict(
585
+ emhass_conf, costfun, params, runtimeparams, action_name, app.logger
586
+ )
587
+
588
+ if not input_data_dict:
589
+ return await make_response(await grab_log(action_str), 400)
290
590
 
291
- #if data path specified by options.json
292
- if options is not None:
293
- if options.get('data_path', None) != None and options.get('data_path', None) != "default":
294
- DATA_PATH = options.get('data_path', None);
591
+ # Handle Continual Publish Threading
592
+ if len(continual_publish_thread) == 0 and input_data_dict["retrieve_hass_conf"].get(
593
+ "continual_publish", False
594
+ ):
595
+ continual_loop = threading.Thread(
596
+ name="continual_publish",
597
+ target=lambda: asyncio.run(continual_publish(input_data_dict, entity_path, app.logger)),
598
+ )
599
+ continual_loop.start()
600
+ continual_publish_thread.append(continual_loop)
295
601
 
296
- #save paths to dictionary
602
+ # Execute Action
603
+ msg, status = await _handle_action_dispatch(
604
+ action_name, input_data_dict, emhass_conf, params, runtimeparams, app.logger
605
+ )
606
+
607
+ # Final Log Check & Response
608
+ if status == 201:
609
+ if not await check_file_log(" >> "):
610
+ return await make_response(msg, 201)
611
+ return await make_response(await grab_log(" >> "), 400)
612
+
613
+ return await make_response(msg, status)
614
+
615
+
616
+ async def _setup_paths() -> tuple[Path, Path, Path, Path, Path, Path]:
617
+ """Helper to set up environment paths and update emhass_conf."""
618
+ # Find envs; if not set, use defaults
619
+ DATA_PATH = os.getenv("DATA_PATH", default="/data/")
620
+ ROOT_PATH = os.getenv("ROOT_PATH", default=str(Path(__file__).parent))
621
+ CONFIG_PATH = os.getenv("CONFIG_PATH", default="/share/config.json")
622
+ OPTIONS_PATH = os.getenv("OPTIONS_PATH", default="/data/options.json")
623
+ DEFAULTS_PATH = os.getenv("DEFAULTS_PATH", default=ROOT_PATH + "/data/config_defaults.json")
624
+ ASSOCIATIONS_PATH = os.getenv("ASSOCIATIONS_PATH", default=ROOT_PATH + "/data/associations.csv")
625
+ LEGACY_CONFIG_PATH = os.getenv("LEGACY_CONFIG_PATH", default="/app/config_emhass.yaml")
626
+ # Define the paths
297
627
  config_path = Path(CONFIG_PATH)
628
+ options_path = Path(OPTIONS_PATH)
629
+ defaults_path = Path(DEFAULTS_PATH)
630
+ associations_path = Path(ASSOCIATIONS_PATH)
631
+ legacy_config_path = Path(LEGACY_CONFIG_PATH)
298
632
  data_path = Path(DATA_PATH)
299
633
  root_path = Path(ROOT_PATH)
300
- emhass_conf = {}
301
- emhass_conf['config_path'] = config_path
302
- emhass_conf['data_path'] = data_path
303
- emhass_conf['root_path'] = root_path
304
-
305
- # Read the example default config file
306
- if config_path.exists():
307
- with open(config_path, 'r') as file:
308
- config = yaml.load(file, Loader=yaml.FullLoader)
309
- retrieve_hass_conf = config['retrieve_hass_conf']
310
- optim_conf = config['optim_conf']
311
- plant_conf = config['plant_conf']
312
- else:
313
- app.logger.error("Unable to open the default configuration yaml file")
314
- raise Exception("Failed to open config file, config_path: "+str(config_path))
634
+ # Add paths to emhass_conf
635
+ emhass_conf["config_path"] = config_path
636
+ emhass_conf["options_path"] = options_path
637
+ emhass_conf["defaults_path"] = defaults_path
638
+ emhass_conf["associations_path"] = associations_path
639
+ emhass_conf["legacy_config_path"] = legacy_config_path
640
+ emhass_conf["data_path"] = data_path
641
+ emhass_conf["root_path"] = root_path
642
+ return (
643
+ config_path,
644
+ options_path,
645
+ defaults_path,
646
+ associations_path,
647
+ legacy_config_path,
648
+ root_path,
649
+ )
650
+
315
651
 
316
- params = {}
317
- params['retrieve_hass_conf'] = retrieve_hass_conf
318
- params['optim_conf'] = optim_conf
319
- params['plant_conf'] = plant_conf
320
- web_ui_url = '0.0.0.0'
652
+ async def _build_configuration(
653
+ config_path: Path, legacy_config_path: Path, defaults_path: Path
654
+ ) -> tuple[dict, str, str]:
655
+ """Helper to build configuration and local variables."""
656
+ config = {}
657
+ # Combine parameters from configuration sources (if exists)
658
+ config.update(
659
+ await build_config(
660
+ emhass_conf,
661
+ app.logger,
662
+ str(defaults_path),
663
+ str(config_path) if config_path.exists() else None,
664
+ str(legacy_config_path) if legacy_config_path.exists() else None,
665
+ )
666
+ )
667
+ if type(config) is bool and not config:
668
+ raise Exception("Failed to find default config")
669
+ # Set local variables
670
+ costfun = os.getenv("LOCAL_COSTFUN", config.get("costfun", "profit"))
671
+ logging_level = os.getenv("LOGGING_LEVEL", config.get("logging_level", "INFO"))
672
+ # Temporary set logging level if debug
673
+ if logging_level == "DEBUG":
674
+ app.logger.setLevel(logging.DEBUG)
675
+ return config, costfun, logging_level
321
676
 
677
+
678
+ async def _setup_secrets(args: dict | None, options_path: Path) -> str:
679
+ """Helper to parse arguments and build secrets."""
680
+ ## Secrets
681
+ # Argument
682
+ argument = {}
683
+ no_response = False
684
+ if args is not None:
685
+ if args.get("url", None):
686
+ argument["url"] = args["url"]
687
+ if args.get("key", None):
688
+ argument["key"] = args["key"]
689
+ if args.get("no_response", None):
690
+ no_response = args["no_response"]
691
+ # Combine secrets from ENV, arguments/ARG, secrets file (secrets_emhass.yaml), options (options.json from the add-on configuration) and/or the Home Assistant Standalone API (if they exist)
692
+ global emhass_conf
693
+ emhass_conf, secrets = await build_secrets(
694
+ emhass_conf,
695
+ app.logger,
696
+ secrets_path=os.getenv("SECRETS_PATH", default="/app/secrets_emhass.yaml"),
697
+ options_path=str(options_path),
698
+ argument=argument,
699
+ no_response=bool(no_response),
700
+ )
701
+ params_secrets.update(secrets)
702
+ return params_secrets.get("server_ip", "0.0.0.0")
703
+
704
+
705
+ def _validate_data_path(root_path: Path) -> None:
706
+ """Helper to validate and create the data path if necessary."""
707
+ # Check if data path exists
708
+ if not os.path.isdir(emhass_conf["data_path"]):
709
+ app.logger.warning("Unable to find data_path: " + str(emhass_conf["data_path"]))
710
+ if os.path.isdir(Path("/data/")):
711
+ emhass_conf["data_path"] = Path("/data/")
712
+ else:
713
+ Path(root_path / "data/").mkdir(parents=True, exist_ok=True)
714
+ emhass_conf["data_path"] = root_path / "data/"
715
+ app.logger.info("data_path has been set to " + str(emhass_conf["data_path"]))
716
+
717
+
718
+ async def _load_injection_dict() -> dict | None:
719
+ """Helper to load the injection dictionary."""
322
720
  # Initialize this global dict
323
- if (emhass_conf['data_path'] / 'injection_dict.pkl').exists():
324
- with open(str(emhass_conf['data_path'] / 'injection_dict.pkl'), "rb") as fid:
325
- injection_dict = pickle.load(fid)
721
+ if (emhass_conf["data_path"] / injection_dict_file).exists():
722
+ async with aiofiles.open(str(emhass_conf["data_path"] / injection_dict_file), "rb") as fid:
723
+ content = await fid.read()
724
+ return pickle.loads(content)
326
725
  else:
327
- injection_dict = None
328
-
329
- if args.addon==1:
330
- # The cost function
331
- costfun = options.get('costfun', 'profit')
332
- # Some data from options
333
- logging_level = options.get('logging_level','INFO')
334
- url_from_options = options.get('hass_url', 'empty')
335
- if url_from_options == 'empty' or url_from_options == '' or url_from_options == "http://supervisor/core/api":
336
- url = "http://supervisor/core/api/config"
337
- else:
338
- hass_url = url_from_options
339
- url = hass_url+"api/config"
340
- token_from_options = options.get('long_lived_token', 'empty')
341
- if token_from_options == 'empty' or token_from_options == '':
342
- long_lived_token = key
343
- else:
344
- long_lived_token = token_from_options
345
- headers = {
346
- "Authorization": "Bearer " + long_lived_token,
347
- "content-type": "application/json"
348
- }
349
- if not args.no_response==1:
350
- response = get(url, headers=headers)
351
- config_hass = response.json()
352
- params_secrets = {
353
- 'hass_url': hass_url,
354
- 'long_lived_token': long_lived_token,
355
- 'time_zone': config_hass['time_zone'],
356
- 'lat': config_hass['latitude'],
357
- 'lon': config_hass['longitude'],
358
- 'alt': config_hass['elevation']
359
- }
360
- else: #if no_response is set to true
361
- costfun = os.getenv('LOCAL_COSTFUN', default='profit')
362
- logging_level = os.getenv('LOGGING_LEVEL', default='INFO')
363
- # check if secrets file exists
364
- if Path(os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml')).is_file():
365
- with open(os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml'), 'r') as file:
366
- params_secrets = yaml.load(file, Loader=yaml.FullLoader)
367
- app.logger.debug("Obtained secrets from secrets file")
368
- #If cant find secrets_emhass file, use env
369
- else:
370
- app.logger.debug("Failed to find secrets file: "+str(os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml')))
371
- app.logger.debug("Setting location defaults")
372
- params_secrets = {}
373
- #If no secrets file try args, else set some defaults
374
- params_secrets['time_zone'] = os.getenv("TIME_ZONE", default="Europe/Paris")
375
- params_secrets['lat'] = float(os.getenv("LAT", default="45.83"))
376
- params_secrets['lon'] = float(os.getenv("LON", default="6.86"))
377
- params_secrets['alt'] = float(os.getenv("ALT", default="4807.8"))
378
- #If ARG/ENV specify url and key, then override secrets file
379
- if hass_url != "":
380
- params_secrets['hass_url'] = hass_url
381
- app.logger.debug("Using URL obtained from ARG/ENV")
382
- else:
383
- hass_url = params_secrets.get('hass_url',"http://localhost:8123/")
384
- if long_lived_token != "":
385
- params_secrets['long_lived_token'] = long_lived_token
386
- app.logger.debug("Using Key obtained from ARG/ENV")
387
- else: #If addon is false
388
- costfun = os.getenv('LOCAL_COSTFUN', default='profit')
389
- logging_level = os.getenv('LOGGING_LEVEL', default='INFO')
390
- if Path(os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml')).is_file():
391
- with open(os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml'), 'r') as file:
392
- params_secrets = yaml.load(file, Loader=yaml.FullLoader)
393
- #Check if URL and KEY are provided by file. If not attempt using values from ARG/ENV
394
- if params_secrets.get("hass_url", "empty") == "empty" or params_secrets['hass_url'] == "":
395
- app.logger.info("No specified Home Assistant URL in secrets_emhass.yaml. Attempting to get from ARG/ENV")
396
- if hass_url != "":
397
- params_secrets['hass_url'] = hass_url
398
- else:
399
- app.logger.error("Can not find Home Assistant URL from secrets_emhass.yaml or ARG/ENV")
400
- raise Exception("Can not find Home Assistant URL from secrets_emhass.yaml or ARG/ENV")
401
- else:
402
- hass_url = params_secrets['hass_url']
403
- if params_secrets.get("long_lived_token", "empty") == "empty" or params_secrets['long_lived_token'] == "":
404
- app.logger.info("No specified Home Assistant KEY in secrets_emhass.yaml. Attempting to get from ARG/ENV")
405
- if key != "":
406
- params_secrets['long_lived_token'] = key
407
- else:
408
- app.logger.error("Can not find Home Assistant KEY from secrets_emhass.yaml or ARG/ENV")
409
- raise Exception("Can not find Home Assistant KEY from secrets_emhass.yaml or ARG/ENV")
410
- else: #If no secrets file try args, else set some defaults
411
- app.logger.info("Failed to find secrets_emhass.yaml in directory:" + os.getenv('SECRETS_PATH', default='/app/secrets_emhass.yaml') )
412
- app.logger.info("Attempting to use secrets from arguments or environment variables")
413
- params_secrets = {}
414
- params_secrets['time_zone'] = os.getenv("TIME_ZONE", default="Europe/Paris")
415
- params_secrets['lat'] = float(os.getenv("LAT", default="45.83"))
416
- params_secrets['lon'] = float(os.getenv("LON", default="6.86"))
417
- params_secrets['alt'] = float(os.getenv("ALT", default="4807.8"))
418
- if hass_url != "":
419
- params_secrets['hass_url'] = hass_url
420
- else: #If cant find secrets_emhass and passed url ENV/ARG, then send error
421
- app.logger.error("No specified Home Assistant URL")
422
- raise Exception("Can not find Home Assistant URL from secrets_emhass.yaml or ARG/ENV")
423
- if key != "":
424
- params_secrets['long_lived_token'] = key
425
- else: #If cant find secrets_emhass and passed key ENV/ARG, then send error
426
- app.logger.error("No specified Home Assistant KEY")
427
- raise Exception("Can not find Home Assistant KEY from secrets_emhass.yaml or ARG/ENV")
428
- # Build params
429
- if use_options:
430
- params = build_params(params, params_secrets, options, 1, app.logger)
726
+ return None
727
+
728
+
729
+ async def _build_and_save_params(
730
+ config: dict, costfun: str, logging_level: str, config_path: Path
731
+ ) -> dict:
732
+ """Helper to build parameters and save them to a pickle file."""
733
+ # Build params from config and params_secrets (migrate params to the correct config categories), save result to params.pkl
734
+ params = await build_params(emhass_conf, params_secrets, config, app.logger)
735
+ if type(params) is bool:
736
+ raise Exception("A error has occurred while building params")
737
+ # Update params with local variables
738
+ params["optim_conf"]["costfun"] = costfun
739
+ params["optim_conf"]["logging_level"] = logging_level
740
+ # Save params to file for later reference
741
+ if os.path.exists(str(emhass_conf["data_path"])):
742
+ async with aiofiles.open(str(emhass_conf["data_path"] / params_file), "wb") as fid:
743
+ content = pickle.dumps((config_path, params))
744
+ await fid.write(content)
431
745
  else:
432
- params = build_params(params, params_secrets, options, args.addon, app.logger)
433
- if os.path.exists(str(emhass_conf['data_path'])):
434
- with open(str(emhass_conf['data_path'] / 'params.pkl'), "wb") as fid:
435
- pickle.dump((config_path, params), fid)
436
- else:
437
- raise Exception("missing: " + str(emhass_conf['data_path']))
438
-
439
- # Define logger
440
- #stream logger
441
- ch = logging.StreamHandler()
442
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
443
- ch.setFormatter(formatter)
444
- #Action File logger
445
- fileLogger = logging.FileHandler(str(emhass_conf['data_path'] / 'actionLogs.txt'))
446
- formatter = logging.Formatter('%(levelname)s - %(name)s - %(message)s')
447
- fileLogger.setFormatter(formatter) # add format to Handler
746
+ raise Exception("missing: " + str(emhass_conf["data_path"]))
747
+ return params
748
+
749
+
750
+ async def _configure_logging(logging_level: str) -> None:
751
+ """Helper to configure logging handlers and levels."""
752
+ # Define loggers
753
+ formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
754
+ log.default_handler.setFormatter(formatter)
755
+ # Action file logger
756
+ file_logger = logging.FileHandler(str(emhass_conf["data_path"] / action_log_str))
757
+ formatter = logging.Formatter("%(levelname)s - %(name)s - %(message)s")
758
+ file_logger.setFormatter(formatter) # add format to Handler
448
759
  if logging_level == "DEBUG":
449
760
  app.logger.setLevel(logging.DEBUG)
450
- ch.setLevel(logging.DEBUG)
451
- fileLogger.setLevel(logging.DEBUG)
761
+ file_logger.setLevel(logging.DEBUG)
452
762
  elif logging_level == "INFO":
453
763
  app.logger.setLevel(logging.INFO)
454
- ch.setLevel(logging.INFO)
455
- fileLogger.setLevel(logging.INFO)
764
+ file_logger.setLevel(logging.INFO)
456
765
  elif logging_level == "WARNING":
457
766
  app.logger.setLevel(logging.WARNING)
458
- ch.setLevel(logging.WARNING)
459
- fileLogger.setLevel(logging.WARNING)
767
+ file_logger.setLevel(logging.WARNING)
460
768
  elif logging_level == "ERROR":
461
769
  app.logger.setLevel(logging.ERROR)
462
- ch.setLevel(logging.ERROR)
463
- fileLogger.setLevel(logging.ERROR)
770
+ file_logger.setLevel(logging.ERROR)
464
771
  else:
465
772
  app.logger.setLevel(logging.DEBUG)
466
- ch.setLevel(logging.DEBUG)
467
- fileLogger.setLevel(logging.DEBUG)
773
+ file_logger.setLevel(logging.DEBUG)
468
774
  app.logger.propagate = False
469
- app.logger.addHandler(ch)
470
- app.logger.addHandler(fileLogger)
471
- clearFileLog() #Clear Action File logger file, ready for new instance
472
-
473
- # If entity_path exists, remove any entity/metadata files
474
- entity_path = emhass_conf['data_path'] / "entities"
475
- if os.path.exists(entity_path):
476
- entity_pathContents = os.listdir(entity_path)
477
- if len(entity_pathContents) > 0:
478
- for entity in entity_pathContents:
479
- os.remove(entity_path / entity)
480
-
775
+ app.logger.addHandler(file_logger)
776
+ # Clear Action File logger file, ready for new instance
777
+ await clear_file_log()
778
+
779
+
780
+ def _cleanup_entities() -> Path:
781
+ """Helper to remove entity/metadata files."""
782
+ # If entity_path exists, remove any entity/metadata files
783
+ ent_path = emhass_conf["data_path"] / "entities"
784
+ if os.path.exists(ent_path):
785
+ entity_path_contents = os.listdir(ent_path)
786
+ if len(entity_path_contents) > 0:
787
+ for entity in entity_path_contents:
788
+ os.remove(ent_path / entity)
789
+ return ent_path
790
+
791
+
792
+ async def _initialize_connections(params: dict) -> None:
793
+ """Helper to initialize WebSocket or InfluxDB connections."""
794
+ # Initialize persistent WebSocket connection only if use_websocket is enabled
795
+ use_websocket = params.get("retrieve_hass_conf", {}).get("use_websocket", False)
796
+ use_influxdb = params.get("retrieve_hass_conf", {}).get("use_influxdb", False)
797
+ # Initialize persistent WebSocket connection if enabled
798
+ if use_websocket:
799
+ app.logger.info("WebSocket mode enabled - initializing connection...")
800
+ try:
801
+ await get_websocket_client(
802
+ hass_url=params_secrets["hass_url"],
803
+ token=params_secrets["long_lived_token"],
804
+ logger=app.logger,
805
+ )
806
+ app.logger.info("WebSocket connection established")
807
+ # WebSocket shutdown is already handled by @app.after_serving
808
+ except Exception as ws_error:
809
+ app.logger.warning(f"WebSocket connection failed: {ws_error}")
810
+ app.logger.info("Continuing without WebSocket connection...")
811
+ # Re-raise the exception so before_serving can handle it
812
+ raise
813
+ # Log InfluxDB mode if enabled (No persistent connection init required here)
814
+ elif use_influxdb:
815
+ app.logger.info("InfluxDB mode enabled - using InfluxDB for data retrieval")
816
+ # Default to REST API if neither is enabled
817
+ else:
818
+ app.logger.info("WebSocket and InfluxDB modes disabled - using REST API for data retrieval")
819
+
820
+
821
+ async def initialize(args: dict | None = None):
822
+ global emhass_conf, params_secrets, continual_publish_thread, injection_dict, entity_path
823
+ # Setup paths
824
+ (
825
+ config_path,
826
+ options_path,
827
+ defaults_path,
828
+ _,
829
+ legacy_config_path,
830
+ root_path,
831
+ ) = await _setup_paths()
832
+ # Build configuration
833
+ config, costfun, logging_level = await _build_configuration(
834
+ config_path, legacy_config_path, defaults_path
835
+ )
836
+ # Setup Secrets
837
+ server_ip = await _setup_secrets(args, options_path)
838
+ # Validate Data Path
839
+ _validate_data_path(root_path)
840
+ # Load Injection Dict
841
+ injection_dict = await _load_injection_dict()
842
+ # Build and Save Params
843
+ params = await _build_and_save_params(config, costfun, logging_level, config_path)
844
+ # Configure Logging
845
+ await _configure_logging(logging_level)
846
+ # Cleanup Entities
847
+ entity_path = _cleanup_entities()
848
+ # Initialize Continual Publish Thread
481
849
  # Initialise continual publish thread list
482
850
  continual_publish_thread = []
483
-
484
- # Launch server
485
- port = int(os.environ.get('PORT', 5000))
486
- app.logger.info("Launching the emhass webserver at: http://"+web_ui_url+":"+str(port))
487
- app.logger.info("Home Assistant data fetch will be performed using url: "+hass_url)
488
- app.logger.info("The data path is: "+str(emhass_conf['data_path']))
851
+ # Log Startup Info
852
+ # Logging
853
+ port = int(os.environ.get("PORT", 5000))
854
+ app.logger.info("Launching the emhass webserver at: http://" + server_ip + ":" + str(port))
855
+ app.logger.info(
856
+ "Home Assistant data fetch will be performed using url: " + params_secrets["hass_url"]
857
+ )
858
+ app.logger.info("The data path is: " + str(emhass_conf["data_path"]))
859
+ app.logger.info("The logging is: " + str(logging_level))
489
860
  try:
490
- app.logger.info("Using core emhass version: "+version('emhass'))
861
+ app.logger.info("Using core emhass version: " + version("emhass"))
491
862
  except PackageNotFoundError:
492
863
  app.logger.info("Using development emhass version")
493
- serve(app, host=web_ui_url, port=port, threads=8)
864
+ # Initialize Connections (WebSocket/InfluxDB)
865
+ await _initialize_connections(params)
866
+ app.logger.info("Initialization complete")
867
+
868
+
869
+ async def main() -> None:
870
+ """
871
+ Main function to handle command line arguments.
872
+
873
+ Note: In production, the app should be run via gunicorn with uvicorn workers:
874
+ gunicorn emhass.web_server:app -c gunicorn.conf.py -k uvicorn.workers.UvicornWorker
875
+ """
876
+ parser = argparse.ArgumentParser()
877
+ parser.add_argument("--url", type=str, help="HA URL")
878
+ parser.add_argument("--key", type=str, help="HA long‑lived token")
879
+ parser.add_argument("--no_response", action="store_true")
880
+ args = parser.parse_args()
881
+ args_dict = {k: v for k, v in vars(args).items() if v is not None}
882
+ # Initialize the app before starting server
883
+ await initialize(args_dict)
884
+ # For direct execution (development/testing), use uvicorn programmatically
885
+ host = params_secrets.get("server_ip", "0.0.0.0")
886
+ port = int(os.getenv("PORT", 5000))
887
+ app.logger.info(f"Starting server directly on {host}:{port}")
888
+ # Use uvicorn.Server to run within existing event loop
889
+ config = uvicorn.Config(app, host=host, port=port, log_level="warning")
890
+ server = uvicorn.Server(config)
891
+ await server.serve()
892
+
893
+
894
+ if __name__ == "__main__":
895
+ asyncio.run(main())
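
For context on the new API surface: the rewritten server keeps the POST /action/<action_name> interface from the Flask version while moving to Quart and uvicorn. Below is a minimal, hypothetical client-side sketch of triggering an optimization action against a running instance; the host, the default port 5000, and the example runtime parameter are illustrative assumptions, not part of the diff.

# Hypothetical client sketch (not part of the package diff above).
# POSTs runtime parameters to the /action/<action_name> route defined in web_server.py;
# the host/port and the prediction_horizon value are illustrative assumptions.
import requests

runtimeparams = {"prediction_horizon": 10}  # illustrative MPC runtime parameter
response = requests.post(
    "http://localhost:5000/action/naive-mpc-optim",
    json=runtimeparams,
    timeout=300,
)
# Per the action_call handler above: 201 means the action ran with no ERROR lines
# in action_logs.txt; 400 returns the relevant slice of the action log instead.
print(response.status_code)
print(response.text)

As noted in the main() docstring, production deployments are expected to run the app through gunicorn with uvicorn workers; running the module directly starts uvicorn programmatically for development and testing.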