emhass 0.9.1__tar.gz → 0.10.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {emhass-0.9.1 → emhass-0.10.1}/CHANGELOG.md +18 -0
- {emhass-0.9.1 → emhass-0.10.1}/PKG-INFO +111 -8
- {emhass-0.9.1 → emhass-0.10.1}/README.md +110 -7
- emhass-0.10.1/data/opt_res_latest.csv +49 -0
- {emhass-0.9.1 → emhass-0.10.1}/setup.py +1 -1
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/command_line.py +209 -7
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/forecast.py +13 -7
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/machine_learning_forecaster.py +20 -20
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/optimization.py +261 -82
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/retrieve_hass.py +55 -7
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/utils.py +68 -102
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/web_server.py +32 -7
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/PKG-INFO +111 -8
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_command_line_utils.py +2 -1
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_forecast.py +1 -1
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_machine_learning_forecaster.py +1 -1
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_machine_learning_regressor.py +2 -1
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_optimization.py +126 -2
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_retrieve_hass.py +1 -1
- {emhass-0.9.1 → emhass-0.10.1}/tests/test_utils.py +3 -3
- emhass-0.9.1/data/opt_res_latest.csv +0 -11
- {emhass-0.9.1 → emhass-0.10.1}/CODE_OF_CONDUCT.md +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/CONTRIBUTING.md +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/LICENSE +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/MANIFEST.in +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_load_cost_forecast.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_load_forecast.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_prod_price_forecast.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_train_load_clustering.pkl +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_train_load_forecast.pkl +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/data_weather_forecast.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/heating_prediction.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/opt_res_perfect_optim_cost.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/opt_res_perfect_optim_profit.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/opt_res_perfect_optim_self-consumption.csv +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/test_df_final.pkl +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/test_response_get_data_get_method.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/test_response_scrapper_get_method.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/test_response_solarforecast_get_method.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/data/test_response_solcast_get_method.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/pyproject.toml +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/setup.cfg +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/__init__.py +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/data/cec_inverters.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/data/cec_modules.pbz2 +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/machine_learning_regressor.py +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/advanced.html +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/basic.html +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/img/emhass_icon.png +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/img/emhass_logo_short.svg +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/img/feather-sprite.svg +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/script.js +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/static/style.css +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/templates/index.html +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass/templates/template.html +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/SOURCES.txt +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/dependency_links.txt +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/entry_points.txt +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/requires.txt +0 -0
- {emhass-0.9.1 → emhass-0.10.1}/src/emhass.egg-info/top_level.txt +0 -0
@@ -1,5 +1,23 @@
|
|
1
1
|
# Changelog
|
2
2
|
|
3
|
+
## 0.10.1 - 2024-06-03
|
4
|
+
### Fix
|
5
|
+
- Fixed PV curtailment maximum possible value constraint
|
6
|
+
- Added PV curtailment to variables to publish to HA
|
7
|
+
|
8
|
+
## 0.10.0 - 2024-06-02
|
9
|
+
### BREAKING CHANGE
|
10
|
+
- In this new version we have added support for PV curtailment computation. While doing this the nominal PV peak power is needed. The easiest way to find this information is by directly using the `inverter_model` defined in the configuration. As this is needed in the optimization to correctly compute PV curtailment, this parameter needs to be properly defined for your installation. Before this change this parameter was only needed if using the PV forecast method `scrapper`, but now it is not optional as it is directly used in the optimization.
|
11
|
+
Use the dedicated webapp to find the correct model for your inverter, if you cannot find your exact brand/model then just pick an inverter with the same nominal power as yours: [https://emhass-pvlib-database.streamlit.app/](https://emhass-pvlib-database.streamlit.app/)
|
12
|
+
### Improvement
|
13
|
+
- Added support for hybrid inverters and PV curtailment computation
|
14
|
+
- Implemented a new `continual_publish` service that avoid the need of setting a special automation for data publish. Thanks to @GeoDerp
|
15
|
+
- Implement a deferrable load start penalty functionality. Thanks to @werdnum
|
16
|
+
- This feature also implement a `def_current_state` that can be passed at runtime to let the optimization consider that a deferrable load is currently scheduled or under operation when launching the optimization task
|
17
|
+
### Fix
|
18
|
+
- Fixed forecast methods to treat delta_forecast higher than 1 day
|
19
|
+
- Fixed solar.forecast wrong interpolation of nan values
|
20
|
+
|
3
21
|
## 0.9.1 - 2024-05-13
|
4
22
|
### Fix
|
5
23
|
- Fix patch for issue with paths to modules and inverters database
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: emhass
|
3
|
-
Version: 0.
|
3
|
+
Version: 0.10.1
|
4
4
|
Summary: An Energy Management System for Home Assistant
|
5
5
|
Home-page: https://github.com/davidusb-geek/emhass
|
6
6
|
Author: David HERNANDEZ
|
@@ -285,6 +285,8 @@ sudo chmod +x /home/user/emhass/scripts/publish_data.sh
|
|
285
285
|
```
|
286
286
|
### Common for any installation method
|
287
287
|
|
288
|
+
#### Option 1, Home Assistant automate publish
|
289
|
+
|
288
290
|
In `automations.yaml`:
|
289
291
|
```yaml
|
290
292
|
- alias: EMHASS day-ahead optimization
|
@@ -300,9 +302,36 @@ In `automations.yaml`:
|
|
300
302
|
action:
|
301
303
|
- service: shell_command.publish_data
|
302
304
|
```
|
303
|
-
In these
|
305
|
+
In these automations the day-ahead optimization is performed once a day, everyday at 5:30am, and the data *(output of automation)* is published every 5 minutes.
|
306
|
+
|
307
|
+
#### Option 2, EMHASS automate publish
|
304
308
|
|
305
|
-
|
309
|
+
In `automations.yaml`:
|
310
|
+
```yaml
|
311
|
+
- alias: EMHASS day-ahead optimization
|
312
|
+
trigger:
|
313
|
+
platform: time
|
314
|
+
at: '05:30:00'
|
315
|
+
action:
|
316
|
+
- service: shell_command.dayahead_optim
|
317
|
+
- service: shell_command.publish_data
|
318
|
+
```
|
319
|
+
in configuration page/`config_emhass.yaml`
|
320
|
+
```json
|
321
|
+
"method_ts_round": "first"
|
322
|
+
"continual_publish": true
|
323
|
+
```
|
324
|
+
In this automation the day-ahead optimization is performed once a day, everyday at 5:30am.
|
325
|
+
If the `freq` parameter is set to `30` *(default)* in the configuration, the results of the day-ahead optimization will generate 48 values *(for each entity)*, a value for each 30 minutes in a day *(i.e. 24 hrs x 2)*.
|
326
|
+
|
327
|
+
Setting the parameter `continual_publish` to `true` in the configuration page, will allow EMHASS to store the optimization results as entities/sensors into separate json files. `continual_publish` will periodically (every `freq` amount of minutes) run a publish, and publish the optimization results of each generated entities/sensors to Home Assistant. The current state of the sensor/entity being updated every time publish runs, selecting one of the 48 stored values, by comparing the stored values timestamps, the current timestamp and [`"method_ts_round": "first"`](#the-publish-data-specificities) to select the optimal stored value for the current state.
|
328
|
+
|
329
|
+
Options 1 and 2 are very similar, however option 2 (`continual_publish`) will require a cpu thread to constantly be run inside of EMHASS, lowering efficiency. The reason why you may pick one over the other is explained in more detail below in [continual_publish](#continual_publish-emhass-automation).
|
330
|
+
|
331
|
+
Lastly, we can link a EMHASS published entities/sensor's current state to a Home Assistant entity on/off switch, controlling a desired controllable load.
|
332
|
+
For example, imagine that I want to control my water heater. I can use a published `deferrable` EMHASS entity to control my water heaters desired behavior. In this case, we could use an automation like below, to control the desired water heater on and off:
|
333
|
+
|
334
|
+
on:
|
306
335
|
```yaml
|
307
336
|
automation:
|
308
337
|
- alias: Water Heater Optimized ON
|
@@ -317,7 +346,7 @@ automation:
|
|
317
346
|
- service: homeassistant.turn_on
|
318
347
|
entity_id: switch.water_heater_switch
|
319
348
|
```
|
320
|
-
|
349
|
+
off:
|
321
350
|
```yaml
|
322
351
|
automation:
|
323
352
|
- alias: Water Heater Optimized OFF
|
@@ -332,14 +361,15 @@ automation:
|
|
332
361
|
- service: homeassistant.turn_off
|
333
362
|
entity_id: switch.water_heater_switch
|
334
363
|
```
|
364
|
+
The result of these automation's will turn on and off the Home Assistant entity `switch.water_heater_switch` using the current state from the EMHASS entity `sensor.p_deferrable0`. `sensor.p_deferrable0` being the entity generated from the EMHASS day-ahead optimization and published by examples above. The `sensor.p_deferrable0` entity current state being updated every 30 minutes (or `freq` minutes) via a automated publish option 1 or 2. *(selecting one of the 48 stored data values)*
|
335
365
|
|
336
366
|
## The publish-data specificities
|
337
367
|
|
338
|
-
|
368
|
+
`publish-data` (which is either run manually, or automatically via `continual_publish` or Home Assistant automation), will push the optimization results to Home Assistant for each deferrable load defined in the configuration. For example if you have defined two deferrable loads, then the command will publish `sensor.p_deferrable0` and `sensor.p_deferrable1` to Home Assistant. When the `dayahead-optim` is launched, after the optimization, either entity json files or a csv file will be saved on disk. The `publish-data` command will load the latest csv/json files to look for the closest timestamp that match the current time using the `datetime.now()` method in Python. This means that if EMHASS is configured for 30min time step optimizations, the csv/json will be saved with timestamps 00:00, 00:30, 01:00, 01:30, ... and so on. If the current time is 00:05, and parameter `method_ts_round` is set to `nearest` in the configuration, then the closest timestamp of the optimization results that will be published is 00:00. If the current time is 00:25, then the closest timestamp of the optimization results that will be published is 00:30.
|
339
369
|
|
340
370
|
The `publish-data` command will also publish PV and load forecast data on sensors `p_pv_forecast` and `p_load_forecast`. If using a battery, then the battery optimized power and the SOC will be published on sensors `p_batt_forecast` and `soc_batt_forecast`. On these sensors the future values are passed as nested attributes.
|
341
371
|
|
342
|
-
|
372
|
+
If you run publish manually *(or via a Home Assistant Automation)*, it is possible to provide custom sensor names for all the data exported by the `publish-data` command. For this, when using the `publish-data` endpoint we can just add some runtime parameters as dictionaries like this:
|
343
373
|
```yaml
|
344
374
|
shell_command:
|
345
375
|
publish_data: "curl -i -H \"Content-Type:application/json\" -X POST -d '{\"custom_load_forecast_id\": {\"entity_id\": \"sensor.p_load_forecast\", \"unit_of_measurement\": \"W\", \"friendly_name\": \"Load Power Forecast\"}}' http://localhost:5000/action/publish-data"
|
@@ -387,12 +417,85 @@ In EMHASS we have basically 4 forecasts to deal with:
|
|
387
417
|
|
388
418
|
- PV production selling price forecast: at what price are you selling your excess PV production on the next 24h. This is given in EUR/kWh.
|
389
419
|
|
390
|
-
The sensor containing the load data should be specified in parameter `var_load` in the configuration file. As we want to optimize the household energies, when need to forecast the load power
|
420
|
+
The sensor containing the load data should be specified in parameter `var_load` in the configuration file. As we want to optimize the household energies, we need to forecast the load power consumption. The default method for this is a naive approach using 1-day persistence. The load data variable should not contain the data from the deferrable loads themselves. For example, let's say that you set your deferrable load to be the washing machine. The variable that you should enter in EMHASS will be: `var_load: 'sensor.power_load_no_var_loads'` and `sensor.power_load_no_var_loads = sensor.power_load - sensor.power_washing_machine`. This is supposing that the overall load of your house is contained in variable: `sensor.power_load`. The sensor `sensor.power_load_no_var_loads` can be easily created with a new template sensor in Home Assistant.
|
391
421
|
|
392
422
|
If you are implementing a MPC controller, then you should also need to provide some data at the optimization runtime using the key `runtimeparams`.
|
393
423
|
|
394
424
|
The valid values to pass for both forecast data and MPC related data are explained below.
|
395
425
|
|
426
|
+
### Alternative publish methods
|
427
|
+
Due to the flexibility of EMHASS, multiple different approaches to publishing the optimization results have been created. Select an option that best meets your use case:
|
428
|
+
|
429
|
+
#### publish last optimization *(manual)*
|
430
|
+
By default, running an optimization in EMHASS will output the results into the csv file: `data_path/opt_res_latest.csv` *(overriding the existing data on that file)*. We run the publish command to publish the last optimization saved in the `opt_res_latest.csv`:
|
431
|
+
```bash
|
432
|
+
# RUN dayahead
|
433
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/dayahead-optim
|
434
|
+
# Then publish the results of dayahead
|
435
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data
|
436
|
+
```
|
437
|
+
*Note, the published entities from the publish-data action will not automatically update the entities' current state (current state being used to check when to turn on and off appliances via Home Assistant automations). To update the EMHASS entities' state, another publish would have to be re-run later when the current time matches the next values timestamp (E.g every 30 minutes). See examples below for methods to automate the publish-action.*
|
438
|
+
|
439
|
+
#### continual_publish *(EMHASS Automation)*
|
440
|
+
As discussed in [Common for any installation method - option 2](#option-2-emhass-automate-publish), setting `continual_publish` to `true` in the configuration saves the output of the optimization into the `data_path/entities` folder *(a .json file for each sensor/entity)*. A constant loop (in `freq` minutes) will run, observe the .json files in that folder, and publish the saved files periodically (updating the current state of the entity by comparing date.now with the saved data value timestamps).
|
441
|
+
|
442
|
+
For users that wish to run multiple different optimizations, you can set the runtime parameter: `publish_prefix` to something like: `"mpc_"` or `"dh_"`. This will generate unique entity_id names per optimization and save these unique entities as separate files in the folder. All the entity files will then be updated when the next loop iteration runs. If a different `freq` integer was passed as a runtime parameter in an optimization, the `continual_publish` loop will be based on the lowest `freq` saved. An example:
|
443
|
+
|
444
|
+
```bash
|
445
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_
|
446
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
447
|
+
# RUN MPC, with freq=5, prefix=mpc_
|
448
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
449
|
+
```
|
450
|
+
This will tell continual_publish to loop every 5 minutes based on the freq passed in MPC. All entities from the output of dayahead "dh_" and MPC "mpc_" will be published every 5 minutes.
|
451
|
+
|
452
|
+
</br>
|
453
|
+
|
454
|
+
*It is recommended to use the 2 other options below once you have a more advanced understanding of EMHASS and/or Home Assistant.*
|
455
|
+
|
456
|
+
#### Mixture of continual_publish and manual *(Home Assistant Automation for Publish)*
|
457
|
+
|
458
|
+
You can choose to save one optimization for continual_publish and bypass another optimization by setting `"continual_publish":false` runtime parameter:
|
459
|
+
```bash
|
460
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_, included into continual_publish
|
461
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
462
|
+
|
463
|
+
# RUN MPC, with freq=5, prefix=mpc_, Manually publish, excluded from continual_publish loop
|
464
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"continual_publish":false,"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
465
|
+
# Publish MPC output
|
466
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data
|
467
|
+
```
|
468
|
+
This example saves the dayahead optimization into `data_path/entities` as .json files, being included in the `continual_publish` loop (publishing every 30 minutes). The MPC optimization will not be saved in `data_path/entities`, and therefore only into `data_path/opt_res_latest.csv`. Requiring a publish-data action to be run manually (or via a Home Assistant Automation) for the MPC results.
|
469
|
+
|
470
|
+
#### Manual *(Home Assistant Automation for Publish)*
|
471
|
+
|
472
|
+
For users who wish to have full control of exactly when they will like to run a publish and have the ability to save multiple different optimizations. The `entity_save` runtime parameter has been created to save the optimization output entities to .json files whilst `continual_publish` is set to `false` in the configuration. Allowing the user to reference the saved .json files manually via a publish:
|
473
|
+
|
474
|
+
in configuration page/`config_emhass.yaml` :
|
475
|
+
```json
|
476
|
+
"continual_publish": false
|
477
|
+
```
|
478
|
+
POST action :
|
479
|
+
```bash
|
480
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_, save entity
|
481
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true, "publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
482
|
+
# RUN MPC, with freq=5, prefix=mpc_, save entity
|
483
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true, "freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
484
|
+
```
|
485
|
+
You can then reference these .json saved entities via their `publish_prefix`. Include the same `publish_prefix` in the `publish_data` action:
|
486
|
+
```bash
|
487
|
+
#Publish the MPC optimization ran above
|
488
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"mpc_"}' http://localhost:5000/action/publish-data
|
489
|
+
```
|
490
|
+
This will publish all entities from the MPC (_mpc) optimization above.
|
491
|
+
</br>
|
492
|
+
Alternatively, you can choose to publish all the saved files .json files with `publish_prefix` = all:
|
493
|
+
```bash
|
494
|
+
#Publish all saved entities
|
495
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"all"}' http://localhost:5000/action/publish-data
|
496
|
+
```
|
497
|
+
This action will publish the dayahead (_dh) and MPC (_mpc) optimization results from the optimizations above.
|
498
|
+
|
396
499
|
### Forecast data
|
397
500
|
|
398
501
|
It is possible to provide EMHASS with your own forecast data. For this just add the data as list of values to a data dictionary during the call to `emhass` using the `runtimeparams` option.
|
@@ -491,7 +594,7 @@ Check the dedicated section in the documentation here: [https://emhass.readthedo
|
|
491
594
|
|
492
595
|
## Development
|
493
596
|
|
494
|
-
Pull request are very much accepted on this project. For development you can find some instructions here [Development](https://emhass.readthedocs.io/en/latest/develop.html)
|
597
|
+
Pull request are very much accepted on this project. For development you can find some instructions here [Development](https://emhass.readthedocs.io/en/latest/develop.html).
|
495
598
|
|
496
599
|
## Troubleshooting
|
497
600
|
|
@@ -250,6 +250,8 @@ sudo chmod +x /home/user/emhass/scripts/publish_data.sh
|
|
250
250
|
```
|
251
251
|
### Common for any installation method
|
252
252
|
|
253
|
+
#### Option 1, Home Assistant automate publish
|
254
|
+
|
253
255
|
In `automations.yaml`:
|
254
256
|
```yaml
|
255
257
|
- alias: EMHASS day-ahead optimization
|
@@ -265,9 +267,36 @@ In `automations.yaml`:
|
|
265
267
|
action:
|
266
268
|
- service: shell_command.publish_data
|
267
269
|
```
|
268
|
-
In these
|
270
|
+
In these automations the day-ahead optimization is performed once a day, everyday at 5:30am, and the data *(output of automation)* is published every 5 minutes.
|
271
|
+
|
272
|
+
#### Option 2, EMHASS automate publish
|
269
273
|
|
270
|
-
|
274
|
+
In `automations.yaml`:
|
275
|
+
```yaml
|
276
|
+
- alias: EMHASS day-ahead optimization
|
277
|
+
trigger:
|
278
|
+
platform: time
|
279
|
+
at: '05:30:00'
|
280
|
+
action:
|
281
|
+
- service: shell_command.dayahead_optim
|
282
|
+
- service: shell_command.publish_data
|
283
|
+
```
|
284
|
+
in configuration page/`config_emhass.yaml`
|
285
|
+
```json
|
286
|
+
"method_ts_round": "first"
|
287
|
+
"continual_publish": true
|
288
|
+
```
|
289
|
+
In this automation the day-ahead optimization is performed once a day, everyday at 5:30am.
|
290
|
+
If the `freq` parameter is set to `30` *(default)* in the configuration, the results of the day-ahead optimization will generate 48 values *(for each entity)*, a value for each 30 minutes in a day *(i.e. 24 hrs x 2)*.
|
291
|
+
|
292
|
+
Setting the parameter `continual_publish` to `true` in the configuration page, will allow EMHASS to store the optimization results as entities/sensors into separate json files. `continual_publish` will periodically (every `freq` amount of minutes) run a publish, and publish the optimization results of each generated entities/sensors to Home Assistant. The current state of the sensor/entity being updated every time publish runs, selecting one of the 48 stored values, by comparing the stored values timestamps, the current timestamp and [`"method_ts_round": "first"`](#the-publish-data-specificities) to select the optimal stored value for the current state.
|
293
|
+
|
294
|
+
Options 1 and 2 are very similar, however option 2 (`continual_publish`) will require a cpu thread to constantly be run inside of EMHASS, lowering efficiency. The reason why you may pick one over the other is explained in more detail below in [continual_publish](#continual_publish-emhass-automation).
|
295
|
+
|
296
|
+
Lastly, we can link a EMHASS published entities/sensor's current state to a Home Assistant entity on/off switch, controlling a desired controllable load.
|
297
|
+
For example, imagine that I want to control my water heater. I can use a published `deferrable` EMHASS entity to control my water heaters desired behavior. In this case, we could use an automation like below, to control the desired water heater on and off:
|
298
|
+
|
299
|
+
on:
|
271
300
|
```yaml
|
272
301
|
automation:
|
273
302
|
- alias: Water Heater Optimized ON
|
@@ -282,7 +311,7 @@ automation:
|
|
282
311
|
- service: homeassistant.turn_on
|
283
312
|
entity_id: switch.water_heater_switch
|
284
313
|
```
|
285
|
-
|
314
|
+
off:
|
286
315
|
```yaml
|
287
316
|
automation:
|
288
317
|
- alias: Water Heater Optimized OFF
|
@@ -297,14 +326,15 @@ automation:
|
|
297
326
|
- service: homeassistant.turn_off
|
298
327
|
entity_id: switch.water_heater_switch
|
299
328
|
```
|
329
|
+
The result of these automation's will turn on and off the Home Assistant entity `switch.water_heater_switch` using the current state from the EMHASS entity `sensor.p_deferrable0`. `sensor.p_deferrable0` being the entity generated from the EMHASS day-ahead optimization and published by examples above. The `sensor.p_deferrable0` entity current state being updated every 30 minutes (or `freq` minutes) via a automated publish option 1 or 2. *(selecting one of the 48 stored data values)*
|
300
330
|
|
301
331
|
## The publish-data specificities
|
302
332
|
|
303
|
-
|
333
|
+
`publish-data` (which is either run manually, or automatically via `continual_publish` or Home Assistant automation), will push the optimization results to Home Assistant for each deferrable load defined in the configuration. For example if you have defined two deferrable loads, then the command will publish `sensor.p_deferrable0` and `sensor.p_deferrable1` to Home Assistant. When the `dayahead-optim` is launched, after the optimization, either entity json files or a csv file will be saved on disk. The `publish-data` command will load the latest csv/json files to look for the closest timestamp that match the current time using the `datetime.now()` method in Python. This means that if EMHASS is configured for 30min time step optimizations, the csv/json will be saved with timestamps 00:00, 00:30, 01:00, 01:30, ... and so on. If the current time is 00:05, and parameter `method_ts_round` is set to `nearest` in the configuration, then the closest timestamp of the optimization results that will be published is 00:00. If the current time is 00:25, then the closest timestamp of the optimization results that will be published is 00:30.
|
304
334
|
|
305
335
|
The `publish-data` command will also publish PV and load forecast data on sensors `p_pv_forecast` and `p_load_forecast`. If using a battery, then the battery optimized power and the SOC will be published on sensors `p_batt_forecast` and `soc_batt_forecast`. On these sensors the future values are passed as nested attributes.
|
306
336
|
|
307
|
-
|
337
|
+
If you run publish manually *(or via a Home Assistant Automation)*, it is possible to provide custom sensor names for all the data exported by the `publish-data` command. For this, when using the `publish-data` endpoint we can just add some runtime parameters as dictionaries like this:
|
308
338
|
```yaml
|
309
339
|
shell_command:
|
310
340
|
publish_data: "curl -i -H \"Content-Type:application/json\" -X POST -d '{\"custom_load_forecast_id\": {\"entity_id\": \"sensor.p_load_forecast\", \"unit_of_measurement\": \"W\", \"friendly_name\": \"Load Power Forecast\"}}' http://localhost:5000/action/publish-data"
|
@@ -352,12 +382,85 @@ In EMHASS we have basically 4 forecasts to deal with:
|
|
352
382
|
|
353
383
|
- PV production selling price forecast: at what price are you selling your excess PV production on the next 24h. This is given in EUR/kWh.
|
354
384
|
|
355
|
-
The sensor containing the load data should be specified in parameter `var_load` in the configuration file. As we want to optimize the household energies, when need to forecast the load power
|
385
|
+
The sensor containing the load data should be specified in parameter `var_load` in the configuration file. As we want to optimize the household energies, we need to forecast the load power consumption. The default method for this is a naive approach using 1-day persistence. The load data variable should not contain the data from the deferrable loads themselves. For example, lets say that you set your deferrable load to be the washing machine. The variable that you should enter in EMHASS will be: `var_load: 'sensor.power_load_no_var_loads'` and `sensor.power_load_no_var_loads = sensor.power_load - sensor.power_washing_machine`. This is supposing that the overall load of your house is contained in variable: `sensor.power_load`. The sensor `sensor.power_load_no_var_loads` can be easily created with a new template sensor in Home Assistant.
|
356
386
|
|
357
387
|
If you are implementing a MPC controller, then you should also need to provide some data at the optimization runtime using the key `runtimeparams`.
|
358
388
|
|
359
389
|
The valid values to pass for both forecast data and MPC related data are explained below.
|
360
390
|
|
391
|
+
### Alternative publish methods
|
392
|
+
Due to the flexibility of EMHASS, multiple different approaches to publishing the optimization results have been created. Select an option that best meets your use case:
|
393
|
+
|
394
|
+
#### publish last optimization *(manual)*
|
395
|
+
By default, running an optimization in EMHASS will output the results into the csv file: `data_path/opt_res_latest.csv` *(overriding the existing data on that file)*. We run the publish command to publish the last optimization saved in the `opt_res_latest.csv`:
|
396
|
+
```bash
|
397
|
+
# RUN dayahead
|
398
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/dayahead-optim
|
399
|
+
# Then publish the results of dayahead
|
400
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data
|
401
|
+
```
|
402
|
+
*Note, the published entities from the publish-data action will not automatically update the entities' current state (current state being used to check when to turn on and off appliances via Home Assistant automations). To update the EMHASS entities state, another publish would have to be re-run later when the current time matches the next values timestamp (E.g every 30 minutes). See examples below for methods to automate the publish-action.*
|
403
|
+
|
404
|
+
#### continual_publish *(EMHASS Automation)*
|
405
|
+
As discussed in [Common for any installation method - option 2](#option-2-emhass-automate-publish), setting `continual_publish` to `true` in the configuration saves the output of the optimization into the `data_path/entities` folder *(a .json file for each sensor/entity)*. A constant loop (in `freq` minutes) will run, observe the .json files in that folder, and publish the saved files periodically (updating the current state of the entity by comparing date.now with the saved data value timestamps).
|
406
|
+
|
407
|
+
For users that wish to run multiple different optimizations, you can set the runtime parameter: `publish_prefix` to something like: `"mpc_"` or `"dh_"`. This will generate unique entity_id names per optimization and save these unique entities as separate files in the folder. All the entity files will then be updated when the next loop iteration runs. If a different `freq` integer was passed as a runtime parameter in an optimization, the `continual_publish` loop will be based on the lowest `freq` saved. An example:
|
408
|
+
|
409
|
+
```bash
|
410
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_
|
411
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
412
|
+
# RUN MPC, with freq=5, prefix=mpc_
|
413
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
414
|
+
```
|
415
|
+
This will tell continual_publish to loop every 5 minutes based on the freq passed in MPC. All entities from the output of dayahead "dh_" and MPC "mpc_" will be published every 5 minutes.
|
416
|
+
|
417
|
+
</br>
|
418
|
+
|
419
|
+
*It is recommended to use the 2 other options below once you have a more advanced understanding of EMHASS and/or Home Assistant.*
|
420
|
+
|
421
|
+
#### Mixture of continual_publish and manual *(Home Assistant Automation for Publish)*
|
422
|
+
|
423
|
+
You can choose to save one optimization for continual_publish and bypass another optimization by setting `"continual_publish":false` runtime parameter:
|
424
|
+
```bash
|
425
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_, included into continual_publish
|
426
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
427
|
+
|
428
|
+
# RUN MPC, with freq=5, prefix=mpc_, Manually publish, excluded from continual_publish loop
|
429
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"continual_publish":false,"freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
430
|
+
# Publish MPC output
|
431
|
+
curl -i -H 'Content-Type:application/json' -X POST -d {} http://localhost:5000/action/publish-data
|
432
|
+
```
|
433
|
+
This example saves the dayahead optimization into `data_path/entities` as .json files, being included in the `continual_publish` loop (publishing every 30 minutes). The MPC optimization will not be saved in `data_path/entities`, and therefore only into `data_path/opt_res_latest.csv`. Requiring a publish-data action to be run manually (or via a Home Assistant Automation) for the MPC results.
|
434
|
+
|
435
|
+
#### Manual *(Home Assistant Automation for Publish)*
|
436
|
+
|
437
|
+
For users who wish to have full control of exactly when they would like to run a publish and have the ability to save multiple different optimizations. The `entity_save` runtime parameter has been created to save the optimization output entities to .json files whilst `continual_publish` is set to `false` in the configuration. Allowing the user to reference the saved .json files manually via a publish:
|
438
|
+
|
439
|
+
in configuration page/`config_emhass.yaml` :
|
440
|
+
```json
|
441
|
+
"continual_publish": false
|
442
|
+
```
|
443
|
+
POST action :
|
444
|
+
```bash
|
445
|
+
# RUN dayahead, with freq=30 (default), prefix=dh_, save entity
|
446
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true, "publish_prefix":"dh_"}' http://localhost:5000/action/dayahead-optim
|
447
|
+
# RUN MPC, with freq=5, prefix=mpc_, save entity
|
448
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"entity_save": true, "freq":5,"publish_prefix":"mpc_"}' http://localhost:5000/action/naive-mpc-optim
|
449
|
+
```
|
450
|
+
You can then reference these .json saved entities via their `publish_prefix`. Include the same `publish_prefix` in the `publish-data` action:
|
451
|
+
```bash
|
452
|
+
#Publish the MPC optimization ran above
|
453
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"mpc_"}' http://localhost:5000/action/publish-data
|
454
|
+
```
|
455
|
+
This will publish all entities from the MPC (mpc_) optimization above.
|
456
|
+
</br>
|
457
|
+
Alternatively, you can choose to publish all the saved files .json files with `publish_prefix` = all:
|
458
|
+
```bash
|
459
|
+
#Publish all saved entities
|
460
|
+
curl -i -H 'Content-Type:application/json' -X POST -d '{"publish_prefix":"all"}' http://localhost:5000/action/publish-data
|
461
|
+
```
|
462
|
+
This action will publish the dayahead (dh_) and MPC (mpc_) optimization results from the optimizations above.
|
463
|
+
|
361
464
|
### Forecast data
|
362
465
|
|
363
466
|
It is possible to provide EMHASS with your own forecast data. For this just add the data as list of values to a data dictionary during the call to `emhass` using the `runtimeparams` option.
|
@@ -456,7 +559,7 @@ Check the dedicated section in the documentation here: [https://emhass.readthedo
|
|
456
559
|
|
457
560
|
## Development
|
458
561
|
|
459
|
-
Pull request are very much accepted on this project. For development you can find some instructions here [Development](https://emhass.readthedocs.io/en/latest/develop.html)
|
562
|
+
Pull request are very much accepted on this project. For development you can find some instructions here [Development](https://emhass.readthedocs.io/en/latest/develop.html).
|
460
563
|
|
461
564
|
## Troubleshooting
|
462
565
|
|
@@ -0,0 +1,49 @@
|
|
1
|
+
timestamp,P_PV,P_Load,P_deferrable0,P_deferrable1,P_grid_pos,P_grid_neg,P_grid,P_PV_curtailment,unit_load_cost,unit_prod_price,cost_profit,cost_fun_profit,optim_status
|
2
|
+
2024-06-03 23:00:00+02:00,0.0,2124.8911797752808,0.0,750.0,2874.8912,0.0,2874.8912,0.0,0.1419,0.065,-0.20397353064,-0.20397353064,Optimal
|
3
|
+
2024-06-03 23:30:00+02:00,0.0,393.7693220338983,3000.0,750.0,4143.7693,0.0,4143.7693,0.0,0.1419,0.065,-0.29400043183499996,-0.29400043183499996,Optimal
|
4
|
+
2024-06-04 00:00:00+02:00,0.0,329.5656571428571,0.0,750.0,1079.5657,0.0,1079.5657,0.0,0.1419,0.065,-0.076595186415,-0.076595186415,Optimal
|
5
|
+
2024-06-04 00:30:00+02:00,0.0,214.95473988439306,0.0,750.0,964.95474,0.0,964.95474,0.0,0.1419,0.065,-0.068463538803,-0.068463538803,Optimal
|
6
|
+
2024-06-04 01:00:00+02:00,0.0,254.92180790960455,3000.0,0.0,3254.9218,0.0,3254.9218,0.0,0.1419,0.065,-0.23093670171,-0.23093670171,Optimal
|
7
|
+
2024-06-04 01:30:00+02:00,0.0,653.0385393258427,0.0,0.0,653.03854,0.0,653.03854,0.0,0.1419,0.065,-0.046333084413000006,-0.046333084413000006,Optimal
|
8
|
+
2024-06-04 02:00:00+02:00,0.0,694.8668181818182,0.0,0.0,694.86682,0.0,694.86682,0.0,0.1419,0.065,-0.049300800879,-0.049300800879,Optimal
|
9
|
+
2024-06-04 02:30:00+02:00,0.0,856.8446739130435,0.0,0.0,856.84467,0.0,856.84467,0.0,0.1419,0.065,-0.060793129336499996,-0.060793129336499996,Optimal
|
10
|
+
2024-06-04 03:00:00+02:00,0.0,914.380597826087,0.0,0.0,914.3806,0.0,914.3806,0.0,0.1907,0.065,-0.08718619021000001,-0.08718619021000001,Optimal
|
11
|
+
2024-06-04 03:30:00+02:00,0.0,599.8399421965318,0.0,0.0,599.83994,0.0,599.83994,0.0,0.1907,0.065,-0.057194738279,-0.057194738279,Optimal
|
12
|
+
2024-06-04 04:00:00+02:00,0.0,703.5027607361963,0.0,0.0,703.50276,0.0,703.50276,0.0,0.1907,0.065,-0.067078988166,-0.067078988166,Optimal
|
13
|
+
2024-06-04 04:30:00+02:00,0.0,646.7419879518072,0.0,0.0,646.74199,0.0,646.74199,0.0,0.1907,0.065,-0.061666848746500004,-0.061666848746500004,Optimal
|
14
|
+
2024-06-04 05:00:00+02:00,0.0,1009.152816091954,0.0,0.0,1009.1528,0.0,1009.1528,0.0,0.1907,0.065,-0.09622271948,-0.09622271948,Optimal
|
15
|
+
2024-06-04 05:30:00+02:00,0.0,967.1363841807911,0.0,0.0,967.13638,0.0,967.13638,0.0,0.1907,0.065,-0.092216453833,-0.092216453833,Optimal
|
16
|
+
2024-06-04 06:00:00+02:00,0.0,935.1571508379889,0.0,0.0,935.15715,0.0,935.15715,0.0,0.1907,0.065,-0.0891672342525,-0.0891672342525,Optimal
|
17
|
+
2024-06-04 06:30:00+02:00,60.0,3267.5106703910615,0.0,0.0,3207.5107,0.0,3207.5107,0.0,0.1907,0.065,-0.305836145245,-0.305836145245,Optimal
|
18
|
+
2024-06-04 07:00:00+02:00,840.0,3286.2027777777776,0.0,0.0,2446.2028,0.0,2446.2028,0.0,0.1907,0.065,-0.23324543698000003,-0.23324543698000003,Optimal
|
19
|
+
2024-06-04 07:30:00+02:00,660.0,1496.1914772727273,0.0,0.0,836.19148,0.0,836.19148,0.0,0.1907,0.065,-0.079730857618,-0.079730857618,Optimal
|
20
|
+
2024-06-04 08:00:00+02:00,620.0,794.2991620111732,0.0,0.0,174.29916,0.0,174.29916,0.0,0.1907,0.065,-0.016619424906,-0.016619424906,Optimal
|
21
|
+
2024-06-04 08:30:00+02:00,620.0,832.2424719101124,0.0,0.0,212.24247,0.0,212.24247,0.0,0.1907,0.065,-0.020237319514500002,-0.020237319514500002,Optimal
|
22
|
+
2024-06-04 09:00:00+02:00,6380.0,788.9761235955057,3000.0,750.0,0.0,-1841.0239,-1841.0239,0.0,0.1907,0.065,0.05983327675,0.05983327675,Optimal
|
23
|
+
2024-06-04 09:30:00+02:00,1095.5620000000001,781.2152298850575,0.0,750.0,435.65323,0.0,435.65323,0.0,0.1907,0.065,-0.0415395354805,-0.0415395354805,Optimal
|
24
|
+
2024-06-04 10:00:00+02:00,811.4380000000002,664.0545197740113,0.0,0.0,0.0,-147.38348,-147.38348,0.0,0.1907,0.065,0.0047899631,0.0047899631,Optimal
|
25
|
+
2024-06-04 10:30:00+02:00,681.0759999999999,666.1989265536723,0.0,0.0,0.0,-14.877073,-14.877073,0.0,0.1907,0.065,0.00048350487250000003,0.00048350487250000003,Optimal
|
26
|
+
2024-06-04 11:00:00+02:00,671.9846666666667,669.4183146067417,0.0,0.0,0.0,-2.5663521,-2.5663521,0.0,0.1907,0.065,8.340644325000001e-05,8.340644325000001e-05,Optimal
|
27
|
+
2024-06-04 11:30:00+02:00,6469.634666666666,579.2235294117647,3000.0,750.0,0.0,0.0,0.0,2140.4111,0.1907,-0.07,-0.0,-0.0,Optimal
|
28
|
+
2024-06-04 12:00:00+02:00,2992.012,642.7344318181817,3000.0,0.0,650.72243,0.0,650.72243,0.0,0.1907,-0.07,-0.06204638370050001,-0.06204638370050001,Optimal
|
29
|
+
2024-06-04 12:30:00+02:00,1867.9053333333331,637.1688636363637,0.0,750.0,0.0,0.0,0.0,480.73647,0.1907,-0.07,-0.0,-0.0,Optimal
|
30
|
+
2024-06-04 13:00:00+02:00,2067.554666666667,649.3890173410405,3000.0,0.0,1581.8344,0.0,1581.8344,0.0,0.1907,-0.07,-0.15082791004,-0.15082791004,Optimal
|
31
|
+
2024-06-04 13:30:00+02:00,622.756,509.79664739884396,0.0,0.0,0.0,0.0,0.0,112.95935,0.1907,-0.07,-0.0,-0.0,Optimal
|
32
|
+
2024-06-04 14:00:00+02:00,1518.7553333333335,500.53686046511626,0.0,750.0,0.0,-268.21847,-268.21847,0.0,0.1907,0.065,0.008717100275000002,0.008717100275000002,Optimal
|
33
|
+
2024-06-04 14:30:00+02:00,2551.502,520.944,3000.0,0.0,969.442,0.0,969.442,0.0,0.1907,0.065,-0.0924362947,-0.0924362947,Optimal
|
34
|
+
2024-06-04 15:00:00+02:00,4160.0,415.32341040462427,3000.0,750.0,5.3234104,0.0,5.3234104,0.0,0.1907,0.065,-0.00050758718164,-0.00050758718164,Optimal
|
35
|
+
2024-06-04 15:30:00+02:00,4240.0,321.6410285714286,3000.0,750.0,0.0,-168.35897,-168.35897,0.0,0.1419,0.065,0.005471666525,0.005471666525,Optimal
|
36
|
+
2024-06-04 16:00:00+02:00,560.0,3778.0933888888894,3000.0,750.0,6968.0934,0.0,6968.0934,0.0,0.1419,0.065,-0.49438622673,-0.49438622673,Optimal
|
37
|
+
2024-06-04 16:30:00+02:00,380.0,3990.816179775281,0.0,750.0,4360.8162,0.0,4360.8162,0.0,0.1419,0.065,-0.30939990939,-0.30939990939,Optimal
|
38
|
+
2024-06-04 17:00:00+02:00,0.0,3909.8039890710384,0.0,750.0,4659.804,0.0,4659.804,0.0,0.1419,0.065,-0.33061309380000004,-0.33061309380000004,Optimal
|
39
|
+
2024-06-04 17:30:00+02:00,0.0,4206.869447513812,0.0,0.0,4206.8694,0.0,4206.8694,0.0,0.1907,0.065,-0.40112499728999995,-0.40112499728999995,Optimal
|
40
|
+
2024-06-04 18:00:00+02:00,0.0,486.26584269662925,0.0,0.0,486.26584,0.0,486.26584,0.0,0.1907,0.065,-0.046365447844000006,-0.046365447844000006,Optimal
|
41
|
+
2024-06-04 18:30:00+02:00,0.0,402.43446927374305,0.0,0.0,402.43447,0.0,402.43447,0.0,0.1907,0.065,-0.0383721267145,-0.0383721267145,Optimal
|
42
|
+
2024-06-04 19:00:00+02:00,0.0,316.17875,0.0,0.0,316.17875,0.0,316.17875,0.0,0.1907,0.065,-0.0301476438125,-0.0301476438125,Optimal
|
43
|
+
2024-06-04 19:30:00+02:00,0.0,867.4,0.0,0.0,867.4,0.0,867.4,0.0,0.1907,0.065,-0.08270659000000001,-0.08270659000000001,Optimal
|
44
|
+
2024-06-04 20:00:00+02:00,0.0,340.8070760233918,0.0,0.0,340.80708,0.0,340.80708,0.0,0.1907,0.065,-0.032495955078,-0.032495955078,Optimal
|
45
|
+
2024-06-04 20:30:00+02:00,0.0,349.07406779661017,0.0,750.0,1099.0741,0.0,1099.0741,0.0,0.1419,0.065,-0.077979307395,-0.077979307395,Optimal
|
46
|
+
2024-06-04 21:00:00+02:00,0.0,1790.5224581005587,0.0,750.0,2540.5225,0.0,2540.5225,0.0,0.1419,0.065,-0.18025007137500001,-0.18025007137500001,Optimal
|
47
|
+
2024-06-04 21:30:00+02:00,0.0,2612.0882857142856,0.0,0.0,2612.0883,0.0,2612.0883,0.0,0.1419,0.065,-0.185327664885,-0.185327664885,Optimal
|
48
|
+
2024-06-04 22:00:00+02:00,0.0,2617.098882681564,0.0,0.0,2617.0989,0.0,2617.0989,0.0,0.1419,0.065,-0.185683166955,-0.185683166955,Optimal
|
49
|
+
2024-06-04 22:30:00+02:00,0.0,2254.2344375000002,0.0,0.0,2254.2344,0.0,2254.2344,0.0,0.1419,0.065,-0.15993793068,-0.15993793068,Optimal
|
@@ -19,7 +19,7 @@ long_description = (here / 'README.md').read_text(encoding='utf-8')
|
|
19
19
|
|
20
20
|
setup(
|
21
21
|
name='emhass', # Required
|
22
|
-
version='0.
|
22
|
+
version='0.10.1', # Required
|
23
23
|
description='An Energy Management System for Home Assistant', # Optional
|
24
24
|
long_description=long_description, # Optional
|
25
25
|
long_description_content_type='text/markdown', # Optional (see note above)
|