evse-hub 0.2.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32) hide show
  1. evse_hub-0.2.3/.github/copilot-instructions.md +53 -0
  2. evse_hub-0.2.3/.gitignore +4 -0
  3. evse_hub-0.2.3/.gitlab-ci.yml +40 -0
  4. evse_hub-0.2.3/LICENSE +0 -0
  5. evse_hub-0.2.3/PKG-INFO +18 -0
  6. evse_hub-0.2.3/README.md +2 -0
  7. evse_hub-0.2.3/TODO.md +31 -0
  8. evse_hub-0.2.3/docs/architecture.md +42 -0
  9. evse_hub-0.2.3/docs/operations.md +0 -0
  10. evse_hub-0.2.3/pyproject.toml +39 -0
  11. evse_hub-0.2.3/secrets.yaml +0 -0
  12. evse_hub-0.2.3/setup.cfg +4 -0
  13. evse_hub-0.2.3/src/evse_hub/cli/evse_online.py +270 -0
  14. evse_hub-0.2.3/src/evse_hub/cli/evse_scan.py +75 -0
  15. evse_hub-0.2.3/src/evse_hub/poller/TODO.md +37 -0
  16. evse_hub-0.2.3/src/evse_hub/poller/__init__.py +0 -0
  17. evse_hub-0.2.3/src/evse_hub/poller/async_tools.py +27 -0
  18. evse_hub-0.2.3/src/evse_hub/poller/config.py +7 -0
  19. evse_hub-0.2.3/src/evse_hub/poller/etrel.py +41 -0
  20. evse_hub-0.2.3/src/evse_hub/poller/influx_writes.py +103 -0
  21. evse_hub-0.2.3/src/evse_hub/poller/main.py +84 -0
  22. evse_hub-0.2.3/src/evse_hub/poller/modbus_device.py +92 -0
  23. evse_hub-0.2.3/src/evse_hub/poller/modbus_reads.py +95 -0
  24. evse_hub-0.2.3/src/evse_hub/poller/wifi.py +20 -0
  25. evse_hub-0.2.3/src/evse_hub.egg-info/PKG-INFO +18 -0
  26. evse_hub-0.2.3/src/evse_hub.egg-info/SOURCES.txt +30 -0
  27. evse_hub-0.2.3/src/evse_hub.egg-info/dependency_links.txt +1 -0
  28. evse_hub-0.2.3/src/evse_hub.egg-info/entry_points.txt +3 -0
  29. evse_hub-0.2.3/src/evse_hub.egg-info/requires.txt +7 -0
  30. evse_hub-0.2.3/src/evse_hub.egg-info/top_level.txt +1 -0
  31. evse_hub-0.2.3/tests/read_registers.py +84 -0
  32. evse_hub-0.2.3/tests/test_asyncio.py +0 -0
@@ -0,0 +1,53 @@
1
+ # Copilot instructions for this repository
2
+
3
+ Purpose
4
+ - Provide concise, repository-specific guidance so an AI coding agent can be productive immediately.
5
+
6
+ Big picture
7
+ - This project is a small Python 3.11 daemon that polls Modbus/TCP devices and writes metrics to InfluxDB. See `src/poller/main.py` for the polling orchestration.
8
+ - Polling flow: `main.py` creates periodic tasks that call `poll_modbus_async` (in `src/poller/modbus_device.py`) -> `read_*` functions in `src/poller/modbus_reads.py` -> `write_influx_master` in `src/poller/influx_writes.py`.
9
+
10
+ Key files and responsibilities
11
+ - `src/poller/main.py`: application entrypoint and task orchestration (`periodic_master`).
12
+ - `src/poller/modbus_device.py`: `ModbusDevice` state, connection management, and exponential backoff logic.
13
+ - `src/poller/modbus_reads.py`: low-level Modbus register reads and helpers (e.g. `_f32_be`, `_read_input_registers`). Use these read functions as blocking callables passed to `poll_modbus_async`.
14
+ - `src/poller/influx_writes.py`: writes to two InfluxDB instances using keys from `secrets.yaml`.
15
+ - `src/poller/async_tools.py`: `run_blocking` helper; uses a shared `ThreadPoolExecutor` (`_EXECUTOR`). Prefer this helper for running blocking calls from async code.
16
+ - `src/poller/config.py`: `load_secrets(path)` reads YAML secrets; keys required by `influx_writes.py` are: `ha_ip`, `ha_influx_username`, `ha_influx_pwd`, `mons_ip`, `mons_influx_username`, `mons_influx_pwd`.
17
+ - `src/poller/wifi.py`: checks association via the `iw` tool; used to skip polls when Wi‑Fi is disconnected.
18
+
19
+ Conventions & patterns to follow
20
+ - Keep polling logic separate from transport-specific device state: add new device protocols by creating new `Device` classes and `poll_*` functions (see `ModbusDevice` as the pattern).
21
+ - Blocking I/O must be run through `run_blocking(...)` to avoid blocking the event loop. Example: `await run_blocking(write_influx_master, secrets, data, dryrun=True)` (see `main.py`).
22
+ - Read functions used by `poll_modbus_async` are synchronous and should accept `(client, unit) -> dict` and raise on error (see `read_master_loadguard`).
23
+ - Follow the backoff semantics in `modbus_device.py` if adding new polling helpers (respect `dev.next_ok_ts` and `dev.fail_count`).
24
+
25
+ Running & testing
26
+ - Python version: >=3.11 (declared in `pyproject.toml`).
27
+ - Recommended run (from repository root):
28
+
29
+ ```bash
30
+ PYTHONPATH=src python -m poller.main
31
+ ```
32
+
33
+ - Run tests (from repository root):
34
+
35
+ ```bash
36
+ PYTHONPATH=src pytest -q
37
+ ```
38
+
39
+ External dependencies & environment
40
+ - Dependencies: the code imports `pymodbus` and `influxdb` (the InfluxDBClient package); `pyyaml` is declared in `pyproject.toml`. Install with pip in a virtualenv as needed.
41
+ - `src/poller/wifi.py` requires the `iw` binary on the host.
42
+ - Secrets file: `secrets.yaml` (loaded by `src/poller/config.py`). Do not commit real credentials — the repo contains a placeholder `secrets.yaml` under `src/`.
43
+
44
+ Examples for common code changes
45
+ - Add a new Modbus read: implement a function `read_<device>(client, unit) -> dict` in `src/poller/modbus_reads.py` mirroring `read_master_loadguard`, then call it from `main.py` via `poll_modbus_async`.
46
+ - Add an output sink: create a new writer function (blocking) in `src/poller/influx_writes.py` or a new module; call it via `run_blocking` from async code. Respect `dryrun=True` for safe local testing.
47
+
48
+ Notes for AI agents
49
+ - Prefer minimal, focused diffs. Avoid wide refactors unless the change is necessary and accompanied by tests.
50
+ - When changing I/O or dependency usage, update `pyproject.toml` and document required system packages (e.g., `iw`) in the PR description.
51
+ - Use existing functions as canonical examples: `poll_modbus_async`, `read_master_loadguard`, `write_influx_master`, and `run_blocking`.
52
+
53
+ If anything in this guidance is unclear or you need more examples, ask for clarification and I will iterate.
@@ -0,0 +1,4 @@
1
+ __pycache__
2
+ *.egg-info*
3
+ tests/*yml
4
+ tests/*json
@@ -0,0 +1,40 @@
1
+ stages:
2
+ - build
3
+ - deploy
4
+
5
+ variables:
6
+ GIT_DEPTH: 0
7
+ TWINE_USERNAME: "__token__"
8
+ TWINE_PASSWORD: $PYPI_TOKEN
9
+
10
+ build_package:
11
+ stage: build
12
+ before_script:
13
+ - apt-get update
14
+ - apt-get install -y --no-install-recommends git
15
+ script:
16
+ - python -m venv venv
17
+ - source venv/bin/activate
18
+ - pip install --upgrade pip build setuptools setuptools-scm wheel
19
+ - python -m build --no-isolation
20
+ artifacts:
21
+ paths:
22
+ - dist
23
+ expire_in: 1 day
24
+
25
+
26
+ deploy_to_pypi:
27
+ stage: deploy
28
+ only:
29
+ - tags
30
+ before_script:
31
+ - apt-get update
32
+ - apt-get install -y --no-install-recommends git
33
+ - python -m venv venv
34
+ - source venv/bin/activate
35
+ - pip install --upgrade pip twine build setuptools setuptools-scm wheel
36
+ - echo $PYPI_API_TOKEN
37
+ script:
38
+ - rm -rf dist/
39
+ - python -m build --no-isolation
40
+ - twine upload dist/*
evse_hub-0.2.3/LICENSE ADDED
File without changes
@@ -0,0 +1,18 @@
1
+ Metadata-Version: 2.4
2
+ Name: evse-hub
3
+ Version: 0.2.3
4
+ Summary: Solar monitoring and control daemons
5
+ Requires-Python: >=3.11
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: PyYAML>=6
9
+ Requires-Dist: requests>=2
10
+ Requires-Dist: pymodbus>=3.11
11
+ Requires-Dist: influxdb>=5.3
12
+ Requires-Dist: msgpack>=1.0
13
+ Requires-Dist: python-dateutil>=2.9
14
+ Requires-Dist: pydantic>=2.0
15
+ Dynamic: license-file
16
+
17
+ # README
18
+
@@ -0,0 +1,2 @@
1
+ # README
2
+
evse_hub-0.2.3/TODO.md ADDED
@@ -0,0 +1,31 @@
1
+ # Various ToDos
2
+
3
+ ## Watchdog
4
+
5
+ ### Algodue watchdog
6
+
7
+ ```bash
8
+ curl -sS -c /tmp/algodue.cookie \
9
+ -X POST http://ALGODUE_IP/index.htm \
10
+ -H 'Content-Type: application/x-www-form-urlencoded' \
11
+ --data 'user=admin&password=admin'
12
+ ```
13
+
14
+ This stores the auth cookie. Then
15
+ ```bash
16
+ curl -sS -b /tmp/algodue.cookie \
17
+ -X POST http://ALGODUE_IP/parameters_change.htm \
18
+ -H 'Content-Type: application/x-www-form-urlencoded' \
19
+ --data 'addressingType=Static' \
20
+ --data 'host_name=ETHBOARD' \
21
+ --data 'IP=192.168.1.249' \
22
+ --data 'gateway=192.168.1.1' \
23
+ --data 'mask=255.255.255.0' \
24
+ --data 'primary_dns=8.8.8.8' \
25
+ --data 'secondary_dns=8.8.4.4' \
26
+ --data 'logical_addr=01' \
27
+ --data 'chbSyncWithNTP=on' \
28
+ --data 'ntp_server=europe.pool.ntp.org' \
29
+ --data 'utc_correction=%2B01'
30
+ ```
31
+
@@ -0,0 +1,42 @@
1
+ # Architecture
2
+
3
+ ## General structure
4
+
5
+
6
+ > **Mission statement**
7
+ >
8
+ > We log building electricity consumption and PV production and allow users to **charge their EV conditioned on PV over-production**. Data for informative user dashboards is written to an influxDB.
9
+
10
+ ### Logging
11
+
12
+ We need to log from the algoDue load Guard to get the total current (positive or negative) to the house. We also want to get data out of the PV inverter to have the actual production data. We want to know the SOC of the building battery. We poll all EVSEs to know their current state (charging / cable connected / etc).
13
+
14
+ We also want to know when additional EVSEs come online. Need to think about that later.
15
+
16
+ ### Control
17
+
18
+ We want to set a max current on all EVSE:
19
+
20
+ - lift current limit for immediate charging
21
+ - time-dependent current limit for night-charging
22
+ - set adaptive current depending on solar state (SOC and/or actual over-production)
23
+
24
+
25
+ We also want a watchdog to restart the algoDue in case it stops communicating with the master EVSE (timing issue).
26
+
27
+ ### Software structure
28
+
29
+ Decision between the following general approaches
30
+
31
+ - One service fetches all necessary data stores it in a state variable and bases control decisions on those. It also stores the data and a heartbeat to the influxdb and controls the current limits on the EVSEs.
32
+
33
+ - Several services
34
+ - One only fetches data from the building (algoDue, inverter, SOC, ...) and publishes it via MQTT and/or influxdb. Maybe I will separate this into two more services later:
35
+ - building stuff
36
+ - EVSE stuff
37
+ - One only controls the currents on the EVSEs
38
+
39
+ I like the one service approach better because it requires less inter-process communication. But I want to get almost all the live data out to my influxdb instances anyway. So, maybe that argument does not hold. I like the multi service variant better because each service is less complex and there is a clear separation between data gathering and storing and control. One thing I do not understand yet: my EVSEs allow communication via modbus registers. Reading via port 502, writing via port 503. Can I separate this into two services, or exactly not? Another thing to think about: the master EVSE provides via its port 502 registers both access to the EVSE details like cable connected, current charging power, etc., and also the data from the loadguard. Maybe the service which polls the EVSE has a device class which knows the attribute *master-EVSE*.
40
+
41
+
42
+
File without changes
@@ -0,0 +1,39 @@
1
+
2
+ [project]
3
+ name = "evse-hub"
4
+ dynamic = ["version"]
5
+ description = "Solar monitoring and control daemons"
6
+ readme = "README.md"
7
+ requires-python = ">=3.11"
8
+
9
+ dependencies = [
10
+ "PyYAML>=6",
11
+ "requests>=2",
12
+ "pymodbus>=3.11",
13
+ "influxdb>=5.3",
14
+ "msgpack>=1.0",
15
+ "python-dateutil>=2.9",
16
+ "pydantic>=2.0",]
17
+
18
+ [project.scripts]
19
+ evse-online = "evse_hub.cli.evse_online:main"
20
+ evse-scan = "evse_hub.cli.evse_scan:main"
21
+
22
+
23
+ [build-system]
24
+ requires = ["setuptools>=68", "wheel", "setuptools_scm[toml]>=6.0"]
25
+ build-backend = "setuptools.build_meta"
26
+
27
+ [tool.setuptools]
28
+ package-dir = {"" = "src"}
29
+
30
+ [tool.setuptools.packages.find]
31
+ where = ["src"]
32
+
33
+ [tool.pytest.ini_options]
34
+ testpaths = ["tests"]
35
+
36
+ [tool.setuptools_scm]
37
+ version_scheme = "post-release"
38
+ local_scheme = "no-local-version"
39
+ # fallback_version = "0.0.0"
File without changes
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+
@@ -0,0 +1,270 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import json
6
+ import logging
7
+ import os
8
+ import random
9
+ import signal
10
+ import time
11
+ from dataclasses import dataclass
12
+ from pathlib import Path
13
+ from typing import Any, Dict, List, Optional, Tuple
14
+
15
+ import yaml
16
+
17
+
18
+ # Locations/configurable via environment (useful for systemd unit files)
19
+ EVSE_YML = Path(os.getenv("EVSE_YML", "evse.yml"))
20
+ STATUS_JSON = Path(os.getenv("STATUS_JSON", "status.json"))
21
+
22
+ # Configure logging from environment (default INFO). Example: LOG_LEVEL=DEBUG
23
+ LOG_LEVEL_NAME = os.getenv("LOG_LEVEL", "INFO").upper()
24
+ logging.basicConfig(level=LOG_LEVEL_NAME)
25
+ logger = logging.getLogger(__name__)
26
+
27
+ # If set to a level name (e.g. INFO, DEBUG), and the logger's level is
28
+ # at least that verbose, the status.json payload will also be written to
29
+ # the logger at INFO level. Leave unset to disable.
30
+ _status_level_name = os.getenv("WRITE_STATUS_TO_INFO_LEVEL", "")
31
+ WRITE_STATUS_TO_INFO_LEVEL: int | None = None
32
+ if _status_level_name:
33
+ try:
34
+ WRITE_STATUS_TO_INFO_LEVEL = logging._nameToLevel.get(_status_level_name.upper())
35
+ except Exception:
36
+ WRITE_STATUS_TO_INFO_LEVEL = None
37
+
38
+ PORT = 502
39
+
40
+ # Connect behavior
41
+ CONNECT_TIMEOUT_S = 0.45
42
+ RETRY_ONCE_DELAY_S_RANGE = (0.08, 0.25) # jitter before the retry attempt
43
+
44
+ # Scheduler behavior
45
+ # How often to re-check *online* devices. Configurable via env var
46
+ # `EVSE_POLL_INTERVAL_S` (seconds). Default: 3600 (1 hour).
47
+ try:
48
+ BASE_OK_INTERVAL_S = float(os.getenv("EVSE_POLL_INTERVAL_S", "3600"))
49
+ except Exception:
50
+ BASE_OK_INTERVAL_S = 3600.0
51
+ if BASE_OK_INTERVAL_S <= 0:
52
+ raise ValueError("EVSE_POLL_INTERVAL_S must be a positive number of seconds")
53
+
54
+ BASE_FAIL_INTERVAL_S = 60.0 # initial re-check interval after first failure
55
+ MAX_FAIL_INTERVAL_S = 300.0 # cap offline backoff at 5 minutes
56
+ JITTER_FRACTION = 0.15 # +/- 15% jitter on scheduling
57
+
58
+ # Concurrency limit (even if you have many devices)
59
+ CONCURRENCY = 50
60
+
61
+
62
@dataclass
class DeviceState:
    """Mutable per-device scheduling state for the online-checker loop."""

    unit_id: int
    name: str
    ip: Optional[str] = None  # None => device is listed in evse.yml but never probed

    online: bool = False
    fails: int = 0  # consecutive failures
    next_check_ts: float = 0.0  # monotonic time when to check next
71
+
72
+
73
def _with_jitter(seconds: float, frac: float = JITTER_FRACTION) -> float:
    """Scale *seconds* by a factor drawn uniformly from [1 - frac, 1 + frac].

    The lower bound is clamped at 0 so a large *frac* can never yield a
    negative interval.
    """
    factor = random.uniform(max(0.0, 1.0 - frac), 1.0 + frac)
    return seconds * factor
80
+
81
+
82
async def tcp_connect_ok(ip: str, port: int, timeout_s: float) -> bool:
    """Probe (ip, port) with a plain TCP connect.

    Returns True when the handshake completes within *timeout_s*; the
    connection is closed immediately. Timeouts and socket-level errors
    are reported as False.
    """
    try:
        _reader, writer = await asyncio.wait_for(
            asyncio.open_connection(ip, port), timeout=timeout_s
        )
        writer.close()
        try:
            await writer.wait_closed()
        except Exception:
            # wait_closed can raise on an already-broken transport; the
            # connect itself succeeded, so still report True.
            pass
        return True
    except (asyncio.TimeoutError, OSError):
        return False
96
+
97
+
98
async def check_with_retry(ip: str) -> bool:
    """One connect attempt, then a single jittered retry on failure."""
    if await tcp_connect_ok(ip, PORT, CONNECT_TIMEOUT_S):
        return True
    # Brief random pause so a fleet of failing devices does not retry in lockstep.
    await asyncio.sleep(random.uniform(*RETRY_ONCE_DELAY_S_RANGE))
    return await tcp_connect_ok(ip, PORT, CONNECT_TIMEOUT_S)
108
+
109
+
110
def load_devices(path: Path) -> List[Tuple[int, str, Optional[str]]]:
    """Parse evse.yml and return (unit_id, name, ip) tuples.

    Devices without an `ip` key are kept (ip=None) so callers can list them,
    but they will not be actively checked. Entries that are not mappings, or
    that lack a valid int `unit_id` / str `name`, are skipped silently.

    Raises ValueError when the document root or the `devices` entry has the
    wrong shape.
    """
    raw = yaml.safe_load(path.read_text(encoding="utf-8"))
    if not isinstance(raw, dict):
        raise ValueError("evse.yml root must be a mapping/dict")

    entries = raw.get("devices", [])
    if not isinstance(entries, list):
        raise ValueError("evse.yml: 'devices' must be a list")

    result: List[Tuple[int, str, Optional[str]]] = []
    for entry in entries:
        if not isinstance(entry, dict):
            continue
        uid = entry.get("unit_id")
        name = entry.get("name")
        addr = entry.get("ip")
        if isinstance(uid, int) and isinstance(name, str):
            result.append((uid, name, addr if isinstance(addr, str) else None))
    return result
134
+
135
+
136
def compute_next_interval(online: bool, fails: int) -> float:
    """Return the delay until the next check for a device.

    Online devices are re-checked at the fixed BASE_OK_INTERVAL_S. Offline
    devices back off exponentially: BASE_FAIL_INTERVAL_S doubled once per
    consecutive failure beyond the first, capped at MAX_FAIL_INTERVAL_S.
    """
    if online:
        return BASE_OK_INTERVAL_S
    # Caller guarantees fails >= 1 on the offline path.
    doublings = max(0, fails - 1)
    return min(BASE_FAIL_INTERVAL_S * (2 ** doublings), MAX_FAIL_INTERVAL_S)
148
+
149
+
150
def write_status_json(path: Path, states: Dict[int, DeviceState]) -> Dict[str, Any]:
    """Serialize per-device online flags plus a unix timestamp to *path*.

    Output shape:
        {"ts": <unix>, "<unit_id>": {"online": bool}, ...}
    with unit ids emitted in ascending numeric order. Returns the payload
    that was written.
    """
    snapshot: Dict[str, Any] = {"ts": int(time.time())}
    for uid in sorted(states):
        snapshot[str(uid)] = {"online": bool(states[uid].online)}
    text = json.dumps(snapshot, indent=2, sort_keys=False)
    path.write_text(text + "\n", encoding="utf-8")
    return snapshot
164
+
165
+
166
async def poll_loop(stop_event: asyncio.Event) -> None:
    """Main scheduler: probe each configured device on its own cadence.

    Loads evse.yml once, keeps one DeviceState per unit_id, runs due checks
    concurrently (bounded by CONCURRENCY), and rewrites STATUS_JSON after
    every wake-up — note a snapshot is written each loop iteration even when
    no device was due. Exits when *stop_event* is set, writing one final
    snapshot on the way out.
    """
    # Load config once at start (easy to add reload-on-change later)
    devs = load_devices(EVSE_YML)
    if not devs:
        raise SystemExit("No devices found under 'devices:' in evse.yml")

    # Create state per unit_id
    states: Dict[int, DeviceState] = {}
    now_mono = asyncio.get_running_loop().time()
    for unit_id, name, ip in devs:
        states[unit_id] = DeviceState(
            unit_id=unit_id,
            name=name,
            ip=ip,
            online=False,
            fails=0,
            # IP-less devices get next_check_ts=inf so they are never due;
            # the conditional covers the whole sum (small initial jitter).
            next_check_ts=now_mono + random.uniform(0.0, 1.0) if ip is not None else float("inf"),  # small initial jitter
        )

    sem = asyncio.Semaphore(CONCURRENCY)

    async def check_one(st: DeviceState) -> None:
        # Probe one device and reschedule it with jitter.
        # If the device has no IP configured, never attempt to check it.
        if st.ip is None:
            st.online = False
            st.fails = 0
            st.next_check_ts = float("inf")
            return
        async with sem:
            ok = await check_with_retry(st.ip)

        if ok:
            st.online = True
            st.fails = 0
        else:
            st.online = False
            st.fails += 1

        interval = compute_next_interval(st.online, max(1, st.fails) if not st.online else 0)
        interval = _with_jitter(interval)
        st.next_check_ts = asyncio.get_running_loop().time() + interval

    # Main scheduler loop
    while not stop_event.is_set():
        loop = asyncio.get_running_loop()
        now = loop.time()

        due = [st for st in states.values() if st.next_check_ts <= now]
        if due:
            # Kick all due checks concurrently
            await asyncio.gather(*(check_one(st) for st in due))

        # Update status.json after each batch of checks
        payload = write_status_json(STATUS_JSON, states)

        # Optionally also write the payload to the logger at INFO
        # when the configured level is enabled (useful for CI/debug).
        if WRITE_STATUS_TO_INFO_LEVEL is not None and logger.isEnabledFor(WRITE_STATUS_TO_INFO_LEVEL):
            logger.info("status.json: %s", json.dumps(payload, sort_keys=True))

        # Sleep until the next device is due (or a short minimum to avoid busy loop)
        next_due = min(st.next_check_ts for st in states.values())
        sleep_s = max(0.1, next_due - loop.time())

        # Wait either for the sleep to finish or for a shutdown signal
        sleep_task = asyncio.create_task(asyncio.sleep(sleep_s))
        stop_task = asyncio.create_task(stop_event.wait())
        done, pending = await asyncio.wait({sleep_task, stop_task}, return_when=asyncio.FIRST_COMPLETED)
        for p in pending:
            p.cancel()

    # On shutdown request, write one final status snapshot
    try:
        payload = write_status_json(STATUS_JSON, states)
        if WRITE_STATUS_TO_INFO_LEVEL is not None and logger.isEnabledFor(WRITE_STATUS_TO_INFO_LEVEL):
            logger.info("final status.json: %s", json.dumps(payload, sort_keys=True))
    except Exception:
        logger.exception("Failed writing final status.json")
244
+
245
+
246
def main() -> None:
    """Console entry point (`evse-online`): run poll_loop until SIGINT/SIGTERM."""
    # Create an asyncio event and attach signal handlers for graceful shutdown
    async def _main_async() -> None:
        loop = asyncio.get_running_loop()
        stop_event = asyncio.Event()

        # Register handlers to set the stop event on SIGINT/SIGTERM
        try:
            loop.add_signal_handler(signal.SIGINT, stop_event.set)
            loop.add_signal_handler(signal.SIGTERM, stop_event.set)
        except NotImplementedError:
            # Some platforms (or Python runtimes) may not support add_signal_handler
            pass

        await poll_loop(stop_event)

    try:
        asyncio.run(_main_async())
    except KeyboardInterrupt:
        # asyncio signal handlers should already set the stop event; ensure exit
        logger.info("Interrupted by user; exiting")
267
+
268
+
269
+ if __name__ == "__main__":
270
+ main()
@@ -0,0 +1,75 @@
1
+ #!/usr/bin/env python3
2
+ from __future__ import annotations
3
+
4
+ import asyncio
5
+ import ipaddress
6
+ from dataclasses import dataclass
7
+ from typing import List
8
+
9
+
10
@dataclass(frozen=True)
class HostResult:
    """Immutable scan result for one host."""

    ip: str
    port_502_open: bool  # True when a TCP connect to port 502 succeeded
14
+
15
+
16
async def check_tcp_port(ip: str, port: int, timeout_s: float) -> bool:
    """Return True when a TCP connect to (ip, port) completes within *timeout_s*."""
    try:
        _reader, writer = await asyncio.wait_for(
            asyncio.open_connection(ip, port), timeout=timeout_s
        )
        writer.close()
        # Python 3.11+: StreamWriter.wait_closed exists; tolerate close errors.
        try:
            await writer.wait_closed()
        except Exception:
            pass
        return True
    except (asyncio.TimeoutError, OSError):
        return False
32
+
33
+
34
async def scan_modbus_502_async(
    cidr: str = "192.168.1.0/24",
    timeout_s: float = 0.35,
    concurrency: int = 400,
) -> List[HostResult]:
    """Probe every host in *cidr* for an open TCP/502 (Modbus) port.

    Runs at most *concurrency* connection attempts at once and returns only
    the hosts that answered, sorted by IP address.
    """
    network = ipaddress.ip_network(cidr, strict=False)
    limiter = asyncio.Semaphore(concurrency)

    async def probe(addr: str) -> HostResult:
        async with limiter:
            is_open = await check_tcp_port(addr, 502, timeout_s)
        return HostResult(ip=addr, port_502_open=is_open)

    all_results = await asyncio.gather(
        *(probe(str(host)) for host in network.hosts())
    )
    open_hosts = [r for r in all_results if r.port_502_open]
    open_hosts.sort(key=lambda r: ipaddress.ip_address(r.ip))
    return open_hosts
53
+
54
+
55
def main() -> None:
    """Console entry point for `evse-scan`.

    Usage: evse-scan [CIDR]

    Scans the given network (default 192.168.1.0/24) for hosts with TCP/502
    open and prints the hits. Exits quietly on Ctrl-C.
    """
    import sys

    # Optional positional argument selects the subnet; previously the default
    # was the only option and the "no results" message hard-coded it, so the
    # two could drift apart. The message is now derived from the variable.
    cidr = sys.argv[1] if len(sys.argv) > 1 else "192.168.1.0/24"

    async def _main_async() -> None:
        hits = await scan_modbus_502_async(cidr)
        if not hits:
            print(f"No hosts with TCP/502 open found in {cidr}")
            return

        print("Hosts with TCP/502 open:")
        for h in hits:
            print(f"  {h.ip}:502")

    try:
        asyncio.run(_main_async())
    except KeyboardInterrupt:
        # No cleanup is needed here; just exit quietly on Ctrl-C. (The old
        # comment mentioned a stop event copied from evse_online; none exists
        # in this script.)
        print("Interrupted by user; exiting")
72
+
73
+
74
+ if __name__ == "__main__":
75
+ main()
@@ -0,0 +1,37 @@
1
+ # TODO
2
+
3
+ - [x] [influx_writes.py](influx_writes.py) actually need the wifi checking
4
+
5
+ ## What do I need to understand
6
+
7
+ - [x] [async_tools.py](async_tools.py)
8
+ - Creates a pool of threads for asyncio
9
+ - Defines pool size
10
+ - relays `*args` and `**kwargs` to passed function
11
+ - [x] [config.py](config.py)
12
+ - Reads in configurations from a file `secrets.yml`
13
+ - [x] [device.py](device.py)
14
+ - I understood basic principles of hierarchical logging
15
+ - I understood that `@dataclass` just produces standard `__init__` and other methods for classes that are mostly data (think of it as dict+)
16
+ - `poll_modbus_async` can check for wifi and has exponential back-off implemented. Hopefully not needed now that `ceorl` is in ethernet of garage.
17
+ - [x] [influx_writes.py](influx_writes.py)
18
+ - writes messages to influxdb on ha and mons
19
+ - [x] [main.py](main.py)
20
+ - defines logging level
21
+ - defines the functions which will be run in the loop
22
+ - inside the main
23
+ - Creates a list of devices
24
+ - Creates a list of tasks
25
+ - Runs the tasks in a loop
26
+ - [x] [modbus_reads.py](modbus_reads.py)
27
+ - knows how to cast binary stuff into readable messages
28
+ - Has functions that poll specific things.
29
+ - Building power / currents
30
+ - ..
31
+
32
+ Each of those returns a dict that [influx_writes.py](influx_writes.py) will know what to do.
33
+ - [x] [wifi.py](wifi.py)
34
+ - checks via `iw` if wifi link is healthy ( `iw` must be present in system!)
35
+
36
+ ---
37
+ Overall: main.py imports devices, devices have the connection as member, for a device there needs to be function which unpacks that connection, "*_reads.py" and "*_writes.py" will use that connection.
File without changes
@@ -0,0 +1,27 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ from concurrent.futures import ThreadPoolExecutor
5
+ from typing import Any, Callable
6
+
7
+ # Small, shared pool for all blocking work (modbus + influx + subprocess).
8
+ # Tune max_workers if you add many devices; 4–8 is typically plenty here.
9
+ _EXECUTOR = ThreadPoolExecutor(max_workers=4)
10
+
11
+
12
+ async def run_blocking(fn: Callable[..., Any], /, *args: Any, timeout_s: float | None = None, **kwargs: Any) -> Any:
13
+ """
14
+ Run a blocking function in a bounded thread pool, optionally with a timeout.
15
+
16
+ This is a thin wrapper around loop.run_in_executor (aka to_thread with control).
17
+ """
18
+ loop = asyncio.get_running_loop()
19
+
20
+ def _call():
21
+ return fn(*args, **kwargs)
22
+
23
+ if timeout_s is None:
24
+ return await loop.run_in_executor(_EXECUTOR, _call)
25
+
26
+ async with asyncio.timeout(timeout_s):
27
+ return await loop.run_in_executor(_EXECUTOR, _call)
@@ -0,0 +1,7 @@
1
+ from __future__ import annotations
2
+ import yaml
3
+
4
+
5
def load_secrets(path: str = "secrets.yaml") -> dict:
    """Load the YAML secrets file at *path* and return its top-level mapping.

    The file is opened with an explicit UTF-8 encoding so parsing does not
    depend on the host locale (the original relied on the platform default
    encoding, which breaks on non-UTF-8 locales, e.g. some Windows setups).
    """
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
@@ -0,0 +1,41 @@
1
+ """Simple lookup table for Etrel connector status codes."""
2
+
3
CONNECTOR_STATUS: dict[int, str] = {
    0: "Unknown",
    1: "SocketAvailable",
    2: "WaitingForVehicleToBeConnected",
    3: "WaitingForVehicleToStart",
    4: "Charging",
    5: "ChargingPausedByEv",
    6: "ChargingPausedByEvse",
    7: "ChargingEnded",
    8: "ChargingFault",
    9: "UnpausingCharging",
    10: "Unavailable",
}

CONNECTOR_TYPE: dict[int, str] = {
    1: "SocketType",
    2: "PlugType",
}

MEASURED_PHASES: dict[int, str] = {
    0: "Three phases",
    1: "Single phase L1",
    2: "Single phase L2",
    3: "Single phase L3",
    4: "Unknown",
    5: "Two phases",
}


def _lookup(table: dict[int, str], code: int) -> str:
    """Map a raw register code to its label, with a diagnostic fallback."""
    return table.get(code, f"Unknown({code})")


def get_connector_status(code: int) -> str:
    """Return the human-readable status for a connector code."""
    return _lookup(CONNECTOR_STATUS, code)


def get_connector_type(code: int) -> str:
    """Return the human-readable connector type for a code."""
    return _lookup(CONNECTOR_TYPE, code)


def get_measured_phases(code: int) -> str:
    """Return the human-readable measured phases for a code."""
    return _lookup(MEASURED_PHASES, code)
@@ -0,0 +1,103 @@
1
+ from __future__ import annotations
2
+
3
+ from influxdb import InfluxDBClient
4
+ import logging
5
+ log = logging.getLogger(__name__)
6
+
7
+
8
def _write_one(secrets: dict, prefix: str, database: str, payload: dict,
               label: str, dryrun: bool) -> bool:
    """Write *payload* to one InfluxDB instance; return True on success.

    Secrets are looked up as f"{prefix}_ip", f"{prefix}_influx_username",
    f"{prefix}_influx_pwd". In dryrun mode the payload is only logged.
    All failures (missing secrets, client creation, write) are logged and
    reported as False; nothing raises.
    """
    log = logging.getLogger(__name__)

    if dryrun:
        log.info("%s dryrun payload: %s", label, payload)
        return True

    try:
        # Imported lazily so dryrun use (and tests) work without the
        # influxdb package installed. Missing secrets keys land here too,
        # matching the original per-host "client failed" handling.
        from influxdb import InfluxDBClient
        client = InfluxDBClient(
            host=secrets[f"{prefix}_ip"], port=8086,
            username=secrets[f"{prefix}_influx_username"],
            password=secrets[f"{prefix}_influx_pwd"],
            timeout=5,
        )
    except Exception:
        log.exception("Creating %s InfluxDB client failed", label)
        return False

    try:
        client.switch_database(database)
        client.write_points([payload])
        return True
    except Exception:
        log.exception("Writing to %s influx failed", label)
        return False
    finally:
        try:
            client.close()
        except Exception:
            pass


def write_influx_master(secrets: dict, data: dict, dryrun: bool = False) -> dict:
    """
    Write the master-loadguard measurements to two InfluxDB instances.

    Parameters:
        secrets: dict with ha_ip / ha_influx_username / ha_influx_pwd and the
            corresponding mons_* keys (see config.load_secrets).
        data: reading dict with p_total_kW, i_l{1,2,3}_A, u_l{1,2,3}_V and
            lg_connected keys.
        dryrun: when True, payloads are logged instead of written.

    Returns a dict with per-host success booleans: {"ha": bool, "mons": bool}.
    Never raises; every failure path is logged and reflected in the result.
    (The original duplicated the create/write/close sequence per host; that
    logic now lives once in _write_one.)
    """
    log = logging.getLogger(__name__)
    statuses = {"ha": False, "mons": False}

    # 1) HA influx (building_power DB): total power only.
    try:
        payload_ha = {
            "measurement": "power",
            "fields": {"kW": data["p_total_kW"]},
            "tags": {"success": "1"},
        }
        statuses["ha"] = _write_one(secrets, "ha", "building_power",
                                    payload_ha, "HA", dryrun)
    except Exception:
        log.exception("Unexpected error preparing HA payload")

    # 2) mons influx (building DB): full per-phase detail.
    try:
        payload_mons = {
            "measurement": "power",
            "fields": {
                "power_kW": data["p_total_kW"],
                "current_L1_A": data["i_l1_A"],
                "current_L2_A": data["i_l2_A"],
                "current_L3_A": data["i_l3_A"],
                "voltage_L1N_V": data["u_l1_V"],
                "voltage_L2N_V": data["u_l2_V"],
                "voltage_L3N_V": data["u_l3_V"],
                # -1 is the sentinel for "loadguard link state unknown".
                "lg_connected": int(data["lg_connected"]) if data["lg_connected"] is not None else -1,
            },
            "tags": {"asset": "hak", "flow": "bidirectional"},
        }
        statuses["mons"] = _write_one(secrets, "mons", "building",
                                      payload_mons, "mons", dryrun)
    except Exception:
        log.exception("Unexpected error preparing mons payload")

    return statuses
@@ -0,0 +1,84 @@
1
+ from __future__ import annotations
2
+
3
+ import asyncio
4
+ import logging
5
+ from typing import Dict
6
+
7
+ from .config import load_secrets
8
+ from .modbus_device import ModbusDevice, poll_modbus_async
9
+ from .modbus_reads import read_master_loadguard
10
+ from .influx_writes import write_influx_master
11
+ from .async_tools import run_blocking
12
+
13
# Configure root logging once at module import; INFO is the daemon's default
# verbosity.  Submodules obtain their own loggers via getLogger(__name__).
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
15
+
16
+
17
async def periodic_master(dev: ModbusDevice, secrets: dict, period_s: float) -> None:
    """Poll the master EVSE's LoadGuard block every *period_s* seconds.

    Each successful reading is forwarded to InfluxDB.  Runs until the task
    is cancelled; cancellation is treated as a normal shutdown.
    """
    try:
        while True:
            reading = await poll_modbus_async(dev, read_master_loadguard)
            if reading is not None:
                # The influx client is blocking; keep it off the event loop.
                # NOTE(review): dryrun=True means nothing is actually written
                # to Influx — presumably a testing flag; confirm before deploy.
                await run_blocking(
                    write_influx_master,
                    secrets,
                    reading,
                    dryrun=True,
                    timeout_s=5.0,
                )
            await asyncio.sleep(period_s)
    except asyncio.CancelledError:
        # Expected during shutdown (Ctrl-C / task cancellation): exit quietly.
        pass
33
+
34
+
35
async def main() -> None:
    """Top-level coroutine: start the periodic pollers, clean up on exit.

    Loads credentials from ``secrets.yaml`` (relative to the working
    directory), spawns one polling task per device, and on exit cancels
    every task and closes every Modbus client.
    """
    secrets = load_secrets("secrets.yaml")

    # Modbus endpoints keyed by a stable name (used in logs and task wiring).
    mb_devices: Dict[str, ModbusDevice] = {
        "master_etrel": ModbusDevice("master_etrel", "192.168.1.121", 503, timeout_s=2.0),
    }

    tasks = [
        asyncio.create_task(
            periodic_master(mb_devices["master_etrel"], secrets, period_s=15.0),
            name="periodic_master",
        ),
    ]

    try:
        await asyncio.gather(*tasks)
    except asyncio.CancelledError:
        # The event loop cancelled us; fall through to cleanup.
        pass
    finally:
        log.info("Shutting down...")

        for task in tasks:
            task.cancel()
        # Wait for every task to acknowledge cancellation before closing
        # the clients they may still be using.
        await asyncio.gather(*tasks, return_exceptions=True)

        for device in mb_devices.values():
            device.close()

        log.info("Shutdown complete.")
67
+
68
+
69
def cli() -> None:
    """Console-script / ``python -m`` entry point.

    Wraps ``asyncio.run(main())`` and swallows the final KeyboardInterrupt
    that asyncio.run may re-raise on SIGINT after its own cleanup has run.
    """
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass


# Single entry-point guard.  The previous version had TWO `__main__` guards
# (one before cli() was defined, one after), so running the module as a
# script executed main() twice — once directly and once again via cli().
if __name__ == "__main__":
    cli()
@@ -0,0 +1,92 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import time
5
+ from dataclasses import dataclass
6
+ from typing import Callable, Optional
7
+
8
+ from pymodbus.client import ModbusTcpClient
9
+
10
+ from .wifi import wifi_is_associated
11
+ from .async_tools import run_blocking
12
+
13
+ log = logging.getLogger(__name__)
14
+
15
+
16
@dataclass
class ModbusDevice:
    """A Modbus/TCP endpoint together with its polling backoff state.

    Deliberately Modbus-specific: MQTT- or HTTP-polled devices should get
    their own *Device class and poll_* helpers rather than extra
    protocol-specific fields here.
    """

    name: str
    host: str
    port: int
    unit: int = 1            # Modbus unit/slave id
    timeout_s: float = 2.0   # per-operation TCP timeout, seconds

    # Runtime state (not part of the endpoint's identity).
    client: Optional[ModbusTcpClient] = None
    next_ok_ts: float = 0.0  # epoch seconds before which polling is suppressed
    fail_count: int = 0      # consecutive failures; drives exponential backoff

    def ensure_client(self) -> ModbusTcpClient:
        """Return the cached TCP client, creating it on first use."""
        if self.client is None:
            self.client = ModbusTcpClient(self.host, port=self.port, timeout=self.timeout_s)
        return self.client

    def close(self) -> None:
        """Close and drop the cached client; safe to call repeatedly."""
        if self.client is None:
            return
        try:
            self.client.close()
        except Exception:
            pass  # best effort — the socket may already be gone
        self.client = None
47
+
48
+
49
async def poll_modbus_async(
    dev: ModbusDevice,
    read_fn: Callable[[ModbusTcpClient, int], dict],
    *,
    wifi_if: str = "wlan0",
) -> Optional[dict]:
    """Poll *dev* once via *read_fn*, honouring its exponential-backoff state.

    ``read_fn`` is a blocking block-read function:
        read_fn(client: ModbusTcpClient, unit: int) -> dict

    Returns the measurement dict on success, or None when the poll is
    skipped (inside the backoff window, Wi-Fi down) or fails.
    """
    now = time.time()

    # Still inside the backoff window from a previous failure.
    if now < dev.next_ok_ts:
        return None

    # Optional link check: with the WLAN not associated there is no point in
    # opening a TCP connection, so retry soon WITHOUT growing the backoff —
    # the outage is the network's fault, not the device's.
    if wifi_if:
        if not await run_blocking(wifi_is_associated, wifi_if, timeout_s=1.5):
            log.warning("Wi-Fi %s not associated; skipping poll for %s", wifi_if, dev.name)
            dev.next_ok_ts = now + 2.0
            return None

    client = dev.ensure_client()

    try:
        connected = await run_blocking(client.connect, timeout_s=dev.timeout_s + 1.0)
        if not connected:
            raise RuntimeError("connect failed")
        result = await run_blocking(read_fn, client, dev.unit, timeout_s=dev.timeout_s + 2.0)
    except Exception as exc:
        # Exponential backoff capped at 60 s; drop the client so the next
        # attempt reconnects from scratch.
        dev.fail_count += 1
        delay = min(60, 2 ** min(dev.fail_count, 6))
        dev.next_ok_ts = now + delay
        log.warning("%s poll failed (%s). backoff=%ss", dev.name, exc, delay)
        dev.close()
        return None

    dev.fail_count = 0
    dev.next_ok_ts = now
    return result
@@ -0,0 +1,95 @@
1
+ from __future__ import annotations
2
+
3
+ from struct import pack, unpack
4
+ from pymodbus.client import ModbusTcpClient
5
+
6
+
7
+ def _f32_be(regs: list[int], idx: int) -> float:
8
+ raw = pack(">HH", int(regs[idx]) & 0xFFFF, int(regs[idx + 1]) & 0xFFFF)
9
+ return float(unpack(">f", raw)[0])
10
+
11
+
12
+ def _i64_be(regs: list[int], idx: int) -> int:
13
+ raw = pack(
14
+ ">HHHH",
15
+ int(regs[idx]) & 0xFFFF,
16
+ int(regs[idx + 1]) & 0xFFFF,
17
+ int(regs[idx + 2]) & 0xFFFF,
18
+ int(regs[idx + 3]) & 0xFFFF,
19
+ )
20
+ return int(unpack(">q", raw)[0])
21
+
22
+ def _u32_be(regs: list[int], idx: int) -> int:
23
+ raw = pack(">HH", int(regs[idx + 1]) & 0xFFFF, int(regs[idx]) & 0xFFFF)
24
+ return int(unpack(">i", raw)[0])
25
+
26
+
27
+ def _ascii_be(regs: list[int], idx: int, n_regs: int, *, strip_null: bool = True) -> str:
28
+ b = bytearray()
29
+ for r in regs[idx: idx + n_regs]:
30
+ r &= 0xFFFF
31
+ b.append((r >> 8) & 0xFF) # high byte
32
+ b.append(r & 0xFF) # low byte
33
+
34
+ if strip_null:
35
+ b = b.split(b"\x00", 1)[0] # stop at first NUL
36
+
37
+ return b.decode("ascii", errors="replace").rstrip()
38
+
39
+ def _read_input_registers(client: ModbusTcpClient, *, address: int, count: int, unit_id: int):
40
+ """
41
+ pymodbus kwarg name varies by version: slave / unit / device_id / none.
42
+ Keep it simple and just try the common ones.
43
+ """
44
+ fn = client.read_input_registers
45
+
46
+ # Try keyword variations first
47
+ for kw in ("slave", "unit", "device_id"):
48
+ try:
49
+ return fn(address=address, count=count, **{kw: unit_id})
50
+ except TypeError:
51
+ pass
52
+
53
+ # Fallback: no unit parameter (some setups default to unit 1)
54
+ return fn(address=address, count=count)
55
+
56
+
57
def read_master_loadguard(client: ModbusTcpClient, unit: int) -> dict:
    """Read the LoadGuard measurement block (input registers 2000..2025).

    Returns the LoadGuard connection status plus per-phase voltages and
    currents and the total power, each rounded to 2 decimals.  Raises
    RuntimeError when the device returns a Modbus error response.
    """
    rr = _read_input_registers(client, address=2000, count=26, unit_id=unit)
    if rr.isError():
        raise RuntimeError(rr)

    regs = rr.registers

    # Register 2000: LoadGuard connection flag (0 = no, 1 = yes, else unknown).
    states = {0: ("Not connected", False), 1: ("Connected", True)}
    lg_status, lg_connected = states.get(int(regs[0]), ("Unknown connection status!", None))

    def field(addr: int) -> float:
        # float32 values addressed relative to the block start at 2000.
        return round(_f32_be(regs, addr - 2000), 2)

    return {
        "lg_connected": lg_connected,
        "lg_status": lg_status,
        "u_l1_V": field(2004),
        "u_l2_V": field(2006),
        "u_l3_V": field(2008),
        "i_l1_A": field(2010),
        "i_l2_A": field(2012),
        "i_l3_A": field(2014),
        "p_total_kW": field(2022),
    }
@@ -0,0 +1,20 @@
1
+ from __future__ import annotations
2
+
3
+ import subprocess
4
+
5
def wifi_is_associated(ifname: str = "wlan0") -> bool:
    """Return True if ``iw`` reports *ifname* is associated to an AP.

    Requires the ``iw`` tool.  Any failure (missing binary, timeout,
    non-zero exit) is treated as "not associated" — this is a best-effort
    link probe, not a hard check.
    """
    cmd = ["iw", "dev", ifname, "link"]
    try:
        report = subprocess.check_output(
            cmd,
            text=True,
            stderr=subprocess.STDOUT,
            timeout=1.0,
        )
    except Exception:
        return False

    return report.startswith("Connected to ")
@@ -0,0 +1,18 @@
1
+ Metadata-Version: 2.4
2
+ Name: evse-hub
3
+ Version: 0.2.3
4
+ Summary: Solar monitoring and control daemons
5
+ Requires-Python: >=3.11
6
+ Description-Content-Type: text/markdown
7
+ License-File: LICENSE
8
+ Requires-Dist: PyYAML>=6
9
+ Requires-Dist: requests>=2
10
+ Requires-Dist: pymodbus>=3.11
11
+ Requires-Dist: influxdb>=5.3
12
+ Requires-Dist: msgpack>=1.0
13
+ Requires-Dist: python-dateutil>=2.9
14
+ Requires-Dist: pydantic>=2.0
15
+ Dynamic: license-file
16
+
17
+ # README
18
+
@@ -0,0 +1,30 @@
1
+ .gitignore
2
+ .gitlab-ci.yml
3
+ LICENSE
4
+ README.md
5
+ TODO.md
6
+ pyproject.toml
7
+ secrets.yaml
8
+ .github/copilot-instructions.md
9
+ docs/architecture.md
10
+ docs/operations.md
11
+ src/evse_hub.egg-info/PKG-INFO
12
+ src/evse_hub.egg-info/SOURCES.txt
13
+ src/evse_hub.egg-info/dependency_links.txt
14
+ src/evse_hub.egg-info/entry_points.txt
15
+ src/evse_hub.egg-info/requires.txt
16
+ src/evse_hub.egg-info/top_level.txt
17
+ src/evse_hub/cli/evse_online.py
18
+ src/evse_hub/cli/evse_scan.py
19
+ src/evse_hub/poller/TODO.md
20
+ src/evse_hub/poller/__init__.py
21
+ src/evse_hub/poller/async_tools.py
22
+ src/evse_hub/poller/config.py
23
+ src/evse_hub/poller/etrel.py
24
+ src/evse_hub/poller/influx_writes.py
25
+ src/evse_hub/poller/main.py
26
+ src/evse_hub/poller/modbus_device.py
27
+ src/evse_hub/poller/modbus_reads.py
28
+ src/evse_hub/poller/wifi.py
29
+ tests/read_registers.py
30
+ tests/test_asyncio.py
@@ -0,0 +1,3 @@
1
+ [console_scripts]
2
+ evse-online = evse_hub.cli.evse_online:main
3
+ evse-scan = evse_hub.cli.evse_scan:main
@@ -0,0 +1,7 @@
1
+ PyYAML>=6
2
+ requests>=2
3
+ pymodbus>=3.11
4
+ influxdb>=5.3
5
+ msgpack>=1.0
6
+ python-dateutil>=2.9
7
+ pydantic>=2.0
@@ -0,0 +1 @@
1
+ evse_hub
@@ -0,0 +1,84 @@
1
"""Ad-hoc console check: dump live registers from every EVSE in evse.yaml.

Run manually.  For each device it prints connector status, metering and
identity registers (port 502); for the master it additionally dumps the
LoadGuard and power-cluster blocks (port 503).

Fix: f-strings such as f"...{evse["name"]}..." reuse the outer double quote
inside the replacement field, which is a SyntaxError before Python 3.12
(PEP 701) while the package declares Requires-Python >= 3.11; inner
subscripts now use single quotes.
"""
from evse_hub.poller.modbus_device import ModbusDevice
from evse_hub.poller.modbus_reads import _read_input_registers, _f32_be, _ascii_be, _i64_be, _u32_be
from evse_hub.poller.config import load_secrets
from evse_hub.poller.etrel import *  # CONNECTOR_STATUS, MEASURED_PHASES, CONNECTOR_TYPE, ...
import datetime

evses = load_secrets("evse.yaml")["devices"]

for evse in evses:
    try:
        # --- Connector / metering block (standard Modbus port 502) ---
        dev = ModbusDevice(evse["name"], evse["ip"], 502, timeout_s=2.0)
        client = dev.ensure_client()
        ok = client.connect()
        print(f"\n{evse['name']} connected: {ok}\n")
        rr = _read_input_registers(client, address=0, count=48, unit_id=1)
        r = rr.registers
        print(f"Connector status: {CONNECTOR_STATUS[r[0]]}")
        print(f"Connector measured number of phases: {MEASURED_PHASES[r[1]]}")
        print(f"EV max phase current: {_f32_be(r, 2)} A")
        print(f"Target current from power mgm or modbus: {_f32_be(r, 4)} A")
        print(f"Frequency: {_f32_be(r, 6):.3f} Hz")
        print(f"L1-N Voltage: {_f32_be(r, 8):.1f} V")
        print(f"L2-N Voltage: {_f32_be(r, 10):.1f} V")
        print(f"L3-N Voltage: {_f32_be(r, 12):.1f} V")
        print(f"L1 Current: {_f32_be(r, 14):.2f} A")
        print(f"L2 Current: {_f32_be(r, 16):.2f} A")
        print(f"L3 Current: {_f32_be(r, 18):.2f} A")
        print(f"Active power L1: {_f32_be(r, 20):.2f} kW")
        print(f"Active power L2: {_f32_be(r, 22):.2f} kW")
        print(f"Active power L3: {_f32_be(r, 24):.2f} kW")
        print(f"Total power: {_f32_be(r, 26):.2f} kW")
        print(f"Total imported active energy in running session: {_f32_be(r, 30):.2f} kWh")
        print(f"Running session max power: {_f32_be(r, 44):.2f} kW")

        # --- Identity block (registers 990..1029) ---
        rr = _read_input_registers(client, address=990, count=40, unit_id=1)
        r = rr.registers
        print(f"Serial number: {_ascii_be(r, 0, 10)}")
        print(f"Model: {_ascii_be(r, 1000 - 990, 10)}")
        print(f"HW Version: {_ascii_be(r, 1010 - 990, 5)}")
        print(f"SW Version: {_ascii_be(r, 1015 - 990, 5)}")
        print(f"Number of connectors: {_u32_be(r, 1020 - 990)}")
        print(f"Connector type: {CONNECTOR_TYPE[r[1022 - 990]]}")
        print(f"Number of phases: {r[1023 - 990]}")
        print(f"L1 connected to: {r[1024 - 990]}")
        print(f"L2 connected to: {r[1025 - 990]}")
        print(f"L3 connected to: {r[1026 - 990]}")
        print(f"Custom max current: {_f32_be(r, 1028 - 990)} A")
        client.close()

        # --- Master-only: LoadGuard + power cluster on port 503 ---
        if evse["type"] == "master":
            dev = ModbusDevice(evse["name"], evse["ip"], 503, timeout_s=2.0)
            client = dev.ensure_client()
            ok = client.connect()
            print(f"\n{evse['name']} connected: {ok}\n")
            rr = _read_input_registers(client, address=3000, count=1, unit_id=1)
            r = rr.registers
            print(f"Loadguard installed: {r[0]}")
            rr = _read_input_registers(client, address=2000, count=26, unit_id=1)
            r = rr.registers
            print(f"LoadGuard connected: {r[0]}")
            print(f"Frequency: {_f32_be(r, 2002 - 2000):.2f} Hz")
            print(f"L1-N Voltage: {_f32_be(r, 2004 - 2000):.1f} V")
            print(f"L2-N Voltage: {_f32_be(r, 2006 - 2000):.1f} V")
            print(f"L3-N Voltage: {_f32_be(r, 2008 - 2000):.1f} V")
            print(f"L1 Current: {_f32_be(r, 2010 - 2000):.2f} A")
            print(f"L2 Current: {_f32_be(r, 2012 - 2000):.2f} A")
            print(f"L3 Current: {_f32_be(r, 2014 - 2000):.2f} A")
            print(f"Active power L 1: {_f32_be(r, 2016 - 2000):.2f} kW")
            print(f"Active power L 2: {_f32_be(r, 2018 - 2000):.2f} kW")
            print(f"Active power L 3: {_f32_be(r, 2020 - 2000):.2f} kW")
            print(f"Total power: {_f32_be(r, 2022 - 2000):.2f} kW")
            rr = _read_input_registers(client, address=2100, count=14, unit_id=1)
            r = rr.registers
            print(f"Power cluster Current L1: {_f32_be(r, 2100 - 2100):.2f} A")
            print(f"Power cluster Current L2: {_f32_be(r, 2102 - 2100):.2f} A")
            print(f"Power cluster Current L3: {_f32_be(r, 2104 - 2100):.2f} A")
            print(f"Power cluster Active power L1: {_f32_be(r, 2106 - 2100):.2f} kW")
            print(f"Power cluster Active power L2: {_f32_be(r, 2108 - 2100):.2f} kW")
            print(f"Power cluster Active power L3: {_f32_be(r, 2110 - 2100):.2f} kW")
            print(f"Power cluster Total power: {_f32_be(r, 2112 - 2100):.2f} kW")
            client.close()
    except Exception as e:
        # Best-effort sweep: report the failing device and move on.
        print(f"Error reading {evse['name']}: {e}")
File without changes