ariel-facility 0.17.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ariel_facility-0.17.4/.gitignore +46 -0
- ariel_facility-0.17.4/PKG-INFO +15 -0
- ariel_facility-0.17.4/README.MD +89 -0
- ariel_facility-0.17.4/pyproject.toml +63 -0
- ariel_facility-0.17.4/src/ariel_facility/__init__.py +0 -0
- ariel_facility-0.17.4/src/ariel_facility/cgse_explore.py +19 -0
- ariel_facility-0.17.4/src/ariel_facility/cgse_services.py +46 -0
- ariel_facility-0.17.4/src/ariel_facility/settings.yaml +20 -0
- ariel_facility-0.17.4/src/egse/ariel/facility/__init__.py +0 -0
- ariel_facility-0.17.4/src/egse/ariel/facility/database.py +214 -0
- ariel_facility-0.17.4/src/egse/ariel/facility/hk.py +341 -0
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# Python versions and environment
|
|
2
|
+
|
|
3
|
+
__pycache__
|
|
4
|
+
.python-version
|
|
5
|
+
.envrc
|
|
6
|
+
|
|
7
|
+
# Build systems
|
|
8
|
+
|
|
9
|
+
build
|
|
10
|
+
dist
|
|
11
|
+
**/*.egg-info
|
|
12
|
+
|
|
13
|
+
# Apple specific
|
|
14
|
+
|
|
15
|
+
.DS_Store
|
|
16
|
+
|
|
17
|
+
# Unit testing
|
|
18
|
+
|
|
19
|
+
.pytest_cache
|
|
20
|
+
.coverage
|
|
21
|
+
.coverage.*
|
|
22
|
+
.nox
|
|
23
|
+
htmlcov
|
|
24
|
+
|
|
25
|
+
# Virtual environments
|
|
26
|
+
|
|
27
|
+
.env
|
|
28
|
+
.venv
|
|
29
|
+
venv
|
|
30
|
+
|
|
31
|
+
# PyCharm IDE
|
|
32
|
+
|
|
33
|
+
.idea
|
|
34
|
+
|
|
35
|
+
# VSCode IDE
|
|
36
|
+
|
|
37
|
+
.vscode
|
|
38
|
+
*.code-workspace
|
|
39
|
+
|
|
40
|
+
# MKDOCS documentation site
|
|
41
|
+
|
|
42
|
+
/site
|
|
43
|
+
|
|
44
|
+
# Packaging
|
|
45
|
+
|
|
46
|
+
uv.lock
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: ariel-facility
|
|
3
|
+
Version: 0.17.4
|
|
4
|
+
Summary: Extract HK from MySQL Facility Database for Ariel
|
|
5
|
+
Author: IVS KU Leuven
|
|
6
|
+
Maintainer-email: Rik Huygen <rik.huygen@kuleuven.be>, Sara Regibo <sara.regibo@kuleuven.be>
|
|
7
|
+
License-Expression: MIT
|
|
8
|
+
Keywords: Ariel,facility,hardware testing,housekeeping,software framework
|
|
9
|
+
Requires-Python: >=3.10
|
|
10
|
+
Requires-Dist: cgse-common
|
|
11
|
+
Requires-Dist: cgse-core
|
|
12
|
+
Requires-Dist: cgse-gui
|
|
13
|
+
Requires-Dist: crcmod>=1.7
|
|
14
|
+
Requires-Dist: mysql-replication
|
|
15
|
+
Requires-Dist: pyserial>=3.5
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
# Extraction of HK from the MySQL Facility Database
|
|
2
|
+
|
|
3
|
+
During the Ariel TA test campaign at CSL, facility (and potentially other) housekeeping data will be stored in the MySQL
|
|
4
|
+
facility database. To store (housekeeping) data in a consistent way across devices/processes and to enable quick-look
|
|
5
|
+
analysis (e.g. via Grafana dashboards), we want to extract the data from the MySQL facility database and ingest it
|
|
6
|
+
into our TA-EGSE framework.
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
## Database Structure
|
|
11
|
+
|
|
12
|
+
The structure of the MySQL facility database is as follows:
|
|
13
|
+
|
|
14
|
+
- Each sensor has its own table with recorded values (one measure every minute).
|
|
15
|
+
- Each table has the following column names:
|
|
16
|
+
- `measure_id`: Identifiers for the entries in the table (basically the row number),
|
|
17
|
+
- `measure_timestamp`: Timestamp of the measurements [Unix time],
|
|
18
|
+
- `measure_value`: Recorded values (already converted/calibrated).
|
|
19
|
+
|
|
20
|
+
---
|
|
21
|
+
|
|
22
|
+
## Local Settings
|
|
23
|
+
|
|
24
|
+
The following entries have to be included in the (local) settings file:
|
|
25
|
+
|
|
26
|
+
```yaml
|
|
27
|
+
Facility HK:
|
|
28
|
+
TABLES:
|
|
29
|
+
|
|
30
|
+
Facility DB:
|
|
31
|
+
USER:
|
|
32
|
+
PASSWORD:
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
- In the "TABLES" block under "Facility HK", you have to link the table names (as in the facility database) to the
|
|
36
|
+
storage mnemonic (as in the TA-EGSE framework, to pass to the Storage Manager) and the server identifier. This can be
|
|
37
|
+
done by adding entries to the "TABLES" block, in the following format:
|
|
38
|
+
|
|
39
|
+
```
|
|
40
|
+
<table name (in facility database)>: (storage mnemonic, server identifier)
|
|
41
|
+
```
|
|
42
|
+
|
|
43
|
+
- In the "Facility DB" block, the credentials to connect to the MySQL facility database have to be specified via "USER"
|
|
44
|
+
and "PASSWORD".
|
|
45
|
+
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
## Functionality
|
|
49
|
+
|
|
50
|
+
The `FacilityHousekeepingExporter` process is responsible for:
|
|
51
|
+
- Extracting housekeeping data from the MySQL facility database,
|
|
52
|
+
- Storing the extracted housekeeping in dedicated, TA-EGSE-consistent CSV files (via the Storage Manager),
|
|
53
|
+
- Ingesting the extracted housekeeping in the InfluxDB metrics database.
|
|
54
|
+
|
|
55
|
+
For each of the selected tables in the facility database, a dedicated thread will check for new entries in that table. When
|
|
56
|
+
a new entry appears in such a table, the corresponding thread will receive the new data as a `dict` and take the following action:
|
|
57
|
+
- Convert the timestamp to the format that we use throughout the TA-EGSE framework (YYYY-mm-ddTHH:MM:SS.μs+0000).
|
|
58
|
+
- Re-name the key for the timestamp in the dictionary to "timestamp".
|
|
59
|
+
- Re-name the key for the recorded value to the table name.
|
|
60
|
+
- If required by the telemetry, further re-naming of the keys in the dictionary will be performed.
|
|
61
|
+
- Send the new housekeeping value and corresponding timestamp to the Storage Manager. The latter will store it in a
|
|
62
|
+
dedicated CSV file.
|
|
63
|
+
- Send the new housekeeping value and corresponding timestamp to the InfluxDB metrics database.
|
|
64
|
+
|
|
65
|
+
---
|
|
66
|
+
|
|
67
|
+
## Enable Binary Logging
|
|
68
|
+
|
|
69
|
+
To make this all work, binary logging should be enabled on the MySQL server. This can be done by adding the following
|
|
70
|
+
information in the `my.cnf` file:
|
|
71
|
+
|
|
72
|
+
```
|
|
73
|
+
[mysqld]
|
|
74
|
+
log-bin=mysql-bin
|
|
75
|
+
server-id=<server identifier>
|
|
76
|
+
binlog_format=ROW
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
You would have to add an entry for each of the server identifiers listed in the (local) settings file (see section above).
|
|
80
|
+
|
|
81
|
+
To find this file, check the `MYSQL_HOME` environment variable.
|
|
82
|
+
|
|
83
|
+
When you have added all required server identifiers, the MySQL server should be re-started. Also make sure that your
|
|
84
|
+
user has `REPLICATION SLAVE` or `REPLICATION CLIENT` privileges. This can be configured as follows:
|
|
85
|
+
|
|
86
|
+
```mysql
|
|
87
|
+
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'your_user'@'%';
|
|
88
|
+
FLUSH PRIVILEGES;
|
|
89
|
+
```
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "ariel-facility"
|
|
3
|
+
version = "0.17.4"
|
|
4
|
+
description = "Extract HK from MySQL Facility Database for Ariel"
|
|
5
|
+
authors = [
|
|
6
|
+
{name = "IVS KU Leuven"}
|
|
7
|
+
]
|
|
8
|
+
maintainers = [
|
|
9
|
+
{name = "Rik Huygen", email = "rik.huygen@kuleuven.be"},
|
|
10
|
+
{name = "Sara Regibo", email = "sara.regibo@kuleuven.be"}
|
|
11
|
+
]
|
|
12
|
+
|
|
13
|
+
requires-python = ">=3.10"
|
|
14
|
+
license = "MIT"
|
|
15
|
+
keywords = [
|
|
16
|
+
"hardware testing",
|
|
17
|
+
"software framework",
|
|
18
|
+
"facility",
|
|
19
|
+
"housekeeping",
|
|
20
|
+
"Ariel",
|
|
21
|
+
]
|
|
22
|
+
dependencies = [
|
|
23
|
+
"cgse-common",
|
|
24
|
+
"cgse-core",
|
|
25
|
+
"cgse-gui",
|
|
26
|
+
"crcmod>=1.7",
|
|
27
|
+
"pyserial>=3.5",
|
|
28
|
+
"mysql-replication",
|
|
29
|
+
]
|
|
30
|
+
|
|
31
|
+
[project.scripts]
|
|
32
|
+
facility_hk = 'egse.ariel.facility.hk:app'
|
|
33
|
+
|
|
34
|
+
[project.entry-points."cgse.version"]
|
|
35
|
+
ariel-facility = 'egse.version:get_version_installed'
|
|
36
|
+
|
|
37
|
+
[project.entry-points."cgse.settings"]
|
|
38
|
+
ariel-facility = "ariel_facility:settings.yaml"
|
|
39
|
+
|
|
40
|
+
[project.entry-points."cgse.service.device_command"]
|
|
41
|
+
facility_hk = 'ariel_facility.cgse_services:facility_hk'
|
|
42
|
+
|
|
43
|
+
[project.entry-points."cgse.explore"]
|
|
44
|
+
explore = "ariel_facility.cgse_explore"
|
|
45
|
+
|
|
46
|
+
[tool.hatch.build.targets.sdist]
|
|
47
|
+
exclude = [
|
|
48
|
+
"/tests",
|
|
49
|
+
"/.gitignore",
|
|
50
|
+
]
|
|
51
|
+
|
|
52
|
+
[tool.hatch.build.targets.wheel]
|
|
53
|
+
packages = ["src/egse", "src/ariel_facility"]
|
|
54
|
+
|
|
55
|
+
[tool.ruff]
|
|
56
|
+
line-length = 120
|
|
57
|
+
|
|
58
|
+
[tool.ruff.lint]
|
|
59
|
+
extend-select = ["E501"]
|
|
60
|
+
|
|
61
|
+
[build-system]
|
|
62
|
+
requires = ["hatchling"]
|
|
63
|
+
build-backend = "hatchling.build"
|
|
File without changes
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
__all__ = [
|
|
2
|
+
"show_processes",
|
|
3
|
+
]
|
|
4
|
+
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from egse.process import ProcessInfo
|
|
8
|
+
from egse.process import get_processes
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def show_processes():
    """Return a list of ProcessInfo data classes for matching processes from this package."""

    def is_facility_hk_process(proc_info: ProcessInfo):
        # A process belongs to this package when its command line mentions `facility_hk`.
        return re.search(r"facility_hk", proc_info.command)

    return get_processes(is_facility_hk_process)
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
import subprocess
|
|
2
|
+
import sys
|
|
3
|
+
|
|
4
|
+
import rich
|
|
5
|
+
import typer
|
|
6
|
+
|
|
7
|
+
from egse.system import all_logging_disabled
|
|
8
|
+
from egse.system import redirect_output_to_log
|
|
9
|
+
|
|
10
|
+
facility_hk = typer.Typer(name="facility_hk", help="Housekeeping from Facility Database")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@facility_hk.command(name="start")
def start_facility_hk():
    """Starts the extraction of HK from the facility DB into TA-EGSE CSV files."""

    rich.print("Starting the extraction of HK from the facility DB into TA-EGSE CSV files")

    # All output of the spawned exporter goes to a dedicated log file.
    log_stream = redirect_output_to_log("facility_hk.start.log")

    # Launch the exporter as a detached background process.
    subprocess.Popen(
        [sys.executable, "-m", "egse.ariel.facility.hk", "start"],
        stdout=log_stream,
        stderr=log_stream,
        stdin=subprocess.DEVNULL,
        close_fds=True,
    )
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@facility_hk.command(name="stop")
def stop_facility_hk():
    """Stops the extraction of HK from the facility DB into TA-EGSE CSV files."""

    rich.print("Terminating the extraction of HK from the facility DB into TA-EGSE CSV files")

    # All output of the spawned stop command goes to a dedicated log file.
    log_stream = redirect_output_to_log("facility_hk.stop.log")

    # The `stop` sub-command of the exporter module sends the actual quit request.
    subprocess.Popen(
        [sys.executable, "-m", "egse.ariel.facility.hk", "stop"],
        stdout=log_stream,
        stderr=log_stream,
        stdin=subprocess.DEVNULL,
        close_fds=True,
    )
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@facility_hk.command(name="status")
def status_facility_hk():
    """Prints status information about the extraction of HK from the facility DB into TA-EGSE CSV files."""

    # Importing `hk` pulls in the full egse stack, which logs noisily; suppress that for a clean
    # status printout.  (Fixed the garbled help text: "status information the extraction".)
    with all_logging_disabled():
        from egse.ariel.facility import hk

    hk.status()
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
Facility HK:
|
|
2
|
+
HOSTNAME: localhost
|
|
3
|
+
PROTOCOL: tcp
|
|
4
|
+
COMMANDING_PORT: 0 # Port on which the controller listens to commands (REQ-REP)
|
|
5
|
+
MONITORING_PORT: 0 # Port on which the controller sends periodic status information about the device (PUB-SUB)
|
|
6
|
+
SERVICE_PORT: 0 # Port on which the controller listens for configuration and administration (REQ-REP)
|
|
7
|
+
METRICS_PORT: 0
|
|
8
|
+
STORAGE_MNEMONIC: FACILITY
|
|
9
|
+
SERVICE_NAME: FACILITY_HK
|
|
10
|
+
SERVICE_TYPE: FACILITY_HK
|
|
11
|
+
TABLES: # Table name: (storage mnemonic, server ID)
|
|
12
|
+
TABLE1: (ORIGIN1, SERVER_ID_1)
|
|
13
|
+
TABLE2: (ORIGIN2, SERVER_ID_2)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
Facility DB:
|
|
17
|
+
HOST: localhost
|
|
18
|
+
PORT: 3306
|
|
19
|
+
USER: TBD
|
|
20
|
+
PASSWORD: TBD
|
|
File without changes
|
|
@@ -0,0 +1,214 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Part of the housekeeping acquired during the Ariel TA test campaign at CSL will be stored directly into the MySQL
|
|
3
|
+
facility database. This module enables watching specific tables in that database for new entries. The goal is to
|
|
4
|
+
store those into the dedicated CSV files for the TA-EGSE framework.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import threading
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
|
|
10
|
+
from egse.confman.confman_cs import load_setup
|
|
11
|
+
from egse.hk import read_conversion_dict, convert_hk_names
|
|
12
|
+
from egse.log import egse_logger
|
|
13
|
+
from egse.system import str_to_datetime, format_datetime
|
|
14
|
+
from egse.metrics import get_metrics_repo
|
|
15
|
+
from egse.settings import Settings, get_site_id
|
|
16
|
+
from pymysqlreplication import BinLogStreamReader
|
|
17
|
+
from pymysqlreplication.row_event import WriteRowsEvent
|
|
18
|
+
from urllib3.exceptions import NewConnectionError
|
|
19
|
+
import os
|
|
20
|
+
|
|
21
|
+
from egse.storage import StorageProxy
|
|
22
|
+
|
|
23
|
+
LOGGER = egse_logger
|
|
24
|
+
SITE_ID = get_site_id()
|
|
25
|
+
FACILITY_DB_SETTINGS = Settings.load("Facility DB")
|
|
26
|
+
|
|
27
|
+
ID_COLUMN_NAME = "measure_id"
|
|
28
|
+
TIMESTAMP_COLUMN_NAME = "measure_timestamp"
|
|
29
|
+
VALUE_COLUMN_NAME = "measure_value"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class DatabaseTableWatcher:
    """Watches one table in the MySQL facility database and forwards new rows.

    Each new row is translated to TA-EGSE-consistent parameter names, sent to the Storage
    Manager (CSV files), and propagated to the InfluxDB metrics database.
    """

    def __init__(self, table_name: str, origin: str, server_id: int):
        """Initialisation of a watcher for a specific table in the facility database.

        The watcher is a daemon thread that watches the specified table in the facility database for new
        entries.  If a new entry is encountered, it will be sent to the Storage Manager, which will store
        it in the HK file for the given storage mnemonic.

        Args:
            table_name (str): Name of the table in the facility database.
            origin (str): Storage mnemonic for the data in the TA-EGSE framework.
            server_id (int): Unique identifier for the MySQL binlog stream reader.
        """

        self.table_name = table_name
        self.origin = origin
        self.server_id = server_id

        # The conversion dictionary maps facility-database parameter names to TA-EGSE names.
        # When it cannot be read (e.g. no setup available), names are passed through unchanged.
        try:
            self.hk_conversion_dict = read_conversion_dict(self.origin, use_site=False, setup=load_setup())
        except Exception:  # was a bare `except:` — keep SystemExit/KeyboardInterrupt propagating
            self.hk_conversion_dict = None

        # Make a thread and let it start watching the specified table in the facility database

        self.watch_thread = threading.Thread(target=self.watch_db_table)
        self.watch_thread.daemon = True
        self.keep_watching = True

        # Metrics client — only created when both the InfluxDB token and project name are configured.

        token = os.getenv("INFLUXDB3_AUTH_TOKEN")
        project = os.getenv("PROJECT")

        if project and token:
            self.metrics_client = get_metrics_repo(
                "influxdb", {"host": "http://localhost:8181", "database": project, "token": token}
            )
            self.metrics_client.connect()
        else:
            self.metrics_client = None
            LOGGER.warning(
                "INFLUXDB3_AUTH_TOKEN and/or PROJECT environment variable is not set. Metrics will not be propagated "
                "to InfluxDB."
            )

    def start_watching_db_table(self):
        """Starts the thread that checks for new entries in the specified table in the facility database."""

        self.keep_watching = True
        self.watch_thread.start()

    def stop_watching_db_table(self):
        """Stops the thread that checks for new entries in the specified table in the facility database.

        NOTE(review): the watch thread iterates a *blocking* binlog stream, so the `join()` below may
        only return after the next binlog event arrives — confirm this is acceptable for shutdown.
        """

        self.keep_watching = False
        self.watch_thread.join()

    def watch_db_table(self):
        """Lets the thread watch for new entries in the specified table in the facility database.

        If a new entry is encountered, it will be sent to the Storage Manager, which will store it in
        the HK file for the specified storage mnemonic, and it will be propagated to the metrics
        database.
        """

        # Connect to the (MySQL) facility database -> Start watching the specified table

        mysql_settings = {
            "host": FACILITY_DB_SETTINGS.HOST,
            "port": FACILITY_DB_SETTINGS.PORT,
            "user": FACILITY_DB_SETTINGS.USER,
            "passwd": FACILITY_DB_SETTINGS.PASSWORD,
        }

        stream: BinLogStreamReader = BinLogStreamReader(
            connection_settings=mysql_settings,
            server_id=self.server_id,  # Unique identifier per Python client watching the MySQL facility database
            blocking=True,
            only_events=[WriteRowsEvent],  # Watch for new row entries
            only_tables=[self.table_name],  # Watch for changes in the specified table
        )

        while self.keep_watching:
            for bin_log_event in stream:
                for row in bin_log_event.rows:
                    values = row["values"]  # Dictionary with the column names (from the facility database) as keys
                    hk = self.translate_parameter_names(values)  # Convert to TA-EGSE-consistent names
                    self.store_housekeeping_information(hk)
                    self.propagate_metrics(hk)

        stream.close()

    def translate_parameter_names(self, hk: dict) -> dict:
        """Converts the parameter names from the facility database to TA-EGSE-consistent names.

        Args:
            hk (dict): Dictionary with the column names (from the facility database) as keys.

        Returns:
            Dictionary with the TA-EGSE-consistent names as keys.
        """

        from datetime import timezone  # local import: keep the module-level import block untouched

        # Unix time -> formatted datetime string [UTC].
        # BUGFIX: the original called `datetime.datetime.fromtimestamp(..., datetime.UTC)`, which
        # raises AttributeError at runtime because the module imports the `datetime` *class*
        # (`from datetime import datetime`), not the `datetime` module.
        hk["timestamp"] = format_datetime(datetime.fromtimestamp(hk[TIMESTAMP_COLUMN_NAME], tz=timezone.utc))
        del hk[TIMESTAMP_COLUMN_NAME]

        # The row identifier has no meaning outside the facility database.

        del hk[ID_COLUMN_NAME]

        # The recorded value is stored under the table name (one table per sensor).

        hk[self.table_name] = hk[VALUE_COLUMN_NAME]
        del hk[VALUE_COLUMN_NAME]

        if self.hk_conversion_dict:
            return convert_hk_names(hk, self.hk_conversion_dict)
        else:
            return hk

    def store_housekeeping_information(self, hk: dict):
        """Sends the given housekeeping information to the Storage Manager.

        The housekeeping is passed as a dictionary, with the parameter names as keys. There's also an
        entry for the timestamp, which represents the date/time at which the value was received.

        Args:
            hk (dict): Housekeeping that was extracted from the facility database, after converting the
                parameter names to TA-EGSE-consistent names.

        Raises:
            ConnectionError: When the Storage Manager cannot be reached (re-raised after logging).
        """

        try:
            with StorageProxy() as storage:
                response = storage.save({"origin": self.origin, "data": hk})
                if not response.successful:
                    LOGGER.warning(
                        f"Couldn't save facility data to the Storage manager for {self.origin}, cause: {response}"
                    )
        except ConnectionError as exc:
            LOGGER.warning(
                f"Couldn't connect to the Storage Manager to store facility housekeeping for {self.origin}: {exc}"
            )
            raise

    def propagate_metrics(self, hk: dict):
        """Propagates the given housekeeping information to the metrics database.

        The housekeeping is passed as a dictionary, with the parameter names as keys. There's also an
        entry for the timestamp, which represents the date/time at which the value was received. In case
        only the timestamp is present in the dictionary, nothing will be written to the metrics database.

        Args:
            hk (dict): Housekeeping that was extracted from the facility database, after converting the
                parameter names to TA-EGSE-consistent names.
        """

        # Nothing to do when the dictionary holds only the timestamp.
        if not [x for x in hk if x != "timestamp"]:
            LOGGER.debug(f"no metrics defined for {self.origin}")
            return

        try:
            if self.metrics_client:
                point = {
                    "measurement": self.origin.lower(),
                    "tags": {"site_id": SITE_ID, "origin": self.origin},
                    "fields": {hk_name.lower(): hk[hk_name] for hk_name in hk if hk_name != "timestamp"},
                    "time": str_to_datetime(hk["timestamp"]),
                }
                self.metrics_client.write(point)
            else:
                LOGGER.warning(
                    f"Could not write {self.origin} metrics to the time series database (self.metrics_client is None)."
                )
        except NewConnectionError:
            LOGGER.warning(
                f"No connection to the time series database could be established to propagate {self.origin} metrics. "
                f"Check whether this service is (still) running."
            )
|
|
@@ -0,0 +1,341 @@
|
|
|
1
|
+
from typing import Any
|
|
2
|
+
|
|
3
|
+
import sys
|
|
4
|
+
|
|
5
|
+
from egse.hk import read_conversion_dict
|
|
6
|
+
from egse.log import egse_logger
|
|
7
|
+
from egse.registry.client import RegistryClient
|
|
8
|
+
from egse.settings import Settings
|
|
9
|
+
import zmq
|
|
10
|
+
import pickle
|
|
11
|
+
from egse.ariel.facility.database import DatabaseTableWatcher
|
|
12
|
+
import typer
|
|
13
|
+
import rich
|
|
14
|
+
|
|
15
|
+
from egse.setup import load_setup
|
|
16
|
+
from egse.storage import register_to_storage_manager, TYPES, is_storage_manager_active, StorageProxy
|
|
17
|
+
from egse.system import get_host_ip
|
|
18
|
+
from egse.zmq_ser import connect_address, get_port_number, bind_address
|
|
19
|
+
|
|
20
|
+
LOGGER = egse_logger
|
|
21
|
+
CTRL_SETTINGS = Settings.load("Facility HK")
|
|
22
|
+
|
|
23
|
+
TIMEOUT_RECV = 1.0 # seconds
|
|
24
|
+
ORIGIN_LIST = {"TCU": "TCU_TABLE", "LAKESHORE": "LAKESHORE_TABLE"}
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class FacilityHousekeepingExporter:
    def __init__(self):
        """Initialisation of a facility HK exporter.

        This process will extract HK from the facility database, store it in TA-EGSE-consistent CSV files
        and ingest it into the metrics database.

        Upon initialisation, the following actions are performed:

            - From the settings file, we read which tables in the facility database have to be watched for
              new entries (one table per sensor). In this file, we also configure which is the
              corresponding storage mnemonic that will be used by the Storage Manager.
            - For each selected table, a dedicated watcher is defined. It is the task of the watcher to
              keep an eye on its database table, extract new entries, pass them to the Storage Manager
              (with the corresponding storage mnemonics), and ingest it into the metrics database.
            - Register the process to the registry client. This way we can find back its host and ports
              (which is required when using dynamic port allocation), and report its status.
            - We will probably store the data from multiple sensors in the same file. We therefore need to
              pass on to the Storage Manager which origin will receive and store which column names. If we
              don't do this, we end up with corrupt CSV files.
        """

        self.watchers = {}

        # Define a watcher for the tables listed in the settings.
        # Keep track of all (unique) storage mnemonics / origins (to collect the corresponding column names).

        self.origins = []

        for table_name in CTRL_SETTINGS.TABLES:
            origin, server_id = CTRL_SETTINGS.TABLES[table_name]
            self.origins.append(origin)
            self.watchers[table_name] = DatabaseTableWatcher(table_name, origin, server_id)

        self.origins = list(set(self.origins))  # Remove duplicates

        self.keep_extracting = True
        LOGGER.debug("Keep extracting: %s", self.keep_extracting)  # was a stray debug print()

        # Create ZeroMQ socket for commanding

        self.zmq_context = zmq.Context.instance()
        self.cmd_socket = self.zmq_context.socket(zmq.REP)
        endpoint = bind_address(CTRL_SETTINGS.PROTOCOL, CTRL_SETTINGS.COMMANDING_PORT)
        self.cmd_socket.bind(endpoint)  # Bind the socket to the endpoint -> Port allocation happens here

        # Registration to the registry client

        self.registry = RegistryClient()
        self.registry.connect()
        self.register_service()

        # Register to the Storage Manager (pass the column names for each storage mnemonic)

        self.register_to_storage_manager()

    def run(self):
        """Starts watching for changes in the specified tables in the facility database.

        Blocks until a `quit` command is received on the commanding socket (or a KeyboardInterrupt is
        caught), then de-registers from the registry client and the Storage Manager, and stops the
        table watchers.
        """

        watcher: DatabaseTableWatcher

        poller = zmq.Poller()
        poller.register(self.cmd_socket, zmq.POLLIN)

        # Start watching the tables

        for watcher in self.watchers.values():
            watcher.start_watching_db_table()

        try:
            while self.keep_extracting:
                # Keep on listening for the `quit` command (poll times out after 5 s).
                if _check_commander_status(self.cmd_socket, poller):
                    self.keep_extracting = False
                    break

        except KeyboardInterrupt:
            LOGGER.info("KeyboardInterrupt caught")

            self.keep_extracting = False

        # De-registration from the registry client

        self.deregister_service()

        # Close the commanding socket

        poller.unregister(self.cmd_socket)
        self.cmd_socket.close(linger=0)

        # Stop watching the tables listed in the settings

        for watcher in self.watchers.values():
            watcher.stop_watching_db_table()

        # De-registration from the Storage Manager

        self.unregister_from_storage_manager()

    def register_service(self) -> None:
        """Registers the FacilityHousekeepingExporter to the Registry Client."""

        self.registry.stop_heartbeat()
        self.registry.register(
            name=CTRL_SETTINGS.SERVICE_NAME.lower(),
            host=get_host_ip() or "127.0.0.1",
            port=get_port_number(self.cmd_socket),
            service_type=CTRL_SETTINGS.SERVICE_TYPE.lower(),
        )
        self.registry.start_heartbeat()

    def deregister_service(self) -> None:
        """De-registers the FacilityHousekeepingExporter from the Registry Client."""

        if self.registry:
            self.registry.stop_heartbeat()
            self.registry.deregister()
            self.registry.close()

    @staticmethod
    def register_to_storage_manager() -> None:
        """Registers the origins to the Storage Manager.

        Each sensor has its own table in the facility database. In the TA-EGSE framework, we want to
        offer the option to store data from multiple sensors in the same file. Therefore we must
        register - for each storage mnemonic - which are all the possible column names. Note that the
        column names in the CSV files do not necessarily correspond to the table names in the facility
        database. This is configured in the telemetry dictionary.

        Raises:
            RuntimeError: When the Storage Manager is not active.
        """

        if is_storage_manager_active():
            storage_registrations = {}

            # Data from which table in the facility database should be extracted and stored under which
            # storage mnemonic?

            for table_name, (origin, _) in CTRL_SETTINGS.TABLES.items():
                storage_registrations.setdefault(origin, []).append(table_name)

            # The column names in the CSV files do not necessarily correspond to the table names in the
            # facility database. This is configured in the telemetry dictionary.

            for origin, table_names in storage_registrations.items():
                try:
                    hk_conversion_dict = read_conversion_dict(origin, use_site=False, setup=load_setup())
                    column_names = [hk_conversion_dict[table_name] for table_name in table_names]
                except Exception:  # was a bare `except:` — fall back to the raw table names
                    column_names = table_names

                # Make sure there is also a column (the first one) for the timestamp

                column_names = ["timestamp"] + column_names

                # This calls the module-level helper from egse.storage (the staticmethod name shadows it
                # only on the class, not inside this function scope).
                register_to_storage_manager(
                    origin=origin,
                    persistence_class=TYPES["CSV"],
                    prep={
                        "column_names": column_names,
                        "mode": "a",
                    },
                )
        else:
            LOGGER.warning("The Storage Manager is not active")
            # BUGFIX: the original used a bare `raise` outside an except clause, which raises
            # "RuntimeError: No active exception to re-raise". Raise an explicit error instead.
            raise RuntimeError("The Storage Manager is not active")

    def unregister_from_storage_manager(self) -> None:
        """De-registers all storage mnemonics for the facility database from the Storage Manager.

        Raises:
            RuntimeError: When the Storage Manager is not active.
            ConnectionError: When the Storage Manager cannot be reached (re-raised after logging).
        """

        if is_storage_manager_active():
            try:
                with StorageProxy() as proxy:
                    for origin in self.origins:
                        response = proxy.unregister({"origin": origin})
                        if not response.successful:
                            LOGGER.warning(f"Couldn't unregister {origin} from the Storage Manager: {response}")
                        else:
                            LOGGER.info(response)
            except ConnectionError as exc:
                LOGGER.warning(f"Couldn't connect to the Storage Manager for de-registration: {exc}")
                raise
        else:
            LOGGER.warning("The Storage Manager is not active")
            # BUGFIX: bare `raise` with no active exception -> explicit RuntimeError.
            raise RuntimeError("The Storage Manager is not active")
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def _check_commander_status(commander, poller: zmq.Poller) -> bool:
    """Checks the status of the commander.

    Checks whether a command has been received by the given commander and handles the `quit` and
    `status` commands.

    Args:
        commander: Commanding socket for the facility HK extraction.
        poller (zmq.Poller): Poller registered on the commanding socket.

    Returns: True if a quit command was received; False otherwise.
    """

    ready_sockets = dict(poller.poll(timeout=5000))  # Timeout of 5s

    if commander not in ready_sockets:
        return False

    command = pickle.loads(commander.recv())

    if command.lower() == "quit":
        commander.send(pickle.dumps("ACK"))
        return True

    if command.lower() == "status":
        status_reply = dict(status="ACK", host=CTRL_SETTINGS.HOSTNAME, command_port=CTRL_SETTINGS.COMMANDING_PORT)
        commander.send(pickle.dumps(status_reply))

    return False
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
def send_request(command_request: str) -> Any:
    """Sends a request to the FacilityHousekeepingExporter process and wait for a response.

    Args:
        command_request (str): Request.

    Returns: Response to the request, or None when the service could not be discovered.
    """

    with RegistryClient() as registry:
        service = registry.discover_service(CTRL_SETTINGS.SERVICE_TYPE.lower())

        if not service:
            return None

        # Build the endpoint from the discovered service record (protocol defaults to tcp).
        endpoint = connect_address(service.get("protocol", "tcp"), service["host"], service["port"])

        context = zmq.Context().instance()
        req_socket = context.socket(zmq.REQ)
        req_socket.connect(endpoint)

        req_socket.send(pickle.dumps(command_request))
        readable, _, _ = zmq.select([req_socket], [], [], timeout=TIMEOUT_RECV)

        if req_socket in readable:
            reply = pickle.loads(req_socket.recv())
        else:
            reply = {"error": "Receive from ZeroMQ socket timed out for FacilityHousekeepingExporter."}
        req_socket.close(linger=0)

        return reply
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
app = typer.Typer()
|
|
284
|
+
|
|
285
|
+
|
|
286
|
+
@app.command()
def start() -> None:
    """Starts the FacilityHousekeepingExporter."""

    try:
        rich.print("Starting the FacilityHousekeepingExporter")
        # run() blocks until a quit command is received.
        FacilityHousekeepingExporter().run()
    except KeyboardInterrupt:
        print("Shutdown requested... exiting")
    except SystemExit as exc:
        # Propagate the exit code of an intentional SystemExit.
        exit_code = exc.code if hasattr(exc, "code") else 0
        print(f"System Exit with code {exit_code}")
        sys.exit(exit_code)
    except Exception as exc:
        LOGGER.exception(f"Cannot start FacilityHousekeepingExporter: {exc}")
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
@app.command()
def stop() -> None:
    """Stops the FacilityHousekeepingExporter."""

    reply = send_request("quit")

    if reply != "ACK":
        rich.print(f"[red] ERROR: {reply}")
    else:
        rich.print("FacilityHousekeepingExporter successfully terminated.")
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
@app.command()
def status() -> None:
    """Prints the status of the FacilityHousekeepingExporter."""

    rich.print("FacilityHousekeepingExporter:")

    reply = send_request("status")

    # The process answers a `status` request with a dict containing its host and commanding port.
    if reply and reply.get("status") == "ACK":
        rich.print(" Status: [green]active")
        rich.print(f" Hostname: {reply.get('host')}")
        rich.print(f" Commanding port: {reply.get('command_port')}")
    else:
        rich.print(" Status: [red]not active")

    with RegistryClient() as registry:
        registry.list_services()
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
if __name__ == "__main__":
    import logging

    from egse.logger import set_all_logger_levels

    # When run directly (rather than via the console script), enable verbose logging.
    set_all_logger_levels(logging.DEBUG)

    sys.exit(app())
|