PyAutomationIO 0.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- automation/__init__.py +46 -0
- automation/alarms/__init__.py +563 -0
- automation/alarms/states.py +192 -0
- automation/alarms/trigger.py +64 -0
- automation/buffer.py +132 -0
- automation/core.py +1775 -0
- automation/dbmodels/__init__.py +23 -0
- automation/dbmodels/alarms.py +524 -0
- automation/dbmodels/core.py +86 -0
- automation/dbmodels/events.py +153 -0
- automation/dbmodels/logs.py +155 -0
- automation/dbmodels/machines.py +181 -0
- automation/dbmodels/opcua.py +81 -0
- automation/dbmodels/opcua_server.py +174 -0
- automation/dbmodels/tags.py +921 -0
- automation/dbmodels/users.py +259 -0
- automation/extensions/__init__.py +15 -0
- automation/extensions/api.py +149 -0
- automation/extensions/cors.py +18 -0
- automation/filter/__init__.py +19 -0
- automation/iad/__init__.py +3 -0
- automation/iad/frozen_data.py +54 -0
- automation/iad/out_of_range.py +51 -0
- automation/iad/outliers.py +51 -0
- automation/logger/__init__.py +0 -0
- automation/logger/alarms.py +426 -0
- automation/logger/core.py +265 -0
- automation/logger/datalogger.py +646 -0
- automation/logger/events.py +194 -0
- automation/logger/logdict.py +53 -0
- automation/logger/logs.py +203 -0
- automation/logger/machines.py +248 -0
- automation/logger/opcua_server.py +130 -0
- automation/logger/users.py +96 -0
- automation/managers/__init__.py +4 -0
- automation/managers/alarms.py +455 -0
- automation/managers/db.py +328 -0
- automation/managers/opcua_client.py +186 -0
- automation/managers/state_machine.py +183 -0
- automation/models.py +174 -0
- automation/modules/__init__.py +14 -0
- automation/modules/alarms/__init__.py +0 -0
- automation/modules/alarms/resources/__init__.py +10 -0
- automation/modules/alarms/resources/alarms.py +280 -0
- automation/modules/alarms/resources/summary.py +79 -0
- automation/modules/events/__init__.py +0 -0
- automation/modules/events/resources/__init__.py +10 -0
- automation/modules/events/resources/events.py +83 -0
- automation/modules/events/resources/logs.py +109 -0
- automation/modules/tags/__init__.py +0 -0
- automation/modules/tags/resources/__init__.py +8 -0
- automation/modules/tags/resources/tags.py +201 -0
- automation/modules/users/__init__.py +2 -0
- automation/modules/users/resources/__init__.py +10 -0
- automation/modules/users/resources/models/__init__.py +2 -0
- automation/modules/users/resources/models/roles.py +5 -0
- automation/modules/users/resources/models/users.py +14 -0
- automation/modules/users/resources/roles.py +38 -0
- automation/modules/users/resources/users.py +113 -0
- automation/modules/users/roles.py +121 -0
- automation/modules/users/users.py +335 -0
- automation/opcua/__init__.py +1 -0
- automation/opcua/models.py +541 -0
- automation/opcua/subscription.py +259 -0
- automation/pages/__init__.py +0 -0
- automation/pages/alarms.py +34 -0
- automation/pages/alarms_history.py +21 -0
- automation/pages/assets/styles.css +7 -0
- automation/pages/callbacks/__init__.py +28 -0
- automation/pages/callbacks/alarms.py +218 -0
- automation/pages/callbacks/alarms_summary.py +20 -0
- automation/pages/callbacks/db.py +222 -0
- automation/pages/callbacks/filter.py +238 -0
- automation/pages/callbacks/machines.py +29 -0
- automation/pages/callbacks/machines_detailed.py +581 -0
- automation/pages/callbacks/opcua.py +266 -0
- automation/pages/callbacks/opcua_server.py +244 -0
- automation/pages/callbacks/tags.py +495 -0
- automation/pages/callbacks/trends.py +119 -0
- automation/pages/communications.py +129 -0
- automation/pages/components/__init__.py +123 -0
- automation/pages/components/alarms.py +151 -0
- automation/pages/components/alarms_summary.py +45 -0
- automation/pages/components/database.py +128 -0
- automation/pages/components/gaussian_filter.py +69 -0
- automation/pages/components/machines.py +396 -0
- automation/pages/components/opcua.py +384 -0
- automation/pages/components/opcua_server.py +53 -0
- automation/pages/components/tags.py +253 -0
- automation/pages/components/trends.py +66 -0
- automation/pages/database.py +26 -0
- automation/pages/filter.py +55 -0
- automation/pages/machines.py +20 -0
- automation/pages/machines_detailed.py +41 -0
- automation/pages/main.py +63 -0
- automation/pages/opcua_server.py +28 -0
- automation/pages/tags.py +40 -0
- automation/pages/trends.py +35 -0
- automation/singleton.py +30 -0
- automation/state_machine.py +1672 -0
- automation/tags/__init__.py +2 -0
- automation/tags/cvt.py +1198 -0
- automation/tags/filter.py +55 -0
- automation/tags/tag.py +418 -0
- automation/tests/__init__.py +10 -0
- automation/tests/test_alarms.py +110 -0
- automation/tests/test_core.py +257 -0
- automation/tests/test_unit.py +21 -0
- automation/tests/test_user.py +155 -0
- automation/utils/__init__.py +164 -0
- automation/utils/decorators.py +222 -0
- automation/utils/npw.py +294 -0
- automation/utils/observer.py +21 -0
- automation/utils/units.py +118 -0
- automation/variables/__init__.py +55 -0
- automation/variables/adimentional.py +30 -0
- automation/variables/current.py +71 -0
- automation/variables/density.py +115 -0
- automation/variables/eng_time.py +68 -0
- automation/variables/force.py +90 -0
- automation/variables/length.py +104 -0
- automation/variables/mass.py +80 -0
- automation/variables/mass_flow.py +101 -0
- automation/variables/percentage.py +30 -0
- automation/variables/power.py +113 -0
- automation/variables/pressure.py +93 -0
- automation/variables/temperature.py +168 -0
- automation/variables/volume.py +70 -0
- automation/variables/volumetric_flow.py +100 -0
- automation/workers/__init__.py +2 -0
- automation/workers/logger.py +164 -0
- automation/workers/state_machine.py +207 -0
- automation/workers/worker.py +36 -0
- pyautomationio-0.0.0.dist-info/METADATA +198 -0
- pyautomationio-0.0.0.dist-info/RECORD +138 -0
- pyautomationio-0.0.0.dist-info/WHEEL +5 -0
- pyautomationio-0.0.0.dist-info/licenses/LICENSE +21 -0
- pyautomationio-0.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1672 @@
|
|
|
1
|
+
import logging, secrets, pytz
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from opcua import Server, ua, Node
|
|
4
|
+
from hashlib import blake2b
|
|
5
|
+
from statemachine import State, StateMachine
|
|
6
|
+
from .workers.state_machine import StateMachineWorker
|
|
7
|
+
from .managers.state_machine import StateMachineManager
|
|
8
|
+
from .managers.opcua_client import OPCUAClientManager
|
|
9
|
+
from .managers.alarms import AlarmManager
|
|
10
|
+
from .managers.db import DBManager
|
|
11
|
+
from .singleton import Singleton
|
|
12
|
+
from .buffer import Buffer
|
|
13
|
+
from .models import StringType, IntegerType, FloatType, BooleanType, ProcessType
|
|
14
|
+
from .tags.cvt import CVTEngine, Tag
|
|
15
|
+
from .tags.tag import MachineObserver
|
|
16
|
+
from .opcua.subscription import DAS
|
|
17
|
+
from .modules.users.users import User
|
|
18
|
+
from .utils.decorators import set_event, validate_types, logging_error_handler
|
|
19
|
+
from .variables import VARIABLES
|
|
20
|
+
from .variables import (
|
|
21
|
+
Temperature,
|
|
22
|
+
Length,
|
|
23
|
+
Current,
|
|
24
|
+
Time,
|
|
25
|
+
Pressure,
|
|
26
|
+
Mass,
|
|
27
|
+
Force,
|
|
28
|
+
Power,
|
|
29
|
+
VolumetricFlow,
|
|
30
|
+
Volume,
|
|
31
|
+
MassFlow,
|
|
32
|
+
Density,
|
|
33
|
+
Percentage,
|
|
34
|
+
Adimentional)
|
|
35
|
+
from .logger.machines import MachinesLoggerEngine
|
|
36
|
+
from .logger.datalogger import DataLoggerEngine
|
|
37
|
+
from .logger.alarms import AlarmsLoggerEngine
|
|
38
|
+
from flask_socketio import SocketIO
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class Machine(Singleton):
    r"""
    Singleton facade coordinating the application's state machines: it
    registers machines with the StateMachineManager, persists their
    configuration through the machines logger engine when a database is
    available, creates CVT tags for their process-type variables, defines
    IAD alarms, and owns the worker thread that executes the machines.
    """
    def __init__(self):

        # All managers/engines below are singletons themselves, so these
        # constructor calls return shared instances rather than new objects.
        self.machine_manager = StateMachineManager()
        self.machines_engine = MachinesLoggerEngine()
        self.logger_engine = DataLoggerEngine()
        self.db_manager = DBManager()
        self.alarm_manager = AlarmManager()
        self.alarms_engine = AlarmsLoggerEngine()
        # Worker thread executing the machines; created lazily by start().
        self.state_worker = None

    def append_machine(self, machine:StateMachine, interval:FloatType=FloatType(1), mode:str='async'):
        r"""
        Append a state machine to the state machine manager and, when a
        database is configured, persist its configuration.

        **Parameters:**

        * **machine** (`StateMachine`): a state machine object.
        * **interval** (`FloatType`): interval execution time in seconds.
        * **mode** (str): scheduling mode ('async' by default).
        """
        # NOTE(review): FloatType(1) as a default is evaluated once at import
        # time and shared across calls — confirm FloatType is immutable.
        # NOTE(review): DAQ is presumably defined elsewhere in this module —
        # TODO confirm it is in scope at call time.
        if isinstance(machine, DAQ):

            # DAQ machines are named after their scan interval in milliseconds.
            machine.name = StringType(f"DAQ-{int(interval.value * 1000)}")

        machine.set_interval(interval)
        self.machine_manager.append_machine((machine, interval, mode))
        # on_delay / threshold are optional machine attributes; persist None
        # when they are absent.
        on_delay = None
        if hasattr(machine, "on_delay"):
            on_delay = machine.on_delay.value
        threshold = None
        if hasattr(machine, "threshold"):
            threshold = machine.threshold.value

        if self.machines_engine.get_db():
            self.machines_engine.create(
                identifier=machine.identifier.value,
                name=machine.name.value,
                interval=interval.value,
                description=machine.description.value,
                classification=machine.classification.value,
                buffer_size=machine.buffer_size.value,
                buffer_roll_type=machine.buffer_roll_type.value,
                criticity=machine.criticity.value,
                priority=machine.priority.value,
                on_delay=on_delay,
                threshold=threshold
            )
        self.create_tag_internal_process_type(machine=machine)

    def drop(self, machine:StateMachine):
        r"""
        Remove a machine from the async scheduler.

        **Parameters:**

        * **machine** (`StateMachine`): machine to drop.

        NOTE(review): assumes start() already ran (self.state_worker is not
        None); raises AttributeError otherwise.
        """
        self.state_worker._async_scheduler.drop(machine=machine)

    def get_machine(self, name:str):
        r"""
        Returns a state machine defined by its name.

        **Parameters:**

        * **name** (str): a state machine name.

        Usage

        ```python
        >>> state_machine = app.get_machine('state_machine_name')
        ```
        """

        return self.machine_manager.get_machine(name)

    def get_machines(self)->list:
        r"""
        Returns all defined state machines.

        **Returns** (list)

        Usage

        ```python
        >>> state_machines = app.get_machines()
        ```
        """

        return self.machine_manager.get_machines()

    def get_state_machine_manager(self)->StateMachineManager:
        r"""
        Gets state machine Manager

        **Returns:** StateMachineManager instance

        ```python
        >>> state_manager = app.get_state_machine_manager()
        ```
        """
        return self.machine_manager

    def start(self, machines:tuple=None):
        r"""
        Starts the state machine worker.

        When a database is configured, each machine's persisted configuration
        (description, buffer sizing, criticity, thresholds, ...) overrides the
        in-code defaults before the machine is appended to the manager.

        **Parameters:**

        * **machines** (tuple): machines to register and run.
        """
        # StateMachine Worker
        config = None
        if self.machines_engine.get_db():
            config = self.load_db_machines_config()

        if config:

            if machines:

                for machine in machines:

                    if machine.name.value in config:

                        # Hydrate the machine from its persisted configuration.
                        machine.description.value = config[machine.name.value]["description"]
                        machine.classification.value = config[machine.name.value]["classification"]
                        machine.buffer_size.value = config[machine.name.value]["buffer_size"]
                        machine.buffer_roll_type.value = config[machine.name.value]["buffer_roll_type"]
                        machine.criticity.value = config[machine.name.value]["criticity"]
                        machine.priority.value = config[machine.name.value]["priority"]
                        machine.identifier.value = config[machine.name.value]['identifier']
                        if config[machine.name.value]['on_delay']:
                            machine.on_delay.value = config[machine.name.value]['on_delay']
                        if config[machine.name.value]['threshold']:
                            threshold_value = config[machine.name.value]['threshold']
                            threshold_unit = machine.threshold.unit
                            class_name = machine.threshold.value.__class__.__name__
                            # Rebuild the threshold in its original unit class.
                            # NOTE(review): eval() on DB-sourced values — safe
                            # only while the config table is trusted; a lookup
                            # table of unit classes would be more robust.
                            machine.threshold.value = eval(f"{class_name}({threshold_value}, unit='{threshold_unit}')")
                            if "leak detection" in machine.classification.value.lower():

                                if machine.name.value.lower() == "npw":

                                    # NPW leak machines mirror the threshold into
                                    # their wavelet detector.
                                    machine.wavelet.threshold_iqr = threshold_value

                        self.append_machine(machine=machine, interval=FloatType(config[machine.name.value]["interval"]))

                    else:

                        # No persisted config for this machine: use its own interval.
                        self.append_machine(machine=machine, interval=FloatType(machine.get_interval()))

        else:

            if machines:

                for machine in machines:

                    self.append_machine(machine=machine, interval=FloatType(machine.get_interval()))

        state_manager = self.get_state_machine_manager()

        if state_manager.exist_machines():

            self.state_worker = StateMachineWorker(state_manager)
            self.state_worker.daemon = True
            self.state_worker.start()

    def load_db_machines_config(self):
        # Read every machine's persisted configuration keyed by machine name.

        return self.machines_engine.read_config()

    def join(self, machine):
        # Block until the given machine's scheduled task finishes.

        self.state_worker._async_scheduler.join(machine)

    def create_tag_internal_process_type(self, machine:StateMachine):
        r"""
        Create and persist CVT tags for the machine's process-type variables.

        Internal (writable) variables get a tag named
        ``<machine_name>.<attr>``; read-only variables are bound via the
        machine's ``internal_tags_relationships`` map when present, with
        anomaly (IAD) detection enabled on the resulting tag.
        """
        from . import SEGMENT, MANUFACTURER
        cvt = CVTEngine()
        internal_variables = machine.get_internal_process_type_variables()
        for _tag_name, value in internal_variables.items():

            # Find the engineering variable whose unit set contains this unit.
            for variable, units in VARIABLES.items():

                if value.unit in units.values() or value.unit in units.keys():

                    tag_name = f"{machine.name.value}.{_tag_name}"
                    cvt.set_tag(
                        name=tag_name,
                        unit=value.unit,
                        data_type="float",
                        variable=variable,
                        description=f"process type variable",
                        segment=SEGMENT,
                        manufacturer=MANUFACTURER
                    )
                    # Persist Tag on Database
                    tag = cvt.get_tag_by_name(name=tag_name)
                    attr = getattr(machine, _tag_name)
                    attr.tag = tag
                    self.logger_engine.set_tag(tag=tag)
                    self.db_manager.attach(tag_name=tag_name)
                    break

        # NOTE(review): a commented-out fallback branch that created
        # adimensional tags for variables without a recognized unit was
        # removed here as dead code; see version control history.

        internal_variables = machine.get_read_only_process_type_variables()
        for _tag_name, value in internal_variables.items():
            for variable, units in VARIABLES.items():

                if value.unit in units.values() or value.unit in units.keys():

                    if hasattr(machine, "internal_tags_relationships"):
                        # Tag name comes from the machine's relationship map,
                        # optionally prefixed by segment and manufacturer.
                        tag_name = f"{machine.internal_tags_relationships[_tag_name]['tag']}"
                        if SEGMENT:
                            tag_name = f"{SEGMENT}.{tag_name}"
                        if MANUFACTURER:
                            tag_name = f"{MANUFACTURER}.{tag_name}"
                        description = machine.internal_tags_relationships[_tag_name]['description']

                        attr = getattr(machine, _tag_name)
                        unit = attr.unit
                        # NOTE(review): set_tag is unpacked here but not in the
                        # first loop above — confirm its return contract.
                        tag, _ = cvt.set_tag(
                            name=tag_name,
                            unit=unit,
                            data_type="float",
                            variable=variable,
                            description=description,
                            segment=SEGMENT,
                            manufacturer=MANUFACTURER,
                            out_of_range_detection=True,
                            frozen_data_detection=True,
                            outlier_detection=True
                        )

                        if tag:
                            # Persist Tag on Database
                            tag = cvt.get_tag_by_name(name=tag_name)
                            attr = getattr(machine, _tag_name)
                            attr.tag = tag
                            self.logger_engine.set_tag(tag=tag)
                            self.db_manager.attach(tag_name=tag_name)
                    break

        self.__define_iad_alarms()

    def create_alarm(
        self,
        name:str,
        tag:str,
        alarm_type:str="BOOL",
        trigger_value:bool|float|int=True,
        description:str="",
        identifier:str=None,
        state:str="Normal",
        timestamp:str=None,
        ack_timestamp:str=None,
        user:User=None,
        reload:bool=False
    ):
        r"""
        Append an alarm to the Alarm Manager and, unless reloading, persist
        it through the alarms logger engine.

        **Parameters**

        * **name** (str): alarm name.
        * **tag** (str): tag the alarm watches.
        * **alarm_type** (str): trigger type ("BOOL" by default).
        * **trigger_value**: value that fires the alarm.
        * **reload** (bool): True when re-creating alarms from storage, which
          skips persisting them again.

        **Returns**

        * (alarm, message) on success, (None, message) on failure.
        """
        alarm, message = self.alarm_manager.append_alarm(
            name=name,
            tag=tag,
            type=alarm_type,
            trigger_value=trigger_value,
            description=description,
            identifier=identifier,
            state=state,
            timestamp=timestamp,
            ack_timestamp=ack_timestamp,
            user=user,
            reload=reload,
        )

        if alarm:

            # Persist Tag on Database
            if not reload:
                if self.db_manager.get_db():

                    alarm = self.alarm_manager.get_alarm_by_name(name=name)

                    self.alarms_engine.create(
                        id=alarm.identifier,
                        name=name,
                        tag=tag,
                        trigger_type=alarm_type,
                        trigger_value=trigger_value,
                        description=description
                    )

            return alarm, message

        return None, message

    def __define_iad_alarms(self):
        r"""
        Create one IAD (anomaly-detection) alarm per tag that has frozen-data,
        out-of-range or outlier detection enabled.
        """
        cvt = CVTEngine()
        tags = cvt.get_tags()
        for tag in tags:

            if tag['frozen_data_detection'] or tag['out_of_range_detection'] or tag['outlier_detection']:

                alarm_name = f"alarm.{tag['name']}.iad"
                self.create_alarm(name=alarm_name, tag=tag['name'])

    @logging_error_handler
    def stop(self):
        r"""
        Safe stop workers execution
        """
        if self.state_worker:

            self.state_worker.stop()
|
|
405
|
+
class StateMachineCore(StateMachine):
    r"""
    Base state machine for the automation framework.

    Defines the common lifecycle Start -> Wait -> Run with Restart and Reset
    side paths, per-tag data buffering, and tag subscription plumbing used by
    concrete machines.
    """

    # Lifecycle states; every machine begins in 'start'.
    starting = State('start', initial=True)
    waiting = State('wait')
    running = State('run')
    restarting = State("restart")
    resetting = State('reset')

    # Transitions
    start_to_wait = starting.to(waiting)
    wait_to_run = waiting.to(running)
    run_to_reset = running.to(resetting)
    reset_to_start = resetting.to(starting)
    run_to_restart = running.to(restarting)
    restart_to_wait = restarting.to(waiting)
    wait_to_reset = waiting.to(resetting)
    wait_to_restart = waiting.to(restarting)
|
426
|
+
    def __init__(
        self,
        name:str,
        description:str="",
        classification:str="",
        interval:float=1.0,
        identifier:str=None,
        buffer_size:int=10
    ):
        r"""
        Build the machine's identity and buffering attributes, then
        initialize the underlying StateMachine.

        **Parameters:**

        * **name** (str): machine name.
        * **description** (str): human-readable description.
        * **classification** (str): machine classification label.
        * **interval** (float): scan interval in seconds.
        * **identifier** (str): stable id; a random 8-hex-char token when None.
        * **buffer_size** (int): capacity of each per-tag data buffer.
        """
        from . import SEGMENT, MANUFACTURER
        # Random id unless the caller supplies a stable one.
        _identifier = secrets.token_hex(4)

        if identifier:

            _identifier = identifier

        self.identifier = StringType(default=_identifier)
        self.criticity = IntegerType(default=2)
        self.priority = IntegerType(default=1)
        self.description = StringType(default=description)
        self.classification = StringType(default=classification)
        self.name = StringType(default=name)
        self.machine_interval = FloatType(default=interval)
        self.buffer_size = IntegerType(default=buffer_size)
        self.buffer_roll_type = StringType(default='backward')
        # SocketIO handle is injected later via set_socketio().
        self.sio:SocketIO|None = None
        # Create (empty) per-tag buffers before the state machine starts.
        self.restart_buffer()
        self.machine_engine = MachinesLoggerEngine()
        # Flatten every state's transitions for introspection.
        transitions = []
        for state in self.states:
            transitions.extend(state.transitions)
        self.transitions = transitions
        self.manufacturer = MANUFACTURER
        self.segment = SEGMENT
        super(StateMachineCore, self).__init__()
|
462
|
+
# State Methods
|
|
463
|
+
def while_starting(self):
|
|
464
|
+
r"""
|
|
465
|
+
This method is executed every machine loop when it is on Start state
|
|
466
|
+
|
|
467
|
+
Configure your state machine here
|
|
468
|
+
"""
|
|
469
|
+
# DEFINE DATA BUFFER
|
|
470
|
+
self.set_buffer_size(size=self.buffer_size.value)
|
|
471
|
+
# TRANSITION
|
|
472
|
+
self.send('start_to_wait')
|
|
473
|
+
|
|
474
|
+
def while_waiting(self):
|
|
475
|
+
r"""
|
|
476
|
+
This method is executed every machine loop when it is on Wait state
|
|
477
|
+
|
|
478
|
+
It was designed to check your buffer data in self.data, if your buffer is full, so they pass to run state
|
|
479
|
+
"""
|
|
480
|
+
ready_to_run = True
|
|
481
|
+
|
|
482
|
+
if self.data:
|
|
483
|
+
|
|
484
|
+
for _, value in self.data.items():
|
|
485
|
+
|
|
486
|
+
if len(value) < value.size:
|
|
487
|
+
ready_to_run=False
|
|
488
|
+
break
|
|
489
|
+
|
|
490
|
+
if ready_to_run:
|
|
491
|
+
|
|
492
|
+
self.send('wait_to_run')
|
|
493
|
+
|
|
494
|
+
def while_running(self):
|
|
495
|
+
r"""
|
|
496
|
+
This method is executed every machine loop when it is on Run state
|
|
497
|
+
|
|
498
|
+
Depending on you state machine goal, write your script here
|
|
499
|
+
"""
|
|
500
|
+
self.criticity.value = 1
|
|
501
|
+
|
|
502
|
+
def while_resetting(self):
|
|
503
|
+
r"""
|
|
504
|
+
This method is executed every machine loop when it is on Reset state
|
|
505
|
+
"""
|
|
506
|
+
self.send("reset_to_start")
|
|
507
|
+
|
|
508
|
+
def while_restarting(self):
|
|
509
|
+
r"""
|
|
510
|
+
This method is executed every machine loop when it is on Restart state
|
|
511
|
+
"""
|
|
512
|
+
self.restart_buffer()
|
|
513
|
+
self.send("restart_to_wait")
|
|
514
|
+
|
|
515
|
+
# Auxiliaries Methods
|
|
516
|
+
def set_socketio(self, sio:SocketIO):
|
|
517
|
+
|
|
518
|
+
self.sio:SocketIO = sio
|
|
519
|
+
|
|
520
|
+
def put_attr(self, attr_name:str, value:StringType|FloatType|IntegerType|BooleanType|ProcessType, user:User=None):
|
|
521
|
+
|
|
522
|
+
attr = getattr(self, attr_name)
|
|
523
|
+
attr.set_value(value=value, user=user, name=attr_name)
|
|
524
|
+
kwargs = {
|
|
525
|
+
f"{attr_name}": value
|
|
526
|
+
}
|
|
527
|
+
|
|
528
|
+
# Update on DB
|
|
529
|
+
self.machine_engine.put(name=self.name, **kwargs)
|
|
530
|
+
|
|
531
|
+
def add_process_variable(self, name:str, tag:Tag, read_only:bool=False):
|
|
532
|
+
r"""
|
|
533
|
+
Documentation here
|
|
534
|
+
"""
|
|
535
|
+
|
|
536
|
+
props = self.__dict__
|
|
537
|
+
if name not in props.items():
|
|
538
|
+
process_variable = ProcessType(tag=Tag, default=tag.value, read_only=read_only)
|
|
539
|
+
setattr(self, name, process_variable)
|
|
540
|
+
self.machine_engine.bind_tag(tag=tag, machine=self)
|
|
541
|
+
|
|
542
|
+
def get_process_variables(self):
|
|
543
|
+
r"""
|
|
544
|
+
Documentation here
|
|
545
|
+
"""
|
|
546
|
+
|
|
547
|
+
result = dict()
|
|
548
|
+
props = self.__dict__
|
|
549
|
+
|
|
550
|
+
for key, value in props.items():
|
|
551
|
+
|
|
552
|
+
if isinstance(value, ProcessType):
|
|
553
|
+
|
|
554
|
+
result[key] = value.serialize()
|
|
555
|
+
|
|
556
|
+
return result
|
|
557
|
+
|
|
558
|
+
def get_process_variable(self, name:str):
|
|
559
|
+
r"""
|
|
560
|
+
Documentation here
|
|
561
|
+
"""
|
|
562
|
+
props = self.__dict__
|
|
563
|
+
if name in props.items():
|
|
564
|
+
|
|
565
|
+
value = props[name]
|
|
566
|
+
if isinstance(value, ProcessType):
|
|
567
|
+
|
|
568
|
+
return value.serialize()
|
|
569
|
+
|
|
570
|
+
@validate_types(size=int, output=None)
|
|
571
|
+
def set_buffer_size(self, size:int, user:User=None)->None:
|
|
572
|
+
r"""
|
|
573
|
+
Set data buffer size
|
|
574
|
+
|
|
575
|
+
# Parameters
|
|
576
|
+
|
|
577
|
+
- *size:* [int] buffer size
|
|
578
|
+
"""
|
|
579
|
+
self.buffer_size.value = size
|
|
580
|
+
self.restart_buffer()
|
|
581
|
+
|
|
582
|
+
def restart_buffer(self):
|
|
583
|
+
r"""
|
|
584
|
+
Restart Buffer
|
|
585
|
+
"""
|
|
586
|
+
self.data = {tag_name: Buffer(size=self.buffer_size.value, roll=self.buffer_roll_type.value) for tag_name, _ in self.get_subscribed_tags().items()}
|
|
587
|
+
|
|
588
|
+
@validate_types(output=dict)
|
|
589
|
+
def get_subscribed_tags(self)->dict:
|
|
590
|
+
r"""Documentation here
|
|
591
|
+
|
|
592
|
+
# Parameters
|
|
593
|
+
|
|
594
|
+
-
|
|
595
|
+
|
|
596
|
+
# Returns
|
|
597
|
+
|
|
598
|
+
-
|
|
599
|
+
"""
|
|
600
|
+
result = dict()
|
|
601
|
+
props = self.__dict__
|
|
602
|
+
|
|
603
|
+
for name, value in props.items():
|
|
604
|
+
|
|
605
|
+
if isinstance(value, ProcessType):
|
|
606
|
+
|
|
607
|
+
if value.read_only and value.tag:
|
|
608
|
+
|
|
609
|
+
result[value.tag.name] = value
|
|
610
|
+
|
|
611
|
+
return result
|
|
612
|
+
|
|
613
|
+
@validate_types(output=dict)
|
|
614
|
+
def get_not_subscribed_tags(self)->dict:
|
|
615
|
+
r"""Documentation here
|
|
616
|
+
|
|
617
|
+
# Parameters
|
|
618
|
+
|
|
619
|
+
-
|
|
620
|
+
|
|
621
|
+
# Returns
|
|
622
|
+
|
|
623
|
+
-
|
|
624
|
+
"""
|
|
625
|
+
result = dict()
|
|
626
|
+
props = self.__dict__
|
|
627
|
+
|
|
628
|
+
for name, value in props.items():
|
|
629
|
+
|
|
630
|
+
if isinstance(value, ProcessType):
|
|
631
|
+
|
|
632
|
+
if value.read_only and not value.tag:
|
|
633
|
+
|
|
634
|
+
result[name] = value
|
|
635
|
+
|
|
636
|
+
return result
|
|
637
|
+
|
|
638
|
+
    def subscribe_to(self, tag:Tag, default_tag_name:str=None):
        r"""
        Subscribe this machine to a CVT tag.

        Two modes:

        * With *default_tag_name*: bind the tag to an existing ProcessType
          attribute of that name (default tags on e.g. leak machines).
          Returns a (bool, message) tuple.
        * Without it: create (or bind) a read-only ProcessType attribute
          named after the tag. Returns a bare bool (or None when the tag is
          already subscribed and bound).

        NOTE(review): the return type is inconsistent across branches
        (tuple vs bool vs implicit None), and the two "already has a
        subscription" messages cover different failure conditions — callers
        should be audited before normalizing this.

        # Parameters

        - *tag:* [Tag] tag to subscribe to
        - *default_tag_name:* [str] existing process-variable name to bind
        """
        if default_tag_name and tag: # Designed to default tags into State Machine

            if self.process_type_exists(name=default_tag_name):

                if default_tag_name in self.get_not_subscribed_tags():

                    process_type = getattr(self, default_tag_name)

                    if not process_type.tag:

                        process_type.tag = tag
                        self.attach(machine=self, tag=tag)
                        # Rebuild buffers so the new tag gets one.
                        self.restart_buffer()
                        self.machine_engine.bind_tag(tag=tag, machine=self, default_tag_name=default_tag_name)
                        return True, f"successful subscription"

                    return False, f"{default_tag_name} already has a subscription"

                return False, f"{default_tag_name} already has a subscription"

            return False, f"{default_tag_name} is not a Process Type Variable"

        elif tag and not default_tag_name:

            tag_name = tag.get_name()

            if tag_name not in self.get_subscribed_tags():

                if not self.process_type_exists(name=tag_name):

                    # First subscription: create the read-only process variable.
                    setattr(self, tag_name, ProcessType(tag=tag, default=tag.value, read_only=True))
                    self.attach(machine=self, tag=tag)
                    self.restart_buffer()
                    self.machine_engine.bind_tag(tag=tag, machine=self)
                    return True

                else:

                    process_type = getattr(self, tag_name)

                    if not process_type.tag:

                        process_type.tag = tag
                        self.machine_engine.bind_tag(tag=tag, machine=self)
                        return True
|
|
691
|
+
@validate_types(tag=Tag, output=None|bool)
|
|
692
|
+
def unsubscribe_to(self, tag:Tag=None, default_tag_name:str=None):
|
|
693
|
+
r"""Documentation here
|
|
694
|
+
|
|
695
|
+
# Parameters
|
|
696
|
+
|
|
697
|
+
-
|
|
698
|
+
|
|
699
|
+
# Returns
|
|
700
|
+
|
|
701
|
+
-
|
|
702
|
+
"""
|
|
703
|
+
if tag:
|
|
704
|
+
|
|
705
|
+
tags_subscribed = self.get_subscribed_tags()
|
|
706
|
+
|
|
707
|
+
if tag.name in tags_subscribed:
|
|
708
|
+
|
|
709
|
+
self.machine_engine.unbind_tag(tag=tag, machine=self)
|
|
710
|
+
tags_subscribed[tag.name].tag = None
|
|
711
|
+
self.restart_buffer()
|
|
712
|
+
return True
|
|
713
|
+
|
|
714
|
+
elif default_tag_name: # Default tags on leak state machine
|
|
715
|
+
|
|
716
|
+
if default_tag_name in self.get_subscribed_tags():
|
|
717
|
+
|
|
718
|
+
process_type = self.get_subscribed_tags[default_tag_name]
|
|
719
|
+
tag = process_type.tag
|
|
720
|
+
tags_subscribed[tag.name].tag = None
|
|
721
|
+
self.restart_buffer()
|
|
722
|
+
self.machine_engine.unbind_tag(tag=tag, machine=self)
|
|
723
|
+
return True
|
|
724
|
+
|
|
725
|
+
@validate_types(name=str, output=bool)
|
|
726
|
+
def process_type_exists(self, name:str)->bool:
|
|
727
|
+
|
|
728
|
+
props = self.__dict__
|
|
729
|
+
if name in props:
|
|
730
|
+
|
|
731
|
+
if isinstance(props[name], ProcessType):
|
|
732
|
+
|
|
733
|
+
return True
|
|
734
|
+
|
|
735
|
+
return False
|
|
736
|
+
|
|
737
|
+
@validate_types(output=dict)
|
|
738
|
+
def get_internal_process_type_variables(self)->dict:
|
|
739
|
+
|
|
740
|
+
result = dict()
|
|
741
|
+
props = self.__dict__
|
|
742
|
+
|
|
743
|
+
for name, value in props.items():
|
|
744
|
+
|
|
745
|
+
if isinstance(value, ProcessType):
|
|
746
|
+
|
|
747
|
+
if not value.read_only:
|
|
748
|
+
|
|
749
|
+
result[name] = value
|
|
750
|
+
|
|
751
|
+
# if isinstance(value, (IntegerType, StringType, FloatType)):
|
|
752
|
+
|
|
753
|
+
# result[name] = value
|
|
754
|
+
|
|
755
|
+
return result
|
|
756
|
+
|
|
757
|
+
def get_read_only_process_type_variables(self)->dict:
    r"""
    Collects every read-only ProcessType attribute of this machine.

    # Returns

    - dict: attribute name -> ProcessType instance.
    """
    return {
        attr_name: attr_value
        for attr_name, attr_value in self.__dict__.items()
        if isinstance(attr_value, ProcessType) and attr_value.read_only
    }
|
|
771
|
+
|
|
772
|
+
@validate_types(
    tag=str,
    value=Temperature|Length|Current|Time|Pressure|Mass|Force|Power|VolumetricFlow|Volume|MassFlow|Density|Percentage|Adimentional,
    timestamp=datetime,
    output=None)
def notify(
    self,
    tag:str,
    value:Temperature|Length|Current|Time|Pressure|Mass|Force|Power|VolumetricFlow|Volume|MassFlow|Density|Percentage|Adimentional,
    timestamp:datetime):
    r"""
    CVT callback: receives a subscribed tag's new value, normalizes its unit
    and stores it on the matching ProcessType attribute.

    # Parameters

    - *tag:* [str] tag name.
    - *value:* unit-aware value object carrying the new tag value.
    - *timestamp:* [datetime] time at which the value changed.
    """
    subscriptions = self.get_subscribed_tags()

    if tag not in subscriptions:

        return

    process_type = subscriptions[tag]
    variable = process_type.tag.variable.lower()

    # Flow variables are normalized to the machine's base flow units;
    # everything else is converted to the tag's configured display unit.
    if variable == "massflow":
        value.change_unit(unit=self.mass_flow_unit_base)
    elif variable == "volumetricflow":
        value.change_unit(unit=self.volumetric_flow_unit_base)
    else:
        value.change_unit(unit=process_type.tag.display_unit)

    process_type.value = value
    self.data_timestamp = timestamp
    if hasattr(self, "verify_inputs"):
        self.verify_inputs()
|
|
805
|
+
|
|
806
|
+
@logging_error_handler
def attach(self, machine, tag:Tag):
    r"""
    Attaches a MachineObserver for *machine* to *tag* through the CVT engine
    so the machine is notified on tag value changes.
    """
    cvt = CVTEngine()
    observer = MachineObserver(machine)
    request = {
        "action": "attach_observer",
        "parameters": {
            "name": tag.name,
            "observer": observer,
        },
    }
    cvt.request(request)
    cvt.response()
|
|
822
|
+
|
|
823
|
+
@set_event(message=f"Switched", classification="State Machine", priority=2, criticity=3)
@validate_types(to=str, user=User|type(None), output=tuple)
def transition(self, to:str, user:User=None):
    r"""
    Requests a transition from the current state to state *to*.

    # Parameters

    - *to:* [str] target state name.
    - *user:* [User] user requesting the transition (consumed by the event decorator).

    # Returns

    - tuple: (machine, message) on success, (None, message) when the transition
      is not allowed or fails.
    """
    # Resolved before the try so the except handler can always reference it.
    _from = self.current_state.name.lower()
    try:
        transition_name = f'{_from}_to_{to}'
        allowed_transitions = self._get_active_transitions()
        for _transition in allowed_transitions:
            if f"{_transition.source.name}_to_{_transition.target.name}"==transition_name:
                self.send(transition_name)
                return self, f"[{self.name.value}] from: {_from} to: {to}"

        return None, f"Transition to {to} not allowed"

    except Exception as err:

        logger = logging.getLogger("pyautomation")
        logger.warning(f"Transition from {_from} state to {to} state for {self.name.value} is not allowed")
        # BUG FIX: the original fell through here returning None, violating the
        # declared output=tuple contract ('_from' could also be unbound).
        return None, f"Transition to {to} not allowed: {err}"
|
|
844
|
+
|
|
845
|
+
@validate_types(output=int|float)
def get_interval(self)->int|float:
    r"""
    Returns the overall state machine execution interval in seconds.

    Usage

    ```python
    >>> machine = app.get_machine(name)
    >>> interval = machine.get_interval()
    ```
    """
    interval_model = self.machine_interval
    return interval_model.value
|
|
862
|
+
|
|
863
|
+
@validate_types(interval=IntegerType|FloatType, user=User|type(None), output=None)
def set_interval(self, interval:IntegerType|FloatType, user:User=None):
    r"""
    Sets the overall machine execution interval.

    # Parameters

    - *interval:* interval in seconds (IntegerType or FloatType model).
    - *user:* [User] user performing the change.

    Usage

    ```python
    >>> machine = app.get_machine(name)
    >>> machine.set_interval(0.5)
    ```
    """
    self.machine_interval = interval
|
|
880
|
+
|
|
881
|
+
def get_allowed_actions(self):
    r"""
    Lists the user-facing action names reachable from the current state,
    including synthesized confirm/deny pairs.
    """
    actions = set()
    hidden = ("run", "switch", "wait", "start", "pre_alarm")
    state = self.current_state

    for candidate in self.transitions:

        if candidate.source != state:
            continue

        target_name = candidate.target.name

        if target_name in hidden:
            continue

        actions.add(target_name)

        # Every confirmable action exposes a matching deny action.
        if "confirm" in target_name:
            actions.add(target_name.replace("confirm", "deny"))

    state_name = state.value.lower()
    # Confirmation states expose their own confirm/deny pair.
    if state_name in ("con_restart", "con_reset"):
        actions.add(state_name.replace("con_", "confirm_"))
        actions.add(state_name.replace("con_", "deny_"))

    return list(actions)
|
|
907
|
+
|
|
908
|
+
def _get_active_transitions(self):
|
|
909
|
+
r"""
|
|
910
|
+
Gets allowed transitions based on the current state
|
|
911
|
+
|
|
912
|
+
**Returns**
|
|
913
|
+
|
|
914
|
+
* **(list)**
|
|
915
|
+
"""
|
|
916
|
+
result = list()
|
|
917
|
+
|
|
918
|
+
current_state = self.current_state
|
|
919
|
+
transitions = self.transitions
|
|
920
|
+
|
|
921
|
+
for transition in transitions:
|
|
922
|
+
|
|
923
|
+
if transition.source == current_state:
|
|
924
|
+
|
|
925
|
+
result.append(transition)
|
|
926
|
+
|
|
927
|
+
return result
|
|
928
|
+
|
|
929
|
+
def _activate_triggers(self):
|
|
930
|
+
r"""
|
|
931
|
+
Allows to execute the on_ method in transitions when it's necesary
|
|
932
|
+
"""
|
|
933
|
+
transitions = self._get_active_transitions()
|
|
934
|
+
|
|
935
|
+
for transition in transitions:
|
|
936
|
+
method_name = transition.identifier
|
|
937
|
+
method = getattr(self, method_name)
|
|
938
|
+
|
|
939
|
+
try:
|
|
940
|
+
source = transition.source
|
|
941
|
+
if not source._trigger:
|
|
942
|
+
continue
|
|
943
|
+
if source._trigger.evaluate():
|
|
944
|
+
method()
|
|
945
|
+
except Exception as e:
|
|
946
|
+
error = str(e)
|
|
947
|
+
logging.error(f"Machine - {self.name.value}:{error}")
|
|
948
|
+
|
|
949
|
+
def loop(self):
    r"""
    Executed by the state machine worker every interval: dispatches to the
    while_<state> handler for the current state when one is defined.
    """
    handler_name = f"while_{self.current_state.value}"

    if handler_name in dir(self):

        getattr(self, handler_name)()
|
|
959
|
+
|
|
960
|
+
@validate_types(output=list)
def get_states(self)->list[str]:
    r"""
    Returns the names of every state defined on this machine.

    Usage

    ```python
    >>> machine = app.get_machine(name)
    >>> states = machine.get_states()
    ```
    """
    names = []
    for state in self.states:
        names.append(state.value)
    return names
|
|
977
|
+
|
|
978
|
+
@validate_types(output=dict)
def get_serialized_models(self)->dict:
    r"""
    Serializes every attribute declared with a PyAutomation model type.
    ProcessType attributes contribute their full serialization; scalar
    models contribute their raw value.

    # Returns

    - dict: attribute name -> serialized value.
    """
    serialized = dict()

    for attr_name, model in self.__dict__.items():

        if isinstance(model, ProcessType):

            serialized[attr_name] = model.serialize()

        elif isinstance(model, (StringType, FloatType, IntegerType, BooleanType)):

            serialized[attr_name] = model.value

    return serialized
|
|
1003
|
+
|
|
1004
|
+
@validate_types(output=dict)
def serialize(self)->dict:
    r"""
    Serializes the state machine: current state, allowed actions,
    manufacturer, segment, plus every model-typed attribute.

    # Returns

    - dict: serialized state machine.
    """
    payload = dict()
    payload["state"] = self.current_state.value
    payload["actions"] = self.get_allowed_actions()
    payload["manufacturer"] = self.manufacturer
    payload["segment"] = self.segment
    payload.update(self.get_serialized_models())
    return payload
|
|
1022
|
+
|
|
1023
|
+
# TRANSITIONS
def on_start_to_wait(self):
    r"""
    Runs once before entering Wait state from Start state.
    """
    self.criticity.value = 1
    self.last_state = "start"
|
|
1030
|
+
|
|
1031
|
+
def on_wait_to_run(self):
    r"""
    Runs once before entering Run state from Wait state.
    """
    self.criticity.value = 1
    self.last_state = "wait"
|
|
1037
|
+
|
|
1038
|
+
def on_wait_to_restart(self):
    r"""
    Runs once before entering Restart state from Wait state.
    """
    self.criticity.value = 5
    self.last_state = "wait"
|
|
1044
|
+
|
|
1045
|
+
def on_wait_to_reset(self):
    r"""
    Runs once before entering Reset state from Wait state.
    """
    self.criticity.value = 5
    self.last_state = "wait"
|
|
1051
|
+
|
|
1052
|
+
def on_run_to_restart(self):
    r"""
    Runs once before entering Restart state from Run state.
    """
    self.criticity.value = 5
    self.last_state = "run"
|
|
1058
|
+
|
|
1059
|
+
def on_run_to_reset(self):
    r"""
    Runs once before entering Reset state from Run state.
    """
    self.criticity.value = 5
    self.last_state = "run"
|
|
1065
|
+
|
|
1066
|
+
def on_reset_to_start(self):
    r"""
    Runs once before entering Start state from Reset state.
    """
    self.criticity.value = 2
    self.last_state = "reset"
|
|
1072
|
+
|
|
1073
|
+
def on_restart_to_wait(self):
    r"""
    Runs once before entering Wait state from Restart state.
    """
    self.criticity.value = 2
    self.last_state = "restart"
|
|
1079
|
+
|
|
1080
|
+
# ON ENTER TRANSITION
def on_enter_starting(self):
    r"""
    Emits the serialized machine over socket.io when entering Start state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1086
|
+
|
|
1087
|
+
def on_enter_waiting(self):
    r"""
    Emits the serialized machine over socket.io when entering Wait state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1092
|
+
|
|
1093
|
+
def on_enter_running(self):
    r"""
    Emits the serialized machine over socket.io when entering Run state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1098
|
+
|
|
1099
|
+
def on_enter_restarting(self):
    r"""
    Emits the serialized machine over socket.io when entering Restart state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1104
|
+
|
|
1105
|
+
def on_enter_resetting(self):
    r"""
    Emits the serialized machine over socket.io when entering Reset state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1110
|
+
|
|
1111
|
+
|
|
1112
|
+
class DAQ(StateMachineCore):
    r"""
    Data-acquisition state machine: on every run cycle it polls the OPC UA
    node of each subscribed tag, converts the reading to the tag's display
    unit, pushes it into the CVT (when manufacturer/segment filters match)
    and appends timestamp/value samples to the DAS buffers.
    """

    def __init__(
        self,
        name:str="DAQ",
        description:str="",
        classification:str="Data Acquisition System"
        ):

        # Application-wide engines (CVT and data-acquisition service).
        self.cvt = CVTEngine()
        self.das = DAS()

        # Accept either a plain str or a StringType model for the name.
        if isinstance(name, StringType):

            name = name.value

        super(DAQ, self).__init__(
            name=name,
            description=description,
            classification=classification
        )

    # State Methods
    def while_waiting(self):
        r"""
        This method is executed every machine loop when it is on Wait state

        It was designed to check your buffer data in self.data, if your buffer is full, so they pass to run state
        """
        self.send('wait_to_run')

    def while_running(self):
        # Poll every subscribed tag's OPC UA node and fan the sample out to
        # the CVT and the DAS buffers.
        from . import TIMEZONE, MANUFACTURER, SEGMENT
        for tag_name, process_type in self.get_subscribed_tags().items():
            tag = process_type.tag
            namespace = tag.get_node_namespace()
            opcua_address = tag.get_opcua_address()
            values = self.opcua_client_manager.get_node_value_by_opcua_address(opcua_address=opcua_address, namespace=namespace)
            if values:
                data_value = values[0][0]["DataValue"]
                value = data_value.Value.Value
                timestamp = data_value.SourceTimestamp
                # Missing source timestamps fall back to "now" in UTC.
                if not timestamp:
                    timestamp = datetime.now(pytz.utc)
                # Force an aware UTC timestamp (OPC UA source timestamps may be naive).
                timestamp = timestamp.replace(tzinfo=pytz.UTC)
                val = tag.value.convert_value(value=value, from_unit=tag.get_unit(), to_unit=tag.get_display_unit())
                # Only feed the CVT when the tag belongs to this deployment's
                # manufacturer/segment, or when no filters are configured at all.
                if tag.manufacturer==MANUFACTURER and tag.segment==SEGMENT:
                    val = self.cvt.set_value(id=tag.id, value=val, timestamp=timestamp)
                elif not MANUFACTURER and not SEGMENT:
                    val = self.cvt.set_value(id=tag.id, value=val, timestamp=timestamp)
                timestamp = timestamp.astimezone(TIMEZONE)
                # NOTE(review): DAS buffer entries appear to be callable
                # (calling appends a sample) — confirm against automation/buffer.py.
                self.das.buffer[tag_name]["timestamp"](timestamp)
                self.das.buffer[tag_name]["values"](val)

        super().while_running()

    # Auxiliaries Methods
    def set_opcua_client_manager(self, manager:OPCUAClientManager):
        r"""
        Injects the OPC UA client manager used by while_running to poll node values.
        """
        self.opcua_client_manager = manager
|
|
1177
|
+
|
|
1178
|
+
|
|
1179
|
+
class OPCUAServer(StateMachineCore):
    r"""
    State machine that exposes the application's CVT tags, alarms and engines
    as an OPC UA address space: it builds the server on start, populates
    folders/variables/properties, and refreshes their values on every run cycle.
    """

    def __init__(
        self,
        name:str="OPCUAServer",
        description:str="",
        classification:str="OPC UA Server"
        ):
        from . import OPCUA_SERVER_PORT
        self.cvt = CVTEngine()
        self.alarm_manager = AlarmManager()
        self.machine = Machine()
        # Folder name -> OPC UA folder node, filled lazily as segments appear.
        self.my_folders = dict()
        self.port = OPCUA_SERVER_PORT

        # Accept either a plain str or a StringType model for the name.
        if isinstance(name, StringType):

            name = name.value

        super(OPCUAServer, self).__init__(
            name=name,
            description=description,
            classification=classification
        )

    @logging_error_handler
    def while_starting(self):
        r"""
        Builds and starts the OPC UA server, then populates the address space
        with CVT tags, alarms and engines before moving to Wait state.
        """
        self.server = Server()
        self.server.set_endpoint(f'opc.tcp://0.0.0.0:{self.port}/OPCUAServer/')

        # setup our own namespace, not really necessary but should as spec
        uri = "http://examples.freeopcua.github.io"
        self.idx = self.server.register_namespace(uri)
        # get Objects node, this is where we should put our node
        self.objects = self.server.get_objects_node()
        # populating our address space
        self.my_folders['CVT'] = self.objects.add_folder(self.idx, "CVT")
        self.my_folders['Alarms'] = self.objects.add_folder(self.idx, "Alarms")
        self.my_folders['Engines'] = self.objects.add_folder(self.idx, "Engines")

        # SET
        self.server.start()
        self.__set_cvt()
        self.__set_alarms()
        self.__set_engines()

        # Silence the verbose opcua library logger.
        logging.getLogger('opcua').setLevel(logging.ERROR)

        self.send('start_to_wait')

    def while_waiting(self):
        r"""
        Executed every loop on Wait state: immediately advances to Run.
        """
        self.send('wait_to_run')

    def while_running(self):
        r"""
        Executed every loop on Run state: refreshes tag, alarm and engine nodes.
        """
        self.__update_tags()
        self.__update_alarms()
        self.__update_engines()

    def while_resetting(self):
        r"""
        Executed every loop on Reset state.
        """
        # NOTE(review): other transitions use names like 'reset_to_start';
        # confirm 'reset_to_starting' is a declared transition event.
        self.send('reset_to_starting')

    def __set_engines(self):
        r"""
        Creates one OPC UA variable per registered engine (grouped by segment)
        and exposes a whitelist of its serialized fields as node properties.
        """
        from . import MANUFACTURER
        segment = "Engines"
        engines = self.machine.machine_manager.get_machines()

        for engine, _, _ in engines:

            engine = engine.serialize()
            engine_name = engine["name"]
            engine_description = engine["description"] or ""

            if not hasattr(self, engine_name):

                if engine["segment"]:

                    segment = engine["segment"]

                    # Create the segment folder on first use.
                    if segment not in self.my_folders.keys():

                        self.my_folders[segment] = self.objects.add_folder(self.idx, segment)

                    # Engines live in an 'Engines' subfolder of the segment.
                    segment = f"{engine['segment']}.engines"
                    if segment not in self.my_folders.keys():

                        self.my_folders[segment] = self.my_folders[engine['segment']].add_folder(self.idx, 'Engines')

                # NOTE(review): this self-assignment is a no-op (and the key is
                # already known to be present when it runs) — looks like dead code.
                if segment not in self.my_folders.keys():

                    self.my_folders[segment] = self.my_folders[segment]

                var_name = f"{segment}.{engine_name}"

                if not hasattr(self, var_name):
                    __var_name = engine_name.replace(f"{MANUFACTURER}.", "")

                    # Stable 4-byte node id derived from the stripped name.
                    ID = blake2b(key=f"{__var_name}".encode('utf-8')[:64], digest_size=4).hexdigest()
                    setattr(self, var_name, self.my_folders[segment].add_variable(
                        ua.NodeId(identifier=ID, namespaceidx=self.idx),
                        engine_name,
                        0)
                    )
                    node = getattr(self, var_name)
                    self.__load_saved_access_type(node=node, var_name=var_name)
                    description = node.get_attribute(ua.AttributeIds.Description)
                    description.Value.Value.Text = engine_description
                    browse_name = node.get_attribute(ua.AttributeIds.BrowseName)
                    browse_name.Value.Value.Name = ""

                    # Add Properties
                    keep_list = (
                        "state",
                        "manufacturer",
                        "segment",
                        "criticity",
                        "priority",
                        "classification",
                        "machine_interval",
                        "fluid",
                        "maneuver",
                        "operation"
                    )

                    for key in keep_list:
                        if key in engine:
                            ID = blake2b(key=f"{__var_name}.{key}".encode('utf-8')[:64], digest_size=4).hexdigest()
                            prop = node.add_property(ua.NodeId(identifier=ID, namespaceidx=self.idx), key, engine[key])
                            self.__load_saved_access_type(node=prop, var_name=f"{var_name}.{key}")
                            browse_name = prop.get_attribute(ua.AttributeIds.BrowseName)
                            browse_name.Value.Value.Name = ""

    def __set_alarms(self):
        r"""
        Creates one OPC UA variable per alarm (grouped by tag segment) and
        exposes the alarm's serialized state as node properties.
        """
        from . import MANUFACTURER
        alarms = self.alarm_manager.get_alarms()
        segment = "Alarms"
        for _, alarm in alarms.items():

            alarm_name = alarm.name
            alarm_description = alarm.description or ""

            if not hasattr(self, alarm_name):

                if alarm.tag.segment:

                    segment = alarm.tag.segment

                    # Create the segment folder on first use.
                    if segment not in self.my_folders.keys():
                        self.my_folders[segment] = self.objects.add_folder(self.idx, segment)

                    # Alarms live in an 'Alarms' subfolder of the segment.
                    segment = f"{alarm.tag.segment}.alarms"
                    if segment not in self.my_folders.keys():
                        self.my_folders[segment] = self.my_folders[alarm.tag.segment].add_folder(self.idx, 'Alarms')

                # NOTE(review): no-op self-assignment, same pattern as __set_engines.
                if segment not in self.my_folders.keys():

                    self.my_folders[segment] = self.my_folders[segment]

                var_name = f"{segment}.{alarm_name}"

                if not hasattr(self, var_name):
                    __var_name = alarm_name.replace(f"{MANUFACTURER}.", "")
                    ID = blake2b(key=f"{__var_name}".encode('utf-8')[:64], digest_size=4).hexdigest()

                    setattr(self, var_name, self.my_folders[segment].add_variable(
                        ua.NodeId(identifier=ID, namespaceidx=self.idx),
                        alarm_name,
                        0)
                    )
                    node = getattr(self, var_name)
                    self.__load_saved_access_type(node=node, var_name=var_name)
                    description = node.get_attribute(ua.AttributeIds.Description)
                    description.Value.Value.Text = alarm_description
                    browse_name = node.get_attribute(ua.AttributeIds.BrowseName)
                    browse_name.Value.Value.Name = ""

                    # Add State Properties
                    for state_key, state_value in alarm.state.serialize().items():
                        ID = blake2b(key=f"{__var_name}.{state_key}".encode('utf-8')[:64], digest_size=4).hexdigest()
                        prop = node.add_property(ua.NodeId(identifier=ID, namespaceidx=self.idx), state_key, state_value)
                        self.__load_saved_access_type(node=prop, var_name=f"{var_name}.{state_key}")
                        browse_name = prop.get_attribute(ua.AttributeIds.BrowseName)
                        browse_name.Value.Value.Name = ""

    def __set_cvt(self):
        r"""
        Creates one OPC UA variable per CVT tag (string or numeric initial
        value depending on the tag's data type) and exposes the remaining
        serialized tag fields as node properties.
        """
        from . import MANUFACTURER

        segment = "CVT"
        for tag in self.cvt.get_tags():

            if tag["segment"]:

                segment = tag["segment"]

                # Create the segment folder on first use.
                if segment not in self.my_folders.keys():

                    self.my_folders[segment] = self.objects.add_folder(self.idx, segment)

            tag_name = tag['name']
            display_unit = tag["display_unit"]
            data_type = tag["data_type"]
            tag_description = tag["description"] or ""

            var_name = f"{segment}_{tag_name}"
            __var_name = tag_name.replace(f"{MANUFACTURER}.", "")
            identifier = blake2b(key=__var_name.encode('utf-8')[:64], digest_size=4).hexdigest()

            if not hasattr(self, var_name):

                # Initial value type mirrors the tag's declared data type.
                if data_type.lower()=='str':
                    setattr(self, var_name, self.my_folders[f"{segment}"].add_variable(
                        ua.NodeId(identifier=identifier, namespaceidx=self.idx),
                        tag_name,
                        "")
                    )

                else:

                    setattr(self, var_name, self.my_folders[f"{segment}"].add_variable(
                        ua.NodeId(identifier=identifier, namespaceidx=self.idx),
                        tag_name,
                        0.0)
                    )

                node = getattr(self, var_name)
                self.__load_saved_access_type(node=node, var_name=var_name)
                description = node.get_attribute(ua.AttributeIds.Description)
                description.Value.Value.Text = tag_description
                # The browse name carries the tag's display unit.
                browse_name = node.get_attribute(ua.AttributeIds.BrowseName)
                browse_name.Value.Value.Name = display_unit

                # Fields stripped before exposing the rest as properties.
                pop_list = (
                    "id",
                    "value",
                    "timestamp",
                    "timestamps",
                    "values",
                    "name",
                    "description",
                    "opcua_address",
                    "node_namespace",
                    "process_filter",
                    "gaussian_filter",
                    "out_of_range_detection",
                    "frozen_data_detection",
                    "outlier_detection"
                )
                for key in pop_list:
                    tag.pop(key)
                # Add State Properties
                for key, value in tag.items():

                    ID = blake2b(key=f"{__var_name}_{key}".encode('utf-8')[:64], digest_size=4).hexdigest()
                    prop = node.add_property(ua.NodeId(identifier=ID, namespaceidx=self.idx), key, value)
                    self.__load_saved_access_type(node=prop, var_name=f"{var_name}.{key}")
                    browse_name = prop.get_attribute(ua.AttributeIds.BrowseName)
                    browse_name.Value.Value.Name = ""

    def __update_tags(self):
        r"""
        Pushes each CVT tag's current value into its OPC UA variable
        (numeric values rounded to 4 decimals).
        """
        for tag in self.cvt.get_tags():

            segment = "CVT"
            value = tag["value"]

            if tag['segment']:

                segment = tag['segment']

            var_name = f"{segment}_{tag['name']}"
            if hasattr(self, var_name):

                _tag = getattr(self, var_name)

                if isinstance(value, (float, int)):

                    _tag.set_value(round(value, 4))

                else:

                    _tag.set_value(value)

    def __update_alarms(self):
        r"""
        Refreshes every alarm node's properties from the live alarm objects;
        'setpoint.*' properties come from alarm.alarm_setpoint, the rest
        from alarm.state.
        """
        alarms = self.alarm_manager.get_alarms()
        segment = "Alarms"
        for _, alarm in alarms.items():

            alarm_name = alarm.name

            if alarm.tag.segment:

                segment = alarm.tag.segment
                segment = f"{segment}.alarms"

            var_name = f"{segment}.{alarm_name}"
            if hasattr(self, var_name):

                var = getattr(self, var_name)
                props = var.get_properties()

                for prop in props:

                    display_name = prop.get_display_name().Text

                    if display_name.startswith("setpoint"):
                        display_name = display_name.replace("setpoint.", "")
                        attr = getattr(alarm.alarm_setpoint, display_name)
                        prop.set_value(attr)

                    else:
                        attr = getattr(alarm.state, display_name)
                        prop.set_value(attr)

    def __update_engines(self):
        r"""
        Refreshes every engine node's properties from the engines' current
        serialized values.
        """
        segment = "Engines"
        engines = self.machine.machine_manager.get_machines()

        for engine, _, _ in engines:

            engine = engine.serialize()
            engine_name = engine["name"]

            if engine["segment"]:

                segment = engine["segment"]
                segment = f"{segment}.engines"

            var_name = f"{segment}.{engine_name}"
            if hasattr(self, var_name):

                var = getattr(self, var_name)
                props = var.get_properties()

                for prop in props:

                    display_name = prop.get_display_name().Text
                    attr = engine[display_name]
                    prop.set_value(attr)

    def __load_saved_access_type(self, node, var_name):
        r"""
        Applies the persisted access type (Read / Write / ReadWrite) to an
        OPC UA node, creating a default 'Read' record when none exists, and
        subscribes to data changes on writable nodes.
        """
        from .core import PyAutomation
        from .opcua.subscription import SubHandlerServer

        handler = SubHandlerServer()
        app = PyAutomation()
        namespace = node.nodeid.to_string()
        opcua_server_obj = app.get_opcua_server_record_by_namespace(namespace=namespace)
        access_type = "Read"
        if opcua_server_obj:
            record = opcua_server_obj.serialize()
            access_type = record['access_type']['name']
        else:
            app.create_opcua_server_record(name=var_name, namespace=namespace, access_type=access_type)

        access_type = access_type.lower()
        # Clear every access bit first
        node.unset_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentRead)
        node.unset_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentWrite)
        node.unset_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentRead)
        node.unset_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentWrite)

        if access_type == "write":
            # Write-only: disable reading, enable writing
            node.set_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentWrite)
            node.set_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentWrite)
            # Create a subscription handler
            sub = self.server.create_subscription(100, handler)
            sub.subscribe_data_change(node)

        elif access_type == "read":
            # Read-only: enable reading, leave writing disabled
            node.set_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentRead)
            node.set_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentRead)
        elif access_type == "readwrite":
            # Read and write: enable both
            node.set_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentRead)
            node.set_attr_bit(ua.AttributeIds.AccessLevel, ua.AccessLevel.CurrentWrite)
            node.set_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentRead)
            node.set_attr_bit(ua.AttributeIds.UserAccessLevel, ua.AccessLevel.CurrentWrite)
            # Create a subscription handler
            sub = self.server.create_subscription(100, handler)
            sub.subscribe_data_change(node)
|
|
1592
|
+
|
|
1593
|
+
|
|
1594
|
+
class AutomationStateMachine(StateMachineCore):
    r"""
    State machine extending the core with Test and Sleep states and the
    transitions into and out of them.
    """
    # States
    testing = State('test')
    sleeping = State('sleep')

    # Transitions
    test_to_restart = testing.to(StateMachineCore.restarting)
    sleep_to_restart = sleeping.to(StateMachineCore.restarting)
    test_to_reset = testing.to(StateMachineCore.resetting)
    sleep_to_reset = sleeping.to(StateMachineCore.resetting)
    run_to_test = StateMachineCore.running.to(testing)
    wait_to_test = StateMachineCore.waiting.to(testing)
    run_to_sleep = StateMachineCore.running.to(sleeping)
    wait_to_sleep = StateMachineCore.waiting.to(sleeping)
|
|
1611
|
+
|
|
1612
|
+
def while_testing(self):
    r"""
    Executed every machine loop while in Test state.
    """
    self.criticity.value = 3
|
|
1617
|
+
|
|
1618
|
+
def while_sleeping(self):
    r"""
    Executed every machine loop while in Sleep state.
    """
    self.criticity.value = 5
|
|
1623
|
+
|
|
1624
|
+
# Transitions
def on_test_to_restart(self):
    r"""
    Runs once before entering Restart state from Test state.
    """
    self.criticity.value = 4
    self.last_state = "test"
    if self.sio:
        self.sio.emit("on.machine", data=self.serialize())
|
|
1633
|
+
|
|
1634
|
+
def on_test_to_reset(self):
    r"""
    Runs once before entering Reset state from Test state.
    """
    self.criticity.value = 4
    self.last_state = "test"
    if self.sio:
        self.sio.emit("on.machine", data=self.serialize())
|
|
1642
|
+
|
|
1643
|
+
def on_sleep_to_restart(self):
    r"""
    Runs once before entering Restart state from Sleep state.
    """
    self.criticity.value = 4
    self.last_state = "sleep"
    if self.sio:
        self.sio.emit("on.machine", data=self.serialize())
|
|
1651
|
+
|
|
1652
|
+
def on_sleep_to_reset(self):
    r"""
    Runs once before entering Reset state from Sleep state.
    """
    self.criticity.value = 4
    self.last_state = "sleep"
    if self.sio:
        self.sio.emit("on.machine", data=self.serialize())
|
|
1660
|
+
|
|
1661
|
+
def on_enter_sleeping(self):
    r"""
    Emits the serialized machine over socket.io when entering Sleep state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1666
|
+
|
|
1667
|
+
def on_enter_testing(self):
    r"""
    Emits the serialized machine over socket.io when entering Test state.
    """
    if not self.sio:
        return
    self.sio.emit("on.machine", data=self.serialize())
|
|
1672
|
+
|