DLMS-SPODES-client 0.19.22__py3-none-any.whl → 0.19.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- DLMS_SPODES_client/FCS16.py +39 -39
- DLMS_SPODES_client/__init__.py +12 -12
- DLMS_SPODES_client/client.py +2091 -2091
- DLMS_SPODES_client/gurux_common/enums/TraceLevel.py +21 -21
- DLMS_SPODES_client/gurux_dlms/AesGcmParameter.py +37 -37
- DLMS_SPODES_client/gurux_dlms/CountType.py +16 -16
- DLMS_SPODES_client/gurux_dlms/GXByteBuffer.py +545 -545
- DLMS_SPODES_client/gurux_dlms/GXCiphering.py +196 -196
- DLMS_SPODES_client/gurux_dlms/GXDLMS.py +426 -426
- DLMS_SPODES_client/gurux_dlms/GXDLMSChippering.py +237 -237
- DLMS_SPODES_client/gurux_dlms/GXDLMSChipperingStream.py +977 -977
- DLMS_SPODES_client/gurux_dlms/GXDLMSConfirmedServiceError.py +90 -90
- DLMS_SPODES_client/gurux_dlms/GXDLMSException.py +139 -139
- DLMS_SPODES_client/gurux_dlms/GXDLMSLNParameters.py +33 -33
- DLMS_SPODES_client/gurux_dlms/GXDLMSSNParameters.py +21 -21
- DLMS_SPODES_client/gurux_dlms/GXDLMSSettings.py +254 -254
- DLMS_SPODES_client/gurux_dlms/GXReplyData.py +87 -87
- DLMS_SPODES_client/gurux_dlms/HdlcControlFrame.py +9 -9
- DLMS_SPODES_client/gurux_dlms/MBusCommand.py +8 -8
- DLMS_SPODES_client/gurux_dlms/MBusEncryptionMode.py +27 -27
- DLMS_SPODES_client/gurux_dlms/ResponseType.py +8 -8
- DLMS_SPODES_client/gurux_dlms/SetResponseType.py +29 -29
- DLMS_SPODES_client/gurux_dlms/_HDLCInfo.py +9 -9
- DLMS_SPODES_client/gurux_dlms/__init__.py +75 -75
- DLMS_SPODES_client/gurux_dlms/enums/Access.py +12 -12
- DLMS_SPODES_client/gurux_dlms/enums/ApplicationReference.py +14 -14
- DLMS_SPODES_client/gurux_dlms/enums/Authentication.py +41 -41
- DLMS_SPODES_client/gurux_dlms/enums/BerType.py +35 -35
- DLMS_SPODES_client/gurux_dlms/enums/Command.py +285 -285
- DLMS_SPODES_client/gurux_dlms/enums/Definition.py +9 -9
- DLMS_SPODES_client/gurux_dlms/enums/ErrorCode.py +46 -46
- DLMS_SPODES_client/gurux_dlms/enums/ExceptionServiceError.py +12 -12
- DLMS_SPODES_client/gurux_dlms/enums/HardwareResource.py +10 -10
- DLMS_SPODES_client/gurux_dlms/enums/HdlcFrameType.py +9 -9
- DLMS_SPODES_client/gurux_dlms/enums/Initiate.py +10 -10
- DLMS_SPODES_client/gurux_dlms/enums/LoadDataSet.py +13 -13
- DLMS_SPODES_client/gurux_dlms/enums/ObjectType.py +306 -306
- DLMS_SPODES_client/gurux_dlms/enums/Priority.py +7 -7
- DLMS_SPODES_client/gurux_dlms/enums/RequestTypes.py +9 -9
- DLMS_SPODES_client/gurux_dlms/enums/Security.py +14 -14
- DLMS_SPODES_client/gurux_dlms/enums/Service.py +16 -16
- DLMS_SPODES_client/gurux_dlms/enums/ServiceClass.py +9 -9
- DLMS_SPODES_client/gurux_dlms/enums/ServiceError.py +8 -8
- DLMS_SPODES_client/gurux_dlms/enums/Standard.py +18 -18
- DLMS_SPODES_client/gurux_dlms/enums/StateError.py +7 -7
- DLMS_SPODES_client/gurux_dlms/enums/Task.py +10 -10
- DLMS_SPODES_client/gurux_dlms/enums/VdeStateError.py +10 -10
- DLMS_SPODES_client/gurux_dlms/enums/__init__.py +33 -33
- DLMS_SPODES_client/gurux_dlms/internal/_GXCommon.py +1673 -1673
- DLMS_SPODES_client/logger.py +56 -56
- DLMS_SPODES_client/services.py +97 -97
- DLMS_SPODES_client/session.py +365 -365
- DLMS_SPODES_client/settings.py +48 -48
- DLMS_SPODES_client/task.py +1842 -1842
- {dlms_spodes_client-0.19.22.dist-info → dlms_spodes_client-0.19.23.dist-info}/METADATA +29 -27
- dlms_spodes_client-0.19.23.dist-info/RECORD +61 -0
- dlms_spodes_client-0.19.22.dist-info/RECORD +0 -61
- {dlms_spodes_client-0.19.22.dist-info → dlms_spodes_client-0.19.23.dist-info}/WHEEL +0 -0
- {dlms_spodes_client-0.19.22.dist-info → dlms_spodes_client-0.19.23.dist-info}/entry_points.txt +0 -0
- {dlms_spodes_client-0.19.22.dist-info → dlms_spodes_client-0.19.23.dist-info}/top_level.txt +0 -0
DLMS_SPODES_client/session.py
CHANGED

@@ -1,365 +1,365 @@

The hunk removes and re-adds the entire file; the 365 removed lines and the 365 added lines are identical, so the file content is shown once below.
```python
from collections import deque
from time import time
import queue
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Optional, Collection, Iterator, Self
import threading
from functools import cached_property
import asyncio
from StructResult import result
from DLMSCommunicationProfile.osi import OSI
from DLMS_SPODES import exceptions as exc
from .logger import LogLevel as logL
from .client import Client
from . import task
from .settings import settings


class UniversalLock:
    def __init__(self) -> None:
        self._thread_lock = threading.Lock()
        self._async_lock = asyncio.Lock()

    def __enter__(self) -> "UniversalLock":
        self._thread_lock.acquire()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        self._thread_lock.release()

    async def __aenter__(self) -> Self:
        await self._async_lock.acquire()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        self._async_lock.release()
```
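`UniversalLock` pairs a `threading.Lock` with an `asyncio.Lock` behind one object, but the two context-manager protocols guard different locks: `with lock:` takes only the thread lock, `async with lock:` only the asyncio lock. A minimal sketch (not part of the package) of the two entry points:

```python
import asyncio
import threading

lock = UniversalLock()  # the class defined above

def from_thread() -> None:
    with lock:  # acquires only lock._thread_lock
        print("sync critical section")

async def from_coroutine() -> None:
    async with lock:  # acquires only lock._async_lock
        print("async critical section")

# The two sections do not exclude each other: a thread holding the sync
# side does not block a coroutine holding the async side.
threading.Thread(target=from_thread).start()
asyncio.run(from_coroutine())
```

This is worth keeping in mind when mixing the two sides on one object, as `DualStorage` below does (async `add()`, sync getters).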
```python
class DualStorage[T]:
    _persistent: deque[T]
    _volatile: deque[T]
    _lock: UniversalLock

    def __init__(self, persistent_depth: int, volatile_depth: int) -> None:
        self._persistent = deque(maxlen=persistent_depth)
        self._volatile = deque(maxlen=volatile_depth)
        self._lock = UniversalLock()

    async def add(self, el: T) -> None:
        async with self._lock:
            self._persistent.append(el)
            self._volatile.append(el)

    def get_persistent(self) -> list[T]:
        with self._lock:
            return list(self._persistent)

    def get_volatile(self) -> set[T]:
        with self._lock:
            old = self._volatile
            self._volatile = deque(maxlen=self._volatile.maxlen)
            return set(old)
```
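A hedged sketch of the two-deque semantics, assuming `DualStorage` as defined above: `add()` appends to both bounded ring buffers, `get_persistent()` returns a copy, and `get_volatile()` drains by swapping in a fresh deque:

```python
import asyncio

async def demo() -> None:
    storage: DualStorage[int] = DualStorage(persistent_depth=3, volatile_depth=2)
    for i in range(4):
        await storage.add(i)
    print(storage.get_persistent())  # [1, 2, 3] -- ring buffer keeps the last 3
    print(storage.get_volatile())    # {2, 3}    -- last 2, and the deque is drained
    print(storage.get_volatile())    # set()     -- a fresh empty deque replaced it

asyncio.run(demo())
```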
```python
@dataclass(eq=False)
class Result:
    c: Client
    tsk: task.Base
    res: result.Result
    time: float

    def __hash__(self) -> int:
        return hash(self.c)


@dataclass(eq=False)
class Session[T: result.Result]:
    c: Client
    tsk: task.Base[T]
    acquire_timeout: float = 10.0
    complete: bool = field(init=False, default=False)
    res: T | result.Error = field(init=False, default=result.OK)

    async def run(self) -> None:
        try:
            await asyncio.wait_for(self.c.lock.acquire(), timeout=self.acquire_timeout)
        except TimeoutError as e:
            self.res = result.Error.from_e(e, "Client is busy")
            await self.__complete()
            return
        try:
            self.c.log(logL.INFO, "Acquire")
            self.res = await self.tsk.run(self.c)
            await asyncio.sleep(0)  # switch to other session ?? need
        except asyncio.CancelledError:
            self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in session run")
            self.c.level = OSI.NONE
            # todo: make c.media.close()
            return
        finally:
            await self.__complete()
            self.c.lock.release()
            # try media close
            try:
                await asyncio.wait_for(self.c.lock.acquire(), timeout=.1)  # keep anywhere
            except TimeoutError:
                self.c.log(logL.INFO, "opened media in use by another session")
                return
            try:
                if self.c.media.is_open():
                    await asyncio.wait_for(self.c.media.close(), timeout=5)  # keep anywhere
                    self.c.log(logL.DEB, f"closed communication channel: {self.c.media}")
                else:
                    self.c.log(logL.WARN, f"communication channel: {self.c.media} already closed")
                self.c.level = OSI.NONE
            except asyncio.TimeoutError:
                self.c.log(logL.ERR, "failed to close the channel in 5 seconds")
            except asyncio.CancelledError:  # todo: make better, need close anyway
                self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in closed channel")
            finally:
                self.c.lock.release()

    async def __complete(self) -> None:
        self.complete = True
        if result_storage is not None:
            await result_storage.add(Result(
                c=self.c,
                tsk=self.tsk,
                res=self.res,
                time=time(),
            ))

    def __hash__(self) -> int:
        return hash(self.c)
```
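For orientation, a minimal sketch of driving one `Session` directly (not from the package); constructing a real `Client` and task is elided because their signatures are not part of this diff:

```python
import asyncio

async def run_one(c: Client, tsk: task.Base) -> None:
    sess = Session(c, tsk=tsk)  # acquire_timeout defaults to 10 s
    await sess.run()            # always marks sess.complete via __complete()
    print("ok" if sess.res.is_ok() else f"failed: {sess.res}")

# asyncio.run(run_one(some_client, some_task))  # placeholders; construction not shown in this diff
```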
```python
@dataclass(frozen=True)
class DistributedTask:
    """A task for distributed execution across several clients."""
    tsk: task.Base
    clients: Collection[Client]

    def __str__(self) -> str:
        return f"{self.tsk.msg}[{len(self.clients)}]"


if settings.session.result_storage.persistent_depth > 0:

    class ResultStorage(DualStorage[Result]):
        def client2res(self, c: Client) -> list[Result]:
            with self._lock:
                tmp = list(self._persistent)
            return [res for res in tmp if res.c == c]

    result_storage: ResultStorage = ResultStorage(
        persistent_depth=settings.session.result_storage.persistent_depth,
        volatile_depth=settings.session.result_storage.volatile_depth
    )
    """exchange results archive"""

else:
    result_storage: None = None
```
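`result_storage` is a module-level singleton that exists only when `settings.session.result_storage.persistent_depth > 0`; otherwise it is `None` and `Session.__complete` skips archiving. A sketch of querying it, with `some_client` as a placeholder `Client` instance:

```python
if result_storage is not None:
    history = result_storage.get_persistent()   # bounded archive of Result records
    fresh = result_storage.get_volatile()       # new Results since the last drain
    for res in result_storage.client2res(some_client):  # placeholder client
        print(res.time, res.tsk, res.res)
```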
```python
class Work:
    name: str
    __non_complete: set[Session]
    __complete: set[Session]
    time: float
    __active_tasks: set[asyncio.Task]
    __is_canceled: bool

    def __init__(self, *sessions: Session, name: str) -> None:
        self.name = name
        self.__non_complete = set(sessions)
        self.__complete = set()
        self.time = time()
        self.__active_tasks = set()
        """used for cancelling the Work"""
        self.__is_canceled = False
        """cancellation flag"""

    def __str__(self) -> str:
        return f"Worker[{len(self.__non_complete)}/{len(self.all)}]: {"complete" if self.is_complete() else "in work"}[{len(self.ok_results)}/{len(self.__complete)}]"

    @classmethod
    def from_distributed_task(cls, *dis_tasks: DistributedTask, name: str) -> Self:
        sessions: list[Session] = list()
        client_tasks: dict[Client, list[task.Base]] = defaultdict(list)
        for dis_tsk in dis_tasks:
            for client in dis_tsk.clients:
                client_tasks[client].append(dis_tsk.tsk.copy())
        for client, tasks in client_tasks.items():
            if len(tasks) == 1:
                sessions.append(Session(client, tsk=tasks[0]))
            else:
                sessions.append(Session(client, tsk=task.Sequence(*tasks, msg="from distributed")))
        return cls(*sessions, name=name)

    @cached_property
    def all(self) -> set[Session[result.Result]]:
        return self.__non_complete | self.__complete

    def __iter__(self) -> Iterator[Session[result.Result]]:
        for sess in self.__non_complete:
            yield sess

    def __getitem__(self, item) -> Session[result.Result]:
        return tuple(self.all)[item]

    @cached_property
    def clients(self) -> set[Client]:
        return {sess.c for sess in self.all}

    @property
    def ok_results(self) -> tuple[Session[result.Result], ...]:
        """sessions whose exchange completed without errors"""
        return tuple(sess for sess in self.__complete if sess.res.is_ok())

    @property
    def nok_results(self) -> tuple[Session[result.ErrorPropagator], ...]:
        """sessions with errors (excluding incomplete and cancelled ones)"""
        return tuple(sess for sess in self.__complete if not sess.res.is_ok())

    @property
    def active_err(self) -> tuple[Session[result.Result], ...]:
        """sessions with errors"""
        return tuple(sess for sess in self.all if not sess.res.is_ok())

    @property
    def in_progress(self) -> tuple[Session[result.Result], ...]:
        """sessions that are still running (current state)"""
        return tuple(sess for sess in self.__non_complete if not sess.complete)

    def pop(self) -> set[Session[result.Result]]:
        """get completed sessions and move them to the completed set"""
        to_move = {sess for sess in self.__non_complete if sess.complete}
        self.__complete |= to_move
        self.__non_complete -= to_move
        return to_move

    def is_complete(self) -> bool:
        """check that all sessions are complete; call <pop> before"""
        return (
            len(self.__non_complete) == 0
            or self.__is_canceled
        )

    async def cancel(self) -> None:
        self.__is_canceled = True
        tasks_to_cancel = list(self.__active_tasks)
        self.__active_tasks.clear()
        for tsk in tasks_to_cancel:
            tsk.cancel()
            try:
                await tsk
            except (asyncio.CancelledError, Exception):
                pass

    @property
    def is_canceled(self) -> bool:
        """whether the work was cancelled"""
        return self.__is_canceled

    def add_active_task(self, tsk: asyncio.Task) -> None:
        self.__active_tasks.add(tsk)
        tsk.add_done_callback(lambda t: self.__active_tasks.discard(t))
```
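The `pop()`/`is_complete()` docstrings imply a polling loop on the caller's side; a hedged sketch of that pattern:

```python
import asyncio

async def watch(work: Work) -> None:
    while True:
        for sess in work.pop():  # move finished sessions to the completed set
            print(sess.c, "ok" if sess.res.is_ok() else sess.res)
        if work.is_complete():   # meaningful only after pop()
            break
        await asyncio.sleep(1.0)
    print(f"done: {len(work.ok_results)} ok, {len(work.nok_results)} failed")
```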
```python
work_storage: Optional[DualStorage[Work]] = None
if settings.session.work_storage.persistent_depth > 0:
    work_storage = DualStorage[Work](
        persistent_depth=settings.session.work_storage.persistent_depth,
        volatile_depth=settings.session.work_storage.volatile_depth
    )
    """exchange archive of Works"""


@dataclass
class Cancel:
    work: Work


@dataclass
class Worker:
    time_checking: float = 1.0
    __t: Optional[threading.Thread] = field(init=False, default=None)
    __stop: threading.Event = field(init=False, default_factory=threading.Event)
    __works: queue.Queue[Work | Cancel] = field(init=False, default_factory=queue.Queue)
    __has_work: asyncio.Event = field(init=False, default_factory=asyncio.Event)

    def start(self, abort_timeout: int = 5) -> None:
        if self.__t is not None and self.__t.is_alive():
            raise RuntimeError("Thread is already running")
        self.__t = threading.Thread(
            target=self._run_async_loop,
            args=(abort_timeout,),
            daemon=True
        )
        self.__t.start()

    def cancel(self, work: Work) -> Cancel:
        self.__works.put(cancel := Cancel(work))
        self.__has_work.set()
        return cancel

    def add_task(self, *dis_task: DistributedTask, name: str = "no_name") -> Work:
        self.__works.put(worker := Work.from_distributed_task(*dis_task, name=name))
        self.__has_work.set()
        return worker

    def add_sessions(self, *sess: Session[result.Result], name: str = "no_name") -> Work:
        self.__works.put(worker := Work(*sess, name=name))
        self.__has_work.set()
        return worker

    def stop(self) -> None:
        self.__stop.set()
        self.__has_work.set()

    def join(self, timeout: Optional[float] = None) -> None:
        if self.__t is not None:
            self.__t.join(timeout)

    def _run_async_loop(self, abort_timeout: int) -> None:
        try:
            asyncio.run(self._coro_loop(abort_timeout))
        except Exception as e:
            print(f"Transaction thread error: {e}")

    async def _coro_loop(self, abort_timeout: int) -> None:
        async with asyncio.TaskGroup() as tg:
            tg.create_task(self._monitor(tg), name="main_monitor")

    async def _monitor(self, tg: asyncio.TaskGroup) -> None:
        while not self.__stop.is_set():
            try:
                await asyncio.wait_for(self.__has_work.wait(), timeout=1.0)
                while not self.__stop.is_set():
                    try:
                        work = self.__works.get_nowait()
                        if isinstance(work, Cancel):
                            await work.work.cancel()
                        else:
                            for sess in work:
                                work.add_active_task(tg.create_task(sess.run()))
                            self.__works.task_done()
                            if work_storage is not None:
                                await work_storage.add(work)
                    except queue.Empty:
                        self.__has_work.clear()
                        break
                    await asyncio.sleep(0)
            except asyncio.TimeoutError:
                continue
        if self.__stop.is_set():
            raise asyncio.CancelledError("Stop requested")


worker: Optional[Worker]
if settings.session.worker.run:
    worker = Worker(settings.session.worker.time_checking)
else:
    worker = None
```
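Putting the pieces together, a hedged end-to-end sketch: it assumes `settings.session.worker.run` is true (so the module-level `worker` exists) and uses placeholder `meters`/`read_task` names for clients and a task that this diff does not define:

```python
from DLMS_SPODES_client import session

if session.worker is not None:
    session.worker.start()  # daemon thread running an asyncio loop
    work = session.worker.add_task(
        session.DistributedTask(tsk=read_task, clients=meters),  # placeholders
        name="nightly read",
    )
    # Each client gets its own copy of read_task; several DistributedTasks
    # aimed at the same client are folded into one task.Sequence.
    ...
    session.worker.cancel(work)   # queues a Cancel marker, handled by _monitor
    session.worker.stop()
    session.worker.join(timeout=5)
```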