DLMS-SPODES-client 0.19.36__py3-none-any.whl → 0.19.37__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (60)
  1. DLMS_SPODES_client/FCS16.py +39 -39
  2. DLMS_SPODES_client/__init__.py +12 -12
  3. DLMS_SPODES_client/client.py +2093 -2093
  4. DLMS_SPODES_client/gurux_common/enums/TraceLevel.py +21 -21
  5. DLMS_SPODES_client/gurux_dlms/AesGcmParameter.py +37 -37
  6. DLMS_SPODES_client/gurux_dlms/CountType.py +16 -16
  7. DLMS_SPODES_client/gurux_dlms/GXByteBuffer.py +545 -545
  8. DLMS_SPODES_client/gurux_dlms/GXCiphering.py +196 -196
  9. DLMS_SPODES_client/gurux_dlms/GXDLMS.py +426 -426
  10. DLMS_SPODES_client/gurux_dlms/GXDLMSChippering.py +237 -237
  11. DLMS_SPODES_client/gurux_dlms/GXDLMSChipperingStream.py +977 -977
  12. DLMS_SPODES_client/gurux_dlms/GXDLMSConfirmedServiceError.py +90 -90
  13. DLMS_SPODES_client/gurux_dlms/GXDLMSException.py +139 -139
  14. DLMS_SPODES_client/gurux_dlms/GXDLMSLNParameters.py +33 -33
  15. DLMS_SPODES_client/gurux_dlms/GXDLMSSNParameters.py +21 -21
  16. DLMS_SPODES_client/gurux_dlms/GXDLMSSettings.py +254 -254
  17. DLMS_SPODES_client/gurux_dlms/GXReplyData.py +87 -87
  18. DLMS_SPODES_client/gurux_dlms/HdlcControlFrame.py +9 -9
  19. DLMS_SPODES_client/gurux_dlms/MBusCommand.py +8 -8
  20. DLMS_SPODES_client/gurux_dlms/MBusEncryptionMode.py +27 -27
  21. DLMS_SPODES_client/gurux_dlms/ResponseType.py +8 -8
  22. DLMS_SPODES_client/gurux_dlms/SetResponseType.py +29 -29
  23. DLMS_SPODES_client/gurux_dlms/_HDLCInfo.py +9 -9
  24. DLMS_SPODES_client/gurux_dlms/__init__.py +75 -75
  25. DLMS_SPODES_client/gurux_dlms/enums/Access.py +12 -12
  26. DLMS_SPODES_client/gurux_dlms/enums/ApplicationReference.py +14 -14
  27. DLMS_SPODES_client/gurux_dlms/enums/Authentication.py +41 -41
  28. DLMS_SPODES_client/gurux_dlms/enums/BerType.py +35 -35
  29. DLMS_SPODES_client/gurux_dlms/enums/Command.py +285 -285
  30. DLMS_SPODES_client/gurux_dlms/enums/Definition.py +9 -9
  31. DLMS_SPODES_client/gurux_dlms/enums/ErrorCode.py +46 -46
  32. DLMS_SPODES_client/gurux_dlms/enums/ExceptionServiceError.py +12 -12
  33. DLMS_SPODES_client/gurux_dlms/enums/HardwareResource.py +10 -10
  34. DLMS_SPODES_client/gurux_dlms/enums/HdlcFrameType.py +9 -9
  35. DLMS_SPODES_client/gurux_dlms/enums/Initiate.py +10 -10
  36. DLMS_SPODES_client/gurux_dlms/enums/LoadDataSet.py +13 -13
  37. DLMS_SPODES_client/gurux_dlms/enums/ObjectType.py +306 -306
  38. DLMS_SPODES_client/gurux_dlms/enums/Priority.py +7 -7
  39. DLMS_SPODES_client/gurux_dlms/enums/RequestTypes.py +9 -9
  40. DLMS_SPODES_client/gurux_dlms/enums/Security.py +14 -14
  41. DLMS_SPODES_client/gurux_dlms/enums/Service.py +16 -16
  42. DLMS_SPODES_client/gurux_dlms/enums/ServiceClass.py +9 -9
  43. DLMS_SPODES_client/gurux_dlms/enums/ServiceError.py +8 -8
  44. DLMS_SPODES_client/gurux_dlms/enums/Standard.py +18 -18
  45. DLMS_SPODES_client/gurux_dlms/enums/StateError.py +7 -7
  46. DLMS_SPODES_client/gurux_dlms/enums/Task.py +10 -10
  47. DLMS_SPODES_client/gurux_dlms/enums/VdeStateError.py +10 -10
  48. DLMS_SPODES_client/gurux_dlms/enums/__init__.py +33 -33
  49. DLMS_SPODES_client/gurux_dlms/internal/_GXCommon.py +1673 -1673
  50. DLMS_SPODES_client/logger.py +56 -56
  51. DLMS_SPODES_client/services.py +90 -90
  52. DLMS_SPODES_client/session.py +363 -363
  53. DLMS_SPODES_client/settings.py +48 -48
  54. DLMS_SPODES_client/task.py +1884 -1884
  55. {dlms_spodes_client-0.19.36.dist-info → dlms_spodes_client-0.19.37.dist-info}/METADATA +29 -29
  56. dlms_spodes_client-0.19.37.dist-info/RECORD +61 -0
  57. {dlms_spodes_client-0.19.36.dist-info → dlms_spodes_client-0.19.37.dist-info}/WHEEL +1 -1
  58. dlms_spodes_client-0.19.36.dist-info/RECORD +0 -61
  59. {dlms_spodes_client-0.19.36.dist-info → dlms_spodes_client-0.19.37.dist-info}/entry_points.txt +0 -0
  60. {dlms_spodes_client-0.19.36.dist-info → dlms_spodes_client-0.19.37.dist-info}/top_level.txt +0 -0
DLMS_SPODES_client/session.py
@@ -1,363 +1,363 @@
- from collections import deque
- from time import time
- import queue
- from collections import defaultdict
- from dataclasses import dataclass, field
- from typing import Optional, Collection, Iterator, Self
- import threading
- from functools import cached_property
- import asyncio
- from StructResult import result
- from DLMSCommunicationProfile.osi import OSI
- from DLMS_SPODES import exceptions as exc
- from .logger import LogLevel as logL
- from .client import Client
- from . import task
- from .settings import settings
-
-
- class UniversalLock:
-     def __init__(self) -> None:
-         self._thread_lock = threading.Lock()
-         self._async_lock = asyncio.Lock()
-
-     def __enter__(self) -> "UniversalLock":
-         self._thread_lock.acquire()
-         return self
-
-     def __exit__(self, exc_type, exc_val, exc_tb) -> None:
-         self._thread_lock.release()
-
-     async def __aenter__(self) -> Self:
-         await self._async_lock.acquire()
-         return self
-
-     async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
-         self._async_lock.release()
-
-
- class DualStorage[T]:
-     _persistent: deque[T]
-     _volatile: deque[T]
-     _lock: UniversalLock
-
-     def __init__(self, persistent_depth: int, volatile_depth: int) -> None:
-         self._persistent = deque(maxlen=persistent_depth)
-         self._volatile = deque(maxlen=volatile_depth)
-         self._lock = UniversalLock()
-
-     async def add(self, el: T) -> None:
-         async with self._lock:
-             self._persistent.append(el)
-             self._volatile.append(el)
-
-     def get_persistent(self) -> list[T]:
-         with self._lock:
-             return list(self._persistent)
-
-     def get_volatile(self) -> set[T]:
-         with self._lock:
-             old = self._volatile
-             self._volatile = deque(maxlen=self._volatile.maxlen)
-             return set(old)
-
-
- @dataclass(eq=False)
- class Result:
-     c: Client
-     tsk: task.Base
-     res: result.Result
-     time: float
-
-     def __hash__(self) -> int:
-         return hash(self.c)
-
-
- @dataclass(eq=False)
- class Session[T: result.Result]:
-     c: Client
-     tsk: task.Base[T]
-     acquire_timeout: float = 10.0
-     complete: bool = field(init=False, default=False)
-     res: T | result.Error = field(init=False, default=result.OK)
-
-     async def run(self) -> None:
-         try:
-             await asyncio.wait_for(self.c.lock.acquire(), timeout=self.acquire_timeout)
-         except TimeoutError as e:
-             self.res = result.Error.from_e(e, "Client is buzy")
-             await self.__complete()
-             return
-         try:
-             self.c.log(logL.INFO, "Acquire")
-             self.res = await self.tsk.run(self.c)
-             await asyncio.sleep(0) # switch to other session ?? need
-         except asyncio.CancelledError:
-             self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in session run")
-             self.c.level = OSI.NONE
-         finally:
-             await self.__complete()
-             self.c.lock.release() # for other task
-         # try media close
-         try:
-             await asyncio.wait_for(self.c.lock.acquire(), timeout=.1) # keep anywhere
-         except TimeoutError:
-             self.c.log(logL.INFO, "opened media use in other session")
-             return
-         try:
-             if self.c.media.is_open():
-                 await asyncio.wait_for(self.c.media.close(), timeout=5) # keep anywhere
-                 self.c.log(logL.DEB, f"closed communication channel: {self.c.media}")
-             else:
-                 self.c.log(logL.WARN, F"communication channel: {self.c.media} already closed")
-             self.c.level = OSI.NONE
-         except asyncio.TimeoutError:
-             self.c.log(logL.ERR, "failed to close the channel in 5 seconds")
-         except asyncio.CancelledError: # todo: make better, need close anyway
-             self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in closed channel")
-         finally:
-             self.c.lock.release()
-
-     async def __complete(self) -> None:
-         self.complete = True
-         if result_storage is not None:
-             await result_storage.add(Result(
-                 c=self.c,
-                 tsk=self.tsk,
-                 res=self.res,
-                 time=time(),
-             ))
-
-     def __hash__(self) -> int:
-         return hash(self.c)
-
-
- @dataclass(frozen=True)
- class DistributedTask:
-     """The task for distributed execution on several customers."""
-     tsk: task.Base
-     clients: Collection[Client]
-
-     def __str__(self) -> str:
-         return f"{self.tsk.msg}[{len(self.clients)}])"
-
-
- if settings.session.result_storage.persistent_depth > 0:
-
-     class ResultStorage(DualStorage[Result]):
-         def client2res(self, c: Client) -> list[Result]:
-             with self._lock:
-                 tmp = list(self._persistent)
-             return [res for res in tmp if res.c == c]
-
-
-     result_storage: ResultStorage = ResultStorage(
-         persistent_depth=settings.session.result_storage.persistent_depth,
-         volatile_depth=settings.session.result_storage.volatile_depth
-     )
-     """exchange results archive"""
-
- else:
-     result_storage: None = None
-
-
- class Work:
-     name: str
-     __non_complete: set[Session]
-     __complete: set[Session]
-     time: float
-     __active_tasks: set[asyncio.Task]
-     __is_canceled: bool
-
-     def __init__(self, *sessions: Session, name: str) -> None:
-         self.name = name
-         self.__non_complete = set(sessions)
-         self.__complete = set()
-         self.time = time()
-         self.__active_tasks = set()
-         """used for canceling the Work"""
-         self.__is_canceled = False
-         """cancelation flag"""
-
-     def __str__(self) -> str:
-         return f"Worker[{len(self.__non_complete)}/{len(self.all)}]: {"complete" if self.is_complete() else "in work"}[{len(self.ok_results)}/{len(self.__complete)}]"
-
-     @classmethod
-     def from_distributed_task(cls, *dis_tasks: DistributedTask, name: str) -> Self:
-         sessions: list[Session] = list()
-         client_tasks: dict[Client, list[task.Base]] = defaultdict(list)
-         for dis_tsk in dis_tasks:
-             for client in dis_tsk.clients:
-                 client_tasks[client].append(dis_tsk.tsk.copy())
-         for client, tasks in client_tasks.items():
-             if len(tasks) == 1:
-                 sessions.append(Session(client, tsk=tasks[0]))
-             else:
-                 sessions.append(Session(client, tsk=task.Sequence(*tasks, msg="from distributed")))
-         return cls(*sessions, name=name)
-
-     @cached_property
-     def all(self) -> set[Session[result.Result]]:
-         return self.__non_complete | self.__complete
-
-     def __iter__(self) -> Iterator[Session[result.Result]]:
-         for sess in self.__non_complete:
-             yield sess
-
-     def __getitem__(self, item) -> Session[result.Result]:
-         return tuple(self.all)[item]
-
-     @cached_property
-     def clients(self) -> set[Client]:
-         return {sess.c for sess in self.all}
-
-     @property
-     def ok_results(self) -> tuple[Session[result.Result], ...]:
-         """without errors exchange clients"""
-         return tuple(sess for sess in self.__complete if sess.res.is_ok())
-
-     @property
-     def nok_results(self) -> tuple[Session[result.ErrorPropagator], ...]:
-         """Sessions with errors (excluding incomplete and canceled)"""
-         return tuple(sess for sess in self.__complete if not sess.res.is_ok())
-
-     @property
-     def active_err(self) -> tuple[Session[result.Result], ...]:
-         """Sessions with errors"""
-         return tuple(sess for sess in self.all if not sess.res.is_ok())
-
-     @property
-     def in_progress(self) -> tuple[Session[result.Result], ...]:
-         """Sessions that are still performed (current condition)"""
-         return tuple(sess for sess in self.__non_complete if not sess.complete)
-
-     def pop(self) -> set[Session[result.Result]]:
-         """get and move complete session"""
-         to_move = {sess for sess in self.__non_complete if sess.complete}
-         self.__complete |= to_move
-         self.__non_complete -= to_move
-         return to_move
-
-     def is_complete(self) -> bool:
-         """check all complete sessions. call <pop> before"""
-         return (
-             len(self.__non_complete) == 0
-             or self.__is_canceled
-         )
-
-     async def cancel(self) -> None:
-         self.__is_canceled = True
-         tasks_to_cancel = list(self.__active_tasks)
-         self.__active_tasks.clear()
-         for task in tasks_to_cancel:
-             task.cancel()
-             try:
-                 await task
-             except (asyncio.CancelledError, Exception):
-                 pass
-
-     @property
-     def is_canceled(self) -> bool:
-         """Checks whether the work was canceled"""
-         return self.__is_canceled
-
-     def add_active_task(self, task: asyncio.Task) -> None:
-         self.__active_tasks.add(task)
-         task.add_done_callback(lambda t: self.__active_tasks.discard(t))
-
-
- work_storage: Optional[DualStorage[Work]] = None
- if settings.session.work_storage.persistent_depth > 0:
-     work_storage = DualStorage[Work](
-         persistent_depth=settings.session.work_storage.persistent_depth,
-         volatile_depth=settings.session.work_storage.volatile_depth
-     )
-     """exchange archive of Works"""
-
-
- @dataclass
- class Cancel:
-     work: Work
-
-
- @dataclass
- class Worker:
-     time_checking: float = 1.0
-     __t: Optional[threading.Thread] = field(init=False, default=None)
-     __stop: threading.Event = field(init=False, default_factory=threading.Event)
-     __works: queue.Queue[Work | Cancel] = field(init=False, default_factory=queue.Queue)
-     __has_work: asyncio.Event = field(init=False, default_factory=asyncio.Event)
-
-     def start(self, abort_timeout: int = 5) -> None:
-         if self.__t is not None and self.__t.is_alive():
-             raise RuntimeError("Thread is already running")
-         self.__t = threading.Thread(
-             target=self._run_async_loop,
-             args=(abort_timeout,),
-             daemon=True
-         )
-         self.__t.start()
-
-     def cancel(self, work: Work) -> Cancel:
-         self.__works.put(cancel := Cancel(work))
-         self.__has_work.set()
-         return cancel
-
-     def add_task(self, *dis_task: DistributedTask, name: str = "no_name") -> Work:
-         self.__works.put(worker := Work.from_distributed_task(*dis_task, name=name))
-         self.__has_work.set()
-         return worker
-
-     def add_sessions(self, *sess: Session[result.Result], name: str = "no_name") -> Work:
-         self.__works.put(worker := Work(*sess, name=name))
-         self.__has_work.set()
-         return worker
-
-     def stop(self) -> None:
-         self.__stop.set()
-         self.__has_work.set()
-
-     def join(self, timeout: Optional[float] = None) -> None:
-         if self.__t is not None:
-             self.__t.join(timeout)
-
-     def _run_async_loop(self, abort_timeout: int) -> None:
-         try:
-             asyncio.run(self._coro_loop(abort_timeout))
-         except Exception as e:
-             print(f"Transaction thread error: {e}")
-
-     async def _coro_loop(self, abort_timeout: int) -> None:
-         async with asyncio.TaskGroup() as tg:
-             tg.create_task(self._monitor(tg), name="main_monitor")
-
-     async def _monitor(self, tg: asyncio.TaskGroup) -> None:
-         while not self.__stop.is_set():
-             try:
-                 await asyncio.wait_for(self.__has_work.wait(), timeout=1.0)
-                 while not self.__stop.is_set():
-                     try:
-                         work = self.__works.get_nowait()
-                         if isinstance(work, Cancel):
-                             await work.work.cancel()
-                         else:
-                             for sess in work:
-                                 work.add_active_task(tg.create_task(sess.run()))
-                         self.__works.task_done()
-                         if work_storage is not None:
-                             await work_storage.add(work)
-                     except queue.Empty:
-                         self.__has_work.clear()
-                         break
-                     await asyncio.sleep(0)
-             except asyncio.TimeoutError:
-                 continue
-         if self.__stop.is_set():
-             raise asyncio.CancelledError("Stop requested")
-
-
- worker: Optional[Worker]
- if settings.session.worker.run:
-     worker = Worker(settings.session.worker.time_checking)
- else:
-     worker = None
+ from collections import deque
+ from time import time
+ import queue
+ from collections import defaultdict
+ from dataclasses import dataclass, field
+ from typing import Optional, Collection, Iterator, Self
+ import threading
+ from functools import cached_property
+ import asyncio
+ from StructResult import result
+ from DLMSCommunicationProfile.osi import OSI
+ from DLMS_SPODES import exceptions as exc
+ from .logger import LogLevel as logL
+ from .client import Client
+ from . import task
+ from .settings import settings
+
+
+ class UniversalLock:
+     def __init__(self) -> None:
+         self._thread_lock = threading.Lock()
+         self._async_lock = asyncio.Lock()
+
+     def __enter__(self) -> "UniversalLock":
+         self._thread_lock.acquire()
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+         self._thread_lock.release()
+
+     async def __aenter__(self) -> Self:
+         await self._async_lock.acquire()
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
+         self._async_lock.release()
+
+
+ class DualStorage[T]:
+     _persistent: deque[T]
+     _volatile: deque[T]
+     _lock: UniversalLock
+
+     def __init__(self, persistent_depth: int, volatile_depth: int) -> None:
+         self._persistent = deque(maxlen=persistent_depth)
+         self._volatile = deque(maxlen=volatile_depth)
+         self._lock = UniversalLock()
+
+     async def add(self, el: T) -> None:
+         async with self._lock:
+             self._persistent.append(el)
+             self._volatile.append(el)
+
+     def get_persistent(self) -> list[T]:
+         with self._lock:
+             return list(self._persistent)
+
+     def get_volatile(self) -> set[T]:
+         with self._lock:
+             old = self._volatile
+             self._volatile = deque(maxlen=self._volatile.maxlen)
+             return set(old)
+
+
+ @dataclass(eq=False)
+ class Result:
+     c: Client
+     tsk: task.Base
+     res: result.Result
+     time: float
+
+     def __hash__(self) -> int:
+         return hash(self.c)
+
+
+ @dataclass(eq=False)
+ class Session[T: result.Result]:
+     c: Client
+     tsk: task.Base[T]
+     acquire_timeout: float = 10.0
+     complete: bool = field(init=False, default=False)
+     res: T | result.Error = field(init=False, default=result.OK)
+
+     async def run(self) -> None:
+         try:
+             await asyncio.wait_for(self.c.lock.acquire(), timeout=self.acquire_timeout)
+         except TimeoutError as e:
+             self.res = result.Error.from_e(e, "Client is buzy")
+             await self.__complete()
+             return
+         try:
+             self.c.log(logL.INFO, "Acquire")
+             self.res = await self.tsk.run(self.c)
+             await asyncio.sleep(0) # switch to other session ?? need
+         except asyncio.CancelledError:
+             self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in session run")
+             self.c.level = OSI.NONE
+         finally:
+             await self.__complete()
+             self.c.lock.release() # for other task
+         # try media close
+         try:
+             await asyncio.wait_for(self.c.lock.acquire(), timeout=.1) # keep anywhere
+         except TimeoutError:
+             self.c.log(logL.INFO, "opened media use in other session")
+             return
+         try:
+             if self.c.media.is_open():
+                 await asyncio.wait_for(self.c.media.close(), timeout=5) # keep anywhere
+                 self.c.log(logL.DEB, f"closed communication channel: {self.c.media}")
+             else:
+                 self.c.log(logL.WARN, F"communication channel: {self.c.media} already closed")
+             self.c.level = OSI.NONE
+         except asyncio.TimeoutError:
+             self.c.log(logL.ERR, "failed to close the channel in 5 seconds")
+         except asyncio.CancelledError: # todo: make better, need close anyway
+             self.res = result.Error.from_e(exc.Abort("Task cancelled"), "in closed channel")
+         finally:
+             self.c.lock.release()
+
+     async def __complete(self) -> None:
+         self.complete = True
+         if result_storage is not None:
+             await result_storage.add(Result(
+                 c=self.c,
+                 tsk=self.tsk,
+                 res=self.res,
+                 time=time(),
+             ))
+
+     def __hash__(self) -> int:
+         return hash(self.c)
+
+
+ @dataclass(frozen=True)
+ class DistributedTask:
+     """The task for distributed execution on several customers."""
+     tsk: task.Base
+     clients: Collection[Client]
+
+     def __str__(self) -> str:
+         return f"{self.tsk.msg}[{len(self.clients)}])"
+
+
+ if settings.session.result_storage.persistent_depth > 0:
+
+     class ResultStorage(DualStorage[Result]):
+         def client2res(self, c: Client) -> list[Result]:
+             with self._lock:
+                 tmp = list(self._persistent)
+             return [res for res in tmp if res.c == c]
+
+
+     result_storage: ResultStorage = ResultStorage(
+         persistent_depth=settings.session.result_storage.persistent_depth,
+         volatile_depth=settings.session.result_storage.volatile_depth
+     )
+     """exchange results archive"""
+
+ else:
+     result_storage: None = None
+
+
+ class Work:
+     name: str
+     __non_complete: set[Session]
+     __complete: set[Session]
+     time: float
+     __active_tasks: set[asyncio.Task]
+     __is_canceled: bool
+
+     def __init__(self, *sessions: Session, name: str) -> None:
+         self.name = name
+         self.__non_complete = set(sessions)
+         self.__complete = set()
+         self.time = time()
+         self.__active_tasks = set()
+         """used for canceling the Work"""
+         self.__is_canceled = False
+         """cancelation flag"""
+
+     def __str__(self) -> str:
+         return f"Worker[{len(self.__non_complete)}/{len(self.all)}]: {"complete" if self.is_complete() else "in work"}[{len(self.ok_results)}/{len(self.__complete)}]"
+
+     @classmethod
+     def from_distributed_task(cls, *dis_tasks: DistributedTask, name: str) -> Self:
+         sessions: list[Session] = list()
+         client_tasks: dict[Client, list[task.Base]] = defaultdict(list)
+         for dis_tsk in dis_tasks:
+             for client in dis_tsk.clients:
+                 client_tasks[client].append(dis_tsk.tsk.copy())
+         for client, tasks in client_tasks.items():
+             if len(tasks) == 1:
+                 sessions.append(Session(client, tsk=tasks[0]))
+             else:
+                 sessions.append(Session(client, tsk=task.Sequence(*tasks, msg="from distributed")))
+         return cls(*sessions, name=name)
+
+     @cached_property
+     def all(self) -> set[Session[result.Result]]:
+         return self.__non_complete | self.__complete
+
+     def __iter__(self) -> Iterator[Session[result.Result]]:
+         for sess in self.__non_complete:
+             yield sess
+
+     def __getitem__(self, item) -> Session[result.Result]:
+         return tuple(self.all)[item]
+
+     @cached_property
+     def clients(self) -> set[Client]:
+         return {sess.c for sess in self.all}
+
+     @property
+     def ok_results(self) -> tuple[Session[result.Result], ...]:
+         """without errors exchange clients"""
+         return tuple(sess for sess in self.__complete if sess.res.is_ok())
+
+     @property
+     def nok_results(self) -> tuple[Session[result.ErrorPropagator], ...]:
+         """Sessions with errors (excluding incomplete and canceled)"""
+         return tuple(sess for sess in self.__complete if not sess.res.is_ok())
+
+     @property
+     def active_err(self) -> tuple[Session[result.Result], ...]:
+         """Sessions with errors"""
+         return tuple(sess for sess in self.all if not sess.res.is_ok())
+
+     @property
+     def in_progress(self) -> tuple[Session[result.Result], ...]:
+         """Sessions that are still performed (current condition)"""
+         return tuple(sess for sess in self.__non_complete if not sess.complete)
+
+     def pop(self) -> set[Session[result.Result]]:
+         """get and move complete session"""
+         to_move = {sess for sess in self.__non_complete if sess.complete}
+         self.__complete |= to_move
+         self.__non_complete -= to_move
+         return to_move
+
+     def is_complete(self) -> bool:
+         """check all complete sessions. call <pop> before"""
+         return (
+             len(self.__non_complete) == 0
+             or self.__is_canceled
+         )
+
+     async def cancel(self) -> None:
+         self.__is_canceled = True
+         tasks_to_cancel = list(self.__active_tasks)
+         self.__active_tasks.clear()
+         for task in tasks_to_cancel:
+             task.cancel()
+             try:
+                 await task
+             except (asyncio.CancelledError, Exception):
+                 pass
+
+     @property
+     def is_canceled(self) -> bool:
+         """Checks whether the work was canceled"""
+         return self.__is_canceled
+
+     def add_active_task(self, task: asyncio.Task) -> None:
+         self.__active_tasks.add(task)
+         task.add_done_callback(lambda t: self.__active_tasks.discard(t))
+
+
+ work_storage: Optional[DualStorage[Work]] = None
+ if settings.session.work_storage.persistent_depth > 0:
+     work_storage = DualStorage[Work](
+         persistent_depth=settings.session.work_storage.persistent_depth,
+         volatile_depth=settings.session.work_storage.volatile_depth
+     )
+     """exchange archive of Works"""
+
+
+ @dataclass
+ class Cancel:
+     work: Work
+
+
+ @dataclass
+ class Worker:
+     time_checking: float = 1.0
+     __t: Optional[threading.Thread] = field(init=False, default=None)
+     __stop: threading.Event = field(init=False, default_factory=threading.Event)
+     __works: queue.Queue[Work | Cancel] = field(init=False, default_factory=queue.Queue)
+     __has_work: asyncio.Event = field(init=False, default_factory=asyncio.Event)
+
+     def start(self, abort_timeout: int = 5) -> None:
+         if self.__t is not None and self.__t.is_alive():
+             raise RuntimeError("Thread is already running")
+         self.__t = threading.Thread(
+             target=self._run_async_loop,
+             args=(abort_timeout,),
+             daemon=True
+         )
+         self.__t.start()
+
+     def cancel(self, work: Work) -> Cancel:
+         self.__works.put(cancel := Cancel(work))
+         self.__has_work.set()
+         return cancel
+
+     def add_task(self, *dis_task: DistributedTask, name: str = "no_name") -> Work:
+         self.__works.put(worker := Work.from_distributed_task(*dis_task, name=name))
+         self.__has_work.set()
+         return worker
+
+     def add_sessions(self, *sess: Session[result.Result], name: str = "no_name") -> Work:
+         self.__works.put(worker := Work(*sess, name=name))
+         self.__has_work.set()
+         return worker
+
+     def stop(self) -> None:
+         self.__stop.set()
+         self.__has_work.set()
+
+     def join(self, timeout: Optional[float] = None) -> None:
+         if self.__t is not None:
+             self.__t.join(timeout)
+
+     def _run_async_loop(self, abort_timeout: int) -> None:
+         try:
+             asyncio.run(self._coro_loop(abort_timeout))
+         except Exception as e:
+             print(f"Transaction thread error: {e}")
+
+     async def _coro_loop(self, abort_timeout: int) -> None:
+         async with asyncio.TaskGroup() as tg:
+             tg.create_task(self._monitor(tg), name="main_monitor")
+
+     async def _monitor(self, tg: asyncio.TaskGroup) -> None:
+         while not self.__stop.is_set():
+             try:
+                 await asyncio.wait_for(self.__has_work.wait(), timeout=1.0)
+                 while not self.__stop.is_set():
+                     try:
+                         work = self.__works.get_nowait()
+                         if isinstance(work, Cancel):
+                             await work.work.cancel()
+                         else:
+                             for sess in work:
+                                 work.add_active_task(tg.create_task(sess.run()))
+                         self.__works.task_done()
+                         if work_storage is not None:
+                             await work_storage.add(work)
+                     except queue.Empty:
+                         self.__has_work.clear()
+                         break
+                     await asyncio.sleep(0)
+             except asyncio.TimeoutError:
+                 continue
+         if self.__stop.is_set():
+             raise asyncio.CancelledError("Stop requested")
+
+
+ worker: Optional[Worker]
+ if settings.session.worker.run:
+     worker = Worker(settings.session.worker.time_checking)
+ else:
+     worker = None