tp-shared 0.2.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tp_shared/autoins_mpg_service/repos/autoins_results_ack_list_queue_repo.py +14 -0
- tp_shared/autoins_mpg_service/schemas/autoins_result_message.py +34 -0
- tp_shared/autoins_mpg_service/worker_services/base_autoins_results_ack_list_queue_worker_service.py +34 -0
- tp_shared/base/base_message.py +5 -0
- tp_shared/gibdd_service/repo/gibdd_dc_results_stream_queue_repo.py +17 -0
- tp_shared/gibdd_service/schemas/gibdd_dc_result_message.py +39 -0
- tp_shared/gibdd_service/worker_services/base_gibdd_dc_results_stream_queue_worker_service.py +122 -0
- tp_shared/mos_passes_service/repo/mos_passes_results_stream_queue_repo.py +17 -0
- tp_shared/mos_passes_service/schemas/mos_passes_result_message.py +23 -0
- tp_shared/mos_passes_service/worker_services/base_mos_passes_results_stream_queue_worker_service.py +109 -0
- tp_shared/nsis_service/repos/nsis_results_ack_list_queue_repo.py +13 -0
- tp_shared/nsis_service/schemas/nsis_result_message.py +29 -0
- tp_shared/nsis_service/types/nsis_task_type.py +6 -0
- tp_shared/nsis_service/worker_services/base_nsis_results_queue_worker_service.py +32 -0
- tp_shared/policies_service/repos/policies_event_stream_queue_repo.py +17 -0
- tp_shared/policies_service/schemas/policies_result_message.py +28 -0
- tp_shared/policies_service/worker_services/base_policies_results_queue_worker_service.py +109 -0
- tp_shared/rnis_check_service/repos/rnis_check_results_stream_queue_repo.py +17 -0
- tp_shared/rnis_check_service/schemas/rnis_check_result_message.py +9 -0
- tp_shared/rnis_check_service/worker_services/base_rnis_results_queue_worker_service.py +109 -0
- tp_shared/types/dc_operator_status.py +7 -0
- tp_shared/types/pass_allowed_zone.py +8 -0
- tp_shared/types/pass_series.py +14 -0
- tp_shared/types/pass_time_of_date.py +6 -0
- tp_shared/types/policy_series.py +19 -0
- tp_shared/types/policy_status.py +7 -0
- tp_shared-0.2.27.dist-info/METADATA +76 -0
- tp_shared-0.2.27.dist-info/RECORD +29 -0
- tp_shared-0.2.27.dist-info/WHEEL +4 -0
|
from redis.asyncio import Redis
from tp_helper.base_queues.base_ack_list_queue_repo import BaseAckListQueueRepo

from tp_shared.autoins_mpg_service.schemas.autoins_result_message import (
    AutoinsResultMessage,
)


class AutoinsResultsAckListQueueRepo(BaseAckListQueueRepo):
    """Redis ack-list queue repo for autoins MPG service results.

    Messages in the queue are (de)serialized through the
    ``AutoinsResultMessage`` schema.
    """

    # Redis key of the backing ack list.
    QUEUE_NAME = "autoins:service:results:ack:list"

    def __init__(self, redis_client: Redis):
        # NOTE: unlike the stream repos in this package, the base ack-list
        # repo takes no queue_name kwarg here; it presumably reads the
        # QUEUE_NAME class attribute — confirm against BaseAckListQueueRepo.
        super().__init__(redis_client=redis_client, schema=AutoinsResultMessage)
from datetime import date

from pydantic import BaseModel

from tp_shared.types.policy_series import PolicySeries


class AutoinsResultPolicy(BaseModel):
    """One insurance policy record returned for a series/number lookup."""

    # Required identification fields.
    insurer_name: str
    reg_number: str
    series: PolicySeries
    number: str
    start_date: date
    end_date: date
    # Up to three optional usage periods inside the policy term.
    period1_start: date | None = None
    period1_end: date | None = None
    period2_start: date | None = None
    period2_end: date | None = None
    period3_start: date | None = None
    period3_end: date | None = None
    # Optional vehicle identification details.
    vin: str | None = None
    body_number: str | None = None
    chassis_number: str | None = None
    car_mark: str | None = None
    car_model: str | None = None
    # Optional upstream metadata; semantics of the state/status strings
    # are defined by the producing service — not visible here.
    external_policy_id: int | None = None
    policy_state: str | None = None
    policy_status_t_use: str | None = None


class AutoinsResultMessage(BaseModel):
    """Queue message: all policies found for one series/number query."""

    series: PolicySeries
    number: str
    # Pydantic copies mutable defaults per instance, so [] is safe here.
    policies: list[AutoinsResultPolicy] = []
tp_shared/autoins_mpg_service/worker_services/base_autoins_results_ack_list_queue_worker_service.py
ADDED
|
from logging import Logger

from redis.asyncio import Redis
from tp_helper.base_items.base_worker_service import BaseWorkerService
from tp_helper.decorators.decorator_retry_forever import retry_forever

from tp_shared.autoins_mpg_service.repos.autoins_results_ack_list_queue_repo import (
    AutoinsResultsAckListQueueRepo,
)
from tp_shared.autoins_mpg_service.schemas.autoins_result_message import (
    AutoinsResultMessage,
)


class BaseAutoinsResultsAckListQueueWorkerService(
    BaseWorkerService, AutoinsResultsAckListQueueRepo
):
    """Worker-side facade over the autoins results ack-list queue.

    Re-exposes the repo's pop/ack wrapped in ``retry_forever`` so
    transient failures are retried indefinitely with logging.
    """

    def __init__(self, redis_client: Redis, logger: Logger):
        # Both bases are initialised explicitly with the same client.
        BaseWorkerService.__init__(self, logger=logger, redis_client=redis_client)
        AutoinsResultsAckListQueueRepo.__init__(self, redis_client=redis_client)

    @retry_forever(
        start_message="π₯ Π§ΡΠ΅Π½ΠΈΠ΅ Π·Π°Π΄Π°Ρ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
        error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΡΡΠ΅Π½ΠΈΠΈ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
    )
    async def pop(self) -> AutoinsResultMessage | None:
        # Explicit unbound call keeps the repo implementation while the
        # decorator wraps this method.
        return await AutoinsResultsAckListQueueRepo.pop(self)

    @retry_forever(
        start_message="ποΈ ack Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
        error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ack Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
    )
    async def ack(self) -> None:
        return await AutoinsResultsAckListQueueRepo.ack(self)
from redis.asyncio import Redis
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo

from tp_shared.gibdd_service.schemas.gibdd_dc_result_message import (
    GibddDcResultMessage,
)


class GibddDcResultsStreamQueueRepo(BaseStreamQueueRepo):
    """Redis stream queue repo for GIBDD diagnostic-card results."""

    # Redis stream key backing this queue.
    QUEUE_NAME = "gibdd:service:dc:results:stream"

    def __init__(self, redis_client: Redis):
        super().__init__(
            redis_client=redis_client,
            schema=GibddDcResultMessage,
            queue_name=self.QUEUE_NAME,
        )
from datetime import date

from pydantic import ConfigDict

from tp_shared.base.base_message import BaseMessage
from tp_shared.types.dc_operator_status import DcOperatorStatus


class GibddDcResultOperator(BaseMessage):
    """Operator that issued a diagnostic card."""

    operator_id: int
    status: DcOperatorStatus
    name: str
    address_line: str
    phone_number: str
    email: str
    site: str
    # No defaults: callers must pass None explicitly when unknown.
    canceled_date: date | None
    # Presumably a unix timestamp, like updated_at/created_at below —
    # TODO confirm against the producer.
    canceled_at: int | None


class GibddDcResultCard(BaseMessage):
    """One diagnostic card found for a VIN."""

    card_number: str
    vin: str
    start_date: date
    end_date: date
    odometer_value: int
    is_active: bool
    updated_at: int
    created_at: int

    operator: GibddDcResultOperator

    # Allow construction from attribute-bearing objects (ORM rows) and
    # population by field name as well as alias.
    model_config = ConfigDict(from_attributes=True, populate_by_name=True)


class GibddDcResultMessage(BaseMessage):
    """Stream message: the diagnostic cards found for one VIN."""

    # Message schema version for consumers.
    version: str = "1.0"
    vin: str
    diagnostic_cards: list[GibddDcResultCard] = []
@@ -0,0 +1,122 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.gibdd_service.repo.gibdd_dc_results_stream_queue_repo import (
|
|
9
|
+
GibddDcResultStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.gibdd_service.schemas.gibdd_dc_result_message import (
|
|
12
|
+
GibddDcResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BaseGibddDcResultsStreamQueueWorkerService(
|
|
17
|
+
GibddDcResultStreamQueueRepo, BaseWorkerService
|
|
18
|
+
):
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
redis_client: Redis,
|
|
22
|
+
logger: Logger,
|
|
23
|
+
group_name: str,
|
|
24
|
+
consumer_name: str,
|
|
25
|
+
):
|
|
26
|
+
BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
|
|
27
|
+
GibddDcResultStreamQueueRepo.__init__(self, redis_client=redis_client)
|
|
28
|
+
|
|
29
|
+
self.group_name = group_name
|
|
30
|
+
self.consumer_name = consumer_name
|
|
31
|
+
|
|
32
|
+
@retry_forever(
|
|
33
|
+
start_message="ΠΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
34
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ Π΄ΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
35
|
+
)
|
|
36
|
+
async def add(self, message: GibddDcResultMessage) -> None:
|
|
37
|
+
await GibddDcResultStreamQueueRepo.add(self, message)
|
|
38
|
+
|
|
39
|
+
@retry_forever(
|
|
40
|
+
start_message="ΠΠΎΠ»ΡΡΠ΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
41
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ»ΡΡΠ΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
42
|
+
)
|
|
43
|
+
async def pop(
|
|
44
|
+
self,
|
|
45
|
+
stream_id: str = ">",
|
|
46
|
+
block: int = 0,
|
|
47
|
+
count: int = 100,
|
|
48
|
+
prioritize_claimed: bool = True,
|
|
49
|
+
min_idle_time: int = 60000,
|
|
50
|
+
) -> list[tuple[str, GibddDcResultMessage]] | None:
|
|
51
|
+
return await GibddDcResultStreamQueueRepo.pop(
|
|
52
|
+
self,
|
|
53
|
+
group_name=self.group_name,
|
|
54
|
+
consumer_name=self.consumer_name,
|
|
55
|
+
stream_id=stream_id,
|
|
56
|
+
block=block,
|
|
57
|
+
count=count,
|
|
58
|
+
prioritize_claimed=prioritize_claimed,
|
|
59
|
+
min_idle_time=min_idle_time,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
@retry_forever(
|
|
63
|
+
start_message="ΠΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
64
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
65
|
+
)
|
|
66
|
+
async def ack(self, message_id: str):
|
|
67
|
+
await GibddDcResultStreamQueueRepo.ack(self, self.group_name, message_id)
|
|
68
|
+
|
|
69
|
+
@retry_forever(
|
|
70
|
+
start_message="ΠΠΎΠΈΡΠΊ Π·Π°Π²ΠΈΡΡΠΈΡ
ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
71
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ auto-claim ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
72
|
+
)
|
|
73
|
+
async def claim_reassign(
|
|
74
|
+
self,
|
|
75
|
+
min_idle_time: int = 60000,
|
|
76
|
+
count: int = 100,
|
|
77
|
+
) -> list[tuple[str, GibddDcResultMessage]]:
|
|
78
|
+
return await GibddDcResultStreamQueueRepo.claim_reassign(
|
|
79
|
+
self,
|
|
80
|
+
group_name=self.group_name,
|
|
81
|
+
consumer_name=self.consumer_name,
|
|
82
|
+
min_idle_time=min_idle_time,
|
|
83
|
+
count=count,
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@retry_forever(
|
|
87
|
+
start_message="Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ Π³ΡΡΠΏΠΏΡ ΠΏΠΎΡΡΠ΅Π±ΠΈΡΠ΅Π»Π΅ΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
88
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΡΠΎΠ·Π΄Π°Π½ΠΈΡ Π³ΡΡΠΏΠΏΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
89
|
+
)
|
|
90
|
+
async def create_consumer_group(self, create_stream: bool = True):
|
|
91
|
+
try:
|
|
92
|
+
await GibddDcResultStreamQueueRepo.create_consumer_group(
|
|
93
|
+
self,
|
|
94
|
+
group_name=self.group_name,
|
|
95
|
+
create_stream=create_stream,
|
|
96
|
+
stream_id="0",
|
|
97
|
+
)
|
|
98
|
+
except Exception as e:
|
|
99
|
+
print(e)
|
|
100
|
+
|
|
101
|
+
@retry_forever(
|
|
102
|
+
start_message="ΠΡΠΈΡΡΠΊΠ° ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΡΡΠ°ΡΡΠ΅ {retention} Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
103
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΎΡΠΈΡΡΠΊΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
104
|
+
)
|
|
105
|
+
async def trim_by_age(self, retention: timedelta) -> int:
|
|
106
|
+
"""
|
|
107
|
+
Π£Π΄Π°Π»ΡΠ΅Ρ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ ΡΡΠ°ΡΡΠ΅ ΡΠΊΠ°Π·Π°Π½Π½ΠΎΠ³ΠΎ ΠΏΠ΅ΡΠΈΠΎΠ΄Π° (retention) ΡΠ΅ΡΠ΅Π· XTRIM MINID.
|
|
108
|
+
|
|
109
|
+
:param retention: ΠΠ°ΠΊΡΠΈΠΌΠ°Π»ΡΠ½ΡΠΉ "Π²ΠΎΠ·ΡΠ°ΡΡ" ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ, Π½Π°ΠΏΡΠΈΠΌΠ΅Ρ timedelta(days=1)
|
|
110
|
+
:return: ΠΠΎΠ»-Π²ΠΎ ΡΠ΄Π°Π»ΡΠ½Π½ΡΡ
ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ
|
|
111
|
+
"""
|
|
112
|
+
return await GibddDcResultStreamQueueRepo.trim_by_age(self, retention)
|
|
113
|
+
|
|
114
|
+
@retry_forever(
|
|
115
|
+
start_message="ΠΠΎΠ»Π½Π°Ρ ΠΎΡΠΈΡΡΠΊΠ° ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
116
|
+
error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΏΠΎΠ»Π½ΠΎΠΉ ΠΎΡΠΈΡΡΠΊΠ΅ ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
117
|
+
)
|
|
118
|
+
async def delete_all(self) -> None:
|
|
119
|
+
"""
|
|
120
|
+
Π£Π΄Π°Π»ΡΠ΅Ρ Π²ΡΠ΅ consumer group ΠΈ ΡΠ°ΠΌ ΠΏΠΎΡΠΎΠΊ (ΠΏΠΎΠ»Π½Π°Ρ ΠΎΡΠΈΡΡΠΊΠ°).
|
|
121
|
+
"""
|
|
122
|
+
await GibddDcResultStreamQueueRepo.delete_all(self)
|
|
from redis.asyncio import Redis
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo

from tp_shared.mos_passes_service.schemas.mos_passes_result_message import (
    MosPassesResultMessage,
)


class MosPassesResultsStreamQueueRepo(BaseStreamQueueRepo):
    """Redis stream queue repo for Moscow pass lookup results."""

    # Redis stream key backing this queue.
    QUEUE_NAME = "mos:passes:service:results:stream"

    def __init__(self, redis_client: Redis):
        super().__init__(
            redis_client=redis_client,
            schema=MosPassesResultMessage,
            queue_name=self.QUEUE_NAME,
        )
from datetime import date

from tp_shared.base.base_message import BaseMessage
from tp_shared.types.pass_allowed_zone import PassAllowedZone
from tp_shared.types.pass_series import PassSeries
from tp_shared.types.pass_time_of_date import PassTimeOfDate


class MosPassesResultPass(BaseMessage):
    """One pass found for a vehicle registration number."""

    reg_number: str
    time_of_day: PassTimeOfDate
    series: PassSeries
    number: str
    allowed_zone: PassAllowedZone
    start_date: date
    end_date: date
    # No default: None must be passed explicitly when not cancelled.
    cancel_date: date | None


class MosPassesResultMessage(BaseMessage):
    """Stream message: all passes found for one registration number."""

    # Message schema version for consumers.
    version: str = "1.0"
    reg_number: str
    passes: list[MosPassesResultPass] = []
tp_shared/mos_passes_service/worker_services/base_mos_passes_results_stream_queue_worker_service.py
ADDED
|
from datetime import timedelta
from logging import Logger

from redis.asyncio import Redis
from tp_helper.base_items.base_worker_service import BaseWorkerService
from tp_helper.decorators.decorator_retry_forever import retry_forever

from tp_shared.mos_passes_service.repo.mos_passes_results_stream_queue_repo import (
    MosPassesResultsStreamQueueRepo,
)
from tp_shared.mos_passes_service.schemas.mos_passes_result_message import (
    MosPassesResultMessage,
)


class BaseMosPassesResultsStreamQueueWorkerService(
    MosPassesResultsStreamQueueRepo, BaseWorkerService
):
    """Worker facade over the Moscow passes results stream queue.

    Every repo operation is wrapped in ``retry_forever`` so transient
    Redis failures are logged and retried indefinitely.
    """

    def __init__(
        self,
        redis_client: Redis,
        logger: Logger,
        group_name: str,
        consumer_name: str,
    ):
        BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
        MosPassesResultsStreamQueueRepo.__init__(self, redis_client=redis_client)

        # Consumer-group identity used by pop/ack/claim_reassign below.
        self.group_name = group_name
        self.consumer_name = consumer_name

    @retry_forever(
        start_message="ΠΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ Π΄ΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
    )
    async def add(self, message: MosPassesResultMessage) -> None:
        # Append one result message to the stream.
        await MosPassesResultsStreamQueueRepo.add(self, message)

    @retry_forever(
        start_message="ΠΠΎΠ»ΡΡΠ΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ»ΡΡΠ΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
    )
    async def pop(
        self,
        stream_id: str = ">",
        block: int = 0,
        count: int = 100,
        prioritize_claimed: bool = True,
        min_idle_time: int = 60000,
    ) -> list[tuple[str, MosPassesResultMessage]] | None:
        # Read up to `count` (message_id, message) pairs for this group.
        return await MosPassesResultsStreamQueueRepo.pop(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            stream_id=stream_id,
            block=block,
            count=count,
            prioritize_claimed=prioritize_claimed,
            min_idle_time=min_idle_time,
        )

    @retry_forever(
        start_message="ΠΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
    )
    async def ack(self, message_id: str):
        # Acknowledge one message for this consumer group.
        await MosPassesResultsStreamQueueRepo.ack(self, self.group_name, message_id)

    @retry_forever(
        start_message="ΠΠΎΠΈΡΠΊ Π·Π°Π²ΠΈΡΡΠΈΡ
 ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ auto-claim ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
    )
    async def claim_reassign(
        self,
        min_idle_time: int = 60000,
        count: int = 100,
    ) -> list[tuple[str, MosPassesResultMessage]]:
        # Re-claim messages idle longer than `min_idle_time` ms.
        return await MosPassesResultsStreamQueueRepo.claim_reassign(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            min_idle_time=min_idle_time,
            count=count,
        )

    @retry_forever(
        start_message="Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ Π³ΡΡΠΏΠΏΡ ΠΏΠΎΡΡΠ΅Π±ΠΈΡΠ΅Π»Π΅ΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΡΠΎΠ·Π΄Π°Π½ΠΈΡ Π³ΡΡΠΏΠΏΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
    )
    async def create_consumer_group(self, create_stream: bool = True):
        # Unlike the gibdd worker, errors here propagate to retry_forever.
        await MosPassesResultsStreamQueueRepo.create_consumer_group(
            self,
            group_name=self.group_name,
            create_stream=create_stream,
        )

    @retry_forever(
        start_message="ΠΡΠΈΡΡΠΊΠ° ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΡΡΠ°ΡΡΠ΅ {retention} Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΎΡΠΈΡΡΠΊΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
    )
    async def trim_by_age(self, retention: timedelta) -> int:
        # Delete messages older than `retention`; returns how many.
        return await MosPassesResultsStreamQueueRepo.trim_by_age(self, retention)

    @retry_forever(
        start_message="ΠΠΎΠ»Π½Π°Ρ ΠΎΡΠΈΡΡΠΊΠ° ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
        error_message="ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΏΠΎΠ»Π½ΠΎΠΉ ΠΎΡΠΈΡΡΠΊΠ΅ ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
    )
    async def delete_all(self) -> None:
        # Full wipe: consumer groups and the stream itself.
        await MosPassesResultsStreamQueueRepo.delete_all(self)
from redis.asyncio import Redis
from tp_helper.base_queues.base_ack_list_queue_repo import BaseAckListQueueRepo

from tp_shared.nsis_service.schemas.nsis_result_message import (
    NsisResultMessage,
)


class NsisResultsAckListQueueRepo(BaseAckListQueueRepo):
    """Redis ack-list queue repo for NSIS service results."""

    # Redis key of the backing ack list.
    QUEUE_NAME = "nsis:service:nsis:results:ack:list"

    def __init__(self, redis_client: Redis):
        super().__init__(redis_client, schema=NsisResultMessage)
from datetime import date

from tp_helper.base_items.base_schema import BaseSchema

from tp_shared.nsis_service.types.nsis_task_type import NsisTaskType
from tp_shared.types.policy_series import PolicySeries
from tp_shared.types.policy_status import PolicyStatus


class NsisResultPolicy(BaseSchema):
    """One policy record returned by the NSIS service."""

    status: PolicyStatus
    vin: str | None = None
    reg_number: str
    series: PolicySeries
    number: str
    start_date: date | None = None
    end_date: date | None = None
    insurer_id: int
    insurer_name: str | None = None
    # Date the upstream request was made.
    request_date: date
    # Presumably unix timestamps — TODO confirm against the producer.
    created_at: int
    updated_at: int


class NsisResultMessage(BaseSchema):
    """Queue message: the policies found for one NSIS query."""

    task_type: NsisTaskType
    # Raw query string the lookup was run with.
    query: str
    request_date: date
    policies: list[NsisResultPolicy] = []
from logging import Logger

from redis.asyncio import Redis
from tp_helper.base_items.base_worker_service import BaseWorkerService
from tp_helper.decorators.decorator_retry_forever import retry_forever

from tp_shared.nsis_service.repos.nsis_results_ack_list_queue_repo import (
    NsisResultsAckListQueueRepo,
)
from tp_shared.nsis_service.schemas.nsis_result_message import NsisResultMessage


class BaseNsisResultsAckListQueueWorkerService(
    NsisResultsAckListQueueRepo, BaseWorkerService
):
    """Worker-side facade over the NSIS results ack-list queue.

    Re-exposes the repo's pop/ack wrapped in ``retry_forever`` so
    transient failures are retried indefinitely with logging.
    """

    def __init__(self, redis_client: Redis, logger: Logger):
        # Both bases are initialised explicitly with the same client.
        BaseWorkerService.__init__(self, logger=logger, redis_client=redis_client)
        NsisResultsAckListQueueRepo.__init__(self, redis_client=redis_client)

    @retry_forever(
        start_message="π₯ ΠΠ°ΡΠ°Π»ΠΎ ΡΡΠ΅Π½ΠΈΡ Π·Π°Π΄Π°Ρ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
        error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΡΡΠ΅Π½ΠΈΠΈ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
    )
    async def pop(self) -> NsisResultMessage | None:
        # Explicit unbound call keeps the repo implementation while the
        # decorator wraps this method.
        return await NsisResultsAckListQueueRepo.pop(self)

    @retry_forever(
        start_message="ποΈ Π£Π΄Π°Π»Π΅Π½ΠΈΠ΅ Π·Π°Π΄Π°Ρ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
        error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΡΠ΄Π°Π»Π΅Π½ΠΈΠΈ Π·Π°Π΄Π°Ρ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
    )
    async def ack(self) -> None:
        return await NsisResultsAckListQueueRepo.ack(self)
from redis.asyncio import Redis
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo

from tp_shared.policies_service.schemas.policies_result_message import (
    PoliciesResultMessage,
)


class PoliciesEventStreamQueueRepo(BaseStreamQueueRepo):
    """Redis stream queue repo for policy events."""

    # Redis stream key backing this queue.
    QUEUE_NAME = "policies:service:policies:event:stream"

    def __init__(self, redis_client: Redis):
        super().__init__(
            redis_client=redis_client,
            schema=PoliciesResultMessage,
            queue_name=self.QUEUE_NAME,
        )
from datetime import date

from tp_shared.base.base_message import BaseMessage
from tp_shared.types.policy_series import PolicySeries
from tp_shared.types.policy_status import PolicyStatus


class PoliciesResultPolicy(BaseMessage):
    """One policy record in a policies event message.

    Note: unlike AutoinsResultPolicy, all period/vehicle fields here are
    required — producers must always supply them.
    """

    series: PolicySeries
    number: str
    status: PolicyStatus
    start_date: date
    end_date: date
    # Three usage periods inside the policy term.
    period1_start: date
    period1_end: date
    period2_start: date
    period2_end: date
    period3_start: date
    period3_end: date
    # Vehicle identification.
    vin: str
    car_mark: str
    car_model: str


class PoliciesResultMessage(BaseMessage):
    """Stream message: all policies found for one registration number."""

    # Message schema version for consumers.
    version: str = "1.0"
    reg_number: str
    policies: list[PoliciesResultPolicy] = []
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.policies_service.repos.policies_event_stream_queue_repo import (
|
|
9
|
+
PoliciesEventStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.policies_service.schemas.policies_result_message import (
|
|
12
|
+
PoliciesResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BasePolicyEventStreamQueueWorkerService(
|
|
17
|
+
PoliciesEventStreamQueueRepo, BaseWorkerService
|
|
18
|
+
):
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
redis_client: Redis,
|
|
22
|
+
logger: Logger,
|
|
23
|
+
group_name: str = None,
|
|
24
|
+
consumer_name: str = None,
|
|
25
|
+
):
|
|
26
|
+
BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
|
|
27
|
+
PoliciesEventStreamQueueRepo.__init__(self, redis_client=redis_client)
|
|
28
|
+
|
|
29
|
+
self.group_name = group_name
|
|
30
|
+
self.consumer_name = consumer_name
|
|
31
|
+
|
|
32
|
+
@retry_forever(
|
|
33
|
+
start_message="β ΠΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
34
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ Π΄ΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
35
|
+
)
|
|
36
|
+
async def add(self, message: PoliciesResultMessage) -> None:
|
|
37
|
+
await PoliciesEventStreamQueueRepo.add(self, message)
|
|
38
|
+
|
|
39
|
+
@retry_forever(
|
|
40
|
+
start_message="π₯ ΠΠΎΠ»ΡΡΠ΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
41
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ»ΡΡΠ΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
42
|
+
)
|
|
43
|
+
async def pop(
|
|
44
|
+
self,
|
|
45
|
+
stream_id: str = ">",
|
|
46
|
+
block: int = 0,
|
|
47
|
+
count: int = 100,
|
|
48
|
+
prioritize_claimed: bool = True,
|
|
49
|
+
min_idle_time: int = 60000,
|
|
50
|
+
) -> list[tuple[str, PoliciesResultMessage]] | None:
|
|
51
|
+
return await PoliciesEventStreamQueueRepo.pop(
|
|
52
|
+
self,
|
|
53
|
+
group_name=self.group_name,
|
|
54
|
+
consumer_name=self.consumer_name,
|
|
55
|
+
stream_id=stream_id,
|
|
56
|
+
block=block,
|
|
57
|
+
count=count,
|
|
58
|
+
prioritize_claimed=prioritize_claimed,
|
|
59
|
+
min_idle_time=min_idle_time,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
@retry_forever(
|
|
63
|
+
start_message="β
ΠΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
64
|
+
error_message="β οΈ ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
65
|
+
)
|
|
66
|
+
async def ack(self, message_id: str):
|
|
67
|
+
await PoliciesEventStreamQueueRepo.ack(self, self.group_name, message_id)
|
|
68
|
+
|
|
69
|
+
@retry_forever(
|
|
70
|
+
start_message="π ΠΠΎΠΈΡΠΊ Π·Π°Π²ΠΈΡΡΠΈΡ
ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
71
|
+
error_message="π« ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ auto-claim ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
72
|
+
)
|
|
73
|
+
async def claim_reassign(
|
|
74
|
+
self,
|
|
75
|
+
min_idle_time: int = 60000,
|
|
76
|
+
count: int = 100,
|
|
77
|
+
) -> list[tuple[str, PoliciesResultMessage]]:
|
|
78
|
+
return await PoliciesEventStreamQueueRepo.claim_reassign(
|
|
79
|
+
self,
|
|
80
|
+
group_name=self.group_name,
|
|
81
|
+
consumer_name=self.consumer_name,
|
|
82
|
+
min_idle_time=min_idle_time,
|
|
83
|
+
count=count,
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@retry_forever(
|
|
87
|
+
start_message="π₯ Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ Π³ΡΡΠΏΠΏΡ ΠΏΠΎΡΡΠ΅Π±ΠΈΡΠ΅Π»Π΅ΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
88
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΡΠΎΠ·Π΄Π°Π½ΠΈΡ Π³ΡΡΠΏΠΏΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
89
|
+
)
|
|
90
|
+
async def create_consumer_group(self, create_stream: bool = True):
|
|
91
|
+
await PoliciesEventStreamQueueRepo.create_consumer_group(
|
|
92
|
+
self,
|
|
93
|
+
group_name=self.group_name,
|
|
94
|
+
create_stream=create_stream,
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
@retry_forever(
|
|
98
|
+
start_message="π§Ή ΠΡΠΈΡΡΠΊΠ° ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΡΡΠ°ΡΡΠ΅ {retention} Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
99
|
+
error_message="β οΈ ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΎΡΠΈΡΡΠΊΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
100
|
+
)
|
|
101
|
+
async def trim_by_age(self, retention: timedelta) -> int:
|
|
102
|
+
return await PoliciesEventStreamQueueRepo.trim_by_age(self, retention)
|
|
103
|
+
|
|
104
|
+
@retry_forever(
|
|
105
|
+
start_message="ποΈ ΠΠΎΠ»Π½Π°Ρ ΠΎΡΠΈΡΡΠΊΠ° ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
106
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΏΠΎΠ»Π½ΠΎΠΉ ΠΎΡΠΈΡΡΠΊΠ΅ ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
107
|
+
)
|
|
108
|
+
async def delete_all(self) -> None:
|
|
109
|
+
await PoliciesEventStreamQueueRepo.delete_all(self)
|
|
from redis.asyncio import Redis
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo

from tp_shared.rnis_check_service.schemas.rnis_check_result_message import (
    RNISCheckResultMessage,
)


class RNISCheckResultsStreamQueueRepo(BaseStreamQueueRepo):
    """Redis stream queue repo for RNIS check results."""

    # Redis stream key backing this queue.
    QUEUE_NAME = "rnis:check:service:results:stream"

    def __init__(self, redis_client: Redis):
        super().__init__(
            redis_client=redis_client,
            schema=RNISCheckResultMessage,
            queue_name=self.QUEUE_NAME,
        )
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.rnis_check_service.repos.rnis_check_results_stream_queue_repo import (
|
|
9
|
+
RNISCheckResultsStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.rnis_check_service.schemas.rnis_check_result_message import (
|
|
12
|
+
RNISCheckResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class RNISCheckResultsStreamQueueWorkerService(
|
|
17
|
+
RNISCheckResultsStreamQueueRepo, BaseWorkerService
|
|
18
|
+
):
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
redis_client: Redis,
|
|
22
|
+
logger: Logger,
|
|
23
|
+
group_name: str,
|
|
24
|
+
consumer_name: str,
|
|
25
|
+
):
|
|
26
|
+
BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
|
|
27
|
+
RNISCheckResultsStreamQueueRepo.__init__(self, redis_client=redis_client)
|
|
28
|
+
|
|
29
|
+
self.group_name = group_name
|
|
30
|
+
self.consumer_name = consumer_name
|
|
31
|
+
|
|
32
|
+
@retry_forever(
|
|
33
|
+
start_message="β ΠΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
34
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ Π΄ΠΎΠ±Π°Π²Π»Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΎΡΠ΅ΡΠ΅Π΄Ρ {queue_name}",
|
|
35
|
+
)
|
|
36
|
+
async def add(self, message: RNISCheckResultMessage) -> None:
|
|
37
|
+
await RNISCheckResultsStreamQueueRepo.add(self, message)
|
|
38
|
+
|
|
39
|
+
@retry_forever(
|
|
40
|
+
start_message="π₯ ΠΠΎΠ»ΡΡΠ΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
41
|
+
error_message="β οΈ ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ»ΡΡΠ΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΈΠ· ΠΎΡΠ΅ΡΠ΅Π΄ΠΈ {queue_name}",
|
|
42
|
+
)
|
|
43
|
+
async def pop(
|
|
44
|
+
self,
|
|
45
|
+
stream_id: str = ">",
|
|
46
|
+
block: int = 0,
|
|
47
|
+
count: int = 100,
|
|
48
|
+
prioritize_claimed: bool = True,
|
|
49
|
+
min_idle_time: int = 60000,
|
|
50
|
+
) -> list[tuple[str, RNISCheckResultMessage]] | None:
|
|
51
|
+
return await RNISCheckResultsStreamQueueRepo.pop(
|
|
52
|
+
self,
|
|
53
|
+
group_name=self.group_name,
|
|
54
|
+
consumer_name=self.consumer_name,
|
|
55
|
+
stream_id=stream_id,
|
|
56
|
+
block=block,
|
|
57
|
+
count=count,
|
|
58
|
+
prioritize_claimed=prioritize_claimed,
|
|
59
|
+
min_idle_time=min_idle_time,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
@retry_forever(
|
|
63
|
+
start_message="β
ΠΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
64
|
+
error_message="β οΈ ΠΡΠΈΠ±ΠΊΠ° ΠΏΠΎΠ΄ΡΠ²Π΅ΡΠΆΠ΄Π΅Π½ΠΈΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
65
|
+
)
|
|
66
|
+
async def ack(self, message_id: str):
|
|
67
|
+
await RNISCheckResultsStreamQueueRepo.ack(self, self.group_name, message_id)
|
|
68
|
+
|
|
69
|
+
@retry_forever(
|
|
70
|
+
start_message="π ΠΠΎΠΈΡΠΊ Π·Π°Π²ΠΈΡΡΠΈΡ
ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
71
|
+
error_message="π« ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ auto-claim ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
72
|
+
)
|
|
73
|
+
async def claim_reassign(
|
|
74
|
+
self,
|
|
75
|
+
min_idle_time: int = 60000,
|
|
76
|
+
count: int = 100,
|
|
77
|
+
) -> list[tuple[str, RNISCheckResultMessage]]:
|
|
78
|
+
return await RNISCheckResultsStreamQueueRepo.claim_reassign(
|
|
79
|
+
self,
|
|
80
|
+
group_name=self.group_name,
|
|
81
|
+
consumer_name=self.consumer_name,
|
|
82
|
+
min_idle_time=min_idle_time,
|
|
83
|
+
count=count,
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@retry_forever(
|
|
87
|
+
start_message="π₯ Π‘ΠΎΠ·Π΄Π°Π½ΠΈΠ΅ Π³ΡΡΠΏΠΏΡ ΠΏΠΎΡΡΠ΅Π±ΠΈΡΠ΅Π»Π΅ΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
88
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΡΠΎΠ·Π΄Π°Π½ΠΈΡ Π³ΡΡΠΏΠΏΡ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
89
|
+
)
|
|
90
|
+
async def create_consumer_group(self, create_stream: bool = True):
|
|
91
|
+
await RNISCheckResultsStreamQueueRepo.create_consumer_group(
|
|
92
|
+
self,
|
|
93
|
+
group_name=self.group_name,
|
|
94
|
+
create_stream=create_stream,
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
@retry_forever(
|
|
98
|
+
start_message="π§Ή ΠΡΠΈΡΡΠΊΠ° ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΡΡΠ°ΡΡΠ΅ {retention} Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
99
|
+
error_message="β οΈ ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΎΡΠΈΡΡΠΊΠ΅ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ Π² ΠΏΠΎΡΠΎΠΊΠ΅ {queue_name}",
|
|
100
|
+
)
|
|
101
|
+
async def trim_by_age(self, retention: timedelta) -> int:
|
|
102
|
+
return await RNISCheckResultsStreamQueueRepo.trim_by_age(self, retention)
|
|
103
|
+
|
|
104
|
+
@retry_forever(
|
|
105
|
+
start_message="ποΈ ΠΠΎΠ»Π½Π°Ρ ΠΎΡΠΈΡΡΠΊΠ° ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
106
|
+
error_message="β ΠΡΠΈΠ±ΠΊΠ° ΠΏΡΠΈ ΠΏΠΎΠ»Π½ΠΎΠΉ ΠΎΡΠΈΡΡΠΊΠ΅ ΠΏΠΎΡΠΎΠΊΠ° {queue_name}",
|
|
107
|
+
)
|
|
108
|
+
async def delete_all(self) -> None:
|
|
109
|
+
await RNISCheckResultsStreamQueueRepo.delete_all(self)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class PolicySeries(str, Enum):
    """Closed set of known policy series codes.

    Member names are Latin transliterations of the Cyrillic series strings
    stored as values. The ``str`` mixin lets members compare equal to plain
    strings and serialize as their Cyrillic value.
    NOTE(review): these look like OSAGO policy series — confirm with the
    policies service before extending the list.
    """

    XXX = "Π₯Π₯Π₯"
    TTT = "Π’Π’Π’"
    AAA = "ΠΠΠ"
    AAV = "ΠΠΠ"
    AAK = "ΠΠΠ"
    AAM = "ΠΠΠ"
    AAN = "ΠΠΠ"
    AAS = "ΠΠΠ‘"
    VVV = "ΠΠΠ"
    EEE = "ΠΠΠ"
    KKK = "ΠΠΠ"
    MMM = "ΠΠΠ"
    NNN = "ΠΠΠ"
    RRR = "Π Π Π "
    SSS = "Π‘Π‘Π‘"
|
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: tp-shared
|
|
3
|
+
Version: 0.2.27
|
|
4
|
+
Summary: Pydantic ΡΡ
Π΅ΠΌΡ Π΄Π»Ρ Π²ΡΠ΅Ρ
 ΠΏΡΠΎΠ΅ΠΊΡΠΎΠ²
|
|
5
|
+
Requires-Python: >=3.12
|
|
6
|
+
Classifier: Programming Language :: Python :: 3
|
|
7
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
9
|
+
Requires-Dist: pydantic (>=2.11.7,<3.0.0)
|
|
10
|
+
Requires-Dist: tp-helper (>=0.4.34,<0.5.0)
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
|
|
13
|
+
# π§© tp-shared
|
|
14
|
+
|
|
15
|
+
ΠΠ±ΡΠΈΠΉ ΡΠ΅ΠΏΠΎΠ·ΠΈΡΠΎΡΠΈΠΉ ΡΡ
Π΅ΠΌ Π΄Π»Ρ ΠΈΡΠΏΠΎΠ»ΡΠ·ΠΎΠ²Π°Π½ΠΈΡ Π² Π½Π΅ΡΠΊΠΎΠ»ΡΠΊΠΈΡ
ΠΏΡΠΎΠ΅ΠΊΡΠ°Ρ
.
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Π£ΡΡΠ°Π½ΠΎΠ²ΠΊΠ°:
|
|
20
|
+
`poetry add tp-shared`
|
|
21
|
+
|
|
22
|
+
## ΠΡΠΈΡΡΠΊΠ° ΠΏΡΠΈ ΠΎΠ±Π½ΠΎΠ²Π»Π΅Π½ΠΈΠΈ
|
|
23
|
+
- `poetry cache clear --all PyPI`
|
|
24
|
+
- `poetry add tp-shared`
|
|
25
|
+
- `poetry update`
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
## ΠΡΠ±Π»ΠΈΠΊΠ°ΡΠΈΡ:
|
|
30
|
+
Π‘ΠΎΠ±ΠΈΡΠ°Π΅Ρ ΠΈ Π·Π°Π³ΡΡΠΆΠ°Π΅Ρ ΡΠΎΠ±ΡΠ°Π½Π½ΡΠΉ ΠΏΠ°ΠΊΠ΅Ρ Π² PyPI.
|
|
31
|
+
|
|
32
|
+
`poetry publish --build`
|
|
33
|
+
|
|
34
|
+
## Π‘ΡΡΡΠΊΡΡΡΠ° ΠΏΡΠΎΠ΅ΠΊΡΠ°
|
|
35
|
+
|
|
36
|
+
**messages**
|
|
37
|
+
-------------------------
|
|
38
|
+
Π‘Ρ
Π΅ΠΌΡ ΡΠΎΠΎΠ±ΡΠ΅Π½ΠΈΠΉ ΠΎΡ ΡΠ΅ΡΠ²ΠΈΡΠΎΠ²
|
|
39
|
+
|
|
40
|
+
ΠΡΠΈΠΌΠ΅Ρ ΠΈΠΌΠΏΠΎΡΡΠ°
|
|
41
|
+
|
|
42
|
+
from tp_shared_schemas.messages import GibddDcResultMessage
|
|
43
|
+
|
|
44
|
+
Π ΠΊΠ°ΠΆΠ΄ΠΎΠΉ ΠΏΠ°ΠΏΠΊΠ΅ Π»Π΅ΠΆΠ°Ρ ΡΠΎΠΎΡΠ²Π΅ΡΡΡΠ²ΡΡΡΠΈΠ΅ Pydantic-ΡΡ
Π΅ΠΌΡ, ΡΠ³ΡΡΠΏΠΏΠΈΡΠΎΠ²Π°Π½Π½ΡΠ΅ ΠΏΠΎ ΡΡΠ½ΠΊΡΠΈΠΎΠ½Π°Π»Ρ.
|
|
45
|
+
--------------------------
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
## ΠΠ°ΠΊ ΠΏΠΎΠ΄ΠΊΠ»ΡΡΠΈΡΡ ΡΠ΅ΠΏΠΎΠ·ΠΈΡΠΎΡΠΈΠΉ ΠΊ ΡΡΡΠ΅ΡΡΠ²ΡΡΡΠ΅ΠΌΡ ΠΏΡΠΎΠ΅ΠΊΡΡ
|
|
49
|
+
|
|
50
|
+
ΠΡΠ»ΠΈ Ρ Π²Π°Ρ Π΅ΡΡΡ Π»ΠΎΠΊΠ°Π»ΡΠ½ΡΠΉ ΠΏΡΠΎΠ΅ΠΊΡ ΠΈ Π²Ρ Ρ
ΠΎΡΠΈΡΠ΅ Π΄ΠΎΠ±Π°Π²ΠΈΡΡ ΡΠ΅ΠΏΠΎΠ·ΠΈΡΠΎΡΠΈΠΉ Ρ ΠΎΠ±ΡΠΈΠΌΠΈ ΡΡ
Π΅ΠΌΠ°ΠΌΠΈ, Π²ΡΠΏΠΎΠ»Π½ΠΈΡΠ΅ ΠΊΠΎΠΌΠ°Π½Π΄Ρ:
|
|
51
|
+
Π² ΡΠ°ΠΉΠ»Π΅ pyproject.toml ΠΏΡΠΎΠΏΠΈΡΠ°ΡΡ Π·Π°Π²ΠΈΡΠΈΠΌΠΎΡΡΡ:
|
|
52
|
+
1)
|
|
53
|
+
|
|
54
|
+
```toml
|
|
55
|
+
[tool.poetry.dependencies]
|
|
56
|
+
tp-shared = { git = "https://gitlab.8525.ru/modules/tp-shared.git", rev = "main" }
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
ΠΠ»ΠΈ ΡΠ΅ΡΠ΅Π· `poetry add` Ρ git-URL:
|
|
60
|
+
|
|
61
|
+
```bash
|
|
62
|
+
poetry add git+https://gitlab.8525.ru/modules/tp-shared.git
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
2) ΠΡΠΏΠΎΠ»Π½ΠΈΡΡ ΠΊΠΎΠΌΠ°Π½Π΄Ρ poetry install ΠΈΠ»ΠΈ poetry update
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
## Π Π΅ΠΏΠΎΠ·ΠΈΡΠΎΡΠΈΠΉ
|
|
69
|
+
```
|
|
70
|
+
cd existing_repo
|
|
71
|
+
git remote add origin https://gitlab.8525.ru/modules/tp-shared.git
|
|
72
|
+
git branch -M main
|
|
73
|
+
git push -uf origin main
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
tp_shared/autoins_mpg_service/repos/autoins_results_ack_list_queue_repo.py,sha256=fDOF7DiaXKUvJDqih3Bix-MR-Fh7gWT3KfYBeSpE4qA,503
|
|
2
|
+
tp_shared/autoins_mpg_service/schemas/autoins_result_message.py,sha256=bc1fK_S6m22OMXFbE_7cYWCTuK_GgAKNEfLEzXVAOws,946
|
|
3
|
+
tp_shared/autoins_mpg_service/worker_services/base_autoins_results_ack_list_queue_worker_service.py,sha256=aauusMNRwetPxZaSYbxFUCQXs9wT1SDxByzCnZj40VI,1400
|
|
4
|
+
tp_shared/base/base_message.py,sha256=VdTP5beDfsNT5uLuR4WIs9S1szUIcJwARxZ65--qZEY,96
|
|
5
|
+
tp_shared/gibdd_service/repo/gibdd_dc_results_stream_queue_repo.py,sha256=tKAqHyRsb5N5CLBfvoq3PeEjT5xdnTVkj9KcWz1lz0o,538
|
|
6
|
+
tp_shared/gibdd_service/schemas/gibdd_dc_result_message.py,sha256=-7k7mxykw5JpxWwnsUjjYZfkyc2hOr1feB4dBeaR7t8,898
|
|
7
|
+
tp_shared/gibdd_service/worker_services/base_gibdd_dc_results_stream_queue_worker_service.py,sha256=PDIa8QrmbPnwVz4fQ1lKhNuolZBwjcMqjx6r1OABnGs,5001
|
|
8
|
+
tp_shared/mos_passes_service/repo/mos_passes_results_stream_queue_repo.py,sha256=AW-e8J8_oYmzjPkEt0myZ2-iGwiSVHLyt-dEujolZpw,553
|
|
9
|
+
tp_shared/mos_passes_service/schemas/mos_passes_result_message.py,sha256=WR9G7Xy268nPr2vlb-SgTZEEShsTAwO4r3VbY2y3GMw,645
|
|
10
|
+
tp_shared/mos_passes_service/worker_services/base_mos_passes_results_stream_queue_worker_service.py,sha256=9xPViK59pB077enyQ5GilIZZyhvoxTxeCb7CnrVdunc,4463
|
|
11
|
+
tp_shared/nsis_service/repos/nsis_results_ack_list_queue_repo.py,sha256=EmM24OEQny2U4ukuON_LFDkDLiU37C1sv3BhWIgC5uA,440
|
|
12
|
+
tp_shared/nsis_service/schemas/nsis_result_message.py,sha256=XU7lYU2AdpnYGlPFNpkyNscRI-lKlCYnl6drhfRY4SU,777
|
|
13
|
+
tp_shared/nsis_service/types/nsis_task_type.py,sha256=eNcqzojojnpo2esBjbAU8d_SSh0kq92Je7J_T7e49sc,102
|
|
14
|
+
tp_shared/nsis_service/worker_services/base_nsis_results_queue_worker_service.py,sha256=ZF_3y96cG2ufLXKIjd6g_N0n_hvJSBS5GkEHRnaqYqU,1410
|
|
15
|
+
tp_shared/policies_service/repos/policies_event_stream_queue_repo.py,sha256=igfDBdLc2P-ER-Hqdl3k-GgrLqkmDGKftqnA6gJKGy8,549
|
|
16
|
+
tp_shared/policies_service/schemas/policies_result_message.py,sha256=UiH3e6lyAvcuAoS_tR4kRgzMcS4AmDwsverUvAwFWx0,689
|
|
17
|
+
tp_shared/policies_service/worker_services/base_policies_results_queue_worker_service.py,sha256=LemQbwNzG20Ivh_OrixAIm_MGNeuAQEhlxnjQmHOYdg,4500
|
|
18
|
+
tp_shared/rnis_check_service/repos/rnis_check_results_stream_queue_repo.py,sha256=JNx7DMz0YAT19GWjvlVWBFIuhk179nVbjPNZa6tRpEE,553
|
|
19
|
+
tp_shared/rnis_check_service/schemas/rnis_check_result_message.py,sha256=AjPwwRUuPqHWm-wEHtV_68dw7YaLQeEViq60MQkeFEM,220
|
|
20
|
+
tp_shared/rnis_check_service/worker_services/base_rnis_results_queue_worker_service.py,sha256=Vidx0LEUDHic86E6bqOeN5jzo9WZF74oh1kyl7VLuSg,4534
|
|
21
|
+
tp_shared/types/dc_operator_status.py,sha256=JJ8ke9pCKLwIyES7C2WjYGybZwTbgG20IYe6NTxS5Cc,129
|
|
22
|
+
tp_shared/types/pass_allowed_zone.py,sha256=WRLZienfaFKssDag2ClFLoVOvFZONYtqlUlPqBZYr8A,139
|
|
23
|
+
tp_shared/types/pass_series.py,sha256=fpiDGUFSuHDYVARUXUWgoJeYnIWDtxVImyOHZotQYyY,229
|
|
24
|
+
tp_shared/types/pass_time_of_date.py,sha256=rjeYh8h_6ncXRu2C8kfFaaZNY9AJWpubLABpzSqg_8c,99
|
|
25
|
+
tp_shared/types/policy_series.py,sha256=YxIDrwYanaZtCLkrGMcwXY7iqh74W9jPFMkkPTtt3yM,359
|
|
26
|
+
tp_shared/types/policy_status.py,sha256=U3x4FY52-_RDZYkEp2FGAtxp0XEbOQvcy8OPACpJ2lo,154
|
|
27
|
+
tp_shared-0.2.27.dist-info/METADATA,sha256=cNWZ5Rq9sntHpU5hlU8gF9uqprDT0zEWlPLmU29lXQ8,2086
|
|
28
|
+
tp_shared-0.2.27.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
|
|
29
|
+
tp_shared-0.2.27.dist-info/RECORD,,
|