tp-shared 0.2.27__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tp_shared-0.2.27/PKG-INFO +76 -0
- tp_shared-0.2.27/README.md +63 -0
- tp_shared-0.2.27/pyproject.toml +42 -0
- tp_shared-0.2.27/src/tp_shared/autoins_mpg_service/repos/autoins_results_ack_list_queue_repo.py +14 -0
- tp_shared-0.2.27/src/tp_shared/autoins_mpg_service/schemas/autoins_result_message.py +34 -0
- tp_shared-0.2.27/src/tp_shared/autoins_mpg_service/worker_services/base_autoins_results_ack_list_queue_worker_service.py +34 -0
- tp_shared-0.2.27/src/tp_shared/base/base_message.py +5 -0
- tp_shared-0.2.27/src/tp_shared/gibdd_service/repo/gibdd_dc_results_stream_queue_repo.py +17 -0
- tp_shared-0.2.27/src/tp_shared/gibdd_service/schemas/gibdd_dc_result_message.py +39 -0
- tp_shared-0.2.27/src/tp_shared/gibdd_service/worker_services/base_gibdd_dc_results_stream_queue_worker_service.py +122 -0
- tp_shared-0.2.27/src/tp_shared/mos_passes_service/repo/mos_passes_results_stream_queue_repo.py +17 -0
- tp_shared-0.2.27/src/tp_shared/mos_passes_service/schemas/mos_passes_result_message.py +23 -0
- tp_shared-0.2.27/src/tp_shared/mos_passes_service/worker_services/base_mos_passes_results_stream_queue_worker_service.py +109 -0
- tp_shared-0.2.27/src/tp_shared/nsis_service/repos/nsis_results_ack_list_queue_repo.py +13 -0
- tp_shared-0.2.27/src/tp_shared/nsis_service/schemas/nsis_result_message.py +29 -0
- tp_shared-0.2.27/src/tp_shared/nsis_service/types/nsis_task_type.py +6 -0
- tp_shared-0.2.27/src/tp_shared/nsis_service/worker_services/base_nsis_results_queue_worker_service.py +32 -0
- tp_shared-0.2.27/src/tp_shared/policies_service/repos/policies_event_stream_queue_repo.py +17 -0
- tp_shared-0.2.27/src/tp_shared/policies_service/schemas/policies_result_message.py +28 -0
- tp_shared-0.2.27/src/tp_shared/policies_service/worker_services/base_policies_results_queue_worker_service.py +109 -0
- tp_shared-0.2.27/src/tp_shared/rnis_check_service/repos/rnis_check_results_stream_queue_repo.py +17 -0
- tp_shared-0.2.27/src/tp_shared/rnis_check_service/schemas/rnis_check_result_message.py +9 -0
- tp_shared-0.2.27/src/tp_shared/rnis_check_service/worker_services/base_rnis_results_queue_worker_service.py +109 -0
- tp_shared-0.2.27/src/tp_shared/types/dc_operator_status.py +7 -0
- tp_shared-0.2.27/src/tp_shared/types/pass_allowed_zone.py +8 -0
- tp_shared-0.2.27/src/tp_shared/types/pass_series.py +14 -0
- tp_shared-0.2.27/src/tp_shared/types/pass_time_of_date.py +6 -0
- tp_shared-0.2.27/src/tp_shared/types/policy_series.py +19 -0
- tp_shared-0.2.27/src/tp_shared/types/policy_status.py +7 -0
|
@@ -0,0 +1,76 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: tp-shared
|
|
3
|
+
Version: 0.2.27
|
|
4
|
+
Summary: Pydantic cхемы для всех проектов
|
|
5
|
+
Requires-Python: >=3.12
|
|
6
|
+
Classifier: Programming Language :: Python :: 3
|
|
7
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
8
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
9
|
+
Requires-Dist: pydantic (>=2.11.7,<3.0.0)
|
|
10
|
+
Requires-Dist: tp-helper (>=0.4.34,<0.5.0)
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
|
|
13
|
+
# 🧩 tp-shared
|
|
14
|
+
|
|
15
|
+
Общий репозиторий схем для использования в нескольких проектах.
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Установка:
|
|
20
|
+
`poetry add tp-shared`
|
|
21
|
+
|
|
22
|
+
## Очистка при обновлении
|
|
23
|
+
- `poetry cache clear --all PyPI`
|
|
24
|
+
- `poetry add tp-shared`
|
|
25
|
+
- `poetry update`
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
## Публикация:
|
|
30
|
+
Собирает и загружает собранный пакет в PyPI.
|
|
31
|
+
|
|
32
|
+
`poetry publish --build`
|
|
33
|
+
|
|
34
|
+
## Структура проекта
|
|
35
|
+
|
|
36
|
+
**messages**
|
|
37
|
+
-------------------------
|
|
38
|
+
Схемы сообщений от сервисов
|
|
39
|
+
|
|
40
|
+
Пример импорта
|
|
41
|
+
|
|
42
|
+
from tp_shared_schemas.messages import GibddDcResultMessage
|
|
43
|
+
|
|
44
|
+
В каждой папке лежат соответствующие Pydantic-схемы, сгруппированные по функционалу.
|
|
45
|
+
--------------------------
|
|
46
|
+
---
|
|
47
|
+
|
|
48
|
+
## Как подключить репозиторий к существующему проекту
|
|
49
|
+
|
|
50
|
+
Если у вас есть локальный проект и вы хотите добавить репозиторий с общими схемами, выполните команды:
|
|
51
|
+
в файле pyproject.toml прописать зависимость:
|
|
52
|
+
1)
|
|
53
|
+
|
|
54
|
+
```Python
|
|
55
|
+
[tool.poetry.dependencies]
|
|
56
|
+
tp-shared = { git = "https://gitlab.8525.ru/modules/tp-shared.git", rev = "main" }
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
poetry add git
|
|
60
|
+
|
|
61
|
+
```python
|
|
62
|
+
poetry add git+https://gitlab.8525.ru/modules/tp-shared.git
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
2) Выполнить команду poetry install или poetry update
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
## Репозиторий
|
|
69
|
+
```
|
|
70
|
+
cd existing_repo
|
|
71
|
+
git remote add origin https://gitlab.8525.ru/modules/tp-shared.git
|
|
72
|
+
git branch -M main
|
|
73
|
+
git push -uf origin main
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
|
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# 🧩 tp-shared
|
|
2
|
+
|
|
3
|
+
Общий репозиторий схем для использования в нескольких проектах.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Установка:
|
|
8
|
+
`poetry add tp-shared`
|
|
9
|
+
|
|
10
|
+
## Очистка при обновлении
|
|
11
|
+
- `poetry cache clear --all PyPI`
|
|
12
|
+
- `poetry add tp-shared`
|
|
13
|
+
- `poetry update`
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
## Публикация:
|
|
18
|
+
Собирает и загружает собранный пакет в PyPI.
|
|
19
|
+
|
|
20
|
+
`poetry publish --build`
|
|
21
|
+
|
|
22
|
+
## Структура проекта
|
|
23
|
+
|
|
24
|
+
**messages**
|
|
25
|
+
-------------------------
|
|
26
|
+
Схемы сообщений от сервисов
|
|
27
|
+
|
|
28
|
+
Пример импорта
|
|
29
|
+
|
|
30
|
+
from tp_shared_schemas.messages import GibddDcResultMessage
|
|
31
|
+
|
|
32
|
+
В каждой папке лежат соответствующие Pydantic-схемы, сгруппированные по функционалу.
|
|
33
|
+
--------------------------
|
|
34
|
+
---
|
|
35
|
+
|
|
36
|
+
## Как подключить репозиторий к существующему проекту
|
|
37
|
+
|
|
38
|
+
Если у вас есть локальный проект и вы хотите добавить репозиторий с общими схемами, выполните команды:
|
|
39
|
+
в файле pyproject.toml прописать зависимость:
|
|
40
|
+
1)
|
|
41
|
+
|
|
42
|
+
```Python
|
|
43
|
+
[tool.poetry.dependencies]
|
|
44
|
+
tp-shared = { git = "https://gitlab.8525.ru/modules/tp-shared.git", rev = "main" }
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
poetry add git
|
|
48
|
+
|
|
49
|
+
```python
|
|
50
|
+
poetry add git+https://gitlab.8525.ru/modules/tp-shared.git
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
2) Выполнить команду poetry install или poetry update
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
## Репозиторий
|
|
57
|
+
```
|
|
58
|
+
cd existing_repo
|
|
59
|
+
git remote add origin https://gitlab.8525.ru/modules/tp-shared.git
|
|
60
|
+
git branch -M main
|
|
61
|
+
git push -uf origin main
|
|
62
|
+
```
|
|
63
|
+
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
[tool.poetry]
|
|
2
|
+
name = "tp-shared"
|
|
3
|
+
version = "0.2.27"
|
|
4
|
+
description = "Pydantic cхемы для всех проектов"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
packages = [{ include = "tp_shared", from="src" }]
|
|
7
|
+
|
|
8
|
+
[tool.poetry.dependencies]
|
|
9
|
+
python = ">=3.12"
|
|
10
|
+
pydantic = "^2.11.7"
|
|
11
|
+
tp-helper = ">=0.4.34,<0.5.0"
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
[tool.poetry.group.dev.dependencies]
|
|
15
|
+
ruff = "^0.12.3"
|
|
16
|
+
pre-commit = "^4.2.0"
|
|
17
|
+
pylint = "^3.3.7"
|
|
18
|
+
|
|
19
|
+
[tool.ruff]
|
|
20
|
+
line-length = 88
|
|
21
|
+
target-version = "py313" # или "py312" для Python 3.12
|
|
22
|
+
fix = true
|
|
23
|
+
|
|
24
|
+
[tool.ruff.lint]
|
|
25
|
+
# Основные категории правил:
|
|
26
|
+
select = [
|
|
27
|
+
"E", # pycodestyle (стиль)
|
|
28
|
+
"F", # pyflakes (ошибки исполнения)
|
|
29
|
+
"I", # isort (сортировка импортов)
|
|
30
|
+
"UP", # pyupgrade (обновление синтаксиса)
|
|
31
|
+
"B", # bugbear (потенциальные баги)
|
|
32
|
+
"A", # flake8-builtins (конфликты с встроенными именами)
|
|
33
|
+
"C4", # flake8-comprehensions
|
|
34
|
+
"SIM", # flake8-simplify
|
|
35
|
+
]
|
|
36
|
+
|
|
37
|
+
# Исключить специфичные или часто неактуальные предупреждения
|
|
38
|
+
ignore = [
|
|
39
|
+
"B008", # Depends(...) в аргументах FastAPI — безопасно и распространённо
|
|
40
|
+
"E501", # Длина строки — игнорируем, т.к. используем black с line-length = 88
|
|
41
|
+
"SIM117", # Вложенные async with — читаемость в некоторых случаях важнее
|
|
42
|
+
]
|
tp_shared-0.2.27/src/tp_shared/autoins_mpg_service/repos/autoins_results_ack_list_queue_repo.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_ack_list_queue_repo import BaseAckListQueueRepo
|
|
3
|
+
|
|
4
|
+
# from src.config import config
|
|
5
|
+
from tp_shared.autoins_mpg_service.schemas.autoins_result_message import (
|
|
6
|
+
AutoinsResultMessage,
|
|
7
|
+
)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AutoinsResultsAckListQueueRepo(BaseAckListQueueRepo):
|
|
11
|
+
QUEUE_NAME = "autoins:service:results:ack:list"
|
|
12
|
+
|
|
13
|
+
def __init__(self, redis_client: Redis):
|
|
14
|
+
super().__init__(redis_client=redis_client, schema=AutoinsResultMessage)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from datetime import date
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel
|
|
4
|
+
|
|
5
|
+
from tp_shared.types.policy_series import PolicySeries
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class AutoinsResultPolicy(BaseModel):
|
|
9
|
+
insurer_name: str
|
|
10
|
+
reg_number: str
|
|
11
|
+
series: PolicySeries
|
|
12
|
+
number: str
|
|
13
|
+
start_date: date
|
|
14
|
+
end_date: date
|
|
15
|
+
period1_start: date | None = None
|
|
16
|
+
period1_end: date | None = None
|
|
17
|
+
period2_start: date | None = None
|
|
18
|
+
period2_end: date | None = None
|
|
19
|
+
period3_start: date | None = None
|
|
20
|
+
period3_end: date | None = None
|
|
21
|
+
vin: str | None = None
|
|
22
|
+
body_number: str | None = None
|
|
23
|
+
chassis_number: str | None = None
|
|
24
|
+
car_mark: str | None = None
|
|
25
|
+
car_model: str | None = None
|
|
26
|
+
external_policy_id: int | None = None
|
|
27
|
+
policy_state: str | None = None
|
|
28
|
+
policy_status_t_use: str | None = None
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class AutoinsResultMessage(BaseModel):
|
|
32
|
+
series: PolicySeries
|
|
33
|
+
number: str
|
|
34
|
+
policies: list[AutoinsResultPolicy] = []
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from logging import Logger
|
|
2
|
+
|
|
3
|
+
from redis.asyncio import Redis
|
|
4
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
5
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
6
|
+
|
|
7
|
+
from tp_shared.autoins_mpg_service.repos.autoins_results_ack_list_queue_repo import (
|
|
8
|
+
AutoinsResultsAckListQueueRepo,
|
|
9
|
+
)
|
|
10
|
+
from tp_shared.autoins_mpg_service.schemas.autoins_result_message import (
|
|
11
|
+
AutoinsResultMessage,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class BaseAutoinsResultsAckListQueueWorkerService(
|
|
16
|
+
BaseWorkerService, AutoinsResultsAckListQueueRepo
|
|
17
|
+
):
|
|
18
|
+
def __init__(self, redis_client: Redis, logger: Logger):
|
|
19
|
+
BaseWorkerService.__init__(self, logger=logger, redis_client=redis_client)
|
|
20
|
+
AutoinsResultsAckListQueueRepo.__init__(self, redis_client=redis_client)
|
|
21
|
+
|
|
22
|
+
@retry_forever(
|
|
23
|
+
start_message="📥 Чтение задач из очереди {queue_name}",
|
|
24
|
+
error_message="❌ Ошибка при чтении из очереди {queue_name}",
|
|
25
|
+
)
|
|
26
|
+
async def pop(self) -> AutoinsResultMessage | None:
|
|
27
|
+
return await AutoinsResultsAckListQueueRepo.pop(self)
|
|
28
|
+
|
|
29
|
+
@retry_forever(
|
|
30
|
+
start_message="🗑️ ack в очередь {queue_name}",
|
|
31
|
+
error_message="❌ Ошибка при ack в очередь {queue_name}",
|
|
32
|
+
)
|
|
33
|
+
async def ack(self) -> None:
|
|
34
|
+
return await AutoinsResultsAckListQueueRepo.ack(self)
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo
|
|
3
|
+
|
|
4
|
+
from tp_shared.gibdd_service.schemas.gibdd_dc_result_message import (
|
|
5
|
+
GibddDcResultMessage,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class GibddDcResultsStreamQueueRepo(BaseStreamQueueRepo):
|
|
10
|
+
QUEUE_NAME = "gibdd:service:dc:results:stream"
|
|
11
|
+
|
|
12
|
+
def __init__(self, redis_client: Redis):
|
|
13
|
+
super().__init__(
|
|
14
|
+
redis_client=redis_client,
|
|
15
|
+
schema=GibddDcResultMessage,
|
|
16
|
+
queue_name=self.QUEUE_NAME,
|
|
17
|
+
)
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
from datetime import date
|
|
2
|
+
|
|
3
|
+
from pydantic import ConfigDict
|
|
4
|
+
|
|
5
|
+
from tp_shared.base.base_message import BaseMessage
|
|
6
|
+
from tp_shared.types.dc_operator_status import DcOperatorStatus
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class GibddDcResultOperator(BaseMessage):
|
|
10
|
+
operator_id: int
|
|
11
|
+
status: DcOperatorStatus
|
|
12
|
+
name: str
|
|
13
|
+
address_line: str
|
|
14
|
+
phone_number: str
|
|
15
|
+
email: str
|
|
16
|
+
site: str
|
|
17
|
+
canceled_date: date | None
|
|
18
|
+
canceled_at: int | None
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class GibddDcResultCard(BaseMessage):
|
|
22
|
+
card_number: str
|
|
23
|
+
vin: str
|
|
24
|
+
start_date: date
|
|
25
|
+
end_date: date
|
|
26
|
+
odometer_value: int
|
|
27
|
+
is_active: bool
|
|
28
|
+
updated_at: int
|
|
29
|
+
created_at: int
|
|
30
|
+
|
|
31
|
+
operator: GibddDcResultOperator
|
|
32
|
+
|
|
33
|
+
model_config = ConfigDict(from_attributes=True, populate_by_name=True)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class GibddDcResultMessage(BaseMessage):
|
|
37
|
+
version: str = "1.0"
|
|
38
|
+
vin: str
|
|
39
|
+
diagnostic_cards: list[GibddDcResultCard] = []
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.gibdd_service.repo.gibdd_dc_results_stream_queue_repo import (
|
|
9
|
+
GibddDcResultStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.gibdd_service.schemas.gibdd_dc_result_message import (
|
|
12
|
+
GibddDcResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BaseGibddDcResultsStreamQueueWorkerService(
|
|
17
|
+
GibddDcResultStreamQueueRepo, BaseWorkerService
|
|
18
|
+
):
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
redis_client: Redis,
|
|
22
|
+
logger: Logger,
|
|
23
|
+
group_name: str,
|
|
24
|
+
consumer_name: str,
|
|
25
|
+
):
|
|
26
|
+
BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
|
|
27
|
+
GibddDcResultStreamQueueRepo.__init__(self, redis_client=redis_client)
|
|
28
|
+
|
|
29
|
+
self.group_name = group_name
|
|
30
|
+
self.consumer_name = consumer_name
|
|
31
|
+
|
|
32
|
+
@retry_forever(
|
|
33
|
+
start_message="Добавление сообщения из очередь {queue_name}",
|
|
34
|
+
error_message="Ошибка при добавление сообщения в очередь {queue_name}",
|
|
35
|
+
)
|
|
36
|
+
async def add(self, message: GibddDcResultMessage) -> None:
|
|
37
|
+
await GibddDcResultStreamQueueRepo.add(self, message)
|
|
38
|
+
|
|
39
|
+
@retry_forever(
|
|
40
|
+
start_message="Получение сообщений из очереди {queue_name}",
|
|
41
|
+
error_message="Ошибка получения сообщений из очереди {queue_name}",
|
|
42
|
+
)
|
|
43
|
+
async def pop(
|
|
44
|
+
self,
|
|
45
|
+
stream_id: str = ">",
|
|
46
|
+
block: int = 0,
|
|
47
|
+
count: int = 100,
|
|
48
|
+
prioritize_claimed: bool = True,
|
|
49
|
+
min_idle_time: int = 60000,
|
|
50
|
+
) -> list[tuple[str, GibddDcResultMessage]] | None:
|
|
51
|
+
return await GibddDcResultStreamQueueRepo.pop(
|
|
52
|
+
self,
|
|
53
|
+
group_name=self.group_name,
|
|
54
|
+
consumer_name=self.consumer_name,
|
|
55
|
+
stream_id=stream_id,
|
|
56
|
+
block=block,
|
|
57
|
+
count=count,
|
|
58
|
+
prioritize_claimed=prioritize_claimed,
|
|
59
|
+
min_idle_time=min_idle_time,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
@retry_forever(
|
|
63
|
+
start_message="Подтверждение сообщения в потоке {queue_name}",
|
|
64
|
+
error_message="Ошибка подтверждения сообщения в потоке {queue_name}",
|
|
65
|
+
)
|
|
66
|
+
async def ack(self, message_id: str):
|
|
67
|
+
await GibddDcResultStreamQueueRepo.ack(self, self.group_name, message_id)
|
|
68
|
+
|
|
69
|
+
@retry_forever(
|
|
70
|
+
start_message="Поиск зависших сообщений в потоке {queue_name}",
|
|
71
|
+
error_message="Ошибка при auto-claim сообщений в потоке {queue_name}",
|
|
72
|
+
)
|
|
73
|
+
async def claim_reassign(
|
|
74
|
+
self,
|
|
75
|
+
min_idle_time: int = 60000,
|
|
76
|
+
count: int = 100,
|
|
77
|
+
) -> list[tuple[str, GibddDcResultMessage]]:
|
|
78
|
+
return await GibddDcResultStreamQueueRepo.claim_reassign(
|
|
79
|
+
self,
|
|
80
|
+
group_name=self.group_name,
|
|
81
|
+
consumer_name=self.consumer_name,
|
|
82
|
+
min_idle_time=min_idle_time,
|
|
83
|
+
count=count,
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@retry_forever(
|
|
87
|
+
start_message="Создание группы потребителей в потоке {queue_name}",
|
|
88
|
+
error_message="Ошибка создания группы в потоке {queue_name}",
|
|
89
|
+
)
|
|
90
|
+
async def create_consumer_group(self, create_stream: bool = True):
|
|
91
|
+
try:
|
|
92
|
+
await GibddDcResultStreamQueueRepo.create_consumer_group(
|
|
93
|
+
self,
|
|
94
|
+
group_name=self.group_name,
|
|
95
|
+
create_stream=create_stream,
|
|
96
|
+
stream_id="0",
|
|
97
|
+
)
|
|
98
|
+
except Exception as e:
|
|
99
|
+
print(e)
|
|
100
|
+
|
|
101
|
+
@retry_forever(
|
|
102
|
+
start_message="Очистка сообщений старше {retention} в потоке {queue_name}",
|
|
103
|
+
error_message="Ошибка при очистке сообщений в потоке {queue_name}",
|
|
104
|
+
)
|
|
105
|
+
async def trim_by_age(self, retention: timedelta) -> int:
|
|
106
|
+
"""
|
|
107
|
+
Удаляет сообщения старше указанного периода (retention) через XTRIM MINID.
|
|
108
|
+
|
|
109
|
+
:param retention: Максимальный "возраст" сообщений, например timedelta(days=1)
|
|
110
|
+
:return: Кол-во удалённых сообщений
|
|
111
|
+
"""
|
|
112
|
+
return await GibddDcResultStreamQueueRepo.trim_by_age(self, retention)
|
|
113
|
+
|
|
114
|
+
@retry_forever(
|
|
115
|
+
start_message="Полная очистка потока {queue_name}",
|
|
116
|
+
error_message="Ошибка при полной очистке потока {queue_name}",
|
|
117
|
+
)
|
|
118
|
+
async def delete_all(self) -> None:
|
|
119
|
+
"""
|
|
120
|
+
Удаляет все consumer group и сам поток (полная очистка).
|
|
121
|
+
"""
|
|
122
|
+
await GibddDcResultStreamQueueRepo.delete_all(self)
|
tp_shared-0.2.27/src/tp_shared/mos_passes_service/repo/mos_passes_results_stream_queue_repo.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo
|
|
3
|
+
|
|
4
|
+
from tp_shared.mos_passes_service.schemas.mos_passes_result_message import (
|
|
5
|
+
MosPassesResultMessage,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class MosPassesResultsStreamQueueRepo(BaseStreamQueueRepo):
|
|
10
|
+
QUEUE_NAME = "mos:passes:service:results:stream"
|
|
11
|
+
|
|
12
|
+
def __init__(self, redis_client: Redis):
|
|
13
|
+
super().__init__(
|
|
14
|
+
redis_client=redis_client,
|
|
15
|
+
schema=MosPassesResultMessage,
|
|
16
|
+
queue_name=self.QUEUE_NAME,
|
|
17
|
+
)
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
from datetime import date
|
|
2
|
+
|
|
3
|
+
from tp_shared.base.base_message import BaseMessage
|
|
4
|
+
from tp_shared.types.pass_allowed_zone import PassAllowedZone
|
|
5
|
+
from tp_shared.types.pass_series import PassSeries
|
|
6
|
+
from tp_shared.types.pass_time_of_date import PassTimeOfDate
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class MosPassesResultPass(BaseMessage):
|
|
10
|
+
reg_number: str
|
|
11
|
+
time_of_day: PassTimeOfDate
|
|
12
|
+
series: PassSeries
|
|
13
|
+
number: str
|
|
14
|
+
allowed_zone: PassAllowedZone
|
|
15
|
+
start_date: date
|
|
16
|
+
end_date: date
|
|
17
|
+
cancel_date: date | None
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class MosPassesResultMessage(BaseMessage):
|
|
21
|
+
version: str = "1.0"
|
|
22
|
+
reg_number: str
|
|
23
|
+
passes: list[MosPassesResultPass] = []
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.mos_passes_service.repo.mos_passes_results_stream_queue_repo import (
|
|
9
|
+
MosPassesResultsStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.mos_passes_service.schemas.mos_passes_result_message import (
|
|
12
|
+
MosPassesResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BaseMosPassesResultsStreamQueueWorkerService(
|
|
17
|
+
MosPassesResultsStreamQueueRepo, BaseWorkerService
|
|
18
|
+
):
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
redis_client: Redis,
|
|
22
|
+
logger: Logger,
|
|
23
|
+
group_name: str,
|
|
24
|
+
consumer_name: str,
|
|
25
|
+
):
|
|
26
|
+
BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
|
|
27
|
+
MosPassesResultsStreamQueueRepo.__init__(self, redis_client=redis_client)
|
|
28
|
+
|
|
29
|
+
self.group_name = group_name
|
|
30
|
+
self.consumer_name = consumer_name
|
|
31
|
+
|
|
32
|
+
@retry_forever(
|
|
33
|
+
start_message="Добавление сообщения из очередь {queue_name}",
|
|
34
|
+
error_message="Ошибка при добавление сообщения в очередь {queue_name}",
|
|
35
|
+
)
|
|
36
|
+
async def add(self, message: MosPassesResultMessage) -> None:
|
|
37
|
+
await MosPassesResultsStreamQueueRepo.add(self, message)
|
|
38
|
+
|
|
39
|
+
@retry_forever(
|
|
40
|
+
start_message="Получение сообщений из очереди {queue_name}",
|
|
41
|
+
error_message="Ошибка получения сообщений из очереди {queue_name}",
|
|
42
|
+
)
|
|
43
|
+
async def pop(
|
|
44
|
+
self,
|
|
45
|
+
stream_id: str = ">",
|
|
46
|
+
block: int = 0,
|
|
47
|
+
count: int = 100,
|
|
48
|
+
prioritize_claimed: bool = True,
|
|
49
|
+
min_idle_time: int = 60000,
|
|
50
|
+
) -> list[tuple[str, MosPassesResultMessage]] | None:
|
|
51
|
+
return await MosPassesResultsStreamQueueRepo.pop(
|
|
52
|
+
self,
|
|
53
|
+
group_name=self.group_name,
|
|
54
|
+
consumer_name=self.consumer_name,
|
|
55
|
+
stream_id=stream_id,
|
|
56
|
+
block=block,
|
|
57
|
+
count=count,
|
|
58
|
+
prioritize_claimed=prioritize_claimed,
|
|
59
|
+
min_idle_time=min_idle_time,
|
|
60
|
+
)
|
|
61
|
+
|
|
62
|
+
@retry_forever(
|
|
63
|
+
start_message="Подтверждение сообщения в потоке {queue_name}",
|
|
64
|
+
error_message="Ошибка подтверждения сообщения в потоке {queue_name}",
|
|
65
|
+
)
|
|
66
|
+
async def ack(self, message_id: str):
|
|
67
|
+
await MosPassesResultsStreamQueueRepo.ack(self, self.group_name, message_id)
|
|
68
|
+
|
|
69
|
+
@retry_forever(
|
|
70
|
+
start_message="Поиск зависших сообщений в потоке {queue_name}",
|
|
71
|
+
error_message="Ошибка при auto-claim сообщений в потоке {queue_name}",
|
|
72
|
+
)
|
|
73
|
+
async def claim_reassign(
|
|
74
|
+
self,
|
|
75
|
+
min_idle_time: int = 60000,
|
|
76
|
+
count: int = 100,
|
|
77
|
+
) -> list[tuple[str, MosPassesResultMessage]]:
|
|
78
|
+
return await MosPassesResultsStreamQueueRepo.claim_reassign(
|
|
79
|
+
self,
|
|
80
|
+
group_name=self.group_name,
|
|
81
|
+
consumer_name=self.consumer_name,
|
|
82
|
+
min_idle_time=min_idle_time,
|
|
83
|
+
count=count,
|
|
84
|
+
)
|
|
85
|
+
|
|
86
|
+
@retry_forever(
|
|
87
|
+
start_message="Создание группы потребителей в потоке {queue_name}",
|
|
88
|
+
error_message="Ошибка создания группы в потоке {queue_name}",
|
|
89
|
+
)
|
|
90
|
+
async def create_consumer_group(self, create_stream: bool = True):
|
|
91
|
+
await MosPassesResultsStreamQueueRepo.create_consumer_group(
|
|
92
|
+
self,
|
|
93
|
+
group_name=self.group_name,
|
|
94
|
+
create_stream=create_stream,
|
|
95
|
+
)
|
|
96
|
+
|
|
97
|
+
@retry_forever(
|
|
98
|
+
start_message="Очистка сообщений старше {retention} в потоке {queue_name}",
|
|
99
|
+
error_message="Ошибка при очистке сообщений в потоке {queue_name}",
|
|
100
|
+
)
|
|
101
|
+
async def trim_by_age(self, retention: timedelta) -> int:
|
|
102
|
+
return await MosPassesResultsStreamQueueRepo.trim_by_age(self, retention)
|
|
103
|
+
|
|
104
|
+
@retry_forever(
|
|
105
|
+
start_message="Полная очистка потока {queue_name}",
|
|
106
|
+
error_message="Ошибка при полной очистке потока {queue_name}",
|
|
107
|
+
)
|
|
108
|
+
async def delete_all(self) -> None:
|
|
109
|
+
await MosPassesResultsStreamQueueRepo.delete_all(self)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_ack_list_queue_repo import BaseAckListQueueRepo
|
|
3
|
+
|
|
4
|
+
from tp_shared.nsis_service.schemas.nsis_result_message import (
|
|
5
|
+
NsisResultMessage,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class NsisResultsAckListQueueRepo(BaseAckListQueueRepo):
|
|
10
|
+
QUEUE_NAME = "nsis:service:nsis:results:ack:list"
|
|
11
|
+
|
|
12
|
+
def __init__(self, redis_client: Redis):
|
|
13
|
+
super().__init__(redis_client, schema=NsisResultMessage)
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
from datetime import date
|
|
2
|
+
|
|
3
|
+
from tp_helper.base_items.base_schema import BaseSchema
|
|
4
|
+
|
|
5
|
+
from tp_shared.nsis_service.types.nsis_task_type import NsisTaskType
|
|
6
|
+
from tp_shared.types.policy_series import PolicySeries
|
|
7
|
+
from tp_shared.types.policy_status import PolicyStatus
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class NsisResultPolicy(BaseSchema):
|
|
11
|
+
status: PolicyStatus
|
|
12
|
+
vin: str | None = None
|
|
13
|
+
reg_number: str
|
|
14
|
+
series: PolicySeries
|
|
15
|
+
number: str
|
|
16
|
+
start_date: date | None = None
|
|
17
|
+
end_date: date | None = None
|
|
18
|
+
insurer_id: int
|
|
19
|
+
insurer_name: str | None = None
|
|
20
|
+
request_date: date
|
|
21
|
+
created_at: int
|
|
22
|
+
updated_at: int
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class NsisResultMessage(BaseSchema):
|
|
26
|
+
task_type: NsisTaskType
|
|
27
|
+
query: str
|
|
28
|
+
request_date: date
|
|
29
|
+
policies: list[NsisResultPolicy] = []
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
from logging import Logger
|
|
2
|
+
|
|
3
|
+
from redis.asyncio import Redis
|
|
4
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
5
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
6
|
+
|
|
7
|
+
from tp_shared.nsis_service.repos.nsis_results_ack_list_queue_repo import (
|
|
8
|
+
NsisResultsAckListQueueRepo,
|
|
9
|
+
)
|
|
10
|
+
from tp_shared.nsis_service.schemas.nsis_result_message import NsisResultMessage
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BaseNsisResultsAckListQueueWorkerService(
|
|
14
|
+
NsisResultsAckListQueueRepo, BaseWorkerService
|
|
15
|
+
):
|
|
16
|
+
def __init__(self, redis_client: Redis, logger: Logger):
|
|
17
|
+
BaseWorkerService.__init__(self, logger=logger, redis_client=redis_client)
|
|
18
|
+
NsisResultsAckListQueueRepo.__init__(self, redis_client=redis_client)
|
|
19
|
+
|
|
20
|
+
@retry_forever(
|
|
21
|
+
start_message="📥 Начало чтения задач из очереди {queue_name}",
|
|
22
|
+
error_message="❌ Ошибка при чтении из очереди {queue_name}",
|
|
23
|
+
)
|
|
24
|
+
async def pop(self) -> NsisResultMessage | None:
|
|
25
|
+
return await NsisResultsAckListQueueRepo.pop(self)
|
|
26
|
+
|
|
27
|
+
@retry_forever(
|
|
28
|
+
start_message="🗑️ Удаление задач из очереди {queue_name}",
|
|
29
|
+
error_message="❌ Ошибка при удалении задач из очереди {queue_name}",
|
|
30
|
+
)
|
|
31
|
+
async def ack(self) -> None:
|
|
32
|
+
return await NsisResultsAckListQueueRepo.ack(self)
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo
|
|
3
|
+
|
|
4
|
+
from tp_shared.policies_service.schemas.policies_result_message import (
|
|
5
|
+
PoliciesResultMessage,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class PoliciesEventStreamQueueRepo(BaseStreamQueueRepo):
|
|
10
|
+
QUEUE_NAME = "policies:service:policies:event:stream"
|
|
11
|
+
|
|
12
|
+
def __init__(self, redis_client: Redis):
|
|
13
|
+
super().__init__(
|
|
14
|
+
redis_client=redis_client,
|
|
15
|
+
schema=PoliciesResultMessage,
|
|
16
|
+
queue_name=self.QUEUE_NAME,
|
|
17
|
+
)
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from datetime import date
|
|
2
|
+
|
|
3
|
+
from tp_shared.base.base_message import BaseMessage
|
|
4
|
+
from tp_shared.types.policy_series import PolicySeries
|
|
5
|
+
from tp_shared.types.policy_status import PolicyStatus
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class PoliciesResultPolicy(BaseMessage):
|
|
9
|
+
series: PolicySeries
|
|
10
|
+
number: str
|
|
11
|
+
status: PolicyStatus
|
|
12
|
+
start_date: date
|
|
13
|
+
end_date: date
|
|
14
|
+
period1_start: date
|
|
15
|
+
period1_end: date
|
|
16
|
+
period2_start: date
|
|
17
|
+
period2_end: date
|
|
18
|
+
period3_start: date
|
|
19
|
+
period3_end: date
|
|
20
|
+
vin: str
|
|
21
|
+
car_mark: str
|
|
22
|
+
car_model: str
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class PoliciesResultMessage(BaseMessage):
|
|
26
|
+
version: str = "1.0"
|
|
27
|
+
reg_number: str
|
|
28
|
+
policies: list[PoliciesResultPolicy] = []
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.policies_service.repos.policies_event_stream_queue_repo import (
|
|
9
|
+
PoliciesEventStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.policies_service.schemas.policies_result_message import (
|
|
12
|
+
PoliciesResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BasePolicyEventStreamQueueWorkerService(
    PoliciesEventStreamQueueRepo, BaseWorkerService
):
    """Worker-service facade over the policies event stream queue.

    Wraps every repo operation in a ``retry_forever`` decorator (which logs
    the start/error messages and retries indefinitely) and binds a fixed
    Redis consumer ``group_name``/``consumer_name`` pair so callers do not
    pass them on every call.
    """

    def __init__(
        self,
        redis_client: Redis,
        logger: Logger,
        group_name: str | None = None,
        consumer_name: str | None = None,
    ):
        # Both bases are initialized explicitly rather than via cooperative
        # super() — each stores what it needs from the shared redis client.
        BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
        PoliciesEventStreamQueueRepo.__init__(self, redis_client=redis_client)

        # Consumer-group identity used by pop/ack/claim_reassign below.
        # NOTE(review): defaults are None here but the sibling RNIS worker
        # requires them — confirm whether None is actually valid downstream.
        self.group_name = group_name
        self.consumer_name = consumer_name

    # NOTE(review): "из очередь"/"при добавление" in the log strings below are
    # runtime messages and are preserved verbatim (grammar typos included).
    @retry_forever(
        start_message="➕ Добавление сообщения из очередь {queue_name}",
        error_message="❌ Ошибка при добавление сообщения в очередь {queue_name}",
    )
    async def add(self, message: PoliciesResultMessage) -> None:
        """Append one result message to the stream."""
        await PoliciesEventStreamQueueRepo.add(self, message)

    @retry_forever(
        start_message="📥 Получение сообщений из очереди {queue_name}",
        error_message="❗ Ошибка получения сообщений из очереди {queue_name}",
    )
    async def pop(
        self,
        stream_id: str = ">",          # ">" = only messages new to this group
        block: int = 0,                # 0 = block indefinitely (ms otherwise)
        count: int = 100,              # max messages per read
        prioritize_claimed: bool = True,
        min_idle_time: int = 60000,    # ms before a pending message is claimable
    ) -> list[tuple[str, PoliciesResultMessage]] | None:
        """Read messages for this consumer; returns (message_id, message) pairs."""
        return await PoliciesEventStreamQueueRepo.pop(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            stream_id=stream_id,
            block=block,
            count=count,
            prioritize_claimed=prioritize_claimed,
            min_idle_time=min_idle_time,
        )

    @retry_forever(
        start_message="✅ Подтверждение сообщения в потоке {queue_name}",
        error_message="⚠️ Ошибка подтверждения сообщения в потоке {queue_name}",
    )
    async def ack(self, message_id: str) -> None:
        """Acknowledge a processed message for this consumer group."""
        await PoliciesEventStreamQueueRepo.ack(self, self.group_name, message_id)

    @retry_forever(
        start_message="🔍 Поиск зависших сообщений в потоке {queue_name}",
        error_message="🚫 Ошибка при auto-claim сообщений в потоке {queue_name}",
    )
    async def claim_reassign(
        self,
        min_idle_time: int = 60000,  # ms a message must be idle before reclaim
        count: int = 100,
    ) -> list[tuple[str, PoliciesResultMessage]]:
        """Reclaim stalled pending messages onto this consumer."""
        return await PoliciesEventStreamQueueRepo.claim_reassign(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            min_idle_time=min_idle_time,
            count=count,
        )

    @retry_forever(
        start_message="👥 Создание группы потребителей в потоке {queue_name}",
        error_message="❌ Ошибка создания группы в потоке {queue_name}",
    )
    async def create_consumer_group(self, create_stream: bool = True) -> None:
        """Create this worker's consumer group (and the stream if requested)."""
        await PoliciesEventStreamQueueRepo.create_consumer_group(
            self,
            group_name=self.group_name,
            create_stream=create_stream,
        )

    @retry_forever(
        start_message="🧹 Очистка сообщений старше {retention} в потоке {queue_name}",
        error_message="⚠️ Ошибка при очистке сообщений в потоке {queue_name}",
    )
    async def trim_by_age(self, retention: timedelta) -> int:
        """Drop messages older than ``retention``; returns the removed count."""
        return await PoliciesEventStreamQueueRepo.trim_by_age(self, retention)

    @retry_forever(
        start_message="🗑️ Полная очистка потока {queue_name}",
        error_message="❌ Ошибка при полной очистке потока {queue_name}",
    )
    async def delete_all(self) -> None:
        """Delete the entire stream."""
        await PoliciesEventStreamQueueRepo.delete_all(self)
|
tp_shared-0.2.27/src/tp_shared/rnis_check_service/repos/rnis_check_results_stream_queue_repo.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
from redis.asyncio import Redis
|
|
2
|
+
from tp_helper.base_queues.base_stream_queue_repo import BaseStreamQueueRepo
|
|
3
|
+
|
|
4
|
+
from tp_shared.rnis_check_service.schemas.rnis_check_result_message import (
|
|
5
|
+
RNISCheckResultMessage,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class RNISCheckResultsStreamQueueRepo(BaseStreamQueueRepo):
    """Stream-queue repository for RNIS check results.

    Binds the generic stream-queue machinery to the RNIS results stream key
    and its message schema; all queue operations come from the base class.
    """

    # Redis stream key holding RNIS check result messages.
    QUEUE_NAME = "rnis:check:service:results:stream"

    def __init__(self, redis_client: Redis):
        """Wire the base repo to the RNIS results stream and schema."""
        super().__init__(
            redis_client=redis_client,
            queue_name=self.QUEUE_NAME,
            schema=RNISCheckResultMessage,
        )
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
from datetime import timedelta
|
|
2
|
+
from logging import Logger
|
|
3
|
+
|
|
4
|
+
from redis.asyncio import Redis
|
|
5
|
+
from tp_helper.base_items.base_worker_service import BaseWorkerService
|
|
6
|
+
from tp_helper.decorators.decorator_retry_forever import retry_forever
|
|
7
|
+
|
|
8
|
+
from tp_shared.rnis_check_service.repos.rnis_check_results_stream_queue_repo import (
|
|
9
|
+
RNISCheckResultsStreamQueueRepo,
|
|
10
|
+
)
|
|
11
|
+
from tp_shared.rnis_check_service.schemas.rnis_check_result_message import (
|
|
12
|
+
RNISCheckResultMessage,
|
|
13
|
+
)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class RNISCheckResultsStreamQueueWorkerService(
    RNISCheckResultsStreamQueueRepo, BaseWorkerService
):
    """Worker-service facade over the RNIS check results stream queue.

    Wraps every repo operation in a ``retry_forever`` decorator (logs the
    start/error messages and retries indefinitely) and binds a fixed Redis
    consumer ``group_name``/``consumer_name`` pair at construction time.
    """

    def __init__(
        self,
        redis_client: Redis,
        logger: Logger,
        group_name: str,
        consumer_name: str,
    ):
        # Both bases are initialized explicitly rather than via cooperative
        # super() — each stores what it needs from the shared redis client.
        BaseWorkerService.__init__(self, redis_client=redis_client, logger=logger)
        RNISCheckResultsStreamQueueRepo.__init__(self, redis_client=redis_client)

        # Consumer-group identity used by pop/ack/claim_reassign below.
        self.group_name = group_name
        self.consumer_name = consumer_name

    # NOTE(review): "из очередь"/"при добавление" in the log strings below are
    # runtime messages and are preserved verbatim (grammar typos included).
    @retry_forever(
        start_message="➕ Добавление сообщения из очередь {queue_name}",
        error_message="❌ Ошибка при добавление сообщения в очередь {queue_name}",
    )
    async def add(self, message: RNISCheckResultMessage) -> None:
        """Append one result message to the stream."""
        await RNISCheckResultsStreamQueueRepo.add(self, message)

    @retry_forever(
        start_message="📥 Получение сообщений из очереди {queue_name}",
        error_message="⚠️ Ошибка получения сообщений из очереди {queue_name}",
    )
    async def pop(
        self,
        stream_id: str = ">",          # ">" = only messages new to this group
        block: int = 0,                # 0 = block indefinitely (ms otherwise)
        count: int = 100,              # max messages per read
        prioritize_claimed: bool = True,
        min_idle_time: int = 60000,    # ms before a pending message is claimable
    ) -> list[tuple[str, RNISCheckResultMessage]] | None:
        """Read messages for this consumer; returns (message_id, message) pairs."""
        return await RNISCheckResultsStreamQueueRepo.pop(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            stream_id=stream_id,
            block=block,
            count=count,
            prioritize_claimed=prioritize_claimed,
            min_idle_time=min_idle_time,
        )

    @retry_forever(
        start_message="✅ Подтверждение сообщения в потоке {queue_name}",
        error_message="⚠️ Ошибка подтверждения сообщения в потоке {queue_name}",
    )
    async def ack(self, message_id: str) -> None:
        """Acknowledge a processed message for this consumer group."""
        await RNISCheckResultsStreamQueueRepo.ack(self, self.group_name, message_id)

    @retry_forever(
        start_message="🔍 Поиск зависших сообщений в потоке {queue_name}",
        error_message="🚫 Ошибка при auto-claim сообщений в потоке {queue_name}",
    )
    async def claim_reassign(
        self,
        min_idle_time: int = 60000,  # ms a message must be idle before reclaim
        count: int = 100,
    ) -> list[tuple[str, RNISCheckResultMessage]]:
        """Reclaim stalled pending messages onto this consumer."""
        return await RNISCheckResultsStreamQueueRepo.claim_reassign(
            self,
            group_name=self.group_name,
            consumer_name=self.consumer_name,
            min_idle_time=min_idle_time,
            count=count,
        )

    @retry_forever(
        start_message="👥 Создание группы потребителей в потоке {queue_name}",
        error_message="❌ Ошибка создания группы в потоке {queue_name}",
    )
    async def create_consumer_group(self, create_stream: bool = True) -> None:
        """Create this worker's consumer group (and the stream if requested)."""
        await RNISCheckResultsStreamQueueRepo.create_consumer_group(
            self,
            group_name=self.group_name,
            create_stream=create_stream,
        )

    @retry_forever(
        start_message="🧹 Очистка сообщений старше {retention} в потоке {queue_name}",
        error_message="⚠️ Ошибка при очистке сообщений в потоке {queue_name}",
    )
    async def trim_by_age(self, retention: timedelta) -> int:
        """Drop messages older than ``retention``; returns the removed count."""
        return await RNISCheckResultsStreamQueueRepo.trim_by_age(self, retention)

    @retry_forever(
        start_message="🗑️ Полная очистка потока {queue_name}",
        error_message="❌ Ошибка при полной очистке потока {queue_name}",
    )
    async def delete_all(self) -> None:
        """Delete the entire stream."""
        await RNISCheckResultsStreamQueueRepo.delete_all(self)
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class PolicySeries(str, Enum):
    """Known OSAGO policy series codes.

    Member names are Latin transliterations, but the VALUES are the original
    CYRILLIC letter triples (e.g. ``XXX`` is "ХХХ" with Cyrillic Х, not Latin
    X) — never compare a member's value against a Latin-letter string.
    Inheriting from ``str`` makes members usable directly where a plain
    string is expected.
    """

    XXX = "ХХХ"
    TTT = "ТТТ"
    AAA = "ААА"
    AAV = "ААВ"
    AAK = "ААК"
    AAM = "ААМ"
    AAN = "ААН"
    AAS = "ААС"
    VVV = "ВВВ"
    EEE = "ЕЕЕ"
    KKK = "ККК"
    MMM = "МММ"
    NNN = "ННН"
    RRR = "РРР"
    SSS = "ССС"
|