workerlib-0.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- workerlib/__init__.py +22 -0
- workerlib/connection.py +77 -0
- workerlib/consumer.py +217 -0
- workerlib/mq_queue.py +46 -0
- workerlib/pool.py +172 -0
- workerlib/producer.py +161 -0
- workerlib/retry.py +49 -0
- workerlib-0.3.1.dist-info/METADATA +245 -0
- workerlib-0.3.1.dist-info/RECORD +11 -0
- workerlib-0.3.1.dist-info/WHEEL +5 -0
- workerlib-0.3.1.dist-info/top_level.txt +1 -0
workerlib/__init__.py
ADDED
@@ -0,0 +1,22 @@
# Relative imports: the wheel ships `workerlib/` at the top level (see RECORD),
# so absolute `src.workerlib.*` imports would fail at import time.
from .connection import RabbitMQConnection, ConnectionParams, ConnectionError
from .mq_queue import RabbitMQQueue, QueueConfig
from .consumer import RabbitMQConsumer, ErrorHandlingStrategy
from .pool import WorkerPool
from .producer import RabbitMQProducer
from .retry import RetryConfig, retry_on_failure

__version__ = "0.3.1"

__all__ = [
    'WorkerPool',
    'RabbitMQConnection',
    'ConnectionParams',
    'ConnectionError',
    'RabbitMQQueue',
    'QueueConfig',
    'RabbitMQConsumer',
    'ErrorHandlingStrategy',
    'RabbitMQProducer',
    'RetryConfig',
    'retry_on_failure'
]
workerlib/connection.py
ADDED
@@ -0,0 +1,77 @@
from dataclasses import dataclass
from typing import Optional
import logging

import aio_pika


@dataclass
class ConnectionParams:
    host: str = "127.0.0.1"
    port: int = 5672
    virtual_host: str = "/"
    username: str = "guest"
    password: str = "guest"
    heartbeat: int = 60
    timeout: int = 10


class ConnectionError(Exception):  # intentionally shadows the builtin; exported from the package
    pass


logger = logging.getLogger(__name__)


class RabbitMQConnection:
    def __init__(self, params: Optional[ConnectionParams] = None):
        self.params = params or ConnectionParams()
        self._connection: Optional[aio_pika.abc.AbstractRobustConnection] = None
        self._channel: Optional[aio_pika.Channel] = None

    @property
    def channel(self) -> aio_pika.Channel:
        if not self._channel or self._channel.is_closed:
            raise ConnectionError("Channel not available. Call connect() first.")
        return self._channel

    def _get_url(self) -> str:
        return (f"amqp://{self.params.username}:{self.params.password}"
                f"@{self.params.host}:{self.params.port}"
                f"/{self.params.virtual_host}")

    async def is_healthy(self) -> bool:
        # Explicit bool so callers never see None.
        return self._connection is not None and not self._connection.is_closed

    async def connect(self) -> None:
        if self._connection and not self._connection.is_closed:
            logger.info("Connection is already open.")
            return
        try:
            self._connection = await aio_pika.connect_robust(
                self._get_url(),
                heartbeat=self.params.heartbeat,
                timeout=self.params.timeout
            )
            self._channel = await self._connection.channel()
            logger.info(f"Connected to RabbitMQ at {self.params.host}:{self.params.port}")
        except Exception as e:
            logger.error(f"Error connecting to RabbitMQ: {e}")
            raise ConnectionError(f"Error connecting to RabbitMQ: {e}") from e

    async def reconnect(self) -> None:
        await self.close()
        await self.connect()

    async def close(self) -> None:
        if self._connection and not self._connection.is_closed:
            await self._connection.close()
        self._connection = None
        self._channel = None
        logger.info("Connection to RabbitMQ closed.")

    async def __aenter__(self):
        await self.connect()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.close()
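Not part of the wheel: a minimal usage sketch of `RabbitMQConnection` as an async context manager, assuming a local broker with the default guest credentials.

```python
import asyncio

from workerlib import ConnectionParams, RabbitMQConnection

async def main() -> None:
    params = ConnectionParams(host="127.0.0.1")  # assumes a local broker
    # __aenter__ calls connect(); __aexit__ closes the connection.
    async with RabbitMQConnection(params) as conn:
        print(await conn.is_healthy())  # True while the connection is open

asyncio.run(main())
```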
workerlib/consumer.py
ADDED
@@ -0,0 +1,217 @@
import asyncio
import json
import logging
import time
from enum import Enum
from typing import Callable, Awaitable, Optional, Dict, Any

import aio_pika

from .mq_queue import RabbitMQQueue
from .retry import retry_on_failure, RetryConfig

logger = logging.getLogger(__name__)


class ErrorHandlingStrategy(Enum):
    IGNORE = "ignore"
    REQUEUE_END = "requeue_end"
    REQUEUE_FRONT = "requeue_front"
    DLQ = "dlq"
    RETRY_THEN_DLQ = "retry_then_dlq"


class RabbitMQConsumer:
    def __init__(
        self,
        queue: RabbitMQQueue,
        handler: Callable[[Dict[str, Any]], Awaitable[bool]],
        error_strategy: ErrorHandlingStrategy = ErrorHandlingStrategy.RETRY_THEN_DLQ,
        retry_config: Optional[RetryConfig] = None,
        dlq_enabled: bool = True,
        requeue_delay: float = 5.0
    ) -> None:
        self.queue = queue
        self.handler = handler
        self.error_strategy = error_strategy
        self.retry_config = retry_config or RetryConfig()
        self.dlq_enabled = dlq_enabled
        self.requeue_delay = requeue_delay

        self.metrics = {
            "processed": 0,
            "failed": 0,
            "requeued": 0,
            "dlq_moved": 0,
            "ignored": 0
        }

    async def _requeue_with_delay(self, message: aio_pika.abc.AbstractIncomingMessage):
        try:
            # Use the connection's aio_pika channel (same pattern as _move_to_dlq);
            # message.channel is the low-level aiormq channel and has no declare_queue().
            channel = self.queue.connection.channel
            delay_queue_name = f"{self.queue.config.name}.delayed"

            # TTL holding queue that dead-letters expired messages back to
            # the source queue via the default exchange.
            await channel.declare_queue(
                delay_queue_name,
                durable=True,
                arguments={
                    "x-dead-letter-exchange": "",
                    "x-dead-letter-routing-key": self.queue.config.name,
                    "x-message-ttl": int(self.requeue_delay * 1000)
                }
            )

            await channel.default_exchange.publish(
                aio_pika.Message(
                    body=message.body,
                    headers=message.headers,
                    delivery_mode=message.delivery_mode
                ),
                routing_key=delay_queue_name
            )

            self.metrics["requeued"] += 1
            logger.info(f"Message requeued with {self.requeue_delay}s delay")

        except Exception as e:
            logger.error(f"Failed to requeue message: {e}")
            await message.nack(requeue=True)

    async def _move_to_dlq(self, message: aio_pika.abc.AbstractIncomingMessage,
                           error_msg: Optional[str] = None):
        if not self.dlq_enabled:
            return False

        try:
            dlq_name = f"{self.queue.config.name}.dlq"

            channel = self.queue.connection.channel

            await channel.declare_queue(
                dlq_name,
                durable=True,
                arguments={"x-queue-mode": "lazy"}
            )

            # Copy the headers so the incoming message is not mutated.
            headers = dict(message.headers or {})
            headers.update({
                "x-failure-reason": error_msg or "Unknown error",
                "x-original-queue": self.queue.config.name,
                "x-failed-at": time.time()
            })

            exchange = channel.default_exchange
            await exchange.publish(
                aio_pika.Message(
                    body=message.body,
                    headers=headers,
                    delivery_mode=message.delivery_mode,
                    content_type=message.content_type
                ),
                routing_key=dlq_name
            )

            self.metrics["dlq_moved"] += 1
            logger.info(f"Message moved to DLQ: {dlq_name}")
            return True

        except Exception as e:
            logger.error(f"Failed to move message to DLQ: {e}")
            return False

    async def _handle_with_retry(self, data: Dict[str, Any]) -> bool:

        async def attempt_handler():
            return await self.handler(data)

        try:
            if self.retry_config.max_attempts > 1:
                return await retry_on_failure(attempt_handler, config=self.retry_config)
            else:
                return await self.handler(data)
        except Exception as e:
            logger.error(f"Handler failed after retries: {e}")
            return False

    async def _handle_error(self, message: aio_pika.abc.AbstractIncomingMessage,
                            data: Optional[Dict[str, Any]] = None,
                            error_msg: Optional[str] = None):

        if self.error_strategy == ErrorHandlingStrategy.IGNORE:
            await message.ack()
            self.metrics["ignored"] += 1
            logger.warning(f"Ignored failed message: {error_msg}")

        elif self.error_strategy == ErrorHandlingStrategy.REQUEUE_END:
            await self._requeue_with_delay(message)
            await message.ack()
            self.metrics["failed"] += 1

        elif self.error_strategy == ErrorHandlingStrategy.REQUEUE_FRONT:
            await message.nack(requeue=True)
            self.metrics["failed"] += 1
            logger.info("Message requeued to front")

        elif self.error_strategy == ErrorHandlingStrategy.DLQ:
            moved = await self._move_to_dlq(message, error_msg)
            await message.ack()
            self.metrics["failed"] += 1
            if not moved:
                logger.error("Failed to move to DLQ, message acknowledged anyway")

        elif self.error_strategy == ErrorHandlingStrategy.RETRY_THEN_DLQ:
            if data and await self._handle_with_retry(data):
                await message.ack()
                self.metrics["processed"] += 1
                logger.info("Message processed successfully after retry")
            else:
                moved = await self._move_to_dlq(message, error_msg)
                await message.ack()
                self.metrics["failed"] += 1
                if not moved:
                    logger.error("Failed to move to DLQ after retry")

    async def process_message(self, message: aio_pika.abc.AbstractIncomingMessage):
        try:
            data = json.loads(message.body.decode())

            if self.error_strategy == ErrorHandlingStrategy.RETRY_THEN_DLQ:
                success = await self._handle_with_retry(data)
                if success:
                    await message.ack()
                    self.metrics["processed"] += 1
                    logger.info("Message processed successfully")
                else:
                    # Retries have already run; pass data=None so _handle_error
                    # moves straight to the DLQ instead of retrying again.
                    await self._handle_error(message, None, "Retry failed")
            else:
                success = await self.handler(data)
                if success:
                    await message.ack()
                    self.metrics["processed"] += 1
                    logger.info("Message processed successfully")
                else:
                    await self._handle_error(message, data, "Handler returned False")

        except json.JSONDecodeError as e:
            error_msg = f"Invalid JSON: {e}"
            logger.error(error_msg)
            await self._handle_error(message, None, error_msg)

        except Exception as e:
            error_msg = f"Unexpected error: {e}"
            logger.error(error_msg)
            await self._handle_error(message, None, error_msg)

    async def consume(self, stop_event: Optional[asyncio.Event] = None) -> None:
        queue = await self.queue.get_queue()

        async with queue.iterator() as queue_iter:
            async for message in queue_iter:
                if stop_event and stop_event.is_set():
                    logger.info("Stop event received, stopping consumer")
                    break

                logger.debug(f"Received message: {message.message_id}")
                await self.process_message(message)

        logger.info("Consumer stopped")
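Not part of the wheel: a sketch of graceful shutdown through the `stop_event` parameter of `consume()`; the queue name and handler are illustrative. Note that `consume()` only checks the event when a message is delivered, so the loop exits on the next delivery after the event is set.

```python
import asyncio

from workerlib import (
    ConnectionParams, QueueConfig, RabbitMQConnection,
    RabbitMQConsumer, RabbitMQQueue,
)

async def handler(data: dict) -> bool:
    print("got", data)
    return True  # True acks; False triggers the error strategy

async def main() -> None:
    async with RabbitMQConnection(ConnectionParams()) as conn:
        queue = RabbitMQQueue(conn, QueueConfig(name="demo"))  # illustrative queue
        consumer = RabbitMQConsumer(queue, handler)
        stop = asyncio.Event()
        task = asyncio.create_task(consumer.consume(stop_event=stop))
        await asyncio.sleep(10)  # let the worker run for a while
        stop.set()  # checked before each message; loop ends on the next delivery
        await task

asyncio.run(main())
```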
workerlib/mq_queue.py
ADDED
@@ -0,0 +1,46 @@
import logging
from dataclasses import dataclass
from typing import Optional

import aio_pika

from .connection import RabbitMQConnection


@dataclass
class QueueConfig:
    name: str
    durable: bool = True
    exclusive: bool = False
    auto_delete: bool = False
    arguments: Optional[dict] = None
    prefetch_count: int = 1


logger = logging.getLogger(__name__)


class RabbitMQQueue:
    def __init__(self, connection: RabbitMQConnection, config: QueueConfig):
        self.connection = connection
        self.config = config
        self._queue: Optional[aio_pika.abc.AbstractQueue] = None

    @property
    def queue_name(self) -> str:
        return self.config.name

    async def get_queue(self) -> aio_pika.abc.AbstractQueue:
        if not self._queue:
            await self.connection.connect()
            channel = self.connection.channel
            await channel.set_qos(prefetch_count=self.config.prefetch_count)
            self._queue = await channel.declare_queue(
                self.config.name,
                durable=self.config.durable,
                exclusive=self.config.exclusive,
                auto_delete=self.config.auto_delete,
                arguments=self.config.arguments or {}
            )
            logger.info(f"Queue declared: {self.config.name}")
        return self._queue
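Not part of the wheel: a sketch showing `QueueConfig.arguments` forwarding broker-side queue arguments to `declare_queue()`; `x-max-length` is a standard RabbitMQ argument, and the queue name is illustrative.

```python
from workerlib import ConnectionParams, QueueConfig, RabbitMQConnection, RabbitMQQueue

config = QueueConfig(
    name="bounded",                      # illustrative name
    durable=True,
    prefetch_count=10,                   # applied via channel.set_qos in get_queue()
    arguments={"x-max-length": 10_000},  # broker drops oldest messages beyond this
)
# get_queue() connects lazily, sets QoS, then declares the queue exactly once.
queue = RabbitMQQueue(RabbitMQConnection(ConnectionParams()), config)
```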
workerlib/pool.py
ADDED
@@ -0,0 +1,172 @@
import asyncio
import logging
from asyncio import Task
from typing import Optional, Dict, Callable, Any, Awaitable

from .connection import RabbitMQConnection, ConnectionParams
from .mq_queue import RabbitMQQueue, QueueConfig
from .consumer import RabbitMQConsumer, ErrorHandlingStrategy
from .producer import RabbitMQProducer
from .retry import RetryConfig

logger = logging.getLogger(__name__)


class WorkerPool:
    def __init__(
        self,
        connection_params: Optional[ConnectionParams] = None,
        auto_start: bool = True
    ):
        self.connection_params = connection_params or ConnectionParams()
        self.connection: Optional[RabbitMQConnection] = None
        self.workers: Dict[str, RabbitMQConsumer] = {}
        self.producers: Dict[str, RabbitMQProducer] = {}
        self.tasks: Dict[str, Task] = {}
        self._auto_start = auto_start

    async def __aenter__(self):
        if self._auto_start:
            await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.stop()

    async def start(self):
        if self.connection and await self.connection.is_healthy():
            logger.info("WorkerPool already started")
            return

        self.connection = RabbitMQConnection(self.connection_params)
        await self.connection.connect()
        logger.info("WorkerPool started")

    async def stop(self):
        for task_name, task in self.tasks.items():
            task.cancel()
            logger.debug(f"Cancelled task: {task_name}")

        if self.tasks:
            await asyncio.gather(*self.tasks.values(), return_exceptions=True)
        self.tasks.clear()

        if self.connection:
            await self.connection.close()
            self.connection = None

        self.workers.clear()
        self.producers.clear()
        logger.info("WorkerPool stopped")

    def add_worker(
        self,
        queue_name: str,
        handler: Callable[[Dict[str, Any]], Awaitable[bool]],
        prefetch_count: int = 1,
        error_strategy: ErrorHandlingStrategy = ErrorHandlingStrategy.RETRY_THEN_DLQ,
        retry_config: Optional[RetryConfig] = None,
        dlq_enabled: bool = True,
        auto_start: bool = True
    ):
        if queue_name in self.workers:
            raise KeyError(f"Worker for queue '{queue_name}' already exists")

        if not self.connection:
            raise RuntimeError("Connection not established. Call start() first or set auto_start=True")

        queue_config = QueueConfig(
            name=queue_name,
            prefetch_count=prefetch_count
        )

        queue = RabbitMQQueue(self.connection, queue_config)

        consumer = RabbitMQConsumer(
            queue=queue,
            handler=handler,
            error_strategy=error_strategy,
            retry_config=retry_config,
            dlq_enabled=dlq_enabled
        )

        producer = RabbitMQProducer(
            connection=self.connection,
            queue=queue,
            send_retries=3
        )

        self.workers[queue_name] = consumer
        self.producers[queue_name] = producer

        if auto_start:
            task = asyncio.create_task(consumer.consume())
            self.tasks[queue_name] = task
            logger.info(f"Worker for queue '{queue_name}' started")
        else:
            logger.info(f"Worker for queue '{queue_name}' added (not started)")

    async def send(
        self,
        queue_name: str,
        data: Dict[str, Any],
        with_retry: bool = True,
        **kwargs
    ):
        if queue_name not in self.producers:
            raise ValueError(f"No producer for queue '{queue_name}'")

        producer = self.producers[queue_name]

        if with_retry:
            await producer.send_with_retry(data, **kwargs)
        else:
            await producer.send(data, **kwargs)

    async def send_raw(self, queue_name: str, body: bytes, **kwargs):
        if queue_name not in self.producers:
            raise ValueError(f"No producer for queue '{queue_name}'")

        producer = self.producers[queue_name]
        await producer.send_raw(body, **kwargs)

    def get_metrics(self, queue_name: Optional[str] = None) -> Dict[str, Any]:
        if queue_name:
            if queue_name not in self.workers:
                raise ValueError(f"No worker for queue '{queue_name}'")

            consumer_metrics = self.workers[queue_name].metrics.copy()
            producer_metrics = self.producers[queue_name].metrics.copy()

            return {
                "consumer": consumer_metrics,
                "producer": producer_metrics,
                "queue": queue_name
            }
        else:
            all_metrics = {
                "total_queues": len(self.workers),
                "queues": {}
            }

            for q_name in self.workers:
                all_metrics["queues"][q_name] = self.get_metrics(q_name)

            return all_metrics

    async def restart_worker(self, queue_name: str):
        if queue_name not in self.tasks:
            raise ValueError(f"No running worker for queue '{queue_name}'")

        old_task = self.tasks[queue_name]
        old_task.cancel()
        try:
            await old_task
        except asyncio.CancelledError:
            pass

        consumer = self.workers[queue_name]
        new_task = asyncio.create_task(consumer.consume())
        self.tasks[queue_name] = new_task

        logger.info(f"Worker for queue '{queue_name}' restarted")
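Not part of the wheel: a sketch of `restart_worker()`, which cancels a queue's consume task and schedules a fresh one on the same consumer; the queue name and the `supervise` helper are illustrative.

```python
from workerlib import WorkerPool

async def supervise(pool: WorkerPool) -> None:
    # Restart the consumer task for one queue, e.g. after a stuck handler.
    await pool.restart_worker("tasks")  # raises ValueError if nothing is running
    # Metrics survive the restart because the same consumer object is reused.
    print(pool.get_metrics("tasks"))
```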
workerlib/producer.py
ADDED
@@ -0,0 +1,161 @@
import json
import logging
import asyncio
from typing import Any, Dict, List, Optional

import aio_pika
from aio_pika.exceptions import AMQPConnectionError

# Import the package's ConnectionError (it shadows the builtin) so that
# failures raised by connection.connect()/reconnect() are actually caught
# and retried in send_with_retry below.
from .connection import RabbitMQConnection, ConnectionError
from .mq_queue import RabbitMQQueue

logger = logging.getLogger(__name__)


class RabbitMQProducer:
    def __init__(
        self,
        connection: RabbitMQConnection,
        queue: RabbitMQQueue,
        send_retries: int = 3,
        retry_delay: float = 1.0,
        enable_auto_reconnect: bool = True
    ) -> None:
        self.connection = connection
        self.queue = queue
        self.send_retries = send_retries
        self.retry_delay = retry_delay
        self.enable_auto_reconnect = enable_auto_reconnect
        self.metrics = {
            "sent": 0,
            "failed": 0,
            "retried": 0
        }

    async def _ensure_connection(self) -> None:
        if not await self.connection.is_healthy():
            if self.enable_auto_reconnect:
                logger.warning("Connection lost, attempting to reconnect...")
                await self.connection.reconnect()
            else:
                raise ConnectionError("Connection is not available")

    async def send_with_retry(
        self,
        data: Dict[str, Any],
        max_retries: Optional[int] = None,
        retry_delay: Optional[float] = None,
        **kwargs
    ) -> bool:
        max_retries = max_retries or self.send_retries
        retry_delay = retry_delay or self.retry_delay
        last_exception = None

        for attempt in range(max_retries):
            try:
                await self.send(data, **kwargs)
                return True

            except (AMQPConnectionError, ConnectionError) as e:
                last_exception = e
                self.metrics["retried"] += 1

                if attempt == max_retries - 1:
                    logger.error(f"Failed to send after {max_retries} attempts: {e}")
                    break

                logger.warning(
                    f"Connection error on send attempt {attempt + 1}/{max_retries}. "
                    f"Retrying in {retry_delay}s... Error: {e}"
                )

                await asyncio.sleep(retry_delay)

                if self.enable_auto_reconnect:
                    try:
                        await self.connection.reconnect()
                        logger.info("Reconnected successfully")
                    except Exception as reconnect_error:
                        logger.error(f"Failed to reconnect: {reconnect_error}")

        self.metrics["failed"] += 1
        if last_exception:
            raise last_exception
        return False

    async def send(self, data: Dict[str, Any], **kwargs) -> None:
        await self._ensure_connection()

        try:
            queue_obj = await self.queue.get_queue()
            body = json.dumps(data).encode()

            message = aio_pika.Message(
                body=body,
                delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
                content_type="application/json",
                **kwargs
            )

            channel = self.connection.channel
            exchange = channel.default_exchange

            await exchange.publish(
                message,
                routing_key=queue_obj.name
            )

            self.metrics["sent"] += 1
            logger.debug(f"Message sent to queue {queue_obj.name}")

        except Exception as e:
            self.metrics["failed"] += 1
            logger.error(f"Failed to send message: {e}")
            raise

    async def send_raw(self, body: bytes, **kwargs) -> None:
        await self._ensure_connection()

        try:
            queue_obj = await self.queue.get_queue()
            message = aio_pika.Message(body=body, **kwargs)

            channel = self.connection.channel
            exchange = channel.default_exchange
            await exchange.publish(
                message,
                routing_key=queue_obj.name
            )

            self.metrics["sent"] += 1
            logger.debug(f"Raw message sent to queue {queue_obj.name}")

        except Exception as e:
            self.metrics["failed"] += 1
            logger.error(f"Failed to send raw message: {e}")
            raise

    async def send_batch(self, messages: List[Dict[str, Any]]) -> None:
        await self._ensure_connection()

        queue_obj = await self.queue.get_queue()
        channel = self.connection.channel
        exchange = channel.default_exchange
        sent_count = 0

        for data in messages:
            try:
                body = json.dumps(data).encode()
                message = aio_pika.Message(
                    body=body,
                    delivery_mode=aio_pika.DeliveryMode.PERSISTENT,
                    content_type="application/json"
                )
                await exchange.publish(message, routing_key=queue_obj.name)
                sent_count += 1

            except Exception as e:
                logger.error(f"Failed to send message in batch: {e}")

        self.metrics["sent"] += sent_count
        logger.info(f"Sent {sent_count}/{len(messages)} messages to queue {queue_obj.name}")
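Not part of the wheel: a sketch of `send_raw()` for pre-encoded bodies, assuming a pool with a worker registered for an illustrative "tasks" queue (`WorkerPool.send_raw` delegates to that queue's producer).

```python
import json

# send_raw() forwards the bytes unchanged; no JSON serialization is applied,
# and the extra kwargs become aio_pika.Message properties.
payload = json.dumps({"id": 1}).encode()
await pool.send_raw("tasks", payload, content_type="application/json")
```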
workerlib/retry.py
ADDED
@@ -0,0 +1,49 @@
import asyncio
import logging
from dataclasses import dataclass
from typing import Optional, Any, Callable, Awaitable

logger = logging.getLogger(__name__)


@dataclass
class RetryConfig:
    max_attempts: int = 3
    initial_delay: float = 1.0
    backoff_factor: float = 2.0
    max_delay: float = 60.0

    def get_delay(self, attempt: int) -> float:
        delay = self.initial_delay * (self.backoff_factor ** (attempt - 1))
        return min(delay, self.max_delay)


async def retry_on_failure(
    func: Callable[..., Awaitable[Any]],
    *args,
    config: Optional[RetryConfig] = None,
    **kwargs
) -> Any:
    retry_config = config or RetryConfig()
    last_error: Optional[Exception] = None

    for attempt in range(1, retry_config.max_attempts + 1):
        try:
            result = await func(*args, **kwargs)
            if attempt > 1:
                logger.info(f"Success after {attempt} attempts")
            return result
        except Exception as e:
            last_error = e
            if attempt == retry_config.max_attempts:
                logger.error(f"Max retries exceeded: {attempt}")
                break

            delay = retry_config.get_delay(attempt)
            logger.warning(
                f"Attempt {attempt}/{retry_config.max_attempts} failed. "
                f"Retrying in {delay:.1f}s. Error: {e}"
            )
            await asyncio.sleep(delay)

    raise last_error or Exception("Unknown error")
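Not part of the wheel: a standalone sketch of `retry_on_failure` with exponential backoff; the flaky coroutine is illustrative. Note that only raised exceptions are retried, not falsy return values.

```python
import asyncio

from workerlib import RetryConfig, retry_on_failure

calls = 0

async def flaky() -> str:
    global calls
    calls += 1
    if calls < 3:
        raise RuntimeError("transient failure")  # retried with backoff
    return "ok"

async def main() -> None:
    # Delays between attempts: 0.1s, then 0.2s (initial_delay * 2 ** (attempt - 1)).
    cfg = RetryConfig(max_attempts=3, initial_delay=0.1)
    print(await retry_on_failure(flaky, config=cfg))  # "ok" on the third attempt

asyncio.run(main())
```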
workerlib-0.3.1.dist-info/METADATA
ADDED
@@ -0,0 +1,245 @@
Metadata-Version: 2.4
Name: workerlib
Version: 0.3.1
Summary: Async RabbitMQ worker utilities
Author-email: YOUR_NAME <your.email@example.com>
Maintainer-email: YOUR_NAME <your.email@example.com>
Project-URL: Homepage, https://github.com/ametist-dev/workerlib
Project-URL: Repository, https://github.com/ametist-dev/workerlib
Project-URL: Issues, https://github.com/ametist-dev/workerlib/issues
Keywords: async,rabbitmq,aio-pika,workers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Intended Audience :: Developers
Classifier: Topic :: Software Development :: Libraries
Classifier: Development Status :: 4 - Beta
Requires-Python: >=3.10
Description-Content-Type: text/markdown
Requires-Dist: aio-pika<10,>=9.0.0

# WorkerLib - asynchronous work with RabbitMQ

## Quick start
```python
import asyncio
from workerlib import WorkerPool

async def task_handler(data: dict) -> bool:
    print(f"Processing: {data}")
    return True

async def main():
    async with WorkerPool() as pool:
        pool.add_worker("tasks", task_handler)
        await pool.send("tasks", {"id": 1, "cmd": "start"})
        await asyncio.sleep(2)

asyncio.run(main())
```
## Message format
**JSON messages**
The library automatically serializes a dict to JSON when sending:
```python
# Sending a simple message
await pool.send("queue", {
    "event": "user_created",
    "user_id": 123,
    "email": "user@example.com",
    "timestamp": "2024-01-15T10:30:00Z"
})

# Sending nested structures
await pool.send("queue", {
    "type": "order",
    "data": {
        "order_id": "ORD-12345",
        "items": [
            {"id": 1, "quantity": 2},
            {"id": 2, "quantity": 1}
        ],
        "total": 299.99
    },
    "metadata": {
        "source": "api",
        "version": "1.0"
    }
})
```
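On the consuming side the handler receives the already-deserialized dict: the consumer runs `json.loads` on the message body before invoking it. A sketch, with an illustrative handler for the nested order message above:

```python
async def order_handler(data: dict) -> bool:
    # `data` is already a dict; no manual JSON decoding needed.
    order = data["data"]
    total_items = sum(item["quantity"] for item in order["items"])
    print(f"{order['order_id']}: {total_items} items")
    return True  # True acks the message; False triggers the error strategy

pool.add_worker("queue", order_handler)
```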
## Core examples
1. A pool with multiple workers
```python
from workerlib import WorkerPool, ErrorHandlingStrategy

async def main():
    async with WorkerPool() as pool:
        # Email worker with a DLQ
        pool.add_worker(
            queue_name="emails",
            handler=email_handler,
            error_strategy=ErrorHandlingStrategy.RETRY_THEN_DLQ,
            prefetch_count=5
        )

        # Payment handler
        pool.add_worker(
            queue_name="payments",
            handler=payment_handler,
            error_strategy=ErrorHandlingStrategy.REQUEUE_END
        )

        # Send tasks
        await pool.send("emails", {"to": "user@test.com"})
        await pool.send("payments", {"amount": 100})
```
2. Custom connection and retry
```python
from workerlib import ConnectionParams, RetryConfig

params = ConnectionParams(
    host="rabbit.local",
    username="admin",
    password="secret"
)

retry_config = RetryConfig(
    max_attempts=3,
    initial_delay=1.0,
    backoff_factor=2.0
)

async with WorkerPool(connection_params=params) as pool:
    pool.add_worker(
        queue_name="critical",
        handler=critical_handler,
        retry_config=retry_config
    )
```
3. Error handling
```python
from workerlib import ErrorHandlingStrategy

# Strategies:
# IGNORE - ack and drop the failed message
# REQUEUE_END - requeue at the back of the queue with a delay
# REQUEUE_FRONT - requeue at the front of the queue
# DLQ - move to a dead letter queue
# RETRY_THEN_DLQ - retry, then move to the DLQ

pool.add_worker(
    queue_name="tasks",
    handler=my_handler,
    error_strategy=ErrorHandlingStrategy.RETRY_THEN_DLQ,
    dlq_enabled=True,
    requeue_delay=5.0  # delay before reprocessing
)
```
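The consumer derives its helper queue names from the source queue: failed messages land in `<queue>.dlq`, and delayed requeues pass through `<queue>.delayed` (see consumer.py). A sketch, assuming an existing `RabbitMQConnection` named `connection`, of draining that DLQ with a second consumer:

```python
from workerlib import QueueConfig, RabbitMQConsumer, RabbitMQQueue

async def dlq_handler(data: dict) -> bool:
    # Failure context travels in the headers (x-failure-reason,
    # x-original-queue, x-failed-at), not in the JSON body.
    print("dead-lettered payload:", data)
    return True

# Match the arguments the consumer used when declaring the DLQ, otherwise
# RabbitMQ rejects the redeclaration with PRECONDITION_FAILED.
dlq = RabbitMQQueue(connection, QueueConfig(
    name="tasks.dlq",
    arguments={"x-queue-mode": "lazy"},
))
await RabbitMQConsumer(dlq, dlq_handler).consume()
```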
4. Individual components
```python
from workerlib import (
    RabbitMQConnection,
    RabbitMQQueue,
    QueueConfig,
    RabbitMQConsumer,
    RabbitMQProducer
)

# Manual wiring
connection = RabbitMQConnection()
await connection.connect()

queue = RabbitMQQueue(connection, QueueConfig(name="my_queue"))

producer = RabbitMQProducer(connection, queue)
await producer.send({"test": "data"})

consumer = RabbitMQConsumer(queue, my_handler)
await consumer.consume()
```
5. Batch sending
```python
async with WorkerPool() as pool:
    messages = [
        {"id": i, "data": f"item_{i}"}
        for i in range(100)
    ]

    tasks = [
        pool.send("batch_queue", msg)
        for msg in messages
    ]

    await asyncio.gather(*tasks)
```
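The producer also exposes `send_batch()`, which publishes a list of dicts over a single channel and logs the sent count. The pool does not wrap it directly, but each queue's producer is reachable through `pool.producers`; a sketch:

```python
# pool.producers maps queue name -> RabbitMQProducer (populated by add_worker).
producer = pool.producers["batch_queue"]
await producer.send_batch([{"id": i} for i in range(100)])
# Logs "Sent 100/100 messages to queue batch_queue" on success.
```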
6. Metrics
```python
async with WorkerPool() as pool:
    pool.add_worker("monitored", handler)

    # Send tasks
    for i in range(10):
        await pool.send("monitored", {"task": i})

    # Fetch metrics
    metrics = pool.get_metrics("monitored")
    print(f"Processed: {metrics['consumer']['processed']}")
    print(f"Failed: {metrics['consumer']['failed']}")
```
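Calling `get_metrics()` without a queue name aggregates across all workers; a sketch of the returned shape:

```python
all_metrics = pool.get_metrics()
print(all_metrics["total_queues"])  # number of registered workers
for name, m in all_metrics["queues"].items():
    print(name, m["consumer"]["processed"], m["producer"]["sent"])
```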
7. FastAPI integration
```python
from fastapi import FastAPI
from workerlib import WorkerPool

app = FastAPI()
worker_pool = WorkerPool(auto_start=False)

@app.on_event("startup")
async def startup():
    await worker_pool.start()
    worker_pool.add_worker("api_tasks", task_handler)

@app.on_event("shutdown")
async def shutdown():
    await worker_pool.stop()

@app.post("/task")
async def create_task(data: dict):
    await worker_pool.send("api_tasks", data)
    return {"status": "queued"}
```
## Configuration
ConnectionParams
```python
ConnectionParams(
    host="127.0.0.1",
    port=5672,
    username="guest",
    password="guest",
    heartbeat=60,
    timeout=10
)
```
QueueConfig
```python
QueueConfig(
    name="queue_name",
    durable=True,
    prefetch_count=1
)
```
RetryConfig
```python
RetryConfig(
    max_attempts=3,
    initial_delay=1.0,
    backoff_factor=2.0,
    max_delay=60.0
)
```
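`RetryConfig` computes the wait before attempt n as `initial_delay * backoff_factor ** (n - 1)`, capped at `max_delay`; a quick check of the defaults above:

```python
cfg = RetryConfig(max_attempts=3, initial_delay=1.0, backoff_factor=2.0, max_delay=60.0)
print([cfg.get_delay(n) for n in (1, 2, 3)])  # [1.0, 2.0, 4.0], each capped at 60.0
```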
## Installation
```bash
pip install workerlib
```
Requirements: Python 3.10+, aio-pika >= 9.0, < 10 (matching Requires-Python and Requires-Dist above).
workerlib-0.3.1.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
workerlib/__init__.py,sha256=-Kf1M5qJ-QOaNtMv1XUzFM3-IShzjIYbE-TQpkcvl-I,692
workerlib/connection.py,sha256=pJGTnxOORPGCRnMezpbo2JDZ7EVWSGGm2-joDLw1EaQ,2531
workerlib/consumer.py,sha256=xPpQxvnBoLaBB3nZH-P7PyAFzxyehD6ruvGQ0gPGj_I,7979
workerlib/mq_queue.py,sha256=yBsW7rVeNwWjzkYnBYO46kzJTttacrPrzGgwFaS-580,1329
workerlib/pool.py,sha256=_7lFiYp-b0Xj3E6iSFWLFt7t9jdyWrsZYxXBiSu6HCw,5765
workerlib/producer.py,sha256=L1iw2IHWOnRRZZOiFu3LzetIx5OnaEUyqP1AhcOCcf8,5421
workerlib/retry.py,sha256=3wIK3hqQ09A3q54k6kkpNZL6pBSSQGfvHaWCQ0XPwDA,1526
workerlib-0.3.1.dist-info/METADATA,sha256=kxfOazgUvEUb6BmmOLDJWoTdvbBTMnNvH76gAGQI44c,6689
workerlib-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
workerlib-0.3.1.dist-info/top_level.txt,sha256=CrIpxje3R-70l5ttCsQ7A-tjg6_oJU1tMC02RKkCl3Y,10
workerlib-0.3.1.dist-info/RECORD,,
workerlib-0.3.1.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
workerlib