redis-allocator 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis_allocator/__init__.py +28 -0
- redis_allocator/allocator.py +601 -0
- redis_allocator/lock.py +682 -0
- redis_allocator/task_queue.py +382 -0
- redis_allocator-0.0.1.dist-info/METADATA +229 -0
- redis_allocator-0.0.1.dist-info/RECORD +14 -0
- redis_allocator-0.0.1.dist-info/WHEEL +5 -0
- redis_allocator-0.0.1.dist-info/licenses/LICENSE +21 -0
- redis_allocator-0.0.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/conftest.py +46 -0
- tests/test_allocator.py +525 -0
- tests/test_lock.py +851 -0
- tests/test_task_queue.py +778 -0
@@ -0,0 +1,382 @@
|
|
1
|
+
"""Redis-based task queue system for distributed task management.
|
2
|
+
|
3
|
+
This module provides a RedisTaskQueue class that enables distributed task
|
4
|
+
management and coordination using Redis as the underlying infrastructure.
|
5
|
+
"""
|
6
|
+
import time
|
7
|
+
import pickle
|
8
|
+
import base64
|
9
|
+
import logging
|
10
|
+
from typing import Any, Callable, Optional, List
|
11
|
+
from functools import cached_property
|
12
|
+
from dataclasses import dataclass
|
13
|
+
from enum import Enum
|
14
|
+
from threading import Event
|
15
|
+
from concurrent.futures import ThreadPoolExecutor
|
16
|
+
from contextlib import contextmanager
|
17
|
+
|
18
|
+
from redis import StrictRedis as Redis
|
19
|
+
|
20
|
+
logger = logging.getLogger(__name__)
|
21
|
+
|
22
|
+
|
23
|
+
class TaskExecutePolicy(Enum):
    """Execution strategies available when submitting a task to the queue.

    Attributes:
        Local: Execute task only locally
        Remote: Execute task only remotely
        LocalFirst: Try to execute locally first, then remotely if it fails
        RemoteFirst: Try to execute remotely first, then locally if it fails
        Auto: Choose execution mode based on whether a remote listener exists
    """

    Local = 0
    Remote = 1
    LocalFirst = 2
    RemoteFirst = 3
    Auto = 4
|
38
|
+
|
39
|
+
|
40
|
+
@dataclass
class RedisTask:
    """Represents a task in the Redis task queue system.

    This class encapsulates all information related to a task, including
    its unique ID, name, parameters, and execution status.

    Attributes:
        id: Unique identifier for the task
        name: Name of the task category
        params: Dictionary of parameters for the task
        expiry: Unix timestamp when this task expires
        result: The result of the task, initially None
        error: Any error that occurred during task execution
        update_progress_time: Last time the progress was updated
        current_progress: Current progress value
        total_progress: Total progress value for completion
        _save: Internal callable to save task state to Redis
    """
    id: str
    name: str
    params: dict
    expiry: float
    result: Any = None
    error: Any = None
    update_progress_time: float = 0.0
    current_progress: float = 0.0
    total_progress: float = 100.0
    # Optional persistence hook; None when the task is detached from a queue
    # (e.g. cleared before pickling). Annotated Optional to match the None
    # default — the original annotation (`Callable[[], None] = None`) lied.
    _save: Optional[Callable[[], None]] = None

    def save(self):
        """Save the current state of the task to Redis.

        No-op when no save callback is attached (detached task).
        """
        if self._save is not None:
            self._save()

    def update(self, current_progress: float, total_progress: float):
        """Update the progress of the task.

        Progress fields and the update timestamp are set before the expiry
        check, so an expired task still records its last reported progress.

        Args:
            current_progress: The current progress value
            total_progress: The total progress value for completion

        Raises:
            TimeoutError: If the task has expired
        """
        self.current_progress = current_progress
        self.total_progress = total_progress
        self.update_progress_time = time.time()
        if self.expiry < time.time():
            raise TimeoutError(f'Task {self.id} in {self.name} has expired')
        self.save()
|
91
|
+
|
92
|
+
|
93
|
+
class RedisTaskQueue:
|
94
|
+
"""A class that provides a simple interface for managing a task queue in Redis.
|
95
|
+
|
96
|
+
This class enables distributed task processing through Redis, with support for
|
97
|
+
asynchronous processing, task listening, and result retrieval. Tasks can be
|
98
|
+
executed locally or remotely based on configurable policies.
|
99
|
+
"""
|
100
|
+
|
101
|
+
def __init__(self, redis: Redis, prefix: str, suffix='task-queue', timeout=300, interval=5,
|
102
|
+
task_fn: Callable[[RedisTask], Any] = None):
|
103
|
+
"""Initialize a RedisTaskQueue instance.
|
104
|
+
|
105
|
+
Args:
|
106
|
+
redis: The Redis client used for interacting with Redis.
|
107
|
+
prefix: The prefix to be added to the task queue key.
|
108
|
+
suffix: The suffix to be added to the task queue key. Default is 'task-queue'.
|
109
|
+
timeout: The query timeout in seconds. Default is 300s.
|
110
|
+
interval: The query interval in seconds. Default is 5s.
|
111
|
+
task_fn: A function to execute tasks locally. Takes a RedisTask and returns Any.
|
112
|
+
"""
|
113
|
+
self.redis = redis
|
114
|
+
self.prefix = prefix
|
115
|
+
self.suffix = suffix
|
116
|
+
self.timeout = timeout
|
117
|
+
self.interval = interval
|
118
|
+
self.task_fn = task_fn
|
119
|
+
|
120
|
+
def build_task(self, id: str, name: str, params: dict) -> RedisTask:
|
121
|
+
"""Create a new RedisTask instance with the given parameters.
|
122
|
+
|
123
|
+
Args:
|
124
|
+
id: Unique identifier for the task
|
125
|
+
name: Name of the task category
|
126
|
+
params: Dictionary of parameters for the task
|
127
|
+
|
128
|
+
Returns:
|
129
|
+
A new RedisTask instance with a save function configured
|
130
|
+
"""
|
131
|
+
task = RedisTask(id=id, name=name, params=params, expiry=time.time() + self.timeout)
|
132
|
+
task._save = lambda: self.set_task(task)
|
133
|
+
return task
|
134
|
+
|
135
|
+
def execute_task_remotely(self, task: RedisTask, timeout: Optional[float] = None, once: bool = False) -> Any:
|
136
|
+
"""Execute a task remotely by pushing it to the queue.
|
137
|
+
|
138
|
+
Args:
|
139
|
+
task: The RedisTask to execute
|
140
|
+
timeout: Optional timeout in seconds, defaults to self.timeout
|
141
|
+
once: Whether to delete the result after getting it
|
142
|
+
|
143
|
+
Returns:
|
144
|
+
The result of the task
|
145
|
+
|
146
|
+
Raises:
|
147
|
+
TimeoutError: If the task times out
|
148
|
+
Exception: Any exception raised during task execution
|
149
|
+
"""
|
150
|
+
self.set_task(task)
|
151
|
+
self.redis.rpush(self._queue_key(task.name), task.id)
|
152
|
+
if timeout is None:
|
153
|
+
timeout = self.timeout
|
154
|
+
while timeout >= 0:
|
155
|
+
time.sleep(self.interval)
|
156
|
+
result = self.get_task(task.id, once)
|
157
|
+
if result is not None:
|
158
|
+
if result.error is not None:
|
159
|
+
raise result.error
|
160
|
+
elif result.result is not None:
|
161
|
+
return result.result
|
162
|
+
timeout -= self.interval
|
163
|
+
raise TimeoutError(f'Task {task.id} in {task.name} has expired')
|
164
|
+
|
165
|
+
def execute_task_locally(self, task: RedisTask, timeout: Optional[float] = None) -> Any:
|
166
|
+
"""Execute a task locally using the task_fn.
|
167
|
+
|
168
|
+
Args:
|
169
|
+
task: The RedisTask to execute
|
170
|
+
timeout: Optional timeout in seconds, updates task.expiry if provided
|
171
|
+
|
172
|
+
Returns:
|
173
|
+
The result of the task
|
174
|
+
|
175
|
+
Raises:
|
176
|
+
Exception: Any exception raised during task execution
|
177
|
+
"""
|
178
|
+
if timeout is not None:
|
179
|
+
task.expiry = time.time() + timeout
|
180
|
+
try:
|
181
|
+
task.result = self.task_fn(task)
|
182
|
+
return task.result
|
183
|
+
except Exception as e:
|
184
|
+
task.error = e
|
185
|
+
raise e
|
186
|
+
finally:
|
187
|
+
task.save()
|
188
|
+
|
189
|
+
@cached_property
|
190
|
+
def _queue_prefix(self) -> str:
|
191
|
+
"""Get the prefix for queue keys.
|
192
|
+
|
193
|
+
Returns:
|
194
|
+
The queue prefix
|
195
|
+
"""
|
196
|
+
return f'{self.prefix}|{self.suffix}|task-queue'
|
197
|
+
|
198
|
+
def _queue_key(self, name: str) -> str:
|
199
|
+
"""Generate a queue key for the given task name.
|
200
|
+
|
201
|
+
Args:
|
202
|
+
name: The task name.
|
203
|
+
|
204
|
+
Returns:
|
205
|
+
The formatted queue key.
|
206
|
+
"""
|
207
|
+
return f'{self._queue_prefix}|{name}'
|
208
|
+
|
209
|
+
def _queue_name(self, key: str) -> str:
|
210
|
+
"""Extract the queue name from a queue key.
|
211
|
+
|
212
|
+
Args:
|
213
|
+
key: The queue key.
|
214
|
+
|
215
|
+
Returns:
|
216
|
+
The queue name.
|
217
|
+
|
218
|
+
Raises:
|
219
|
+
AssertionError: If the key doesn't start with the queue prefix.
|
220
|
+
"""
|
221
|
+
assert key.startswith(self._queue_prefix)
|
222
|
+
return key[len(self._queue_prefix) + 1:]
|
223
|
+
|
224
|
+
def _queue_listen_name(self, name: str) -> str:
|
225
|
+
"""Generate a listen name for the given task name.
|
226
|
+
|
227
|
+
Args:
|
228
|
+
name: The task name.
|
229
|
+
|
230
|
+
Returns:
|
231
|
+
The formatted listen name.
|
232
|
+
"""
|
233
|
+
return f'{self.prefix}|{self.suffix}|task-listen|{name}'
|
234
|
+
|
235
|
+
def set_queue_listened(self, name: str) -> None:
|
236
|
+
"""Set the queue as being listened to for the given task name.
|
237
|
+
|
238
|
+
Args:
|
239
|
+
name: The task name.
|
240
|
+
"""
|
241
|
+
self.redis.setex(self._queue_listen_name(name), self.timeout, '1')
|
242
|
+
|
243
|
+
def _result_key(self, task_id: str) -> str:
|
244
|
+
"""Generate a result key for the given task ID.
|
245
|
+
|
246
|
+
Args:
|
247
|
+
task_id: The task ID.
|
248
|
+
|
249
|
+
Returns:
|
250
|
+
The formatted result key.
|
251
|
+
"""
|
252
|
+
return f'{self.prefix}|{self.suffix}|task-result:{task_id}'
|
253
|
+
|
254
|
+
def set_task(self, task: RedisTask) -> str:
|
255
|
+
"""Save a task to Redis.
|
256
|
+
|
257
|
+
Args:
|
258
|
+
task: The RedisTask to save.
|
259
|
+
|
260
|
+
Returns:
|
261
|
+
The task ID.
|
262
|
+
"""
|
263
|
+
task._save = None
|
264
|
+
t = pickle.dumps(task)
|
265
|
+
result = str(base64.b64encode(t), 'ascii')
|
266
|
+
self.redis.setex(self._result_key(task.id), self.timeout, result)
|
267
|
+
return task.id
|
268
|
+
|
269
|
+
def get_task(self, task_id: str, once: bool = False) -> Optional[RedisTask]:
|
270
|
+
"""Get a task from Redis.
|
271
|
+
|
272
|
+
Args:
|
273
|
+
task_id: The task ID.
|
274
|
+
once: Whether to delete the task after getting it.
|
275
|
+
|
276
|
+
Returns:
|
277
|
+
The RedisTask, or None if no task is available.
|
278
|
+
"""
|
279
|
+
get = self.redis.getdel if once else self.redis.get
|
280
|
+
result = get(self._result_key(task_id))
|
281
|
+
if result is None:
|
282
|
+
return None
|
283
|
+
t = pickle.loads(base64.b64decode(result))
|
284
|
+
t._save = lambda: self.set_task(t)
|
285
|
+
return t
|
286
|
+
|
287
|
+
def has_task(self, task_id: str) -> bool:
|
288
|
+
"""Check if a task exists.
|
289
|
+
|
290
|
+
Args:
|
291
|
+
task_id: The task ID.
|
292
|
+
|
293
|
+
Returns:
|
294
|
+
True if the task exists, False otherwise.
|
295
|
+
"""
|
296
|
+
return self.redis.exists(self._result_key(task_id)) > 0
|
297
|
+
|
298
|
+
@contextmanager
|
299
|
+
def _executor_context(self, max_workers: int = 128):
|
300
|
+
"""Create a ThreadPoolExecutor context manager.
|
301
|
+
|
302
|
+
This is a helper method for testing and internal use.
|
303
|
+
|
304
|
+
Args:
|
305
|
+
max_workers: The maximum number of worker threads.
|
306
|
+
|
307
|
+
Yields:
|
308
|
+
The ThreadPoolExecutor instance.
|
309
|
+
"""
|
310
|
+
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
311
|
+
yield executor
|
312
|
+
|
313
|
+
def listen(self, names: List[str], workers: int = 128, event: Optional[Event] = None) -> None:
|
314
|
+
"""Listen for tasks on the specified queues.
|
315
|
+
|
316
|
+
This method continuously polls the specified queues for tasks,
|
317
|
+
and executes tasks locally when they are received.
|
318
|
+
|
319
|
+
Args:
|
320
|
+
names: The names of the queues to listen to.
|
321
|
+
workers: The number of worker threads to use. Default is 128.
|
322
|
+
event: An event to signal when to stop listening. Default is None.
|
323
|
+
"""
|
324
|
+
with self._executor_context(max_workers=workers) as executor:
|
325
|
+
while event is None or not event.is_set():
|
326
|
+
queue_names = [self._queue_key(name) for name in names]
|
327
|
+
while True:
|
328
|
+
task = self.redis.blpop(queue_names, timeout=self.interval)
|
329
|
+
if task is not None:
|
330
|
+
_queue_key, task_id = task
|
331
|
+
task = self.get_task(task_id)
|
332
|
+
executor.submit(self.execute_task_locally, task)
|
333
|
+
else:
|
334
|
+
break
|
335
|
+
for name in names:
|
336
|
+
self.set_queue_listened(name)
|
337
|
+
time.sleep(self.interval)
|
338
|
+
|
339
|
+
def query(self, id: str, name: str, params: dict, timeout: Optional[float] = None,
|
340
|
+
policy: TaskExecutePolicy = TaskExecutePolicy.Auto, once: bool = False) -> Any:
|
341
|
+
"""Execute a task according to the specified policy.
|
342
|
+
|
343
|
+
This method provides a flexible way to execute tasks with different
|
344
|
+
strategies based on the specified policy.
|
345
|
+
|
346
|
+
Args:
|
347
|
+
id: The task ID.
|
348
|
+
name: The task name.
|
349
|
+
params: The task parameters.
|
350
|
+
timeout: Optional timeout override.
|
351
|
+
policy: The execution policy to use.
|
352
|
+
once: Whether to delete the result after getting it.
|
353
|
+
|
354
|
+
Returns:
|
355
|
+
The result of the task.
|
356
|
+
|
357
|
+
Raises:
|
358
|
+
Exception: Any exception raised during task execution.
|
359
|
+
"""
|
360
|
+
t = self.build_task(id, name, params)
|
361
|
+
match policy:
|
362
|
+
case TaskExecutePolicy.Local:
|
363
|
+
return self.execute_task_locally(t, timeout)
|
364
|
+
case TaskExecutePolicy.Remote:
|
365
|
+
return self.execute_task_remotely(t)
|
366
|
+
case TaskExecutePolicy.LocalFirst:
|
367
|
+
try:
|
368
|
+
return self.execute_task_locally(t, timeout)
|
369
|
+
except Exception as e:
|
370
|
+
logger.exception(f'Failed to execute task {t.id} in {t.name} locally: {e}')
|
371
|
+
return self.execute_task_remotely(t, timeout)
|
372
|
+
case TaskExecutePolicy.RemoteFirst:
|
373
|
+
try:
|
374
|
+
return self.execute_task_remotely(t, timeout)
|
375
|
+
except Exception as e:
|
376
|
+
logger.exception(f'Failed to execute task {t.id} in {t.name} remotely: {e}')
|
377
|
+
return self.execute_task_locally(t, timeout)
|
378
|
+
case TaskExecutePolicy.Auto:
|
379
|
+
if self.redis.exists(self._queue_listen_name(name)):
|
380
|
+
return self.execute_task_remotely(t, timeout)
|
381
|
+
else:
|
382
|
+
return self.execute_task_locally(t, timeout)
|
@@ -0,0 +1,229 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: redis-allocator
|
3
|
+
Version: 0.0.1
|
4
|
+
Summary: Redis-based resource allocation system.
|
5
|
+
Home-page: https://github.com/invoker-bot/RedisAllocator-python
|
6
|
+
Author: Invoker Bot
|
7
|
+
Author-email: invoker-bot@outlook.com
|
8
|
+
License: MIT
|
9
|
+
Classifier: Development Status :: 4 - Beta
|
10
|
+
Classifier: Intended Audience :: Developers
|
11
|
+
Classifier: Programming Language :: Python :: 3.10
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
13
|
+
Classifier: Operating System :: OS Independent
|
14
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
15
|
+
Requires-Python: >=3.10
|
16
|
+
Description-Content-Type: text/markdown
|
17
|
+
License-File: LICENSE
|
18
|
+
Requires-Dist: redis>=5.0.0
|
19
|
+
Provides-Extra: test
|
20
|
+
Requires-Dist: pytest>=7.4.3; extra == "test"
|
21
|
+
Requires-Dist: pytest-cov>=4.1.0; extra == "test"
|
22
|
+
Requires-Dist: pytest-mock>=3.12.0; extra == "test"
|
23
|
+
Requires-Dist: fakeredis[lua]>=2.20.1; extra == "test"
|
24
|
+
Requires-Dist: flake8>=6.1.0; extra == "test"
|
25
|
+
Requires-Dist: freezegun>=1.4.0; extra == "test"
|
26
|
+
Dynamic: author
|
27
|
+
Dynamic: author-email
|
28
|
+
Dynamic: classifier
|
29
|
+
Dynamic: description
|
30
|
+
Dynamic: description-content-type
|
31
|
+
Dynamic: home-page
|
32
|
+
Dynamic: license
|
33
|
+
Dynamic: license-file
|
34
|
+
Dynamic: provides-extra
|
35
|
+
Dynamic: requires-dist
|
36
|
+
Dynamic: requires-python
|
37
|
+
Dynamic: summary
|
38
|
+
|
39
|
+
# RedisAllocator
|
40
|
+
|
41
|
+
## Project Overview
|
42
|
+
|
43
|
+
RedisAllocator is an efficient Redis-based distributed memory allocation system. This system simulates traditional memory allocation mechanisms but implements them in a distributed environment, using Redis as the underlying storage and coordination tool.
|
44
|
+
|
45
|
+
> **Note**: Currently, RedisAllocator only supports single Redis instance deployments. For Redis cluster environments, we recommend using RedLock for distributed locking operations.
|
46
|
+
|
47
|
+
### Core Features
|
48
|
+
|
49
|
+
- **Distributed Locking**: Provides robust distributed locking mechanisms to ensure data consistency in concurrent environments
|
50
|
+
- **Resource Allocation**: Implements a distributed resource allocation system with support for:
|
51
|
+
- Priority-based distribution
|
52
|
+
- Soft binding
|
53
|
+
- Garbage collection
|
54
|
+
- Health checking
|
55
|
+
- **Task Management**: Implements a distributed task queue system for efficient task processing across multiple workers
|
56
|
+
- **Object Allocation**: Supports allocation of resources with priority-based distribution and soft binding
|
57
|
+
- **Health Checking**: Monitors the health of distributed instances and automatically handles unhealthy resources
|
58
|
+
- **Garbage Collection**: Automatically identifies and reclaims unused resources, optimizing memory usage
|
59
|
+
|
60
|
+
|
61
|
+
## Installation
|
62
|
+
|
63
|
+
```bash
|
64
|
+
pip install redis-allocator
|
65
|
+
```
|
66
|
+
|
67
|
+
## Quick Start
|
68
|
+
|
69
|
+
### Using RedisLock for Distributed Locking
|
70
|
+
|
71
|
+
```python
|
72
|
+
from redis import Redis
|
73
|
+
from redis_allocator import RedisLock
|
74
|
+
|
75
|
+
# Initialize Redis client
|
76
|
+
redis = Redis(host='localhost', port=6379)
|
77
|
+
|
78
|
+
# Create a RedisLock instance
|
79
|
+
lock = RedisLock(redis, "myapp", "resource-lock")
|
80
|
+
|
81
|
+
# Acquire a lock
|
82
|
+
if lock.lock("resource-123", timeout=60):
|
83
|
+
try:
|
84
|
+
# Perform operations with the locked resource
|
85
|
+
print("Resource locked successfully")
|
86
|
+
finally:
|
87
|
+
# Release the lock when done
|
88
|
+
lock.unlock("resource-123")
|
89
|
+
```
|
90
|
+
|
91
|
+
### Using RedisAllocator for Resource Management
|
92
|
+
|
93
|
+
```python
|
94
|
+
from redis import Redis
|
95
|
+
from redis_allocator import RedisAllocator
|
96
|
+
|
97
|
+
# Initialize Redis client
|
98
|
+
redis = Redis(host='localhost', port=6379)
|
99
|
+
|
100
|
+
# Create a RedisAllocator instance
|
101
|
+
allocator = RedisAllocator(
|
102
|
+
redis,
|
103
|
+
prefix='myapp',
|
104
|
+
suffix='allocator',
|
105
|
+
shared=False # Whether resources can be shared
|
106
|
+
)
|
107
|
+
|
108
|
+
# Add resources to the pool
|
109
|
+
allocator.extend(['resource-1', 'resource-2', 'resource-3'])
|
110
|
+
|
111
|
+
# Allocate a resource key (returns only the key)
|
112
|
+
key = allocator.malloc_key(timeout=120)
|
113
|
+
if key:
|
114
|
+
try:
|
115
|
+
# Use the allocated resource
|
116
|
+
print(f"Allocated resource: {key}")
|
117
|
+
finally:
|
118
|
+
# Free the resource when done
|
119
|
+
allocator.free_keys(key)
|
120
|
+
|
121
|
+
# Allocate a resource with object (returns a RedisAllocatorObject)
|
122
|
+
allocated_obj = allocator.malloc(timeout=120)
|
123
|
+
if allocated_obj:
|
124
|
+
try:
|
125
|
+
# The key is available as a property
|
126
|
+
print(f"Allocated resource: {allocated_obj.key}")
|
127
|
+
|
128
|
+
# Update the resource's lock timeout
|
129
|
+
allocated_obj.update(timeout=60)
|
130
|
+
finally:
|
131
|
+
# Free the resource when done
|
132
|
+
allocator.free(allocated_obj)
|
133
|
+
|
134
|
+
# Using soft binding (associates a name with a resource)
|
135
|
+
allocator.update_soft_bind("worker-1", "resource-1")
|
136
|
+
# Later...
|
137
|
+
allocator.unbind_soft_bind("worker-1")
|
138
|
+
|
139
|
+
# Garbage collection (reclaims unused resources)
|
140
|
+
allocator.gc(count=10) # Check 10 items for cleanup
|
141
|
+
```
|
142
|
+
|
143
|
+
### Using RedisTaskQueue for Distributed Task Processing
|
144
|
+
|
145
|
+
```python
|
146
|
+
from redis import Redis
|
147
|
+
from redis_allocator import RedisTaskQueue, TaskExecutePolicy
|
148
|
+
import json
|
149
|
+
|
150
|
+
# Initialize Redis client
|
151
|
+
redis = Redis(host='localhost', port=6379)
|
152
|
+
|
153
|
+
# Process tasks in a worker
|
154
|
+
def process_task(task):
|
155
|
+
# Process the task (task is a RedisTask object)
|
156
|
+
# You can access task.id, task.name, task.params
|
157
|
+
# You can update progress with task.update(current, total)
|
158
|
+
return json.dumps({"result": "processed"})
|
159
|
+
|
160
|
+
|
161
|
+
# Create a task queue
|
162
|
+
task_queue = RedisTaskQueue(redis, "myapp", task_fn=process_task)
|
163
|
+
|
164
|
+
# Submit a task with query method
|
165
|
+
result = task_queue.query(
|
166
|
+
id="task-123",
|
167
|
+
name="example-task",
|
168
|
+
params={"input": "data"},
|
169
|
+
timeout=300, # Optional timeout in seconds
|
170
|
+
policy=TaskExecutePolicy.Auto, # Execution policy
|
171
|
+
once=False # Whether to delete the result after getting it
|
172
|
+
)
|
173
|
+
|
174
|
+
# Start listening for tasks
|
175
|
+
task_queue.listen(
|
176
|
+
names=["example-task"], # List of task names to listen for
|
177
|
+
workers=128, # Number of worker threads
|
178
|
+
event=None # Optional event to signal when to stop listening
|
179
|
+
)
|
180
|
+
```
|
181
|
+
|
182
|
+
## Modules
|
183
|
+
|
184
|
+
RedisAllocator consists of several modules, each providing specific functionality:
|
185
|
+
|
186
|
+
- **lock.py**: Provides `RedisLock` and `RedisLockPool` for distributed locking mechanisms
|
187
|
+
- **task_queue.py**: Implements `RedisTaskQueue` for distributed task processing
|
188
|
+
- **allocator.py**: Contains `RedisAllocator` and `RedisThreadHealthChecker` for resource allocation
|
189
|
+
|
190
|
+
|
191
|
+
## Roadmap
|
192
|
+
|
193
|
+
### Phase 1 (Completed)
|
194
|
+
- [x] Distributed lock mechanism implementation
|
195
|
+
- [x] Task queue processing system
|
196
|
+
- [x] Resource allocation and management
|
197
|
+
- [x] Basic health checking and monitoring
|
198
|
+
- [x] Object allocation with serialization
|
199
|
+
- [x] Unit tests for core components
|
200
|
+
|
201
|
+
### Phase 2 (In Progress)
|
202
|
+
- [ ] Advanced sharding implementation
|
203
|
+
- [ ] Performance optimization and benchmarking
|
204
|
+
- [ ] Documentation improvement
|
205
|
+
- [ ] Enhanced error handling and recovery
|
206
|
+
|
207
|
+
### Phase 3 (Planned)
|
208
|
+
- [ ] Advanced garbage collection strategies
|
209
|
+
- [ ] Redis cluster support
|
210
|
+
- [ ] Fault recovery mechanisms
|
211
|
+
- [ ] Automated resource scaling
|
212
|
+
|
213
|
+
### Phase 4 (Future)
|
214
|
+
- [ ] API stability and backward compatibility
|
215
|
+
- [ ] Performance monitoring and tuning tools
|
216
|
+
- [ ] Advanced features (transaction support, data compression, etc.)
|
217
|
+
- [ ] Production environment validation and case studies
|
218
|
+
|
219
|
+
## Contributing
|
220
|
+
|
221
|
+
Contributions and suggestions are welcome! Please see [CONTRIBUTING.md](CONTRIBUTING.md) for more information.
|
222
|
+
|
223
|
+
## License
|
224
|
+
|
225
|
+
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
|
226
|
+
|
227
|
+
## Contact
|
228
|
+
|
229
|
+
For questions or suggestions, please contact us through GitHub Issues.
|
@@ -0,0 +1,14 @@
|
|
1
|
+
redis_allocator/__init__.py,sha256=x4koK9zx8W1wWnrwt72tPve-LwKdhKbKxTGgacZ8q84,800
|
2
|
+
redis_allocator/allocator.py,sha256=qqxKOAPG_QgTp3gxvi8O443dPC9DGr3_iM5nCP3lidM,21831
|
3
|
+
redis_allocator/lock.py,sha256=HSCXyBhO3qBMylvByqskekHp6CMgqwbVcwMyP77pBmM,24768
|
4
|
+
redis_allocator/task_queue.py,sha256=NalI77hk1u8X-JBWpGoZFv5nJXzfem5fDthiSa9cgFg,13596
|
5
|
+
redis_allocator-0.0.1.dist-info/licenses/LICENSE,sha256=NiCCQOo0TQ_djjGpoCphsT59Bgu2sLOk8rVwIRxrqQE,1089
|
6
|
+
tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
7
|
+
tests/conftest.py,sha256=gsMBUrUTGxD-ki29rWrtJCZZymb7n7biRV6lWcd7fWo,1124
|
8
|
+
tests/test_allocator.py,sha256=F-GHbgWGss5CvbUsQt1AvLib9V0Rxyih1HisHWFi6jU,18608
|
9
|
+
tests/test_lock.py,sha256=wA2po5vSgeoyOeH9-bTF3fxVwh-CmR-CSH-ZThVHisY,35294
|
10
|
+
tests/test_task_queue.py,sha256=pohhYpw9RIPJma9wQPEF9r4pKkK__PZDd6Y10mEPtjs,33071
|
11
|
+
redis_allocator-0.0.1.dist-info/METADATA,sha256=nSxDu1WR1H6NJGdEu3eoVkPivnzEiM04DbVxk5VmLss,7465
|
12
|
+
redis_allocator-0.0.1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
|
13
|
+
redis_allocator-0.0.1.dist-info/top_level.txt,sha256=0hXzU7sK5FCeSolTEYxThOt3HOybnwaXv1FLRJvHVgI,22
|
14
|
+
redis_allocator-0.0.1.dist-info/RECORD,,
|
@@ -0,0 +1,21 @@
|
|
1
|
+
MIT License
|
2
|
+
|
3
|
+
Copyright (c) 2025 Invoker Bot
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
13
|
+
copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
21
|
+
SOFTWARE.
|
tests/__init__.py
ADDED
File without changes
|