redis-allocator 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- redis_allocator/__init__.py +28 -0
- redis_allocator/allocator.py +601 -0
- redis_allocator/lock.py +682 -0
- redis_allocator/task_queue.py +382 -0
- redis_allocator-0.0.1.dist-info/METADATA +229 -0
- redis_allocator-0.0.1.dist-info/RECORD +14 -0
- redis_allocator-0.0.1.dist-info/WHEEL +5 -0
- redis_allocator-0.0.1.dist-info/licenses/LICENSE +21 -0
- redis_allocator-0.0.1.dist-info/top_level.txt +2 -0
- tests/__init__.py +0 -0
- tests/conftest.py +46 -0
- tests/test_allocator.py +525 -0
- tests/test_lock.py +851 -0
- tests/test_task_queue.py +778 -0
tests/test_task_queue.py
ADDED
@@ -0,0 +1,778 @@
"""Tests for the RedisTaskQueue class."""
import time
import pytest
from redis import Redis
import threading
import datetime
from freezegun import freeze_time
from redis_allocator.task_queue import RedisTaskQueue, TaskExecutePolicy, RedisTask


@pytest.fixture
def task_queue(redis_client: Redis):
    """Create a RedisTaskQueue instance for testing."""
    return RedisTaskQueue(
        redis_client,
        'test',
        'task-queue',
        task_fn=lambda task: f"result-{task.id}"
    )


class TestRedisTaskQueue:
    """Tests for the RedisTaskQueue class."""

    def test_task_creation(self, task_queue):
        """Test creating a task."""
        task = task_queue.build_task("task1", "test", {"param1": "value1"})
        assert task.id == "task1"
        assert task.name == "test"
        assert task.params == {"param1": "value1"}
        assert task.result is None
        assert task.error is None
        assert task._save is not None

    def test_set_and_get_task(self, task_queue, redis_client):
        """Test setting and getting a task."""
        # Create and set a task
        task = task_queue.build_task("task1", "test", {"param1": "value1"})
        task_id = task_queue.set_task(task)

        # Mock the pickle and base64 operations to avoid serialization issues
        task_copy = task_queue.build_task("task1", "test", {"param1": "value1"})

        # Verify the task was saved
        assert task_id == "task1"
        assert redis_client.exists(task_queue._result_key("task1"))

        # Patch the get_task method to return our mocked task
        original_get_task = task_queue.get_task
        try:
            task_queue.get_task = lambda task_id, once=False: task_copy

            # Get the task back
            retrieved_task = task_queue.get_task("task1")
            assert retrieved_task.id == "task1"
            assert retrieved_task.name == "test"
            assert retrieved_task.params == {"param1": "value1"}
        finally:
            # Restore original method
            task_queue.get_task = original_get_task

    def test_has_task(self, task_queue, redis_client):
        """Test checking if a task exists."""
        # Create and set a task
        task = task_queue.build_task("task1", "test", {"param1": "value1"})
        task_queue.set_task(task)

        # Verify task exists
        assert task_queue.has_task("task1") is True

        # Verify non-existent task doesn't exist
        assert task_queue.has_task("task2") is False

    def test_local_execution(self, task_queue):
        """Test executing a task locally."""
        task = task_queue.build_task("task1", "test", {"param1": "value1"})
        result = task_queue.execute_task_locally(task)

        assert result == "result-task1"
        assert task.result == "result-task1"
        assert task.error is None

    def test_local_execution_with_error(self, task_queue, mocker):
        """Test local execution with an error."""
        # Set up a task that will fail
        task = task_queue.build_task("error_task", "test", {"param1": "value1"})

        # Mock the task_fn to raise an exception
        error = ValueError("Test error")
        mocker.patch.object(task_queue, 'task_fn', side_effect=error)

        # Execute the task and verify it fails properly
        with pytest.raises(ValueError, match="Test error"):
            task_queue.execute_task_locally(task)

        # Verify the error was captured in the task
        assert task.error == error
        assert task.result is None

    def test_remote_execution(self, task_queue, redis_client, mocker):
        """Test remote task execution."""
        # Create a task
        task = task_queue.build_task("task2", "test", {"param1": "value2"})

        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Execute the task and save the result
                    result = f"result-{task.id}"
                    task.result = result
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        try:
            # Replace execute_task_remotely with our own implementation
            def mock_execute_remotely(task, timeout=None, once=False):
                # Push the task to Redis queue
                task_queue.set_task(task)
                redis_client.rpush(task_queue._queue_key(task.name), task.id)

                # Wait for worker to process it (with a reasonable timeout)
                if not processed_event.wait(timeout=3):
                    raise TimeoutError("Worker did not process task within timeout")

                # Get the processed task with result
                processed_task = task_queue.get_task(task.id, once)
                return processed_task.result

            # Apply the mock
            mocker.patch.object(task_queue, 'execute_task_remotely', side_effect=mock_execute_remotely)

            # Execute the task remotely
            result = task_queue.execute_task_remotely(task, timeout=3)

            # Verify the result
            assert result == f"result-{task.id}"

        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_remote_execution_with_error(self, task_queue, redis_client):
        """Test remote execution with an error."""
        # Create a task that will fail remotely
        task = task_queue.build_task("error_remote", "test", {"param1": "value3"})

        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it with an error
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Set an error on the task
                    task.error = ValueError("Remote error")
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        # Execute the task remotely and expect an error
        try:
            with pytest.raises(ValueError, match="Remote error"):
                task_queue.execute_task_remotely(task, timeout=3)

            # Verify the thread processed the task
            assert processed_event.is_set()
        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_remote_execution_timeout(self, task_queue, redis_client, mocker):
        """Test remote execution with timeout."""
        # Create a task
        task = task_queue.build_task("timeout_task", "test", {"param1": "value4"})

        # Set timeout to a very small value to ensure it times out
        timeout = 0.1

        # Mock time.sleep to avoid actual sleeping
        time_sleep_mock = mocker.patch('time.sleep')

        # Mock get_task to always return None (simulating no worker processing the task)
        mocker.patch.object(task_queue, 'get_task', return_value=None)

        # Use a task that will not get processed by any worker
        # Execute the task remotely with a small timeout and expect a timeout
        with pytest.raises(TimeoutError):
            task_queue.execute_task_remotely(task, timeout=timeout)

        # Verify time.sleep was called at least once
        time_sleep_mock.assert_called()

    def test_query_with_local_policy(self, task_queue):
        """Test query with local execution policy."""
        # Call query with local policy
        result = task_queue.query("task3", "test", {}, policy=TaskExecutePolicy.Local)

        # Verify result
        assert result == "result-task3"

    def test_query_with_remote_policy(self, task_queue, redis_client, mocker):
        """Test query with remote execution policy."""
        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Execute the task and save the result
                    result = f"result-{task.id}"
                    task.result = result
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        try:
            # Replace execute_task_remotely with our own implementation
            def mock_execute_remotely(task, timeout=None, once=False):
                # Push the task to Redis queue
                task_queue.set_task(task)
                redis_client.rpush(task_queue._queue_key(task.name), task.id)

                # Wait for worker to process it (with a reasonable timeout)
                if not processed_event.wait(timeout=3):
                    raise TimeoutError("Worker did not process task within timeout")

                # Get the processed task with result
                processed_task = task_queue.get_task(task.id, once)
                return processed_task.result

            # Apply the mock
            mocker.patch.object(task_queue, 'execute_task_remotely', side_effect=mock_execute_remotely)

            # Call query with remote policy
            result = task_queue.query("task4", "test", {}, policy=TaskExecutePolicy.Remote, timeout=3)

            # Verify result
            assert result == "result-task4"
        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_query_with_local_first_policy(self, task_queue):
        """Test query with local-first execution policy."""
        # Call query with local-first policy
        result = task_queue.query("task5", "test", {}, policy=TaskExecutePolicy.LocalFirst)

        # Verify result - should execute locally
        assert result == "result-task5"

    def test_query_with_local_first_policy_fallback(self, task_queue, redis_client, mocker):
        """Test query with local-first policy falling back to remote."""
        # We'll only mock the task_fn to simulate a local execution failure
        error = ValueError("Local error")
        mocker.patch.object(task_queue, 'task_fn', side_effect=error)

        # Mock logger.exception to prevent actual logging
        mocker.patch('redis_allocator.task_queue.logger.exception')

        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Execute the task and save the result
                    task.error = None  # Clear the error from local execution
                    task.result = f"remote-result-{task.id}"
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        try:
            # Make the execute_task_remotely method faster by skipping actual sleep
            def mocked_execute_remotely(task, timeout=None, once=False):
                # Push to queue but skip the actual sleeping
                redis_client.rpush(task_queue._queue_key(task.name), task.id)

                # Wait a moment for the worker thread to process
                processed_event.wait(timeout=1)

                # Get the task with result
                result = task_queue.get_task(task.id, once)
                if result is not None and result.result is not None:
                    return result.result
                raise TimeoutError(f'Task {task.id} in {task.name} has expired')

            # Replace with mock
            mocker.patch.object(task_queue, 'execute_task_remotely', side_effect=mocked_execute_remotely)

            # Call query with local-first policy (which should fail locally and fall back to remote)
            result = task_queue.query("task6", "test", {}, policy=TaskExecutePolicy.LocalFirst, timeout=3)

            # Verify the thread processed the task
            assert processed_event.is_set()

            # Verify result from remote execution
            assert result == "remote-result-task6"
        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_query_with_remote_first_policy(self, task_queue, redis_client, mocker):
        """Test query with remote-first execution policy."""
        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Execute the task and save the result
                    task.result = f"remote-result-{task.id}"
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        try:
            # Replace execute_task_remotely with our own implementation
            def mock_execute_remotely(task, timeout=None, once=False):
                # Push the task to Redis queue
                task_queue.set_task(task)
                redis_client.rpush(task_queue._queue_key(task.name), task.id)

                # Wait for worker to process it (with a reasonable timeout)
                if not processed_event.wait(timeout=3):
                    raise TimeoutError("Worker did not process task within timeout")

                # Get the processed task with result
                processed_task = task_queue.get_task(task.id, once)
                return processed_task.result

            # Apply the mock
            mocker.patch.object(task_queue, 'execute_task_remotely', side_effect=mock_execute_remotely)

            # Call query with remote-first policy
            result = task_queue.query("task7", "test", {}, policy=TaskExecutePolicy.RemoteFirst, timeout=3)

            # Verify result
            assert result == "remote-result-task7"
        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_query_with_remote_first_policy_fallback(self, task_queue, redis_client, mocker):
        """Test query with remote-first policy falling back to local."""
        # We'll mock execute_task_remotely to fail
        # This simulates a remote execution failure more reliably
        mocker.patch.object(
            task_queue,
            'execute_task_remotely',
            side_effect=TimeoutError("No remote listeners available")
        )

        # Mock logger.exception to prevent actual logging
        mocker.patch('redis_allocator.task_queue.logger.exception')

        # Call query with remote-first policy
        # The remote execution should fail, and it will fall back to local
        result = task_queue.query("task8", "test", {}, policy=TaskExecutePolicy.RemoteFirst)

        # Verify local result (since remote failed)
        assert result == "result-task8"

    def test_query_with_auto_policy_with_listener(self, task_queue, redis_client, mocker):
        """Test query with auto policy when a listener exists."""
        # Set up a listener for the test queue
        task_queue.set_queue_listened("test")

        # Start a thread that will listen for the task and execute it
        # (Simulating a remote worker)
        processed_event = threading.Event()

        def process_task():
            # Get the task from the queue
            queue_key = task_queue._queue_key("test")
            task_data = redis_client.blpop([queue_key], timeout=1)
            if task_data:
                _queue_key, task_id = task_data
                task = task_queue.get_task(task_id)
                if task:
                    # Execute the task and save the result
                    task.result = f"remote-result-{task.id}"
                    task.save()
                    processed_event.set()

        # Start the worker thread
        worker_thread = threading.Thread(target=process_task)
        worker_thread.daemon = True
        worker_thread.start()

        try:
            # Replace execute_task_remotely with our own implementation
            def mock_execute_remotely(task, timeout=None, once=False):
                # Push the task to Redis queue
                task_queue.set_task(task)
                redis_client.rpush(task_queue._queue_key(task.name), task.id)

                # Wait for worker to process it (with a reasonable timeout)
                if not processed_event.wait(timeout=3):
                    raise TimeoutError("Worker did not process task within timeout")

                # Get the processed task with result
                processed_task = task_queue.get_task(task.id, once)
                return processed_task.result

            # Apply the mock
            mocker.patch.object(task_queue, 'execute_task_remotely', side_effect=mock_execute_remotely)

            # Call query with auto policy when a listener exists - should execute remotely
            result = task_queue.query("task9", "test", {}, policy=TaskExecutePolicy.Auto, timeout=3)

            # Verify result from remote execution
            assert result == "remote-result-task9"
        finally:
            # Make sure thread is done
            worker_thread.join(timeout=1)

    def test_query_with_auto_policy_without_listener(self, task_queue, redis_client):
        """Test query with auto policy when no listener exists."""
        # Make sure there is no active listener
        # Just don't call set_queue_listened()

        # Call query with auto policy when no listener exists - should execute locally
        result = task_queue.query("task10", "test", {}, policy=TaskExecutePolicy.Auto)

        # Verify result from local execution
        assert result == "result-task10"

    def test_task_update_progress(self, task_queue, mocker):
        """Test updating task progress."""
        # Create a task
        task = task_queue.build_task("task11", "test", {"param1": "value1"})

        # Fixed time for consistent testing
        fixed_time_str = "2022-04-15T10:00:00Z"  # Use UTC time
        dt = datetime.datetime.fromisoformat(fixed_time_str.replace('Z', '+00:00'))

        # Use freezegun to set a fixed time
        with freeze_time(dt):
            # Update progress
            task.update(50.0, 100.0)

            # Verify progress was updated
            assert task.current_progress == 50.0
            assert task.total_progress == 100.0
            assert task.update_progress_time == dt.timestamp()

    def test_task_update_progress_expired(self, task_queue, mocker):
        """Test updating progress for an expired task."""
        # Current time for reference
        current_time = time.time()

        # Create a task with expiry in the past (100 seconds ago)
        with freeze_time(datetime.datetime.fromtimestamp(current_time - 200)):  # Create a time point far in the past
            # Create task with expiry = current_time - 100 (which is expired from current perspective)
            task = RedisTask(
                id="expired_task",
                name="test",
                params={},
                expiry=current_time - 100,  # Set expiry to be 100 seconds before current time
                _save=lambda: None
            )

        # Move to current time and try to update the expired task
        with freeze_time(datetime.datetime.fromtimestamp(current_time)):
            # Try to update progress and expect TimeoutError
            with pytest.raises(TimeoutError, match=f"Task {task.id} in {task.name} has expired"):
                task.update(50.0, 100.0)

    def test_listen_single_iteration(self, task_queue, redis_client):
        """Test a single iteration of the listen method without running the actual loop."""
        # Set up task queue names
        names = ['test_queue']
        queue_key = task_queue._queue_key(names[0])

        # Create a task to be processed
        task = task_queue.build_task("listen_task", "test_queue", {"param": "value"})

        # Save the task
        task_queue.set_task(task)

        # Directly push task ID to queue
        redis_client.rpush(queue_key, task.id)

        # Manually run one iteration of the listen loop
        # This avoids the infinite loop in the actual listen method
        queue_names = [task_queue._queue_key(name) for name in names]

        # Process task
        task_data = redis_client.blpop(queue_names, timeout=task_queue.interval)
        assert task_data is not None

        _queue_key, task_id = task_data
        # Check the type of task_id and decode it if necessary
        if isinstance(task_id, bytes):
            task_id = task_id.decode('utf-8')
        assert task_id == "listen_task"

        # Get the task
        task = task_queue.get_task(task_id)
        assert task is not None

        # Execute task locally to verify
        result = task_queue.execute_task_locally(task)
        assert result == "result-listen_task"

        # Mark queues as listened
        for name in names:
            task_queue.set_queue_listened(name)

        # Verify that the queue is marked as listened
        assert redis_client.exists(task_queue._queue_listen_name(names[0])) > 0

    def test_listen_non_blocking(self, task_queue, redis_client, mocker):
        """Test the listen method using a separate thread to avoid blocking the test."""
        # Set up task queue names
        names = ['test_queue']

        # Create a task to be processed
        task = task_queue.build_task("listen_task", "test_queue", {"param": "value"})

        # Patch methods to avoid actual Redis operations
        mocker.patch.object(task_queue, 'set_task')

        # Mock blpop to return our task ID first, then always return None
        # This avoids StopIteration exception in the thread
        def mock_blpop_side_effect(*args, **kwargs):
            # Use an event to make sure we only return the task once
            if not hasattr(mock_blpop_side_effect, 'called'):
                mock_blpop_side_effect.called = True
                return (task_queue._queue_key('test_queue'), b"listen_task")
            return None

        mocker.patch.object(redis_client, 'blpop', side_effect=mock_blpop_side_effect)

        # Mock get_task to return our task
        mocker.patch.object(task_queue, 'get_task', return_value=task)

        # Track when task is executed
        task_executed = threading.Event()

        def mock_execute_task(t):
            # Mark that the task was executed and return a result
            task_executed.set()
            return "result"

        mocker.patch.object(task_queue, 'execute_task_locally', side_effect=mock_execute_task)

        # Use freezegun to handle the sleep in the listen method
        current_time = time.time()
        with freeze_time(datetime.datetime.fromtimestamp(current_time)) as frozen_time:
            # Replace time.sleep to advance the frozen time
            original_sleep = time.sleep

            def advance_time(seconds):
                frozen_time.tick(datetime.timedelta(seconds=seconds))

            time.sleep = advance_time

            try:
                # Run listen in a separate thread with a stop event
                stop_event = threading.Event()

                # Start a thread that will call listen for a short time
                listen_thread = threading.Thread(
                    target=lambda: task_queue.listen(names, event=stop_event, workers=1)
                )
                listen_thread.daemon = True  # Ensure thread doesn't block test exit
                listen_thread.start()

                # Wait for the task to be executed or timeout
                executed = task_executed.wait(timeout=5)

                # Stop the listen thread
                stop_event.set()
                listen_thread.join(timeout=1)

                # Verify task was processed
                assert executed, "Task execution timed out"
            finally:
                # Restore original time.sleep
                time.sleep = original_sleep

    def test_executor_context(self, task_queue):
        """Test the ThreadPoolExecutor context manager."""
        with task_queue._executor_context(max_workers=2) as executor:
            assert executor is not None
            # Verify we can submit tasks
            future = executor.submit(lambda: "test")
            assert future.result() == "test"

    def test_queue_name(self, task_queue):
        """Test the _queue_name method."""
        # Create a valid queue key first
        name = "test_queue"
        queue_key = task_queue._queue_key(name)

        # Extract the name back
        extracted_name = task_queue._queue_name(queue_key)

        # Verify extraction works
        assert extracted_name == name

        # Test with invalid queue key
        with pytest.raises(AssertionError):
            task_queue._queue_name("invalid_key")

    def test_get_task_nonexistent(self, task_queue):
        """Test getting a task that doesn't exist."""
        # Try to get a task with an ID that doesn't exist
        result = task_queue.get_task("nonexistent_task")

        # Verify that None is returned
        assert result is None

        # Try with once=True
        result = task_queue.get_task("nonexistent_task", once=True)
        assert result is None

    def test_execute_task_remotely_with_task_error(self, task_queue, redis_client, mocker):
        """Test execute_task_remotely when task has error attribute set."""
        # Create a task
        task = task_queue.build_task("error_task", "test", {"param": "value"})

        # Create an error for the task
        task_error = ValueError("Task error")

        # Mock time.sleep to avoid actual waiting
        mocker.patch('time.sleep')

        # Mock get_task to return a task with error attribute set
        def mock_get_task(task_id, once=False):
            task = RedisTask(
                id=task_id,
                name="test",
                params={"param": "value"},
                expiry=time.time() + 60,
                error=task_error,
                _save=lambda: None
            )
            return task

        mocker.patch.object(task_queue, 'get_task', side_effect=mock_get_task)

        # Execute the task and expect the error to be raised
        with pytest.raises(ValueError, match="Task error"):
            task_queue.execute_task_remotely(task)

    def test_execute_task_remotely_timeout_logic(self, task_queue, redis_client, mocker):
        """Test the detailed timeout logic in execute_task_remotely."""
        # Create a task
        task = task_queue.build_task("timeout_task", "test", {"param": "value"})
        task_queue.interval = 0.5  # Set interval to 0.5 seconds for testing

        # Mock get_task to always return None (simulating no worker processing the task)
        mocker.patch.object(task_queue, 'get_task', return_value=None)

        # Mock time.sleep to count calls and track timeout reduction
        sleep_call_count = 0
        interval_sum = 0

        def mock_sleep(seconds):
            nonlocal sleep_call_count, interval_sum
            sleep_call_count += 1
            interval_sum += seconds
            # Don't actually sleep

        mocker.patch('time.sleep', side_effect=mock_sleep)

        # Set a small timeout to make the test fast
        timeout = 2.0  # Should allow for exactly 4 iterations with interval=0.5

        # Execute the task remotely with timeout and expect TimeoutError
        with pytest.raises(TimeoutError) as exc_info:
            task_queue.execute_task_remotely(task, timeout=timeout)

        # Verify error message
        assert f"Task {task.id} in {task.name} has expired" in str(exc_info.value)

        # Verify sleep was called
        # In the implementation, the loop continues while timeout >= 0, so we get 5 iterations
        # with timeout=2.0 and interval=0.5: [2.0, 1.5, 1.0, 0.5, 0.0]
        expected_calls = int(timeout / task_queue.interval) + 1  # +1 for the last iteration when timeout=0
        assert sleep_call_count == expected_calls

        # Verify that the total time slept approximately equals the timeout
        assert abs(interval_sum - timeout) <= task_queue.interval

    def test_execute_task_remotely_direct_timeout(self, task_queue, redis_client, mocker):
        """Test execute_task_remotely method's timeout logic directly."""
        # Create a task
        task = task_queue.build_task("timeout_task2", "test", {"param": "value"})

        # Mock redis and get_task to simulate the behavior
        mocker.patch.object(task_queue, 'get_task', return_value=None)

        # Mock time.sleep to avoid actual waiting
        mocker.patch('time.sleep')

        # Force timeout to be exactly 0 after one iteration
        # This will trigger the exact lines we want to test
        task_queue.interval = 1.0
        timeout = 1.0  # Will become 0 after one iteration, then -1 after another

        # Execute task and expect timeout
        with pytest.raises(TimeoutError) as exc_info:
            task_queue.execute_task_remotely(task, timeout=timeout)

        # Verify timeout error
        assert str(exc_info.value) == f"Task {task.id} in {task.name} has expired"