jettask 0.2.19__py3-none-any.whl → 0.2.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jettask/__init__.py +12 -3
- jettask/cli.py +314 -228
- jettask/config/__init__.py +9 -1
- jettask/config/config.py +245 -0
- jettask/config/env_loader.py +381 -0
- jettask/config/lua_scripts.py +158 -0
- jettask/config/nacos_config.py +132 -5
- jettask/core/__init__.py +1 -1
- jettask/core/app.py +1573 -666
- jettask/core/app_importer.py +33 -16
- jettask/core/container.py +532 -0
- jettask/core/task.py +1 -4
- jettask/core/unified_manager_base.py +2 -2
- jettask/executor/__init__.py +38 -0
- jettask/executor/core.py +625 -0
- jettask/executor/executor.py +338 -0
- jettask/executor/orchestrator.py +290 -0
- jettask/executor/process_entry.py +638 -0
- jettask/executor/task_executor.py +317 -0
- jettask/messaging/__init__.py +68 -0
- jettask/messaging/event_pool.py +2188 -0
- jettask/messaging/reader.py +519 -0
- jettask/messaging/registry.py +266 -0
- jettask/messaging/scanner.py +369 -0
- jettask/messaging/sender.py +312 -0
- jettask/persistence/__init__.py +118 -0
- jettask/persistence/backlog_monitor.py +567 -0
- jettask/{backend/data_access.py → persistence/base.py} +58 -57
- jettask/persistence/consumer.py +315 -0
- jettask/{core → persistence}/db_manager.py +23 -22
- jettask/persistence/maintenance.py +81 -0
- jettask/persistence/message_consumer.py +259 -0
- jettask/{backend/namespace_data_access.py → persistence/namespace.py} +66 -98
- jettask/persistence/offline_recovery.py +196 -0
- jettask/persistence/queue_discovery.py +215 -0
- jettask/persistence/task_persistence.py +218 -0
- jettask/persistence/task_updater.py +583 -0
- jettask/scheduler/__init__.py +2 -2
- jettask/scheduler/loader.py +6 -5
- jettask/scheduler/run_scheduler.py +1 -1
- jettask/scheduler/scheduler.py +7 -7
- jettask/scheduler/{unified_scheduler_manager.py → scheduler_coordinator.py} +18 -13
- jettask/task/__init__.py +16 -0
- jettask/{router.py → task/router.py} +26 -8
- jettask/task/task_center/__init__.py +9 -0
- jettask/task/task_executor.py +318 -0
- jettask/task/task_registry.py +291 -0
- jettask/test_connection_monitor.py +73 -0
- jettask/utils/__init__.py +31 -1
- jettask/{monitor/run_backlog_collector.py → utils/backlog_collector.py} +1 -1
- jettask/utils/db_connector.py +1629 -0
- jettask/{db_init.py → utils/db_init.py} +1 -1
- jettask/utils/rate_limit/__init__.py +30 -0
- jettask/utils/rate_limit/concurrency_limiter.py +665 -0
- jettask/utils/rate_limit/config.py +145 -0
- jettask/utils/rate_limit/limiter.py +41 -0
- jettask/utils/rate_limit/manager.py +269 -0
- jettask/utils/rate_limit/qps_limiter.py +154 -0
- jettask/utils/rate_limit/task_limiter.py +384 -0
- jettask/utils/serializer.py +3 -0
- jettask/{monitor/stream_backlog_monitor.py → utils/stream_backlog.py} +14 -6
- jettask/utils/time_sync.py +173 -0
- jettask/webui/__init__.py +27 -0
- jettask/{api/v1 → webui/api}/alerts.py +1 -1
- jettask/{api/v1 → webui/api}/analytics.py +2 -2
- jettask/{api/v1 → webui/api}/namespaces.py +1 -1
- jettask/{api/v1 → webui/api}/overview.py +1 -1
- jettask/{api/v1 → webui/api}/queues.py +3 -3
- jettask/{api/v1 → webui/api}/scheduled.py +1 -1
- jettask/{api/v1 → webui/api}/settings.py +1 -1
- jettask/{api.py → webui/app.py} +253 -145
- jettask/webui/namespace_manager/__init__.py +10 -0
- jettask/{multi_namespace_consumer.py → webui/namespace_manager/multi.py} +69 -22
- jettask/{unified_consumer_manager.py → webui/namespace_manager/unified.py} +1 -1
- jettask/{run.py → webui/run.py} +2 -2
- jettask/{services → webui/services}/__init__.py +1 -3
- jettask/{services → webui/services}/overview_service.py +34 -16
- jettask/{services → webui/services}/queue_service.py +1 -1
- jettask/{backend → webui/services}/queue_stats_v2.py +1 -1
- jettask/{services → webui/services}/settings_service.py +1 -1
- jettask/worker/__init__.py +53 -0
- jettask/worker/lifecycle.py +1507 -0
- jettask/worker/manager.py +583 -0
- jettask/{core/offline_worker_recovery.py → worker/recovery.py} +268 -175
- {jettask-0.2.19.dist-info → jettask-0.2.23.dist-info}/METADATA +2 -71
- jettask-0.2.23.dist-info/RECORD +145 -0
- jettask/__main__.py +0 -140
- jettask/api/__init__.py +0 -103
- jettask/backend/__init__.py +0 -1
- jettask/backend/api/__init__.py +0 -3
- jettask/backend/api/v1/__init__.py +0 -17
- jettask/backend/api/v1/monitoring.py +0 -431
- jettask/backend/api/v1/namespaces.py +0 -504
- jettask/backend/api/v1/queues.py +0 -342
- jettask/backend/api/v1/tasks.py +0 -367
- jettask/backend/core/__init__.py +0 -3
- jettask/backend/core/cache.py +0 -221
- jettask/backend/core/database.py +0 -200
- jettask/backend/core/exceptions.py +0 -102
- jettask/backend/dependencies.py +0 -261
- jettask/backend/init_meta_db.py +0 -158
- jettask/backend/main.py +0 -1426
- jettask/backend/main_unified.py +0 -78
- jettask/backend/main_v2.py +0 -394
- jettask/backend/models/__init__.py +0 -3
- jettask/backend/models/requests.py +0 -236
- jettask/backend/models/responses.py +0 -230
- jettask/backend/namespace_api_old.py +0 -267
- jettask/backend/services/__init__.py +0 -3
- jettask/backend/start.py +0 -42
- jettask/backend/unified_api_router.py +0 -1541
- jettask/cleanup_deprecated_tables.sql +0 -16
- jettask/core/consumer_manager.py +0 -1695
- jettask/core/delay_scanner.py +0 -256
- jettask/core/event_pool.py +0 -1700
- jettask/core/heartbeat_process.py +0 -222
- jettask/core/task_batch.py +0 -153
- jettask/core/worker_scanner.py +0 -271
- jettask/executors/__init__.py +0 -5
- jettask/executors/asyncio.py +0 -876
- jettask/executors/base.py +0 -30
- jettask/executors/common.py +0 -148
- jettask/executors/multi_asyncio.py +0 -309
- jettask/gradio_app.py +0 -570
- jettask/integrated_gradio_app.py +0 -1088
- jettask/main.py +0 -0
- jettask/monitoring/__init__.py +0 -3
- jettask/pg_consumer.py +0 -1896
- jettask/run_monitor.py +0 -22
- jettask/run_webui.py +0 -148
- jettask/scheduler/multi_namespace_scheduler.py +0 -294
- jettask/scheduler/unified_manager.py +0 -450
- jettask/task_center_client.py +0 -150
- jettask/utils/serializer_optimized.py +0 -33
- jettask/webui_exceptions.py +0 -67
- jettask-0.2.19.dist-info/RECORD +0 -150
- /jettask/{constants.py → config/constants.py} +0 -0
- /jettask/{backend/config.py → config/task_center.py} +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/pg_consumer_v2.py +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql/add_execution_time_field.sql +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_new_tables.sql +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_tables_v3.sql +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql/migrate_to_new_structure.sql +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql/modify_time_fields.sql +0 -0
- /jettask/{pg_consumer → messaging/pg_consumer}/sql_utils.py +0 -0
- /jettask/{models.py → persistence/models.py} +0 -0
- /jettask/scheduler/{manager.py → task_crud.py} +0 -0
- /jettask/{schema.sql → schemas/schema.sql} +0 -0
- /jettask/{task_center.py → task/task_center/client.py} +0 -0
- /jettask/{monitoring → utils}/file_watcher.py +0 -0
- /jettask/{services/redis_monitor_service.py → utils/redis_monitor.py} +0 -0
- /jettask/{api/v1 → webui/api}/__init__.py +0 -0
- /jettask/{webui_config.py → webui/config.py} +0 -0
- /jettask/{webui_models → webui/models}/__init__.py +0 -0
- /jettask/{webui_models → webui/models}/namespace.py +0 -0
- /jettask/{services → webui/services}/alert_service.py +0 -0
- /jettask/{services → webui/services}/analytics_service.py +0 -0
- /jettask/{services → webui/services}/scheduled_task_service.py +0 -0
- /jettask/{services → webui/services}/task_service.py +0 -0
- /jettask/{webui_sql → webui/sql}/batch_upsert_functions.sql +0 -0
- /jettask/{webui_sql → webui/sql}/verify_database.sql +0 -0
- {jettask-0.2.19.dist-info → jettask-0.2.23.dist-info}/WHEEL +0 -0
- {jettask-0.2.19.dist-info → jettask-0.2.23.dist-info}/entry_points.txt +0 -0
- {jettask-0.2.19.dist-info → jettask-0.2.23.dist-info}/licenses/LICENSE +0 -0
- {jettask-0.2.19.dist-info → jettask-0.2.23.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,312 @@
|
|
1
|
+
"""
|
2
|
+
消息发送器 - 统一的消息发送接口
|
3
|
+
从 EventPool 中提取的消息发送逻辑
|
4
|
+
"""
|
5
|
+
|
6
|
+
import time
|
7
|
+
import logging
|
8
|
+
from typing import List, Dict, Optional, Tuple
|
9
|
+
from redis.asyncio import Redis as AsyncRedis
|
10
|
+
|
11
|
+
from ..utils.serializer import dumps_str
|
12
|
+
|
13
|
+
logger = logging.getLogger('app')
|
14
|
+
|
15
|
+
|
16
|
+
class MessageSender:
    """Unified message-sending interface.

    Responsibilities:
    1. Send normal messages to a Redis Stream.
    2. Send delayed messages to a Redis Sorted Set + Stream.
    3. Send priority messages to per-priority queues.
    4. Batch-send optimization via Lua scripts.
    """

    def __init__(
        self,
        async_redis_client: 'AsyncRedis',
        redis_prefix: str = 'jettask'
    ):
        """Initialize the message sender.

        Args:
            async_redis_client: Async Redis client (binary mode, used for
                Stream operations).
            redis_prefix: Redis key prefix.
        """
        self.redis = async_redis_client
        self.redis_prefix = redis_prefix

        # Lua script handles — registered lazily on first use and cached.
        self._batch_send_script = None
        self._delayed_task_script = None

        logger.debug(f"MessageSender initialized with prefix: {redis_prefix}")

    def _get_prefixed_queue_name(self, queue: str) -> str:
        """Return the fully prefixed Stream key for *queue*."""
        return f"{self.redis_prefix}:QUEUE:{queue}"

    def _get_delayed_queue_name(self, queue: str) -> str:
        """Return the delayed-queue (Sorted Set) key for *queue*."""
        return f"{self.redis_prefix}:DELAYED_QUEUE:{queue}"

    async def send_messages(
        self,
        queue: str,
        messages: List[Dict],
        priority: Optional[int] = None,
        delay: Optional[float] = None
    ) -> List[str]:
        """Send messages to a queue (unified entry point).

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.
            priority: Optional priority; when given, messages go to the
                per-priority queue "<queue>:<priority>".
            delay: Optional delay in seconds.

        Returns:
            List[str]: Stream IDs of the sent messages.

        Examples:
            # Send normal messages
            ids = await sender.send_messages("orders", [{"order_id": 123}])

            # Send delayed messages
            ids = await sender.send_messages("emails", [{"to": "user@example.com"}], delay=60)

            # Send priority messages
            ids = await sender.send_messages("tasks", [{"task": "urgent"}], priority=1)
        """
        if not messages:
            return []

        # Resolve the actual queue name, accounting for priority.
        actual_queue = queue
        if priority is not None:
            actual_queue = f"{queue}:{priority}"

        # Dispatch on whether the send is delayed.
        if delay and delay > 0:
            return await self._send_delayed_messages(actual_queue, messages, delay)
        else:
            return await self._send_normal_messages(actual_queue, messages)

    async def _send_normal_messages(self, queue: str, messages: List[Dict]) -> List[str]:
        """Send normal messages to a Redis Stream.

        Uses a Lua script for batched sends with atomic offset allocation.

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.

        Returns:
            List[str]: Stream IDs of the sent messages.
        """
        prefixed_queue = self._get_prefixed_queue_name(queue)

        # Lua script: batch-append messages, each tagged with an
        # atomically incremented per-queue offset.
        # NOTE(review): the gsub pattern concatenates the raw prefix; if a
        # prefix ever contains Lua pattern magic characters (e.g. '-'),
        # the queue-name extraction could misbehave — confirm prefixes are
        # restricted to word characters.
        lua_script = """
        local stream_key = KEYS[1]
        local prefix = ARGV[1]
        local results = {}

        -- 使用Hash存储所有队列的offset
        local offsets_hash = prefix .. ':QUEUE_OFFSETS'

        -- 从stream_key中提取队列名(去掉prefix:QUEUE:前缀)
        local queue_name = string.gsub(stream_key, '^' .. prefix .. ':QUEUE:', '')

        -- 将队列添加到全局队列注册表
        local queues_registry_key = prefix .. ':REGISTRY:QUEUES'
        redis.call('SADD', queues_registry_key, queue_name)

        -- 从ARGV[2]开始,每个参数是一个消息的data
        for i = 2, #ARGV do
            local data = ARGV[i]

            -- 使用HINCRBY原子递增offset(如果不存在会自动创建并设为1)
            local current_offset = redis.call('HINCRBY', offsets_hash, queue_name, 1)

            -- 添加消息到Stream(包含offset字段)
            local stream_id = redis.call('XADD', stream_key, '*',
                'data', data,
                'offset', current_offset)

            table.insert(results, stream_id)
        end

        return results
        """

        # Prepare Lua script arguments (prefix first, then one payload per
        # message, matching the script's ARGV layout).
        lua_args = [self.redis_prefix.encode() if isinstance(self.redis_prefix, str) else self.redis_prefix]

        for message in messages:
            # Accept either a pre-serialized 'data' payload or a raw dict.
            if 'data' in message:
                data = message['data'] if isinstance(message['data'], bytes) else dumps_str(message['data'])
            else:
                data = dumps_str(message)
            lua_args.append(data)

        # Register the script once and reuse the cached handle.
        if not self._batch_send_script:
            self._batch_send_script = self.redis.register_script(lua_script)

        results = await self._batch_send_script(
            keys=[prefixed_queue],
            args=lua_args
        )

        # Decode the returned Stream IDs (the binary-mode client yields bytes).
        decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]

        logger.debug(f"Sent {len(decoded_results)} messages to queue {queue}")
        return decoded_results

    async def _send_delayed_messages(
        self,
        queue: str,
        messages: List[Dict],
        delay: float
    ) -> List[str]:
        """Send delayed messages to Stream + Sorted Set.

        Uses a Lua script so the Stream append, delayed-queue ZADD and task
        status Hash are written atomically.

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.
            delay: Delay in seconds.

        Returns:
            List[str]: Stream IDs of the sent messages.
        """
        # Lua script: atomically enqueue delayed tasks.
        lua_script = """
        local prefix = ARGV[1]
        local results = {}

        -- 从ARGV[2]开始,每3个参数为一组任务信息
        -- [stream_key, stream_data, execute_at]
        for i = 2, #ARGV, 3 do
            local stream_key = ARGV[i]
            local stream_data = ARGV[i+1]
            local execute_at = tonumber(ARGV[i+2])

            -- 使用Hash存储所有队列的offset
            local offsets_hash = prefix .. ':QUEUE_OFFSETS'

            -- 从stream_key中提取队列名
            local queue_name = string.gsub(stream_key, '^' .. prefix .. ':QUEUE:', '')

            -- 使用HINCRBY原子递增offset
            local current_offset = redis.call('HINCRBY', offsets_hash, queue_name, 1)

            -- 1. 添加消息到Stream(包含offset字段)
            local stream_id = redis.call('XADD', stream_key, '*',
                'data', stream_data,
                'offset', current_offset)

            -- 2. 添加到延迟队列ZSET(使用execute_at作为分数)
            local delayed_queue_key = prefix .. ':DELAYED_QUEUE:' .. queue_name
            redis.call('ZADD', delayed_queue_key, execute_at, stream_id)

            -- 3. 设置任务状态Hash(用于追踪任务状态)
            local task_key = prefix .. ':TASK:' .. stream_id
            redis.call('HSET', task_key, 'status', 'delayed')
            redis.call('EXPIRE', task_key, 3600) -- 1小时过期

            -- 保存stream_id到结果
            table.insert(results, stream_id)
        end

        return results
        """

        prefixed_queue = self._get_prefixed_queue_name(queue)
        current_time = time.time()

        # Prepare Lua script arguments: prefix, then
        # (stream_key, payload, execute_at) triples.
        lua_args = [self.redis_prefix]

        for message in messages:
            # Tag the payload with its scheduled execution time so the
            # delayed-task scanner can recognize it.
            msg_dict = message.copy()
            execute_at = current_time + delay
            msg_dict['execute_at'] = execute_at
            msg_dict['is_delayed'] = 1

            stream_data = dumps_str(msg_dict)

            lua_args.extend([
                prefixed_queue,
                stream_data,
                str(execute_at)
            ])

        # Register the script once and reuse the cached handle.
        if not self._delayed_task_script:
            self._delayed_task_script = self.redis.register_script(lua_script)

        results = await self._delayed_task_script(keys=[], args=lua_args)

        # Decode returned Stream IDs.
        decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]

        logger.debug(f"Sent {len(decoded_results)} delayed messages to queue {queue} (delay={delay}s)")
        return decoded_results

    async def send_single_message(
        self,
        queue: str,
        message: Dict,
        priority: Optional[int] = None,
        delay: Optional[float] = None
    ) -> Optional[str]:
        """Send a single message (convenience wrapper).

        Args:
            queue: Queue name.
            message: Message dict.
            priority: Optional priority.
            delay: Optional delay in seconds.

        Returns:
            Optional[str]: Stream ID, or None if nothing was sent.
            (Return annotation fixed: the original claimed ``str`` but
            returns None on an empty result.)
        """
        results = await self.send_messages(queue, [message], priority=priority, delay=delay)
        return results[0] if results else None

    async def get_queue_size(self, queue: str) -> int:
        """Return the queue size (Stream length).

        Args:
            queue: Queue name.

        Returns:
            int: Number of messages in the Stream.
        """
        prefixed_queue = self._get_prefixed_queue_name(queue)
        return await self.redis.xlen(prefixed_queue)

    async def get_delayed_queue_size(self, queue: str) -> int:
        """Return the delayed-queue size (Sorted Set cardinality).

        Args:
            queue: Queue name.

        Returns:
            int: Number of messages in the delayed queue.
        """
        delayed_queue = self._get_delayed_queue_name(queue)
        return await self.redis.zcard(delayed_queue)
|
@@ -0,0 +1,118 @@
|
|
1
|
+
"""PostgreSQL Consumer模块
|
2
|
+
|
3
|
+
从Redis队列消费任务并持久化到PostgreSQL数据库。
|
4
|
+
|
5
|
+
模块结构:
|
6
|
+
- consumer.py: 主消费者类(协调器)
|
7
|
+
- backlog_monitor.py: Stream积压监控
|
8
|
+
- task_updater.py: 任务状态更新
|
9
|
+
- offline_recovery.py: 离线Worker恢复
|
10
|
+
- task_persistence.py: 任务数据持久化
|
11
|
+
- queue_discovery.py: 队列发现
|
12
|
+
- message_consumer.py: 消息消费
|
13
|
+
- maintenance.py: 数据库维护
|
14
|
+
|
15
|
+
使用示例:
|
16
|
+
from jettask.services.pg_consumer import PostgreSQLConsumer, run_pg_consumer, main
|
17
|
+
|
18
|
+
# 使用Consumer类
|
19
|
+
consumer = PostgreSQLConsumer(pg_config, redis_config)
|
20
|
+
await consumer.start()
|
21
|
+
|
22
|
+
# 或直接运行
|
23
|
+
await run_pg_consumer(pg_config, redis_config)
|
24
|
+
|
25
|
+
# 或使用main函数(从环境变量读取配置)
|
26
|
+
main()
|
27
|
+
"""
|
28
|
+
|
29
|
+
import asyncio
|
30
|
+
import logging
|
31
|
+
import os
|
32
|
+
|
33
|
+
from jettask.webui.config import PostgreSQLConfig, RedisConfig
|
34
|
+
# ConsumerStrategy 已移除,现在只使用 HEARTBEAT 策略
|
35
|
+
|
36
|
+
from .consumer import PostgreSQLConsumer
|
37
|
+
|
38
|
+
logger = logging.getLogger(__name__)
|
39
|
+
|
40
|
+
# 导出主要类和函数
|
41
|
+
__all__ = [
|
42
|
+
'PostgreSQLConsumer',
|
43
|
+
'run_pg_consumer',
|
44
|
+
'main'
|
45
|
+
]
|
46
|
+
|
47
|
+
|
48
|
+
async def run_pg_consumer(
    pg_config: PostgreSQLConfig,
    redis_config: RedisConfig,
    # consumer_strategy parameter removed — only the HEARTBEAT strategy is used now
):
    """Run the PostgreSQL consumer until interrupted.

    Args:
        pg_config: PostgreSQL connection configuration.
        redis_config: Redis connection configuration.
    """
    # Read backlog-monitor settings from environment variables.
    enable_backlog_monitor = os.getenv('JETTASK_ENABLE_BACKLOG_MONITOR', 'true').lower() == 'true'
    backlog_monitor_interval = int(os.getenv('JETTASK_BACKLOG_MONITOR_INTERVAL', '60'))

    logger.info(f"Backlog monitor config: enabled={enable_backlog_monitor}, interval={backlog_monitor_interval}s")

    # BUG FIX: the original passed consumer_strategy=consumer_strategy even
    # though that parameter was removed from this function's signature,
    # which raised NameError on every call. The kwarg is dropped entirely.
    consumer = PostgreSQLConsumer(
        pg_config,
        redis_config,
        enable_backlog_monitor=enable_backlog_monitor,
        backlog_monitor_interval=backlog_monitor_interval
    )

    try:
        await consumer.start()
        # Keep the coroutine alive; the consumer does its work in
        # background tasks started by start().
        while True:
            await asyncio.sleep(1)

    except KeyboardInterrupt:
        logger.debug("Received interrupt signal")
    finally:
        await consumer.stop()
|
83
|
+
|
84
|
+
|
85
|
+
def main():
    """CLI entry point: load settings from the environment and start the consumer."""
    from dotenv import load_dotenv

    load_dotenv()

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    env = os.getenv  # hoist the lookup; every setting below comes from the environment

    pg_cfg = PostgreSQLConfig(
        host=env('JETTASK_PG_HOST', 'localhost'),
        port=int(env('JETTASK_PG_PORT', '5432')),
        database=env('JETTASK_PG_DB', 'jettask'),
        user=env('JETTASK_PG_USER', 'jettask'),
        password=env('JETTASK_PG_PASSWORD', '123456'),
    )

    redis_cfg = RedisConfig(
        host=env('REDIS_HOST', 'localhost'),
        port=int(env('REDIS_PORT', '6379')),
        db=int(env('REDIS_DB', '0')),
        password=env('REDIS_PASSWORD'),
    )

    # HEARTBEAT is the only supported consumer strategy.
    logger.debug("Using consumer strategy: HEARTBEAT")

    asyncio.run(run_pg_consumer(pg_cfg, redis_cfg))
|
115
|
+
|
116
|
+
|
117
|
+
# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
|