jettask 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. jettask/__init__.py +60 -2
  2. jettask/cli.py +314 -228
  3. jettask/config/__init__.py +9 -1
  4. jettask/config/config.py +245 -0
  5. jettask/config/env_loader.py +381 -0
  6. jettask/config/lua_scripts.py +158 -0
  7. jettask/config/nacos_config.py +132 -5
  8. jettask/core/__init__.py +1 -1
  9. jettask/core/app.py +1573 -666
  10. jettask/core/app_importer.py +33 -16
  11. jettask/core/container.py +532 -0
  12. jettask/core/task.py +1 -4
  13. jettask/core/unified_manager_base.py +2 -2
  14. jettask/executor/__init__.py +38 -0
  15. jettask/executor/core.py +625 -0
  16. jettask/executor/executor.py +338 -0
  17. jettask/executor/orchestrator.py +290 -0
  18. jettask/executor/process_entry.py +638 -0
  19. jettask/executor/task_executor.py +317 -0
  20. jettask/messaging/__init__.py +68 -0
  21. jettask/messaging/event_pool.py +2188 -0
  22. jettask/messaging/reader.py +519 -0
  23. jettask/messaging/registry.py +266 -0
  24. jettask/messaging/scanner.py +369 -0
  25. jettask/messaging/sender.py +312 -0
  26. jettask/persistence/__init__.py +118 -0
  27. jettask/persistence/backlog_monitor.py +567 -0
  28. jettask/{backend/data_access.py → persistence/base.py} +58 -57
  29. jettask/persistence/consumer.py +315 -0
  30. jettask/{core → persistence}/db_manager.py +23 -22
  31. jettask/persistence/maintenance.py +81 -0
  32. jettask/persistence/message_consumer.py +259 -0
  33. jettask/{backend/namespace_data_access.py → persistence/namespace.py} +66 -98
  34. jettask/persistence/offline_recovery.py +196 -0
  35. jettask/persistence/queue_discovery.py +215 -0
  36. jettask/persistence/task_persistence.py +218 -0
  37. jettask/persistence/task_updater.py +583 -0
  38. jettask/scheduler/__init__.py +2 -2
  39. jettask/scheduler/loader.py +6 -5
  40. jettask/scheduler/run_scheduler.py +1 -1
  41. jettask/scheduler/scheduler.py +7 -7
  42. jettask/scheduler/{unified_scheduler_manager.py → scheduler_coordinator.py} +18 -13
  43. jettask/task/__init__.py +16 -0
  44. jettask/{router.py → task/router.py} +26 -8
  45. jettask/task/task_center/__init__.py +9 -0
  46. jettask/task/task_executor.py +318 -0
  47. jettask/task/task_registry.py +291 -0
  48. jettask/test_connection_monitor.py +73 -0
  49. jettask/utils/__init__.py +31 -1
  50. jettask/{monitor/run_backlog_collector.py → utils/backlog_collector.py} +1 -1
  51. jettask/utils/db_connector.py +1629 -0
  52. jettask/{db_init.py → utils/db_init.py} +1 -1
  53. jettask/utils/rate_limit/__init__.py +30 -0
  54. jettask/utils/rate_limit/concurrency_limiter.py +665 -0
  55. jettask/utils/rate_limit/config.py +145 -0
  56. jettask/utils/rate_limit/limiter.py +41 -0
  57. jettask/utils/rate_limit/manager.py +269 -0
  58. jettask/utils/rate_limit/qps_limiter.py +154 -0
  59. jettask/utils/rate_limit/task_limiter.py +384 -0
  60. jettask/utils/serializer.py +3 -0
  61. jettask/{monitor/stream_backlog_monitor.py → utils/stream_backlog.py} +14 -6
  62. jettask/utils/time_sync.py +173 -0
  63. jettask/webui/__init__.py +27 -0
  64. jettask/{api/v1 → webui/api}/alerts.py +1 -1
  65. jettask/{api/v1 → webui/api}/analytics.py +2 -2
  66. jettask/{api/v1 → webui/api}/namespaces.py +1 -1
  67. jettask/{api/v1 → webui/api}/overview.py +1 -1
  68. jettask/{api/v1 → webui/api}/queues.py +3 -3
  69. jettask/{api/v1 → webui/api}/scheduled.py +1 -1
  70. jettask/{api/v1 → webui/api}/settings.py +1 -1
  71. jettask/{api.py → webui/app.py} +253 -145
  72. jettask/webui/namespace_manager/__init__.py +10 -0
  73. jettask/{multi_namespace_consumer.py → webui/namespace_manager/multi.py} +69 -22
  74. jettask/{unified_consumer_manager.py → webui/namespace_manager/unified.py} +1 -1
  75. jettask/{run.py → webui/run.py} +2 -2
  76. jettask/{services → webui/services}/__init__.py +1 -3
  77. jettask/{services → webui/services}/overview_service.py +34 -16
  78. jettask/{services → webui/services}/queue_service.py +1 -1
  79. jettask/{backend → webui/services}/queue_stats_v2.py +1 -1
  80. jettask/{services → webui/services}/settings_service.py +1 -1
  81. jettask/worker/__init__.py +53 -0
  82. jettask/worker/lifecycle.py +1507 -0
  83. jettask/worker/manager.py +583 -0
  84. jettask/{core/offline_worker_recovery.py → worker/recovery.py} +268 -175
  85. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/METADATA +2 -71
  86. jettask-0.2.20.dist-info/RECORD +145 -0
  87. jettask/__main__.py +0 -140
  88. jettask/api/__init__.py +0 -103
  89. jettask/backend/__init__.py +0 -1
  90. jettask/backend/api/__init__.py +0 -3
  91. jettask/backend/api/v1/__init__.py +0 -17
  92. jettask/backend/api/v1/monitoring.py +0 -431
  93. jettask/backend/api/v1/namespaces.py +0 -504
  94. jettask/backend/api/v1/queues.py +0 -342
  95. jettask/backend/api/v1/tasks.py +0 -367
  96. jettask/backend/core/__init__.py +0 -3
  97. jettask/backend/core/cache.py +0 -221
  98. jettask/backend/core/database.py +0 -200
  99. jettask/backend/core/exceptions.py +0 -102
  100. jettask/backend/dependencies.py +0 -261
  101. jettask/backend/init_meta_db.py +0 -158
  102. jettask/backend/main.py +0 -1426
  103. jettask/backend/main_unified.py +0 -78
  104. jettask/backend/main_v2.py +0 -394
  105. jettask/backend/models/__init__.py +0 -3
  106. jettask/backend/models/requests.py +0 -236
  107. jettask/backend/models/responses.py +0 -230
  108. jettask/backend/namespace_api_old.py +0 -267
  109. jettask/backend/services/__init__.py +0 -3
  110. jettask/backend/start.py +0 -42
  111. jettask/backend/unified_api_router.py +0 -1541
  112. jettask/cleanup_deprecated_tables.sql +0 -16
  113. jettask/core/consumer_manager.py +0 -1695
  114. jettask/core/delay_scanner.py +0 -256
  115. jettask/core/event_pool.py +0 -1700
  116. jettask/core/heartbeat_process.py +0 -222
  117. jettask/core/task_batch.py +0 -153
  118. jettask/core/worker_scanner.py +0 -271
  119. jettask/executors/__init__.py +0 -5
  120. jettask/executors/asyncio.py +0 -876
  121. jettask/executors/base.py +0 -30
  122. jettask/executors/common.py +0 -148
  123. jettask/executors/multi_asyncio.py +0 -309
  124. jettask/gradio_app.py +0 -570
  125. jettask/integrated_gradio_app.py +0 -1088
  126. jettask/main.py +0 -0
  127. jettask/monitoring/__init__.py +0 -3
  128. jettask/pg_consumer.py +0 -1896
  129. jettask/run_monitor.py +0 -22
  130. jettask/run_webui.py +0 -148
  131. jettask/scheduler/multi_namespace_scheduler.py +0 -294
  132. jettask/scheduler/unified_manager.py +0 -450
  133. jettask/task_center_client.py +0 -150
  134. jettask/utils/serializer_optimized.py +0 -33
  135. jettask/webui_exceptions.py +0 -67
  136. jettask-0.2.18.dist-info/RECORD +0 -150
  137. /jettask/{constants.py → config/constants.py} +0 -0
  138. /jettask/{backend/config.py → config/task_center.py} +0 -0
  139. /jettask/{pg_consumer → messaging/pg_consumer}/pg_consumer_v2.py +0 -0
  140. /jettask/{pg_consumer → messaging/pg_consumer}/sql/add_execution_time_field.sql +0 -0
  141. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_new_tables.sql +0 -0
  142. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_tables_v3.sql +0 -0
  143. /jettask/{pg_consumer → messaging/pg_consumer}/sql/migrate_to_new_structure.sql +0 -0
  144. /jettask/{pg_consumer → messaging/pg_consumer}/sql/modify_time_fields.sql +0 -0
  145. /jettask/{pg_consumer → messaging/pg_consumer}/sql_utils.py +0 -0
  146. /jettask/{models.py → persistence/models.py} +0 -0
  147. /jettask/scheduler/{manager.py → task_crud.py} +0 -0
  148. /jettask/{schema.sql → schemas/schema.sql} +0 -0
  149. /jettask/{task_center.py → task/task_center/client.py} +0 -0
  150. /jettask/{monitoring → utils}/file_watcher.py +0 -0
  151. /jettask/{services/redis_monitor_service.py → utils/redis_monitor.py} +0 -0
  152. /jettask/{api/v1 → webui/api}/__init__.py +0 -0
  153. /jettask/{webui_config.py → webui/config.py} +0 -0
  154. /jettask/{webui_models → webui/models}/__init__.py +0 -0
  155. /jettask/{webui_models → webui/models}/namespace.py +0 -0
  156. /jettask/{services → webui/services}/alert_service.py +0 -0
  157. /jettask/{services → webui/services}/analytics_service.py +0 -0
  158. /jettask/{services → webui/services}/scheduled_task_service.py +0 -0
  159. /jettask/{services → webui/services}/task_service.py +0 -0
  160. /jettask/{webui_sql → webui/sql}/batch_upsert_functions.sql +0 -0
  161. /jettask/{webui_sql → webui/sql}/verify_database.sql +0 -0
  162. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/WHEEL +0 -0
  163. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/entry_points.txt +0 -0
  164. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/licenses/LICENSE +0 -0
  165. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,312 @@
1
+ """
2
+ 消息发送器 - 统一的消息发送接口
3
+ 从 EventPool 中提取的消息发送逻辑
4
+ """
5
+
6
+ import time
7
+ import logging
8
+ from typing import List, Dict, Optional, Tuple
9
+ from redis.asyncio import Redis as AsyncRedis
10
+
11
+ from ..utils.serializer import dumps_str
12
+
13
+ logger = logging.getLogger('app')
14
+
15
+
16
class MessageSender:
    """Unified message-sending interface (extracted from EventPool).

    Responsibilities:
        1. Send normal messages to a Redis Stream.
        2. Send delayed messages to a Redis Sorted Set + Stream.
        3. Send priority messages to per-priority sub-queues.
        4. Batch-send optimization via Lua scripts.
    """

    def __init__(
        self,
        async_redis_client: "AsyncRedis",
        redis_prefix: str = 'jettask'
    ):
        """Initialize the message sender.

        Args:
            async_redis_client: Async Redis client (binary mode, used for
                Stream operations).
            redis_prefix: Prefix applied to every Redis key this sender uses.
        """
        self.redis = async_redis_client
        self.redis_prefix = redis_prefix

        # Lazily-registered Lua scripts, cached after first use.
        self._batch_send_script = None
        self._delayed_task_script = None

        logger.debug(f"MessageSender initialized with prefix: {redis_prefix}")

    def _get_prefixed_queue_name(self, queue: str) -> str:
        """Return the fully-prefixed Stream key for *queue*."""
        return f"{self.redis_prefix}:QUEUE:{queue}"

    def _get_delayed_queue_name(self, queue: str) -> str:
        """Return the delayed-queue (Sorted Set) key for *queue*."""
        return f"{self.redis_prefix}:DELAYED_QUEUE:{queue}"

    async def send_messages(
        self,
        queue: str,
        messages: List[Dict],
        priority: Optional[int] = None,
        delay: Optional[float] = None
    ) -> List[str]:
        """Send messages to a queue (unified entry point).

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.
            priority: Optional priority; when given, messages are routed to
                the priority sub-queue "<queue>:<priority>".
            delay: Optional delay in seconds before the message becomes due.

        Returns:
            List[str]: Stream IDs of the sent messages (empty for no input).

        Examples:
            # Send a normal message
            ids = await sender.send_messages("orders", [{"order_id": 123}])

            # Send a delayed message
            ids = await sender.send_messages("emails", [{"to": "user@example.com"}], delay=60)

            # Send a priority message
            ids = await sender.send_messages("tasks", [{"task": "urgent"}], priority=1)
        """
        if not messages:
            return []

        # Priority messages go to a dedicated sub-queue name.
        actual_queue = queue
        if priority is not None:
            actual_queue = f"{queue}:{priority}"

        # Delayed sends take the ZSET-backed path; everything else goes
        # straight to the Stream.
        if delay and delay > 0:
            return await self._send_delayed_messages(actual_queue, messages, delay)
        else:
            return await self._send_normal_messages(actual_queue, messages)

    async def _send_normal_messages(self, queue: str, messages: List[Dict]) -> List[str]:
        """Send normal messages to a Redis Stream.

        Uses a Lua script so the batch XADD and the per-queue offset
        allocation happen atomically.

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.

        Returns:
            List[str]: Stream IDs of the sent messages.
        """
        prefixed_queue = self._get_prefixed_queue_name(queue)

        # Lua script: batch-send messages, attaching a monotonically
        # increasing per-queue offset to each entry.
        lua_script = """
        local stream_key = KEYS[1]
        local prefix = ARGV[1]
        local results = {}

        -- 使用Hash存储所有队列的offset
        local offsets_hash = prefix .. ':QUEUE_OFFSETS'

        -- 从stream_key中提取队列名(去掉prefix:QUEUE:前缀)
        local queue_name = string.gsub(stream_key, '^' .. prefix .. ':QUEUE:', '')

        -- 将队列添加到全局队列注册表
        local queues_registry_key = prefix .. ':REGISTRY:QUEUES'
        redis.call('SADD', queues_registry_key, queue_name)

        -- 从ARGV[2]开始,每个参数是一个消息的data
        for i = 2, #ARGV do
            local data = ARGV[i]

            -- 使用HINCRBY原子递增offset(如果不存在会自动创建并设为1)
            local current_offset = redis.call('HINCRBY', offsets_hash, queue_name, 1)

            -- 添加消息到Stream(包含offset字段)
            local stream_id = redis.call('XADD', stream_key, '*',
                'data', data,
                'offset', current_offset)

            table.insert(results, stream_id)
        end

        return results
        """

        # Build the Lua arguments: prefix first, then one data blob per message.
        lua_args = [self.redis_prefix.encode() if isinstance(self.redis_prefix, str) else self.redis_prefix]

        for message in messages:
            # Accept pre-serialized payloads under 'data'; otherwise serialize
            # the whole message dict.
            if 'data' in message:
                data = message['data'] if isinstance(message['data'], bytes) else dumps_str(message['data'])
            else:
                data = dumps_str(message)
            lua_args.append(data)

        # Register the script once, then reuse the cached handle.
        if not self._batch_send_script:
            self._batch_send_script = self.redis.register_script(lua_script)

        results = await self._batch_send_script(
            keys=[prefixed_queue],
            args=lua_args
        )

        # Decode the returned Stream IDs (binary client returns bytes).
        decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]

        logger.debug(f"Sent {len(decoded_results)} messages to queue {queue}")
        return decoded_results

    async def _send_delayed_messages(
        self,
        queue: str,
        messages: List[Dict],
        delay: float
    ) -> List[str]:
        """Send delayed messages to a Stream + Sorted Set atomically.

        Each message is XADDed to the Stream, scheduled in the delayed
        ZSET (scored by its execute-at timestamp), and given a status
        Hash — all inside one Lua script.

        Args:
            queue: Queue name (without prefix).
            messages: List of message dicts.
            delay: Delay in seconds from now.

        Returns:
            List[str]: Stream IDs of the sent messages.
        """
        # Lua script: atomically enqueue each delayed task.
        lua_script = """
        local prefix = ARGV[1]
        local results = {}

        -- 从ARGV[2]开始,每3个参数为一组任务信息
        -- [stream_key, stream_data, execute_at]
        for i = 2, #ARGV, 3 do
            local stream_key = ARGV[i]
            local stream_data = ARGV[i+1]
            local execute_at = tonumber(ARGV[i+2])

            -- 使用Hash存储所有队列的offset
            local offsets_hash = prefix .. ':QUEUE_OFFSETS'

            -- 从stream_key中提取队列名
            local queue_name = string.gsub(stream_key, '^' .. prefix .. ':QUEUE:', '')

            -- 使用HINCRBY原子递增offset
            local current_offset = redis.call('HINCRBY', offsets_hash, queue_name, 1)

            -- 1. 添加消息到Stream(包含offset字段)
            local stream_id = redis.call('XADD', stream_key, '*',
                'data', stream_data,
                'offset', current_offset)

            -- 2. 添加到延迟队列ZSET(使用execute_at作为分数)
            local delayed_queue_key = prefix .. ':DELAYED_QUEUE:' .. queue_name
            redis.call('ZADD', delayed_queue_key, execute_at, stream_id)

            -- 3. 设置任务状态Hash(用于追踪任务状态)
            local task_key = prefix .. ':TASK:' .. stream_id
            redis.call('HSET', task_key, 'status', 'delayed')
            redis.call('EXPIRE', task_key, 3600) -- 1小时过期

            -- 保存stream_id到结果
            table.insert(results, stream_id)
        end

        return results
        """

        prefixed_queue = self._get_prefixed_queue_name(queue)
        current_time = time.time()

        # Build the Lua arguments: prefix first, then triplets of
        # [stream_key, stream_data, execute_at] per message.
        lua_args = [self.redis_prefix]

        for message in messages:
            # Stamp the payload with its delayed-execution metadata.
            msg_dict = message.copy()
            execute_at = current_time + delay
            msg_dict['execute_at'] = execute_at
            msg_dict['is_delayed'] = 1

            stream_data = dumps_str(msg_dict)

            lua_args.extend([
                prefixed_queue,
                stream_data,
                str(execute_at)
            ])

        # Register the script once, then reuse the cached handle.
        if not self._delayed_task_script:
            self._delayed_task_script = self.redis.register_script(lua_script)

        results = await self._delayed_task_script(keys=[], args=lua_args)

        # Decode the returned Stream IDs.
        decoded_results = [r.decode('utf-8') if isinstance(r, bytes) else r for r in results]

        logger.debug(f"Sent {len(decoded_results)} delayed messages to queue {queue} (delay={delay}s)")
        return decoded_results

    async def send_single_message(
        self,
        queue: str,
        message: Dict,
        priority: Optional[int] = None,
        delay: Optional[float] = None
    ) -> Optional[str]:
        """Send a single message (convenience wrapper over send_messages).

        Args:
            queue: Queue name.
            message: Message dict.
            priority: Optional priority.
            delay: Optional delay in seconds.

        Returns:
            Optional[str]: The Stream ID, or None if nothing was sent.
            (Annotation fixed: the original declared ``-> str`` but returns
            None when the result list is empty.)
        """
        results = await self.send_messages(queue, [message], priority=priority, delay=delay)
        return results[0] if results else None

    async def get_queue_size(self, queue: str) -> int:
        """Return the queue size (Stream length).

        Args:
            queue: Queue name.

        Returns:
            int: Number of entries currently in the Stream.
        """
        prefixed_queue = self._get_prefixed_queue_name(queue)
        return await self.redis.xlen(prefixed_queue)

    async def get_delayed_queue_size(self, queue: str) -> int:
        """Return the delayed-queue size (Sorted Set cardinality).

        Args:
            queue: Queue name.

        Returns:
            int: Number of pending delayed messages.
        """
        delayed_queue = self._get_delayed_queue_name(queue)
        return await self.redis.zcard(delayed_queue)
@@ -0,0 +1,118 @@
1
+ """PostgreSQL Consumer模块
2
+
3
+ 从Redis队列消费任务并持久化到PostgreSQL数据库。
4
+
5
+ 模块结构:
6
+ - consumer.py: 主消费者类(协调器)
7
+ - backlog_monitor.py: Stream积压监控
8
+ - task_updater.py: 任务状态更新
9
+ - offline_recovery.py: 离线Worker恢复
10
+ - task_persistence.py: 任务数据持久化
11
+ - queue_discovery.py: 队列发现
12
+ - message_consumer.py: 消息消费
13
+ - maintenance.py: 数据库维护
14
+
15
+ 使用示例:
16
+     from jettask.persistence import PostgreSQLConsumer, run_pg_consumer, main
17
+
18
+ # 使用Consumer类
19
+ consumer = PostgreSQLConsumer(pg_config, redis_config)
20
+ await consumer.start()
21
+
22
+ # 或直接运行
23
+ await run_pg_consumer(pg_config, redis_config)
24
+
25
+ # 或使用main函数(从环境变量读取配置)
26
+ main()
27
+ """
28
+
29
+ import asyncio
30
+ import logging
31
+ import os
32
+
33
+ from jettask.webui.config import PostgreSQLConfig, RedisConfig
34
+ # ConsumerStrategy 已移除,现在只使用 HEARTBEAT 策略
35
+
36
+ from .consumer import PostgreSQLConsumer
37
+
38
+ logger = logging.getLogger(__name__)
39
+
40
+ # 导出主要类和函数
41
+ __all__ = [
42
+ 'PostgreSQLConsumer',
43
+ 'run_pg_consumer',
44
+ 'main'
45
+ ]
46
+
47
+
48
async def run_pg_consumer(
    pg_config: PostgreSQLConfig,
    redis_config: RedisConfig,
):
    """Run the PostgreSQL consumer until interrupted.

    Args:
        pg_config: PostgreSQL connection configuration.
        redis_config: Redis connection configuration.
    """
    # Backlog-monitor settings come from the environment.
    enable_backlog_monitor = os.getenv('JETTASK_ENABLE_BACKLOG_MONITOR', 'true').lower() == 'true'
    backlog_monitor_interval = int(os.getenv('JETTASK_BACKLOG_MONITOR_INTERVAL', '60'))

    logger.info(f"Backlog monitor config: enabled={enable_backlog_monitor}, interval={backlog_monitor_interval}s")

    # BUGFIX: the original passed `consumer_strategy=consumer_strategy` even
    # though that parameter was removed from this function's signature,
    # raising NameError on every call. The strategy is now always HEARTBEAT,
    # so the argument is simply dropped.
    consumer = PostgreSQLConsumer(
        pg_config,
        redis_config,
        enable_backlog_monitor=enable_backlog_monitor,
        backlog_monitor_interval=backlog_monitor_interval
    )

    try:
        await consumer.start()
        # Keep the coroutine alive; the consumer works in background tasks.
        while True:
            await asyncio.sleep(1)

    except KeyboardInterrupt:
        logger.debug("Received interrupt signal")
    finally:
        await consumer.stop()
83
+
84
+
85
def main():
    """Entry point: load .env, build configs from the environment, and run
    the PostgreSQL consumer with the HEARTBEAT strategy."""
    from dotenv import load_dotenv

    load_dotenv()

    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    env = os.getenv

    # PostgreSQL connection settings (JETTASK_PG_* variables).
    pg_config = PostgreSQLConfig(
        host=env('JETTASK_PG_HOST', 'localhost'),
        port=int(env('JETTASK_PG_PORT', '5432')),
        database=env('JETTASK_PG_DB', 'jettask'),
        user=env('JETTASK_PG_USER', 'jettask'),
        password=env('JETTASK_PG_PASSWORD', '123456'),
    )

    # Redis connection settings (REDIS_* variables).
    redis_config = RedisConfig(
        host=env('REDIS_HOST', 'localhost'),
        port=int(env('REDIS_PORT', '6379')),
        db=int(env('REDIS_DB', '0')),
        password=env('REDIS_PASSWORD'),
    )

    # HEARTBEAT is the only supported consumer strategy.
    logger.debug("Using consumer strategy: HEARTBEAT")

    asyncio.run(run_pg_consumer(pg_config, redis_config))
115
+
116
+
117
+ if __name__ == '__main__':
118
+ main()