jettask 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. jettask/monitor/run_backlog_collector.py +96 -0
  2. jettask/monitor/stream_backlog_monitor.py +362 -0
  3. jettask/pg_consumer/pg_consumer_v2.py +403 -0
  4. jettask/pg_consumer/sql_utils.py +182 -0
  5. jettask/scheduler/__init__.py +17 -0
  6. jettask/scheduler/add_execution_count.sql +11 -0
  7. jettask/scheduler/add_priority_field.sql +26 -0
  8. jettask/scheduler/add_scheduler_id.sql +25 -0
  9. jettask/scheduler/add_scheduler_id_index.sql +10 -0
  10. jettask/scheduler/loader.py +249 -0
  11. jettask/scheduler/make_scheduler_id_required.sql +28 -0
  12. jettask/scheduler/manager.py +696 -0
  13. jettask/scheduler/migrate_interval_seconds.sql +9 -0
  14. jettask/scheduler/models.py +200 -0
  15. jettask/scheduler/multi_namespace_scheduler.py +294 -0
  16. jettask/scheduler/performance_optimization.sql +45 -0
  17. jettask/scheduler/run_scheduler.py +186 -0
  18. jettask/scheduler/scheduler.py +715 -0
  19. jettask/scheduler/schema.sql +84 -0
  20. jettask/scheduler/unified_manager.py +450 -0
  21. jettask/scheduler/unified_scheduler_manager.py +280 -0
  22. jettask/webui/backend/api/__init__.py +3 -0
  23. jettask/webui/backend/api/v1/__init__.py +17 -0
  24. jettask/webui/backend/api/v1/monitoring.py +431 -0
  25. jettask/webui/backend/api/v1/namespaces.py +504 -0
  26. jettask/webui/backend/api/v1/queues.py +342 -0
  27. jettask/webui/backend/api/v1/tasks.py +367 -0
  28. jettask/webui/backend/core/__init__.py +3 -0
  29. jettask/webui/backend/core/cache.py +221 -0
  30. jettask/webui/backend/core/database.py +200 -0
  31. jettask/webui/backend/core/exceptions.py +102 -0
  32. jettask/webui/backend/models/__init__.py +3 -0
  33. jettask/webui/backend/models/requests.py +236 -0
  34. jettask/webui/backend/models/responses.py +230 -0
  35. jettask/webui/backend/services/__init__.py +3 -0
  36. jettask/webui/frontend/index.html +13 -0
  37. jettask/webui/models/__init__.py +3 -0
  38. jettask/webui/models/namespace.py +63 -0
  39. jettask/webui/sql/batch_upsert_functions.sql +178 -0
  40. jettask/webui/sql/init_database.sql +640 -0
  41. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/METADATA +80 -10
  42. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/RECORD +46 -53
  43. jettask/webui/frontend/package-lock.json +0 -4833
  44. jettask/webui/frontend/package.json +0 -30
  45. jettask/webui/frontend/src/App.css +0 -109
  46. jettask/webui/frontend/src/App.jsx +0 -66
  47. jettask/webui/frontend/src/components/NamespaceSelector.jsx +0 -166
  48. jettask/webui/frontend/src/components/QueueBacklogChart.jsx +0 -298
  49. jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +0 -638
  50. jettask/webui/frontend/src/components/QueueDetailsTable.css +0 -65
  51. jettask/webui/frontend/src/components/QueueDetailsTable.jsx +0 -487
  52. jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +0 -465
  53. jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +0 -423
  54. jettask/webui/frontend/src/components/TaskFilter.jsx +0 -425
  55. jettask/webui/frontend/src/components/TimeRangeSelector.css +0 -21
  56. jettask/webui/frontend/src/components/TimeRangeSelector.jsx +0 -160
  57. jettask/webui/frontend/src/components/charts/QueueChart.jsx +0 -111
  58. jettask/webui/frontend/src/components/charts/QueueTrendChart.jsx +0 -115
  59. jettask/webui/frontend/src/components/charts/WorkerChart.jsx +0 -40
  60. jettask/webui/frontend/src/components/common/StatsCard.jsx +0 -18
  61. jettask/webui/frontend/src/components/layout/AppLayout.css +0 -95
  62. jettask/webui/frontend/src/components/layout/AppLayout.jsx +0 -49
  63. jettask/webui/frontend/src/components/layout/Header.css +0 -106
  64. jettask/webui/frontend/src/components/layout/Header.jsx +0 -106
  65. jettask/webui/frontend/src/components/layout/SideMenu.css +0 -137
  66. jettask/webui/frontend/src/components/layout/SideMenu.jsx +0 -209
  67. jettask/webui/frontend/src/components/layout/TabsNav.css +0 -244
  68. jettask/webui/frontend/src/components/layout/TabsNav.jsx +0 -206
  69. jettask/webui/frontend/src/components/layout/UserInfo.css +0 -197
  70. jettask/webui/frontend/src/components/layout/UserInfo.jsx +0 -197
  71. jettask/webui/frontend/src/contexts/LoadingContext.jsx +0 -27
  72. jettask/webui/frontend/src/contexts/NamespaceContext.jsx +0 -72
  73. jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +0 -245
  74. jettask/webui/frontend/src/index.css +0 -114
  75. jettask/webui/frontend/src/main.jsx +0 -20
  76. jettask/webui/frontend/src/pages/Alerts.jsx +0 -684
  77. jettask/webui/frontend/src/pages/Dashboard/index.css +0 -35
  78. jettask/webui/frontend/src/pages/Dashboard/index.jsx +0 -281
  79. jettask/webui/frontend/src/pages/Dashboard.jsx +0 -1330
  80. jettask/webui/frontend/src/pages/QueueDetail.jsx +0 -1117
  81. jettask/webui/frontend/src/pages/QueueMonitor.jsx +0 -527
  82. jettask/webui/frontend/src/pages/Queues.jsx +0 -12
  83. jettask/webui/frontend/src/pages/ScheduledTasks.jsx +0 -809
  84. jettask/webui/frontend/src/pages/Settings.jsx +0 -800
  85. jettask/webui/frontend/src/pages/Workers.jsx +0 -12
  86. jettask/webui/frontend/src/services/api.js +0 -114
  87. jettask/webui/frontend/src/services/queueTrend.js +0 -152
  88. jettask/webui/frontend/src/utils/suppressWarnings.js +0 -22
  89. jettask/webui/frontend/src/utils/userPreferences.js +0 -154
  90. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/WHEEL +0 -0
  91. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/entry_points.txt +0 -0
  92. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/licenses/LICENSE +0 -0
  93. {jettask-0.2.5.dist-info → jettask-0.2.7.dist-info}/top_level.txt +0 -0
jettask/monitor/run_backlog_collector.py
@@ -0,0 +1,96 @@
+ #!/usr/bin/env python3
+ """
+ Stream backlog collection service.
+ Can run as a standalone service that periodically collects Redis Stream backlog metrics.
+ """
+
+ import asyncio
+ import argparse
+ import logging
+ import signal
+ import sys
+ from stream_backlog_monitor import StreamBacklogMonitor
+
+ # Configure logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+
+ async def main(args):
+     """Main entry point."""
+     # Create the monitor instance
+     monitor = StreamBacklogMonitor(
+         redis_url=args.redis_url,
+         pg_url=args.pg_url,
+         redis_prefix=args.redis_prefix
+     )
+
+     # Set up signal handlers
+     def signal_handler(sig, frame):
+         logger.info("Received stop signal, shutting down...")
+         sys.exit(0)
+
+     signal.signal(signal.SIGINT, signal_handler)
+     signal.signal(signal.SIGTERM, signal_handler)
+
+     # Run the collector
+     logger.info("Starting backlog collector service...")
+     logger.info(f"  Redis URL: {args.redis_url}")
+     logger.info(f"  PostgreSQL URL: {args.pg_url}")
+     logger.info(f"  Redis Prefix: {args.redis_prefix}")
+     logger.info(f"  Collection Interval: {args.interval} seconds")
+
+     try:
+         await monitor.run_collector(interval=args.interval)
+     except KeyboardInterrupt:
+         logger.info("Collector stopped by user")
+     except Exception as e:
+         logger.error(f"Collector failed: {e}")
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description="Redis Stream Backlog Monitor Collector")
+
+     parser.add_argument(
+         "--redis-url",
+         default="redis://localhost:6379/0",
+         help="Redis connection URL (default: redis://localhost:6379/0)"
+     )
+
+     parser.add_argument(
+         "--pg-url",
+         default="postgresql+asyncpg://jettask:123456@localhost:5432/jettask",
+         help="PostgreSQL connection URL"
+     )
+
+     parser.add_argument(
+         "--redis-prefix",
+         default="JETTASK",
+         help="Redis key prefix (default: JETTASK)"
+     )
+
+     parser.add_argument(
+         "--interval",
+         type=int,
+         default=60,
+         help="Collection interval in seconds (default: 60)"
+     )
+
+     parser.add_argument(
+         "--debug",
+         action="store_true",
+         help="Enable debug logging"
+     )
+
+     args = parser.parse_args()
+
+     # Set the log level
+     if args.debug:
+         logging.getLogger().setLevel(logging.DEBUG)
+
+     # Run the main coroutine
+     asyncio.run(main(args))
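Stripped of argument parsing and signal handling, the collector boils down to roughly the following minimal sketch (not lines from the package), assuming the defaults shown in the argparse definitions above and that stream_backlog_monitor is importable from the working directory:

import asyncio
from stream_backlog_monitor import StreamBacklogMonitor

# Equivalent of running the script with no flags: collect every 60 seconds
# against the default Redis/PostgreSQL URLs until interrupted.
monitor = StreamBacklogMonitor(
    redis_url="redis://localhost:6379/0",
    pg_url="postgresql+asyncpg://jettask:123456@localhost:5432/jettask",
    redis_prefix="JETTASK",
)
asyncio.run(monitor.run_collector(interval=60))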
jettask/monitor/stream_backlog_monitor.py
@@ -0,0 +1,362 @@
+ """
+ Redis Stream backlog monitoring module.
+ Used to monitor how far the task queues have fallen behind.
+ """
+
+ import asyncio
+ import redis.asyncio as redis
+ from datetime import datetime, timezone
+ from typing import Dict, List, Optional, Tuple
+ import logging
+ from sqlalchemy import text
+ from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
+ from sqlalchemy.orm import sessionmaker
+ import os
+
+ logger = logging.getLogger(__name__)
+
+
+ class StreamBacklogMonitor:
+     """Stream backlog monitor."""
+
+     def __init__(self, redis_url: str = None, pg_url: str = None, redis_prefix: str = "JETTASK"):
+         """
+         Initialize the monitor.
+
+         Args:
+             redis_url: Redis connection URL
+             pg_url: PostgreSQL connection URL
+             redis_prefix: Redis key prefix
+         """
+         self.redis_url = redis_url or os.getenv('JETTASK_REDIS_URL', 'redis://localhost:6379/0')
+         self.pg_url = pg_url or os.getenv('JETTASK_PG_URL', 'postgresql+asyncpg://jettask:123456@localhost:5432/jettask')
+         self.redis_prefix = redis_prefix
+
+         self.redis_client = None
+         self.engine = None
+         self.AsyncSessionLocal = None
+
+     async def initialize(self):
+         """Initialize connections."""
+         # Initialize the Redis connection
+         self.redis_client = await redis.from_url(self.redis_url, decode_responses=True)
+
+         # Initialize the PostgreSQL connection
+         self.engine = create_async_engine(self.pg_url, echo=False)
+         self.AsyncSessionLocal = sessionmaker(self.engine, class_=AsyncSession, expire_on_commit=False)
+
+     async def close(self):
+         """Close connections."""
+         if self.redis_client:
+             await self.redis_client.close()
+         if self.engine:
+             await self.engine.dispose()
+
+     async def update_delivered_offset(self, stream_name: str, group_name: str, messages: List[Tuple]):
+         """
+         Update the consumer group's delivered offset.
+
+         Args:
+             stream_name: stream name (queue name)
+             group_name: consumer group name
+             messages: list of messages
+         """
+         if not messages:
+             return
+
+         try:
+             # Extract the largest offset from the messages
+             max_offset = 0
+             for _, msg_list in messages:
+                 for msg_id, msg_data in msg_list:
+                     if b'offset' in msg_data:
+                         offset = int(msg_data[b'offset'])
+                         max_offset = max(max_offset, offset)
+
+             if max_offset > 0:
+                 # Update last_delivered_offset in Redis
+                 key = f"{self.redis_prefix}:GROUP:{stream_name}:{group_name}:last_delivered_offset"
+
+                 # Use a Lua script so that only a larger value is written
+                 lua_script = """
+                 local current = redis.call('GET', KEYS[1])
+                 if not current or tonumber(ARGV[1]) > tonumber(current) then
+                     redis.call('SET', KEYS[1], ARGV[1])
+                 end
+                 return redis.call('GET', KEYS[1])
+                 """
+
+                 await self.redis_client.eval(lua_script, 1, key, str(max_offset))
+                 logger.debug(f"Updated delivered offset for {stream_name}:{group_name} to {max_offset}")
+
+         except Exception as e:
+             logger.error(f"Failed to update delivered offset: {e}")
+
+     async def update_acked_offset(self, stream_name: str, group_name: str, acked_messages: List):
+         """
+         Update the consumer group's acked offset.
+
+         Args:
+             stream_name: stream name
+             group_name: consumer group name
+             acked_messages: list of acknowledged messages
+         """
+         if not acked_messages:
+             return
+
+         try:
+             # Extract the largest acked offset
+             max_offset = 0
+             for msg in acked_messages:
+                 if 'offset' in msg:
+                     offset = int(msg['offset'])
+                     max_offset = max(max_offset, offset)
+
+             if max_offset > 0:
+                 # Update last_acked_offset in Redis
+                 key = f"{self.redis_prefix}:GROUP:{stream_name}:{group_name}:last_acked_offset"
+
+                 # Use a Lua script so that only a larger value is written
+                 lua_script = """
+                 local current = redis.call('GET', KEYS[1])
+                 if not current or tonumber(ARGV[1]) > tonumber(current) then
+                     redis.call('SET', KEYS[1], ARGV[1])
+                 end
+                 return redis.call('GET', KEYS[1])
+                 """
+
+                 await self.redis_client.eval(lua_script, 1, key, str(max_offset))
+                 logger.debug(f"Updated acked offset for {stream_name}:{group_name} to {max_offset}")
+
+         except Exception as e:
+             logger.error(f"Failed to update acked offset: {e}")
+
+     async def collect_metrics(self, namespace: str = "default", stream_names: List[str] = None) -> Dict:
+         """
+         Collect backlog metrics for the given streams.
+         Uses TASK_OFFSETS and QUEUE_OFFSETS for an exact calculation.
+
+         Args:
+             namespace: namespace to inspect
+             stream_names: list of streams to monitor; None means monitor all of them
+
+         Returns:
+             The collected metric data.
+         """
+         metrics = {}
+
+         try:
+             # Latest offset of every queue (QUEUE_OFFSETS)
+             queue_offsets_key = f"{namespace}:QUEUE_OFFSETS"
+             queue_offsets = await self.redis_client.hgetall(queue_offsets_key)
+
+             # Consumption offset of every task group (TASK_OFFSETS)
+             task_offsets_key = f"{namespace}:TASK_OFFSETS"
+             task_offsets = await self.redis_client.hgetall(task_offsets_key)
+
+             # If no streams were specified, take every queue from QUEUE_OFFSETS
+             if not stream_names:
+                 stream_names = list(queue_offsets.keys())
+
+             # Collect metrics for each stream
+             for stream_name in stream_names:
+                 # Use the actual Stream key format
+                 stream_key = f"{self.redis_prefix.lower()}:QUEUE:{stream_name}"
+
+                 # Latest offset of this queue
+                 last_published_offset = int(queue_offsets.get(stream_name, 0))
+
+                 # Fetch stream info
+                 try:
+                     stream_info = await self.redis_client.xinfo_stream(stream_key)
+                 except Exception:
+                     # The stream may not exist yet
+                     continue
+
+                 # Fetch all consumer group info
+                 try:
+                     groups = await self.redis_client.xinfo_groups(stream_key)
+                 except Exception:
+                     groups = []
+
+                 stream_metrics = {
+                     'namespace': namespace,
+                     'stream_name': stream_name,
+                     'last_published_offset': last_published_offset,
+                     'groups': {}
+                 }
+
+                 # Collect metrics for each consumer group
+                 for group in groups:
+                     group_name = group['name']
+                     pending_count = group['pending']  # pending count in the Redis Stream (delivered but not ACKed)
+
+                     # Get this group's consumption offset from TASK_OFFSETS
+                     # Key format: f"{queue}:{group_name}"
+                     task_offset_key = f"{stream_name}:{group_name}"
+                     last_acked_offset = int(task_offsets.get(task_offset_key, 0))
+                     logger.debug(f'{task_offset_key=} {last_acked_offset=}')
+                     # Compute the backlog metrics
+                     # 1. Total backlog = latest queue offset - group's acked offset
+                     total_backlog = max(0, last_published_offset - last_acked_offset)
+
+                     # 2. Undelivered backlog = total backlog - pending count
+                     # pending_count is the number of messages delivered to consumers but not yet ACKed
+                     backlog_undelivered = max(0, total_backlog - pending_count)
+
+                     # 3. Delivered but unacked = pending count (as tracked by the Redis Stream)
+                     backlog_delivered_unacked = pending_count
+
+                     # 4. Delivered offset = acked offset + pending count
+                     last_delivered_offset = last_acked_offset + pending_count
+
+                     stream_metrics['groups'][group_name] = {
+                         'last_delivered_offset': last_delivered_offset,          # latest delivered offset
+                         'last_acked_offset': last_acked_offset,                  # latest acked offset
+                         'pending_count': pending_count,                          # delivered but not yet ACKed
+                         'backlog_undelivered': backlog_undelivered,              # backlog not yet delivered
+                         'backlog_delivered_unacked': backlog_delivered_unacked,  # backlog delivered but unacked
+                         'backlog_unprocessed': total_backlog                     # total backlog (undelivered + delivered-unacked)
+                     }
+
+                 # Record the stream even when it has no consumer groups but does have a queue offset
+                 if not stream_metrics['groups'] and last_published_offset > 0:
+                     stream_metrics['groups']['_total'] = {
+                         'last_delivered_offset': 0,
+                         'last_acked_offset': 0,
+                         'pending_count': 0,
+                         'backlog_undelivered': last_published_offset,
+                         'backlog_unprocessed': last_published_offset
+                     }
+
+                 metrics[stream_name] = stream_metrics
+
+         except Exception as e:
+             logger.error(f"Failed to collect metrics: {e}")
+             import traceback
+             traceback.print_exc()
+
+         return metrics
+
+     async def save_metrics(self, metrics: Dict):
+         """
+         Persist the collected metrics to the database.
+
+         Args:
+             metrics: the collected metric data
+         """
+         if not metrics:
+             return
+
+         try:
+             async with self.AsyncSessionLocal() as session:
+                 # Prepare the rows to insert
+                 records = []
+                 timestamp = datetime.now(timezone.utc)
+
+                 for stream_name, stream_data in metrics.items():
+                     # Save one row per consumer group
+                     for group_name, group_data in stream_data.get('groups', {}).items():
+                         record = {
+                             'namespace': stream_data['namespace'],
+                             'stream_name': stream_name,
+                             'consumer_group': group_name,
+                             'last_published_offset': stream_data['last_published_offset'],
+                             'last_delivered_offset': group_data['last_delivered_offset'],
+                             'last_acked_offset': group_data['last_acked_offset'],
+                             'pending_count': group_data['pending_count'],
+                             'backlog_undelivered': group_data['backlog_undelivered'],
+                             'backlog_unprocessed': group_data['backlog_unprocessed'],
+                             'backlog_delivered_unacked': group_data.get('backlog_delivered_unacked', group_data['pending_count']),
+                             'created_at': timestamp
+                         }
+                         records.append(record)
+
+                     # If the stream has no consumer groups, still save a stream-level row
+                     if not stream_data.get('groups'):
+                         record = {
+                             'namespace': stream_data['namespace'],
+                             'stream_name': stream_name,
+                             'consumer_group': None,
+                             'last_published_offset': stream_data['last_published_offset'],
+                             'last_delivered_offset': 0,
+                             'last_acked_offset': 0,
+                             'pending_count': 0,
+                             'backlog_undelivered': stream_data['last_published_offset'],
+                             'backlog_unprocessed': stream_data['last_published_offset'],
+                             'created_at': timestamp
+                         }
+                         records.append(record)
+
+                 # Bulk insert
+                 if records:
+                     insert_sql = text("""
+                         INSERT INTO stream_backlog_monitor
+                         (namespace, stream_name, consumer_group, last_published_offset,
+                          last_delivered_offset, last_acked_offset, pending_count,
+                          backlog_undelivered, backlog_unprocessed, created_at)
+                         VALUES
+                         (:namespace, :stream_name, :consumer_group, :last_published_offset,
+                          :last_delivered_offset, :last_acked_offset, :pending_count,
+                          :backlog_undelivered, :backlog_unprocessed, :created_at)
+                     """)
+                     # Note: backlog_delivered_unacked can be derived from pending_count, so it is not stored separately
+
+                     await session.execute(insert_sql, records)
+                     await session.commit()
+                     logger.info(f"Saved {len(records)} monitoring records")
+
+         except Exception as e:
+             logger.error(f"Failed to save metrics: {e}")
+
+     async def run_collector(self, interval: int = 60):
+         """
+         Run the collection loop.
+
+         Args:
+             interval: collection interval in seconds
+         """
+         await self.initialize()
+
+         logger.info(f"Starting backlog monitor collector with {interval}s interval")
+
+         try:
+             while True:
+                 try:
+                     # Collect metrics
+                     metrics = await self.collect_metrics()
+
+                     # Persist them to the database
+                     await self.save_metrics(metrics)
+
+                     # Wait for the next collection cycle
+                     await asyncio.sleep(interval)
+
+                 except Exception as e:
+                     logger.error(f"Collector error: {e}")
+                     await asyncio.sleep(interval)
+
+         except KeyboardInterrupt:
+             logger.info("Stopping collector...")
+         finally:
+             await self.close()
+
+ # Helper functions for other modules to call
+ async def report_delivered_offset(redis_client, redis_prefix: str, queue: str, group_name: str, messages: List):
+     """
+     Report the delivered offset (called by event_pool).
+     Deprecated: TASK_OFFSETS is now updated directly instead.
+     """
+     pass  # Offset updates are now done in the executor
+
+ async def report_queue_offset(redis_client, redis_prefix: str, queue: str, offset: int):
+     """
+     Report the queue's latest offset (called when publishing messages).
+     Already handled automatically by a Lua script at send time.
+     """
+     pass  # Offset updates are now done by the Lua script at send time
+
+ if __name__ == "__main__":
+     # Test run of the collector
+     monitor = StreamBacklogMonitor()
+     asyncio.run(monitor.run_collector(interval=30))
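The per-group backlog arithmetic in collect_metrics is easiest to sanity-check with concrete numbers. A small illustrative sketch (the values below are made up, not taken from the package):

# Illustrative values only: the producer-side counter (QUEUE_OFFSETS) is at
# 1000, the group's acked counter (TASK_OFFSETS) is at 940, and 25 messages
# are pending in the consumer group (delivered but not yet ACKed).
last_published_offset = 1000
last_acked_offset = 940
pending_count = 25

total_backlog = max(0, last_published_offset - last_acked_offset)        # 60
backlog_undelivered = max(0, total_backlog - pending_count)              # 35
backlog_delivered_unacked = pending_count                                 # 25
last_delivered_offset = last_acked_offset + pending_count                 # 965

# The total backlog splits into "not yet delivered" and "delivered, not ACKed".
assert total_backlog == backlog_undelivered + backlog_delivered_unacked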