jettask 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. jettask/__init__.py +60 -2
  2. jettask/cli.py +314 -228
  3. jettask/config/__init__.py +9 -1
  4. jettask/config/config.py +245 -0
  5. jettask/config/env_loader.py +381 -0
  6. jettask/config/lua_scripts.py +158 -0
  7. jettask/config/nacos_config.py +132 -5
  8. jettask/core/__init__.py +1 -1
  9. jettask/core/app.py +1573 -666
  10. jettask/core/app_importer.py +33 -16
  11. jettask/core/container.py +532 -0
  12. jettask/core/task.py +1 -4
  13. jettask/core/unified_manager_base.py +2 -2
  14. jettask/executor/__init__.py +38 -0
  15. jettask/executor/core.py +625 -0
  16. jettask/executor/executor.py +338 -0
  17. jettask/executor/orchestrator.py +290 -0
  18. jettask/executor/process_entry.py +638 -0
  19. jettask/executor/task_executor.py +317 -0
  20. jettask/messaging/__init__.py +68 -0
  21. jettask/messaging/event_pool.py +2188 -0
  22. jettask/messaging/reader.py +519 -0
  23. jettask/messaging/registry.py +266 -0
  24. jettask/messaging/scanner.py +369 -0
  25. jettask/messaging/sender.py +312 -0
  26. jettask/persistence/__init__.py +118 -0
  27. jettask/persistence/backlog_monitor.py +567 -0
  28. jettask/{backend/data_access.py → persistence/base.py} +58 -57
  29. jettask/persistence/consumer.py +315 -0
  30. jettask/{core → persistence}/db_manager.py +23 -22
  31. jettask/persistence/maintenance.py +81 -0
  32. jettask/persistence/message_consumer.py +259 -0
  33. jettask/{backend/namespace_data_access.py → persistence/namespace.py} +66 -98
  34. jettask/persistence/offline_recovery.py +196 -0
  35. jettask/persistence/queue_discovery.py +215 -0
  36. jettask/persistence/task_persistence.py +218 -0
  37. jettask/persistence/task_updater.py +583 -0
  38. jettask/scheduler/__init__.py +2 -2
  39. jettask/scheduler/loader.py +6 -5
  40. jettask/scheduler/run_scheduler.py +1 -1
  41. jettask/scheduler/scheduler.py +7 -7
  42. jettask/scheduler/{unified_scheduler_manager.py → scheduler_coordinator.py} +18 -13
  43. jettask/task/__init__.py +16 -0
  44. jettask/{router.py → task/router.py} +26 -8
  45. jettask/task/task_center/__init__.py +9 -0
  46. jettask/task/task_executor.py +318 -0
  47. jettask/task/task_registry.py +291 -0
  48. jettask/test_connection_monitor.py +73 -0
  49. jettask/utils/__init__.py +31 -1
  50. jettask/{monitor/run_backlog_collector.py → utils/backlog_collector.py} +1 -1
  51. jettask/utils/db_connector.py +1629 -0
  52. jettask/{db_init.py → utils/db_init.py} +1 -1
  53. jettask/utils/rate_limit/__init__.py +30 -0
  54. jettask/utils/rate_limit/concurrency_limiter.py +665 -0
  55. jettask/utils/rate_limit/config.py +145 -0
  56. jettask/utils/rate_limit/limiter.py +41 -0
  57. jettask/utils/rate_limit/manager.py +269 -0
  58. jettask/utils/rate_limit/qps_limiter.py +154 -0
  59. jettask/utils/rate_limit/task_limiter.py +384 -0
  60. jettask/utils/serializer.py +3 -0
  61. jettask/{monitor/stream_backlog_monitor.py → utils/stream_backlog.py} +14 -6
  62. jettask/utils/time_sync.py +173 -0
  63. jettask/webui/__init__.py +27 -0
  64. jettask/{api/v1 → webui/api}/alerts.py +1 -1
  65. jettask/{api/v1 → webui/api}/analytics.py +2 -2
  66. jettask/{api/v1 → webui/api}/namespaces.py +1 -1
  67. jettask/{api/v1 → webui/api}/overview.py +1 -1
  68. jettask/{api/v1 → webui/api}/queues.py +3 -3
  69. jettask/{api/v1 → webui/api}/scheduled.py +1 -1
  70. jettask/{api/v1 → webui/api}/settings.py +1 -1
  71. jettask/{api.py → webui/app.py} +253 -145
  72. jettask/webui/namespace_manager/__init__.py +10 -0
  73. jettask/{multi_namespace_consumer.py → webui/namespace_manager/multi.py} +69 -22
  74. jettask/{unified_consumer_manager.py → webui/namespace_manager/unified.py} +1 -1
  75. jettask/{run.py → webui/run.py} +2 -2
  76. jettask/{services → webui/services}/__init__.py +1 -3
  77. jettask/{services → webui/services}/overview_service.py +34 -16
  78. jettask/{services → webui/services}/queue_service.py +1 -1
  79. jettask/{backend → webui/services}/queue_stats_v2.py +1 -1
  80. jettask/{services → webui/services}/settings_service.py +1 -1
  81. jettask/worker/__init__.py +53 -0
  82. jettask/worker/lifecycle.py +1507 -0
  83. jettask/worker/manager.py +583 -0
  84. jettask/{core/offline_worker_recovery.py → worker/recovery.py} +268 -175
  85. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/METADATA +2 -71
  86. jettask-0.2.20.dist-info/RECORD +145 -0
  87. jettask/__main__.py +0 -140
  88. jettask/api/__init__.py +0 -103
  89. jettask/backend/__init__.py +0 -1
  90. jettask/backend/api/__init__.py +0 -3
  91. jettask/backend/api/v1/__init__.py +0 -17
  92. jettask/backend/api/v1/monitoring.py +0 -431
  93. jettask/backend/api/v1/namespaces.py +0 -504
  94. jettask/backend/api/v1/queues.py +0 -342
  95. jettask/backend/api/v1/tasks.py +0 -367
  96. jettask/backend/core/__init__.py +0 -3
  97. jettask/backend/core/cache.py +0 -221
  98. jettask/backend/core/database.py +0 -200
  99. jettask/backend/core/exceptions.py +0 -102
  100. jettask/backend/dependencies.py +0 -261
  101. jettask/backend/init_meta_db.py +0 -158
  102. jettask/backend/main.py +0 -1426
  103. jettask/backend/main_unified.py +0 -78
  104. jettask/backend/main_v2.py +0 -394
  105. jettask/backend/models/__init__.py +0 -3
  106. jettask/backend/models/requests.py +0 -236
  107. jettask/backend/models/responses.py +0 -230
  108. jettask/backend/namespace_api_old.py +0 -267
  109. jettask/backend/services/__init__.py +0 -3
  110. jettask/backend/start.py +0 -42
  111. jettask/backend/unified_api_router.py +0 -1541
  112. jettask/cleanup_deprecated_tables.sql +0 -16
  113. jettask/core/consumer_manager.py +0 -1695
  114. jettask/core/delay_scanner.py +0 -256
  115. jettask/core/event_pool.py +0 -1700
  116. jettask/core/heartbeat_process.py +0 -222
  117. jettask/core/task_batch.py +0 -153
  118. jettask/core/worker_scanner.py +0 -271
  119. jettask/executors/__init__.py +0 -5
  120. jettask/executors/asyncio.py +0 -876
  121. jettask/executors/base.py +0 -30
  122. jettask/executors/common.py +0 -148
  123. jettask/executors/multi_asyncio.py +0 -309
  124. jettask/gradio_app.py +0 -570
  125. jettask/integrated_gradio_app.py +0 -1088
  126. jettask/main.py +0 -0
  127. jettask/monitoring/__init__.py +0 -3
  128. jettask/pg_consumer.py +0 -1896
  129. jettask/run_monitor.py +0 -22
  130. jettask/run_webui.py +0 -148
  131. jettask/scheduler/multi_namespace_scheduler.py +0 -294
  132. jettask/scheduler/unified_manager.py +0 -450
  133. jettask/task_center_client.py +0 -150
  134. jettask/utils/serializer_optimized.py +0 -33
  135. jettask/webui_exceptions.py +0 -67
  136. jettask-0.2.18.dist-info/RECORD +0 -150
  137. /jettask/{constants.py → config/constants.py} +0 -0
  138. /jettask/{backend/config.py → config/task_center.py} +0 -0
  139. /jettask/{pg_consumer → messaging/pg_consumer}/pg_consumer_v2.py +0 -0
  140. /jettask/{pg_consumer → messaging/pg_consumer}/sql/add_execution_time_field.sql +0 -0
  141. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_new_tables.sql +0 -0
  142. /jettask/{pg_consumer → messaging/pg_consumer}/sql/create_tables_v3.sql +0 -0
  143. /jettask/{pg_consumer → messaging/pg_consumer}/sql/migrate_to_new_structure.sql +0 -0
  144. /jettask/{pg_consumer → messaging/pg_consumer}/sql/modify_time_fields.sql +0 -0
  145. /jettask/{pg_consumer → messaging/pg_consumer}/sql_utils.py +0 -0
  146. /jettask/{models.py → persistence/models.py} +0 -0
  147. /jettask/scheduler/{manager.py → task_crud.py} +0 -0
  148. /jettask/{schema.sql → schemas/schema.sql} +0 -0
  149. /jettask/{task_center.py → task/task_center/client.py} +0 -0
  150. /jettask/{monitoring → utils}/file_watcher.py +0 -0
  151. /jettask/{services/redis_monitor_service.py → utils/redis_monitor.py} +0 -0
  152. /jettask/{api/v1 → webui/api}/__init__.py +0 -0
  153. /jettask/{webui_config.py → webui/config.py} +0 -0
  154. /jettask/{webui_models → webui/models}/__init__.py +0 -0
  155. /jettask/{webui_models → webui/models}/namespace.py +0 -0
  156. /jettask/{services → webui/services}/alert_service.py +0 -0
  157. /jettask/{services → webui/services}/analytics_service.py +0 -0
  158. /jettask/{services → webui/services}/scheduled_task_service.py +0 -0
  159. /jettask/{services → webui/services}/task_service.py +0 -0
  160. /jettask/{webui_sql → webui/sql}/batch_upsert_functions.sql +0 -0
  161. /jettask/{webui_sql → webui/sql}/verify_database.sql +0 -0
  162. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/WHEEL +0 -0
  163. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/entry_points.txt +0 -0
  164. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/licenses/LICENSE +0 -0
  165. {jettask-0.2.18.dist-info → jettask-0.2.20.dist-info}/top_level.txt +0 -0
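
Most of the churn above is a package re-organization rather than a behavioral change: WebUI code moves under jettask.webui, persistence code under jettask.persistence, and worker/executor/messaging code into dedicated subpackages. As an illustrative before/after (paths taken from the renames above and the import hunks below; whether any compatibility shims remain at the old locations is not shown in this diff):

# Import paths as they appear in 0.2.18 (old) vs 0.2.20 (new).
# Old paths, per the removed lines in the hunks below:
#   from jettask.pg_consumer import PostgreSQLConsumer
#   from jettask.webui_config import PostgreSQLConfig, RedisConfig
#   from jettask.models import Base, Task
#   from jettask.services.settings_service import SettingsService

# New paths, per the added lines in the hunks below:
from jettask.persistence import PostgreSQLConsumer
from jettask.webui.config import PostgreSQLConfig, RedisConfig
from jettask.persistence.models import Base, Task
from jettask.webui.services.settings_service import SettingsService
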
jettask/{api/v1 → webui/api}/scheduled.py
@@ -7,7 +7,7 @@ from typing import Optional
 import logging
 
 from jettask.schemas import ScheduledTaskRequest
-from jettask.services.scheduled_task_service import ScheduledTaskService
+from jettask.webui.services.scheduled_task_service import ScheduledTaskService
 
 router = APIRouter(prefix="/scheduled", tags=["scheduled"])
 logger = logging.getLogger(__name__)
jettask/{api/v1 → webui/api}/settings.py
@@ -6,7 +6,7 @@ from fastapi import APIRouter, HTTPException
 import logging
 import traceback
 
-from jettask.services.settings_service import SettingsService
+from jettask.webui.services.settings_service import SettingsService
 
 logger = logging.getLogger(__name__)
 
jettask/{api.py → webui/app.py}
@@ -8,6 +8,7 @@ from contextlib import asynccontextmanager
 from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Query
 from fastapi.responses import HTMLResponse
 from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
 from starlette.websockets import WebSocketState
 from redis import asyncio as aioredis
 import uvicorn
@@ -17,9 +18,9 @@ from sqlalchemy.orm import sessionmaker
 from sqlalchemy import select, func, and_, or_, text
 from sqlalchemy.dialects import postgresql
 
-from jettask.pg_consumer import PostgreSQLConsumer
-from jettask.webui_config import PostgreSQLConfig, RedisConfig
-from jettask.models import Base, Task, QueueStats, Worker
+from jettask.persistence import PostgreSQLConsumer
+from jettask.webui.config import PostgreSQLConfig, RedisConfig
+from jettask.persistence.models import Base, Task
 
 logger = logging.getLogger(__name__)
 
@@ -105,6 +106,7 @@ class RedisMonitor:
         self.redis_url = redis_url
         self.redis_prefix = redis_prefix
         self.redis: Optional[aioredis.Redis] = None
+        self.worker_state_manager = None  # initialized lazily
         self.scanner_task: Optional[asyncio.Task] = None
         self.scanner_interval = 5  # scan every 5 seconds
         self.default_heartbeat_timeout = 30  # default heartbeat timeout: 30 seconds
@@ -117,19 +119,26 @@ class RedisMonitor:
         self._scanner_running = False  # flag indicating whether the scanner is running
 
     async def connect(self):
-        # Use the project's connection-pool pattern with optimized connection parameters
-        pool = aioredis.ConnectionPool.from_url(
+        # Use the unified connection-pool management
+        from jettask.utils.db_connector import get_async_redis_pool
+
+        pool = get_async_redis_pool(
             self.redis_url,
             decode_responses=True,
-            max_connections=100,  # increase the maximum number of connections
-            retry_on_timeout=True,
-            retry_on_error=[ConnectionError, TimeoutError],
+            max_connections=100,
+            socket_connect_timeout=5,
+            socket_timeout=10,
             socket_keepalive=True,
-            socket_connect_timeout=5,  # shorter connect timeout
-            socket_timeout=10,  # set a socket timeout
             health_check_interval=30
         )
         self.redis = aioredis.Redis(connection_pool=pool)
+
+        # Initialize the WorkerStateManager
+        from jettask.worker.lifecycle import WorkerStateManager
+        self.worker_state_manager = WorkerStateManager(
+            redis_client=self.redis,
+            redis_prefix=self.redis_prefix
+        )
 
     async def close(self):
         # Stop the scanner task
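
For readers who want the new connection pattern in isolation: a minimal, self-contained sketch of the connect() change above, assuming get_async_redis_pool() returns a redis.asyncio connection pool compatible with aioredis.Redis(connection_pool=...); the helper's implementation lives in the new jettask/utils/db_connector.py and is not shown in this diff.

import asyncio
from redis import asyncio as aioredis

from jettask.utils.db_connector import get_async_redis_pool  # new in 0.2.20


async def make_client(redis_url: str) -> aioredis.Redis:
    # Same keyword arguments as the hunk above; per the "unified connection-pool
    # management" comment, the pool is obtained from a central helper instead of
    # being built inline with aioredis.ConnectionPool.from_url().
    pool = get_async_redis_pool(
        redis_url,
        decode_responses=True,
        max_connections=100,
        socket_connect_timeout=5,
        socket_timeout=10,
        socket_keepalive=True,
        health_check_interval=30,
    )
    return aioredis.Redis(connection_pool=pool)


async def main() -> None:
    client = await make_client("redis://localhost:6379/0")
    print(await client.ping())


if __name__ == "__main__":
    asyncio.run(main())
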
@@ -387,18 +396,19 @@ class RedisMonitor:
         current_time = datetime.now(timezone.utc).timestamp()
 
         # Scan all WORKER keys directly (excluding HISTORY-related keys)
-        pattern = f"{self.redis_prefix}:WORKER:*"
-        cursor = 0
-        worker_keys = []
+        # Use RegistryManager instead of SCAN
+        from jettask.worker.manager import WorkerState as WorkerRegistry
+        from jettask.messaging.registry import QueueRegistry
+        worker_registry = WorkerRegistry(
+        queue_registry = QueueRegistry(
+            redis_client=None,
+            async_redis_client=self.redis,
+            redis_prefix=self.redis_prefix
+        )
 
-        while True:
-            cursor, keys = await self.redis.scan(cursor, match=pattern, count=100)
-            # Filter out HISTORY-related keys
-            for key in keys:
-                if ':HISTORY:' not in key:
-                    worker_keys.append(key)
-            if cursor == 0:
-                break
+        # Get all worker IDs
+        worker_ids = await worker_registry.get_all_workers()
+        worker_keys = [f"{self.redis_prefix}:WORKER:{wid}" for wid in worker_ids]
 
         # Fetch all worker data in one batch
         if worker_keys:
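
This SCAN-to-registry rewrite is repeated in several methods below; note that the added code is not entirely self-consistent (some hunks construct a RegistryManager that is never imported, while the calls go through worker_registry and queue_registry). The sketch below shows only the intent of the change, using just the calls that appear in the added lines (get_all_workers(), get_all_queues(), and the key format); the registry constructors themselves are not pinned down by this diff.

# Illustrative only: the registry objects' constructors are not shown in this
# diff, so they are passed in here as parameters rather than built inline.

async def list_worker_keys(worker_registry, redis_prefix: str) -> list[str]:
    # Old: SCAN f"{redis_prefix}:WORKER:*" and drop keys containing ':HISTORY:'.
    # New: ask the worker registry for IDs, then rebuild the hash key names.
    worker_ids = await worker_registry.get_all_workers()
    return [f"{redis_prefix}:WORKER:{wid}" for wid in worker_ids]


async def list_queues(queue_registry) -> list[str]:
    # Old: SCAN f"{prefix}:QUEUE:*" and keep only keys whose type is 'stream'.
    # New: the queue registry returns queue names directly.
    return sorted(await queue_registry.get_all_queues())
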
@@ -454,18 +464,18 @@ class RedisMonitor:
         """Get aggregated worker statistics for a queue."""
         try:
             # Scan all WORKER keys directly and filter them (excluding HISTORY-related keys)
-            pattern = f"{self.redis_prefix}:WORKER:*"
-            cursor = 0
-            worker_keys = []
-
-            while True:
-                cursor, keys = await self.redis.scan(cursor, match=pattern, count=100)
-                # Filter out HISTORY-related keys
-                for key in keys:
-                    if ':HISTORY:' not in key:
-                        worker_keys.append(key)
-                if cursor == 0:
-                    break
+            # Use RegistryManager instead of SCAN
+            from jettask.worker.manager import WorkerState as WorkerRegistry
+            from jettask.messaging.registry import QueueRegistry
+            registry = RegistryManager(
+                redis_client=None,
+                async_redis_client=self.redis,
+                redis_prefix=self.redis_prefix
+            )
+
+            # Get all worker IDs
+            worker_ids = await worker_registry.get_all_workers()
+            worker_keys = [f"{self.redis_prefix}:WORKER:{wid}" for wid in worker_ids]
 
             if not worker_keys:
                 return {
@@ -619,18 +629,18 @@ class RedisMonitor:
         """Get aggregated worker statistics for a queue (fast version, no history)."""
         try:
             # Scan all WORKER keys directly (excluding HISTORY-related keys)
-            pattern = f"{self.redis_prefix}:WORKER:*"
-            cursor = 0
-            worker_keys = []
-
-            while True:
-                cursor, keys = await self.redis.scan(cursor, match=pattern, count=100)
-                # Filter out HISTORY-related keys
-                for key in keys:
-                    if ':HISTORY:' not in key:
-                        worker_keys.append(key)
-                if cursor == 0:
-                    break
+            # Use RegistryManager instead of SCAN
+            from jettask.worker.manager import WorkerState as WorkerRegistry
+            from jettask.messaging.registry import QueueRegistry
+            registry = RegistryManager(
+                redis_client=None,
+                async_redis_client=self.redis,
+                redis_prefix=self.redis_prefix
+            )
+
+            # Get all worker IDs
+            worker_ids = await worker_registry.get_all_workers()
+            worker_keys = [f"{self.redis_prefix}:WORKER:{wid}" for wid in worker_ids]
 
             if not worker_keys:
                 return {
@@ -748,16 +758,18 @@ class RedisMonitor:
             # Scan all WORKER keys (excluding HISTORY-related keys)
             pattern = f"{self.redis_prefix}:WORKER:*"
             cursor = 0
-            worker_keys = []
-
-            while True:
-                cursor, keys = await self.redis.scan(cursor, match=pattern, count=100)
-                # Filter out HISTORY-related keys (no longer created, but just in case)
-                for key in keys:
-                    if ':HISTORY:' not in key:
-                        worker_keys.append(key)
-                if cursor == 0:
-                    break
+            # Use RegistryManager instead of SCAN
+            from jettask.worker.manager import WorkerState as WorkerRegistry
+            from jettask.messaging.registry import QueueRegistry
+            registry = RegistryManager(
+                redis_client=None,
+                async_redis_client=self.redis,
+                redis_prefix=self.redis_prefix
+            )
+
+            # Get all worker IDs
+            worker_ids = await worker_registry.get_all_workers()
+            worker_keys = [f"{self.redis_prefix}:WORKER:{wid}" for wid in worker_ids]
 
             if not worker_keys:
                 return []
@@ -1028,27 +1040,17 @@ class RedisMonitor:
         pattern = f"{self.redis_prefix}:QUEUE:*"
         cursor = 0
 
-        # Check key types in batches with a pipeline
-        while True:
-            cursor, keys = await self.redis.scan(cursor, match=pattern, count=500)  # larger scan batch size
-
-            if keys:
-                # Check the key types in one batch
-                pipe = self.redis.pipeline()
-                for key in keys:
-                    pipe.type(key)
-
-                types = await pipe.execute()
-
-                for i, key in enumerate(keys):
-                    if types[i] == 'stream':
-                        # Extract the queue name (strip the prefix)
-                        if key.startswith(f"{self.redis_prefix}:QUEUE:"):
-                            queue_name = key[len(f"{self.redis_prefix}:QUEUE:"):]
-                            queues.add(queue_name)
-
-            if cursor == 0:
-                break
+        # Use RegistryManager instead of SCAN
+        from jettask.worker.manager import WorkerState as WorkerRegistry
+        from jettask.messaging.registry import QueueRegistry
+        registry = RegistryManager(
+            redis_client=None,
+            async_redis_client=self.redis,
+            redis_prefix=self.redis_prefix
+        )
+
+        # Get all queues
+        queues = await queue_registry.get_all_queues()
 
         # Return the sorted queue list and update the cache
         result = sorted(list(queues))
@@ -1165,51 +1167,88 @@ class RedisMonitor:
 
         while self._scanner_running:
             try:
-                # Scan all workers
-                pattern = f"{self.redis_prefix}:WORKER:*"
-                cursor = 0
-                worker_keys = []
+                # Use RegistryManager to fetch all workers and avoid SCAN
+                from jettask.worker.manager import WorkerState as WorkerRegistry
+                from jettask.messaging.registry import QueueRegistry
+                registry = RegistryManager(
+                    redis_client=None,
+                    async_redis_client=self.redis,
+                    redis_prefix=self.redis_prefix
+                )
 
-                while True:
-                    cursor, keys = await self.redis.scan(cursor, match=pattern, count=100)
-                    # Filter out HISTORY-related keys
-                    for key in keys:
-                        if ':HISTORY:' not in key:
-                            worker_keys.append(key)
-                    if cursor == 0:
-                        break
+                # Get all worker IDs
+                worker_ids = await worker_registry.get_all_workers()
+
+                # Build the worker keys
+                worker_keys = []
+                for worker_id in worker_ids:
+                    worker_key = f"{self.redis_prefix}:WORKER:{worker_id}"
+                    # Filter out HISTORY-related keys (the registry should not contain any)
+                    if ':HISTORY:' not in worker_key:
+                        worker_keys.append(worker_key)
 
                 if worker_keys:
-                    # Fetch worker data in batch
-                    pipe = self.redis.pipeline()
-                    for key in worker_keys:
-                        pipe.hgetall(key)
-                    all_workers_data = await pipe.execute()
-
+                    # Fetch worker data in batch via the WorkerStateManager
                     current_time = time.time()
-
-                    # Check each worker's heartbeat
-                    for i, worker_data in enumerate(all_workers_data):
-                        if not worker_data:
-                            continue
-
-                        try:
-                            # Get heartbeat-related fields
-                            last_heartbeat = float(worker_data.get('last_heartbeat', 0))
-                            is_alive = worker_data.get('is_alive', 'true').lower() == 'true'
-                            heartbeat_timeout = float(worker_data.get('heartbeat_timeout', self.default_heartbeat_timeout))
-                            consumer_id = worker_data.get('consumer_id', '')
-
-                            # Check for timeout
-                            if is_alive and (current_time - last_heartbeat) > heartbeat_timeout:
-                                logger.info(f"Worker {consumer_id} heartbeat timed out, marking as offline")
-
-                                # Mark the worker offline
-                                worker_key = worker_keys[i]
-                                await self.redis.hset(worker_key, 'is_alive', 'false')
-
-                        except Exception as e:
-                            logger.error(f"Error while checking worker heartbeat: {e}")
+
+                    if self.worker_state_manager:
+                        # Fetch all worker info in batch via the WorkerStateManager
+                        all_workers_info = await self.worker_state_manager.get_all_workers_info(only_alive=False)
+
+                        # Check each worker's heartbeat
+                        for worker_id in worker_ids:
+                            worker_data = all_workers_info.get(worker_id)
+                            if not worker_data:
+                                continue
+
+                            try:
+                                # Get heartbeat-related fields
+                                last_heartbeat = float(worker_data.get('last_heartbeat', 0))
+                                is_alive = worker_data.get('is_alive') == 'true'
+                                heartbeat_timeout = float(worker_data.get('heartbeat_timeout', self.default_heartbeat_timeout))
+                                consumer_id = worker_data.get('consumer_id', '')
+
+                                # Check for timeout
+                                if is_alive and (current_time - last_heartbeat) > heartbeat_timeout:
+                                    logger.info(f"Worker {consumer_id} heartbeat timed out, marking as offline")
+
+                                    # Mark the worker offline via the WorkerStateManager
+                                    await self.worker_state_manager.set_worker_offline(
+                                        worker_id=worker_id,
+                                        reason="heartbeat_timeout"
+                                    )
+
+                            except Exception as e:
+                                logger.error(f"Error while checking worker heartbeat: {e}")
+                    else:
+                        # Fallback: use Redis directly
+                        pipe = self.redis.pipeline()
+                        for key in worker_keys:
+                            pipe.hgetall(key)
+                        all_workers_data = await pipe.execute()
+
+                        # Check each worker's heartbeat
+                        for i, worker_data in enumerate(all_workers_data):
+                            if not worker_data:
+                                continue
+
+                            try:
+                                # Get heartbeat-related fields
+                                last_heartbeat = float(worker_data.get('last_heartbeat', 0))
+                                is_alive = worker_data.get('is_alive', 'true').lower() == 'true'
+                                heartbeat_timeout = float(worker_data.get('heartbeat_timeout', self.default_heartbeat_timeout))
+                                consumer_id = worker_data.get('consumer_id', '')
+
+                                # Check for timeout
+                                if is_alive and (current_time - last_heartbeat) > heartbeat_timeout:
+                                    logger.info(f"Worker {consumer_id} heartbeat timed out, marking as offline")
+
+                                    # Mark the worker offline
+                                    worker_key = worker_keys[i]
+                                    await self.redis.hset(worker_key, 'is_alive', 'false')
+
+                            except Exception as e:
+                                logger.error(f"Error while checking worker heartbeat: {e}")
 
                 # Wait for the next scan
                 await asyncio.sleep(self.scanner_interval)
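
The heartbeat scanner now prefers the WorkerStateManager and only falls back to raw Redis hashes when it is unavailable. A condensed sketch of the new primary path, using only the method names, keyword arguments, and hash fields that appear in the added lines (everything else here, including the function name, is illustrative):

import time


async def mark_timed_out_workers(worker_state_manager, worker_ids, default_timeout=30.0):
    # Batch-read every worker's state, alive or not.
    all_info = await worker_state_manager.get_all_workers_info(only_alive=False)
    now = time.time()
    for worker_id in worker_ids:
        data = all_info.get(worker_id)
        if not data:
            continue
        last_heartbeat = float(data.get('last_heartbeat', 0))
        timeout = float(data.get('heartbeat_timeout', default_timeout))
        # A worker is marked offline once its heartbeat is older than its timeout.
        if data.get('is_alive') == 'true' and (now - last_heartbeat) > timeout:
            await worker_state_manager.set_worker_offline(
                worker_id=worker_id,
                reason="heartbeat_timeout",
            )
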
@@ -1247,40 +1286,109 @@ pg_consumer = None
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     global pg_consumer
-
+
     # Startup
-    await monitor.connect()
-    # Start the heartbeat scanner
-    await monitor.start_heartbeat_scanner()
-
-    # Start the PostgreSQL consumer (if configured and explicitly enabled)
-    if hasattr(app.state, 'pg_config') and getattr(app.state, 'enable_consumer', False):
-        redis_config = RedisConfig.from_env()
-        pg_consumer = PostgreSQLConsumer(app.state.pg_config, redis_config)
-        await pg_consumer.start()
-        logging.info("PostgreSQL consumer started")
-    else:
-        logging.info("PostgreSQL consumer disabled (use --with-consumer to enable)")
-
+    try:
+        import os
+        # Check whether the Nacos configuration should be used
+        use_nacos = os.getenv('USE_NACOS', 'false').lower() == 'true'
+
+        # Initialize the database manager
+        from jettask.persistence.db_manager import init_db_manager
+        await init_db_manager(use_nacos=use_nacos)
+
+        # Create data-access instances
+        from jettask.persistence.base import JetTaskDataAccess
+        from jettask.persistence.namespace import get_namespace_data_access
+        from jettask.config.task_center import task_center_config
+
+        data_access = JetTaskDataAccess()
+        namespace_data_access = get_namespace_data_access()
+
+        # Store them in app.state for the routes to use
+        app.state.data_access = data_access
+        app.state.namespace_data_access = namespace_data_access
+
+        # Initialize JetTask data access
+        await data_access.initialize()
+
+        # Log the task-center configuration
+        logger.info("=" * 60)
+        logger.info("Task center configuration:")
+        logger.info(f"  Config mode: {'Nacos' if use_nacos else 'environment variables'}")
+        logger.info(f"  Metadata DB: {task_center_config.meta_db_host}:{task_center_config.meta_db_port}/{task_center_config.meta_db_name}")
+        logger.info(f"  API service: {task_center_config.api_host}:{task_center_config.api_port}")
+        logger.info(f"  Base URL: {task_center_config.base_url}")
+        logger.info("=" * 60)
+
+        # Connect the monitor
+        await monitor.connect()
+        # Start the heartbeat scanner
+        await monitor.start_heartbeat_scanner()
+
+        # Start the PostgreSQL consumer (if configured and explicitly enabled)
+        if hasattr(app.state, 'pg_config') and getattr(app.state, 'enable_consumer', False):
+            redis_config = RedisConfig.from_env()
+            pg_consumer = PostgreSQLConsumer(app.state.pg_config, redis_config)
+            await pg_consumer.start()
+            logging.info("PostgreSQL consumer started")
+        else:
+            logging.info("PostgreSQL consumer disabled (use --with-consumer to enable)")
+
+        logger.info("JetTask WebUI started successfully")
+    except Exception as e:
+        logger.error(f"Startup failed: {e}")
+        import traceback
+        traceback.print_exc()
+        raise
+
     yield
-
+
     # Shutdown
-    await monitor.stop_heartbeat_scanner()
-    await monitor.close()
-
-    # Stop the PostgreSQL consumer
-    if pg_consumer:
-        await pg_consumer.stop()
-
-    # Close the SQLAlchemy engine
-    global async_engine
-    if async_engine:
-        await async_engine.dispose()
-        async_engine = None
-        logger.info("SQLAlchemy async engine closed")
+    try:
+        # Stop the heartbeat scanner
+        await monitor.stop_heartbeat_scanner()
+        await monitor.close()
+
+        # Stop the PostgreSQL consumer
+        if pg_consumer:
+            await pg_consumer.stop()
+
+        # Close data access
+        if hasattr(app.state, 'data_access'):
+            await app.state.data_access.close()
+
+        # Close the database manager
+        from jettask.persistence.db_manager import close_db_manager
+        await close_db_manager()
+
+        # Close the SQLAlchemy engine
+        global async_engine
+        if async_engine:
+            await async_engine.dispose()
+            async_engine = None
+
+        logger.info("JetTask WebUI shut down cleanly")
+    except Exception as e:
+        logger.error(f"Error during shutdown: {e}")
+        import traceback
+        traceback.print_exc()
 
 app = FastAPI(title="Jettask Monitor", lifespan=lifespan)
 
+# Configure CORS
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # allow all origins (production should pin specific domains)
+    allow_credentials=True,
+    allow_methods=["*"],  # allow all HTTP methods
+    allow_headers=["*"],  # allow all request headers
+)
+
+# Register the API routes
+from jettask.webui.api import api_router
+app.include_router(api_router)
+
 
 @app.get("/api/queue/{queue_name}/tasks")
 async def get_queue_tasks(
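
The CORS policy added above is wide open (allow_origins=["*"]), which the inline comment itself flags as unsuitable for production. If the WebUI is served to a known frontend, a tighter configuration uses the same standard Starlette/FastAPI middleware options; the origin below is a hypothetical placeholder:

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI(title="Jettask Monitor")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["https://tasks.example.com"],  # hypothetical frontend origin
    allow_credentials=True,
    allow_methods=["GET", "POST"],                # only the verbs the API needs
    allow_headers=["Authorization", "Content-Type"],
)
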
jettask/webui/namespace_manager/__init__.py (new file)
@@ -0,0 +1,10 @@
+"""
+Namespace manager module.
+
+Provides consumer management for single and multiple namespaces.
+"""
+
+from .multi import NamespaceConsumerProcess
+from .unified import UnifiedConsumerManager
+
+__all__ = ['NamespaceConsumerProcess', 'UnifiedConsumerManager']
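
Since the new package re-exports both managers in __all__, downstream code would import them from the package root; a trivial usage sketch (constructor signatures are not shown in this diff):

from jettask.webui.namespace_manager import (
    NamespaceConsumerProcess,
    UnifiedConsumerManager,
)
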