jettask 0.2.5__tar.gz → 0.2.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- jettask-0.2.7/MANIFEST.in +29 -0
- {jettask-0.2.5 → jettask-0.2.7}/PKG-INFO +80 -10
- jettask-0.2.7/README.md +67 -0
- jettask-0.2.7/jettask/monitor/run_backlog_collector.py +96 -0
- jettask-0.2.7/jettask/monitor/stream_backlog_monitor.py +362 -0
- jettask-0.2.7/jettask/pg_consumer/pg_consumer_v2.py +403 -0
- jettask-0.2.7/jettask/pg_consumer/sql_utils.py +182 -0
- jettask-0.2.7/jettask/scheduler/__init__.py +17 -0
- jettask-0.2.7/jettask/scheduler/add_execution_count.sql +11 -0
- jettask-0.2.7/jettask/scheduler/add_priority_field.sql +26 -0
- jettask-0.2.7/jettask/scheduler/add_scheduler_id.sql +25 -0
- jettask-0.2.7/jettask/scheduler/add_scheduler_id_index.sql +10 -0
- jettask-0.2.7/jettask/scheduler/loader.py +249 -0
- jettask-0.2.7/jettask/scheduler/make_scheduler_id_required.sql +28 -0
- jettask-0.2.7/jettask/scheduler/manager.py +696 -0
- jettask-0.2.7/jettask/scheduler/migrate_interval_seconds.sql +9 -0
- jettask-0.2.7/jettask/scheduler/models.py +200 -0
- jettask-0.2.7/jettask/scheduler/multi_namespace_scheduler.py +294 -0
- jettask-0.2.7/jettask/scheduler/performance_optimization.sql +45 -0
- jettask-0.2.7/jettask/scheduler/run_scheduler.py +186 -0
- jettask-0.2.7/jettask/scheduler/scheduler.py +715 -0
- jettask-0.2.7/jettask/scheduler/schema.sql +84 -0
- jettask-0.2.7/jettask/scheduler/unified_manager.py +450 -0
- jettask-0.2.7/jettask/scheduler/unified_scheduler_manager.py +280 -0
- jettask-0.2.7/jettask/webui/backend/api/__init__.py +3 -0
- jettask-0.2.7/jettask/webui/backend/api/v1/__init__.py +17 -0
- jettask-0.2.7/jettask/webui/backend/api/v1/monitoring.py +431 -0
- jettask-0.2.7/jettask/webui/backend/api/v1/namespaces.py +504 -0
- jettask-0.2.7/jettask/webui/backend/api/v1/queues.py +342 -0
- jettask-0.2.7/jettask/webui/backend/api/v1/tasks.py +367 -0
- jettask-0.2.7/jettask/webui/backend/core/__init__.py +3 -0
- jettask-0.2.7/jettask/webui/backend/core/cache.py +221 -0
- jettask-0.2.7/jettask/webui/backend/core/database.py +200 -0
- jettask-0.2.7/jettask/webui/backend/core/exceptions.py +102 -0
- jettask-0.2.7/jettask/webui/backend/models/__init__.py +3 -0
- jettask-0.2.7/jettask/webui/backend/models/requests.py +236 -0
- jettask-0.2.7/jettask/webui/backend/models/responses.py +230 -0
- jettask-0.2.7/jettask/webui/backend/services/__init__.py +3 -0
- jettask-0.2.7/jettask/webui/frontend/index.html +13 -0
- jettask-0.2.7/jettask/webui/models/__init__.py +3 -0
- jettask-0.2.7/jettask/webui/models/namespace.py +63 -0
- jettask-0.2.7/jettask/webui/sql/batch_upsert_functions.sql +178 -0
- jettask-0.2.7/jettask/webui/sql/init_database.sql +640 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/PKG-INFO +80 -10
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/SOURCES.txt +42 -50
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/requires.txt +10 -3
- {jettask-0.2.5 → jettask-0.2.7}/pyproject.toml +26 -6
- jettask-0.2.5/jettask/webui/frontend/package-lock.json +0 -4833
- jettask-0.2.5/jettask/webui/frontend/package.json +0 -30
- jettask-0.2.5/jettask/webui/frontend/src/App.css +0 -109
- jettask-0.2.5/jettask/webui/frontend/src/App.jsx +0 -66
- jettask-0.2.5/jettask/webui/frontend/src/components/NamespaceSelector.jsx +0 -166
- jettask-0.2.5/jettask/webui/frontend/src/components/QueueBacklogChart.jsx +0 -298
- jettask-0.2.5/jettask/webui/frontend/src/components/QueueBacklogTrend.jsx +0 -638
- jettask-0.2.5/jettask/webui/frontend/src/components/QueueDetailsTable.css +0 -65
- jettask-0.2.5/jettask/webui/frontend/src/components/QueueDetailsTable.jsx +0 -487
- jettask-0.2.5/jettask/webui/frontend/src/components/QueueDetailsTableV2.jsx +0 -465
- jettask-0.2.5/jettask/webui/frontend/src/components/ScheduledTaskFilter.jsx +0 -423
- jettask-0.2.5/jettask/webui/frontend/src/components/TaskFilter.jsx +0 -425
- jettask-0.2.5/jettask/webui/frontend/src/components/TimeRangeSelector.css +0 -21
- jettask-0.2.5/jettask/webui/frontend/src/components/TimeRangeSelector.jsx +0 -160
- jettask-0.2.5/jettask/webui/frontend/src/components/charts/QueueChart.jsx +0 -111
- jettask-0.2.5/jettask/webui/frontend/src/components/charts/QueueTrendChart.jsx +0 -115
- jettask-0.2.5/jettask/webui/frontend/src/components/charts/WorkerChart.jsx +0 -40
- jettask-0.2.5/jettask/webui/frontend/src/components/common/StatsCard.jsx +0 -18
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/AppLayout.css +0 -95
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/AppLayout.jsx +0 -49
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/Header.css +0 -106
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/Header.jsx +0 -106
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/SideMenu.css +0 -137
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/SideMenu.jsx +0 -209
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/TabsNav.css +0 -244
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/TabsNav.jsx +0 -206
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/UserInfo.css +0 -197
- jettask-0.2.5/jettask/webui/frontend/src/components/layout/UserInfo.jsx +0 -197
- jettask-0.2.5/jettask/webui/frontend/src/contexts/LoadingContext.jsx +0 -27
- jettask-0.2.5/jettask/webui/frontend/src/contexts/NamespaceContext.jsx +0 -72
- jettask-0.2.5/jettask/webui/frontend/src/contexts/TabsContext.backup.jsx +0 -245
- jettask-0.2.5/jettask/webui/frontend/src/index.css +0 -114
- jettask-0.2.5/jettask/webui/frontend/src/main.jsx +0 -20
- jettask-0.2.5/jettask/webui/frontend/src/pages/Alerts.jsx +0 -684
- jettask-0.2.5/jettask/webui/frontend/src/pages/Dashboard/index.css +0 -35
- jettask-0.2.5/jettask/webui/frontend/src/pages/Dashboard/index.jsx +0 -281
- jettask-0.2.5/jettask/webui/frontend/src/pages/Dashboard.jsx +0 -1330
- jettask-0.2.5/jettask/webui/frontend/src/pages/QueueDetail.jsx +0 -1117
- jettask-0.2.5/jettask/webui/frontend/src/pages/QueueMonitor.jsx +0 -527
- jettask-0.2.5/jettask/webui/frontend/src/pages/Queues.jsx +0 -12
- jettask-0.2.5/jettask/webui/frontend/src/pages/ScheduledTasks.jsx +0 -809
- jettask-0.2.5/jettask/webui/frontend/src/pages/Settings.jsx +0 -800
- jettask-0.2.5/jettask/webui/frontend/src/pages/Workers.jsx +0 -12
- jettask-0.2.5/jettask/webui/frontend/src/services/api.js +0 -114
- jettask-0.2.5/jettask/webui/frontend/src/services/queueTrend.js +0 -152
- jettask-0.2.5/jettask/webui/frontend/src/utils/suppressWarnings.js +0 -22
- jettask-0.2.5/jettask/webui/frontend/src/utils/userPreferences.js +0 -154
- jettask-0.2.5/jettask.egg-info/not-zip-safe +0 -1
- jettask-0.2.5/requirements.txt +0 -26
- jettask-0.2.5/setup.py +0 -82
- {jettask-0.2.5 → jettask-0.2.7}/LICENSE +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/config/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/config/performance.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/constants.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/app.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/app_importer.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/cli.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/consumer_manager.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/context.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/delay_scanner.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/enums.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/event_pool.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/heartbeat_process.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/message.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/offline_worker_recovery.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/retry.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/task.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/task_batch.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/unified_manager_base.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/core/worker_scanner.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/exceptions.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/executors/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/executors/asyncio.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/executors/base.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/executors/common.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/executors/multi_asyncio.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/monitoring/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/monitoring/file_watcher.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/router.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/error_handler.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/exception_hook.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/helpers.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/logger.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/serializer.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/serializer_optimized.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/task_logger.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/utils/traceback_filter.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/__main__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/api.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/__init__.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/config.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/data_access.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/data_api.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/dependencies.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/init_meta_db.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/main.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/main_unified.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/main_v2.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/namespace_api.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/namespace_api_old.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/namespace_data_access.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/queue_backlog_api.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/queue_stats_v2.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/redis_monitor_api.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/start.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/backend/unified_api_router.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/cleanup_deprecated_tables.sql +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/config.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/db_init.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/gradio_app.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/integrated_gradio_app.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/models.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/multi_namespace_consumer.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/pg_consumer.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/run.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/run_monitor.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/run_webui.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/schema.sql +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/dist/assets/index-7129cfe1.css +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/dist/assets/index-8d1935cc.js +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/dist/index.html +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/index.html +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/queue.html +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/queues.html +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/static/workers.html +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/task_center.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/task_center_client.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask/webui/unified_consumer_manager.py +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/dependency_links.txt +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/entry_points.txt +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/jettask.egg-info/top_level.txt +0 -0
- {jettask-0.2.5 → jettask-0.2.7}/setup.cfg +0 -0
@@ -0,0 +1,29 @@
|
|
1
|
+
# 包含必要的文件
|
2
|
+
include README.md
|
3
|
+
include LICENSE
|
4
|
+
include pyproject.toml
|
5
|
+
|
6
|
+
# 包含包数据
|
7
|
+
recursive-include jettask *.py
|
8
|
+
recursive-include jettask/webui/static *
|
9
|
+
recursive-include jettask/webui/frontend/dist *
|
10
|
+
recursive-include jettask/webui *.sql
|
11
|
+
recursive-include jettask/webui *.html
|
12
|
+
recursive-include jettask/scheduler *.sql
|
13
|
+
|
14
|
+
# 排除不必要的文件
|
15
|
+
global-exclude __pycache__
|
16
|
+
global-exclude *.py[co]
|
17
|
+
global-exclude .git*
|
18
|
+
global-exclude .DS_Store
|
19
|
+
global-exclude *.swp
|
20
|
+
global-exclude *~
|
21
|
+
|
22
|
+
# 排除不必要的目录
|
23
|
+
prune tests
|
24
|
+
prune docs
|
25
|
+
prune examples
|
26
|
+
prune lib_reference
|
27
|
+
prune docker
|
28
|
+
prune jettask/webui/frontend/node_modules
|
29
|
+
prune jettask/webui/frontend/src
|
@@ -1,10 +1,9 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: jettask
|
3
|
-
Version: 0.2.
|
3
|
+
Version: 0.2.7
|
4
4
|
Summary: A high-performance distributed task queue system with web monitoring
|
5
|
-
Home-page: https://github.com/yourusername/jettask
|
6
|
-
Author: JetTask Team
|
7
5
|
Author-email: JetTask Team <support@jettask.io>
|
6
|
+
License-Expression: MIT
|
8
7
|
Project-URL: Homepage, https://github.com/yourusername/jettask
|
9
8
|
Project-URL: Bug Tracker, https://github.com/yourusername/jettask/issues
|
10
9
|
Project-URL: Documentation, https://jettask.readthedocs.io
|
@@ -12,7 +11,6 @@ Classifier: Development Status :: 4 - Beta
|
|
12
11
|
Classifier: Intended Audience :: Developers
|
13
12
|
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
14
13
|
Classifier: Topic :: System :: Distributed Computing
|
15
|
-
Classifier: License :: OSI Approved :: MIT License
|
16
14
|
Classifier: Programming Language :: Python :: 3
|
17
15
|
Classifier: Programming Language :: Python :: 3.8
|
18
16
|
Classifier: Programming Language :: Python :: 3.9
|
@@ -23,17 +21,24 @@ Requires-Python: >=3.8
|
|
23
21
|
Description-Content-Type: text/markdown
|
24
22
|
License-File: LICENSE
|
25
23
|
Requires-Dist: redis>=4.5.0
|
24
|
+
Requires-Dist: aioredis>=2.0.0
|
25
|
+
Requires-Dist: msgpack>=1.0.0
|
26
26
|
Requires-Dist: watchdog>=3.0.0
|
27
27
|
Requires-Dist: uvloop>=0.17.0
|
28
28
|
Requires-Dist: ujson>=5.6.0
|
29
29
|
Requires-Dist: fastapi>=0.100.0
|
30
|
-
Requires-Dist: uvicorn>=0.23.0
|
30
|
+
Requires-Dist: uvicorn[standard]>=0.23.0
|
31
31
|
Requires-Dist: websockets>=11.0
|
32
|
-
Requires-Dist: sqlalchemy[asyncio]>=2.0.0
|
33
|
-
Requires-Dist: psycopg[binary,pool]>=3.1.0
|
34
32
|
Requires-Dist: pydantic>=2.0.0
|
35
33
|
Requires-Dist: python-multipart>=0.0.6
|
34
|
+
Requires-Dist: aiohttp>=3.8.0
|
35
|
+
Requires-Dist: sqlalchemy[asyncio]>=2.0.0
|
36
|
+
Requires-Dist: psycopg[binary,pool]>=3.1.0
|
37
|
+
Requires-Dist: asyncpg>=0.28.0
|
38
|
+
Requires-Dist: croniter>=1.4.0
|
36
39
|
Requires-Dist: click>=8.1.0
|
40
|
+
Requires-Dist: python-dotenv>=1.0.0
|
41
|
+
Requires-Dist: psutil>=5.9.0
|
37
42
|
Provides-Extra: dev
|
38
43
|
Requires-Dist: pytest>=7.0; extra == "dev"
|
39
44
|
Requires-Dist: pytest-asyncio>=0.21.0; extra == "dev"
|
@@ -41,7 +46,72 @@ Requires-Dist: black>=23.0; extra == "dev"
|
|
41
46
|
Requires-Dist: flake8>=6.0; extra == "dev"
|
42
47
|
Requires-Dist: mypy>=1.4.0; extra == "dev"
|
43
48
|
Requires-Dist: coverage>=7.0; extra == "dev"
|
44
|
-
Dynamic: author
|
45
|
-
Dynamic: home-page
|
46
49
|
Dynamic: license-file
|
47
|
-
|
50
|
+
|
51
|
+
# JetTask
|
52
|
+
|
53
|
+
一个高性能的分布式任务队列系统,支持Web监控界面。
|
54
|
+
|
55
|
+
## 特性
|
56
|
+
|
57
|
+
- 🚀 高性能异步任务执行
|
58
|
+
- 📊 实时Web监控界面
|
59
|
+
- ⏰ 支持定时任务和延迟任务
|
60
|
+
- 🔄 任务重试和错误处理
|
61
|
+
- 🎯 多队列和优先级支持
|
62
|
+
- 🌍 多命名空间隔离
|
63
|
+
- 📈 任务统计和性能监控
|
64
|
+
- 🔧 简单易用的API
|
65
|
+
|
66
|
+
## 安装
|
67
|
+
|
68
|
+
```bash
|
69
|
+
pip install jettask
|
70
|
+
```
|
71
|
+
|
72
|
+
## 快速开始
|
73
|
+
|
74
|
+
### 1. 创建任务
|
75
|
+
|
76
|
+
```python
|
77
|
+
from jettask import JetTask
|
78
|
+
|
79
|
+
app = JetTask()
|
80
|
+
|
81
|
+
@app.task(queue="default")
|
82
|
+
async def hello_task(name):
|
83
|
+
return f"Hello, {name}!"
|
84
|
+
```
|
85
|
+
|
86
|
+
### 2. 启动Worker
|
87
|
+
|
88
|
+
```bash
|
89
|
+
jettask worker -a app:app --queues default
|
90
|
+
```
|
91
|
+
|
92
|
+
### 3. 发送任务
|
93
|
+
|
94
|
+
```python
|
95
|
+
result = await hello_task.send("World")
|
96
|
+
print(result) # Hello, World!
|
97
|
+
```
|
98
|
+
|
99
|
+
### 4. 启动Web监控界面
|
100
|
+
|
101
|
+
```bash
|
102
|
+
# 启动API服务
|
103
|
+
jettask api
|
104
|
+
|
105
|
+
# 启动前端界面
|
106
|
+
jettask frontend
|
107
|
+
```
|
108
|
+
|
109
|
+
然后访问 http://localhost:3000 查看监控界面。
|
110
|
+
|
111
|
+
## 文档
|
112
|
+
|
113
|
+
详细文档请参见 [docs/](docs/) 目录。
|
114
|
+
|
115
|
+
## 许可证
|
116
|
+
|
117
|
+
MIT License
|
jettask-0.2.7/README.md
ADDED
@@ -0,0 +1,67 @@
|
|
1
|
+
# JetTask
|
2
|
+
|
3
|
+
一个高性能的分布式任务队列系统,支持Web监控界面。
|
4
|
+
|
5
|
+
## 特性
|
6
|
+
|
7
|
+
- 🚀 高性能异步任务执行
|
8
|
+
- 📊 实时Web监控界面
|
9
|
+
- ⏰ 支持定时任务和延迟任务
|
10
|
+
- 🔄 任务重试和错误处理
|
11
|
+
- 🎯 多队列和优先级支持
|
12
|
+
- 🌍 多命名空间隔离
|
13
|
+
- 📈 任务统计和性能监控
|
14
|
+
- 🔧 简单易用的API
|
15
|
+
|
16
|
+
## 安装
|
17
|
+
|
18
|
+
```bash
|
19
|
+
pip install jettask
|
20
|
+
```
|
21
|
+
|
22
|
+
## 快速开始
|
23
|
+
|
24
|
+
### 1. 创建任务
|
25
|
+
|
26
|
+
```python
|
27
|
+
from jettask import JetTask
|
28
|
+
|
29
|
+
app = JetTask()
|
30
|
+
|
31
|
+
@app.task(queue="default")
|
32
|
+
async def hello_task(name):
|
33
|
+
return f"Hello, {name}!"
|
34
|
+
```
|
35
|
+
|
36
|
+
### 2. 启动Worker
|
37
|
+
|
38
|
+
```bash
|
39
|
+
jettask worker -a app:app --queues default
|
40
|
+
```
|
41
|
+
|
42
|
+
### 3. 发送任务
|
43
|
+
|
44
|
+
```python
|
45
|
+
result = await hello_task.send("World")
|
46
|
+
print(result) # Hello, World!
|
47
|
+
```
|
48
|
+
|
49
|
+
### 4. 启动Web监控界面
|
50
|
+
|
51
|
+
```bash
|
52
|
+
# 启动API服务
|
53
|
+
jettask api
|
54
|
+
|
55
|
+
# 启动前端界面
|
56
|
+
jettask frontend
|
57
|
+
```
|
58
|
+
|
59
|
+
然后访问 http://localhost:3000 查看监控界面。
|
60
|
+
|
61
|
+
## 文档
|
62
|
+
|
63
|
+
详细文档请参见 [docs/](docs/) 目录。
|
64
|
+
|
65
|
+
## 许可证
|
66
|
+
|
67
|
+
MIT License
|
@@ -0,0 +1,96 @@
|
|
1
|
+
#!/usr/bin/env python3
"""
Stream backlog monitor collection service.

Runs as a standalone service that periodically samples the backlog of
Redis Streams (via StreamBacklogMonitor) and persists the metrics.
"""

import asyncio
import argparse
import logging
import signal
import sys

from stream_backlog_monitor import StreamBacklogMonitor

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


async def main(args):
    """Build the monitor, install signal handlers and run the collector loop.

    Args:
        args: Parsed argparse namespace with redis_url, pg_url,
            redis_prefix and interval attributes.
    """
    monitor = StreamBacklogMonitor(
        redis_url=args.redis_url,
        pg_url=args.pg_url,
        redis_prefix=args.redis_prefix,
    )

    # Graceful shutdown on SIGINT / SIGTERM.
    def signal_handler(sig, frame):
        logger.info("Received stop signal, shutting down...")
        sys.exit(0)

    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Use lazy %-style logging args instead of f-strings (the originals
    # were f-strings, some with no placeholders at all).
    logger.info("Starting backlog collector service...")
    logger.info("  Redis URL: %s", args.redis_url)
    logger.info("  PostgreSQL URL: %s", args.pg_url)
    logger.info("  Redis Prefix: %s", args.redis_prefix)
    logger.info("  Collection Interval: %s seconds", args.interval)

    try:
        await monitor.run_collector(interval=args.interval)
    except KeyboardInterrupt:
        logger.info("Collector stopped by user")
    except Exception as e:
        logger.error("Collector failed: %s", e)
        sys.exit(1)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Redis Stream Backlog Monitor Collector")

    parser.add_argument(
        "--redis-url",
        default="redis://localhost:6379/0",
        help="Redis connection URL (default: redis://localhost:6379/0)",
    )
    parser.add_argument(
        "--pg-url",
        default="postgresql+asyncpg://jettask:123456@localhost:5432/jettask",
        help="PostgreSQL connection URL",
    )
    parser.add_argument(
        "--redis-prefix",
        default="JETTASK",
        help="Redis key prefix (default: JETTASK)",
    )
    parser.add_argument(
        "--interval",
        type=int,
        default=60,
        help="Collection interval in seconds (default: 60)",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Enable debug logging",
    )

    args = parser.parse_args()

    # Raise verbosity when requested.
    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    asyncio.run(main(args))
|
@@ -0,0 +1,362 @@
|
|
1
|
+
"""
|
2
|
+
Redis Stream积压监控模块
|
3
|
+
用于监控任务队列的积压情况
|
4
|
+
"""
|
5
|
+
|
6
|
+
import asyncio
|
7
|
+
import redis.asyncio as redis
|
8
|
+
from datetime import datetime, timezone
|
9
|
+
from typing import Dict, List, Optional, Tuple
|
10
|
+
import logging
|
11
|
+
from sqlalchemy import text
|
12
|
+
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
|
13
|
+
from sqlalchemy.orm import sessionmaker
|
14
|
+
import os
|
15
|
+
|
16
|
+
logger = logging.getLogger(__name__)
|
17
|
+
|
18
|
+
|
19
|
+
class StreamBacklogMonitor:
    """Stream backlog monitor.

    Samples Redis Stream offsets/pending counts per consumer group and
    persists the derived backlog metrics into the
    ``stream_backlog_monitor`` PostgreSQL table.
    """

    # Lua script: set KEYS[1] to ARGV[1] only if ARGV[1] is greater than the
    # current value (monotonic offset update); returns the stored value.
    # Shared by update_delivered_offset / update_acked_offset.
    _MAX_OFFSET_LUA = """
    local current = redis.call('GET', KEYS[1])
    if not current or tonumber(ARGV[1]) > tonumber(current) then
        redis.call('SET', KEYS[1], ARGV[1])
    end
    return redis.call('GET', KEYS[1])
    """

    def __init__(self, redis_url: str = None, pg_url: str = None, redis_prefix: str = "JETTASK"):
        """Initialize the monitor.

        Args:
            redis_url: Redis connection URL (falls back to JETTASK_REDIS_URL env).
            pg_url: PostgreSQL connection URL (falls back to JETTASK_PG_URL env).
            redis_prefix: Redis key prefix.
        """
        self.redis_url = redis_url or os.getenv('JETTASK_REDIS_URL', 'redis://localhost:6379/0')
        self.pg_url = pg_url or os.getenv('JETTASK_PG_URL', 'postgresql+asyncpg://jettask:123456@localhost:5432/jettask')
        self.redis_prefix = redis_prefix

        # Connections are created lazily in initialize().
        self.redis_client = None
        self.engine = None
        self.AsyncSessionLocal = None

    async def initialize(self):
        """Open the Redis and PostgreSQL connections."""
        self.redis_client = await redis.from_url(self.redis_url, decode_responses=True)

        self.engine = create_async_engine(self.pg_url, echo=False)
        self.AsyncSessionLocal = sessionmaker(self.engine, class_=AsyncSession, expire_on_commit=False)

    async def close(self):
        """Close any open connections."""
        if self.redis_client:
            await self.redis_client.close()
        if self.engine:
            await self.engine.dispose()

    @staticmethod
    def _max_message_offset(raw) -> int:
        """Return int(raw) or 0 when raw is None.

        The Redis client is created with decode_responses=True, so hash/stream
        field names are str; the original code only checked the bytes key
        b'offset', which could never match. Callers pass the value found under
        either key.
        """
        return int(raw) if raw is not None else 0

    async def update_delivered_offset(self, stream_name: str, group_name: str, messages: List[Tuple]):
        """Update the group's last delivered offset from a batch of messages.

        Args:
            stream_name: Stream (queue) name.
            group_name: Consumer group name.
            messages: Message batches as returned by XREADGROUP.
        """
        if not messages:
            return

        try:
            # Extract the largest offset carried by the messages.
            max_offset = 0
            for _, msg_list in messages:
                for msg_id, msg_data in msg_list:
                    # Accept both str and bytes keys: decode_responses=True
                    # yields str, but be tolerant of undecoded clients.
                    raw = msg_data.get('offset', msg_data.get(b'offset'))
                    max_offset = max(max_offset, self._max_message_offset(raw))

            if max_offset > 0:
                key = f"{self.redis_prefix}:GROUP:{stream_name}:{group_name}:last_delivered_offset"
                # Monotonic update via Lua to avoid racing writers.
                await self.redis_client.eval(self._MAX_OFFSET_LUA, 1, key, str(max_offset))
                logger.debug(f"Updated delivered offset for {stream_name}:{group_name} to {max_offset}")

        except Exception as e:
            logger.error(f"Failed to update delivered offset: {e}")

    async def update_acked_offset(self, stream_name: str, group_name: str, acked_messages: List):
        """Update the group's last acknowledged offset.

        Args:
            stream_name: Stream name.
            group_name: Consumer group name.
            acked_messages: Messages that have been ACKed (dicts with 'offset').
        """
        if not acked_messages:
            return

        try:
            # Extract the largest acknowledged offset.
            max_offset = 0
            for msg in acked_messages:
                if 'offset' in msg:
                    max_offset = max(max_offset, int(msg['offset']))

            if max_offset > 0:
                key = f"{self.redis_prefix}:GROUP:{stream_name}:{group_name}:last_acked_offset"
                # Monotonic update via Lua to avoid racing writers.
                await self.redis_client.eval(self._MAX_OFFSET_LUA, 1, key, str(max_offset))
                logger.debug(f"Updated acked offset for {stream_name}:{group_name} to {max_offset}")

        except Exception as e:
            logger.error(f"Failed to update acked offset: {e}")

    async def collect_metrics(self, namespace: str = "default", stream_names: List[str] = None) -> Dict:
        """Collect backlog metrics for the given streams.

        Uses the TASK_OFFSETS and QUEUE_OFFSETS hashes for exact offset math.

        Args:
            namespace: Namespace to inspect.
            stream_names: Streams to monitor; None means all known queues.

        Returns:
            Mapping of stream name -> metrics dict.
        """
        metrics = {}

        try:
            # Latest published offset per queue (QUEUE_OFFSETS hash).
            queue_offsets_key = f"{namespace}:QUEUE_OFFSETS"
            queue_offsets = await self.redis_client.hgetall(queue_offsets_key)

            # Last consumed offset per (queue, group) pair (TASK_OFFSETS hash).
            task_offsets_key = f"{namespace}:TASK_OFFSETS"
            task_offsets = await self.redis_client.hgetall(task_offsets_key)

            # Default to every queue present in QUEUE_OFFSETS.
            if not stream_names:
                stream_names = list(queue_offsets.keys())

            for stream_name in stream_names:
                # Actual Stream key format used by the queue implementation.
                stream_key = f"{self.redis_prefix.lower()}:QUEUE:{stream_name}"

                last_published_offset = int(queue_offsets.get(stream_name, 0))

                # Existence check — the stream may not have been created yet.
                try:
                    await self.redis_client.xinfo_stream(stream_key)
                except Exception:
                    continue

                try:
                    groups = await self.redis_client.xinfo_groups(stream_key)
                except Exception:
                    groups = []

                stream_metrics = {
                    'namespace': namespace,
                    'stream_name': stream_name,
                    'last_published_offset': last_published_offset,
                    'groups': {}
                }

                for group in groups:
                    group_name = group['name']
                    # Pending = delivered but not yet ACKed (Redis Stream stat).
                    pending_count = group['pending']

                    # TASK_OFFSETS key format: f"{queue}:{group_name}"
                    task_offset_key = f"{stream_name}:{group_name}"
                    last_acked_offset = int(task_offsets.get(task_offset_key, 0))
                    logger.debug("task_offset_key=%s last_acked_offset=%s", task_offset_key, last_acked_offset)

                    # 1. Total backlog = newest queue offset - last acked offset.
                    total_backlog = max(0, last_published_offset - last_acked_offset)

                    # 2. Undelivered backlog = total backlog - pending.
                    backlog_undelivered = max(0, total_backlog - pending_count)

                    # 3. Delivered-but-unacked = pending count.
                    backlog_delivered_unacked = pending_count

                    # 4. Delivered offset = acked offset + pending count.
                    last_delivered_offset = last_acked_offset + pending_count

                    stream_metrics['groups'][group_name] = {
                        'last_delivered_offset': last_delivered_offset,
                        'last_acked_offset': last_acked_offset,
                        'pending_count': pending_count,
                        'backlog_undelivered': backlog_undelivered,
                        'backlog_delivered_unacked': backlog_delivered_unacked,
                        'backlog_unprocessed': total_backlog  # undelivered + delivered-unacked
                    }

                # Record queues that have an offset but no consumer group yet.
                if not stream_metrics['groups'] and last_published_offset > 0:
                    stream_metrics['groups']['_total'] = {
                        'last_delivered_offset': 0,
                        'last_acked_offset': 0,
                        'pending_count': 0,
                        'backlog_undelivered': last_published_offset,
                        'backlog_unprocessed': last_published_offset
                    }

                metrics[stream_name] = stream_metrics

        except Exception as e:
            logger.error(f"Failed to collect metrics: {e}")
            import traceback
            traceback.print_exc()

        return metrics

    async def save_metrics(self, metrics: Dict):
        """Persist collected metrics to the database.

        Args:
            metrics: Mapping produced by collect_metrics().
        """
        if not metrics:
            return

        try:
            async with self.AsyncSessionLocal() as session:
                records = []
                timestamp = datetime.now(timezone.utc)

                for stream_name, stream_data in metrics.items():
                    # One row per consumer group.
                    for group_name, group_data in stream_data.get('groups', {}).items():
                        records.append({
                            'namespace': stream_data['namespace'],
                            'stream_name': stream_name,
                            'consumer_group': group_name,
                            'last_published_offset': stream_data['last_published_offset'],
                            'last_delivered_offset': group_data['last_delivered_offset'],
                            'last_acked_offset': group_data['last_acked_offset'],
                            'pending_count': group_data['pending_count'],
                            'backlog_undelivered': group_data['backlog_undelivered'],
                            'backlog_unprocessed': group_data['backlog_unprocessed'],
                            'backlog_delivered_unacked': group_data.get('backlog_delivered_unacked', group_data['pending_count']),
                            'created_at': timestamp
                        })

                    # Stream-level row when there is no consumer group.
                    if not stream_data.get('groups'):
                        records.append({
                            'namespace': stream_data['namespace'],
                            'stream_name': stream_name,
                            'consumer_group': None,
                            'last_published_offset': stream_data['last_published_offset'],
                            'last_delivered_offset': 0,
                            'last_acked_offset': 0,
                            'pending_count': 0,
                            'backlog_undelivered': stream_data['last_published_offset'],
                            'backlog_unprocessed': stream_data['last_published_offset'],
                            'backlog_delivered_unacked': 0,
                            'created_at': timestamp
                        })

                if records:
                    # NOTE: backlog_delivered_unacked equals pending_count and
                    # is derivable, so it is not stored as its own column.
                    insert_sql = text("""
                        INSERT INTO stream_backlog_monitor
                        (namespace, stream_name, consumer_group, last_published_offset,
                         last_delivered_offset, last_acked_offset, pending_count,
                         backlog_undelivered, backlog_unprocessed, created_at)
                        VALUES
                        (:namespace, :stream_name, :consumer_group, :last_published_offset,
                         :last_delivered_offset, :last_acked_offset, :pending_count,
                         :backlog_undelivered, :backlog_unprocessed, :created_at)
                    """)

                    await session.execute(insert_sql, records)
                    await session.commit()
                    logger.info(f"Saved {len(records)} monitoring records")

        except Exception as e:
            logger.error(f"Failed to save metrics: {e}")

    async def run_collector(self, interval: int = 60):
        """Run the collection loop until interrupted.

        Args:
            interval: Collection interval in seconds.
        """
        await self.initialize()

        logger.info(f"Starting backlog monitor collector with {interval}s interval")

        try:
            while True:
                try:
                    metrics = await self.collect_metrics()
                    await self.save_metrics(metrics)
                    await asyncio.sleep(interval)
                except Exception as e:
                    # Keep the loop alive on transient errors.
                    logger.error(f"Collector error: {e}")
                    await asyncio.sleep(interval)

        except KeyboardInterrupt:
            logger.info("Stopping collector...")
        finally:
            await self.close()
|
343
|
+
|
344
|
+
# Helper coroutines kept so other modules can keep importing them.
async def report_delivered_offset(redis_client, redis_prefix: str, queue: str, group_name: str, messages: List):
    """Report the delivered offset (called by event_pool).

    Deprecated: offsets are now written straight to TASK_OFFSETS, so this
    is intentionally a no-op.
    """
    pass  # offset updates now happen in the executor


async def report_queue_offset(redis_client, redis_prefix: str, queue: str, offset: int):
    """Report a queue's newest offset (called when sending messages).

    Deprecated: the send-time Lua script already records this, so this is
    intentionally a no-op.
    """
    pass  # offset updates now happen at send time via a Lua script


if __name__ == "__main__":
    # Ad-hoc run of the collector for manual testing.
    monitor = StreamBacklogMonitor()
    asyncio.run(monitor.run_collector(interval=30))
|