aury-boot 0.0.39__py3-none-any.whl → 0.0.41__py3-none-any.whl
This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- aury/boot/_version.py +2 -2
- aury/boot/application/adapter/http.py +17 -6
- aury/boot/application/app/base.py +1 -0
- aury/boot/application/app/components.py +93 -3
- aury/boot/application/config/settings.py +80 -2
- aury/boot/commands/init.py +20 -0
- aury/boot/commands/pkg.py +31 -1
- aury/boot/commands/templates/project/aury_docs/00-overview.md.tpl +1 -0
- aury/boot/commands/templates/project/aury_docs/18-monitoring-profiling.md.tpl +239 -0
- aury/boot/commands/templates/project/env_templates/monitoring.tpl +15 -0
- aury/boot/common/logging/setup.py +8 -3
- aury/boot/infrastructure/cache/redis.py +82 -16
- aury/boot/infrastructure/channel/__init__.py +2 -1
- aury/boot/infrastructure/channel/backends/__init__.py +2 -1
- aury/boot/infrastructure/channel/backends/redis_cluster.py +124 -0
- aury/boot/infrastructure/channel/backends/redis_cluster_channel.py +139 -0
- aury/boot/infrastructure/channel/base.py +2 -0
- aury/boot/infrastructure/channel/manager.py +9 -1
- aury/boot/infrastructure/clients/redis/manager.py +90 -19
- aury/boot/infrastructure/database/manager.py +6 -4
- aury/boot/infrastructure/monitoring/__init__.py +10 -2
- aury/boot/infrastructure/monitoring/alerting/notifiers/feishu.py +33 -16
- aury/boot/infrastructure/monitoring/alerting/notifiers/webhook.py +14 -13
- aury/boot/infrastructure/monitoring/profiling/__init__.py +664 -0
- aury/boot/infrastructure/scheduler/__init__.py +2 -0
- aury/boot/infrastructure/scheduler/jobstores/__init__.py +10 -0
- aury/boot/infrastructure/scheduler/jobstores/redis_cluster.py +255 -0
- aury/boot/infrastructure/scheduler/manager.py +15 -3
- aury/boot/toolkit/http/__init__.py +180 -85
- {aury_boot-0.0.39.dist-info → aury_boot-0.0.41.dist-info}/METADATA +14 -4
- {aury_boot-0.0.39.dist-info → aury_boot-0.0.41.dist-info}/RECORD +33 -27
- {aury_boot-0.0.39.dist-info → aury_boot-0.0.41.dist-info}/WHEEL +0 -0
- {aury_boot-0.0.39.dist-info → aury_boot-0.0.41.dist-info}/entry_points.txt +0 -0
aury/boot/infrastructure/scheduler/jobstores/redis_cluster.py

@@ -0,0 +1,255 @@
+"""Redis Cluster JobStore for APScheduler.
+
+Job store backed by Redis Cluster, addressed via the redis-cluster:// URL format.
+
+Usage example:
+    from aury.boot.infrastructure.scheduler.jobstores import RedisClusterJobStore
+
+    # Via URL (recommended)
+    jobstore = RedisClusterJobStore(
+        url="redis-cluster://password@redis-cluster.example.com:6379"
+    )
+
+    # Via keyword arguments
+    jobstore = RedisClusterJobStore(
+        host="redis-cluster.example.com",
+        port=6379,
+        password="password",
+    )
+
+    scheduler = SchedulerManager.get_instance(
+        jobstores={"default": jobstore}
+    )
+"""
+
+from __future__ import annotations
+
+import pickle
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any
+from urllib.parse import parse_qs, urlparse
+
+from apscheduler.job import Job
+from apscheduler.jobstores.base import BaseJobStore, ConflictingIdError, JobLookupError
+from apscheduler.util import datetime_to_utc_timestamp, utc_timestamp_to_datetime
+
+try:
+    from redis.cluster import RedisCluster
+except ImportError as exc:
+    raise ImportError(
+        "RedisClusterJobStore requires redis[cluster] installed: "
+        "pip install 'redis[cluster]'"
+    ) from exc
+
+if TYPE_CHECKING:
+    from apscheduler.schedulers.base import BaseScheduler
+
+
+class RedisClusterJobStore(BaseJobStore):
+    """Job store backed by Redis Cluster.
+
+    Compatible with APScheduler's RedisJobStore, but uses the RedisCluster client.
+    A hash tag keeps jobs_key and run_times_key in the same slot.
+
+    Args:
+        url: Redis Cluster URL, format: redis-cluster://[password@]host:port,
+            or the standard redis://[password@]host:port (auto-detected as a cluster)
+        jobs_key: Key under which jobs are stored; defaults to "{apscheduler}.jobs"
+        run_times_key: Key under which run times are stored; defaults to "{apscheduler}.run_times"
+        pickle_protocol: Pickle serialization protocol version
+        **connect_args: Extra arguments passed through to RedisCluster
+    """
+
+    def __init__(
+        self,
+        url: str | None = None,
+        jobs_key: str = "{apscheduler}.jobs",
+        run_times_key: str = "{apscheduler}.run_times",
+        pickle_protocol: int = pickle.HIGHEST_PROTOCOL,
+        **connect_args: Any,
+    ) -> None:
+        super().__init__()
+
+        if not jobs_key:
+            raise ValueError('The "jobs_key" parameter must not be empty')
+        if not run_times_key:
+            raise ValueError('The "run_times_key" parameter must not be empty')
+
+        self.pickle_protocol = pickle_protocol
+        self.jobs_key = jobs_key
+        self.run_times_key = run_times_key
+
+        if url:
+            # Parse the URL
+            self.redis = self._create_client_from_url(url, **connect_args)
+        else:
+            # Connect directly with keyword arguments
+            self.redis = RedisCluster(**connect_args)
+
+    def _create_client_from_url(self, url: str, **kwargs: Any) -> RedisCluster:
+        """Create a RedisCluster client from a URL.
+
+        Supported formats:
+        - redis-cluster://password@host:port (password in the username slot)
+        - redis-cluster://:password@host:port (standard form)
+        - redis-cluster://username:password@host:port (ACL mode)
+        """
+        # Normalize to redis:// for parsing
+        if url.startswith("redis-cluster://"):
+            url = url.replace("redis-cluster://", "redis://", 1)
+
+        parsed = urlparse(url)
+
+        # Extract connection parameters
+        host = parsed.hostname or "localhost"
+        port = parsed.port or 6379
+        username = parsed.username
+        password = parsed.password
+
+        # Handle the password@host form (password in the username slot)
+        if username and not password:
+            password = username
+            username = None
+
+        # Parse query parameters
+        query_params = parse_qs(parsed.query)
+
+        # Build connection kwargs
+        connect_kwargs: dict[str, Any] = {
+            "host": host,
+            "port": port,
+            **kwargs,
+        }
+
+        if username:
+            connect_kwargs["username"] = username
+        if password:
+            connect_kwargs["password"] = password
+
+        # Handle common query parameters
+        if "decode_responses" in query_params:
+            connect_kwargs["decode_responses"] = query_params["decode_responses"][0].lower() == "true"
+
+        return RedisCluster(**connect_kwargs)
+
+    def lookup_job(self, job_id: str) -> Job | None:
+        """Look up a job."""
+        job_state = self.redis.hget(self.jobs_key, job_id)
+        return self._reconstitute_job(job_state) if job_state else None
+
+    def get_due_jobs(self, now: datetime) -> list[Job]:
+        """Get the jobs that are due to run."""
+        timestamp = datetime_to_utc_timestamp(now)
+        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
+        if job_ids:
+            job_states = self.redis.hmget(self.jobs_key, *job_ids)
+            return self._reconstitute_jobs(zip(job_ids, job_states))
+        return []
+
+    def get_next_run_time(self) -> datetime | None:
+        """Get the next run time."""
+        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
+        if next_run_time:
+            return utc_timestamp_to_datetime(next_run_time[0][1])
+        return None
+
+    def get_all_jobs(self) -> list[Job]:
+        """Get all jobs."""
+        job_states = self.redis.hgetall(self.jobs_key)
+        jobs = self._reconstitute_jobs(job_states.items())
+        paused_sort_key = datetime(9999, 12, 31, tzinfo=timezone.utc)
+        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)
+
+    def add_job(self, job: Job) -> None:
+        """Add a job."""
+        if self.redis.hexists(self.jobs_key, job.id):
+            raise ConflictingIdError(job.id)
+
+        with self.redis.pipeline() as pipe:
+            pipe.hset(
+                self.jobs_key,
+                job.id,
+                pickle.dumps(job.__getstate__(), self.pickle_protocol),
+            )
+            if job.next_run_time:
+                pipe.zadd(
+                    self.run_times_key,
+                    {job.id: datetime_to_utc_timestamp(job.next_run_time)},
+                )
+            pipe.execute()
+
+    def update_job(self, job: Job) -> None:
+        """Update a job."""
+        if not self.redis.hexists(self.jobs_key, job.id):
+            raise JobLookupError(job.id)
+
+        with self.redis.pipeline() as pipe:
+            pipe.hset(
+                self.jobs_key,
+                job.id,
+                pickle.dumps(job.__getstate__(), self.pickle_protocol),
+            )
+            if job.next_run_time:
+                pipe.zadd(
+                    self.run_times_key,
+                    {job.id: datetime_to_utc_timestamp(job.next_run_time)},
+                )
+            else:
+                pipe.zrem(self.run_times_key, job.id)
+            pipe.execute()
+
+    def remove_job(self, job_id: str) -> None:
+        """Remove a job."""
+        if not self.redis.hexists(self.jobs_key, job_id):
+            raise JobLookupError(job_id)
+
+        with self.redis.pipeline() as pipe:
+            pipe.hdel(self.jobs_key, job_id)
+            pipe.zrem(self.run_times_key, job_id)
+            pipe.execute()
+
+    def remove_all_jobs(self) -> None:
+        """Remove all jobs."""
+        with self.redis.pipeline() as pipe:
+            pipe.delete(self.jobs_key)
+            pipe.delete(self.run_times_key)
+            pipe.execute()
+
+    def shutdown(self) -> None:
+        """Close the connection."""
+        self.redis.close()
+
+    def _reconstitute_job(self, job_state: bytes) -> Job:
+        """Reconstitute a job object."""
+        state = pickle.loads(job_state)
+        job = Job.__new__(Job)
+        job.__setstate__(state)
+        job._scheduler = self._scheduler
+        job._jobstore_alias = self._alias
+        return job
+
+    def _reconstitute_jobs(self, job_states: Any) -> list[Job]:
+        """Reconstitute multiple job objects."""
+        jobs = []
+        failed_job_ids = []
+
+        for job_id, job_state in job_states:
+            try:
+                jobs.append(self._reconstitute_job(job_state))
+            except Exception:
+                self._logger.exception(
+                    'Unable to restore job "%s" -- removing it', job_id
+                )
+                failed_job_ids.append(job_id)
+
+        # Remove jobs that could not be restored
+        if failed_job_ids:
+            with self.redis.pipeline() as pipe:
+                pipe.hdel(self.jobs_key, *failed_job_ids)
+                pipe.zrem(self.run_times_key, *failed_job_ids)
+                pipe.execute()
+
+        return jobs
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__}>"
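One detail worth calling out in the new store: the "{apscheduler}" hash tag in the default key names. Redis Cluster hashes only the text inside {...} when assigning a key to a slot, so "{apscheduler}.jobs" and "{apscheduler}.run_times" always land in the same slot, and therefore on the same node, which is what keeps the multi-key pipelines above valid. A minimal sketch of that behavior, assuming redis-py's internal redis.crc.key_slot helper (the same CRC16 slot math the RedisCluster client applies to every key):

from redis.crc import key_slot

# Only the text between the first "{" and "}" is hashed, so both default
# keys map to one slot and the pipelined hset/zadd in add_job() stays on
# a single node.
assert key_slot(b"{apscheduler}.jobs") == key_slot(b"{apscheduler}.run_times")

# Untagged keys hash independently and may land on different nodes.
print(key_slot(b"apscheduler.jobs"), key_slot(b"apscheduler.run_times"))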
aury/boot/infrastructure/scheduler/manager.py

@@ -179,6 +179,12 @@ class SchedulerManager:
         if timezone:
             scheduler_kwargs["timezone"] = timezone
 
+        # Default to AsyncIOExecutor to avoid semaphore leaks:
+        # ThreadPoolExecutor leaks semaphores under uvicorn reload / multi-process mode
+        if "executors" not in scheduler_kwargs:
+            from apscheduler.executors.asyncio import AsyncIOExecutor
+            scheduler_kwargs["executors"] = {"default": AsyncIOExecutor()}
+
         instance._scheduler = AsyncIOScheduler(**scheduler_kwargs)
         instance._initialized = True
         cls._instances[name] = instance
@@ -529,10 +535,16 @@ class SchedulerManager:
         else:
             logger.info("Scheduler started; no scheduled jobs")
 
-    def shutdown(self) -> None:
-        """Shut down the scheduler."""
+    def shutdown(self, wait: bool = True) -> None:
+        """Shut down the scheduler.
+
+        Args:
+            wait: Whether to wait for all currently running jobs to finish.
+                Defaults to True to ensure resources are released correctly
+                and semaphore leaks are avoided.
+        """
         if self._scheduler and self._scheduler.running:
-            self._scheduler.shutdown()
+            self._scheduler.shutdown(wait=wait)
+            self._started = False
         logger.info("Scheduler shut down")
 
     def pause(self) -> None: