krons 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kronos/__init__.py +0 -0
- kronos/core/__init__.py +145 -0
- kronos/core/broadcaster.py +116 -0
- kronos/core/element.py +225 -0
- kronos/core/event.py +316 -0
- kronos/core/eventbus.py +116 -0
- kronos/core/flow.py +356 -0
- kronos/core/graph.py +442 -0
- kronos/core/node.py +982 -0
- kronos/core/pile.py +575 -0
- kronos/core/processor.py +494 -0
- kronos/core/progression.py +296 -0
- kronos/enforcement/__init__.py +57 -0
- kronos/enforcement/common/__init__.py +34 -0
- kronos/enforcement/common/boolean.py +85 -0
- kronos/enforcement/common/choice.py +97 -0
- kronos/enforcement/common/mapping.py +118 -0
- kronos/enforcement/common/model.py +102 -0
- kronos/enforcement/common/number.py +98 -0
- kronos/enforcement/common/string.py +140 -0
- kronos/enforcement/context.py +129 -0
- kronos/enforcement/policy.py +80 -0
- kronos/enforcement/registry.py +153 -0
- kronos/enforcement/rule.py +312 -0
- kronos/enforcement/service.py +370 -0
- kronos/enforcement/validator.py +198 -0
- kronos/errors.py +146 -0
- kronos/operations/__init__.py +32 -0
- kronos/operations/builder.py +228 -0
- kronos/operations/flow.py +398 -0
- kronos/operations/node.py +101 -0
- kronos/operations/registry.py +92 -0
- kronos/protocols.py +414 -0
- kronos/py.typed +0 -0
- kronos/services/__init__.py +81 -0
- kronos/services/backend.py +286 -0
- kronos/services/endpoint.py +608 -0
- kronos/services/hook.py +471 -0
- kronos/services/imodel.py +465 -0
- kronos/services/registry.py +115 -0
- kronos/services/utilities/__init__.py +36 -0
- kronos/services/utilities/header_factory.py +87 -0
- kronos/services/utilities/rate_limited_executor.py +271 -0
- kronos/services/utilities/rate_limiter.py +180 -0
- kronos/services/utilities/resilience.py +414 -0
- kronos/session/__init__.py +41 -0
- kronos/session/exchange.py +258 -0
- kronos/session/message.py +60 -0
- kronos/session/session.py +411 -0
- kronos/specs/__init__.py +25 -0
- kronos/specs/adapters/__init__.py +0 -0
- kronos/specs/adapters/_utils.py +45 -0
- kronos/specs/adapters/dataclass_field.py +246 -0
- kronos/specs/adapters/factory.py +56 -0
- kronos/specs/adapters/pydantic_adapter.py +309 -0
- kronos/specs/adapters/sql_ddl.py +946 -0
- kronos/specs/catalog/__init__.py +36 -0
- kronos/specs/catalog/_audit.py +39 -0
- kronos/specs/catalog/_common.py +43 -0
- kronos/specs/catalog/_content.py +59 -0
- kronos/specs/catalog/_enforcement.py +70 -0
- kronos/specs/factory.py +120 -0
- kronos/specs/operable.py +314 -0
- kronos/specs/phrase.py +405 -0
- kronos/specs/protocol.py +140 -0
- kronos/specs/spec.py +506 -0
- kronos/types/__init__.py +60 -0
- kronos/types/_sentinel.py +311 -0
- kronos/types/base.py +369 -0
- kronos/types/db_types.py +260 -0
- kronos/types/identity.py +66 -0
- kronos/utils/__init__.py +40 -0
- kronos/utils/_hash.py +234 -0
- kronos/utils/_json_dump.py +392 -0
- kronos/utils/_lazy_init.py +63 -0
- kronos/utils/_to_list.py +165 -0
- kronos/utils/_to_num.py +85 -0
- kronos/utils/_utils.py +375 -0
- kronos/utils/concurrency/__init__.py +205 -0
- kronos/utils/concurrency/_async_call.py +333 -0
- kronos/utils/concurrency/_cancel.py +122 -0
- kronos/utils/concurrency/_errors.py +96 -0
- kronos/utils/concurrency/_patterns.py +363 -0
- kronos/utils/concurrency/_primitives.py +328 -0
- kronos/utils/concurrency/_priority_queue.py +135 -0
- kronos/utils/concurrency/_resource_tracker.py +110 -0
- kronos/utils/concurrency/_run_async.py +67 -0
- kronos/utils/concurrency/_task.py +95 -0
- kronos/utils/concurrency/_utils.py +79 -0
- kronos/utils/fuzzy/__init__.py +14 -0
- kronos/utils/fuzzy/_extract_json.py +90 -0
- kronos/utils/fuzzy/_fuzzy_json.py +288 -0
- kronos/utils/fuzzy/_fuzzy_match.py +149 -0
- kronos/utils/fuzzy/_string_similarity.py +187 -0
- kronos/utils/fuzzy/_to_dict.py +396 -0
- kronos/utils/sql/__init__.py +13 -0
- kronos/utils/sql/_sql_validation.py +142 -0
- krons-0.1.0.dist-info/METADATA +70 -0
- krons-0.1.0.dist-info/RECORD +101 -0
- krons-0.1.0.dist-info/WHEEL +4 -0
- krons-0.1.0.dist-info/licenses/LICENSE +201 -0
kronos/core/processor.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
# Copyright (c) 2025 - 2026, HaiyangLi <quantocean.li at gmail dot com>
|
|
2
|
+
# SPDX-License-Identifier: Apache-2.0
|
|
3
|
+
|
|
4
|
+
from __future__ import annotations
|
|
5
|
+
|
|
6
|
+
import math
|
|
7
|
+
from typing import TYPE_CHECKING, Any, ClassVar, Self
|
|
8
|
+
|
|
9
|
+
from kronos.errors import ConfigurationError, NotFoundError, QueueFullError
|
|
10
|
+
from kronos.utils import concurrency
|
|
11
|
+
|
|
12
|
+
from .event import Event, EventStatus
|
|
13
|
+
from .flow import Flow
|
|
14
|
+
from .pile import Pile
|
|
15
|
+
from .progression import Progression
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from uuid import UUID
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
__all__ = (
|
|
22
|
+
"Executor",
|
|
23
|
+
"Processor",
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class Processor:
    """Priority queue processor with rate limiting and concurrency control.

    Processes events from a priority queue (min-heap) with configurable:
    - Batch capacity: max events per processing cycle
    - Concurrency: semaphore-limited parallel execution
    - Permission: extensible request_permission() for rate limits

    Attributes:
        event_type: Event subclass this processor handles (ClassVar).
        queue_capacity: Max events per batch before refresh.
        capacity_refresh_time: Seconds between capacity resets.
        concurrency_limit: Max concurrent event executions.
        pile: Shared event storage (reference to executor's Flow.items).
        executor: Parent executor for progression updates.

    Example:
        >>> processor = await Processor.create(
        ...     queue_capacity=10, capacity_refresh_time=1.0, pile=pile
        ... )
        >>> await processor.enqueue(event.id)
        >>> await processor.execute()  # Runs until stop() called
    """

    event_type: ClassVar[type[Event]]

    def __init__(
        self,
        queue_capacity: int,
        capacity_refresh_time: float,
        pile: Pile[Event],
        executor: Executor | None = None,
        concurrency_limit: int = 100,
        max_queue_size: int = 1000,
        max_denial_tracking: int = 10000,
    ) -> None:
        """Initialize processor with validated capacity constraints.

        Args:
            queue_capacity: Events per batch (1-10000).
            capacity_refresh_time: Seconds between refreshes (0.01-3600).
            pile: Event storage reference.
            executor: Parent executor (optional).
            concurrency_limit: Max parallel executions (default: 100).
            max_queue_size: Queue size limit (default: 1000).
            max_denial_tracking: Max tracked permission denials (default: 10000).

        Raises:
            ValueError: If parameters out of valid ranges.
        """
        if queue_capacity < 1:
            raise ValueError("Queue capacity must be greater than 0.")
        if queue_capacity > 10000:
            raise ValueError("Queue capacity must be <= 10000 (prevent unbounded batches).")

        # Validate capacity_refresh_time (prevent hot loop or starvation)
        if capacity_refresh_time < 0.01:
            raise ValueError("Capacity refresh time must be >= 0.01s (prevent CPU hot loop).")
        if capacity_refresh_time > 3600:
            raise ValueError("Capacity refresh time must be <= 3600s (prevent starvation).")

        # Validate concurrency_limit
        if concurrency_limit < 1:
            raise ValueError("Concurrency limit must be >= 1.")

        # Validate max_queue_size
        if max_queue_size < 1:
            raise ValueError("Max queue size must be >= 1.")

        if max_denial_tracking < 1:
            raise ValueError("Max denial tracking must be >= 1.")

        self.queue_capacity = queue_capacity
        self.capacity_refresh_time = capacity_refresh_time
        self.max_queue_size = max_queue_size
        self.max_denial_tracking = max_denial_tracking
        self.pile = pile
        self.executor = executor
        self.concurrency_limit = concurrency_limit

        # Priority queue: (priority, event_uuid) tuples, min-heap ordering
        self.queue: concurrency.PriorityQueue[tuple[float, UUID]] = concurrency.PriorityQueue()

        self._available_capacity = queue_capacity
        self._execution_mode = False
        self._stop_event = concurrency.ConcurrencyEvent()
        # Per-event permission-denial counters; bounded by max_denial_tracking
        # (oldest entry evicted first -- relies on dict insertion order).
        self._denial_counts: dict[UUID, int] = {}
        self._concurrency_sem = concurrency.Semaphore(concurrency_limit)

    @property
    def available_capacity(self) -> int:
        """Remaining capacity in current batch."""
        return self._available_capacity

    @available_capacity.setter
    def available_capacity(self, value: int) -> None:
        self._available_capacity = value

    @property
    def execution_mode(self) -> bool:
        """True if execute() loop is running."""
        return self._execution_mode

    @execution_mode.setter
    def execution_mode(self, value: bool) -> None:
        self._execution_mode = value

    async def enqueue(self, event_id: UUID, priority: float | None = None) -> None:
        """Add event to priority queue. Lower priority = processed first.

        Args:
            event_id: UUID of event (must exist in pile).
            priority: Sort key (default: event.created_at timestamp).

        Raises:
            QueueFullError: If queue at max_queue_size.
            ValueError: If priority is NaN or infinite.
        """
        if self.queue.qsize() >= self.max_queue_size:
            raise QueueFullError(
                f"Queue size ({self.queue.qsize()}) exceeds max ({self.max_queue_size})",
                details={
                    "queue_size": self.queue.qsize(),
                    "max_size": self.max_queue_size,
                },
            )

        if priority is None:
            event = self.pile[event_id]
            priority = event.created_at.timestamp()

        # math.isfinite() is False for NaN as well as +/-inf, so one check
        # rejects all three (NaN would corrupt min-heap ordering).
        if not math.isfinite(priority):
            raise ValueError(
                f"Priority must be finite and not NaN, got {priority}",
            )

        await self.queue.put((priority, event_id))

    async def dequeue(self) -> Event:
        """Remove and return highest-priority event (lowest priority value)."""
        _, event_id = await self.queue.get()
        return self.pile[event_id]

    async def join(self) -> None:
        """Block until queue is empty (polling at 100ms intervals)."""
        while not self.queue.empty():
            await concurrency.sleep(0.1)

    async def stop(self) -> None:
        """Signal stop and clear denial tracking."""
        self._stop_event.set()
        self._denial_counts.clear()

    async def start(self) -> None:
        """Clear stop signal to allow processing."""
        # ConcurrencyEvent has no clear(); replace it to reset the flag.
        if self._stop_event.is_set():
            self._stop_event = concurrency.ConcurrencyEvent()

    def is_stopped(self) -> bool:
        """True if stop() was called."""
        return self._stop_event.is_set()

    @classmethod
    async def create(
        cls,
        queue_capacity: int,
        capacity_refresh_time: float,
        pile: Pile[Event],
        executor: Executor | None = None,
        concurrency_limit: int = 100,
        max_queue_size: int = 1000,
        max_denial_tracking: int = 10000,
    ) -> Self:
        """Async factory. Same args as __init__."""
        return cls(
            queue_capacity=queue_capacity,
            capacity_refresh_time=capacity_refresh_time,
            pile=pile,
            executor=executor,
            concurrency_limit=concurrency_limit,
            max_queue_size=max_queue_size,
            max_denial_tracking=max_denial_tracking,
        )

    async def process(self) -> None:
        """Process events up to available capacity in parallel.

        Dequeues events, checks permissions, and executes with semaphore-limited
        concurrency. Permission denials trigger retry with backoff (3 strikes = abort).
        The first denial also ends the current batch early (remaining events are
        retried on the next cycle). Resets capacity after processing if any
        events were handled. Blocks until all spawned tasks finish (task group).
        """
        events_processed = 0

        async with concurrency.create_task_group() as tg:
            while self.available_capacity > 0 and not self.queue.empty():
                priority, event_id = await self.queue.get()

                try:
                    next_event = self.pile[event_id]
                except NotFoundError:
                    # Event was removed from the pile after being queued; drop
                    # its denial history and move on.
                    self._denial_counts.pop(event_id, None)
                    continue

                if await self.request_permission(**next_event.request):
                    self._denial_counts.pop(event_id, None)

                    if self.executor:
                        await self.executor._update_progression(next_event, EventStatus.PROCESSING)

                    if next_event.streaming:

                        # Coroutine object is created immediately with
                        # next_event bound, so the loop variable rebinding on
                        # later iterations cannot affect it.
                        async def consume_stream(event: Event):
                            try:
                                # Drain the stream; chunks are discarded here.
                                async for _ in event.stream():  # type: ignore[attr-defined]
                                    pass
                                if self.executor:
                                    await self.executor._update_progression(event)
                            except Exception:
                                # NOTE(review): stream errors are swallowed after
                                # recording progression -- presumably the event's
                                # status reflects the failure; confirm in Event.
                                if self.executor:
                                    await self.executor._update_progression(event)

                        tg.start_soon(self._with_semaphore, consume_stream(next_event))
                    else:

                        async def invoke_and_update(event):
                            try:
                                await event.invoke()
                            finally:
                                # Progression is updated even if invoke() raises;
                                # the exception still propagates to the task group.
                                if self.executor:
                                    await self.executor._update_progression(event)

                        tg.start_soon(self._with_semaphore, invoke_and_update(next_event))

                    events_processed += 1
                    self._available_capacity -= 1
                else:
                    # Permission denied: track and retry with backoff, abort after 3 denials
                    if len(self._denial_counts) >= self.max_denial_tracking:
                        # Evict the oldest tracked event (dict preserves
                        # insertion order) to bound memory.
                        oldest_key = next(iter(self._denial_counts))
                        self._denial_counts.pop(oldest_key)

                    denial_count = self._denial_counts.get(event_id, 0) + 1
                    self._denial_counts[event_id] = denial_count

                    if denial_count >= 3:
                        if self.executor:
                            await self.executor._update_progression(next_event, EventStatus.ABORTED)
                        self._denial_counts.pop(event_id, None)
                    else:
                        # Re-queue with a worse (higher) priority so other
                        # events get a chance first.
                        backoff = denial_count * 1.0
                        await self.queue.put((priority + backoff, next_event.id))

                    # End this batch on the first denial; the permission gate
                    # (e.g. a rate limit) is unlikely to open mid-batch.
                    break

        if events_processed > 0:
            self.available_capacity = self.queue_capacity

    async def request_permission(self, **kwargs: Any) -> bool:
        """Override for rate limits, auth, quotas. Returns True by default."""
        return True

    async def _with_semaphore(self, coro):
        """Execute coroutine under concurrency semaphore."""
        if self._concurrency_sem:
            async with self._concurrency_sem:
                return await coro
        return await coro

    async def execute(self) -> None:
        """Run process() loop until stop() called. Sleeps capacity_refresh_time between batches."""
        self.execution_mode = True
        await self.start()

        while not self.is_stopped():
            await self.process()
            await concurrency.sleep(self.capacity_refresh_time)

        self.execution_mode = False
|
+
|
|
307
|
+
class Executor:
    """Event lifecycle manager with Flow-based state tracking.

    Uses Flow progressions (1:1 with EventStatus) for O(1) status queries.
    Delegates processing to Processor instance for async execution.

    Attributes:
        processor_type: Processor subclass (ClassVar, set by subclasses).
        states: Flow with progressions per EventStatus.
        processor: Background processor (created on start()).

    Example:
        >>> class MyExecutor(Executor):
        ...     processor_type = MyProcessor
        >>> exec = MyExecutor(processor_config={"queue_capacity": 10})
        >>> await exec.append(event)
        >>> await exec.start()
    """

    processor_type: ClassVar[type[Processor]]

    def __init__(
        self,
        processor_config: dict[str, Any] | None = None,
        strict_event_type: bool = False,
        name: str | None = None,
    ) -> None:
        """Initialize executor with Flow state tracking.

        Args:
            processor_config: Kwargs for Processor.create().
            strict_event_type: Enforce exact event type (no subclasses).
            name: Flow name (default: "executor_states").
        """
        self.processor_config = processor_config or {}
        # Lazily created by start()/_create_processor(); None until then.
        self.processor: Processor | None = None

        self.states = Flow[Event, Progression](
            name=name or "executor_states",
            item_type=self.processor_type.event_type,
            strict_type=strict_event_type,
        )

        # One progression per EventStatus, named after the status value,
        # so _update_progression can look each up by name.
        for status in EventStatus:
            self.states.add_progression(Progression(name=status.value))

    @property
    def event_type(self) -> type[Event]:
        """Event subclass this executor handles."""
        return self.processor_type.event_type

    @property
    def strict_event_type(self) -> bool:
        """True if Flow rejects event subclasses."""
        return self.states.items.strict_type

    async def _update_progression(
        self, event: Event, force_status: EventStatus | None = None
    ) -> None:
        """Move event to the progression matching its status.

        Removes the event id from every progression, then appends it to the
        one named after the target status, all inside a single
        ``async with self.states.progressions`` scope (presumably the Flow's
        async lock -- confirm in Flow; this is not a thread lock).

        Args:
            event: Event whose progression membership is updated.
            force_status: Override status; defaults to event.execution.status.

        Raises:
            ConfigurationError: If no progression exists for the target status.
        """
        target_status = force_status if force_status else event.execution.status

        async with self.states.progressions:
            # Remove from every progression first so the event ends up in
            # exactly one (its id may currently be in any of them).
            for prog in self.states.progressions:
                if event.id in prog:
                    prog.remove(event.id)

            try:
                status_prog = self.states.get_progression(target_status.value)
                status_prog.append(event.id)
            except KeyError as e:
                raise ConfigurationError(
                    f"Progression '{target_status.value}' not found in executor",
                    details={
                        "status": target_status.value,
                        "available": [p.name for p in self.states.progressions],
                    },
                ) from e

    async def forward(self) -> None:
        """Trigger immediate process() without waiting for capacity refresh."""
        if self.processor:
            await self.processor.process()

    async def start(self) -> None:
        """Create processor (if needed), backfill pending events, and start."""
        if not self.processor:
            await self._create_processor()
        # Backfill: events appended before start() exist only in the Flow's
        # pending progression, never reached a queue -- enqueue them now.
        if self.processor:
            for event in self.pending_events:
                await self.processor.enqueue(event.id)
        if self.processor:
            await self.processor.start()

    async def stop(self) -> None:
        """Stop processor."""
        if self.processor:
            await self.processor.stop()

    async def _create_processor(self) -> None:
        """Instantiate processor with stored config."""
        # The processor shares the Flow's item pile so both sides see the
        # same Event objects.
        self.processor = await self.processor_type.create(
            pile=self.states.items,
            executor=self,
            **self.processor_config,
        )

    async def append(self, event: Event, priority: float | None = None) -> None:
        """Add event to Flow (pending) and enqueue if processor exists.

        Args:
            event: Event to add.
            priority: Queue priority (default: event.created_at).
        """
        self.states.add_item(event, progressions="pending")

        # Before start(), there is no processor yet; start() backfills
        # pending events into the queue.
        if self.processor:
            await self.processor.enqueue(event.id, priority=priority)

    def get_events_by_status(self, status: EventStatus | str) -> list[Event]:
        """Get events in given status progression. O(n) where n = events in status."""
        status_str = status.value if isinstance(status, EventStatus) else status
        prog = self.states.get_progression(status_str)
        return [self.states.items[uid] for uid in prog]

    @property
    def completed_events(self) -> list[Event]:
        """Events with COMPLETED status."""
        return self.get_events_by_status(EventStatus.COMPLETED)

    @property
    def pending_events(self) -> list[Event]:
        """Events with PENDING status."""
        return self.get_events_by_status(EventStatus.PENDING)

    @property
    def failed_events(self) -> list[Event]:
        """Events with FAILED status."""
        return self.get_events_by_status(EventStatus.FAILED)

    @property
    def processing_events(self) -> list[Event]:
        """Events with PROCESSING status."""
        return self.get_events_by_status(EventStatus.PROCESSING)

    def status_counts(self) -> dict[str, int]:
        """Event count per status progression."""
        return {prog.name or "unnamed": len(prog) for prog in self.states.progressions}

    async def cleanup_events(self, statuses: list[EventStatus] | None = None) -> int:
        """Remove terminal events and clear denial tracking.

        Holds both the items and progressions context managers for the whole
        sweep so no event is observed half-removed.

        Args:
            statuses: Statuses to clean (default: COMPLETED, FAILED, ABORTED).

        Returns:
            Number of events removed.
        """
        if statuses is None:
            statuses = [EventStatus.COMPLETED, EventStatus.FAILED, EventStatus.ABORTED]

        removed_count = 0
        async with self.states.items, self.states.progressions:
            for status in statuses:
                # get_events_by_status materializes a list, so removal below
                # does not mutate the sequence being iterated.
                events = self.get_events_by_status(status)
                for event in events:
                    if self.processor:
                        # Drop any stale permission-denial counter for the event.
                        self.processor._denial_counts.pop(event.id, None)
                    self.states.remove_item(event.id)
                    removed_count += 1

        return removed_count

    def inspect_state(self) -> str:
        """Debug helper: multiline status summary."""
        lines = [f"Executor State ({self.states.name}):"]
        for status in EventStatus:
            count = len(self.states.get_progression(status.value))
            lines.append(f"  {status.value}: {count} events")
        return "\n".join(lines)

    def __contains__(self, event: Event | UUID) -> bool:
        """True if the event (or its UUID) is stored in this executor's Flow."""
        return event in self.states.items

    def __repr__(self) -> str:
        counts = self.status_counts()
        total = sum(counts.values())
        return f"Executor(total={total}, {', '.join(f'{k}={v}' for k, v in counts.items())})"