omnibase_infra 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff compares the contents of two package versions publicly released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as published.
Files changed (139)
  1. omnibase_infra/constants_topic_patterns.py +26 -0
  2. omnibase_infra/enums/__init__.py +3 -0
  3. omnibase_infra/enums/enum_consumer_group_purpose.py +92 -0
  4. omnibase_infra/enums/enum_handler_source_mode.py +16 -2
  5. omnibase_infra/errors/__init__.py +4 -0
  6. omnibase_infra/errors/error_binding_resolution.py +128 -0
  7. omnibase_infra/event_bus/configs/kafka_event_bus_config.yaml +0 -2
  8. omnibase_infra/event_bus/event_bus_inmemory.py +64 -10
  9. omnibase_infra/event_bus/event_bus_kafka.py +105 -47
  10. omnibase_infra/event_bus/mixin_kafka_broadcast.py +3 -7
  11. omnibase_infra/event_bus/mixin_kafka_dlq.py +12 -6
  12. omnibase_infra/event_bus/models/config/model_kafka_event_bus_config.py +0 -81
  13. omnibase_infra/event_bus/testing/__init__.py +26 -0
  14. omnibase_infra/event_bus/testing/adapter_protocol_event_publisher_inmemory.py +418 -0
  15. omnibase_infra/event_bus/testing/model_publisher_metrics.py +64 -0
  16. omnibase_infra/handlers/handler_consul.py +2 -0
  17. omnibase_infra/handlers/mixins/__init__.py +5 -0
  18. omnibase_infra/handlers/mixins/mixin_consul_service.py +274 -10
  19. omnibase_infra/handlers/mixins/mixin_consul_topic_index.py +585 -0
  20. omnibase_infra/handlers/models/model_filesystem_config.py +4 -4
  21. omnibase_infra/migrations/001_create_event_ledger.sql +166 -0
  22. omnibase_infra/migrations/001_drop_event_ledger.sql +18 -0
  23. omnibase_infra/mixins/mixin_node_introspection.py +189 -19
  24. omnibase_infra/models/__init__.py +8 -0
  25. omnibase_infra/models/bindings/__init__.py +59 -0
  26. omnibase_infra/models/bindings/constants.py +144 -0
  27. omnibase_infra/models/bindings/model_binding_resolution_result.py +103 -0
  28. omnibase_infra/models/bindings/model_operation_binding.py +44 -0
  29. omnibase_infra/models/bindings/model_operation_bindings_subcontract.py +152 -0
  30. omnibase_infra/models/bindings/model_parsed_binding.py +52 -0
  31. omnibase_infra/models/discovery/model_introspection_config.py +25 -17
  32. omnibase_infra/models/dispatch/__init__.py +8 -0
  33. omnibase_infra/models/dispatch/model_debug_trace_snapshot.py +114 -0
  34. omnibase_infra/models/dispatch/model_materialized_dispatch.py +141 -0
  35. omnibase_infra/models/handlers/model_handler_source_config.py +1 -1
  36. omnibase_infra/models/model_node_identity.py +126 -0
  37. omnibase_infra/models/projection/model_snapshot_topic_config.py +3 -2
  38. omnibase_infra/models/registration/__init__.py +9 -0
  39. omnibase_infra/models/registration/model_event_bus_topic_entry.py +59 -0
  40. omnibase_infra/models/registration/model_node_event_bus_config.py +99 -0
  41. omnibase_infra/models/registration/model_node_introspection_event.py +11 -0
  42. omnibase_infra/models/runtime/__init__.py +9 -0
  43. omnibase_infra/models/validation/model_coverage_metrics.py +2 -2
  44. omnibase_infra/nodes/__init__.py +9 -0
  45. omnibase_infra/nodes/contract_registry_reducer/__init__.py +29 -0
  46. omnibase_infra/nodes/contract_registry_reducer/contract.yaml +255 -0
  47. omnibase_infra/nodes/contract_registry_reducer/models/__init__.py +38 -0
  48. omnibase_infra/nodes/contract_registry_reducer/models/model_contract_registry_state.py +266 -0
  49. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_cleanup_topic_references.py +55 -0
  50. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_deactivate_contract.py +58 -0
  51. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_mark_stale.py +49 -0
  52. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_update_heartbeat.py +71 -0
  53. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_update_topic.py +66 -0
  54. omnibase_infra/nodes/contract_registry_reducer/models/model_payload_upsert_contract.py +92 -0
  55. omnibase_infra/nodes/contract_registry_reducer/node.py +121 -0
  56. omnibase_infra/nodes/contract_registry_reducer/reducer.py +784 -0
  57. omnibase_infra/nodes/contract_registry_reducer/registry/__init__.py +9 -0
  58. omnibase_infra/nodes/contract_registry_reducer/registry/registry_infra_contract_registry_reducer.py +101 -0
  59. omnibase_infra/nodes/handlers/consul/contract.yaml +85 -0
  60. omnibase_infra/nodes/handlers/db/contract.yaml +72 -0
  61. omnibase_infra/nodes/handlers/graph/contract.yaml +127 -0
  62. omnibase_infra/nodes/handlers/http/contract.yaml +74 -0
  63. omnibase_infra/nodes/handlers/intent/contract.yaml +66 -0
  64. omnibase_infra/nodes/handlers/mcp/contract.yaml +69 -0
  65. omnibase_infra/nodes/handlers/vault/contract.yaml +91 -0
  66. omnibase_infra/nodes/node_ledger_projection_compute/__init__.py +50 -0
  67. omnibase_infra/nodes/node_ledger_projection_compute/contract.yaml +104 -0
  68. omnibase_infra/nodes/node_ledger_projection_compute/node.py +284 -0
  69. omnibase_infra/nodes/node_ledger_projection_compute/registry/__init__.py +29 -0
  70. omnibase_infra/nodes/node_ledger_projection_compute/registry/registry_infra_ledger_projection.py +118 -0
  71. omnibase_infra/nodes/node_ledger_write_effect/__init__.py +82 -0
  72. omnibase_infra/nodes/node_ledger_write_effect/contract.yaml +200 -0
  73. omnibase_infra/nodes/node_ledger_write_effect/handlers/__init__.py +22 -0
  74. omnibase_infra/nodes/node_ledger_write_effect/handlers/handler_ledger_append.py +372 -0
  75. omnibase_infra/nodes/node_ledger_write_effect/handlers/handler_ledger_query.py +597 -0
  76. omnibase_infra/nodes/node_ledger_write_effect/models/__init__.py +31 -0
  77. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_append_result.py +54 -0
  78. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_entry.py +92 -0
  79. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_query.py +53 -0
  80. omnibase_infra/nodes/node_ledger_write_effect/models/model_ledger_query_result.py +41 -0
  81. omnibase_infra/nodes/node_ledger_write_effect/node.py +89 -0
  82. omnibase_infra/nodes/node_ledger_write_effect/protocols/__init__.py +13 -0
  83. omnibase_infra/nodes/node_ledger_write_effect/protocols/protocol_ledger_persistence.py +127 -0
  84. omnibase_infra/nodes/node_ledger_write_effect/registry/__init__.py +9 -0
  85. omnibase_infra/nodes/node_ledger_write_effect/registry/registry_infra_ledger_write.py +121 -0
  86. omnibase_infra/nodes/node_registration_orchestrator/registry/registry_infra_node_registration_orchestrator.py +7 -5
  87. omnibase_infra/nodes/reducers/models/__init__.py +7 -2
  88. omnibase_infra/nodes/reducers/models/model_payload_consul_register.py +11 -0
  89. omnibase_infra/nodes/reducers/models/model_payload_ledger_append.py +133 -0
  90. omnibase_infra/nodes/reducers/registration_reducer.py +1 -0
  91. omnibase_infra/protocols/__init__.py +3 -0
  92. omnibase_infra/protocols/protocol_dispatch_engine.py +152 -0
  93. omnibase_infra/runtime/__init__.py +60 -0
  94. omnibase_infra/runtime/binding_resolver.py +753 -0
  95. omnibase_infra/runtime/constants_security.py +70 -0
  96. omnibase_infra/runtime/contract_loaders/__init__.py +9 -0
  97. omnibase_infra/runtime/contract_loaders/operation_bindings_loader.py +789 -0
  98. omnibase_infra/runtime/emit_daemon/__init__.py +97 -0
  99. omnibase_infra/runtime/emit_daemon/cli.py +844 -0
  100. omnibase_infra/runtime/emit_daemon/client.py +811 -0
  101. omnibase_infra/runtime/emit_daemon/config.py +535 -0
  102. omnibase_infra/runtime/emit_daemon/daemon.py +812 -0
  103. omnibase_infra/runtime/emit_daemon/event_registry.py +477 -0
  104. omnibase_infra/runtime/emit_daemon/model_daemon_request.py +139 -0
  105. omnibase_infra/runtime/emit_daemon/model_daemon_response.py +191 -0
  106. omnibase_infra/runtime/emit_daemon/queue.py +618 -0
  107. omnibase_infra/runtime/event_bus_subcontract_wiring.py +466 -0
  108. omnibase_infra/runtime/handler_source_resolver.py +43 -2
  109. omnibase_infra/runtime/kafka_contract_source.py +984 -0
  110. omnibase_infra/runtime/models/__init__.py +13 -0
  111. omnibase_infra/runtime/models/model_contract_load_result.py +224 -0
  112. omnibase_infra/runtime/models/model_runtime_contract_config.py +268 -0
  113. omnibase_infra/runtime/models/model_runtime_scheduler_config.py +4 -3
  114. omnibase_infra/runtime/models/model_security_config.py +109 -0
  115. omnibase_infra/runtime/publisher_topic_scoped.py +294 -0
  116. omnibase_infra/runtime/runtime_contract_config_loader.py +406 -0
  117. omnibase_infra/runtime/service_kernel.py +76 -6
  118. omnibase_infra/runtime/service_message_dispatch_engine.py +558 -15
  119. omnibase_infra/runtime/service_runtime_host_process.py +770 -20
  120. omnibase_infra/runtime/transition_notification_publisher.py +3 -2
  121. omnibase_infra/runtime/util_wiring.py +206 -62
  122. omnibase_infra/services/mcp/service_mcp_tool_sync.py +27 -9
  123. omnibase_infra/services/session/config_consumer.py +25 -8
  124. omnibase_infra/services/session/config_store.py +2 -2
  125. omnibase_infra/services/session/consumer.py +1 -1
  126. omnibase_infra/topics/__init__.py +45 -0
  127. omnibase_infra/topics/platform_topic_suffixes.py +140 -0
  128. omnibase_infra/topics/util_topic_composition.py +95 -0
  129. omnibase_infra/types/typed_dict/__init__.py +9 -1
  130. omnibase_infra/types/typed_dict/typed_dict_envelope_build_params.py +115 -0
  131. omnibase_infra/utils/__init__.py +9 -0
  132. omnibase_infra/utils/util_consumer_group.py +232 -0
  133. omnibase_infra/validation/infra_validators.py +18 -1
  134. omnibase_infra/validation/validation_exemptions.yaml +192 -0
  135. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/METADATA +3 -3
  136. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/RECORD +139 -52
  137. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/entry_points.txt +1 -0
  138. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/WHEEL +0 -0
  139. {omnibase_infra-0.2.5.dist-info → omnibase_infra-0.2.7.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/omnibase_infra/runtime/emit_daemon/queue.py
@@ -0,0 +1,618 @@
+ # SPDX-License-Identifier: MIT
+ # Copyright (c) 2025 OmniNode Team
+ """Bounded Event Queue with Disk Spool for Hook Event Daemon.
+
+ This module provides a bounded in-memory queue with disk spool overflow
+ for buffering events when Kafka is slow or unavailable.
+
+ Queue Behavior:
+     1. Events are first added to the in-memory queue
+     2. When memory queue is full, events overflow to disk spool
+     3. When disk spool is full (by message count or bytes), oldest events are dropped
+     4. Dequeue prioritizes memory queue, then disk spool (FIFO ordering)
+
+ Disk Spool Format:
+     - Directory: configurable (default: ~/.omniclaude/emit-spool/)
+     - Files: {timestamp}_{event_id}.json (one event per file)
+     - Sorted by filename for FIFO ordering
+
+ Concurrency Safety:
+     This implementation is coroutine-safe using asyncio.Lock.
+     Note: This is coroutine-safe, not thread-safe.
+
+ Related Tickets:
+     - OMN-1610: Hook Event Daemon MVP
+
+ .. versionadded:: 0.2.6
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ import logging
+ from collections import deque
+ from datetime import UTC, datetime, timedelta
+ from pathlib import Path
+
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+ from omnibase_core.types import JsonType
+
+ logger = logging.getLogger(__name__)
+
+
+ class ModelQueuedEvent(BaseModel):
+     """An event waiting to be published.
+
+     Represents a single event in the bounded queue, with metadata
+     for deduplication, routing, and retry tracking.
+
+     Attributes:
+         event_id: Unique identifier for deduplication (UUID string).
+         event_type: The type/name of the event.
+         topic: Kafka topic to publish to.
+         payload: Event payload data.
+         partition_key: Optional partition key for Kafka ordering.
+         queued_at: UTC timestamp when the event was queued.
+         retry_count: Number of publish retry attempts.
+
+     Example:
+         >>> from datetime import UTC, datetime
+         >>> event = ModelQueuedEvent(
+         ...     event_id="550e8400-e29b-41d4-a716-446655440000",
+         ...     event_type="hook.event",
+         ...     topic="claude-code-hook-events",
+         ...     payload={"action": "test"},
+         ...     queued_at=datetime.now(UTC),
+         ... )
+         >>> event.retry_count
+         0
+     """
+
+     model_config = ConfigDict(
+         strict=False,  # Allow coercion for JSON deserialization
+         frozen=False,  # Allow retry_count mutation
+         extra="forbid",
+         from_attributes=True,
+     )
+
+     # ONEX_EXCLUDE: string_id - event_id is string for JSON serialization compatibility
+     event_id: str = Field(
+         ...,
+         min_length=1,
+         description="Unique identifier for deduplication (UUID string).",
+     )
+     event_type: str = Field(
+         ...,
+         min_length=1,
+         description="The type/name of the event.",
+     )
+     topic: str = Field(
+         ...,
+         min_length=1,
+         description="Kafka topic to publish to.",
+     )
+     payload: JsonType = Field(
+         ...,
+         description="Event payload data (JSON-compatible value).",
+     )
+     partition_key: str | None = Field(
+         default=None,
+         description="Optional partition key for Kafka ordering.",
+     )
+     queued_at: datetime = Field(
+         ...,
+         description="UTC timestamp when the event was queued.",
+     )
+     retry_count: int = Field(
+         default=0,
+         ge=0,
+         description="Number of publish retry attempts.",
+     )
+
+     @field_validator("queued_at", mode="before")
+     @classmethod
+     def ensure_utc_aware(cls, v: object) -> object:
+         """Ensure queued_at is timezone-aware (UTC).
+
+         Args:
+             v: The input value.
+
+         Returns:
+             UTC-aware datetime if input is datetime, otherwise unchanged input.
+         """
+         if not isinstance(v, datetime):
+             return v
+         if v.tzinfo is None:
+             return v.replace(tzinfo=UTC)
+         if v.utcoffset() == timedelta(0):
+             if v.tzinfo is not UTC:
+                 return v.replace(tzinfo=UTC)
+             return v
+         return v.astimezone(UTC)
+
+
+ class BoundedEventQueue:
+     """Bounded in-memory queue with disk spool overflow.
+
+     This queue manages event buffering with configurable limits for both
+     in-memory storage and disk spool. When limits are exceeded, the oldest
+     spooled events are dropped to make room for new events.
+
+     Attributes:
+         max_memory_queue: Maximum events in memory queue.
+         max_spool_messages: Maximum events in disk spool.
+         max_spool_bytes: Maximum total bytes for disk spool.
+         spool_dir: Directory for disk spool files.
+
+     Overflow Behavior:
+         1. If memory queue full -> spool to disk
+         2. If spool full (messages OR bytes) -> drop oldest, then spool new
+
+     Example:
+         >>> import asyncio
+         >>> from pathlib import Path
+         >>>
+         >>> async def example():
+         ...     queue = BoundedEventQueue(
+         ...         max_memory_queue=10,
+         ...         max_spool_messages=100,
+         ...     )
+         ...     # Load any previously spooled events
+         ...     await queue.load_spool()
+         ...
+         ...     # Enqueue an event
+         ...     event = ModelQueuedEvent(
+         ...         event_id="abc-123",
+         ...         event_type="test",
+         ...         topic="test-topic",
+         ...         payload={"key": "value"},
+         ...         queued_at=datetime.now(UTC),
+         ...     )
+         ...     success = await queue.enqueue(event)
+         ...
+         ...     # Dequeue for processing
+         ...     next_event = await queue.dequeue()
+     """
+
+     def __init__(
+         self,
+         max_memory_queue: int = 100,
+         max_spool_messages: int = 1000,
+         max_spool_bytes: int = 10_485_760,  # 10 MB
+         spool_dir: Path | None = None,
+     ) -> None:
+         """Initialize queue with limits.
+
+         Args:
+             max_memory_queue: Maximum number of events in memory queue.
+                 Defaults to 100.
+             max_spool_messages: Maximum number of events in disk spool.
+                 Defaults to 1000.
+             max_spool_bytes: Maximum total bytes for disk spool files.
+                 Defaults to 10 MB (10,485,760 bytes).
+             spool_dir: Directory for disk spool files.
+                 Defaults to ~/.omniclaude/emit-spool/
+         """
+         self._max_memory_queue = max_memory_queue
+         self._max_spool_messages = max_spool_messages
+         self._max_spool_bytes = max_spool_bytes
+         self._spool_dir = spool_dir or (Path.home() / ".omniclaude" / "emit-spool")
+
+         # In-memory queue (FIFO)
+         self._memory_queue: deque[ModelQueuedEvent] = deque()
+
+         # Spool tracking
+         self._spool_files: list[Path] = []  # Sorted by filename (FIFO order)
+         self._spool_bytes: int = 0
+
+         # Concurrency lock
+         self._lock = asyncio.Lock()
+
+         # Ensure spool directory exists
+         self._ensure_spool_dir()
+
+     def _ensure_spool_dir(self) -> None:
+         """Ensure the spool directory exists."""
+         try:
+             self._spool_dir.mkdir(parents=True, exist_ok=True)
+         except OSError as e:
+             logger.warning(
+                 f"Failed to create spool directory {self._spool_dir}: {e}. "
+                 "Disk spool will be unavailable."
+             )
+
+     async def enqueue(self, event: ModelQueuedEvent) -> bool:
+         """Add event to queue.
+
+         Events are first added to the in-memory queue. If the memory queue
+         is full, the event is spooled to disk. If the disk spool is also
+         full, the oldest spooled event is dropped before adding the new one.
+
+         Args:
+             event: The event to queue.
+
+         Returns:
+             True if the event was queued (in memory or spool).
+             False if the event could not be queued due to errors.
+
+         Note:
+             This method never raises exceptions. File I/O errors are logged
+             and result in False being returned.
+         """
+         async with self._lock:
+             # Try memory queue first
+             if len(self._memory_queue) < self._max_memory_queue:
+                 self._memory_queue.append(event)
+                 logger.debug(
+                     f"Event {event.event_id} queued in memory "
+                     f"(memory: {len(self._memory_queue)}/{self._max_memory_queue})"
+                 )
+                 return True
+
+             # Memory full - check if spooling is disabled
+             if self._max_spool_messages == 0 or self._max_spool_bytes == 0:
+                 logger.warning(
+                     f"Dropping event {event.event_id}: memory queue full "
+                     f"({len(self._memory_queue)}/{self._max_memory_queue}) "
+                     "and spooling is disabled (max_spool_messages=0 or max_spool_bytes=0)"
+                 )
+                 return False
+
+             # Memory full, spool to disk
+             return await self._spool_event(event)
+
+     async def _spool_event(self, event: ModelQueuedEvent) -> bool:
+         """Spool an event to disk.
+
+         If the spool is full (by messages or bytes), drops the oldest
+         event before adding the new one.
+
+         Args:
+             event: The event to spool.
+
+         Returns:
+             True if successfully spooled, False on error.
+
+         Note:
+             Caller must hold self._lock.
+         """
+         # Defensive check: if spooling is disabled, don't attempt to spool
+         # (This should be checked by caller, but verify here for safety)
+         if self._max_spool_messages == 0 or self._max_spool_bytes == 0:
+             logger.debug(f"Spooling disabled, cannot spool event {event.event_id}")
+             return False
+
+         # Serialize event
+         try:
+             event_json = event.model_dump_json()
+             event_bytes = len(event_json.encode("utf-8"))
+         except Exception:
+             logger.exception("Failed to serialize event %s", event.event_id)
+             return False
+
+         # Check if we need to drop oldest to make room
+         while (
+             len(self._spool_files) >= self._max_spool_messages
+             or self._spool_bytes + event_bytes > self._max_spool_bytes
+         ) and self._spool_files:
+             await self._drop_oldest_spool()
+
+         # Write to spool
+         timestamp = datetime.now(UTC).strftime("%Y%m%d%H%M%S%f")
+         filename = f"{timestamp}_{event.event_id}.json"
+         filepath = self._spool_dir / filename
+
+         try:
+             filepath.write_text(event_json, encoding="utf-8")
+             self._spool_files.append(filepath)
+             self._spool_bytes += event_bytes
+             logger.debug(
+                 f"Event {event.event_id} spooled to disk "
+                 f"(spool: {len(self._spool_files)}/{self._max_spool_messages}, "
+                 f"bytes: {self._spool_bytes}/{self._max_spool_bytes})"
+             )
+             return True
+         except OSError:
+             logger.exception("Failed to write spool file %s", filepath)
+             return False
+
+     async def _drop_oldest_spool(self) -> None:
+         """Drop the oldest spooled event.
+
+         Note:
+             Caller must hold self._lock.
+         """
+         if not self._spool_files:
+             return
+
+         oldest = self._spool_files.pop(0)
+         try:
+             file_size = oldest.stat().st_size
+             oldest.unlink()
+             self._spool_bytes -= file_size
+             # Extract event_id from filename (timestamp_eventid.json)
+             event_id = (
+                 oldest.stem.split("_", 1)[1] if "_" in oldest.stem else oldest.stem
+             )
+             logger.warning(
+                 f"Dropping oldest spooled event {event_id} due to spool overflow"
+             )
+         except OSError:
+             logger.exception("Failed to delete oldest spool file %s", oldest)
+             # Still remove from tracking to avoid infinite loop
+             self._spool_bytes = max(0, self._spool_bytes)
+
+     async def dequeue(self) -> ModelQueuedEvent | None:
+         """Get next event to publish.
+
+         Prioritizes memory queue, then disk spool. Returns None if both
+         are empty.
+
+         Returns:
+             The next event to publish, or None if queue is empty.
+         """
+         async with self._lock:
+             # Try memory queue first
+             if self._memory_queue:
+                 event = self._memory_queue.popleft()
+                 logger.debug(
+                     f"Dequeued event {event.event_id} from memory "
+                     f"(remaining: {len(self._memory_queue)})"
+                 )
+                 return event
+
+             # Try disk spool
+             if self._spool_files:
+                 return await self._dequeue_from_spool()
+
+             return None
+
+     async def _dequeue_from_spool(self) -> ModelQueuedEvent | None:
+         """Dequeue the next event from disk spool.
+
+         Note:
+             Caller must hold self._lock.
+
+         Returns:
+             The dequeued event, or None on error.
+         """
+         if not self._spool_files:
+             return None
+
+         filepath = self._spool_files.pop(0)
+         try:
+             # Read and parse
+             content = filepath.read_text(encoding="utf-8")
+             event = ModelQueuedEvent.model_validate_json(content)
+
+             # Update byte tracking
+             file_size = len(content.encode("utf-8"))
+             self._spool_bytes -= file_size
+         except OSError:
+             logger.exception("Failed to read spool file %s", filepath)
+             return None
+         except Exception:
+             logger.exception("Failed to parse spool file %s", filepath)
+             # Delete corrupted file
+             try:
+                 filepath.unlink()
+             except OSError:
+                 pass
+             return None
+
+         # Delete file separately - event is already successfully parsed
+         # If unlink fails, the event is still returned (not lost)
+         try:
+             filepath.unlink()
+         except OSError:
+             logger.warning(
+                 "Failed to delete spool file %s after successful dequeue - "
+                 "orphan file remains on disk",
+                 filepath,
+             )
+             # Event is still returned - not lost
+
+         logger.debug(
+             f"Dequeued event {event.event_id} from spool "
+             f"(remaining spool: {len(self._spool_files)})"
+         )
+         return event
+
+     async def peek(self) -> ModelQueuedEvent | None:
+         """Peek at next event without removing it.
+
+         Returns:
+             The next event that would be dequeued, or None if empty.
+         """
+         async with self._lock:
+             # Check memory queue
+             if self._memory_queue:
+                 return self._memory_queue[0]
+
+             # Check disk spool
+             if self._spool_files:
+                 filepath = self._spool_files[0]
+                 try:
+                     content = filepath.read_text(encoding="utf-8")
+                     return ModelQueuedEvent.model_validate_json(content)
+                 except Exception:
+                     logger.exception("Failed to peek at spool file %s", filepath)
+                     return None
+
+             return None
+
+     def memory_size(self) -> int:
+         """Number of events in memory queue (approximate).
+
+         Returns:
+             Count of events currently in the in-memory queue.
+
+         Warning:
+             This method does NOT acquire the lock. The returned value may be
+             inconsistent during concurrent enqueue/dequeue operations. Use
+             :meth:`memory_size_locked` when an accurate count is required.
+
+         Note:
+             Suitable for monitoring, logging, and approximate status reporting
+             where eventual consistency is acceptable.
+         """
+         return len(self._memory_queue)
+
+     async def memory_size_locked(self) -> int:
+         """Number of events in memory queue (coroutine-safe).
+
+         Acquires the queue lock before reading the size, ensuring a consistent
+         value even during concurrent operations.
+
+         Returns:
+             Accurate count of events currently in the in-memory queue.
+
+         Note:
+             Use this method when an accurate count is required (e.g., for
+             capacity decisions or precise status reporting). For approximate
+             monitoring where lock contention is undesirable, use :meth:`memory_size`.
+         """
+         async with self._lock:
+             return len(self._memory_queue)
+
+     def spool_size(self) -> int:
+         """Number of events in disk spool (approximate).
+
+         Returns:
+             Count of events currently in the disk spool.
+
+         Warning:
+             This method does NOT acquire the lock. The returned value may be
+             inconsistent during concurrent enqueue/dequeue operations. Use
+             :meth:`spool_size_locked` when an accurate count is required.
+
+         Note:
+             Suitable for monitoring, logging, and approximate status reporting
+             where eventual consistency is acceptable.
+         """
+         return len(self._spool_files)
+
+     async def spool_size_locked(self) -> int:
+         """Number of events in disk spool (coroutine-safe).
+
+         Acquires the queue lock before reading the size, ensuring a consistent
+         value even during concurrent operations.
+
+         Returns:
+             Accurate count of events currently in the disk spool.
+
+         Note:
+             Use this method when an accurate count is required (e.g., for
+             capacity decisions or precise status reporting). For approximate
+             monitoring where lock contention is undesirable, use :meth:`spool_size`.
+         """
+         async with self._lock:
+             return len(self._spool_files)
+
+     def total_size(self) -> int:
+         """Total events in memory and spool (approximate).
+
+         Returns:
+             Total count of events across memory and disk spool.
+
+         Warning:
+             This method does NOT acquire the lock. The returned value may be
+             inconsistent during concurrent operations since it reads memory
+             and spool sizes separately. Use :meth:`total_size_locked` when
+             an accurate count is required.
+         """
+         return self.memory_size() + self.spool_size()
+
+     async def total_size_locked(self) -> int:
+         """Total events in memory and spool (coroutine-safe).
+
+         Acquires the queue lock before reading sizes, ensuring a consistent
+         total even during concurrent operations.
+
+         Returns:
+             Accurate total count of events across memory and disk spool.
+
+         Note:
+             Use this method when an accurate count is required. For approximate
+             monitoring where lock contention is undesirable, use :meth:`total_size`.
+         """
+         async with self._lock:
+             return len(self._memory_queue) + len(self._spool_files)
+
+     async def drain_to_spool(self) -> int:
+         """Move all memory events to spool for graceful shutdown.
+
+         This should be called during graceful shutdown to persist
+         in-memory events before the process exits.
+
+         Returns:
+             Number of events successfully moved to spool.
+
+         Note:
+             If spooling is disabled (max_spool_messages=0 or max_spool_bytes=0),
+             this method will log a warning and return 0 without draining any
+             events. Events in memory will be lost.
+         """
+         async with self._lock:
+             # Check if spooling is disabled
+             if self._max_spool_messages == 0 or self._max_spool_bytes == 0:
+                 memory_count = len(self._memory_queue)
+                 if memory_count > 0:
+                     logger.warning(
+                         f"Spooling is disabled (max_spool_messages=0 or max_spool_bytes=0). "
+                         f"{memory_count} events in memory will be lost during shutdown."
+                     )
+                 return 0
+
+             count = 0
+             while self._memory_queue:
+                 event = self._memory_queue.popleft()
+                 if await self._spool_event(event):
+                     count += 1
+                 else:
+                     logger.error(f"Failed to spool event {event.event_id} during drain")
+             logger.info(f"Drained {count} events from memory to spool")
+             return count
+
+     async def load_spool(self) -> int:
+         """Load spooled events on startup.
+
+         Scans the spool directory for existing event files and
+         rebuilds the spool tracking state. Files are sorted by
+         filename for FIFO ordering.
+
+         Returns:
+             Number of events loaded from spool.
+         """
+         async with self._lock:
+             self._spool_files.clear()
+             self._spool_bytes = 0
+
+             if not self._spool_dir.exists():
+                 logger.debug(f"Spool directory {self._spool_dir} does not exist")
+                 return 0
+
+             try:
+                 # Find all .json files and sort by name (FIFO order)
+                 files = sorted(self._spool_dir.glob("*.json"))
+                 for filepath in files:
+                     try:
+                         file_size = filepath.stat().st_size
+                         self._spool_files.append(filepath)
+                         self._spool_bytes += file_size
+                     except OSError as e:
+                         logger.warning(f"Failed to stat spool file {filepath}: {e}")
+
+                 count = len(self._spool_files)
+                 if count > 0:
+                     logger.info(
+                         f"Loaded {count} events from spool ({self._spool_bytes} bytes)"
+                     )
+                 return count
+             except OSError:
+                 logger.exception("Failed to scan spool directory")
+                 return 0
+
+
+ __all__: list[str] = ["BoundedEventQueue", "ModelQueuedEvent"]
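
Taken together, the docstrings in the new queue module describe a three-phase lifecycle: load_spool() on startup, enqueue()/dequeue() while running, and drain_to_spool() on graceful shutdown. The sketch below ties those calls together. It is illustrative only: it assumes the module is importable by its package path (omnibase_infra.runtime.emit_daemon.queue, per file 106 in the list above), and the publish step is a hypothetical stub.

    import asyncio
    from datetime import UTC, datetime

    from omnibase_infra.runtime.emit_daemon.queue import (
        BoundedEventQueue,
        ModelQueuedEvent,
    )


    async def run_once() -> None:
        queue = BoundedEventQueue(max_memory_queue=100, max_spool_messages=1000)

        # Startup: rebuild spool tracking from files left behind by a prior run.
        await queue.load_spool()

        # enqueue() never raises; False means the event could not be stored
        # anywhere (memory full and spooling disabled or failing).
        accepted = await queue.enqueue(
            ModelQueuedEvent(
                event_id="550e8400-e29b-41d4-a716-446655440000",
                event_type="hook.event",
                topic="claude-code-hook-events",
                payload={"action": "test"},
                queued_at=datetime.now(UTC),
            )
        )
        if not accepted:
            print("event dropped")

        # Publish loop: dequeue() drains memory first, then the disk spool.
        while (event := await queue.dequeue()) is not None:
            ...  # hypothetical publish-to-Kafka step for `event`

        # Graceful shutdown: persist any still-queued events across restarts.
        await queue.drain_to_spool()


    asyncio.run(run_once())

Because enqueue() reports failure by returning False rather than raising, a real publisher loop would check that return value to count or log dropped events instead of relying on exception handling.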