ccproxy-api 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. ccproxy/__init__.py +4 -0
  2. ccproxy/__main__.py +7 -0
  3. ccproxy/_version.py +21 -0
  4. ccproxy/adapters/__init__.py +11 -0
  5. ccproxy/adapters/base.py +80 -0
  6. ccproxy/adapters/openai/__init__.py +43 -0
  7. ccproxy/adapters/openai/adapter.py +915 -0
  8. ccproxy/adapters/openai/models.py +412 -0
  9. ccproxy/adapters/openai/streaming.py +449 -0
  10. ccproxy/api/__init__.py +28 -0
  11. ccproxy/api/app.py +225 -0
  12. ccproxy/api/dependencies.py +140 -0
  13. ccproxy/api/middleware/__init__.py +11 -0
  14. ccproxy/api/middleware/auth.py +0 -0
  15. ccproxy/api/middleware/cors.py +55 -0
  16. ccproxy/api/middleware/errors.py +703 -0
  17. ccproxy/api/middleware/headers.py +51 -0
  18. ccproxy/api/middleware/logging.py +175 -0
  19. ccproxy/api/middleware/request_id.py +69 -0
  20. ccproxy/api/middleware/server_header.py +62 -0
  21. ccproxy/api/responses.py +84 -0
  22. ccproxy/api/routes/__init__.py +16 -0
  23. ccproxy/api/routes/claude.py +181 -0
  24. ccproxy/api/routes/health.py +489 -0
  25. ccproxy/api/routes/metrics.py +1033 -0
  26. ccproxy/api/routes/proxy.py +238 -0
  27. ccproxy/auth/__init__.py +75 -0
  28. ccproxy/auth/bearer.py +68 -0
  29. ccproxy/auth/credentials_adapter.py +93 -0
  30. ccproxy/auth/dependencies.py +229 -0
  31. ccproxy/auth/exceptions.py +79 -0
  32. ccproxy/auth/manager.py +102 -0
  33. ccproxy/auth/models.py +118 -0
  34. ccproxy/auth/oauth/__init__.py +26 -0
  35. ccproxy/auth/oauth/models.py +49 -0
  36. ccproxy/auth/oauth/routes.py +396 -0
  37. ccproxy/auth/oauth/storage.py +0 -0
  38. ccproxy/auth/storage/__init__.py +12 -0
  39. ccproxy/auth/storage/base.py +57 -0
  40. ccproxy/auth/storage/json_file.py +159 -0
  41. ccproxy/auth/storage/keyring.py +192 -0
  42. ccproxy/claude_sdk/__init__.py +20 -0
  43. ccproxy/claude_sdk/client.py +169 -0
  44. ccproxy/claude_sdk/converter.py +331 -0
  45. ccproxy/claude_sdk/options.py +120 -0
  46. ccproxy/cli/__init__.py +14 -0
  47. ccproxy/cli/commands/__init__.py +8 -0
  48. ccproxy/cli/commands/auth.py +553 -0
  49. ccproxy/cli/commands/config/__init__.py +14 -0
  50. ccproxy/cli/commands/config/commands.py +766 -0
  51. ccproxy/cli/commands/config/schema_commands.py +119 -0
  52. ccproxy/cli/commands/serve.py +630 -0
  53. ccproxy/cli/docker/__init__.py +34 -0
  54. ccproxy/cli/docker/adapter_factory.py +157 -0
  55. ccproxy/cli/docker/params.py +278 -0
  56. ccproxy/cli/helpers.py +144 -0
  57. ccproxy/cli/main.py +193 -0
  58. ccproxy/cli/options/__init__.py +14 -0
  59. ccproxy/cli/options/claude_options.py +216 -0
  60. ccproxy/cli/options/core_options.py +40 -0
  61. ccproxy/cli/options/security_options.py +48 -0
  62. ccproxy/cli/options/server_options.py +117 -0
  63. ccproxy/config/__init__.py +40 -0
  64. ccproxy/config/auth.py +154 -0
  65. ccproxy/config/claude.py +124 -0
  66. ccproxy/config/cors.py +79 -0
  67. ccproxy/config/discovery.py +87 -0
  68. ccproxy/config/docker_settings.py +265 -0
  69. ccproxy/config/loader.py +108 -0
  70. ccproxy/config/observability.py +158 -0
  71. ccproxy/config/pricing.py +88 -0
  72. ccproxy/config/reverse_proxy.py +31 -0
  73. ccproxy/config/scheduler.py +89 -0
  74. ccproxy/config/security.py +14 -0
  75. ccproxy/config/server.py +81 -0
  76. ccproxy/config/settings.py +534 -0
  77. ccproxy/config/validators.py +231 -0
  78. ccproxy/core/__init__.py +274 -0
  79. ccproxy/core/async_utils.py +675 -0
  80. ccproxy/core/constants.py +97 -0
  81. ccproxy/core/errors.py +256 -0
  82. ccproxy/core/http.py +328 -0
  83. ccproxy/core/http_transformers.py +428 -0
  84. ccproxy/core/interfaces.py +247 -0
  85. ccproxy/core/logging.py +189 -0
  86. ccproxy/core/middleware.py +114 -0
  87. ccproxy/core/proxy.py +143 -0
  88. ccproxy/core/system.py +38 -0
  89. ccproxy/core/transformers.py +259 -0
  90. ccproxy/core/types.py +129 -0
  91. ccproxy/core/validators.py +288 -0
  92. ccproxy/docker/__init__.py +67 -0
  93. ccproxy/docker/adapter.py +588 -0
  94. ccproxy/docker/docker_path.py +207 -0
  95. ccproxy/docker/middleware.py +103 -0
  96. ccproxy/docker/models.py +228 -0
  97. ccproxy/docker/protocol.py +192 -0
  98. ccproxy/docker/stream_process.py +264 -0
  99. ccproxy/docker/validators.py +173 -0
  100. ccproxy/models/__init__.py +123 -0
  101. ccproxy/models/errors.py +42 -0
  102. ccproxy/models/messages.py +243 -0
  103. ccproxy/models/requests.py +85 -0
  104. ccproxy/models/responses.py +227 -0
  105. ccproxy/models/types.py +102 -0
  106. ccproxy/observability/__init__.py +51 -0
  107. ccproxy/observability/access_logger.py +400 -0
  108. ccproxy/observability/context.py +447 -0
  109. ccproxy/observability/metrics.py +539 -0
  110. ccproxy/observability/pushgateway.py +366 -0
  111. ccproxy/observability/sse_events.py +303 -0
  112. ccproxy/observability/stats_printer.py +755 -0
  113. ccproxy/observability/storage/__init__.py +1 -0
  114. ccproxy/observability/storage/duckdb_simple.py +665 -0
  115. ccproxy/observability/storage/models.py +55 -0
  116. ccproxy/pricing/__init__.py +19 -0
  117. ccproxy/pricing/cache.py +212 -0
  118. ccproxy/pricing/loader.py +267 -0
  119. ccproxy/pricing/models.py +106 -0
  120. ccproxy/pricing/updater.py +309 -0
  121. ccproxy/scheduler/__init__.py +39 -0
  122. ccproxy/scheduler/core.py +335 -0
  123. ccproxy/scheduler/exceptions.py +34 -0
  124. ccproxy/scheduler/manager.py +186 -0
  125. ccproxy/scheduler/registry.py +150 -0
  126. ccproxy/scheduler/tasks.py +484 -0
  127. ccproxy/services/__init__.py +10 -0
  128. ccproxy/services/claude_sdk_service.py +614 -0
  129. ccproxy/services/credentials/__init__.py +55 -0
  130. ccproxy/services/credentials/config.py +105 -0
  131. ccproxy/services/credentials/manager.py +562 -0
  132. ccproxy/services/credentials/oauth_client.py +482 -0
  133. ccproxy/services/proxy_service.py +1536 -0
  134. ccproxy/static/.keep +0 -0
  135. ccproxy/testing/__init__.py +34 -0
  136. ccproxy/testing/config.py +148 -0
  137. ccproxy/testing/content_generation.py +197 -0
  138. ccproxy/testing/mock_responses.py +262 -0
  139. ccproxy/testing/response_handlers.py +161 -0
  140. ccproxy/testing/scenarios.py +241 -0
  141. ccproxy/utils/__init__.py +6 -0
  142. ccproxy/utils/cost_calculator.py +210 -0
  143. ccproxy/utils/streaming_metrics.py +199 -0
  144. ccproxy_api-0.1.0.dist-info/METADATA +253 -0
  145. ccproxy_api-0.1.0.dist-info/RECORD +148 -0
  146. ccproxy_api-0.1.0.dist-info/WHEEL +4 -0
  147. ccproxy_api-0.1.0.dist-info/entry_points.txt +2 -0
  148. ccproxy_api-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,366 @@
1
"""Prometheus Pushgateway integration for batch metrics."""

from __future__ import annotations

# NOTE(review): asyncio and random appear unused in this module — confirm
# against the full file before removing.
import asyncio
import random
import time
from typing import Any

from structlog import get_logger

from ccproxy.config.observability import ObservabilitySettings


# Module-level structured logger, namespaced by module path.
logger = get_logger(__name__)


# Import prometheus_client with graceful degradation (matching existing metrics.py pattern)
try:
    from prometheus_client import (
        CollectorRegistry,
        delete_from_gateway,
        push_to_gateway,
        pushadd_to_gateway,
    )

    PROMETHEUS_AVAILABLE = True
except ImportError:
    # prometheus_client is an optional dependency: when missing, expose no-op
    # stand-ins so the rest of this module can be imported and called safely.
    PROMETHEUS_AVAILABLE = False

    # Dummy classes for graceful degradation
    def push_to_gateway(*args: Any, **kwargs: Any) -> None:  # type: ignore[misc]
        pass

    def pushadd_to_gateway(*args: Any, **kwargs: Any) -> None:  # type: ignore[misc]
        pass

    def delete_from_gateway(*args: Any, **kwargs: Any) -> None:  # type: ignore[misc]
        pass

    class CollectorRegistry:  # type: ignore[no-redef]
        pass
43
+
44
+
45
class CircuitBreaker:
    """Simple circuit breaker for pushgateway operations.

    State machine over three string states: "CLOSED" (normal operation),
    "OPEN" (too many failures; calls are blocked until the recovery window
    passes), and "HALF_OPEN" (trial calls allowed after the window).
    """

    def __init__(self, failure_threshold: int = 5, recovery_timeout: float = 60.0):
        self.failure_threshold = failure_threshold
        self.recovery_timeout = recovery_timeout
        self.failure_count = 0
        self.last_failure_time = 0.0
        self.state = "CLOSED"  # CLOSED, OPEN, HALF_OPEN

    def can_execute(self) -> bool:
        """Report whether an operation may be attempted right now."""
        if self.state == "OPEN":
            # While open, block until the recovery window has elapsed,
            # then transition to HALF_OPEN and allow a trial request.
            elapsed = time.time() - self.last_failure_time
            if elapsed <= self.recovery_timeout:
                return False
            self.state = "HALF_OPEN"
        # CLOSED and HALF_OPEN both permit execution.
        return True

    def record_success(self) -> None:
        """Reset the breaker to CLOSED after a successful operation."""
        self.failure_count = 0
        self.state = "CLOSED"

    def record_failure(self) -> None:
        """Count a failure; open the breaker once the threshold is reached."""
        self.failure_count += 1
        self.last_failure_time = time.time()

        if self.failure_count < self.failure_threshold:
            return

        self.state = "OPEN"
        logger.warning(
            "pushgateway_circuit_breaker_opened",
            failure_count=self.failure_count,
            recovery_timeout=self.recovery_timeout,
        )
84
+
85
+
86
class PushgatewayClient:
    """Prometheus Pushgateway client using official prometheus_client methods.

    Supports standard pushgateway operations:
    - push_to_gateway(): Replace all metrics for job/instance
    - pushadd_to_gateway(): Add metrics to existing job/instance
    - delete_from_gateway(): Delete metrics for job/instance

    Also supports VictoriaMetrics remote write protocol for compatibility.

    Every operation is guarded by a CircuitBreaker so a repeatedly failing
    gateway stops being contacted until the recovery timeout elapses.
    """

    def __init__(self, settings: ObservabilitySettings) -> None:
        """Initialize Pushgateway client.

        Args:
            settings: Observability configuration settings
        """
        self.settings = settings
        # Pushgateway is enabled if URL is configured and prometheus_client is available
        self._enabled = PROMETHEUS_AVAILABLE and bool(settings.pushgateway_url)
        # Opens after 5 consecutive failures; allows a trial retry after 60s.
        self._circuit_breaker = CircuitBreaker(
            failure_threshold=5,
            recovery_timeout=60.0,
        )

        # Only log if pushgateway URL is configured but prometheus is not available
        if settings.pushgateway_url and not PROMETHEUS_AVAILABLE:
            logger.warning(
                "prometheus_client not available. Pushgateway will be disabled. "
                "Install with: pip install prometheus-client"
            )

    def push_metrics(self, registry: CollectorRegistry, method: str = "push") -> bool:
        """Push metrics to Pushgateway using official prometheus_client methods.

        Args:
            registry: Prometheus metrics registry to push
            method: Push method - "push" (replace), "pushadd" (add), or "delete"

        Returns:
            True if push succeeded, False otherwise
        """

        if not self._enabled or not self.settings.pushgateway_url:
            return False

        # Check circuit breaker before attempting operation
        if not self._circuit_breaker.can_execute():
            logger.debug(
                "pushgateway_circuit_breaker_blocking",
                state=self._circuit_breaker.state,
                failure_count=self._circuit_breaker.failure_count,
            )
            return False

        try:
            # Check if URL looks like VictoriaMetrics remote write endpoint
            if "/api/v1/write" in self.settings.pushgateway_url:
                success = self._push_remote_write(registry)
            else:
                success = self._push_standard(registry, method)

            # Feed the outcome back into the breaker so sustained failures
            # eventually short-circuit further attempts.
            if success:
                self._circuit_breaker.record_success()
            else:
                self._circuit_breaker.record_failure()

            return success

        except Exception as e:
            # Defensive: helpers catch their own errors, but anything that
            # escapes still counts as a breaker failure.
            self._circuit_breaker.record_failure()
            logger.error(
                "pushgateway_push_failed",
                url=self.settings.pushgateway_url,
                job=self.settings.pushgateway_job,
                method=method,
                error=str(e),
                error_type=type(e).__name__,
            )
            return False

    def _push_standard(self, registry: CollectorRegistry, method: str = "push") -> bool:
        """Push using standard Prometheus pushgateway protocol with official client methods.

        Args:
            registry: Prometheus metrics registry
            method: Push method - "push" (replace), "pushadd" (add), or "delete"

        Returns:
            True on success; False on any error or an unknown method.
        """
        if not self.settings.pushgateway_url:
            return False

        try:
            # Use the appropriate prometheus_client function based on method
            # NOTE(review): no instance grouping key is supplied, so all
            # processes pushing the same job share one group — confirm intended.
            if method == "push":
                push_to_gateway(
                    gateway=self.settings.pushgateway_url,
                    job=self.settings.pushgateway_job,
                    registry=registry,
                )
            elif method == "pushadd":
                pushadd_to_gateway(
                    gateway=self.settings.pushgateway_url,
                    job=self.settings.pushgateway_job,
                    registry=registry,
                )
            elif method == "delete":
                # Delete identifies the group by job alone; the registry
                # argument is never used on this path.
                delete_from_gateway(
                    gateway=self.settings.pushgateway_url,
                    job=self.settings.pushgateway_job,
                )
            else:
                logger.error("pushgateway_invalid_method", method=method)
                return False

            logger.debug(
                "pushgateway_push_success",
                url=self.settings.pushgateway_url,
                job=self.settings.pushgateway_job,
                protocol="standard",
                method=method,
            )
            return True

        except Exception as e:
            logger.error(
                "pushgateway_standard_push_failed",
                url=self.settings.pushgateway_url,
                job=self.settings.pushgateway_job,
                method=method,
                error=str(e),
                error_type=type(e).__name__,
            )
            return False

    def _push_remote_write(self, registry: CollectorRegistry) -> bool:
        """Push using VictoriaMetrics import protocol for exposition format data.

        VictoriaMetrics supports importing Prometheus exposition format data
        via the /api/v1/import/prometheus endpoint, which is simpler than
        the full remote write protocol that requires protobuf encoding.

        Returns:
            True if the import endpoint accepted the data, False otherwise.
        """
        # Local imports keep these optional dependencies off the module
        # import path; this method only runs when a URL is configured.
        import httpx
        from prometheus_client.exposition import generate_latest

        if not self.settings.pushgateway_url:
            return False

        # Generate metrics in Prometheus exposition format
        metrics_data = generate_latest(registry)

        # Convert /api/v1/write URL to /api/v1/import/prometheus for VictoriaMetrics
        # This endpoint accepts Prometheus exposition format directly
        if "/api/v1/write" in self.settings.pushgateway_url:
            import_url = self.settings.pushgateway_url.replace(
                "/api/v1/write", "/api/v1/import/prometheus"
            )
        else:
            # Fallback - assume it's already the correct import URL
            import_url = self.settings.pushgateway_url

        try:
            # VictoriaMetrics import endpoint accepts text/plain exposition format
            # NOTE(review): this is a synchronous HTTP call; the 30s timeout
            # bounds how long a caller can stall here.
            response = httpx.post(
                import_url,
                content=metrics_data,
                headers={
                    "Content-Type": "text/plain; charset=utf-8",
                    "User-Agent": "ccproxy-pushgateway-client/1.0",
                },
                timeout=30,
            )

            if response.status_code in (200, 204):
                logger.debug(
                    "pushgateway_import_success",
                    url=import_url,
                    job=self.settings.pushgateway_job,
                    protocol="victoriametrics_import",
                    status=response.status_code,
                )
                return True
            else:
                logger.error(
                    "pushgateway_import_failed",
                    url=import_url,
                    status=response.status_code,
                    # Truncate so a large HTML error page cannot flood the log.
                    response=response.text[:500] if response.text else "empty",
                )
                return False
        except httpx.RequestError as e:
            logger.error(
                "pushgateway_import_request_error",
                url=import_url,
                error=str(e),
                error_type=type(e).__name__,
            )
            return False

    def push_add_metrics(self, registry: CollectorRegistry) -> bool:
        """Add metrics to existing job/instance (pushadd operation).

        Args:
            registry: Prometheus metrics registry to add

        Returns:
            True if push succeeded, False otherwise
        """
        return self.push_metrics(registry, method="pushadd")

    def delete_metrics(self) -> bool:
        """Delete all metrics for the configured job.

        Returns:
            True if delete succeeded, False otherwise
        """

        if not self._enabled or not self.settings.pushgateway_url:
            return False

        # Check circuit breaker before attempting operation
        if not self._circuit_breaker.can_execute():
            logger.debug(
                "pushgateway_circuit_breaker_blocking_delete",
                state=self._circuit_breaker.state,
                failure_count=self._circuit_breaker.failure_count,
            )
            return False

        try:
            # Only standard pushgateway supports delete operation
            if "/api/v1/write" in self.settings.pushgateway_url:
                logger.warning("pushgateway_delete_not_supported_for_remote_write")
                return False
            else:
                # The registry is unused on the delete path (see
                # _push_standard), so passing None is safe despite the type.
                success = self._push_standard(None, method="delete")  # type: ignore[arg-type]

                if success:
                    self._circuit_breaker.record_success()
                else:
                    self._circuit_breaker.record_failure()

                return success

        except Exception as e:
            self._circuit_breaker.record_failure()
            logger.error(
                "pushgateway_delete_failed",
                url=self.settings.pushgateway_url,
                job=self.settings.pushgateway_job,
                error=str(e),
                error_type=type(e).__name__,
            )
            return False

    def is_enabled(self) -> bool:
        """Check if Pushgateway client is enabled and configured."""
        return self._enabled and bool(self.settings.pushgateway_url)
343
+
344
+
345
# Global pushgateway client instance
_global_pushgateway_client: PushgatewayClient | None = None


def get_pushgateway_client() -> PushgatewayClient:
    """Return the process-wide pushgateway client, creating it on first use."""
    global _global_pushgateway_client

    if _global_pushgateway_client is not None:
        return _global_pushgateway_client

    # Deferred import keeps module import time free of a circular dependency.
    from ccproxy.config.settings import get_settings

    app_settings = get_settings()
    _global_pushgateway_client = PushgatewayClient(app_settings.observability)
    return _global_pushgateway_client


def reset_pushgateway_client() -> None:
    """Drop the cached client so the next accessor call rebuilds it (mainly for testing)."""
    global _global_pushgateway_client
    _global_pushgateway_client = None
@@ -0,0 +1,303 @@
1
"""
Server-Sent Events (SSE) event manager for real-time dashboard updates.

This module provides centralized SSE connection management and event broadcasting
for real-time dashboard notifications when requests start, complete, or error.
"""

from __future__ import annotations

import asyncio
import json
import time
import uuid
from collections.abc import AsyncGenerator
from typing import Any

import structlog


# Module-level structured logger, namespaced by module path.
logger = structlog.get_logger(__name__)
21
+
22
+
23
class SSEEventManager:
    """
    Centralized SSE connection management and event broadcasting.

    Manages multiple SSE connections and broadcasts events to all connected clients.
    Uses bounded queues to prevent memory issues with slow clients: when a
    client's queue fills, the oldest event is dropped and an "overflow"
    marker is inserted; clients that stay full are removed entirely.
    """

    def __init__(self, max_queue_size: int = 100) -> None:
        """
        Initialize SSE event manager.

        Args:
            max_queue_size: Maximum events to queue per connection before dropping
        """
        # connection_id -> bounded event queue for that client
        self._connections: dict[str, asyncio.Queue[dict[str, Any]]] = {}
        # Guards all reads/writes of self._connections.
        self._lock = asyncio.Lock()
        self._max_queue_size = max_queue_size

    async def add_connection(
        self, connection_id: str | None = None, request_id: str | None = None
    ) -> AsyncGenerator[str, None]:
        """
        Add SSE connection and yield events as JSON strings.

        Args:
            connection_id: Unique connection identifier (generated if not provided)
            request_id: Request identifier for tracking (used only for logging here)

        Yields:
            JSON-formatted event strings for SSE
        """
        if connection_id is None:
            connection_id = str(uuid.uuid4())

        # Create bounded queue for this connection
        queue: asyncio.Queue[dict[str, Any]] = asyncio.Queue(
            maxsize=self._max_queue_size
        )

        async with self._lock:
            self._connections[connection_id] = queue

        logger.debug(
            "sse_connection_added", connection_id=connection_id, request_id=request_id
        )

        try:
            # Send initial connection event
            connection_event = {
                "type": "connection",
                "message": "Connected to metrics stream",
                "connection_id": connection_id,
                "timestamp": time.time(),
            }
            yield self._format_sse_event(connection_event)

            while True:
                # Wait for next event
                event = await queue.get()

                # Check for special disconnect event
                # ("_disconnect" is queued by disconnect_all() to end the stream)
                if event.get("type") == "_disconnect":
                    break

                # Yield formatted event
                yield self._format_sse_event(event)

        except asyncio.CancelledError:
            logger.debug("sse_connection_cancelled", connection_id=connection_id)
            raise
        except GeneratorExit:
            # Client closed the stream; re-raise so the generator finalizes.
            logger.debug("sse_connection_generator_exit", connection_id=connection_id)
            raise
        finally:
            # Clean up connection
            await self._cleanup_connection(connection_id)

            # Send disconnect event only if not in shutdown
            try:
                disconnect_event = {
                    "type": "disconnect",
                    "message": "Stream disconnected",
                    "connection_id": connection_id,
                    "timestamp": time.time(),
                }
                yield self._format_sse_event(disconnect_event)
            except (GeneratorExit, asyncio.CancelledError):
                # Ignore errors during cleanup; yielding after the consumer
                # is gone raises and must not mask the original exit.
                pass

    async def emit_event(self, event_type: str, data: dict[str, Any]) -> None:
        """
        Broadcast event to all connected clients.

        Args:
            event_type: Type of event (request_start, request_complete, request_error)
            data: Event data dictionary
        """
        if not self._connections:
            return  # No connected clients

        event = {
            "type": event_type,
            "data": data,
            "timestamp": time.time(),
        }

        async with self._lock:
            # Get copy of connections to avoid modification during iteration
            connections = dict(self._connections)

        # Broadcast to all connections
        failed_connections = []

        for connection_id, queue in connections.items():
            try:
                # Try to put event in queue without blocking
                queue.put_nowait(event)
            except asyncio.QueueFull:
                # Queue is full, handle overflow
                try:
                    # Try to drop oldest event and add overflow indicator
                    queue.get_nowait()  # Remove oldest
                    overflow_event = {
                        "type": "overflow",
                        "message": "Event queue full, some events dropped",
                        "timestamp": time.time(),
                    }
                    try:
                        queue.put_nowait(overflow_event)
                        queue.put_nowait(event)
                    except asyncio.QueueFull:
                        # Still full after dropping, connection is problematic
                        failed_connections.append(connection_id)
                        continue

                    logger.warning(
                        "sse_queue_overflow",
                        connection_id=connection_id,
                        max_queue_size=self._max_queue_size,
                    )
                except asyncio.QueueEmpty:
                    # Queue became empty (consumer drained it concurrently),
                    # try again
                    try:
                        queue.put_nowait(event)
                    except asyncio.QueueFull:
                        # Still full, connection is problematic
                        failed_connections.append(connection_id)
                except Exception as e:
                    logger.error(
                        "sse_overflow_error",
                        connection_id=connection_id,
                        error=str(e),
                    )
                    failed_connections.append(connection_id)
            except Exception as e:
                logger.error(
                    "sse_broadcast_error",
                    connection_id=connection_id,
                    error=str(e),
                )
                failed_connections.append(connection_id)

        # Clean up failed connections
        for connection_id in failed_connections:
            await self._cleanup_connection(connection_id)

        if failed_connections:
            logger.debug(
                "sse_connections_cleaned",
                failed_count=len(failed_connections),
                # NOTE(review): read without holding _lock — acceptable for a
                # log line, but not a consistent snapshot.
                active_count=len(self._connections),
            )

    async def disconnect_all(self) -> None:
        """Disconnect all active connections gracefully."""
        async with self._lock:
            connections = dict(self._connections)

        # Iterate outside the lock so _cleanup_connection can re-acquire it.
        for connection_id, queue in connections.items():
            try:
                # Send disconnect signal
                disconnect_signal = {"type": "_disconnect"}
                queue.put_nowait(disconnect_signal)
            except asyncio.QueueFull:
                # Queue is full, force cleanup
                await self._cleanup_connection(connection_id)
            except Exception as e:
                logger.error(
                    "sse_disconnect_error",
                    connection_id=connection_id,
                    error=str(e),
                )

        logger.debug("sse_all_connections_disconnected")

    async def _cleanup_connection(self, connection_id: str) -> None:
        """Remove connection from active connections."""
        async with self._lock:
            if connection_id in self._connections:
                del self._connections[connection_id]
                logger.debug("sse_connection_removed", connection_id=connection_id)

    def _format_sse_event(self, event: dict[str, Any]) -> str:
        """Format event as SSE data string (``data: <json>\\n\\n``)."""
        try:
            json_data = json.dumps(event, default=self._json_serializer)
            return f"data: {json_data}\n\n"
        except (TypeError, ValueError) as e:
            logger.error("sse_format_error", error=str(e), event_type=event.get("type"))
            # Return error event instead, so the stream stays well-formed.
            error_event = {
                "type": "error",
                "message": "Failed to format event",
                "timestamp": time.time(),
            }
            json_data = json.dumps(error_event, default=self._json_serializer)
            return f"data: {json_data}\n\n"

    def _json_serializer(self, obj: Any) -> Any:
        """Custom JSON serializer for datetime and other objects."""
        from datetime import datetime

        if isinstance(obj, datetime):
            return obj.isoformat()
        # Anything else is a programming error; surface it to json.dumps.
        raise TypeError(f"Object of type {type(obj)} is not JSON serializable")

    async def get_connection_count(self) -> int:
        """Get number of active connections."""
        async with self._lock:
            return len(self._connections)

    async def get_connection_info(self) -> dict[str, Any]:
        """Get connection status information (counts, limits, and ids)."""
        async with self._lock:
            return {
                "active_connections": len(self._connections),
                "max_queue_size": self._max_queue_size,
                "connection_ids": list(self._connections.keys()),
            }
264
+
265
+
266
# Global SSE event manager instance
_global_sse_manager: SSEEventManager | None = None


def get_sse_manager() -> SSEEventManager:
    """Return the process-wide SSE event manager, creating it on first use."""
    global _global_sse_manager

    if _global_sse_manager is None:
        _global_sse_manager = SSEEventManager()
    return _global_sse_manager


async def emit_sse_event(event_type: str, data: dict[str, Any]) -> None:
    """
    Convenience function to emit SSE event using global manager.

    Broadcasting is best-effort: any failure is logged at debug level and
    never propagated to the caller.

    Args:
        event_type: Type of event (request_start, request_complete, request_error)
        data: Event data dictionary
    """
    try:
        await get_sse_manager().emit_event(event_type, data)
    except Exception as e:
        # Log error but don't fail the request
        logger.debug("sse_emit_failed", event_type=event_type, error=str(e))


async def cleanup_sse_manager() -> None:
    """Clean up global SSE manager and disconnect all clients."""
    global _global_sse_manager

    if _global_sse_manager is None:
        return

    await _global_sse_manager.disconnect_all()
    _global_sse_manager = None
    logger.debug("sse_manager_cleaned_up")