ccproxy_api-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ccproxy/__init__.py +4 -0
- ccproxy/__main__.py +7 -0
- ccproxy/_version.py +21 -0
- ccproxy/adapters/__init__.py +11 -0
- ccproxy/adapters/base.py +80 -0
- ccproxy/adapters/openai/__init__.py +43 -0
- ccproxy/adapters/openai/adapter.py +915 -0
- ccproxy/adapters/openai/models.py +412 -0
- ccproxy/adapters/openai/streaming.py +449 -0
- ccproxy/api/__init__.py +28 -0
- ccproxy/api/app.py +225 -0
- ccproxy/api/dependencies.py +140 -0
- ccproxy/api/middleware/__init__.py +11 -0
- ccproxy/api/middleware/auth.py +0 -0
- ccproxy/api/middleware/cors.py +55 -0
- ccproxy/api/middleware/errors.py +703 -0
- ccproxy/api/middleware/headers.py +51 -0
- ccproxy/api/middleware/logging.py +175 -0
- ccproxy/api/middleware/request_id.py +69 -0
- ccproxy/api/middleware/server_header.py +62 -0
- ccproxy/api/responses.py +84 -0
- ccproxy/api/routes/__init__.py +16 -0
- ccproxy/api/routes/claude.py +181 -0
- ccproxy/api/routes/health.py +489 -0
- ccproxy/api/routes/metrics.py +1033 -0
- ccproxy/api/routes/proxy.py +238 -0
- ccproxy/auth/__init__.py +75 -0
- ccproxy/auth/bearer.py +68 -0
- ccproxy/auth/credentials_adapter.py +93 -0
- ccproxy/auth/dependencies.py +229 -0
- ccproxy/auth/exceptions.py +79 -0
- ccproxy/auth/manager.py +102 -0
- ccproxy/auth/models.py +118 -0
- ccproxy/auth/oauth/__init__.py +26 -0
- ccproxy/auth/oauth/models.py +49 -0
- ccproxy/auth/oauth/routes.py +396 -0
- ccproxy/auth/oauth/storage.py +0 -0
- ccproxy/auth/storage/__init__.py +12 -0
- ccproxy/auth/storage/base.py +57 -0
- ccproxy/auth/storage/json_file.py +159 -0
- ccproxy/auth/storage/keyring.py +192 -0
- ccproxy/claude_sdk/__init__.py +20 -0
- ccproxy/claude_sdk/client.py +169 -0
- ccproxy/claude_sdk/converter.py +331 -0
- ccproxy/claude_sdk/options.py +120 -0
- ccproxy/cli/__init__.py +14 -0
- ccproxy/cli/commands/__init__.py +8 -0
- ccproxy/cli/commands/auth.py +553 -0
- ccproxy/cli/commands/config/__init__.py +14 -0
- ccproxy/cli/commands/config/commands.py +766 -0
- ccproxy/cli/commands/config/schema_commands.py +119 -0
- ccproxy/cli/commands/serve.py +630 -0
- ccproxy/cli/docker/__init__.py +34 -0
- ccproxy/cli/docker/adapter_factory.py +157 -0
- ccproxy/cli/docker/params.py +278 -0
- ccproxy/cli/helpers.py +144 -0
- ccproxy/cli/main.py +193 -0
- ccproxy/cli/options/__init__.py +14 -0
- ccproxy/cli/options/claude_options.py +216 -0
- ccproxy/cli/options/core_options.py +40 -0
- ccproxy/cli/options/security_options.py +48 -0
- ccproxy/cli/options/server_options.py +117 -0
- ccproxy/config/__init__.py +40 -0
- ccproxy/config/auth.py +154 -0
- ccproxy/config/claude.py +124 -0
- ccproxy/config/cors.py +79 -0
- ccproxy/config/discovery.py +87 -0
- ccproxy/config/docker_settings.py +265 -0
- ccproxy/config/loader.py +108 -0
- ccproxy/config/observability.py +158 -0
- ccproxy/config/pricing.py +88 -0
- ccproxy/config/reverse_proxy.py +31 -0
- ccproxy/config/scheduler.py +89 -0
- ccproxy/config/security.py +14 -0
- ccproxy/config/server.py +81 -0
- ccproxy/config/settings.py +534 -0
- ccproxy/config/validators.py +231 -0
- ccproxy/core/__init__.py +274 -0
- ccproxy/core/async_utils.py +675 -0
- ccproxy/core/constants.py +97 -0
- ccproxy/core/errors.py +256 -0
- ccproxy/core/http.py +328 -0
- ccproxy/core/http_transformers.py +428 -0
- ccproxy/core/interfaces.py +247 -0
- ccproxy/core/logging.py +189 -0
- ccproxy/core/middleware.py +114 -0
- ccproxy/core/proxy.py +143 -0
- ccproxy/core/system.py +38 -0
- ccproxy/core/transformers.py +259 -0
- ccproxy/core/types.py +129 -0
- ccproxy/core/validators.py +288 -0
- ccproxy/docker/__init__.py +67 -0
- ccproxy/docker/adapter.py +588 -0
- ccproxy/docker/docker_path.py +207 -0
- ccproxy/docker/middleware.py +103 -0
- ccproxy/docker/models.py +228 -0
- ccproxy/docker/protocol.py +192 -0
- ccproxy/docker/stream_process.py +264 -0
- ccproxy/docker/validators.py +173 -0
- ccproxy/models/__init__.py +123 -0
- ccproxy/models/errors.py +42 -0
- ccproxy/models/messages.py +243 -0
- ccproxy/models/requests.py +85 -0
- ccproxy/models/responses.py +227 -0
- ccproxy/models/types.py +102 -0
- ccproxy/observability/__init__.py +51 -0
- ccproxy/observability/access_logger.py +400 -0
- ccproxy/observability/context.py +447 -0
- ccproxy/observability/metrics.py +539 -0
- ccproxy/observability/pushgateway.py +366 -0
- ccproxy/observability/sse_events.py +303 -0
- ccproxy/observability/stats_printer.py +755 -0
- ccproxy/observability/storage/__init__.py +1 -0
- ccproxy/observability/storage/duckdb_simple.py +665 -0
- ccproxy/observability/storage/models.py +55 -0
- ccproxy/pricing/__init__.py +19 -0
- ccproxy/pricing/cache.py +212 -0
- ccproxy/pricing/loader.py +267 -0
- ccproxy/pricing/models.py +106 -0
- ccproxy/pricing/updater.py +309 -0
- ccproxy/scheduler/__init__.py +39 -0
- ccproxy/scheduler/core.py +335 -0
- ccproxy/scheduler/exceptions.py +34 -0
- ccproxy/scheduler/manager.py +186 -0
- ccproxy/scheduler/registry.py +150 -0
- ccproxy/scheduler/tasks.py +484 -0
- ccproxy/services/__init__.py +10 -0
- ccproxy/services/claude_sdk_service.py +614 -0
- ccproxy/services/credentials/__init__.py +55 -0
- ccproxy/services/credentials/config.py +105 -0
- ccproxy/services/credentials/manager.py +562 -0
- ccproxy/services/credentials/oauth_client.py +482 -0
- ccproxy/services/proxy_service.py +1536 -0
- ccproxy/static/.keep +0 -0
- ccproxy/testing/__init__.py +34 -0
- ccproxy/testing/config.py +148 -0
- ccproxy/testing/content_generation.py +197 -0
- ccproxy/testing/mock_responses.py +262 -0
- ccproxy/testing/response_handlers.py +161 -0
- ccproxy/testing/scenarios.py +241 -0
- ccproxy/utils/__init__.py +6 -0
- ccproxy/utils/cost_calculator.py +210 -0
- ccproxy/utils/streaming_metrics.py +199 -0
- ccproxy_api-0.1.0.dist-info/METADATA +253 -0
- ccproxy_api-0.1.0.dist-info/RECORD +148 -0
- ccproxy_api-0.1.0.dist-info/WHEEL +4 -0
- ccproxy_api-0.1.0.dist-info/entry_points.txt +2 -0
- ccproxy_api-0.1.0.dist-info/licenses/LICENSE +21 -0
ccproxy/observability/access_logger.py
@@ -0,0 +1,400 @@
+"""Unified access logging utilities for comprehensive request tracking.
+
+This module provides centralized access logging functionality that can be used
+across different parts of the application to generate consistent, comprehensive
+access logs with complete request metadata including token usage and costs.
+"""
+
+from __future__ import annotations
+
+import asyncio
+import time
+from typing import TYPE_CHECKING, Any, Optional
+
+import structlog
+
+
+if TYPE_CHECKING:
+    from ccproxy.observability.context import RequestContext
+    from ccproxy.observability.metrics import PrometheusMetrics
+    from ccproxy.observability.storage.duckdb_simple import (
+        AccessLogPayload,
+        SimpleDuckDBStorage,
+    )
+
+
+logger = structlog.get_logger(__name__)
+
+
+async def log_request_access(
+    context: RequestContext,
+    status_code: int | None = None,
+    client_ip: str | None = None,
+    user_agent: str | None = None,
+    method: str | None = None,
+    path: str | None = None,
+    query: str | None = None,
+    error_message: str | None = None,
+    storage: SimpleDuckDBStorage | None = None,
+    metrics: PrometheusMetrics | None = None,
+    **additional_metadata: Any,
+) -> None:
+    """Log comprehensive access information for a request.
+
+    This function generates a unified access log entry with complete request
+    metadata including timing, tokens, costs, and any additional context.
+    Also stores the access log in DuckDB if available and records Prometheus metrics.
+
+    Args:
+        context: Request context with timing and metadata
+        status_code: HTTP status code
+        client_ip: Client IP address
+        user_agent: User agent string
+        method: HTTP method
+        path: Request path
+        query: Query parameters
+        error_message: Error message if applicable
+        storage: DuckDB storage instance (optional)
+        metrics: PrometheusMetrics instance for recording metrics (optional)
+        **additional_metadata: Any additional fields to include
+    """
+    # Extract basic request info from context metadata if not provided
+    ctx_metadata = context.metadata
+    method = method or ctx_metadata.get("method")
+    path = path or ctx_metadata.get("path")
+    status_code = status_code or ctx_metadata.get("status_code")
+
+    # Prepare comprehensive log data
+    log_data = {
+        "request_id": context.request_id,
+        "method": method,
+        "path": path,
+        "query": query,
+        "status_code": status_code,
+        "client_ip": client_ip,
+        "user_agent": user_agent,
+        "duration_ms": context.duration_ms,
+        "duration_seconds": context.duration_seconds,
+        "error_message": error_message,
+    }
+
+    # Add token and cost metrics if available
+    token_fields = [
+        "tokens_input",
+        "tokens_output",
+        "cache_read_tokens",
+        "cache_write_tokens",
+        "cost_usd",
+        "cost_sdk_usd",
+    ]
+
+    for field in token_fields:
+        value = ctx_metadata.get(field)
+        if value is not None:
+            log_data[field] = value
+
+    # Add service and endpoint info
+    service_fields = [
+        "endpoint",
+        "model",
+        "streaming",
+        "service_type",
+    ]
+
+    for field in service_fields:
+        value = ctx_metadata.get(field)
+        if value is not None:
+            log_data[field] = value
+
+    # Add any additional metadata provided
+    log_data.update(additional_metadata)
+
+    # Remove None values to keep log clean
+    log_data = {k: v for k, v in log_data.items() if v is not None}
+
+    logger = context.logger.bind(**log_data)
+    if not log_data.get("streaming", False):
+        # Log as access_log event (structured logging)
+        logger.info("access_log")
+    elif log_data.get("event_type", "") == "streaming_complete":
+        logger.info("access_log")
+    else:
+        # if streaming is true, and not streaming_complete log as debug
+        # real access_log will come later
+        logger.debug("access_log")
+
+    # Store in DuckDB if available
+    await _store_access_log(log_data, storage)
+
+    # Emit SSE event for real-time dashboard updates
+    await _emit_access_event("request_complete", log_data)
+
+    # Record Prometheus metrics if metrics instance is provided
+    if metrics and not error_message:
+        # Extract required values for metrics
+        endpoint = ctx_metadata.get("endpoint", path or "unknown")
+        model = ctx_metadata.get("model")
+        service_type = ctx_metadata.get("service_type")
+
+        # Record request count
+        if method and status_code:
+            metrics.record_request(
+                method=method,
+                endpoint=endpoint,
+                model=model,
+                status=status_code,
+                service_type=service_type,
+            )
+
+        # Record response time
+        if context.duration_seconds > 0:
+            metrics.record_response_time(
+                duration_seconds=context.duration_seconds,
+                model=model,
+                endpoint=endpoint,
+                service_type=service_type,
+            )
+
+        # Record token usage
+        tokens_input = ctx_metadata.get("tokens_input")
+        if tokens_input:
+            metrics.record_tokens(
+                token_count=tokens_input,
+                token_type="input",
+                model=model,
+                service_type=service_type,
+            )
+
+        tokens_output = ctx_metadata.get("tokens_output")
+        if tokens_output:
+            metrics.record_tokens(
+                token_count=tokens_output,
+                token_type="output",
+                model=model,
+                service_type=service_type,
+            )
+
+        cache_read_tokens = ctx_metadata.get("cache_read_tokens")
+        if cache_read_tokens:
+            metrics.record_tokens(
+                token_count=cache_read_tokens,
+                token_type="cache_read",
+                model=model,
+                service_type=service_type,
+            )
+
+        cache_write_tokens = ctx_metadata.get("cache_write_tokens")
+        if cache_write_tokens:
+            metrics.record_tokens(
+                token_count=cache_write_tokens,
+                token_type="cache_write",
+                model=model,
+                service_type=service_type,
+            )
+
+        # Record cost
+        cost_usd = ctx_metadata.get("cost_usd")
+        if cost_usd:
+            metrics.record_cost(
+                cost_usd=cost_usd,
+                model=model,
+                cost_type="total",
+                service_type=service_type,
+            )
+
+    # Record error if there was one
+    if metrics and error_message:
+        endpoint = ctx_metadata.get("endpoint", path or "unknown")
+        model = ctx_metadata.get("model")
+        service_type = ctx_metadata.get("service_type")
+
+        # Extract error type from error message or use generic
+        error_type = additional_metadata.get(
+            "error_type",
+            type(error_message).__name__
+            if hasattr(error_message, "__class__")
+            else "unknown_error",
+        )
+
+        metrics.record_error(
+            error_type=error_type,
+            endpoint=endpoint,
+            model=model,
+            service_type=service_type,
+        )
+
+
+async def _store_access_log(
+    log_data: dict[str, Any], storage: SimpleDuckDBStorage | None = None
+) -> None:
+    """Store access log in DuckDB storage if available.
+
+    Args:
+        log_data: Log data to store
+        storage: DuckDB storage instance (optional)
+    """
+    if not storage:
+        return
+
+    try:
+        # Prepare data for DuckDB storage
+        storage_data: AccessLogPayload = {
+            "timestamp": time.time(),
+            "request_id": log_data.get("request_id") or "",
+            "method": log_data.get("method", ""),
+            "endpoint": log_data.get("endpoint", log_data.get("path", "")),
+            "path": log_data.get("path", ""),
+            "query": log_data.get("query", ""),
+            "client_ip": log_data.get("client_ip", ""),
+            "user_agent": log_data.get("user_agent", ""),
+            "service_type": log_data.get("service_type", ""),
+            "model": log_data.get("model", ""),
+            "streaming": log_data.get("streaming", False),
+            "status_code": log_data.get("status_code", 200),
+            "duration_ms": log_data.get("duration_ms", 0.0),
+            "duration_seconds": log_data.get("duration_seconds", 0.0),
+            "tokens_input": log_data.get("tokens_input", 0),
+            "tokens_output": log_data.get("tokens_output", 0),
+            "cache_read_tokens": log_data.get("cache_read_tokens", 0),
+            "cache_write_tokens": log_data.get("cache_write_tokens", 0),
+            "cost_usd": log_data.get("cost_usd", 0.0),
+            "cost_sdk_usd": log_data.get("cost_sdk_usd", 0.0),
+        }
+
+        # Store asynchronously using queue-based DuckDB (prevents deadlocks)
+        if storage:
+            await storage.store_request(storage_data)
+
+    except Exception as e:
+        # Log error but don't fail the request
+        logger.error(
+            "access_log_duckdb_error",
+            error=str(e),
+            request_id=log_data.get("request_id"),
+        )
+
+
+async def _write_to_storage(storage: Any, data: dict[str, Any]) -> None:
+    """Write data to storage asynchronously."""
+    try:
+        await storage.store_request(data)
+    except Exception as e:
+        logger.error(
+            "duckdb_store_error",
+            error=str(e),
+            request_id=data.get("request_id"),
+        )
+
+
+async def _emit_access_event(event_type: str, data: dict[str, Any]) -> None:
+    """Emit SSE event for real-time dashboard updates."""
+    try:
+        from ccproxy.observability.sse_events import emit_sse_event
+
+        # Create event data for SSE (exclude internal fields)
+        sse_data = {
+            "request_id": data.get("request_id"),
+            "method": data.get("method"),
+            "path": data.get("path"),
+            "query": data.get("query"),
+            "status_code": data.get("status_code"),
+            "client_ip": data.get("client_ip"),
+            "user_agent": data.get("user_agent"),
+            "service_type": data.get("service_type"),
+            "model": data.get("model"),
+            "streaming": data.get("streaming"),
+            "duration_ms": data.get("duration_ms"),
+            "duration_seconds": data.get("duration_seconds"),
+            "tokens_input": data.get("tokens_input"),
+            "tokens_output": data.get("tokens_output"),
+            "cost_usd": data.get("cost_usd"),
+            "endpoint": data.get("endpoint"),
+        }
+
+        # Remove None values
+        sse_data = {k: v for k, v in sse_data.items() if v is not None}
+
+        await emit_sse_event(event_type, sse_data)
+
+    except Exception as e:
+        # Log error but don't fail the request
+        logger.debug(
+            "sse_emit_failed",
+            event_type=event_type,
+            error=str(e),
+            request_id=data.get("request_id"),
+        )
+
+
+def log_request_start(
+    request_id: str,
+    method: str,
+    path: str,
+    client_ip: str | None = None,
+    user_agent: str | None = None,
+    query: str | None = None,
+    **additional_metadata: Any,
+) -> None:
+    """Log request start event with basic information.
+
+    This is used for early/middleware logging when full context isn't available yet.
+
+    Args:
+        request_id: Request identifier
+        method: HTTP method
+        path: Request path
+        client_ip: Client IP address
+        user_agent: User agent string
+        query: Query parameters
+        **additional_metadata: Any additional fields to include
+    """
+    log_data = {
+        "request_id": request_id,
+        "method": method,
+        "path": path,
+        "client_ip": client_ip,
+        "user_agent": user_agent,
+        "query": query,
+        "event_type": "request_start",
+        "timestamp": time.time(),
+    }
+
+    # Add any additional metadata
+    log_data.update(additional_metadata)
+
+    # Remove None values
+    log_data = {k: v for k, v in log_data.items() if v is not None}
+
+    logger.debug("access_log_start", **log_data)
+
+    # Emit SSE event for real-time dashboard updates
+    # Note: This is a synchronous function, so we schedule the async emission
+    try:
+        import asyncio
+
+        from ccproxy.observability.sse_events import emit_sse_event
+
+        # Create event data for SSE
+        sse_data = {
+            "request_id": request_id,
+            "method": method,
+            "path": path,
+            "client_ip": client_ip,
+            "user_agent": user_agent,
+            "query": query,
+        }
+
+        # Remove None values
+        sse_data = {k: v for k, v in sse_data.items() if v is not None}
+
+        # Schedule async event emission
+        asyncio.create_task(emit_sse_event("request_start", sse_data))
+
+    except Exception as e:
+        # Log error but don't fail the request
+        logger.debug(
+            "sse_emit_failed",
+            event_type="request_start",
+            error=str(e),
+            request_id=request_id,
+        )
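
For orientation, the sketch below shows how log_request_access from the diff above might be invoked. It is a minimal, hypothetical example and not part of the package: the StubContext dataclass only mimics the attributes the function reads (request_id, metadata, duration_ms, duration_seconds, logger) and stands in for the real RequestContext from ccproxy/observability/context.py, whose construction is not shown in this diff.

# Hypothetical usage sketch; StubContext is an illustration only and is not
# the real RequestContext provided by ccproxy.observability.context.
import asyncio
from dataclasses import dataclass, field
from typing import Any

import structlog

from ccproxy.observability.access_logger import log_request_access


@dataclass
class StubContext:
    request_id: str
    metadata: dict[str, Any] = field(default_factory=dict)
    duration_ms: float = 0.0
    duration_seconds: float = 0.0
    logger: Any = field(default_factory=structlog.get_logger)


async def main() -> None:
    context = StubContext(
        request_id="req-123",
        metadata={"model": "example-model", "tokens_input": 42, "tokens_output": 7},
        duration_ms=125.0,
        duration_seconds=0.125,
    )
    # With storage and metrics left as None, the call only emits the
    # structured "access_log" event and a best-effort SSE event.
    await log_request_access(
        context,
        status_code=200,
        method="POST",
        path="/v1/messages",
        client_ip="127.0.0.1",
        user_agent="example-client/1.0",
    )


asyncio.run(main())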