ccproxy-api 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (42)
  1. ccproxy/_version.py +2 -2
  2. ccproxy/adapters/codex/__init__.py +11 -0
  3. ccproxy/adapters/openai/models.py +1 -1
  4. ccproxy/adapters/openai/response_adapter.py +355 -0
  5. ccproxy/adapters/openai/response_models.py +178 -0
  6. ccproxy/api/app.py +31 -3
  7. ccproxy/api/dependencies.py +1 -8
  8. ccproxy/api/middleware/errors.py +15 -7
  9. ccproxy/api/routes/codex.py +1251 -0
  10. ccproxy/api/routes/health.py +228 -3
  11. ccproxy/auth/openai/__init__.py +13 -0
  12. ccproxy/auth/openai/credentials.py +166 -0
  13. ccproxy/auth/openai/oauth_client.py +334 -0
  14. ccproxy/auth/openai/storage.py +184 -0
  15. ccproxy/claude_sdk/options.py +1 -1
  16. ccproxy/cli/commands/auth.py +398 -1
  17. ccproxy/cli/commands/serve.py +3 -1
  18. ccproxy/config/claude.py +1 -1
  19. ccproxy/config/codex.py +100 -0
  20. ccproxy/config/scheduler.py +8 -8
  21. ccproxy/config/settings.py +19 -0
  22. ccproxy/core/codex_transformers.py +389 -0
  23. ccproxy/core/http_transformers.py +153 -2
  24. ccproxy/data/claude_headers_fallback.json +37 -0
  25. ccproxy/data/codex_headers_fallback.json +14 -0
  26. ccproxy/models/detection.py +82 -0
  27. ccproxy/models/requests.py +22 -0
  28. ccproxy/models/responses.py +16 -0
  29. ccproxy/scheduler/manager.py +2 -2
  30. ccproxy/scheduler/tasks.py +105 -65
  31. ccproxy/services/claude_detection_service.py +7 -33
  32. ccproxy/services/codex_detection_service.py +252 -0
  33. ccproxy/services/proxy_service.py +530 -0
  34. ccproxy/utils/model_mapping.py +7 -5
  35. ccproxy/utils/startup_helpers.py +205 -12
  36. ccproxy/utils/version_checker.py +6 -0
  37. ccproxy_api-0.1.7.dist-info/METADATA +615 -0
  38. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/RECORD +41 -28
  39. ccproxy_api-0.1.5.dist-info/METADATA +0 -396
  40. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/WHEEL +0 -0
  41. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/entry_points.txt +0 -0
  42. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/licenses/LICENSE +0 -0
ccproxy/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.1.5'
-__version_tuple__ = version_tuple = (0, 1, 5)
+__version__ = version = '0.1.7'
+__version_tuple__ = version_tuple = (0, 1, 7)
ccproxy/adapters/codex/__init__.py ADDED
@@ -0,0 +1,11 @@
+"""Codex adapter for format conversion."""
+
+from ccproxy.models.requests import CodexMessage, CodexRequest
+from ccproxy.models.responses import CodexResponse
+
+
+__all__ = [
+    "CodexMessage",
+    "CodexRequest",
+    "CodexResponse",
+]
ccproxy/adapters/openai/models.py CHANGED
@@ -286,7 +286,7 @@ class OpenAIChatCompletionResponse(BaseModel):
     created: int
     model: str
     choices: list[OpenAIChoice]
-    usage: OpenAIUsage
+    usage: OpenAIUsage | None = None
     system_fingerprint: str | None = None
 
     model_config = ConfigDict(extra="forbid")
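
Making usage optional means Chat Completions responses that omit usage data (which the new Response API conversion below can produce) now validate. A minimal sketch, with a hypothetical payload:

from ccproxy.adapters.openai.models import OpenAIChatCompletionResponse

# Hypothetical payload with no "usage" key; before this change it
# would have failed validation.
resp = OpenAIChatCompletionResponse.model_validate(
    {
        "id": "chatcmpl_demo",
        "object": "chat.completion",
        "created": 0,
        "model": "gpt-5",
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": "ok"},
                "finish_reason": "stop",
            }
        ],
    }
)
assert resp.usage is None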
ccproxy/adapters/openai/response_adapter.py ADDED
@@ -0,0 +1,355 @@
+"""Adapter for converting between OpenAI Chat Completions and Response API formats.
+
+This adapter handles bidirectional conversion between:
+- OpenAI Chat Completions API (used by most OpenAI clients)
+- OpenAI Response API (used by Codex/ChatGPT backend)
+"""
+
+from __future__ import annotations
+
+import json
+import time
+import uuid
+from collections.abc import AsyncIterator
+from typing import Any
+
+import structlog
+
+from ccproxy.adapters.openai.models import (
+    OpenAIChatCompletionRequest,
+    OpenAIChatCompletionResponse,
+    OpenAIChoice,
+    OpenAIResponseMessage,
+    OpenAIUsage,
+)
+from ccproxy.adapters.openai.response_models import (
+    ResponseCompleted,
+    ResponseMessage,
+    ResponseMessageContent,
+    ResponseReasoning,
+    ResponseRequest,
+)
+
+
+logger = structlog.get_logger(__name__)
+
+
+class ResponseAdapter:
+    """Adapter for OpenAI Response API format conversion."""
+
+    def chat_to_response_request(
+        self, chat_request: dict[str, Any] | OpenAIChatCompletionRequest
+    ) -> ResponseRequest:
+        """Convert Chat Completions request to Response API format.
+
+        Args:
+            chat_request: OpenAI Chat Completions request
+
+        Returns:
+            Response API formatted request
+        """
+        if isinstance(chat_request, OpenAIChatCompletionRequest):
+            chat_dict = chat_request.model_dump()
+        else:
+            chat_dict = chat_request
+
+        # Extract messages and convert to Response API format
+        messages = chat_dict.get("messages", [])
+        response_input = []
+        instructions = None
+
+        for msg in messages:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+
+            # System messages become instructions
+            if role == "system":
+                instructions = content
+                continue
+
+            # Convert user/assistant messages to Response API format
+            response_msg = ResponseMessage(
+                type="message",
+                id=None,
+                role=role if role in ["user", "assistant"] else "user",
+                content=[
+                    ResponseMessageContent(
+                        type="input_text" if role == "user" else "output_text",
+                        text=content if isinstance(content, str) else str(content),
+                    )
+                ],
+            )
+            response_input.append(response_msg)
+
+        # Leave instructions field unset to let codex_transformers inject them
+        # The backend validates instructions and needs the full Codex ones
+        instructions = None
+        # Actually, we need to not include the field at all if it's None
+        # Otherwise the backend complains "Instructions are required"
+
+        # Map model (Codex uses gpt-5)
+        model = chat_dict.get("model", "gpt-4")
+        # For Codex, we typically use gpt-5
+        response_model = (
+            "gpt-5" if "codex" in model.lower() or "gpt-5" in model.lower() else model
+        )
+
+        # Build Response API request
+        # Note: Response API always requires stream=true and store=false
+        # Also, Response API doesn't support temperature and other OpenAI-specific parameters
+        request = ResponseRequest(
+            model=response_model,
+            instructions=instructions,
+            input=response_input,
+            stream=True,  # Always use streaming for Response API
+            tool_choice="auto",
+            parallel_tool_calls=chat_dict.get("parallel_tool_calls", False),
+            reasoning=ResponseReasoning(effort="medium", summary="auto"),
+            store=False,  # Must be false for Response API
+            # The following parameters are not supported by Response API:
+            # temperature, max_output_tokens, top_p, frequency_penalty, presence_penalty
+        )
+
+        return request
+
+    def response_to_chat_completion(
+        self, response_data: dict[str, Any] | ResponseCompleted
+    ) -> OpenAIChatCompletionResponse:
+        """Convert Response API response to Chat Completions format.
+
+        Args:
+            response_data: Response API response
+
+        Returns:
+            Chat Completions formatted response
+        """
+        # Extract the actual response data
+        response_dict: dict[str, Any]
+        if isinstance(response_data, ResponseCompleted):
+            # Convert Pydantic model to dict
+            response_dict = response_data.response.model_dump()
+        else:  # isinstance(response_data, dict)
+            if "response" in response_data:
+                response_dict = response_data["response"]
+            else:
+                response_dict = response_data
+
+        # Extract content from Response API output
+        content = ""
+        output = response_dict.get("output", [])
+        # Look for message type output (skip reasoning)
+        for output_item in output:
+            if output_item.get("type") == "message":
+                output_content = output_item.get("content", [])
+                for content_block in output_content:
+                    if content_block.get("type") in ["output_text", "text"]:
+                        content += content_block.get("text", "")
+
+        # Build Chat Completions response
+        usage_data = response_dict.get("usage")
+        converted_usage = self._convert_usage(usage_data) if usage_data else None
+
+        return OpenAIChatCompletionResponse(
+            id=response_dict.get("id", f"resp_{uuid.uuid4().hex}"),
+            object="chat.completion",
+            created=response_dict.get("created_at", int(time.time())),
+            model=response_dict.get("model", "gpt-5"),
+            choices=[
+                OpenAIChoice(
+                    index=0,
+                    message=OpenAIResponseMessage(
+                        role="assistant", content=content or None
+                    ),
+                    finish_reason="stop",
+                )
+            ],
+            usage=converted_usage,
+            system_fingerprint=response_dict.get("safety_identifier"),
+        )
+
+    async def stream_response_to_chat(
+        self, response_stream: AsyncIterator[bytes]
+    ) -> AsyncIterator[dict[str, Any]]:
+        """Convert Response API SSE stream to Chat Completions format.
+
+        Args:
+            response_stream: Async iterator of SSE bytes from Response API
+
+        Yields:
+            Chat Completions formatted streaming chunks
+        """
+        stream_id = f"chatcmpl_{uuid.uuid4().hex[:29]}"
+        created = int(time.time())
+        accumulated_content = ""
+        buffer = ""
+
+        logger.debug("response_adapter_stream_started", stream_id=stream_id)
+        raw_chunk_count = 0
+        event_count = 0
+
+        async for chunk in response_stream:
+            raw_chunk_count += 1
+            chunk_size = len(chunk)
+            logger.debug(
+                "response_adapter_raw_chunk_received",
+                chunk_number=raw_chunk_count,
+                chunk_size=chunk_size,
+                buffer_size_before=len(buffer),
+            )
+
+            # Add chunk to buffer
+            buffer += chunk.decode("utf-8")
+
+            # Process complete SSE events (separated by double newlines)
+            while "\n\n" in buffer:
+                event_str, buffer = buffer.split("\n\n", 1)
+                event_count += 1
+
+                # Parse the SSE event
+                event_type = None
+                event_data = None
+
+                for line in event_str.strip().split("\n"):
+                    if not line:
+                        continue
+
+                    if line.startswith("event:"):
+                        event_type = line[6:].strip()
+                    elif line.startswith("data:"):
+                        data_str = line[5:].strip()
+                        if data_str == "[DONE]":
+                            logger.debug(
+                                "response_adapter_done_marker_found",
+                                event_number=event_count,
+                            )
+                            continue
+                        try:
+                            event_data = json.loads(data_str)
+                        except json.JSONDecodeError:
+                            logger.debug(
+                                "response_adapter_sse_parse_failed",
+                                data_preview=data_str[:100],
+                                event_number=event_count,
+                            )
+                            continue
+
+                # Process complete events
+                if event_type and event_data:
+                    logger.debug(
+                        "response_adapter_sse_event_parsed",
+                        event_type=event_type,
+                        event_number=event_count,
+                        has_output="output" in str(event_data),
+                    )
+                    if event_type in [
+                        "response.output.delta",
+                        "response.output_text.delta",
+                    ]:
+                        # Extract delta content
+                        delta_content = ""
+
+                        # Handle different event structures
+                        if event_type == "response.output_text.delta":
+                            # Direct text delta event
+                            delta_content = event_data.get("delta", "")
+                        else:
+                            # Standard output delta with nested structure
+                            output = event_data.get("output", [])
+                            if output:
+                                for output_item in output:
+                                    if output_item.get("type") == "message":
+                                        content_blocks = output_item.get("content", [])
+                                        for block in content_blocks:
+                                            if block.get("type") in [
+                                                "output_text",
+                                                "text",
+                                            ]:
+                                                delta_content += block.get("text", "")
+
+                        if delta_content:
+                            accumulated_content += delta_content
+
+                            logger.debug(
+                                "response_adapter_yielding_content",
+                                content_length=len(delta_content),
+                                accumulated_length=len(accumulated_content),
+                            )
+
+                            # Create Chat Completions streaming chunk
+                            yield {
+                                "id": stream_id,
+                                "object": "chat.completion.chunk",
+                                "created": created,
+                                "model": event_data.get("model", "gpt-5"),
+                                "choices": [
+                                    {
+                                        "index": 0,
+                                        "delta": {"content": delta_content},
+                                        "finish_reason": None,
+                                    }
+                                ],
+                            }
+
+                    elif event_type == "response.completed":
+                        # Final chunk with usage info
+                        response = event_data.get("response", {})
+                        usage = response.get("usage")
+
+                        logger.debug(
+                            "response_adapter_stream_completed",
+                            total_content_length=len(accumulated_content),
+                            has_usage=usage is not None,
+                        )
+
+                        chunk_data = {
+                            "id": stream_id,
+                            "object": "chat.completion.chunk",
+                            "created": created,
+                            "model": response.get("model", "gpt-5"),
+                            "choices": [
+                                {"index": 0, "delta": {}, "finish_reason": "stop"}
+                            ],
+                        }
+
+                        # Add usage if available
+                        converted_usage = self._convert_usage(usage) if usage else None
+                        if converted_usage:
+                            chunk_data["usage"] = converted_usage.model_dump()
+
+                        yield chunk_data
+
+        logger.debug(
+            "response_adapter_stream_finished",
+            stream_id=stream_id,
+            total_raw_chunks=raw_chunk_count,
+            total_events=event_count,
+            final_buffer_size=len(buffer),
+        )
+
+    def _convert_usage(
+        self, response_usage: dict[str, Any] | None
+    ) -> OpenAIUsage | None:
+        """Convert Response API usage to Chat Completions format."""
+        if not response_usage:
+            return None
+
+        return OpenAIUsage(
+            prompt_tokens=response_usage.get("input_tokens", 0),
+            completion_tokens=response_usage.get("output_tokens", 0),
+            total_tokens=response_usage.get("total_tokens", 0),
+        )
+
+    def _get_default_codex_instructions(self) -> str:
+        """Get default Codex CLI instructions."""
+        return (
+            "You are a coding agent running in the Codex CLI, a terminal-based coding assistant. "
+            "Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.\n\n"
+            "Your capabilities:\n"
+            "- Receive user prompts and other context provided by the harness, such as files in the workspace.\n"
+            "- Communicate with the user by streaming thinking & responses, and by making & updating plans.\n"
+            "- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, "
+            "you can request that these function calls be escalated to the user for approval before running. "
+            'More on this in the "Sandbox and approvals" section.\n\n'
+            "Within this context, Codex refers to the open-source agentic coding interface "
+            "(not the old Codex language model built by OpenAI)."
+        )
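
Taken together, ResponseAdapter gives a full round trip between the two formats. A minimal usage sketch (the request and response payloads below are hypothetical, not from the package):

from ccproxy.adapters.openai.response_adapter import ResponseAdapter

adapter = ResponseAdapter()

# Chat Completions -> Response API: system messages are folded into
# instructions (then deliberately reset to None so codex_transformers can
# inject the full Codex instructions), and the request is pinned to
# stream=True, store=False.
chat_request = {
    "model": "gpt-5",
    "messages": [
        {"role": "system", "content": "Be terse."},
        {"role": "user", "content": "Explain SSE in one line."},
    ],
}
response_request = adapter.chat_to_response_request(chat_request)
assert response_request.stream is True and response_request.store is False

# Response API -> Chat Completions: "message" output blocks are flattened
# into one assistant message; reasoning output is skipped.
completed = {
    "id": "resp_demo",
    "model": "gpt-5",
    "created_at": 1700000000,
    "output": [
        {
            "type": "message",
            "content": [{"type": "output_text", "text": "One-line answer."}],
        }
    ],
    "usage": {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15},
}
chat_response = adapter.response_to_chat_completion(completed)
print(chat_response.choices[0].message.content)  # "One-line answer."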
ccproxy/adapters/openai/response_models.py ADDED
@@ -0,0 +1,178 @@
+"""OpenAI Response API models.
+
+This module contains data models for OpenAI's Response API format
+used by Codex/ChatGPT backend.
+"""
+
+from __future__ import annotations
+
+from typing import Any, Literal
+
+from pydantic import BaseModel
+
+
+# Request Models
+
+
+class ResponseMessageContent(BaseModel):
+    """Content block in a Response API message."""
+
+    type: Literal["input_text", "output_text"]
+    text: str
+
+
+class ResponseMessage(BaseModel):
+    """Message in Response API format."""
+
+    type: Literal["message"]
+    id: str | None = None
+    role: Literal["user", "assistant", "system"]
+    content: list[ResponseMessageContent]
+
+
+class ResponseReasoning(BaseModel):
+    """Reasoning configuration for Response API."""
+
+    effort: Literal["low", "medium", "high"] = "medium"
+    summary: Literal["auto", "none"] | None = "auto"
+
+
+class ResponseRequest(BaseModel):
+    """OpenAI Response API request format."""
+
+    model: str
+    instructions: str | None = None
+    input: list[ResponseMessage]
+    stream: bool = True
+    tool_choice: Literal["auto", "none", "required"] | str = "auto"
+    parallel_tool_calls: bool = False
+    reasoning: ResponseReasoning | None = None
+    store: bool = False
+    include: list[str] | None = None
+    prompt_cache_key: str | None = None
+    # Note: The following OpenAI parameters are not supported by Response API (Codex backend):
+    # temperature, max_output_tokens, top_p, frequency_penalty, presence_penalty, metadata
+    # If included, they'll cause "Unsupported parameter" errors
+
+
+# Response Models
+
+
+class ResponseOutput(BaseModel):
+    """Output content in Response API."""
+
+    id: str
+    type: Literal["message"]
+    status: Literal["completed", "in_progress"]
+    content: list[ResponseMessageContent]
+    role: Literal["assistant"]
+
+
+class ResponseUsage(BaseModel):
+    """Usage statistics in Response API."""
+
+    input_tokens: int
+    output_tokens: int
+    total_tokens: int
+    input_tokens_details: dict[str, Any] | None = None
+    output_tokens_details: dict[str, Any] | None = None
+
+
+class ResponseReasoningContent(BaseModel):
+    """Reasoning content in response."""
+
+    effort: Literal["low", "medium", "high"]
+    summary: str | None = None
+    encrypted_content: str | None = None
+
+
+class ResponseData(BaseModel):
+    """Complete response data structure."""
+
+    id: str
+    object: Literal["response"]
+    created_at: int
+    status: Literal["completed", "failed", "cancelled"]
+    background: bool = False
+    error: dict[str, Any] | None = None
+    incomplete_details: dict[str, Any] | None = None
+    instructions: str | None = None
+    max_output_tokens: int | None = None
+    model: str
+    output: list[ResponseOutput]
+    parallel_tool_calls: bool = False
+    previous_response_id: str | None = None
+    prompt_cache_key: str | None = None
+    reasoning: ResponseReasoningContent | None = None
+    safety_identifier: str | None = None
+    service_tier: str | None = None
+    store: bool = False
+    temperature: float | None = None
+    text: dict[str, Any] | None = None
+    tool_choice: str | None = None
+    tools: list[dict[str, Any]] | None = None
+    top_logprobs: int | None = None
+    top_p: float | None = None
+    truncation: str | None = None
+    usage: ResponseUsage | None = None
+    user: str | None = None
+    metadata: dict[str, Any] | None = None
+
+
+class ResponseCompleted(BaseModel):
+    """Complete response from Response API."""
+
+    type: Literal["response.completed"]
+    sequence_number: int
+    response: ResponseData
+
+
+# Streaming Models
+
+
+class StreamingDelta(BaseModel):
+    """Delta content in streaming response."""
+
+    content: str | None = None
+    role: Literal["assistant"] | None = None
+    reasoning_content: str | None = None
+    output: list[dict[str, Any]] | None = None
+
+
+class StreamingChoice(BaseModel):
+    """Choice in streaming response."""
+
+    index: int
+    delta: StreamingDelta
+    finish_reason: Literal["stop", "length", "tool_calls", "content_filter"] | None = (
+        None
+    )
+
+
+class StreamingChunk(BaseModel):
+    """Streaming chunk from Response API."""
+
+    id: str
+    object: Literal["response.chunk", "chat.completion.chunk"]
+    created: int
+    model: str
+    choices: list[StreamingChoice]
+    usage: ResponseUsage | None = None
+    system_fingerprint: str | None = None
+
+
+class StreamingEvent(BaseModel):
+    """Server-sent event wrapper for streaming."""
+
+    event: (
+        Literal[
+            "response.created",
+            "response.output.started",
+            "response.output.delta",
+            "response.output.completed",
+            "response.completed",
+            "response.failed",
+        ]
+        | None
+    ) = None
+    data: dict[str, Any] | str
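
These models encode the backend's constraints directly: stream defaults to True, store to False, and the unsupported Chat Completions parameters simply have no fields. A short sketch, assuming the payload is serialized with exclude_none so that an unset instructions field is omitted entirely (the omission requirement the adapter's comments describe):

from ccproxy.adapters.openai.response_models import (
    ResponseMessage,
    ResponseMessageContent,
    ResponseRequest,
)

request = ResponseRequest(
    model="gpt-5",
    input=[
        ResponseMessage(
            type="message",
            role="user",
            content=[ResponseMessageContent(type="input_text", text="hi")],
        )
    ],
)

# instructions, include, and prompt_cache_key default to None and drop out;
# temperature, top_p, etc. cannot leak through because no field exists.
payload = request.model_dump(exclude_none=True)
assert "instructions" not in payload and payload["stream"] is True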
ccproxy/api/app.py CHANGED
@@ -18,6 +18,7 @@ from ccproxy.api.middleware.request_content_logging import (
 from ccproxy.api.middleware.request_id import RequestIDMiddleware
 from ccproxy.api.middleware.server_header import ServerHeaderMiddleware
 from ccproxy.api.routes.claude import router as claude_router
+from ccproxy.api.routes.codex import router as codex_router
 from ccproxy.api.routes.health import router as health_router
 from ccproxy.api.routes.mcp import setup_mcp
 from ccproxy.api.routes.metrics import (
@@ -33,9 +34,12 @@ from ccproxy.core.logging import setup_logging
 from ccproxy.utils.models_provider import get_models_list
 from ccproxy.utils.startup_helpers import (
     check_claude_cli_startup,
+    check_codex_cli_startup,
+    check_version_updates_startup,
     flush_streaming_batches_shutdown,
     initialize_claude_detection_startup,
     initialize_claude_sdk_startup,
+    initialize_codex_detection_startup,
     initialize_log_storage_shutdown,
     initialize_log_storage_startup,
     initialize_permission_service_startup,
@@ -43,7 +47,8 @@ from ccproxy.utils.startup_helpers import (
     setup_scheduler_shutdown,
     setup_scheduler_startup,
     setup_session_manager_shutdown,
-    validate_authentication_startup,
+    validate_claude_authentication_startup,
+    validate_codex_authentication_startup,
 )
 
 
@@ -69,20 +74,40 @@ class ShutdownComponent(TypedDict):
 # Define lifecycle components for startup/shutdown organization
 LIFECYCLE_COMPONENTS: list[LifecycleComponent] = [
     {
-        "name": "Authentication",
-        "startup": validate_authentication_startup,
+        "name": "Claude Authentication",
+        "startup": validate_claude_authentication_startup,
         "shutdown": None,  # One-time validation, no cleanup needed
     },
+    {
+        "name": "Codex Authentication",
+        "startup": validate_codex_authentication_startup,
+        "shutdown": None,  # One-time validation, no cleanup needed
+    },
+    {
+        "name": "Version Check",
+        "startup": check_version_updates_startup,
+        "shutdown": None,  # One-time check, no cleanup needed
+    },
     {
         "name": "Claude CLI",
         "startup": check_claude_cli_startup,
         "shutdown": None,  # Detection only, no cleanup needed
     },
+    {
+        "name": "Codex CLI",
+        "startup": check_codex_cli_startup,
+        "shutdown": None,  # Detection only, no cleanup needed
+    },
     {
         "name": "Claude Detection",
         "startup": initialize_claude_detection_startup,
         "shutdown": None,  # No cleanup needed
     },
+    {
+        "name": "Codex Detection",
+        "startup": initialize_codex_detection_startup,
+        "shutdown": None,  # No cleanup needed
+    },
     {
         "name": "Claude SDK",
         "startup": initialize_claude_sdk_startup,
@@ -282,6 +307,9 @@ def create_app(settings: Settings | None = None) -> FastAPI:
 
     app.include_router(oauth_router, prefix="/oauth", tags=["oauth"])
 
+    # Codex routes for OpenAI integration
+    app.include_router(codex_router, tags=["codex"])
+
     # New /sdk/ routes for Claude SDK endpoints
     app.include_router(claude_router, prefix="/sdk", tags=["claude-sdk"])
 
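
The lifecycle table makes the startup order explicit: each Codex check slots in right after its Claude counterpart, with the version check between the authentication and CLI stages. A hypothetical driver loop (create_app's real loop is outside this diff, and the hook signature is assumed):

async def run_startup(app, components):
    for component in components:
        startup = component["startup"]
        if startup is not None:
            await startup(app)  # hook signature assumed
    # Resulting order: Claude Authentication, Codex Authentication,
    # Version Check, Claude CLI, Codex CLI, Claude Detection,
    # Codex Detection, Claude SDK, ...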
ccproxy/api/dependencies.py CHANGED
@@ -7,7 +7,6 @@ from typing import Annotated
 from fastapi import Depends, Request
 from structlog import get_logger
 
-from ccproxy.auth.dependencies import AuthManagerDep
 from ccproxy.config.settings import Settings, get_settings
 from ccproxy.core.http import BaseProxyClient
 from ccproxy.observability import PrometheusMetrics, get_metrics
@@ -70,11 +69,8 @@ def get_cached_claude_service(request: Request) -> ClaudeSDKService:
     )
     # Get dependencies manually for fallback
     settings = get_cached_settings(request)
-    # Create a simple auth manager for fallback
-    from ccproxy.auth.credentials_adapter import CredentialsAuthManager
 
-    auth_manager = CredentialsAuthManager()
-    claude_service = get_claude_service(settings, auth_manager)
+    claude_service = get_claude_service(settings)
     return claude_service
 
 
@@ -84,13 +80,11 @@ SettingsDep = Annotated[Settings, Depends(get_cached_settings)]
 
 def get_claude_service(
     settings: SettingsDep,
-    auth_manager: AuthManagerDep,
 ) -> ClaudeSDKService:
     """Get Claude SDK service instance.
 
     Args:
         settings: Application settings dependency
-        auth_manager: Authentication manager dependency
 
     Returns:
         Claude SDK service instance
@@ -114,7 +108,6 @@ def get_claude_service(
     # This dependency function should not create stateful resources
 
     return ClaudeSDKService(
-        auth_manager=auth_manager,
         metrics=metrics,
         settings=settings,
         session_manager=session_manager,
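
With auth_manager gone, get_claude_service depends only on settings (plus the metrics and session manager it resolves itself). A sketch of consuming it from a route, following the Annotated pattern this module already uses; the route path and the ClaudeSDKService import location are illustrative:

from typing import Annotated

from fastapi import Depends, FastAPI

from ccproxy.api.dependencies import get_claude_service
from ccproxy.services.claude_sdk_service import ClaudeSDKService  # import path assumed

app = FastAPI()

@app.post("/sdk/v1/messages")
async def create_message(
    claude_service: Annotated[ClaudeSDKService, Depends(get_claude_service)],
) -> dict[str, str]:
    # No AuthManagerDep is threaded through the dependency graph anymore.
    return {"status": "ok"}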