code-puppy 0.0.302__py3-none-any.whl → 0.0.323__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65) hide show
  1. code_puppy/agents/base_agent.py +373 -46
  2. code_puppy/chatgpt_codex_client.py +283 -0
  3. code_puppy/cli_runner.py +795 -0
  4. code_puppy/command_line/add_model_menu.py +8 -1
  5. code_puppy/command_line/autosave_menu.py +266 -35
  6. code_puppy/command_line/colors_menu.py +515 -0
  7. code_puppy/command_line/command_handler.py +8 -2
  8. code_puppy/command_line/config_commands.py +59 -10
  9. code_puppy/command_line/core_commands.py +19 -7
  10. code_puppy/command_line/mcp/edit_command.py +3 -1
  11. code_puppy/command_line/mcp/handler.py +7 -2
  12. code_puppy/command_line/mcp/install_command.py +8 -3
  13. code_puppy/command_line/mcp/logs_command.py +173 -64
  14. code_puppy/command_line/mcp/restart_command.py +7 -2
  15. code_puppy/command_line/mcp/search_command.py +10 -4
  16. code_puppy/command_line/mcp/start_all_command.py +16 -6
  17. code_puppy/command_line/mcp/start_command.py +3 -1
  18. code_puppy/command_line/mcp/status_command.py +2 -1
  19. code_puppy/command_line/mcp/stop_all_command.py +5 -1
  20. code_puppy/command_line/mcp/stop_command.py +3 -1
  21. code_puppy/command_line/mcp/wizard_utils.py +10 -4
  22. code_puppy/command_line/model_settings_menu.py +53 -7
  23. code_puppy/command_line/prompt_toolkit_completion.py +16 -2
  24. code_puppy/command_line/session_commands.py +11 -4
  25. code_puppy/config.py +103 -15
  26. code_puppy/keymap.py +8 -2
  27. code_puppy/main.py +5 -828
  28. code_puppy/mcp_/__init__.py +17 -0
  29. code_puppy/mcp_/blocking_startup.py +61 -32
  30. code_puppy/mcp_/config_wizard.py +5 -1
  31. code_puppy/mcp_/managed_server.py +23 -3
  32. code_puppy/mcp_/manager.py +65 -0
  33. code_puppy/mcp_/mcp_logs.py +224 -0
  34. code_puppy/messaging/__init__.py +20 -4
  35. code_puppy/messaging/bus.py +64 -0
  36. code_puppy/messaging/markdown_patches.py +57 -0
  37. code_puppy/messaging/messages.py +16 -0
  38. code_puppy/messaging/renderers.py +21 -9
  39. code_puppy/messaging/rich_renderer.py +113 -67
  40. code_puppy/messaging/spinner/console_spinner.py +34 -0
  41. code_puppy/model_factory.py +185 -30
  42. code_puppy/model_utils.py +57 -48
  43. code_puppy/models.json +19 -5
  44. code_puppy/plugins/chatgpt_oauth/config.py +5 -1
  45. code_puppy/plugins/chatgpt_oauth/oauth_flow.py +5 -6
  46. code_puppy/plugins/chatgpt_oauth/register_callbacks.py +3 -3
  47. code_puppy/plugins/chatgpt_oauth/test_plugin.py +26 -11
  48. code_puppy/plugins/chatgpt_oauth/utils.py +180 -65
  49. code_puppy/plugins/claude_code_oauth/register_callbacks.py +28 -0
  50. code_puppy/plugins/claude_code_oauth/utils.py +1 -0
  51. code_puppy/plugins/shell_safety/agent_shell_safety.py +1 -118
  52. code_puppy/plugins/shell_safety/register_callbacks.py +44 -3
  53. code_puppy/prompts/codex_system_prompt.md +310 -0
  54. code_puppy/pydantic_patches.py +131 -0
  55. code_puppy/terminal_utils.py +126 -0
  56. code_puppy/tools/agent_tools.py +34 -9
  57. code_puppy/tools/command_runner.py +361 -32
  58. code_puppy/tools/file_operations.py +33 -45
  59. {code_puppy-0.0.302.data → code_puppy-0.0.323.data}/data/code_puppy/models.json +19 -5
  60. {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/METADATA +1 -1
  61. {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/RECORD +65 -57
  62. {code_puppy-0.0.302.data → code_puppy-0.0.323.data}/data/code_puppy/models_dev_api.json +0 -0
  63. {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/WHEEL +0 -0
  64. {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/entry_points.txt +0 -0
  65. {code_puppy-0.0.302.dist-info → code_puppy-0.0.323.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,283 @@
1
+ """HTTP client interceptor for ChatGPT Codex API.
2
+
3
+ ChatGPTCodexAsyncClient: httpx client that injects required fields into
4
+ request bodies for the ChatGPT Codex API and handles stream-to-non-stream
5
+ conversion.
6
+
7
+ The Codex API requires:
8
+ - "store": false - Disables conversation storage
9
+ - "stream": true - Streaming is mandatory
10
+
11
+ Removes unsupported parameters:
12
+ - "max_output_tokens" - Not supported by Codex API
13
+ - "max_tokens" - Not supported by Codex API
14
+ - "verbosity" - Not supported by Codex API
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ from typing import Any
22
+
23
+ import httpx
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+
28
+ def _is_reasoning_model(model_name: str) -> bool:
29
+ """Check if a model supports reasoning parameters."""
30
+ reasoning_models = [
31
+ "gpt-5", # All GPT-5 variants
32
+ "o1", # o1 series
33
+ "o3", # o3 series
34
+ "o4", # o4 series
35
+ ]
36
+ model_lower = model_name.lower()
37
+ return any(model_lower.startswith(prefix) for prefix in reasoning_models)
38
+
39
+
40
class ChatGPTCodexAsyncClient(httpx.AsyncClient):
    """Async HTTP client that handles ChatGPT Codex API requirements.

    This client:
    1. Injects required fields (store=false, stream=true)
    2. Strips unsupported parameters
    3. Converts streaming responses to non-streaming format
    """

    async def send(
        self, request: httpx.Request, *args: Any, **kwargs: Any
    ) -> httpx.Response:
        """Intercept requests and inject required Codex fields.

        Body rewriting is strictly best-effort: any failure while inspecting
        or rebuilding the request falls back to sending it unmodified. When
        this client had to force ``stream=true`` onto a request that asked
        for a non-streaming response, the resulting SSE stream is collapsed
        back into a single JSON response so the caller sees the shape it
        expected.
        """
        force_stream_conversion = False

        try:
            # Only POST requests carry Codex JSON bodies worth rewriting.
            if request.method == "POST":
                body_bytes = self._extract_body_bytes(request)
                if body_bytes:
                    updated, force_stream_conversion = self._inject_codex_fields(
                        body_bytes
                    )
                    if updated is not None:
                        try:
                            # Rebuild so httpx recreates the body stream for
                            # the new content, then graft those internals onto
                            # the original request object — the transport (and
                            # possibly the caller) holds a reference to
                            # `request`, not to `rebuilt`.
                            rebuilt = self.build_request(
                                method=request.method,
                                url=request.url,
                                headers=request.headers,
                                content=updated,
                            )

                            if hasattr(rebuilt, "_content"):
                                setattr(request, "_content", rebuilt._content)
                            if hasattr(rebuilt, "stream"):
                                request.stream = rebuilt.stream
                            if hasattr(rebuilt, "extensions"):
                                request.extensions = rebuilt.extensions

                            # Ensure Content-Length matches the new body
                            request.headers["Content-Length"] = str(len(updated))

                        except Exception:
                            # Best-effort: send the original body unmodified.
                            pass
        except Exception:
            pass

        # Make the actual request
        response = await super().send(request, *args, **kwargs)

        # If we forced streaming, convert the SSE stream to a regular response
        if force_stream_conversion and response.status_code == 200:
            try:
                response = await self._convert_stream_to_response(response)
            except Exception as e:
                logger.warning(f"Failed to convert stream response: {e}")

        return response

    @staticmethod
    def _extract_body_bytes(request: httpx.Request) -> bytes | None:
        """Extract the request body as bytes, or None if unavailable.

        ``request.content`` can raise for bodies that have not been read
        yet, hence the second best-effort look at the private ``_content``
        attribute before giving up.
        """
        try:
            content = request.content
            if content:
                return content
        except Exception:
            pass

        try:
            content = getattr(request, "_content", None)
            if content:
                return content
        except Exception:
            pass

        return None

    @staticmethod
    def _inject_codex_fields(body: bytes) -> tuple[bytes | None, bool]:
        """Inject required Codex fields and remove unsupported ones.

        Args:
            body: Raw request body, expected to be a UTF-8 JSON object.

        Returns:
            Tuple of (modified body bytes, whether stream was forced).
            The first element is None when the body is not a JSON object
            or no change was needed.
        """
        try:
            data = json.loads(body.decode("utf-8"))
        except Exception:
            return None, False

        if not isinstance(data, dict):
            return None, False

        modified = False
        forced_stream = False

        # CRITICAL: ChatGPT Codex backend requires store=false
        # (missing key yields None, which also fails the `is not False` test).
        if data.get("store") is not False:
            data["store"] = False
            modified = True

        # CRITICAL: ChatGPT Codex backend requires stream=true
        # If stream is already true (e.g., pydantic-ai with event_stream_handler),
        # don't force conversion - let streaming events flow through naturally
        if data.get("stream") is not True:
            data["stream"] = True
            forced_stream = True  # Only convert if WE forced streaming
            modified = True

        # Add default reasoning settings for reasoning models (gpt-5 family,
        # o-series) when the caller did not supply any.
        model = data.get("model", "")
        if "reasoning" not in data and _is_reasoning_model(model):
            data["reasoning"] = {
                "effort": "medium",
                "summary": "auto",
            }
            modified = True

        # Remove unsupported parameters
        # Note: verbosity should be under "text" object, not top-level
        for param in ("max_output_tokens", "max_tokens", "verbosity"):
            if param in data:
                del data[param]
                modified = True

        if not modified:
            return None, False

        return json.dumps(data).encode("utf-8"), forced_stream

    async def _convert_stream_to_response(
        self, response: httpx.Response
    ) -> httpx.Response:
        """Convert an SSE streaming response to a complete response.

        Consumes the SSE stream and reconstructs the final response object.
        The full object delivered by the ``response.completed`` event is
        preferred as-is; only when that event never arrives is a minimal
        response assembled from the collected text deltas and tool calls.
        (Appending the collected pieces to the completed object as well
        would duplicate its output.)
        """
        logger.debug("Converting SSE stream to non-streaming response")
        final_response_data = None
        collected_text = []
        collected_tool_calls = []

        # Read the entire stream
        async for line in response.aiter_lines():
            if not line or not line.startswith("data:"):
                continue

            data_str = line[5:].strip()  # Remove "data:" prefix
            if data_str == "[DONE]":
                break

            try:
                event = json.loads(data_str)
                event_type = event.get("type", "")

                if event_type == "response.output_text.delta":
                    # Collect text deltas
                    delta = event.get("delta", "")
                    if delta:
                        collected_text.append(delta)

                elif event_type == "response.completed":
                    # This contains the final response object
                    final_response_data = event.get("response", {})

                elif event_type == "response.function_call_arguments.done":
                    # Collect tool calls
                    collected_tool_calls.append(
                        {
                            "name": event.get("name", ""),
                            "arguments": event.get("arguments", ""),
                            "call_id": event.get("call_id", ""),
                        }
                    )

            except json.JSONDecodeError:
                continue

        logger.debug(
            f"Collected {len(collected_text)} text chunks, {len(collected_tool_calls)} tool calls"
        )
        if final_response_data:
            logger.debug(
                f"Got final response data with keys: {list(final_response_data.keys())}"
            )

        # Build the final response body
        if final_response_data:
            # The completed event already carries the complete "output";
            # use it verbatim instead of re-appending collected chunks,
            # which would duplicate the assistant message and tool calls.
            response_body = final_response_data
        else:
            # Fallback: construct a minimal response from collected data
            response_body = {
                "id": "reconstructed",
                "object": "response",
                "output": [],
            }

            if collected_text:
                response_body["output"].append(
                    {
                        "type": "message",
                        "role": "assistant",
                        "content": [
                            {"type": "output_text", "text": "".join(collected_text)}
                        ],
                    }
                )

            for tool_call in collected_tool_calls:
                response_body["output"].append(
                    {
                        "type": "function_call",
                        "name": tool_call["name"],
                        "arguments": tool_call["arguments"],
                        "call_id": tool_call["call_id"],
                    }
                )

        # Create a new response with the complete body. Drop the SSE framing
        # headers — the stream's Content-Length / Transfer-Encoding /
        # Content-Type (text/event-stream) no longer describe the
        # reconstructed JSON body and would mislead downstream consumers.
        body_bytes = json.dumps(response_body).encode("utf-8")
        logger.debug(f"Reconstructed response body: {len(body_bytes)} bytes")

        headers = httpx.Headers(response.headers)
        for stale in (
            "content-length",
            "transfer-encoding",
            "content-encoding",
            "content-type",
        ):
            if stale in headers:
                del headers[stale]
        headers["Content-Type"] = "application/json"

        new_response = httpx.Response(
            status_code=response.status_code,
            headers=headers,
            content=body_bytes,
            request=response.request,
        )
        return new_response
270
+
271
+
272
def create_codex_async_client(
    headers: dict[str, str] | None = None,
    verify: str | bool = True,
    **kwargs: Any,
) -> ChatGPTCodexAsyncClient:
    """Create a ChatGPT Codex async client with proper configuration.

    Args:
        headers: Default headers to attach to every request.
        verify: TLS verification flag or path to a CA bundle.
        **kwargs: Extra keyword arguments forwarded to the client.

    Returns:
        A configured :class:`ChatGPTCodexAsyncClient`.
    """
    # Generous 300s read budget — Codex streams can run for minutes —
    # while the 30s connect timeout lets unreachable hosts fail fast.
    codex_timeout = httpx.Timeout(300.0, connect=30.0)
    return ChatGPTCodexAsyncClient(
        headers=headers,
        verify=verify,
        timeout=codex_timeout,
        **kwargs,
    )