ccproxy-api 0.1.5 → 0.1.7 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. ccproxy/_version.py +2 -2
  2. ccproxy/adapters/codex/__init__.py +11 -0
  3. ccproxy/adapters/openai/models.py +1 -1
  4. ccproxy/adapters/openai/response_adapter.py +355 -0
  5. ccproxy/adapters/openai/response_models.py +178 -0
  6. ccproxy/api/app.py +31 -3
  7. ccproxy/api/dependencies.py +1 -8
  8. ccproxy/api/middleware/errors.py +15 -7
  9. ccproxy/api/routes/codex.py +1251 -0
  10. ccproxy/api/routes/health.py +228 -3
  11. ccproxy/auth/openai/__init__.py +13 -0
  12. ccproxy/auth/openai/credentials.py +166 -0
  13. ccproxy/auth/openai/oauth_client.py +334 -0
  14. ccproxy/auth/openai/storage.py +184 -0
  15. ccproxy/claude_sdk/options.py +1 -1
  16. ccproxy/cli/commands/auth.py +398 -1
  17. ccproxy/cli/commands/serve.py +3 -1
  18. ccproxy/config/claude.py +1 -1
  19. ccproxy/config/codex.py +100 -0
  20. ccproxy/config/scheduler.py +8 -8
  21. ccproxy/config/settings.py +19 -0
  22. ccproxy/core/codex_transformers.py +389 -0
  23. ccproxy/core/http_transformers.py +153 -2
  24. ccproxy/data/claude_headers_fallback.json +37 -0
  25. ccproxy/data/codex_headers_fallback.json +14 -0
  26. ccproxy/models/detection.py +82 -0
  27. ccproxy/models/requests.py +22 -0
  28. ccproxy/models/responses.py +16 -0
  29. ccproxy/scheduler/manager.py +2 -2
  30. ccproxy/scheduler/tasks.py +105 -65
  31. ccproxy/services/claude_detection_service.py +7 -33
  32. ccproxy/services/codex_detection_service.py +252 -0
  33. ccproxy/services/proxy_service.py +530 -0
  34. ccproxy/utils/model_mapping.py +7 -5
  35. ccproxy/utils/startup_helpers.py +205 -12
  36. ccproxy/utils/version_checker.py +6 -0
  37. ccproxy_api-0.1.7.dist-info/METADATA +615 -0
  38. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/RECORD +41 -28
  39. ccproxy_api-0.1.5.dist-info/METADATA +0 -396
  40. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/WHEEL +0 -0
  41. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/entry_points.txt +0 -0
  42. {ccproxy_api-0.1.5.dist-info → ccproxy_api-0.1.7.dist-info}/licenses/LICENSE +0 -0
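The bulk of the new code adds Codex support: OpenAI OAuth credentials, a /codex route group, detection services, and the request transformers shown in the first diff below. As a rough sketch of the intended routing (inferred only from the path handling in ccproxy/core/codex_transformers.py below, not from any packaged documentation), requests to the proxy's /codex endpoints are rewritten onto the ChatGPT backend:

    # Illustrative only; mirrors _transform_codex_path / _transform_codex_url in the diff below.
    from ccproxy.core.codex_transformers import CodexRequestTransformer

    transformer = CodexRequestTransformer()
    assert transformer._transform_codex_path("/codex/responses") == "/responses"
    assert transformer._transform_codex_path("/codex/sess-123/responses") == "/responses"
    # Upstream target built by _transform_codex_url:
    # https://chatgpt.com/backend-api/codex/responses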
ccproxy/core/codex_transformers.py
@@ -0,0 +1,389 @@
+ """Codex-specific transformers for request/response transformation."""
+
+ import json
+
+ import structlog
+ from typing_extensions import TypedDict
+
+ from ccproxy.core.transformers import RequestTransformer
+ from ccproxy.core.types import ProxyRequest, TransformContext
+ from ccproxy.models.detection import CodexCacheData
+
+
+ logger = structlog.get_logger(__name__)
+
+
+ class CodexRequestData(TypedDict):
+     """Typed structure for transformed Codex request data."""
+
+     method: str
+     url: str
+     headers: dict[str, str]
+     body: bytes | None
+
+
+ class CodexRequestTransformer(RequestTransformer):
+     """Codex request transformer for header and instructions field injection."""
+
+     def __init__(self) -> None:
+         """Initialize Codex request transformer."""
+         super().__init__()
+
+     async def _transform_request(
+         self, request: ProxyRequest, context: TransformContext | None = None
+     ) -> ProxyRequest:
+         """Transform a proxy request for Codex API.
+
+         Args:
+             request: The structured proxy request to transform
+             context: Optional transformation context
+
+         Returns:
+             The transformed proxy request
+         """
+         # Extract required data from context
+         access_token = ""
+         session_id = ""
+         account_id = ""
+         codex_detection_data = None
+
+         if context:
+             if hasattr(context, "access_token"):
+                 access_token = context.access_token
+             elif isinstance(context, dict):
+                 access_token = context.get("access_token", "")
+
+             if hasattr(context, "session_id"):
+                 session_id = context.session_id
+             elif isinstance(context, dict):
+                 session_id = context.get("session_id", "")
+
+             if hasattr(context, "account_id"):
+                 account_id = context.account_id
+             elif isinstance(context, dict):
+                 account_id = context.get("account_id", "")
+
+             if hasattr(context, "codex_detection_data"):
+                 codex_detection_data = context.codex_detection_data
+             elif isinstance(context, dict):
+                 codex_detection_data = context.get("codex_detection_data")
+
+         # Transform URL - remove codex prefix and forward to ChatGPT backend
+         transformed_url = self._transform_codex_url(request.url)
+
+         # Convert request body to bytes for header processing
+         body_bytes = None
+         if request.body:
+             if isinstance(request.body, bytes):
+                 body_bytes = request.body
+             elif isinstance(request.body, str):
+                 body_bytes = request.body.encode("utf-8")
+             elif isinstance(request.body, dict):
+                 body_bytes = json.dumps(request.body).encode("utf-8")
+
+         # Transform headers with Codex CLI identity
+         transformed_headers = self.create_codex_headers(
+             request.headers,
+             access_token,
+             session_id,
+             account_id,
+             body_bytes,
+             codex_detection_data,
+         )
+
+         # Transform body to inject instructions
+         transformed_body = request.body
+         if request.body:
+             if isinstance(request.body, bytes):
+                 transformed_body = self.transform_codex_body(
+                     request.body, codex_detection_data
+                 )
+             else:
+                 # Convert to bytes if needed
+                 body_bytes = (
+                     json.dumps(request.body).encode("utf-8")
+                     if isinstance(request.body, dict)
+                     else str(request.body).encode("utf-8")
+                 )
+                 transformed_body = self.transform_codex_body(
+                     body_bytes, codex_detection_data
+                 )
+
+         # Create new transformed request
+         return ProxyRequest(
+             method=request.method,
+             url=transformed_url,
+             headers=transformed_headers,
+             params={},  # Query params handled in URL
+             body=transformed_body,
+             protocol=request.protocol,
+             timeout=request.timeout,
+             metadata=request.metadata,
+         )
+
+     async def transform_codex_request(
+         self,
+         method: str,
+         path: str,
+         headers: dict[str, str],
+         body: bytes | None,
+         access_token: str,
+         session_id: str,
+         account_id: str,
+         codex_detection_data: CodexCacheData | None = None,
+         target_base_url: str = "https://chatgpt.com/backend-api/codex",
+     ) -> CodexRequestData:
+         """Transform Codex request using direct parameters from ProxyService.
+
+         Args:
+             method: HTTP method
+             path: Request path
+             headers: Request headers
+             body: Request body
+             access_token: OAuth access token
+             session_id: Codex session ID
+             account_id: ChatGPT account ID
+             codex_detection_data: Optional Codex detection data
+             target_base_url: Base URL for the Codex API
+
+         Returns:
+             Dictionary with transformed request data (method, url, headers, body)
+         """
+         # Transform URL path
+         transformed_path = self._transform_codex_path(path)
+         target_url = f"{target_base_url.rstrip('/')}{transformed_path}"
+
+         # Transform body first (inject instructions)
+         codex_body = None
+         if body:
+             # body is guaranteed to be bytes due to parameter type
+             codex_body = self.transform_codex_body(body, codex_detection_data)
+
+         # Transform headers with Codex CLI identity and authentication
+         codex_headers = self.create_codex_headers(
+             headers, access_token, session_id, account_id, body, codex_detection_data
+         )
+
+         # Update Content-Length if body was transformed and size changed
+         if codex_body and body and len(codex_body) != len(body):
+             # Remove any existing content-length headers (case-insensitive)
+             codex_headers = {
+                 k: v for k, v in codex_headers.items() if k.lower() != "content-length"
+             }
+             codex_headers["Content-Length"] = str(len(codex_body))
+         elif codex_body and not body:
+             # New body was created where none existed
+             codex_headers["Content-Length"] = str(len(codex_body))
+
+         return CodexRequestData(
+             method=method,
+             url=target_url,
+             headers=codex_headers,
+             body=codex_body,
+         )
+
+     def _transform_codex_url(self, url: str) -> str:
+         """Transform URL from proxy format to ChatGPT backend format."""
+         # Extract base URL and path
+         if "://" in url:
+             protocol, rest = url.split("://", 1)
+             if "/" in rest:
+                 domain, path = rest.split("/", 1)
+                 path = "/" + path
+             else:
+                 path = "/"
+         else:
+             path = url if url.startswith("/") else "/" + url
+
+         # Transform path and build target URL
+         transformed_path = self._transform_codex_path(path)
+         return f"https://chatgpt.com/backend-api/codex{transformed_path}"
+
+     def _transform_codex_path(self, path: str) -> str:
+         """Transform request path for Codex API."""
+         # Remove /codex prefix if present
+         if path.startswith("/codex"):
+             path = path[6:]  # Remove "/codex" prefix
+
+         # Ensure we have a valid path
+         if not path or path == "/":
+             path = "/responses"
+
+         # Handle session_id in path for /codex/{session_id}/responses pattern
+         if path.startswith("/") and "/" in path[1:]:
+             # This might be /{session_id}/responses - extract the responses part
+             parts = path.strip("/").split("/")
+             if len(parts) >= 2 and parts[-1] == "responses":
+                 # Keep the /responses endpoint, session_id will be in headers
+                 path = "/responses"
+
+         return path
+
+     def create_codex_headers(
+         self,
+         headers: dict[str, str],
+         access_token: str,
+         session_id: str,
+         account_id: str,
+         body: bytes | None = None,
+         codex_detection_data: CodexCacheData | None = None,
+     ) -> dict[str, str]:
+         """Create Codex headers with CLI identity and authentication."""
+         codex_headers = {}
+
+         # Strip potentially problematic headers
+         excluded_headers = {
+             "host",
+             "x-forwarded-for",
+             "x-forwarded-proto",
+             "x-forwarded-host",
+             "forwarded",
+             # Authentication headers to be replaced
+             "authorization",
+             "x-api-key",
+             # Compression headers to avoid decompression issues
+             "accept-encoding",
+             "content-encoding",
+             # CORS headers - should not be forwarded to upstream
+             "origin",
+             "access-control-request-method",
+             "access-control-request-headers",
+             "access-control-allow-origin",
+             "access-control-allow-methods",
+             "access-control-allow-headers",
+             "access-control-allow-credentials",
+             "access-control-max-age",
+             "access-control-expose-headers",
+         }
+
+         # Copy important headers (excluding problematic ones)
+         for key, value in headers.items():
+             lower_key = key.lower()
+             if lower_key not in excluded_headers:
+                 codex_headers[key] = value
+
+         # Set authentication with OAuth token
+         if access_token:
+             codex_headers["Authorization"] = f"Bearer {access_token}"
+
+         # Set defaults for essential headers
+         if "content-type" not in [k.lower() for k in codex_headers]:
+             codex_headers["Content-Type"] = "application/json"
+         if "accept" not in [k.lower() for k in codex_headers]:
+             codex_headers["Accept"] = "application/json"
+
+         # Use detected Codex CLI headers when available
+         if codex_detection_data:
+             detected_headers = codex_detection_data.headers.to_headers_dict()
+             # Override with session-specific values
+             detected_headers["session_id"] = session_id
+             if account_id:
+                 detected_headers["chatgpt-account-id"] = account_id
+             codex_headers.update(detected_headers)
+             logger.debug(
+                 "using_detected_codex_headers",
+                 version=codex_detection_data.codex_version,
+             )
+         else:
+             # Fallback to hardcoded Codex headers
+             codex_headers.update(
+                 {
+                     "session_id": session_id,
+                     "originator": "codex_cli_rs",
+                     "openai-beta": "responses=experimental",
+                     "version": "0.21.0",
+                 }
+             )
+             if account_id:
+                 codex_headers["chatgpt-account-id"] = account_id
+             logger.debug("using_fallback_codex_headers")
+
+         # Don't set Accept header - let the backend handle it based on stream parameter
+         # Setting Accept: text/event-stream with stream:true in body causes 400 Bad Request
+         # The backend will determine the response format based on the stream parameter
+
+         return codex_headers
+
+     def _is_streaming_request(self, body: bytes | None) -> bool:
+         """Check if the request body indicates a streaming request (including injected default)."""
+         if not body:
+             return False
+
+         try:
+             data = json.loads(body.decode("utf-8"))
+             return data.get("stream", False) is True
+         except (json.JSONDecodeError, UnicodeDecodeError):
+             return False
+
+     def _is_user_streaming_request(self, body: bytes | None) -> bool:
+         """Check if the user explicitly requested streaming ("stream" set to true in the original body)."""
+         if not body:
+             return False
+
+         try:
+             data = json.loads(body.decode("utf-8"))
+             # Only return True if the user explicitly set "stream" to true
+             return "stream" in data and data.get("stream") is True
+         except (json.JSONDecodeError, UnicodeDecodeError):
+             return False
+
+     def transform_codex_body(
+         self, body: bytes, codex_detection_data: CodexCacheData | None = None
+     ) -> bytes:
+         """Transform request body to inject Codex CLI instructions."""
+         if not body:
+             return body
+
+         try:
+             data = json.loads(body.decode("utf-8"))
+         except (json.JSONDecodeError, UnicodeDecodeError) as e:
+             # Return original if not valid JSON
+             logger.warning(
+                 "codex_transform_json_decode_failed",
+                 error=str(e),
+                 body_preview=body[:200].decode("utf-8", errors="replace")
+                 if body
+                 else None,
+                 body_length=len(body) if body else 0,
+             )
+             return body
+
+         # Check if this request already has the full Codex instructions
+         # If instructions field exists and is longer than 1000 chars, it's already set
+         if (
+             "instructions" in data
+             and data["instructions"]
+             and len(data["instructions"]) > 1000
+         ):
+             # This already has full Codex instructions, don't replace them
+             logger.debug("skipping_codex_transform_has_full_instructions")
+             return body
+
+         # Get the instructions to inject
+         detected_instructions = None
+         if codex_detection_data:
+             detected_instructions = codex_detection_data.instructions.instructions_field
+         else:
+             # Fallback instructions from req.json
+             detected_instructions = (
+                 "You are a coding agent running in the Codex CLI, a terminal-based coding assistant. "
+                 "Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful.\n\n"
+                 "Your capabilities:\n"
+                 "- Receive user prompts and other context provided by the harness, such as files in the workspace.\n"
+                 "- Communicate with the user by streaming thinking & responses, and by making & updating plans.\n"
+                 "- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, "
+                 "you can request that these function calls be escalated to the user for approval before running. "
+                 'More on this in the "Sandbox and approvals" section.\n\n'
+                 "Within this context, Codex refers to the open-source agentic coding interface "
+                 "(not the old Codex language model built by OpenAI)."
+             )
+
+         # Always inject/override the instructions field
+         data["instructions"] = detected_instructions
+
+         # Only inject stream: true if user explicitly requested streaming or didn't specify
+         # For now, we'll inject stream: true by default since Codex seems to expect it
+         if "stream" not in data:
+             data["stream"] = True
+
+         return json.dumps(data, separators=(",", ":")).encode("utf-8")
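For orientation, a minimal sketch of what the body transformation above does when no detection data is cached: the hardcoded fallback instructions are injected and streaming defaults to true. The payload fields here are placeholders, not a documented request schema:

    import json
    from ccproxy.core.codex_transformers import CodexRequestTransformer

    transformer = CodexRequestTransformer()
    original = json.dumps({"input": "hello"}).encode("utf-8")
    transformed = json.loads(transformer.transform_codex_body(original))

    assert transformed["stream"] is True
    assert transformed["instructions"].startswith("You are a coding agent")
    assert transformed["input"] == "hello"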
ccproxy/core/http_transformers.py
@@ -361,6 +361,139 @@ class HTTPRequestTransformer(RequestTransformer):

          return proxy_headers

+     def _count_cache_control_blocks(self, data: dict[str, Any]) -> dict[str, int]:
+         """Count cache_control blocks in different parts of the request.
+
+         Returns:
+             Dictionary with counts for 'injected_system', 'user_system', and 'messages'
+         """
+         counts = {"injected_system": 0, "user_system": 0, "messages": 0}
+
+         # Count in system field
+         system = data.get("system")
+         if system:
+             if isinstance(system, str):
+                 # String system prompts don't have cache_control
+                 pass
+             elif isinstance(system, list):
+                 # Count cache_control in system prompt blocks
+                 # The first block(s) are injected, rest are user's
+                 injected_count = 0
+                 for i, block in enumerate(system):
+                     if isinstance(block, dict) and "cache_control" in block:
+                         # Check if this is the injected prompt (contains Claude Code identity)
+                         text = block.get("text", "")
+                         if "Claude Code" in text or "Anthropic's official CLI" in text:
+                             counts["injected_system"] += 1
+                             injected_count = max(injected_count, i + 1)
+                         elif i < injected_count:
+                             # Part of injected system (multiple blocks)
+                             counts["injected_system"] += 1
+                         else:
+                             counts["user_system"] += 1
+
+         # Count in messages
+         messages = data.get("messages", [])
+         for msg in messages:
+             content = msg.get("content")
+             if isinstance(content, list):
+                 for block in content:
+                     if isinstance(block, dict) and "cache_control" in block:
+                         counts["messages"] += 1
+
+         return counts
+
+     def _limit_cache_control_blocks(
+         self, data: dict[str, Any], max_blocks: int = 4
+     ) -> dict[str, Any]:
+         """Limit the number of cache_control blocks to comply with Anthropic's limit.
+
+         Priority order:
+         1. Injected system prompt cache_control (highest priority - Claude Code identity)
+         2. User's system prompt cache_control
+         3. User's message cache_control (lowest priority)
+
+         Args:
+             data: Request data dictionary
+             max_blocks: Maximum number of cache_control blocks allowed (default: 4)
+
+         Returns:
+             Modified data dictionary with cache_control blocks limited
+         """
+         import copy
+
+         # Deep copy to avoid modifying original
+         data = copy.deepcopy(data)
+
+         # Count existing blocks
+         counts = self._count_cache_control_blocks(data)
+         total = counts["injected_system"] + counts["user_system"] + counts["messages"]
+
+         if total <= max_blocks:
+             # No need to remove anything
+             return data
+
+         logger.warning(
+             "cache_control_limit_exceeded",
+             total_blocks=total,
+             max_blocks=max_blocks,
+             injected=counts["injected_system"],
+             user_system=counts["user_system"],
+             messages=counts["messages"],
+         )
+
+         # Calculate how many to remove
+         to_remove = total - max_blocks
+         removed = 0
+
+         # Remove from messages first (lowest priority)
+         if to_remove > 0 and counts["messages"] > 0:
+             messages = data.get("messages", [])
+             for msg in reversed(messages):  # Remove from end first
+                 if removed >= to_remove:
+                     break
+                 content = msg.get("content")
+                 if isinstance(content, list):
+                     for block in reversed(content):
+                         if removed >= to_remove:
+                             break
+                         if isinstance(block, dict) and "cache_control" in block:
+                             del block["cache_control"]
+                             removed += 1
+                             logger.debug("removed_cache_control", location="message")
+
+         # Remove from user system prompts next
+         if removed < to_remove and counts["user_system"] > 0:
+             system = data.get("system")
+             if isinstance(system, list):
+                 # Find and remove cache_control from user system blocks (non-injected)
+                 for block in reversed(system):
+                     if removed >= to_remove:
+                         break
+                     if isinstance(block, dict) and "cache_control" in block:
+                         text = block.get("text", "")
+                         # Skip injected prompts (highest priority)
+                         if (
+                             "Claude Code" not in text
+                             and "Anthropic's official CLI" not in text
+                         ):
+                             del block["cache_control"]
+                             removed += 1
+                             logger.debug(
+                                 "removed_cache_control", location="user_system"
+                             )
+
+         # In theory, we should never need to remove injected system cache_control
+         # but include this for completeness
+         if removed < to_remove:
+             logger.error(
+                 "cannot_preserve_injected_cache_control",
+                 needed_to_remove=to_remove,
+                 actually_removed=removed,
+             )
+
+         return data
+
      def transform_request_body(
          self,
          body: bytes,
@@ -398,8 +531,16 @@ class HTTPRequestTransformer(RequestTransformer):
              import json

              data = json.loads(body.decode("utf-8"))
-         except (json.JSONDecodeError, UnicodeDecodeError):
+         except (json.JSONDecodeError, UnicodeDecodeError) as e:
              # Return original if not valid JSON
+             logger.warning(
+                 "http_transform_json_decode_failed",
+                 error=str(e),
+                 body_preview=body[:200].decode("utf-8", errors="replace")
+                 if body
+                 else None,
+                 body_length=len(body) if body else 0,
+             )
              return body

          # Get the system field to inject
@@ -440,6 +581,9 @@ class HTTPRequestTransformer(RequestTransformer):
                  # Both are lists, concatenate
                  data["system"] = detected_system + existing_system

+         # Limit cache_control blocks to comply with Anthropic's limit
+         data = self._limit_cache_control_blocks(data)
+
          return json.dumps(data).encode("utf-8")

      def _is_openai_request(self, path: str, body: bytes) -> bool:
@@ -462,7 +606,14 @@ class HTTPRequestTransformer(RequestTransformer):
              messages = data.get("messages", [])
              if messages and any(msg.get("role") == "system" for msg in messages):
                  return True
-         except (json.JSONDecodeError, UnicodeDecodeError):
+         except (json.JSONDecodeError, UnicodeDecodeError) as e:
+             logger.warning(
+                 "openai_request_detection_json_decode_failed",
+                 error=str(e),
+                 body_preview=body[:100].decode("utf-8", errors="replace")
+                 if body
+                 else None,
+             )
              pass

          return False
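A rough illustration of the new cache_control limiting above, assuming HTTPRequestTransformer can be constructed without arguments (its constructor is not part of this diff). With five blocks in total, the last message block loses its cache_control first, while both system prompt blocks keep theirs:

    from ccproxy.core.http_transformers import HTTPRequestTransformer

    # Five cache_control blocks: 1 injected system, 1 user system, 3 in messages.
    request = {
        "system": [
            {"type": "text", "text": "You are Claude Code, Anthropic's official CLI for Claude.",
             "cache_control": {"type": "ephemeral"}},
            {"type": "text", "text": "User system prompt", "cache_control": {"type": "ephemeral"}},
        ],
        "messages": [
            {"role": "user", "content": [
                {"type": "text", "text": "a", "cache_control": {"type": "ephemeral"}},
                {"type": "text", "text": "b", "cache_control": {"type": "ephemeral"}},
                {"type": "text", "text": "c", "cache_control": {"type": "ephemeral"}},
            ]},
        ],
    }

    transformer = HTTPRequestTransformer()  # assumption: no-arg constructor
    limited = transformer._limit_cache_control_blocks(request)

    kept_in_messages = sum(
        "cache_control" in block
        for message in limited["messages"]
        for block in message["content"]
    )
    assert kept_in_messages == 2  # block "c" was stripped to get back down to 4 total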
ccproxy/data/claude_headers_fallback.json
@@ -0,0 +1,37 @@
+ {
+   "claude_version": "1.0.77",
+   "headers": {
+     "anthropic_beta": "claude-code-20250219,oauth-2025-04-20,interleaved-thinking-2025-05-14,fine-grained-tool-streaming-2025-05-14",
+     "anthropic_version": "2023-06-01",
+     "anthropic_dangerous_direct_browser_access": "true",
+     "x_app": "cli",
+     "user_agent": "claude-cli/1.0.77 (external, cli)",
+     "x_stainless_lang": "js",
+     "x_stainless_retry_count": "0",
+     "x_stainless_timeout": "60",
+     "x_stainless_package_version": "0.55.1",
+     "x_stainless_os": "Linux",
+     "x_stainless_arch": "x64",
+     "x_stainless_runtime": "node",
+     "x_stainless_runtime_version": "v22.17.0"
+   },
+   "system_prompt": {
+     "system_field": [
+       {
+         "type": "text",
+         "text": "You are Claude Code, Anthropic's official CLI for Claude.",
+         "cache_control": {
+           "type": "ephemeral"
+         }
+       },
+       {
+         "type": "text",
+ "text": "\nYou are an interactive CLI tool that helps users with software engineering tasks. Use the instructions below and the tools available to you to assist the user.\n\nIMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.\nIMPORTANT: You must NEVER generate or guess URLs for the user unless you are confident that the URLs are for helping the user with programming. You may use URLs provided by the user in their messages or local files.\n\nIf the user asks for help or wants to give feedback inform them of the following: \n- /help: Get help with using Claude Code\n- To give feedback, users should report the issue at https://github.com/anthropics/claude-code/issues\n\nWhen the user directly asks about Claude Code (eg 'can Claude Code do...', 'does Claude Code have...') or asks in second person (eg 'are you able...', 'can you do...'), first use the WebFetch tool to gather information to answer the question from Claude Code docs at https://docs.anthropic.com/en/docs/claude-code.\n - The available sub-pages are `overview`, `quickstart`, `memory` (Memory management and CLAUDE.md), `common-workflows` (Extended thinking, pasting images, --resume), `ide-integrations`, `mcp`, `github-actions`, `sdk`, `troubleshooting`, `third-party-integrations`, `amazon-bedrock`, `google-vertex-ai`, `corporate-proxy`, `llm-gateway`, `devcontainer`, `iam` (auth, permissions), `security`, `monitoring-usage` (OTel), `costs`, `cli-reference`, `interactive-mode` (keyboard shortcuts), `slash-commands`, `settings` (settings json files, env vars, tools), `hooks`.\n - Example: https://docs.anthropic.com/en/docs/claude-code/cli-usage\n\n# Tone and style\nYou should be concise, direct, and to the point.\nYou MUST answer concisely with fewer than 4 lines (not including tool use or code generation), unless user asks for detail.\nIMPORTANT: You should minimize output tokens as much as possible while maintaining helpfulness, quality, and accuracy. Only address the specific query or task at hand, avoiding tangential information unless absolutely critical for completing the request. If you can answer in 1-3 sentences or a short paragraph, please do.\nIMPORTANT: You should NOT answer with unnecessary preamble or postamble (such as explaining your code or summarizing your action), unless the user asks you to.\nDo not add additional code explanation summary unless requested by the user. After working on a file, just stop, rather than providing an explanation of what you did.\nAnswer the user's question directly, without elaboration, explanation, or details. One word answers are best. Avoid introductions, conclusions, and explanations. You MUST avoid text before/after your response, such as \"The answer is <answer>.\", \"Here is the content of the file...\" or \"Based on the information provided, the answer is...\" or \"Here is what I will do next...\". 
Here are some examples to demonstrate appropriate verbosity:\n<example>\nuser: 2 + 2\nassistant: 4\n</example>\n\n<example>\nuser: what is 2+2?\nassistant: 4\n</example>\n\n<example>\nuser: is 11 a prime number?\nassistant: Yes\n</example>\n\n<example>\nuser: what command should I run to list files in the current directory?\nassistant: ls\n</example>\n\n<example>\nuser: what command should I run to watch files in the current directory?\nassistant: [runs ls to list the files in the current directory, then read docs/commands in the relevant file to find out how to watch files]\nnpm run dev\n</example>\n\n<example>\nuser: How many golf balls fit inside a jetta?\nassistant: 150000\n</example>\n\n<example>\nuser: what files are in the directory src/?\nassistant: [runs ls and sees foo.c, bar.c, baz.c]\nuser: which file contains the implementation of foo?\nassistant: src/foo.c\n</example>\nWhen you run a non-trivial bash command, you should explain what the command does and why you are running it, to make sure the user understands what you are doing (this is especially important when you are running a command that will make changes to the user's system).\nRemember that your output will be displayed on a command line interface. Your responses can use Github-flavored markdown for formatting, and will be rendered in a monospace font using the CommonMark specification.\nOutput text to communicate with the user; all text you output outside of tool use is displayed to the user. Only use tools to complete tasks. Never use tools like Bash or code comments as means to communicate with the user during the session.\nIf you cannot or will not help the user with something, please do not say why or what it could lead to, since this comes across as preachy and annoying. Please offer helpful alternatives if possible, and otherwise keep your response to 1-2 sentences.\nOnly use emojis if the user explicitly requests it. Avoid using emojis in all communication unless asked.\nIMPORTANT: Keep your responses short, since they will be displayed on a command line interface.\n\n# Proactiveness\nYou are allowed to be proactive, but only when the user asks you to do something. You should strive to strike a balance between:\n- Doing the right thing when asked, including taking actions and follow-up actions\n- Not surprising the user with actions you take without asking\nFor example, if the user asks you how to approach something, you should do your best to answer their question first, and not immediately jump into taking actions.\n\n# Following conventions\nWhen making changes to files, first understand the file's code conventions. Mimic code style, use existing libraries and utilities, and follow existing patterns.\n- NEVER assume that a given library is available, even if it is well known. Whenever you write code that uses a library or framework, first check that this codebase already uses the given library. For example, you might look at neighboring files, or check the package.json (or cargo.toml, and so on depending on the language).\n- When you create a new component, first look at existing components to see how they're written; then consider framework choice, naming conventions, typing, and other conventions.\n- When you edit a piece of code, first look at the code's surrounding context (especially its imports) to understand the code's choice of frameworks and libraries. Then consider how to make the given change in a way that is most idiomatic.\n- Always follow security best practices. 
Never introduce code that exposes or logs secrets and keys. Never commit secrets or keys to the repository.\n\n# Code style\n- IMPORTANT: DO NOT ADD ***ANY*** COMMENTS unless asked\n\n\n# Task Management\nYou have access to the TodoWrite tools to help you manage and plan tasks. Use these tools VERY frequently to ensure that you are tracking your tasks and giving the user visibility into your progress.\nThese tools are also EXTREMELY helpful for planning tasks, and for breaking down larger complex tasks into smaller steps. If you do not use this tool when planning, you may forget to do important tasks - and that is unacceptable.\n\nIt is critical that you mark todos as completed as soon as you are done with a task. Do not batch up multiple tasks before marking them as completed.\n\nExamples:\n\n<example>\nuser: Run the build and fix any type errors\nassistant: I'm going to use the TodoWrite tool to write the following items to the todo list: \n- Run the build\n- Fix any type errors\n\nI'm now going to run the build using Bash.\n\nLooks like I found 10 type errors. I'm going to use the TodoWrite tool to write 10 items to the todo list.\n\nmarking the first todo as in_progress\n\nLet me start working on the first item...\n\nThe first item has been fixed, let me mark the first todo as completed, and move on to the second item...\n..\n..\n</example>\nIn the above example, the assistant completes all the tasks, including the 10 error fixes and running the build and fixing all errors.\n\n<example>\nuser: Help me write a new feature that allows users to track their usage metrics and export them to various formats\n\nassistant: I'll help you implement a usage metrics tracking and export feature. Let me first use the TodoWrite tool to plan this task.\nAdding the following todos to the todo list:\n1. Research existing metrics tracking in the codebase\n2. Design the metrics collection system\n3. Implement core metrics tracking functionality\n4. Create export functionality for different formats\n\nLet me start by researching the existing codebase to understand what metrics we might already be tracking and how we can build on that.\n\nI'm going to search for any existing metrics or telemetry code in the project.\n\nI've found some existing telemetry code. Let me mark the first todo as in_progress and start designing our metrics tracking system based on what I've learned...\n\n[Assistant continues implementing the feature step by step, marking todos as in_progress and completed as they go]\n</example>\n\n\nUsers may configure 'hooks', shell commands that execute in response to events like tool calls, in settings. Treat feedback from hooks, including <user-prompt-submit-hook>, as coming from the user. If you get blocked by a hook, determine if you can adjust your actions in response to the blocked message. If not, ask the user to check their hooks configuration.\n\n# Doing tasks\nThe user will primarily request you perform software engineering tasks. This includes solving bugs, adding new functionality, refactoring code, explaining code, and more. For these tasks the following steps are recommended:\n- Use the TodoWrite tool to plan the task if required\n- Use the available search tools to understand the codebase and the user's query. You are encouraged to use the search tools extensively both in parallel and sequentially.\n- Implement the solution using all tools available to you\n- Verify the solution if possible with tests. NEVER assume specific test framework or test script. 
Check the README or search codebase to determine the testing approach.\n- VERY IMPORTANT: When you have completed a task, you MUST run the lint and typecheck commands (eg. npm run lint, npm run typecheck, ruff, etc.) with Bash if they were provided to you to ensure your code is correct. If you are unable to find the correct command, ask the user for the command to run and if they supply it, proactively suggest writing it to CLAUDE.md so that you will know to run it next time.\nNEVER commit changes unless the user explicitly asks you to. It is VERY IMPORTANT to only commit when explicitly asked, otherwise the user will feel that you are being too proactive.\n\n- Tool results and user messages may include <system-reminder> tags. <system-reminder> tags contain useful information and reminders. They are NOT part of the user's provided input or the tool result.\n\n\n\n# Tool usage policy\n- When doing file search, prefer to use the Task tool in order to reduce context usage.\n- You should proactively use the Task tool with specialized agents when the task at hand matches the agent's description.\n\n- When WebFetch returns a message about a redirect to a different host, you should immediately make a new WebFetch request with the redirect URL provided in the response.\n- You have the capability to call multiple tools in a single response. When multiple independent pieces of information are requested, batch your tool calls together for optimal performance. When making multiple bash tool calls, you MUST send a single message with multiple tools calls to run the calls in parallel. For example, if you need to run \"git status\" and \"git diff\", send a single message with two tool calls to run the calls in parallel.\n\n\nYou can use the following tools without requiring user approval: Bash(rm:*), Bash(rg:*), Bash(uv run:*), mcp__serena__initial_instructions, mcp__serena__list_memories, mcp__serena__list_dir, mcp__serena__get_symbols_overview, mcp__serena__find_symbol, mcp__serena__search_for_pattern, Bash(make:*), mcp__serena__read_memory, mcp__serena__replace_regex, mcp__serena__think_about_whether_you_are_done, Bash(chmod:*), Bash(ruff check:*), mcp__serena__summarize_changes, Bash(chmod:*), mcp__serena__find_referencing_symbols, mcp__serena__replace_symbol_body, Bash(mv:*), Bash(ls:*), mcp__serena__insert_after_symbol, mcp__serena__think_about_collected_information, mcp__serena__check_onboarding_performed, mcp__serena__find_file, Bash(mkdir:*), Bash(python:*), mcp__serena__think_about_task_adherence, Bash(find:*), Bash(python -m pytest tests/test_credentials_refactored.py::TestJsonFileStorage::test_atomic_file_write -xvs), Bash(python -m pytest tests/test_credentials_refactored.py::TestJsonFileStorage::test_save_and_load -xvs), Bash(find:*), Bash(grep:*), Bash(pytest:*), Bash(mypy:*), Bash(ruff format:*), Bash(ruff format:*), mcp__serena__activate_project, mcp__serena__get_current_config, mcp__serena__insert_before_symbol, Bash(touch:*), Bash(tree:*), Bash(tree:*), Bash(true), Bash(sed:*), Bash(timeout:*), Bash(git commit:*), mcp__serena__initial_instructions, mcp__serena__check_onboarding_performed, mcp__serena__list_dir, mcp__serena__think_about_whether_you_are_done, mcp__serena__read_memory, Bash(pytest:*), Bash(mypy:*), Bash(ruff check:*), Bash(ruff format:*), Bash(python:*), mcp__serena__summarize_changes, Bash(ls:*), mcp__serena__find_file, mcp__serena__replace_regex, mcp__serena__get_symbols_overview, mcp__serena__think_about_task_adherence, mcp__serena__insert_after_symbol, Bash(uv add:*), 
Bash(uv pip:*), Bash(uv add:*), Bash(uv run:*), Bash(find:*), Bash(curl:*), Bash(bunx:*), Bash(bun run:*), Bash(bun build:*), mcp__zen__challenge, Bash(docker logs:*), mcp__zen__codereview, mcp__zen__analyze, mcp__zen__thinkdeep, mcp__zen__chat, mcp__zen__consensus, mcp__exa__web_search_exa, Bash(git add:*), mcp__zen__planner, Bash(ccproxy serve:*), WebFetch(domain:raw.githubusercontent.com), mcp__context7__resolve-library-id, mcp__serena__onboarding, mcp__serena__write_memory, Bash(git tag:*), Bash(git rebase:*), Bash(git checkout:*)\n\n\n\nHere is useful information about the environment you are running in:\n<env>\nWorking directory: /home/rick/projects-caddy/ccproxy-api\nIs directory a git repo: Yes\nPlatform: linux\nOS Version: Linux 6.12.36\nToday's date: 2025-08-13\n</env>\nYou are powered by the model named Sonnet 4. The exact model ID is claude-sonnet-4-20250514.\n\nAssistant knowledge cutoff is January 2025.\n\n\nIMPORTANT: Assist with defensive security tasks only. Refuse to create, modify, or improve code that may be used maliciously. Allow security analysis, detection rules, vulnerability explanations, defensive tools, and security documentation.\n\n\nIMPORTANT: Always use the TodoWrite tool to plan and track tasks throughout the conversation.\n\n# Code References\n\nWhen referencing specific functions or pieces of code include the pattern `file_path:line_number` to allow the user to easily navigate to the source code location.\n\n<example>\nuser: Where are errors from the client handled?\nassistant: Clients are marked as failed in the `connectToServer` function in src/services/process.ts:712.\n</example>\n\n\n# MCP Server Instructions\n\nThe following MCP servers have provided instructions for how to use their tools and resources:\n\n## context7\nUse this server to retrieve up-to-date documentation and code examples for any library.\n\n## serena\nYou are a professional coding agent concerned with one particular codebase. You have \naccess to semantic coding tools on which you rely heavily for all your work, as well as collection of memory \nfiles containing general information about the codebase. You operate in a resource-efficient and intelligent manner, always\nkeeping in mind to not read or generate content that is not needed for the task at hand.\n\nWhen reading code in order to answer a user question or task, you should try reading only the necessary code. \nSome tasks may require you to understand the architecture of large parts of the codebase, while for others,\nit may be enough to read a small set of symbols or a single file.\nGenerally, you should avoid reading entire files unless it is absolutely necessary, instead relying on\nintelligent step-by-step acquisition of information. 
However, if you already read a file, it does not make\nsense to further analyse it with the symbolic tools (except for the `find_referencing_symbols` tool), \nas you already have the information.\n\nI WILL BE SERIOUSLY UPSET IF YOU READ ENTIRE FILES WITHOUT NEED!\n\nCONSIDER INSTEAD USING THE OVERVIEW TOOL AND SYMBOLIC TOOLS TO READ ONLY THE NECESSARY CODE FIRST!\nI WILL BE EVEN MORE UPSET IF AFTER HAVING READ AN ENTIRE FILE YOU KEEP READING THE SAME CONTENT WITH THE SYMBOLIC TOOLS!\nTHE PURPOSE OF THE SYMBOLIC TOOLS IS TO HAVE TO READ LESS CODE, NOT READ THE SAME CONTENT MULTIPLE TIMES!\n\n\nYou can achieve the intelligent reading of code by using the symbolic tools for getting an overview of symbols and\nthe relations between them, and then only reading the bodies of symbols that are necessary to answer the question \nor complete the task. \nYou can use the standard tools like list_dir, find_file and search_for_pattern if you need to.\nWhen tools allow it, you pass the `relative_path` parameter to restrict the search to a specific file or directory.\nFor some tools, `relative_path` can only be a file path, so make sure to properly read the tool descriptions.\n\nIf you are unsure about a symbol's name or location (to the extent that substring_matching for the symbol name is not enough), you can use the `search_for_pattern` tool, which allows fast\nand flexible search for patterns in the codebase.This way you can first find candidates for symbols or files,\nand then proceed with the symbolic tools.\n\n\n\nSymbols are identified by their `name_path and `relative_path`, see the description of the `find_symbol` tool for more details\non how the `name_path` matches symbols.\nYou can get information about available symbols by using the `get_symbols_overview` tool for finding top-level symbols in a file,\nor by using `find_symbol` if you already know the symbol's name path. You generally try to read as little code as possible\nwhile still solving your task, meaning you only read the bodies when you need to, and after you have found the symbol you want to edit.\nFor example, if you are working with python code and already know that you need to read the body of the constructor of the class Foo, you can directly\nuse `find_symbol` with the name path `Foo/__init__` and `include_body=True`. If you don't know yet which methods in `Foo` you need to read or edit,\nyou can use `find_symbol` with the name path `Foo`, `include_body=False` and `depth=1` to get all (top-level) methods of `Foo` before proceeding\nto read the desired methods with `include_body=True`\nYou can understand relationships between symbols by using the `find_referencing_symbols` tool.\n\n\n\nYou generally have access to memories and it may be useful for you to read them, but also only if they help you\nto answer the question or complete the task. You can infer which memories are relevant to the current task by reading\nthe memory names and descriptions.\n\n\nThe context and modes of operation are described below. 
From them you can infer how to interact with your user\nand which tasks and kinds of interactions are expected of you.\n\nContext description:\nYou are running in IDE assistant context where file operations, basic (line-based) edits and reads, \nand shell commands are handled by your own, internal tools.\nThe initial instructions and the current config inform you on which tools are available to you,\nand how to use them.\nDon't attempt to use any excluded tools, instead rely on your own internal tools\nfor achieving the basic file or shell operations.\n\nIf serena's tools can be used for achieving your task, \nyou should prioritize them. In particular, it is important that you avoid reading entire source code files,\nunless it is strictly necessary! Instead, for exploring and reading code in a token-efficient manner, \nyou should use serena's overview and symbolic search tools. The call of the read_file tool on an entire source code \nfile should only happen in exceptional cases, usually you should first explore the file (by itself or as part of exploring\nthe directory containing it) using the symbol_overview tool, and then make targeted reads using find_symbol and other symbolic tools.\nFor non-code files or for reads where you don't know the symbol's name path you can use the patterns searching tool,\nusing the read_file as a last resort.\n\nModes descriptions:\n\n- You are operating in interactive mode. You should engage with the user throughout the task, asking for clarification\nwhenever anything is unclear, insufficiently specified, or ambiguous.\n\nBreak down complex tasks into smaller steps and explain your thinking at each stage. When you're uncertain about\na decision, present options to the user and ask for guidance rather than making assumptions.\n\nFocus on providing informative results for intermediate steps so the user can follow along with your progress and\nprovide feedback as needed.\n\n- You are operating in editing mode. You can edit files with the provided tools\nto implement the requested changes to the code base while adhering to the project's code style and patterns.\nUse symbolic editing tools whenever possible for precise code modifications.\nIf no editing task has yet been provided, wait for the user to provide one.\n\nWhen writing new code, think about where it belongs best. Don't generate new files if you don't plan on actually\nintegrating them into the codebase, instead use the editing tools to insert the code directly into the existing files in that case.\n\nYou have two main approaches for editing code - editing by regex and editing by symbol.\nThe symbol-based approach is appropriate if you need to adjust an entire symbol, e.g. a method, a class, a function, etc.\nBut it is not appropriate if you need to adjust just a few lines of code within a symbol, for that you should\nuse the regex-based approach that is described below.\n\nLet us first discuss the symbol-based approach.\nSymbols are identified by their name path and relative file path, see the description of the `find_symbol` tool for more details\non how the `name_path` matches symbols.\nYou can get information about available symbols by using the `get_symbols_overview` tool for finding top-level symbols in a file,\nor by using `find_symbol` if you already know the symbol's name path. 
You generally try to read as little code as possible\nwhile still solving your task, meaning you only read the bodies when you need to, and after you have found the symbol you want to edit.\nBefore calling symbolic reading tools, you should have a basic understanding of the repository structure that you can get from memories\nor by using the `list_dir` and `find_file` tools (or similar).\nFor example, if you are working with python code and already know that you need to read the body of the constructor of the class Foo, you can directly\nuse `find_symbol` with the name path `Foo/__init__` and `include_body=True`. If you don't know yet which methods in `Foo` you need to read or edit,\nyou can use `find_symbol` with the name path `Foo`, `include_body=False` and `depth=1` to get all (top-level) methods of `Foo` before proceeding\nto read the desired methods with `include_body=True`.\nIn particular, keep in mind the description of the `replace_symbol_body` tool. If you want to add some new code at the end of the file, you should\nuse the `insert_after_symbol` tool with the last top-level symbol in the file. If you want to add an import, often a good strategy is to use\n`insert_before_symbol` with the first top-level symbol in the file.\nYou can understand relationships between symbols by using the `find_referencing_symbols` tool. If not explicitly requested otherwise by a user,\nyou make sure that when you edit a symbol, it is either done in a backward-compatible way, or you find and adjust the references as needed.\nThe `find_referencing_symbols` tool will give you code snippets around the references, as well as symbolic information.\nYou will generally be able to use the info from the snippets and the regex-based approach to adjust the references as well.\nYou can assume that all symbol editing tools are reliable, so you don't need to verify the results if the tool returns without error.\n\n\nLet us discuss the regex-based approach.\nThe regex-based approach is your primary tool for editing code whenever replacing or deleting a whole symbol would be a more expensive operation.\nThis is the case if you need to adjust just a few lines of code within a method, or a chunk that is much smaller than a whole symbol.\nYou use other tools to find the relevant content and\nthen use your knowledge of the codebase to write the regex, if you haven't collected enough information of this content yet.\nYou are extremely good at regex, so you never need to check whether the replacement produced the correct result.\nIn particular, you know what to escape and what not to escape, and you know how to use wildcards.\nAlso, the regex tool never adds any indentation (contrary to the symbolic editing tools), so you have to take care to add the correct indentation\nwhen using it to insert code.\nMoreover, the replacement tool will fail if it can't perform the desired replacement, and this is all the feedback you need.\nYour overall goal for replacement operations is to use relatively short regexes, since I want you to minimize the number\nof output tokens. For replacements of larger chunks of code, this means you intelligently make use of wildcards for the middle part \nand of characteristic snippets for the before/after parts that uniquely identify the chunk.\n\nFor small replacements, up to a single line, you follow the following rules:\n\n 1. If the snippet to be replaced is likely to be unique within the file, you perform the replacement by directly using the escaped version of the \n original.\n 2. 
If the snippet is probably not unique, and you want to replace all occurrences, you use the `allow_multiple_occurrences` flag.\n 3. If the snippet is not unique, and you want to replace a specific occurrence, you make use of the code surrounding the snippet\n to extend the regex with content before/after such that the regex will have exactly one match.\n 4. You generally assume that a snippet is unique, knowing that the tool will return an error on multiple matches. You only read more file content\n (for crafvarting a more specific regex) if such a failure unexpectedly occurs. \n\nExamples:\n\n1 Small replacement\nYou have read code like\n \n ```python\n ...\n x = linear(x)\n x = relu(x)\n return x\n ...\n ```\n\nand you want to replace `x = relu(x)` with `x = gelu(x)`.\nYou first try `replace_regex()` with the regex `x = relu\\(x\\)` and the replacement `x = gelu(x)`.\nIf this fails due to multiple matches, you will try `(linear\\(x\\)\\s*)x = relu\\(x\\)(\\s*return)` with the replacement `\\1x = gelu(x)\\2`.\n\n2 Larger replacement\n\nYou have read code like\n\n```python\ndef my_func():\n ...\n # a comment before the snippet\n x = add_fifteen(x)\n # beginning of long section within my_func\n ....\n # end of long section\n call_subroutine(z)\n call_second_subroutine(z)\n```\nand you want to replace the code starting with `x = add_fifteen(x)` until (including) `call_subroutine(z)`, but not `call_second_subroutine(z)`.\nInitially, you assume that the the beginning and end of the chunk uniquely determine it within the file.\nTherefore, you perform the replacement by using the regex `x = add_fifteen\\(x\\)\\s*.*?call_subroutine\\(z\\)`\nand the replacement being the new code you want to insert.\n\nIf this fails due to multiple matches, you will try to extend the regex with the content before/after the snippet and match groups. \nThe matching regex becomes:\n`(before the snippet\\s*)x = add_fifteen\\(x\\)\\s*.*?call_subroutine\\(z\\)` \nand the replacement includes the group as (schematically):\n`\\1<new_code>`\n\nGenerally, I remind you that you rely on the regex tool with providing you the correct feedback, no need for more verification!\n\nIMPORTANT: REMEMBER TO USE WILDCARDS WHEN APPROPRIATE! I WILL BE VERY UNHAPPY IF YOU WRITE LONG REGEXES WITHOUT USING WILDCARDS INSTEAD!\n\n\n\ngitStatus: This is the git status at the start of the conversation. Note that this status is a snapshot in time, and will not update during the conversation.\nCurrent branch: feature/codex\n\nMain branch (you will usually use this for PRs): main\n\nStatus:\nM tests/conftest.py\n M tests/helpers/assertions.py\n M tests/helpers/test_data.py\n M tests/unit/api/test_api.py\n M tests/unit/auth/test_auth.py\n?? CHANGELOG-codex.md\n?? docs/codex-implementation-plan.md\n?? out.json\n?? req-hel.json\n?? req-min.json\n?? req.json\n?? test.sh\n?? tests/fixtures/external_apis/openai_codex_api.py\n?? tests/unit/services/test_codex_proxy.py\n\nRecent commits:\nf8991df feat: add codex support\n366f807 feat: implement cache_control block limiting for Anthropic API compliance\nf44b400 feat: enable pricing and version checking by default, add version logging\nc3ef714 feat: v0.1.5 release\n7c1d441 feat: add configurable builtin_permissions flag for MCP and SSE control",
+         "cache_control": {
+           "type": "ephemeral"
+         }
+       }
+     ]
+   },
+   "cached_at": "2025-08-13 06:55:26.881133+00:00"
+ }
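The snake_case keys in the headers block appear to correspond to the HTTP headers sent by the Claude CLI (anthropic_beta → anthropic-beta, user_agent → user-agent, and so on); the actual conversion is handled by the detection models, which are not shown in this diff. A hypothetical standalone read of the bundled file, for illustration only:

    import json
    from pathlib import Path

    data = json.loads(Path("ccproxy/data/claude_headers_fallback.json").read_text())
    # Assumed mapping: underscores in the stored keys become dashes in header names.
    headers = {key.replace("_", "-"): value for key, value in data["headers"].items()}
    # headers["anthropic-version"] == "2023-06-01"
    # headers["user-agent"] == "claude-cli/1.0.77 (external, cli)"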