codex-sdk-python 0.81.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codex_sdk/__init__.py +140 -0
- codex_sdk/abort.py +40 -0
- codex_sdk/app_server.py +918 -0
- codex_sdk/codex.py +147 -0
- codex_sdk/config_overrides.py +70 -0
- codex_sdk/events.py +112 -0
- codex_sdk/exceptions.py +55 -0
- codex_sdk/exec.py +442 -0
- codex_sdk/hooks.py +74 -0
- codex_sdk/integrations/__init__.py +1 -0
- codex_sdk/integrations/pydantic_ai.py +172 -0
- codex_sdk/integrations/pydantic_ai_model.py +381 -0
- codex_sdk/items.py +173 -0
- codex_sdk/options.py +145 -0
- codex_sdk/telemetry.py +36 -0
- codex_sdk/thread.py +606 -0
- codex_sdk_python-0.81.0.dist-info/METADATA +880 -0
- codex_sdk_python-0.81.0.dist-info/RECORD +19 -0
- codex_sdk_python-0.81.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,381 @@
|
|
|
1
|
+
"""PydanticAI model-provider integration.
|
|
2
|
+
|
|
3
|
+
This module provides a `pydantic_ai.models.Model` implementation that delegates
|
|
4
|
+
completion + tool-call planning to Codex via `codex exec --output-schema`.
|
|
5
|
+
|
|
6
|
+
The goal is to let PydanticAI own the tool loop (tool execution, retries, output
|
|
7
|
+
validation), while Codex behaves like a "backend model" that emits either:
|
|
8
|
+
|
|
9
|
+
- tool calls (to be executed by PydanticAI), or
|
|
10
|
+
- a final text response (when text output is allowed).
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
from base64 import b64encode
|
|
17
|
+
from dataclasses import asdict, dataclass, is_dataclass
|
|
18
|
+
from typing import Any, Dict, List, Optional, Sequence
|
|
19
|
+
|
|
20
|
+
from ..codex import Codex
|
|
21
|
+
from ..options import CodexOptions, ThreadOptions
|
|
22
|
+
from ..telemetry import span
|
|
23
|
+
from ..thread import TurnOptions
|
|
24
|
+
|
|
25
|
+
try:
|
|
26
|
+
from pydantic_ai.messages import (
|
|
27
|
+
ModelMessage,
|
|
28
|
+
ModelRequest,
|
|
29
|
+
ModelResponse,
|
|
30
|
+
TextPart,
|
|
31
|
+
ToolCallPart,
|
|
32
|
+
)
|
|
33
|
+
from pydantic_ai.models import Model, ModelRequestParameters
|
|
34
|
+
from pydantic_ai.profiles import ModelProfile, ModelProfileSpec
|
|
35
|
+
from pydantic_ai.settings import ModelSettings
|
|
36
|
+
from pydantic_ai.tools import ToolDefinition
|
|
37
|
+
from pydantic_ai.usage import RequestUsage
|
|
38
|
+
except ImportError as exc: # pragma: no cover
|
|
39
|
+
raise ImportError(
|
|
40
|
+
"pydantic-ai is required for codex_sdk.integrations.pydantic_ai_model; "
|
|
41
|
+
'install with: uv add "codex-sdk-python[pydantic-ai]"'
|
|
42
|
+
) from exc
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclass(frozen=True)
class _ToolCallEnvelope:
    """One validated tool call parsed out of the structured-output envelope."""

    # Correlates the call with its eventual tool-return message
    tool_call_id: str
    # Name of the tool to invoke
    tool_name: str
    # Tool arguments as a JSON-encoded object string (not parsed here)
    arguments_json: str
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def _jsonable(value: Any) -> Any:
|
|
53
|
+
if is_dataclass(value) and not isinstance(value, type):
|
|
54
|
+
return asdict(value)
|
|
55
|
+
if hasattr(value, "model_dump") and callable(getattr(value, "model_dump")):
|
|
56
|
+
return value.model_dump(mode="json")
|
|
57
|
+
if isinstance(value, bytes):
|
|
58
|
+
return {"type": "bytes", "base64": b64encode(value).decode("ascii")}
|
|
59
|
+
if isinstance(value, dict):
|
|
60
|
+
return {str(k): _jsonable(v) for k, v in value.items()}
|
|
61
|
+
if isinstance(value, (list, tuple)):
|
|
62
|
+
return [_jsonable(v) for v in value]
|
|
63
|
+
return value
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _json_dumps(value: Any) -> str:
    """Serialize *value* to compact, deterministic JSON.

    Keys are sorted and separators minimized so equal payloads always
    render identically. Falls back to ``str(value)`` when the payload is
    not JSON-serializable even after `_jsonable` normalization.
    """
    try:
        return json.dumps(
            _jsonable(value),
            ensure_ascii=False,
            separators=(",", ":"),
            sort_keys=True,
        )
    except TypeError:
        # Last resort: a lossy but never-failing string rendering.
        return str(value)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _build_envelope_schema(tool_names: Sequence[str]) -> Dict[str, Any]:
|
|
76
|
+
name_schema: Dict[str, Any] = {"type": "string"}
|
|
77
|
+
if tool_names:
|
|
78
|
+
name_schema = {"type": "string", "enum": list(tool_names)}
|
|
79
|
+
|
|
80
|
+
return {
|
|
81
|
+
"type": "object",
|
|
82
|
+
"properties": {
|
|
83
|
+
"tool_calls": {
|
|
84
|
+
"type": "array",
|
|
85
|
+
"items": {
|
|
86
|
+
"type": "object",
|
|
87
|
+
"properties": {
|
|
88
|
+
"id": {"type": "string"},
|
|
89
|
+
"name": name_schema,
|
|
90
|
+
"arguments": {"type": "string"},
|
|
91
|
+
},
|
|
92
|
+
"required": ["id", "name", "arguments"],
|
|
93
|
+
"additionalProperties": False,
|
|
94
|
+
},
|
|
95
|
+
},
|
|
96
|
+
"final": {"type": "string"},
|
|
97
|
+
},
|
|
98
|
+
"required": ["tool_calls", "final"],
|
|
99
|
+
"additionalProperties": False,
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _render_tool_definitions(
    *,
    function_tools: Sequence[ToolDefinition],
    output_tools: Sequence[ToolDefinition],
) -> str:
    """Render a human-readable manifest of the available tools.

    Function tools and output tools are listed in separate sections; each
    entry carries the tool's description (when present), its JSON parameter
    schema, and a ``sequential: true`` marker when set.

    Returns an empty string when neither sequence has entries.
    """
    lines: List[str] = []
    if function_tools:
        lines.append("Function tools:")
        _append_tool_lines(lines, function_tools)

    if output_tools:
        if lines:
            # Blank separator between the two sections.
            lines.append("")
        lines.append(
            "Output tools (use ONE of these to finish when text is not allowed):"
        )
        _append_tool_lines(lines, output_tools)

    return "\n".join(lines).strip()


def _append_tool_lines(lines: List[str], tools: Sequence[ToolDefinition]) -> None:
    """Append one manifest entry per tool to *lines*.

    Shared by both manifest sections so the entry format stays identical
    (the original rendered both sections with duplicated loops).
    """
    for tool in tools:
        lines.append(f"- {tool.name}")
        if tool.description:
            lines.append(f"  description: {tool.description}")
        lines.append(
            f"  parameters_json_schema: {_json_dumps(tool.parameters_json_schema)}"
        )
        if tool.sequential:
            lines.append("  sequential: true")
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def _tool_calls_from_envelope(output: Any) -> List[_ToolCallEnvelope]:
|
|
141
|
+
if not isinstance(output, dict):
|
|
142
|
+
return []
|
|
143
|
+
|
|
144
|
+
raw_calls = output.get("tool_calls")
|
|
145
|
+
if not isinstance(raw_calls, list):
|
|
146
|
+
return []
|
|
147
|
+
|
|
148
|
+
calls: List[_ToolCallEnvelope] = []
|
|
149
|
+
for call in raw_calls:
|
|
150
|
+
if not isinstance(call, dict):
|
|
151
|
+
continue
|
|
152
|
+
tool_call_id = call.get("id")
|
|
153
|
+
tool_name = call.get("name")
|
|
154
|
+
arguments = call.get("arguments")
|
|
155
|
+
if not isinstance(tool_call_id, str) or not tool_call_id:
|
|
156
|
+
continue
|
|
157
|
+
if not isinstance(tool_name, str) or not tool_name:
|
|
158
|
+
continue
|
|
159
|
+
if not isinstance(arguments, str):
|
|
160
|
+
continue
|
|
161
|
+
calls.append(
|
|
162
|
+
_ToolCallEnvelope(
|
|
163
|
+
tool_call_id=tool_call_id,
|
|
164
|
+
tool_name=tool_name,
|
|
165
|
+
arguments_json=arguments,
|
|
166
|
+
)
|
|
167
|
+
)
|
|
168
|
+
return calls
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def _final_from_envelope(output: Any) -> str:
|
|
172
|
+
if not isinstance(output, dict):
|
|
173
|
+
return ""
|
|
174
|
+
final = output.get("final")
|
|
175
|
+
return final if isinstance(final, str) else ""
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def _render_message_history(messages: Sequence[ModelMessage]) -> str:
    """Flatten a PydanticAI message history into a tagged plain-text transcript.

    Each part is rendered under a bracketed role marker (``[user]``,
    ``[system]``, ``[tool:<name> id=<id>]``, ...) so the full conversation
    can be embedded in a single Codex prompt. Thinking parts are omitted.
    """
    lines: List[str] = []

    for message in messages:
        if isinstance(message, ModelRequest):
            if message.instructions:
                lines.append("[instructions]")
                lines.append(message.instructions)
            for part in message.parts:
                # Dispatch on part_kind (via getattr) rather than isinstance
                # so unknown/changed pydantic_ai part classes degrade gracefully.
                kind = getattr(part, "part_kind", None)
                if kind == "system-prompt":
                    lines.append("[system]")
                    lines.append(getattr(part, "content", ""))
                elif kind == "user-prompt":
                    lines.append("[user]")
                    content = getattr(part, "content", "")
                    if isinstance(content, str):
                        lines.append(content)
                    else:
                        # Structured/multi-modal content: serialize to JSON text.
                        lines.append(_json_dumps(content))
                elif kind == "tool-return":
                    tool_name = getattr(part, "tool_name", "")
                    tool_call_id = getattr(part, "tool_call_id", "")
                    # Prefer the part's own string rendering when available.
                    response = getattr(part, "model_response_str", None)
                    if callable(response):
                        tool_text = response()
                    else:
                        tool_text = _json_dumps(getattr(part, "content", None))
                    lines.append(f"[tool:{tool_name} id={tool_call_id}]")
                    lines.append(tool_text)
                elif kind == "retry-prompt":
                    response = getattr(part, "model_response", None)
                    lines.append("[retry]")
                    if callable(response):
                        lines.append(response())
                    else:
                        lines.append(_json_dumps(getattr(part, "content", "")))
                else:
                    # Unknown request part: keep it visible rather than drop it.
                    lines.append("[request-part]")
                    lines.append(_json_dumps(part))
        else:
            # ModelResponse (the only other ModelMessage variant imported here)
            lines.append("[assistant]")
            for part in message.parts:
                part_kind = getattr(part, "part_kind", None)
                if part_kind == "text":
                    lines.append(getattr(part, "content", ""))
                elif part_kind == "tool-call":
                    tool_name = getattr(part, "tool_name", "")
                    tool_call_id = getattr(part, "tool_call_id", "")
                    args = getattr(part, "args", None)
                    # args may already be a JSON string; only encode otherwise.
                    args_json = args if isinstance(args, str) else _json_dumps(args)
                    lines.append(
                        f"[tool-call:{tool_name} id={tool_call_id}] {args_json}"
                    )
                elif part_kind == "thinking":
                    # Intentionally omit to reduce prompt noise.
                    pass
                else:
                    lines.append(f"[assistant-part:{part_kind}]")

    # Drop empty entries so consecutive markers don't produce blank gaps.
    return "\n\n".join([line for line in lines if line]).strip()
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
class CodexModel(Model):
    """Use Codex CLI as a PydanticAI model provider via structured output.

    Each ``request`` runs a single Codex turn constrained by a JSON output
    schema (the tool-call/"final" envelope). PydanticAI keeps ownership of
    the tool loop; Codex only plans tool calls or emits the final text.
    """

    def __init__(
        self,
        *,
        codex: Optional[Codex] = None,
        codex_options: Optional[CodexOptions] = None,
        thread_options: Optional[ThreadOptions] = None,
        profile: Optional[ModelProfileSpec] = None,
        settings: Optional[ModelSettings] = None,
        system: str = "openai",
    ) -> None:
        """Create the model wrapper.

        Args:
            codex: Existing Codex client; when omitted, one is built from
                ``codex_options``.
            codex_options: Options for the implicitly created client
                (ignored when ``codex`` is provided).
            thread_options: Per-thread options; unset safety-related fields
                are filled with conservative defaults below.
            profile: Model profile; defaults to one with tool support.
            settings: Default model settings forwarded to the base class.
            system: Provider/system identifier reported via the ``system``
                property.
        """
        if codex is None:
            codex = Codex(codex_options or CodexOptions())
        if thread_options is None:
            thread_options = ThreadOptions()

        # As a model-provider wrapper, prefer safe + portable defaults.
        # NOTE(review): these fill-ins mutate a caller-supplied
        # ThreadOptions in place — confirm that side effect is intended.
        if thread_options.skip_git_repo_check is None:
            thread_options.skip_git_repo_check = True
        if thread_options.sandbox_mode is None:
            thread_options.sandbox_mode = "read-only"
        if thread_options.approval_policy is None:
            thread_options.approval_policy = "never"
        if thread_options.web_search_enabled is None:
            thread_options.web_search_enabled = False
        if thread_options.network_access_enabled is None:
            thread_options.network_access_enabled = False

        if profile is None:
            profile = ModelProfile(supports_tools=True)

        super().__init__(settings=settings, profile=profile)
        self._codex = codex
        self._thread_options = thread_options
        self._system = system

    @property
    def model_name(self) -> str:
        # Fall back to the generic "codex" label when no model is pinned.
        return self._thread_options.model or "codex"

    @property
    def system(self) -> str:
        # Provider/system identifier supplied at construction time.
        return self._system

    async def request(
        self,
        messages: list[ModelMessage],
        model_settings: Optional[ModelSettings],
        model_request_parameters: ModelRequestParameters,
    ) -> ModelResponse:
        """Run one Codex turn and translate the envelope into a ModelResponse.

        Builds a prompt containing the loop rules, the tool manifest, and
        the rendered conversation history, then asks Codex for JSON matching
        the envelope schema. Tool calls take precedence over final text.
        """
        model_settings, model_request_parameters = self.prepare_request(
            model_settings,
            model_request_parameters,
        )

        # Function and output tools share one name space in the envelope enum.
        tool_defs = [
            *model_request_parameters.function_tools,
            *model_request_parameters.output_tools,
        ]
        tool_names = [tool.name for tool in tool_defs]
        output_schema = _build_envelope_schema(tool_names)

        tool_manifest = _render_tool_definitions(
            function_tools=model_request_parameters.function_tools,
            output_tools=model_request_parameters.output_tools,
        )

        allow_text_output = model_request_parameters.allow_text_output
        prompt_sections = [
            "You are a model in a tool-calling loop controlled by the host application.",
            "You MUST NOT run shell commands, edit files, or call any built-in tools.",
            "Request tools ONLY by emitting tool calls in the JSON output (matching the output schema).",
            "",
            "JSON output rules:",
            "- Always return an object with keys: tool_calls (array) and final (string).",
            '- Each tool call is: {"id": "...", "name": "...", "arguments": "{...json...}"}',
            "- arguments MUST be a JSON string encoding an object.",
            "- If you are calling any tools, set final to an empty string.",
        ]
        if allow_text_output:
            prompt_sections.append(
                "- If no tools are needed, set tool_calls to [] and put your full answer in final."
            )
        else:
            prompt_sections.append(
                "- Text output is NOT allowed; to finish, call exactly one output tool and keep final empty."
            )

        if tool_manifest:
            prompt_sections.extend(["", tool_manifest])

        history = _render_message_history(messages)
        if history:
            prompt_sections.extend(["", "Conversation so far:", history])

        prompt = "\n".join(prompt_sections).strip()

        with span(
            "codex_sdk.pydantic_ai.model_request",
            model=self._thread_options.model,
            sandbox_mode=self._thread_options.sandbox_mode,
        ):
            # A fresh thread per request: conversation state is carried in
            # the prompt, not in a persistent Codex thread.
            thread = self._codex.start_thread(self._thread_options)
            parsed_turn = await thread.run_json(
                prompt, output_schema=output_schema, turn_options=TurnOptions()
            )

        usage = RequestUsage()
        if parsed_turn.turn.usage is not None:
            usage = RequestUsage(
                input_tokens=parsed_turn.turn.usage.input_tokens,
                output_tokens=parsed_turn.turn.usage.output_tokens,
                cache_read_tokens=parsed_turn.turn.usage.cached_input_tokens,
            )

        tool_calls = _tool_calls_from_envelope(parsed_turn.output)
        parts: List[Any] = []
        if tool_calls:
            parts.extend(
                ToolCallPart(
                    tool_name=call.tool_name,
                    args=call.arguments_json,
                    tool_call_id=call.tool_call_id,
                )
                for call in tool_calls
            )
        else:
            # No tool calls: surface the final text (when permitted).
            final = _final_from_envelope(parsed_turn.output)
            if allow_text_output and final:
                parts.append(TextPart(final))

        return ModelResponse(
            parts=parts,
            usage=usage,
            model_name=self.model_name,
            provider_name="codex",
            provider_details={"thread_id": thread.id},
        )
|
codex_sdk/items.py
ADDED
|
@@ -0,0 +1,173 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Item types for the Codex SDK.
|
|
3
|
+
|
|
4
|
+
Based on item types from codex-rs/exec/src/exec_events.rs
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
from typing import Any, List, Literal, Optional, Union
|
|
9
|
+
|
|
10
|
+
# Lifecycle status of a command execution item
CommandExecutionStatus = Literal["in_progress", "completed", "failed", "declined"]

# Kind of change applied to a file within a patch
PatchChangeKind = Literal["add", "delete", "update"]

# Lifecycle status of a patch (file change) item
PatchApplyStatus = Literal["in_progress", "completed", "failed"]

# Lifecycle status of an MCP tool call item
McpToolCallStatus = Literal["in_progress", "completed", "failed"]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@dataclass
class FileUpdateChange:
    """A single file change within a patch applied by the agent."""

    # Path of the affected file
    path: str
    # Whether the file was added, deleted, or updated
    kind: PatchChangeKind
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class TodoItem:
    """An item in the agent's to-do list."""

    # Human-readable description of the step
    text: str
    # Whether the step has been completed
    completed: bool
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
@dataclass
class CommandExecutionItem:
    """A command executed by the agent."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["command_execution"]

    # The command line executed by the agent
    command: str

    # Aggregated stdout and stderr captured while the command was running
    aggregated_output: str

    # Set when the command exits; omitted (None) while still running
    exit_code: Optional[int] = None

    # Current status of the command execution
    status: CommandExecutionStatus = "in_progress"
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
@dataclass
class FileChangeItem:
    """A set of file changes by the agent. Emitted once the patch succeeds or fails."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["file_change"]

    # Individual file changes that comprise the patch
    changes: List[FileUpdateChange]

    # Whether the patch ultimately succeeded or failed
    status: PatchApplyStatus
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
@dataclass
class McpToolCallItem:
    """
    Represents a call to an MCP tool. The item starts when the invocation is dispatched
    and completes when the MCP server reports success or failure.
    """

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["mcp_tool_call"]

    # Name of the MCP server handling the request
    server: str

    # The tool invoked on the MCP server
    tool: str

    # Current status of the tool invocation
    status: McpToolCallStatus

    # Arguments forwarded to the tool invocation (shape is tool-specific)
    arguments: Any = None

    # Result payload returned by the MCP server for successful calls
    result: Optional["McpToolCallItemResult"] = None

    # Error message reported for failed calls
    error: Optional["McpToolCallItemError"] = None
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@dataclass
class McpToolCallItemResult:
    """Result payload returned by an MCP server for a successful tool call."""

    # Content blocks returned by the tool
    content: List[Any]
    # Optional structured (JSON) result, when the tool provides one
    structured_content: Optional[Any] = None
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@dataclass
class McpToolCallItemError:
    """Error reported by an MCP server for a failed tool call."""

    # Human-readable error message
    message: str
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
@dataclass
class AgentMessageItem:
    """Response from the agent. Either natural-language text or JSON when structured output is requested."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["agent_message"]

    # Either natural-language text or JSON when structured output is requested
    text: str
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
@dataclass
class ReasoningItem:
    """Agent's reasoning summary."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["reasoning"]
    # Reasoning summary text
    text: str
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
@dataclass
class WebSearchItem:
    """Captures a web search request. Completes when results are returned to the agent."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["web_search"]
    # The search query issued by the agent
    query: str
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
@dataclass
class ErrorItem:
    """Describes a non-fatal error surfaced as an item."""

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["error"]
    # Human-readable error message
    message: str
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
@dataclass
class TodoListItem:
    """
    Tracks the agent's running to-do list. Starts when the plan is issued, updates as steps change,
    and completes when the turn ends.
    """

    # Unique identifier of this thread item
    id: str
    # Discriminator for the ThreadItem union
    type: Literal["todo_list"]
    # Current plan steps, in order
    items: List[TodoItem]
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# Canonical union of thread items and their type-specific payloads.
# Each variant carries a distinct `type` Literal field for discrimination.
ThreadItem = Union[
    AgentMessageItem,
    ReasoningItem,
    CommandExecutionItem,
    FileChangeItem,
    McpToolCallItem,
    WebSearchItem,
    TodoListItem,
    ErrorItem,
]
|
codex_sdk/options.py
ADDED
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
"""Configuration options for the Codex SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Any, List, Literal, Mapping, Optional
|
|
7
|
+
|
|
8
|
+
from .abort import AbortSignal
|
|
9
|
+
|
|
10
|
+
# Approval policy for tool/command execution
ApprovalMode = Literal["never", "on-request", "on-failure", "untrusted"]

# Sandbox level applied to agent file/command access
SandboxMode = Literal["read-only", "workspace-write", "danger-full-access"]

# Model reasoning effort preset
ModelReasoningEffort = Literal["minimal", "low", "medium", "high", "xhigh"]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
@dataclass
class CodexOptions:
    """Options for configuring the Codex client.

    Attributes:
        codex_path_override: Override the path to the codex binary.
        base_url: Base URL for the API.
        api_key: API key for authentication (sets CODEX_API_KEY for the child process).
        env: Environment variables passed to the Codex CLI process. When provided, the SDK
            will not inherit variables from os.environ.
        config_overrides: Optional config overrides passed as `--config key=value` to the Codex
            CLI. Values are encoded as TOML literals; for complex overrides, use strings.
    """

    # Override the path to the codex binary
    codex_path_override: Optional[str] = None

    # Base URL for the API
    base_url: Optional[str] = None

    # API key for authentication (exported as CODEX_API_KEY per the docstring)
    api_key: Optional[str] = None

    # Environment variables passed to the Codex CLI process;
    # when set, os.environ is NOT inherited (see docstring)
    env: Optional[Mapping[str, str]] = None

    # Optional `--config key=value` overrides passed to the Codex CLI process
    config_overrides: Optional[Mapping[str, Any]] = None
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
@dataclass
class ThreadOptions:
    """Options for configuring a thread.

    Attributes:
        model: Model to use for the thread.
        sandbox_mode: Sandbox mode for the thread.
        working_directory: Working directory for the thread.
        skip_git_repo_check: Skip Git repository safety check.
        model_reasoning_effort: Model reasoning effort preset.
        network_access_enabled: Enable/disable network access in workspace-write sandbox.
        web_search_enabled: Enable/disable web search feature.
        web_search_cached_enabled: Enable/disable cached web search feature.
        skills_enabled: (Deprecated) Skills are always enabled in Codex 0.80+.
        shell_snapshot_enabled: Enable/disable shell snapshotting.
        background_terminals_enabled: Enable/disable background terminals (unified exec).
        apply_patch_freeform_enabled: Enable/disable freeform apply_patch tool.
        exec_policy_enabled: Enable/disable exec policy enforcement.
        remote_models_enabled: Enable/disable remote model list refresh.
        request_compression_enabled: Enable/disable request body compression.
        feature_overrides: Arbitrary feature flag overrides (key -> bool).
        approval_policy: Approval policy for tool execution.
        additional_directories: Additional directories to add to the sandbox.
        config_overrides: Optional config overrides passed as `--config key=value` for this
            thread's invocations. Values are encoded as TOML literals.

    Note:
        All fields default to None, meaning "use the Codex CLI's own default".
    """

    # Model to use for the thread
    model: Optional[str] = None

    # Sandbox mode for the thread
    sandbox_mode: Optional[SandboxMode] = None

    # Working directory for the thread
    working_directory: Optional[str] = None

    # Skip the Git repository safety check
    skip_git_repo_check: Optional[bool] = None

    # Model reasoning effort preset
    model_reasoning_effort: Optional[ModelReasoningEffort] = None

    # Enable/disable network access in workspace-write sandbox
    network_access_enabled: Optional[bool] = None

    # Enable/disable web search feature
    web_search_enabled: Optional[bool] = None

    # Enable/disable cached web search feature
    web_search_cached_enabled: Optional[bool] = None

    # (Deprecated) Skills are always enabled in Codex 0.80+
    skills_enabled: Optional[bool] = None

    # Enable/disable shell snapshotting
    shell_snapshot_enabled: Optional[bool] = None

    # Enable/disable background terminals (unified exec)
    background_terminals_enabled: Optional[bool] = None

    # Enable/disable freeform apply_patch tool
    apply_patch_freeform_enabled: Optional[bool] = None

    # Enable/disable exec policy enforcement
    exec_policy_enabled: Optional[bool] = None

    # Enable/disable remote model list refresh
    remote_models_enabled: Optional[bool] = None

    # Enable/disable request body compression
    request_compression_enabled: Optional[bool] = None

    # Arbitrary feature flag overrides (feature key -> bool)
    feature_overrides: Optional[Mapping[str, bool]] = None

    # Approval policy for tool execution
    approval_policy: Optional[ApprovalMode] = None

    # Additional directories to add to the sandbox
    additional_directories: Optional[List[str]] = None

    # Optional `--config key=value` overrides for this thread's invocations
    config_overrides: Optional[Mapping[str, Any]] = None
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
@dataclass
class TurnOptions:
    """Options for configuring a turn.

    Attributes:
        output_schema: JSON schema describing the expected agent output as a JSON object.
        signal: Abort signal to cancel the turn.
    """

    # JSON schema describing the expected agent output (a JSON object)
    output_schema: Optional[Any] = None

    # Abort signal to cancel the turn
    signal: Optional[AbortSignal] = None
|