openai-agents 0.2.6__py3-none-any.whl → 0.6.8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
Files changed (96)
  1. agents/__init__.py +105 -4
  2. agents/_debug.py +15 -4
  3. agents/_run_impl.py +1203 -96
  4. agents/agent.py +294 -21
  5. agents/apply_diff.py +329 -0
  6. agents/editor.py +47 -0
  7. agents/exceptions.py +35 -0
  8. agents/extensions/experimental/__init__.py +6 -0
  9. agents/extensions/experimental/codex/__init__.py +92 -0
  10. agents/extensions/experimental/codex/codex.py +89 -0
  11. agents/extensions/experimental/codex/codex_options.py +35 -0
  12. agents/extensions/experimental/codex/codex_tool.py +1142 -0
  13. agents/extensions/experimental/codex/events.py +162 -0
  14. agents/extensions/experimental/codex/exec.py +263 -0
  15. agents/extensions/experimental/codex/items.py +245 -0
  16. agents/extensions/experimental/codex/output_schema_file.py +50 -0
  17. agents/extensions/experimental/codex/payloads.py +31 -0
  18. agents/extensions/experimental/codex/thread.py +214 -0
  19. agents/extensions/experimental/codex/thread_options.py +54 -0
  20. agents/extensions/experimental/codex/turn_options.py +36 -0
  21. agents/extensions/handoff_filters.py +13 -1
  22. agents/extensions/memory/__init__.py +120 -0
  23. agents/extensions/memory/advanced_sqlite_session.py +1285 -0
  24. agents/extensions/memory/async_sqlite_session.py +239 -0
  25. agents/extensions/memory/dapr_session.py +423 -0
  26. agents/extensions/memory/encrypt_session.py +185 -0
  27. agents/extensions/memory/redis_session.py +261 -0
  28. agents/extensions/memory/sqlalchemy_session.py +334 -0
  29. agents/extensions/models/litellm_model.py +449 -36
  30. agents/extensions/models/litellm_provider.py +3 -1
  31. agents/function_schema.py +47 -5
  32. agents/guardrail.py +16 -2
  33. agents/{handoffs.py → handoffs/__init__.py} +89 -47
  34. agents/handoffs/history.py +268 -0
  35. agents/items.py +238 -13
  36. agents/lifecycle.py +75 -14
  37. agents/mcp/server.py +280 -37
  38. agents/mcp/util.py +24 -3
  39. agents/memory/__init__.py +22 -2
  40. agents/memory/openai_conversations_session.py +91 -0
  41. agents/memory/openai_responses_compaction_session.py +249 -0
  42. agents/memory/session.py +19 -261
  43. agents/memory/sqlite_session.py +275 -0
  44. agents/memory/util.py +20 -0
  45. agents/model_settings.py +18 -3
  46. agents/models/__init__.py +13 -0
  47. agents/models/chatcmpl_converter.py +303 -50
  48. agents/models/chatcmpl_helpers.py +63 -0
  49. agents/models/chatcmpl_stream_handler.py +290 -68
  50. agents/models/default_models.py +58 -0
  51. agents/models/interface.py +4 -0
  52. agents/models/openai_chatcompletions.py +103 -48
  53. agents/models/openai_provider.py +10 -4
  54. agents/models/openai_responses.py +167 -46
  55. agents/realtime/__init__.py +4 -0
  56. agents/realtime/_util.py +14 -3
  57. agents/realtime/agent.py +7 -0
  58. agents/realtime/audio_formats.py +53 -0
  59. agents/realtime/config.py +78 -10
  60. agents/realtime/events.py +18 -0
  61. agents/realtime/handoffs.py +2 -2
  62. agents/realtime/items.py +17 -1
  63. agents/realtime/model.py +13 -0
  64. agents/realtime/model_events.py +12 -0
  65. agents/realtime/model_inputs.py +18 -1
  66. agents/realtime/openai_realtime.py +700 -151
  67. agents/realtime/session.py +309 -32
  68. agents/repl.py +7 -3
  69. agents/result.py +197 -38
  70. agents/run.py +1053 -178
  71. agents/run_context.py +13 -2
  72. agents/stream_events.py +1 -0
  73. agents/strict_schema.py +14 -0
  74. agents/tool.py +413 -15
  75. agents/tool_context.py +22 -1
  76. agents/tool_guardrails.py +279 -0
  77. agents/tracing/__init__.py +2 -0
  78. agents/tracing/config.py +9 -0
  79. agents/tracing/create.py +4 -0
  80. agents/tracing/processor_interface.py +84 -11
  81. agents/tracing/processors.py +65 -54
  82. agents/tracing/provider.py +64 -7
  83. agents/tracing/spans.py +105 -0
  84. agents/tracing/traces.py +116 -16
  85. agents/usage.py +134 -12
  86. agents/util/_json.py +19 -1
  87. agents/util/_transforms.py +12 -2
  88. agents/voice/input.py +5 -4
  89. agents/voice/models/openai_stt.py +17 -9
  90. agents/voice/pipeline.py +2 -0
  91. agents/voice/pipeline_config.py +4 -0
  92. {openai_agents-0.2.6.dist-info → openai_agents-0.6.8.dist-info}/METADATA +44 -19
  93. openai_agents-0.6.8.dist-info/RECORD +134 -0
  94. {openai_agents-0.2.6.dist-info → openai_agents-0.6.8.dist-info}/WHEEL +1 -1
  95. openai_agents-0.2.6.dist-info/RECORD +0 -103
  96. {openai_agents-0.2.6.dist-info → openai_agents-0.6.8.dist-info}/licenses/LICENSE +0 -0
agents/apply_diff.py ADDED
@@ -0,0 +1,329 @@
+"""Utility for applying V4A diffs against text inputs."""
+
+from __future__ import annotations
+
+import re
+from collections.abc import Sequence
+from dataclasses import dataclass
+from typing import Callable, Literal
+
+ApplyDiffMode = Literal["default", "create"]
+
+
+@dataclass
+class Chunk:
+    orig_index: int
+    del_lines: list[str]
+    ins_lines: list[str]
+
+
+@dataclass
+class ParserState:
+    lines: list[str]
+    index: int = 0
+    fuzz: int = 0
+
+
+@dataclass
+class ParsedUpdateDiff:
+    chunks: list[Chunk]
+    fuzz: int
+
+
+@dataclass
+class ReadSectionResult:
+    next_context: list[str]
+    section_chunks: list[Chunk]
+    end_index: int
+    eof: bool
+
+
+END_PATCH = "*** End Patch"
+END_FILE = "*** End of File"
+SECTION_TERMINATORS = [
+    END_PATCH,
+    "*** Update File:",
+    "*** Delete File:",
+    "*** Add File:",
+]
+END_SECTION_MARKERS = [*SECTION_TERMINATORS, END_FILE]
+
+
+def apply_diff(input: str, diff: str, mode: ApplyDiffMode = "default") -> str:
+    """Apply a V4A diff to the provided text.
+
+    This parser understands both the create-file syntax (only "+" prefixed
+    lines) and the default update syntax that includes context hunks.
+    """
+
+    diff_lines = _normalize_diff_lines(diff)
+    if mode == "create":
+        return _parse_create_diff(diff_lines)
+
+    parsed = _parse_update_diff(diff_lines, input)
+    return _apply_chunks(input, parsed.chunks)
+
+
+def _normalize_diff_lines(diff: str) -> list[str]:
+    lines = [line.rstrip("\r") for line in re.split(r"\r?\n", diff)]
+    if lines and lines[-1] == "":
+        lines.pop()
+    return lines
+
+
+def _is_done(state: ParserState, prefixes: Sequence[str]) -> bool:
+    if state.index >= len(state.lines):
+        return True
+    if any(state.lines[state.index].startswith(prefix) for prefix in prefixes):
+        return True
+    return False
+
+
+def _read_str(state: ParserState, prefix: str) -> str:
+    if state.index >= len(state.lines):
+        return ""
+    current = state.lines[state.index]
+    if current.startswith(prefix):
+        state.index += 1
+        return current[len(prefix) :]
+    return ""
+
+
+def _parse_create_diff(lines: list[str]) -> str:
+    parser = ParserState(lines=[*lines, END_PATCH])
+    output: list[str] = []
+
+    while not _is_done(parser, SECTION_TERMINATORS):
+        if parser.index >= len(parser.lines):
+            break
+        line = parser.lines[parser.index]
+        parser.index += 1
+        if not line.startswith("+"):
+            raise ValueError(f"Invalid Add File Line: {line}")
+        output.append(line[1:])
+
+    return "\n".join(output)
+
+
+def _parse_update_diff(lines: list[str], input: str) -> ParsedUpdateDiff:
+    parser = ParserState(lines=[*lines, END_PATCH])
+    input_lines = input.split("\n")
+    chunks: list[Chunk] = []
+    cursor = 0
+
+    while not _is_done(parser, END_SECTION_MARKERS):
+        anchor = _read_str(parser, "@@ ")
+        has_bare_anchor = (
+            anchor == "" and parser.index < len(parser.lines) and parser.lines[parser.index] == "@@"
+        )
+        if has_bare_anchor:
+            parser.index += 1
+
+        if not (anchor or has_bare_anchor or cursor == 0):
+            current_line = parser.lines[parser.index] if parser.index < len(parser.lines) else ""
+            raise ValueError(f"Invalid Line:\n{current_line}")
+
+        if anchor.strip():
+            cursor = _advance_cursor_to_anchor(anchor, input_lines, cursor, parser)
+
+        section = _read_section(parser.lines, parser.index)
+        find_result = _find_context(input_lines, section.next_context, cursor, section.eof)
+        if find_result.new_index == -1:
+            ctx_text = "\n".join(section.next_context)
+            if section.eof:
+                raise ValueError(f"Invalid EOF Context {cursor}:\n{ctx_text}")
+            raise ValueError(f"Invalid Context {cursor}:\n{ctx_text}")
+
+        cursor = find_result.new_index + len(section.next_context)
+        parser.fuzz += find_result.fuzz
+        parser.index = section.end_index
+
+        for ch in section.section_chunks:
+            chunks.append(
+                Chunk(
+                    orig_index=ch.orig_index + find_result.new_index,
+                    del_lines=list(ch.del_lines),
+                    ins_lines=list(ch.ins_lines),
+                )
+            )
+
+    return ParsedUpdateDiff(chunks=chunks, fuzz=parser.fuzz)
+
+
+def _advance_cursor_to_anchor(
+    anchor: str,
+    input_lines: list[str],
+    cursor: int,
+    parser: ParserState,
+) -> int:
+    found = False
+
+    if not any(line == anchor for line in input_lines[:cursor]):
+        for i in range(cursor, len(input_lines)):
+            if input_lines[i] == anchor:
+                cursor = i + 1
+                found = True
+                break
+
+    if not found and not any(line.strip() == anchor.strip() for line in input_lines[:cursor]):
+        for i in range(cursor, len(input_lines)):
+            if input_lines[i].strip() == anchor.strip():
+                cursor = i + 1
+                parser.fuzz += 1
+                found = True
+                break
+
+    return cursor
+
+
+def _read_section(lines: list[str], start_index: int) -> ReadSectionResult:
+    context: list[str] = []
+    del_lines: list[str] = []
+    ins_lines: list[str] = []
+    section_chunks: list[Chunk] = []
+    mode: Literal["keep", "add", "delete"] = "keep"
+    index = start_index
+    orig_index = index
+
+    while index < len(lines):
+        raw = lines[index]
+        if (
+            raw.startswith("@@")
+            or raw.startswith(END_PATCH)
+            or raw.startswith("*** Update File:")
+            or raw.startswith("*** Delete File:")
+            or raw.startswith("*** Add File:")
+            or raw.startswith(END_FILE)
+        ):
+            break
+        if raw == "***":
+            break
+        if raw.startswith("***"):
+            raise ValueError(f"Invalid Line: {raw}")
+
+        index += 1
+        last_mode = mode
+        line = raw if raw else " "
+        prefix = line[0]
+        if prefix == "+":
+            mode = "add"
+        elif prefix == "-":
+            mode = "delete"
+        elif prefix == " ":
+            mode = "keep"
+        else:
+            raise ValueError(f"Invalid Line: {line}")
+
+        line_content = line[1:]
+        switching_to_context = mode == "keep" and last_mode != mode
+        if switching_to_context and (del_lines or ins_lines):
+            section_chunks.append(
+                Chunk(
+                    orig_index=len(context) - len(del_lines),
+                    del_lines=list(del_lines),
+                    ins_lines=list(ins_lines),
+                )
+            )
+            del_lines = []
+            ins_lines = []
+
+        if mode == "delete":
+            del_lines.append(line_content)
+            context.append(line_content)
+        elif mode == "add":
+            ins_lines.append(line_content)
+        else:
+            context.append(line_content)
+
+    if del_lines or ins_lines:
+        section_chunks.append(
+            Chunk(
+                orig_index=len(context) - len(del_lines),
+                del_lines=list(del_lines),
+                ins_lines=list(ins_lines),
+            )
+        )
+
+    if index < len(lines) and lines[index] == END_FILE:
+        return ReadSectionResult(context, section_chunks, index + 1, True)
+
+    if index == orig_index:
+        next_line = lines[index] if index < len(lines) else ""
+        raise ValueError(f"Nothing in this section - index={index} {next_line}")
+
+    return ReadSectionResult(context, section_chunks, index, False)
+
+
+@dataclass
+class ContextMatch:
+    new_index: int
+    fuzz: int
+
+
+def _find_context(lines: list[str], context: list[str], start: int, eof: bool) -> ContextMatch:
+    if eof:
+        end_start = max(0, len(lines) - len(context))
+        end_match = _find_context_core(lines, context, end_start)
+        if end_match.new_index != -1:
+            return end_match
+        fallback = _find_context_core(lines, context, start)
+        return ContextMatch(new_index=fallback.new_index, fuzz=fallback.fuzz + 10000)
+    return _find_context_core(lines, context, start)
+
+
+def _find_context_core(lines: list[str], context: list[str], start: int) -> ContextMatch:
+    if not context:
+        return ContextMatch(new_index=start, fuzz=0)
+
+    for i in range(start, len(lines)):
+        if _equals_slice(lines, context, i, lambda value: value):
+            return ContextMatch(new_index=i, fuzz=0)
+    for i in range(start, len(lines)):
+        if _equals_slice(lines, context, i, lambda value: value.rstrip()):
+            return ContextMatch(new_index=i, fuzz=1)
+    for i in range(start, len(lines)):
+        if _equals_slice(lines, context, i, lambda value: value.strip()):
+            return ContextMatch(new_index=i, fuzz=100)
+
+    return ContextMatch(new_index=-1, fuzz=0)
+
+
+def _equals_slice(
+    source: list[str], target: list[str], start: int, map_fn: Callable[[str], str]
+) -> bool:
+    if start + len(target) > len(source):
+        return False
+    for offset, target_value in enumerate(target):
+        if map_fn(source[start + offset]) != map_fn(target_value):
+            return False
+    return True
+
+
+def _apply_chunks(input: str, chunks: list[Chunk]) -> str:
+    orig_lines = input.split("\n")
+    dest_lines: list[str] = []
+    cursor = 0
+
+    for chunk in chunks:
+        if chunk.orig_index > len(orig_lines):
+            raise ValueError(
+                f"applyDiff: chunk.origIndex {chunk.orig_index} > input length {len(orig_lines)}"
+            )
+        if cursor > chunk.orig_index:
+            raise ValueError(
+                f"applyDiff: overlapping chunk at {chunk.orig_index} (cursor {cursor})"
+            )
+
+        dest_lines.extend(orig_lines[cursor : chunk.orig_index])
+        cursor = chunk.orig_index
+
+        if chunk.ins_lines:
+            dest_lines.extend(chunk.ins_lines)
+
+        cursor += len(chunk.del_lines)
+
+    dest_lines.extend(orig_lines[cursor:])
+    return "\n".join(dest_lines)
+
+
+__all__ = ["apply_diff"]
agents/editor.py ADDED
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+import sys
+from dataclasses import dataclass
+from typing import Literal, Protocol, runtime_checkable
+
+from .run_context import RunContextWrapper
+from .util._types import MaybeAwaitable
+
+ApplyPatchOperationType = Literal["create_file", "update_file", "delete_file"]
+
+_DATACLASS_KWARGS = {"slots": True} if sys.version_info >= (3, 10) else {}
+
+
+@dataclass(**_DATACLASS_KWARGS)
+class ApplyPatchOperation:
+    """Represents a single apply_patch editor operation requested by the model."""
+
+    type: ApplyPatchOperationType
+    path: str
+    diff: str | None = None
+    ctx_wrapper: RunContextWrapper | None = None
+
+
+@dataclass(**_DATACLASS_KWARGS)
+class ApplyPatchResult:
+    """Optional metadata returned by editor operations."""
+
+    status: Literal["completed", "failed"] | None = None
+    output: str | None = None
+
+
+@runtime_checkable
+class ApplyPatchEditor(Protocol):
+    """Host-defined editor that applies diffs on disk."""
+
+    def create_file(
+        self, operation: ApplyPatchOperation
+    ) -> MaybeAwaitable[ApplyPatchResult | str | None]: ...
+
+    def update_file(
+        self, operation: ApplyPatchOperation
+    ) -> MaybeAwaitable[ApplyPatchResult | str | None]: ...
+
+    def delete_file(
+        self, operation: ApplyPatchOperation
+    ) -> MaybeAwaitable[ApplyPatchResult | str | None]: ...
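Usage note: a sketch of a host-side editor satisfying this protocol. The LocalFileEditor name is illustrative, and its use of apply_diff is an assumption; how the runner actually invokes the editor is defined elsewhere in the package (see agents/tool.py in the file list), not in this excerpt.

from pathlib import Path

from agents.apply_diff import apply_diff
from agents.editor import ApplyPatchOperation, ApplyPatchResult


class LocalFileEditor:
    """Hypothetical editor that applies patches directly to the local filesystem."""

    def create_file(self, operation: ApplyPatchOperation) -> ApplyPatchResult:
        # For create_file the diff contains only "+"-prefixed lines.
        content = apply_diff("", operation.diff or "", mode="create")
        Path(operation.path).write_text(content)
        return ApplyPatchResult(status="completed")

    def update_file(self, operation: ApplyPatchOperation) -> ApplyPatchResult:
        target = Path(operation.path)
        target.write_text(apply_diff(target.read_text(), operation.diff or ""))
        return ApplyPatchResult(status="completed")

    def delete_file(self, operation: ApplyPatchOperation) -> ApplyPatchResult:
        Path(operation.path).unlink()
        return ApplyPatchResult(status="completed")

Because ApplyPatchEditor is a runtime-checkable Protocol, a class like this satisfies it structurally without subclassing.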
agents/exceptions.py CHANGED
@@ -8,6 +8,11 @@ if TYPE_CHECKING:
     from .guardrail import InputGuardrailResult, OutputGuardrailResult
     from .items import ModelResponse, RunItem, TResponseInputItem
     from .run_context import RunContextWrapper
+    from .tool_guardrails import (
+        ToolGuardrailFunctionOutput,
+        ToolInputGuardrail,
+        ToolOutputGuardrail,
+    )

 from .util._pretty_print import pretty_print_run_error_details

@@ -94,3 +99,33 @@ class OutputGuardrailTripwireTriggered(AgentsException):
         super().__init__(
             f"Guardrail {guardrail_result.guardrail.__class__.__name__} triggered tripwire"
         )
+
+
+class ToolInputGuardrailTripwireTriggered(AgentsException):
+    """Exception raised when a tool input guardrail tripwire is triggered."""
+
+    guardrail: ToolInputGuardrail[Any]
+    """The guardrail that was triggered."""
+
+    output: ToolGuardrailFunctionOutput
+    """The output from the guardrail function."""
+
+    def __init__(self, guardrail: ToolInputGuardrail[Any], output: ToolGuardrailFunctionOutput):
+        self.guardrail = guardrail
+        self.output = output
+        super().__init__(f"Tool input guardrail {guardrail.__class__.__name__} triggered tripwire")
+
+
+class ToolOutputGuardrailTripwireTriggered(AgentsException):
+    """Exception raised when a tool output guardrail tripwire is triggered."""
+
+    guardrail: ToolOutputGuardrail[Any]
+    """The guardrail that was triggered."""
+
+    output: ToolGuardrailFunctionOutput
+    """The output from the guardrail function."""
+
+    def __init__(self, guardrail: ToolOutputGuardrail[Any], output: ToolGuardrailFunctionOutput):
+        self.guardrail = guardrail
+        self.output = output
+        super().__init__(f"Tool output guardrail {guardrail.__class__.__name__} triggered tripwire")
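Usage note: a sketch of catching the new tool guardrail exceptions from calling code. Agent/Runner usage follows the package's existing public API; how guardrails are attached to tools (agents/tool_guardrails.py in the file list) is not shown in this excerpt.

import asyncio

from agents import Agent, Runner
from agents.exceptions import (
    ToolInputGuardrailTripwireTriggered,
    ToolOutputGuardrailTripwireTriggered,
)


async def main() -> None:
    agent = Agent(name="Assistant")  # tools carrying guardrails would be attached here
    try:
        result = await Runner.run(agent, "do something")
        print(result.final_output)
    except ToolInputGuardrailTripwireTriggered as exc:
        # exc.guardrail is the tripped guardrail; exc.output is its ToolGuardrailFunctionOutput.
        print("tool input guardrail tripped:", exc.output)
    except ToolOutputGuardrailTripwireTriggered as exc:
        print("tool output guardrail tripped:", exc.output)


asyncio.run(main())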
agents/extensions/experimental/__init__.py ADDED
@@ -0,0 +1,6 @@
+# This package contains experimental extensions to the agents package.
+# The interface and implementation details could be changed until being GAed.
+
+__all__ = [
+    "codex",
+]
agents/extensions/experimental/codex/__init__.py ADDED
@@ -0,0 +1,92 @@
+from .codex import Codex
+from .codex_options import CodexOptions
+from .codex_tool import (
+    CodexToolOptions,
+    CodexToolResult,
+    CodexToolStreamEvent,
+    OutputSchemaDescriptor,
+    codex_tool,
+)
+from .events import (
+    ItemCompletedEvent,
+    ItemStartedEvent,
+    ItemUpdatedEvent,
+    ThreadError,
+    ThreadErrorEvent,
+    ThreadEvent,
+    ThreadStartedEvent,
+    TurnCompletedEvent,
+    TurnFailedEvent,
+    TurnStartedEvent,
+    Usage,
+)
+from .items import (
+    AgentMessageItem,
+    CommandExecutionItem,
+    ErrorItem,
+    FileChangeItem,
+    FileUpdateChange,
+    McpToolCallError,
+    McpToolCallItem,
+    McpToolCallResult,
+    ReasoningItem,
+    ThreadItem,
+    TodoItem,
+    TodoListItem,
+    WebSearchItem,
+)
+from .thread import Input, RunResult, RunStreamedResult, Thread, Turn, UserInput
+from .thread_options import (
+    ApprovalMode,
+    ModelReasoningEffort,
+    SandboxMode,
+    ThreadOptions,
+    WebSearchMode,
+)
+from .turn_options import TurnOptions
+
+__all__ = [
+    "Codex",
+    "CodexOptions",
+    "Thread",
+    "Turn",
+    "RunResult",
+    "RunStreamedResult",
+    "Input",
+    "UserInput",
+    "ThreadOptions",
+    "TurnOptions",
+    "ApprovalMode",
+    "SandboxMode",
+    "ModelReasoningEffort",
+    "WebSearchMode",
+    "ThreadEvent",
+    "ThreadStartedEvent",
+    "TurnStartedEvent",
+    "TurnCompletedEvent",
+    "TurnFailedEvent",
+    "ItemStartedEvent",
+    "ItemUpdatedEvent",
+    "ItemCompletedEvent",
+    "ThreadError",
+    "ThreadErrorEvent",
+    "Usage",
+    "ThreadItem",
+    "AgentMessageItem",
+    "ReasoningItem",
+    "CommandExecutionItem",
+    "FileChangeItem",
+    "FileUpdateChange",
+    "McpToolCallItem",
+    "McpToolCallResult",
+    "McpToolCallError",
+    "WebSearchItem",
+    "TodoItem",
+    "TodoListItem",
+    "ErrorItem",
+    "codex_tool",
+    "CodexToolOptions",
+    "CodexToolResult",
+    "CodexToolStreamEvent",
+    "OutputSchemaDescriptor",
+]
agents/extensions/experimental/codex/codex.py ADDED
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from typing import Any, overload
+
+from agents.exceptions import UserError
+
+from .codex_options import CodexOptions, coerce_codex_options
+from .exec import CodexExec
+from .thread import Thread
+from .thread_options import ThreadOptions, coerce_thread_options
+
+
+class _UnsetType:
+    pass
+
+
+_UNSET = _UnsetType()
+
+
+class Codex:
+    @overload
+    def __init__(self, options: CodexOptions | Mapping[str, Any] | None = None) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        *,
+        codex_path_override: str | None = None,
+        base_url: str | None = None,
+        api_key: str | None = None,
+        env: Mapping[str, str] | None = None,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        options: CodexOptions | Mapping[str, Any] | None = None,
+        *,
+        codex_path_override: str | None | _UnsetType = _UNSET,
+        base_url: str | None | _UnsetType = _UNSET,
+        api_key: str | None | _UnsetType = _UNSET,
+        env: Mapping[str, str] | None | _UnsetType = _UNSET,
+    ) -> None:
+        kw_values = {
+            "codex_path_override": codex_path_override,
+            "base_url": base_url,
+            "api_key": api_key,
+            "env": env,
+        }
+        has_kwargs = any(value is not _UNSET for value in kw_values.values())
+        if options is not None and has_kwargs:
+            raise UserError(
+                "Codex options must be provided as a CodexOptions/mapping or keyword arguments, "
+                "not both."
+            )
+        if has_kwargs:
+            options = {key: value for key, value in kw_values.items() if value is not _UNSET}
+        resolved_options = coerce_codex_options(options) or CodexOptions()
+        self._exec = CodexExec(
+            executable_path=resolved_options.codex_path_override,
+            env=_normalize_env(resolved_options),
+        )
+        self._options = resolved_options
+
+    def start_thread(self, options: ThreadOptions | Mapping[str, Any] | None = None) -> Thread:
+        resolved_options = coerce_thread_options(options) or ThreadOptions()
+        return Thread(
+            exec_client=self._exec,
+            options=self._options,
+            thread_options=resolved_options,
+        )
+
+    def resume_thread(
+        self, thread_id: str, options: ThreadOptions | Mapping[str, Any] | None = None
+    ) -> Thread:
+        resolved_options = coerce_thread_options(options) or ThreadOptions()
+        return Thread(
+            exec_client=self._exec,
+            options=self._options,
+            thread_options=resolved_options,
+            thread_id=thread_id,
+        )
+
+
+def _normalize_env(options: CodexOptions) -> dict[str, str] | None:
+    if options.env is None:
+        return None
+    # Normalize mapping values to strings for subprocess environment.
+    return {str(key): str(value) for key, value in options.env.items()}
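Usage note: a construction sketch based only on what this file shows. The API key, base URL, and thread id are placeholders, and the methods for actually running turns live in thread.py, which is not part of this excerpt.

from agents.extensions.experimental.codex import Codex

# Keyword form; mixing an options object/mapping with keyword arguments raises UserError.
client = Codex(api_key="sk-placeholder", base_url="https://api.openai.com/v1")

thread = client.start_thread()                   # new Codex CLI thread
resumed = client.resume_thread("thread_abc123")  # reattach to an existing thread id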
agents/extensions/experimental/codex/codex_options.py ADDED
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+from collections.abc import Mapping
+from dataclasses import dataclass, fields
+from typing import Any
+
+from agents.exceptions import UserError
+
+
+@dataclass(frozen=True)
+class CodexOptions:
+    # Optional absolute path to the codex CLI binary.
+    codex_path_override: str | None = None
+    # Override OpenAI base URL for the Codex CLI process.
+    base_url: str | None = None
+    # API key passed to the Codex CLI (CODEX_API_KEY).
+    api_key: str | None = None
+    # Environment variables for the Codex CLI process (do not inherit os.environ).
+    env: Mapping[str, str] | None = None
+
+
+def coerce_codex_options(
+    options: CodexOptions | Mapping[str, Any] | None,
+) -> CodexOptions | None:
+    if options is None or isinstance(options, CodexOptions):
+        return options
+    if not isinstance(options, Mapping):
+        raise UserError("CodexOptions must be a CodexOptions or a mapping.")
+
+    allowed = {field.name for field in fields(CodexOptions)}
+    unknown = set(options.keys()) - allowed
+    if unknown:
+        raise UserError(f"Unknown CodexOptions field(s): {sorted(unknown)}")
+
+    return CodexOptions(**dict(options))
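Usage note: how the mapping form is validated by coerce_codex_options above; the values are placeholders.

from agents.extensions.experimental.codex.codex_options import (
    CodexOptions,
    coerce_codex_options,
)

opts = coerce_codex_options({"api_key": "sk-placeholder", "env": {"RUST_LOG": "info"}})
assert isinstance(opts, CodexOptions)

# Unknown keys are rejected rather than silently ignored, e.g.
# coerce_codex_options({"apikey": "typo"}) raises UserError: Unknown CodexOptions field(s): ['apikey']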