openai-agents 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of openai-agents has been flagged as possibly problematic.

Files changed (53)
  1. agents/__init__.py +223 -0
  2. agents/_config.py +23 -0
  3. agents/_debug.py +17 -0
  4. agents/_run_impl.py +792 -0
  5. agents/_utils.py +61 -0
  6. agents/agent.py +159 -0
  7. agents/agent_output.py +144 -0
  8. agents/computer.py +107 -0
  9. agents/exceptions.py +63 -0
  10. agents/extensions/handoff_filters.py +67 -0
  11. agents/extensions/handoff_prompt.py +19 -0
  12. agents/function_schema.py +340 -0
  13. agents/guardrail.py +320 -0
  14. agents/handoffs.py +236 -0
  15. agents/items.py +246 -0
  16. agents/lifecycle.py +105 -0
  17. agents/logger.py +3 -0
  18. agents/model_settings.py +36 -0
  19. agents/models/__init__.py +0 -0
  20. agents/models/_openai_shared.py +34 -0
  21. agents/models/fake_id.py +5 -0
  22. agents/models/interface.py +107 -0
  23. agents/models/openai_chatcompletions.py +952 -0
  24. agents/models/openai_provider.py +65 -0
  25. agents/models/openai_responses.py +384 -0
  26. agents/result.py +220 -0
  27. agents/run.py +904 -0
  28. agents/run_context.py +26 -0
  29. agents/stream_events.py +58 -0
  30. agents/strict_schema.py +167 -0
  31. agents/tool.py +288 -0
  32. agents/tracing/__init__.py +97 -0
  33. agents/tracing/create.py +306 -0
  34. agents/tracing/logger.py +3 -0
  35. agents/tracing/processor_interface.py +69 -0
  36. agents/tracing/processors.py +261 -0
  37. agents/tracing/scope.py +45 -0
  38. agents/tracing/setup.py +211 -0
  39. agents/tracing/span_data.py +188 -0
  40. agents/tracing/spans.py +264 -0
  41. agents/tracing/traces.py +195 -0
  42. agents/tracing/util.py +17 -0
  43. agents/usage.py +22 -0
  44. agents/version.py +7 -0
  45. openai_agents-0.0.3.dist-info/METADATA +204 -0
  46. openai_agents-0.0.3.dist-info/RECORD +49 -0
  47. openai_agents-0.0.3.dist-info/licenses/LICENSE +21 -0
  48. openai-agents/example.py +0 -2
  49. openai_agents-0.0.1.dist-info/METADATA +0 -17
  50. openai_agents-0.0.1.dist-info/RECORD +0 -6
  51. openai_agents-0.0.1.dist-info/licenses/LICENSE +0 -20
  52. {openai-agents → agents/extensions}/__init__.py +0 -0
  53. {openai_agents-0.0.1.dist-info → openai_agents-0.0.3.dist-info}/WHEEL +0 -0
agents/models/openai_chatcompletions.py
@@ -0,0 +1,952 @@
+ from __future__ import annotations
+
+ import dataclasses
+ import json
+ import time
+ from collections.abc import AsyncIterator, Iterable
+ from dataclasses import dataclass, field
+ from typing import TYPE_CHECKING, Any, Literal, cast, overload
+
+ from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven
+ from openai.types import ChatModel
+ from openai.types.chat import (
+     ChatCompletion,
+     ChatCompletionAssistantMessageParam,
+     ChatCompletionChunk,
+     ChatCompletionContentPartImageParam,
+     ChatCompletionContentPartParam,
+     ChatCompletionContentPartTextParam,
+     ChatCompletionDeveloperMessageParam,
+     ChatCompletionMessage,
+     ChatCompletionMessageParam,
+     ChatCompletionMessageToolCallParam,
+     ChatCompletionSystemMessageParam,
+     ChatCompletionToolChoiceOptionParam,
+     ChatCompletionToolMessageParam,
+     ChatCompletionUserMessageParam,
+ )
+ from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+ from openai.types.chat.completion_create_params import ResponseFormat
+ from openai.types.completion_usage import CompletionUsage
+ from openai.types.responses import (
+     EasyInputMessageParam,
+     Response,
+     ResponseCompletedEvent,
+     ResponseContentPartAddedEvent,
+     ResponseContentPartDoneEvent,
+     ResponseCreatedEvent,
+     ResponseFileSearchToolCallParam,
+     ResponseFunctionCallArgumentsDeltaEvent,
+     ResponseFunctionToolCall,
+     ResponseFunctionToolCallParam,
+     ResponseInputContentParam,
+     ResponseInputImageParam,
+     ResponseInputTextParam,
+     ResponseOutputItem,
+     ResponseOutputItemAddedEvent,
+     ResponseOutputItemDoneEvent,
+     ResponseOutputMessage,
+     ResponseOutputMessageParam,
+     ResponseOutputRefusal,
+     ResponseOutputText,
+     ResponseRefusalDeltaEvent,
+     ResponseTextDeltaEvent,
+ )
+ from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+
+ from .. import _debug
+ from ..agent_output import AgentOutputSchema
+ from ..exceptions import AgentsException, UserError
+ from ..handoffs import Handoff
+ from ..items import ModelResponse, TResponseInputItem, TResponseOutputItem, TResponseStreamEvent
+ from ..logger import logger
+ from ..tool import FunctionTool, Tool
+ from ..tracing import generation_span
+ from ..tracing.span_data import GenerationSpanData
+ from ..tracing.spans import Span
+ from ..usage import Usage
+ from ..version import __version__
+ from .fake_id import FAKE_RESPONSES_ID
+ from .interface import Model, ModelTracing
+
+ if TYPE_CHECKING:
+     from ..model_settings import ModelSettings
+
+
+ _USER_AGENT = f"Agents/Python {__version__}"
+ _HEADERS = {"User-Agent": _USER_AGENT}
+
+
+ @dataclass
+ class _StreamingState:
+     started: bool = False
+     text_content_index_and_output: tuple[int, ResponseOutputText] | None = None
+     refusal_content_index_and_output: tuple[int, ResponseOutputRefusal] | None = None
+     function_calls: dict[int, ResponseFunctionToolCall] = field(default_factory=dict)
+
+
+ class OpenAIChatCompletionsModel(Model):
+     def __init__(
+         self,
+         model: str | ChatModel,
+         openai_client: AsyncOpenAI,
+     ) -> None:
+         self.model = model
+         self._client = openai_client
+
+     def _non_null_or_not_given(self, value: Any) -> Any:
+         return value if value is not None else NOT_GIVEN
+
+     async def get_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+         tracing: ModelTracing,
+     ) -> ModelResponse:
+         with generation_span(
+             model=str(self.model),
+             model_config=dataclasses.asdict(model_settings)
+             | {"base_url": str(self._client.base_url)},
+             disabled=tracing.is_disabled(),
+         ) as span_generation:
+             response = await self._fetch_response(
+                 system_instructions,
+                 input,
+                 model_settings,
+                 tools,
+                 output_schema,
+                 handoffs,
+                 span_generation,
+                 tracing,
+                 stream=False,
+             )
+
+             if _debug.DONT_LOG_MODEL_DATA:
+                 logger.debug("Received model response")
+             else:
+                 logger.debug(
+                     f"LLM resp:\n{json.dumps(response.choices[0].message.model_dump(), indent=2)}\n"
+                 )
+
+             usage = (
+                 Usage(
+                     requests=1,
+                     input_tokens=response.usage.prompt_tokens,
+                     output_tokens=response.usage.completion_tokens,
+                     total_tokens=response.usage.total_tokens,
+                 )
+                 if response.usage
+                 else Usage()
+             )
+             if tracing.include_data():
+                 span_generation.span_data.output = [response.choices[0].message.model_dump()]
+             span_generation.span_data.usage = {
+                 "input_tokens": usage.input_tokens,
+                 "output_tokens": usage.output_tokens,
+             }
+
+             items = _Converter.message_to_output_items(response.choices[0].message)
+
+             return ModelResponse(
+                 output=items,
+                 usage=usage,
+                 referenceable_id=None,
+             )
+
+     async def stream_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+         tracing: ModelTracing,
+     ) -> AsyncIterator[TResponseStreamEvent]:
+         """
+         Yields a partial message as it is generated, as well as the usage information.
+         """
+         with generation_span(
+             model=str(self.model),
+             model_config=dataclasses.asdict(model_settings)
+             | {"base_url": str(self._client.base_url)},
+             disabled=tracing.is_disabled(),
+         ) as span_generation:
+             response, stream = await self._fetch_response(
+                 system_instructions,
+                 input,
+                 model_settings,
+                 tools,
+                 output_schema,
+                 handoffs,
+                 span_generation,
+                 tracing,
+                 stream=True,
+             )
+
+             usage: CompletionUsage | None = None
+             state = _StreamingState()
+
+             async for chunk in stream:
+                 if not state.started:
+                     state.started = True
+                     yield ResponseCreatedEvent(
+                         response=response,
+                         type="response.created",
+                     )
+
+                 # The usage is only available in the last chunk
+                 usage = chunk.usage
+
+                 if not chunk.choices or not chunk.choices[0].delta:
+                     continue
+
+                 delta = chunk.choices[0].delta
+
+                 # Handle text
+                 if delta.content:
+                     if not state.text_content_index_and_output:
+                         # Initialize a content tracker for streaming text
+                         state.text_content_index_and_output = (
+                             0 if not state.refusal_content_index_and_output else 1,
+                             ResponseOutputText(
+                                 text="",
+                                 type="output_text",
+                                 annotations=[],
+                             ),
+                         )
+                         # Start a new assistant message stream
+                         assistant_item = ResponseOutputMessage(
+                             id=FAKE_RESPONSES_ID,
+                             content=[],
+                             role="assistant",
+                             type="message",
+                             status="in_progress",
+                         )
+                         # Notify consumers of the start of a new output message + first content part
+                         yield ResponseOutputItemAddedEvent(
+                             item=assistant_item,
+                             output_index=0,
+                             type="response.output_item.added",
+                         )
+                         yield ResponseContentPartAddedEvent(
+                             content_index=state.text_content_index_and_output[0],
+                             item_id=FAKE_RESPONSES_ID,
+                             output_index=0,
+                             part=ResponseOutputText(
+                                 text="",
+                                 type="output_text",
+                                 annotations=[],
+                             ),
+                             type="response.content_part.added",
+                         )
+                     # Emit the delta for this segment of content
+                     yield ResponseTextDeltaEvent(
+                         content_index=state.text_content_index_and_output[0],
+                         delta=delta.content,
+                         item_id=FAKE_RESPONSES_ID,
+                         output_index=0,
+                         type="response.output_text.delta",
+                     )
+                     # Accumulate the text into the response part
+                     state.text_content_index_and_output[1].text += delta.content
+
+                 # Handle refusals (model declines to answer)
+                 if delta.refusal:
+                     if not state.refusal_content_index_and_output:
+                         # Initialize a content tracker for streaming refusal text
+                         state.refusal_content_index_and_output = (
+                             0 if not state.text_content_index_and_output else 1,
+                             ResponseOutputRefusal(refusal="", type="refusal"),
+                         )
+                         # Start a new assistant message if one doesn't exist yet (in-progress)
+                         assistant_item = ResponseOutputMessage(
+                             id=FAKE_RESPONSES_ID,
+                             content=[],
+                             role="assistant",
+                             type="message",
+                             status="in_progress",
+                         )
+                         # Notify downstream that assistant message + first content part are starting
+                         yield ResponseOutputItemAddedEvent(
+                             item=assistant_item,
+                             output_index=0,
+                             type="response.output_item.added",
+                         )
+                         yield ResponseContentPartAddedEvent(
+                             content_index=state.refusal_content_index_and_output[0],
+                             item_id=FAKE_RESPONSES_ID,
+                             output_index=0,
+                             part=ResponseOutputText(
+                                 text="",
+                                 type="output_text",
+                                 annotations=[],
+                             ),
+                             type="response.content_part.added",
+                         )
+                     # Emit the delta for this segment of refusal
+                     yield ResponseRefusalDeltaEvent(
+                         content_index=state.refusal_content_index_and_output[0],
+                         delta=delta.refusal,
+                         item_id=FAKE_RESPONSES_ID,
+                         output_index=0,
+                         type="response.refusal.delta",
+                     )
+                     # Accumulate the refusal string in the output part
+                     state.refusal_content_index_and_output[1].refusal += delta.refusal
+
+                 # Handle tool calls
+                 # Because we don't know the name of the function until the end of the stream, we'll
+                 # save everything and yield events at the end
+                 if delta.tool_calls:
+                     for tc_delta in delta.tool_calls:
+                         if tc_delta.index not in state.function_calls:
+                             state.function_calls[tc_delta.index] = ResponseFunctionToolCall(
+                                 id=FAKE_RESPONSES_ID,
+                                 arguments="",
+                                 name="",
+                                 type="function_call",
+                                 call_id="",
+                             )
+                         tc_function = tc_delta.function
+
+                         state.function_calls[tc_delta.index].arguments += (
+                             tc_function.arguments if tc_function else ""
+                         ) or ""
+                         state.function_calls[tc_delta.index].name += (
+                             tc_function.name if tc_function else ""
+                         ) or ""
+                         state.function_calls[tc_delta.index].call_id += tc_delta.id or ""
+
+             function_call_starting_index = 0
+             if state.text_content_index_and_output:
+                 function_call_starting_index += 1
+                 # Send end event for this content part
+                 yield ResponseContentPartDoneEvent(
+                     content_index=state.text_content_index_and_output[0],
+                     item_id=FAKE_RESPONSES_ID,
+                     output_index=0,
+                     part=state.text_content_index_and_output[1],
+                     type="response.content_part.done",
+                 )
+
+             if state.refusal_content_index_and_output:
+                 function_call_starting_index += 1
+                 # Send end event for this content part
+                 yield ResponseContentPartDoneEvent(
+                     content_index=state.refusal_content_index_and_output[0],
+                     item_id=FAKE_RESPONSES_ID,
+                     output_index=0,
+                     part=state.refusal_content_index_and_output[1],
+                     type="response.content_part.done",
+                 )
+
+             # Actually send events for the function calls
+             for function_call in state.function_calls.values():
+                 # First, a ResponseOutputItemAdded for the function call
+                 yield ResponseOutputItemAddedEvent(
+                     item=ResponseFunctionToolCall(
+                         id=FAKE_RESPONSES_ID,
+                         call_id=function_call.call_id,
+                         arguments=function_call.arguments,
+                         name=function_call.name,
+                         type="function_call",
+                     ),
+                     output_index=function_call_starting_index,
+                     type="response.output_item.added",
+                 )
+                 # Then, yield the args
+                 yield ResponseFunctionCallArgumentsDeltaEvent(
+                     delta=function_call.arguments,
+                     item_id=FAKE_RESPONSES_ID,
+                     output_index=function_call_starting_index,
+                     type="response.function_call_arguments.delta",
+                 )
+                 # Finally, the ResponseOutputItemDone
+                 yield ResponseOutputItemDoneEvent(
+                     item=ResponseFunctionToolCall(
+                         id=FAKE_RESPONSES_ID,
+                         call_id=function_call.call_id,
+                         arguments=function_call.arguments,
+                         name=function_call.name,
+                         type="function_call",
+                     ),
+                     output_index=function_call_starting_index,
+                     type="response.output_item.done",
+                 )
+
+             # Finally, send the Response completed event
+             outputs: list[ResponseOutputItem] = []
+             if state.text_content_index_and_output or state.refusal_content_index_and_output:
+                 assistant_msg = ResponseOutputMessage(
+                     id=FAKE_RESPONSES_ID,
+                     content=[],
+                     role="assistant",
+                     type="message",
+                     status="completed",
+                 )
+                 if state.text_content_index_and_output:
+                     assistant_msg.content.append(state.text_content_index_and_output[1])
+                 if state.refusal_content_index_and_output:
+                     assistant_msg.content.append(state.refusal_content_index_and_output[1])
+                 outputs.append(assistant_msg)
+
+                 # send a ResponseOutputItemDone for the assistant message
+                 yield ResponseOutputItemDoneEvent(
+                     item=assistant_msg,
+                     output_index=0,
+                     type="response.output_item.done",
+                 )
+
+             for function_call in state.function_calls.values():
+                 outputs.append(function_call)
+
+             final_response = response.model_copy(update={"output": outputs, "usage": usage})
+
+             yield ResponseCompletedEvent(
+                 response=final_response,
+                 type="response.completed",
+             )
+             if tracing.include_data():
+                 span_generation.span_data.output = [final_response.model_dump()]
+
+             if usage:
+                 span_generation.span_data.usage = {
+                     "input_tokens": usage.prompt_tokens,
+                     "output_tokens": usage.completion_tokens,
+                 }
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[True],
+     ) -> tuple[Response, AsyncStream[ChatCompletionChunk]]: ...
+
+     @overload
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: Literal[False],
+     ) -> ChatCompletion: ...
+
+     async def _fetch_response(
+         self,
+         system_instructions: str | None,
+         input: str | list[TResponseInputItem],
+         model_settings: ModelSettings,
+         tools: list[Tool],
+         output_schema: AgentOutputSchema | None,
+         handoffs: list[Handoff],
+         span: Span[GenerationSpanData],
+         tracing: ModelTracing,
+         stream: bool = False,
+     ) -> ChatCompletion | tuple[Response, AsyncStream[ChatCompletionChunk]]:
+         converted_messages = _Converter.items_to_messages(input)
+
+         if system_instructions:
+             converted_messages.insert(
+                 0,
+                 {
+                     "content": system_instructions,
+                     "role": "system",
+                 },
+             )
+         if tracing.include_data():
+             span.span_data.input = converted_messages
+
+         parallel_tool_calls = (
+             True if model_settings.parallel_tool_calls and tools and len(tools) > 0 else NOT_GIVEN
+         )
+         tool_choice = _Converter.convert_tool_choice(model_settings.tool_choice)
+         response_format = _Converter.convert_response_format(output_schema)
+
+         converted_tools = [ToolConverter.to_openai(tool) for tool in tools] if tools else []
+
+         for handoff in handoffs:
+             converted_tools.append(ToolConverter.convert_handoff_tool(handoff))
+
+         if _debug.DONT_LOG_MODEL_DATA:
+             logger.debug("Calling LLM")
+         else:
+             logger.debug(
+                 f"{json.dumps(converted_messages, indent=2)}\n"
+                 f"Tools:\n{json.dumps(converted_tools, indent=2)}\n"
+                 f"Stream: {stream}\n"
+                 f"Tool choice: {tool_choice}\n"
+                 f"Response format: {response_format}\n"
+             )
+
+         ret = await self._get_client().chat.completions.create(
+             model=self.model,
+             messages=converted_messages,
+             tools=converted_tools or NOT_GIVEN,
+             temperature=self._non_null_or_not_given(model_settings.temperature),
+             top_p=self._non_null_or_not_given(model_settings.top_p),
+             frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
+             presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
+             tool_choice=tool_choice,
+             response_format=response_format,
+             parallel_tool_calls=parallel_tool_calls,
+             stream=stream,
+             stream_options={"include_usage": True} if stream else NOT_GIVEN,
+             extra_headers=_HEADERS,
+         )
+
+         if isinstance(ret, ChatCompletion):
+             return ret
+
+         response = Response(
+             id=FAKE_RESPONSES_ID,
+             created_at=time.time(),
+             model=self.model,
+             object="response",
+             output=[],
+             tool_choice=cast(Literal["auto", "required", "none"], tool_choice)
+             if tool_choice != NOT_GIVEN
+             else "auto",
+             top_p=model_settings.top_p,
+             temperature=model_settings.temperature,
+             tools=[],
+             parallel_tool_calls=parallel_tool_calls or False,
+         )
+         return response, ret
+
+     def _get_client(self) -> AsyncOpenAI:
+         if self._client is None:
+             self._client = AsyncOpenAI()
+         return self._client
+
+
+ class _Converter:
+     @classmethod
+     def convert_tool_choice(
+         cls, tool_choice: Literal["auto", "required", "none"] | str | None
+     ) -> ChatCompletionToolChoiceOptionParam | NotGiven:
+         if tool_choice is None:
+             return NOT_GIVEN
+         elif tool_choice == "auto":
+             return "auto"
+         elif tool_choice == "required":
+             return "required"
+         elif tool_choice == "none":
+             return "none"
+         else:
+             return {
+                 "type": "function",
+                 "function": {
+                     "name": tool_choice,
+                 },
+             }
+
+     @classmethod
+     def convert_response_format(
+         cls, final_output_schema: AgentOutputSchema | None
+     ) -> ResponseFormat | NotGiven:
+         if not final_output_schema or final_output_schema.is_plain_text():
+             return NOT_GIVEN
+
+         return {
+             "type": "json_schema",
+             "json_schema": {
+                 "name": "final_output",
+                 "strict": final_output_schema.strict_json_schema,
+                 "schema": final_output_schema.json_schema(),
+             },
+         }
+
+     @classmethod
+     def message_to_output_items(cls, message: ChatCompletionMessage) -> list[TResponseOutputItem]:
+         items: list[TResponseOutputItem] = []
+
+         message_item = ResponseOutputMessage(
+             id=FAKE_RESPONSES_ID,
+             content=[],
+             role="assistant",
+             type="message",
+             status="completed",
+         )
+         if message.content:
+             message_item.content.append(
+                 ResponseOutputText(text=message.content, type="output_text", annotations=[])
+             )
+         if message.refusal:
+             message_item.content.append(
+                 ResponseOutputRefusal(refusal=message.refusal, type="refusal")
+             )
+         if message.audio:
+             raise AgentsException("Audio is not currently supported")
+
+         if message_item.content:
+             items.append(message_item)
+
+         if message.tool_calls:
+             for tool_call in message.tool_calls:
+                 items.append(
+                     ResponseFunctionToolCall(
+                         id=FAKE_RESPONSES_ID,
+                         call_id=tool_call.id,
+                         arguments=tool_call.function.arguments,
+                         name=tool_call.function.name,
+                         type="function_call",
+                     )
+                 )
+
+         return items
+
+     @classmethod
+     def maybe_easy_input_message(cls, item: Any) -> EasyInputMessageParam | None:
+         if not isinstance(item, dict):
+             return None
+
+         keys = item.keys()
+         # EasyInputMessageParam only has these two keys
+         if keys != {"content", "role"}:
+             return None
+
+         role = item.get("role", None)
+         if role not in ("user", "assistant", "system", "developer"):
+             return None
+
+         if "content" not in item:
+             return None
+
+         return cast(EasyInputMessageParam, item)
+
+     @classmethod
+     def maybe_input_message(cls, item: Any) -> Message | None:
+         if (
+             isinstance(item, dict)
+             and item.get("type") == "message"
+             and item.get("role")
+             in (
+                 "user",
+                 "system",
+                 "developer",
+             )
+         ):
+             return cast(Message, item)
+
+         return None
+
+     @classmethod
+     def maybe_file_search_call(cls, item: Any) -> ResponseFileSearchToolCallParam | None:
+         if isinstance(item, dict) and item.get("type") == "file_search_call":
+             return cast(ResponseFileSearchToolCallParam, item)
+         return None
+
+     @classmethod
+     def maybe_function_tool_call(cls, item: Any) -> ResponseFunctionToolCallParam | None:
+         if isinstance(item, dict) and item.get("type") == "function_call":
+             return cast(ResponseFunctionToolCallParam, item)
+         return None
+
+     @classmethod
+     def maybe_function_tool_call_output(
+         cls,
+         item: Any,
+     ) -> FunctionCallOutput | None:
+         if isinstance(item, dict) and item.get("type") == "function_call_output":
+             return cast(FunctionCallOutput, item)
+         return None
+
+     @classmethod
+     def maybe_item_reference(cls, item: Any) -> ItemReference | None:
+         if isinstance(item, dict) and item.get("type") == "item_reference":
+             return cast(ItemReference, item)
+         return None
+
+     @classmethod
+     def maybe_response_output_message(cls, item: Any) -> ResponseOutputMessageParam | None:
+         # ResponseOutputMessage is only used for messages with role assistant
+         if (
+             isinstance(item, dict)
+             and item.get("type") == "message"
+             and item.get("role") == "assistant"
+         ):
+             return cast(ResponseOutputMessageParam, item)
+         return None
+
+     @classmethod
+     def extract_text_content(
+         cls, content: str | Iterable[ResponseInputContentParam]
+     ) -> str | list[ChatCompletionContentPartTextParam]:
+         all_content = cls.extract_all_content(content)
+         if isinstance(all_content, str):
+             return all_content
+         out: list[ChatCompletionContentPartTextParam] = []
+         for c in all_content:
+             if c.get("type") == "text":
+                 out.append(cast(ChatCompletionContentPartTextParam, c))
+         return out
+
+     @classmethod
+     def extract_all_content(
+         cls, content: str | Iterable[ResponseInputContentParam]
+     ) -> str | list[ChatCompletionContentPartParam]:
+         if isinstance(content, str):
+             return content
+         out: list[ChatCompletionContentPartParam] = []
+
+         for c in content:
+             if isinstance(c, dict) and c.get("type") == "input_text":
+                 casted_text_param = cast(ResponseInputTextParam, c)
+                 out.append(
+                     ChatCompletionContentPartTextParam(
+                         type="text",
+                         text=casted_text_param["text"],
+                     )
+                 )
+             elif isinstance(c, dict) and c.get("type") == "input_image":
+                 casted_image_param = cast(ResponseInputImageParam, c)
+                 if "image_url" not in casted_image_param or not casted_image_param["image_url"]:
+                     raise UserError(
+                         f"Only image URLs are supported for input_image {casted_image_param}"
+                     )
+                 out.append(
+                     ChatCompletionContentPartImageParam(
+                         type="image_url",
+                         image_url={
+                             "url": casted_image_param["image_url"],
+                             "detail": casted_image_param["detail"],
+                         },
+                     )
+                 )
+             elif isinstance(c, dict) and c.get("type") == "input_file":
+                 raise UserError(f"File uploads are not supported for chat completions {c}")
+             else:
+                 raise UserError(f"Unknown content: {c}")
+         return out
+
+     @classmethod
+     def items_to_messages(
+         cls,
+         items: str | Iterable[TResponseInputItem],
+     ) -> list[ChatCompletionMessageParam]:
+         """
+         Convert a sequence of 'Item' objects into a list of ChatCompletionMessageParam.
+
+         Rules:
+         - EasyInputMessage or InputMessage (role=user) => ChatCompletionUserMessageParam
+         - EasyInputMessage or InputMessage (role=system) => ChatCompletionSystemMessageParam
+         - EasyInputMessage or InputMessage (role=developer) => ChatCompletionDeveloperMessageParam
+         - InputMessage (role=assistant) => Start or flush a ChatCompletionAssistantMessageParam
+         - response_output_message => Also produces/flushes a ChatCompletionAssistantMessageParam
+         - tool calls get attached to the *current* assistant message, or create one if none.
+         - tool outputs => ChatCompletionToolMessageParam
+         """
+
+         if isinstance(items, str):
+             return [
+                 ChatCompletionUserMessageParam(
+                     role="user",
+                     content=items,
+                 )
+             ]
+
+         result: list[ChatCompletionMessageParam] = []
+         current_assistant_msg: ChatCompletionAssistantMessageParam | None = None
+
+         def flush_assistant_message() -> None:
+             nonlocal current_assistant_msg
+             if current_assistant_msg is not None:
+                 # The API doesn't support empty arrays for tool_calls
+                 if not current_assistant_msg.get("tool_calls"):
+                     del current_assistant_msg["tool_calls"]
+                 result.append(current_assistant_msg)
+                 current_assistant_msg = None
+
+         def ensure_assistant_message() -> ChatCompletionAssistantMessageParam:
+             nonlocal current_assistant_msg
+             if current_assistant_msg is None:
+                 current_assistant_msg = ChatCompletionAssistantMessageParam(role="assistant")
+                 current_assistant_msg["tool_calls"] = []
+             return current_assistant_msg
+
+         for item in items:
+             # 1) Check easy input message
+             if easy_msg := cls.maybe_easy_input_message(item):
+                 role = easy_msg["role"]
+                 content = easy_msg["content"]
+
+                 if role == "user":
+                     flush_assistant_message()
+                     msg_user: ChatCompletionUserMessageParam = {
+                         "role": "user",
+                         "content": cls.extract_all_content(content),
+                     }
+                     result.append(msg_user)
+                 elif role == "system":
+                     flush_assistant_message()
+                     msg_system: ChatCompletionSystemMessageParam = {
+                         "role": "system",
+                         "content": cls.extract_text_content(content),
+                     }
+                     result.append(msg_system)
+                 elif role == "developer":
+                     flush_assistant_message()
+                     msg_developer: ChatCompletionDeveloperMessageParam = {
+                         "role": "developer",
+                         "content": cls.extract_text_content(content),
+                     }
+                     result.append(msg_developer)
+                 else:
+                     raise UserError(f"Unexpected role in easy_input_message: {role}")
+
+             # 2) Check input message
+             elif in_msg := cls.maybe_input_message(item):
+                 role = in_msg["role"]
+                 content = in_msg["content"]
+                 flush_assistant_message()
+
+                 if role == "user":
+                     msg_user = {
+                         "role": "user",
+                         "content": cls.extract_all_content(content),
+                     }
+                     result.append(msg_user)
+                 elif role == "system":
+                     msg_system = {
+                         "role": "system",
+                         "content": cls.extract_text_content(content),
+                     }
+                     result.append(msg_system)
+                 elif role == "developer":
+                     msg_developer = {
+                         "role": "developer",
+                         "content": cls.extract_text_content(content),
+                     }
+                     result.append(msg_developer)
+                 else:
+                     raise UserError(f"Unexpected role in input_message: {role}")
+
+             # 3) response output message => assistant
+             elif resp_msg := cls.maybe_response_output_message(item):
+                 flush_assistant_message()
+                 new_asst = ChatCompletionAssistantMessageParam(role="assistant")
+                 contents = resp_msg["content"]
+
+                 text_segments = []
+                 for c in contents:
+                     if c["type"] == "output_text":
+                         text_segments.append(c["text"])
+                     elif c["type"] == "refusal":
+                         new_asst["refusal"] = c["refusal"]
+                     elif c["type"] == "output_audio":
+                         # Can't handle this, b/c chat completions expects an ID which we don't have
+                         raise UserError(
+                             f"Only audio IDs are supported for chat completions, but got: {c}"
+                         )
+                     else:
+                         raise UserError(f"Unknown content type in ResponseOutputMessage: {c}")
+
+                 if text_segments:
+                     combined = "\n".join(text_segments)
+                     new_asst["content"] = combined
+
+                 new_asst["tool_calls"] = []
+                 current_assistant_msg = new_asst
+
+             # 4) function/file-search calls => attach to assistant
+             elif file_search := cls.maybe_file_search_call(item):
+                 asst = ensure_assistant_message()
+                 tool_calls = list(asst.get("tool_calls", []))
+                 new_tool_call = ChatCompletionMessageToolCallParam(
+                     id=file_search["id"],
+                     type="function",
+                     function={
+                         "name": "file_search_call",
+                         "arguments": json.dumps(
+                             {
+                                 "queries": file_search.get("queries", []),
+                                 "status": file_search.get("status"),
+                             }
+                         ),
+                     },
+                 )
+                 tool_calls.append(new_tool_call)
+                 asst["tool_calls"] = tool_calls
+
+             elif func_call := cls.maybe_function_tool_call(item):
+                 asst = ensure_assistant_message()
+                 tool_calls = list(asst.get("tool_calls", []))
+                 new_tool_call = ChatCompletionMessageToolCallParam(
+                     id=func_call["call_id"],
+                     type="function",
+                     function={
+                         "name": func_call["name"],
+                         "arguments": func_call["arguments"],
+                     },
+                 )
+                 tool_calls.append(new_tool_call)
+                 asst["tool_calls"] = tool_calls
+             # 5) function call output => tool message
+             elif func_output := cls.maybe_function_tool_call_output(item):
+                 flush_assistant_message()
+                 msg: ChatCompletionToolMessageParam = {
+                     "role": "tool",
+                     "tool_call_id": func_output["call_id"],
+                     "content": func_output["output"],
+                 }
+                 result.append(msg)
+
+             # 6) item reference => handle or raise
+             elif item_ref := cls.maybe_item_reference(item):
+                 raise UserError(
+                     f"Encountered an item_reference, which is not supported: {item_ref}"
+                 )
+
+             # 7) If we haven't recognized it => fail or ignore
+             else:
+                 raise UserError(f"Unhandled item type or structure: {item}")
+
+         flush_assistant_message()
+         return result
+
+
+ class ToolConverter:
+     @classmethod
+     def to_openai(cls, tool: Tool) -> ChatCompletionToolParam:
+         if isinstance(tool, FunctionTool):
+             return {
+                 "type": "function",
+                 "function": {
+                     "name": tool.name,
+                     "description": tool.description or "",
+                     "parameters": tool.params_json_schema,
+                 },
+             }
+
+         raise UserError(
+             f"Hosted tools are not supported with the ChatCompletions API. Got tool type: "
+             f"{type(tool)}, tool: {tool}"
+         )
+
+     @classmethod
+     def convert_handoff_tool(cls, handoff: Handoff[Any]) -> ChatCompletionToolParam:
+         return {
+             "type": "function",
+             "function": {
+                 "name": handoff.tool_name,
+                 "description": handoff.tool_description,
+                 "parameters": handoff.input_json_schema,
+             },
+         }
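
For orientation, a minimal usage sketch of the OpenAIChatCompletionsModel class added above, requesting a single non-streamed response. It is not part of the diff: it assumes ModelSettings() is default-constructible and that ModelTracing exposes a DISABLED member, based on the agents/model_settings.py and agents/models/interface.py files in the list above, which are not expanded here.

# Minimal usage sketch (not part of the diff). Requires OPENAI_API_KEY in the
# environment for AsyncOpenAI(); ModelSettings() defaults and
# ModelTracing.DISABLED are assumptions noted above.
import asyncio

from openai import AsyncOpenAI

from agents.model_settings import ModelSettings
from agents.models.interface import ModelTracing
from agents.models.openai_chatcompletions import OpenAIChatCompletionsModel


async def main() -> None:
    model = OpenAIChatCompletionsModel(model="gpt-4o", openai_client=AsyncOpenAI())
    # get_response issues one chat.completions.create call and converts the
    # reply into Responses-style output items via message_to_output_items.
    result = await model.get_response(
        system_instructions="You are a terse assistant.",
        input="Say hello.",
        model_settings=ModelSettings(),
        tools=[],
        output_schema=None,
        handoffs=[],
        tracing=ModelTracing.DISABLED,
    )
    print(result.output)


asyncio.run(main())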
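The items_to_messages rules in the docstring above can also be exercised directly. A small sketch with hypothetical item values (the call_id, name, and arguments are illustrative only), importing the module-internal _Converter class:

# Hypothetical conversion: a user message, an assistant function call, and its
# output collapse into three chat-completions messages (user, assistant with
# tool_calls, tool), exercising branches 1, 4, and 5 of the loop above.
from agents.models.openai_chatcompletions import _Converter

items = [
    {"role": "user", "content": "What's the weather in Tokyo?"},
    {
        "type": "function_call",
        "call_id": "call_123",
        "name": "get_weather",
        "arguments": '{"city": "Tokyo"}',
    },
    {"type": "function_call_output", "call_id": "call_123", "output": "Sunny, 21C"},
]

for message in _Converter.items_to_messages(items):
    print(message)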