openai-agents 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of openai-agents might be problematic.

@@ -51,8 +51,10 @@ from openai.types.responses import (
     ResponseOutputText,
     ResponseRefusalDeltaEvent,
     ResponseTextDeltaEvent,
+    ResponseUsage,
 )
 from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
+from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails
 
 from .. import _debug
 from ..agent_output import AgentOutputSchema
@@ -405,7 +407,28 @@ class OpenAIChatCompletionsModel(Model):
         for function_call in state.function_calls.values():
             outputs.append(function_call)
 
-        final_response = response.model_copy(update={"output": outputs, "usage": usage})
+        final_response = response.model_copy()
+        final_response.output = outputs
+        final_response.usage = (
+            ResponseUsage(
+                input_tokens=usage.prompt_tokens,
+                output_tokens=usage.completion_tokens,
+                total_tokens=usage.total_tokens,
+                output_tokens_details=OutputTokensDetails(
+                    reasoning_tokens=usage.completion_tokens_details.reasoning_tokens
+                    if usage.completion_tokens_details
+                    and usage.completion_tokens_details.reasoning_tokens
+                    else 0
+                ),
+                input_tokens_details=InputTokensDetails(
+                    cached_tokens=usage.prompt_tokens_details.cached_tokens
+                    if usage.prompt_tokens_details and usage.prompt_tokens_details.cached_tokens
+                    else 0
+                ),
+            )
+            if usage
+            else None
+        )
 
         yield ResponseCompletedEvent(
             response=final_response,
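The replacement block converts Chat Completions usage into the Responses-style `ResponseUsage` model instead of copying the raw usage object across. A minimal standalone sketch of that mapping (the `to_response_usage` helper name is ours, not part of the package):

    from openai.types.completion_usage import CompletionUsage
    from openai.types.responses import ResponseUsage
    from openai.types.responses.response_usage import InputTokensDetails, OutputTokensDetails

    def to_response_usage(usage: CompletionUsage) -> ResponseUsage:
        # Chat Completions counts prompt/completion tokens; Responses counts
        # input/output tokens. Detail counts default to 0 when the API omits them.
        reasoning = (
            usage.completion_tokens_details.reasoning_tokens or 0
            if usage.completion_tokens_details
            else 0
        )
        cached = (
            usage.prompt_tokens_details.cached_tokens or 0
            if usage.prompt_tokens_details
            else 0
        )
        return ResponseUsage(
            input_tokens=usage.prompt_tokens,
            output_tokens=usage.completion_tokens,
            total_tokens=usage.total_tokens,
            input_tokens_details=InputTokensDetails(cached_tokens=cached),
            output_tokens_details=OutputTokensDetails(reasoning_tokens=reasoning),
        )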
@@ -503,6 +526,7 @@ class OpenAIChatCompletionsModel(Model):
             top_p=self._non_null_or_not_given(model_settings.top_p),
             frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
             presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
+            max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
             tool_choice=tool_choice,
             response_format=response_format,
             parallel_tool_calls=parallel_tool_calls,
@@ -808,6 +832,13 @@ class _Converter:
                     "content": cls.extract_text_content(content),
                 }
                 result.append(msg_developer)
+            elif role == "assistant":
+                flush_assistant_message()
+                msg_assistant: ChatCompletionAssistantMessageParam = {
+                    "role": "assistant",
+                    "content": cls.extract_text_content(content),
+                }
+                result.append(msg_assistant)
             else:
                 raise UserError(f"Unexpected role in easy_input_message: {role}")
 
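This new branch closes a gap: easy input messages with `role="assistant"` previously fell through to the `UserError` below, so replaying prior turns failed. A hypothetical input list that only converts cleanly after this change (the list shape is illustrative):

    # Replaying a conversation as plain input items; the assistant turn is
    # what the new branch handles.
    conversation = [
        {"role": "user", "content": "What's the capital of France?"},
        {"role": "assistant", "content": "Paris."},
        {"role": "user", "content": "And its population?"},
    ]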
@@ -38,28 +38,41 @@ class OpenAIProvider(ModelProvider):
             assert api_key is None and base_url is None, (
                 "Don't provide api_key or base_url if you provide openai_client"
             )
-            self._client = openai_client
+            self._client: AsyncOpenAI | None = openai_client
         else:
-            self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
-                api_key=api_key or _openai_shared.get_default_openai_key(),
-                base_url=base_url,
-                organization=organization,
-                project=project,
-                http_client=shared_http_client(),
-            )
+            self._client = None
+            self._stored_api_key = api_key
+            self._stored_base_url = base_url
+            self._stored_organization = organization
+            self._stored_project = project
 
-        self._is_openai_model = self._client.base_url.host.startswith("api.openai.com")
         if use_responses is not None:
             self._use_responses = use_responses
         else:
             self._use_responses = _openai_shared.get_use_responses_by_default()
 
+    # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise
+    # AsyncOpenAI() raises an error if you don't have an API key set.
+    def _get_client(self) -> AsyncOpenAI:
+        if self._client is None:
+            self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
+                api_key=self._stored_api_key or _openai_shared.get_default_openai_key(),
+                base_url=self._stored_base_url,
+                organization=self._stored_organization,
+                project=self._stored_project,
+                http_client=shared_http_client(),
+            )
+
+        return self._client
+
     def get_model(self, model_name: str | None) -> Model:
         if model_name is None:
             model_name = DEFAULT_MODEL
 
+        client = self._get_client()
+
         return (
-            OpenAIResponsesModel(model=model_name, openai_client=self._client)
+            OpenAIResponsesModel(model=model_name, openai_client=client)
             if self._use_responses
-            else OpenAIChatCompletionsModel(model=model_name, openai_client=self._client)
+            else OpenAIChatCompletionsModel(model=model_name, openai_client=client)
         )
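Construction of `AsyncOpenAI` is now deferred to `_get_client()`, so merely instantiating the provider no longer requires an API key; the key only matters once a model is requested. A usage sketch (the import path is inferred from this diff's file layout):

    import os

    from agents.models.openai_provider import OpenAIProvider

    os.environ.pop("OPENAI_API_KEY", None)

    provider = OpenAIProvider()      # previously raised without a key; now safe
    # provider.get_model("gpt-4o")   # a missing key would surface here instead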
@@ -5,7 +5,7 @@ from collections.abc import AsyncIterator
 from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, Literal, overload
 
-from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven
+from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
 from openai.types import ChatModel
 from openai.types.responses import (
     Response,
@@ -113,7 +113,8 @@ class OpenAIResponsesModel(Model):
                     },
                 )
             )
-            logger.error(f"Error getting response: {e}")
+            request_id = e.request_id if isinstance(e, APIStatusError) else None
+            logger.error(f"Error getting response: {e}. (request_id: {request_id})")
             raise
 
         return ModelResponse(
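`APIStatusError` in openai-python exposes the `x-request-id` response header as `request_id`, the identifier to quote when reporting server-side failures. The same pattern works in application code; a sketch:

    from openai import APIStatusError, AsyncOpenAI

    async def create_with_logging(client: AsyncOpenAI) -> None:
        try:
            await client.responses.create(model="gpt-4o", input="hello")
        except APIStatusError as e:
            # e.request_id is populated from the x-request-id header.
            print(f"API error {e.status_code} (request_id: {e.request_id})")
            raise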
@@ -235,6 +236,7 @@ class OpenAIResponsesModel(Model):
             temperature=self._non_null_or_not_given(model_settings.temperature),
             top_p=self._non_null_or_not_given(model_settings.top_p),
             truncation=self._non_null_or_not_given(model_settings.truncation),
+            max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
             tool_choice=tool_choice,
             parallel_tool_calls=parallel_tool_calls,
             stream=stream,
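Together with the Chat Completions hunk earlier, `model_settings.max_tokens` now actually reaches both backends: as `max_tokens` on Chat Completions and as `max_output_tokens` on Responses. A hedged usage sketch (standard SDK surface, not part of this diff):

    from agents import Agent, ModelSettings, Runner

    agent = Agent(
        name="Summarizer",
        instructions="Summarize the input in one paragraph.",
        # As of this release, the cap below is forwarded to the API.
        model_settings=ModelSettings(max_tokens=256),
    )

    result = Runner.run_sync(agent, "Some long document ...")
    print(result.final_output)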
agents/result.py CHANGED
@@ -17,6 +17,7 @@ from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .logger import logger
 from .stream_events import StreamEvent
 from .tracing import Trace
+from .util._pretty_print import pretty_print_result, pretty_print_run_result_streaming
 
 if TYPE_CHECKING:
     from ._run_impl import QueueCompleteSentinel
@@ -89,6 +90,9 @@ class RunResult(RunResultBase):
         """The last agent that was run."""
         return self._last_agent
 
+    def __str__(self) -> str:
+        return pretty_print_result(self)
+
 
 @dataclass
 class RunResultStreaming(RunResultBase):
@@ -216,5 +220,6 @@ class RunResultStreaming(RunResultBase):
 
         if self._output_guardrails_task and not self._output_guardrails_task.done():
             self._output_guardrails_task.cancel()
-            self._output_guardrails_task.cancel()
-            self._output_guardrails_task.cancel()
+
+    def __str__(self) -> str:
+        return pretty_print_run_result_streaming(self)
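Besides removing the duplicated `cancel()` calls, this gives both result types a `__str__` that delegates to the new pretty-print helpers (added later in this diff), so printing a result yields a readable summary instead of a huge dataclass repr:

    result = Runner.run_sync(agent, "Hello")
    print(result)   # renders via pretty_print_result(); a sample rendering is
                    # shown with the _pretty_print module below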
agents/run.py CHANGED
@@ -7,7 +7,6 @@ from typing import Any, cast
 
 from openai.types.responses import ResponseCompletedEvent
 
-from . import Model, _utils
 from ._run_impl import (
     NextStepFinalOutput,
     NextStepHandoff,
@@ -33,7 +32,7 @@ from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
 from .lifecycle import RunHooks
 from .logger import logger
 from .model_settings import ModelSettings
-from .models.interface import ModelProvider
+from .models.interface import Model, ModelProvider
 from .models.openai_provider import OpenAIProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
@@ -41,6 +40,7 @@ from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
+from .util import _coro, _error_tracing
 
 DEFAULT_MAX_TURNS = 10
 
@@ -193,7 +193,7 @@ class Runner:
 
                 current_turn += 1
                 if current_turn > max_turns:
-                    _utils.attach_error_to_span(
+                    _error_tracing.attach_error_to_span(
                         current_span,
                         SpanError(
                             message="Max turns exceeded",
@@ -447,7 +447,7 @@ class Runner:
         for done in asyncio.as_completed(guardrail_tasks):
             result = await done
             if result.output.tripwire_triggered:
-                _utils.attach_error_to_span(
+                _error_tracing.attach_error_to_span(
                     parent_span,
                     SpanError(
                         message="Guardrail tripwire triggered",
@@ -511,7 +511,7 @@ class Runner:
             streamed_result.current_turn = current_turn
 
             if current_turn > max_turns:
-                _utils.attach_error_to_span(
+                _error_tracing.attach_error_to_span(
                     current_span,
                     SpanError(
                         message="Max turns exceeded",
@@ -583,7 +583,7 @@ class Runner:
             pass
         except Exception as e:
             if current_span:
-                _utils.attach_error_to_span(
+                _error_tracing.attach_error_to_span(
                     current_span,
                     SpanError(
                         message="Error in agent run",
@@ -615,7 +615,7 @@ class Runner:
             (
                 agent.hooks.on_start(context_wrapper, agent)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
 
@@ -705,7 +705,7 @@ class Runner:
             (
                 agent.hooks.on_start(context_wrapper, agent)
                 if agent.hooks
-                else _utils.noop_coroutine()
+                else _coro.noop_coroutine()
             ),
         )
 
@@ -796,7 +796,7 @@ class Runner:
             # Cancel all guardrail tasks if a tripwire is triggered.
             for t in guardrail_tasks:
                 t.cancel()
-            _utils.attach_error_to_current_span(
+            _error_tracing.attach_error_to_current_span(
                 SpanError(
                     message="Guardrail tripwire triggered",
                     data={"guardrail": result.guardrail.get_name()},
@@ -834,7 +834,7 @@ class Runner:
             # Cancel all guardrail tasks if a tripwire is triggered.
            for t in guardrail_tasks:
                 t.cancel()
-            _utils.attach_error_to_current_span(
+            _error_tracing.attach_error_to_current_span(
                 SpanError(
                     message="Guardrail tripwire triggered",
                     data={"guardrail": result.guardrail.get_name()},
agents/tool.py CHANGED
@@ -11,14 +11,16 @@ from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
 from typing_extensions import Concatenate, ParamSpec
 
-from . import _debug, _utils
-from ._utils import MaybeAwaitable
+from . import _debug
 from .computer import AsyncComputer, Computer
 from .exceptions import ModelBehaviorError
 from .function_schema import DocstringStyle, function_schema
+from .items import RunItem
 from .logger import logger
 from .run_context import RunContextWrapper
 from .tracing import SpanError
+from .util import _error_tracing
+from .util._types import MaybeAwaitable
 
 ToolParams = ParamSpec("ToolParams")
 
@@ -28,6 +30,18 @@ ToolFunctionWithContext = Callable[Concatenate[RunContextWrapper[Any], ToolParam
 ToolFunction = Union[ToolFunctionWithoutContext[ToolParams], ToolFunctionWithContext[ToolParams]]
 
 
+@dataclass
+class FunctionToolResult:
+    tool: FunctionTool
+    """The tool that was run."""
+
+    output: Any
+    """The output of the tool."""
+
+    run_item: RunItem
+    """The run item that was produced as a result of the tool call."""
+
+
 @dataclass
 class FunctionTool:
     """A tool that wraps a function. In most cases, you should use the `function_tool` helpers to
@@ -43,15 +57,15 @@ class FunctionTool:
     params_json_schema: dict[str, Any]
     """The JSON schema for the tool's parameters."""
 
-    on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[str]]
+    on_invoke_tool: Callable[[RunContextWrapper[Any], str], Awaitable[Any]]
     """A function that invokes the tool with the given context and parameters. The params passed
     are:
     1. The tool run context.
     2. The arguments from the LLM, as a JSON string.
 
-    You must return a string representation of the tool output. In case of errors, you can either
-    raise an Exception (which will cause the run to fail) or return a string error message (which
-    will be sent back to the LLM).
+    You must return a string representation of the tool output, or something we can call `str()` on.
+    In case of errors, you can either raise an Exception (which will cause the run to fail) or
+    return a string error message (which will be sent back to the LLM).
     """
 
     strict_json_schema: bool = True
@@ -137,6 +151,7 @@ def function_tool(
     docstring_style: DocstringStyle | None = None,
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = None,
+    strict_mode: bool = True,
 ) -> FunctionTool:
     """Overload for usage as @function_tool (no parentheses)."""
     ...
@@ -150,6 +165,7 @@ def function_tool(
     docstring_style: DocstringStyle | None = None,
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = None,
+    strict_mode: bool = True,
 ) -> Callable[[ToolFunction[...]], FunctionTool]:
     """Overload for usage as @function_tool(...)."""
     ...
@@ -163,6 +179,7 @@ def function_tool(
     docstring_style: DocstringStyle | None = None,
     use_docstring_info: bool = True,
     failure_error_function: ToolErrorFunction | None = default_tool_error_function,
+    strict_mode: bool = True,
 ) -> FunctionTool | Callable[[ToolFunction[...]], FunctionTool]:
     """
     Decorator to create a FunctionTool from a function. By default, we will:
@@ -186,6 +203,11 @@ def function_tool(
         failure_error_function: If provided, use this function to generate an error message when
             the tool call fails. The error message is sent to the LLM. If you pass None, then no
            error message will be sent and instead an Exception will be raised.
+        strict_mode: Whether to enable strict mode for the tool's JSON schema. We *strongly*
+            recommend setting this to True, as it increases the likelihood of correct JSON input.
+            If False, it allows non-strict JSON schemas. For example, if a parameter has a default
+            value, it will be optional, additional properties are allowed, etc. See here for more:
+            https://platform.openai.com/docs/guides/structured-outputs?api-mode=responses#supported-schemas
     """
 
    def _create_function_tool(the_func: ToolFunction[...]) -> FunctionTool:
@@ -195,9 +217,10 @@ def function_tool(
             description_override=description_override,
             docstring_style=docstring_style,
             use_docstring_info=use_docstring_info,
+            strict_json_schema=strict_mode,
         )
 
-        async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> str:
+        async def _on_invoke_tool_impl(ctx: RunContextWrapper[Any], input: str) -> Any:
             try:
                 json_data: dict[str, Any] = json.loads(input) if input else {}
             except Exception as e:
@@ -244,9 +267,9 @@ def function_tool(
             else:
                 logger.debug(f"Tool {schema.name} returned {result}")
 
-            return str(result)
+            return result
 
-        async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> str:
+        async def _on_invoke_tool(ctx: RunContextWrapper[Any], input: str) -> Any:
             try:
                 return await _on_invoke_tool_impl(ctx, input)
             except Exception as e:
@@ -257,7 +280,7 @@ def function_tool(
                 if inspect.isawaitable(result):
                     return await result
 
-                _utils.attach_error_to_current_span(
+                _error_tracing.attach_error_to_current_span(
                     SpanError(
                         message="Error running tool (non-fatal)",
                         data={
@@ -273,6 +296,7 @@ def function_tool(
             description=schema.description or "",
             params_json_schema=schema.params_json_schema,
             on_invoke_tool=_on_invoke_tool,
+            strict_json_schema=strict_mode,
         )
 
     # If func is actually a callable, we were used as @function_tool with no parentheses
@@ -284,5 +308,3 @@ def function_tool(
         return _create_function_tool(real_func)
 
     return decorator
-    return decorator
-    return decorator
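The user-visible addition here is `strict_mode`. With the default (`True`) the generated JSON schema is strict: every parameter required, no additional properties. Passing `False` relaxes this, for example keeping defaulted parameters optional. A sketch:

    from agents import function_tool

    # strict_mode=False lets `limit` stay optional in the schema because it has
    # a default; the docstring above warns this raises the odds of bad JSON.
    @function_tool(strict_mode=False)
    def search(query: str, limit: int = 10) -> str:
        """Search the docs.

        Args:
            query: The search string.
            limit: Maximum number of results.
        """
        return f"{limit} results for {query!r}"

Note also that `on_invoke_tool` may now return any object rather than only `str`; per the updated docstring, the output just has to be something `str()` can be called on, which is what the new `FunctionToolResult.output: Any` field reflects.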
agents/tracing/create.py CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 from collections.abc import Mapping, Sequence
 from typing import TYPE_CHECKING, Any
 
-from .logger import logger
+from ..logger import logger
 from .setup import GLOBAL_TRACE_PROVIDER
 from .span_data import (
     AgentSpanData,
@@ -9,7 +9,7 @@ from typing import Any
 
 import httpx
 
-from .logger import logger
+from ..logger import logger
 from .processor_interface import TracingExporter, TracingProcessor
 from .spans import Span
 from .traces import Trace
@@ -40,7 +40,7 @@ class BackendSpanExporter(TracingExporter):
     """
     Args:
         api_key: The API key for the "Authorization" header. Defaults to
-            `os.environ["OPENAI_TRACE_API_KEY"]` if not provided.
+            `os.environ["OPENAI_API_KEY"]` if not provided.
         organization: The OpenAI organization to use. Defaults to
             `os.environ["OPENAI_ORG_ID"]` if not provided.
         project: The OpenAI project to use. Defaults to
@@ -78,9 +78,6 @@ class BackendSpanExporter(TracingExporter):
             logger.warning("OPENAI_API_KEY is not set, skipping trace export")
             return
 
-        traces: list[dict[str, Any]] = []
-        spans: list[dict[str, Any]] = []
-
         data = [item.export() for item in items if item.export()]
         payload = {"data": data}
 
@@ -100,7 +97,7 @@ class BackendSpanExporter(TracingExporter):
 
             # If the response is successful, break out of the loop
             if response.status_code < 300:
-                logger.debug(f"Exported {len(traces)} traces, {len(spans)} spans")
+                logger.debug(f"Exported {len(items)} items")
                 return
 
             # If the response is a client error (4xx), we wont retry
agents/tracing/scope.py CHANGED
@@ -2,7 +2,7 @@
 import contextvars
 from typing import TYPE_CHECKING, Any
 
-from .logger import logger
+from ..logger import logger
 
 if TYPE_CHECKING:
     from .spans import Span
agents/tracing/setup.py CHANGED
@@ -4,8 +4,8 @@ import os
 import threading
 from typing import Any
 
+from ..logger import logger
 from . import util
-from .logger import logger
 from .processor_interface import TracingProcessor
 from .scope import Scope
 from .spans import NoOpSpan, Span, SpanImpl, TSpanData
@@ -51,7 +51,7 @@ class AgentSpanData(SpanData):
 class FunctionSpanData(SpanData):
     __slots__ = ("name", "input", "output")
 
-    def __init__(self, name: str, input: str | None, output: str | None):
+    def __init__(self, name: str, input: str | None, output: Any | None):
         self.name = name
         self.input = input
         self.output = output
@@ -65,7 +65,7 @@ class FunctionSpanData(SpanData):
             "type": self.type,
             "name": self.name,
             "input": self.input,
-            "output": self.output,
+            "output": str(self.output) if self.output else None,
         }
 
 
agents/tracing/spans.py CHANGED
@@ -6,8 +6,8 @@ from typing import Any, Generic, TypeVar
 
 from typing_extensions import TypedDict
 
+from ..logger import logger
 from . import util
-from .logger import logger
 from .processor_interface import TracingProcessor
 from .scope import Scope
 from .span_data import SpanData
agents/tracing/traces.py CHANGED
@@ -4,8 +4,8 @@ import abc
 import contextvars
 from typing import Any
 
+from ..logger import logger
 from . import util
-from .logger import logger
 from .processor_interface import TracingProcessor
 from .scope import Scope
 
agents/util/_coro.py ADDED
@@ -0,0 +1,2 @@
+async def noop_coroutine() -> None:
+    pass
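The helper exists so call sites can `await` unconditionally; the run.py hunks above use it as the fallback branch of a conditional expression:

    # Pattern from run.py: hooks.on_start returns a coroutine, and
    # noop_coroutine() supplies an awaitable no-op when no hooks are set.
    await (
        agent.hooks.on_start(context_wrapper, agent)
        if agent.hooks
        else _coro.noop_coroutine()
    )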
agents/util/_error_tracing.py ADDED
@@ -0,0 +1,16 @@
+from typing import Any
+
+from ..logger import logger
+from ..tracing import Span, SpanError, get_current_span
+
+
+def attach_error_to_span(span: Span[Any], error: SpanError) -> None:
+    span.set_error(error)
+
+
+def attach_error_to_current_span(error: SpanError) -> None:
+    span = get_current_span()
+    if span:
+        attach_error_to_span(span, error)
+    else:
+        logger.warning(f"No span to add error {error} to")
agents/util/_json.py ADDED
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from typing import Literal
+
+from pydantic import TypeAdapter, ValidationError
+from typing_extensions import TypeVar
+
+from ..exceptions import ModelBehaviorError
+from ..tracing import SpanError
+from ._error_tracing import attach_error_to_current_span
+
+T = TypeVar("T")
+
+
+def validate_json(json_str: str, type_adapter: TypeAdapter[T], partial: bool) -> T:
+    partial_setting: bool | Literal["off", "on", "trailing-strings"] = (
+        "trailing-strings" if partial else False
+    )
+    try:
+        validated = type_adapter.validate_json(json_str, experimental_allow_partial=partial_setting)
+        return validated
+    except ValidationError as e:
+        attach_error_to_current_span(
+            SpanError(
+                message="Invalid JSON provided",
+                data={},
+            )
+        )
+        raise ModelBehaviorError(
+            f"Invalid JSON when parsing {json_str} for {type_adapter}; {e}"
+        ) from e
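`experimental_allow_partial` is a pydantic v2 feature; in "trailing-strings" mode it tolerates JSON that is cut off mid-stream, which is what `partial=True` is for when validating incrementally streamed model output. A sketch, assuming a pydantic version that supports the flag:

    from pydantic import TypeAdapter
    from typing_extensions import TypedDict

    class Weather(TypedDict):
        city: str
        temperature_c: float

    adapter = TypeAdapter(list[Weather])

    # Complete JSON validates as usual:
    validate_json('[{"city": "Oslo", "temperature_c": 3.5}]', adapter, partial=False)

    # Truncated JSON (as seen mid-stream) no longer raises when partial=True:
    validate_json('[{"city": "Oslo", "temperature_c": 3.5}, {"city": "Li', adapter, partial=True)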
agents/util/_pretty_print.py ADDED
@@ -0,0 +1,56 @@
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+
+if TYPE_CHECKING:
+    from ..result import RunResult, RunResultBase, RunResultStreaming
+
+
+def _indent(text: str, indent_level: int) -> str:
+    indent_string = " " * indent_level
+    return "\n".join(f"{indent_string}{line}" for line in text.splitlines())
+
+
+def _final_output_str(result: "RunResultBase") -> str:
+    if result.final_output is None:
+        return "None"
+    elif isinstance(result.final_output, str):
+        return result.final_output
+    elif isinstance(result.final_output, BaseModel):
+        return result.final_output.model_dump_json(indent=2)
+    else:
+        return str(result.final_output)
+
+
+def pretty_print_result(result: "RunResult") -> str:
+    output = "RunResult:"
+    output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)'
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResult` for more details)"
+
+    return output
+
+
+def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str:
+    output = "RunResultStreaming:"
+    output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)'
+    output += f"\n- Current turn: {result.current_turn}"
+    output += f"\n- Max turns: {result.max_turns}"
+    output += f"\n- Is complete: {result.is_complete}"
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResultStreaming` for more details)"
+    return output
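For a string final output, `str(result)` therefore renders roughly like this (illustrative, derived from the code above):

    RunResult:
    - Last agent: Agent(name="Assistant", ...)
    - Final output (str):
      The capital of France is Paris.
    - 1 new item(s)
    - 1 raw response(s)
    - 0 input guardrail result(s)
    - 0 output guardrail result(s)
    (See `RunResult` for more details)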
@@ -0,0 +1,11 @@
+import re
+
+
+def transform_string_function_style(name: str) -> str:
+    # Replace spaces with underscores
+    name = name.replace(" ", "_")
+
+    # Replace non-alphanumeric characters with underscores
+    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
+
+    return name.lower()
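The two substitutions normalize arbitrary display names into lowercase, identifier-safe tool names. Worked examples (derived directly from the code; note the second `re.sub` replaces every remaining non-alphanumeric character, including punctuation adjacent to the underscores the first replace produced):

    transform_string_function_style("Fetch User's Data")  # -> "fetch_user_s_data"
    transform_string_function_style("GET /users/{id}")    # -> "get__users__id_"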
agents/util/_types.py ADDED
@@ -0,0 +1,7 @@
+from collections.abc import Awaitable
+from typing import Union
+
+from typing_extensions import TypeVar
+
+T = TypeVar("T")
+MaybeAwaitable = Union[Awaitable[T], T]
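`MaybeAwaitable[T]` lets an API accept either a plain value or something to `await` (for example, the `failure_error_function` results handled in tool.py above). The standard consumption pattern is a runtime check:

    import inspect

    from agents.util._types import MaybeAwaitable

    async def resolve(value: MaybeAwaitable[str]) -> str:
        # Handles producers defined with either `def` or `async def`.
        if inspect.isawaitable(value):
            return await value
        return value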