openai-agents 0.0.6__py3-none-any.whl → 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic. See the advisory details for this release for more information.

agents/run.py CHANGED
@@ -8,6 +8,7 @@ from typing import Any, cast
8
8
  from openai.types.responses import ResponseCompletedEvent
9
9
 
10
10
  from ._run_impl import (
11
+ AgentToolUseTracker,
11
12
  NextStepFinalOutput,
12
13
  NextStepHandoff,
13
14
  NextStepRunAgain,
@@ -37,6 +38,7 @@ from .models.openai_provider import OpenAIProvider
37
38
  from .result import RunResult, RunResultStreaming
38
39
  from .run_context import RunContextWrapper, TContext
39
40
  from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
41
+ from .tool import Tool
40
42
  from .tracing import Span, SpanError, agent_span, get_current_trace, trace
41
43
  from .tracing.span_data import AgentSpanData
42
44
  from .usage import Usage
@@ -149,6 +151,8 @@ class Runner:
149
151
  if run_config is None:
150
152
  run_config = RunConfig()
151
153
 
154
+ tool_use_tracker = AgentToolUseTracker()
155
+
152
156
  with TraceCtxManager(
153
157
  workflow_name=run_config.workflow_name,
154
158
  trace_id=run_config.trace_id,
@@ -177,7 +181,6 @@ class Runner:
177
181
  # agent changes, or if the agent loop ends.
178
182
  if current_span is None:
179
183
  handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
180
- tool_names = [t.name for t in current_agent.tools]
181
184
  if output_schema := cls._get_output_schema(current_agent):
182
185
  output_type_name = output_schema.output_type_name()
183
186
  else:
@@ -186,11 +189,13 @@ class Runner:
186
189
  current_span = agent_span(
187
190
  name=current_agent.name,
188
191
  handoffs=handoff_names,
189
- tools=tool_names,
190
192
  output_type=output_type_name,
191
193
  )
192
194
  current_span.start(mark_as_current=True)
193
195
 
196
+ all_tools = await cls._get_all_tools(current_agent)
197
+ current_span.span_data.tools = [t.name for t in all_tools]
198
+
194
199
  current_turn += 1
195
200
  if current_turn > max_turns:
196
201
  _error_tracing.attach_error_to_span(
@@ -217,23 +222,27 @@ class Runner:
217
222
  ),
218
223
  cls._run_single_turn(
219
224
  agent=current_agent,
225
+ all_tools=all_tools,
220
226
  original_input=original_input,
221
227
  generated_items=generated_items,
222
228
  hooks=hooks,
223
229
  context_wrapper=context_wrapper,
224
230
  run_config=run_config,
225
231
  should_run_agent_start_hooks=should_run_agent_start_hooks,
232
+ tool_use_tracker=tool_use_tracker,
226
233
  ),
227
234
  )
228
235
  else:
229
236
  turn_result = await cls._run_single_turn(
230
237
  agent=current_agent,
238
+ all_tools=all_tools,
231
239
  original_input=original_input,
232
240
  generated_items=generated_items,
233
241
  hooks=hooks,
234
242
  context_wrapper=context_wrapper,
235
243
  run_config=run_config,
236
244
  should_run_agent_start_hooks=should_run_agent_start_hooks,
245
+ tool_use_tracker=tool_use_tracker,
237
246
  )
238
247
  should_run_agent_start_hooks = False
239
248
 
@@ -481,6 +490,7 @@ class Runner:
481
490
  current_agent = starting_agent
482
491
  current_turn = 0
483
492
  should_run_agent_start_hooks = True
493
+ tool_use_tracker = AgentToolUseTracker()
484
494
 
485
495
  streamed_result._event_queue.put_nowait(AgentUpdatedStreamEvent(new_agent=current_agent))
486
496
 
@@ -493,7 +503,6 @@ class Runner:
493
503
  # agent changes, or if the agent loop ends.
494
504
  if current_span is None:
495
505
  handoff_names = [h.agent_name for h in cls._get_handoffs(current_agent)]
496
- tool_names = [t.name for t in current_agent.tools]
497
506
  if output_schema := cls._get_output_schema(current_agent):
498
507
  output_type_name = output_schema.output_type_name()
499
508
  else:
@@ -502,11 +511,13 @@ class Runner:
502
511
  current_span = agent_span(
503
512
  name=current_agent.name,
504
513
  handoffs=handoff_names,
505
- tools=tool_names,
506
514
  output_type=output_type_name,
507
515
  )
508
516
  current_span.start(mark_as_current=True)
509
517
 
518
+ all_tools = await cls._get_all_tools(current_agent)
519
+ tool_names = [t.name for t in all_tools]
520
+ current_span.span_data.tools = tool_names
510
521
  current_turn += 1
511
522
  streamed_result.current_turn = current_turn
512
523
 
@@ -541,6 +552,8 @@ class Runner:
541
552
  context_wrapper,
542
553
  run_config,
543
554
  should_run_agent_start_hooks,
555
+ tool_use_tracker,
556
+ all_tools,
544
557
  )
545
558
  should_run_agent_start_hooks = False
546
559
 
@@ -608,6 +621,8 @@ class Runner:
608
621
  context_wrapper: RunContextWrapper[TContext],
609
622
  run_config: RunConfig,
610
623
  should_run_agent_start_hooks: bool,
624
+ tool_use_tracker: AgentToolUseTracker,
625
+ all_tools: list[Tool],
611
626
  ) -> SingleStepResult:
612
627
  if should_run_agent_start_hooks:
613
628
  await asyncio.gather(
@@ -627,9 +642,10 @@ class Runner:
627
642
  system_prompt = await agent.get_system_prompt(context_wrapper)
628
643
 
629
644
  handoffs = cls._get_handoffs(agent)
630
-
631
645
  model = cls._get_model(agent, run_config)
632
646
  model_settings = agent.model_settings.resolve(run_config.model_settings)
647
+ model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
648
+
633
649
  final_response: ModelResponse | None = None
634
650
 
635
651
  input = ItemHelpers.input_to_new_input_list(streamed_result.input)
@@ -640,7 +656,7 @@ class Runner:
640
656
  system_prompt,
641
657
  input,
642
658
  model_settings,
643
- agent.tools,
659
+ all_tools,
644
660
  output_schema,
645
661
  handoffs,
646
662
  get_model_tracing_impl(
@@ -677,10 +693,12 @@ class Runner:
677
693
  pre_step_items=streamed_result.new_items,
678
694
  new_response=final_response,
679
695
  output_schema=output_schema,
696
+ all_tools=all_tools,
680
697
  handoffs=handoffs,
681
698
  hooks=hooks,
682
699
  context_wrapper=context_wrapper,
683
700
  run_config=run_config,
701
+ tool_use_tracker=tool_use_tracker,
684
702
  )
685
703
 
686
704
  RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
@@ -691,12 +709,14 @@ class Runner:
691
709
  cls,
692
710
  *,
693
711
  agent: Agent[TContext],
712
+ all_tools: list[Tool],
694
713
  original_input: str | list[TResponseInputItem],
695
714
  generated_items: list[RunItem],
696
715
  hooks: RunHooks[TContext],
697
716
  context_wrapper: RunContextWrapper[TContext],
698
717
  run_config: RunConfig,
699
718
  should_run_agent_start_hooks: bool,
719
+ tool_use_tracker: AgentToolUseTracker,
700
720
  ) -> SingleStepResult:
701
721
  # Ensure we run the hooks before anything else
702
722
  if should_run_agent_start_hooks:
@@ -721,9 +741,11 @@ class Runner:
721
741
  system_prompt,
722
742
  input,
723
743
  output_schema,
744
+ all_tools,
724
745
  handoffs,
725
746
  context_wrapper,
726
747
  run_config,
748
+ tool_use_tracker,
727
749
  )
728
750
 
729
751
  return await cls._get_single_step_result_from_response(
@@ -732,10 +754,12 @@ class Runner:
732
754
  pre_step_items=generated_items,
733
755
  new_response=new_response,
734
756
  output_schema=output_schema,
757
+ all_tools=all_tools,
735
758
  handoffs=handoffs,
736
759
  hooks=hooks,
737
760
  context_wrapper=context_wrapper,
738
761
  run_config=run_config,
762
+ tool_use_tracker=tool_use_tracker,
739
763
  )
740
764
 
741
765
  @classmethod
@@ -743,6 +767,7 @@ class Runner:
743
767
  cls,
744
768
  *,
745
769
  agent: Agent[TContext],
770
+ all_tools: list[Tool],
746
771
  original_input: str | list[TResponseInputItem],
747
772
  pre_step_items: list[RunItem],
748
773
  new_response: ModelResponse,
@@ -751,13 +776,18 @@ class Runner:
751
776
  hooks: RunHooks[TContext],
752
777
  context_wrapper: RunContextWrapper[TContext],
753
778
  run_config: RunConfig,
779
+ tool_use_tracker: AgentToolUseTracker,
754
780
  ) -> SingleStepResult:
755
781
  processed_response = RunImpl.process_model_response(
756
782
  agent=agent,
783
+ all_tools=all_tools,
757
784
  response=new_response,
758
785
  output_schema=output_schema,
759
786
  handoffs=handoffs,
760
787
  )
788
+
789
+ tool_use_tracker.add_tool_use(agent, processed_response.tools_used)
790
+
761
791
  return await RunImpl.execute_tools_and_side_effects(
762
792
  agent=agent,
763
793
  original_input=original_input,
@@ -853,17 +883,21 @@ class Runner:
853
883
  system_prompt: str | None,
854
884
  input: list[TResponseInputItem],
855
885
  output_schema: AgentOutputSchema | None,
886
+ all_tools: list[Tool],
856
887
  handoffs: list[Handoff],
857
888
  context_wrapper: RunContextWrapper[TContext],
858
889
  run_config: RunConfig,
890
+ tool_use_tracker: AgentToolUseTracker,
859
891
  ) -> ModelResponse:
860
892
  model = cls._get_model(agent, run_config)
861
893
  model_settings = agent.model_settings.resolve(run_config.model_settings)
894
+ model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
895
+
862
896
  new_response = await model.get_response(
863
897
  system_instructions=system_prompt,
864
898
  input=input,
865
899
  model_settings=model_settings,
866
- tools=agent.tools,
900
+ tools=all_tools,
867
901
  output_schema=output_schema,
868
902
  handoffs=handoffs,
869
903
  tracing=get_model_tracing_impl(
@@ -892,6 +926,10 @@ class Runner:
892
926
  handoffs.append(handoff(handoff_item))
893
927
  return handoffs
894
928
 
929
+ @classmethod
930
+ async def _get_all_tools(cls, agent: Agent[Any]) -> list[Tool]:
931
+ return await agent.get_all_tools()
932
+
895
933
  @classmethod
896
934
  def _get_model(cls, agent: Agent[Any], run_config: RunConfig) -> Model:
897
935
  if isinstance(run_config.model, Model):
@@ -9,6 +9,7 @@ from .create import (
9
9
  get_current_trace,
10
10
  guardrail_span,
11
11
  handoff_span,
12
+ mcp_tools_span,
12
13
  response_span,
13
14
  speech_group_span,
14
15
  speech_span,
@@ -25,6 +26,7 @@ from .span_data import (
25
26
  GenerationSpanData,
26
27
  GuardrailSpanData,
27
28
  HandoffSpanData,
29
+ MCPListToolsSpanData,
28
30
  ResponseSpanData,
29
31
  SpanData,
30
32
  SpeechGroupSpanData,
@@ -59,6 +61,7 @@ __all__ = [
59
61
  "GenerationSpanData",
60
62
  "GuardrailSpanData",
61
63
  "HandoffSpanData",
64
+ "MCPListToolsSpanData",
62
65
  "ResponseSpanData",
63
66
  "SpeechGroupSpanData",
64
67
  "SpeechSpanData",
@@ -69,6 +72,7 @@ __all__ = [
69
72
  "speech_group_span",
70
73
  "speech_span",
71
74
  "transcription_span",
75
+ "mcp_tools_span",
72
76
  ]
73
77
 
74
78
 
agents/tracing/create.py CHANGED
@@ -12,6 +12,7 @@ from .span_data import (
12
12
  GenerationSpanData,
13
13
  GuardrailSpanData,
14
14
  HandoffSpanData,
15
+ MCPListToolsSpanData,
15
16
  ResponseSpanData,
16
17
  SpeechGroupSpanData,
17
18
  SpeechSpanData,
@@ -424,3 +425,31 @@ def speech_group_span(
424
425
  parent=parent,
425
426
  disabled=disabled,
426
427
  )
428
+
429
+
430
+ def mcp_tools_span(
431
+ server: str | None = None,
432
+ result: list[str] | None = None,
433
+ span_id: str | None = None,
434
+ parent: Trace | Span[Any] | None = None,
435
+ disabled: bool = False,
436
+ ) -> Span[MCPListToolsSpanData]:
437
+ """Create a new MCP list tools span. The span will not be started automatically, you should
438
+ either do `with mcp_tools_span() ...` or call `span.start()` + `span.finish()` manually.
439
+
440
+ Args:
441
+ server: The name of the MCP server.
442
+ result: The result of the MCP list tools call.
443
+ span_id: The ID of the span. Optional. If not provided, we will generate an ID. We
444
+ recommend using `util.gen_span_id()` to generate a span ID, to guarantee that IDs are
445
+ correctly formatted.
446
+ parent: The parent span or trace. If not provided, we will automatically use the current
447
+ trace/span as the parent.
448
+ disabled: If True, we will return a Span but the Span will not be recorded.
449
+ """
450
+ return GLOBAL_TRACE_PROVIDER.create_span(
451
+ span_data=MCPListToolsSpanData(server=server, result=result),
452
+ span_id=span_id,
453
+ parent=parent,
454
+ disabled=disabled,
455
+ )
@@ -5,6 +5,7 @@ import queue
5
5
  import random
6
6
  import threading
7
7
  import time
8
+ from functools import cached_property
8
9
  from typing import Any
9
10
 
10
11
  import httpx
@@ -50,9 +51,9 @@ class BackendSpanExporter(TracingExporter):
50
51
  base_delay: Base delay (in seconds) for the first backoff.
51
52
  max_delay: Maximum delay (in seconds) for backoff growth.
52
53
  """
53
- self.api_key = api_key or os.environ.get("OPENAI_API_KEY")
54
- self.organization = organization or os.environ.get("OPENAI_ORG_ID")
55
- self.project = project or os.environ.get("OPENAI_PROJECT_ID")
54
+ self._api_key = api_key
55
+ self._organization = organization
56
+ self._project = project
56
57
  self.endpoint = endpoint
57
58
  self.max_retries = max_retries
58
59
  self.base_delay = base_delay
@@ -68,8 +69,22 @@ class BackendSpanExporter(TracingExporter):
68
69
  api_key: The OpenAI API key to use. This is the same key used by the OpenAI Python
69
70
  client.
70
71
  """
72
+ # We're specifically setting the underlying cached property as well
73
+ self._api_key = api_key
71
74
  self.api_key = api_key
72
75
 
76
+ @cached_property
77
+ def api_key(self):
78
+ return self._api_key or os.environ.get("OPENAI_API_KEY")
79
+
80
+ @cached_property
81
+ def organization(self):
82
+ return self._organization or os.environ.get("OPENAI_ORG_ID")
83
+
84
+ @cached_property
85
+ def project(self):
86
+ return self._project or os.environ.get("OPENAI_PROJECT_ID")
87
+
73
88
  def export(self, items: list[Trace | Span[Any]]) -> None:
74
89
  if not items:
75
90
  return
@@ -102,18 +117,22 @@ class BackendSpanExporter(TracingExporter):
102
117
 
103
118
  # If the response is a client error (4xx), we wont retry
104
119
  if 400 <= response.status_code < 500:
105
- logger.error(f"Tracing client error {response.status_code}: {response.text}")
120
+ logger.error(
121
+ f"[non-fatal] Tracing client error {response.status_code}: {response.text}"
122
+ )
106
123
  return
107
124
 
108
125
  # For 5xx or other unexpected codes, treat it as transient and retry
109
- logger.warning(f"Server error {response.status_code}, retrying.")
126
+ logger.warning(
127
+ f"[non-fatal] Tracing: server error {response.status_code}, retrying."
128
+ )
110
129
  except httpx.RequestError as exc:
111
130
  # Network or other I/O error, we'll retry
112
- logger.warning(f"Request failed: {exc}")
131
+ logger.warning(f"[non-fatal] Tracing: request failed: {exc}")
113
132
 
114
133
  # If we reach here, we need to retry or give up
115
134
  if attempt >= self.max_retries:
116
- logger.error("Max retries reached, giving up on this batch.")
135
+ logger.error("[non-fatal] Tracing: max retries reached, giving up on this batch.")
117
136
  return
118
137
 
119
138
  # Exponential backoff + jitter
@@ -49,12 +49,19 @@ class AgentSpanData(SpanData):
49
49
 
50
50
 
51
51
  class FunctionSpanData(SpanData):
52
- __slots__ = ("name", "input", "output")
52
+ __slots__ = ("name", "input", "output", "mcp_data")
53
53
 
54
- def __init__(self, name: str, input: str | None, output: Any | None):
54
+ def __init__(
55
+ self,
56
+ name: str,
57
+ input: str | None,
58
+ output: Any | None,
59
+ mcp_data: dict[str, Any] | None = None,
60
+ ):
55
61
  self.name = name
56
62
  self.input = input
57
63
  self.output = output
64
+ self.mcp_data = mcp_data
58
65
 
59
66
  @property
60
67
  def type(self) -> str:
@@ -66,6 +73,7 @@ class FunctionSpanData(SpanData):
66
73
  "name": self.name,
67
74
  "input": self.input,
68
75
  "output": str(self.output) if self.output else None,
76
+ "mcp_data": self.mcp_data,
69
77
  }
70
78
 
71
79
 
@@ -282,3 +290,25 @@ class SpeechGroupSpanData(SpanData):
282
290
  "type": self.type,
283
291
  "input": self.input,
284
292
  }
293
+
294
+
295
+ class MCPListToolsSpanData(SpanData):
296
+ __slots__ = (
297
+ "server",
298
+ "result",
299
+ )
300
+
301
+ def __init__(self, server: str | None = None, result: list[str] | None = None):
302
+ self.server = server
303
+ self.result = result
304
+
305
+ @property
306
+ def type(self) -> str:
307
+ return "mcp_tools"
308
+
309
+ def export(self) -> dict[str, Any]:
310
+ return {
311
+ "type": self.type,
312
+ "server": self.server,
313
+ "result": self.result,
314
+ }
agents/voice/imports.py CHANGED
@@ -5,7 +5,7 @@ try:
5
5
  except ImportError as _e:
6
6
  raise ImportError(
7
7
  "`numpy` + `websockets` are required to use voice. You can install them via the optional "
8
- "dependency group: `pip install openai-agents[voice]`."
8
+ "dependency group: `pip install 'openai-agents[voice]'`."
9
9
  ) from _e
10
10
 
11
11
  __all__ = ["np", "npt", "websockets"]
@@ -10,9 +10,8 @@ from typing import Any, cast
10
10
 
11
11
  from openai import AsyncOpenAI
12
12
 
13
- from agents.exceptions import AgentsException
14
-
15
13
  from ... import _debug
14
+ from ...exceptions import AgentsException
16
15
  from ...logger import logger
17
16
  from ...tracing import Span, SpanError, TranscriptionSpanData, transcription_span
18
17
  from ..exceptions import STTWebsocketConnectionError
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: openai-agents
3
- Version: 0.0.6
3
+ Version: 0.0.7
4
4
  Summary: OpenAI Agents SDK
5
5
  Project-URL: Homepage, https://github.com/openai/openai-agents-python
6
6
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,11 +19,14 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
19
19
  Classifier: Typing :: Typed
20
20
  Requires-Python: >=3.9
21
21
  Requires-Dist: griffe<2,>=1.5.6
22
+ Requires-Dist: mcp; python_version >= '3.10'
22
23
  Requires-Dist: openai>=1.66.5
23
24
  Requires-Dist: pydantic<3,>=2.10
24
25
  Requires-Dist: requests<3,>=2.0
25
26
  Requires-Dist: types-requests<3,>=2.0
26
27
  Requires-Dist: typing-extensions<5,>=4.12.2
28
+ Provides-Extra: viz
29
+ Requires-Dist: graphviz>=0.17; extra == 'viz'
27
30
  Provides-Extra: voice
28
31
  Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
29
32
  Requires-Dist: websockets<16,>=15.0; extra == 'voice'
@@ -61,7 +64,7 @@ source env/bin/activate
61
64
  pip install openai-agents
62
65
  ```
63
66
 
64
- For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
67
+ For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.
65
68
 
66
69
  ## Hello world example
67
70
 
@@ -1,8 +1,8 @@
1
- agents/__init__.py,sha256=PpYDMZH0h2NyguV6mtEn6aBZlGdwO34wndtP1CKRdl4,6706
1
+ agents/__init__.py,sha256=FRMQBdNZiprYebYm2M89-iZnTeeIgcaDvC37G-gHxfA,6802
2
2
  agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
3
3
  agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
4
- agents/_run_impl.py,sha256=B-YeWxms2vi3SHMSsHPEjif0ZbcpxDetRugo-_mkUVw,31991
5
- agents/agent.py,sha256=ODhpQ74vZfYpk_8vxExJEXBl1dJVsgudXabY9t069qQ,8324
4
+ agents/_run_impl.py,sha256=QVTLbSydSaXWmaUuXLsZXD0Iktu2fDqBeh965cA416g,34155
5
+ agents/agent.py,sha256=-gnR5pORVfjlybmAgyC_sRU_OU8DZCmYR_ATUpNHgd4,9501
6
6
  agents/agent_output.py,sha256=sUlsur0_C2pPokyvspo5gxIkM0PtcNxdbZmeu_6Z4TE,5379
7
7
  agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
8
8
  agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
@@ -13,8 +13,9 @@ agents/items.py,sha256=xCoX-ZcUUs3WHN90_o8PQSnX8jt8oQ2TJPz7k74ooQ4,8182
13
13
  agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
14
14
  agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
15
15
  agents/model_settings.py,sha256=4JOqsLswjdrEszNqNEJ_dYjxUMCyt68hOIdgxlXELw0,2169
16
+ agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
16
17
  agents/result.py,sha256=JOscHoh2EIUY4w-ESO500Z3DnNYq67vtkRrWr70fOr4,8421
17
- agents/run.py,sha256=SI0u7XQ6e3lEP6v1k530rDDcJJbg8K_DcdK2o0leCqI,37129
18
+ agents/run.py,sha256=SHi8PgBIUkNsd5tPy0QKh240bvrDJUQrExNFfV9FPyY,38664
18
19
  agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
19
20
  agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
20
21
  agents/strict_schema.py,sha256=FEyEvF3ZjxIHRLmraBGZyjJjuFiPCZGaCFV22LlwaTQ,5783
@@ -24,21 +25,25 @@ agents/version.py,sha256=bkeg2DaYBS8OnV7R7J6OuF5pNA__0mJ4QZsJjC1DTI0,223
24
25
  agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
25
26
  agents/extensions/handoff_filters.py,sha256=2cXxu1JROez96CpTiGuT9PIuaIrIE8ksP01fX83krKM,1977
26
27
  agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
28
+ agents/extensions/visualization.py,sha256=bHtrkqwapHsp9z3hYfidAJXdhsKnW2KioisQcHRxgzM,4242
29
+ agents/mcp/__init__.py,sha256=x-4ZFiXNyJPn9Nbwcai6neKgonyRJ7by67HxnOLPgrw,359
30
+ agents/mcp/server.py,sha256=qbeFEPg2xiUvNKfUlA8qyfDeFsv2yXAJabLG2GhfExQ,11269
31
+ agents/mcp/util.py,sha256=RY9_j72OYtAHS702v3WaDoh7BbKA63yBmMnyaQ4wcSM,4494
27
32
  agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
33
  agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
29
34
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
30
35
  agents/models/interface.py,sha256=dgIlKyPaCbNRTHXxd6x7OQwJuAelG3F-C19P-aacHWQ,3129
31
- agents/models/openai_chatcompletions.py,sha256=xs2JdEl0taqz3LIRWL8etr88tzpa_UWggAwAQPTyoxQ,39375
36
+ agents/models/openai_chatcompletions.py,sha256=KWeVVZFYO-jUZmBcl9UgR_tpdI0u_413g25jVwHYiro,39375
32
37
  agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
33
- agents/models/openai_responses.py,sha256=4CowZT0wAMflEzDgi6hEidcMq_0zchIm2uX_vV090TM,13386
34
- agents/tracing/__init__.py,sha256=pmbNHEOORyHgufSQpHHT1DcmltFICbN6EIc3VcDwzc0,2708
35
- agents/tracing/create.py,sha256=WFH4qLhhTaHy3hBnnxlJq5PsRedvq-rtOeZkMaE2PTA,16925
38
+ agents/models/openai_responses.py,sha256=Vq6TjvWNffmNtWjl2Mmb_H0fo2XHwah3l-kVfd_rgPQ,13492
39
+ agents/tracing/__init__.py,sha256=-hJeEiNvgyQdEXpFTrr_qu_XYREvIrF5KyePDtovSak,2804
40
+ agents/tracing/create.py,sha256=kkMf2pp5Te20YkiSvf3Xj3J9qMibQCjEAxZs1Lr_kTE,18124
36
41
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
37
42
  agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
38
- agents/tracing/processors.py,sha256=z3NAwo4ZG8KloEIq7ihIadxMfduL_cECY5XCgOaK1H8,9595
43
+ agents/tracing/processors.py,sha256=XBCNY9J89NvzPkgDD5D3o1ItMIL8ALzwpwM_9oe8wbo,10135
39
44
  agents/tracing/scope.py,sha256=84gOESqFfR2E_XCZsT11DLyR-3UTyqxHrfBBjH1Ic44,1373
40
45
  agents/tracing/setup.py,sha256=1wRMIVnsMOx5nWWnldqbTXg44a7-ABcC0jZK4q4I-S8,6729
41
- agents/tracing/span_data.py,sha256=Aic98vMM3Os5IEHO8e4xB8zEHmVsENrEqeTsCDt578I,7005
46
+ agents/tracing/span_data.py,sha256=I5TSTnXWa1c71wL2Zr7ITaFwTVnCJiTKkpCqy88juJY,7657
42
47
  agents/tracing/spans.py,sha256=6vVzocGMsdgIma1ksqkBZmhar91xj4RpgcpUC3iibqg,6606
43
48
  agents/tracing/traces.py,sha256=G5LlECSK-DBRFP-bjT8maZjBQulz6SaHILYauUVlfq8,4775
44
49
  agents/tracing/util.py,sha256=x5tAw2YBKggwQ8rH5NG8GiJrFOnPErlJPk7oicBO1dA,501
@@ -52,7 +57,7 @@ agents/util/_types.py,sha256=8KxYfCw0gYSMWcQmacJoc3Q7Lc46LmT-AWvhF10KJ-E,160
52
57
  agents/voice/__init__.py,sha256=aEw6GdORLNIXHqIvKFc-5PFZr3XMala3jv4AoeLKt4Q,1507
53
58
  agents/voice/events.py,sha256=4aPAZC0__ocgmg_mcX4c1zv9Go-YdKIVItQ2kYgtye0,1216
54
59
  agents/voice/exceptions.py,sha256=QcyfvaUTBe4gxbFP82oDSa_puzZ4Z4O4k01B8pAHnK0,233
55
- agents/voice/imports.py,sha256=ANmL2vDcr8vdTQD70-vc2MJYEXUAxp-p0aZgzr2xbZ0,346
60
+ agents/voice/imports.py,sha256=VaE5I8aJTP9Zl_0-y9dx1UcAP7KPRDMaikFK2jFnn8s,348
56
61
  agents/voice/input.py,sha256=FSbdHMIdLVKX4vYcmf3WBJ5dAlh5zMDjCAuGfXOZTQs,2910
57
62
  agents/voice/model.py,sha256=4ptWkKPfUGbVsg8u10KUIl64iNhQX9rx7Y0D_ZcFlv0,5893
58
63
  agents/voice/pipeline.py,sha256=5LKTTDytQt4QlZzVKgbB9x3X2zA-TeR94FTi15vIUc0,6259
@@ -62,9 +67,9 @@ agents/voice/utils.py,sha256=MrRomVqBLXeMAOue-Itwh0Fc5HjB0QCMKXclqFPhrbI,1309
62
67
  agents/voice/workflow.py,sha256=lef1NulzNHWFiiPUESGeb_6WhD6CouP1W5NOUAYFewk,3527
63
68
  agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
64
69
  agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
65
- agents/voice/models/openai_stt.py,sha256=ApxBvvDjpnhU9OjwnQDxg0adbnrNGIUZ3wHiHP4bh3I,16887
70
+ agents/voice/models/openai_stt.py,sha256=rRsldkvkPhH4T0waX1dhccEqIwmPYh-teK_LRvBgiNI,16882
66
71
  agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
67
- openai_agents-0.0.6.dist-info/METADATA,sha256=0WGIjgvGUoJmKLDMM4u35fbxhpn7a4MH5bwky9MkExA,8010
68
- openai_agents-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
69
- openai_agents-0.0.6.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
70
- openai_agents-0.0.6.dist-info/RECORD,,
72
+ openai_agents-0.0.7.dist-info/METADATA,sha256=576_zSIWkxKLdJ3fNb11imj2acy3Vy_QZb_PBgycnM8,8123
73
+ openai_agents-0.0.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
74
+ openai_agents-0.0.7.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
75
+ openai_agents-0.0.7.dist-info/RECORD,,