openai-agents 0.2.9__py3-none-any.whl → 0.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

agents/run.py CHANGED
@@ -2,10 +2,14 @@ from __future__ import annotations
 
 import asyncio
 import inspect
+import os
 from dataclasses import dataclass, field
-from typing import Any, Callable, Generic, cast
+from typing import Any, Callable, Generic, cast, get_args
 
-from openai.types.responses import ResponseCompletedEvent
+from openai.types.responses import (
+    ResponseCompletedEvent,
+    ResponseOutputItemAddedEvent,
+)
 from openai.types.responses.response_prompt_param import (
     ResponsePromptParam,
 )
@@ -40,7 +44,14 @@ from .guardrail import (
     OutputGuardrailResult,
 )
 from .handoffs import Handoff, HandoffInputFilter, handoff
-from .items import ItemHelpers, ModelResponse, RunItem, TResponseInputItem
+from .items import (
+    ItemHelpers,
+    ModelResponse,
+    RunItem,
+    ToolCallItem,
+    ToolCallItemTypes,
+    TResponseInputItem,
+)
 from .lifecycle import RunHooks
 from .logger import logger
 from .memory import Session
@@ -49,7 +60,7 @@ from .models.interface import Model, ModelProvider
 from .models.multi_provider import MultiProvider
 from .result import RunResult, RunResultStreaming
 from .run_context import RunContextWrapper, TContext
-from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent
+from .stream_events import AgentUpdatedStreamEvent, RawResponsesStreamEvent, RunItemStreamEvent
 from .tool import Tool
 from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
@@ -81,6 +92,12 @@ def get_default_agent_runner() -> AgentRunner:
     return DEFAULT_AGENT_RUNNER
 
 
+def _default_trace_include_sensitive_data() -> bool:
+    """Returns the default value for trace_include_sensitive_data based on environment variable."""
+    val = os.getenv("OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA", "true")
+    return val.strip().lower() in ("1", "true", "yes", "on")
+
+
 @dataclass
 class ModelInputData:
     """Container for the data that will be sent to the model."""
@@ -135,7 +152,9 @@ class RunConfig:
     """Whether tracing is disabled for the agent run. If disabled, we will not trace the agent run.
     """
 
-    trace_include_sensitive_data: bool = True
+    trace_include_sensitive_data: bool = field(
+        default_factory=_default_trace_include_sensitive_data
+    )
     """Whether we include potentially sensitive data (for example: inputs/outputs of tool calls or
     LLM generations) in traces. If False, we'll still create spans for these events, but the
     sensitive data will not be included.
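
The default for this field is now read from the OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA environment variable when the RunConfig instance is created. A minimal sketch of how that could be exercised (assuming the SDK is imported as agents; an explicit argument still overrides the environment):

    import os

    # Set the variable before RunConfig() is constructed; values outside
    # "1"/"true"/"yes"/"on" (e.g. "false") disable sensitive-data tracing.
    os.environ["OPENAI_AGENTS_TRACE_INCLUDE_SENSITIVE_DATA"] = "false"

    from agents import RunConfig

    config = RunConfig()
    print(config.trace_include_sensitive_data)  # False

    # Passing the field explicitly bypasses the environment-derived default.
    config = RunConfig(trace_include_sensitive_data=True)
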
@@ -189,6 +208,9 @@ class RunOptions(TypedDict, Generic[TContext]):
     previous_response_id: NotRequired[str | None]
     """The ID of the previous response, if any."""
 
+    conversation_id: NotRequired[str | None]
+    """The ID of the stored conversation, if any."""
+
     session: NotRequired[Session | None]
     """The session for the run."""
 
@@ -205,6 +227,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow starting at the given agent. The agent will run in a loop until a final
@@ -229,6 +252,13 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The conversation ID (https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
+                If provided, the conversation will be used to read and write items.
+                Every agent will have access to the conversation history so far,
+                and its output items will be written to the conversation.
+                We recommend only using this if you are exclusively using OpenAI models;
+                other model providers don't write to the Conversation object,
+                so you'll end up having partial conversations stored.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
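
A hedged sketch of the new parameter in use (the conversation ID below is a placeholder; in practice it would come from a previously created Conversations API conversation):

    import asyncio

    from agents import Agent, Runner

    async def main() -> None:
        agent = Agent(name="Assistant", instructions="Reply very concisely.")

        # Assumption: "conv_123" refers to an existing stored conversation;
        # each turn reads from it and writes the agent's output items back to it.
        result = await Runner.run(
            agent,
            "What city is the Golden Gate Bridge in?",
            conversation_id="conv_123",
        )
        print(result.final_output)

    asyncio.run(main())
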
@@ -242,6 +272,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -256,6 +287,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResult:
         """Run a workflow synchronously, starting at the given agent. Note that this just wraps the
@@ -283,6 +315,7 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A run result containing all the inputs, guardrail results and the output of the last
             agent. Agents may perform handoffs, so we don't know the specific type of the output.
@@ -296,6 +329,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -309,6 +343,7 @@ class Runner:
         hooks: RunHooks[TContext] | None = None,
         run_config: RunConfig | None = None,
         previous_response_id: str | None = None,
+        conversation_id: str | None = None,
         session: Session | None = None,
     ) -> RunResultStreaming:
         """Run a workflow starting at the given agent in streaming mode. The returned result object
@@ -334,6 +369,7 @@ class Runner:
             run_config: Global settings for the entire agent run.
             previous_response_id: The ID of the previous response, if using OpenAI models via the
                 Responses API, this allows you to skip passing in input from the previous turn.
+            conversation_id: The ID of the stored conversation, if any.
         Returns:
             A result object that contains data about the run, as well as a method to stream events.
         """
@@ -346,6 +382,7 @@ class Runner:
             hooks=hooks,
             run_config=run_config,
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             session=session,
         )
 
@@ -367,6 +404,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
         if hooks is None:
             hooks = RunHooks[Any]()
@@ -459,6 +497,7 @@ class AgentRunner:
                             should_run_agent_start_hooks=should_run_agent_start_hooks,
                             tool_use_tracker=tool_use_tracker,
                             previous_response_id=previous_response_id,
+                            conversation_id=conversation_id,
                         ),
                     )
                 else:
@@ -473,6 +512,7 @@ class AgentRunner:
                         should_run_agent_start_hooks=should_run_agent_start_hooks,
                         tool_use_tracker=tool_use_tracker,
                         previous_response_id=previous_response_id,
+                        conversation_id=conversation_id,
                     )
                     should_run_agent_start_hooks = False
 
@@ -539,6 +579,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         return asyncio.get_event_loop().run_until_complete(
@@ -551,6 +592,7 @@ class AgentRunner:
                 hooks=hooks,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
             )
         )
 
@@ -565,6 +607,7 @@ class AgentRunner:
         hooks = kwargs.get("hooks")
         run_config = kwargs.get("run_config")
         previous_response_id = kwargs.get("previous_response_id")
+        conversation_id = kwargs.get("conversation_id")
         session = kwargs.get("session")
 
         if hooks is None:
@@ -619,6 +662,7 @@ class AgentRunner:
                 context_wrapper=context_wrapper,
                 run_config=run_config,
                 previous_response_id=previous_response_id,
+                conversation_id=conversation_id,
                 session=session,
             )
         )
@@ -719,6 +763,7 @@ class AgentRunner:
         context_wrapper: RunContextWrapper[TContext],
         run_config: RunConfig,
         previous_response_id: str | None,
+        conversation_id: str | None,
         session: Session | None,
     ):
         if streamed_result.trace:
@@ -802,6 +847,7 @@ class AgentRunner:
                         tool_use_tracker,
                         all_tools,
                         previous_response_id,
+                        conversation_id,
                     )
                     should_run_agent_start_hooks = False
 
@@ -904,7 +950,10 @@ class AgentRunner:
         tool_use_tracker: AgentToolUseTracker,
         all_tools: list[Tool],
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
+        emitted_tool_call_ids: set[str] = set()
+
         if should_run_agent_start_hooks:
             await asyncio.gather(
                 hooks.on_agent_start(context_wrapper, agent),
@@ -962,6 +1011,7 @@ class AgentRunner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         ):
             if isinstance(event, ResponseCompletedEvent):
@@ -984,6 +1034,25 @@ class AgentRunner:
                 )
                 context_wrapper.usage.add(usage)
 
+            if isinstance(event, ResponseOutputItemAddedEvent):
+                output_item = event.item
+
+                if isinstance(output_item, _TOOL_CALL_TYPES):
+                    call_id: str | None = getattr(
+                        output_item, "call_id", getattr(output_item, "id", None)
+                    )
+
+                    if call_id and call_id not in emitted_tool_call_ids:
+                        emitted_tool_call_ids.add(call_id)
+
+                        tool_item = ToolCallItem(
+                            raw_item=cast(ToolCallItemTypes, output_item),
+                            agent=agent,
+                        )
+                        streamed_result._event_queue.put_nowait(
+                            RunItemStreamEvent(item=tool_item, name="tool_called")
+                        )
+
             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))
 
             # Call hook just after the model response is finalized.
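
The effect of this block is that a "tool_called" run item is pushed onto the stream as soon as the tool call appears among the raw response events, instead of only after the turn completes. A rough sketch of observing it from the consumer side (agent and tool definitions are assumed to exist elsewhere):

    from agents import Agent, Runner
    from agents.stream_events import RunItemStreamEvent

    async def watch_tool_calls(agent: Agent, prompt: str) -> None:
        result = Runner.run_streamed(agent, prompt)
        async for event in result.stream_events():
            # Tool calls now surface here while the model is still streaming output.
            if isinstance(event, RunItemStreamEvent) and event.name == "tool_called":
                print("tool call started:", event.item.raw_item)
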
@@ -995,9 +1064,10 @@ class AgentRunner:
             raise ModelBehaviorError("Model did not produce a final response!")
 
         # 3. Now, we can process the turn as we do in the non-streaming case
-        return await cls._get_single_step_result_from_streamed_response(
+        single_step_result = await cls._get_single_step_result_from_response(
             agent=agent,
-            streamed_result=streamed_result,
+            original_input=streamed_result.input,
+            pre_step_items=streamed_result.new_items,
             new_response=final_response,
             output_schema=output_schema,
             all_tools=all_tools,
@@ -1008,6 +1078,34 @@ class AgentRunner:
             tool_use_tracker=tool_use_tracker,
         )
 
+        if emitted_tool_call_ids:
+            import dataclasses as _dc
+
+            filtered_items = [
+                item
+                for item in single_step_result.new_step_items
+                if not (
+                    isinstance(item, ToolCallItem)
+                    and (
+                        call_id := getattr(
+                            item.raw_item, "call_id", getattr(item.raw_item, "id", None)
+                        )
+                    )
+                    and call_id in emitted_tool_call_ids
+                )
+            ]
+
+            single_step_result_filtered = _dc.replace(
+                single_step_result, new_step_items=filtered_items
+            )
+
+            RunImpl.stream_step_result_to_queue(
+                single_step_result_filtered, streamed_result._event_queue
+            )
+        else:
+            RunImpl.stream_step_result_to_queue(single_step_result, streamed_result._event_queue)
+        return single_step_result
+
     @classmethod
     async def _run_single_turn(
         cls,
@@ -1022,6 +1120,7 @@ class AgentRunner:
         should_run_agent_start_hooks: bool,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
     ) -> SingleStepResult:
         # Ensure we run the hooks before anything else
         if should_run_agent_start_hooks:
@@ -1055,6 +1154,7 @@ class AgentRunner:
             run_config,
             tool_use_tracker,
             previous_response_id,
+            conversation_id,
             prompt_config,
         )
 
@@ -1249,6 +1349,7 @@ class AgentRunner:
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
         previous_response_id: str | None,
+        conversation_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
         # Allow user to modify model input right before the call, if configured
@@ -1283,6 +1384,7 @@ class AgentRunner:
                 run_config.tracing_disabled, run_config.trace_include_sensitive_data
             ),
             previous_response_id=previous_response_id,
+            conversation_id=conversation_id,
             prompt=prompt_config,
         )
         # If the agent has hooks, we need to call them after the LLM call
@@ -1397,6 +1499,7 @@ class AgentRunner:
 
 
 DEFAULT_AGENT_RUNNER = AgentRunner()
+_TOOL_CALL_TYPES: tuple[type, ...] = get_args(ToolCallItemTypes)
 
 
 def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
agents/tool.py CHANGED
@@ -12,6 +12,7 @@ from openai.types.responses.response_computer_tool_call import (
     ResponseComputerToolCall,
 )
 from openai.types.responses.response_output_item import LocalShellCall, McpApprovalRequest
+from openai.types.responses.tool import WebSearchToolFilters
 from openai.types.responses.tool_param import CodeInterpreter, ImageGeneration, Mcp
 from openai.types.responses.web_search_tool_param import UserLocation
 from pydantic import ValidationError
@@ -133,6 +134,9 @@ class WebSearchTool:
     user_location: UserLocation | None = None
     """Optional location for the search. Lets you customize results to be relevant to a location."""
 
+    filters: WebSearchToolFilters | None = None
+    """A filter to apply based on file attributes."""
+
     search_context_size: Literal["low", "medium", "high"] = "medium"
     """The amount of context to use for the search."""
 
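
A hedged sketch of the new field in use (allowed_domains is assumed to be the filter attribute exposed by the installed openai SDK version; the domain list is illustrative):

    from agents import Agent, WebSearchTool
    from openai.types.responses.tool import WebSearchToolFilters

    agent = Agent(
        name="Research assistant",
        tools=[
            WebSearchTool(
                # Restrict web search results to the listed domains.
                filters=WebSearchToolFilters(allowed_domains=["openai.github.io"]),
                search_context_size="low",
            )
        ],
    )
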
agents/tracing/processors.py CHANGED
@@ -70,8 +70,8 @@ class BackendSpanExporter(TracingExporter):
         client.
         """
         # Clear the cached property if it exists
-        if 'api_key' in self.__dict__:
-            del self.__dict__['api_key']
+        if "api_key" in self.__dict__:
+            del self.__dict__["api_key"]
 
         # Update the private attribute
         self._api_key = api_key
openai_agents-0.2.9.dist-info/METADATA → openai_agents-0.2.10.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.9
+Version: 0.2.10
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -21,7 +21,7 @@ Classifier: Typing :: Typed
 Requires-Python: >=3.9
 Requires-Dist: griffe<2,>=1.5.6
 Requires-Dist: mcp<2,>=1.11.0; python_version >= '3.10'
-Requires-Dist: openai<2,>=1.99.6
+Requires-Dist: openai<2,>=1.102.0
 Requires-Dist: pydantic<3,>=2.10
 Requires-Dist: requests<3,>=2.0
 Requires-Dist: types-requests<3,>=2.0
openai_agents-0.2.9.dist-info/RECORD → openai_agents-0.2.10.dist-info/RECORD CHANGED
@@ -1,27 +1,27 @@
-agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
+agents/__init__.py,sha256=Kr6-8HItTfnz5HFS9x7PVD99v_Lu9VxlE27UjX3BH8M,8000
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=bd3zWFgNlOye92SQSNrB1OZCvgOkabnup7SEYuayijE,45051
+agents/_run_impl.py,sha256=cEGeEQDc3NCNGwwfpKWnTCXgxyznSmWgDpQkRq-6JgM,46461
 agents/agent.py,sha256=IINVHZyO5iFTN3rf94YB9Hv3hUIOouVUFt9cagSJwvQ,19120
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
 agents/function_schema.py,sha256=jXdpjl90lODRzdoOR_kUmEbfA3T8Dfa7kkSV8xWQDDo,13558
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
-agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
+agents/handoffs.py,sha256=kDTM3nj3E_0khiJPMJAIN00gektMTRNbaYSbc5ZCnBM,11411
 agents/items.py,sha256=aHo7KTXZLBcHSrKHWDaBB6L7XmBCAIekG5e0xOIhkyM,9828
 agents/lifecycle.py,sha256=hGsqzumOSaal6oAjTqTfvBXl-ShAOkC42sthJigB5Fg,4308
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=rqoIZe_sGm6_0hCCZlsVE29qln8yOmZr0dkpiV_cEpQ,6643
+agents/model_settings.py,sha256=7Ul-Xg-aNVXIbK6V4Rm2t5EEfNR0tsy_A9ac_wFqLLk,6828
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=Q8nu906IwmgIUpMbxCXnAGYeFDbw1KspSh9a74PJGGc,56994
+agents/run.py,sha256=sEqyIa5MAbOFOmDv5qp2BrJN8TizwiyiSo_q19Ca2gc,61298
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=poPA6wvHMpcbDW5VwXCbVLDDz5-6-c5ahDxb8xXMync,16845
+agents/tool.py,sha256=jOmz-EMmROCERdGgOQaJyvY6ZyyNz93ByBc4y-loisQ,17009
 agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
@@ -30,27 +30,29 @@ agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrS
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
 agents/extensions/memory/__init__.py,sha256=Yionp3G3pj53zenHPZUHhR9aIDVEpu0d_PcvdytBRes,534
-agents/extensions/memory/sqlalchemy_session.py,sha256=EkzgCiagfWpjrFbzZCaJC50DUN3RLteT85YueNt6KY8,10711
+agents/extensions/memory/sqlalchemy_session.py,sha256=H0aykdB4lUikmzKgwWQqI1PSYZBvHA4TDnaj9rP4HDI,11583
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=PF2xnWQRAaTVE38Q2TSFva17pz3McfUE_sZISeREHDw,15707
+agents/extensions/models/litellm_model.py,sha256=cDLOcZVojLx-CZbipVJ5dn4RnXdegrXSPROVR-5Lxdc,15883
 agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
 agents/mcp/server.py,sha256=4T58xiWCLiCm6JoUy_3jYWz5A8ZNsHiV1hIxjahoedU,26624
 agents/mcp/util.py,sha256=YVdPst1wWkTwbeshs-FYbr_MtrYJwO_4NzhSwj5aE5c,8239
-agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
-agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
+agents/memory/__init__.py,sha256=H6Nh01MRlinnEFb2Ca0ezqerIzAHuJ1izeXeYhkPrUM,255
+agents/memory/openai_conversations_session.py,sha256=Xiod8qk-qX4-348FvLLIG35fy0m_JiQkNqNF_woc_a8,3341
+agents/memory/session.py,sha256=pyyFn3r07ydhoy7vn1e_ky9OUa1a_7DKoEo8aZz2d3s,3038
+agents/memory/sqlite_session.py,sha256=6HGzSL70mQgutITIPZUC2x2Qtj6U4hXiZTceu3Da7TM,10130
 agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
 agents/models/chatcmpl_converter.py,sha256=fZHui5V0KwTr27L_Io-4iQxPXr0ZoEMOv1_kJNxW-y8,20320
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
-agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
+agents/models/chatcmpl_stream_handler.py,sha256=LEaQ6N9AgzOSczo3fTrv-ckkb1guBGiuNeHBAaoinCE,27429
 agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
-agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
+agents/models/interface.py,sha256=-AFUHC8iRuGZmtQwguDw4s-M4OPL2y2mct4TAmWvVrU,4057
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
-agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
+agents/models/openai_chatcompletions.py,sha256=abwQj8CqUHa2KB4NC_DVeJhZOygyAnr4N4maZtYhchU,13321
 agents/models/openai_provider.py,sha256=vBu3mlgDBrI_cZVVmfnWBHoPlJlsmld3lfdX8sNQQAM,3624
-agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
+agents/models/openai_responses.py,sha256=XFR8gl25V4ZiyCIqExU3Xk4Lxtxh-afcFSuVJCeYwII,18255
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
@@ -63,14 +65,14 @@ agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
 agents/realtime/model_events.py,sha256=YixBKmzlCrhtzCosj0SysyZpyHbZ90455gDr4Kr7Ey8,4338
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=zwbyy3dkP4jmacQE-kVjFVbRWzWAHQEnf5VqQt7BZc0,30963
+agents/realtime/openai_realtime.py,sha256=cVOxBYNwqn38WTSVK-A2E2pHrw0bGSmren2q7670pPU,31361
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
 agents/realtime/session.py,sha256=hPIxQSsVh5whkgYnEpxk_AgvG3suuDVnpPyqVoPJBRM,26822
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
 agents/tracing/processor_interface.py,sha256=e1mWcIAoQFHID1BapcrAZ6MxZg98bPVYgbOPclVoCXc,1660
-agents/tracing/processors.py,sha256=IKZ_dfQmcs8OaMqNbzWRtimY4nm1xfNRjVguWl6I8SY,11432
+agents/tracing/processors.py,sha256=yAGJ2aIlhU5kvdDLBx1R9_Qsbuq3296b0743GYbSnYM,11432
 agents/tracing/provider.py,sha256=a8bOZtBUih13Gjq8OtyIcx3AWJmCErc43gqPrccx_5k,10098
 agents/tracing/scope.py,sha256=u17_m8RPpGvbHrTkaO_kDi5ROBWhfOAIgBe7suiaRD4,1445
 agents/tracing/setup.py,sha256=2h9TH1GAKcXKM1U99dOKKR3XlHp8JKzh2JG3DQPKyhY,612
@@ -100,7 +102,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.9.dist-info/METADATA,sha256=oooDN4gwI_UfIxMfr9-uW4KPGpWhyazoNStz43iBD3Y,12379
-openai_agents-0.2.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-openai_agents-0.2.9.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
-openai_agents-0.2.9.dist-info/RECORD,,
+openai_agents-0.2.10.dist-info/METADATA,sha256=1imjcLQahGjZnwblAnDOdGVUJBXcLkG-3IG1xYlAEfw,12381
+openai_agents-0.2.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.10.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.10.dist-info/RECORD,,