openai-agents 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of openai-agents might be problematic.
- agents/_run_impl.py +4 -1
- agents/agent.py +36 -4
- agents/extensions/memory/__init__.py +15 -0
- agents/extensions/memory/sqlalchemy_session.py +298 -0
- agents/extensions/models/litellm_model.py +4 -2
- agents/extensions/models/litellm_provider.py +3 -1
- agents/function_schema.py +2 -2
- agents/items.py +1 -2
- agents/lifecycle.py +40 -1
- agents/mcp/server.py +59 -8
- agents/model_settings.py +4 -1
- agents/models/__init__.py +13 -0
- agents/models/chatcmpl_converter.py +5 -0
- agents/models/default_models.py +58 -0
- agents/models/openai_provider.py +3 -1
- agents/realtime/config.py +3 -0
- agents/realtime/events.py +11 -0
- agents/realtime/model_events.py +10 -0
- agents/realtime/openai_realtime.py +31 -5
- agents/realtime/session.py +61 -2
- agents/repl.py +7 -3
- agents/run.py +127 -11
- agents/tool.py +5 -1
- {openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/METADATA +15 -13
- {openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/RECORD +27 -24
- {openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/WHEEL +0 -0
- {openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/licenses/LICENSE +0 -0
agents/run.py
CHANGED
@@ -1,10 +1,9 @@
 from __future__ import annotations

 import asyncio
-import copy
 import inspect
 from dataclasses import dataclass, field
-from typing import Any, Generic, cast
+from typing import Any, Callable, Generic, cast

 from openai.types.responses import ResponseCompletedEvent
 from openai.types.responses.response_prompt_param import (
@@ -56,6 +55,7 @@ from .tracing import Span, SpanError, agent_span, get_current_trace, trace
 from .tracing.span_data import AgentSpanData
 from .usage import Usage
 from .util import _coro, _error_tracing
+from .util._types import MaybeAwaitable

 DEFAULT_MAX_TURNS = 10

@@ -81,6 +81,27 @@ def get_default_agent_runner() -> AgentRunner:
     return DEFAULT_AGENT_RUNNER


+@dataclass
+class ModelInputData:
+    """Container for the data that will be sent to the model."""
+
+    input: list[TResponseInputItem]
+    instructions: str | None
+
+
+@dataclass
+class CallModelData(Generic[TContext]):
+    """Data passed to `RunConfig.call_model_input_filter` prior to model call."""
+
+    model_data: ModelInputData
+    agent: Agent[TContext]
+    context: TContext | None
+
+
+# Type alias for the optional input filter callback
+CallModelInputFilter = Callable[[CallModelData[Any]], MaybeAwaitable[ModelInputData]]
+
+
 @dataclass
 class RunConfig:
     """Configures settings for the entire agent run."""
@@ -139,6 +160,16 @@ class RunConfig:
     An optional dictionary of additional metadata to include with the trace.
     """

+    call_model_input_filter: CallModelInputFilter | None = None
+    """
+    Optional callback that is invoked immediately before calling the model. It receives the current
+    agent, context and the model input (instructions and input items), and must return a possibly
+    modified `ModelInputData` to use for the model call.
+
+    This allows you to edit the input sent to the model e.g. to stay within a token limit.
+    For example, you can use this to add a system prompt to the input.
+    """
+

 class RunOptions(TypedDict, Generic[TContext]):
     """Arguments for ``AgentRunner`` methods."""
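Taken together, the additions above define the new pre-model-call filter: `ModelInputData` carries the instructions and input items, `CallModelData` wraps them with the agent and run context, and `RunConfig.call_model_input_filter` accepts a callable of the new `CallModelInputFilter` type. A minimal sketch of how such a filter might be wired up (the trimming logic and the agent definition are illustrative, not part of this diff):

```python
from agents import Agent, RunConfig, Runner
from agents.run import CallModelData, ModelInputData


def drop_old_turns(data: CallModelData) -> ModelInputData:
    # Keep only the most recent items to stay under a rough input budget.
    trimmed = data.model_data.input[-20:]
    return ModelInputData(input=trimmed, instructions=data.model_data.instructions)


agent = Agent(name="Assistant", instructions="Be concise.")
result = Runner.run_sync(
    agent,
    "Hello!",
    run_config=RunConfig(call_model_input_filter=drop_old_turns),
)
```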
@@ -355,7 +386,7 @@ class AgentRunner:
             disabled=run_config.tracing_disabled,
         ):
             current_turn = 0
-            original_input: str | list[TResponseInputItem] =
+            original_input: str | list[TResponseInputItem] = _copy_str_or_list(prepared_input)
             generated_items: list[RunItem] = []
             model_responses: list[ModelResponse] = []

@@ -414,7 +445,7 @@ class AgentRunner:
                             starting_agent,
                             starting_agent.input_guardrails
                             + (run_config.input_guardrails or []),
-
+                            _copy_str_or_list(prepared_input),
                             context_wrapper,
                         ),
                         self._run_single_turn(
@@ -562,7 +593,7 @@ class AgentRunner:
         )

         streamed_result = RunResultStreaming(
-            input=
+            input=_copy_str_or_list(input),
             new_items=[],
             current_agent=starting_agent,
             raw_responses=[],
@@ -593,6 +624,47 @@ class AgentRunner:
         )
         return streamed_result

+    @classmethod
+    async def _maybe_filter_model_input(
+        cls,
+        *,
+        agent: Agent[TContext],
+        run_config: RunConfig,
+        context_wrapper: RunContextWrapper[TContext],
+        input_items: list[TResponseInputItem],
+        system_instructions: str | None,
+    ) -> ModelInputData:
+        """Apply optional call_model_input_filter to modify model input.
+
+        Returns a `ModelInputData` that will be sent to the model.
+        """
+        effective_instructions = system_instructions
+        effective_input: list[TResponseInputItem] = input_items
+
+        if run_config.call_model_input_filter is None:
+            return ModelInputData(input=effective_input, instructions=effective_instructions)
+
+        try:
+            model_input = ModelInputData(
+                input=effective_input.copy(),
+                instructions=effective_instructions,
+            )
+            filter_payload: CallModelData[TContext] = CallModelData(
+                model_data=model_input,
+                agent=agent,
+                context=context_wrapper.context,
+            )
+            maybe_updated = run_config.call_model_input_filter(filter_payload)
+            updated = await maybe_updated if inspect.isawaitable(maybe_updated) else maybe_updated
+            if not isinstance(updated, ModelInputData):
+                raise UserError("call_model_input_filter must return a ModelInputData instance")
+            return updated
+        except Exception as e:
+            _error_tracing.attach_error_to_current_span(
+                SpanError(message="Error in call_model_input_filter", data={"error": str(e)})
+            )
+            raise
+
     @classmethod
     async def _run_input_guardrails_with_queue(
         cls,
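Note that `_maybe_filter_model_input` accepts either a plain `ModelInputData` or an awaitable of one (the `CallModelInputFilter` alias uses `MaybeAwaitable`, and the code checks `inspect.isawaitable`), and anything other than a `ModelInputData` raises `UserError`. So an async filter works unchanged; a minimal sketch, with the redaction step left as a placeholder:

```python
from agents.run import CallModelData, ModelInputData


async def redact_then_send(data: CallModelData) -> ModelInputData:
    # Placeholder for an async step, e.g. awaiting an external redaction service.
    cleaned_items = list(data.model_data.input)
    return ModelInputData(input=cleaned_items, instructions=data.model_data.instructions)
```

It is registered through `RunConfig(call_model_input_filter=redact_then_send)` exactly like a synchronous filter.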
@@ -713,7 +785,7 @@ class AgentRunner:
                 cls._run_input_guardrails_with_queue(
                     starting_agent,
                     starting_agent.input_guardrails + (run_config.input_guardrails or []),
-
+                    ItemHelpers.input_to_new_input_list(prepared_input),
                     context_wrapper,
                     streamed_result,
                     current_span,
@@ -863,10 +935,25 @@ class AgentRunner:
         input = ItemHelpers.input_to_new_input_list(streamed_result.input)
         input.extend([item.to_input_item() for item in streamed_result.new_items])

+        # THIS IS THE RESOLVED CONFLICT BLOCK
+        filtered = await cls._maybe_filter_model_input(
+            agent=agent,
+            run_config=run_config,
+            context_wrapper=context_wrapper,
+            input_items=input,
+            system_instructions=system_prompt,
+        )
+
+        # Call hook just before the model is invoked, with the correct system_prompt.
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper, agent, filtered.instructions, filtered.input
+            )
+
         # 1. Stream the output events
         async for event in model.stream_response(
-
-            input,
+            filtered.instructions,
+            filtered.input,
             model_settings,
             all_tools,
             output_schema,
@@ -899,6 +986,10 @@ class AgentRunner:

             streamed_result._event_queue.put_nowait(RawResponsesStreamEvent(data=event))

+        # Call hook just after the model response is finalized.
+        if agent.hooks and final_response is not None:
+            await agent.hooks.on_llm_end(context_wrapper, agent, final_response)
+
         # 2. At this point, the streaming is complete for this turn of the agent loop.
         if not final_response:
             raise ModelBehaviorError("Model did not produce a final response!")
@@ -1034,7 +1125,6 @@ class AgentRunner:
         run_config: RunConfig,
         tool_use_tracker: AgentToolUseTracker,
     ) -> SingleStepResult:
-
         original_input = streamed_result.input
         pre_step_items = streamed_result.new_items
         event_queue = streamed_result._event_queue
@@ -1161,13 +1251,30 @@ class AgentRunner:
         previous_response_id: str | None,
         prompt_config: ResponsePromptParam | None,
     ) -> ModelResponse:
+        # Allow user to modify model input right before the call, if configured
+        filtered = await cls._maybe_filter_model_input(
+            agent=agent,
+            run_config=run_config,
+            context_wrapper=context_wrapper,
+            input_items=input,
+            system_instructions=system_prompt,
+        )
+
         model = cls._get_model(agent, run_config)
         model_settings = agent.model_settings.resolve(run_config.model_settings)
         model_settings = RunImpl.maybe_reset_tool_choice(agent, tool_use_tracker, model_settings)
+        # If the agent has hooks, we need to call them before and after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_start(
+                context_wrapper,
+                agent,
+                filtered.instructions,  # Use filtered instructions
+                filtered.input,  # Use filtered input
+            )

         new_response = await model.get_response(
-            system_instructions=
-            input=input,
+            system_instructions=filtered.instructions,
+            input=filtered.input,
             model_settings=model_settings,
             tools=all_tools,
             output_schema=output_schema,
@@ -1178,6 +1285,9 @@ class AgentRunner:
             previous_response_id=previous_response_id,
             prompt=prompt_config,
         )
+        # If the agent has hooks, we need to call them after the LLM call
+        if agent.hooks:
+            await agent.hooks.on_llm_end(context_wrapper, agent, new_response)

         context_wrapper.usage.add(new_response.usage)

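Both the streamed and non-streamed paths now invoke `agent.hooks.on_llm_start(...)` immediately before the model call and `agent.hooks.on_llm_end(...)` immediately after it (these hooks correspond to the `agents/lifecycle.py` addition in this release). A sketch of a hooks subclass whose method signatures are inferred from the call sites above; check `agents/lifecycle.py` for the authoritative signatures:

```python
from __future__ import annotations

from typing import Any

from agents import Agent
from agents.items import ModelResponse, TResponseInputItem
from agents.lifecycle import AgentHooks
from agents.run_context import RunContextWrapper


class LoggingHooks(AgentHooks[Any]):
    async def on_llm_start(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        system_prompt: str | None,
        input_items: list[TResponseInputItem],
    ) -> None:
        # Called with the (possibly filtered) instructions and input items.
        print(f"[{agent.name}] model call starting with {len(input_items)} input items")

    async def on_llm_end(
        self,
        context: RunContextWrapper[Any],
        agent: Agent[Any],
        response: ModelResponse,
    ) -> None:
        print(f"[{agent.name}] model call finished")


agent = Agent(name="Assistant", instructions="Be concise.", hooks=LoggingHooks())
```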
@@ -1287,3 +1397,9 @@ class AgentRunner:


 DEFAULT_AGENT_RUNNER = AgentRunner()
+
+
+def _copy_str_or_list(input: str | list[TResponseInputItem]) -> str | list[TResponseInputItem]:
+    if isinstance(input, str):
+        return input
+    return input.copy()
agents/tool.py
CHANGED
@@ -264,7 +264,11 @@ LocalShellExecutor = Callable[[LocalShellCommandRequest], MaybeAwaitable[str]]

 @dataclass
 class LocalShellTool:
-    """A tool that allows the LLM to execute commands on a shell.
+    """A tool that allows the LLM to execute commands on a shell.
+
+    For more details, see:
+    https://platform.openai.com/docs/guides/tools-local-shell
+    """

     executor: LocalShellExecutor
     """A function that executes a command on a shell."""
{openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: openai-agents
-Version: 0.2.
+Version: 0.2.9
 Summary: OpenAI Agents SDK
 Project-URL: Homepage, https://openai.github.io/openai-agents-python/
 Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -30,6 +30,9 @@ Provides-Extra: litellm
 Requires-Dist: litellm<2,>=1.67.4.post1; extra == 'litellm'
 Provides-Extra: realtime
 Requires-Dist: websockets<16,>=15.0; extra == 'realtime'
+Provides-Extra: sqlalchemy
+Requires-Dist: asyncpg>=0.29.0; extra == 'sqlalchemy'
+Requires-Dist: sqlalchemy>=2.0; extra == 'sqlalchemy'
 Provides-Extra: viz
 Requires-Dist: graphviz>=0.17; extra == 'viz'
 Provides-Extra: voice
@@ -58,29 +61,28 @@ Explore the [examples](examples) directory to see the SDK in action, and read ou

 ## Get started

-
+To get started, set up your Python environment (Python 3.9 or newer required), and then install OpenAI Agents SDK package.

-
+### venv

 ```bash
-python -m venv
-source
+python -m venv .venv
+source .venv/bin/activate  # On Windows: .venv\Scripts\activate
+pip install openai-agents
 ```

-
+For voice support, install with the optional `voice` group: `pip install 'openai-agents[voice]'`.

-
-uv venv
-source .venv/bin/activate  # On Windows: .venv\Scripts\activate
-```
+### uv

-
+If you're familiar with [uv](https://docs.astral.sh/uv/), using the tool would be even similar:

 ```bash
-
+uv init
+uv add openai-agents
 ```

-For voice support, install with the optional `voice` group: `
+For voice support, install with the optional `voice` group: `uv add 'openai-agents[voice]'`.

 ## Hello world example

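The new `sqlalchemy` extra (installable with `pip install 'openai-agents[sqlalchemy]'`) pairs with the `agents/extensions/memory/sqlalchemy_session.py` module added in this release. A hedged sketch of how a SQLAlchemy-backed session might be used; the class name `SQLAlchemySession`, its `from_url` constructor, and the database URL are assumptions based on the module name, not confirmed by this diff:

```python
import asyncio

from agents import Agent, Runner
from agents.extensions.memory import SQLAlchemySession  # assumed export


async def main() -> None:
    # Assumed constructor: one conversation keyed by id, backed by an async SQLAlchemy URL.
    session = SQLAlchemySession.from_url(
        "user-123",
        url="sqlite+aiosqlite:///conversations.db",
        create_tables=True,
    )
    agent = Agent(name="Assistant", instructions="Be concise.")
    result = await Runner.run(agent, "Hello!", session=session)
    print(result.final_output)


asyncio.run(main())
```

The extra's declared dependencies (`sqlalchemy>=2.0` and `asyncpg`) suggest PostgreSQL via asyncpg as the primary target; the SQLite URL above is only for illustration and would need `aiosqlite` installed separately.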
{openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/RECORD
CHANGED
@@ -1,27 +1,27 @@
 agents/__init__.py,sha256=YXcfllpLrUjafU_5KwIZvVEdUzcjZYhatqCS5tb03UQ,7908
 agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
 agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
-agents/_run_impl.py,sha256=
-agents/agent.py,sha256=
+agents/_run_impl.py,sha256=bd3zWFgNlOye92SQSNrB1OZCvgOkabnup7SEYuayijE,45051
+agents/agent.py,sha256=IINVHZyO5iFTN3rf94YB9Hv3hUIOouVUFt9cagSJwvQ,19120
 agents/agent_output.py,sha256=teTFK8unUN3esXhmEBO0bQGYQm1Axd5rYleDt9TFDgw,7153
 agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
 agents/exceptions.py,sha256=NHMdHE0cZ6AdA6UgUylTzVHAX05Ol1CkO814a0FdZcs,2862
-agents/function_schema.py,sha256=
+agents/function_schema.py,sha256=jXdpjl90lODRzdoOR_kUmEbfA3T8Dfa7kkSV8xWQDDo,13558
 agents/guardrail.py,sha256=7P-kd9rKPhgB8rtI31MCV5ho4ZrEaNCQxHvE8IK3EOk,9582
 agents/handoffs.py,sha256=31-rQ-iMWlWNd93ivgTTSMGkqlariXrNfWI_udMWt7s,11409
-agents/items.py,sha256=
-agents/lifecycle.py,sha256=
+agents/items.py,sha256=aHo7KTXZLBcHSrKHWDaBB6L7XmBCAIekG5e0xOIhkyM,9828
+agents/lifecycle.py,sha256=hGsqzumOSaal6oAjTqTfvBXl-ShAOkC42sthJigB5Fg,4308
 agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
-agents/model_settings.py,sha256=
+agents/model_settings.py,sha256=rqoIZe_sGm6_0hCCZlsVE29qln8yOmZr0dkpiV_cEpQ,6643
 agents/prompts.py,sha256=Ss5y_7s2HFcRAOAKu4WTxQszs5ybI8TfbxgEYdnj9sg,2231
 agents/py.typed,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-agents/repl.py,sha256=
+agents/repl.py,sha256=NX0BE5YDnmGQ2rdQsmLm3CKkQZ5m4GC95xXmUsAXJVs,2539
 agents/result.py,sha256=YCGYHoc5X1_vLKu5QiK6F8C1ZXI3tTfLXaZoqbYgUMA,10753
-agents/run.py,sha256=
+agents/run.py,sha256=Q8nu906IwmgIUpMbxCXnAGYeFDbw1KspSh9a74PJGGc,56994
 agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
 agents/stream_events.py,sha256=VFyTu-DT3ZMnHLtMbg-X_lxec0doQxNfx-hVxLB0BpI,1700
 agents/strict_schema.py,sha256=_KuEJkglmq-Fj3HSeYP4WqTvqrxbSKu6gezfz5Brhh0,5775
-agents/tool.py,sha256=
+agents/tool.py,sha256=poPA6wvHMpcbDW5VwXCbVLDDz5-6-c5ahDxb8xXMync,16845
 agents/tool_context.py,sha256=lbnctijZeanXAThddkklF7vDrXK1Ie2_wx6JZPCOihI,1434
 agents/usage.py,sha256=Tb5udGd3DPgD0JBdRD8fDctTE4M-zKML5uRn8ZG1yBc,1675
 agents/version.py,sha256=_1knUwzSK-HUeZTpRUkk6Z-CIcurqXuEplbV5TLJ08E,230
@@ -29,40 +29,43 @@ agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU
 agents/extensions/handoff_filters.py,sha256=Bzkjb1SmIHoibgO26oesNO2Qdx2avfDGkHrSTb-XAr0,2029
 agents/extensions/handoff_prompt.py,sha256=oGWN0uNh3Z1L7E-Ev2up8W084fFrDNOsLDy7P6bcmic,1006
 agents/extensions/visualization.py,sha256=sf9D_C-HMwkbWdZccTZvvMPRy_NSiwbm48tRJlESQBI,5144
+agents/extensions/memory/__init__.py,sha256=Yionp3G3pj53zenHPZUHhR9aIDVEpu0d_PcvdytBRes,534
+agents/extensions/memory/sqlalchemy_session.py,sha256=EkzgCiagfWpjrFbzZCaJC50DUN3RLteT85YueNt6KY8,10711
 agents/extensions/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-agents/extensions/models/litellm_model.py,sha256=
-agents/extensions/models/litellm_provider.py,sha256=
+agents/extensions/models/litellm_model.py,sha256=PF2xnWQRAaTVE38Q2TSFva17pz3McfUE_sZISeREHDw,15707
+agents/extensions/models/litellm_provider.py,sha256=ZHgh1nMoEvA7NpawkzLh3JDuDFtwXUV94Rs7UrwWqAk,1083
 agents/mcp/__init__.py,sha256=yHmmYlrmEHzUas1inRLKL2iPqbb_-107G3gKe_tyg4I,750
-agents/mcp/server.py,sha256=
+agents/mcp/server.py,sha256=4T58xiWCLiCm6JoUy_3jYWz5A8ZNsHiV1hIxjahoedU,26624
 agents/mcp/util.py,sha256=YVdPst1wWkTwbeshs-FYbr_MtrYJwO_4NzhSwj5aE5c,8239
 agents/memory/__init__.py,sha256=bo2Rb3PqwSCo9PhBVVJOjvjMM1TfytuDPAFEDADYwwA,84
 agents/memory/session.py,sha256=9RQ1I7qGh_9DzsyUd9srSPrxRBlw7jks-67NxYqKvvs,13060
-agents/models/__init__.py,sha256=
+agents/models/__init__.py,sha256=E0XVqWayVAsFqxucDLBW30siaqfNQsVrAnfidG_C3ok,287
 agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
-agents/models/chatcmpl_converter.py,sha256=
+agents/models/chatcmpl_converter.py,sha256=fZHui5V0KwTr27L_Io-4iQxPXr0ZoEMOv1_kJNxW-y8,20320
 agents/models/chatcmpl_helpers.py,sha256=eIWySobaH7I0AQijAz5i-_rtsXrSvmEHD567s_8Zw1o,1318
 agents/models/chatcmpl_stream_handler.py,sha256=XUoMnNEcSqK6IRMI6GPH8CwMCXi6NhbfHfpCY3SXJOM,24124
+agents/models/default_models.py,sha256=mlvBePn8H4UkHo7lN-wh7A3k2ciLgBUFKpROQxzdTfs,2098
 agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
 agents/models/interface.py,sha256=TpY_GEk3LLMozCcYAEcC-Y_VRpI3pwE7A7ZM317mk7M,3839
 agents/models/multi_provider.py,sha256=aiDbls5G4YomPfN6qH1pGlj41WS5jlDp2T82zm6qcnM,5578
 agents/models/openai_chatcompletions.py,sha256=lJJZCdWiZ0jTUp77OD1Zs6tSLZ7k8v1j_D2gB2Nw12Y,13179
-agents/models/openai_provider.py,sha256=
+agents/models/openai_provider.py,sha256=vBu3mlgDBrI_cZVVmfnWBHoPlJlsmld3lfdX8sNQQAM,3624
 agents/models/openai_responses.py,sha256=BnlN9hH6J4LKWBuM0lDfhvRgAb8IjQJuk5Hfd3OJ8G0,17330
 agents/realtime/README.md,sha256=5YCYXH5ULmlWoWo1PE9TlbHjeYgjnp-xY8ZssSFY2Vk,126
 agents/realtime/__init__.py,sha256=7qvzK8QJuHRnPHxDgDj21v8-lnSN4Uurg9znwJv_Tqg,4923
 agents/realtime/_default_tracker.py,sha256=4OMxBvD1MnZmMn6JZYKL42uWhVzvK6NdDLDfPP54d78,1765
 agents/realtime/_util.py,sha256=uawurhWKi3_twNFcZ5Yn1mVvv0RKl4IoyCSag8hGxrE,313
 agents/realtime/agent.py,sha256=yZDgycnLFtJcfl7UHak5GEyL2vdBGxegfqEiuuzGPEk,4027
-agents/realtime/config.py,sha256=
-agents/realtime/events.py,sha256=
+agents/realtime/config.py,sha256=49ZsKY9ySBFRfiL3RGWW1aVNhahzmoNATb3Buj2npJk,5963
+agents/realtime/events.py,sha256=eANiNNyYlp_1Ybdl-MOwXRVTDtrK9hfgn6iw0xNxnaY,5889
 agents/realtime/handoffs.py,sha256=avLFix5kEutel57IRcddssGiVHzGptOzWL9OqPaLVh8,6702
 agents/realtime/items.py,sha256=psT6AH65qmngmPsgwk6CXacVo5tEDYq0Za3EitHFpTA,5052
 agents/realtime/model.py,sha256=RJBA8-Dkd2JTqGzbKacoX4dN_qTWn_p7npL73To3ymw,6143
-agents/realtime/model_events.py,sha256=
+agents/realtime/model_events.py,sha256=YixBKmzlCrhtzCosj0SysyZpyHbZ90455gDr4Kr7Ey8,4338
 agents/realtime/model_inputs.py,sha256=OW2bn3wD5_pXLunDUf35jhG2q_bTKbC_D7Qu-83aOEA,2243
-agents/realtime/openai_realtime.py,sha256=
+agents/realtime/openai_realtime.py,sha256=zwbyy3dkP4jmacQE-kVjFVbRWzWAHQEnf5VqQt7BZc0,30963
 agents/realtime/runner.py,sha256=KfU7utmc9QFH2htIKN2IN9H-5EnB0qN9ezmvlRTnOm4,2511
-agents/realtime/session.py,sha256=
+agents/realtime/session.py,sha256=hPIxQSsVh5whkgYnEpxk_AgvG3suuDVnpPyqVoPJBRM,26822
 agents/tracing/__init__.py,sha256=5HO_6na5S6EwICgwl50OMtxiIIosUrqalhvldlYvSVc,2991
 agents/tracing/create.py,sha256=xpJ4ZRnGyUDPKoVVkA_8hmdhtwOKGhSkwRco2AQIhAo,18003
 agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
@@ -97,7 +100,7 @@ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
 agents/voice/models/openai_stt.py,sha256=LcVDS7f1pmbm--PWX-IaV9uLg9uv5_L3vSCbVnTJeGs,16864
 agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
-openai_agents-0.2.
-openai_agents-0.2.
-openai_agents-0.2.
-openai_agents-0.2.
+openai_agents-0.2.9.dist-info/METADATA,sha256=oooDN4gwI_UfIxMfr9-uW4KPGpWhyazoNStz43iBD3Y,12379
+openai_agents-0.2.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+openai_agents-0.2.9.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+openai_agents-0.2.9.dist-info/RECORD,,
{openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/WHEEL
File without changes
{openai_agents-0.2.7.dist-info → openai_agents-0.2.9.dist-info}/licenses/LICENSE
File without changes