openai-agents 0.0.3__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of openai-agents might be problematic. Click here for more details.
- agents/__init__.py +10 -4
- agents/_config.py +6 -3
- agents/_run_impl.py +1 -1
- agents/guardrail.py +1 -1
- agents/model_settings.py +20 -0
- agents/models/openai_chatcompletions.py +27 -1
- agents/models/openai_provider.py +24 -11
- agents/models/openai_responses.py +4 -2
- agents/result.py +0 -2
- agents/tool.py +0 -2
- agents/tracing/processors.py +1 -4
- {openai_agents-0.0.3.dist-info → openai_agents-0.0.4.dist-info}/METADATA +7 -5
- {openai_agents-0.0.3.dist-info → openai_agents-0.0.4.dist-info}/RECORD +15 -15
- {openai_agents-0.0.3.dist-info → openai_agents-0.0.4.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.3.dist-info → openai_agents-0.0.4.dist-info}/licenses/LICENSE +0 -0
agents/__init__.py
CHANGED
|
@@ -92,13 +92,19 @@ from .tracing import (
|
|
|
92
92
|
from .usage import Usage
|
|
93
93
|
|
|
94
94
|
|
|
95
|
-
def set_default_openai_key(key: str) -> None:
|
|
96
|
-
"""Set the default OpenAI API key to use for LLM requests and tracing. This is
|
|
97
|
-
the OPENAI_API_KEY environment variable is not already set.
|
|
95
|
+
def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
|
|
96
|
+
"""Set the default OpenAI API key to use for LLM requests (and optionally tracing(). This is
|
|
97
|
+
only necessary if the OPENAI_API_KEY environment variable is not already set.
|
|
98
98
|
|
|
99
99
|
If provided, this key will be used instead of the OPENAI_API_KEY environment variable.
|
|
100
|
+
|
|
101
|
+
Args:
|
|
102
|
+
key: The OpenAI key to use.
|
|
103
|
+
use_for_tracing: Whether to also use this key to send traces to OpenAI. Defaults to True
|
|
104
|
+
If False, you'll either need to set the OPENAI_API_KEY environment variable or call
|
|
105
|
+
set_tracing_export_api_key() with the API key you want to use for tracing.
|
|
100
106
|
"""
|
|
101
|
-
_config.set_default_openai_key(key)
|
|
107
|
+
_config.set_default_openai_key(key, use_for_tracing)
|
|
102
108
|
|
|
103
109
|
|
|
104
110
|
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None:
|
agents/_config.py
CHANGED
|
@@ -5,15 +5,18 @@ from .models import _openai_shared
|
|
|
5
5
|
from .tracing import set_tracing_export_api_key
|
|
6
6
|
|
|
7
7
|
|
|
8
|
-
def set_default_openai_key(key: str) -> None:
|
|
9
|
-
set_tracing_export_api_key(key)
|
|
8
|
+
def set_default_openai_key(key: str, use_for_tracing: bool) -> None:
|
|
10
9
|
_openai_shared.set_default_openai_key(key)
|
|
11
10
|
|
|
11
|
+
if use_for_tracing:
|
|
12
|
+
set_tracing_export_api_key(key)
|
|
13
|
+
|
|
12
14
|
|
|
13
15
|
def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:
|
|
16
|
+
_openai_shared.set_default_openai_client(client)
|
|
17
|
+
|
|
14
18
|
if use_for_tracing:
|
|
15
19
|
set_tracing_export_api_key(client.api_key)
|
|
16
|
-
_openai_shared.set_default_openai_client(client)
|
|
17
20
|
|
|
18
21
|
|
|
19
22
|
def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None:
|
agents/_run_impl.py
CHANGED
|
@@ -167,7 +167,7 @@ class RunImpl:
|
|
|
167
167
|
agent: Agent[TContext],
|
|
168
168
|
# The original input to the Runner
|
|
169
169
|
original_input: str | list[TResponseInputItem],
|
|
170
|
-
#
|
|
170
|
+
# Everything generated by Runner since the original input, but before the current step
|
|
171
171
|
pre_step_items: list[RunItem],
|
|
172
172
|
new_response: ModelResponse,
|
|
173
173
|
processed_response: ProcessedResponse,
|
agents/guardrail.py
CHANGED
|
@@ -86,7 +86,7 @@ class InputGuardrail(Generic[TContext]):
|
|
|
86
86
|
[RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]],
|
|
87
87
|
MaybeAwaitable[GuardrailFunctionOutput],
|
|
88
88
|
]
|
|
89
|
-
"""A function that receives the
|
|
89
|
+
"""A function that receives the agent input and the context, and returns a
|
|
90
90
|
`GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally
|
|
91
91
|
include information about the guardrail's output.
|
|
92
92
|
"""
|
agents/model_settings.py
CHANGED
|
@@ -10,15 +10,34 @@ class ModelSettings:
|
|
|
10
10
|
|
|
11
11
|
This class holds optional model configuration parameters (e.g. temperature,
|
|
12
12
|
top_p, penalties, truncation, etc.).
|
|
13
|
+
|
|
14
|
+
Not all models/providers support all of these parameters, so please check the API documentation
|
|
15
|
+
for the specific model and provider you are using.
|
|
13
16
|
"""
|
|
14
17
|
|
|
15
18
|
temperature: float | None = None
|
|
19
|
+
"""The temperature to use when calling the model."""
|
|
20
|
+
|
|
16
21
|
top_p: float | None = None
|
|
22
|
+
"""The top_p to use when calling the model."""
|
|
23
|
+
|
|
17
24
|
frequency_penalty: float | None = None
|
|
25
|
+
"""The frequency penalty to use when calling the model."""
|
|
26
|
+
|
|
18
27
|
presence_penalty: float | None = None
|
|
28
|
+
"""The presence penalty to use when calling the model."""
|
|
29
|
+
|
|
19
30
|
tool_choice: Literal["auto", "required", "none"] | str | None = None
|
|
31
|
+
"""The tool choice to use when calling the model."""
|
|
32
|
+
|
|
20
33
|
parallel_tool_calls: bool | None = False
|
|
34
|
+
"""Whether to use parallel tool calls when calling the model."""
|
|
35
|
+
|
|
21
36
|
truncation: Literal["auto", "disabled"] | None = None
|
|
37
|
+
"""The truncation strategy to use when calling the model."""
|
|
38
|
+
|
|
39
|
+
max_tokens: int | None = None
|
|
40
|
+
"""The maximum number of output tokens to generate."""
|
|
22
41
|
|
|
23
42
|
def resolve(self, override: ModelSettings | None) -> ModelSettings:
|
|
24
43
|
"""Produce a new ModelSettings by overlaying any non-None values from the
|
|
@@ -33,4 +52,5 @@ class ModelSettings:
|
|
|
33
52
|
tool_choice=override.tool_choice or self.tool_choice,
|
|
34
53
|
parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
|
|
35
54
|
truncation=override.truncation or self.truncation,
|
|
55
|
+
max_tokens=override.max_tokens or self.max_tokens,
|
|
36
56
|
)
|
|
@@ -51,8 +51,10 @@ from openai.types.responses import (
|
|
|
51
51
|
ResponseOutputText,
|
|
52
52
|
ResponseRefusalDeltaEvent,
|
|
53
53
|
ResponseTextDeltaEvent,
|
|
54
|
+
ResponseUsage,
|
|
54
55
|
)
|
|
55
56
|
from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
|
|
57
|
+
from openai.types.responses.response_usage import OutputTokensDetails
|
|
56
58
|
|
|
57
59
|
from .. import _debug
|
|
58
60
|
from ..agent_output import AgentOutputSchema
|
|
@@ -405,7 +407,23 @@ class OpenAIChatCompletionsModel(Model):
|
|
|
405
407
|
for function_call in state.function_calls.values():
|
|
406
408
|
outputs.append(function_call)
|
|
407
409
|
|
|
408
|
-
final_response = response.model_copy(
|
|
410
|
+
final_response = response.model_copy()
|
|
411
|
+
final_response.output = outputs
|
|
412
|
+
final_response.usage = (
|
|
413
|
+
ResponseUsage(
|
|
414
|
+
input_tokens=usage.prompt_tokens,
|
|
415
|
+
output_tokens=usage.completion_tokens,
|
|
416
|
+
total_tokens=usage.total_tokens,
|
|
417
|
+
output_tokens_details=OutputTokensDetails(
|
|
418
|
+
reasoning_tokens=usage.completion_tokens_details.reasoning_tokens
|
|
419
|
+
if usage.completion_tokens_details
|
|
420
|
+
and usage.completion_tokens_details.reasoning_tokens
|
|
421
|
+
else 0
|
|
422
|
+
),
|
|
423
|
+
)
|
|
424
|
+
if usage
|
|
425
|
+
else None
|
|
426
|
+
)
|
|
409
427
|
|
|
410
428
|
yield ResponseCompletedEvent(
|
|
411
429
|
response=final_response,
|
|
@@ -503,6 +521,7 @@ class OpenAIChatCompletionsModel(Model):
|
|
|
503
521
|
top_p=self._non_null_or_not_given(model_settings.top_p),
|
|
504
522
|
frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
|
|
505
523
|
presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
|
|
524
|
+
max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
|
|
506
525
|
tool_choice=tool_choice,
|
|
507
526
|
response_format=response_format,
|
|
508
527
|
parallel_tool_calls=parallel_tool_calls,
|
|
@@ -808,6 +827,13 @@ class _Converter:
|
|
|
808
827
|
"content": cls.extract_text_content(content),
|
|
809
828
|
}
|
|
810
829
|
result.append(msg_developer)
|
|
830
|
+
elif role == "assistant":
|
|
831
|
+
flush_assistant_message()
|
|
832
|
+
msg_assistant: ChatCompletionAssistantMessageParam = {
|
|
833
|
+
"role": "assistant",
|
|
834
|
+
"content": cls.extract_text_content(content),
|
|
835
|
+
}
|
|
836
|
+
result.append(msg_assistant)
|
|
811
837
|
else:
|
|
812
838
|
raise UserError(f"Unexpected role in easy_input_message: {role}")
|
|
813
839
|
|
agents/models/openai_provider.py
CHANGED
|
@@ -38,28 +38,41 @@ class OpenAIProvider(ModelProvider):
|
|
|
38
38
|
assert api_key is None and base_url is None, (
|
|
39
39
|
"Don't provide api_key or base_url if you provide openai_client"
|
|
40
40
|
)
|
|
41
|
-
self._client = openai_client
|
|
41
|
+
self._client: AsyncOpenAI | None = openai_client
|
|
42
42
|
else:
|
|
43
|
-
self._client =
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
http_client=shared_http_client(),
|
|
49
|
-
)
|
|
43
|
+
self._client = None
|
|
44
|
+
self._stored_api_key = api_key
|
|
45
|
+
self._stored_base_url = base_url
|
|
46
|
+
self._stored_organization = organization
|
|
47
|
+
self._stored_project = project
|
|
50
48
|
|
|
51
|
-
self._is_openai_model = self._client.base_url.host.startswith("api.openai.com")
|
|
52
49
|
if use_responses is not None:
|
|
53
50
|
self._use_responses = use_responses
|
|
54
51
|
else:
|
|
55
52
|
self._use_responses = _openai_shared.get_use_responses_by_default()
|
|
56
53
|
|
|
54
|
+
# We lazy load the client in case you never actually use OpenAIProvider(). Otherwise
|
|
55
|
+
# AsyncOpenAI() raises an error if you don't have an API key set.
|
|
56
|
+
def _get_client(self) -> AsyncOpenAI:
|
|
57
|
+
if self._client is None:
|
|
58
|
+
self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
|
|
59
|
+
api_key=self._stored_api_key or _openai_shared.get_default_openai_key(),
|
|
60
|
+
base_url=self._stored_base_url,
|
|
61
|
+
organization=self._stored_organization,
|
|
62
|
+
project=self._stored_project,
|
|
63
|
+
http_client=shared_http_client(),
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
return self._client
|
|
67
|
+
|
|
57
68
|
def get_model(self, model_name: str | None) -> Model:
|
|
58
69
|
if model_name is None:
|
|
59
70
|
model_name = DEFAULT_MODEL
|
|
60
71
|
|
|
72
|
+
client = self._get_client()
|
|
73
|
+
|
|
61
74
|
return (
|
|
62
|
-
OpenAIResponsesModel(model=model_name, openai_client=self._client)
|
|
75
|
+
OpenAIResponsesModel(model=model_name, openai_client=client)
|
|
63
76
|
if self._use_responses
|
|
64
|
-
else OpenAIChatCompletionsModel(model=model_name, openai_client=self._client)
|
|
77
|
+
else OpenAIChatCompletionsModel(model=model_name, openai_client=client)
|
|
65
78
|
)
|
|
@@ -5,7 +5,7 @@ from collections.abc import AsyncIterator
|
|
|
5
5
|
from dataclasses import dataclass
|
|
6
6
|
from typing import TYPE_CHECKING, Any, Literal, overload
|
|
7
7
|
|
|
8
|
-
from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven
|
|
8
|
+
from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
|
|
9
9
|
from openai.types import ChatModel
|
|
10
10
|
from openai.types.responses import (
|
|
11
11
|
Response,
|
|
@@ -113,7 +113,8 @@ class OpenAIResponsesModel(Model):
|
|
|
113
113
|
},
|
|
114
114
|
)
|
|
115
115
|
)
|
|
116
|
-
|
|
116
|
+
request_id = e.request_id if isinstance(e, APIStatusError) else None
|
|
117
|
+
logger.error(f"Error getting response: {e}. (request_id: {request_id})")
|
|
117
118
|
raise
|
|
118
119
|
|
|
119
120
|
return ModelResponse(
|
|
@@ -235,6 +236,7 @@ class OpenAIResponsesModel(Model):
|
|
|
235
236
|
temperature=self._non_null_or_not_given(model_settings.temperature),
|
|
236
237
|
top_p=self._non_null_or_not_given(model_settings.top_p),
|
|
237
238
|
truncation=self._non_null_or_not_given(model_settings.truncation),
|
|
239
|
+
max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
|
|
238
240
|
tool_choice=tool_choice,
|
|
239
241
|
parallel_tool_calls=parallel_tool_calls,
|
|
240
242
|
stream=stream,
|
agents/result.py
CHANGED
agents/tool.py
CHANGED
agents/tracing/processors.py
CHANGED
|
@@ -78,9 +78,6 @@ class BackendSpanExporter(TracingExporter):
|
|
|
78
78
|
logger.warning("OPENAI_API_KEY is not set, skipping trace export")
|
|
79
79
|
return
|
|
80
80
|
|
|
81
|
-
traces: list[dict[str, Any]] = []
|
|
82
|
-
spans: list[dict[str, Any]] = []
|
|
83
|
-
|
|
84
81
|
data = [item.export() for item in items if item.export()]
|
|
85
82
|
payload = {"data": data}
|
|
86
83
|
|
|
@@ -100,7 +97,7 @@ class BackendSpanExporter(TracingExporter):
|
|
|
100
97
|
|
|
101
98
|
# If the response is successful, break out of the loop
|
|
102
99
|
if response.status_code < 300:
|
|
103
|
-
logger.debug(f"Exported {len(traces)} traces, {len(spans)} spans")
|
|
100
|
+
logger.debug(f"Exported {len(items)} items")
|
|
104
101
|
return
|
|
105
102
|
|
|
106
103
|
# If the response is a client error (4xx), we wont retry
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: openai-agents
|
|
3
|
-
Version: 0.0.
|
|
3
|
+
Version: 0.0.4
|
|
4
4
|
Summary: OpenAI Agents SDK
|
|
5
5
|
Project-URL: Homepage, https://github.com/openai/openai-agents-python
|
|
6
6
|
Project-URL: Repository, https://github.com/openai/openai-agents-python
|
|
@@ -75,9 +75,11 @@ print(result.final_output)
|
|
|
75
75
|
|
|
76
76
|
(_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
|
|
77
77
|
|
|
78
|
+
(_For Jupyter notebook users, see [hello_world_jupyter.py](examples/basic/hello_world_jupyter.py)_)
|
|
79
|
+
|
|
78
80
|
## Handoffs example
|
|
79
81
|
|
|
80
|
-
```
|
|
82
|
+
```python
|
|
81
83
|
from agents import Agent, Runner
|
|
82
84
|
import asyncio
|
|
83
85
|
|
|
@@ -144,9 +146,9 @@ When you call `Runner.run()`, we run a loop until we get a final output.
|
|
|
144
146
|
|
|
145
147
|
1. We call the LLM, using the model and settings on the agent, and the message history.
|
|
146
148
|
2. The LLM returns a response, which may include tool calls.
|
|
147
|
-
3. If the response has a final output (see below for
|
|
149
|
+
3. If the response has a final output (see below for more on this), we return it and end the loop.
|
|
148
150
|
4. If the response has a handoff, we set the agent to the new agent and go back to step 1.
|
|
149
|
-
5. We process the tool calls (if any) and append the tool responses
|
|
151
|
+
5. We process the tool calls (if any) and append the tool responses messages. Then we go to step 1.
|
|
150
152
|
|
|
151
153
|
There is a `max_turns` parameter that you can use to limit the number of times the loop executes.
|
|
152
154
|
|
|
@@ -168,7 +170,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
|
|
|
168
170
|
|
|
169
171
|
## Tracing
|
|
170
172
|
|
|
171
|
-
The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk),
|
|
173
|
+
The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
|
|
172
174
|
|
|
173
175
|
## Development (only needed if you need to edit the SDK/examples)
|
|
174
176
|
|
|
@@ -1,25 +1,25 @@
|
|
|
1
|
-
agents/__init__.py,sha256=
|
|
2
|
-
agents/_config.py,sha256=
|
|
1
|
+
agents/__init__.py,sha256=NGc7r2Su7RM8c1Ym3gl_LWDFMiIiL_bY-YUgFDDugYo,6267
|
|
2
|
+
agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
|
|
3
3
|
agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
|
|
4
|
-
agents/_run_impl.py,sha256=
|
|
4
|
+
agents/_run_impl.py,sha256=jMlWtHi7blDC8bJTpzQ1-Xi9wcPBiGUSyfItgw-L1io,28550
|
|
5
5
|
agents/_utils.py,sha256=L21Hdl20U66Asp-W61yTnahmo8b6X58jsgdUBWb9_Rk,1685
|
|
6
6
|
agents/agent.py,sha256=Y0lnIva9qL_WJVUVxDQtSrMa0KuM5IXLWK0q6CzIxas,6297
|
|
7
7
|
agents/agent_output.py,sha256=k271F9MgMaoS1nPtsSwsURP8mNxv8VrEOWrv7PJSQT4,5345
|
|
8
8
|
agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
|
|
9
9
|
agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
|
|
10
10
|
agents/function_schema.py,sha256=OgeuiDhLowhYt6T9CU-7Fk05uKIxPaDPgL2hdnMFjpQ,12666
|
|
11
|
-
agents/guardrail.py,sha256=
|
|
11
|
+
agents/guardrail.py,sha256=3y4oGa-dPp75nsS15zZdJ-GBT34jDu5c8gMeFHC4SME,9286
|
|
12
12
|
agents/handoffs.py,sha256=onlvwSCTNJKof2Ftk-qZ5-zxTNT9AimjvyOcxj4Rp38,8999
|
|
13
13
|
agents/items.py,sha256=DQPAJQkAVRR9Js-RVDtp4eizxiVaL30bbB0W-8U7GuQ,8069
|
|
14
14
|
agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
|
|
15
15
|
agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
|
|
16
|
-
agents/model_settings.py,sha256=
|
|
17
|
-
agents/result.py,sha256=
|
|
16
|
+
agents/model_settings.py,sha256=4JOqsLswjdrEszNqNEJ_dYjxUMCyt68hOIdgxlXELw0,2169
|
|
17
|
+
agents/result.py,sha256=k8B5Q9Vf-H6IzGaEHqJyMNoairUcF4yCfnePS8Qanzo,8176
|
|
18
18
|
agents/run.py,sha256=GLPPfHH7MswO_5oW27y7RsZVY5rbkvyCBxG4kbN5y-Q,37064
|
|
19
19
|
agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
|
|
20
20
|
agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
|
|
21
21
|
agents/strict_schema.py,sha256=FEyEvF3ZjxIHRLmraBGZyjJjuFiPCZGaCFV22LlwaTQ,5783
|
|
22
|
-
agents/tool.py,sha256=
|
|
22
|
+
agents/tool.py,sha256=I6MD3H3wB9ka9FUOg6hlx9Swe9fceSnbH2BPMmNYXl0,10629
|
|
23
23
|
agents/usage.py,sha256=-MZOmSDVdWxA2V_yVVnmUcwVcLdvYFccv0HXZ7Ow3_A,733
|
|
24
24
|
agents/version.py,sha256=bkeg2DaYBS8OnV7R7J6OuF5pNA__0mJ4QZsJjC1DTI0,223
|
|
25
25
|
agents/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
@@ -29,21 +29,21 @@ agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
|
29
29
|
agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
|
|
30
30
|
agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
|
|
31
31
|
agents/models/interface.py,sha256=dgIlKyPaCbNRTHXxd6x7OQwJuAelG3F-C19P-aacHWQ,3129
|
|
32
|
-
agents/models/openai_chatcompletions.py,sha256=
|
|
33
|
-
agents/models/openai_provider.py,sha256=
|
|
34
|
-
agents/models/openai_responses.py,sha256=
|
|
32
|
+
agents/models/openai_chatcompletions.py,sha256=e7iA9mxflbVKNCbt11gxCXKHRjMS1JXd0vpzjlOQOI8,39059
|
|
33
|
+
agents/models/openai_provider.py,sha256=3zKt8stSm0IcDJzX8GqXa3UcECKK79A290Zzem1nlUo,2784
|
|
34
|
+
agents/models/openai_responses.py,sha256=4CowZT0wAMflEzDgi6hEidcMq_0zchIm2uX_vV090TM,13386
|
|
35
35
|
agents/tracing/__init__.py,sha256=pp2_mBCQGL9oN6_czCWHQsV4ZTEOcy1AVxdjQ41PNr0,2424
|
|
36
36
|
agents/tracing/create.py,sha256=PAhfJKAeJ8jbZvxylTiikU_LqAhezYHphR4jG5EdaAE,12110
|
|
37
37
|
agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
|
|
38
38
|
agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
|
|
39
|
-
agents/tracing/processors.py,sha256=
|
|
39
|
+
agents/tracing/processors.py,sha256=74BB0w3XQjerlYN6kgRiqtV4VPAvZSMTPByutcX464c,9600
|
|
40
40
|
agents/tracing/scope.py,sha256=x1m-aYilS1DeeV4L7Ckv55LVWod7c_nnTKoCGhJCumk,1372
|
|
41
41
|
agents/tracing/setup.py,sha256=P5JaIcHej6m62rb27bSutN2Bqv0XSD9Z_Ki7ynCVdbs,6728
|
|
42
42
|
agents/tracing/span_data.py,sha256=UQUPpMQ7Z1XOqKFJNHUxAJUVPwa6JMfGa7dm_NovuhQ,4574
|
|
43
43
|
agents/tracing/spans.py,sha256=KWCqcRwUlt85NCZPQp98UIF5vAQAVWuVWQh3tgPK0WE,6605
|
|
44
44
|
agents/tracing/traces.py,sha256=GL9EoEQKVk7eo0BcfRfQ6C7tdzlmPhkneQn4fdsCdqA,4774
|
|
45
45
|
agents/tracing/util.py,sha256=BsDvn2rjE4SRQvfm55utljT8agdA0Z36KWXd1vdx4hs,392
|
|
46
|
-
openai_agents-0.0.
|
|
47
|
-
openai_agents-0.0.
|
|
48
|
-
openai_agents-0.0.
|
|
49
|
-
openai_agents-0.0.
|
|
46
|
+
openai_agents-0.0.4.dist-info/METADATA,sha256=8a-UqdtxRJCgwuT6jsfJ1MwDwwYWS-NnbcJB52QpZP4,7582
|
|
47
|
+
openai_agents-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
48
|
+
openai_agents-0.0.4.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
|
|
49
|
+
openai_agents-0.0.4.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|