openai-agents 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of openai-agents might be problematic. Click here for more details.

agents/__init__.py CHANGED
@@ -92,13 +92,19 @@ from .tracing import (
92
92
  from .usage import Usage
93
93
 
94
94
 
95
- def set_default_openai_key(key: str) -> None:
96
- """Set the default OpenAI API key to use for LLM requests and tracing. This is only necessary if
97
- the OPENAI_API_KEY environment variable is not already set.
95
+ def set_default_openai_key(key: str, use_for_tracing: bool = True) -> None:
96
+ """Set the default OpenAI API key to use for LLM requests (and optionally tracing). This is
97
+ only necessary if the OPENAI_API_KEY environment variable is not already set.
98
98
 
99
99
  If provided, this key will be used instead of the OPENAI_API_KEY environment variable.
100
+
101
+ Args:
102
+ key: The OpenAI key to use.
103
+ use_for_tracing: Whether to also use this key to send traces to OpenAI. Defaults to True.
104
+ If False, you'll either need to set the OPENAI_API_KEY environment variable or call
105
+ set_tracing_export_api_key() with the API key you want to use for tracing.
100
106
  """
101
- _config.set_default_openai_key(key)
107
+ _config.set_default_openai_key(key, use_for_tracing)
102
108
 
103
109
 
104
110
  def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool = True) -> None:
agents/_config.py CHANGED
@@ -5,15 +5,18 @@ from .models import _openai_shared
5
5
  from .tracing import set_tracing_export_api_key
6
6
 
7
7
 
8
- def set_default_openai_key(key: str) -> None:
9
- set_tracing_export_api_key(key)
8
+ def set_default_openai_key(key: str, use_for_tracing: bool) -> None:
10
9
  _openai_shared.set_default_openai_key(key)
11
10
 
11
+ if use_for_tracing:
12
+ set_tracing_export_api_key(key)
13
+
12
14
 
13
15
  def set_default_openai_client(client: AsyncOpenAI, use_for_tracing: bool) -> None:
16
+ _openai_shared.set_default_openai_client(client)
17
+
14
18
  if use_for_tracing:
15
19
  set_tracing_export_api_key(client.api_key)
16
- _openai_shared.set_default_openai_client(client)
17
20
 
18
21
 
19
22
  def set_default_openai_api(api: Literal["chat_completions", "responses"]) -> None:
agents/_run_impl.py CHANGED
@@ -23,7 +23,7 @@ from openai.types.responses.response_computer_tool_call import (
23
23
  ActionWait,
24
24
  )
25
25
  from openai.types.responses.response_input_param import ComputerCallOutput
26
- from openai.types.responses.response_output_item import Reasoning
26
+ from openai.types.responses.response_reasoning_item import ResponseReasoningItem
27
27
 
28
28
  from . import _utils
29
29
  from .agent import Agent
@@ -167,7 +167,7 @@ class RunImpl:
167
167
  agent: Agent[TContext],
168
168
  # The original input to the Runner
169
169
  original_input: str | list[TResponseInputItem],
170
- # Eveything generated by Runner since the original input, but before the current step
170
+ # Everything generated by Runner since the original input, but before the current step
171
171
  pre_step_items: list[RunItem],
172
172
  new_response: ModelResponse,
173
173
  processed_response: ProcessedResponse,
@@ -288,7 +288,7 @@ class RunImpl:
288
288
  items.append(ToolCallItem(raw_item=output, agent=agent))
289
289
  elif isinstance(output, ResponseFunctionWebSearch):
290
290
  items.append(ToolCallItem(raw_item=output, agent=agent))
291
- elif isinstance(output, Reasoning):
291
+ elif isinstance(output, ResponseReasoningItem):
292
292
  items.append(ReasoningItem(raw_item=output, agent=agent))
293
293
  elif isinstance(output, ResponseComputerToolCall):
294
294
  items.append(ToolCallItem(raw_item=output, agent=agent))
agents/agent_output.py CHANGED
@@ -138,7 +138,7 @@ def _type_to_str(t: type[Any]) -> str:
138
138
  # It's a simple type like `str`, `int`, etc.
139
139
  return t.__name__
140
140
  elif args:
141
- args_str = ', '.join(_type_to_str(arg) for arg in args)
141
+ args_str = ", ".join(_type_to_str(arg) for arg in args)
142
142
  return f"{origin.__name__}[{args_str}]"
143
143
  else:
144
144
  return str(t)
agents/guardrail.py CHANGED
@@ -86,7 +86,7 @@ class InputGuardrail(Generic[TContext]):
86
86
  [RunContextWrapper[TContext], Agent[Any], str | list[TResponseInputItem]],
87
87
  MaybeAwaitable[GuardrailFunctionOutput],
88
88
  ]
89
- """A function that receives the the agent input and the context, and returns a
89
+ """A function that receives the agent input and the context, and returns a
90
90
  `GuardrailResult`. The result marks whether the tripwire was triggered, and can optionally
91
91
  include information about the guardrail's output.
92
92
  """
agents/items.py CHANGED
@@ -19,7 +19,7 @@ from openai.types.responses import (
19
19
  ResponseStreamEvent,
20
20
  )
21
21
  from openai.types.responses.response_input_item_param import ComputerCallOutput, FunctionCallOutput
22
- from openai.types.responses.response_output_item import Reasoning
22
+ from openai.types.responses.response_reasoning_item import ResponseReasoningItem
23
23
  from pydantic import BaseModel
24
24
  from typing_extensions import TypeAlias
25
25
 
@@ -136,10 +136,10 @@ class ToolCallOutputItem(RunItemBase[Union[FunctionCallOutput, ComputerCallOutpu
136
136
 
137
137
 
138
138
  @dataclass
139
- class ReasoningItem(RunItemBase[Reasoning]):
139
+ class ReasoningItem(RunItemBase[ResponseReasoningItem]):
140
140
  """Represents a reasoning item."""
141
141
 
142
- raw_item: Reasoning
142
+ raw_item: ResponseReasoningItem
143
143
  """The raw reasoning item."""
144
144
 
145
145
  type: Literal["reasoning_item"] = "reasoning_item"
agents/model_settings.py CHANGED
@@ -10,14 +10,34 @@ class ModelSettings:
10
10
 
11
11
  This class holds optional model configuration parameters (e.g. temperature,
12
12
  top_p, penalties, truncation, etc.).
13
+
14
+ Not all models/providers support all of these parameters, so please check the API documentation
15
+ for the specific model and provider you are using.
13
16
  """
17
+
14
18
  temperature: float | None = None
19
+ """The temperature to use when calling the model."""
20
+
15
21
  top_p: float | None = None
22
+ """The top_p to use when calling the model."""
23
+
16
24
  frequency_penalty: float | None = None
25
+ """The frequency penalty to use when calling the model."""
26
+
17
27
  presence_penalty: float | None = None
28
+ """The presence penalty to use when calling the model."""
29
+
18
30
  tool_choice: Literal["auto", "required", "none"] | str | None = None
31
+ """The tool choice to use when calling the model."""
32
+
19
33
  parallel_tool_calls: bool | None = False
34
+ """Whether to use parallel tool calls when calling the model."""
35
+
20
36
  truncation: Literal["auto", "disabled"] | None = None
37
+ """The truncation strategy to use when calling the model."""
38
+
39
+ max_tokens: int | None = None
40
+ """The maximum number of output tokens to generate."""
21
41
 
22
42
  def resolve(self, override: ModelSettings | None) -> ModelSettings:
23
43
  """Produce a new ModelSettings by overlaying any non-None values from the
@@ -32,4 +52,5 @@ class ModelSettings:
32
52
  tool_choice=override.tool_choice or self.tool_choice,
33
53
  parallel_tool_calls=override.parallel_tool_calls or self.parallel_tool_calls,
34
54
  truncation=override.truncation or self.truncation,
55
+ max_tokens=override.max_tokens or self.max_tokens,
35
56
  )
@@ -51,8 +51,10 @@ from openai.types.responses import (
51
51
  ResponseOutputText,
52
52
  ResponseRefusalDeltaEvent,
53
53
  ResponseTextDeltaEvent,
54
+ ResponseUsage,
54
55
  )
55
56
  from openai.types.responses.response_input_param import FunctionCallOutput, ItemReference, Message
57
+ from openai.types.responses.response_usage import OutputTokensDetails
56
58
 
57
59
  from .. import _debug
58
60
  from ..agent_output import AgentOutputSchema
@@ -405,7 +407,23 @@ class OpenAIChatCompletionsModel(Model):
405
407
  for function_call in state.function_calls.values():
406
408
  outputs.append(function_call)
407
409
 
408
- final_response = response.model_copy(update={"output": outputs, "usage": usage})
410
+ final_response = response.model_copy()
411
+ final_response.output = outputs
412
+ final_response.usage = (
413
+ ResponseUsage(
414
+ input_tokens=usage.prompt_tokens,
415
+ output_tokens=usage.completion_tokens,
416
+ total_tokens=usage.total_tokens,
417
+ output_tokens_details=OutputTokensDetails(
418
+ reasoning_tokens=usage.completion_tokens_details.reasoning_tokens
419
+ if usage.completion_tokens_details
420
+ and usage.completion_tokens_details.reasoning_tokens
421
+ else 0
422
+ ),
423
+ )
424
+ if usage
425
+ else None
426
+ )
409
427
 
410
428
  yield ResponseCompletedEvent(
411
429
  response=final_response,
@@ -503,6 +521,7 @@ class OpenAIChatCompletionsModel(Model):
503
521
  top_p=self._non_null_or_not_given(model_settings.top_p),
504
522
  frequency_penalty=self._non_null_or_not_given(model_settings.frequency_penalty),
505
523
  presence_penalty=self._non_null_or_not_given(model_settings.presence_penalty),
524
+ max_tokens=self._non_null_or_not_given(model_settings.max_tokens),
506
525
  tool_choice=tool_choice,
507
526
  response_format=response_format,
508
527
  parallel_tool_calls=parallel_tool_calls,
@@ -808,6 +827,13 @@ class _Converter:
808
827
  "content": cls.extract_text_content(content),
809
828
  }
810
829
  result.append(msg_developer)
830
+ elif role == "assistant":
831
+ flush_assistant_message()
832
+ msg_assistant: ChatCompletionAssistantMessageParam = {
833
+ "role": "assistant",
834
+ "content": cls.extract_text_content(content),
835
+ }
836
+ result.append(msg_assistant)
811
837
  else:
812
838
  raise UserError(f"Unexpected role in easy_input_message: {role}")
813
839
 
@@ -38,28 +38,41 @@ class OpenAIProvider(ModelProvider):
38
38
  assert api_key is None and base_url is None, (
39
39
  "Don't provide api_key or base_url if you provide openai_client"
40
40
  )
41
- self._client = openai_client
41
+ self._client: AsyncOpenAI | None = openai_client
42
42
  else:
43
- self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
44
- api_key=api_key or _openai_shared.get_default_openai_key(),
45
- base_url=base_url,
46
- organization=organization,
47
- project=project,
48
- http_client=shared_http_client(),
49
- )
43
+ self._client = None
44
+ self._stored_api_key = api_key
45
+ self._stored_base_url = base_url
46
+ self._stored_organization = organization
47
+ self._stored_project = project
50
48
 
51
- self._is_openai_model = self._client.base_url.host.startswith("api.openai.com")
52
49
  if use_responses is not None:
53
50
  self._use_responses = use_responses
54
51
  else:
55
52
  self._use_responses = _openai_shared.get_use_responses_by_default()
56
53
 
54
+ # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise
55
+ # AsyncOpenAI() raises an error if you don't have an API key set.
56
+ def _get_client(self) -> AsyncOpenAI:
57
+ if self._client is None:
58
+ self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
59
+ api_key=self._stored_api_key or _openai_shared.get_default_openai_key(),
60
+ base_url=self._stored_base_url,
61
+ organization=self._stored_organization,
62
+ project=self._stored_project,
63
+ http_client=shared_http_client(),
64
+ )
65
+
66
+ return self._client
67
+
57
68
  def get_model(self, model_name: str | None) -> Model:
58
69
  if model_name is None:
59
70
  model_name = DEFAULT_MODEL
60
71
 
72
+ client = self._get_client()
73
+
61
74
  return (
62
- OpenAIResponsesModel(model=model_name, openai_client=self._client)
75
+ OpenAIResponsesModel(model=model_name, openai_client=client)
63
76
  if self._use_responses
64
- else OpenAIChatCompletionsModel(model=model_name, openai_client=self._client)
77
+ else OpenAIChatCompletionsModel(model=model_name, openai_client=client)
65
78
  )
@@ -5,7 +5,7 @@ from collections.abc import AsyncIterator
5
5
  from dataclasses import dataclass
6
6
  from typing import TYPE_CHECKING, Any, Literal, overload
7
7
 
8
- from openai import NOT_GIVEN, AsyncOpenAI, AsyncStream, NotGiven
8
+ from openai import NOT_GIVEN, APIStatusError, AsyncOpenAI, AsyncStream, NotGiven
9
9
  from openai.types import ChatModel
10
10
  from openai.types.responses import (
11
11
  Response,
@@ -113,7 +113,8 @@ class OpenAIResponsesModel(Model):
113
113
  },
114
114
  )
115
115
  )
116
- logger.error(f"Error getting response: {e}")
116
+ request_id = e.request_id if isinstance(e, APIStatusError) else None
117
+ logger.error(f"Error getting response: {e}. (request_id: {request_id})")
117
118
  raise
118
119
 
119
120
  return ModelResponse(
@@ -235,6 +236,7 @@ class OpenAIResponsesModel(Model):
235
236
  temperature=self._non_null_or_not_given(model_settings.temperature),
236
237
  top_p=self._non_null_or_not_given(model_settings.top_p),
237
238
  truncation=self._non_null_or_not_given(model_settings.truncation),
239
+ max_output_tokens=self._non_null_or_not_given(model_settings.max_tokens),
238
240
  tool_choice=tool_choice,
239
241
  parallel_tool_calls=parallel_tool_calls,
240
242
  stream=stream,
@@ -361,7 +363,7 @@ class Converter:
361
363
  includes = "file_search_call.results" if tool.include_search_results else None
362
364
  elif isinstance(tool, ComputerTool):
363
365
  converted_tool = {
364
- "type": "computer-preview",
366
+ "type": "computer_use_preview",
365
367
  "environment": tool.computer.environment,
366
368
  "display_width": tool.computer.dimensions[0],
367
369
  "display_height": tool.computer.dimensions[1],
agents/result.py CHANGED
@@ -216,5 +216,3 @@ class RunResultStreaming(RunResultBase):
216
216
 
217
217
  if self._output_guardrails_task and not self._output_guardrails_task.done():
218
218
  self._output_guardrails_task.cancel()
219
- self._output_guardrails_task.cancel()
220
- self._output_guardrails_task.cancel()
@@ -78,9 +78,6 @@ class BackendSpanExporter(TracingExporter):
78
78
  logger.warning("OPENAI_API_KEY is not set, skipping trace export")
79
79
  return
80
80
 
81
- traces: list[dict[str, Any]] = []
82
- spans: list[dict[str, Any]] = []
83
-
84
81
  data = [item.export() for item in items if item.export()]
85
82
  payload = {"data": data}
86
83
 
@@ -100,7 +97,7 @@ class BackendSpanExporter(TracingExporter):
100
97
 
101
98
  # If the response is successful, break out of the loop
102
99
  if response.status_code < 300:
103
- logger.debug(f"Exported {len(traces)} traces, {len(spans)} spans")
100
+ logger.debug(f"Exported {len(items)} items")
104
101
  return
105
102
 
106
103
  # If the response is a client error (4xx), we won't retry
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: openai-agents
3
- Version: 0.0.2
3
+ Version: 0.0.4
4
4
  Summary: OpenAI Agents SDK
5
5
  Project-URL: Homepage, https://github.com/openai/openai-agents-python
6
6
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -19,7 +19,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
19
19
  Classifier: Typing :: Typed
20
20
  Requires-Python: >=3.9
21
21
  Requires-Dist: griffe<2,>=1.5.6
22
- Requires-Dist: openai>=1.66.0
22
+ Requires-Dist: openai>=1.66.2
23
23
  Requires-Dist: pydantic<3,>=2.10
24
24
  Requires-Dist: requests<3,>=2.0
25
25
  Requires-Dist: types-requests<3,>=2.0
@@ -30,16 +30,18 @@ Description-Content-Type: text/markdown
30
30
 
31
31
  The OpenAI Agents SDK is a lightweight yet powerful framework for building multi-agent workflows.
32
32
 
33
- <img src="docs/assets/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
33
+ <img src="https://cdn.openai.com/API/docs/images/orchestration.png" alt="Image of the Agents Tracing UI" style="max-height: 803px;">
34
34
 
35
35
  ### Core concepts:
36
36
 
37
- 1. [**Agents**](docs/agents.md): LLMs configured with instructions, tools, guardrails, and handoffs
38
- 2. [**Handoffs**](docs/handoffs.md): Allow agents to transfer control to other agents for specific tasks
39
- 3. [**Guardrails**](docs/guardrails.md): Configurable safety checks for input and output validation
40
- 4. [**Tracing**](docs/tracing.md): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
37
+ 1. [**Agents**](https://openai.github.io/openai-agents-python/agents): LLMs configured with instructions, tools, guardrails, and handoffs
38
+ 2. [**Handoffs**](https://openai.github.io/openai-agents-python/handoffs/): Allow agents to transfer control to other agents for specific tasks
39
+ 3. [**Guardrails**](https://openai.github.io/openai-agents-python/guardrails/): Configurable safety checks for input and output validation
40
+ 4. [**Tracing**](https://openai.github.io/openai-agents-python/tracing/): Built-in tracking of agent runs, allowing you to view, debug and optimize your workflows
41
41
 
42
- Explore the [examples](examples) directory to see the SDK in action.
42
+ Explore the [examples](examples) directory to see the SDK in action, and read our [documentation](https://openai.github.io/openai-agents-python/) for more details.
43
+
44
+ Notably, our SDK [is compatible](https://openai.github.io/openai-agents-python/models/) with any model providers that support the OpenAI Chat Completions API format.
43
45
 
44
46
  ## Get started
45
47
 
@@ -73,9 +75,11 @@ print(result.final_output)
73
75
 
74
76
  (_If running this, ensure you set the `OPENAI_API_KEY` environment variable_)
75
77
 
78
+ (_For Jupyter notebook users, see [hello_world_jupyter.py](examples/basic/hello_world_jupyter.py)_)
79
+
76
80
  ## Handoffs example
77
81
 
78
- ```py
82
+ ```python
79
83
  from agents import Agent, Runner
80
84
  import asyncio
81
85
 
@@ -142,9 +146,9 @@ When you call `Runner.run()`, we run a loop until we get a final output.
142
146
 
143
147
  1. We call the LLM, using the model and settings on the agent, and the message history.
144
148
  2. The LLM returns a response, which may include tool calls.
145
- 3. If the response has a final output (see below for the more on this), we return it and end the loop.
149
+ 3. If the response has a final output (see below for more on this), we return it and end the loop.
146
150
  4. If the response has a handoff, we set the agent to the new agent and go back to step 1.
147
- 5. We process the tool calls (if any) and append the tool responses messsages. Then we go to step 1.
151
+ 5. We process the tool calls (if any) and append the tool responses messages. Then we go to step 1.
148
152
 
149
153
  There is a `max_turns` parameter that you can use to limit the number of times the loop executes.
150
154
 
@@ -166,7 +170,7 @@ The Agents SDK is designed to be highly flexible, allowing you to model a wide r
166
170
 
167
171
  ## Tracing
168
172
 
169
- The Agents SDK includes built-in tracing, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), and [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk). See [Tracing](http://openai.github.io/openai-agents-python/tracing.md) for more details.
173
+ The Agents SDK automatically traces your agent runs, making it easy to track and debug the behavior of your agents. Tracing is extensible by design, supporting custom spans and a wide variety of external destinations, including [Logfire](https://logfire.pydantic.dev/docs/integrations/llms/openai/#openai-agents), [AgentOps](https://docs.agentops.ai/v1/integrations/agentssdk), [Braintrust](https://braintrust.dev/docs/guides/traces/integrations#openai-agents-sdk), [Scorecard](https://docs.scorecard.io/docs/documentation/features/tracing#openai-agents-sdk-integration), and [Keywords AI](https://docs.keywordsai.co/integration/development-frameworks/openai-agent). For more details about how to customize or disable tracing, see [Tracing](http://openai.github.io/openai-agents-python/tracing).
170
174
 
171
175
  ## Development (only needed if you need to edit the SDK/examples)
172
176
 
@@ -1,20 +1,20 @@
1
- agents/__init__.py,sha256=Qd1eatUlALblGTqHV4o5jL-h65furZ-s0IK8RFNRGWY,5879
2
- agents/_config.py,sha256=5qrDSZuguiL0gCTd_6f9J6ulpsRySueZ3re4lyd4PU0,743
1
+ agents/__init__.py,sha256=NGc7r2Su7RM8c1Ym3gl_LWDFMiIiL_bY-YUgFDDugYo,6267
2
+ agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
3
3
  agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
4
- agents/_run_impl.py,sha256=v9mMCT_v0pwGnPW-fydzHpdu1gI9pROC7Wvhe50IVrw,28522
4
+ agents/_run_impl.py,sha256=jMlWtHi7blDC8bJTpzQ1-Xi9wcPBiGUSyfItgw-L1io,28550
5
5
  agents/_utils.py,sha256=L21Hdl20U66Asp-W61yTnahmo8b6X58jsgdUBWb9_Rk,1685
6
6
  agents/agent.py,sha256=Y0lnIva9qL_WJVUVxDQtSrMa0KuM5IXLWK0q6CzIxas,6297
7
- agents/agent_output.py,sha256=xk17hMtFQSviQNydzt70ENvsPDTWgZazv1NhswC16e4,5345
7
+ agents/agent_output.py,sha256=k271F9MgMaoS1nPtsSwsURP8mNxv8VrEOWrv7PJSQT4,5345
8
8
  agents/computer.py,sha256=XD44UgiUWSfniv-xKwwDP6wFKVwBiZkpaL1hO-0-7ZA,2516
9
9
  agents/exceptions.py,sha256=F3AltRt27PGdhbFqKBhRJL9eHqoN4SQx7oxBn0GWmhs,1856
10
10
  agents/function_schema.py,sha256=OgeuiDhLowhYt6T9CU-7Fk05uKIxPaDPgL2hdnMFjpQ,12666
11
- agents/guardrail.py,sha256=3A355heAUkaGBmyKArq-3XVFunydlAZKkFRo8mHuH5w,9290
11
+ agents/guardrail.py,sha256=3y4oGa-dPp75nsS15zZdJ-GBT34jDu5c8gMeFHC4SME,9286
12
12
  agents/handoffs.py,sha256=onlvwSCTNJKof2Ftk-qZ5-zxTNT9AimjvyOcxj4Rp38,8999
13
- agents/items.py,sha256=kvhgsyKyIxkycUw33fKKX17SAQyIo4jGwyFLqgroB7I,8030
13
+ agents/items.py,sha256=DQPAJQkAVRR9Js-RVDtp4eizxiVaL30bbB0W-8U7GuQ,8069
14
14
  agents/lifecycle.py,sha256=wYFG6PLSKQ7bICKVbB8oGtdoJNINGq9obh2RSKlAkDE,2938
15
15
  agents/logger.py,sha256=p_ef7vWKpBev5FFybPJjhrCCQizK08Yy1A2EDO1SNNg,60
16
- agents/model_settings.py,sha256=lKYXNS6M6R7Ybku7o3sAfeyasXOctwHVRk3joYGe7Nk,1425
17
- agents/result.py,sha256=vBLf6wUMIeCVcLKoaXLtxXZzmqK2QYRoba76uRCjAcs,8276
16
+ agents/model_settings.py,sha256=4JOqsLswjdrEszNqNEJ_dYjxUMCyt68hOIdgxlXELw0,2169
17
+ agents/result.py,sha256=k8B5Q9Vf-H6IzGaEHqJyMNoairUcF4yCfnePS8Qanzo,8176
18
18
  agents/run.py,sha256=GLPPfHH7MswO_5oW27y7RsZVY5rbkvyCBxG4kbN5y-Q,37064
19
19
  agents/run_context.py,sha256=vuSUQM8O4CLensQY27-22fOqECnw7yvwL9U3WO8b_bk,851
20
20
  agents/stream_events.py,sha256=ULgBEcL_H4vklZoxhpY2yomeoxVF0UiXvswsFsjFv4s,1547
@@ -29,21 +29,21 @@ agents/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
29
29
  agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-iE,836
30
30
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
31
31
  agents/models/interface.py,sha256=dgIlKyPaCbNRTHXxd6x7OQwJuAelG3F-C19P-aacHWQ,3129
32
- agents/models/openai_chatcompletions.py,sha256=pVAS0WnXhCUR1XK90KCDf985TaiWEkfkjgvdiOHamxU,37880
33
- agents/models/openai_provider.py,sha256=0F_tiftdpTx3mDj0fWzthjw8ZC91HAs1kHQs5oEYnDE,2295
34
- agents/models/openai_responses.py,sha256=PuUSByOvq7eeNLC3OWH8JUcvy8icy9ROxxv2O2-rGBQ,13167
32
+ agents/models/openai_chatcompletions.py,sha256=e7iA9mxflbVKNCbt11gxCXKHRjMS1JXd0vpzjlOQOI8,39059
33
+ agents/models/openai_provider.py,sha256=3zKt8stSm0IcDJzX8GqXa3UcECKK79A290Zzem1nlUo,2784
34
+ agents/models/openai_responses.py,sha256=4CowZT0wAMflEzDgi6hEidcMq_0zchIm2uX_vV090TM,13386
35
35
  agents/tracing/__init__.py,sha256=pp2_mBCQGL9oN6_czCWHQsV4ZTEOcy1AVxdjQ41PNr0,2424
36
36
  agents/tracing/create.py,sha256=PAhfJKAeJ8jbZvxylTiikU_LqAhezYHphR4jG5EdaAE,12110
37
37
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
38
38
  agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
39
- agents/tracing/processors.py,sha256=iGtVJMmOZZqpNBr9S7Xbp-rvu7A92DKYOkXwExgkwEE,9706
39
+ agents/tracing/processors.py,sha256=74BB0w3XQjerlYN6kgRiqtV4VPAvZSMTPByutcX464c,9600
40
40
  agents/tracing/scope.py,sha256=x1m-aYilS1DeeV4L7Ckv55LVWod7c_nnTKoCGhJCumk,1372
41
41
  agents/tracing/setup.py,sha256=P5JaIcHej6m62rb27bSutN2Bqv0XSD9Z_Ki7ynCVdbs,6728
42
42
  agents/tracing/span_data.py,sha256=UQUPpMQ7Z1XOqKFJNHUxAJUVPwa6JMfGa7dm_NovuhQ,4574
43
43
  agents/tracing/spans.py,sha256=KWCqcRwUlt85NCZPQp98UIF5vAQAVWuVWQh3tgPK0WE,6605
44
44
  agents/tracing/traces.py,sha256=GL9EoEQKVk7eo0BcfRfQ6C7tdzlmPhkneQn4fdsCdqA,4774
45
45
  agents/tracing/util.py,sha256=BsDvn2rjE4SRQvfm55utljT8agdA0Z36KWXd1vdx4hs,392
46
- openai_agents-0.0.2.dist-info/METADATA,sha256=4usCZcGhHblA5qcC2MwN4ybHHdvfuC2wzqqos5xFBVo,6797
47
- openai_agents-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
48
- openai_agents-0.0.2.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
49
- openai_agents-0.0.2.dist-info/RECORD,,
46
+ openai_agents-0.0.4.dist-info/METADATA,sha256=8a-UqdtxRJCgwuT6jsfJ1MwDwwYWS-NnbcJB52QpZP4,7582
47
+ openai_agents-0.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
48
+ openai_agents-0.0.4.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
49
+ openai_agents-0.0.4.dist-info/RECORD,,