openai-agents 0.0.5-py3-none-any.whl → 0.0.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of openai-agents might be problematic; see the registry's advisory for more details.

agents/voice/pipeline_config.py ADDED
@@ -0,0 +1,46 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass, field
+ from typing import Any
+
+ from ..tracing.util import gen_group_id
+ from .model import STTModelSettings, TTSModelSettings, VoiceModelProvider
+ from .models.openai_model_provider import OpenAIVoiceModelProvider
+
+
+ @dataclass
+ class VoicePipelineConfig:
+     """Configuration for a `VoicePipeline`."""
+
+     model_provider: VoiceModelProvider = field(default_factory=OpenAIVoiceModelProvider)
+     """The voice model provider to use for the pipeline. Defaults to OpenAI."""
+
+     tracing_disabled: bool = False
+     """Whether to disable tracing of the pipeline. Defaults to `False`."""
+
+     trace_include_sensitive_data: bool = True
+     """Whether to include sensitive data in traces. Defaults to `True`. This is specifically for the
+     voice pipeline, and not for anything that goes on inside your Workflow."""
+
+     trace_include_sensitive_audio_data: bool = True
+     """Whether to include audio data in traces. Defaults to `True`."""
+
+     workflow_name: str = "Voice Agent"
+     """The name of the workflow to use for tracing. Defaults to `Voice Agent`."""
+
+     group_id: str = field(default_factory=gen_group_id)
+     """
+     A grouping identifier to use for tracing, to link multiple traces from the same conversation
+     or process. If not provided, we will create a random group ID.
+     """
+
+     trace_metadata: dict[str, Any] | None = None
+     """
+     An optional dictionary of additional metadata to include with the trace.
+     """
+
+     stt_settings: STTModelSettings = field(default_factory=STTModelSettings)
+     """The settings to use for the STT model."""
+
+     tts_settings: TTSModelSettings = field(default_factory=TTSModelSettings)
+     """The settings to use for the TTS model."""
agents/voice/result.py ADDED
@@ -0,0 +1,287 @@
+ from __future__ import annotations
+
+ import asyncio
+ import base64
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ from ..exceptions import UserError
+ from ..logger import logger
+ from ..tracing import Span, SpeechGroupSpanData, speech_group_span, speech_span
+ from ..tracing.util import time_iso
+ from .events import (
+     VoiceStreamEvent,
+     VoiceStreamEventAudio,
+     VoiceStreamEventError,
+     VoiceStreamEventLifecycle,
+ )
+ from .imports import np, npt
+ from .model import TTSModel, TTSModelSettings
+ from .pipeline_config import VoicePipelineConfig
+
+
+ def _audio_to_base64(audio_data: list[bytes]) -> str:
+     joined_audio_data = b"".join(audio_data)
+     return base64.b64encode(joined_audio_data).decode("utf-8")
+
+
+ class StreamedAudioResult:
+     """The output of a `VoicePipeline`. Streams events and audio data as they're generated."""
+
+     def __init__(
+         self,
+         tts_model: TTSModel,
+         tts_settings: TTSModelSettings,
+         voice_pipeline_config: VoicePipelineConfig,
+     ):
+         """Create a new `StreamedAudioResult` instance.
+
+         Args:
+             tts_model: The TTS model to use.
+             tts_settings: The TTS settings to use.
+             voice_pipeline_config: The voice pipeline config to use.
+         """
+         self.tts_model = tts_model
+         self.tts_settings = tts_settings
+         self.total_output_text = ""
+         self.instructions = tts_settings.instructions
+         self.text_generation_task: asyncio.Task[Any] | None = None
+
+         self._voice_pipeline_config = voice_pipeline_config
+         self._text_buffer = ""
+         self._turn_text_buffer = ""
+         self._queue: asyncio.Queue[VoiceStreamEvent] = asyncio.Queue()
+         self._tasks: list[asyncio.Task[Any]] = []
+         self._ordered_tasks: list[
+             asyncio.Queue[VoiceStreamEvent | None]
+         ] = []  # New: list to hold local queues for each text segment
+         self._dispatcher_task: asyncio.Task[Any] | None = (
+             None  # Task to dispatch audio chunks in order
+         )
+
+         self._done_processing = False
+         self._buffer_size = tts_settings.buffer_size
+         self._started_processing_turn = False
+         self._first_byte_received = False
+         self._generation_start_time: str | None = None
+         self._completed_session = False
+         self._stored_exception: BaseException | None = None
+         self._tracing_span: Span[SpeechGroupSpanData] | None = None
+
+     async def _start_turn(self):
+         if self._started_processing_turn:
+             return
+
+         self._tracing_span = speech_group_span()
+         self._tracing_span.start()
+         self._started_processing_turn = True
+         self._first_byte_received = False
+         self._generation_start_time = time_iso()
+         await self._queue.put(VoiceStreamEventLifecycle(event="turn_started"))
+
+     def _set_task(self, task: asyncio.Task[Any]):
+         self.text_generation_task = task
+
+     async def _add_error(self, error: Exception):
+         await self._queue.put(VoiceStreamEventError(error))
+
+     def _transform_audio_buffer(
+         self, buffer: list[bytes], output_dtype: npt.DTypeLike
+     ) -> npt.NDArray[np.int16 | np.float32]:
+         np_array = np.frombuffer(b"".join(buffer), dtype=np.int16)
+
+         if output_dtype == np.int16:
+             return np_array
+         elif output_dtype == np.float32:
+             return (np_array.astype(np.float32) / 32767.0).reshape(-1, 1)
+         else:
+             raise UserError("Invalid output dtype")
+
+     async def _stream_audio(
+         self,
+         text: str,
+         local_queue: asyncio.Queue[VoiceStreamEvent | None],
+         finish_turn: bool = False,
+     ):
+         with speech_span(
+             model=self.tts_model.model_name,
+             input=text if self._voice_pipeline_config.trace_include_sensitive_data else "",
+             model_config={
+                 "voice": self.tts_settings.voice,
+                 "instructions": self.instructions,
+                 "speed": self.tts_settings.speed,
+             },
+             output_format="pcm",
+             parent=self._tracing_span,
+         ) as tts_span:
+             try:
+                 first_byte_received = False
+                 buffer: list[bytes] = []
+                 full_audio_data: list[bytes] = []
+
+                 async for chunk in self.tts_model.run(text, self.tts_settings):
+                     if not first_byte_received:
+                         first_byte_received = True
+                         tts_span.span_data.first_content_at = time_iso()
+
+                     if chunk:
+                         buffer.append(chunk)
+                         full_audio_data.append(chunk)
+                         if len(buffer) >= self._buffer_size:
+                             audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype)
+                             if self.tts_settings.transform_data:
+                                 audio_np = self.tts_settings.transform_data(audio_np)
+                             await local_queue.put(
+                                 VoiceStreamEventAudio(data=audio_np)
+                             )  # Use local queue
+                             buffer = []
+                 if buffer:
+                     audio_np = self._transform_audio_buffer(buffer, self.tts_settings.dtype)
+                     if self.tts_settings.transform_data:
+                         audio_np = self.tts_settings.transform_data(audio_np)
+                     await local_queue.put(VoiceStreamEventAudio(data=audio_np))  # Use local queue
+
+                 if self._voice_pipeline_config.trace_include_sensitive_audio_data:
+                     tts_span.span_data.output = _audio_to_base64(full_audio_data)
+                 else:
+                     tts_span.span_data.output = ""
+
+                 if finish_turn:
+                     await local_queue.put(VoiceStreamEventLifecycle(event="turn_ended"))
+                 else:
+                     await local_queue.put(None)  # Signal completion for this segment
+             except Exception as e:
+                 tts_span.set_error(
+                     {
+                         "message": str(e),
+                         "data": {
+                             "text": text
+                             if self._voice_pipeline_config.trace_include_sensitive_data
+                             else "",
+                         },
+                     }
+                 )
+                 logger.error(f"Error streaming audio: {e}")
+
+                 # Signal completion for whole session because of error
+                 await local_queue.put(VoiceStreamEventLifecycle(event="session_ended"))
+                 raise e
+
+     async def _add_text(self, text: str):
+         await self._start_turn()
+
+         self._text_buffer += text
+         self.total_output_text += text
+         self._turn_text_buffer += text
+
+         combined_sentences, self._text_buffer = self.tts_settings.text_splitter(self._text_buffer)
+
+         if len(combined_sentences) >= 20:
+             local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue()
+             self._ordered_tasks.append(local_queue)
+             self._tasks.append(
+                 asyncio.create_task(self._stream_audio(combined_sentences, local_queue))
+             )
+             if self._dispatcher_task is None:
+                 self._dispatcher_task = asyncio.create_task(self._dispatch_audio())
+
+     async def _turn_done(self):
+         if self._text_buffer:
+             local_queue: asyncio.Queue[VoiceStreamEvent | None] = asyncio.Queue()
+             self._ordered_tasks.append(local_queue)  # Append the local queue for the final segment
+             self._tasks.append(
+                 asyncio.create_task(
+                     self._stream_audio(self._text_buffer, local_queue, finish_turn=True)
+                 )
+             )
+             self._text_buffer = ""
+         self._done_processing = True
+         if self._dispatcher_task is None:
+             self._dispatcher_task = asyncio.create_task(self._dispatch_audio())
+         await asyncio.gather(*self._tasks)
+
+     def _finish_turn(self):
+         if self._tracing_span:
+             if self._voice_pipeline_config.trace_include_sensitive_data:
+                 self._tracing_span.span_data.input = self._turn_text_buffer
+             else:
+                 self._tracing_span.span_data.input = ""
+
+             self._tracing_span.finish()
+             self._tracing_span = None
+         self._turn_text_buffer = ""
+         self._started_processing_turn = False
+
+     async def _done(self):
+         self._completed_session = True
+         await self._wait_for_completion()
+
+     async def _dispatch_audio(self):
+         # Dispatch audio chunks from each segment in the order they were added
+         while True:
+             if len(self._ordered_tasks) == 0:
+                 if self._completed_session:
+                     break
+                 await asyncio.sleep(0)
+                 continue
+             local_queue = self._ordered_tasks.pop(0)
+             while True:
+                 chunk = await local_queue.get()
+                 if chunk is None:
+                     break
+                 await self._queue.put(chunk)
+                 if isinstance(chunk, VoiceStreamEventLifecycle):
+                     local_queue.task_done()
+                     if chunk.event == "turn_ended":
+                         self._finish_turn()
+                         break
+         await self._queue.put(VoiceStreamEventLifecycle(event="session_ended"))
+
+     async def _wait_for_completion(self):
+         tasks: list[asyncio.Task[Any]] = self._tasks
+         if self._dispatcher_task is not None:
+             tasks.append(self._dispatcher_task)
+         await asyncio.gather(*tasks)
+
+     def _cleanup_tasks(self):
+         self._finish_turn()
+
+         for task in self._tasks:
+             if not task.done():
+                 task.cancel()
+
+         if self._dispatcher_task and not self._dispatcher_task.done():
+             self._dispatcher_task.cancel()
+
+         if self.text_generation_task and not self.text_generation_task.done():
+             self.text_generation_task.cancel()
+
+     def _check_errors(self):
+         for task in self._tasks:
+             if task.done():
+                 if task.exception():
+                     self._stored_exception = task.exception()
+                     break
+
+     async def stream(self) -> AsyncIterator[VoiceStreamEvent]:
+         """Stream the events and audio data as they're generated."""
+         while True:
+             try:
+                 event = await self._queue.get()
+             except asyncio.CancelledError:
+                 break
+             if isinstance(event, VoiceStreamEventError):
+                 self._stored_exception = event.error
+                 logger.error(f"Error processing output: {event.error}")
+                 break
+             if event is None:
+                 break
+             yield event
+             if event.type == "voice_stream_event_lifecycle" and event.event == "session_ended":
+                 break
+
+         self._check_errors()
+         self._cleanup_tasks()
+
+         if self._stored_exception:
+             raise self._stored_exception
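`stream()` is the public surface of this class: the dispatcher preserves segment order, so audio events arrive in the order their text was generated, and a `session_ended` lifecycle event terminates iteration. A hedged consumer sketch, where `result` is assumed to come from running a `VoicePipeline`, `play_chunk` is a hypothetical playback helper, and the `"voice_stream_event_audio"` type string is inferred from the `VoiceStreamEventAudio` import above:

```python
async def consume(result: StreamedAudioResult) -> None:
    async for event in result.stream():
        if event.type == "voice_stream_event_audio":
            play_chunk(event.data)  # hypothetical: hand the PCM chunk to a player
        elif event.type == "voice_stream_event_lifecycle":
            # One of "turn_started", "turn_ended", or "session_ended" (see above).
            print(f"lifecycle: {event.event}")
```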
agents/voice/utils.py ADDED
@@ -0,0 +1,37 @@
+ import re
+ from typing import Callable
+
+
+ def get_sentence_based_splitter(
+     min_sentence_length: int = 20,
+ ) -> Callable[[str], tuple[str, str]]:
+     """Returns a function that splits text into chunks based on sentence boundaries.
+
+     Args:
+         min_sentence_length: The minimum length of a sentence to be included in a chunk.
+
+     Returns:
+         A function that splits text into chunks based on sentence boundaries.
+     """
+
+     def sentence_based_text_splitter(text_buffer: str) -> tuple[str, str]:
+         """
+         A function to split the text into chunks. This is useful if you want to split the text into
+         chunks before sending it to the TTS model rather than waiting for the whole text to be
+         processed.
+
+         Args:
+             text_buffer: The text to split.
+
+         Returns:
+             A tuple of the text to process and the remaining text buffer.
+         """
+         sentences = re.split(r"(?<=[.!?])\s+", text_buffer.strip())
+         if len(sentences) >= 1:
+             combined_sentences = " ".join(sentences[:-1])
+             if len(combined_sentences) >= min_sentence_length:
+                 remaining_text_buffer = sentences[-1]
+                 return combined_sentences, remaining_text_buffer
+         return "", text_buffer
+
+     return sentence_based_text_splitter
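The splitter flushes only once the completed sentences in the buffer reach `min_sentence_length`; the trailing partial sentence always stays in the buffer. An illustration of that contract, using only the code above:

```python
splitter = get_sentence_based_splitter(min_sentence_length=10)

# Two complete sentences clear the minimum length, so they are flushed;
# the unfinished sentence remains buffered for the next call.
done, rest = splitter("Hello there. How are you? I am fi")
assert done == "Hello there. How are you?"
assert rest == "I am fi"

# Not enough completed text yet: nothing is flushed.
assert splitter("Hi.") == ("", "Hi.")
```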
agents/voice/workflow.py ADDED
@@ -0,0 +1,93 @@
+ from __future__ import annotations
+
+ import abc
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ from ..agent import Agent
+ from ..items import TResponseInputItem
+ from ..result import RunResultStreaming
+ from ..run import Runner
+
+
+ class VoiceWorkflowBase(abc.ABC):
+     """
+     A base class for a voice workflow. You must implement the `run` method. A "workflow" is any
+     code you want, that receives a transcription and yields text that will be turned into speech
+     by a text-to-speech model.
+     In most cases, you'll create `Agent`s and use `Runner.run_streamed()` to run them, returning
+     some or all of the text events from the stream. You can use the `VoiceWorkflowHelper` class to
+     help with extracting text events from the stream.
+     If you have a simple workflow that has a single starting agent and no custom logic, you can
+     use `SingleAgentVoiceWorkflow` directly.
+     """
+
+     @abc.abstractmethod
+     def run(self, transcription: str) -> AsyncIterator[str]:
+         """
+         Run the voice workflow. You will receive an input transcription, and must yield text that
+         will be spoken to the user. You can run whatever logic you want here. In most cases, the
+         final logic will involve calling `Runner.run_streamed()` and yielding any text events from
+         the stream.
+         """
+         pass
+
+
+ class VoiceWorkflowHelper:
+     @classmethod
+     async def stream_text_from(cls, result: RunResultStreaming) -> AsyncIterator[str]:
+         """Wraps a `RunResultStreaming` object and yields text events from the stream."""
+         async for event in result.stream_events():
+             if (
+                 event.type == "raw_response_event"
+                 and event.data.type == "response.output_text.delta"
+             ):
+                 yield event.data.delta
+
+
+ class SingleAgentWorkflowCallbacks:
+     def on_run(self, workflow: SingleAgentVoiceWorkflow, transcription: str) -> None:
+         """Called when the workflow is run."""
+         pass
+
+
+ class SingleAgentVoiceWorkflow(VoiceWorkflowBase):
+     """A simple voice workflow that runs a single agent. Each transcription and result is added to
+     the input history.
+     For more complex workflows (e.g. multiple Runner calls, custom message history, custom logic,
+     custom configs), subclass `VoiceWorkflowBase` and implement your own logic.
+     """
+
+     def __init__(self, agent: Agent[Any], callbacks: SingleAgentWorkflowCallbacks | None = None):
+         """Create a new single agent voice workflow.
+
+         Args:
+             agent: The agent to run.
+             callbacks: Optional callbacks to call during the workflow.
+         """
+         self._input_history: list[TResponseInputItem] = []
+         self._current_agent = agent
+         self._callbacks = callbacks
+
+     async def run(self, transcription: str) -> AsyncIterator[str]:
+         if self._callbacks:
+             self._callbacks.on_run(self, transcription)
+
+         # Add the transcription to the input history
+         self._input_history.append(
+             {
+                 "role": "user",
+                 "content": transcription,
+             }
+         )
+
+         # Run the agent
+         result = Runner.run_streamed(self._current_agent, self._input_history)
+
+         # Stream the text from the result
+         async for chunk in VoiceWorkflowHelper.stream_text_from(result):
+             yield chunk
+
+         # Update the input history and current agent
+         self._input_history = result.to_input_list()
+         self._current_agent = result.last_agent
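A hedged end-to-end sketch of `SingleAgentVoiceWorkflow`: the `Agent` constructor follows the SDK's hello-world example, and joining the yielded chunks into one string is purely illustrative (a real pipeline would feed them to a TTS model):

```python
import asyncio

from agents import Agent
from agents.voice.workflow import SingleAgentVoiceWorkflow

agent = Agent(name="Assistant", instructions="You are a helpful voice assistant.")
workflow = SingleAgentVoiceWorkflow(agent)


async def main() -> None:
    # run() records the transcription in the input history, runs the agent,
    # and yields the streamed text that a TTS model would speak.
    text = "".join([chunk async for chunk in workflow.run("What time is it?")])
    print(text)


asyncio.run(main())
```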
{openai_agents-0.0.5.dist-info → openai_agents-0.0.6.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: openai-agents
- Version: 0.0.5
+ Version: 0.0.6
  Summary: OpenAI Agents SDK
  Project-URL: Homepage, https://github.com/openai/openai-agents-python
  Project-URL: Repository, https://github.com/openai/openai-agents-python
@@ -24,6 +24,9 @@ Requires-Dist: pydantic<3,>=2.10
  Requires-Dist: requests<3,>=2.0
  Requires-Dist: types-requests<3,>=2.0
  Requires-Dist: typing-extensions<5,>=4.12.2
+ Provides-Extra: voice
+ Requires-Dist: numpy<3,>=2.2.0; (python_version >= '3.10') and extra == 'voice'
+ Requires-Dist: websockets<16,>=15.0; extra == 'voice'
  Description-Content-Type: text/markdown

  # OpenAI Agents SDK
@@ -58,6 +61,8 @@ source env/bin/activate
  pip install openai-agents
  ```

+ For voice support, install with the optional `voice` group: `pip install openai-agents[voice]`.
+
  ## Hello world example

  ```python
{openai_agents-0.0.5.dist-info → openai_agents-0.0.6.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
- agents/__init__.py,sha256=vdpHPLytdNImdTlOJUnZi_Wx6Egot3q_InhcFixd6zU,6422
+ agents/__init__.py,sha256=PpYDMZH0h2NyguV6mtEn6aBZlGdwO34wndtP1CKRdl4,6706
  agents/_config.py,sha256=ANrM7GP2VSQehDkMc9qocxkUlPwqU-i5sieMJyEwxpM,796
  agents/_debug.py,sha256=7OKys2lDjeCtGggTkM53m_8vw0WIr3yt-_JPBDAnsw0,608
  agents/_run_impl.py,sha256=B-YeWxms2vi3SHMSsHPEjif0ZbcpxDetRugo-_mkUVw,31991
@@ -29,19 +29,19 @@ agents/models/_openai_shared.py,sha256=4Ngwo2Fv2RXY61Pqck1cYPkSln2tDnb8Ai-ao4QG-
  agents/models/fake_id.py,sha256=lbXjUUSMeAQ8eFx4V5QLUnBClHE6adJlYYav55RlG5w,268
  agents/models/interface.py,sha256=dgIlKyPaCbNRTHXxd6x7OQwJuAelG3F-C19P-aacHWQ,3129
  agents/models/openai_chatcompletions.py,sha256=xs2JdEl0taqz3LIRWL8etr88tzpa_UWggAwAQPTyoxQ,39375
- agents/models/openai_provider.py,sha256=3zKt8stSm0IcDJzX8GqXa3UcECKK79A290Zzem1nlUo,2784
+ agents/models/openai_provider.py,sha256=NMxTNaoTa329GrA7jj51LC02pb_e2eFh-PCvWADJrkY,3478
  agents/models/openai_responses.py,sha256=4CowZT0wAMflEzDgi6hEidcMq_0zchIm2uX_vV090TM,13386
- agents/tracing/__init__.py,sha256=pp2_mBCQGL9oN6_czCWHQsV4ZTEOcy1AVxdjQ41PNr0,2424
- agents/tracing/create.py,sha256=xn0n1Zr6Az4SMw0x_OeBNiBHJ1yYxL1FNhA_bLhBodY,12111
+ agents/tracing/__init__.py,sha256=pmbNHEOORyHgufSQpHHT1DcmltFICbN6EIc3VcDwzc0,2708
+ agents/tracing/create.py,sha256=WFH4qLhhTaHy3hBnnxlJq5PsRedvq-rtOeZkMaE2PTA,16925
  agents/tracing/logger.py,sha256=J4KUDRSGa7x5UVfUwWe-gbKwoaq8AeETRqkPt3QvtGg,68
  agents/tracing/processor_interface.py,sha256=wNyZCwNJko5CrUIWD_lMou5ppQ67CFYwvWRsJRM3up8,1659
  agents/tracing/processors.py,sha256=z3NAwo4ZG8KloEIq7ihIadxMfduL_cECY5XCgOaK1H8,9595
  agents/tracing/scope.py,sha256=84gOESqFfR2E_XCZsT11DLyR-3UTyqxHrfBBjH1Ic44,1373
  agents/tracing/setup.py,sha256=1wRMIVnsMOx5nWWnldqbTXg44a7-ABcC0jZK4q4I-S8,6729
- agents/tracing/span_data.py,sha256=5VOoiHHakviJDeiLcPAQS_jy2hPS__GwKsREAUg8Bd4,4604
+ agents/tracing/span_data.py,sha256=Aic98vMM3Os5IEHO8e4xB8zEHmVsENrEqeTsCDt578I,7005
  agents/tracing/spans.py,sha256=6vVzocGMsdgIma1ksqkBZmhar91xj4RpgcpUC3iibqg,6606
  agents/tracing/traces.py,sha256=G5LlECSK-DBRFP-bjT8maZjBQulz6SaHILYauUVlfq8,4775
- agents/tracing/util.py,sha256=BsDvn2rjE4SRQvfm55utljT8agdA0Z36KWXd1vdx4hs,392
+ agents/tracing/util.py,sha256=x5tAw2YBKggwQ8rH5NG8GiJrFOnPErlJPk7oicBO1dA,501
  agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  agents/util/_coro.py,sha256=S38XUYFC7bqTELSgMUBsAX1GoRlIrV7coupcUAWH__4,45
  agents/util/_error_tracing.py,sha256=hdkYNx180b18lP0PSB1toE5atNHsMg_Bm9Osw812vLo,421
@@ -49,7 +49,22 @@ agents/util/_json.py,sha256=eKeQeMlQkBXRFeL3ilNZFmszGyfhtzZdW_GW_As6dcg,972
  agents/util/_pretty_print.py,sha256=rRVp24UmTgzCm-W4ritWBOxxnPRinzFdrZlOhTi1KVQ,2227
  agents/util/_transforms.py,sha256=CZe74NOHkHneyo4fHYfFWksCSTn-kXtEyejL9P0_xlA,270
  agents/util/_types.py,sha256=8KxYfCw0gYSMWcQmacJoc3Q7Lc46LmT-AWvhF10KJ-E,160
- openai_agents-0.0.5.dist-info/METADATA,sha256=fHalk7G3p4YxZCj85yRVbOpYX63vgxcR0mIFdEEpfgU,7757
- openai_agents-0.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- openai_agents-0.0.5.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
- openai_agents-0.0.5.dist-info/RECORD,,
+ agents/voice/__init__.py,sha256=aEw6GdORLNIXHqIvKFc-5PFZr3XMala3jv4AoeLKt4Q,1507
+ agents/voice/events.py,sha256=4aPAZC0__ocgmg_mcX4c1zv9Go-YdKIVItQ2kYgtye0,1216
+ agents/voice/exceptions.py,sha256=QcyfvaUTBe4gxbFP82oDSa_puzZ4Z4O4k01B8pAHnK0,233
+ agents/voice/imports.py,sha256=ANmL2vDcr8vdTQD70-vc2MJYEXUAxp-p0aZgzr2xbZ0,346
+ agents/voice/input.py,sha256=FSbdHMIdLVKX4vYcmf3WBJ5dAlh5zMDjCAuGfXOZTQs,2910
+ agents/voice/model.py,sha256=4ptWkKPfUGbVsg8u10KUIl64iNhQX9rx7Y0D_ZcFlv0,5893
+ agents/voice/pipeline.py,sha256=5LKTTDytQt4QlZzVKgbB9x3X2zA-TeR94FTi15vIUc0,6259
+ agents/voice/pipeline_config.py,sha256=_cynbnzxvQijxkGrMYHJzIV54F9bRvDsPV24qexVO8c,1759
+ agents/voice/result.py,sha256=Yx9JCMGCE9OfXacaBFfFLQJRwkNo5-h4Nqm9OPnemU4,11107
+ agents/voice/utils.py,sha256=MrRomVqBLXeMAOue-Itwh0Fc5HjB0QCMKXclqFPhrbI,1309
+ agents/voice/workflow.py,sha256=lef1NulzNHWFiiPUESGeb_6WhD6CouP1W5NOUAYFewk,3527
+ agents/voice/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ agents/voice/models/openai_model_provider.py,sha256=Khn0uT-VhsEbe7_OhBMGFQzXNwL80gcWZyTHl3CaBII,3587
+ agents/voice/models/openai_stt.py,sha256=ApxBvvDjpnhU9OjwnQDxg0adbnrNGIUZ3wHiHP4bh3I,16887
+ agents/voice/models/openai_tts.py,sha256=4KoLQuFDHKu5a1VTJlu9Nj3MHwMlrn9wfT_liJDJ2dw,1477
+ openai_agents-0.0.6.dist-info/METADATA,sha256=0WGIjgvGUoJmKLDMM4u35fbxhpn7a4MH5bwky9MkExA,8010
+ openai_agents-0.0.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ openai_agents-0.0.6.dist-info/licenses/LICENSE,sha256=E994EspT7Krhy0qGiES7WYNzBHrh1YDk3r--8d1baRU,1063
+ openai_agents-0.0.6.dist-info/RECORD,,