openai-agents 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions as they appear in the public registry.
Potentially problematic release: this version of openai-agents might be problematic.
- agents/__init__.py +22 -5
- agents/_run_impl.py +101 -22
- agents/agent.py +55 -7
- agents/agent_output.py +4 -4
- agents/function_schema.py +4 -0
- agents/guardrail.py +1 -1
- agents/handoffs.py +4 -4
- agents/items.py +4 -2
- agents/models/openai_chatcompletions.py +6 -1
- agents/models/openai_provider.py +13 -0
- agents/result.py +7 -0
- agents/run.py +10 -10
- agents/tool.py +34 -10
- agents/tracing/__init__.py +12 -0
- agents/tracing/create.py +122 -2
- agents/tracing/processors.py +2 -2
- agents/tracing/scope.py +1 -1
- agents/tracing/setup.py +1 -1
- agents/tracing/span_data.py +98 -2
- agents/tracing/spans.py +1 -1
- agents/tracing/traces.py +1 -1
- agents/tracing/util.py +5 -0
- agents/util/__init__.py +0 -0
- agents/util/_coro.py +2 -0
- agents/util/_error_tracing.py +16 -0
- agents/util/_json.py +31 -0
- agents/util/_pretty_print.py +56 -0
- agents/util/_transforms.py +11 -0
- agents/util/_types.py +7 -0
- agents/voice/__init__.py +51 -0
- agents/voice/events.py +47 -0
- agents/voice/exceptions.py +8 -0
- agents/voice/imports.py +11 -0
- agents/voice/input.py +88 -0
- agents/voice/model.py +193 -0
- agents/voice/models/__init__.py +0 -0
- agents/voice/models/openai_model_provider.py +97 -0
- agents/voice/models/openai_stt.py +457 -0
- agents/voice/models/openai_tts.py +54 -0
- agents/voice/pipeline.py +151 -0
- agents/voice/pipeline_config.py +46 -0
- agents/voice/result.py +287 -0
- agents/voice/utils.py +37 -0
- agents/voice/workflow.py +93 -0
- {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/METADATA +9 -4
- openai_agents-0.0.6.dist-info/RECORD +70 -0
- agents/_utils.py +0 -61
- openai_agents-0.0.4.dist-info/RECORD +0 -49
- {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/WHEEL +0 -0
- {openai_agents-0.0.4.dist-info → openai_agents-0.0.6.dist-info}/licenses/LICENSE +0 -0
agents/util/_pretty_print.py
ADDED
@@ -0,0 +1,56 @@
+from typing import TYPE_CHECKING
+
+from pydantic import BaseModel
+
+if TYPE_CHECKING:
+    from ..result import RunResult, RunResultBase, RunResultStreaming
+
+
+def _indent(text: str, indent_level: int) -> str:
+    indent_string = "  " * indent_level
+    return "\n".join(f"{indent_string}{line}" for line in text.splitlines())
+
+
+def _final_output_str(result: "RunResultBase") -> str:
+    if result.final_output is None:
+        return "None"
+    elif isinstance(result.final_output, str):
+        return result.final_output
+    elif isinstance(result.final_output, BaseModel):
+        return result.final_output.model_dump_json(indent=2)
+    else:
+        return str(result.final_output)
+
+
+def pretty_print_result(result: "RunResult") -> str:
+    output = "RunResult:"
+    output += f'\n- Last agent: Agent(name="{result.last_agent.name}", ...)'
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResult` for more details)"
+
+    return output
+
+
+def pretty_print_run_result_streaming(result: "RunResultStreaming") -> str:
+    output = "RunResultStreaming:"
+    output += f'\n- Current agent: Agent(name="{result.current_agent.name}", ...)'
+    output += f"\n- Current turn: {result.current_turn}"
+    output += f"\n- Max turns: {result.max_turns}"
+    output += f"\n- Is complete: {result.is_complete}"
+    output += (
+        f"\n- Final output ({type(result.final_output).__name__}):\n"
+        f"{_indent(_final_output_str(result), 2)}"
+    )
+    output += f"\n- {len(result.new_items)} new item(s)"
+    output += f"\n- {len(result.raw_responses)} raw response(s)"
+    output += f"\n- {len(result.input_guardrail_results)} input guardrail result(s)"
+    output += f"\n- {len(result.output_guardrail_results)} output guardrail result(s)"
+    output += "\n(See `RunResultStreaming` for more details)"
+    return output
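Both pretty-printers delegate final-output rendering to `_final_output_str`, whose Pydantic branch produces indented JSON. A standalone illustration of that branch (the `Weather` model here is hypothetical, not part of the package):

from pydantic import BaseModel

class Weather(BaseModel):
    city: str
    temperature_c: float

# Mirrors the BaseModel branch of _final_output_str: indented JSON.
print(Weather(city="Tokyo", temperature_c=21.5).model_dump_json(indent=2))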
agents/util/_transforms.py
ADDED
@@ -0,0 +1,11 @@
+import re
+
+
+def transform_string_function_style(name: str) -> str:
+    # Replace spaces with underscores
+    name = name.replace(" ", "_")
+
+    # Replace non-alphanumeric characters with underscores
+    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
+
+    return name.lower()
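The net effect is a lowercase identifier with every space and punctuation character mapped to `_`. A quick check, duplicating the helper above so it runs standalone:

import re

def transform_string_function_style(name: str) -> str:
    name = name.replace(" ", "_")
    name = re.sub(r"[^a-zA-Z0-9]", "_", name)
    return name.lower()

# "Get Weather!" -> "Get_Weather!" -> "Get_Weather_" -> "get_weather_"
assert transform_string_function_style("Get Weather!") == "get_weather_"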
agents/util/_types.py
ADDED
agents/voice/__init__.py
ADDED
@@ -0,0 +1,51 @@
+from .events import VoiceStreamEvent, VoiceStreamEventAudio, VoiceStreamEventLifecycle
+from .exceptions import STTWebsocketConnectionError
+from .input import AudioInput, StreamedAudioInput
+from .model import (
+    StreamedTranscriptionSession,
+    STTModel,
+    STTModelSettings,
+    TTSModel,
+    TTSModelSettings,
+    VoiceModelProvider,
+)
+from .models.openai_model_provider import OpenAIVoiceModelProvider
+from .models.openai_stt import OpenAISTTModel, OpenAISTTTranscriptionSession
+from .models.openai_tts import OpenAITTSModel
+from .pipeline import VoicePipeline
+from .pipeline_config import VoicePipelineConfig
+from .result import StreamedAudioResult
+from .utils import get_sentence_based_splitter
+from .workflow import (
+    SingleAgentVoiceWorkflow,
+    SingleAgentWorkflowCallbacks,
+    VoiceWorkflowBase,
+    VoiceWorkflowHelper,
+)
+
+__all__ = [
+    "AudioInput",
+    "StreamedAudioInput",
+    "STTModel",
+    "STTModelSettings",
+    "TTSModel",
+    "TTSModelSettings",
+    "VoiceModelProvider",
+    "StreamedAudioResult",
+    "SingleAgentVoiceWorkflow",
+    "OpenAIVoiceModelProvider",
+    "OpenAISTTModel",
+    "OpenAITTSModel",
+    "VoiceStreamEventAudio",
+    "VoiceStreamEventLifecycle",
+    "VoiceStreamEvent",
+    "VoicePipeline",
+    "VoicePipelineConfig",
+    "get_sentence_based_splitter",
+    "VoiceWorkflowHelper",
+    "VoiceWorkflowBase",
+    "SingleAgentWorkflowCallbacks",
+    "StreamedTranscriptionSession",
+    "OpenAISTTTranscriptionSession",
+    "STTWebsocketConnectionError",
+]
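Everything listed in `__all__` is importable directly from `agents.voice`; for example (assuming the `voice` extra is installed):

from agents.voice import AudioInput, VoicePipeline, VoiceStreamEvent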
agents/voice/events.py
ADDED
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Literal, Union
+
+from typing_extensions import TypeAlias
+
+from .imports import np, npt
+
+
+@dataclass
+class VoiceStreamEventAudio:
+    """Streaming event from the VoicePipeline"""
+
+    data: npt.NDArray[np.int16 | np.float32] | None
+    """The audio data."""
+
+    type: Literal["voice_stream_event_audio"] = "voice_stream_event_audio"
+    """The type of event."""
+
+
+@dataclass
+class VoiceStreamEventLifecycle:
+    """Streaming event from the VoicePipeline"""
+
+    event: Literal["turn_started", "turn_ended", "session_ended"]
+    """The event that occurred."""
+
+    type: Literal["voice_stream_event_lifecycle"] = "voice_stream_event_lifecycle"
+    """The type of event."""
+
+
+@dataclass
+class VoiceStreamEventError:
+    """Streaming event from the VoicePipeline"""
+
+    error: Exception
+    """The error that occurred."""
+
+    type: Literal["voice_stream_event_error"] = "voice_stream_event_error"
+    """The type of event."""
+
+
+VoiceStreamEvent: TypeAlias = Union[
+    VoiceStreamEventAudio, VoiceStreamEventLifecycle, VoiceStreamEventError
+]
+"""An event from the `VoicePipeline`, streamed via `StreamedAudioResult.stream()`."""
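Because each event carries a literal `type` tag, the union can be narrowed with a plain comparison. A minimal consumer sketch (the `result` argument is assumed to be a `StreamedAudioResult` returned by the pipeline):

async def play(result):
    async for event in result.stream():
        if event.type == "voice_stream_event_audio":
            ...  # event.data holds the numpy audio chunk (or None)
        elif event.type == "voice_stream_event_lifecycle":
            print("lifecycle:", event.event)  # turn_started / turn_ended / session_ended
        elif event.type == "voice_stream_event_error":
            raise event.error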
agents/voice/imports.py
ADDED
@@ -0,0 +1,11 @@
+try:
+    import numpy as np
+    import numpy.typing as npt
+    import websockets
+except ImportError as _e:
+    raise ImportError(
+        "`numpy` + `websockets` are required to use voice. You can install them via the optional "
+        "dependency group: `pip install openai-agents[voice]`."
+    ) from _e
+
+__all__ = ["np", "npt", "websockets"]
agents/voice/input.py
ADDED
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import asyncio
+import base64
+import io
+import wave
+from dataclasses import dataclass
+
+from ..exceptions import UserError
+from .imports import np, npt
+
+DEFAULT_SAMPLE_RATE = 24000
+
+
+def _buffer_to_audio_file(
+    buffer: npt.NDArray[np.int16 | np.float32],
+    frame_rate: int = DEFAULT_SAMPLE_RATE,
+    sample_width: int = 2,
+    channels: int = 1,
+) -> tuple[str, io.BytesIO, str]:
+    if buffer.dtype == np.float32:
+        # convert to int16
+        buffer = np.clip(buffer, -1.0, 1.0)
+        buffer = (buffer * 32767).astype(np.int16)
+    elif buffer.dtype != np.int16:
+        raise UserError("Buffer must be a numpy array of int16 or float32")
+
+    audio_file = io.BytesIO()
+    with wave.open(audio_file, "w") as wav_file:
+        wav_file.setnchannels(channels)
+        wav_file.setsampwidth(sample_width)
+        wav_file.setframerate(frame_rate)
+        wav_file.writeframes(buffer.tobytes())
+    audio_file.seek(0)
+
+    # (filename, bytes, content_type)
+    return ("audio.wav", audio_file, "audio/wav")
+
+
+@dataclass
+class AudioInput:
+    """Static audio to be used as input for the VoicePipeline."""
+
+    buffer: npt.NDArray[np.int16 | np.float32]
+    """
+    A buffer containing the audio data for the agent. Must be a numpy array of int16 or float32.
+    """
+
+    frame_rate: int = DEFAULT_SAMPLE_RATE
+    """The sample rate of the audio data. Defaults to 24000."""
+
+    sample_width: int = 2
+    """The sample width of the audio data. Defaults to 2."""
+
+    channels: int = 1
+    """The number of channels in the audio data. Defaults to 1."""
+
+    def to_audio_file(self) -> tuple[str, io.BytesIO, str]:
+        """Returns a tuple of (filename, bytes, content_type)"""
+        return _buffer_to_audio_file(self.buffer, self.frame_rate, self.sample_width, self.channels)
+
+    def to_base64(self) -> str:
+        """Returns the audio data as a base64 encoded string."""
+        if self.buffer.dtype == np.float32:
+            # convert to int16
+            self.buffer = np.clip(self.buffer, -1.0, 1.0)
+            self.buffer = (self.buffer * 32767).astype(np.int16)
+        elif self.buffer.dtype != np.int16:
+            raise UserError("Buffer must be a numpy array of int16 or float32")
+
+        return base64.b64encode(self.buffer.tobytes()).decode("utf-8")
+
+
+class StreamedAudioInput:
+    """Audio input represented as a stream of audio data. You can pass this to the `VoicePipeline`
+    and then push audio data into the queue using the `add_audio` method.
+    """
+
+    def __init__(self):
+        self.queue: asyncio.Queue[npt.NDArray[np.int16 | np.float32]] = asyncio.Queue()
+
+    async def add_audio(self, audio: npt.NDArray[np.int16 | np.float32]):
+        """Adds more audio data to the stream.
+
+        Args:
+            audio: The audio data to add. Must be a numpy array of int16 or float32.
+        """
+        await self.queue.put(audio)
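As a usage sketch, static input is just a dataclass wrapping a numpy buffer; here, three seconds of silence at the default 24 kHz rate (assumes numpy from the `voice` extra, values illustrative):

import numpy as np

from agents.voice import AudioInput

silence = np.zeros(24000 * 3, dtype=np.int16)  # 3 s at the default sample rate
audio = AudioInput(buffer=silence)

filename, wav_bytes, content_type = audio.to_audio_file()
print(filename, content_type)  # audio.wav audio/wav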
agents/voice/model.py
ADDED
@@ -0,0 +1,193 @@
+from __future__ import annotations
+
+import abc
+from collections.abc import AsyncIterator
+from dataclasses import dataclass
+from typing import Any, Callable, Literal
+
+from .imports import np, npt
+from .input import AudioInput, StreamedAudioInput
+from .utils import get_sentence_based_splitter
+
+DEFAULT_TTS_INSTRUCTIONS = (
+    "You will receive partial sentences. Do not complete the sentence, just read out the text."
+)
+DEFAULT_TTS_BUFFER_SIZE = 120
+
+
+@dataclass
+class TTSModelSettings:
+    """Settings for a TTS model."""
+
+    voice: (
+        Literal["alloy", "ash", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer"] | None
+    ) = None
+    """
+    The voice to use for the TTS model. If not provided, the default voice for the respective model
+    will be used.
+    """
+
+    buffer_size: int = 120
+    """The minimal size of the chunks of audio data that are being streamed out."""
+
+    dtype: npt.DTypeLike = np.int16
+    """The data type for the audio data to be returned in."""
+
+    transform_data: (
+        Callable[[npt.NDArray[np.int16 | np.float32]], npt.NDArray[np.int16 | np.float32]] | None
+    ) = None
+    """
+    A function to transform the data from the TTS model. This is useful if you want the resulting
+    audio stream to have the data in a specific shape already.
+    """
+
+    instructions: str = (
+        "You will receive partial sentences. Do not complete the sentence just read out the text."
+    )
+    """
+    The instructions to use for the TTS model. This is useful if you want to control the tone of the
+    audio output.
+    """
+
+    text_splitter: Callable[[str], tuple[str, str]] = get_sentence_based_splitter()
+    """
+    A function to split the text into chunks. This is useful if you want to split the text into
+    chunks before sending it to the TTS model rather than waiting for the whole text to be
+    processed.
+    """
+
+    speed: float | None = None
+    """The speed with which the TTS model will read the text. Between 0.25 and 4.0."""
+
+
+class TTSModel(abc.ABC):
+    """A text-to-speech model that can convert text into audio output."""
+
+    @property
+    @abc.abstractmethod
+    def model_name(self) -> str:
+        """The name of the TTS model."""
+        pass
+
+    @abc.abstractmethod
+    def run(self, text: str, settings: TTSModelSettings) -> AsyncIterator[bytes]:
+        """Given a text string, produces a stream of audio bytes, in PCM format.
+
+        Args:
+            text: The text to convert to audio.
+
+        Returns:
+            An async iterator of audio bytes, in PCM format.
+        """
+        pass
+
+
+class StreamedTranscriptionSession(abc.ABC):
+    """A streamed transcription of audio input."""
+
+    @abc.abstractmethod
+    def transcribe_turns(self) -> AsyncIterator[str]:
+        """Yields a stream of text transcriptions. Each transcription is a turn in the conversation.
+
+        This method is expected to return only after `close()` is called.
+        """
+        pass
+
+    @abc.abstractmethod
+    async def close(self) -> None:
+        """Closes the session."""
+        pass
+
+
+@dataclass
+class STTModelSettings:
+    """Settings for a speech-to-text model."""
+
+    prompt: str | None = None
+    """Instructions for the model to follow."""
+
+    language: str | None = None
+    """The language of the audio input."""
+
+    temperature: float | None = None
+    """The temperature of the model."""
+
+    turn_detection: dict[str, Any] | None = None
+    """The turn detection settings for the model when using streamed audio input."""
+
+
+class STTModel(abc.ABC):
+    """A speech-to-text model that can convert audio input into text."""
+
+    @property
+    @abc.abstractmethod
+    def model_name(self) -> str:
+        """The name of the STT model."""
+        pass
+
+    @abc.abstractmethod
+    async def transcribe(
+        self,
+        input: AudioInput,
+        settings: STTModelSettings,
+        trace_include_sensitive_data: bool,
+        trace_include_sensitive_audio_data: bool,
+    ) -> str:
+        """Given an audio input, produces a text transcription.
+
+        Args:
+            input: The audio input to transcribe.
+            settings: The settings to use for the transcription.
+            trace_include_sensitive_data: Whether to include sensitive data in traces.
+            trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.
+
+        Returns:
+            The text transcription of the audio input.
+        """
+        pass
+
+    @abc.abstractmethod
+    async def create_session(
+        self,
+        input: StreamedAudioInput,
+        settings: STTModelSettings,
+        trace_include_sensitive_data: bool,
+        trace_include_sensitive_audio_data: bool,
+    ) -> StreamedTranscriptionSession:
+        """Creates a new transcription session, which you can push audio to, and receive a stream
+        of text transcriptions.
+
+        Args:
+            input: The audio input to transcribe.
+            settings: The settings to use for the transcription.
+            trace_include_sensitive_data: Whether to include sensitive data in traces.
+            trace_include_sensitive_audio_data: Whether to include sensitive audio data in traces.
+
+        Returns:
+            A new transcription session.
+        """
+        pass
+
+
+class VoiceModelProvider(abc.ABC):
+    """The base interface for a voice model provider.
+
+    A model provider is responsible for creating speech-to-text and text-to-speech models, given a
+    name.
+    """
+
+    @abc.abstractmethod
+    def get_stt_model(self, model_name: str | None) -> STTModel:
+        """Get a speech-to-text model by name.
+
+        Args:
+            model_name: The name of the model to get.
+
+        Returns:
+            The speech-to-text model.
+        """
+        pass
+
+    @abc.abstractmethod
+    def get_tts_model(self, model_name: str | None) -> TTSModel:
+        """Get a text-to-speech model by name."""
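Since the settings types are plain dataclasses, callers override fields by keyword construction. A small sketch with illustrative values:

from agents.voice import TTSModelSettings

# "nova" is one of the allowed voice literals; speed must fall within 0.25-4.0.
settings = TTSModelSettings(voice="nova", speed=1.2)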
agents/voice/models/__init__.py
File without changes
agents/voice/models/openai_model_provider.py
ADDED
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import httpx
+from openai import AsyncOpenAI, DefaultAsyncHttpxClient
+
+from ...models import _openai_shared
+from ..model import STTModel, TTSModel, VoiceModelProvider
+from .openai_stt import OpenAISTTModel
+from .openai_tts import OpenAITTSModel
+
+_http_client: httpx.AsyncClient | None = None
+
+
+# If we create a new httpx client for each request, that would mean no sharing of connection pools,
+# which would mean worse latency and resource usage. So, we share the client across requests.
+def shared_http_client() -> httpx.AsyncClient:
+    global _http_client
+    if _http_client is None:
+        _http_client = DefaultAsyncHttpxClient()
+    return _http_client
+
+
+DEFAULT_STT_MODEL = "gpt-4o-transcribe"
+DEFAULT_TTS_MODEL = "gpt-4o-mini-tts"
+
+
+class OpenAIVoiceModelProvider(VoiceModelProvider):
+    """A voice model provider that uses OpenAI models."""
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        base_url: str | None = None,
+        openai_client: AsyncOpenAI | None = None,
+        organization: str | None = None,
+        project: str | None = None,
+    ) -> None:
+        """Create a new OpenAI voice model provider.
+
+        Args:
+            api_key: The API key to use for the OpenAI client. If not provided, we will use the
+                default API key.
+            base_url: The base URL to use for the OpenAI client. If not provided, we will use the
+                default base URL.
+            openai_client: An optional OpenAI client to use. If not provided, we will create a new
+                OpenAI client using the api_key and base_url.
+            organization: The organization to use for the OpenAI client.
+            project: The project to use for the OpenAI client.
+        """
+        if openai_client is not None:
+            assert api_key is None and base_url is None, (
+                "Don't provide api_key or base_url if you provide openai_client"
+            )
+            self._client: AsyncOpenAI | None = openai_client
+        else:
+            self._client = None
+            self._stored_api_key = api_key
+            self._stored_base_url = base_url
+            self._stored_organization = organization
+            self._stored_project = project
+
+    # We lazy load the client in case you never actually use OpenAIProvider(). Otherwise
+    # AsyncOpenAI() raises an error if you don't have an API key set.
+    def _get_client(self) -> AsyncOpenAI:
+        if self._client is None:
+            self._client = _openai_shared.get_default_openai_client() or AsyncOpenAI(
+                api_key=self._stored_api_key or _openai_shared.get_default_openai_key(),
+                base_url=self._stored_base_url,
+                organization=self._stored_organization,
+                project=self._stored_project,
+                http_client=shared_http_client(),
+            )
+
+        return self._client
+
+    def get_stt_model(self, model_name: str | None) -> STTModel:
+        """Get a speech-to-text model by name.
+
+        Args:
+            model_name: The name of the model to get.
+
+        Returns:
+            The speech-to-text model.
+        """
+        return OpenAISTTModel(model_name or DEFAULT_STT_MODEL, self._get_client())
+
+    def get_tts_model(self, model_name: str | None) -> TTSModel:
+        """Get a text-to-speech model by name.
+
+        Args:
+            model_name: The name of the model to get.
+
+        Returns:
+            The text-to-speech model.
+        """
+        return OpenAITTSModel(model_name or DEFAULT_TTS_MODEL, self._get_client())
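A minimal sketch of resolving the default models through the provider (assumes an `OPENAI_API_KEY` is set in the environment, and that the concrete models expose `model_name` as the ABC requires):

from agents.voice import OpenAIVoiceModelProvider

provider = OpenAIVoiceModelProvider()
stt = provider.get_stt_model(None)  # falls back to "gpt-4o-transcribe"
tts = provider.get_tts_model(None)  # falls back to "gpt-4o-mini-tts"
print(stt.model_name, tts.model_name)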