universal-mcp-applications 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/ahrefs/README.md +51 -0
- universal_mcp/applications/ahrefs/__init__.py +1 -0
- universal_mcp/applications/ahrefs/app.py +2291 -0
- universal_mcp/applications/airtable/README.md +22 -0
- universal_mcp/applications/airtable/__init__.py +1 -0
- universal_mcp/applications/airtable/app.py +479 -0
- universal_mcp/applications/apollo/README.md +44 -0
- universal_mcp/applications/apollo/__init__.py +1 -0
- universal_mcp/applications/apollo/app.py +1847 -0
- universal_mcp/applications/asana/README.md +199 -0
- universal_mcp/applications/asana/__init__.py +1 -0
- universal_mcp/applications/asana/app.py +9509 -0
- universal_mcp/applications/aws-s3/README.md +0 -0
- universal_mcp/applications/aws-s3/__init__.py +1 -0
- universal_mcp/applications/aws-s3/app.py +552 -0
- universal_mcp/applications/bill/README.md +0 -0
- universal_mcp/applications/bill/__init__.py +1 -0
- universal_mcp/applications/bill/app.py +8705 -0
- universal_mcp/applications/box/README.md +307 -0
- universal_mcp/applications/box/__init__.py +1 -0
- universal_mcp/applications/box/app.py +15987 -0
- universal_mcp/applications/braze/README.md +106 -0
- universal_mcp/applications/braze/__init__.py +1 -0
- universal_mcp/applications/braze/app.py +4754 -0
- universal_mcp/applications/cal-com-v2/README.md +150 -0
- universal_mcp/applications/cal-com-v2/__init__.py +1 -0
- universal_mcp/applications/cal-com-v2/app.py +5541 -0
- universal_mcp/applications/calendly/README.md +53 -0
- universal_mcp/applications/calendly/__init__.py +1 -0
- universal_mcp/applications/calendly/app.py +1436 -0
- universal_mcp/applications/canva/README.md +43 -0
- universal_mcp/applications/canva/__init__.py +1 -0
- universal_mcp/applications/canva/app.py +941 -0
- universal_mcp/applications/clickup/README.md +135 -0
- universal_mcp/applications/clickup/__init__.py +1 -0
- universal_mcp/applications/clickup/app.py +5009 -0
- universal_mcp/applications/coda/README.md +108 -0
- universal_mcp/applications/coda/__init__.py +1 -0
- universal_mcp/applications/coda/app.py +3671 -0
- universal_mcp/applications/confluence/README.md +198 -0
- universal_mcp/applications/confluence/__init__.py +1 -0
- universal_mcp/applications/confluence/app.py +6273 -0
- universal_mcp/applications/contentful/README.md +17 -0
- universal_mcp/applications/contentful/__init__.py +1 -0
- universal_mcp/applications/contentful/app.py +364 -0
- universal_mcp/applications/crustdata/README.md +25 -0
- universal_mcp/applications/crustdata/__init__.py +1 -0
- universal_mcp/applications/crustdata/app.py +586 -0
- universal_mcp/applications/dialpad/README.md +202 -0
- universal_mcp/applications/dialpad/__init__.py +1 -0
- universal_mcp/applications/dialpad/app.py +5949 -0
- universal_mcp/applications/digitalocean/README.md +463 -0
- universal_mcp/applications/digitalocean/__init__.py +1 -0
- universal_mcp/applications/digitalocean/app.py +20835 -0
- universal_mcp/applications/domain-checker/README.md +13 -0
- universal_mcp/applications/domain-checker/__init__.py +1 -0
- universal_mcp/applications/domain-checker/app.py +265 -0
- universal_mcp/applications/e2b/README.md +12 -0
- universal_mcp/applications/e2b/__init__.py +1 -0
- universal_mcp/applications/e2b/app.py +187 -0
- universal_mcp/applications/elevenlabs/README.md +88 -0
- universal_mcp/applications/elevenlabs/__init__.py +1 -0
- universal_mcp/applications/elevenlabs/app.py +3235 -0
- universal_mcp/applications/exa/README.md +15 -0
- universal_mcp/applications/exa/__init__.py +1 -0
- universal_mcp/applications/exa/app.py +221 -0
- universal_mcp/applications/falai/README.md +17 -0
- universal_mcp/applications/falai/__init__.py +1 -0
- universal_mcp/applications/falai/app.py +331 -0
- universal_mcp/applications/figma/README.md +49 -0
- universal_mcp/applications/figma/__init__.py +1 -0
- universal_mcp/applications/figma/app.py +1090 -0
- universal_mcp/applications/firecrawl/README.md +20 -0
- universal_mcp/applications/firecrawl/__init__.py +1 -0
- universal_mcp/applications/firecrawl/app.py +514 -0
- universal_mcp/applications/fireflies/README.md +25 -0
- universal_mcp/applications/fireflies/__init__.py +1 -0
- universal_mcp/applications/fireflies/app.py +506 -0
- universal_mcp/applications/fpl/README.md +23 -0
- universal_mcp/applications/fpl/__init__.py +1 -0
- universal_mcp/applications/fpl/app.py +1327 -0
- universal_mcp/applications/fpl/utils/api.py +142 -0
- universal_mcp/applications/fpl/utils/fixtures.py +629 -0
- universal_mcp/applications/fpl/utils/helper.py +982 -0
- universal_mcp/applications/fpl/utils/league_utils.py +546 -0
- universal_mcp/applications/fpl/utils/position_utils.py +68 -0
- universal_mcp/applications/ghost-content/README.md +25 -0
- universal_mcp/applications/ghost-content/__init__.py +1 -0
- universal_mcp/applications/ghost-content/app.py +654 -0
- universal_mcp/applications/github/README.md +1049 -0
- universal_mcp/applications/github/__init__.py +1 -0
- universal_mcp/applications/github/app.py +50600 -0
- universal_mcp/applications/gong/README.md +63 -0
- universal_mcp/applications/gong/__init__.py +1 -0
- universal_mcp/applications/gong/app.py +2297 -0
- universal_mcp/applications/google-ads/README.md +0 -0
- universal_mcp/applications/google-ads/__init__.py +1 -0
- universal_mcp/applications/google-ads/app.py +23 -0
- universal_mcp/applications/google-calendar/README.md +21 -0
- universal_mcp/applications/google-calendar/__init__.py +1 -0
- universal_mcp/applications/google-calendar/app.py +574 -0
- universal_mcp/applications/google-docs/README.md +25 -0
- universal_mcp/applications/google-docs/__init__.py +1 -0
- universal_mcp/applications/google-docs/app.py +760 -0
- universal_mcp/applications/google-drive/README.md +68 -0
- universal_mcp/applications/google-drive/__init__.py +1 -0
- universal_mcp/applications/google-drive/app.py +4936 -0
- universal_mcp/applications/google-gemini/README.md +25 -0
- universal_mcp/applications/google-gemini/__init__.py +1 -0
- universal_mcp/applications/google-gemini/app.py +663 -0
- universal_mcp/applications/google-mail/README.md +31 -0
- universal_mcp/applications/google-mail/__init__.py +1 -0
- universal_mcp/applications/google-mail/app.py +1354 -0
- universal_mcp/applications/google-searchconsole/README.md +21 -0
- universal_mcp/applications/google-searchconsole/__init__.py +1 -0
- universal_mcp/applications/google-searchconsole/app.py +320 -0
- universal_mcp/applications/google-sheet/README.md +36 -0
- universal_mcp/applications/google-sheet/__init__.py +1 -0
- universal_mcp/applications/google-sheet/app.py +1941 -0
- universal_mcp/applications/hashnode/README.md +20 -0
- universal_mcp/applications/hashnode/__init__.py +1 -0
- universal_mcp/applications/hashnode/app.py +455 -0
- universal_mcp/applications/heygen/README.md +44 -0
- universal_mcp/applications/heygen/__init__.py +1 -0
- universal_mcp/applications/heygen/app.py +961 -0
- universal_mcp/applications/http-tools/README.md +16 -0
- universal_mcp/applications/http-tools/__init__.py +1 -0
- universal_mcp/applications/http-tools/app.py +153 -0
- universal_mcp/applications/hubspot/README.md +239 -0
- universal_mcp/applications/hubspot/__init__.py +1 -0
- universal_mcp/applications/hubspot/app.py +416 -0
- universal_mcp/applications/jira/README.md +600 -0
- universal_mcp/applications/jira/__init__.py +1 -0
- universal_mcp/applications/jira/app.py +28804 -0
- universal_mcp/applications/klaviyo/README.md +313 -0
- universal_mcp/applications/klaviyo/__init__.py +1 -0
- universal_mcp/applications/klaviyo/app.py +11236 -0
- universal_mcp/applications/linkedin/README.md +15 -0
- universal_mcp/applications/linkedin/__init__.py +1 -0
- universal_mcp/applications/linkedin/app.py +243 -0
- universal_mcp/applications/mailchimp/README.md +281 -0
- universal_mcp/applications/mailchimp/__init__.py +1 -0
- universal_mcp/applications/mailchimp/app.py +10937 -0
- universal_mcp/applications/markitdown/README.md +12 -0
- universal_mcp/applications/markitdown/__init__.py +1 -0
- universal_mcp/applications/markitdown/app.py +63 -0
- universal_mcp/applications/miro/README.md +151 -0
- universal_mcp/applications/miro/__init__.py +1 -0
- universal_mcp/applications/miro/app.py +5429 -0
- universal_mcp/applications/ms-teams/README.md +42 -0
- universal_mcp/applications/ms-teams/__init__.py +1 -0
- universal_mcp/applications/ms-teams/app.py +1823 -0
- universal_mcp/applications/neon/README.md +74 -0
- universal_mcp/applications/neon/__init__.py +1 -0
- universal_mcp/applications/neon/app.py +2018 -0
- universal_mcp/applications/notion/README.md +30 -0
- universal_mcp/applications/notion/__init__.py +1 -0
- universal_mcp/applications/notion/app.py +527 -0
- universal_mcp/applications/openai/README.md +22 -0
- universal_mcp/applications/openai/__init__.py +1 -0
- universal_mcp/applications/openai/app.py +759 -0
- universal_mcp/applications/outlook/README.md +20 -0
- universal_mcp/applications/outlook/__init__.py +1 -0
- universal_mcp/applications/outlook/app.py +444 -0
- universal_mcp/applications/perplexity/README.md +12 -0
- universal_mcp/applications/perplexity/__init__.py +1 -0
- universal_mcp/applications/perplexity/app.py +65 -0
- universal_mcp/applications/pipedrive/README.md +284 -0
- universal_mcp/applications/pipedrive/__init__.py +1 -0
- universal_mcp/applications/pipedrive/app.py +12924 -0
- universal_mcp/applications/posthog/README.md +132 -0
- universal_mcp/applications/posthog/__init__.py +1 -0
- universal_mcp/applications/posthog/app.py +7125 -0
- universal_mcp/applications/reddit/README.md +135 -0
- universal_mcp/applications/reddit/__init__.py +1 -0
- universal_mcp/applications/reddit/app.py +4652 -0
- universal_mcp/applications/replicate/README.md +18 -0
- universal_mcp/applications/replicate/__init__.py +1 -0
- universal_mcp/applications/replicate/app.py +495 -0
- universal_mcp/applications/resend/README.md +40 -0
- universal_mcp/applications/resend/__init__.py +1 -0
- universal_mcp/applications/resend/app.py +881 -0
- universal_mcp/applications/retell/README.md +21 -0
- universal_mcp/applications/retell/__init__.py +1 -0
- universal_mcp/applications/retell/app.py +333 -0
- universal_mcp/applications/rocketlane/README.md +70 -0
- universal_mcp/applications/rocketlane/__init__.py +1 -0
- universal_mcp/applications/rocketlane/app.py +4346 -0
- universal_mcp/applications/semanticscholar/README.md +25 -0
- universal_mcp/applications/semanticscholar/__init__.py +1 -0
- universal_mcp/applications/semanticscholar/app.py +482 -0
- universal_mcp/applications/semrush/README.md +44 -0
- universal_mcp/applications/semrush/__init__.py +1 -0
- universal_mcp/applications/semrush/app.py +2081 -0
- universal_mcp/applications/sendgrid/README.md +362 -0
- universal_mcp/applications/sendgrid/__init__.py +1 -0
- universal_mcp/applications/sendgrid/app.py +9752 -0
- universal_mcp/applications/sentry/README.md +186 -0
- universal_mcp/applications/sentry/__init__.py +1 -0
- universal_mcp/applications/sentry/app.py +7471 -0
- universal_mcp/applications/serpapi/README.md +14 -0
- universal_mcp/applications/serpapi/__init__.py +1 -0
- universal_mcp/applications/serpapi/app.py +293 -0
- universal_mcp/applications/sharepoint/README.md +0 -0
- universal_mcp/applications/sharepoint/__init__.py +1 -0
- universal_mcp/applications/sharepoint/app.py +215 -0
- universal_mcp/applications/shopify/README.md +321 -0
- universal_mcp/applications/shopify/__init__.py +1 -0
- universal_mcp/applications/shopify/app.py +15392 -0
- universal_mcp/applications/shortcut/README.md +128 -0
- universal_mcp/applications/shortcut/__init__.py +1 -0
- universal_mcp/applications/shortcut/app.py +4478 -0
- universal_mcp/applications/slack/README.md +0 -0
- universal_mcp/applications/slack/__init__.py +1 -0
- universal_mcp/applications/slack/app.py +570 -0
- universal_mcp/applications/spotify/README.md +91 -0
- universal_mcp/applications/spotify/__init__.py +1 -0
- universal_mcp/applications/spotify/app.py +2526 -0
- universal_mcp/applications/supabase/README.md +87 -0
- universal_mcp/applications/supabase/__init__.py +1 -0
- universal_mcp/applications/supabase/app.py +2970 -0
- universal_mcp/applications/tavily/README.md +12 -0
- universal_mcp/applications/tavily/__init__.py +1 -0
- universal_mcp/applications/tavily/app.py +51 -0
- universal_mcp/applications/trello/README.md +266 -0
- universal_mcp/applications/trello/__init__.py +1 -0
- universal_mcp/applications/trello/app.py +10875 -0
- universal_mcp/applications/twillo/README.md +0 -0
- universal_mcp/applications/twillo/__init__.py +1 -0
- universal_mcp/applications/twillo/app.py +269 -0
- universal_mcp/applications/twitter/README.md +100 -0
- universal_mcp/applications/twitter/__init__.py +1 -0
- universal_mcp/applications/twitter/api_segments/__init__.py +0 -0
- universal_mcp/applications/twitter/api_segments/api_segment_base.py +51 -0
- universal_mcp/applications/twitter/api_segments/compliance_api.py +122 -0
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +255 -0
- universal_mcp/applications/twitter/api_segments/dm_events_api.py +140 -0
- universal_mcp/applications/twitter/api_segments/likes_api.py +159 -0
- universal_mcp/applications/twitter/api_segments/lists_api.py +395 -0
- universal_mcp/applications/twitter/api_segments/openapi_json_api.py +34 -0
- universal_mcp/applications/twitter/api_segments/spaces_api.py +309 -0
- universal_mcp/applications/twitter/api_segments/trends_api.py +40 -0
- universal_mcp/applications/twitter/api_segments/tweets_api.py +1403 -0
- universal_mcp/applications/twitter/api_segments/usage_api.py +40 -0
- universal_mcp/applications/twitter/api_segments/users_api.py +1498 -0
- universal_mcp/applications/twitter/app.py +46 -0
- universal_mcp/applications/unipile/README.md +28 -0
- universal_mcp/applications/unipile/__init__.py +1 -0
- universal_mcp/applications/unipile/app.py +829 -0
- universal_mcp/applications/whatsapp/README.md +23 -0
- universal_mcp/applications/whatsapp/__init__.py +1 -0
- universal_mcp/applications/whatsapp/app.py +595 -0
- universal_mcp/applications/whatsapp-business/README.md +34 -0
- universal_mcp/applications/whatsapp-business/__init__.py +1 -0
- universal_mcp/applications/whatsapp-business/app.py +1065 -0
- universal_mcp/applications/wrike/README.md +46 -0
- universal_mcp/applications/wrike/__init__.py +1 -0
- universal_mcp/applications/wrike/app.py +1583 -0
- universal_mcp/applications/youtube/README.md +57 -0
- universal_mcp/applications/youtube/__init__.py +1 -0
- universal_mcp/applications/youtube/app.py +1696 -0
- universal_mcp/applications/zenquotes/README.md +12 -0
- universal_mcp/applications/zenquotes/__init__.py +1 -0
- universal_mcp/applications/zenquotes/app.py +31 -0
- universal_mcp_applications-0.1.1.dist-info/METADATA +172 -0
- universal_mcp_applications-0.1.1.dist-info/RECORD +268 -0
- universal_mcp_applications-0.1.1.dist-info/WHEEL +4 -0
- universal_mcp_applications-0.1.1.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,759 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
from typing import Any, Literal
|
|
3
|
+
|
|
4
|
+
from universal_mcp.applications.application import APIApplication
|
|
5
|
+
from universal_mcp.integrations import Integration
|
|
6
|
+
|
|
7
|
+
from openai import NOT_GIVEN, AsyncOpenAI, OpenAIError
|
|
8
|
+
from openai._types import FileTypes as OpenAiFileTypes
|
|
9
|
+
from openai.types import FilePurpose as OpenAiFilePurpose
|
|
10
|
+
from openai.types.audio import (
|
|
11
|
+
Transcription,
|
|
12
|
+
TranscriptionVerbose,
|
|
13
|
+
Translation,
|
|
14
|
+
TranslationVerbose,
|
|
15
|
+
)
|
|
16
|
+
from openai.types.audio.speech_model import SpeechModel as OpenAiSpeechModel
|
|
17
|
+
from openai.types.audio_model import AudioModel as OpenAiAudioModel
|
|
18
|
+
from openai.types.chat import ChatCompletionMessageParam
|
|
19
|
+
from openai.types.file_object import FileObject
|
|
20
|
+
from openai.types.image_model import ImageModel as OpenAiImageModel
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class OpenaiApp(APIApplication):
|
|
24
|
+
"""
|
|
25
|
+
Application for interacting with the OpenAI API (api.openai.com)
|
|
26
|
+
to generate chat completions, manage files, and create images.
|
|
27
|
+
Requires an OpenAI API key configured via integration.
|
|
28
|
+
Optionally, organization ID and project ID can also be configured.
|
|
29
|
+
"""
|
|
30
|
+
|
|
31
|
+
def __init__(self, integration: Integration | None = None) -> None:
    """Initialize the OpenAI application.

    Args:
        integration: Optional integration supplying the OpenAI API key
            (and optionally organization/project IDs) via credentials.
    """
    super().__init__(name="openai", integration=integration)
|
|
33
|
+
|
|
34
|
+
async def _get_client(self) -> AsyncOpenAI:
    """Initializes and returns the AsyncOpenAI client.

    Reads ``api_key`` (required) plus optional ``organization`` and
    ``project`` entries from the integration credentials.

    Returns:
        A configured AsyncOpenAI client.

    Raises:
        ValueError: If no integration is configured, or the credentials
            do not contain an API key.
    """
    if not self.integration:
        raise ValueError("Integration not provided for OpenaiApp.")

    creds = self.integration.get_credentials()
    api_key = creds.get("api_key")
    # Fail fast with a clear message; otherwise AsyncOpenAI raises a less
    # specific error only when the client is first used.
    if not api_key:
        raise ValueError("No API key found in integration credentials for OpenaiApp.")

    return AsyncOpenAI(
        api_key=api_key,
        organization=creds.get("organization"),
        project=creds.get("project"),
    )
|
|
49
|
+
|
|
50
|
+
async def create_chat_completion(
    self,
    messages: list[ChatCompletionMessageParam],
    model: str = "gpt-4o",  # Default model set to gpt-4o
    stream: bool = False,
    temperature: float | None = None,
    max_tokens: int | None = None,
    top_p: float | None = None,
    frequency_penalty: float | None = None,
    presence_penalty: float | None = None,
    stop: str | None | list[str] = None,
    user: str | None = None,
) -> dict[str, Any] | str:
    """
    Creates a model response for the given chat conversation.

    Args:
        messages: A list of messages comprising the conversation so far.
        model: ID of the model to use. Defaults to "gpt-4o". Other examples
            include "gpt-4-turbo", "gpt-3.5-turbo", "gpt-4o-mini", etc.
            Ensure the model ID is valid for the OpenAI API.
        stream: If True, the response is streamed and aggregated internally
            into a single response object; usage data is unavailable in this
            mode. If False (default), one complete response is requested.
        temperature: Sampling temperature to use, between 0 and 2.
        max_tokens: The maximum number of tokens to generate in the chat completion.
        top_p: An alternative to sampling with temperature (nucleus sampling).
        frequency_penalty: Number between -2.0 and 2.0; positive values penalize
            new tokens based on their existing frequency.
        presence_penalty: Number between -2.0 and 2.0; positive values penalize
            new tokens based on whether they appear in the text so far.
        stop: Up to 4 sequences where the API will stop generating further tokens.
        user: A unique identifier representing your end-user.

    Returns:
        A dictionary containing the chat completion response on success,
        or a string containing an error message on failure.
        If stream=True, usage data in the response will be None.

    Tags:
        chat, llm, important
    """
    try:
        client = await self._get_client()
        request_kwargs: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens,
            "top_p": top_p,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
            "stop": stop,
            "user": user,
        }
        # Drop unset options so the API applies its own defaults.
        request_kwargs = {
            key: value for key, value in request_kwargs.items() if value is not None
        }

        if not stream:
            completion = await client.chat.completions.create(
                stream=False, **request_kwargs
            )
            return completion.model_dump()

        chunk_stream = await client.chat.completions.create(
            stream=True, **request_kwargs
        )

        content_pieces: list[str] = []
        role: str = "assistant"
        header: dict[str, Any] = {}
        stop_reason: str | None = None

        async for chunk in chunk_stream:
            # Capture id/created/model/fingerprint from the first real chunk.
            if chunk.id and not header:
                header = {
                    "id": chunk.id,
                    "created": chunk.created,
                    "model": chunk.model,
                    "system_fingerprint": chunk.system_fingerprint,
                }
            if not chunk.choices:
                continue
            first_choice = chunk.choices[0]
            if first_choice.delta:
                if first_choice.delta.content:
                    content_pieces.append(first_choice.delta.content)
                if first_choice.delta.role:
                    role = first_choice.delta.role
            if first_choice.finish_reason:
                stop_reason = first_choice.finish_reason

        # Reassemble the deltas into a chat.completion-shaped dict; usage
        # is not reported by the streaming API, so it is None here.
        return {
            **header,
            "object": "chat.completion",
            "choices": [
                {
                    "message": {
                        "role": role,
                        "content": "".join(content_pieces),
                    },
                    "finish_reason": stop_reason,
                    "index": 0,
                }
            ],
            "usage": None,
        }

    except OpenAIError as e:
        return f"OpenAI API error creating chat completion for model {model}: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating chat completion for model {model}: {type(e).__name__} - {e}"
|
|
162
|
+
|
|
163
|
+
async def upload_file(
    self, file: OpenAiFileTypes, purpose: OpenAiFilePurpose
) -> dict[str, Any] | str:
    """
    Upload a file that can be used across various OpenAI API endpoints.

    Args:
        file: The File object (not file name) or path to be uploaded.
            Can be bytes, a PathLike object, or a file-like object.
        purpose: The intended purpose of the uploaded file
            (e.g., 'fine-tune', 'assistants').

    Returns:
        A dictionary containing the file object details on success,
        or a string containing an error message on failure.

    Tags:
        files, upload, storage
    """
    try:
        client = await self._get_client()
        uploaded: FileObject = await client.files.create(file=file, purpose=purpose)
        return uploaded.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error uploading file: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error uploading file: {type(e).__name__} - {e}"
|
|
189
|
+
|
|
190
|
+
async def list_files(
    self,
    purpose: str | None = None,
    limit: int | None = None,
    after: str | None = None,
    order: Literal["asc", "desc"] | None = None,
) -> dict[str, Any] | str:
    """
    Lists the files that have been uploaded to your OpenAI account.

    Args:
        purpose: Only return files with the given purpose.
        limit: A limit on the number of objects to be returned.
        after: A cursor for use in pagination.
        order: Sort order by the `created_at` timestamp.

    Returns:
        A dictionary representing a page of file objects on success,
        or a string containing an error message on failure.

    Tags:
        files, list, storage
    """
    try:
        client = await self._get_client()
        # Only forward options the caller actually set (truthy values),
        # matching the API's own defaults for the rest.
        query_params = {
            name: value
            for name, value in (
                ("purpose", purpose),
                ("limit", limit),
                ("after", after),
                ("order", order),
            )
            if value
        }
        page = await client.files.list(**query_params)
        return page.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error listing files: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error listing files: {type(e).__name__} - {e}"
|
|
231
|
+
|
|
232
|
+
async def retrieve_file(self, file_id: str) -> dict[str, Any] | str:
    """
    Retrieves information about a specific file.

    Args:
        file_id: The ID of the file to retrieve.

    Returns:
        A dictionary containing the file object details on success,
        or a string containing an error message on failure.

    Tags:
        files, retrieve, storage
    """
    try:
        client = await self._get_client()
        file_obj: FileObject = await client.files.retrieve(file_id=file_id)
        return file_obj.model_dump()
    except OpenAIError as e:
        return (
            f"OpenAI API error retrieving file {file_id}: {type(e).__name__} - {e}"
        )
    except Exception as e:
        return f"Error retrieving file {file_id}: {type(e).__name__} - {e}"
|
|
256
|
+
|
|
257
|
+
async def delete_file(self, file_id: str) -> dict[str, Any] | str:
    """
    Deletes a file.

    Args:
        file_id: The ID of the file to delete.

    Returns:
        A dictionary containing the deletion status on success,
        or a string containing an error message on failure.

    Tags:
        files, delete, storage
    """
    try:
        client = await self._get_client()
        deletion = await client.files.delete(file_id=file_id)
        return deletion.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error deleting file {file_id}: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error deleting file {file_id}: {type(e).__name__} - {e}"
|
|
279
|
+
|
|
280
|
+
async def retrieve_file_content(self, file_id: str) -> dict[str, Any] | str:
    """
    Retrieves the content of the specified file.
    Returns text content directly, or base64 encoded content in a dictionary
    for binary files.

    Args:
        file_id: The ID of the file whose content to retrieve.

    Returns:
        The file content as a string if text, a dictionary with base64 encoded
        content if binary, or an error message string on failure.

    Tags:
        files, content, download
    """
    text_markers = ("text", "json", "xml", "javascript", "csv")
    try:
        client = await self._get_client()
        api_response = await client.files.content(file_id=file_id)

        http_response_headers = api_response.response.headers
        content_type = http_response_headers.get("Content-Type", "").lower()

        try:
            if any(marker in content_type for marker in text_markers):
                return api_response.text  # Decoded text
            binary_content = api_response.content
            return {
                "file_id": file_id,
                "content_type": content_type,
                "content_base64": base64.b64encode(binary_content).decode(),
            }
        except UnicodeDecodeError:
            # The payload claimed a text Content-Type but could not be
            # decoded. Reuse the already-fetched response instead of
            # issuing a second, redundant download of the same file.
            return {
                "file_id": file_id,
                "content_type": content_type,
                "content_base64": base64.b64encode(api_response.content).decode(),
                "warning": "Content could not be decoded as text, returned as base64.",
            }
    except OpenAIError as e:
        return f"OpenAI API error retrieving content for file {file_id}: {type(e).__name__} - {e}"
    except Exception as e:
        return (
            f"Error retrieving content for file {file_id}: {type(e).__name__} - {e}"
        )
|
|
334
|
+
|
|
335
|
+
# --- Images Methods ---
|
|
336
|
+
async def generate_image(
    self,
    prompt: str,
    model: str
    | OpenAiImageModel
    | None = "dall-e-3",  # Default model set to dall-e-3
    n: int | None = None,  # 1-10 for dall-e-2, 1 for dall-e-3
    quality: Literal["standard", "hd"] | None = None,  # For dall-e-3
    response_format: Literal["url", "b64_json"] | None = None,
    size: Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]
    | None = None,
    style: Literal["vivid", "natural"] | None = None,  # For dall-e-3
    user: str | None = None,
) -> dict[str, Any] | str:
    """
    Creates an image given a prompt.

    Args:
        prompt: A text description of the desired image(s).
        model: The model to use for image generation. Defaults to "dall-e-3".
            The other primary option is "dall-e-2".
            Ensure the model ID is valid for the OpenAI API.
        n: The number of images to generate. For "dall-e-3", only n=1 is supported.
            For "dall-e-2", n can be between 1 and 10.
            If model is "dall-e-3" and n is not 1, it may result in an API error.
        quality: The quality of the image ("standard" or "hd"). Only for "dall-e-3".
        response_format: The format in which the generated images are returned
            ("url" or "b64_json"). Defaults to "url" if not specified.
        size: The size of the generated images.
            For "dall-e-2": "256x256", "512x512", or "1024x1024".
            For "dall-e-3": "1024x1024", "1792x1024", or "1024x1792".
        style: The style of the generated images ("vivid" or "natural").
            Only for "dall-e-3".
        user: A unique identifier representing your end-user.

    Returns:
        A dictionary containing the image generation response on success,
        or a string containing an error message on failure.

    Tags:
        images, generate, dalle, important
    """
    # Resolve the model before the try block so the error handlers below
    # can report the model actually used. The previous version computed
    # this inside the try (a failure in _get_client would have left it
    # undefined) and interpolated the raw `model` argument into the error
    # messages, which reads as "None" when the caller passes model=None.
    effective_model = model if model is not None else "dall-e-3"
    try:
        client = await self._get_client()

        effective_params = {
            "prompt": prompt,
            "model": effective_model,
            "n": n,
            "quality": quality,
            "response_format": response_format,
            "size": size,
            "style": style,
            "user": user,
        }

        # Drop unset options so the API applies its own defaults.
        effective_params = {
            k: v for k, v in effective_params.items() if v is not None
        }

        response = await client.images.generate(**effective_params)
        return response.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error generating image with model {effective_model}: {type(e).__name__} - {e}"
    except Exception as e:
        return (
            f"Error generating image with model {effective_model}: {type(e).__name__} - {e}"
        )
|
|
407
|
+
|
|
408
|
+
async def create_image_edit(
    self,
    image: OpenAiFileTypes,
    prompt: str,
    mask: OpenAiFileTypes | None = None,
    model: str | OpenAiImageModel | None = "dall-e-2",
    n: int | None = None,
    response_format: Literal["url", "b64_json"] | None = None,
    size: Literal["256x256", "512x512", "1024x1024"] | None = None,
    user: str | None = None,
) -> dict[str, Any] | str:
    """
    Creates an edited or extended image given an original image and a prompt.

    Args:
        image: The image to edit. Must be a valid PNG file, less than 4MB, and square.
        prompt: A text description of the desired image(s).
        mask: An additional image whose fully transparent areas indicate where `image` should be edited.
        model: The model to use. Defaults to "dall-e-2", which is currently the only
            model supported for image edits by the OpenAI API.
        n: The number of images to generate. Must be between 1 and 10.
        response_format: The format of the returned images ("url" or "b64_json"). Defaults to "url".
        size: The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024".
        user: A unique identifier representing your end-user.

    Returns:
        A dictionary containing the image edit response on success,
        or a string containing an error message on failure.

    Tags:
        images, edit, dalle
    """
    # Resolve the model BEFORE the try block: both except handlers interpolate
    # effective_model into their messages, so assigning it inside the try meant
    # a failure in _get_client() raised UnboundLocalError instead of returning
    # the intended error string.
    effective_model = model if model is not None else "dall-e-2"
    try:
        client = await self._get_client()

        params = {
            "image": image,
            "prompt": prompt,
            "mask": mask,
            "model": effective_model,
            "n": n,
            "response_format": response_format,
            "size": size,
            "user": user,
        }
        # Drop unset options so the SDK applies its own defaults.
        params = {k: v for k, v in params.items() if v is not None}

        response = await client.images.edit(**params)
        return response.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error creating image edit with model {effective_model}: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating image edit with model {effective_model}: {type(e).__name__} - {e}"
|
|
462
|
+
|
|
463
|
+
async def create_image_variation(
    self,
    image: OpenAiFileTypes,
    model: str | OpenAiImageModel | None = "dall-e-2",
    n: int | None = None,
    response_format: Literal["url", "b64_json"] | None = None,
    size: Literal["256x256", "512x512", "1024x1024"] | None = None,
    user: str | None = None,
) -> dict[str, Any] | str:
    """
    Creates a variation of a given image.

    Args:
        image: The image to use as the basis for the variation(s). Must be a valid PNG file.
        model: The model to use. Defaults to "dall-e-2", which is currently the only
            model supported for image variations by the OpenAI API.
        n: The number of images to generate. Must be between 1 and 10.
        response_format: The format of the returned images ("url" or "b64_json"). Defaults to "url".
        size: The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024".
        user: A unique identifier representing your end-user.

    Returns:
        A dictionary containing the image variation response on success,
        or a string containing an error message on failure.

    Tags:
        images, variation, dalle
    """
    # Resolve the model BEFORE the try block: the except handlers format
    # effective_model into their messages, so assigning it inside the try
    # meant a failure in _get_client() raised UnboundLocalError instead of
    # returning the intended error string.
    effective_model = model if model is not None else "dall-e-2"
    try:
        client = await self._get_client()

        params = {
            "image": image,
            "model": effective_model,
            "n": n,
            "response_format": response_format,
            "size": size,
            "user": user,
        }
        # Drop unset options so the SDK applies its own defaults.
        params = {k: v for k, v in params.items() if v is not None}

        response = await client.images.create_variation(**params)
        return response.model_dump()
    except OpenAIError as e:
        return f"OpenAI API error creating image variation with model {effective_model}: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating image variation with model {effective_model}: {type(e).__name__} - {e}"
|
|
511
|
+
|
|
512
|
+
async def create_transcription(
    self,
    file: OpenAiFileTypes,
    model: str | OpenAiAudioModel = "gpt-4o-transcribe",
    language: str | None = None,
    prompt: str | None = None,
    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
    | None = None,
    temperature: float | None = None,
    timestamp_granularities: list[Literal["word", "segment"]] | None = None,
    include: list[Literal["logprobs"]] | None = None,  # For gpt-4o models
    stream: bool = False,
) -> dict[str, Any] | str:
    """
    Transcribes audio into the input language.

    Args:
        file: The audio file object (not file name) to transcribe.
        model: ID of the model to use (e.g., "whisper-1", "gpt-4o-transcribe").
        language: The language of the input audio (ISO-639-1 format).
        prompt: Optional text to guide the model's style.
        response_format: The format of the transcript ("json", "text", "srt", "verbose_json", "vtt").
            For "gpt-4o-transcribe" and "gpt-4o-mini-transcribe" with streaming,
            this should effectively lead to a JSON-like final object.
        temperature: Sampling temperature between 0 and 1.
        timestamp_granularities: Granularities for timestamps ("word", "segment").
            Requires `response_format` to be "verbose_json".
        include: Additional information to include, e.g., ["logprobs"].
            Only works with response_format="json" and gpt-4o models.
        stream: If True, streams the response. The method will aggregate the stream
            into a final response object. Streaming is not supported for "whisper-1".

    Returns:
        A dictionary containing the transcription or a string, depending on `response_format`.
        If `stream` is True, an aggregated response dictionary.
        Returns an error message string on failure.

    Tags:
        audio, transcription, speech-to-text, important
    """
    try:
        client = await self._get_client()

        # Optional arguments default to the SDK sentinel NOT_GIVEN (rather
        # than None) so the OpenAI client applies its own server-side defaults.
        params = {
            "file": file,
            "model": model,
            "language": language if language is not None else NOT_GIVEN,
            "prompt": prompt if prompt is not None else NOT_GIVEN,
            "response_format": response_format
            if response_format is not None
            else NOT_GIVEN,
            "temperature": temperature if temperature is not None else NOT_GIVEN,
            "timestamp_granularities": timestamp_granularities
            if timestamp_granularities is not None
            else NOT_GIVEN,
            "include": include if include is not None else NOT_GIVEN,
        }

        if stream:
            # Streaming mode: consume SDK events until the final aggregated
            # transcription object appears, then return its dict form.
            stream_response = await client.audio.transcriptions.create(
                **params, stream=True
            )

            final_transcription_value = None
            async for event in stream_response:
                if hasattr(event, "value") and isinstance(
                    event.value, Transcription | TranscriptionVerbose
                ):
                    # NOTE(review): matching by class *name* string is fragile
                    # and tied to the SDK's event naming — verify against the
                    # installed openai version if streaming breaks.
                    if event.__class__.__name__ == "FinalTranscriptionEvent":
                        final_transcription_value = event.value
                        break

            if final_transcription_value:
                return final_transcription_value.model_dump()
            else:
                # Stream ended without a final transcription event.
                return {
                    "error": "Stream aggregation failed to find final transcription object."
                }
        else:
            # Non-streaming mode: the SDK returns either a typed transcription
            # object (json/verbose_json) or a plain string (text/srt/vtt).
            response = await client.audio.transcriptions.create(
                **params, stream=False
            )
            if isinstance(response, Transcription | TranscriptionVerbose):
                return response.model_dump()
            elif isinstance(response, str):
                return response
            else:
                # Defensive fallback for an unanticipated SDK return type.
                return {
                    "error": "Unexpected_response_type_from_transcription_api",
                    "data": str(response),
                }

    except OpenAIError as e:
        return f"OpenAI API error creating transcription: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating transcription: {type(e).__name__} - {e}"
|
|
608
|
+
|
|
609
|
+
async def create_translation(
    self,
    file: OpenAiFileTypes,
    model: str | OpenAiAudioModel = "whisper-1",
    prompt: str | None = None,
    response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
    | None = None,
    temperature: float | None = None,
) -> dict[str, Any] | str:
    """
    Translates audio into English text.

    Args:
        file: The audio file object (not file name) to translate.
        model: ID of the model to use (currently, only "whisper-1" is supported).
        prompt: Optional text to guide the model's style (should be in English).
        response_format: The format of the translated text.
        temperature: Sampling temperature between 0 and 1.

    Returns:
        A dictionary containing the translation or a string, depending on `response_format`.
        Returns an error message string on failure.

    Tags:
        audio, translation, speech-to-text
    """
    try:
        client = await self._get_client()

        # Required arguments go in directly; optional ones fall back to the
        # SDK sentinel NOT_GIVEN so the client applies its own defaults.
        request_kwargs: dict[str, Any] = {"file": file, "model": model}
        for key, value in (
            ("prompt", prompt),
            ("response_format", response_format),
            ("temperature", temperature),
        ):
            request_kwargs[key] = value if value is not None else NOT_GIVEN

        result = await client.audio.translations.create(**request_kwargs)

        # Typed translation objects are serialized; plain-text formats come
        # back as str and are passed through unchanged.
        if isinstance(result, Translation | TranslationVerbose):
            return result.model_dump()
        if isinstance(result, str):
            return result
        # Defensive fallback for an unanticipated SDK return type.
        return {
            "error": "Unexpected_response_type_from_translation_api",
            "data": str(result),
        }
    except OpenAIError as e:
        return f"OpenAI API error creating translation: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating translation: {type(e).__name__} - {e}"
|
|
661
|
+
|
|
662
|
+
async def create_speech(
    self,
    input_text: str,
    voice: Literal[
        "alloy",
        "ash",
        "ballad",
        "coral",
        "echo",
        "fable",
        "onyx",
        "nova",
        "sage",
        "shimmer",
        "verse",
    ],
    model: str | OpenAiSpeechModel = "tts-1",
    response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
    | None = None,  # Defaults to "mp3"
    speed: float | None = None,
    instructions: str | None = None,  # For gpt-4o-mini-tts or newer models
) -> dict[str, Any] | str:
    """
    Generates audio from the input text.

    Args:
        input_text: The text to generate audio for (max 4096 characters).
        model: The TTS model to use (e.g., "tts-1", "tts-1-hd", "gpt-4o-mini-tts").
        voice: The voice to use for the audio.
        response_format: The format of the audio ("mp3", "opus", "aac", "flac", "wav", "pcm"). Defaults to "mp3".
        speed: Speed of the generated audio (0.25 to 4.0). Defaults to 1.0.
        instructions: Control voice with additional instructions (not for tts-1/tts-1-hd).

    Returns:
        A dictionary containing the base64 encoded audio content and content type,
        or an error message string on failure.

    Tags:
        audio, speech, text-to-speech, tts, important
    """
    try:
        client = await self._get_client()

        # Required fields go in directly; optional ones fall back to the SDK
        # sentinel NOT_GIVEN so the client applies its own defaults.
        request_kwargs: dict[str, Any] = {
            "input": input_text,
            "model": model,
            "voice": voice,
        }
        for key, value in (
            ("response_format", response_format),
            ("speed", speed),
            ("instructions", instructions),
        ):
            request_kwargs[key] = value if value is not None else NOT_GIVEN

        api_response = await client.audio.speech.create(**request_kwargs)
        audio_bytes = api_response.content
        content_type = api_response.response.headers.get(
            "Content-Type", "application/octet-stream"
        )

        # When the server only reports a generic octet-stream type, derive a
        # more specific MIME type from the format the caller requested.
        if response_format and content_type == "application/octet-stream":
            content_type = {
                "mp3": "audio/mpeg",
                "opus": "audio/opus",
                "aac": "audio/aac",
                "flac": "audio/flac",
                "wav": "audio/wav",
                "pcm": "audio/L16",
            }.get(response_format, content_type)

        # Audio is binary, so it is returned base64-encoded alongside metadata.
        return {
            "model_used": str(model),
            "voice_used": voice,
            "content_type": content_type,
            "content_base64": base64.b64encode(audio_bytes).decode(),
        }
    except OpenAIError as e:
        return f"OpenAI API error creating speech: {type(e).__name__} - {e}"
    except Exception as e:
        return f"Error creating speech: {type(e).__name__} - {e}"
|
|
743
|
+
|
|
744
|
+
def list_tools(self) -> list[callable]:
    """Returns a list of methods exposed as tools."""
    # Chat, file-management, image, and audio endpoints, in registration order.
    tool_names = (
        "create_chat_completion",
        "upload_file",
        "list_files",
        "retrieve_file",
        "delete_file",
        "retrieve_file_content",
        "generate_image",
        "create_image_edit",
        "create_image_variation",
        "create_transcription",
        "create_translation",
        "create_speech",
    )
    return [getattr(self, name) for name in tool_names]
|