universal-mcp-applications 0.1.32__py3-none-any.whl → 0.1.36rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- universal_mcp/applications/ahrefs/app.py +52 -198
- universal_mcp/applications/airtable/app.py +23 -122
- universal_mcp/applications/apollo/app.py +111 -464
- universal_mcp/applications/asana/app.py +417 -1567
- universal_mcp/applications/aws_s3/app.py +36 -103
- universal_mcp/applications/bill/app.py +546 -1957
- universal_mcp/applications/box/app.py +1068 -3981
- universal_mcp/applications/braze/app.py +364 -1430
- universal_mcp/applications/browser_use/app.py +2 -8
- universal_mcp/applications/cal_com_v2/app.py +207 -625
- universal_mcp/applications/calendly/app.py +61 -200
- universal_mcp/applications/canva/app.py +45 -110
- universal_mcp/applications/clickup/app.py +207 -674
- universal_mcp/applications/coda/app.py +146 -426
- universal_mcp/applications/confluence/app.py +310 -1098
- universal_mcp/applications/contentful/app.py +36 -151
- universal_mcp/applications/crustdata/app.py +28 -107
- universal_mcp/applications/dialpad/app.py +283 -756
- universal_mcp/applications/digitalocean/app.py +1766 -5777
- universal_mcp/applications/domain_checker/app.py +3 -54
- universal_mcp/applications/e2b/app.py +14 -64
- universal_mcp/applications/elevenlabs/app.py +9 -47
- universal_mcp/applications/exa/app.py +6 -17
- universal_mcp/applications/falai/app.py +24 -101
- universal_mcp/applications/figma/app.py +53 -137
- universal_mcp/applications/file_system/app.py +2 -13
- universal_mcp/applications/firecrawl/app.py +51 -152
- universal_mcp/applications/fireflies/app.py +59 -281
- universal_mcp/applications/fpl/app.py +91 -528
- universal_mcp/applications/fpl/utils/fixtures.py +15 -49
- universal_mcp/applications/fpl/utils/helper.py +25 -89
- universal_mcp/applications/fpl/utils/league_utils.py +20 -64
- universal_mcp/applications/ghost_content/app.py +52 -161
- universal_mcp/applications/github/app.py +19 -56
- universal_mcp/applications/gong/app.py +88 -248
- universal_mcp/applications/google_calendar/app.py +16 -68
- universal_mcp/applications/google_docs/app.py +85 -189
- universal_mcp/applications/google_drive/app.py +141 -463
- universal_mcp/applications/google_gemini/app.py +12 -64
- universal_mcp/applications/google_mail/app.py +28 -157
- universal_mcp/applications/google_searchconsole/app.py +15 -48
- universal_mcp/applications/google_sheet/app.py +100 -581
- universal_mcp/applications/google_sheet/helper.py +10 -37
- universal_mcp/applications/hashnode/app.py +57 -269
- universal_mcp/applications/heygen/app.py +44 -122
- universal_mcp/applications/http_tools/app.py +10 -32
- universal_mcp/applications/hubspot/api_segments/crm_api.py +460 -1573
- universal_mcp/applications/hubspot/api_segments/marketing_api.py +74 -262
- universal_mcp/applications/hubspot/app.py +23 -87
- universal_mcp/applications/jira/app.py +2071 -7986
- universal_mcp/applications/klaviyo/app.py +494 -1376
- universal_mcp/applications/linkedin/README.md +9 -2
- universal_mcp/applications/linkedin/app.py +240 -181
- universal_mcp/applications/mailchimp/app.py +450 -1605
- universal_mcp/applications/markitdown/app.py +8 -20
- universal_mcp/applications/miro/app.py +217 -699
- universal_mcp/applications/ms_teams/app.py +64 -186
- universal_mcp/applications/neon/app.py +86 -192
- universal_mcp/applications/notion/app.py +21 -36
- universal_mcp/applications/onedrive/app.py +16 -38
- universal_mcp/applications/openai/app.py +42 -165
- universal_mcp/applications/outlook/app.py +24 -84
- universal_mcp/applications/perplexity/app.py +4 -19
- universal_mcp/applications/pipedrive/app.py +832 -3142
- universal_mcp/applications/posthog/app.py +163 -432
- universal_mcp/applications/reddit/app.py +40 -139
- universal_mcp/applications/resend/app.py +41 -107
- universal_mcp/applications/retell/app.py +14 -41
- universal_mcp/applications/rocketlane/app.py +221 -934
- universal_mcp/applications/scraper/README.md +7 -4
- universal_mcp/applications/scraper/app.py +50 -109
- universal_mcp/applications/semanticscholar/app.py +22 -64
- universal_mcp/applications/semrush/app.py +43 -77
- universal_mcp/applications/sendgrid/app.py +512 -1262
- universal_mcp/applications/sentry/app.py +271 -906
- universal_mcp/applications/serpapi/app.py +40 -143
- universal_mcp/applications/sharepoint/app.py +17 -39
- universal_mcp/applications/shopify/app.py +1551 -4287
- universal_mcp/applications/shortcut/app.py +155 -417
- universal_mcp/applications/slack/app.py +33 -115
- universal_mcp/applications/spotify/app.py +126 -325
- universal_mcp/applications/supabase/app.py +104 -213
- universal_mcp/applications/tavily/app.py +1 -1
- universal_mcp/applications/trello/app.py +693 -2656
- universal_mcp/applications/twilio/app.py +14 -50
- universal_mcp/applications/twitter/api_segments/compliance_api.py +4 -14
- universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +6 -18
- universal_mcp/applications/twitter/api_segments/likes_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/lists_api.py +5 -15
- universal_mcp/applications/twitter/api_segments/trends_api.py +1 -3
- universal_mcp/applications/twitter/api_segments/tweets_api.py +9 -31
- universal_mcp/applications/twitter/api_segments/usage_api.py +1 -5
- universal_mcp/applications/twitter/api_segments/users_api.py +14 -42
- universal_mcp/applications/whatsapp/app.py +35 -186
- universal_mcp/applications/whatsapp/audio.py +2 -6
- universal_mcp/applications/whatsapp/whatsapp.py +17 -51
- universal_mcp/applications/whatsapp_business/app.py +70 -283
- universal_mcp/applications/wrike/app.py +45 -118
- universal_mcp/applications/yahoo_finance/app.py +19 -65
- universal_mcp/applications/youtube/app.py +75 -261
- universal_mcp/applications/zenquotes/app.py +2 -2
- {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/METADATA +2 -2
- {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/RECORD +105 -105
- {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.32.dist-info → universal_mcp_applications-0.1.36rc2.dist-info}/licenses/LICENSE +0 -0
The diff body below covers universal_mcp/applications/openai/app.py (+42 -165 in the file list above); every hunk shown is a formatting-only change: multi-line imports, signatures, calls, and literals collapsed onto single lines, with stray blank lines and leftover comments removed.

--- universal_mcp/applications/openai/app.py (0.1.32)
+++ universal_mcp/applications/openai/app.py (0.1.36rc2)
@@ -1,18 +1,11 @@
 import base64
 from typing import Any, Literal
-
 from universal_mcp.applications.application import APIApplication
 from universal_mcp.integrations import Integration
-
 from openai import NOT_GIVEN, AsyncOpenAI, OpenAIError
 from openai._types import FileTypes as OpenAiFileTypes
 from openai.types import FilePurpose as OpenAiFilePurpose
-from openai.types.audio import (
-    Transcription,
-    TranscriptionVerbose,
-    Translation,
-    TranslationVerbose,
-)
+from openai.types.audio import Transcription, TranscriptionVerbose, Translation, TranslationVerbose
 from openai.types.audio.speech_model import SpeechModel as OpenAiSpeechModel
 from openai.types.audio_model import AudioModel as OpenAiAudioModel
 from openai.types.chat import ChatCompletionMessageParam
@@ -35,22 +28,16 @@ class OpenaiApp(APIApplication):
         """Initializes and returns the AsyncOpenAI client."""
         if not self.integration:
             raise ValueError("Integration not provided for OpenaiApp.")
-
         creds = self.integration.get_credentials()
         api_key = creds.get("api_key")
         organization = creds.get("organization")
         project = creds.get("project")
-
-        return AsyncOpenAI(
-            api_key=api_key,
-            organization=organization,
-            project=project,
-        )
+        return AsyncOpenAI(api_key=api_key, organization=organization, project=project)

     async def create_chat_completion(
         self,
         messages: list[ChatCompletionMessageParam],
-        model: str = "gpt-4o",
+        model: str = "gpt-4o",
         stream: bool = False,
         temperature: float | None = None,
         max_tokens: int | None = None,
@@ -59,7 +46,6 @@ class OpenaiApp(APIApplication):
         presence_penalty: float | None = None,
         stop: str | None | list[str] = None,
         user: str | None = None,
-        # Add other common parameters as needed, or rely on
     ) -> dict[str, Any] | str:
         """
         Generates a model response for a chat conversation. It supports both standard and streaming modes; if streaming, it internally aggregates response chunks into a single complete object, simplifying stream handling. Returns the completion data as a dictionary on success or an error string on failure.
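The docstring above says streaming responses are aggregated into one complete object before returning. A minimal usage sketch under that contract (the `integration` argument is a stand-in for whatever supplies the `api_key` credential, following the APIApplication pattern; it is not constructed here):

from universal_mcp.applications.openai.app import OpenaiApp

async def chat_example(integration) -> None:
    # integration is assumed to provide {"api_key": ...} via get_credentials()
    app = OpenaiApp(integration=integration)
    result = await app.create_chat_completion(
        messages=[{"role": "user", "content": "Say hello"}],
        model="gpt-4o",
        stream=True,  # chunks are aggregated internally; one dict still comes back
    )
    if isinstance(result, str):
        print("error:", result)  # failures are returned as strings, not raised
    else:
        print(result["choices"][0]["message"]["content"])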
@@ -103,22 +89,15 @@ class OpenaiApp(APIApplication):
                 "user": user,
             }
             common_params = {k: v for k, v in common_params.items() if v is not None}
-
             if not stream:
-                response = await client.chat.completions.create(
-                    stream=False, **common_params
-                )
+                response = await client.chat.completions.create(stream=False, **common_params)
                 return response.model_dump()
             else:
-                stream_response = await client.chat.completions.create(
-                    stream=True, **common_params
-                )
-
+                stream_response = await client.chat.completions.create(stream=True, **common_params)
                 final_content_parts: list[str] = []
                 final_role: str = "assistant"
                 first_chunk_data: dict[str, Any] = {}
                 finish_reason: str | None = None
-
                 async for chunk in stream_response:
                     if not first_chunk_data and chunk.id:
                         first_chunk_data = {
@@ -127,7 +106,6 @@ class OpenaiApp(APIApplication):
                             "model": chunk.model,
                             "system_fingerprint": chunk.system_fingerprint,
                         }
-
                     if chunk.choices:
                         choice = chunk.choices[0]
                         if choice.delta:
@@ -137,32 +115,19 @@ class OpenaiApp(APIApplication):
                             final_role = choice.delta.role
                         if choice.finish_reason:
                             finish_reason = choice.finish_reason
-
                 aggregated_choice = {
-                    "message": {
-                        "role": final_role,
-                        "content": "".join(final_content_parts),
-                    },
+                    "message": {"role": final_role, "content": "".join(final_content_parts)},
                     "finish_reason": finish_reason,
                     "index": 0,
                 }
-
-                response_dict = {
-                    **first_chunk_data,
-                    "object": "chat.completion",
-                    "choices": [aggregated_choice],
-                    "usage": None,
-                }
+                response_dict = {**first_chunk_data, "object": "chat.completion", "choices": [aggregated_choice], "usage": None}
                 return response_dict
-
         except OpenAIError as e:
             return f"OpenAI API error creating chat completion for model {model}: {type(e).__name__} - {e}"
         except Exception as e:
             return f"Error creating chat completion for model {model}: {type(e).__name__} - {e}"

-    async def upload_file(
-        self, file: OpenAiFileTypes, purpose: OpenAiFilePurpose
-    ) -> dict[str, Any] | str:
+    async def upload_file(self, file: OpenAiFileTypes, purpose: OpenAiFilePurpose) -> dict[str, Any] | str:
         """
         Uploads a file to the user's OpenAI account for use across various endpoints like 'assistants'. It accepts a file path or object and a purpose string, returning a dictionary with the created file object's details upon success.

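A call sketch for upload_file as collapsed above; purpose must be a valid OpenAI FilePurpose string (for example "assistants"), and the created file object comes back as a dict:

from universal_mcp.applications.openai.app import OpenaiApp

async def upload_example(app: OpenaiApp) -> None:
    with open("notes.txt", "rb") as fh:  # any path or file object the SDK accepts
        result = await app.upload_file(file=fh, purpose="assistants")
    if isinstance(result, str):
        print("upload failed:", result)
    else:
        print("uploaded file id:", result["id"])  # FileObject.model_dump() includes "id"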
@@ -188,11 +153,7 @@ class OpenaiApp(APIApplication):
             return f"Error uploading file: {type(e).__name__} - {e}"

     async def list_files(
-        self,
-        purpose: str | None = None,
-        limit: int | None = None,
-        after: str | None = None,
-        order: Literal["asc", "desc"] | None = None,
+        self, purpose: str | None = None, limit: int | None = None, after: str | None = None, order: Literal["asc", "desc"] | None = None
     ) -> dict[str, Any] | str:
         """
         Retrieves a paginated list of files uploaded to the OpenAI account. Allows filtering by purpose and controlling the output with limit, `after` cursor, and sort order parameters to efficiently navigate through the file collection.
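The list_files docstring above describes cursor pagination with `after`. A sketch that walks all pages, assuming the usual OpenAI cursor-page shape (`data` plus `has_more`) survives `model_dump()`:

from typing import Any

from universal_mcp.applications.openai.app import OpenaiApp

async def iter_all_files(app: OpenaiApp, purpose: str | None = None) -> list[dict[str, Any]]:
    files: list[dict[str, Any]] = []
    after: str | None = None
    while True:
        page = await app.list_files(purpose=purpose, limit=100, after=after, order="asc")
        if isinstance(page, str):
            raise RuntimeError(page)  # error strings are returned, not raised
        files.extend(page["data"])
        if not page.get("has_more") or not page["data"]:
            return files
        after = page["data"][-1]["id"]  # cursor: last id of the current page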
@@ -221,7 +182,6 @@ class OpenaiApp(APIApplication):
                 params["after"] = after
             if order:
                 params["order"] = order
-
             response_page = await client.files.list(**params)
             return response_page.model_dump()
         except OpenAIError as e:
@@ -248,9 +208,7 @@ class OpenaiApp(APIApplication):
             response: FileObject = await client.files.retrieve(file_id=file_id)
             return response.model_dump()
         except OpenAIError as e:
-            return (
-                f"OpenAI API error retrieving file {file_id}: {type(e).__name__} - {e}"
-            )
+            return f"OpenAI API error retrieving file {file_id}: {type(e).__name__} - {e}"
         except Exception as e:
             return f"Error retrieving file {file_id}: {type(e).__name__} - {e}"

@@ -294,25 +252,19 @@ class OpenaiApp(APIApplication):
         try:
             client = await self._get_client()
             api_response = await client.files.content(file_id=file_id)
-
             http_response_headers = api_response.response.headers
             content_type = http_response_headers.get("Content-Type", "").lower()
-
             if (
                 "text" in content_type
                 or "json" in content_type
                 or "xml" in content_type
-                or "javascript" in content_type
-                or "csv" in content_type
+                or ("javascript" in content_type)
+                or ("csv" in content_type)
             ):
-                return api_response.text
+                return api_response.text
             else:
                 binary_content = api_response.content
-                return {
-                    "file_id": file_id,
-                    "content_type": content_type,
-                    "content_base64": base64.b64encode(binary_content).decode(),
-                }
+                return {"file_id": file_id, "content_type": content_type, "content_base64": base64.b64encode(binary_content).decode()}
         except UnicodeDecodeError:
             client = await self._get_client()
             api_response = await client.files.content(file_id=file_id)
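The branch above returns text-like content types as a plain str and everything else as a dict carrying base64. A helper sketch for persisting either shape (note an error message is also a str, so real code should distinguish the two before writing):

import base64

def save_file_content(result: dict | str, dest: str) -> None:
    if isinstance(result, str):
        # text/json/xml/javascript/csv content arrives as the raw string
        with open(dest, "w", encoding="utf-8") as fh:
            fh.write(result)
    else:
        # binary content arrives base64-encoded under "content_base64"
        with open(dest, "wb") as fh:
            fh.write(base64.b64decode(result["content_base64"]))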
@@ -327,23 +279,17 @@ class OpenaiApp(APIApplication):
         except OpenAIError as e:
             return f"OpenAI API error retrieving content for file {file_id}: {type(e).__name__} - {e}"
         except Exception as e:
-            return (
-                f"Error retrieving content for file {file_id}: {type(e).__name__} - {e}"
-            )
+            return f"Error retrieving content for file {file_id}: {type(e).__name__} - {e}"

-    # --- Images Methods ---
     async def create_image(
         self,
         prompt: str,
-        model: str
-        | OpenAiImageModel
-        | None = "dall-e-3",
-        n: int | None = None,  # 1-10 for dall-e-2, 1 for dall-e-3
-        quality: Literal["standard", "hd"] | None = None,  # For dall-e-3
+        model: str | OpenAiImageModel | None = "dall-e-3",
+        n: int | None = None,
+        quality: Literal["standard", "hd"] | None = None,
         response_format: Literal["url", "b64_json"] | None = None,
-        size: Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]
-        | None = None,
-        style: Literal["vivid", "natural"] | None = None,  # For dall-e-3
+        size: Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] | None = None,
+        style: Literal["vivid", "natural"] | None = None,
         user: str | None = None,
     ) -> dict[str, Any] | str:
         """
@@ -375,11 +321,7 @@ class OpenaiApp(APIApplication):
         """
         try:
             client = await self._get_client()
-
-            effective_model = (
-                model if model is not None else "dall-e-3"
-            )  # Ensure effective_model is not None
-
+            effective_model = model if model is not None else "dall-e-3"
             effective_params = {
                 "prompt": prompt,
                 "model": effective_model,
@@ -390,19 +332,13 @@ class OpenaiApp(APIApplication):
                 "style": style,
                 "user": user,
             }
-
-            effective_params = {
-                k: v for k, v in effective_params.items() if v is not None
-            }
-
+            effective_params = {k: v for k, v in effective_params.items() if v is not None}
             response = await client.images.generate(**effective_params)
             return response.model_dump()
         except OpenAIError as e:
             return f"OpenAI API error generating image with model {model}: {type(e).__name__} - {e}"
         except Exception as e:
-            return (
-                f"Error generating image with model {model}: {type(e).__name__} - {e}"
-            )
+            return f"Error generating image with model {model}: {type(e).__name__} - {e}"

     async def create_image_edit(
         self,
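Using create_image with the defaults shown in the signature above; response_format="b64_json" keeps the image in the returned dict rather than behind a short-lived URL:

from universal_mcp.applications.openai.app import OpenaiApp

async def image_example(app: OpenaiApp) -> None:
    result = await app.create_image(
        prompt="A watercolor fox in a pine forest",
        quality="hd",            # dall-e-3 only
        size="1024x1024",
        response_format="b64_json",
    )
    if isinstance(result, str):
        print("image generation failed:", result)
    else:
        # ImagesResponse.model_dump() carries a "data" list of generations
        print("base64 length:", len(result["data"][0]["b64_json"]))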
@@ -439,7 +375,6 @@ class OpenaiApp(APIApplication):
         try:
             client = await self._get_client()
             effective_model = model if model is not None else "dall-e-2"
-
             params = {
                 "image": image,
                 "prompt": prompt,
@@ -451,7 +386,6 @@ class OpenaiApp(APIApplication):
                 "user": user,
             }
             params = {k: v for k, v in params.items() if v is not None}
-
             response = await client.images.edit(**params)
             return response.model_dump()
         except OpenAIError as e:
@@ -490,17 +424,8 @@ class OpenaiApp(APIApplication):
         try:
             client = await self._get_client()
             effective_model = model if model is not None else "dall-e-2"
-
-            params = {
-                "image": image,
-                "model": effective_model,
-                "n": n,
-                "response_format": response_format,
-                "size": size,
-                "user": user,
-            }
+            params = {"image": image, "model": effective_model, "n": n, "response_format": response_format, "size": size, "user": user}
             params = {k: v for k, v in params.items() if v is not None}
-
             response = await client.images.create_variation(**params)
             return response.model_dump()
         except OpenAIError as e:
@@ -514,11 +439,10 @@ class OpenaiApp(APIApplication):
         model: str | OpenAiAudioModel = "gpt-4o-transcribe",
         language: str | None = None,
         prompt: str | None = None,
-        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
-        | None = None,
+        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | None = None,
         temperature: float | None = None,
         timestamp_granularities: list[Literal["word", "segment"]] | None = None,
-        include: list[Literal["logprobs"]] | None = None,
+        include: list[Literal["logprobs"]] | None = None,
         stream: bool = False,
     ) -> dict[str, Any] | str:
         """
@@ -550,56 +474,36 @@ class OpenaiApp(APIApplication):
         """
         try:
             client = await self._get_client()
-
             params = {
                 "file": file,
                 "model": model,
                 "language": language if language is not None else NOT_GIVEN,
                 "prompt": prompt if prompt is not None else NOT_GIVEN,
-                "response_format": response_format
-                if response_format is not None
-                else NOT_GIVEN,
+                "response_format": response_format if response_format is not None else NOT_GIVEN,
                 "temperature": temperature if temperature is not None else NOT_GIVEN,
-                "timestamp_granularities": timestamp_granularities
-                if timestamp_granularities is not None
-                else NOT_GIVEN,
+                "timestamp_granularities": timestamp_granularities if timestamp_granularities is not None else NOT_GIVEN,
                 "include": include if include is not None else NOT_GIVEN,
             }
-
             if stream:
-                stream_response = await client.audio.transcriptions.create(
-                    **params, stream=True
-                )
-
+                stream_response = await client.audio.transcriptions.create(**params, stream=True)
                 final_transcription_value = None
                 async for event in stream_response:
-                    if hasattr(event, "value") and isinstance(
-                        event.value, Transcription | TranscriptionVerbose
-                    ):
+                    if hasattr(event, "value") and isinstance(event.value, Transcription | TranscriptionVerbose):
                         if event.__class__.__name__ == "FinalTranscriptionEvent":
                             final_transcription_value = event.value
                             break
-
                 if final_transcription_value:
                     return final_transcription_value.model_dump()
                 else:
-                    return {
-                        "error": "Stream aggregation failed to find final transcription object."
-                    }
+                    return {"error": "Stream aggregation failed to find final transcription object."}
             else:
-                response = await client.audio.transcriptions.create(
-                    **params, stream=False
-                )
+                response = await client.audio.transcriptions.create(**params, stream=False)
                 if isinstance(response, Transcription | TranscriptionVerbose):
                     return response.model_dump()
                 elif isinstance(response, str):
                     return response
                 else:
-                    return {
-                        "error": "Unexpected_response_type_from_transcription_api",
-                        "data": str(response),
-                    }
-
+                    return {"error": "Unexpected_response_type_from_transcription_api", "data": str(response)}
         except OpenAIError as e:
             return f"OpenAI API error creating transcription: {type(e).__name__} - {e}"
         except Exception as e:
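An end-to-end sketch for the transcription method above; the method name create_transcription is an assumption inferred from its error strings ("creating transcription") and the file's naming scheme:

from universal_mcp.applications.openai.app import OpenaiApp

async def transcribe_example(app: OpenaiApp) -> None:
    # Method name create_transcription is assumed, not shown in the hunks.
    with open("meeting.wav", "rb") as audio:
        # stream=True would also work: final events are aggregated internally
        result = await app.create_transcription(file=audio, model="gpt-4o-transcribe", stream=False)
    if isinstance(result, dict):
        print(result.get("text", result))  # Transcription.model_dump() includes "text"
    else:
        print(result)  # "text"/"srt"/"vtt" formats and error messages both arrive as str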
@@ -610,8 +514,7 @@ class OpenaiApp(APIApplication):
         file: OpenAiFileTypes,
         model: str | OpenAiAudioModel = "whisper-1",
         prompt: str | None = None,
-        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"]
-        | None = None,
+        response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] | None = None,
         temperature: float | None = None,
     ) -> dict[str, Any] | str:
         """
@@ -637,22 +540,16 @@ class OpenaiApp(APIApplication):
                 "file": file,
                 "model": model,
                 "prompt": prompt if prompt is not None else NOT_GIVEN,
-                "response_format": response_format
-                if response_format is not None
-                else NOT_GIVEN,
+                "response_format": response_format if response_format is not None else NOT_GIVEN,
                 "temperature": temperature if temperature is not None else NOT_GIVEN,
             }
             response = await client.audio.translations.create(**params)
-
             if isinstance(response, Translation | TranslationVerbose):
                 return response.model_dump()
             elif isinstance(response, str):
                 return response
-            else:
-                return {
-                    "error": "Unexpected_response_type_from_translation_api",
-                    "data": str(response),
-                }
+            else:
+                return {"error": "Unexpected_response_type_from_translation_api", "data": str(response)}
         except OpenAIError as e:
             return f"OpenAI API error creating translation: {type(e).__name__} - {e}"
         except Exception as e:
@@ -661,24 +558,11 @@ class OpenaiApp(APIApplication):
     async def create_speech(
         self,
         input_text: str,
-        voice: Literal[
-            "alloy",
-            "ash",
-            "ballad",
-            "coral",
-            "echo",
-            "fable",
-            "onyx",
-            "nova",
-            "sage",
-            "shimmer",
-            "verse",
-        ],
+        voice: Literal["alloy", "ash", "ballad", "coral", "echo", "fable", "onyx", "nova", "sage", "shimmer", "verse"],
         model: str | OpenAiSpeechModel = "tts-1",
-        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"]
-        | None = None,  # Defaults to "mp3"
+        response_format: Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] | None = None,
         speed: float | None = None,
-        instructions: str | None = None,
+        instructions: str | None = None,
     ) -> dict[str, Any] | str:
         """
         Generates audio from input text using a specified TTS model and voice. This text-to-speech function allows customizing audio format and speed. On success, it returns a dictionary containing the base64-encoded audio content and its corresponding MIME type, or an error string on failure.
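Decoding what the create_speech docstring above promises (base64 audio plus a MIME type). The exact key for the audio bytes is not visible in these hunks, so the sketch locates it by name rather than hard-coding it:

import base64

from universal_mcp.applications.openai.app import OpenaiApp

async def speech_example(app: OpenaiApp) -> None:
    result = await app.create_speech(
        input_text="Hello from universal-mcp.",
        voice="nova",
        response_format="mp3",
    )
    if isinstance(result, str):
        print("speech synthesis failed:", result)
        return
    # Only "model_used" and "voice_used" appear in the visible hunks; the
    # base64 audio field is assumed to contain "base64" in its key name.
    audio_key = next(k for k in result if "base64" in k)
    with open("hello.mp3", "wb") as fh:
        fh.write(base64.b64decode(result[audio_key]))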
@@ -705,19 +589,13 @@ class OpenaiApp(APIApplication):
                 "input": input_text,
                 "model": model,
                 "voice": voice,
-                "response_format": response_format
-                if response_format is not None
-                else NOT_GIVEN,
+                "response_format": response_format if response_format is not None else NOT_GIVEN,
                 "speed": speed if speed is not None else NOT_GIVEN,
                 "instructions": instructions if instructions is not None else NOT_GIVEN,
             }
-
             api_response = await client.audio.speech.create(**params)
             binary_content = api_response.content
-            actual_content_type = api_response.response.headers.get(
-                "Content-Type", "application/octet-stream"
-            )
-
+            actual_content_type = api_response.response.headers.get("Content-Type", "application/octet-stream")
             if response_format and actual_content_type == "application/octet-stream":
                 mime_map = {
                     "mp3": "audio/mpeg",
@@ -728,7 +606,6 @@ class OpenaiApp(APIApplication):
                     "pcm": "audio/L16",
                 }
                 actual_content_type = mime_map.get(response_format, actual_content_type)
-
             return {
                 "model_used": str(model),
                 "voice_used": voice,