universal-mcp-applications 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of universal-mcp-applications might be problematic.

Files changed (54)
  1. universal_mcp/applications/aws_s3/app.py +71 -71
  2. universal_mcp/applications/calendly/app.py +199 -199
  3. universal_mcp/applications/canva/app.py +189 -189
  4. universal_mcp/applications/domain_checker/app.py +31 -24
  5. universal_mcp/applications/e2b/app.py +6 -7
  6. universal_mcp/applications/elevenlabs/app.py +24 -20
  7. universal_mcp/applications/exa/app.py +25 -20
  8. universal_mcp/applications/falai/app.py +44 -41
  9. universal_mcp/applications/file_system/app.py +20 -12
  10. universal_mcp/applications/firecrawl/app.py +46 -47
  11. universal_mcp/applications/fireflies/app.py +79 -79
  12. universal_mcp/applications/fpl/app.py +83 -74
  13. universal_mcp/applications/github/README.md +0 -1028
  14. universal_mcp/applications/github/app.py +55 -50227
  15. universal_mcp/applications/google_calendar/app.py +63 -65
  16. universal_mcp/applications/google_docs/app.py +78 -78
  17. universal_mcp/applications/google_drive/app.py +361 -440
  18. universal_mcp/applications/google_gemini/app.py +34 -17
  19. universal_mcp/applications/google_mail/app.py +117 -117
  20. universal_mcp/applications/google_searchconsole/app.py +41 -47
  21. universal_mcp/applications/google_sheet/app.py +157 -164
  22. universal_mcp/applications/http_tools/app.py +16 -16
  23. universal_mcp/applications/linkedin/app.py +26 -31
  24. universal_mcp/applications/ms_teams/app.py +190 -190
  25. universal_mcp/applications/openai/app.py +55 -56
  26. universal_mcp/applications/outlook/app.py +71 -71
  27. universal_mcp/applications/perplexity/app.py +17 -17
  28. universal_mcp/applications/reddit/app.py +225 -4053
  29. universal_mcp/applications/replicate/app.py +40 -42
  30. universal_mcp/applications/resend/app.py +157 -154
  31. universal_mcp/applications/scraper/app.py +24 -24
  32. universal_mcp/applications/serpapi/app.py +18 -20
  33. universal_mcp/applications/sharepoint/app.py +46 -36
  34. universal_mcp/applications/slack/app.py +66 -66
  35. universal_mcp/applications/tavily/app.py +7 -7
  36. universal_mcp/applications/twitter/api_segments/compliance_api.py +17 -20
  37. universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +35 -40
  38. universal_mcp/applications/twitter/api_segments/dm_events_api.py +18 -21
  39. universal_mcp/applications/twitter/api_segments/likes_api.py +19 -22
  40. universal_mcp/applications/twitter/api_segments/lists_api.py +59 -68
  41. universal_mcp/applications/twitter/api_segments/spaces_api.py +36 -42
  42. universal_mcp/applications/twitter/api_segments/trends_api.py +7 -8
  43. universal_mcp/applications/twitter/api_segments/tweets_api.py +159 -185
  44. universal_mcp/applications/twitter/api_segments/usage_api.py +5 -6
  45. universal_mcp/applications/twitter/api_segments/users_api.py +230 -264
  46. universal_mcp/applications/unipile/app.py +99 -105
  47. universal_mcp/applications/whatsapp/app.py +86 -82
  48. universal_mcp/applications/whatsapp_business/app.py +147 -147
  49. universal_mcp/applications/youtube/app.py +290 -290
  50. universal_mcp/applications/zenquotes/app.py +6 -6
  51. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/METADATA +2 -2
  52. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/RECORD +54 -54
  53. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/WHEEL +0 -0
  54. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/licenses/LICENSE +0 -0
universal_mcp/applications/openai/app.py

@@ -61,8 +61,8 @@ class OpenaiApp(APIApplication):
         # Add other common parameters as needed, or rely on
     ) -> dict[str, Any] | str:
         """
-        Creates a model response for the given chat conversation.
-
+        Generates a model response for a chat conversation. It supports both standard and streaming modes; if streaming, it internally aggregates response chunks into a single complete object, simplifying stream handling. Returns the completion data as a dictionary on success or an error string on failure.
+
         Args:
             messages: A list of messages comprising the conversation so far.
             model: ID of the model to use. Defaults to "gpt-4o".
@@ -79,12 +79,12 @@ class OpenaiApp(APIApplication):
             presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far.
             stop: Up to 4 sequences where the API will stop generating further tokens.
             user: A unique identifier representing your end-user.
-
+
         Returns:
             A dictionary containing the chat completion response on success,
             or a string containing an error message on failure.
             If stream=True, usage data in the response will be None.
-
+
         Tags:
             chat, llm, important
         """
@@ -163,17 +163,17 @@ class OpenaiApp(APIApplication):
         self, file: OpenAiFileTypes, purpose: OpenAiFilePurpose
     ) -> dict[str, Any] | str:
         """
-        Upload a file that can be used across various OpenAI API endpoints.
-
+        Uploads a file to the user's OpenAI account for use across various endpoints like 'assistants'. It accepts a file path or object and a purpose string, returning a dictionary with the created file object's details upon success.
+
         Args:
             file: The File object (not file name) or path to be uploaded.
                 Can be bytes, a PathLike object, or a file-like object.
             purpose: The intended purpose of the uploaded file (e.g., 'fine-tune', 'assistants').
-
+
         Returns:
             A dictionary containing the file object details on success,
             or a string containing an error message on failure.
-
+
         Tags:
             files, upload, storage
         """
@@ -194,18 +194,18 @@ class OpenaiApp(APIApplication):
         order: Literal["asc", "desc"] | None = None,
     ) -> dict[str, Any] | str:
         """
-        Lists the files that have been uploaded to your OpenAI account.
-
+        Retrieves a paginated list of files uploaded to the OpenAI account. Allows filtering by purpose and controlling the output with limit, `after` cursor, and sort order parameters to efficiently navigate through the file collection.
+
         Args:
             purpose: Only return files with the given purpose.
             limit: A limit on the number of objects to be returned.
             after: A cursor for use in pagination.
             order: Sort order by the `created_at` timestamp.
-
+
         Returns:
             A dictionary representing a page of file objects on success,
             or a string containing an error message on failure.
-
+
         Tags:
             files, list, storage
         """
@@ -228,17 +228,17 @@ class OpenaiApp(APIApplication):
         except Exception as e:
             return f"Error listing files: {type(e).__name__} - {e}"

-    async def retrieve_file(self, file_id: str) -> dict[str, Any] | str:
+    async def retrieve_file_metadata(self, file_id: str) -> dict[str, Any] | str:
         """
-        Retrieves information about a specific file.
-
+        Retrieves the metadata (e.g., purpose, creation date) for a specific file from the OpenAI account using its ID. Unlike `retrieve_file_content`, this function does not download the file's contents. It returns a dictionary with file details on success or an error string on failure.
+
         Args:
             file_id: The ID of the file to retrieve.
-
+
         Returns:
             A dictionary containing the file object details on success,
             or a string containing an error message on failure.
-
+
         Tags:
             files, retrieve, storage
         """
@@ -255,15 +255,15 @@ class OpenaiApp(APIApplication):

     async def delete_file(self, file_id: str) -> dict[str, Any] | str:
         """
-        Deletes a file.
-
+        Permanently deletes a file from the user's OpenAI account using its unique file ID. This action is irreversible. It returns a dictionary confirming the deletion on success or an error message string on failure. This is the 'delete' operation in the file management lifecycle.
+
         Args:
             file_id: The ID of the file to delete.
-
+
         Returns:
             A dictionary containing the deletion status on success,
             or a string containing an error message on failure.
-
+
         Tags:
             files, delete, storage
         """
@@ -278,16 +278,15 @@ class OpenaiApp(APIApplication):

     async def retrieve_file_content(self, file_id: str) -> dict[str, Any] | str:
         """
-        Retrieves the content of the specified file.
-        Returns text content directly, or base64 encoded content in a dictionary for binary files.
-
+        Retrieves a file's content from OpenAI using its ID. It returns plain text for common text formats (e.g., JSON, CSV). For binary or undecodable files, it returns a dictionary with base64-encoded data, differentiating it from `retrieve_file` which only fetches metadata.
+
         Args:
             file_id: The ID of the file whose content to retrieve.
-
+
         Returns:
             The file content as a string if text, a dictionary with base64 encoded
             content if binary, or an error message string on failure.
-
+
         Tags:
             files, content, download
         """
@@ -332,7 +331,7 @@ class OpenaiApp(APIApplication):
         )

     # --- Images Methods ---
-    async def generate_image(
+    async def create_image(
         self,
         prompt: str,
         model: str
@@ -347,8 +346,8 @@ class OpenaiApp(APIApplication):
         user: str | None = None,
     ) -> dict[str, Any] | str:
         """
-        Creates an image given a prompt.
-
+        Generates new images from a textual prompt using OpenAI's DALL-E models. It allows customization of parameters like image size, quality, style, and response format (URL or base64 JSON). Unlike other image functions in this class, it creates images entirely from scratch based on text.
+
         Args:
             prompt: A text description of the desired image(s).
             model: The model to use for image generation. Defaults to "dall-e-3".
@@ -365,11 +364,11 @@ class OpenaiApp(APIApplication):
                 For "dall-e-3": "1024x1024", "1792x1024", or "1024x1792".
             style: The style of the generated images ("vivid" or "natural"). Only for "dall-e-3".
             user: A unique identifier representing your end-user.
-
+
         Returns:
             A dictionary containing the image generation response on success,
             or a string containing an error message on failure.
-
+
         Tags:
             images, generate, dalle, important
         """
@@ -416,8 +415,8 @@ class OpenaiApp(APIApplication):
         user: str | None = None,
     ) -> dict[str, Any] | str:
         """
-        Creates an edited or extended image given an original image and a prompt.
-
+        Modifies a source image using the DALL-E 2 model based on a text prompt. An optional mask can be supplied to specify the exact area for editing. This function is distinct from generating images from scratch (`generate_image`) or creating simple variations (`create_image_variation`).
+
         Args:
             image: The image to edit. Must be a valid PNG file, less than 4MB, and square.
             prompt: A text description of the desired image(s).
@@ -428,11 +427,11 @@ class OpenaiApp(APIApplication):
             response_format: The format of the returned images ("url" or "b64_json"). Defaults to "url".
             size: The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024".
             user: A unique identifier representing your end-user.
-
+
         Returns:
             A dictionary containing the image edit response on success,
             or a string containing an error message on failure.
-
+
         Tags:
             images, edit, dalle
         """
@@ -469,8 +468,8 @@ class OpenaiApp(APIApplication):
         user: str | None = None,
     ) -> dict[str, Any] | str:
         """
-        Creates a variation of a given image.
-
+        Generates one or more variations of a provided image using the OpenAI API, specifically the DALL-E 2 model. It allows customization of the number, size, and response format of the resulting images. Returns a dictionary with image data on success or an error string on failure.
+
         Args:
             image: The image to use as the basis for the variation(s). Must be a valid PNG file.
             model: The model to use. Defaults to "dall-e-2", which is currently the only
@@ -479,11 +478,11 @@ class OpenaiApp(APIApplication):
             response_format: The format of the returned images ("url" or "b64_json"). Defaults to "url".
             size: The size of the generated images. Must be one of "256x256", "512x512", or "1024x1024".
             user: A unique identifier representing your end-user.
-
+
         Returns:
             A dictionary containing the image variation response on success,
             or a string containing an error message on failure.
-
+
         Tags:
             images, variation, dalle
         """
@@ -508,7 +507,7 @@ class OpenaiApp(APIApplication):
         except Exception as e:
             return f"Error creating image variation with model {effective_model}: {type(e).__name__} - {e}"

-    async def create_transcription(
+    async def transcribe_audio(
         self,
         file: OpenAiFileTypes,
         model: str | OpenAiAudioModel = "gpt-4o-transcribe",
@@ -522,8 +521,8 @@ class OpenaiApp(APIApplication):
         stream: bool = False,
     ) -> dict[str, Any] | str:
         """
-        Transcribes audio into the input language.
-
+        Transcribes an audio file into text in its original language using models like Whisper or GPT-4o. It supports multiple response formats and internally aggregates streaming data into a final object. This differs from `create_translation`, which translates audio specifically into English text.
+
         Args:
             file: The audio file object (not file name) to transcribe.
             model: ID of the model to use (e.g., "whisper-1", "gpt-4o-transcribe").
@@ -539,12 +538,12 @@ class OpenaiApp(APIApplication):
                 Only works with response_format="json" and gpt-4o models.
             stream: If True, streams the response. The method will aggregate the stream
                 into a final response object. Streaming is not supported for "whisper-1".
-
+
         Returns:
             A dictionary containing the transcription or a string, depending on `response_format`.
             If `stream` is True, an aggregated response dictionary.
             Returns an error message string on failure.
-
+
         Tags:
             audio, transcription, speech-to-text, important
         """
@@ -615,19 +614,19 @@ class OpenaiApp(APIApplication):
         temperature: float | None = None,
     ) -> dict[str, Any] | str:
         """
-        Translates audio into English text.
-
+        Translates audio from any supported language into English text using OpenAI's API. Unlike `create_transcription`, which converts audio to text in its original language, this function's output is always English. It supports various response formats like JSON, text, or SRT for the translated content.
+
         Args:
             file: The audio file object (not file name) to translate.
             model: ID of the model to use (currently, only "whisper-1" is supported).
            prompt: Optional text to guide the model's style (should be in English).
            response_format: The format of the translated text.
            temperature: Sampling temperature between 0 and 1.
-
+
         Returns:
             A dictionary containing the translation or a string, depending on `response_format`.
             Returns an error message string on failure.
-
+
         Tags:
             audio, translation, speech-to-text
         """
@@ -681,8 +680,8 @@ class OpenaiApp(APIApplication):
         instructions: str | None = None, # For gpt-4o-mini-tts or newer models
     ) -> dict[str, Any] | str:
         """
-        Generates audio from the input text.
-
+        Generates audio from input text using a specified TTS model and voice. This text-to-speech function allows customizing audio format and speed. On success, it returns a dictionary containing the base64-encoded audio content and its corresponding MIME type, or an error string on failure.
+
         Args:
             input_text: The text to generate audio for (max 4096 characters).
             model: The TTS model to use (e.g., "tts-1", "tts-1-hd", "gpt-4o-mini-tts").
@@ -690,12 +689,12 @@ class OpenaiApp(APIApplication):
             response_format: The format of the audio ("mp3", "opus", "aac", "flac", "wav", "pcm"). Defaults to "mp3".
             speed: Speed of the generated audio (0.25 to 4.0). Defaults to 1.0.
             instructions: Control voice with additional instructions (not for tts-1/tts-1-hd).
-
-
+
+
         Returns:
             A dictionary containing the base64 encoded audio content and content type,
             or an error message string on failure.
-
+
         Tags:
             audio, speech, text-to-speech, tts, important
         """
@@ -746,13 +745,13 @@ class OpenaiApp(APIApplication):
             self.create_chat_completion,
             self.upload_file,
             self.list_files,
-            self.retrieve_file,
+            self.retrieve_file_metadata,
             self.delete_file,
             self.retrieve_file_content,
-            self.generate_image,
+            self.create_image,
             self.create_image_edit,
             self.create_image_variation,
-            self.create_transcription,
+            self.transcribe_audio,
             self.create_translation,
             self.create_speech,
         ]
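
For callers upgrading from 0.1.13, the list_tools hunk above implies three tool renames in the OpenAI app. A small migration map derived directly from this diff (the dict itself is illustrative, not part of the package):

    # Old tool name (0.1.13) -> new tool name (0.1.15), per the list_tools diff above.
    OPENAI_TOOL_RENAMES = {
        "retrieve_file": "retrieve_file_metadata",
        "generate_image": "create_image",
        "create_transcription": "transcribe_audio",
    }
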