universal-mcp-applications 0.1.13__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of universal-mcp-applications might be problematic. Click here for more details.

Files changed (54)
  1. universal_mcp/applications/aws_s3/app.py +71 -71
  2. universal_mcp/applications/calendly/app.py +199 -199
  3. universal_mcp/applications/canva/app.py +189 -189
  4. universal_mcp/applications/domain_checker/app.py +31 -24
  5. universal_mcp/applications/e2b/app.py +6 -7
  6. universal_mcp/applications/elevenlabs/app.py +24 -20
  7. universal_mcp/applications/exa/app.py +25 -20
  8. universal_mcp/applications/falai/app.py +44 -41
  9. universal_mcp/applications/file_system/app.py +20 -12
  10. universal_mcp/applications/firecrawl/app.py +46 -47
  11. universal_mcp/applications/fireflies/app.py +79 -79
  12. universal_mcp/applications/fpl/app.py +83 -74
  13. universal_mcp/applications/github/README.md +0 -1028
  14. universal_mcp/applications/github/app.py +55 -50227
  15. universal_mcp/applications/google_calendar/app.py +63 -65
  16. universal_mcp/applications/google_docs/app.py +78 -78
  17. universal_mcp/applications/google_drive/app.py +361 -440
  18. universal_mcp/applications/google_gemini/app.py +34 -17
  19. universal_mcp/applications/google_mail/app.py +117 -117
  20. universal_mcp/applications/google_searchconsole/app.py +41 -47
  21. universal_mcp/applications/google_sheet/app.py +157 -164
  22. universal_mcp/applications/http_tools/app.py +16 -16
  23. universal_mcp/applications/linkedin/app.py +26 -31
  24. universal_mcp/applications/ms_teams/app.py +190 -190
  25. universal_mcp/applications/openai/app.py +55 -56
  26. universal_mcp/applications/outlook/app.py +71 -71
  27. universal_mcp/applications/perplexity/app.py +17 -17
  28. universal_mcp/applications/reddit/app.py +225 -4053
  29. universal_mcp/applications/replicate/app.py +40 -42
  30. universal_mcp/applications/resend/app.py +157 -154
  31. universal_mcp/applications/scraper/app.py +24 -24
  32. universal_mcp/applications/serpapi/app.py +18 -20
  33. universal_mcp/applications/sharepoint/app.py +46 -36
  34. universal_mcp/applications/slack/app.py +66 -66
  35. universal_mcp/applications/tavily/app.py +7 -7
  36. universal_mcp/applications/twitter/api_segments/compliance_api.py +17 -20
  37. universal_mcp/applications/twitter/api_segments/dm_conversations_api.py +35 -40
  38. universal_mcp/applications/twitter/api_segments/dm_events_api.py +18 -21
  39. universal_mcp/applications/twitter/api_segments/likes_api.py +19 -22
  40. universal_mcp/applications/twitter/api_segments/lists_api.py +59 -68
  41. universal_mcp/applications/twitter/api_segments/spaces_api.py +36 -42
  42. universal_mcp/applications/twitter/api_segments/trends_api.py +7 -8
  43. universal_mcp/applications/twitter/api_segments/tweets_api.py +159 -185
  44. universal_mcp/applications/twitter/api_segments/usage_api.py +5 -6
  45. universal_mcp/applications/twitter/api_segments/users_api.py +230 -264
  46. universal_mcp/applications/unipile/app.py +99 -105
  47. universal_mcp/applications/whatsapp/app.py +86 -82
  48. universal_mcp/applications/whatsapp_business/app.py +147 -147
  49. universal_mcp/applications/youtube/app.py +290 -290
  50. universal_mcp/applications/zenquotes/app.py +6 -6
  51. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/METADATA +2 -2
  52. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/RECORD +54 -54
  53. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/WHEEL +0 -0
  54. {universal_mcp_applications-0.1.13.dist-info → universal_mcp_applications-0.1.15.dist-info}/licenses/LICENSE +0 -0
@@ -15,14 +15,15 @@ class FileSystemApp(BaseApplication):
15
15
 
16
16
  @staticmethod
17
17
  async def read_file(file_path: str):
18
- """Reads file data from a file path.
19
-
18
+ """
19
+ Asynchronously reads the entire content of a specified file in binary mode. This static method takes a file path and returns its data as a bytes object, serving as a fundamental file retrieval operation within the FileSystem application.
20
+
20
21
  Args:
21
22
  file_path (str): The path to the file to read.
22
-
23
+
23
24
  Returns:
24
25
  bytes: The file content as bytes.
25
-
26
+
26
27
  Raises:
27
28
  FileNotFoundError: If the file doesn't exist.
28
29
  IOError: If there's an error reading the file.
@@ -35,13 +36,14 @@ class FileSystemApp(BaseApplication):
35
36
 
36
37
  @staticmethod
37
38
  async def write_file(file_data: bytes, file_path: str = None):
38
- """Writes file data to a file path.
39
-
39
+ """
40
+ Writes binary data to a specified file path. If no path is provided, it creates a unique temporary file in `/tmp`. The function returns a dictionary confirming success and providing metadata about the new file, including its path and size.
41
+
40
42
  Args:
41
43
  file_data (bytes): The data to write to the file.
42
44
  file_path (str, optional): The path where to write the file.
43
45
  If None, generates a random path in /tmp. Defaults to None.
44
-
46
+
45
47
  Returns:
46
48
  dict: A dictionary containing the operation result with keys:
47
49
  - status (str): "success" if the operation completed successfully
@@ -49,7 +51,7 @@ class FileSystemApp(BaseApplication):
49
51
  - url (str): The file path where the data was written
50
52
  - filename (str): The filename (same as url in this implementation)
51
53
  - size (int): The size of the written data in bytes
52
-
54
+
53
55
  Raises:
54
56
  IOError: If there's an error writing the file.
55
57
  PermissionError: If there are insufficient permissions to write to the path.
@@ -72,20 +74,26 @@ class FileSystemApp(BaseApplication):
72
74
  return result
73
75
 
74
76
  @staticmethod
75
- async def delete_file(file_path: str):
76
- """Deletes a file from the file system."""
77
+ async def remove_file(file_path: str):
78
+ """
79
+ Permanently removes a file from the local file system at the specified path. Unlike `move_file`, which relocates a file, this operation is irreversible. It returns a dictionary with a 'success' status to confirm deletion.
80
+ """
77
81
  os.remove(file_path)
78
82
  return {"status": "success"}
79
83
 
80
84
  @staticmethod
81
85
  async def move_file(source_file_path: str, dest_file_path: str):
82
- """Moves a file from one path to another."""
86
+ """
87
+ Relocates a file from a source path to a destination path on the same filesystem. This function effectively renames or moves the file, differing from `copy_file` which creates a duplicate. It returns a dictionary confirming the successful completion of the operation.
88
+ """
83
89
  os.rename(source_file_path, dest_file_path)
84
90
  return {"status": "success"}
85
91
 
86
92
  @staticmethod
87
93
  async def copy_file(source_file_path: str, dest_file_path: str):
88
- """Copies a file from one path to another."""
94
+ """
95
+ Duplicates a file by copying it from a source path to a destination path, leaving the original file untouched. This contrasts with `move_file`, which relocates the file. It returns a success status dictionary upon successful completion of the operation.
96
+ """
89
97
  shutil.copy(source_file_path, dest_file_path)
90
98
  return {"status": "success"}
91
99
 
@@ -38,8 +38,7 @@ class FirecrawlApp(APIApplication):
38
38
  @property
39
39
  def firecrawl_api_key(self) -> str:
40
40
  """
41
- Retrieves and caches the Firecrawl API key from the integration.
42
- Raises NotAuthorizedError if the key cannot be obtained.
41
+ A property that lazily retrieves and caches the Firecrawl API key from the configured integration. On first access, it fetches credentials and raises a `NotAuthorizedError` if the key is unobtainable, ensuring all subsequent API calls are properly authenticated.
43
42
  """
44
43
  if self._firecrawl_api_key is None:
45
44
  if not self.integration:
@@ -167,19 +166,19 @@ class FirecrawlApp(APIApplication):
167
166
 
168
167
  def scrape_url(self, url: str) -> Any:
169
168
  """
170
- Scrapes a single URL using Firecrawl and returns the extracted data.
171
-
169
+ Synchronously scrapes a single web page's content using the Firecrawl service. This function executes immediately and returns the extracted data, unlike the asynchronous `start_batch_scrape` or `start_crawl` jobs which require status checks. Returns an error message on failure.
170
+
172
171
  Args:
173
172
  url: The URL of the web page to scrape.
174
-
173
+
175
174
  Returns:
176
175
  A dictionary containing the scraped data on success,
177
176
  or a string containing an error message on failure.
178
-
177
+
179
178
  Raises:
180
179
  NotAuthorizedError: If API key is missing or invalid.
181
180
  ToolError: If the Firecrawl SDK is not installed.
182
-
181
+
183
182
  Tags:
184
183
  scrape, important
185
184
  """
@@ -199,19 +198,19 @@ class FirecrawlApp(APIApplication):
199
198
 
200
199
  def search(self, query: str) -> dict[str, Any] | str:
201
200
  """
202
- Performs a web search using Firecrawl's search capability.
203
-
201
+ Executes a web search using the Firecrawl service for a specified query. It returns a dictionary of results on success or an error string on failure, raising specific exceptions for authorization or SDK installation issues. This provides a direct, synchronous method for information retrieval.
202
+
204
203
  Args:
205
204
  query: The search query string.
206
-
205
+
207
206
  Returns:
208
207
  A dictionary containing the search results on success,
209
208
  or a string containing an error message on failure.
210
-
209
+
211
210
  Raises:
212
211
  NotAuthorizedError: If API key is missing or invalid.
213
212
  ToolError: If the Firecrawl SDK is not installed.
214
-
213
+
215
214
  Tags:
216
215
  search, important
217
216
  """
@@ -233,19 +232,19 @@ class FirecrawlApp(APIApplication):
233
232
  url: str,
234
233
  ) -> dict[str, Any] | str:
235
234
  """
236
- Starts a async crawl job for a given URL using Firecrawl. Returns the job ID immediately.
237
-
235
+ Initiates an asynchronous Firecrawl job to crawl a website starting from a given URL. It returns immediately with a job ID, which can be used with `check_crawl_status` to monitor progress. This differs from `scrape_url`, which performs a synchronous scrape of a single page.
236
+
238
237
  Args:
239
238
  url: The starting URL for the crawl.
240
-
239
+
241
240
  Returns:
242
241
  A dictionary containing the job initiation response on success,
243
242
  or a string containing an error message on failure.
244
-
243
+
245
244
  Raises:
246
245
  NotAuthorizedError: If API key is missing or invalid.
247
246
  ToolError: If the Firecrawl SDK is not installed.
248
-
247
+
249
248
  Tags:
250
249
  crawl, async_job, start
251
250
  """
@@ -269,19 +268,19 @@ class FirecrawlApp(APIApplication):
269
268
 
270
269
  def check_crawl_status(self, job_id: str) -> dict[str, Any] | str:
271
270
  """
272
- Checks the status of a previously initiated async Firecrawl crawl job.
273
-
271
+ Retrieves the status of an asynchronous Firecrawl crawl job using its unique ID. Returns a dictionary with the job's details on success or an error message on failure. This function specifically handles jobs initiated by `start_crawl`, distinct from checkers for batch scrapes or extractions.
272
+
274
273
  Args:
275
274
  job_id: The ID of the crawl job to check.
276
-
275
+
277
276
  Returns:
278
277
  A dictionary containing the job status details on success,
279
278
  or a string containing an error message on failure.
280
-
279
+
281
280
  Raises:
282
281
  NotAuthorizedError: If API key is missing or invalid.
283
282
  ToolError: If the Firecrawl SDK is not installed.
284
-
283
+
285
284
  Tags:
286
285
  crawl, async_job, status
287
286
  """
@@ -304,20 +303,20 @@ class FirecrawlApp(APIApplication):
304
303
 
305
304
  def cancel_crawl(self, job_id: str) -> dict[str, Any] | str:
306
305
  """
307
- Cancels a currently running Firecrawl crawl job.
308
-
306
+ Cancels a running asynchronous Firecrawl crawl job identified by its unique ID. As part of the crawl job lifecycle, this function terminates a process initiated by `start_crawl`, returning a confirmation status upon success or an error message if the cancellation fails or is not supported.
307
+
309
308
  Args:
310
309
  job_id: The ID of the crawl job to cancel.
311
-
310
+
312
311
  Returns:
313
312
  A dictionary confirming the cancellation status on success,
314
313
  or a string containing an error message on failure.
315
314
  (Note: This functionality might depend on Firecrawl API capabilities)
316
-
315
+
317
316
  Raises:
318
317
  NotAuthorizedError: If API key is missing or invalid.
319
318
  ToolError: If the Firecrawl SDK is not installed or operation not supported.
320
-
319
+
321
320
  Tags:
322
321
  crawl, async_job, management, cancel
323
322
  """
@@ -343,19 +342,19 @@ class FirecrawlApp(APIApplication):
343
342
  urls: list[str],
344
343
  ) -> dict[str, Any] | str:
345
344
  """
346
- Starts a batch scrape job for multiple URLs using Firecrawl. (Note: May map to multiple individual scrapes or a specific batch API endpoint if available)
347
-
345
+ Initiates an asynchronous batch job to scrape a list of URLs using Firecrawl. It returns a response containing a job ID, which can be tracked with `check_batch_scrape_status`. This differs from the synchronous `scrape_url` which handles a single URL and returns data directly.
346
+
348
347
  Args:
349
348
  urls: A list of URLs to scrape.
350
-
349
+
351
350
  Returns:
352
351
  A dictionary containing the job initiation response (e.g., a batch job ID or list of results/job IDs) on success,
353
352
  or a string containing an error message on failure.
354
-
353
+
355
354
  Raises:
356
355
  NotAuthorizedError: If API key is missing or invalid.
357
356
  ToolError: If the Firecrawl SDK is not installed.
358
-
357
+
359
358
  Tags:
360
359
  scrape, batch, async_job, start
361
360
  """
@@ -378,19 +377,19 @@ class FirecrawlApp(APIApplication):
378
377
 
379
378
  def check_batch_scrape_status(self, job_id: str) -> dict[str, Any] | str:
380
379
  """
381
- Checks the status of a previously initiated Firecrawl batch scrape job.
382
-
380
+ Checks the status of a previously initiated asynchronous Firecrawl batch scrape job using its job ID. It returns detailed progress information or an error message. This function is the counterpart to `start_batch_scrape` for monitoring multi-URL scraping tasks.
381
+
383
382
  Args:
384
383
  job_id: The ID of the batch scrape job to check.
385
-
384
+
386
385
  Returns:
387
386
  A dictionary containing the job status details on success,
388
387
  or a string containing an error message on failure.
389
-
388
+
390
389
  Raises:
391
390
  NotAuthorizedError: If API key is missing or invalid.
392
391
  ToolError: If the Firecrawl SDK is not installed or operation not supported.
393
-
392
+
394
393
  Tags:
395
394
  scrape, batch, async_job, status
396
395
  """
@@ -422,22 +421,22 @@ class FirecrawlApp(APIApplication):
422
421
  allow_external_links: bool | None = False,
423
422
  ) -> dict[str, Any]:
424
423
  """
425
- Performs a quick, synchronous extraction of data from one or more URLs using Firecrawl and returns the results directly.
426
-
424
+ Performs synchronous, AI-driven data extraction from URLs using an optional prompt or schema. Unlike asynchronous job functions (e.g., `start_crawl`), it returns the structured data directly. This function raises `NotAuthorizedError` or `ToolError` on failure, contrasting with others that return an error string.
425
+
427
426
  Args:
428
427
  urls: A list of URLs to extract data from.
429
428
  prompt: Optional custom extraction prompt describing what data to extract.
430
429
  schema: Optional JSON schema or Pydantic model for the desired output structure.
431
430
  system_prompt: Optional system context for the extraction.
432
431
  allow_external_links: Optional boolean to allow following external links.
433
-
432
+
434
433
  Returns:
435
434
  A dictionary containing the extracted data on success.
436
-
435
+
437
436
  Raises:
438
437
  NotAuthorizedError: If API key is missing or invalid.
439
438
  ToolError: If the Firecrawl SDK is not installed or extraction fails.
440
-
439
+
441
440
  Tags:
442
441
  extract, ai, sync, quick, important
443
442
  """
@@ -477,19 +476,19 @@ class FirecrawlApp(APIApplication):
477
476
 
478
477
  def check_extract_status(self, job_id: str) -> dict[str, Any] | str:
479
478
  """
480
- Checks the status of a previously initiated Firecrawl extraction job.
481
-
479
+ Checks the status of a specific asynchronous, AI-powered data extraction job on Firecrawl using its job ID. This is distinct from `check_crawl_status` for web crawling and `check_batch_scrape_status` for bulk scraping, as it specifically monitors AI-driven extractions.
480
+
482
481
  Args:
483
482
  job_id: The ID of the extraction job to check.
484
-
483
+
485
484
  Returns:
486
485
  A dictionary containing the job status details on success,
487
486
  or a string containing an error message on failure.
488
-
487
+
489
488
  Raises:
490
489
  NotAuthorizedError: If API key is missing or invalid.
491
490
  ToolError: If the Firecrawl SDK is not installed or operation not supported.
492
-
491
+
493
492
  Tags:
494
493
  extract, ai, async_job, status
495
494
  """