universal-mcp-applications 0.1.39rc8__py3-none-any.whl → 0.1.39rc16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of universal-mcp-applications might be problematic.

Files changed (45)
  1. universal_mcp/applications/BEST_PRACTICES.md +1 -1
  2. universal_mcp/applications/airtable/app.py +13 -13
  3. universal_mcp/applications/apollo/app.py +2 -2
  4. universal_mcp/applications/aws_s3/app.py +30 -19
  5. universal_mcp/applications/browser_use/app.py +10 -7
  6. universal_mcp/applications/contentful/app.py +4 -4
  7. universal_mcp/applications/crustdata/app.py +2 -2
  8. universal_mcp/applications/e2b/app.py +3 -4
  9. universal_mcp/applications/elevenlabs/README.md +27 -3
  10. universal_mcp/applications/elevenlabs/app.py +753 -48
  11. universal_mcp/applications/exa/app.py +18 -11
  12. universal_mcp/applications/falai/README.md +5 -7
  13. universal_mcp/applications/falai/app.py +160 -159
  14. universal_mcp/applications/firecrawl/app.py +14 -15
  15. universal_mcp/applications/ghost_content/app.py +4 -4
  16. universal_mcp/applications/github/app.py +2 -2
  17. universal_mcp/applications/gong/app.py +2 -2
  18. universal_mcp/applications/google_docs/README.md +15 -14
  19. universal_mcp/applications/google_docs/app.py +5 -4
  20. universal_mcp/applications/google_gemini/app.py +61 -17
  21. universal_mcp/applications/google_sheet/README.md +2 -1
  22. universal_mcp/applications/google_sheet/app.py +55 -0
  23. universal_mcp/applications/heygen/README.md +10 -32
  24. universal_mcp/applications/heygen/app.py +350 -744
  25. universal_mcp/applications/klaviyo/app.py +2 -2
  26. universal_mcp/applications/linkedin/README.md +14 -2
  27. universal_mcp/applications/linkedin/app.py +411 -38
  28. universal_mcp/applications/ms_teams/app.py +420 -1285
  29. universal_mcp/applications/notion/app.py +2 -2
  30. universal_mcp/applications/openai/app.py +1 -1
  31. universal_mcp/applications/perplexity/app.py +6 -7
  32. universal_mcp/applications/reddit/app.py +4 -4
  33. universal_mcp/applications/resend/app.py +31 -32
  34. universal_mcp/applications/rocketlane/app.py +2 -2
  35. universal_mcp/applications/scraper/app.py +51 -21
  36. universal_mcp/applications/semrush/app.py +1 -1
  37. universal_mcp/applications/serpapi/app.py +8 -7
  38. universal_mcp/applications/shopify/app.py +5 -7
  39. universal_mcp/applications/shortcut/app.py +3 -2
  40. universal_mcp/applications/slack/app.py +2 -2
  41. universal_mcp/applications/twilio/app.py +14 -13
  42. {universal_mcp_applications-0.1.39rc8.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/METADATA +1 -1
  43. {universal_mcp_applications-0.1.39rc8.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/RECORD +45 -45
  44. {universal_mcp_applications-0.1.39rc8.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/WHEEL +0 -0
  45. {universal_mcp_applications-0.1.39rc8.dist-info → universal_mcp_applications-0.1.39rc16.dist-info}/licenses/LICENSE +0 -0
@@ -26,8 +26,7 @@ class ExaApp(APIApplication):
         super().__init__(name="exa", integration=integration, **kwargs)
         self._exa_client: AsyncExa | None = None
 
-    @property
-    def exa_client(self) -> AsyncExa:
+    async def get_exa_client(self) -> AsyncExa:
         """
         Lazily initializes and returns the Exa client.
         """
@@ -38,7 +37,7 @@ class ExaApp(APIApplication):
         if not self.integration:
             raise NotAuthorizedError("Exa App: Integration not configured.")
 
-        credentials = self.integration.get_credentials()
+        credentials = await self.integration.get_credentials_async()
         api_key = credentials.get("api_key") or credentials.get("API_KEY") or credentials.get("apiKey")
 
         if not api_key:
@@ -140,7 +139,8 @@ class ExaApp(APIApplication):
         if context:
             contents["context"] = context
 
-        response = await self.exa_client.search(
+        client = await self.get_exa_client()
+        response = await client.search(
             query=query,
             num_results=num_results,
             include_domains=include_domains,
@@ -218,7 +218,8 @@ class ExaApp(APIApplication):
         if summary:
             contents["summary"] = summary
 
-        response = await self.exa_client.find_similar(
+        client = await self.get_exa_client()
+        response = await client.find_similar(
             url=url,
             num_results=num_results,
             include_domains=include_domains,
@@ -276,7 +277,8 @@ class ExaApp(APIApplication):
             content, fetch, crawl, subpages, extract
         """
         logger.info(f"Exa get_contents for {len(urls)} URLs.")
-        response = await self.exa_client.get_contents(
+        client = await self.get_exa_client()
+        response = await client.get_contents(
             urls=urls,
             text=text,
             summary=summary,
@@ -320,7 +322,8 @@ class ExaApp(APIApplication):
             answer, synthesis, knowledge, citations, research, important
         """
         logger.info(f"Exa answer for query: {query}")
-        response = await self.exa_client.answer(
+        client = await self.get_exa_client()
+        response = await client.answer(
             query=query,
             text=text,
             system_prompt=system_prompt,
@@ -356,7 +359,8 @@ class ExaApp(APIApplication):
             research, task, async, create
         """
         logger.info(f"Exa create_research_task: {instructions}")
-        response = await self.exa_client.research.create(
+        client = await self.get_exa_client()
+        response = await client.research.create(
             instructions=instructions,
             output_schema=output_schema,
             model=model,
@@ -378,7 +382,8 @@ class ExaApp(APIApplication):
             research, status, task, check
         """
         logger.info(f"Exa get_research_task: {task_id}")
-        response = await self.exa_client.research.get(research_id=task_id, events=events)
+        client = await self.get_exa_client()
+        response = await client.research.get(research_id=task_id, events=events)
         return self._to_serializable(response)
 
     async def poll_research_task(
@@ -405,7 +410,8 @@ class ExaApp(APIApplication):
             research, poll, wait, task, terminal
         """
         logger.info(f"Exa poll_research_task: {task_id}")
-        response = await self.exa_client.research.poll_until_finished(
+        client = await self.get_exa_client()
+        response = await client.research.poll_until_finished(
             research_id=task_id,
             poll_interval=poll_interval_ms,
             timeout_ms=timeout_ms,
@@ -432,7 +438,8 @@ class ExaApp(APIApplication):
             research, list, tasks, history
         """
         logger.info(f"Exa list_research_tasks (limit: {limit})")
-        response = await self.exa_client.research.list(cursor=cursor, limit=limit)
+        client = await self.get_exa_client()
+        response = await client.research.list(cursor=cursor, limit=limit)
         return self._to_serializable(response)
 
     def list_tools(self):
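The exa/app.py hunks above all apply one refactor: the synchronous `exa_client` property becomes an awaitable `get_exa_client()` accessor, credentials are fetched with `get_credentials_async()`, and each tool awaits the client before calling the SDK. A minimal caller-side sketch of the resulting pattern (the `integration` object is a placeholder for the host framework's credential wiring, and the optional `search` parameters are assumed to keep their defaults):

```python
from universal_mcp.applications.exa.app import ExaApp


async def demo(integration) -> None:
    # `integration` is hypothetical: any object whose get_credentials_async()
    # returns a mapping with an "api_key" entry, as the new accessor expects.
    app = ExaApp(integration=integration)

    # Each tool now awaits get_exa_client() internally before the SDK call,
    # so the first call lazily builds and caches the AsyncExa client.
    results = await app.search(query="open-source MCP servers", num_results=5)
    print(results)
```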
@@ -9,10 +9,8 @@ This is automatically generated from OpenAPI schema for the FalaiApp API.
 
 | Tool | Description |
 |------|-------------|
-| `run` | Executes a Fal AI application synchronously, waiting for completion and returning the result directly. This method is suited for short-running tasks, unlike `submit` which queues a job for asynchronous processing and returns a request ID instead of the final output. |
-| `submit` | Submits a job to the Fal AI queue for asynchronous processing, immediately returning a request ID. This contrasts with the `run` method, which waits for completion. The returned ID is used by `check_status`, `get_result`, and `cancel` to manage the job's lifecycle. |
-| `check_status` | Checks the execution state (e.g., Queued, InProgress) of an asynchronous Fal AI job using its request ID. It provides a non-blocking way to monitor jobs initiated via `submit` without fetching the final `result`, and can optionally include logs. |
-| `get_result` | Retrieves the final result of an asynchronous job, identified by its `request_id`. This function waits for the job, initiated via `submit`, to complete. Unlike the non-blocking `check_status`, this method blocks execution to fetch and return the job's actual output upon completion. |
-| `cancel` | Asynchronously cancels a running or queued Fal AI job using its `request_id`. This function complements the `submit` method, providing a way to terminate asynchronous tasks before completion. It raises a `ToolError` if the cancellation request fails. |
-| `upload_file` | Asynchronously uploads a local file to the Fal Content Delivery Network (CDN), returning a public URL. This URL makes the file accessible for use as input in other Fal AI job execution methods like `run` or `submit`. A `ToolError` is raised if the upload fails. |
-| `run_image_generation` | A specialized wrapper for the `run` method that synchronously generates images using the 'fal-ai/flux/dev' model. It simplifies image creation with common parameters like `prompt` and `seed`, waits for the task to complete, and directly returns the result containing image URLs and metadata. |
+| `generate_image` | Generates an image from a text prompt using specified Fal AI models. |
+| `submit_video_generation` | Submits a video generation task using Fal AI models and returns a request ID. |
+| `get_generation_status` | Checks the status of a video generation task. |
+| `get_generation_result` | Retrieves the result of a completed video generation task. |
+| `transcribe_audio` | Converts speech to text from an audio file URL using Fal AI models. |
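The slimmed-down tool surface in falai/README.md maps onto the rewritten FalaiApp in the hunks below. As a hedged sketch of the image path only (names and parameters are taken from the diff; the `integration` argument is a placeholder for the host framework's credential wiring):

```python
from universal_mcp.applications.falai.app import FalaiApp


async def make_poster(integration):
    # integration: hypothetical, must resolve to an "api_key" via
    # get_credentials_async() for get_fal_client() to succeed.
    app = FalaiApp(integration=integration)

    # generate_image submits the job and waits for the finished result,
    # returning the model's response (image URLs plus metadata).
    return await app.generate_image(
        prompt="flat vector poster of a lighthouse at night",
        model="fal-ai/recraft-v3",  # one of the three Literal options
        image_size="square_hd",
        num_images=2,
    )
```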
@@ -1,4 +1,3 @@
-from pathlib import Path
 from typing import Any, Literal
 from fal_client import AsyncClient, AsyncRequestHandle, Status
 from loguru import logger
@@ -6,9 +5,6 @@ from universal_mcp.applications.application import APIApplication
 from universal_mcp.exceptions import NotAuthorizedError, ToolError
 from universal_mcp.integrations import Integration
 
-Priority = Literal["normal", "low"]
-
-
 class FalaiApp(APIApplication):
     """
     Application for interacting with the Fal AI platform.
@@ -24,13 +20,12 @@ class FalaiApp(APIApplication):
         super().__init__(name="falai", integration=integration, **kwargs)
         self._fal_client = None
 
-    @property
-    def fal_client(self) -> AsyncClient:
+    async def get_fal_client(self) -> AsyncClient:
         """
         A cached property that lazily initializes an `AsyncClient` instance. It retrieves the API key from the configured integration, providing a single, centralized authentication point for all methods that interact with the Fal AI API. Raises `NotAuthorizedError` if credentials are not found.
         """
         if self._fal_client is None:
-            credentials = self.integration.get_credentials()
+            credentials = await self.integration.get_credentials_async()
             logger.info(f"Credentials: {credentials}")
             api_key = credentials.get("api_key") or credentials.get("API_KEY") or credentials.get("apiKey")
             if not api_key:
@@ -39,219 +34,225 @@ class FalaiApp(APIApplication):
             self._fal_client = AsyncClient(key=api_key)
         return self._fal_client
 
-    async def run(
-        self, arguments: Any, application: str = "fal-ai/flux/dev", path: str = "", timeout: float | None = None, hint: str | None = None
+    async def generate_image(
+        self,
+        prompt: str,
+        model: Literal[
+            "fal-ai/flux/dev", "fal-ai/recraft-v3", "fal-ai/stable-diffusion-v35-large"
+        ] = "fal-ai/flux/dev",
+        image_size: Literal[
+            "square_hd", "square", "portrait_4_3", "portrait_16_9", "landscape_4_3", "landscape_16_9"
+        ]
+        | None = "landscape_4_3",
+        num_images: int | None = 1,
+        seed: int | None = None,
+        safety_tolerance: str | None = None,
+        extra_arguments: dict[str, Any] | None = None,
     ) -> Any:
         """
-        Executes a Fal AI application synchronously, waiting for completion and returning the result directly. This method is suited for short-running tasks, unlike `submit` which queues a job for asynchronous processing and returns a request ID instead of the final output.
+        Generates an image from a text prompt using specified Fal AI models.
+        This tool supports state-of-the-art models like Flux, Recraft V3, and Stable Diffusion 3.5.
 
         Args:
-            arguments: A dictionary of arguments for the application
-            application: The name or ID of the Fal application (defaults to 'fal-ai/flux/dev')
-            path: Optional subpath for the application endpoint
-            timeout: Optional timeout in seconds for the request
-            hint: Optional hint for runner selection
+            prompt: The text description of the image to generate.
+            model: The model to use for generation. Options:
+                - 'fal-ai/flux/dev': High-quality, 12B param flow transformer.
+                - 'fal-ai/recraft-v3': SOTA model, great for text, vector art, and brand styles.
+                - 'fal-ai/stable-diffusion-v35-large': MMDiT model, excellent typography and complex prompts.
+                Defaults to 'fal-ai/flux/dev'.
+            image_size: The size/aspect ratio of the generated image. Common values: 'landscape_4_3', 'square_hd'.
+            num_images: Number of images to generate (default: 1).
+            seed: Optional random seed for reproducibility.
+            safety_tolerance: Optional safety filter level (if supported by model).
+            extra_arguments: Additional model-specific parameters to pass in the request.
 
         Returns:
-            The result of the application execution as a Python object (converted from JSON response)
-
-        Raises:
-            ToolError: Raised when the Fal API request fails, wrapping the original exception
+            A dictionary containing the generated image URLs and metadata.
 
         Tags:
-            run, execute, ai, synchronous, fal, important
+            generate, image, text-to-image, ai, flux, recraft, stable-diffusion, important
         """
+        arguments = {"prompt": prompt}
+
+        # Common arguments that most models support
+        if image_size:
+            arguments["image_size"] = image_size
+        if num_images:
+            arguments["num_images"] = num_images
+        if seed is not None:
+            arguments["seed"] = seed
+        if safety_tolerance:
+            arguments["safety_tolerance"] = safety_tolerance
+
+        if extra_arguments:
+            arguments.update(extra_arguments)
+            logger.debug(f"Merged extra_arguments. Final arguments: {arguments}")
+
         try:
-            result = await self.fal_client.run(application=application, arguments=arguments, path=path, timeout=timeout, hint=hint)
+            client = await self.get_fal_client()
+            # The run method is equivalent to subscribe() in the JS SDK - it submits and waits for the result.
+            result = await client.run(application=model, arguments=arguments)
             return result
         except Exception as e:
-            logger.error(f"Error running Fal application {application}: {e}", exc_info=True)
-            raise ToolError(f"Failed to run Fal application {application}: {e}") from e
+            logger.error(f"Error generating image with model {model}: {e}", exc_info=True)
+            raise ToolError(f"Failed to generate image with {model}: {e}") from e
 
-    async def submit(
+    async def submit_video_generation(
         self,
-        arguments: Any,
-        application: str = "fal-ai/flux/dev",
-        path: str = "",
-        hint: str | None = None,
-        webhook_url: str | None = None,
-        priority: Priority | None = None,
+        image_url: str,
+        prompt: str = "",
+        model: Literal[
+            "fal-ai/minimax-video/image-to-video",
+            "fal-ai/luma-dream-machine/image-to-video",
+            "fal-ai/kling-video/v1/standard/image-to-video",
+        ] = "fal-ai/minimax-video/image-to-video",
+        duration: Literal["5", "10"] | None = None,
+        aspect_ratio: Literal["16:9", "9:16", "1:1"] | None = None,
+        extra_arguments: dict[str, Any] | None = None,
    ) -> str:
         """
-        Submits a job to the Fal AI queue for asynchronous processing, immediately returning a request ID. This contrasts with the `run` method, which waits for completion. The returned ID is used by `check_status`, `get_result`, and `cancel` to manage the job's lifecycle.
-
-        Args:
-            arguments: A dictionary of arguments for the application
-            application: The name or ID of the Fal application, defaulting to 'fal-ai/flux/dev'
-            path: Optional subpath for the application endpoint
-            hint: Optional hint for runner selection
-            webhook_url: Optional URL to receive a webhook when the request completes
-            priority: Optional queue priority ('normal' or 'low')
-
-        Returns:
-            The request ID (str) of the submitted asynchronous job
-
-        Raises:
-            ToolError: Raised when the Fal API request fails, wrapping the original exception
-
-        Tags:
-            submit, async_job, start, ai, queue
-        """
-        try:
-            handle: AsyncRequestHandle = await self.fal_client.submit(
-                application=application, arguments=arguments, path=path, hint=hint, webhook_url=webhook_url, priority=priority
-            )
-            request_id = handle.request_id
-            return request_id
-        except Exception as e:
-            logger.error(f"Error submitting Fal application {application}: {e}", exc_info=True)
-            raise ToolError(f"Failed to submit Fal application {application}: {e}") from e
-
-    async def check_status(self, request_id: str, application: str = "fal-ai/flux/dev", with_logs: bool = False) -> Status:
-        """
-        Checks the execution state (e.g., Queued, InProgress) of an asynchronous Fal AI job using its request ID. It provides a non-blocking way to monitor jobs initiated via `submit` without fetching the final `result`, and can optionally include logs.
-
-        Args:
-            request_id: The unique identifier of the submitted request, obtained from a previous submit operation
-            application: The name or ID of the Fal application (defaults to 'fal-ai/flux/dev')
-            with_logs: Boolean flag to include execution logs in the status response (defaults to False)
-
-        Returns:
-            A Status object containing the current state of the request (Queued, InProgress, or Completed)
-
-        Raises:
-            ToolError: Raised when the Fal API request fails or when the provided request ID is invalid
-
-        Tags:
-            status, check, async_job, monitoring, ai
-        """
-        try:
-            handle = self.fal_client.get_handle(application=application, request_id=request_id)
-            status = await handle.status(with_logs=with_logs)
-            return status
-        except Exception as e:
-            logger.error(f"Error getting status for Fal request_id {request_id}: {e}", exc_info=True)
-            raise ToolError(f"Failed to get status for Fal request_id {request_id}: {e}") from e
-
-    async def get_result(self, request_id: str, application: str = "fal-ai/flux/dev") -> Any:
-        """
-        Retrieves the final result of an asynchronous job, identified by its `request_id`. This function waits for the job, initiated via `submit`, to complete. Unlike the non-blocking `check_status`, this method blocks execution to fetch and return the job's actual output upon completion.
+        Submits a video generation task using Fal AI models and returns a request ID.
+        This is an asynchronous operation. Use `get_generation_status` and `get_generation_result` with the returned ID.
 
         Args:
-            request_id: The unique identifier of the submitted request
-            application: The name or ID of the Fal application (defaults to 'fal-ai/flux/dev')
+            image_url: URL of the input image.
+            prompt: Text prompt to guide the video generation.
+            model: The video generation model to use.
+            duration: Duration of the video in seconds (supported by some models like Kling).
+            aspect_ratio: Aspect ratio of the generated video (supported by some models like Kling).
+            extra_arguments: Additional model-specific parameters.
 
         Returns:
-            The result of the application execution, converted from JSON response to Python data structures (dict/list)
-
-        Raises:
-            ToolError: When the Fal API request fails or the request does not complete successfully
+            The request ID (str) for the submitted task.
 
         Tags:
-            result, async-job, status, wait, ai
+            submit, video, async, ai, minimax, luma, kling, important
         """
+        arguments = {"image_url": image_url}
+        if prompt:
+            arguments["prompt"] = prompt
+
+        if duration:
+            arguments["duration"] = duration
+        if aspect_ratio:
+            arguments["aspect_ratio"] = aspect_ratio
+
+        if extra_arguments:
+            arguments.update(extra_arguments)
+            logger.debug(f"Merged extra_arguments for video generation. Final arguments: {arguments}")
+
         try:
-            handle = self.fal_client.get_handle(application=application, request_id=request_id)
-            result = await handle.get()
-            return result
+            client = await self.get_fal_client()
+            handle = await client.submit(application=model, arguments=arguments)
+            return handle.request_id
         except Exception as e:
-            logger.error(f"Error getting result for Fal request_id {request_id}: {e}", exc_info=True)
-            raise ToolError(f"Failed to get result for Fal request_id {request_id}: {e}") from e
+            logger.error(f"Error submitting video generation with model {model}: {e}", exc_info=True)
+            raise ToolError(f"Failed to submit video generation with {model}: {e}") from e
 
-    async def cancel(self, request_id: str, application: str = "fal-ai/flux/dev") -> None:
+    async def get_generation_status(
+        self,
+        request_id: str,
+        model: Literal[
+            "fal-ai/minimax-video/image-to-video",
+            "fal-ai/luma-dream-machine/image-to-video",
+            "fal-ai/kling-video/v1/standard/image-to-video",
+        ] = "fal-ai/minimax-video/image-to-video",
+        with_logs: bool = False,
+    ) -> Status:
         """
-        Asynchronously cancels a running or queued Fal AI job using its `request_id`. This function complements the `submit` method, providing a way to terminate asynchronous tasks before completion. It raises a `ToolError` if the cancellation request fails.
+        Checks the status of a video generation task.
 
         Args:
-            request_id: The unique identifier of the submitted Fal AI request to cancel
-            application: The name or ID of the Fal application (defaults to 'fal-ai/flux/dev')
+            request_id: The ID of the request to check.
+            model: The model used for the request (must match the submission).
+            with_logs: Whether to include logs in the status.
 
         Returns:
-            None. The function doesn't return any value.
-
-        Raises:
-            ToolError: Raised when the cancellation request fails due to API errors or if the request cannot be cancelled
-
+            A Status object (Queued, InProgress, Completed, or Failed).
+
         Tags:
-            cancel, async_job, ai, fal, management
+            check, status, video, async, important
         """
         try:
-            handle = self.fal_client.get_handle(application=application, request_id=request_id)
-            await handle.cancel()
-            return None
+            client = await self.get_fal_client()
+            handle = client.get_handle(application=model, request_id=request_id)
+            return await handle.status(with_logs=with_logs)
         except Exception as e:
-            logger.error(f"Error cancelling Fal request_id {request_id}: {e}", exc_info=True)
-            raise ToolError(f"Failed to cancel Fal request_id {request_id}: {e}") from e
+            logger.error(f"Error getting status for request {request_id}: {e}", exc_info=True)
+            raise ToolError(f"Failed to get status for {request_id}: {e}") from e
 
-    async def upload_file(self, path: str) -> str:
+    async def get_generation_result(
+        self,
+        request_id: str,
+        model: Literal[
+            "fal-ai/minimax-video/image-to-video",
+            "fal-ai/luma-dream-machine/image-to-video",
+            "fal-ai/kling-video/v1/standard/image-to-video",
+        ] = "fal-ai/minimax-video/image-to-video",
+    ) -> Any:
         """
-        Asynchronously uploads a local file to the Fal Content Delivery Network (CDN), returning a public URL. This URL makes the file accessible for use as input in other Fal AI job execution methods like `run` or `submit`. A `ToolError` is raised if the upload fails.
+        Retrieves the result of a completed video generation task.
+        This method will block until the task is complete if it is not already.
 
         Args:
-            path: The absolute or relative path to the local file
+            request_id: The ID of the request.
+            model: The model used for the request.
 
         Returns:
-            A string containing the public URL of the uploaded file on the CDN
-
-        Raises:
-            ToolError: If the file is not found or if the upload operation fails
+            The final result of the generation (video URL and metadata).
 
         Tags:
-            upload, file, cdn, storage, async, important
+            result, get, video, async, important
         """
         try:
-            file_url = await self.fal_client.upload_file(Path(path))
-            return file_url
-        except FileNotFoundError as e:
-            logger.error(f"File not found for upload: {path}", exc_info=True)
-            raise ToolError(f"File not found: {path}") from e
+            client = await self.get_fal_client()
+            handle = client.get_handle(application=model, request_id=request_id)
+            return await handle.get()
         except Exception as e:
-            logger.error(f"Error uploading file {path} to Fal CDN: {e}", exc_info=True)
-            raise ToolError(f"Failed to upload file {path}: {e}") from e
+            logger.error(f"Error getting result for request {request_id}: {e}", exc_info=True)
+            raise ToolError(f"Failed to get result for {request_id}: {e}") from e
 
-    async def run_image_generation(
+    async def transcribe_audio(
         self,
-        prompt: str,
-        seed: int | None = 6252023,
-        image_size: str | None = "landscape_4_3",
-        num_images: int | None = 1,
+        audio_url: str,
+        model: Literal["fal-ai/whisper"] = "fal-ai/whisper",
         extra_arguments: dict[str, Any] | None = None,
-        path: str = "",
-        timeout: float | None = None,
-        hint: str | None = None,
     ) -> Any:
         """
-        A specialized wrapper for the `run` method that synchronously generates images using the 'fal-ai/flux/dev' model. It simplifies image creation with common parameters like `prompt` and `seed`, waits for the task to complete, and directly returns the result containing image URLs and metadata.
+        Converts speech to text from an audio file URL using Fal AI models.
 
         Args:
-            prompt: The text prompt used to guide the image generation
-            seed: Random seed for reproducible image generation (default: 6252023)
-            image_size: Dimensions of the generated image (default: 'landscape_4_3')
-            num_images: Number of images to generate in one request (default: 1)
-            extra_arguments: Additional arguments dictionary to pass to the application, can override defaults
-            path: Subpath for the application endpoint (rarely used)
-            timeout: Maximum time in seconds to wait for the request to complete
-            hint: Hint string for runner selection
+            audio_url: URL of the audio file to transcribe.
+            model: The speech-to-text model to use. Options:
+                - 'fal-ai/whisper': Standard Whisper model.
+                Defaults to 'fal-ai/whisper'.
+            extra_arguments: Additional model-specific parameters.
 
         Returns:
-            A dictionary containing the generated image URLs and related metadata
-
-        Raises:
-            ToolError: When the image generation request fails or encounters an error
+            A dictionary containing the transcription text and metadata.
 
         Tags:
-            generate, image, ai, async, important, flux, customizable, default
+            transcribe, audio, speech-to-text, ai, whisper
         """
-        application = "fal-ai/flux/dev"
-        arguments = {"prompt": prompt, "seed": seed, "image_size": image_size, "num_images": num_images}
+        arguments = {"audio_url": audio_url}
+
         if extra_arguments:
             arguments.update(extra_arguments)
-            logger.debug(f"Merged extra_arguments. Final arguments: {arguments}")
+            logger.debug(f"Merged extra_arguments for transcription. Final arguments: {arguments}")
+
         try:
-            result = await self.run(application=application, arguments=arguments, path=path, timeout=timeout, hint=hint)
+            client = await self.get_fal_client()
+            result = await client.run(application=model, arguments=arguments)
             return result
-        except Exception:
-            raise
+        except Exception as e:
+            logger.error(f"Error transcribing audio with model {model}: {e}", exc_info=True)
+            raise ToolError(f"Failed to transcribe audio with {model}: {e}") from e
 
     def list_tools(self):
-        return [self.run, self.submit, self.check_status, self.get_result, self.cancel, self.upload_file, self.run_image_generation]
+        return [
+            self.generate_image,
+            self.submit_video_generation,
+            self.get_generation_status,
+            self.get_generation_result,
+            self.transcribe_audio
+        ]
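Together, the three video tools form a submit / poll / fetch workflow. A hedged sketch, assuming `fal_client` exports the `Completed` status class (the diff only imports its `Status` base) and that the default minimax model is used for every call:

```python
import asyncio

from fal_client import Completed  # assumed export; the diff above only imports Status

from universal_mcp.applications.falai.app import FalaiApp


async def animate(app: FalaiApp):
    # Submit returns immediately with a queue request ID.
    request_id = await app.submit_video_generation(
        image_url="https://example.com/still-frame.png",
        prompt="gentle camera push-in, light rain",
    )

    # Poll the queue until the job reports Completed.
    while not isinstance(await app.get_generation_status(request_id=request_id), Completed):
        await asyncio.sleep(5)

    # The job is finished, so this returns the video URL and metadata promptly.
    return await app.get_generation_result(request_id=request_id)
```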