universal-mcp-applications 0.1.19__py3-none-any.whl → 0.1.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of universal-mcp-applications might be problematic. Click here for more details.
- universal_mcp/applications/fireflies/app.py +109 -1
- universal_mcp/applications/google_drive/README.md +1 -1
- universal_mcp/applications/google_drive/app.py +22 -10
- universal_mcp/applications/google_sheet/app.py +130 -130
- universal_mcp/applications/yahoo_finance/README.md +17 -0
- universal_mcp/applications/yahoo_finance/__init__.py +1 -0
- universal_mcp/applications/yahoo_finance/app.py +258 -0
- {universal_mcp_applications-0.1.19.dist-info → universal_mcp_applications-0.1.20.dist-info}/METADATA +2 -1
- {universal_mcp_applications-0.1.19.dist-info → universal_mcp_applications-0.1.20.dist-info}/RECORD +11 -11
- universal_mcp/applications/replicate/README.md +0 -18
- universal_mcp/applications/replicate/__init__.py +0 -1
- universal_mcp/applications/replicate/app.py +0 -493
- {universal_mcp_applications-0.1.19.dist-info → universal_mcp_applications-0.1.20.dist-info}/WHEEL +0 -0
- {universal_mcp_applications-0.1.19.dist-info → universal_mcp_applications-0.1.20.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,493 +0,0 @@
|
|
|
1
|
-
import collections.abc
|
|
2
|
-
from pathlib import Path
|
|
3
|
-
from typing import Any
|
|
4
|
-
|
|
5
|
-
from loguru import logger
|
|
6
|
-
|
|
7
|
-
import replicate
|
|
8
|
-
from replicate.exceptions import ModelError as ReplicateModelError
|
|
9
|
-
from replicate.exceptions import ReplicateError as ReplicateAPIError
|
|
10
|
-
from replicate.prediction import Prediction
|
|
11
|
-
from universal_mcp.applications.application import APIApplication
|
|
12
|
-
from universal_mcp.exceptions import NotAuthorizedError, ToolError
|
|
13
|
-
from universal_mcp.integrations import Integration
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
class ReplicateApp(APIApplication):
    """
    Application exposing the Replicate API as Universal MCP tools.

    Tools cover synchronous model execution, asynchronous prediction
    management (submit, poll status, await results, cancel), file uploads,
    and a convenience wrapper for image generation.

    The Replicate API token comes from the Integration configured on the
    Universal MCP server; the underlying client is built lazily on first use.
    """

    def __init__(self, integration: Integration, **kwargs) -> None:
        super().__init__(name="replicate", integration=integration, **kwargs)
        # Deferred: constructed on first access of `replicate_client`.
        self._replicate_client: replicate.Client | None = None
|
|
30
|
-
|
|
31
|
-
@property
def replicate_client(self) -> replicate.Client:
    """
    Lazily initializes and returns an authenticated `replicate.Client` instance.

    On first access, fetches credentials from the configured integration,
    extracts the API key (accepting "api_key", "API_KEY", or "apiKey"), and
    caches the client so a single instance serves all subsequent API calls.

    Raises:
        NotAuthorizedError: If the integration returns no usable API key.
    """
    if self._replicate_client is None:
        credentials = self.integration.get_credentials()
        # Security: log only which keys were returned, never the secret
        # values themselves (the previous implementation logged the full
        # credentials dict at INFO level).
        logger.info(
            f"ReplicateApp: Credential keys from integration: {list(credentials.keys()) if isinstance(credentials, dict) else type(credentials).__name__}"
        )
        api_key = (
            credentials.get("api_key")
            or credentials.get("API_KEY")
            or credentials.get("apiKey")
        )
        if not api_key:
            logger.error(
                f"Integration {type(self.integration).__name__} returned credentials for Replicate in unexpected format or key is missing."
            )
            raise NotAuthorizedError(
                "Integration returned empty or invalid API key/token for Replicate."
            )
        self._replicate_client = replicate.Client(api_token=api_key)
    return self._replicate_client
|
|
55
|
-
|
|
56
|
-
async def run(
    self,
    model_ref: str,
    inputs: dict[str, Any],
    use_file_output: bool | None = True,
) -> Any:
    """
    Runs a Replicate model and blocks until it finishes, returning the final
    output. Polling is handled internally by the client. When the model
    streams its output, every streamed item is collected into a list before
    returning — unlike the non-blocking `submit_prediction` method.

    Args:
        model_ref: The model identifier string (e.g., "owner/name" or "owner/name:version_id").
        inputs: A dictionary of inputs for the model.
        use_file_output: If True (default), file URLs in output are wrapped in FileOutput objects.

    Returns:
        The model's output. If the model streams output, a list of all streamed items is returned.

    Raises:
        ToolError: If the Replicate API request fails or the model encounters an error.

    Tags:
        run, execute, ai, synchronous, replicate, important
    """
    try:
        logger.info(
            f"Running Replicate model {model_ref} with inputs: {list(inputs.keys())}"
        )
        # async_run waits (polling internally) until the prediction is done.
        output = await self.replicate_client.async_run(
            ref=model_ref,
            input=inputs,
            use_file_output=use_file_output,
        )
        if not isinstance(output, collections.abc.AsyncIterator):
            logger.info(f"Model {model_ref} run completed successfully.")
            return output
        # Streaming model: drain the async iterator into a plain list.
        logger.info(
            f"Model {model_ref} returned an async iterator, collecting results."
        )
        collected = [piece async for piece in output]
        logger.info(
            f"Collected {len(collected)} items from iterator for {model_ref}."
        )
        return collected
    except ReplicateModelError as e:
        logger.error(
            f"Model error running Replicate model {model_ref}: {e.prediction.error}",
            exc_info=True,
        )
        raise ToolError(
            f"Model {model_ref} failed with error: {e.prediction.error}"
        ) from e
    except ReplicateAPIError as e:
        logger.error(
            f"API error running Replicate model {model_ref}: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to run Replicate model {model_ref}: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error running Replicate model {model_ref}: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while running model {model_ref}: {e}"
        ) from e
|
|
124
|
-
|
|
125
|
-
async def submit_prediction(
    self,
    model_ref: str,
    inputs: dict[str, Any],
    webhook: str | None = None,
    webhook_events_filter: list[str] | None = None,
) -> str:
    """
    Submits a non-blocking, asynchronous model execution request to Replicate
    and immediately returns the prediction ID — in contrast to the
    synchronous `run` method, which waits for the final result. Track the
    job afterwards with `get_prediction` or `await_prediction_result`.

    Args:
        model_ref: The model identifier string (e.g., "owner/name" or "owner/name:version_id").
        inputs: A dictionary of inputs for the model.
        webhook: URL to receive a POST request with prediction updates.
        webhook_events_filter: List of events to trigger webhooks (e.g., ["start", "output", "logs", "completed"]).

    Returns:
        The ID (str) of the created prediction.

    Raises:
        ToolError: If the Replicate API request fails.

    Tags:
        submit, async_job, start, ai, queue, replicate
    """
    try:
        logger.info(
            f"Submitting prediction for Replicate model {model_ref} with inputs: {list(inputs.keys())}"
        )
        # Only pass webhook kwargs the caller actually supplied; wait=False
        # keeps the call fire-and-forget. `version` accepts a model ref str.
        extra_kwargs: dict[str, Any] = {}
        if webhook:
            extra_kwargs["webhook"] = webhook
        if webhook_events_filter:
            extra_kwargs["webhook_events_filter"] = webhook_events_filter

        prediction = await self.replicate_client.predictions.async_create(
            version=model_ref,
            input=inputs,
            wait=False,
            **extra_kwargs,
        )
        logger.info(f"Submitted prediction for {model_ref}, ID: {prediction.id}")
        return prediction.id
    except ReplicateAPIError as e:
        logger.error(
            f"API error submitting prediction for Replicate model {model_ref}: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to submit prediction for Replicate model {model_ref}: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error submitting prediction for Replicate model {model_ref}: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while submitting prediction for {model_ref}: {e}"
        ) from e
|
|
186
|
-
|
|
187
|
-
async def get_prediction(self, prediction_id: str) -> Prediction:
    """
    Fetches the current state of a Replicate prediction by its ID without
    waiting for completion — a non-blocking status check suitable for
    monitoring progress, unlike `await_prediction_result`.

    Args:
        prediction_id: The unique ID of the prediction.

    Returns:
        A Replicate Prediction object containing status, logs, output (if ready), etc.

    Raises:
        ToolError: If the Replicate API request fails.

    Tags:
        status, check, async_job, monitoring, ai, replicate
    """
    try:
        logger.info(f"Getting status for Replicate prediction ID: {prediction_id}")
        result = await self.replicate_client.predictions.async_get(
            id=prediction_id
        )
        logger.info(f"Status for prediction {prediction_id}: {result.status}")
        return result
    except ReplicateAPIError as e:
        logger.error(
            f"API error getting status for Replicate prediction {prediction_id}: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to get status for Replicate prediction {prediction_id}: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error getting status for Replicate prediction {prediction_id}: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while getting status for prediction {prediction_id}: {e}"
        ) from e
|
|
226
|
-
|
|
227
|
-
async def await_prediction_result(self, prediction_id: str) -> Any:
    """
    Returns the final output for a prediction ID, blocking until the job
    reaches a terminal state if it is still running. Complements
    `submit_prediction`; raises if the prediction failed or was canceled.

    Args:
        prediction_id: The unique ID of the prediction.

    Returns:
        The output of the prediction. If the model streams output, a list of all streamed items is returned.

    Raises:
        ToolError: If the prediction fails, is canceled, or an API error occurs.

    Tags:
        result, fetch_output, async_job, wait, ai, replicate
    """
    try:
        logger.info(f"Fetching output for Replicate prediction ID: {prediction_id}")
        prediction = await self.replicate_client.predictions.async_get(
            id=prediction_id
        )

        terminal_states = ("succeeded", "failed", "canceled")
        if prediction.status not in terminal_states:
            logger.info(
                f"Prediction {prediction_id} status is {prediction.status}. Waiting for completion..."
            )
            # async_wait mutates the prediction object in place.
            await prediction.async_wait()
            logger.info(
                f"Prediction {prediction_id} finished with status: {prediction.status}"
            )

        # Guard clauses: every non-success terminal state raises.
        if prediction.status == "failed":
            logger.error(f"Prediction {prediction_id} failed: {prediction.error}")
            raise ToolError(
                f"Prediction {prediction_id} failed: {prediction.error}"
            )
        if prediction.status == "canceled":
            logger.warning(f"Prediction {prediction_id} was canceled.")
            raise ToolError(f"Prediction {prediction_id} was canceled.")
        if prediction.status != "succeeded":
            logger.error(
                f"Prediction {prediction_id} did not succeed. Status: {prediction.status}"
            )
            raise ToolError(
                f"Prediction {prediction_id} did not succeed. Status: {prediction.status}"
            )

        logger.info(f"Successfully fetched output for prediction {prediction_id}.")
        return prediction.output
    except (
        ReplicateModelError
    ) as e:  # Usually already surfaced via the "failed" status branch above
        logger.error(
            f"Model error fetching output for Replicate prediction {prediction_id}: {e.prediction.error}",
            exc_info=True,
        )
        raise ToolError(
            f"Prediction {prediction_id} (model) failed: {e.prediction.error}"
        ) from e
    except ReplicateAPIError as e:
        logger.error(
            f"API error fetching output for Replicate prediction {prediction_id}: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to fetch output for Replicate prediction {prediction_id}: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error fetching output for Replicate prediction {prediction_id}: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while fetching output for prediction {prediction_id}: {e}"
        ) from e
|
|
305
|
-
|
|
306
|
-
async def cancel_prediction(self, prediction_id: str) -> None:
    """
    Requests cancellation of a running or queued Replicate prediction. The
    prediction's current status is checked first; if it is already in a
    terminal state (succeeded, failed, canceled) the request is skipped
    with a warning instead of erroring.

    Args:
        prediction_id: The unique ID of the prediction to cancel.

    Returns:
        None.

    Raises:
        ToolError: If the cancellation request fails.

    Tags:
        cancel, async_job, ai, replicate, management
    """
    try:
        logger.info(f"Cancelling Replicate prediction ID: {prediction_id}")
        prediction = await self.replicate_client.predictions.async_get(
            id=prediction_id
        )
        if prediction.status in ("succeeded", "failed", "canceled"):
            # Terminal jobs cannot be canceled; warn and return quietly.
            logger.warning(
                f"Prediction {prediction_id} is already in a terminal state: {prediction.status}. Cannot cancel."
            )
        else:
            await prediction.async_cancel()
            logger.info(
                f"Cancel request sent for prediction {prediction_id}. New status: {prediction.status}"
            )
        return None
    except ReplicateAPIError as e:
        logger.error(
            f"API error cancelling Replicate prediction {prediction_id}: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to cancel Replicate prediction {prediction_id}: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error cancelling Replicate prediction {prediction_id}: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while cancelling prediction {prediction_id}: {e}"
        ) from e
|
|
353
|
-
|
|
354
|
-
async def upload_file(self, file_path: str) -> str:
    """
    Uploads a local file to Replicate's storage and returns its public URL,
    suitable for file-based model inputs passed to `run` or
    `submit_prediction`. Fails if the file does not exist or the upload
    itself errors.

    Args:
        file_path: The absolute or relative path to the local file.

    Returns:
        A string containing the public URL of the uploaded file (e.g., "https://replicate.delivery/pbxt/...").

    Raises:
        ToolError: If the file is not found or if the upload operation fails.

    Tags:
        upload, file, storage, replicate, important
    """
    try:
        local_path = Path(file_path)
        if not local_path.exists():
            raise FileNotFoundError(f"File not found at path: {file_path}")

        logger.info(f"Uploading file to Replicate: {file_path}")
        # replicate's Files.async_create accepts a Path and opens it itself.
        uploaded = await self.replicate_client.files.async_create(
            file=local_path
        )
        url = uploaded.urls["get"]
        logger.info(
            f"File {file_path} uploaded successfully to Replicate. URL: {url}"
        )
        return url
    except FileNotFoundError as e:
        logger.error(
            f"File not found for Replicate upload: {file_path}", exc_info=True
        )
        raise ToolError(f"File not found for Replicate upload: {file_path}") from e
    except ReplicateAPIError as e:
        logger.error(
            f"API error uploading file {file_path} to Replicate: {e.detail}",
            exc_info=True,
        )
        raise ToolError(
            f"Failed to upload file {file_path} to Replicate: {e.detail}"
        ) from e
    except Exception as e:
        logger.error(
            f"Unexpected error uploading file {file_path} to Replicate: {e}",
            exc_info=True,
        )
        raise ToolError(
            f"An unexpected error occurred while uploading file {file_path} to Replicate: {e}"
        ) from e
|
|
406
|
-
|
|
407
|
-
async def generate_image(
    self,
    prompt: str,
    model_ref: str = "stability-ai/sdxl:c221b2b8ef527988fb59bf24a8b97c4561f1c671f73bd389f866bfb27c061316",
    negative_prompt: str | None = None,
    width: int | None = 1024,
    height: int | None = 1024,
    num_outputs: int | None = 1,
    seed: int | None = None,
    extra_arguments: dict[str, Any] | None = None,
) -> Any:
    """
    Generates images synchronously using a specified model, defaulting to SDXL.
    A convenience wrapper around the generic `run` function that exposes common
    image parameters, waits for completion, and best-effort saves file-like
    outputs locally before returning the result.

    Args:
        prompt: The text prompt for image generation.
        model_ref: The Replicate model identifier string.
            Defaults to "stability-ai/sdxl:c221b2b8ef527988fb59bf24a8b97c4561f1c671f73bd389f866bfb27c061316".
        negative_prompt: Optional text to specify what not to include.
        width: Width of the generated image(s).
        height: Height of the generated image(s).
        num_outputs: Number of images to generate.
        seed: Optional random seed for reproducibility.
        extra_arguments: Dictionary of additional arguments specific to the model.

    Returns:
        The output from the image generation model, typically a list of image URLs.

    Raises:
        ToolError: If the image generation fails.

    Tags:
        generate, image, ai, replicate, sdxl, important, default
    """
    logger.info(
        f"Generating image with Replicate model {model_ref} for prompt: '{prompt[:30]}...'"
    )
    inputs: dict[str, Any] = {"prompt": prompt}
    # Include only the optional parameters the caller actually provided.
    optional_params = {
        "negative_prompt": negative_prompt,
        "width": width,
        "height": height,
        "num_outputs": num_outputs,
        "seed": seed,
    }
    for key, value in optional_params.items():
        if value is not None:
            inputs[key] = value

    if extra_arguments:
        inputs.update(extra_arguments)
        logger.debug(
            f"Merged extra_arguments for image generation. Final input keys: {list(inputs.keys())}"
        )

    try:
        # `run` handles waiting and collects streamed iterators into a list.
        result = await self.run(model_ref=model_ref, inputs=inputs)
        logger.info(f"Image generation successful for model {model_ref}.")
        # Best-effort local save. The previous implementation assumed the
        # result was always an iterable of FileOutput objects and crashed on
        # single outputs or plain URL strings; now only items that expose
        # `.read()` (FileOutput) are written, and non-list results are
        # handled by wrapping them.
        items = result if isinstance(result, list) else [result]
        for index, item in enumerate(items):
            if hasattr(item, "read"):
                with open(f"output_{index}.png", "wb") as file:
                    file.write(item.read())
        return result
    except Exception as e:  # run method already wraps in ToolError
        logger.error(
            f"Error during generate_image call for model {model_ref}: {e}",
            exc_info=True,
        )
        # Re-raise ToolError untouched; wrap anything unexpected from this level.
        if isinstance(e, ToolError):
            raise
        raise ToolError(
            f"Image generation failed for model {model_ref}: {e}"
        ) from e
|
|
483
|
-
|
|
484
|
-
def list_tools(self) -> list[callable]:
    """Return the tool callables this application exposes to the MCP server."""
    # Ordered by typical workflow: execute, manage async jobs, then helpers.
    tools = (
        self.run,
        self.submit_prediction,
        self.get_prediction,
        self.await_prediction_result,
        self.cancel_prediction,
        self.upload_file,
        self.generate_image,
    )
    return list(tools)
|
{universal_mcp_applications-0.1.19.dist-info → universal_mcp_applications-0.1.20.dist-info}/WHEEL
RENAMED
|
File without changes
|
|
File without changes
|