@mixio-pro/kalaasetu-mcp 2.0.4-beta → 2.0.6-beta
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -4
- package/src/tools/fal/generate.ts +30 -29
- package/src/tools/get-status.ts +114 -61
- package/src/tools/image-to-video.ts +24 -50
package/package.json
CHANGED
@@ -1,12 +1,12 @@
 {
   "name": "@mixio-pro/kalaasetu-mcp",
-  "version": "2.0.4-beta",
+  "version": "2.0.6-beta",
   "description": "A powerful Model Context Protocol server providing AI tools for content generation and analysis",
   "type": "module",
   "module": "src/index.ts",
   "main": "src/index.ts",
   "bin": {
-    "kalaasetu-mcp": "
+    "kalaasetu-mcp": "bin/cli.js"
   },
   "files": [
     "src",
@@ -35,7 +35,7 @@
   "license": "MIT",
   "repository": {
     "type": "git",
-    "url": "https://github.com/mixiopro/kalaasetu-mcp.git"
+    "url": "git+https://github.com/mixiopro/kalaasetu-mcp.git"
   },
   "bugs": {
     "url": "https://github.com/mixiopro/kalaasetu-mcp/issues"
@@ -58,4 +58,4 @@
     "wav": "^1.0.2",
     "zod": "^4.1.12"
   }
-}
+}
package/src/tools/fal/generate.ts
CHANGED

@@ -12,6 +12,7 @@ import {
   getApiKey,
   loadFalConfig,
 } from "./config";
+import { checkFalStatus, fetchFalResult } from "../get-status";

 /**
  * Helper to wait for a specified duration.
@@ -92,7 +93,7 @@ export const falGenerate = {
   description:
     "The primary tool for generating AI content (images, videos, etc.) using fal.ai. " +
     "This tool handles polling internally and streams progress updates to the client. " +
-    "If the generation takes too long (timeout or error), it returns a '
+    "If the generation takes too long (timeout or error), it returns a 'resume_endpoint' that you can use to resume polling. " +
     "Use 'fal_list_presets' to discover available intents and names. " +
     "PREREQUISITE: If using local files as parameters, you MUST upload them first using 'fal_upload_file' and use the resulting CDN URL. " +
     "ONLY USE WHEN WORKING WITH FAL MODELS/PRESETS.",
@@ -111,12 +112,12 @@ export const falGenerate = {
         "These override the default values defined in the preset. " +
         "NOTE: For image-to-video or video-to-video tasks, use 'fal_upload_file' first and pass the resulting CDN URL here."
       ),
-
+    resume_endpoint: z
       .string()
       .optional()
       .describe(
         "If provided, the tool will resume polling for an existing request instead of starting a new one. " +
-        "Use the '
+          "Use the 'resume_endpoint' returned in an 'IN_PROGRESS' response or after a timeout error."
       ),
   }),
   timeoutMs: 90000, // 90 seconds MCP timeout (internal timeout is 60s)
@@ -124,7 +125,7 @@ export const falGenerate = {
     args: {
       preset_name?: string;
       parameters?: Record<string, any>;
-
+      resume_endpoint?: string;
       auto_enhance?: boolean;
     },
     context?: ProgressContext
@@ -135,21 +136,21 @@ export const falGenerate = {
       let requestId: string;
       const config = loadFalConfig();

-      if (args.
-        // Check if
-        if (args.
-          // NEW:
-          statusUrl = args.
+      if (args.resume_endpoint) {
+        // Check if resume_endpoint is a full URL (new format) or legacy ID
+        if (args.resume_endpoint.startsWith("http")) {
+          // NEW: resume_endpoint IS the status/response URL
+          statusUrl = args.resume_endpoint;
           // Derive responseUrl by removing /status suffix if present
-          responseUrl = args.
+          responseUrl = args.resume_endpoint.replace(/\/status$/, "");
           // Extract requestId from URL for logging
-          const urlParts = args.
+          const urlParts = args.resume_endpoint.split("/");
           const lastPart = urlParts[urlParts.length - 1] || "";
           requestId =
             lastPart.replace("/status", "") ||
             urlParts[urlParts.length - 2] ||
             "unknown";
-          context?.log?.info(`Resuming with FAL URL: ${args.
+          context?.log?.info(`Resuming with FAL URL: ${args.resume_endpoint}`);
         } else {
           // LEGACY: Try to resolve model from preset_name or parse modelId::requestId
           let modelIdFromPreset: string | undefined;
@@ -165,8 +166,8 @@ export const falGenerate = {
             }
           }

-          if (args.
-            const parts = args.
+          if (args.resume_endpoint.includes("::")) {
+            const parts = args.resume_endpoint.split("::");
             const mId = parts[0];
             const rId = parts[1] || "";

@@ -181,7 +182,7 @@ export const falGenerate = {
             );
           } else {
             // Legacy/Fallback for raw UUIDs
-            requestId = args.
+            requestId = args.resume_endpoint;

             if (modelIdFromPreset) {
               // Best case: User provided the preset name!
@@ -192,8 +193,8 @@ export const falGenerate = {
               );
             } else {
               // Worst case: No preset, no model in ID. Try legacy generic URL
-              statusUrl = `${FAL_QUEUE_URL}/requests/${args.
-              responseUrl = `${FAL_QUEUE_URL}/requests/${args.
+              statusUrl = `${FAL_QUEUE_URL}/requests/${args.resume_endpoint}/status`;
+              responseUrl = `${FAL_QUEUE_URL}/requests/${args.resume_endpoint}`;

               // Verify/Recovery: Check if generic URL works, if not try to guess model
               // ... (Smart recovery logic below)
@@ -230,7 +231,7 @@ export const falGenerate = {
           }
         }
         context?.log?.info(
-          `Resuming polling for request: ${args.
+          `Resuming polling for request: ${args.resume_endpoint}`
         );
       }
     } // Close the LEGACY else block (line 149)
@@ -348,13 +349,13 @@ export const falGenerate = {
         if (context?.streamContent) {
           await context.streamContent({
             type: "text" as const,
-            text: `[FAL] Generation started.
+            text: `[FAL] Generation started. resume_endpoint: ${statusUrl} (use this URL to check status)`,
           });
         }
       }

       // Stream message for resume calls
-      if (args.
+      if (args.resume_endpoint && context?.streamContent) {
         await context.streamContent({
           type: "text" as const,
           text: `[FAL] Resuming status check for job: ${requestId}`,
@@ -362,7 +363,7 @@ export const falGenerate = {
       }

       const startTime = Date.now();
-      const MAX_POLL_TIME = 60000; // 60 seconds internal timeout - then return
+      const MAX_POLL_TIME = 60000; // 60 seconds internal timeout - then return resume_endpoint
       let pollCount = 0;
       const POLL_INTERVAL = 3000;

@@ -370,14 +371,14 @@ export const falGenerate = {
         pollCount++;
         let res;
         try {
-          res = await
+          res = await checkFalStatus(statusUrl);
         } catch (e: any) {
           if (`${e}`.includes("405")) {
             context?.log?.info(
               `Status check 405 on ${statusUrl}, trying fallback to responseUrl...`
             );
             // Try checking the request root URL instead of /status
-            res = await
+            res = await fetchFalResult(responseUrl);
             // If successful, update statusUrl to match for future polls
             statusUrl = responseUrl;
           } else {
@@ -420,7 +421,7 @@ export const falGenerate = {
           await context.reportProgress({ progress: 100, total: 100 });
         }
         // responseUrl is now guaranteed to be correct/fresh from polling
-        const finalResult = await
+        const finalResult = await fetchFalResult(responseUrl);
         return JSON.stringify(finalResult);
       }

@@ -433,12 +434,12 @@ export const falGenerate = {
         await wait(POLL_INTERVAL);
       }

-      // Timeout - return composite
+      // Timeout - return composite resume_endpoint
       // We need to know modelId here. If we started new, we have 'preset'
       // If we resumed, we parsed 'mId' or used raw.
       const currentModelId =
-        args.
-          ? args.
+        args.resume_endpoint && args.resume_endpoint.includes("::")
+          ? args.resume_endpoint.split("::")[0]
           : args.preset_name
           ? config.presets.find((p) => p.presetName === args.preset_name)
               ?.modelId
@@ -447,11 +448,11 @@ export const falGenerate = {
       return JSON.stringify({
         status: "IN_PROGRESS",
         request_id: requestId,
-
+        resume_endpoint: statusUrl, // Use the FULL URL for reliable resume
         status_url: statusUrl,
         response_url: responseUrl,
         message:
-          "The generation is still in progress. Call this tool again with
+          "The generation is still in progress. Call this tool again with resume_endpoint (the URL) to continue polling.",
       });
     }, "fal_generate");
   },
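Net effect for callers: fal_generate now reports long-running work through a full resume_endpoint URL instead of a bare identifier, and the same tool accepts that URL back to continue polling. A minimal client-side sketch of that loop, assuming a hypothetical callTool helper and an example preset name (neither appears in this diff):

// Hypothetical MCP-client loop around the updated fal_generate contract.
async function generateWithResume(
  callTool: (name: string, args: unknown) => Promise<any> // assumed helper that invokes an MCP tool and parses its JSON reply
) {
  let result = await callTool("fal_generate", {
    preset_name: "text-to-image", // assumed preset; discover real ones via fal_list_presets
    parameters: { prompt: "a lighthouse at dusk" },
  });

  // After the 60s internal poll window the tool returns
  // { status: "IN_PROGRESS", resume_endpoint: "https://queue.fal.run/...", ... }.
  while (result?.status === "IN_PROGRESS" && result.resume_endpoint) {
    result = await callTool("fal_generate", {
      resume_endpoint: result.resume_endpoint, // pass the URL back verbatim
    });
  }
  return result;
}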
package/src/tools/get-status.ts
CHANGED
@@ -12,7 +12,20 @@ const FAL_KEY = process.env.FAL_KEY;
 /**
  * Check FAL generation status using the status URL
  */
-
+interface VertexOperation {
+  done?: boolean;
+  response?: {
+    videos?: Array<{
+      bytesBase64Encoded?: string;
+    }>;
+    saved_videos?: any[];
+    [key: string]: any;
+  };
+  error?: any;
+  [key: string]: any;
+}
+
+export async function checkFalStatus(statusUrl: string): Promise<any> {
   if (!FAL_KEY) {
     throw new Error("FAL_KEY environment variable not set");
   }
@@ -30,45 +43,54 @@ async function checkFalStatus(statusUrl: string): Promise<any> {
     throw new Error(`FAL API error [${response.status}]: ${errorText}`);
   }

-
-
-
-
-
-
-
-
-
-
-
-
-
+  return await response.json();
+}
+
+export async function fetchFalResult(responseUrl: string): Promise<any> {
+  if (!FAL_KEY) {
+    throw new Error("FAL_KEY environment variable not set");
+  }
+
+  const response = await fetch(responseUrl, {
+    method: "GET",
+    headers: {
+      Authorization: `Key ${FAL_KEY}`,
+      "Content-Type": "application/json",
+    },
+  });
+
+  if (!response.ok) {
+    const errorText = await response.text();
+    throw new Error(`FAL API error [${response.status}]: ${errorText}`);
   }

-  return
+  return await response.json();
 }

-
-
- */
-async function checkVertexStatus(
-  operationName: string,
-  projectId: string,
-  locationId: string
+export async function checkVertexStatus(
+  resumeEndpoint: string
 ): Promise<any> {
   const accessToken = await getGoogleAccessToken();

-
+  // resumeEndpoint is composite format: fetchUrl||operationName||outputPath
+  const parts = resumeEndpoint.split("||");
+  const fetchUrl = parts[0] || "";
+  const operationName = parts[1] || "";
+  const outputPath = parts[2] || ""; // Optional custom output path

-
-
+  if (!fetchUrl || !operationName) {
+    throw new Error(
+      "Invalid Vertex resume_endpoint format. Expected 'fetchUrl||operationName[||outputPath]'."
+    );
+  }
+
+  const response = await fetch(fetchUrl, {
+    method: "POST",
     headers: {
       Authorization: `Bearer ${accessToken}`,
       "Content-Type": "application/json",
     },
+    body: JSON.stringify({ operationName }),
   });

   if (!response.ok) {
@@ -76,64 +98,94 @@ async function checkVertexStatus(
     throw new Error(`Vertex AI API error [${response.status}]: ${errorText}`);
   }

-
+  const result = (await response.json()) as VertexOperation;
+
+  // If completed, save videos if present
+  const done = !!result.done || !!result.response;
+  if (done) {
+    const resp = result.response || result;
+    if (Array.isArray(resp?.videos) && resp.videos.length > 0) {
+      const { getStorage } = await import("../storage");
+      const { generateTimestampedFilename } = await import("../utils/filename");
+      const storage = getStorage();
+      const savedVideos: any[] = [];
+
+      for (let i = 0; i < resp.videos.length; i++) {
+        const v = resp.videos[i];
+        if (v?.bytesBase64Encoded) {
+          let filePath: string;
+          if (outputPath) {
+            // Use custom path, add index for subsequent videos
+            filePath =
+              i === 0
+                ? outputPath
+                : outputPath.replace(/\.mp4$/i, `_${i}.mp4`);
+          } else {
+            // Default timestamped filename
+            filePath = generateTimestampedFilename(`video_output_${i}.mp4`);
+          }
+          const buf = Buffer.from(v.bytesBase64Encoded, "base64");
+          const url = await storage.writeFile(filePath, buf);
+          savedVideos.push({
+            url,
+            filename: filePath,
+            mimeType: "video/mp4",
+          });
+        }
+      }
+
+      if (savedVideos.length > 0) {
+        resp.saved_videos = savedVideos;
+      }
+    }
+  }
+
+  return result;
 }

 export const getGenerationStatus = {
   name: "get_generation_status",
   description:
     "Check the status or retrieve the result of a generation operation that was started by 'fal_generate' or 'generateVideoi2v'. " +
-    "Use this when the original generation tool returned an 'IN_PROGRESS' status with a '
-    "Pass the
-    "For FAL operations, the
-    "For Vertex AI operations, the
+    "Use this when the original generation tool returned an 'IN_PROGRESS' status with a 'resume_endpoint'. " +
+    "Pass the resume_endpoint exactly as it was returned. " +
+    "For FAL operations, the resume_endpoint is a full URL. " +
+    "For Vertex AI operations, the resume_endpoint is an operation name or full path.",
   parameters: z.object({
-
+    resume_endpoint: z
       .string()
       .describe(
-        "The
+        "The resume_endpoint returned by the original generation tool. " +
         "For FAL: This is a full URL (starts with 'https://queue.fal.run/...'). " +
-        "For Vertex AI: This is an operation name."
+          "For Vertex AI: This is an operation name or full path."
       ),
     source: z
       .enum(["fal", "vertex", "auto"])
       .optional()
       .default("auto")
       .describe(
-        "Source of the operation: 'fal' for FAL AI, 'vertex' for Google Vertex AI, or 'auto' to auto-detect based on
+        "Source of the operation: 'fal' for FAL AI, 'vertex' for Google Vertex AI, or 'auto' to auto-detect based on resume_endpoint format."
       ),
-    project_id: z
-      .string()
-      .optional()
-      .default("mixio-pro")
-      .describe("GCP Project ID (only needed for Vertex AI operations)."),
-    location_id: z
-      .string()
-      .optional()
-      .default("us-central1")
-      .describe("GCP region (only needed for Vertex AI operations)."),
   }),
   timeoutMs: 30000, // 30 seconds for status check
   execute: async (args: {
-
+    resume_endpoint: string;
     source?: "fal" | "vertex" | "auto";
-    project_id?: string;
-    location_id?: string;
   }) => {
     return safeToolExecute(async () => {
       const {
-
+        resume_endpoint,
         source = "auto",
-        project_id = "mixio-pro",
-        location_id = "us-central1",
       } = args;
+      const project_id = "mixio-pro";
+      const location_id = "us-central1";

-      // Auto-detect source based on
+      // Auto-detect source based on resume_endpoint format
       let detectedSource = source;
       if (source === "auto") {
         if (
-
-
+          resume_endpoint.startsWith("https://queue.fal.run") ||
+          resume_endpoint.startsWith("https://fal.run")
         ) {
           detectedSource = "fal";
         } else {
@@ -144,27 +196,27 @@ export const getGenerationStatus = {
       let result: any;

       if (detectedSource === "fal") {
-        result = await checkFalStatus(
+        result = await checkFalStatus(resume_endpoint);
       } else {
-        result = await checkVertexStatus(
+        result = await checkVertexStatus(resume_endpoint);
       }

       // Normalize the response
       const status =
-        result.status || (result.done ? "COMPLETED" : "IN_PROGRESS");
+        (result as any).status || ((result as any).done ? "COMPLETED" : "IN_PROGRESS");

       return JSON.stringify(
         {
           source: detectedSource,
           status,
-
+          resume_endpoint,
           result,
           message:
             status === "COMPLETED"
               ? "Generation completed! The result is included in the 'result' field."
               : status === "FAILED"
               ? "Generation failed. Check the 'result' field for error details."
-              : "Generation is still in progress. Call this tool again with the same
+              : "Generation is still in progress. Call this tool again with the same resume_endpoint to check later.",
         },
         null,
         2
@@ -172,3 +224,4 @@ export const getGenerationStatus = {
     }, "get_generation_status");
   },
 };
+
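Taken together, get_generation_status now keys everything off the single resume_endpoint argument: project_id and location_id are hard-coded, and the endpoint's shape drives auto-detection (queue.fal.run / fal.run URLs go to FAL, anything else to Vertex). A rough usage sketch, assuming a hypothetical callTool helper and illustrative endpoint strings:

declare function callTool(name: string, args: unknown): Promise<any>; // assumed MCP-client helper

async function checkBoth() {
  // FAL job: the resume_endpoint is the full queue URL, so source auto-detects to "fal".
  const falStatus = await callTool("get_generation_status", {
    resume_endpoint:
      "https://queue.fal.run/fal-ai/example-model/requests/1234/status", // illustrative
  });

  // Vertex job: the composite string built by the image-to-video tool does not start
  // with a fal.run host, so auto-detection falls through to "vertex".
  const vertexStatus = await callTool("get_generation_status", {
    resume_endpoint:
      "https://us-central1-aiplatform.googleapis.com/v1/...:fetchPredictOperation||projects/.../operations/abcd||", // illustrative fetchUrl||operationName||outputPath
  });

  return { falStatus, vertexStatus };
}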
package/src/tools/image-to-video.ts
CHANGED

@@ -8,6 +8,7 @@ import {
 } from "../utils/prompt-enhancer-presets";

 import { getGoogleAccessToken } from "../utils/google-auth";
+import { checkVertexStatus } from "./get-status";

 async function wait(ms: number): Promise<void> {
   return new Promise((resolve) => setTimeout(resolve, ms));
@@ -47,7 +48,7 @@ export const imageToVideo = {
   description:
     "Generate professional-quality cinematic videos from a starting image and text prompt using Google's Vertex AI Veo models. " +
     "This tool follows a 'Synchronous Facade' pattern: it handles polling internally but can be paused/resumed. " +
-    "If the generation takes too long, it returns a '
+    "If the generation takes too long, it returns a 'resume_endpoint' that you MUST use to call this tool again to pick up progress. " +
     "It produces state-of-the-art cinematic results. " +
     "ONLY USE WHEN WORKING WITH GOOGLE VERTEX AI MODELS.",
   parameters: z.object({
@@ -109,18 +110,6 @@ export const imageToVideo = {
       .describe(
         "Optional: Local path to save the resulting .mp4 file. Defaults to timestamped filename."
       ),
-    project_id: z
-      .string()
-      .optional()
-      .default("mixio-pro")
-      .describe("GCP Project ID for Vertex billing. Default is mixio-pro."),
-    location_id: z
-      .string()
-      .optional()
-      .default("us-central1")
-      .describe(
-        "GCP region for Vertex AI processing (Default is 'us-central1')."
-      ),
     model_id: z
       .string()
       .optional()
@@ -135,12 +124,12 @@ export const imageToVideo = {
         "If true, Vertex will attempt to synthesize synchronized audio for the video."
       )
       .default(false),
-
+    resume_endpoint: z
       .string()
       .optional()
       .describe(
         "If provided, the tool will check the status of an existing Vertex operation instead of starting a new one. " +
-        "Use the '
+          "Use the 'resume_endpoint' returned in an 'IN_PROGRESS' response."
       ),
     auto_enhance: z
       .boolean()
@@ -169,11 +158,9 @@ export const imageToVideo = {
       person_generation?: string;
       reference_images?: string[] | string;
       output_path?: string;
-      project_id?: string;
-      location_id?: string;
       model_id?: string;
       generate_audio?: boolean;
-
+      resume_endpoint?: string;
       enhancer_preset?: string;
       auto_enhance?: boolean;
     },
@@ -193,8 +180,8 @@ export const imageToVideo = {
     }
   ) {
     return safeToolExecute(async () => {
-      const projectId =
-      const location =
+      const projectId = "mixio-pro";
+      const location = "us-central1";
       const modelId = args.model_id || "veo-3.1-fast-generate-preview";

       // Validate and parse duration_seconds - snap to nearest 4, 6, or 8
@@ -260,13 +247,13 @@ export const imageToVideo = {

       // If resuming, reconstruct the full operation path from the UUID
       let operationName: string | undefined;
-      if (args.
+      if (args.resume_endpoint) {
         // Support both UUID-only and full path formats
-        if (args.
-          operationName = args.
+        if (args.resume_endpoint.includes("/")) {
+          operationName = args.resume_endpoint; // Already a full path
         } else {
           // Reconstruct full path from UUID
-          operationName = `projects/${projectId}/locations/${location}/publishers/google/models/${modelId}/operations/${args.
+          operationName = `projects/${projectId}/locations/${location}/publishers/google/models/${modelId}/operations/${args.resume_endpoint}`;
         }
       }
       let current: any;
@@ -461,46 +448,33 @@ export const imageToVideo = {
       );
     }

-      //
-      //
-      const
+      // Construct the composite resume_endpoint: fetchUrl||operationName||outputPath
+      // This allows get_generation_status to use the URL directly and preserve output_path
+      const outputPathPart = args.output_path || "";
+      const compositeResumeEndpoint = `${fetchUrl}||${operationName}||${outputPathPart}`;

-      // Stream the
+      // Stream the resume_endpoint to the LLM immediately (before polling starts)
       // This way the LLM has it even if MCP client times out during polling
       if (context?.streamContent) {
-        const isResume = !!args.
+        const isResume = !!args.resume_endpoint;
         await context.streamContent({
           type: "text" as const,
           text: isResume
-            ? `[Vertex] Resuming status check for job
-            : `[Vertex] Video generation started.
+            ? `[Vertex] Resuming status check for job`
+            : `[Vertex] Video generation started. resume_endpoint: ${compositeResumeEndpoint} (use this to check status if needed)`,
         });
       }

       // Poll for status - keep polling until done
-      //
+      // Resume_endpoint was already streamed, so if MCP client times out the LLM still has it
       let done = current ? !!current.done || !!current.response : false;
       const startTime = Date.now();
-      const MAX_POLL_TIME = 60000; // 60 seconds internal timeout - then return
+      const MAX_POLL_TIME = 60000; // 60 seconds internal timeout - then return resume_endpoint

       while (!done && Date.now() - startTime < MAX_POLL_TIME) {
         await wait(10000); // 10 second intervals

-
-          method: "POST",
-          headers: {
-            Authorization: `Bearer ${token}`,
-            "Content-Type": "application/json",
-          },
-          body: JSON.stringify({ operationName }),
-        });
-        if (!poll.ok) {
-          const text = await poll.text();
-          throw new Error(
-            `Vertex operation poll failed: ${poll.status} ${text}`
-          );
-        }
-        current = (await poll.json()) as any;
+        current = await checkVertexStatus(compositeResumeEndpoint);
         done = !!current.done || !!current.response;

         if (context?.reportProgress) {
@@ -529,9 +503,9 @@ export const imageToVideo = {
       return JSON.stringify({
         status: "IN_PROGRESS",
         request_id: operationName,
-
+        resume_endpoint: compositeResumeEndpoint,
         message:
-          "Still in progress. Call this tool again with
+          "Still in progress. Call this tool again with resume_endpoint to continue checking.",
       });
     }

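The composite resume_endpoint assembled here is the same string checkVertexStatus (in get-status.ts above) unpacks on every poll. A standalone sketch of that contract, with illustrative URL and operation values that are not taken from this diff:

// Build side (this tool): three segments joined with "||"; the third may be empty.
const fetchUrl =
  "https://us-central1-aiplatform.googleapis.com/v1/projects/mixio-pro/locations/us-central1/publishers/google/models/veo-3.1-fast-generate-preview:fetchPredictOperation"; // illustrative
const operationName =
  "projects/mixio-pro/locations/us-central1/publishers/google/models/veo-3.1-fast-generate-preview/operations/abcd"; // illustrative
const outputPath = "renders/clip.mp4"; // "" when the caller gave no output_path
const compositeResumeEndpoint = `${fetchUrl}||${operationName}||${outputPath}`;

// Consume side (checkVertexStatus): split on "||", then POST the operationName to the
// fetch URL; when the operation is done, any bytesBase64Encoded videos are written to
// the output path (or a timestamped default).
const [pollUrl, opName, savePath = ""] = compositeResumeEndpoint.split("||");
const pollRequest = {
  method: "POST",
  headers: {
    Authorization: "Bearer <access token>", // obtained via getGoogleAccessToken() in the real code
    "Content-Type": "application/json",
  },
  body: JSON.stringify({ operationName: opName }),
};
// await fetch(pollUrl, pollRequest) would then mirror the poll shown in the diff.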