cerevox 2.19.0 → 2.20.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1003,7 +1003,11 @@ server.registerTool('generate-image', {
  prompt: zod_1.z
  .string()
  .describe('The prompt to generate. 一般要严格对应 story_board 中当前场景的 start_frame 或 end_frame 中的字段描述'),
- sceneIndex: zod_1.z.number().min(1).describe('场景索引,从1开始的下标'),
+ sceneIndex: zod_1.z
+ .number()
+ .min(1)
+ .optional()
+ .describe('场景索引,从1开始的下标,如果非场景对应素材,则可不传,场景素材必传'),
  storyBoardFile: zod_1.z
  .string()
  .optional()
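The hunk above relaxes `sceneIndex` from a required field to an optional one while keeping the `min(1)` lower bound. A minimal sketch of how the new field parses, with the schema reconstructed standalone for illustration (not the package's own `inputSchema` object):

```js
const { z } = require('zod');

// Reconstruction of the 2.20.1 field definition, outside the tool schema.
const sceneIndex = z
    .number()
    .min(1)
    .optional();

console.log(sceneIndex.safeParse(undefined).success); // true: non-scene assets may omit it
console.log(sceneIndex.safeParse(3).success);         // true: 1-based scene index
console.log(sceneIndex.safeParse(0).success);         // false: min(1) still applies when a value is given
```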
@@ -1088,16 +1092,16 @@ server.registerTool('generate-image', {
  try {
  // 验证session状态
  const currentSession = await validateSession('generate-image');
+ const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
  // 检查 storyboard 标志
- if (!checkStoryboardFlag) {
+ if (!checkStoryboardFlag && (0, node_fs_1.existsSync)(storyBoardPath)) {
  checkStoryboardFlag = true;
  return createErrorResponse('必须先审查生成的 story_board.json 内容,确保每个场景中的stage_atmosphere内容按照规则被正确融合到start_frame和video_prompt中,不得遗漏,检查完成后先汇报,如果有问题,应当先修改 story_board.json 内容,然后再调用 generate-image 生成图片。注意修改 story_board 内容时,仅修改相应字段的字符串值,不要破坏JSON格式!', 'generate-image');
  }
  const validatedFileName = validateFileName(saveToFileName);
  // 校验 prompt 与 story_board.json 中场景设定的一致性
- if (!skipConsistencyCheck) {
+ if (sceneIndex && !skipConsistencyCheck) {
  try {
- const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
  if ((0, node_fs_1.existsSync)(storyBoardPath)) {
  const storyBoardContent = await (0, promises_1.readFile)(storyBoardPath, 'utf8');
  // 检查 storyBoard JSON 语法合法性
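This hunk hoists the `storyBoardPath` resolution out of the consistency-check branch so the same path can gate the storyboard-review reminder (now raised only when the file exists) and skips the prompt comparison entirely when no `sceneIndex` is supplied. A simplified sketch of that control flow, with stand-in function bodies (the real tool uses `checkStoryboardFlag`, `createErrorResponse`, and the scene comparison shown in the diff):

```js
const { resolve } = require('node:path');
const { existsSync } = require('node:fs');

// Same resolution order as the tool: explicit project cwd first, then process.cwd().
function resolveStoryBoardPath(projectLocalDir, storyBoardFile) {
    return resolve(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
}

// New behaviour: the story-board comparison only runs for scene-bound assets.
function shouldRunConsistencyCheck({ sceneIndex, skipConsistencyCheck }) {
    return Boolean(sceneIndex) && !skipConsistencyCheck;
}

// Usage sketch: the review reminder and the comparison both depend on the file actually existing.
const storyBoardPath = resolveStoryBoardPath('.', 'story_board.json');
if (existsSync(storyBoardPath) && shouldRunConsistencyCheck({ sceneIndex: 2, skipConsistencyCheck: false })) {
    // ...read story_board.json and compare the prompt against scenes[sceneIndex - 1] here...
}
```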
@@ -1343,249 +1347,6 @@ ${processedPrompt}`.trim();
  return createErrorResponse(error, 'generate-image');
  }
  });
- server.registerTool('generate-image-series', {
- title: 'Generate Image Series',
- description: 'Generate a series of images based on prompts.',
- inputSchema: {
- prompts: zod_1.z
- .union([zod_1.z.string(), zod_1.z.array(zod_1.z.string())])
- .describe('The prompts to generate images. Can be a single string or array of strings.'),
- size: zod_1.z
- .enum([
- '1024x1024',
- '864x1152',
- '1152x864',
- '1280x720',
- '720x1280',
- '832x1248',
- '1248x832',
- '1512x648',
- // 2K
- '2048x2048',
- '1728x2304',
- '2304x1728',
- '2560x1440',
- '1440x2560',
- '1664x2496',
- '2496x1664',
- '3024x1456',
- // 4K
- '4096x4096',
- '3072x4096',
- '4096x3072',
- '4096x2304',
- '2304x4096',
- '2731x4096',
- '4096x2731',
- '4096x1968',
- ])
- .default('720x1280')
- .describe('The size of the image.'),
- watermark: zod_1.z
- .boolean()
- .optional()
- .default(false)
- .describe('Whether to add watermark to the images.'),
- max_count: zod_1.z
- .number()
- .min(1)
- .max(15)
- .optional()
- .default(15)
- .describe('Maximum number of images to generate (1-15).'),
- referenceImages: zod_1.z
- .array(zod_1.z.object({
- image: zod_1.z.string().describe('Local image file path'),
- type: zod_1.z
- .enum(['character', 'object', 'background'])
- .describe('Type of the reference image. 必须传,如果是参考角色三视图,传character,如果是参考背景图,传background,否则传object'),
- name: zod_1.z.string().describe('Name for this reference image'),
- isTurnaround: zod_1.z
- .boolean()
- .describe('Whether this is a turnaround image.如果是三视图,这个参数务必传true'),
- }))
- .optional()
- .describe(`Array of reference images with character or object names.如果stage_atmosphere中有角色apply_reference_image,那么必须要传这个参数生成分镜图片
-
- 传参示例
- \`\`\`
- {
- "image": "latiao.jpeg",
- "type": "object",
- "name": "卫龙辣条",
- }
- \`\`\`
- `),
- saveToFileNames: zod_1.z
- .array(zod_1.z.string())
- .describe('Array of filenames to save the generated images.'),
- },
- }, async ({ prompts, size, watermark, max_count, referenceImages, saveToFileNames }, context) => {
- try {
- const currentSession = await validateSession('generate-image-series');
- // Validate file names
- const validatedFileNames = saveToFileNames.map(fileName => validateFileName(fileName));
- if (typeof prompts === 'string' && prompts.startsWith('[')) {
- try {
- prompts = JSON.parse(prompts);
- }
- catch (ex) {
- // 解析失败,保持原始字符串
- }
- }
- console.log(`Generating image series with ${Array.isArray(prompts) ? prompts.length : 1} prompts...`);
- // 处理参考图片
- let imageBase64Array;
- let refPrefix = '';
- if (referenceImages && referenceImages.length > 0) {
- imageBase64Array = [];
- const objectPrefix = [];
- let index = 0;
- let hasTurnaround = false;
- for (const refImage of referenceImages) {
- const imagePath = (0, node_path_1.dirname)(refImage.image) !== '.'
- ? refImage.image
- : `./materials/${refImage.image}`;
- // 需要得到当前项目的绝对路径
- const imageFilePath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, imagePath);
- try {
- // 直接读取本地文件
- if (!(0, node_fs_1.existsSync)(imageFilePath)) {
- return createErrorResponse(`Reference image not found: ${imageFilePath}`, 'generate-image-series');
- }
- const imageBuffer = await (0, promises_1.readFile)(imageFilePath);
- const fileName = (0, node_path_1.basename)(imagePath);
- const mimeType = fileName.toLowerCase().endsWith('.png')
- ? 'image/png'
- : fileName.toLowerCase().endsWith('.jpg') ||
- fileName.toLowerCase().endsWith('.jpeg')
- ? 'image/jpeg'
- : 'image/png';
- const base64String = `data:${mimeType};base64,${imageBuffer.toString('base64')}`;
- imageBase64Array.push(base64String);
- console.log(`Loaded reference image: ${imagePath} for character: ${refImage.name}`);
- if (refImage.type === 'character') {
- if (refImage.isTurnaround) {
- objectPrefix.push(`[参考图${++index}]是名为"${refImage.name}"的人物角色三视图`);
- hasTurnaround = true;
- }
- else {
- objectPrefix.push(`[参考图${++index}]是名为"${refImage.name}"的人物角色形象`);
- }
- }
- else if (refImage.type === 'object') {
- if (refImage.isTurnaround) {
- objectPrefix.push(`[参考图${++index}]是名为"${refImage.name}"的物件三视图`);
- hasTurnaround = true;
- }
- else {
- objectPrefix.push(`[参考图${++index}]是名为"${refImage.name}"的物件`);
- }
- }
- else if (refImage.type === 'background') {
- objectPrefix.push(`[参考图${++index}]是背景图,描述了"${refImage.name}"`);
- }
- }
- catch (error) {
- console.error(`Failed to load reference image ${imageFilePath} for ${refImage.name}:`, error);
- return createErrorResponse(`Failed to load reference image ${imageFilePath} for ${refImage.name}: ${error}`, 'generate-image-series');
- }
- }
- const turnaroundMessage = hasTurnaround
- ? '\n\n⚠️ 注意**三视图**是指**同一个**人或物体的不同视角合成图,三部分都表示同一个人或物体,只能参考其信息画出**一个人或一个物体**,不要画成多个人或多个物体!\n'
- : '';
- if (objectPrefix.length > 0) {
- refPrefix = `${objectPrefix.join('\n')}${turnaroundMessage}`;
- }
- }
- const ai = currentSession.ai;
- let progress = 0;
- const res = await ai.generateImageSeries({
- prompts,
- refPrefix,
- size,
- watermark,
- image: imageBase64Array,
- max_count,
- onProgress: async (metaData) => {
- // 心跳机制,每分钟调用一次,传递空的 metaData
- try {
- await sendProgress(context, ++progress, undefined, JSON.stringify(metaData));
- }
- catch (progressError) {
- console.warn('Failed to send progress update:', progressError);
- }
- },
- });
- if (!res) {
- throw new Error('Failed to generate image series: no response from AI service');
- }
- if (res.urls && Array.isArray(res.urls)) {
- console.log(`Image series generated successfully (${res.urls.length} images), saving to materials...`);
- const results = [];
- const maxImages = Math.min(res.urls.length, validatedFileNames.length);
- for (let i = 0; i < maxImages; i++) {
- const url = res.urls[i];
- const fileName = validatedFileNames[i];
- try {
- const uri = await saveMaterial(currentSession, url, fileName);
- results.push({
- success: true,
- uri,
- fileName,
- index: i,
- timestamp: new Date().toISOString(),
- });
- }
- catch (error) {
- console.error(`Failed to save image ${i + 1}:`, error);
- results.push({
- success: false,
- error: `Failed to save image ${i + 1}: ${error}`,
- fileName,
- index: i,
- timestamp: new Date().toISOString(),
- });
- }
- }
- const result = {
- success: true,
- prompt: res.prompt,
- totalGenerated: res.urls.length,
- totalSaved: results.filter(r => r.success).length,
- results,
- timestamp: new Date().toISOString(),
- };
- return {
- content: [
- {
- type: 'text',
- text: JSON.stringify(result),
- },
- ],
- };
- }
- else {
- console.warn('Image series generation completed but no URLs returned');
- return {
- content: [
- {
- type: 'text',
- text: JSON.stringify({
- success: false,
- error: 'No image URLs returned from AI service',
- response: res,
- timestamp: new Date().toISOString(),
- }),
- },
- ],
- };
- }
- }
- catch (error) {
- return createErrorResponse(error, 'generate-image-series');
- }
- });
  server.registerTool('edit-image', {
  title: 'Edit Image',
  description: 'Edit the image.',
@@ -1696,7 +1457,11 @@ server.registerTool('generate-video', {
  prompt: zod_1.z
  .string()
  .describe('The prompt to generate. 一般要严格对应 story_board 中当前场景的 video_prompt 字段描述'),
- sceneIndex: zod_1.z.number().min(1).describe('场景索引,从1开始的下标'),
+ sceneIndex: zod_1.z
+ .number()
+ .min(1)
+ .optional()
+ .describe('场景索引,从1开始的下标,如果非场景对应素材,则可不传,场景素材必传'),
  storyBoardFile: zod_1.z
  .string()
  .optional()
@@ -1754,7 +1519,7 @@ server.registerTool('generate-video', {
  // 验证session状态
  const currentSession = await validateSession('generate-video');
  // 校验 prompt 与 story_board.json 中场景设定的一致性以及视频时长与 timeline_analysis.json 中 proposed_video_scenes 的匹配
- if (!skipConsistencyCheck) {
+ if (sceneIndex && !skipConsistencyCheck) {
  try {
  const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
  if ((0, node_fs_1.existsSync)(storyBoardPath)) {
@@ -2453,7 +2218,11 @@ server.registerTool('generate-scene-tts', {
  description: `生成场景配音`,
  inputSchema: {
  text: zod_1.z.string().describe('The text to generate.'),
- sceneIndex: zod_1.z.number().min(1).describe('场景索引,从1开始的下标'),
+ sceneIndex: zod_1.z
+ .number()
+ .min(1)
+ .optional()
+ .describe('场景索引,从1开始的下标,如果非场景对应素材,则可不传,场景素材必传'),
  storyBoardFile: zod_1.z
  .string()
  .optional()
@@ -2523,7 +2292,7 @@ server.registerTool('generate-scene-tts', {
  const finalSpeed = speed ?? 1;
  volume = volume ?? 1;
  // 校验 text 与 story_board.json 中场景设定的一致性
- if (!skipConsistencyCheck) {
+ if (sceneIndex && !skipConsistencyCheck) {
  try {
  const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
  if ((0, node_fs_1.existsSync)(storyBoardPath)) {
@@ -2697,7 +2466,7 @@ server.registerTool('compile-and-run', {
  // 检查字幕内容匹配标记
  if (!checkStoryboardSubtitlesFlag) {
  checkStoryboardSubtitlesFlag = true;
- return createErrorResponse('请先检查字幕文字内容是否与 story_board 中各个场景的 script 或 dialog 内容完全匹配,若不匹配,修改 draft_content 使其匹配,返回检查结果,然后再进行合成', 'compile-and-run');
+ return createErrorResponse('请先检查字幕文字内容是否与 story_board 中各个场景的 script 或 dialog 内容完全一致(允许字幕分段,只要最终文本一致就行),若不匹配,修改 draft_content 使其匹配,返回检查结果,然后再进行合成', 'compile-and-run');
  }
  console.log('Starting video compilation and rendering...');
  // 验证terminal可用性
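The updated message above relaxes the subtitle rule from an exact per-entry match to "segments may be split, as long as the concatenated text equals the scene's script or dialog". A minimal sketch of that comparison rule, not the package's implementation (the whitespace normalization is an assumption):

```js
// Returns true when the joined subtitle segments reproduce the scene script,
// regardless of how the text was split across subtitle entries.
function subtitlesMatchScript(subtitleSegments, sceneScript) {
    const normalize = (s) => s.replace(/\s+/g, ''); // assumption: ignore whitespace introduced by splitting
    return normalize(subtitleSegments.join('')) === normalize(sceneScript);
}

console.log(subtitlesMatchScript(['今天我们来', '聊聊辣条'], '今天我们来聊聊辣条')); // true
console.log(subtitlesMatchScript(['今天我们来', '聊别的'], '今天我们来聊聊辣条'));   // false
```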
@@ -3476,7 +3245,7 @@ server.registerTool('build-capcat-draft', {
  });
  server.registerTool('generate-video-by-ref', {
  title: 'Generate Video by Reference Images',
- description: 'Generate video using reference images. Supports sora2 (1 image max), lite and pro (4 images max). Can work without reference images (0 images).',
+ description: 'Generate video using reference images. Supports sora2, veo3.1, veo3.1-pro (1 image max), lite and pro (4 images max), vidu (7 images max). Can work without reference images (0 images).',
  inputSchema: {
  prompt: zod_1.z
  .string()
@@ -3504,7 +3273,7 @@ server.registerTool('generate-video-by-ref', {
  .default(false)
  .describe('Whether to add watermark to the video.'),
  type: zod_1.z
- .enum(['lite', 'pro', 'sora2', 'vidu'])
+ .enum(['lite', 'sora2', 'veo3.1', 'veo3.1-pro', 'vidu'])
  .optional()
  .default('lite')
  .describe('The model type to use. sora2 allows max 1 reference image, lite and pro allow max 4.'),
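The `type` enum above drops `pro` and adds the `veo3.1` variants while keeping `.optional().default('lite')`. A standalone sketch of how that schema parses, reconstructed from the hunk for illustration only:

```js
const { z } = require('zod');

// Reconstruction of the 2.20.1 "type" field.
const type = z
    .enum(['lite', 'sora2', 'veo3.1', 'veo3.1-pro', 'vidu'])
    .optional()
    .default('lite');

console.log(type.parse(undefined));         // 'lite': default when the caller omits the field
console.log(type.parse('veo3.1-pro'));      // 'veo3.1-pro'
console.log(type.safeParse('pro').success); // false: 'pro' is no longer accepted in 2.20.1
```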
@@ -3516,18 +3285,122 @@ server.registerTool('generate-video-by-ref', {
  saveToFileName: zod_1.z
  .string()
  .describe('The filename to save the generated video.'),
+ sceneIndex: zod_1.z
+ .number()
+ .min(1)
+ .optional()
+ .describe('场景索引,从1开始的下标,如果非场景对应素材,则可不传,场景素材必传'),
+ storyBoardFile: zod_1.z
+ .string()
+ .optional()
+ .default('story_board.json')
+ .describe('故事板文件路径'),
+ skipConsistencyCheck: zod_1.z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe('是否跳过一致性检查,默认为false(即默认进行一致性检查)'),
+ skipCheckWithSceneReason: zod_1.z
+ .string()
+ .optional()
+ .describe('跳过校验的理由,如果skipConsistencyCheck设为true,必须要传这个参数'),
+ optimizePrompt: zod_1.z
+ .boolean()
+ .optional()
+ .default(false)
+ .describe('Whether to optimize the prompt.'),
  },
- }, async ({ prompt, referenceImages, duration, size, watermark, type, mute, saveToFileName, }, context) => {
+ }, async ({ prompt, referenceImages, duration, size, watermark, type, mute, saveToFileName, sceneIndex, storyBoardFile, skipConsistencyCheck, optimizePrompt, }, context) => {
  try {
  // 验证session状态
  const currentSession = await validateSession('generate-video-by-ref');
+ const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
+ // 检查 storyboard 标志
+ if (!checkStoryboardFlag && (0, node_fs_1.existsSync)(storyBoardPath)) {
+ checkStoryboardFlag = true;
+ return createErrorResponse(`必须先审查生成的 story_board.json 内容,按照如下步骤:
+
+ 1. 确保每个场景中的stage_atmosphere内容按照规则被正确融合到video_prompt中,不得遗漏
+ 2. 如有main_characters设定且包含了reference_image,或有reference_objects,需确保video_prompt描述已包含该场景相关main_characters和所有reference_objects中的物品或背景,并确保参考图具体内容已经在video_prompt中有明确描述,如果没有,可忽略。
+ 3. 如有配音,先自我检查 media_logs 中的查音频时长,确保以匹配音频时长来生成视频
+
+ 检查完上述问题后先汇报,如果有需要,应当先修改 story_board.json 内容,然后再调用 generate-video-by-ref 生成视频。注意修改 story_board 内容时,仅修改相应字段的字符串值,不要破坏JSON格式!
+
+ 再次调用 generate-video-by-ref 时,如需要参考图,要确保referenceImages使用正确(main_characters中的reference_image作为参考人物,reference_objects中的image作为参考物品或参考背景)`, 'generate-image');
+ }
+ // 校验 prompt 与 story_board.json 中场景设定的一致性(如果提供了 sceneIndex)
+ if (!skipConsistencyCheck && sceneIndex) {
+ try {
+ if ((0, node_fs_1.existsSync)(storyBoardPath)) {
+ const storyBoardContent = await (0, promises_1.readFile)(storyBoardPath, 'utf8');
+ // 检查 storyBoard JSON 语法合法性
+ let storyBoard;
+ try {
+ storyBoard = JSON.parse(storyBoardContent);
+ }
+ catch (jsonError) {
+ return createErrorResponse(`storyBoard 文件 ${storyBoardFile} 存在 JSON 语法错误,请修复后重试。错误详情: ${jsonError instanceof Error ? jsonError.message : String(jsonError)}`, 'generate-video-by-ref');
+ }
+ if (storyBoard.scenes && Array.isArray(storyBoard.scenes)) {
+ const scene = storyBoard.scenes[sceneIndex - 1]; // sceneIndex 从1开始,数组从0开始
+ if (scene) {
+ const videoPrompt = scene.video_prompt;
+ if (videoPrompt && prompt !== videoPrompt) {
+ return createErrorResponse('视频提示词必须严格遵照story_board的设定,如果用户明确指出不需要遵守,请将skipConsistencyCheck设置为true后再次调用', 'generate-video-by-ref');
+ }
+ // 检查 scene.is_continuous 是否为 true
+ if (scene.is_continuous === true) {
+ return createErrorResponse('连续镜头应使用首尾帧,请修改连续镜头设置,或将本场景改为首尾帧方式实现', 'generate-video-by-ref');
+ }
+ }
+ else {
+ console.warn(`Scene index ${sceneIndex} not found in story_board.json`);
+ }
+ }
+ }
+ else {
+ console.warn(`Story board file not found: ${storyBoardPath}`);
+ }
+ }
+ catch (error) {
+ console.error('Failed to validate prompt with story board:', error);
+ // 如果读取或解析 story_board.json 失败,继续执行但记录警告
+ }
+ }
  const validatedFileName = validateFileName(saveToFileName);
+ // 优化提示词(如果启用)
+ const ai = currentSession.ai;
+ if (optimizePrompt) {
+ try {
+ const promptOptimizer = await (0, promises_1.readFile)((0, node_path_1.resolve)(__dirname, './prompts/video-prompt-optimizer.md'), 'utf8');
+ const completion = await ai.getCompletions({
+ messages: [
+ {
+ role: 'system',
+ content: promptOptimizer,
+ },
+ {
+ role: 'user',
+ content: prompt.trim(),
+ },
+ ],
+ });
+ const optimizedPrompt = completion.choices[0]?.message?.content;
+ if (optimizedPrompt) {
+ prompt = optimizedPrompt;
+ }
+ }
+ catch (error) {
+ console.error('Failed to optimize prompt:', error);
+ }
+ }
  // 验证参考图数量限制
- if (type === 'sora2' && referenceImages.length > 1) {
- return createErrorResponse('sora2 model only supports maximum 1 reference image', 'generate-video-by-ref');
+ if ((type === 'sora2' || type === 'veo3.1' || type === 'veo3.1-pro') &&
+ referenceImages.length > 1) {
+ return createErrorResponse(`${type} model only supports maximum 1 reference image`, 'generate-video-by-ref');
  }
- if ((type === 'lite' || type === 'pro') && referenceImages.length > 4) {
- return createErrorResponse('lite and pro models only support maximum 4 reference images', 'generate-video-by-ref');
+ if (type === 'lite' && referenceImages.length > 4) {
+ return createErrorResponse('lite model only support maximum 4 reference images', 'generate-video-by-ref');
  }
  if (type === 'vidu' && referenceImages.length > 7) {
  return createErrorResponse('vidu models only support maximum 7 reference images', 'generate-video-by-ref');
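The reference-image validation above now branches per model family: sora2, veo3.1, and veo3.1-pro accept at most 1 image, lite at most 4, and vidu at most 7. A hedged sketch that expresses the same limits as a lookup table (an alternative shape for illustration, not the package's code):

```js
// Maximum reference images per model type, matching the checks added in 2.20.1.
const MAX_REFERENCE_IMAGES = {
    sora2: 1,
    'veo3.1': 1,
    'veo3.1-pro': 1,
    lite: 4,
    vidu: 7,
};

// Returns an error string when the limit is exceeded, otherwise null.
function validateReferenceImageCount(type, referenceImages) {
    const limit = MAX_REFERENCE_IMAGES[type];
    if (limit !== undefined && referenceImages.length > limit) {
        return `${type} model only supports maximum ${limit} reference image(s)`;
    }
    return null;
}

console.log(validateReferenceImageCount('veo3.1', ['a.png', 'b.png'])); // error string
console.log(validateReferenceImageCount('vidu', new Array(7).fill('x.png'))); // null
```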
@@ -3593,7 +3466,7 @@ server.registerTool('generate-video-by-ref', {
  }
  catch (error) {
  console.error('Error generating video by reference:', error);
- return createErrorResponse(error, 'generate-video-by-ref');
+ return createErrorResponse(error.message, 'generate-video-by-ref');
  }
  });
  server.registerTool('run-ffmpeg-command', {