cerevox 3.0.0-beta.3 → 3.0.0-beta.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -50,7 +50,6 @@ const constants_1 = require("../../utils/constants");
50
50
  const videokit_1 = require("../../utils/videokit");
51
51
  const promises_1 = require("node:fs/promises");
52
52
  const node_path_1 = __importStar(require("node:path"));
53
- const doubao_voices_full_1 = require("./helper/doubao_voices_full");
54
53
  const node_fs_1 = require("node:fs");
55
54
  const coze_1 = require("../../utils/coze");
56
55
  const mp3_duration_1 = __importDefault(require("mp3-duration"));
@@ -94,10 +93,13 @@ function validateFileName(fileName) {
94
93
  if (!fileName || fileName.trim() === '') {
95
94
  throw new Error('File name cannot be empty');
96
95
  }
96
+ if (fileName.startsWith('materials/')) {
97
+ fileName = fileName.replace('materials/', '');
98
+ }
97
99
  if (fileName.includes('..') ||
98
100
  fileName.includes('/') ||
99
101
  fileName.includes('\\')) {
100
- throw new Error('Invalid file name: contains illegal characters');
102
+ throw new Error('Invalid file name: contains illegal characters, cannot contain ".." or "/" or "\\"');
101
103
  }
102
104
  return fileName.trim();
103
105
  }
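The new validateFileName first tolerates and strips a leading materials/ prefix, then rejects path-like names as before (with a more explicit error message). Reassembled from the hunk above, the post-change function reads:

    function validateFileName(fileName) {
        if (!fileName || fileName.trim() === '') {
            throw new Error('File name cannot be empty');
        }
        // New in beta.30: a leading "materials/" prefix is stripped rather than rejected.
        if (fileName.startsWith('materials/')) {
            fileName = fileName.replace('materials/', '');
        }
        // Path traversal and separators are still rejected after the prefix strip.
        if (fileName.includes('..') || fileName.includes('/') || fileName.includes('\\')) {
            throw new Error('Invalid file name: contains illegal characters, cannot contain ".." or "/" or "\\"');
        }
        return fileName.trim();
    }
    // e.g. validateFileName('materials/scene1.png') returns 'scene1.png';
    //      validateFileName('../etc/passwd') throws.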
@@ -130,6 +132,7 @@ async function initProject(session) {
130
132
  return workDir;
131
133
  }
132
134
  async function saveMaterial(session, url, saveToFileName) {
135
+ saveToFileName = validateFileName(saveToFileName);
133
136
  const terminal = session.terminal;
134
137
  const saveToPath = `/home/user/cerevox-zerocut/projects/${terminal.id}/materials/${saveToFileName}`;
135
138
  const saveLocalPath = (0, node_path_1.resolve)(projectLocalDir, 'materials', saveToFileName);
@@ -514,7 +517,7 @@ server.registerTool('project-close', {
514
517
  .min(0)
515
518
  .max(20)
516
519
  .default(5)
517
- .describe('Close the session after the specified number of minutes. Default is 5 minutes. 如果用户要求立即关闭会话,请将该参数设置为0'),
520
+ .describe('Close the session after the specified number of minutes. Default is 5 minutes. 除非用户要求立即关闭会话,将该参数设置为0,否则应默认设为5'),
518
521
  },
519
522
  }, async ({ inMinutes }) => {
520
523
  try {
@@ -663,75 +666,12 @@ server.registerTool('retrieve-rules-context', {
663
666
  return createErrorResponse(`Failed to load rules context prompt for ${prompt}: ${error}`, 'retrieve-rules-context');
664
667
  }
665
668
  });
666
- // 列出项目下的所有文件
667
- server.registerTool('list-project-files', {
668
- title: 'List Project Files',
669
- description: 'List all files in the materials directory.',
670
- inputSchema: {},
671
- }, async () => {
672
- try {
673
- // 验证session状态
674
- const currentSession = await validateSession('list-project-files');
675
- console.log('Listing project files...');
676
- const terminal = currentSession.terminal;
677
- if (!terminal) {
678
- throw new Error('Terminal not available in current session');
679
- }
680
- let cwd;
681
- try {
682
- cwd = await terminal.getCwd();
683
- }
684
- catch (cwdError) {
685
- console.error('Failed to get current working directory:', cwdError);
686
- throw new Error('Failed to get current working directory');
687
- }
688
- console.log(`Current working directory: ${cwd}`);
689
- // 安全地列出各目录文件,失败时返回空数组
690
- const listFilesWithFallback = async (path, dirName) => {
691
- try {
692
- const files = await currentSession.files.listFiles(path);
693
- console.log(`Found ${files?.length || 0} files in ${dirName}`);
694
- return files || [];
695
- }
696
- catch (error) {
697
- console.warn(`Failed to list files in ${dirName} (${path}):`, error);
698
- return [];
699
- }
700
- };
701
- const [rootFiles, materialsFiles, outputFiles] = await Promise.all([
702
- listFilesWithFallback(cwd, 'root'),
703
- listFilesWithFallback(`${cwd}/materials`, 'materials'),
704
- listFilesWithFallback(`${cwd}/output`, 'output'),
705
- ]);
706
- const result = {
707
- success: true,
708
- cwd,
709
- root: rootFiles,
710
- materials: materialsFiles,
711
- output: outputFiles,
712
- totalFiles: rootFiles.length + materialsFiles.length + outputFiles.length,
713
- timestamp: new Date().toISOString(),
714
- };
715
- console.log(`Total files found: ${result.totalFiles}`);
716
- return {
717
- content: [
718
- {
719
- type: 'text',
720
- text: JSON.stringify(result),
721
- },
722
- ],
723
- };
724
- }
725
- catch (error) {
726
- return createErrorResponse(error, 'list-project-files');
727
- }
728
- });
729
669
  server.registerTool('generate-character-image', {
730
670
  title: 'Generate Character Image',
731
671
  description: 'Generate a turnaround image or portrait for any character.',
732
672
  inputSchema: {
733
673
  type: zod_1.z
734
- .enum(['banana', 'banana-pro', 'seedream'])
674
+ .enum(['banana', 'banana-pro', 'seedream', 'seedream-pro'])
735
675
  .optional()
736
676
  .default('banana'),
737
677
  name: zod_1.z.string().describe('The name of the character.'),
@@ -761,7 +701,9 @@ server.registerTool('generate-character-image', {
761
701
  .boolean()
762
702
  .default(true)
763
703
  .describe('是否生成三视图。true: 生成4096x3072的三视图,false: 生成2304x4096的竖版人物正视图'),
764
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
704
+ saveToFileName: zod_1.z
705
+ .string()
706
+ .describe('The filename to save. 应该是png文件'),
765
707
  },
766
708
  }, async ({ type, name, gender, age, appearance, clothing, personality, detail_features, style, saveToFileName, referenceImage, referenceImagePrompt, isTurnaround, }) => {
767
709
  try {
@@ -930,49 +872,6 @@ ${roleDescriptionPrompt}
930
872
  return createErrorResponse(error, 'generate-character-image');
931
873
  }
932
874
  });
933
- server.registerTool('generate-line-sketch', {
934
- title: 'Generate Line Sketch',
935
- description: 'Generate line sketch material based on user prompt.',
936
- inputSchema: {
937
- prompt: zod_1.z.string().describe('The prompt to generate line sketch.'),
938
- saveToFileName: zod_1.z
939
- .string()
940
- .describe('The filename to save the generated line sketch.'),
941
- },
942
- }, async ({ prompt, saveToFileName }) => {
943
- try {
944
- // 验证session状态
945
- await validateSession('generate-line-sketch');
946
- // 验证文件名
947
- validateFileName(saveToFileName);
948
- // 调用AI生成线稿
949
- const res = await session.ai.generateLineSketch({ prompt });
950
- if (res && res.url) {
951
- // 保存到本地
952
- await saveMaterial(session, res.url, saveToFileName);
953
- const result = {
954
- success: true,
955
- url: res.url,
956
- localPath: getMaterialUri(session, saveToFileName),
957
- timestamp: new Date().toISOString(),
958
- };
959
- return {
960
- content: [
961
- {
962
- type: 'text',
963
- text: JSON.stringify(result),
964
- },
965
- ],
966
- };
967
- }
968
- else {
969
- throw new Error('No URL returned from AI service');
970
- }
971
- }
972
- catch (error) {
973
- return createErrorResponse(error, 'generate-line-sketch');
974
- }
975
- });
976
875
  server.registerTool('upload-custom-material', {
977
876
  title: 'Upload Custom Material',
978
877
  description: 'Upload material files (images: jpeg/png, videos: mp4, audio: mp3) from the local filesystem to the materials directory. For video and audio files, duration information will be included in the response.',
@@ -1061,15 +960,21 @@ server.registerTool('upload-custom-material', {
1061
960
  });
1062
961
  server.registerTool('generate-image', {
1063
962
  title: 'Generate Image',
1064
- description: `生成图片`,
963
+ description: `生成图片,支持批量生成1-4张图`,
1065
964
  inputSchema: {
1066
965
  type: zod_1.z
1067
- .enum(['banana', 'banana-pro', 'seedream'])
966
+ .enum([
967
+ 'banana',
968
+ 'banana-pro',
969
+ 'seedream',
970
+ 'seedream-pro',
971
+ 'line-sketch',
972
+ ])
1068
973
  .optional()
1069
974
  .default('seedream'),
1070
975
  prompt: zod_1.z
1071
976
  .string()
1072
- .describe('The prompt to generate. 一般要严格对应 storyboard 中当前场景的 start_frame 或 end_frame 中的字段描述'),
977
+ .describe('The prompt to generate. 一般要严格对应 storyboard 中当前场景的 start_frame 或 end_frame 中的字段描述,如果是生成线稿,则 type 使用 line-sketch'),
1073
978
  sceneIndex: zod_1.z
1074
979
  .number()
1075
980
  .min(1)
@@ -1124,17 +1029,21 @@ server.registerTool('generate-image', {
1124
1029
  ])
1125
1030
  .default('720x1280')
1126
1031
  .describe('The size of the image.'),
1127
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
1032
+ imageCount: zod_1.z
1033
+ .number()
1034
+ .min(1)
1035
+ .max(4)
1036
+ .optional()
1037
+ .default(1)
1038
+ .describe('The number of images to generate. 暂时最多支持4张图,多了容易超时'),
1039
+ saveToFileNames: zod_1.z
1040
+ .array(zod_1.z.string())
1041
+ .describe('The filenames to save. 数量要和imageCount对应,应该是png文件'),
1128
1042
  watermark: zod_1.z
1129
1043
  .boolean()
1130
1044
  .optional()
1131
1045
  .default(false)
1132
1046
  .describe('Whether to add watermark to the image.'),
1133
- optimizePrompt: zod_1.z
1134
- .boolean()
1135
- .optional()
1136
- .default(false)
1137
- .describe('Whether to optimize the prompt.'),
1138
1047
  referenceImages: zod_1.z
1139
1048
  .array(zod_1.z.object({
1140
1049
  image: zod_1.z.string().describe('Local image file path'),
@@ -1163,7 +1072,7 @@ server.registerTool('generate-image', {
1163
1072
  \`\`\`
1164
1073
  `),
1165
1074
  },
1166
- }, async ({ type = 'seedream', prompt, sceneIndex, storyBoardFile = 'storyboard.json', skipConsistencyCheck = false, size = '720x1280', saveToFileName, watermark, referenceImages, optimizePrompt, }) => {
1075
+ }, async ({ type = 'seedream', prompt, sceneIndex, storyBoardFile = 'storyboard.json', skipConsistencyCheck = false, size = '720x1280', imageCount = 1, saveToFileNames, watermark, referenceImages, }) => {
1167
1076
  try {
1168
1077
  // 验证session状态
1169
1078
  const currentSession = await validateSession('generate-image');
@@ -1173,7 +1082,6 @@ server.registerTool('generate-image', {
1173
1082
  checkStoryboardFlag = true;
1174
1083
  return createErrorResponse('必须先审查生成的 storyboard.json 内容,确保每个场景中的stage_atmosphere内容按照规则被正确融合到start_frame和video_prompt中,不得遗漏,检查完成后先汇报,如果有问题,应当先修改 storyboard.json 内容,然后再调用 generate-image 生成图片。注意修改 storyboard 内容时,仅修改相应字段的字符串值,不要破坏JSON格式!', 'generate-image');
1175
1084
  }
1176
- const validatedFileName = validateFileName(saveToFileName);
1177
1085
  // 校验 prompt 与 storyboard.json 中场景设定的一致性
1178
1086
  if (sceneIndex && !skipConsistencyCheck) {
1179
1087
  try {
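generate-image now takes imageCount (1-4) and a matching saveToFileNames array instead of the old saveToFileName, and the optimizePrompt flag is gone because prompt optimization always runs. A hypothetical tool call under the new schema (parameter names come from the hunks above; client.callTool is a stand-in for whatever MCP client is in use, not an API this package ships):

    // Illustrative only: generate two related frames for scene 3 in one call.
    const res = await client.callTool('generate-image', {
        type: 'seedream',
        prompt: '…start_frame description copied from storyboard.json, scene 3…',
        sceneIndex: 3,
        size: '720x1280',
        imageCount: 2,                                      // 1-4; more tends to time out
        saveToFileNames: ['scene3_a.png', 'scene3_b.png'],  // length must equal imageCount, png files
    });
    // On success the response now reports `uris` (one per saved image) instead of a single `uri`.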
@@ -1257,52 +1165,73 @@ server.registerTool('generate-image', {
1257
1165
  // 检查并替换英文单引号包裹的中文内容为中文双引号
1258
1166
  // 这样才能让 seedream 生成更好的中文文字
1259
1167
  let processedPrompt = prompt.replace(/'([^']*[\u4e00-\u9fff][^']*)'/g, '“$1”');
1260
- if (optimizePrompt) {
1261
- try {
1262
- const ai = currentSession.ai;
1263
- const promptOptimizer = await (0, promises_1.readFile)((0, node_path_1.resolve)(__dirname, './prompts/image-prompt-optimizer.md'), 'utf8');
1264
- const completion = await ai.getCompletions({
1265
- model: 'Doubao-Seed-1.6-flash',
1266
- messages: [
1267
- {
1268
- role: 'system',
1269
- content: promptOptimizer,
1168
+ try {
1169
+ const ai = currentSession.ai;
1170
+ const promptOptimizer = await (0, promises_1.readFile)((0, node_path_1.resolve)(__dirname, './prompts/image-prompt-optimizer.md'), 'utf8');
1171
+ const schema = {
1172
+ name: 'optimize_image_prompt',
1173
+ schema: {
1174
+ type: 'object',
1175
+ properties: {
1176
+ prompt_optimized: {
1177
+ type: 'string',
1178
+ description: '优化后的提示词',
1270
1179
  },
1271
- {
1272
- role: 'user',
1273
- content: processedPrompt.trim(),
1180
+ metaphor_modifiers: {
1181
+ type: 'array',
1182
+ description: '从 prompt_optimized 中抽取的所有比喻修饰词(字符串数组)',
1183
+ items: {
1184
+ type: 'string',
1185
+ description: '比喻性修饰词,例如 “如羽毛般轻盈”、“像晨雾一样柔和”',
1186
+ },
1274
1187
  },
1275
- ],
1276
- });
1277
- let optimizedPrompt = completion.choices[0]?.message?.content.trim();
1278
- if (optimizedPrompt) {
1279
- if (optimizedPrompt.startsWith('```json')) {
1280
- // 提取 JSON 代码块中的内容
1281
- const jsonMatch = optimizedPrompt.match(/```json\s*([\s\S]*?)\s*```/);
1282
- if (jsonMatch && jsonMatch[1]) {
1283
- optimizedPrompt = jsonMatch[1];
1284
- }
1285
- }
1286
- if (optimizedPrompt.startsWith('{')) {
1287
- try {
1288
- const { prompt_optimized, metaphor_modifiers } = JSON.parse(optimizedPrompt);
1289
- processedPrompt = `${prompt_optimized}`;
1290
- if (metaphor_modifiers?.length) {
1291
- processedPrompt += `\n\n注意:下面这些是形象比喻,并不是输出内容。\n${metaphor_modifiers}`;
1292
- }
1293
- }
1294
- catch (ex) {
1295
- processedPrompt = optimizedPrompt;
1296
- }
1297
- }
1298
- else {
1299
- processedPrompt = optimizedPrompt;
1188
+ },
1189
+ required: ['prompt_optimized', 'metaphor_modifiers'],
1190
+ },
1191
+ };
1192
+ const completion = await ai.getCompletions({
1193
+ model: 'Doubao-Seed-1.6',
1194
+ messages: [
1195
+ {
1196
+ role: 'system',
1197
+ content: promptOptimizer,
1198
+ },
1199
+ {
1200
+ role: 'user',
1201
+ content: `## 用户指令
1202
+
1203
+ ${processedPrompt.trim()}
1204
+
1205
+ ## 参考图
1206
+
1207
+ ${referenceImages?.map((ref, index) => `图${index + 1}:${ref.image}`).join('\n') || '无'}`,
1208
+ },
1209
+ ],
1210
+ response_format: {
1211
+ type: 'json_schema',
1212
+ json_schema: schema,
1213
+ },
1214
+ });
1215
+ const optimizedPrompt = completion.choices[0]?.message?.content.trim();
1216
+ if (optimizedPrompt) {
1217
+ try {
1218
+ const { prompt_optimized, metaphor_modifiers } = JSON.parse(optimizedPrompt);
1219
+ processedPrompt = `${prompt_optimized}`;
1220
+ if (metaphor_modifiers?.length) {
1221
+ processedPrompt += `\n\n注意:下面这些是形象比喻,并不是输出内容。\n${metaphor_modifiers}`;
1300
1222
  }
1301
1223
  }
1224
+ catch (ex) {
1225
+ console.error('Failed to parse optimized prompt:', ex);
1226
+ processedPrompt = optimizedPrompt;
1227
+ }
1302
1228
  }
1303
- catch (error) {
1304
- console.error('Failed to optimize prompt:', error);
1305
- }
1229
+ }
1230
+ catch (error) {
1231
+ console.error('Failed to optimize prompt:', error);
1232
+ }
1233
+ if (imageCount > 1) {
1234
+ processedPrompt = `请生成${imageCount}张相关图片 ${processedPrompt}`;
1306
1235
  }
1307
1236
  console.log(`Generating image with prompt: ${processedPrompt.substring(0, 100)}...`);
1308
1237
  // 处理参考图片
@@ -1385,13 +1314,25 @@ ${processedPrompt}`.trim();
1385
1314
  if (!res) {
1386
1315
  throw new Error('Failed to generate image: no response from AI service');
1387
1316
  }
1388
- if (res.url) {
1317
+ if (res.urls && res.urls.length > 0) {
1389
1318
  console.log('Image generated successfully, saving to materials...');
1390
- const uri = await saveMaterial(currentSession, res.url, validatedFileName);
1319
+ let uris = [];
1320
+ if (res.urls.length === 1) {
1321
+ uris = [
1322
+ await saveMaterial(currentSession, res.urls[0], validateFileName(saveToFileNames[0])),
1323
+ ];
1324
+ }
1325
+ else {
1326
+ // 多图场景
1327
+ uris = await Promise.all(res.urls.map((url, i) => {
1328
+ const fileName = validateFileName(saveToFileNames[i]);
1329
+ return saveMaterial(currentSession, url, fileName);
1330
+ }));
1331
+ }
1391
1332
  const result = {
1392
1333
  success: true,
1393
1334
  // source: res.url,
1394
- uri,
1335
+ uris,
1395
1336
  prompt: processedPrompt,
1396
1337
  size,
1397
1338
  timestamp: new Date().toISOString(),
@@ -1432,12 +1373,14 @@ server.registerTool('edit-image', {
1432
1373
  inputSchema: {
1433
1374
  prompt: zod_1.z.string().describe('要编辑图片的中文提示词'),
1434
1375
  type: zod_1.z
1435
- .enum(['banana-pro', 'banana', 'seedream'])
1376
+ .enum(['banana-pro', 'banana', 'seedream', 'seedream-pro'])
1436
1377
  .optional()
1437
1378
  .default('seedream')
1438
1379
  .describe('The type of image model to use.'),
1439
1380
  sourceImageFileName: zod_1.z.string().describe('The source image file name.'),
1440
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
1381
+ saveToFileName: zod_1.z
1382
+ .string()
1383
+ .describe('The filename to save. 应该是png文件'),
1441
1384
  size: zod_1.z
1442
1385
  .enum([
1443
1386
  '1024x1024',
@@ -1560,7 +1503,7 @@ server.registerTool('edit-image', {
1560
1503
  let lastEffect = '';
1561
1504
  server.registerTool('generate-video', {
1562
1505
  title: 'Generate Video',
1563
- description: `生成视频`,
1506
+ description: `图生视频和首尾帧生视频工具`,
1564
1507
  inputSchema: {
1565
1508
  prompt: zod_1.z
1566
1509
  .string()
@@ -1592,6 +1535,8 @@ server.registerTool('generate-video', {
1592
1535
  'hailuo-fast',
1593
1536
  'vidu',
1594
1537
  'vidu-pro',
1538
+ 'vidu-uc',
1539
+ 'vidu-uc-pro',
1595
1540
  'kling',
1596
1541
  'kling-pro',
1597
1542
  'pixv',
@@ -1601,7 +1546,9 @@ server.registerTool('generate-video', {
1601
1546
  ])
1602
1547
  .default('lite')
1603
1548
  .describe('除非用户明确提出使用其他模型,否则一律用lite模型;zero 系列模型适合创作8-23秒带故事情节的短片'),
1604
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
1549
+ saveToFileName: zod_1.z
1550
+ .string()
1551
+ .describe('The filename to save. 应该是mp4文件'),
1605
1552
  start_frame: zod_1.z
1606
1553
  .string()
1607
1554
  .optional()
@@ -2008,7 +1955,7 @@ server.registerTool('generate-video', {
2008
1955
  console.warn('Failed to send progress update:', progressError);
2009
1956
  }
2010
1957
  },
2011
- waitForFinish: true,
1958
+ waitForFinish: type !== 'zero',
2012
1959
  });
2013
1960
  if (!res) {
2014
1961
  throw new Error('Failed to generate video: no response from AI service');
@@ -2056,7 +2003,7 @@ server.registerTool('generate-video', {
2056
2003
  type: 'text',
2057
2004
  text: JSON.stringify({
2058
2005
  success: true,
2059
- message: '该视频生成任务正在运行中,它是异步任务,且执行时间较长,你应立即调用工具 wait-for-task-finish 来等待任务结束,如该工具调用超时,你应立即再次重新调用直到任务结束。',
2006
+ message: '该视频生成任务正在运行中,它是异步任务,且执行时间较长,你应立即调用工具 wait-for-task-finish 来等待任务结束,如 wait-for-task-finish 工具调用超时,你应立即再次重新调用直到任务结束。',
2060
2007
  taskUrl: res.taskUrl,
2061
2008
  }),
2062
2009
  },
@@ -2086,7 +2033,7 @@ server.registerTool('generate-video', {
2086
2033
  }
2087
2034
  });
2088
2035
  server.registerTool('wait-for-task-finish', {
2089
- title: 'Wait Workflow or VideoTask Done',
2036
+ title: 'Wait Workflow or VideoTask Done;只有正在运行Coze工作流或者有异步生成视频任务时才需要执行这个工具',
2090
2037
  description: 'Wait for a workflow to complete.',
2091
2038
  inputSchema: {
2092
2039
  taskUrl: zod_1.z
@@ -2094,7 +2041,7 @@ server.registerTool('wait-for-task-finish', {
2094
2041
  .describe('The taskUrl of the video task to wait for.'),
2095
2042
  saveToFileName: zod_1.z
2096
2043
  .string()
2097
- .describe('The file name to save the video to.'),
2044
+ .describe('The file name to save the video to. 应该是mp4文件'),
2098
2045
  },
2099
2046
  }, async ({ taskUrl, saveToFileName }, context) => {
2100
2047
  try {
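generate-video now only blocks for non-zero model types (waitForFinish: type !== 'zero'); for the zero series it returns a taskUrl, and the message above tells the caller to follow up with wait-for-task-finish, re-invoking on timeout. A hypothetical follow-up call (client.callTool remains an illustrative stand-in):

    // Illustrative only: after generate-video returns { taskUrl }, wait for the task to finish.
    // Per the message above, re-invoke this call if it times out, until the task ends.
    const done = await client.callTool('wait-for-task-finish', {
        taskUrl: videoTask.taskUrl,
        saveToFileName: 'scene3.mp4',   // mp4 file, as the schema now notes
    });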
@@ -2169,7 +2116,7 @@ server.registerTool('generate-sound-effect', {
2169
2116
  .describe('The duration of the sound which will be generated in seconds. Must be at least 0.5 and at most 30. If set to None we will guess the optimal duration using the prompt. Defaults to None.'),
2170
2117
  saveToFileName: zod_1.z
2171
2118
  .string()
2172
- .describe('The filename to save. The mime type is audio/mpeg (mp3).'),
2119
+ .describe('The filename to save. 应该是mp3文件'),
2173
2120
  },
2174
2121
  }, async ({ prompt_in_english, loop, saveToFileName, duration_seconds }) => {
2175
2122
  try {
@@ -2216,14 +2163,32 @@ server.registerTool('generate-sound-effect', {
2216
2163
  return createErrorResponse(error, 'generate-sound-effect');
2217
2164
  }
2218
2165
  });
2219
- server.registerTool('generate-music', {
2220
- title: 'Generate Music',
2221
- description: 'Generate the music. Include background music or song.',
2166
+ server.registerTool('generate-music-or-mv', {
2167
+ title: '创作音乐(Music)或音乐视频(Music Video)',
2168
+ description: '生成音乐,包括MV(music video)、BGM 歌曲',
2222
2169
  inputSchema: {
2223
2170
  prompt: zod_1.z.string().describe('The prompt to generate.'),
2171
+ singerPhoto: zod_1.z
2172
+ .string()
2173
+ .optional()
2174
+ .describe('The singer photo to use. 只有type为music_video的时候才生效,也可以不传,模型会自动生成'),
2175
+ mvOrientation: zod_1.z
2176
+ .enum(['portrait', 'landscape'])
2177
+ .optional()
2178
+ .describe('The orientation of the music video. Defaults to portrait.')
2179
+ .default('portrait'),
2180
+ mvOriginalSong: zod_1.z
2181
+ .string()
2182
+ .optional()
2183
+ .describe('用于生成mv的音乐. 只有type为music_video的时候才生效,也可以不传,模型会自动创作'),
2184
+ mvGenSubtitles: zod_1.z
2185
+ .boolean()
2186
+ .optional()
2187
+ .default(false)
2188
+ .describe('是否生成mv的字幕. 默认为false,只有type为music_video的时候才生效'),
2224
2189
  type: zod_1.z
2225
- .enum(['bgm', 'song'])
2226
- .describe('The type of music. Defaults to background music.')
2190
+ .enum(['bgm', 'song', 'music_video'])
2191
+ .describe('The type of music. Defaults to BGM. ⚠️ 如果 type 是 music_video,会直接生成音频和视频,**不需要**额外专门生成歌曲')
2227
2192
  .default('bgm'),
2228
2193
  model: zod_1.z
2229
2194
  .enum(['doubao', 'minimax'])
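generate-music is renamed generate-music-or-mv and gains a music_video type with MV-specific inputs (singerPhoto, mvOrientation, mvOriginalSong, mvGenSubtitles). A hypothetical call producing a portrait MV (field names from the hunks above; client.callTool is still an illustrative stand-in):

    const mv = await client.callTool('generate-music-or-mv', {
        prompt: '一首关于夏天的轻快民谣',
        type: 'music_video',             // generates audio and video directly, no separate song step
        mvOrientation: 'portrait',
        mvGenSubtitles: true,
        singerPhoto: 'singer.png',       // optional materials file, resolved via getMaterialUri
        duration: 60,
        saveToFileName: 'summer_mv.mp4', // mp4 when type is music_video, otherwise mp3
    });
    // music_video runs asynchronously: the handler can return { taskUrl }, to be passed to wait-for-task-finish.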
@@ -2239,9 +2204,11 @@ server.registerTool('generate-music', {
2239
2204
  .boolean()
2240
2205
  .default(false)
2241
2206
  .describe('Whether to skip copyright check.'),
2242
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
2207
+ saveToFileName: zod_1.z
2208
+ .string()
2209
+ .describe('The filename to save. 如果type是music video,应该是mp4文件,否则应该是mp3文件'),
2243
2210
  },
2244
- }, async ({ prompt, type, model, duration, skipCopyCheck, saveToFileName }, context) => {
2211
+ }, async ({ prompt, singerPhoto, mvOrientation, mvOriginalSong, mvGenSubtitles, type, model, duration, skipCopyCheck, saveToFileName, }, context) => {
2245
2212
  try {
2246
2213
  // 验证session状态
2247
2214
  const currentSession = await validateSession('generate-music');
@@ -2252,24 +2219,54 @@ server.registerTool('generate-music', {
2252
2219
  if (type === 'bgm' && duration > 120) {
2253
2220
  throw new Error('BGM duration must be at most 120 seconds.');
2254
2221
  }
2255
- const finalPrompt = `${prompt.trim()} ${type === 'bgm' ? `纯音乐无歌词,时长${duration}秒` : `时长${duration}秒,使用${model}模型`}`;
2256
- const res = await ai.generateMusic({
2257
- prompt: finalPrompt,
2258
- skipCopyCheck,
2259
- onProgress: async (metaData) => {
2260
- try {
2261
- await sendProgress(context, metaData.Result?.Progress ?? ++progress, metaData.Result?.Progress ? 100 : undefined, JSON.stringify(metaData));
2262
- }
2263
- catch (progressError) {
2264
- console.warn('Failed to send progress update:', progressError);
2265
- }
2266
- },
2267
- });
2222
+ let res;
2223
+ if (type === 'music_video') {
2224
+ const singer_photo = singerPhoto
2225
+ ? await getMaterialUri(currentSession, singerPhoto)
2226
+ : undefined;
2227
+ const original_song = mvOriginalSong
2228
+ ? await getMaterialUri(currentSession, mvOriginalSong)
2229
+ : undefined;
2230
+ res = await ai.generateZeroCutMusicVideo({
2231
+ // prompt: `${prompt.trim()} 音乐时长${duration}秒`,
2232
+ prompt,
2233
+ singerPhoto: singer_photo,
2234
+ orientation: mvOrientation,
2235
+ genSubtitles: mvGenSubtitles,
2236
+ originalSong: original_song,
2237
+ duration,
2238
+ resolution: '720p',
2239
+ onProgress: async (metaData) => {
2240
+ try {
2241
+ await sendProgress(context, metaData.Result?.Progress ?? ++progress, metaData.Result?.Progress ? 100 : undefined, JSON.stringify(metaData));
2242
+ }
2243
+ catch (progressError) {
2244
+ console.warn('Failed to send progress update:', progressError);
2245
+ }
2246
+ },
2247
+ waitForFinish: false,
2248
+ });
2249
+ }
2250
+ else {
2251
+ const finalPrompt = `${prompt.trim()} ${type === 'bgm' ? `纯音乐无歌词,时长${duration}秒` : `时长${duration}秒,使用${model}模型`}`;
2252
+ res = await ai.generateMusic({
2253
+ prompt: finalPrompt,
2254
+ skipCopyCheck,
2255
+ onProgress: async (metaData) => {
2256
+ try {
2257
+ await sendProgress(context, metaData.Result?.Progress ?? ++progress, metaData.Result?.Progress ? 100 : undefined, JSON.stringify(metaData));
2258
+ }
2259
+ catch (progressError) {
2260
+ console.warn('Failed to send progress update:', progressError);
2261
+ }
2262
+ },
2263
+ });
2264
+ }
2268
2265
  if (!res) {
2269
- throw new Error('Failed to generate BGM: no response from AI service');
2266
+ throw new Error('Failed to generate Music: no response from AI service');
2270
2267
  }
2271
2268
  if (res.url) {
2272
- console.log('BGM generated successfully, saving to materials...');
2269
+ console.log('Music generated successfully, saving to materials...');
2273
2270
  const uri = await saveMaterial(currentSession, res.url, validatedFileName);
2274
2271
  const { url, duration: bgmDuration, captions, ...opts } = res;
2275
2272
  // 保存captions到本地
@@ -2307,15 +2304,29 @@ server.registerTool('generate-music', {
2307
2304
  ],
2308
2305
  };
2309
2306
  }
2307
+ else if (res.taskUrl) {
2308
+ return {
2309
+ content: [
2310
+ {
2311
+ type: 'text',
2312
+ text: JSON.stringify({
2313
+ success: true,
2314
+ message: '该视频生成任务正在运行中,它是异步任务,且执行时间较长,你应立即调用工具 wait-for-task-finish 来等待任务结束,如 wait-for-task-finish 工具调用超时,你应立即再次重新调用直到任务结束。',
2315
+ taskUrl: res.taskUrl,
2316
+ }),
2317
+ },
2318
+ ],
2319
+ };
2320
+ }
2310
2321
  else {
2311
- console.warn('BGM generation completed but no URL returned');
2322
+ console.warn('Music generation completed but no URL returned');
2312
2323
  return {
2313
2324
  content: [
2314
2325
  {
2315
2326
  type: 'text',
2316
2327
  text: JSON.stringify({
2317
2328
  success: false,
2318
- error: 'No BGM URL returned from AI service',
2329
+ error: 'No Music URL returned from AI service',
2319
2330
  response: res,
2320
2331
  timestamp: new Date().toISOString(),
2321
2332
  }),
@@ -2325,7 +2336,7 @@ server.registerTool('generate-music', {
2325
2336
  }
2326
2337
  }
2327
2338
  catch (error) {
2328
- return createErrorResponse(error, 'generate-bgm');
2339
+ return createErrorResponse(error, 'generate-music');
2329
2340
  }
2330
2341
  });
2331
2342
  server.registerTool('generate-scene-tts', {
@@ -2352,7 +2363,9 @@ server.registerTool('generate-scene-tts', {
2352
2363
  .string()
2353
2364
  .optional()
2354
2365
  .describe('跳过校验的理由,如果skipConsistencyCheck设为true,必须要传这个参数'),
2355
- saveToFileName: zod_1.z.string().describe('The filename to save.'),
2366
+ saveToFileName: zod_1.z
2367
+ .string()
2368
+ .describe('The filename to save. 应该是mp3文件'),
2356
2369
  speed: zod_1.z
2357
2370
  .number()
2358
2371
  .min(0.5)
@@ -2374,44 +2387,44 @@ server.registerTool('generate-scene-tts', {
2374
2387
  .optional()
2375
2388
  .default(1.0)
2376
2389
  .describe('The volume of the tts.'),
2377
- emotion: zod_1.z
2378
- .enum([
2379
- 'storytelling',
2380
- 'neutral',
2381
- 'excited',
2382
- 'coldness',
2383
- 'angry',
2384
- 'sad',
2385
- 'happy',
2386
- 'surprised',
2387
- 'fear',
2388
- 'depressed',
2389
- 'lovey-dovey',
2390
- 'shy',
2391
- 'comfort',
2392
- 'tension',
2393
- 'tender',
2394
- 'magnetic',
2395
- 'vocal - fry',
2396
- 'ASMR',
2397
- ])
2398
- .optional(),
2399
2390
  voiceID: zod_1.z
2400
2391
  .string()
2401
- .describe(`适合作为视频配音的音色ID,除非用户指定,否则你必须已通过 search_voice 工具检查确定该音色确实是存在的。`),
2392
+ .describe(`适合作为视频配音的音色ID,除非用户指定,否则你必须确保已通过 pick-voice 工具挑选出真实存在的音色。`),
2393
+ context_texts: zod_1.z
2394
+ .array(zod_1.z.string())
2395
+ .default([])
2396
+ .describe(`语音合成的辅助信息,用于模型对话式合成,能更好的体现语音情感
2397
+
2398
+ 可以探索,比如常见示例有以下几种:
2399
+
2400
+ 1. 语速调整
2401
+ - context_texts: ["你可以说慢一点吗?"]
2402
+ 2. 情绪/语气调整
2403
+ - context_texts=["你可以用特别特别痛心的语气说话吗?"]
2404
+ - context_texts=["嗯,你的语气再欢乐一点"]
2405
+ 3. 音量调整
2406
+ - context_texts=["你嗓门再小点。"]
2407
+ 4. 音感调整
2408
+ - context_texts=["你能用骄傲的语气来说话吗?"]
2409
+ `),
2402
2410
  explicit_language: zod_1.z.enum(['zh', 'en', 'ja']).optional().default('zh'),
2403
2411
  },
2404
- }, async ({ text, sceneIndex, storyBoardFile, skipConsistencyCheck, voiceID, saveToFileName, speed, pitch, volume, emotion, explicit_language, }) => {
2412
+ }, async ({ text, sceneIndex, storyBoardFile, skipConsistencyCheck, voiceID, saveToFileName, speed, pitch, volume, context_texts, explicit_language, }) => {
2405
2413
  try {
2406
2414
  // 验证session状态
2407
2415
  const currentSession = await validateSession('generate-scene-tts');
2408
2416
  const validatedFileName = validateFileName(saveToFileName);
2409
2417
  const finalSpeed = speed ?? 1;
2410
2418
  volume = volume ?? 1;
2419
+ const ai = currentSession.ai;
2411
2420
  let scene = null;
2412
2421
  // 校验 text 与 storyboard.json 中场景设定的一致性
2413
2422
  if (sceneIndex && !skipConsistencyCheck) {
2414
2423
  try {
2424
+ const voice = (await ai.listVoices()).find(v => v.id === voiceID);
2425
+ if (!voice) {
2426
+ return createErrorResponse(`Voice ${voiceID} not found in voice-list. Use pick-voice tool to pick an available voice. 若用户坚持要使用该音色,需跳过一致性检查。`, 'generate-scene-tts');
2427
+ }
2415
2428
  const storyBoardPath = (0, node_path_1.resolve)(process.env.ZEROCUT_PROJECT_CWD || process.cwd(), projectLocalDir, storyBoardFile);
2416
2429
  if ((0, node_fs_1.existsSync)(storyBoardPath)) {
2417
2430
  const storyBoardContent = await (0, promises_1.readFile)(storyBoardPath, 'utf8');
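On the TTS side, the fixed emotion enum is dropped in favour of context_texts: free-form coaching sentences that are forwarded to textToSpeechVolc for volcano voices and used to infer an emotion via a JSON-schema completion for minimax voices. A hypothetical call (the voice ID is a placeholder; client.callTool is an illustrative stand-in):

    const tts = await client.callTool('generate-scene-tts', {
        text: '别担心,一切都会好起来的。',
        voiceID: 'zh_female_storyteller',          // placeholder; must be a real ID picked via pick-voice
        context_texts: ['你可以用温柔一点、安慰人的语气说话吗?'],
        explicit_language: 'zh',
        saveToFileName: 'scene5_tts.mp3',          // mp3 file
    });
    // The response now echoes emotion and context_texts alongside uri and durationMs.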
@@ -2462,9 +2475,8 @@ server.registerTool('generate-scene-tts', {
2462
2475
  }
2463
2476
  }
2464
2477
  console.log(`Generating TTS with voice: ${voiceID}, speed: ${finalSpeed}, text: ${text.substring(0, 100)}...`);
2465
- const ai = currentSession.ai;
2466
2478
  if (voiceID.startsWith('BV0')) {
2467
- throw new Error(`BV0* 系列音色已弃用,你必须已通过 search_voice 工具检查确定该音色确实是存在的。`);
2479
+ throw new Error(`BV0* 系列音色已弃用,你必须通过 pick-voice 工具挑选一个真实存在的音色。`);
2468
2480
  }
2469
2481
  const type = voiceID.startsWith('zh_') ||
2470
2482
  voiceID.startsWith('en_') ||
@@ -2474,30 +2486,80 @@ server.registerTool('generate-scene-tts', {
2474
2486
  ? 'volcano'
2475
2487
  : 'minimax';
2476
2488
  let res;
2489
+ let emotion = 'auto';
2477
2490
  if (type === 'volcano') {
2478
- const voice = doubao_voices_full_1.doubaoVoicesFull.find(v => v.voiceID === voiceID);
2479
- if (!voice) {
2480
- return createErrorResponse(`Voice ${voiceID} not found in Doubao voices. Use search-voices tool to find available voices.`, 'generate-scene-tts');
2481
- }
2482
- const emotions = voice.emotions || [];
2483
- if (emotion && !emotions.includes(emotion)) {
2484
- emotion = 'neutral';
2485
- }
2486
- // 修复可能的 emotion 错误情况
2487
- emotion = emotion || 'neutral';
2488
2491
  volume = Math.max(Math.min(volume, 2.0), 0.5);
2489
2492
  res = await ai.textToSpeechVolc({
2490
2493
  text: text.trim(),
2491
2494
  speaker: voiceID,
2492
2495
  speed: Math.floor(100 * (finalSpeed - 1)),
2493
2496
  volume: Math.floor(100 * (volume - 1)),
2494
- emotion,
2497
+ context_texts,
2495
2498
  explicit_language,
2496
2499
  voice_to_caption: explicit_language === 'zh' || explicit_language === 'en',
2497
2500
  });
2498
2501
  }
2499
2502
  else {
2500
- emotion = emotion || 'neutral';
2503
+ emotion = 'neutral';
2504
+ if (context_texts.length > 0) {
2505
+ const prompt = `根据用户输入语音内容和上下文内容,从文字判断语音合理的情感,然后选择以下情感**之一**返回结果:
2506
+
2507
+ "happy", "sad", "angry", "fearful", "disgusted", "surprised", "calm", "fluent", "whisper", "neutral"
2508
+
2509
+ ## 要求
2510
+ 输出 JSON 格式,包含一个 emotion 字段,值为以上情感之一。
2511
+ `;
2512
+ const schema = {
2513
+ name: 'emotion_schema',
2514
+ schema: {
2515
+ type: 'object',
2516
+ properties: {
2517
+ emotion: {
2518
+ type: 'string',
2519
+ enum: [
2520
+ 'neutral',
2521
+ 'happy',
2522
+ 'sad',
2523
+ 'angry',
2524
+ 'fearful',
2525
+ 'disgusted',
2526
+ 'surprised',
2527
+ 'calm',
2528
+ 'fluent',
2529
+ 'whisper',
2530
+ ],
2531
+ description: '用户输入语音的情感',
2532
+ },
2533
+ },
2534
+ required: ['emotion'],
2535
+ },
2536
+ };
2537
+ const payload = {
2538
+ model: 'Doubao-Seed-1.6',
2539
+ messages: [
2540
+ {
2541
+ role: 'system',
2542
+ content: prompt,
2543
+ },
2544
+ {
2545
+ role: 'user',
2546
+ content: `## 语音内容:
2547
+ ${text.trim()}
2548
+
2549
+ ## 语音上下文
2550
+ ${context_texts.join('\n')}
2551
+ `,
2552
+ },
2553
+ ],
2554
+ response_format: {
2555
+ type: 'json_schema',
2556
+ json_schema: schema,
2557
+ },
2558
+ };
2559
+ const completion = await ai.getCompletions(payload);
2560
+ const emotionObj = JSON.parse(completion.choices[0]?.message?.content ?? '{}');
2561
+ emotion = emotionObj.emotion ?? 'neutral';
2562
+ }
2501
2563
  res = await ai.textToSpeech({
2502
2564
  text: text.trim(),
2503
2565
  voiceName: voiceID,
@@ -2538,6 +2600,8 @@ server.registerTool('generate-scene-tts', {
2538
2600
  uri,
2539
2601
  durationMs: Math.floor((duration || 0) * 1000),
2540
2602
  text,
2603
+ emotion,
2604
+ context_texts,
2541
2605
  voiceName: voiceID,
2542
2606
  speed: finalSpeed,
2543
2607
  timestamp: new Date().toISOString(),
@@ -2567,7 +2631,7 @@ server.registerTool('generate-scene-tts', {
2567
2631
  type: 'text',
2568
2632
  text: JSON.stringify({
2569
2633
  success: false,
2570
- error: 'No TTS URL returned from AI service. You should use search-voices tool to find available voices.',
2634
+ error: 'No TTS URL returned from AI service. You should use pick-voice tool to pick an available voice.',
2571
2635
  response: res,
2572
2636
  timestamp: new Date().toISOString(),
2573
2637
  }),
@@ -2800,111 +2864,71 @@ server.registerTool('get-schema', {
2800
2864
  return createErrorResponse(error, 'get-schema');
2801
2865
  }
2802
2866
  });
2803
- server.registerTool('search-voices', {
2804
- title: 'Search Voices',
2805
- description: 'Search voices from doubao_voices_full based on scenes, emotions, languages, and gender.',
2867
+ server.registerTool('pick-voice', {
2868
+ title: 'Pick Voice',
2869
+ description: '根据用户需求,选择尽可能符合要求的语音,在合适的情况下,优先采用 volcano_tts_2 类型的语音',
2806
2870
  inputSchema: {
2807
- scenes: zod_1.z
2808
- .array(zod_1.z.enum([
2809
- 'asmr',
2810
- 'audiobook',
2811
- 'customer_service',
2812
- 'dialect_fun',
2813
- 'dialogue',
2814
- 'kids_content',
2815
- 'news_explainer',
2816
- 'podcast_voiceover',
2817
- 'product_ad',
2818
- 'promo_trailer',
2819
- 'roleplay_drama',
2820
- 'story_narration',
2821
- 'storytelling',
2822
- 'tutorial',
2823
- ]))
2824
- .optional()
2825
- .describe('Filter by scenes (e.g., ["product_ad", "tutorial"]). If not provided, no scene filtering is applied.'),
2826
- emotions: zod_1.z
2827
- .array(zod_1.z.enum([
2828
- 'ASMR',
2829
- 'affectionate',
2830
- 'angry',
2831
- 'authoritative',
2832
- 'chat',
2833
- 'coldness',
2834
- 'depressed',
2835
- 'excited',
2836
- 'fear',
2837
- 'happy',
2838
- 'hate',
2839
- 'neutral',
2840
- 'sad',
2841
- 'surprised',
2842
- 'warm',
2843
- ]))
2871
+ prompt: zod_1.z
2872
+ .string()
2873
+ .describe('用户需求描述,例如:一个有亲和力的,适合给孩子讲故事的语音'),
2874
+ custom_design: zod_1.z
2875
+ .boolean()
2844
2876
  .optional()
2845
- .describe('Filter by emotions (e.g., ["happy", "neutral"]). If not provided, no emotion filtering is applied.'),
2846
- languages: zod_1.z
2847
- .array(zod_1.z.string())
2877
+ .describe('是否自定义语音,由于要消耗较多积分,因此**只有用户明确要求自己设计语音**,才将该参数设为true'),
2878
+ custom_design_preview: zod_1.z
2879
+ .string()
2848
2880
  .optional()
2849
- .describe('Filter by languages (e.g., ["zh", "en-US"]). If not provided, no language filtering is applied.'),
2850
- gender: zod_1.z
2851
- .enum(['male', 'female'])
2881
+ .describe('用户自定义语音的预览文本,用于展示自定义语音的效果,只有 custom_design true 时才需要'),
2882
+ custom_design_save_to: zod_1.z
2883
+ .string()
2852
2884
  .optional()
2853
- .describe('Filter by gender (male or female). If not provided, no gender filtering is applied.'),
2885
+ .describe('自定义语音的保存路径,例如:custom_voice.mp3 custom_voice_{id}.mp3'),
2854
2886
  },
2855
- }, async ({ scenes, emotions, languages, gender }) => {
2887
+ }, async ({ prompt, custom_design, custom_design_preview, custom_design_save_to, }) => {
2856
2888
  try {
2857
- let filteredVoices = [...doubao_voices_full_1.doubaoVoicesFull];
2858
- // Filter by scenes
2859
- if (scenes && scenes.length > 0) {
2860
- filteredVoices = filteredVoices.filter(voice => voice.scenes &&
2861
- voice.scenes.some(scene => scenes.includes(scene)));
2862
- }
2863
- // Filter by emotions
2864
- if (emotions && emotions.length > 0) {
2865
- filteredVoices = filteredVoices.filter(voice => {
2866
- // If emotions includes 'neutral', also include voices without emotions field
2867
- if (emotions.includes('neutral') && !voice.emotions) {
2868
- return true;
2869
- }
2870
- return (voice.emotions &&
2871
- voice.emotions.some(emotion => emotions.includes(emotion)));
2889
+ // 验证session状态
2890
+ const currentSession = await validateSession('pick-voice');
2891
+ const ai = currentSession.ai;
2892
+ if (custom_design) {
2893
+ if (!custom_design_preview) {
2894
+ throw new Error('custom_design_preview is required when custom_design is true');
2895
+ }
2896
+ const data = await currentSession.ai.voiceDesign({
2897
+ prompt,
2898
+ previewText: custom_design_preview,
2872
2899
  });
2873
- }
2874
- // Filter by languages
2875
- if (languages && languages.length > 0) {
2876
- filteredVoices = filteredVoices.filter(voice => voice.languages &&
2877
- voice.languages.some(lang => languages.includes(lang)));
2878
- }
2879
- // Filter by gender
2880
- if (gender) {
2881
- filteredVoices = filteredVoices.filter(voice => {
2882
- const voiceId = voice.voiceID.toLowerCase();
2883
- if (gender === 'male') {
2884
- return voiceId.includes('_male_');
2885
- }
2886
- else if (gender === 'female') {
2887
- return voiceId.includes('_female_');
2900
+ if (data.voice_id) {
2901
+ const trial_audio = data.trial_audio;
2902
+ let uri = '';
2903
+ if (trial_audio) {
2904
+ uri = await saveMaterial(currentSession, trial_audio, custom_design_save_to || `custom_voice_${data.voice_id}.mp3`);
2888
2905
  }
2889
- return true;
2890
- });
2906
+ return {
2907
+ content: [
2908
+ {
2909
+ type: 'text',
2910
+ text: JSON.stringify({
2911
+ success: true,
2912
+ ...data,
2913
+ uri,
2914
+ timestamp: new Date().toISOString(),
2915
+ }),
2916
+ },
2917
+ ],
2918
+ };
2919
+ }
2920
+ else {
2921
+ throw new Error(`Voice design failed, ${JSON.stringify(data)}`);
2922
+ }
2891
2923
  }
2924
+ const data = await ai.pickVoice({ prompt });
2892
2925
  return {
2893
2926
  content: [
2894
2927
  {
2895
2928
  type: 'text',
2896
2929
  text: JSON.stringify({
2897
2930
  success: true,
2898
- data: {
2899
- totalCount: filteredVoices.length,
2900
- voices: filteredVoices,
2901
- filters: {
2902
- scenes: scenes || null,
2903
- emotions: emotions || null,
2904
- languages: languages || null,
2905
- gender: gender || null,
2906
- },
2907
- },
2931
+ ...data,
2908
2932
  timestamp: new Date().toISOString(),
2909
2933
  }),
2910
2934
  },
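search-voices and voice-design are collapsed into a single pick-voice tool: by default it delegates to ai.pickVoice with a natural-language description, and it only runs voiceDesign when custom_design is explicitly true (which costs extra credits). Two hypothetical calls (client.callTool is an illustrative stand-in):

    // Pick an existing voice from a description.
    const picked = await client.callTool('pick-voice', {
        prompt: '一个有亲和力的,适合给孩子讲故事的女声',
    });

    // Design a brand-new voice, only when the user explicitly asks for it.
    const designed = await client.callTool('pick-voice', {
        prompt: '低沉磁性的男声,适合悬疑片旁白',
        custom_design: true,
        custom_design_preview: '夜色像墨一样漫过街角。',
        custom_design_save_to: 'custom_voice.mp3',  // the trial audio is saved to materials
    });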
@@ -2912,52 +2936,7 @@ server.registerTool('search-voices', {
2912
2936
  };
2913
2937
  }
2914
2938
  catch (error) {
2915
- return createErrorResponse(error, 'search-voices');
2916
- }
2917
- });
2918
- server.registerTool('voice-design', {
2919
- title: 'Voice Design',
2920
- description: 'Design a voice based on a prompt. The voice will be designed based on the prompt and preview text.',
2921
- inputSchema: {
2922
- prompt: zod_1.z.string().describe('The prompt to design the voice.'),
2923
- previewText: zod_1.z.string().describe('The preview text to design the voice.'),
2924
- saveToFileName: zod_1.z
2925
- .string()
2926
- .describe('The file name to save the designed voice.'),
2927
- },
2928
- }, async ({ prompt, previewText, saveToFileName }) => {
2929
- try {
2930
- const currentSession = await validateSession('voice-design');
2931
- const data = await currentSession.ai.voiceDesign({
2932
- prompt,
2933
- previewText,
2934
- });
2935
- if (data.voice_id) {
2936
- const trial_audio = data.trial_audio;
2937
- let uri = '';
2938
- if (trial_audio) {
2939
- uri = await saveMaterial(currentSession, trial_audio, saveToFileName);
2940
- }
2941
- return {
2942
- content: [
2943
- {
2944
- type: 'text',
2945
- text: JSON.stringify({
2946
- success: true,
2947
- ...data,
2948
- uri,
2949
- timestamp: new Date().toISOString(),
2950
- }),
2951
- },
2952
- ],
2953
- };
2954
- }
2955
- else {
2956
- throw new Error(`Voice design failed, ${JSON.stringify(data)}`);
2957
- }
2958
- }
2959
- catch (error) {
2960
- return createErrorResponse(error, 'voice-design');
2939
+ return createErrorResponse(error, 'pick-voice');
2961
2940
  }
2962
2941
  });
2963
2942
  server.registerTool('media-analyzer', {
@@ -3173,206 +3152,6 @@ server.registerTool('media-analyzer', {
3173
3152
  return createErrorResponse(error, 'media-analyzer');
3174
3153
  }
3175
3154
  });
3176
- // server.registerTool(
3177
- // 'image-aligner',
3178
- // {
3179
- // title: 'Image Aligner',
3180
- // description:
3181
- // 'Analyze image quality and alignment with prompt using AI Image Quality Inspector.',
3182
- // inputSchema: {
3183
- // imageFileName: z
3184
- // .string()
3185
- // .describe('The image file name in materials directory to analyze.'),
3186
- // sceneIndex: z.number().min(1).describe('场景索引,从1开始的下标'),
3187
- // storyBoardFile: z
3188
- // .string()
3189
- // .optional()
3190
- // .default('storyboard.json')
3191
- // .describe('故事板文件路径'),
3192
- // imagePrompt: z
3193
- // .string()
3194
- // .optional()
3195
- // .describe('可选的图片提示词,如果提供则覆盖storyboard中的提示词'),
3196
- // customPrompt: z
3197
- // .string()
3198
- // .optional()
3199
- // .describe('可选的额外用户要求,用于补充图片质量评估的特定需求'),
3200
- // },
3201
- // },
3202
- // async ({
3203
- // imageFileName,
3204
- // sceneIndex,
3205
- // storyBoardFile = 'storyboard.json',
3206
- // imagePrompt,
3207
- // customPrompt,
3208
- // }) => {
3209
- // try {
3210
- // const currentSession = await validateSession('image-aligner');
3211
- // // 验证图片文件
3212
- // validateImageFile(imageFileName);
3213
- // // 获取图片 URL
3214
- // const imageUrl = getMaterialUri(currentSession, imageFileName);
3215
- // // 确定要使用的提示词
3216
- // let finalPrompt = imagePrompt;
3217
- // // 如果没有提供imagePrompt,则从storyboard中获取
3218
- // if (!imagePrompt) {
3219
- // try {
3220
- // const storyBoardPath = resolve(
3221
- // process.env.ZEROCUT_PROJECT_CWD || process.cwd(),
3222
- // projectLocalDir,
3223
- // storyBoardFile
3224
- // );
3225
- // if (existsSync(storyBoardPath)) {
3226
- // const storyBoardContent = await readFile(storyBoardPath, 'utf8');
3227
- // const storyBoard = JSON.parse(storyBoardContent);
3228
- // if (storyBoard.scenes && Array.isArray(storyBoard.scenes)) {
3229
- // const scene = storyBoard.scenes[sceneIndex - 1]; // sceneIndex 从1开始,数组从0开始
3230
- // if (scene) {
3231
- // // 根据文件名判断优先级:若end_frame存在且imageFileName包含"_end"则优先取end_frame,否则取start_frame
3232
- // if (scene.end_frame && imageFileName.includes('_end')) {
3233
- // finalPrompt = scene.end_frame;
3234
- // } else {
3235
- // finalPrompt = scene.start_frame || scene.end_frame;
3236
- // }
3237
- // if (!finalPrompt) {
3238
- // return createErrorResponse(
3239
- // `场景 ${sceneIndex} 中未找到 start_frame 或 end_frame 提示词`,
3240
- // 'image-aligner'
3241
- // );
3242
- // }
3243
- // } else {
3244
- // return createErrorResponse(
3245
- // `在 ${storyBoardFile} 中未找到场景索引 ${sceneIndex}`,
3246
- // 'image-aligner'
3247
- // );
3248
- // }
3249
- // } else {
3250
- // return createErrorResponse(
3251
- // `${storyBoardFile} 文件格式不正确,缺少 scenes 数组`,
3252
- // 'image-aligner'
3253
- // );
3254
- // }
3255
- // } else {
3256
- // return createErrorResponse(
3257
- // `故事板文件不存在: ${storyBoardPath}`,
3258
- // 'image-aligner'
3259
- // );
3260
- // }
3261
- // } catch (error) {
3262
- // return createErrorResponse(
3263
- // `读取或解析故事板文件失败: ${error}`,
3264
- // 'image-aligner'
3265
- // );
3266
- // }
3267
- // }
3268
- // // 如果仍然没有提示词,返回错误
3269
- // if (!finalPrompt) {
3270
- // return createErrorResponse(
3271
- // '未提供 imagePrompt 且无法从故事板中获取提示词',
3272
- // 'image-aligner'
3273
- // );
3274
- // }
3275
- // // 读取图片质量检查指南
3276
- // const alignerGuidelinePath = resolve(
3277
- // __dirname,
3278
- // './prompts/reasonings/image_aligner.md'
3279
- // );
3280
- // let alignerGuideline = '';
3281
- // try {
3282
- // alignerGuideline = await readFile(alignerGuidelinePath, 'utf8');
3283
- // } catch (error) {
3284
- // console.warn('无法读取图片质量检查指南:', error);
3285
- // alignerGuideline =
3286
- // '请对图片质量进行评估,包括构图、色彩、清晰度等方面。';
3287
- // }
3288
- // // 构建系统提示
3289
- // const systemPrompt = `你是一个专业的AI图片质量检查员。请根据以下指南对图片进行评估:
3290
- // ${alignerGuideline}
3291
- // 请严格按照指南中的JSON格式返回评估结果。`;
3292
- // // 构建用户提示
3293
- // const userPrompt = `请对这张图片进行质量评估。
3294
- // 原始提示词:${finalPrompt}${
3295
- // customPrompt
3296
- // ? `
3297
- // 额外要求:${customPrompt}`
3298
- // : ''
3299
- // }
3300
- // 请按照指南要求,返回包含评分、问题列表和优化建议的JSON格式结果。`;
3301
- // // 调用AI模型进行图片质量评估
3302
- // const ai = currentSession.ai;
3303
- // const completion = await ai.getCompletions({
3304
- // model: 'Doubao-Seed-1.6',
3305
- // messages: [
3306
- // {
3307
- // role: 'system',
3308
- // content: systemPrompt,
3309
- // },
3310
- // {
3311
- // role: 'user',
3312
- // content: [
3313
- // {
3314
- // type: 'image_url',
3315
- // image_url: {
3316
- // url: imageUrl,
3317
- // },
3318
- // },
3319
- // {
3320
- // type: 'text',
3321
- // text: userPrompt,
3322
- // },
3323
- // ],
3324
- // },
3325
- // ],
3326
- // });
3327
- // const result = completion.choices[0]?.message?.content;
3328
- // if (!result) {
3329
- // throw new Error('No response from AI model');
3330
- // }
3331
- // // 解析AI响应
3332
- // let alignmentResult;
3333
- // try {
3334
- // // 尝试从响应中提取JSON
3335
- // const jsonMatch =
3336
- // result.match(/```json\s*([\s\S]*?)\s*```/) ||
3337
- // result.match(/\{[\s\S]*\}/);
3338
- // if (jsonMatch) {
3339
- // alignmentResult = JSON.parse(jsonMatch[1] || jsonMatch[0]);
3340
- // } else {
3341
- // // 如果没有找到JSON格式,尝试直接解析整个响应
3342
- // alignmentResult = JSON.parse(result);
3343
- // }
3344
- // } catch (error) {
3345
- // // 如果解析失败,返回原始响应
3346
- // alignmentResult = {
3347
- // error: 'JSON解析失败',
3348
- // raw_response: result,
3349
- // };
3350
- // }
3351
- // return {
3352
- // content: [
3353
- // {
3354
- // type: 'text',
3355
- // text: JSON.stringify({
3356
- // success: true,
3357
- // imageFileName,
3358
- // sceneIndex,
3359
- // storyBoardFile,
3360
- // imagePrompt: finalPrompt,
3361
- // customPrompt,
3362
- // promptSource: imagePrompt ? 'manual_override' : 'storyboard',
3363
- // analysis: alignmentResult,
3364
- // imageUrl,
3365
- // nextActionSuggest:
3366
- // '可根据分析结果调整提示词,修改storyboard后,重新生成图片。',
3367
- // }),
3368
- // },
3369
- // ],
3370
- // };
3371
- // } catch (error) {
3372
- // return createErrorResponse(error, 'image-aligner');
3373
- // }
3374
- // }
3375
- // );
3376
3155
  server.registerTool('audio-video-sync', {
3377
3156
  title: 'Audio Video Sync',
3378
3157
  description: 'Generate audio-video-synced video by matching video with audio. 还可以对口型。',
@@ -3399,7 +3178,7 @@ server.registerTool('audio-video-sync', {
3399
3178
  .describe('The reference photo face for lip sync.'),
3400
3179
  saveToFileName: zod_1.z
3401
3180
  .string()
3402
- .describe('The filename to save the audio-video-synced video.'),
3181
+ .describe('The filename to save the audio-video-synced video. 应该是mp4文件'),
3403
3182
  },
3404
3183
  }, async ({ lipSync, lipSyncType, lipSyncPadAudio, videoFileName, audioFileName, audioInMs, refPhotoFileName, saveToFileName, }, context) => {
3405
3184
  try {
@@ -3527,7 +3306,7 @@ server.registerTool('audio-video-sync', {
3527
3306
  }
3528
3307
  });
3529
3308
  server.registerTool('generate-video-by-ref', {
3530
- title: 'Generate Video by Reference Images',
3309
+ title: '参考生视频(包含文生视频)工具',
3531
3310
  description: 'Generate video using reference images. Supports sora2, sora2-pro (1 image max), veo3.1, veo3.1-pro (1 image max), lite and pro (4 images max), vidu (7 images max). Can work without reference images (0 images).',
3532
3311
  inputSchema: {
3533
3312
  prompt: zod_1.z
@@ -3577,6 +3356,7 @@ server.registerTool('generate-video-by-ref', {
3577
3356
  'veo3.1',
3578
3357
  'veo3.1-pro',
3579
3358
  'vidu',
3359
+ 'vidu-uc',
3580
3360
  'pixv',
3581
3361
  ])
3582
3362
  .default('lite')
@@ -3588,7 +3368,7 @@ server.registerTool('generate-video-by-ref', {
3588
3368
  .describe('Whether to mute the video (effective for sora2 and veo3.1).'),
3589
3369
  saveToFileName: zod_1.z
3590
3370
  .string()
3591
- .describe('The filename to save the generated video.'),
3371
+ .describe('The filename to save the generated video. 应该是mp4文件'),
3592
3372
  sceneIndex: zod_1.z
3593
3373
  .number()
3594
3374
  .min(1)
@@ -3794,7 +3574,54 @@ server.registerTool('generate-video-by-ref', {
3794
3574
  if (promptPrefix) {
3795
3575
  promptPrefix += '\n';
3796
3576
  }
3797
- const finalPrompt = `${promptPrefix}${prompt}`;
3577
+ let finalPrompt = `${promptPrefix}${prompt}`;
3578
+ if (type === 'pixv') {
3579
+ const completion = await ai.getCompletions({
3580
+ model: 'Doubao-Seed-1.6',
3581
+ messages: [
3582
+ {
3583
+ role: 'system',
3584
+ content: `你根据主体信息,优化用户指令,使描述中的内容正确引用主体名称。
3585
+
3586
+ 具体方式为,将用户指令中引用主体信息中主体名称的部分,用 “@主体名” 的形式替代,注意它和前后内容之间也需要用**空格**分隔。
3587
+
3588
+ ## 例子
3589
+
3590
+ ### 输入:
3591
+
3592
+ 主体信息
3593
+ [
3594
+ {"type": "subject", "fileName": "dog.png", "ref_name": "狗"},
3595
+ {"type": "background", "fileName": "room.png", "ref_name": "房间"}
3596
+ ]
3597
+
3598
+ 用户指令
3599
+ 一只狗在房间里玩耍
3600
+
3601
+ ### 输出:
3602
+ 一只 @狗 在 @房间 里玩耍
3603
+
3604
+ ---
3605
+
3606
+ ## 要求与约束
3607
+
3608
+ 只输出替换主体名后的用户指令,不要输出其他任何额外内容
3609
+ `,
3610
+ },
3611
+ {
3612
+ role: 'user',
3613
+ content: `## 主体信息
3614
+
3615
+ ${JSON.stringify(referenceImages)}
3616
+
3617
+ ## 用户指令
3618
+
3619
+ ${prompt.trim()}`,
3620
+ },
3621
+ ],
3622
+ });
3623
+ finalPrompt = completion.choices[0]?.message?.content.trim();
3624
+ }
3798
3625
  // 调用 referencesToVideo 函数
3799
3626
  const result = await currentSession.ai.referencesToVideo({
3800
3627
  prompt: finalPrompt,
@@ -3823,7 +3650,7 @@ server.registerTool('generate-video-by-ref', {
3823
3650
  type: 'text',
3824
3651
  text: JSON.stringify({
3825
3652
  success: true,
3826
- message: '该视频生成任务正在运行中,它是异步任务,且执行时间较长,你应立即调用工具 wait-for-task-finish 来等待任务结束,如该工具调用超时,你应立即再次重新调用直到任务结束。',
3653
+ message: '该视频生成任务正在运行中,它是异步任务,且执行时间较长,你应立即调用工具 wait-for-task-finish 来等待任务结束,如 wait-for-task-finish 工具调用超时,你应立即再次重新调用直到任务结束。',
3827
3654
  taskUrl: result.taskUrl,
3828
3655
  }),
3829
3656
  },
@@ -3898,7 +3725,7 @@ server.registerTool('extend-video-duration', {
3898
3725
  .describe('Optional end frame image file name in materials directory to guide the video extension.'),
3899
3726
  saveToFileName: zod_1.z
3900
3727
  .string()
3901
- .describe('The filename to save the extended video.'),
3728
+ .describe('The filename to save the extended video. 应该是mp4文件'),
3902
3729
  },
3903
3730
  }, async ({ videoFileName, duration, resolution, prompt, type = 'turbo', endFrame, saveToFileName, }, context) => {
3904
3731
  try {
@@ -3994,11 +3821,11 @@ server.registerTool('use-template', {
3994
3821
  .describe('Optional materials to use in the template.'),
3995
3822
  saveToFileName: zod_1.z
3996
3823
  .string()
3997
- .describe('The filename to save the generated material.'),
3824
+ .describe('The filename to save the generated material. 根据用户具体需求,应该是mp4或png文件'),
3998
3825
  },
3999
3826
  }, async ({ user_request, saveToFileName, materials }) => {
4000
3827
  try {
4001
- const currentSession = await validateSession('generate-video-by-template');
3828
+ const currentSession = await validateSession('use-template');
4002
3829
  const ai = currentSession.ai;
4003
3830
  const data = await ai.listTemplates('all');
4004
3831
  const templates = data.map(item => ({
@@ -4013,7 +3840,7 @@ server.registerTool('use-template', {
4013
3840
  messages: [
4014
3841
  {
4015
3842
  role: 'system',
4016
- content: `你根据用户需求,从以下模板中选择一个匹配的模板,返回模板ID:\n\n${JSON.stringify(templates)}\n\n**约束**:只输出模板ID,不需要其他解释,如果没有匹配的模版,输出"无匹配模版"`,
3843
+ content: `你根据用户需求,分析需求与模板描述(description)和触发器(trigger)的匹配程度,从以下模板中选择一个匹配的模板,返回模板ID:\n\n${JSON.stringify(templates)}\n\n**约束**:只输出模板ID,不需要其他解释,如果没有匹配的模版,输出"无匹配模版"`,
4017
3844
  },
4018
3845
  {
4019
3846
  role: 'user',
@@ -4256,6 +4083,69 @@ server.registerTool('search-context', {
4256
4083
  return createErrorResponse(error, 'search-context');
4257
4084
  }
4258
4085
  });
4086
+ // 列出项目下的所有文件
4087
+ server.registerTool('list-project-files', {
4088
+ title: 'List Project Files',
4089
+ description: 'List all files in the materials directory.',
4090
+ inputSchema: {},
4091
+ }, async () => {
4092
+ try {
4093
+ // 验证session状态
4094
+ const currentSession = await validateSession('list-project-files');
4095
+ console.log('Listing project files...');
4096
+ const terminal = currentSession.terminal;
4097
+ if (!terminal) {
4098
+ throw new Error('Terminal not available in current session');
4099
+ }
4100
+ let cwd;
4101
+ try {
4102
+ cwd = await terminal.getCwd();
4103
+ }
4104
+ catch (cwdError) {
4105
+ console.error('Failed to get current working directory:', cwdError);
4106
+ throw new Error('Failed to get current working directory');
4107
+ }
4108
+ console.log(`Current working directory: ${cwd}`);
4109
+ // 安全地列出各目录文件,失败时返回空数组
4110
+ const listFilesWithFallback = async (path, dirName) => {
4111
+ try {
4112
+ const files = await currentSession.files.listFiles(path);
4113
+ console.log(`Found ${files?.length || 0} files in ${dirName}`);
4114
+ return files || [];
4115
+ }
4116
+ catch (error) {
4117
+ console.warn(`Failed to list files in ${dirName} (${path}):`, error);
4118
+ return [];
4119
+ }
4120
+ };
4121
+ const [rootFiles, materialsFiles, outputFiles] = await Promise.all([
4122
+ listFilesWithFallback(cwd, 'root'),
4123
+ listFilesWithFallback(`${cwd}/materials`, 'materials'),
4124
+ listFilesWithFallback(`${cwd}/output`, 'output'),
4125
+ ]);
4126
+ const result = {
4127
+ success: true,
4128
+ cwd,
4129
+ root: rootFiles,
4130
+ materials: materialsFiles,
4131
+ output: outputFiles,
4132
+ totalFiles: rootFiles.length + materialsFiles.length + outputFiles.length,
4133
+ timestamp: new Date().toISOString(),
4134
+ };
4135
+ console.log(`Total files found: ${result.totalFiles}`);
4136
+ return {
4137
+ content: [
4138
+ {
4139
+ type: 'text',
4140
+ text: JSON.stringify(result),
4141
+ },
4142
+ ],
4143
+ };
4144
+ }
4145
+ catch (error) {
4146
+ return createErrorResponse(error, 'list-project-files');
4147
+ }
4148
+ });
4259
4149
  server.registerTool('build-capcat-draft', {
4260
4150
  title: 'Build CapCut Draft',
4261
4151
  description: 'Read draft_content.json file, parse JSON and generate URIs for all assets in timeline tracks, then output the processed JSON string.',