@aj-archipelago/cortex 1.4.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/config.js +25 -4
  2. package/helper-apps/cortex-autogen2/agents.py +19 -6
  3. package/helper-apps/cortex-autogen2/services/azure_ai_search.py +115 -0
  4. package/helper-apps/cortex-autogen2/services/run_analyzer.py +594 -0
  5. package/helper-apps/cortex-autogen2/task_processor.py +98 -2
  6. package/lib/crypto.js +1 -0
  7. package/lib/entityConstants.js +12 -35
  8. package/lib/keyValueStorageClient.js +53 -1
  9. package/lib/util.js +33 -6
  10. package/package.json +2 -1
  11. package/pathways/system/entity/memory/sys_memory_manager.js +1 -0
  12. package/pathways/system/entity/memory/sys_memory_process.js +4 -3
  13. package/pathways/system/entity/memory/sys_memory_update.js +4 -3
  14. package/pathways/system/entity/memory/sys_read_memory.js +12 -4
  15. package/pathways/system/entity/memory/sys_save_memory.js +16 -9
  16. package/pathways/system/entity/memory/sys_search_memory.js +5 -4
  17. package/pathways/system/entity/sys_entity_agent.js +2 -1
  18. package/pathways/system/entity/tools/sys_tool_bing_search.js +2 -2
  19. package/pathways/system/entity/tools/sys_tool_bing_search_afagent.js +1 -2
  20. package/pathways/system/entity/tools/sys_tool_callmodel.js +2 -1
  21. package/pathways/system/entity/tools/sys_tool_coding.js +1 -2
  22. package/pathways/system/entity/tools/sys_tool_grok_x_search.js +1 -1
  23. package/pathways/system/entity/tools/sys_tool_image.js +2 -1
  24. package/pathways/system/entity/tools/sys_tool_image_gemini.js +3 -3
  25. package/pathways/system/entity/tools/sys_tool_mermaid.js +187 -38
  26. package/pathways/system/entity/tools/sys_tool_reasoning.js +2 -0
  27. package/pathways/system/entity/tools/sys_tool_verify.js +1 -1
  28. package/pathways/transcribe_gemini.js +3 -2
  29. package/server/graphql.js +1 -1
  30. package/server/pathwayResolver.js +8 -7
  31. package/server/plugins/veoVideoPlugin.js +29 -1
  32. package/testrun.log +35371 -0
  33. package/tests/integration/graphql/async/stream/vendors/openai_streaming.test.js +1 -3
  34. package/tests/unit/core/crypto.test.js +65 -0
  35. package/tests/unit/core/doubleEncryptionStorageClient.test.js +262 -0
@@ -23,9 +23,9 @@ export default {
23
23
  enableDuplicateRequests: false,
24
24
  timeout: 600,
25
25
  // Tool disabled for now
26
- /*
27
26
  toolDefinition: [{
28
27
  type: "function",
28
+ enabled: false,
29
29
  icon: "💻",
30
30
  function: {
31
31
  name: "Code",
@@ -46,7 +46,6 @@ export default {
46
46
  }
47
47
  }
48
48
  }],
49
- */
50
49
 
51
50
  executePathway: async ({args, runAllPrompts, resolver}) => {
52
51
  if (args.detailedInstructions) {
@@ -21,7 +21,7 @@ export default {
21
21
  icon: "🔍",
22
22
  function: {
23
23
  name: "SearchXPlatform",
24
- description: "This tool allows you to search the X platform (formerly Twitter) for current posts, discussions, and real-time information. Use this for finding recent social media content, trending topics, public opinions, and real-time updates. Always call this tool in parallel rather than serially if you have several searches to do as it will be faster.",
24
+ description: "This tool allows you to search the X platform (formerly Twitter) for current posts, discussions, and real-time information. Use this for finding recent social media content, trending topics, public opinions, and real-time updates. This tool can be slow - 10-60s per search, so only use it when you really want X platform information. Always call this tool in parallel rather than serially if you have several searches to do as it will be faster.",
25
25
  parameters: {
26
26
  type: "object",
27
27
  properties: {
@@ -12,6 +12,7 @@ export default {
12
12
  timeout: 300,
13
13
  toolDefinition: [{
14
14
  type: "function",
15
+ enabled: false,
15
16
  icon: "🎨",
16
17
  function: {
17
18
  name: "GenerateImage",
@@ -37,7 +38,7 @@ export default {
37
38
  icon: "🔄",
38
39
  function: {
39
40
  name: "ModifyImage",
40
- description: "Use when asked to modify, transform, or edit an existing image. This tool can apply various transformations like style changes, artistic effects, or specific modifications to an image that has been previously uploaded or generated. It takes up to two input images as a reference and outputs a new image based on the instructions. This tool does not display the image to the user - you need to do that with markdown in your response.",
41
+ description: "Use when asked to modify, transform, or edit an existing image. This tool can apply various transformations like style changes, artistic effects, or specific modifications to an image that has been previously uploaded or generated. It takes up to three input images as a reference and outputs a new image based on the instructions. This tool does not display the image to the user - you need to do that with markdown in your response.",
41
42
  parameters: {
42
43
  type: "object",
43
44
  properties: {
@@ -11,9 +11,9 @@ export default {
11
11
  model: 'oai-gpt4o',
12
12
  },
13
13
  timeout: 300,
14
- /*
15
14
  toolDefinition: [{
16
15
  type: "function",
16
+ enabled: true,
17
17
  icon: "🎨",
18
18
  function: {
19
19
  name: "GenerateImage",
@@ -30,12 +30,13 @@ export default {
30
30
  description: "A user-friendly message that describes what you're doing with this tool"
31
31
  }
32
32
  },
33
- required: ["detailedInstructions", "renderText", "userMessage"]
33
+ required: ["detailedInstructions", "userMessage"]
34
34
  }
35
35
  }
36
36
  },
37
37
  {
38
38
  type: "function",
39
+ enabled: false,
39
40
  icon: "🔄",
40
41
  function: {
41
42
  name: "ModifyImage",
@@ -68,7 +69,6 @@ export default {
68
69
  }
69
70
  }
70
71
  }],
71
- */
72
72
  executePathway: async ({args, runAllPrompts, resolver}) => {
73
73
  const pathwayResolver = resolver;
74
74
 
@@ -2,49 +2,76 @@
2
2
  // Entity tool that provides advanced mermaid charting capabilities
3
3
 
4
4
  import { Prompt } from '../../../../server/prompt.js';
5
+ import { validateMermaid, isValidMermaid, getDiagramType } from '@aj-archipelago/merval';
6
+ import logger from '../../../../lib/logger.js';
5
7
 
6
- export default {
7
- prompt:
8
- [
9
- new Prompt({ messages: [
10
- {"role": "system", "content":`You are the part of an AI entity named {{aiName}} that creates mermaid charts. Follow the user's detailed instructions and create a mermaid chart that meets the user's needs.
11
-
12
- Mermaid Charts Instructions:
13
-
14
- You are using Mermaid 11.6 with the xychart-beta extension, so you can write all standard Mermaid chart types in a markdown block (flowcharts, sequence diagrams, etc.) as well as bar charts and line charts using the xychart-beta extension.
15
-
16
- Here is some example code of the xychart-beta extension that combines both bar and line functions:
17
-
18
- \`\`\`mermaid
19
- xychart-beta
20
- title "Sales Revenue"
21
- x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
22
- y-axis "Revenue (in $)" 4000 --> 11000
23
- bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
24
- line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
25
- \`\`\`
26
-
27
- Mermaid is very sensitive to syntax errors, so make sure you check your chart definitions before finalizing your response. Some things to check for:
8
+ // Function to validate mermaid syntax using our lightweight validator
9
+ function validateMermaidSyntax(mermaidCode) {
10
+ try {
11
+ // Ensure mermaidCode is a string
12
+ const codeStr = typeof mermaidCode === 'string' ? mermaidCode : String(mermaidCode);
13
+
14
+ // Extract mermaid code from markdown block if present
15
+ const mermaidMatch = codeStr.match(/```mermaid\s*([\s\S]*?)\s*```/);
16
+ const codeToValidate = mermaidMatch ? mermaidMatch[1].trim() : codeStr.trim();
17
+
18
+ if (!codeToValidate) {
19
+ return { isValid: false, error: "Empty mermaid code", diagramType: 'unknown' };
20
+ }
21
+
22
+ // Use our lightweight validator
23
+ const result = validateMermaid(codeToValidate);
24
+
25
+ return {
26
+ isValid: result.isValid,
27
+ error: result.isValid ? null : result.errors,
28
+ diagramType: result.diagramType,
29
+ ast: result.ast
30
+ };
31
+ } catch (error) {
32
+ return {
33
+ isValid: false,
34
+ error: `Validation error: ${error.message}`,
35
+ diagramType: 'unknown'
36
+ };
37
+ }
38
+ }
28
39
 
29
- - All [] labels must be either quoted strings OR HTML-safe (no raw \\n or other special characters)
30
- - No strings (e.g. null) in number series data
31
- - Every subgraph has a matching end
32
- - No lone arrows
33
- - Use comments (%%) instead of stray text lines
40
+ // Function to extract mermaid code from response
41
+ function extractMermaidFromResponse(response) {
42
+ // Ensure response is a string
43
+ const responseStr = typeof response === 'string' ? response : String(response);
44
+ const mermaidMatch = responseStr.match(/```mermaid\s*([\s\S]*?)\s*```/);
45
+ return mermaidMatch ? mermaidMatch[1].trim() : null;
46
+ }
34
47
 
35
- Return only the mermaid chart markdown block and separate markdown for the chart key if necessary, with no other notes or comments.
48
+ // Function to format validation errors for detailed feedback
49
+ function formatValidationErrors(errors) {
50
+ if (!errors || !Array.isArray(errors)) {
51
+ return 'Unknown validation error';
52
+ }
53
+
54
+ return errors.map(error => {
55
+ let errorText = `Line ${error.line}, Column ${error.column}: ${error.message}`;
56
+ if (error.code) {
57
+ errorText += ` (Error Code: ${error.code})`;
58
+ }
59
+ if (error.suggestion) {
60
+ errorText += `\nSuggestion: ${error.suggestion}`;
61
+ }
62
+ return errorText;
63
+ }).join('\n\n');
64
+ }
36
65
 
37
- {{renderTemplate AI_DATETIME}}`},
38
- "{{chatHistory}}"
39
- ]}),
40
- ],
66
+ export default {
67
+ prompt: [], // Prompts are set dynamically in executePathway
41
68
  inputParameters: {
42
69
  chatHistory: [{role: '', content: []}],
43
70
  contextId: ``,
44
71
  aiName: "Jarvis",
45
72
  language: "English",
46
73
  },
47
- model: 'oai-gpt41',
74
+ model: 'oai-gpt5-chat',
48
75
  useInputChunking: false,
49
76
  enableDuplicateRequests: false,
50
77
  timeout: 600,
@@ -52,8 +79,8 @@ Return only the mermaid chart markdown block and separate markdown for the chart
52
79
  type: "function",
53
80
  icon: "📊",
54
81
  function: {
55
- name: "CreateMermaidChart",
56
- description: "Creates a Mermaid chart in markdown format to visualize data or concepts. Call this tool any time you need to create a Mermaid chart as it will ensure that the chart is properly formatted and syntax-checked.",
82
+ name: "CreateChart",
83
+ description: "Creates a single chart or diagram that will render in the UI to visualize data or concepts. You can create all the standard Mermaid chart types (flowcharts, sequence diagrams, gantt charts, etc.) as well as bar charts and line and scatter plots. This tool also validates the syntax and ensures proper formatting. Call this tool any time you need to create a chart outside of your coding agent. If you need to create multiple charts, you can call this tool multiple times in parallel to create multiple charts.",
57
84
  parameters: {
58
85
  type: "object",
59
86
  properties: {
@@ -75,8 +102,130 @@ Return only the mermaid chart markdown block and separate markdown for the chart
75
102
  if (args.detailedInstructions) {
76
103
  args.chatHistory.push({role: "user", content: args.detailedInstructions});
77
104
  }
78
- let result = await runAllPrompts({ ...args, stream: false });
79
- resolver.tool = JSON.stringify({ toolUsed: "coding" });
80
- return result;
105
+
106
+ const maxRetries = 10;
107
+ let attempts = 0;
108
+ let lastError = null;
109
+ let lastMermaidCode = null;
110
+ let pathwayResolver = resolver;
111
+
112
+ while (attempts < maxRetries) {
113
+ attempts++;
114
+
115
+ try {
116
+ let result;
117
+
118
+ if (attempts === 1) {
119
+ // First attempt: use full chat history for context
120
+ // Set the initial prompt with full chat history
121
+ pathwayResolver.pathwayPrompt = [
122
+ new Prompt({ messages: [
123
+ {"role": "system", "content":`You are the part of an AI entity named {{aiName}} that creates mermaid charts. Follow the user's detailed instructions and create a mermaid chart that meets the user's needs.
124
+
125
+ Mermaid Charts Instructions:
126
+
127
+ You are using Mermaid 11.6 with the xychart-beta extension, so you can write all standard Mermaid chart types in a markdown block (flowcharts, sequence diagrams, etc.) as well as bar charts and line charts using the xychart-beta extension.
128
+
129
+ Here is some example code of the xychart-beta extension that combines both bar and line functions:
130
+
131
+ \`\`\`mermaid
132
+ xychart-beta
133
+ title "Sales Revenue"
134
+ x-axis [jan, feb, mar, apr, may, jun, jul, aug, sep, oct, nov, dec]
135
+ y-axis "Revenue (in $)" 4000 --> 11000
136
+ bar [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
137
+ line [5000, 6000, 7500, 8200, 9500, 10500, 11000, 10200, 9200, 8500, 7000, 6000]
138
+ \`\`\`
139
+
140
+ Mermaid is very sensitive to syntax errors, so carefully consider your syntax before producing mermaid code.
141
+
142
+ - All [] labels must be either quoted strings OR otherwise HTML-safe (no raw \\n or other special characters - this is the most common parsing issue - wonky labels)
143
+ - No strings (e.g. null) in number series data
144
+ - Every subgraph has a matching end
145
+ - No lone arrows
146
+ - Use comments (%%) instead of stray text lines for any text that is not part of the mermaid code
147
+
148
+ Return only the mermaid chart markdown block with no other notes or comments.
149
+
150
+ {{renderTemplate AI_DATETIME}}`},
151
+ "{{chatHistory}}"
152
+ ]})
153
+ ];
154
+
155
+ result = await runAllPrompts({ ...args, stream: false });
156
+ } else {
157
+ // Retry attempts: use streamlined prompt with just the error and code
158
+ pathwayResolver.pathwayPrompt = [
159
+ new Prompt({ messages: [
160
+ {"role": "system", "content":`You are fixing a mermaid chart syntax error. The previous attempt generated invalid mermaid code. Please fix the syntax errors and regenerate the chart.
161
+
162
+ The error details below include line numbers, column positions, error codes, and suggestions. Use this information to precisely locate and fix the syntax issues.
163
+
164
+ Focus only on fixing the syntax issues mentioned in the error details. Return only the corrected mermaid chart in markdown block format with no other comments.
165
+
166
+ {{renderTemplate AI_DATETIME}}`},
167
+ {"role": "user", "content": `Here is the mermaid code that was generated:\n\n\`\`\`mermaid\n${lastMermaidCode || ''}\n\`\`\`\n\nAnd here are the detailed error messages:\n\n${lastError || 'Unknown error'}\n\nPlease fix the syntax errors and regenerate the chart.`}
168
+ ]})
169
+ ];
170
+
171
+ result = await runAllPrompts({ ...args, stream: false });
172
+ }
173
+
174
+ // Extract mermaid code from the response
175
+ const mermaidCode = extractMermaidFromResponse(result);
176
+
177
+ if (mermaidCode) {
178
+ // Store the mermaid code for potential retry
179
+ lastMermaidCode = mermaidCode;
180
+
181
+ // Validate the mermaid chart using our lightweight validator
182
+ const validation = validateMermaidSyntax(mermaidCode);
183
+
184
+ if (validation.isValid) {
185
+ pathwayResolver.tool = JSON.stringify({
186
+ toolUsed: "CreateMermaidChart",
187
+ diagramType: validation.diagramType,
188
+ attempts: attempts,
189
+ validationPassed: true
190
+ });
191
+
192
+ // Return the validated mermaid chart
193
+ return result;
194
+ } else {
195
+ const formattedErrors = formatValidationErrors(validation.error);
196
+ logger.warn(`Mermaid chart has syntax errors: ${formattedErrors}`);
197
+ lastError = formattedErrors;
198
+
199
+ if (attempts < maxRetries) {
200
+ continue; // Retry with streamlined prompt
201
+ }
202
+ }
203
+ } else {
204
+ // No mermaid code found in response
205
+ lastError = "No mermaid chart found in response";
206
+
207
+ if (attempts < maxRetries) {
208
+ // For retry, we'll use the streamlined prompt with the error message
209
+ continue;
210
+ }
211
+ }
212
+ } catch (error) {
213
+ lastError = error.message;
214
+ if (attempts < maxRetries) {
215
+ continue; // Retry with streamlined prompt
216
+ }
217
+ }
218
+ }
219
+
220
+ // If we've exhausted all retries, return the last result with error info
221
+ pathwayResolver.tool = JSON.stringify({
222
+ toolUsed: "CreateMermaidChart",
223
+ error: lastError,
224
+ attempts: attempts,
225
+ validationFailed: true
226
+ });
227
+
228
+ // Return a fallback response
229
+ return `Failed to generate valid mermaid chart after ${maxRetries} attempts. Last error: ${lastError}`;
81
230
  }
82
231
  }
@@ -24,6 +24,7 @@ export default {
24
24
  timeout: 600,
25
25
  toolDefinition: [{
26
26
  type: "function",
27
+ enabled: false,
27
28
  icon: "🗺️",
28
29
  function: {
29
30
  name: "PlanMultiStepTask",
@@ -46,6 +47,7 @@ export default {
46
47
  },
47
48
  {
48
49
  type: "function",
50
+ enabled: false,
49
51
  icon: "🧠",
50
52
  function: {
51
53
  name: "ApplyAdvancedReasoning",
@@ -25,6 +25,7 @@ export default {
25
25
  timeout: 600,
26
26
  toolDefinition: [{
27
27
  type: "function",
28
+ enabled: false,
28
29
  icon: "🔍",
29
30
  function: {
30
31
  name: "VerifyResponse",
@@ -54,7 +55,6 @@ export default {
54
55
  }
55
56
  }
56
57
  }],
57
-
58
58
  executePathway: async ({args, runAllPrompts, resolver}) => {
59
59
  let result = await runAllPrompts({ ...args, stream: false });
60
60
  resolver.tool = JSON.stringify({ toolUsed: "verification" });
@@ -273,12 +273,13 @@ REMEMBER:
273
273
  // }
274
274
 
275
275
  const result = await processChunksParallel(chunks, args);
276
+ const transcriptArray = result.map(item => item?.output_text || item);
276
277
 
277
278
  if (['srt','vtt'].includes(responseFormat.toLowerCase()) || wordTimestamped) { // align subtitles for formats
278
279
  const offsets = chunks.map((chunk, index) => chunk?.offset || index * OFFSET_CHUNK);
279
- return alignSubtitles(result, responseFormat, offsets);
280
+ return alignSubtitles(transcriptArray, responseFormat, offsets);
280
281
  }
281
- return result.join(` `);
282
+ return transcriptArray.join(` `);
282
283
  }catch(error){
283
284
  logger.error(`Error in transcribing: ${error}`);
284
285
  throw error;
package/server/graphql.js CHANGED
@@ -181,7 +181,7 @@ const build = async (config) => {
181
181
 
182
182
  const server = new ApolloServer({
183
183
  schema: schema,
184
- introspection: config.get('env') === 'development',
184
+ introspection: config.get('env') === 'development' || config.get('env') === 'debug',
185
185
  csrfPrevention: true,
186
186
  plugins: plugins.concat([// Proper shutdown for the HTTP server.
187
187
  ApolloServerPluginDrainHttpServer({ httpServer }),
@@ -6,6 +6,7 @@ import { getFirstNToken, getLastNToken, getSemanticChunks } from './chunker.js';
6
6
  import { PathwayResponseParser } from './pathwayResponseParser.js';
7
7
  import { Prompt } from './prompt.js';
8
8
  import { getv, setv } from '../lib/keyValueStorageClient.js';
9
+ import { getvWithDoubleDecryption, setvWithDoubleEncryption } from '../lib/keyValueStorageClient.js';
9
10
  import { requestState } from './requestState.js';
10
11
  import { callPathway, addCitationsToResolver } from '../lib/pathwayTools.js';
11
12
  import logger from '../lib/logger.js';
@@ -392,12 +393,12 @@ class PathwayResolver {
392
393
  try {
393
394
  // Load saved context and core memory if it exists
394
395
  const [savedContext, memorySelf, memoryDirectives, memoryTopics, memoryUser, memoryContext] = await Promise.all([
395
- (getv && await getv(this.savedContextId)) || {},
396
- callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memorySelf', priority: 1, stripMetadata: true }),
397
- callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryDirectives', priority: 1, stripMetadata: true }),
398
- callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryTopics', priority: 0, numResults: 10 }),
399
- callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryUser', priority: 1, stripMetadata: true }),
400
- callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryContext', priority: 0 }),
396
+ (getvWithDoubleDecryption && await getvWithDoubleDecryption(this.savedContextId, this.args?.contextKey)) || {},
397
+ callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memorySelf', priority: 1, stripMetadata: true, contextKey: this.args?.contextKey }),
398
+ callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryDirectives', priority: 1, stripMetadata: true, contextKey: this.args?.contextKey }),
399
+ callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryTopics', priority: 0, numResults: 10, contextKey: this.args?.contextKey }),
400
+ callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryUser', priority: 1, stripMetadata: true, contextKey: this.args?.contextKey }),
401
+ callPathway('sys_read_memory', { contextId: this.savedContextId, section: 'memoryContext', priority: 0, contextKey: this.args?.contextKey }),
401
402
  ]).catch(error => {
402
403
  this.logError(`Failed to load memory: ${error.message}`);
403
404
  return [{},'','','','',''];
@@ -435,7 +436,7 @@ class PathwayResolver {
435
436
  };
436
437
 
437
438
  if (currentState.savedContext !== this.initialState.savedContext) {
438
- setv && await setv(this.savedContextId, this.savedContext);
439
+ setvWithDoubleEncryption && await setvWithDoubleEncryption(this.savedContextId, this.savedContext, this.args?.contextKey);
439
440
  }
440
441
  };
441
442
 
@@ -20,7 +20,9 @@ class VeoVideoPlugin extends ModelPlugin {
20
20
  const availableModels = {
21
21
  'veo-2.0-generate': 'GA',
22
22
  'veo-3.0-generate': 'Preview',
23
- 'veo-3.0-fast-generate': 'Preview'
23
+ 'veo-3.0-fast-generate': 'Preview',
24
+ 'veo-3.1-generate': 'Preview',
25
+ 'veo-3.1-fast-generate': 'Preview'
24
26
  };
25
27
 
26
28
  // Get the model ID from the pathway or use default
@@ -53,6 +55,8 @@ class VeoVideoPlugin extends ModelPlugin {
53
55
  // generateAudio is required for 3.0 and not supported by 2.0
54
56
  ...(model === 'veo-3.0-generate' && { generateAudio: combinedParameters.generateAudio !== undefined ? combinedParameters.generateAudio : true }),
55
57
  ...(model === 'veo-3.0-fast-generate' && { generateAudio: combinedParameters.generateAudio !== undefined ? combinedParameters.generateAudio : true }),
58
+ ...(model === 'veo-3.1-generate' && { generateAudio: combinedParameters.generateAudio !== undefined ? combinedParameters.generateAudio : true }),
59
+ ...(model === 'veo-3.1-fast-generate' && { generateAudio: combinedParameters.generateAudio !== undefined ? combinedParameters.generateAudio : true }),
56
60
  ...(combinedParameters.negativePrompt && { negativePrompt: combinedParameters.negativePrompt }),
57
61
  ...(combinedParameters.personGeneration && { personGeneration: combinedParameters.personGeneration }),
58
62
  ...(combinedParameters.sampleCount && { sampleCount: combinedParameters.sampleCount }),
@@ -74,6 +78,12 @@ class VeoVideoPlugin extends ModelPlugin {
74
78
  if (model === 'veo-3.0-fast-generate' && parameters.durationSeconds !== 8) {
75
79
  throw new Error(`Veo 3.0 only supports durationSeconds: 8, got: ${parameters.durationSeconds}`);
76
80
  }
81
+ if (model === 'veo-3.1-generate' && parameters.durationSeconds !== 8) {
82
+ throw new Error(`Veo 3.1 only supports durationSeconds: 8, got: ${parameters.durationSeconds}`);
83
+ }
84
+ if (model === 'veo-3.1-fast-generate' && parameters.durationSeconds !== 8) {
85
+ throw new Error(`Veo 3.1 only supports durationSeconds: 8, got: ${parameters.durationSeconds}`);
86
+ }
77
87
  if (model === 'veo-2.0-generate' && (parameters.durationSeconds < 5 || parameters.durationSeconds > 8)) {
78
88
  throw new Error(`Veo 2.0 supports durationSeconds between 5-8, got: ${parameters.durationSeconds}`);
79
89
  }
@@ -93,6 +103,18 @@ class VeoVideoPlugin extends ModelPlugin {
93
103
  if (model === 'veo-3.0-fast-generate' && parameters.video) {
94
104
  throw new Error('video parameter is not supported in Veo 3.0');
95
105
  }
106
+ if (model === 'veo-3.1-generate' && parameters.lastFrame) {
107
+ throw new Error('lastFrame parameter is not supported in Veo 3.1');
108
+ }
109
+ if (model === 'veo-3.1-generate' && parameters.video) {
110
+ throw new Error('video parameter is not supported in Veo 3.1');
111
+ }
112
+ if (model === 'veo-3.1-fast-generate' && parameters.lastFrame) {
113
+ throw new Error('lastFrame parameter is not supported in Veo 3.1');
114
+ }
115
+ if (model === 'veo-3.1-fast-generate' && parameters.video) {
116
+ throw new Error('video parameter is not supported in Veo 3.1');
117
+ }
96
118
  }
97
119
 
98
120
  // generateAudio constraints
@@ -105,6 +127,12 @@ class VeoVideoPlugin extends ModelPlugin {
105
127
  if (model === 'veo-3.0-fast-generate' && parameters.generateAudio === undefined) {
106
128
  logger.warn('generateAudio is required for Veo 3.0, defaulting to true');
107
129
  }
130
+ if (model === 'veo-3.1-generate' && parameters.generateAudio === undefined) {
131
+ logger.warn('generateAudio is required for Veo 3.1, defaulting to true');
132
+ }
133
+ if (model === 'veo-3.1-fast-generate' && parameters.generateAudio === undefined) {
134
+ logger.warn('generateAudio is required for Veo 3.1, defaulting to true');
135
+ }
108
136
  }
109
137
 
110
138
  // Execute the request to the Veo API