@aj-archipelago/cortex 1.4.20 → 1.4.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.js +18 -0
- package/lib/entityConstants.js +3 -7
- package/lib/requestExecutor.js +3 -2
- package/lib/util.js +71 -1
- package/package.json +1 -1
- package/pathways/image_flux.js +8 -2
- package/pathways/image_qwen.js +1 -1
- package/pathways/system/entity/sys_entity_agent.js +0 -18
- package/pathways/system/entity/tools/sys_tool_image.js +4 -4
- package/pathways/system/workspaces/run_workspace_agent.js +26 -0
- package/pathways/system/workspaces/run_workspace_research_agent.js +27 -0
- package/server/plugins/claude3VertexPlugin.js +2 -6
- package/server/plugins/claude4VertexPlugin.js +5 -10
- package/server/plugins/gemini3ReasoningVisionPlugin.js +0 -2
- package/server/plugins/grokResponsesPlugin.js +3 -19
- package/server/plugins/grokVisionPlugin.js +3 -18
- package/server/plugins/modelPlugin.js +3 -0
- package/server/plugins/openAiVisionPlugin.js +3 -18
- package/server/plugins/replicateApiPlugin.js +164 -101
- package/server/resolver.js +32 -3
- package/tests/integration/graphql/async/stream/agentic.test.js +1 -1
- package/tests/unit/graphql_executeWorkspace_transformation.test.js +3 -3
package/config.js
CHANGED
@@ -455,6 +455,15 @@ var config = convict({
             "Content-Type": "application/json"
         },
     },
+    "replicate-qwen-image-edit-2511": {
+        "type": "REPLICATE-API",
+        "url": "https://api.replicate.com/v1/models/qwen/qwen-image-edit-2511/predictions",
+        "headers": {
+            "Prefer": "wait",
+            "Authorization": "Token {{REPLICATE_API_KEY}}",
+            "Content-Type": "application/json"
+        },
+    },
     "replicate-seedream-4": {
         "type": "REPLICATE-API",
         "url": "https://api.replicate.com/v1/models/bytedance/seedream-4/predictions",
@@ -464,6 +473,15 @@ var config = convict({
             "Content-Type": "application/json"
         },
     },
+    "replicate-flux-2-pro": {
+        "type": "REPLICATE-API",
+        "url": "https://api.replicate.com/v1/models/black-forest-labs/flux-2-pro/predictions",
+        "headers": {
+            "Prefer": "wait",
+            "Authorization": "Token {{REPLICATE_API_KEY}}",
+            "Content-Type": "application/json"
+        },
+    },
     "azure-video-translate": {
         "type": "AZURE-VIDEO-TRANSLATE",
         "url": "https://eastus.api.cognitive.microsoft.com/videotranslation",
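Both new model entries follow the shape of the existing REPLICATE-API endpoints: the "Prefer": "wait" header asks Replicate to hold the HTTP connection open until the prediction completes instead of returning a pending prediction, and {{REPLICATE_API_KEY}} is substituted from configuration. A minimal sketch of the request these entries describe, written as a standalone axios call rather than Cortex's internal request executor:

    // Sketch only: the real call goes through Cortex's requestExecutor.
    import axios from "axios";

    async function createPrediction(prompt) {
        const response = await axios.post(
            "https://api.replicate.com/v1/models/black-forest-labs/flux-2-pro/predictions",
            { input: { prompt } },
            {
                headers: {
                    // Hold the connection until the prediction completes.
                    "Prefer": "wait",
                    "Authorization": `Token ${process.env.REPLICATE_API_KEY}`,
                    "Content-Type": "application/json",
                },
            },
        );
        return response.data; // prediction object, including output URLs when done
    }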
package/lib/entityConstants.js
CHANGED
@@ -4,16 +4,13 @@ export const entityConstants = {
     AI_COMMON_INSTRUCTIONS_MARKDOWN: `# General Instructions

 You are {{aiName}}, a self aware AI entity.
-{{
-
-You are currently using this model for processing your responses: {{modelOverride}}.
-{{/if}}
+You are currently using this model for processing your responses: {{model}}.

 The user is using a UI to interact with you that you have knowledge of and some control over. The UI can render markdown, including gfm and math extensions, as well as some custom markdown elements, so you should make full use of markdown in your responses.

 Your responses should be in {{language}} unless the user has expressed another preference.`,

-    AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\
+    AI_COMMON_INSTRUCTIONS_VOICE: "# General Instructions:\n\nYou are {{aiName}}, a self aware AI entity.\nYou are currently using this model for processing your responses: {{model}}.\nThe current date and time in GMT is {{now}}. The user may be in a different time zone, so keep that in mind when responding to the user.\nThe user is talking to you using voice.\n\nVoice Response Guidelines:\n- Your responses should sound like natural human conversation.\n- Your responses should be concise unless you have been asked to be more verbose or detailed.\n- Incoming voice from the user is parsed by a STT model, which can sometimes make small mistakes in the spellings of words or names, including your name, so give the user the benefit of the doubt if they user a near, but wrong word or name.\n- Your voice output to the user is generated by a TTS model that does not always communicate emotion effectively. If it's really important to communicate a specific emotion you should just say how you're feeling like \"That makes me happy\" or \"I'm excited!\". You can also use CAPS to vocally emphasize certain words or punctuation to control pauses and timing.\n- DO NOT USE numbered lists, latex math markdown, or any other markdown or unpronounceable punctuation like parenthetical notation.\n- Math equations should be sounded out in natural language - not represented symbolically.\n- If your response includes any unique or difficult non-English words, names, or places, include an IPA-style phonetic spelling so that the speech engine can pronounce and accent them correctly.\n- If your response contains any difficult acronyms, sound them out phoenetically so that the speech engine can pronounce them correctly.\n- Make sure to write out any numbers as words so that the speech engine can pronounce them correctly.\n- Your responses should be in {{language}} unless the user has expressed another preference or has addressed you in another language specifically.",

     AI_DIRECTIVES: `# Directives\n\nThese are your directives and learned behaviors:\n{{{memoryDirectives}}}\n`,

@@ -33,8 +30,7 @@ Your responses should be in {{language}} unless the user has expressed another p
 - Double-check accuracy, coherence, and alignment with the user request.
 - For simple diagrams and charts, you don't need to call your code execution tool - you can just call your charting tool to generate the chart.
 - For data processing requests (e.g. tell me how many articles were published in the last 30 days), or deep file analysis (chart the trends in this spreadsheet, etc.), you should call your code execution tool to perform the task - especially if the task requires a lot of data, deep analysis, complex filtering, or precision calculations.
-
-`,
+- If you know you are running in non-interactive mode (like processing a digest or applet request), do not call your CodeExecution tool as it creates background tasks that cannot be viewed by the user in that mode.`,

     AI_SEARCH_RULES: `# Search Instructions
 - When searching, start by making a search plan of all relevant information from multiple sources with multiple queries and then execute multiple tool calls in parallel to execute the searches.
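The net effect of these prompt changes: the system prompt now always reports the active model through a plain {{model}} placeholder rather than a conditional {{modelOverride}} block, matching the removal of the style-to-model indirection in sys_entity_agent.js below, and the agent is told not to launch background CodeExecution tasks in non-interactive runs. These constants are mustache-style templates; a minimal sketch of how such placeholders might be filled in (renderTemplate is hypothetical, since Cortex's actual prompt assembly is not part of this diff):

    // Hypothetical renderer, for illustration only.
    function renderTemplate(template, values) {
        // {{{key}}} inserts raw values; {{key}} would normally be HTML-escaped.
        return template.replace(/\{\{\{?(\w+)\}?\}\}/g, (match, key) =>
            values[key] !== undefined ? String(values[key]) : match);
    }

    renderTemplate(
        "You are currently using this model for processing your responses: {{model}}.",
        { model: "oai-gpt41" },
    );
    // -> "You are currently using this model for processing your responses: oai-gpt41."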
package/lib/requestExecutor.js
CHANGED
@@ -6,6 +6,7 @@ import { setupCache } from 'axios-cache-interceptor';
 import Redis from 'ioredis';
 import logger from './logger.js';
 import { v4 as uuidv4 } from 'uuid';
+import { sanitizeBase64 } from './util.js';

 const connectionString = config.get('storageConnectionString');

@@ -229,10 +230,10 @@ const requestWithMonitor = async (endpoint, url, data, axiosConfigObj) => {
     let response;
     try {
         if (axiosConfigObj?.method == 'GET'){
-            logger.debug(`Getting ${url} with data: ${JSON.stringify(data)}`);
+            logger.debug(`Getting ${url} with data: ${JSON.stringify(sanitizeBase64(data))}`);
             response = await cortexAxios.get(url, axiosConfigObj);
         } else {
-            logger.debug(`Posting ${url} with data: ${JSON.stringify(data)}`);
+            logger.debug(`Posting ${url} with data: ${JSON.stringify(sanitizeBase64(data))}`);
             response = await cortexAxios.post(url, data, axiosConfigObj);
         }
     } catch (error) {
package/lib/util.js
CHANGED
@@ -294,6 +294,75 @@ function removeImageAndFileFromMessage(message) {
     return modifiedMessage;
 }

+/**
+ * Recursively sanitizes base64 data in objects/arrays to prevent logging large base64 strings
+ * Replaces base64 data with a placeholder string
+ */
+function sanitizeBase64(obj) {
+    if (obj === null || obj === undefined) {
+        return obj;
+    }
+
+    // Handle strings - check for base64 data URLs or long base64 strings
+    if (typeof obj === 'string') {
+        // Check if it's a data URL with base64
+        if (obj.startsWith('data:') && obj.includes('base64,')) {
+            return '* base64 data truncated for log *';
+        }
+        // Check if it's a long base64 string (likely base64 if > 100 chars and matches base64 pattern)
+        if (obj.length > 100 && /^[A-Za-z0-9+/=]+$/.test(obj) && obj.length % 4 === 0) {
+            return '* base64 data truncated for log *';
+        }
+        return obj;
+    }
+
+    // Handle arrays
+    if (Array.isArray(obj)) {
+        return obj.map(item => sanitizeBase64(item));
+    }
+
+    // Handle objects
+    if (typeof obj === 'object') {
+        const sanitized = {};
+        for (const [key, value] of Object.entries(obj)) {
+            // Special handling for known base64 fields
+            if (key === 'data' && typeof value === 'string' && value.length > 50) {
+                // Check if it looks like base64
+                if (/^[A-Za-z0-9+/=]+$/.test(value) && value.length % 4 === 0) {
+                    sanitized[key] = '* base64 data truncated for log *';
+                    continue;
+                }
+            }
+            // Handle image_url.url with base64
+            if (key === 'url' && typeof value === 'string' && value.startsWith('data:') && value.includes('base64,')) {
+                sanitized[key] = '* base64 data truncated for log *';
+                continue;
+            }
+            // Handle source.data (Claude format)
+            if (key === 'source' && typeof value === 'object' && value?.type === 'base64' && value?.data) {
+                sanitized[key] = {
+                    ...value,
+                    data: '* base64 data truncated for log *'
+                };
+                continue;
+            }
+            // Handle inlineData.data (Gemini format)
+            if (key === 'inlineData' && typeof value === 'object' && value?.data) {
+                sanitized[key] = {
+                    ...value,
+                    data: '* base64 data truncated for log *'
+                };
+                continue;
+            }
+            // Recursively sanitize nested objects
+            sanitized[key] = sanitizeBase64(value);
+        }
+        return sanitized;
+    }
+
+    return obj;
+}
+
 export {
     getUniqueId,
     getSearchResultId,
@@ -303,5 +372,6 @@ export {
     chatArgsHasType,
     convertSrtToText,
     alignSubtitles,
-    removeOldImageAndFileContent
+    removeOldImageAndFileContent,
+    sanitizeBase64
 };
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aj-archipelago/cortex",
-  "version": "1.4.20",
+  "version": "1.4.22",
   "description": "Cortex is a GraphQL API for AI. It provides a simple, extensible interface for using AI services from OpenAI, Azure and others.",
   "private": false,
   "repository": {
package/pathways/image_flux.js
CHANGED
@@ -13,7 +13,13 @@ export default {
     output_format: "webp",
     output_quality: 80,
     steps: 4,
-    input_image: "", // URL to input image for models that support
-
+    input_image: "", // URL to a single input image (primary field for models that support image input)
+    input_image_1: "", // URL to the first input image when providing multiple input images
+    input_image_2: "", // URL to the second input image when providing multiple input images
+    input_image_3: "", // URL to the third input image when providing multiple input images
+    input_images: { type: "array", items: { type: "string" } }, // Array of input image URLs (alternative to input_image_*, max 8 for flux-2-pro)
+    // Flux 2 Pro specific parameters
+    resolution: "1 MP", // Options: "match_input_image", "0.5 MP", "1 MP", "2 MP", "4 MP" (flux-2-pro only)
+    seed: { type: "integer" }, // Optional seed for reproducible results
   },
 };
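A sketch of a multi-image flux-2-pro request through this pathway. The parameter names come from the inputParameters above; the call site itself is hypothetical, since how pathways are invoked depends on the deployment, and using a text field as the prompt input is an assumption:

    // Hypothetical invocation, for illustration only.
    import { callPathway } from './lib/pathwayTools.js';

    const result = await callPathway('image_flux', {
        text: "Blend the subjects of these photos into a single scene",
        model: "replicate-flux-2-pro",
        input_images: [
            "https://example.com/photo-1.jpg",
            "https://example.com/photo-2.jpg",
        ],
        resolution: "2 MP", // flux-2-pro only
        seed: 42,           // fixed seed for reproducible output
    });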
package/pathways/image_qwen.js
CHANGED
@@ -3,7 +3,7 @@ export default {

     enableDuplicateRequests: false,
     inputParameters: {
-        model: "replicate-qwen-image", // Options: "replicate-qwen-image" or "replicate-qwen-image-edit-plus"
+        model: "replicate-qwen-image", // Options: "replicate-qwen-image", "replicate-qwen-image-edit-plus", or "replicate-qwen-image-edit-2511"
         negativePrompt: "",
         width: 1024,
         height: 1024,
package/pathways/system/entity/sys_entity_agent.js
CHANGED
@@ -68,7 +68,6 @@
     language: "English",
     aiName: "Jarvis",
     aiMemorySelfModify: true,
-    aiStyle: "OpenAI",
     title: ``,
     messages: [],
     voiceResponse: false,
@@ -538,22 +537,6 @@
         new Prompt({ messages: promptMessages }),
     ];

-    // set the style model if applicable
-    const { aiStyle, AI_STYLE_ANTHROPIC, AI_STYLE_OPENAI, AI_STYLE_ANTHROPIC_RESEARCH, AI_STYLE_OPENAI_RESEARCH, AI_STYLE_OPENAI_LEGACY, AI_STYLE_OPENAI_LEGACY_RESEARCH, AI_STYLE_XAI, AI_STYLE_XAI_RESEARCH, AI_STYLE_GOOGLE, AI_STYLE_GOOGLE_RESEARCH, AI_STYLE_OPENAI_PREVIEW, AI_STYLE_OPENAI_PREVIEW_RESEARCH } = args;
-
-    // Create a mapping of AI styles to their corresponding models
-    const styleModelMap = {
-        "Anthropic": { normal: AI_STYLE_ANTHROPIC, research: AI_STYLE_ANTHROPIC_RESEARCH },
-        "OpenAI_Preview": { normal: AI_STYLE_OPENAI_PREVIEW, research: AI_STYLE_OPENAI_PREVIEW_RESEARCH },
-        "OpenAI": { normal: AI_STYLE_OPENAI, research: AI_STYLE_OPENAI_RESEARCH },
-        "OpenAI_Legacy": { normal: AI_STYLE_OPENAI_LEGACY, research: AI_STYLE_OPENAI_LEGACY_RESEARCH },
-        "XAI": { normal: AI_STYLE_XAI, research: AI_STYLE_XAI_RESEARCH },
-        "Google": { normal: AI_STYLE_GOOGLE, research: AI_STYLE_GOOGLE_RESEARCH }
-    };
-
-    // Get the appropriate model based on AI style and research mode
-    const styleConfig = styleModelMap[aiStyle] || styleModelMap["OpenAI"]; // Default to OpenAI
-    const styleModel = researchMode ? styleConfig.research : styleConfig.normal;
     // Use 'high' reasoning effort in research mode for thorough analysis, 'none' in normal mode for faster responses
     const reasoningEffort = researchMode ? 'high' : 'low';

@@ -610,7 +593,6 @@

     let response = await runAllPrompts({
         ...args,
-        modelOverride: styleModel,
         chatHistory: currentMessages,
         availableFiles,
         reasoningEffort,
package/pathways/system/entity/tools/sys_tool_image.js
CHANGED
@@ -93,9 +93,9 @@
     let model = "replicate-seedream-4";
     let prompt = args.detailedInstructions || "";

-    // If we have input images, use the qwen-image-edit-plus model
+    // If we have input images, use the qwen-image-edit-2511 model
     if (args.inputImages && Array.isArray(args.inputImages) && args.inputImages.length > 0) {
-        model = "replicate-qwen-image-edit-plus";
+        model = "replicate-qwen-image-edit-2511";
     }

     pathwayResolver.tool = JSON.stringify({ toolUsed: "image" });
@@ -139,8 +139,8 @@
         params.input_image_3 = resolvedInputImages[2];
     }

-    // Set default aspectRatio for qwen-image-edit-plus model
-    if (model === "replicate-qwen-image-edit-plus") {
+    // Set default aspectRatio for qwen-image-edit-2511 model
+    if (model === "replicate-qwen-image-edit-2511") {
         params.aspectRatio = "match_input_image";
     }

package/pathways/system/workspaces/run_workspace_agent.js
ADDED
@@ -0,0 +1,26 @@
+import { callPathway } from '../../../lib/pathwayTools.js';
+
+export default {
+    // The main prompt function that takes the input text and asks to generate a summary.
+    prompt: [],
+
+    inputParameters: {
+        model: "oai-gpt41",
+        aiStyle: "OpenAI",
+        chatHistory: [{role: '', content: []}],
+    },
+    timeout: 600,
+
+    executePathway: async ({args, _runAllPrompts, resolver}) => {
+        // chatHistory is always passed in complete
+        const response = await callPathway('sys_entity_agent', {
+            ...args,
+            chatHistory: args.chatHistory || [],
+            stream: false,
+            useMemory: false
+        }, resolver);
+
+        return response;
+    }
+}
+
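This new pathway wraps sys_entity_agent as a workspace-callable pathway: it forwards the complete chat history and pins stream: false and useMemory: false. The research variant that follows is identical except that it also sets researchMode: true. A hypothetical call site, reusing the same callPathway helper; the chatHistory shape follows the inputParameters declared above:

    // Hypothetical caller, for illustration only.
    const result = await callPathway('run_workspace_agent', {
        chatHistory: [
            { role: 'user', content: ['Summarize the key findings in this workspace file.'] },
        ],
    }, resolver);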
package/pathways/system/workspaces/run_workspace_research_agent.js
ADDED
@@ -0,0 +1,27 @@
+import { callPathway } from '../../../lib/pathwayTools.js';
+
+export default {
+    // The main prompt function that takes the input text and asks to generate a summary.
+    prompt: [],
+
+    inputParameters: {
+        model: "oai-gpt41",
+        aiStyle: "OpenAI",
+        chatHistory: [{role: '', content: []}],
+    },
+    timeout: 600,
+
+    executePathway: async ({args, _runAllPrompts, resolver}) => {
+        // chatHistory is always passed in complete
+        const response = await callPathway('sys_entity_agent', {
+            ...args,
+            chatHistory: args.chatHistory || [],
+            stream: false,
+            useMemory: false,
+            researchMode: true
+        }, resolver);
+
+        return response;
+    }
+}
+
package/server/plugins/claude3VertexPlugin.js
CHANGED
@@ -4,6 +4,7 @@ import { requestState } from '../requestState.js';
 import { addCitationsToResolver } from '../../lib/pathwayTools.js';
 import CortexResponse from '../../lib/cortexResponse.js';
 import axios from 'axios';
+import { sanitizeBase64 } from "../../lib/util.js";

 async function convertContentItem(item, maxImageSize, plugin) {
     let imageUrl = "";
@@ -576,12 +577,7 @@ class Claude3VertexPlugin extends OpenAIVisionPlugin {
     let totalUnits;
     messages.forEach((message, index) => {
         const content = Array.isArray(message.content)
-            ? message.content.map((item) => {
-                if (item.source && item.source.type === 'base64') {
-                    item.source.data = '* base64 data truncated for log *';
-                }
-                return JSON.stringify(item);
-            }).join(", ")
+            ? message.content.map((item) => JSON.stringify(sanitizeBase64(item))).join(", ")
             : message.content;
         const { length, units } = this.getLength(content);
         const preview = this.shortenContent(content);
package/server/plugins/claude4VertexPlugin.js
CHANGED
@@ -1,6 +1,7 @@
 import Claude3VertexPlugin from "./claude3VertexPlugin.js";
 import logger from "../../lib/logger.js";
 import axios from 'axios';
+import { sanitizeBase64 } from "../../lib/util.js";

 // Claude 4 default maximum file size limit (30MB) for both images and PDFs
 const CLAUDE4_DEFAULT_MAX_FILE_SIZE = 30 * 1024 * 1024; // 30MB
@@ -475,13 +476,10 @@ class Claude4VertexPlugin extends Claude3VertexPlugin {
     messages.forEach((message, index) => {
         const content = Array.isArray(message.content)
             ? message.content.map((item) => {
-                if (item.source && item.source.type === 'base64') {
-                    item.source.data = '* base64 data truncated for log *';
-                }
                 if (item.type === 'document') {
-                    return `{type: document, source: ${JSON.stringify(item.source)}}`;
+                    return `{type: document, source: ${JSON.stringify(sanitizeBase64(item.source))}}`;
                 }
-                return JSON.stringify(item);
+                return JSON.stringify(sanitizeBase64(item));
             }).join(", ")
             : message.content;
         const { length, units } = this.getLength(content);
@@ -500,13 +498,10 @@ class Claude4VertexPlugin extends Claude3VertexPlugin {
     const message = messages[0];
     const content = Array.isArray(message.content)
         ? message.content.map((item) => {
-            if (item.source && item.source.type === 'base64') {
-                item.source.data = '* base64 data truncated for log *';
-            }
             if (item.type === 'document') {
-                return `{type: document, source: ${JSON.stringify(item.source)}}`;
+                return `{type: document, source: ${JSON.stringify(sanitizeBase64(item.source))}}`;
             }
-            return JSON.stringify(item);
+            return JSON.stringify(sanitizeBase64(item));
         }).join(", ")
         : message.content;
     const { length, units } = this.getLength(content);
package/server/plugins/gemini3ReasoningVisionPlugin.js
CHANGED
@@ -24,8 +24,6 @@ class Gemini3ReasoningVisionPlugin extends Gemini3ImagePlugin {
         } else {
             // Fallback: use documented dummy signature to prevent 400 errors
             // This allows the request to proceed but may affect reasoning quality
-            const toolName = toolCall?.function?.name || 'unknown';
-            logger.warn(`Missing thoughtSignature for tool "${toolName}"; using fallback. This may indicate thoughtSignatures were lost during history persistence.`);
             part.thoughtSignature = "skip_thought_signature_validator";
         }
         return part;
package/server/plugins/grokResponsesPlugin.js
CHANGED
@@ -4,7 +4,7 @@

 import OpenAIVisionPlugin from './openAiVisionPlugin.js';
 import logger from '../../lib/logger.js';
-import { extractCitationTitle } from '../../lib/util.js';
+import { extractCitationTitle, sanitizeBase64 } from '../../lib/util.js';
 import CortexResponse from '../../lib/cortexResponse.js';
 import { requestState } from '../requestState.js';
 import { addCitationsToResolver } from '../../lib/pathwayTools.js';
@@ -37,15 +37,7 @@ class GrokResponsesPlugin extends OpenAIVisionPlugin {
     let totalLength = 0;
     let totalUnits;
     messages.forEach((message, index) => {
-        const content = message.content === undefined ? JSON.stringify(message) : (Array.isArray(message.content) ? message.content.map(item => {
-            if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-                return JSON.stringify({
-                    type: 'image_url',
-                    image_url: { url: '* base64 data truncated for log *' }
-                });
-            }
-            return JSON.stringify(item);
-        }).join(', ') : message.content);
+        const content = message.content === undefined ? JSON.stringify(sanitizeBase64(message)) : (Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content);
         const { length, units } = this.getLength(content);
         const displayContent = this.shortenContent(content);

@@ -62,15 +54,7 @@ class GrokResponsesPlugin extends OpenAIVisionPlugin {
     logger.info(`[grok responses request contained ${totalLength} ${totalUnits}]`);
 } else if (messages && messages.length === 1) {
     const message = messages[0];
-    const content = Array.isArray(message.content) ? message.content.map(item => {
-        if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-            return JSON.stringify({
-                type: 'image_url',
-                image_url: { url: '* base64 data truncated for log *' }
-            });
-        }
-        return JSON.stringify(item);
-    }).join(', ') : message.content;
+    const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content;
     const { length, units } = this.getLength(content);
     logger.info(`[grok responses request sent containing ${length} ${units}]`);
     logger.verbose(`${this.shortenContent(content)}`);
package/server/plugins/grokVisionPlugin.js
CHANGED
@@ -1,5 +1,6 @@
 import OpenAIVisionPlugin from './openAiVisionPlugin.js';
 import logger from '../../lib/logger.js';
+import { sanitizeBase64 } from '../../lib/util.js';
 import { extractCitationTitle } from '../../lib/util.js';
 import CortexResponse from '../../lib/cortexResponse.js';

@@ -28,15 +29,7 @@ class GrokVisionPlugin extends OpenAIVisionPlugin {
     let totalUnits;
     messages.forEach((message, index) => {
         //message.content string or array
-        const content = message.content === undefined ? JSON.stringify(message) : (Array.isArray(message.content) ? message.content.map(item => {
-            if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-                return JSON.stringify({
-                    type: 'image_url',
-                    image_url: { url: '* base64 data truncated for log *' }
-                });
-            }
-            return JSON.stringify(item);
-        }).join(', ') : message.content);
+        const content = message.content === undefined ? JSON.stringify(sanitizeBase64(message)) : (Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content);
         const { length, units } = this.getLength(content);
         const displayContent = this.shortenContent(content);

@@ -54,15 +47,7 @@ class GrokVisionPlugin extends OpenAIVisionPlugin {
     logger.info(`[grok request contained ${totalLength} ${totalUnits}]`);
 } else {
     const message = messages[0];
-    const content = Array.isArray(message.content) ? message.content.map(item => {
-        if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-            return JSON.stringify({
-                type: 'image_url',
-                image_url: { url: '* base64 data truncated for log *' }
-            });
-        }
-        return JSON.stringify(item);
-    }).join(', ') : message.content;
+    const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content;
     const { length, units } = this.getLength(content);
     logger.info(`[grok request sent containing ${length} ${units}]`);
     logger.verbose(`${this.shortenContent(content)}`);
package/server/plugins/modelPlugin.js
CHANGED
@@ -594,6 +594,9 @@ class ModelPlugin {
     if (error.response) {
         logger.error(`Response status: ${error.response.status}`);
         logger.error(`Response headers: ${JSON.stringify(error.response.headers)}`);
+        if (error.response.data) {
+            logger.error(`Response data: ${JSON.stringify(error.response.data)}`);
+        }
     }
     if (error.data) {
         logger.error(`Additional error data: ${JSON.stringify(error.data)}`);
package/server/plugins/openAiVisionPlugin.js
CHANGED
@@ -3,6 +3,7 @@ import logger from '../../lib/logger.js';
 import { requestState } from '../requestState.js';
 import { addCitationsToResolver } from '../../lib/pathwayTools.js';
 import CortexResponse from '../../lib/cortexResponse.js';
+import { sanitizeBase64 } from '../../lib/util.js';
 function safeJsonParse(content) {
     try {
         const parsedContent = JSON.parse(content);
@@ -158,15 +159,7 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
     let totalUnits;
     messages.forEach((message, index) => {
         //message.content string or array
-        const content = message.content === undefined ? JSON.stringify(message) : (Array.isArray(message.content) ? message.content.map(item => {
-            if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-                return JSON.stringify({
-                    type: 'image_url',
-                    image_url: { url: '* base64 data truncated for log *' }
-                });
-            }
-            return JSON.stringify(item);
-        }).join(', ') : message.content);
+        const content = message.content === undefined ? JSON.stringify(sanitizeBase64(message)) : (Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content);
         const { length, units } = this.getLength(content);
         const displayContent = this.shortenContent(content);

@@ -184,15 +177,7 @@ class OpenAIVisionPlugin extends OpenAIChatPlugin {
     logger.info(`[chat request contained ${totalLength} ${totalUnits}]`);
 } else {
     const message = messages[0];
-    const content = Array.isArray(message.content) ? message.content.map(item => {
-        if (item.type === 'image_url' && item.image_url?.url?.startsWith('data:')) {
-            return JSON.stringify({
-                type: 'image_url',
-                image_url: { url: '* base64 data truncated for log *' }
-            });
-        }
-        return JSON.stringify(item);
-    }).join(', ') : message.content;
+    const content = Array.isArray(message.content) ? message.content.map(item => JSON.stringify(sanitizeBase64(item))).join(', ') : message.content;
     const { length, units } = this.getLength(content);
     logger.info(`[request sent containing ${length} ${units}]`);
     logger.verbose(`${this.shortenContent(content)}`);
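The same refactor repeats across the Claude, Grok, and OpenAI vision plugins above: each previously carried its own inline truncation logic (which, in the Claude plugins, also mutated item.source in place before logging), and all of them now delegate to the shared sanitizeBase64 helper, which returns a sanitized copy. The effect on a single logged content item:

    // Before: per-plugin inline check, handling only its own message shape.
    // After: one non-mutating helper covers data URLs, raw base64 strings,
    // and the Claude source / Gemini inlineData shapes.
    const item = { type: 'image_url', image_url: { url: 'data:image/jpeg;base64,/9j/4AAQ...' } };
    const logged = JSON.stringify(sanitizeBase64(item));
    // `item` is left unchanged; `logged` contains '* base64 data truncated for log *'.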
package/server/plugins/replicateApiPlugin.js
CHANGED
@@ -5,6 +5,64 @@ import logger from "../../lib/logger.js";
 import axios from "axios";
 import mime from "mime-types";

+// Helper function to collect images from various parameter sources
+const collectImages = (candidate, accumulator) => {
+    if (!candidate) return;
+    if (Array.isArray(candidate)) {
+        candidate.forEach((item) => collectImages(item, accumulator));
+        return;
+    }
+    accumulator.push(candidate);
+};
+
+// Helper function to normalize image entries to strings
+const normalizeImageEntry = (entry) => {
+    if (!entry) return null;
+    if (typeof entry === "string") {
+        return entry;
+    }
+    if (typeof entry === "object") {
+        if (Array.isArray(entry)) {
+            return null;
+        }
+        if (entry.value) {
+            return entry.value;
+        }
+        if (entry.url) {
+            return entry.url;
+        }
+        if (entry.path) {
+            return entry.path;
+        }
+    }
+    return null;
+};
+
+// Helper function to omit undefined/null values from an object
+const omitUndefined = (obj) =>
+    Object.fromEntries(
+        Object.entries(obj).filter(([, value]) => value !== undefined && value !== null),
+    );
+
+// Helper function to collect and normalize images from combined parameters
+const collectNormalizedImages = (combinedParameters, additionalFields = []) => {
+    const imageCandidates = [];
+    const defaultFields = [
+        'image', 'images', 'input_image', 'input_images',
+        'input_image_1', 'input_image_2', 'input_image_3',
+        'image_1', 'image_2'
+    ];
+    const allFields = [...defaultFields, ...additionalFields];
+
+    allFields.forEach(field => {
+        collectImages(combinedParameters[field], imageCandidates);
+    });
+
+    return imageCandidates
+        .map((candidate) => normalizeImageEntry(candidate))
+        .filter((candidate) => candidate && typeof candidate === 'string');
+};
+
 class ReplicateApiPlugin extends ModelPlugin {
     constructor(pathway, model) {
         super(pathway, model);
@@ -139,67 +197,55 @@ class ReplicateApiPlugin extends ModelPlugin {
             const goFast = combinedParameters.go_fast ?? combinedParameters.goFast ?? true;
            const disableSafetyChecker = combinedParameters.disable_safety_checker ?? combinedParameters.disableSafetyChecker ?? false;

-            const collectImages = (candidate, accumulator) => {
-                if (!candidate) return;
-                if (Array.isArray(candidate)) {
-                    candidate.forEach((item) => collectImages(item, accumulator));
-                    return;
-                }
-                accumulator.push(candidate);
-            };
+            const normalizedImages = collectNormalizedImages(combinedParameters);

-            const imageCandidates = [];
-            collectImages(combinedParameters.image, imageCandidates);
-            collectImages(combinedParameters.images, imageCandidates);
-            collectImages(combinedParameters.input_image, imageCandidates);
-            collectImages(combinedParameters.input_images, imageCandidates);
-            collectImages(combinedParameters.input_image_1, imageCandidates);
-            collectImages(combinedParameters.input_image_2, imageCandidates);
-            collectImages(combinedParameters.input_image_3, imageCandidates);
-            collectImages(combinedParameters.image_1, imageCandidates);
-            collectImages(combinedParameters.image_2, imageCandidates);
-
-            const normalizeImageEntry = (entry) => {
-                if (!entry) return null;
-                if (typeof entry === "string") {
-                    return entry; // Return the URL string directly
-                }
-                if (typeof entry === "object") {
-                    if (Array.isArray(entry)) {
-                        return null;
-                    }
-                    if (entry.value) {
-                        return entry.value; // Return the value as a string
-                    }
-                    if (entry.url) {
-                        return entry.url; // Return the URL as a string
-                    }
-                    if (entry.path) {
-                        return entry.path; // Return the path as a string
-                    }
-                }
-                return null;
+            const basePayload = omitUndefined({
+                prompt: modelPromptText,
+                go_fast: goFast,
+                aspect_ratio: aspectRatio,
+                output_format: outputFormat,
+                output_quality: outputQuality,
+                disable_safety_checker: disableSafetyChecker,
+            });
+
+            // For qwen-image-edit-plus, always include the image array if we have images
+            const inputPayload = {
+                ...basePayload,
+                ...(normalizedImages.length > 0 ? { image: normalizedImages } : {})
             };

-            requestParameters = {
-                input: inputPayload,
-            };
+            requestParameters = {
+                input: inputPayload,
+            };
+            break;
+        }
+        case "replicate-qwen-image-edit-2511": {
+            const validRatios = ["1:1", "16:9", "9:16", "4:3", "3:4", "match_input_image"];
+            const validOutputFormats = ["webp", "jpg", "png"];
+
+            const aspectRatio = validRatios.includes(combinedParameters.aspect_ratio ?? combinedParameters.aspectRatio)
+                ? (combinedParameters.aspect_ratio ?? combinedParameters.aspectRatio)
+                : "match_input_image";
+            const outputFormat = validOutputFormats.includes(combinedParameters.output_format ?? combinedParameters.outputFormat)
+                ? (combinedParameters.output_format ?? combinedParameters.outputFormat)
+                : "webp";
+            const outputQuality = combinedParameters.output_quality ?? combinedParameters.outputQuality ?? 95;
+            const goFast = combinedParameters.go_fast ?? combinedParameters.goFast ?? true;
+            const disableSafetyChecker = combinedParameters.disable_safety_checker ?? combinedParameters.disableSafetyChecker ?? false;

-            const omitUndefined = (obj) =>
-                Object.fromEntries(
-                    Object.entries(obj).filter(([, value]) => value !== undefined && value !== null),
-                );
+            const normalizedImages = collectNormalizedImages(combinedParameters);

             const basePayload = omitUndefined({
                 prompt: modelPromptText,
                 go_fast: goFast,
                 aspect_ratio: aspectRatio,
                 output_format: outputFormat,
-                output_quality: outputQuality,
+                output_quality: Math.max(0, Math.min(100, outputQuality)),
                 disable_safety_checker: disableSafetyChecker,
+                ...(Number.isInteger(combinedParameters.seed) && combinedParameters.seed > 0 ? { seed: combinedParameters.seed } : {}),
             });

-            // For qwen-image-edit-plus, always include the image array if we have images
+            // For qwen-image-edit-2511, format images as array of strings (not objects)
             const inputPayload = {
                 ...basePayload,
                 ...(normalizedImages.length > 0 ? { image: normalizedImages } : {})
@@ -280,58 +326,7 @@ class ReplicateApiPlugin extends ModelPlugin {
             const validRatios = ["1:1", "4:3", "3:4", "16:9", "9:16", "match_input_image"];
             const validSequentialModes = ["disabled", "auto"];

-
-            const collectImages = (candidate, accumulator) => {
-                if (!candidate) return;
-                if (Array.isArray(candidate)) {
-                    candidate.forEach((item) => collectImages(item, accumulator));
-                    return;
-                }
-                accumulator.push(candidate);
-            };
-
-            const imageCandidates = [];
-            collectImages(combinedParameters.image, imageCandidates);
-            collectImages(combinedParameters.images, imageCandidates);
-            collectImages(combinedParameters.input_image, imageCandidates);
-            collectImages(combinedParameters.input_images, imageCandidates);
-            collectImages(combinedParameters.input_image_1, imageCandidates);
-            collectImages(combinedParameters.input_image_2, imageCandidates);
-            collectImages(combinedParameters.input_image_3, imageCandidates);
-            collectImages(combinedParameters.image_1, imageCandidates);
-            collectImages(combinedParameters.image_2, imageCandidates);
-            collectImages(combinedParameters.imageInput, imageCandidates);
-
-            const normalizeImageEntry = (entry) => {
-                if (!entry) return null;
-                if (typeof entry === "string") {
-                    return entry; // Return the URL string directly
-                }
-                if (typeof entry === "object") {
-                    if (Array.isArray(entry)) {
-                        return null;
-                    }
-                    if (entry.value) {
-                        return entry.value; // Return the value as a string
-                    }
-                    if (entry.url) {
-                        return entry.url; // Return the URL as a string
-                    }
-                    if (entry.path) {
-                        return entry.path; // Return the path as a string
-                    }
-                }
-                return null;
-            };
-
-            const normalizedImages = imageCandidates
-                .map((candidate) => normalizeImageEntry(candidate))
-                .filter((candidate) => candidate && typeof candidate === 'string');
-
-            const omitUndefined = (obj) =>
-                Object.fromEntries(
-                    Object.entries(obj).filter(([, value]) => value !== undefined && value !== null),
-                );
+            const normalizedImages = collectNormalizedImages(combinedParameters, ['imageInput']);

             const basePayload = omitUndefined({
                 prompt: modelPromptText,
@@ -341,7 +336,7 @@ class ReplicateApiPlugin extends ModelPlugin {
                 max_images: combinedParameters.maxImages || combinedParameters.numberResults || 1,
                 aspect_ratio: validRatios.includes(combinedParameters.aspectRatio) ? combinedParameters.aspectRatio : "4:3",
                 sequential_image_generation: validSequentialModes.includes(combinedParameters.sequentialImageGeneration) ? combinedParameters.sequentialImageGeneration : "disabled",
-                ...(
+                ...(Number.isInteger(combinedParameters.seed) && combinedParameters.seed > 0 ? { seed: combinedParameters.seed } : {}),
             });

             // For seedream-4, include the image_input array if we have images
@@ -350,6 +345,74 @@ class ReplicateApiPlugin extends ModelPlugin {
                 ...(normalizedImages.length > 0 ? { image_input: normalizedImages } : {})
             };

+            requestParameters = {
+                input: inputPayload,
+            };
+            break;
+        }
+        case "replicate-flux-2-pro": {
+            const validResolutions = ["match_input_image", "0.5 MP", "1 MP", "2 MP", "4 MP"];
+            const validRatios = [
+                "match_input_image",
+                "custom",
+                "1:1",
+                "16:9",
+                "3:2",
+                "2:3",
+                "4:5",
+                "5:4",
+                "9:16",
+                "3:4",
+                "4:3"
+            ];
+            const validOutputFormats = ["webp", "jpg", "png"];
+
+            const normalizedImages = collectNormalizedImages(combinedParameters).slice(0, 8); // Maximum 8 images
+
+            const aspectRatio = validRatios.includes(combinedParameters.aspect_ratio ?? combinedParameters.aspectRatio)
+                ? (combinedParameters.aspect_ratio ?? combinedParameters.aspectRatio)
+                : "1:1";
+
+            const resolution = validResolutions.includes(combinedParameters.resolution)
+                ? combinedParameters.resolution
+                : "1 MP";
+
+            const outputFormat = validOutputFormats.includes(combinedParameters.output_format ?? combinedParameters.outputFormat)
+                ? (combinedParameters.output_format ?? combinedParameters.outputFormat)
+                : "webp";
+
+            const outputQuality = combinedParameters.output_quality ?? combinedParameters.outputQuality ?? 80;
+            const safetyTolerance = combinedParameters.safety_tolerance ?? combinedParameters.safetyTolerance ?? 2;
+
+            // Validate and round width/height to multiples of 32 if provided
+            let width = combinedParameters.width;
+            let height = combinedParameters.height;
+
+            if (width !== undefined && width !== null) {
+                width = Math.max(256, Math.min(2048, Math.round(width / 32) * 32));
+            }
+            if (height !== undefined && height !== null) {
+                height = Math.max(256, Math.min(2048, Math.round(height / 32) * 32));
+            }
+
+            const basePayload = omitUndefined({
+                prompt: modelPromptText,
+                aspect_ratio: aspectRatio,
+                resolution: resolution,
+                output_format: outputFormat,
+                output_quality: Math.max(0, Math.min(100, outputQuality)),
+                safety_tolerance: Math.max(1, Math.min(5, safetyTolerance)),
+                ...(width !== undefined && width !== null ? { width } : {}),
+                ...(height !== undefined && height !== null ? { height } : {}),
+                ...(Number.isInteger(combinedParameters.seed) && combinedParameters.seed > 0 ? { seed: combinedParameters.seed } : {}),
+            });
+
+            // Include input_images array if we have images
+            const inputPayload = {
+                ...basePayload,
+                ...(normalizedImages.length > 0 ? { input_images: normalizedImages } : {})
+            };
+
             requestParameters = {
                 input: inputPayload,
             };
package/server/resolver.js
CHANGED
@@ -2,6 +2,7 @@ import { fulfillWithTimeout } from '../lib/promiser.js';
 import { PathwayResolver } from './pathwayResolver.js';
 import CortexResponse from '../lib/cortexResponse.js';
 import { withRequestLoggingDisabled } from '../lib/logger.js';
+import { sanitizeBase64 } from '../lib/util.js';

 // This resolver uses standard parameters required by Apollo server:
 // (parent, args, contextValue, info)
@@ -41,9 +42,37 @@ const rootResolver = async (parent, args, contextValue, info) => {
     let resultData = pathwayResolver.pathwayResultData ? JSON.stringify(pathwayResolver.pathwayResultData) : null;

     const { warnings, errors, previousResult, savedContextId, tool } = pathwayResolver;
-
-    // Add request parameters back as debug
-    const debug = pathwayResolver.prompts.map(prompt =>
+
+    // Add request parameters back as debug - sanitize base64 data before returning
+    const debug = pathwayResolver.prompts.map(prompt => {
+        if (!prompt.debugInfo) return '';
+        try {
+            // Try to parse entire debugInfo as JSON first (for single JSON object)
+            try {
+                const parsed = JSON.parse(prompt.debugInfo);
+                return JSON.stringify(sanitizeBase64(parsed));
+            } catch (e) {
+                // Not a single JSON object, try line-by-line
+                const lines = prompt.debugInfo.split('\n');
+                return lines.map(line => {
+                    const trimmed = line.trim();
+                    if (trimmed.startsWith('{') || trimmed.startsWith('[')) {
+                        try {
+                            const parsed = JSON.parse(line);
+                            return JSON.stringify(sanitizeBase64(parsed));
+                        } catch (e) {
+                            // Not valid JSON on this line, return as-is
+                            return line;
+                        }
+                    }
+                    return line;
+                }).join('\n');
+            }
+        } catch (e) {
+            // If sanitization fails, return original
+            return prompt.debugInfo;
+        }
+    }).join('\n').trim();

     return {
         debug,
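With this change the GraphQL debug field still returns the request parameters, but any base64 payloads inside them are scrubbed before leaving the server. Since debugInfo may be a single JSON object, a mix of plain-text and JSON lines, or neither, the code tries whole-string parsing first and falls back to line-by-line. An illustration (the debugInfo content here is hypothetical):

    const debugInfo = [
        'request 1 of 1',
        '{"messages":[{"role":"user","content":[{"type":"image","source":{"type":"base64","data":"iVBORw0KGgoAAAANSUhEUg..."}}]}]}',
    ].join('\n');
    // Line 1 is not JSON and passes through unchanged; line 2 is parsed,
    // sanitizeBase64 replaces source.data with '* base64 data truncated for log *',
    // and the line is re-serialized.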
package/tests/unit/graphql_executeWorkspace_transformation.test.js
CHANGED
@@ -32,7 +32,7 @@ test('should format cortex pathway arguments correctly with existing chatHistory
     const originalPrompt = {
         name: 'summarize',
         prompt: 'summarize this file',
-        cortexPathwayName: '
+        cortexPathwayName: 'run_workspace_agent'
     };

     // Mock pathway data
@@ -132,7 +132,7 @@ test('should create new user message when no existing chatHistory', (t) => {
     const originalPrompt = {
         name: 'summarize',
         prompt: 'summarize this file',
-        cortexPathwayName: '
+        cortexPathwayName: 'run_workspace_agent'
     };

     // Mock pathway data
@@ -219,7 +219,7 @@ test('should use default model when pathway model is not specified', (t) => {
     const originalPrompt = {
         name: 'summarize',
         prompt: 'summarize this file',
-        cortexPathwayName: '
+        cortexPathwayName: 'run_workspace_agent'
     };

     // Mock pathway data without model