@juspay/neurolink 7.35.0 → 7.37.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/dist/adapters/providerImageAdapter.d.ts +56 -0
- package/dist/adapters/providerImageAdapter.js +257 -0
- package/dist/cli/commands/config.d.ts +20 -20
- package/dist/cli/factories/commandFactory.d.ts +1 -0
- package/dist/cli/factories/commandFactory.js +26 -3
- package/dist/config/taskClassificationConfig.d.ts +51 -0
- package/dist/config/taskClassificationConfig.js +148 -0
- package/dist/core/baseProvider.js +99 -45
- package/dist/core/types.d.ts +3 -0
- package/dist/lib/adapters/providerImageAdapter.d.ts +56 -0
- package/dist/lib/adapters/providerImageAdapter.js +257 -0
- package/dist/lib/config/taskClassificationConfig.d.ts +51 -0
- package/dist/lib/config/taskClassificationConfig.js +148 -0
- package/dist/lib/core/baseProvider.js +99 -45
- package/dist/lib/core/types.d.ts +3 -0
- package/dist/lib/neurolink.d.ts +20 -0
- package/dist/lib/neurolink.js +276 -8
- package/dist/lib/types/content.d.ts +78 -0
- package/dist/lib/types/content.js +5 -0
- package/dist/lib/types/conversation.d.ts +19 -0
- package/dist/lib/types/generateTypes.d.ts +4 -1
- package/dist/lib/types/index.d.ts +2 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/streamTypes.d.ts +6 -3
- package/dist/lib/types/taskClassificationTypes.d.ts +52 -0
- package/dist/lib/types/taskClassificationTypes.js +5 -0
- package/dist/lib/utils/imageProcessor.d.ts +84 -0
- package/dist/lib/utils/imageProcessor.js +362 -0
- package/dist/lib/utils/messageBuilder.d.ts +8 -1
- package/dist/lib/utils/messageBuilder.js +279 -0
- package/dist/lib/utils/modelRouter.d.ts +107 -0
- package/dist/lib/utils/modelRouter.js +292 -0
- package/dist/lib/utils/promptRedaction.d.ts +29 -0
- package/dist/lib/utils/promptRedaction.js +62 -0
- package/dist/lib/utils/taskClassificationUtils.d.ts +55 -0
- package/dist/lib/utils/taskClassificationUtils.js +149 -0
- package/dist/lib/utils/taskClassifier.d.ts +23 -0
- package/dist/lib/utils/taskClassifier.js +94 -0
- package/dist/neurolink.d.ts +20 -0
- package/dist/neurolink.js +276 -8
- package/dist/types/content.d.ts +78 -0
- package/dist/types/content.js +5 -0
- package/dist/types/conversation.d.ts +19 -0
- package/dist/types/generateTypes.d.ts +4 -1
- package/dist/types/index.d.ts +2 -0
- package/dist/types/index.js +2 -0
- package/dist/types/streamTypes.d.ts +6 -3
- package/dist/types/taskClassificationTypes.d.ts +52 -0
- package/dist/types/taskClassificationTypes.js +5 -0
- package/dist/utils/imageProcessor.d.ts +84 -0
- package/dist/utils/imageProcessor.js +362 -0
- package/dist/utils/messageBuilder.d.ts +8 -1
- package/dist/utils/messageBuilder.js +279 -0
- package/dist/utils/modelRouter.d.ts +107 -0
- package/dist/utils/modelRouter.js +292 -0
- package/dist/utils/promptRedaction.d.ts +29 -0
- package/dist/utils/promptRedaction.js +62 -0
- package/dist/utils/taskClassificationUtils.d.ts +55 -0
- package/dist/utils/taskClassificationUtils.js +149 -0
- package/dist/utils/taskClassifier.d.ts +23 -0
- package/dist/utils/taskClassifier.js +94 -0
- package/package.json +1 -1
@@ -0,0 +1,148 @@
+/**
+ * Task Classification Configuration
+ * Contains patterns, keywords, and scoring weights for task classification
+ */
+/**
+ * Regular expression patterns that indicate fast response tasks
+ */
+export const FAST_PATTERNS = [
+    // Greetings and social
+    /^(hi|hello|hey|good morning|good afternoon|good evening)/i,
+    /^(thanks?|thank you|thx)/i,
+    /^(yes|no|ok|okay|sure|fine)/i,
+    // Simple questions
+    /^what is\s+\w+\??$/i,
+    /^how are you/i,
+    /^tell me about\s+\w+$/i,
+    // Simple requests
+    /^(list|show|display)\s+/i,
+    /^give me\s+/i,
+    /^can you\s+(help|assist)/i,
+    // Simple definitions
+    /^define\s+/i,
+    /^meaning of\s+/i,
+    /^what does\s+\w+\s+mean/i,
+    // Quick facts
+    /^when (is|was|did)/i,
+    /^where (is|was)/i,
+    /^who (is|was)/i,
+    // Simple translations
+    /^translate\s+["'].*["']\s+to\s+\w+/i,
+    /^how do you say\s+/i,
+];
+/**
+ * Regular expression patterns that indicate reasoning tasks
+ */
+export const REASONING_PATTERNS = [
+    // Analysis and comparison
+    /\b(analyz|compar|evaluat|assess|examin)\w*/i,
+    /\b(pros and cons|advantages and disadvantages)/i,
+    /\b(better|worse|best|worst)\b.*\b(than|versus|vs)\b/i,
+    // Problem solving
+    /\b(solve|solution|problem|issue|challenge)\b/i,
+    /\b(how to|step by step|strategy|approach)\b/i,
+    /\b(optimize|improve|enhance|maximize|minimize)\b/i,
+    // Planning and design
+    /\b(plan|design|architect|structure|framework)\b/i,
+    /\b(implement|develop|build|create|construct)\b/i,
+    /\b(roadmap|timeline|schedule|phases)\b/i,
+    // Complex questions
+    /\b(why|explain|reason|cause|effect|impact)\b/i,
+    /\b(implications|consequences|considerations)\b/i,
+    /\b(should I|would you recommend|what if)\b/i,
+    // Research and investigation
+    /\b(research|investigate|explore|discover)\b/i,
+    /\b(evidence|proof|validate|verify)\b/i,
+    /\b(trends|patterns|insights|conclusions)\b/i,
+    // Business and strategy
+    /\b(business|strategy|market|competitive|financial)\b/i,
+    /\b(ROI|revenue|profit|investment|budget)\b/i,
+    /\b(stakeholder|customer|user experience|UX)\b/i,
+    // Technical complexity
+    /\b(algorithm|architecture|system|infrastructure)\b/i,
+    /\b(performance|scalability|security|reliability)\b/i,
+    /\b(integration|API|database|server)\b/i,
+];
+/**
+ * Keywords that indicate fast tasks regardless of context
+ */
+export const FAST_KEYWORDS = [
+    "quick",
+    "simple",
+    "brief",
+    "short",
+    "summary",
+    "overview",
+    "definition",
+    "meaning",
+    "list",
+    "show",
+    "display",
+    "name",
+    "tell",
+    "what",
+    "when",
+    "where",
+    "who",
+    "how many",
+    "count",
+];
+/**
+ * Keywords that indicate reasoning tasks regardless of context
+ */
+export const REASONING_KEYWORDS = [
+    "complex",
+    "detailed",
+    "comprehensive",
+    "thorough",
+    "in-depth",
+    "analyze",
+    "compare",
+    "evaluate",
+    "assess",
+    "research",
+    "investigate",
+    "strategy",
+    "plan",
+    "design",
+    "solve",
+    "optimize",
+    "recommend",
+    "explain",
+    "why",
+    "justify",
+    "pros",
+    "cons",
+    "trade-offs",
+];
+/**
+ * Scoring weights for different classification factors
+ */
+export const SCORING_WEIGHTS = {
+    SHORT_PROMPT_BONUS: 2,
+    LONG_PROMPT_BONUS: 1,
+    PATTERN_MATCH_SCORE: 3,
+    KEYWORD_MATCH_SCORE: 1,
+    MULTIPLE_QUESTIONS_BONUS: 1,
+    MULTI_SENTENCE_BONUS: 1,
+    TECHNICAL_DOMAIN_BONUS: 1,
+    SIMPLE_DEFINITION_BONUS: 2,
+};
+/**
+ * Classification thresholds and constraints
+ */
+export const CLASSIFICATION_THRESHOLDS = {
+    SHORT_PROMPT_LENGTH: 50,
+    LONG_PROMPT_LENGTH: 200,
+    SIMPLE_DEFINITION_LENGTH: 100,
+    MIN_CONFIDENCE: 0.6,
+    MAX_CONFIDENCE: 0.95,
+    DEFAULT_CONFIDENCE: 0.5,
+};
+/**
+ * Domain-specific patterns for enhanced classification
+ */
+export const DOMAIN_PATTERNS = {
+    TECHNICAL: /\b(code|programming|development|software)\b/i,
+    SIMPLE_DEFINITION: /\b(definition|meaning|what is)\b/i,
+};
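This config file only ships the raw signals; the classifier that consumes them (taskClassifier.js, +94 lines in this release) is not shown in the diff. As a rough sketch of how such signals could combine, here is a hypothetical `classifyTask` helper with invented scoring rules; only the imported constants come from the package:

```ts
import {
  FAST_PATTERNS, REASONING_PATTERNS,
  FAST_KEYWORDS, REASONING_KEYWORDS,
  SCORING_WEIGHTS, CLASSIFICATION_THRESHOLDS,
} from "@juspay/neurolink/dist/config/taskClassificationConfig.js"; // deep path for illustration

// Hypothetical scorer: sum pattern and keyword hits per class,
// then clamp confidence into [MIN_CONFIDENCE, MAX_CONFIDENCE].
function classifyTask(prompt: string): { type: "fast" | "reasoning"; confidence: number } {
  const lower = prompt.toLowerCase();
  const score = (patterns: RegExp[], keywords: string[]) =>
    patterns.filter((re) => re.test(prompt)).length * SCORING_WEIGHTS.PATTERN_MATCH_SCORE +
    keywords.filter((kw) => lower.includes(kw)).length * SCORING_WEIGHTS.KEYWORD_MATCH_SCORE;

  let fast = score(FAST_PATTERNS, FAST_KEYWORDS);
  let reasoning = score(REASONING_PATTERNS, REASONING_KEYWORDS);
  // Short prompts lean "fast", long prompts lean "reasoning".
  if (prompt.length < CLASSIFICATION_THRESHOLDS.SHORT_PROMPT_LENGTH) {
    fast += SCORING_WEIGHTS.SHORT_PROMPT_BONUS;
  } else if (prompt.length > CLASSIFICATION_THRESHOLDS.LONG_PROMPT_LENGTH) {
    reasoning += SCORING_WEIGHTS.LONG_PROMPT_BONUS;
  }
  const total = fast + reasoning;
  const type = reasoning > fast ? "reasoning" : "fast";
  const confidence = total === 0
    ? CLASSIFICATION_THRESHOLDS.DEFAULT_CONFIDENCE
    : Math.min(CLASSIFICATION_THRESHOLDS.MAX_CONFIDENCE,
        Math.max(CLASSIFICATION_THRESHOLDS.MIN_CONFIDENCE, Math.max(fast, reasoning) / total));
  return { type, confidence };
}
```

The real classifier may weight these factors differently; the constants above are the only part taken from the package.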
@@ -1,3 +1,4 @@
+import { generateText } from "ai";
 import { MiddlewareFactory } from "../middleware/factory.js";
 import { logger } from "../utils/logger.js";
 import { DEFAULT_MAX_STEPS, STEP_LIMITS } from "../core/constants.js";
@@ -5,12 +6,11 @@ import { directAgentTools } from "../agent/directTools.js";
 import { getSafeMaxTokens } from "../utils/tokenLimits.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { shouldDisableBuiltinTools } from "../utils/toolUtils.js";
-import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, } from "../utils/messageBuilder.js";
 import { getKeysAsString, getKeyCount } from "../utils/transformationUtils.js";
 import { validateStreamOptions as validateStreamOpts, validateTextGenerationOptions, ValidationError, createValidationSummary, } from "../utils/parameterValidation.js";
 import { recordProviderPerformanceFromMetrics, getPerformanceOptimizedProvider, } from "./evaluationProviders.js";
 import { modelConfig } from "./modelConfiguration.js";
-// Provider types moved to ../types/providers.js
 /**
  * Abstract base class for all AI providers
  * Tools are integrated as first-class citizens - always available by default
@@ -166,7 +166,7 @@ export class BaseProvider {
         try {
             // Import streamText dynamically to avoid circular dependencies
             // Using streamText instead of generateText for unified implementation
-            const { streamText } = await import("ai");
+            // const { streamText } = await import("ai");
             // Get ALL available tools (direct + MCP + external from options)
             const shouldUseTools = !options.disableTools && this.supportsTools();
             const baseTools = shouldUseTools ? await this.getAllTools() : {};
@@ -211,42 +211,86 @@
             });
             const model = await this.getAISDKModelWithMiddleware(options);
             // Build proper message array with conversation history
-
-
-
+            // Check if this is a multimodal request (images or content present)
+            let messages;
+            // Type guard to check if options has multimodal input
+            const hasMultimodalInput = (opts) => {
+                const input = opts.input;
+                const hasImages = !!input?.images?.length;
+                const hasContent = !!input?.content?.length;
+                return hasImages || hasContent;
+            };
+            if (hasMultimodalInput(options)) {
+                if (process.env.NEUROLINK_DEBUG === "true") {
+                    logger.info("🖼️ [MULTIMODAL-REQUEST] Detected multimodal input, using multimodal message builder");
+                }
+                // This is a multimodal request - use multimodal message builder
+                // Convert TextGenerationOptions to GenerateOptions format for multimodal processing
+                const input = options.input;
+                const multimodalOptions = {
+                    input: {
+                        text: options.prompt || options.input?.text || "",
+                        images: input?.images,
+                        content: input?.content,
+                    },
+                    provider: options.provider,
+                    model: options.model,
+                    temperature: options.temperature,
+                    maxTokens: options.maxTokens,
+                    systemPrompt: options.systemPrompt,
+                    enableAnalytics: options.enableAnalytics,
+                    enableEvaluation: options.enableEvaluation,
+                    context: options.context,
+                };
+                messages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+            }
+            else {
+                if (process.env.NEUROLINK_DEBUG === "true") {
+                    logger.info("📝 [TEXT-ONLY-REQUEST] No multimodal input detected, using standard message builder");
+                }
+                // Standard text-only request
+                messages = buildMessagesArray(options);
+            }
+            // Convert messages to Vercel AI SDK format
+            const aiSDKMessages = messages.map((msg) => {
+                if (typeof msg.content === "string") {
+                    // Simple text content
+                    return {
+                        role: msg.role,
+                        content: msg.content,
+                    };
+                }
+                else {
+                    // Multimodal content array - convert to Vercel AI SDK format
+                    // The Vercel AI SDK expects content to be in a specific format
+                    return {
+                        role: msg.role,
+                        content: msg.content.map((item) => {
+                            if (item.type === "text") {
+                                return { type: "text", text: item.text || "" };
+                            }
+                            else if (item.type === "image") {
+                                return { type: "image", image: item.image || "" };
+                            }
+                            return item;
+                        }),
+                    };
+                }
+            });
+            const generateResult = await generateText({
                 model,
-                messages:
+                messages: aiSDKMessages,
                 tools,
                 maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
                 toolChoice: shouldUseTools ? "auto" : "none",
                 temperature: options.temperature,
                 maxTokens: options.maxTokens, // No default limit - unlimited unless specified
             });
-            // Accumulate the streamed content
-            let accumulatedContent = "";
-            // Wait for the stream to complete and accumulate content
-            try {
-                for await (const chunk of streamResult.textStream) {
-                    accumulatedContent += chunk;
-                }
-            }
-            catch (streamError) {
-                logger.error(`Error reading text stream for ${this.providerName}:`, streamError);
-                throw streamError;
-            }
-            // Get the final result - this should include usage, toolCalls, etc.
-            const usage = await streamResult.usage;
-            const toolCalls = await streamResult.toolCalls;
-            const toolResults = await streamResult.toolResults;
             const responseTime = Date.now() - startTime;
-            //
-            const
-
-
-                toolCalls: toolCalls,
-                toolResults: toolResults,
-                steps: streamResult.steps, // Include steps for tool execution tracking
-            };
+            // Extract properties from generateResult
+            const usage = generateResult.usage;
+            const toolCalls = generateResult.toolCalls;
+            const toolResults = generateResult.toolResults;
             try {
                 const actualCost = await this.calculateActualCost(usage || { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
                 recordProviderPerformanceFromMetrics(this.providerName, {
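The net effect of this hunk: the generate path no longer streams and accumulates text via `streamText`, but issues a single `generateText` call, mapping each message part into the Vercel AI SDK shape. A minimal standalone sketch of that mapping, with invented sample values:

```ts
type Part = { type: string; text?: string; image?: string };

// An invented multimodal message in the internal shape produced by the builders.
const msg = {
  role: "user",
  content: [
    { type: "text", text: "What is in this image?" },
    { type: "image", image: "data:image/png;base64,iVBORw0KGgo..." },
  ] as Part[],
};

// Converted exactly as in the hunk above: text parts keep `text`,
// image parts keep `image`, any other part type passes through unchanged.
const aiSDKMessage = {
  role: msg.role,
  content: msg.content.map((item) => {
    if (item.type === "text") return { type: "text", text: item.text || "" };
    if (item.type === "image") return { type: "image", image: item.image || "" };
    return item;
  }),
};
```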
@@ -273,14 +317,14 @@
             // First check direct tool calls (fallback)
             if (toolCalls && toolCalls.length > 0) {
                 toolsUsed.push(...toolCalls.map((tc) => {
-                    return tc.toolName || "unknown";
+                    return tc.toolName || tc.name || "unknown";
                 }));
             }
             // Then check steps for tool calls (primary source for multi-step)
-            if (
-            Array.isArray(
-            for (const step of
-            []) {
+            if (generateResult.steps &&
+                Array.isArray(generateResult.steps)) {
+                for (const step of generateResult
+                    .steps || []) {
                 if (step?.toolCalls && Array.isArray(step.toolCalls)) {
                     toolsUsed.push(...step.toolCalls.map((tc) => {
                         return tc.toolName || tc.name || "unknown";
@@ -295,10 +339,10 @@
             // Create a map of tool calls to their arguments for matching with results
             const toolCallArgsMap = new Map();
             // Extract tool executions from AI SDK result steps
-            if (
-            Array.isArray(
-            for (const step of
-            []) {
+            if (generateResult.steps &&
+                Array.isArray(generateResult.steps)) {
+                for (const step of generateResult
+                    .steps || []) {
                 // First, collect tool calls and their arguments
                 if (step?.toolCalls && Array.isArray(step.toolCalls)) {
                     for (const toolCall of step.toolCalls) {
@@ -359,11 +403,11 @@
             }
             // Format the result with tool executions included
             const enhancedResult = {
-                content:
+                content: generateResult.text,
                 usage: {
-                    input:
-                    output:
-                    total:
+                    input: generateResult.usage?.promptTokens || 0,
+                    output: generateResult.usage?.completionTokens || 0,
+                    total: generateResult.usage?.totalTokens || 0,
                 },
                 provider: this.providerName,
                 model: this.modelName,
@@ -943,13 +987,23 @@
         const providerName = optionsOrPrompt.provider || this.providerName;
         // Apply safe maxTokens based on provider and model
         const safeMaxTokens = getSafeMaxTokens(providerName, modelName, optionsOrPrompt.maxTokens);
-
+        // CRITICAL FIX: Preserve the entire input object for multimodal support
+        // This ensures images and content arrays are not lost during normalization
+        const normalizedOptions = {
             ...optionsOrPrompt,
             prompt,
             provider: providerName,
             model: modelName,
             maxTokens: safeMaxTokens,
         };
+        // Ensure input object is preserved if it exists (for multimodal support)
+        if (optionsOrPrompt.input) {
+            normalizedOptions.input = {
+                ...optionsOrPrompt.input,
+                text: prompt, // Ensure text is consistent
+            };
+        }
+        return normalizedOptions;
     }
     normalizeStreamOptions(optionsOrPrompt) {
         if (typeof optionsOrPrompt === "string") {
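The observable contract of this fix: whatever arrives in `input` (including `images` and `content`) survives normalization, and `input.text` is re-synced to the resolved `prompt`. A small self-contained illustration with invented names, not the package's internals:

```ts
// Invented stand-in for the normalize step shown in the hunk above.
interface Opts {
  prompt?: string;
  input?: { text: string; images?: Array<Buffer | string> };
}

function normalize(optionsOrPrompt: Opts, prompt: string): Opts {
  const normalizedOptions: Opts = { ...optionsOrPrompt, prompt };
  // The fix: rebuild `input` so images/content survive and text stays consistent.
  if (optionsOrPrompt.input) {
    normalizedOptions.input = { ...optionsOrPrompt.input, text: prompt };
  }
  return normalizedOptions;
}

const out = normalize({ input: { text: "Describe this", images: ["./cat.png"] } }, "Describe this");
console.log(out.input?.images); // [ './cat.png' ], preserved through normalization
```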
package/dist/core/types.d.ts CHANGED
@@ -175,11 +175,14 @@ export interface StreamingOptions {
 }
 /**
  * Text generation options interface
+ * Extended to support multimodal content with zero breaking changes
  */
 export interface TextGenerationOptions {
     prompt?: string;
     input?: {
         text: string;
+        images?: Array<Buffer | string>;
+        content?: Array<import("../types/content.js").TextContent | import("../types/content.js").ImageContent>;
     };
     provider?: AIProviderName;
     model?: string;
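With the extended `input` shape, a caller can pass images alongside text without touching any other option. A hedged usage sketch: `new NeuroLink()` and `generate()` follow the package's documented API, while the `images` field is the addition typed above:

```ts
import { NeuroLink } from "@juspay/neurolink";
import { readFileSync } from "node:fs";

async function describeImage() {
  const neurolink = new NeuroLink();
  const result = await neurolink.generate({
    input: {
      text: "What does this diagram show?",
      images: [readFileSync("./diagram.png")], // Buffer | string, per the new type
    },
    provider: "openai",
    model: "gpt-4o",
  });
  console.log(result.content);
}
```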
@@ -0,0 +1,56 @@
+/**
+ * Provider Image Adapter - Smart routing for multimodal content
+ * Handles provider-specific image formatting and vision capability validation
+ */
+import type { Content } from "../types/content.js";
+/**
+ * Simplified logger for essential error reporting only
+ */
+export declare class MultimodalLogger {
+    static logError(step: string, error: Error, context: unknown): void;
+}
+/**
+ * Provider Image Adapter - Smart routing and formatting
+ */
+export declare class ProviderImageAdapter {
+    /**
+     * Main adapter method - routes to provider-specific formatting
+     */
+    static adaptForProvider(text: string, images: Array<Buffer | string>, provider: string, model: string): Promise<unknown>;
+    /**
+     * Format content for OpenAI (GPT-4o format)
+     */
+    private static formatForOpenAI;
+    /**
+     * Format content for Google AI (Gemini format)
+     */
+    private static formatForGoogleAI;
+    /**
+     * Format content for Anthropic (Claude format)
+     */
+    private static formatForAnthropic;
+    /**
+     * Format content for Vertex AI (model-specific routing)
+     */
+    private static formatForVertex;
+    /**
+     * Validate that provider and model support vision
+     */
+    private static validateVisionSupport;
+    /**
+     * Convert simple images array to advanced content format
+     */
+    static convertToContent(text: string, images?: Array<Buffer | string>): Content[];
+    /**
+     * Check if provider supports multimodal content
+     */
+    static supportsVision(provider: string, model?: string): boolean;
+    /**
+     * Get supported models for a provider
+     */
+    static getSupportedModels(provider: string): string[];
+    /**
+     * Get all vision-capable providers
+     */
+    static getVisionProviders(): string[];
+}
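The declaration file above fixes the adapter's public static surface. A usage sketch that relies only on those declared signatures; the deep `dist/adapters/...` import path is an assumption about how the module is reachable:

```ts
import { ProviderImageAdapter } from "@juspay/neurolink/dist/adapters/providerImageAdapter.js";
import { readFileSync } from "node:fs";

async function demo() {
  const image = readFileSync("./chart.png");
  // Capability gate before attempting a vision request.
  if (!ProviderImageAdapter.supportsVision("openai", "gpt-4o")) {
    console.log("Vision-capable providers:", ProviderImageAdapter.getVisionProviders());
    console.log("Models for openai:", ProviderImageAdapter.getSupportedModels("openai"));
    return;
  }
  // Promote a plain images array to the richer Content[] form...
  const content = ProviderImageAdapter.convertToContent("Describe this chart", [image]);
  // ...or produce the provider-specific payload in one step.
  const payload = await ProviderImageAdapter.adaptForProvider("Describe this chart", [image], "openai", "gpt-4o");
  console.log(content.length, payload);
}
```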