@juspay/neurolink 8.3.0 → 8.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/README.md +1 -0
- package/dist/adapters/providerImageAdapter.d.ts +1 -1
- package/dist/adapters/providerImageAdapter.js +62 -0
- package/dist/agent/directTools.d.ts +0 -72
- package/dist/agent/directTools.js +3 -74
- package/dist/cli/commands/config.d.ts +18 -18
- package/dist/cli/factories/commandFactory.js +1 -0
- package/dist/constants/enums.d.ts +1 -0
- package/dist/constants/enums.js +3 -1
- package/dist/constants/tokens.d.ts +3 -0
- package/dist/constants/tokens.js +3 -0
- package/dist/core/baseProvider.d.ts +56 -53
- package/dist/core/baseProvider.js +107 -1095
- package/dist/core/constants.d.ts +3 -0
- package/dist/core/constants.js +6 -3
- package/dist/core/modelConfiguration.js +10 -0
- package/dist/core/modules/GenerationHandler.d.ts +63 -0
- package/dist/core/modules/GenerationHandler.js +230 -0
- package/dist/core/modules/MessageBuilder.d.ts +39 -0
- package/dist/core/modules/MessageBuilder.js +179 -0
- package/dist/core/modules/StreamHandler.d.ts +52 -0
- package/dist/core/modules/StreamHandler.js +103 -0
- package/dist/core/modules/TelemetryHandler.d.ts +64 -0
- package/dist/core/modules/TelemetryHandler.js +170 -0
- package/dist/core/modules/ToolsManager.d.ts +98 -0
- package/dist/core/modules/ToolsManager.js +521 -0
- package/dist/core/modules/Utilities.d.ts +88 -0
- package/dist/core/modules/Utilities.js +329 -0
- package/dist/factories/providerRegistry.js +1 -1
- package/dist/lib/adapters/providerImageAdapter.d.ts +1 -1
- package/dist/lib/adapters/providerImageAdapter.js +62 -0
- package/dist/lib/agent/directTools.d.ts +0 -72
- package/dist/lib/agent/directTools.js +3 -74
- package/dist/lib/constants/enums.d.ts +1 -0
- package/dist/lib/constants/enums.js +3 -1
- package/dist/lib/constants/tokens.d.ts +3 -0
- package/dist/lib/constants/tokens.js +3 -0
- package/dist/lib/core/baseProvider.d.ts +56 -53
- package/dist/lib/core/baseProvider.js +107 -1095
- package/dist/lib/core/constants.d.ts +3 -0
- package/dist/lib/core/constants.js +6 -3
- package/dist/lib/core/modelConfiguration.js +10 -0
- package/dist/lib/core/modules/GenerationHandler.d.ts +63 -0
- package/dist/lib/core/modules/GenerationHandler.js +231 -0
- package/dist/lib/core/modules/MessageBuilder.d.ts +39 -0
- package/dist/lib/core/modules/MessageBuilder.js +180 -0
- package/dist/lib/core/modules/StreamHandler.d.ts +52 -0
- package/dist/lib/core/modules/StreamHandler.js +104 -0
- package/dist/lib/core/modules/TelemetryHandler.d.ts +64 -0
- package/dist/lib/core/modules/TelemetryHandler.js +171 -0
- package/dist/lib/core/modules/ToolsManager.d.ts +98 -0
- package/dist/lib/core/modules/ToolsManager.js +522 -0
- package/dist/lib/core/modules/Utilities.d.ts +88 -0
- package/dist/lib/core/modules/Utilities.js +330 -0
- package/dist/lib/factories/providerRegistry.js +1 -1
- package/dist/lib/mcp/servers/agent/directToolsServer.js +0 -1
- package/dist/lib/models/modelRegistry.js +44 -0
- package/dist/lib/neurolink.js +35 -3
- package/dist/lib/providers/amazonBedrock.js +59 -10
- package/dist/lib/providers/anthropic.js +2 -30
- package/dist/lib/providers/azureOpenai.js +2 -24
- package/dist/lib/providers/googleAiStudio.js +2 -24
- package/dist/lib/providers/googleVertex.js +2 -45
- package/dist/lib/providers/huggingFace.js +3 -31
- package/dist/lib/providers/litellm.d.ts +1 -1
- package/dist/lib/providers/litellm.js +110 -44
- package/dist/lib/providers/mistral.js +5 -32
- package/dist/lib/providers/ollama.d.ts +1 -0
- package/dist/lib/providers/ollama.js +476 -129
- package/dist/lib/providers/openAI.js +2 -28
- package/dist/lib/providers/openaiCompatible.js +3 -31
- package/dist/lib/types/content.d.ts +16 -113
- package/dist/lib/types/content.js +16 -2
- package/dist/lib/types/conversation.d.ts +3 -17
- package/dist/lib/types/generateTypes.d.ts +2 -2
- package/dist/lib/types/index.d.ts +2 -0
- package/dist/lib/types/index.js +2 -0
- package/dist/lib/types/multimodal.d.ts +282 -0
- package/dist/lib/types/multimodal.js +101 -0
- package/dist/lib/types/streamTypes.d.ts +2 -2
- package/dist/lib/utils/imageProcessor.d.ts +1 -1
- package/dist/lib/utils/messageBuilder.js +25 -2
- package/dist/lib/utils/multimodalOptionsBuilder.d.ts +1 -1
- package/dist/lib/utils/pdfProcessor.d.ts +9 -0
- package/dist/lib/utils/pdfProcessor.js +67 -9
- package/dist/mcp/servers/agent/directToolsServer.js +0 -1
- package/dist/models/modelRegistry.js +44 -0
- package/dist/neurolink.js +35 -3
- package/dist/providers/amazonBedrock.js +59 -10
- package/dist/providers/anthropic.js +2 -30
- package/dist/providers/azureOpenai.js +2 -24
- package/dist/providers/googleAiStudio.js +2 -24
- package/dist/providers/googleVertex.js +2 -45
- package/dist/providers/huggingFace.js +3 -31
- package/dist/providers/litellm.d.ts +1 -1
- package/dist/providers/litellm.js +110 -44
- package/dist/providers/mistral.js +5 -32
- package/dist/providers/ollama.d.ts +1 -0
- package/dist/providers/ollama.js +476 -129
- package/dist/providers/openAI.js +2 -28
- package/dist/providers/openaiCompatible.js +3 -31
- package/dist/types/content.d.ts +16 -113
- package/dist/types/content.js +16 -2
- package/dist/types/conversation.d.ts +3 -17
- package/dist/types/generateTypes.d.ts +2 -2
- package/dist/types/index.d.ts +2 -0
- package/dist/types/index.js +2 -0
- package/dist/types/multimodal.d.ts +282 -0
- package/dist/types/multimodal.js +100 -0
- package/dist/types/streamTypes.d.ts +2 -2
- package/dist/utils/imageProcessor.d.ts +1 -1
- package/dist/utils/messageBuilder.js +25 -2
- package/dist/utils/multimodalOptionsBuilder.d.ts +1 -1
- package/dist/utils/pdfProcessor.d.ts +9 -0
- package/dist/utils/pdfProcessor.js +67 -9
- package/package.json +5 -2

@@ -12,7 +12,6 @@ import fs from "fs";
 import path from "path";
 import os from "os";
 import dns from "dns";
-import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Import proper types for multimodal message handling
 // Enhanced Anthropic support with direct imports

@@ -605,50 +604,8 @@ export class GoogleVertexProvider extends BaseProvider {
         // Validate stream options
         this.validateStreamOptionsOnly(options);
         // Build message array from options with multimodal support
-        const hasMultimodalInput = !!(options.input?.images?.length ||
-            options.input?.content?.length ||
-            options.input?.files?.length ||
-            options.input?.csvFiles?.length ||
-            options.input?.pdfFiles?.length);
-        let messages;
-        if (hasMultimodalInput) {
-            logger.debug(`${functionTag}: Detected multimodal input, using multimodal message builder`, {
-                hasImages: !!options.input?.images?.length,
-                imageCount: options.input?.images?.length || 0,
-                hasContent: !!options.input?.content?.length,
-                contentCount: options.input?.content?.length || 0,
-                hasPDFs: !!options.input?.pdfFiles?.length,
-                pdfCount: options.input?.pdfFiles?.length || 0,
-            });
-            // Create multimodal options for buildMultimodalMessagesArray
-            const multimodalOptions = {
-                input: {
-                    text: options.input?.text || "",
-                    images: options.input?.images,
-                    content: options.input?.content,
-                    files: options.input?.files,
-                    csvFiles: options.input?.csvFiles,
-                    pdfFiles: options.input?.pdfFiles,
-                },
-                csvOptions: options.csvOptions,
-                systemPrompt: options.systemPrompt,
-                conversationHistory: options.conversationMessages,
-                provider: this.providerName,
-                model: this.modelName,
-                temperature: options.temperature,
-                maxTokens: options.maxTokens,
-                enableAnalytics: options.enableAnalytics,
-                enableEvaluation: options.enableEvaluation,
-                context: options.context,
-            };
-            const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
-            // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
-            messages = convertToCoreMessages(mm);
-        }
-        else {
-            logger.debug(`${functionTag}: Text-only input, using standard message builder`);
-            messages = await buildMessagesArray(options);
-        }
+        // Using protected helper from BaseProvider to eliminate code duplication
+        const messages = await this.buildMessagesForStream(options);
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         // Get all available tools (direct + MCP + external) for streaming
         const shouldUseTools = !options.disableTools && this.supportsTools();
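
This same refactor repeats in every provider hunk below: the inlined multimodal-detection block collapses into one call to a protected BaseProvider helper. The helper itself is not part of this diff; the following is a minimal sketch of what buildMessagesForStream plausibly does, reconstructed from the removed per-provider code (signature and internals are assumptions, not BaseProvider's actual source):

    // Sketch only - assumed from the removed per-provider code, not from BaseProvider itself.
    protected async buildMessagesForStream(options: StreamOptions): Promise<CoreMessage[]> {
        const hasMultimodalInput = !!(options.input?.images?.length ||
            options.input?.content?.length ||
            options.input?.files?.length ||
            options.input?.csvFiles?.length ||
            options.input?.pdfFiles?.length);
        if (hasMultimodalInput) {
            // Build provider-agnostic multimodal messages, then convert to CoreMessage[].
            const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
            const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
            return convertToCoreMessages(mm);
        }
        // Text-only input: use the standard message builder.
        return buildMessagesArray(options);
    }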

@@ -6,8 +6,6 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
-import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
-import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getHuggingFaceApiKey = () => {

@@ -116,35 +114,9 @@ export class HuggingFaceProvider extends BaseProvider {
         try {
             // Enhanced tool handling for HuggingFace models
             const streamOptions = this.prepareStreamOptions(options, analysisSchema);
-            //
-            const hasMultimodalInput = !!(options.input?.images?.length ||
-                options.input?.content?.length ||
-                options.input?.files?.length ||
-                options.input?.csvFiles?.length ||
-                options.input?.pdfFiles?.length);
-            let messages;
-            if (hasMultimodalInput) {
-                logger.debug(`HuggingFace: Detected multimodal input, using multimodal message builder`, {
-                    hasImages: !!options.input?.images?.length,
-                    imageCount: options.input?.images?.length || 0,
-                    hasContent: !!options.input?.content?.length,
-                    contentCount: options.input?.content?.length || 0,
-                    hasFiles: !!options.input?.files?.length,
-                    fileCount: options.input?.files?.length || 0,
-                    hasCSVFiles: !!options.input?.csvFiles?.length,
-                    csvFileCount: options.input?.csvFiles?.length || 0,
-                    hasPDFFiles: !!options.input?.pdfFiles?.length,
-                    pdfFileCount: options.input?.pdfFiles?.length || 0,
-                });
-                const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
-                const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
-                // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
-                messages = convertToCoreMessages(mm);
-            }
-            else {
-                logger.debug(`HuggingFace: Text-only input, using standard message builder`);
-                messages = await buildMessagesArray(options);
-            }
+            // Build message array from options with multimodal support
+            // Using protected helper from BaseProvider to eliminate code duplication
+            const messages = await this.buildMessagesForStream(options);
             const result = await streamText({
                 model: this.model,
                 messages: messages,

@@ -28,7 +28,7 @@ export declare class LiteLLMProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    protected executeStream(options: StreamOptions,
+    protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
      * Get available models from LiteLLM proxy server
      * Dynamically fetches from /v1/models endpoint with caching and fallback
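
The widened declaration lets callers attach a Zod schema so streamed output can be validated and structured. A hypothetical schema a caller might pass (the schema itself is illustrative; only the parameter type comes from the diff):

    import { z } from "zod";

    // Any ZodType satisfies the new analysisSchema parameter.
    const analysisSchema = z.object({
        sentiment: z.enum(["positive", "neutral", "negative"]),
        confidence: z.number().min(0).max(1),
    });

The litellm.js hunks below show where the schema lands: it is wrapped in experimental_output via Output.object({ schema }).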

@@ -1,5 +1,5 @@
 import { createOpenAI } from "@ai-sdk/openai";
-import { streamText } from "ai";
+import { streamText, Output } from "ai";
 import { AIProviderName } from "../constants/enums.js";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";

@@ -7,8 +7,6 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
-import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
-import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers
 const getLiteLLMConfig = () => {

@@ -117,52 +115,69 @@ export class LiteLLMProvider extends BaseProvider {
      * Provider-specific streaming implementation
      * Note: This is only used when tools are disabled
      */
-    async executeStream(options,
+    async executeStream(options, analysisSchema) {
         this.validateStreamOptions(options);
         const startTime = Date.now();
+        let chunkCount = 0; // Track chunk count for debugging
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
-            //
-            const hasMultimodalInput = !!(options.input?.images?.length ||
-                options.input?.content?.length ||
-                options.input?.files?.length ||
-                options.input?.csvFiles?.length ||
-                options.input?.pdfFiles?.length);
-            let messages;
-            if (hasMultimodalInput) {
-                logger.debug(`LiteLLM: Detected multimodal input, using multimodal message builder`, {
-                    hasImages: !!options.input?.images?.length,
-                    imageCount: options.input?.images?.length || 0,
-                    hasContent: !!options.input?.content?.length,
-                    contentCount: options.input?.content?.length || 0,
-                    hasFiles: !!options.input?.files?.length,
-                    fileCount: options.input?.files?.length || 0,
-                    hasCSVFiles: !!options.input?.csvFiles?.length,
-                    csvFileCount: options.input?.csvFiles?.length || 0,
-                    hasPDFFiles: !!options.input?.pdfFiles?.length,
-                    pdfFileCount: options.input?.pdfFiles?.length || 0,
+            // Build message array from options with multimodal support
+            // Using protected helper from BaseProvider to eliminate code duplication
+            const messages = await this.buildMessagesForStream(options);
+            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
+            // Get all available tools (direct + MCP + external) for streaming - matching Vertex pattern
+            const shouldUseTools = !options.disableTools && this.supportsTools();
+            const tools = shouldUseTools ? await this.getAllTools() : {};
+            logger.debug(`LiteLLM: Tools for streaming`, {
+                shouldUseTools,
+                toolCount: Object.keys(tools).length,
+                toolNames: Object.keys(tools),
+            });
+            // Model-specific maxTokens handling - Gemini 2.5 models have issues with maxTokens
+            const modelName = this.modelName || getDefaultLiteLLMModel();
+            const isGemini25Model = modelName.includes("gemini-2.5") || modelName.includes("gemini/2.5");
+            const maxTokens = isGemini25Model ? undefined : options.maxTokens;
+            if (isGemini25Model && options.maxTokens) {
+                logger.debug(`LiteLLM: Skipping maxTokens for Gemini 2.5 model (known compatibility issue)`, {
+                    modelName,
+                    requestedMaxTokens: options.maxTokens,
                 });
-            const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
-            const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
-            // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
-            messages = convertToCoreMessages(mm);
             }
-            else {
-                logger.debug(`LiteLLM: Text-only input, using standard message builder`);
-                messages = await buildMessagesArray(options);
-            }
-            const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
-            const result = streamText({
+            // Build complete stream options with proper typing - matching Vertex pattern
+            let streamOptions = {
                 model: model,
                 messages: messages,
                 temperature: options.temperature,
-                maxTokens
-                [… 3 lines truncated in the source diff …]
+                ...(maxTokens && { maxTokens }), // Conditionally include maxTokens
+                ...(shouldUseTools &&
+                    Object.keys(tools).length > 0 && {
+                    tools,
+                    toolChoice: "auto",
+                    maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+                }),
                 abortSignal: timeoutController?.controller.signal,
+                onError: (event) => {
+                    const error = event.error;
+                    const errorMessage = error instanceof Error ? error.message : String(error);
+                    logger.error(`LiteLLM: Stream error`, {
+                        provider: this.providerName,
+                        modelName: this.modelName,
+                        error: errorMessage,
+                        chunkCount,
+                    });
+                },
+                onFinish: (event) => {
+                    logger.debug(`LiteLLM: Stream finished`, {
+                        finishReason: event.finishReason,
+                        totalChunks: chunkCount,
+                    });
+                },
+                onChunk: () => {
+                    chunkCount++;
+                },
                 onStepFinish: ({ toolCalls, toolResults }) => {
+                    logger.info("Tool execution completed", { toolResults, toolCalls });
                     this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
                         logger.warn("LiteLLMProvider] Failed to store tool executions", {
                             provider: this.providerName,
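
The `...(cond && { key })` spreads above keep maxTokens and the tool options out of the request object entirely instead of setting them to undefined. A self-contained illustration of the pattern:

    // When the condition is falsy, the spread contributes nothing - the key never exists.
    const maxTokens: number | undefined = undefined; // e.g. suppressed for Gemini 2.5
    const opts = {
        temperature: 0.7,
        ...(maxTokens && { maxTokens }),
    };
    console.log("maxTokens" in opts); // false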

@@ -170,21 +185,72 @@ export class LiteLLMProvider extends BaseProvider {
                         });
                     });
                 },
-            }
+            };
+            // Add analysisSchema support if provided
+            if (analysisSchema) {
+                try {
+                    streamOptions = {
+                        ...streamOptions,
+                        experimental_output: Output.object({
+                            schema: analysisSchema,
+                        }),
+                    };
+                }
+                catch (error) {
+                    logger.warn("Schema application failed, continuing without schema", {
+                        error: String(error),
+                    });
+                }
+            }
+            const result = await streamText(streamOptions);
             timeoutController?.cleanup();
-            // Transform stream to
-            [… 3 lines truncated in the source diff …]
+            // Transform stream to content object stream using fullStream (handles both text and tool calls)
+            // Note: fullStream includes tool results, textStream only has text
+            const transformedStream = (async function* () {
+                // Try fullStream first (handles both text and tool calls), fallback to textStream
+                const streamToUse = result.fullStream || result.textStream;
+                for await (const chunk of streamToUse) {
+                    // Handle different chunk types from fullStream
+                    if (chunk && typeof chunk === "object") {
+                        // Check for error chunks first (critical error handling)
+                        if ("type" in chunk && chunk.type === "error") {
+                            const errorChunk = chunk;
+                            logger.error(`LiteLLM: Error chunk received:`, {
+                                errorType: errorChunk.type,
+                                errorDetails: errorChunk.error,
+                            });
+                            throw new Error(`LiteLLM streaming error: ${errorChunk.error?.message ||
+                                "Unknown error"}`);
+                        }
+                        if ("textDelta" in chunk) {
+                            // Text delta from fullStream
+                            const textDelta = chunk.textDelta;
+                            if (textDelta) {
+                                yield { content: textDelta };
+                            }
+                        }
+                        else if (chunk.type === "tool-call-streaming-start") {
+                            // Tool call streaming start event - log for debugging
+                            const toolCall = chunk;
+                            logger.debug("LiteLLM: Tool call streaming start", {
+                                toolCallId: toolCall.toolCallId,
+                                toolName: toolCall.toolName,
+                            });
+                        }
+                    }
+                    else if (typeof chunk === "string") {
+                        // Direct string chunk from textStream fallback
+                        yield { content: chunk };
+                    }
                 }
-            };
+            })();
             // Create analytics promise that resolves after stream completion
             const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
                 requestId: `litellm-stream-${Date.now()}`,
                 streamingMode: true,
             });
             return {
-                stream: transformedStream
+                stream: transformedStream,
                 provider: this.providerName,
                 model: this.modelName,
                 analytics: analyticsPromise,
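
Downstream, the transformed stream hides the fullStream/textStream distinction: consumers only ever see { content } objects. A hypothetical consumer of the StreamResult returned above (shapes taken from the return object in this hunk):

    // result comes from executeStream(...)
    for await (const chunk of result.stream) {
        process.stdout.write(chunk.content); // every chunk is { content: string }
    }
    const analytics = await result.analytics; // resolves once the stream has finished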

@@ -7,15 +7,14 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
-import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
-import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
     return validateApiKey(createMistralConfig());
 };
 const getDefaultMistralModel = () => {
-    [… 1 line truncated in the source diff …]
+    // Default to vision-capable Mistral Small 2506 (June 2025) with multimodal support
+    return getProviderModel("MISTRAL_MODEL", "mistral-small-2506");
 };
 /**
  * Mistral AI Provider v2 - BaseProvider Implementation
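
The new default only applies when the MISTRAL_MODEL environment variable is unset; getProviderModel is an env-with-fallback lookup. A sketch of the assumed resolution behavior (the helper's real body is not shown in this diff):

    // Assumed shape of getProviderModel(envVar, fallback).
    function getProviderModel(envVar: string, fallback: string): string {
        return process.env[envVar]?.trim() || fallback;
    }

    getProviderModel("MISTRAL_MODEL", "mistral-small-2506"); // env override wins when set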

@@ -51,35 +50,9 @@ export class MistralProvider extends BaseProvider {
         // Get tools consistently with generate method
         const shouldUseTools = !options.disableTools && this.supportsTools();
         const tools = shouldUseTools ? await this.getAllTools() : {};
-        //
-        const hasMultimodalInput = !!(options.input?.images?.length ||
-            options.input?.content?.length ||
-            options.input?.files?.length ||
-            options.input?.csvFiles?.length ||
-            options.input?.pdfFiles?.length);
-        let messages;
-        if (hasMultimodalInput) {
-            logger.debug(`Mistral: Detected multimodal input, using multimodal message builder`, {
-                hasImages: !!options.input?.images?.length,
-                imageCount: options.input?.images?.length || 0,
-                hasContent: !!options.input?.content?.length,
-                contentCount: options.input?.content?.length || 0,
-                hasFiles: !!options.input?.files?.length,
-                fileCount: options.input?.files?.length || 0,
-                hasCSVFiles: !!options.input?.csvFiles?.length,
-                csvFileCount: options.input?.csvFiles?.length || 0,
-                hasPDFFiles: !!options.input?.pdfFiles?.length,
-                pdfFileCount: options.input?.pdfFiles?.length || 0,
-            });
-            const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
-            const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
-            // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
-            messages = convertToCoreMessages(mm);
-        }
-        else {
-            logger.debug(`Mistral: Text-only input, using standard message builder`);
-            messages = await buildMessagesArray(options);
-        }
+        // Build message array from options with multimodal support
+        // Using protected helper from BaseProvider to eliminate code duplication
+        const messages = await this.buildMessagesForStream(options);
         const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
         const result = await streamText({
             model,

@@ -114,6 +114,7 @@ export declare class OllamaProvider extends BaseProvider {
      * Create stream generator for Ollama generate API (non-tool mode)
      */
     private createOllamaStream;
+    private createOpenAIStream;
     protected handleProviderError(error: unknown): Error;
     /**
      * Check if Ollama service is healthy and accessible
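
Only the private declaration is visible here, but the name suggests OllamaProvider can now stream through Ollama's OpenAI-compatible endpoint in addition to its native generate API. A minimal sketch of that endpoint (the path and payload follow Ollama's documented OpenAI compatibility; the connection to createOpenAIStream is an assumption):

    // Ollama serves an OpenAI-compatible chat API at /v1/chat/completions.
    const res = await fetch("http://localhost:11434/v1/chat/completions", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            model: "llama3.1",
            messages: [{ role: "user", content: "Hello" }],
            stream: true, // responses arrive as SSE chunks
        }),
    });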
|