@juspay/neurolink 7.49.0 → 7.51.0
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- package/CHANGELOG.md +13 -0
- package/README.md +12 -9
- package/dist/adapters/providerImageAdapter.js +82 -10
- package/dist/agent/directTools.d.ts +10 -10
- package/dist/agent/directTools.js +5 -3
- package/dist/cli/commands/config.js +1 -0
- package/dist/cli/commands/mcp.js +1 -0
- package/dist/cli/commands/models.js +1 -0
- package/dist/cli/commands/ollama.js +1 -0
- package/dist/cli/commands/setup-anthropic.js +1 -0
- package/dist/cli/commands/setup-azure.js +1 -0
- package/dist/cli/commands/setup-bedrock.js +1 -0
- package/dist/cli/commands/setup-gcp.js +1 -0
- package/dist/cli/commands/setup-google-ai.js +1 -0
- package/dist/cli/commands/setup-huggingface.js +1 -0
- package/dist/cli/commands/setup-mistral.js +1 -0
- package/dist/cli/commands/setup-openai.js +1 -0
- package/dist/cli/commands/setup.js +1 -0
- package/dist/cli/errorHandler.js +1 -0
- package/dist/cli/factories/commandFactory.d.ts +5 -0
- package/dist/cli/factories/commandFactory.js +42 -6
- package/dist/cli/factories/ollamaCommandFactory.js +1 -0
- package/dist/cli/factories/sagemakerCommandFactory.js +1 -0
- package/dist/cli/factories/setupCommandFactory.js +1 -0
- package/dist/cli/index.js +14 -2
- package/dist/cli/loop/conversationSelector.js +1 -0
- package/dist/cli/loop/optionsSchema.js +1 -0
- package/dist/cli/loop/session.js +1 -0
- package/dist/cli/parser.js +1 -0
- package/dist/cli/utils/completeSetup.js +1 -0
- package/dist/cli/utils/envManager.js +1 -0
- package/dist/cli/utils/interactiveSetup.js +1 -0
- package/dist/cli/utils/ollamaUtils.js +1 -0
- package/dist/constants/index.js +1 -1
- package/dist/core/baseProvider.d.ts +14 -0
- package/dist/core/baseProvider.js +106 -23
- package/dist/index.d.ts +11 -5
- package/dist/index.js +11 -10
- package/dist/lib/adapters/providerImageAdapter.js +83 -10
- package/dist/lib/agent/directTools.d.ts +10 -10
- package/dist/lib/agent/directTools.js +6 -3
- package/dist/lib/config/configManager.js +1 -0
- package/dist/lib/config/conversationMemory.js +1 -0
- package/dist/lib/config/taskClassificationConfig.js +1 -0
- package/dist/lib/constants/index.js +2 -1
- package/dist/lib/constants/performance.js +1 -0
- package/dist/lib/constants/retry.js +1 -0
- package/dist/lib/constants/timeouts.js +1 -0
- package/dist/lib/constants/tokens.js +1 -0
- package/dist/lib/core/analytics.js +1 -0
- package/dist/lib/core/baseProvider.d.ts +14 -0
- package/dist/lib/core/baseProvider.js +107 -23
- package/dist/lib/core/constants.js +1 -0
- package/dist/lib/core/conversationMemoryFactory.js +1 -0
- package/dist/lib/core/conversationMemoryInitializer.js +1 -0
- package/dist/lib/core/conversationMemoryManager.js +1 -0
- package/dist/lib/core/dynamicModels.js +1 -0
- package/dist/lib/core/evaluation.js +1 -0
- package/dist/lib/core/evaluationProviders.js +1 -0
- package/dist/lib/core/factory.js +1 -0
- package/dist/lib/core/modelConfiguration.js +1 -0
- package/dist/lib/core/redisConversationMemoryManager.js +1 -0
- package/dist/lib/core/serviceRegistry.js +1 -0
- package/dist/lib/core/streamAnalytics.js +1 -0
- package/dist/lib/evaluation/contextBuilder.js +1 -0
- package/dist/lib/evaluation/index.js +1 -0
- package/dist/lib/evaluation/prompts.js +1 -0
- package/dist/lib/evaluation/ragasEvaluator.js +1 -0
- package/dist/lib/evaluation/retryManager.js +1 -0
- package/dist/lib/evaluation/scoring.js +1 -0
- package/dist/lib/factories/providerFactory.js +1 -0
- package/dist/lib/factories/providerRegistry.js +1 -0
- package/dist/lib/hitl/hitlErrors.js +1 -0
- package/dist/lib/hitl/hitlManager.js +1 -0
- package/dist/lib/hitl/index.js +1 -0
- package/dist/lib/hitl/types.js +1 -0
- package/dist/lib/index.d.ts +11 -5
- package/dist/lib/index.js +12 -10
- package/dist/lib/mcp/externalServerManager.js +1 -0
- package/dist/lib/mcp/factory.js +1 -0
- package/dist/lib/mcp/flexibleToolValidator.js +1 -0
- package/dist/lib/mcp/index.js +1 -0
- package/dist/lib/mcp/mcpCircuitBreaker.js +1 -0
- package/dist/lib/mcp/mcpClientFactory.js +2 -1
- package/dist/lib/mcp/registry.js +1 -0
- package/dist/lib/mcp/servers/agent/directToolsServer.js +2 -0
- package/dist/lib/mcp/servers/aiProviders/aiAnalysisTools.js +1 -0
- package/dist/lib/mcp/servers/aiProviders/aiCoreServer.js +1 -0
- package/dist/lib/mcp/servers/aiProviders/aiWorkflowTools.js +1 -0
- package/dist/lib/mcp/servers/utilities/utilityServer.js +1 -0
- package/dist/lib/mcp/toolDiscoveryService.js +1 -0
- package/dist/lib/mcp/toolRegistry.js +1 -0
- package/dist/lib/memory/mem0Initializer.js +1 -0
- package/dist/lib/middleware/builtin/analytics.js +1 -0
- package/dist/lib/middleware/builtin/autoEvaluation.js +1 -0
- package/dist/lib/middleware/builtin/guardrails.js +1 -0
- package/dist/lib/middleware/factory.js +1 -0
- package/dist/lib/middleware/index.js +1 -0
- package/dist/lib/middleware/registry.js +1 -0
- package/dist/lib/middleware/utils/guardrailsUtils.js +1 -0
- package/dist/lib/models/modelRegistry.js +1 -0
- package/dist/lib/models/modelResolver.js +2 -0
- package/dist/lib/neurolink.d.ts +41 -6
- package/dist/lib/neurolink.js +276 -5
- package/dist/lib/providers/amazonBedrock.d.ts +1 -0
- package/dist/lib/providers/amazonBedrock.js +166 -14
- package/dist/lib/providers/amazonSagemaker.js +1 -0
- package/dist/lib/providers/anthropic.js +8 -21
- package/dist/lib/providers/anthropicBaseProvider.js +1 -0
- package/dist/lib/providers/azureOpenai.js +6 -21
- package/dist/lib/providers/googleAiStudio.js +6 -21
- package/dist/lib/providers/googleVertex.js +9 -1
- package/dist/lib/providers/huggingFace.js +34 -3
- package/dist/lib/providers/index.js +1 -0
- package/dist/lib/providers/litellm.js +34 -3
- package/dist/lib/providers/mistral.js +32 -2
- package/dist/lib/providers/ollama.d.ts +37 -1
- package/dist/lib/providers/ollama.js +544 -58
- package/dist/lib/providers/openAI.js +6 -21
- package/dist/lib/providers/openaiCompatible.js +41 -4
- package/dist/lib/providers/sagemaker/adaptive-semaphore.js +1 -0
- package/dist/lib/providers/sagemaker/client.js +1 -0
- package/dist/lib/providers/sagemaker/config.js +1 -0
- package/dist/lib/providers/sagemaker/detection.js +1 -0
- package/dist/lib/providers/sagemaker/diagnostics.js +1 -0
- package/dist/lib/providers/sagemaker/error-constants.js +1 -0
- package/dist/lib/providers/sagemaker/errors.js +1 -0
- package/dist/lib/providers/sagemaker/index.js +1 -0
- package/dist/lib/providers/sagemaker/language-model.js +1 -0
- package/dist/lib/providers/sagemaker/parsers.js +1 -0
- package/dist/lib/providers/sagemaker/streaming.js +1 -0
- package/dist/lib/providers/sagemaker/structured-parser.js +1 -0
- package/dist/lib/proxy/awsProxyIntegration.js +1 -0
- package/dist/lib/proxy/proxyFetch.js +1 -0
- package/dist/lib/proxy/utils/noProxyUtils.js +1 -0
- package/dist/lib/sdk/toolRegistration.js +2 -0
- package/dist/lib/services/server/ai/observability/instrumentation.d.ts +57 -0
- package/dist/lib/services/server/ai/observability/instrumentation.js +171 -0
- package/dist/lib/session/globalSessionState.js +38 -1
- package/dist/lib/telemetry/index.d.ts +1 -0
- package/dist/lib/telemetry/index.js +1 -0
- package/dist/lib/telemetry/telemetryService.d.ts +2 -0
- package/dist/lib/telemetry/telemetryService.js +8 -7
- package/dist/lib/types/analytics.js +1 -0
- package/dist/lib/types/cli.js +1 -0
- package/dist/lib/types/common.js +1 -0
- package/dist/lib/types/configTypes.js +1 -0
- package/dist/lib/types/content.d.ts +14 -1
- package/dist/lib/types/content.js +1 -0
- package/dist/lib/types/contextTypes.js +1 -0
- package/dist/lib/types/conversation.d.ts +2 -0
- package/dist/lib/types/conversation.js +1 -0
- package/dist/lib/types/domainTypes.js +1 -0
- package/dist/lib/types/errors.js +1 -0
- package/dist/lib/types/evaluation.js +1 -0
- package/dist/lib/types/evaluationProviders.js +1 -0
- package/dist/lib/types/evaluationTypes.js +1 -0
- package/dist/lib/types/externalMcp.js +1 -0
- package/dist/lib/types/fileTypes.d.ts +44 -0
- package/dist/lib/types/fileTypes.js +1 -0
- package/dist/lib/types/generateTypes.d.ts +1 -0
- package/dist/lib/types/generateTypes.js +1 -0
- package/dist/lib/types/guardrails.js +1 -0
- package/dist/lib/types/index.js +1 -0
- package/dist/lib/types/mcpTypes.js +1 -0
- package/dist/lib/types/middlewareTypes.js +1 -0
- package/dist/lib/types/modelTypes.d.ts +6 -6
- package/dist/lib/types/modelTypes.js +1 -0
- package/dist/lib/types/observability.d.ts +49 -0
- package/dist/lib/types/observability.js +7 -0
- package/dist/lib/types/providers.d.ts +44 -0
- package/dist/lib/types/providers.js +1 -0
- package/dist/lib/types/sdkTypes.js +1 -0
- package/dist/lib/types/serviceTypes.js +1 -0
- package/dist/lib/types/streamTypes.d.ts +1 -0
- package/dist/lib/types/streamTypes.js +1 -0
- package/dist/lib/types/taskClassificationTypes.js +1 -0
- package/dist/lib/types/tools.js +2 -0
- package/dist/lib/types/typeAliases.js +1 -0
- package/dist/lib/types/universalProviderOptions.js +1 -0
- package/dist/lib/utils/analyticsUtils.js +1 -0
- package/dist/lib/utils/conversationMemory.js +1 -0
- package/dist/lib/utils/conversationMemoryUtils.js +1 -0
- package/dist/lib/utils/csvProcessor.js +1 -0
- package/dist/lib/utils/errorHandling.js +1 -0
- package/dist/lib/utils/evaluationUtils.js +1 -0
- package/dist/lib/utils/factoryProcessing.js +1 -0
- package/dist/lib/utils/fileDetector.js +7 -3
- package/dist/lib/utils/imageProcessor.js +1 -0
- package/dist/lib/utils/logger.js +1 -0
- package/dist/lib/utils/loopUtils.js +1 -0
- package/dist/lib/utils/mcpDefaults.js +1 -0
- package/dist/lib/utils/messageBuilder.js +96 -9
- package/dist/lib/utils/modelRouter.js +1 -0
- package/dist/lib/utils/multimodalOptionsBuilder.d.ts +67 -0
- package/dist/lib/utils/multimodalOptionsBuilder.js +65 -0
- package/dist/lib/utils/optionsConversion.js +1 -0
- package/dist/lib/utils/optionsUtils.js +1 -0
- package/dist/lib/utils/parameterValidation.js +1 -0
- package/dist/lib/utils/pdfProcessor.d.ts +10 -0
- package/dist/lib/utils/pdfProcessor.js +199 -0
- package/dist/lib/utils/performance.js +1 -0
- package/dist/lib/utils/promptRedaction.js +1 -0
- package/dist/lib/utils/providerConfig.js +1 -0
- package/dist/lib/utils/providerHealth.js +1 -0
- package/dist/lib/utils/providerSetupMessages.js +1 -0
- package/dist/lib/utils/providerUtils.js +1 -0
- package/dist/lib/utils/redis.js +1 -0
- package/dist/lib/utils/retryHandler.js +1 -0
- package/dist/lib/utils/schemaConversion.js +1 -0
- package/dist/lib/utils/taskClassificationUtils.js +1 -0
- package/dist/lib/utils/taskClassifier.js +1 -0
- package/dist/lib/utils/timeout.js +1 -0
- package/dist/lib/utils/tokenLimits.js +1 -0
- package/dist/lib/utils/toolUtils.js +1 -0
- package/dist/lib/utils/transformationUtils.js +1 -0
- package/dist/lib/utils/typeUtils.js +1 -0
- package/dist/mcp/mcpClientFactory.js +1 -1
- package/dist/mcp/servers/agent/directToolsServer.js +1 -0
- package/dist/models/modelResolver.js +1 -0
- package/dist/neurolink.d.ts +41 -6
- package/dist/neurolink.js +275 -5
- package/dist/providers/amazonBedrock.d.ts +1 -0
- package/dist/providers/amazonBedrock.js +165 -14
- package/dist/providers/anthropic.js +7 -21
- package/dist/providers/azureOpenai.js +5 -21
- package/dist/providers/googleAiStudio.js +5 -21
- package/dist/providers/googleVertex.js +8 -1
- package/dist/providers/huggingFace.js +33 -3
- package/dist/providers/litellm.js +33 -3
- package/dist/providers/mistral.js +31 -2
- package/dist/providers/ollama.d.ts +37 -1
- package/dist/providers/ollama.js +543 -58
- package/dist/providers/openAI.js +5 -21
- package/dist/providers/openaiCompatible.js +40 -4
- package/dist/sdk/toolRegistration.js +1 -0
- package/dist/services/server/ai/observability/instrumentation.d.ts +57 -0
- package/dist/services/server/ai/observability/instrumentation.js +170 -0
- package/dist/session/globalSessionState.js +37 -1
- package/dist/telemetry/index.d.ts +1 -0
- package/dist/telemetry/telemetryService.d.ts +2 -0
- package/dist/telemetry/telemetryService.js +7 -7
- package/dist/types/content.d.ts +14 -1
- package/dist/types/conversation.d.ts +2 -0
- package/dist/types/fileTypes.d.ts +44 -0
- package/dist/types/generateTypes.d.ts +1 -0
- package/dist/types/observability.d.ts +49 -0
- package/dist/types/observability.js +6 -0
- package/dist/types/providers.d.ts +44 -0
- package/dist/types/streamTypes.d.ts +1 -0
- package/dist/types/tools.js +1 -0
- package/dist/utils/fileDetector.js +6 -3
- package/dist/utils/messageBuilder.js +95 -9
- package/dist/utils/multimodalOptionsBuilder.d.ts +67 -0
- package/dist/utils/multimodalOptionsBuilder.js +64 -0
- package/dist/utils/pdfProcessor.d.ts +10 -0
- package/dist/utils/pdfProcessor.js +198 -0
- package/package.json +12 -16
@@ -1,8 +1,12 @@
-import { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand, } from "@aws-sdk/client-bedrock-runtime";
+import { BedrockRuntimeClient, ConverseCommand, ConverseStreamCommand, ImageFormat, } from "@aws-sdk/client-bedrock-runtime";
 import { BedrockClient, ListFoundationModelsCommand, } from "@aws-sdk/client-bedrock";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { convertZodToJsonSchema } from "../utils/schemaConversion.js";
+import { buildMultimodalMessagesArray } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { createAnalytics } from "../core/analytics.js";
 // Bedrock-specific types now imported from ../types/providerSpecific.js
 export class AmazonBedrockProvider extends BaseProvider {
 bedrockClient;
@@ -349,6 +353,16 @@ export class AmazonBedrockProvider extends BaseProvider {
 text: item.text,
 };
 }
+if (item.image) {
+return {
+image: item.image,
+};
+}
+if (item.document) {
+return {
+document: item.document,
+};
+}
 if (item.toolUse) {
 return {
 toolUse: {
@@ -520,6 +534,73 @@ export class AmazonBedrockProvider extends BaseProvider {
 logger.debug(`[AmazonBedrockProvider] Formatted ${bedrockTools.length} tools for Bedrock`);
 return { tools: bedrockTools };
 }
+// Convert multimodal messages to Bedrock format
+convertToBedrockMessages(messages) {
+return messages.map((msg) => {
+const bedrockMessage = {
+role: msg.role === "system" ? "user" : msg.role,
+content: [],
+};
+if (typeof msg.content === "string") {
+bedrockMessage.content.push({ text: msg.content });
+}
+else {
+msg.content.forEach((contentItem) => {
+if (contentItem.type === "text" && contentItem.text) {
+bedrockMessage.content.push({ text: contentItem.text });
+}
+else if (contentItem.type === "image" && contentItem.image) {
+const imageData = typeof contentItem.image === "string"
+? Buffer.from(contentItem.image.replace(/^data:image\/\w+;base64,/, ""), "base64")
+: contentItem.image;
+let format = contentItem.mimeType?.split("/")[1] || "png";
+if (format === "jpg") {
+format = "jpeg";
+}
+bedrockMessage.content.push({
+image: {
+format: format === "jpeg"
+? ImageFormat.JPEG
+: format === "png"
+? ImageFormat.PNG
+: format === "gif"
+? ImageFormat.GIF
+: ImageFormat.WEBP,
+source: {
+bytes: imageData,
+},
+},
+});
+}
+else if (contentItem.type === "document" ||
+contentItem.type === "pdf" ||
+(contentItem.type === "file" &&
+contentItem.mimeType?.toLowerCase().startsWith("application/pdf"))) {
+let docData;
+if (typeof contentItem.data === "string") {
+const pdfString = contentItem.data.replace(/^data:application\/pdf;base64,/i, "");
+docData = Buffer.from(pdfString, "base64");
+}
+else {
+docData = contentItem.data;
+}
+bedrockMessage.content.push({
+document: {
+format: "pdf",
+name: typeof contentItem.name === "string" && contentItem.name
+? contentItem.name
+: "document.pdf",
+source: {
+bytes: docData,
+},
+},
+});
+}
+});
+}
+return bedrockMessage;
+});
+}
 // Bedrock-MCP-Connector compatibility
 getBedrockClient() {
 return this.bedrockClient;
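The new convertToBedrockMessages helper above maps the library's generic multimodal messages onto Bedrock Converse content blocks (text, image, and PDF document). Below is a minimal standalone sketch of the same mapping, decoupled from the provider class; the input item shape mirrors the checks in the hunk, and plain format strings are used where the compiled code uses the ImageFormat enum (they resolve to the same values).

```js
// Standalone sketch: map one generic multimodal message to a Bedrock Converse message.
// The input shape (type/text/image/data/mimeType) mirrors the checks in the diff above.
function toBedrockMessage(msg) {
  const items = Array.isArray(msg.content) ? msg.content : [{ type: "text", text: msg.content }];
  const content = [];
  for (const item of items) {
    if (item.type === "text" && item.text) {
      content.push({ text: item.text });
    } else if (item.type === "image" && item.image) {
      const bytes = typeof item.image === "string"
        ? Buffer.from(item.image.replace(/^data:image\/\w+;base64,/, ""), "base64")
        : item.image;
      let format = item.mimeType?.split("/")[1] || "png";
      if (format === "jpg") format = "jpeg";
      content.push({ image: { format, source: { bytes } } });
    } else if (item.type === "pdf" || item.type === "document") {
      const bytes = typeof item.data === "string"
        ? Buffer.from(item.data.replace(/^data:application\/pdf;base64,/i, ""), "base64")
        : item.data;
      content.push({ document: { format: "pdf", name: item.name || "document.pdf", source: { bytes } } });
    }
  }
  // Bedrock Converse messages have no "system" role, hence the downgrade to "user".
  return { role: msg.role === "system" ? "user" : msg.role, content };
}

// Example: one user message carrying text plus an inline base64 PNG.
const example = toBedrockMessage({
  role: "user",
  content: [
    { type: "text", text: "Describe this image" },
    { type: "image", image: "data:image/png;base64,iVBORw0KGgo=", mimeType: "image/png" },
  ],
});
console.log(example.content.map((block) => Object.keys(block)[0])); // [ 'text', 'image' ]
```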
@@ -529,16 +610,43 @@ export class AmazonBedrockProvider extends BaseProvider {
 logger.info("🚀 [AmazonBedrockProvider] Attempting real streaming with ConverseStreamCommand");
 try {
 logger.debug("🟢 [TRACE] executeStream TRY block - about to call streamingConversationLoop");
-// CRITICAL FIX: Initialize conversation history like generate() does
 // Clear conversation history for new streaming session
 this.conversationHistory = [];
-//
-const
-
-
-
-
-
+// Check for multimodal input (images, PDFs, CSVs, files)
+const hasMultimodalInput = !!(options.input?.images?.length ||
+options.input?.content?.length ||
+options.input?.files?.length ||
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
+if (hasMultimodalInput) {
+logger.debug(`[AmazonBedrockProvider] Detected multimodal input, using multimodal message builder`, {
+hasImages: !!options.input?.images?.length,
+imageCount: options.input?.images?.length || 0,
+hasContent: !!options.input?.content?.length,
+contentCount: options.input?.content?.length || 0,
+hasFiles: !!options.input?.files?.length,
+fileCount: options.input?.files?.length || 0,
+hasCSVFiles: !!options.input?.csvFiles?.length,
+csvFileCount: options.input?.csvFiles?.length || 0,
+hasPDFFiles: !!options.input?.pdfFiles?.length,
+pdfFileCount: options.input?.pdfFiles?.length || 0,
+});
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
+const multimodalMessages = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+// Convert to Bedrock format
+this.conversationHistory =
+this.convertToBedrockMessages(multimodalMessages);
+}
+else {
+logger.debug(`[AmazonBedrockProvider] Text-only input, using simple message builder`);
+// Add user message to conversation - simple text-only case
+const userMessage = {
+role: "user",
+content: [{ text: options.input.text }],
+};
+this.conversationHistory.push(userMessage);
+}
+logger.debug(`[AmazonBedrockProvider] Starting streaming conversation with ${this.conversationHistory.length} message(s)`);
 // Call the actual streaming implementation that already exists
 logger.debug("🟢 [TRACE] executeStream - calling streamingConversationLoop NOW");
 const result = await this.streamingConversationLoop(options);
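Every provider touched in this release now gates its streaming path on the same predicate: any of images, content, files, csvFiles, or the new pdfFiles on options.input switches from the plain text message builder to the multimodal one. A small sketch of that predicate with a hypothetical options object (field names come from the checks above; the file path is illustrative only):

```js
// Same predicate the providers now use to choose between the text-only and
// multimodal message builders.
const hasMultimodalInput = (options) =>
  !!(
    options.input?.images?.length ||
    options.input?.content?.length ||
    options.input?.files?.length ||
    options.input?.csvFiles?.length ||
    options.input?.pdfFiles?.length
  );

// Hypothetical stream options: text plus a PDF attachment (path is illustrative).
const streamOptions = {
  input: {
    text: "Summarize the attached report",
    pdfFiles: ["./report.pdf"],
  },
};

console.log(hasMultimodalInput(streamOptions)); // true
console.log(hasMultimodalInput({ input: { text: "hello" } })); // false
```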
@@ -615,7 +723,8 @@ export class AmazonBedrockProvider extends BaseProvider {
 }
 async streamingConversationLoop(options) {
 logger.debug("🟦 [TRACE] streamingConversationLoop ENTRY");
-const
+const startTime = Date.now();
+const maxIterations = options.maxSteps || DEFAULT_MAX_STEPS;
 let iteration = 0;
 // The REAL issue: ReadableStream errors don't bubble up to the caller
 // So we need to make the first streaming call synchronously to test permissions
@@ -650,7 +759,7 @@ export class AmazonBedrockProvider extends BaseProvider {
 logger.debug(`[AmazonBedrockProvider] Streaming iteration ${iteration}`);
 const commandInput = await this.prepareStreamCommand(options);
 const { stopReason, assistantMessage } = await this.processStreamResponse(commandInput, controller);
-const shouldContinue = await this.handleStreamStopReason(stopReason, assistantMessage, controller);
+const shouldContinue = await this.handleStreamStopReason(stopReason, assistantMessage, controller, options);
 if (!shouldContinue) {
 break;
 }
@@ -665,11 +774,22 @@ export class AmazonBedrockProvider extends BaseProvider {
 }
 },
 });
+// Create analytics promise (without token tracking for now due to AWS SDK limitations)
+const analyticsPromise = Promise.resolve(createAnalytics(this.providerName, this.modelName || this.getDefaultModel(), { usage: { input: 0, output: 0, total: 0 } }, Date.now() - startTime, {
+requestId: `bedrock-stream-${Date.now()}`,
+streamingMode: true,
+note: "Token usage not available from AWS SDK streaming responses",
+}));
 return {
 stream: this.convertToAsyncIterable(stream),
 usage: { total: 0, input: 0, output: 0 },
 model: this.modelName || this.getDefaultModel(),
 provider: this.getProviderName(),
+analytics: analyticsPromise,
+metadata: {
+startTime,
+streamId: `bedrock-${Date.now()}`,
+},
 };
 }
 catch (error) {
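The Bedrock stream result now also carries an analytics promise built with createAnalytics(provider, model, { usage }, responseTimeMs, metadata) and a metadata block with startTime and a streamId; token counts stay zeroed because the AWS SDK streaming response exposes no usage. A rough sketch of how a caller might consume those fields; the executeStream call and result shape are inferred from the hunk above, not a documented public API:

```js
// Hypothetical consumer of the internal Bedrock stream result (shape inferred
// from the diff: { stream, usage, model, provider, analytics, metadata }).
async function consumeBedrockStream(provider, options) {
  const result = await provider.executeStream(options);

  // Drain the async-iterable stream first.
  for await (const chunk of result.stream) {
    process.stdout.write(typeof chunk === "string" ? chunk : "");
  }

  // Analytics resolve separately; usage is zeroed for Bedrock streaming.
  const analytics = await result.analytics;
  console.log({
    provider: result.provider,
    model: result.model,
    streamId: result.metadata?.streamId,
    elapsedMs: Date.now() - (result.metadata?.startTime ?? Date.now()),
    usage: result.usage, // { total: 0, input: 0, output: 0 } per the AWS SDK limitation
    analytics,
  });
}
```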
@@ -825,7 +945,7 @@ export class AmazonBedrockProvider extends BaseProvider {
 this.conversationHistory.push(assistantMessage);
 return { stopReason, assistantMessage };
 }
-async handleStreamStopReason(stopReason, assistantMessage, controller) {
+async handleStreamStopReason(stopReason, assistantMessage, controller, options) {
 if (stopReason === "end_turn" || stopReason === "stop_sequence") {
 // Conversation completed
 controller.close();
@@ -833,7 +953,7 @@ export class AmazonBedrockProvider extends BaseProvider {
 }
 else if (stopReason === "tool_use") {
 logger.debug(`🛠️ [AmazonBedrockProvider] Tool use detected in streaming - executing tools`);
-await this.executeStreamTools(assistantMessage.content);
+await this.executeStreamTools(assistantMessage.content, options);
 return true; // Continue conversation loop
 }
 else if (stopReason === "max_tokens") {
@@ -851,10 +971,13 @@ export class AmazonBedrockProvider extends BaseProvider {
 return false;
 }
 }
-async executeStreamTools(messageContent) {
+async executeStreamTools(messageContent, options) {
 // Execute all tool uses in the message - ensure 1:1 mapping like Bedrock-MCP-Connector
 const toolResults = [];
 let toolUseCount = 0;
+// Track tool calls and results for storage (similar to Vertex onStepFinish)
+const toolCalls = [];
+const toolResultsForStorage = [];
 // Count toolUse blocks first to ensure 1:1 mapping
 for (const contentItem of messageContent) {
 if (contentItem.toolUse) {
@@ -865,9 +988,23 @@ export class AmazonBedrockProvider extends BaseProvider {
 for (const contentItem of messageContent) {
 if (contentItem.toolUse) {
 logger.debug(`🔧 [AmazonBedrockProvider] Executing tool: ${contentItem.toolUse.name}`);
+// Track tool call
+toolCalls.push({
+type: "tool-call",
+toolCallId: contentItem.toolUse.toolUseId,
+toolName: contentItem.toolUse.name,
+args: contentItem.toolUse.input || {},
+});
 try {
 const toolResult = await this.executeSingleTool(contentItem.toolUse.name, contentItem.toolUse.input || {}, contentItem.toolUse.toolUseId);
 logger.debug(`✅ [AmazonBedrockProvider] Tool execution successful: ${contentItem.toolUse.name}`);
+// Track tool result for storage
+toolResultsForStorage.push({
+type: "tool-result",
+toolCallId: contentItem.toolUse.toolUseId,
+toolName: contentItem.toolUse.name,
+result: toolResult,
+});
 // Ensure exact structure matching Bedrock-MCP-Connector
 toolResults.push({
 toolResult: {
@@ -880,6 +1017,13 @@ export class AmazonBedrockProvider extends BaseProvider {
 catch (error) {
 logger.error(`❌ [AmazonBedrockProvider] Tool execution failed: ${contentItem.toolUse.name}`, error);
 const errorMessage = error instanceof Error ? error.message : String(error);
+// Track failed tool result
+toolResultsForStorage.push({
+type: "tool-result",
+toolCallId: contentItem.toolUse.toolUseId,
+toolName: contentItem.toolUse.name,
+result: { error: errorMessage },
+});
 toolResults.push({
 toolResult: {
 toolUseId: contentItem.toolUse.toolUseId,
@@ -908,6 +1052,13 @@ export class AmazonBedrockProvider extends BaseProvider {
 };
 this.conversationHistory.push(userMessageWithToolResults);
 logger.debug(`📤 [AmazonBedrockProvider] Added ${toolResults.length} tool results to conversation (1:1 mapping validated)`);
+// Store tool execution for analytics and debugging (similar to Vertex onStepFinish)
+this.handleToolExecutionStorage(toolCalls, toolResultsForStorage, options, new Date()).catch((error) => {
+logger.warn("[AmazonBedrockProvider] Failed to store tool executions", {
+provider: this.providerName,
+error: error instanceof Error ? error.message : String(error),
+});
+});
 }
 }
 /**
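The streaming tool loop now records a tool-call entry for each Bedrock toolUse block and a tool-result entry for each outcome (success or error), mirroring the Vercel AI SDK onStepFinish payload, before handing both arrays to handleToolExecutionStorage. A sketch of the accumulated record shapes, using an invented tool invocation:

```js
// Shapes accumulated per toolUse block in the streaming loop (from the diff above).
// The tool name, id, and arguments below are invented for illustration.
const toolCalls = [
  {
    type: "tool-call",
    toolCallId: "tooluse_abc123",
    toolName: "getWeather",
    args: { city: "Bengaluru" },
  },
];

const toolResultsForStorage = [
  // Success case: the raw result from executeSingleTool.
  {
    type: "tool-result",
    toolCallId: "tooluse_abc123",
    toolName: "getWeather",
    result: { temperatureC: 27 },
  },
  // Failure case: the error message is wrapped instead of thrown.
  {
    type: "tool-result",
    toolCallId: "tooluse_def456",
    toolName: "getWeather",
    result: { error: "Request timed out" },
  },
];

// Both arrays are then persisted fire-and-forget, e.g.:
// this.handleToolExecutionStorage(toolCalls, toolResultsForStorage, options, new Date()).catch(...)
console.log(toolCalls.length, toolResultsForStorage.length);
```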
@@ -8,6 +8,7 @@ import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } fro
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createAnthropicConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getAnthropicApiKey = () => {
@@ -96,7 +97,8 @@ export class AnthropicProvider extends BaseProvider {
 const hasMultimodalInput = !!(options.input?.images?.length ||
 options.input?.content?.length ||
 options.input?.files?.length ||
-options.input?.csvFiles?.length
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
 let messages;
 if (hasMultimodalInput) {
 logger.debug(`Anthropic: Detected multimodal input, using multimodal message builder`, {
@@ -108,27 +110,10 @@ export class AnthropicProvider extends BaseProvider {
 fileCount: options.input?.files?.length || 0,
 hasCSVFiles: !!options.input?.csvFiles?.length,
 csvFileCount: options.input?.csvFiles?.length || 0,
+hasPDFFiles: !!options.input?.pdfFiles?.length,
+pdfFileCount: options.input?.pdfFiles?.length || 0,
 });
-
-const multimodalOptions = {
-input: {
-text: options.input?.text || "",
-images: options.input?.images,
-content: options.input?.content,
-files: options.input?.files,
-csvFiles: options.input?.csvFiles,
-},
-csvOptions: options.csvOptions,
-systemPrompt: options.systemPrompt,
-conversationHistory: options.conversationMessages,
-provider: this.providerName,
-model: this.modelName,
-temperature: options.temperature,
-maxTokens: options.maxTokens,
-enableAnalytics: options.enableAnalytics,
-enableEvaluation: options.enableEvaluation,
-context: options.context,
-};
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
 const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
 // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
 messages = convertToCoreMessages(mm);
@@ -147,6 +132,7 @@ export class AnthropicProvider extends BaseProvider {
 maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
 toolChoice: shouldUseTools ? "auto" : "none",
 abortSignal: timeoutController?.controller.signal,
+experimental_telemetry: this.getStreamTelemetryConfig(options),
 onStepFinish: ({ toolCalls, toolResults }) => {
 this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
 logger.warn("[AnthropicProvider] Failed to store tool executions", {
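The long inline multimodalOptions literal that each provider previously assembled (visible in the removed lines above) is now produced by the shared buildMultimodalOptions(options, providerName, modelName) helper. The sketch below reconstructs an equivalent helper from that removed literal; the real implementation lives in dist/lib/utils/multimodalOptionsBuilder.js and may differ, in particular the pdfFiles pass-through is an assumption:

```js
// Reconstruction of the consolidated helper, based on the inline object it replaces.
// Field names come from the removed lines; pdfFiles is assumed to be forwarded
// the same way as the other input arrays.
function buildMultimodalOptionsSketch(options, providerName, modelName) {
  return {
    input: {
      text: options.input?.text || "",
      images: options.input?.images,
      content: options.input?.content,
      files: options.input?.files,
      csvFiles: options.input?.csvFiles,
      pdfFiles: options.input?.pdfFiles, // assumption: new field forwarded like the others
    },
    csvOptions: options.csvOptions,
    systemPrompt: options.systemPrompt,
    conversationHistory: options.conversationMessages,
    provider: providerName,
    model: modelName,
    temperature: options.temperature,
    maxTokens: options.maxTokens,
    enableAnalytics: options.enableAnalytics,
    enableEvaluation: options.enableEvaluation,
    context: options.context,
  };
}

// Usage mirrors the call sites in the diff.
const mmOptions = buildMultimodalOptionsSketch(
  { input: { text: "hi", pdfFiles: ["./doc.pdf"] } },
  "anthropic",
  "claude-sonnet-4",
);
console.log(mmOptions.provider, mmOptions.input.pdfFiles);
```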
@@ -5,6 +5,7 @@ import { APIVersions } from "../types/providers.js";
 import { validateApiKey, createAzureAPIKeyConfig, createAzureEndpointConfig, } from "../utils/providerConfig.js";
 import { logger } from "../utils/logger.js";
 import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 export class AzureOpenAIProvider extends BaseProvider {
@@ -113,7 +114,8 @@ export class AzureOpenAIProvider extends BaseProvider {
 const hasMultimodalInput = !!(options.input?.images?.length ||
 options.input?.content?.length ||
 options.input?.files?.length ||
-options.input?.csvFiles?.length
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
 let messages;
 if (hasMultimodalInput) {
 logger.debug(`Azure OpenAI: Detected multimodal input, using multimodal message builder`, {
@@ -122,26 +124,7 @@ export class AzureOpenAIProvider extends BaseProvider {
 hasContent: !!options.input?.content?.length,
 contentCount: options.input?.content?.length || 0,
 });
-
-const multimodalOptions = {
-input: {
-text: options.input?.text || "",
-images: options.input?.images,
-content: options.input?.content,
-files: options.input?.files,
-csvFiles: options.input?.csvFiles,
-},
-csvOptions: options.csvOptions,
-systemPrompt: options.systemPrompt,
-conversationHistory: options.conversationMessages,
-provider: this.providerName,
-model: this.modelName,
-temperature: options.temperature,
-maxTokens: options.maxTokens,
-enableAnalytics: options.enableAnalytics,
-enableEvaluation: options.enableEvaluation,
-context: options.context,
-};
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
 const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
 // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
 messages = convertToCoreMessages(mm);
@@ -162,6 +145,7 @@ export class AzureOpenAIProvider extends BaseProvider {
 : {}),
 tools,
 toolChoice: shouldUseTools ? "auto" : "none",
+experimental_telemetry: this.getStreamTelemetryConfig(options),
 onStepFinish: ({ toolCalls, toolResults }) => {
 this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
 logger.warn("[AzureOpenaiProvider] Failed to store tool executions", {
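Each streamText call now also passes experimental_telemetry: this.getStreamTelemetryConfig(options), presumably wiring the new observability instrumentation (services/server/ai/observability/instrumentation.js in the file list) into the Vercel AI SDK's OpenTelemetry hooks. The SDK accepts a config roughly like the sketch below; the exact fields getStreamTelemetryConfig emits are not shown in this diff, so every value here is an assumed example:

```js
// Assumed shape of a telemetry config accepted by the Vercel AI SDK's
// experimental_telemetry option (isEnabled / functionId / metadata).
function exampleTelemetryConfig(options) {
  return {
    isEnabled: true,
    functionId: "neurolink.stream", // illustrative identifier
    metadata: {
      provider: "azure-openai", // illustrative values
      sessionId: options?.context?.sessionId,
    },
  };
}

console.log(exampleTelemetryConfig({ context: { sessionId: "abc" } }));
```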
@@ -8,6 +8,7 @@ import { AuthenticationError, NetworkError, ProviderError, RateLimitError, } fro
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 // Google AI Live API types now imported from ../types/providerSpecific.js
 // Import proper types for multimodal message handling
 // Create Google GenAI client
@@ -95,7 +96,8 @@ export class GoogleAIStudioProvider extends BaseProvider {
 const hasMultimodalInput = !!(options.input?.images?.length ||
 options.input?.content?.length ||
 options.input?.files?.length ||
-options.input?.csvFiles?.length
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
 let messages;
 if (hasMultimodalInput) {
 logger.debug(`Google AI Studio: Detected multimodal input, using multimodal message builder`, {
@@ -104,26 +106,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
 hasContent: !!options.input?.content?.length,
 contentCount: options.input?.content?.length || 0,
 });
-
-const multimodalOptions = {
-input: {
-text: options.input?.text || "",
-images: options.input?.images,
-content: options.input?.content,
-files: options.input?.files,
-csvFiles: options.input?.csvFiles,
-},
-csvOptions: options.csvOptions,
-systemPrompt: options.systemPrompt,
-conversationHistory: options.conversationMessages,
-provider: this.providerName,
-model: this.modelName,
-temperature: options.temperature,
-maxTokens: options.maxTokens,
-enableAnalytics: options.enableAnalytics,
-enableEvaluation: options.enableEvaluation,
-context: options.context,
-};
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
 const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
 // Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
 messages = convertToCoreMessages(mm);
@@ -141,6 +124,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
 maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
 toolChoice: shouldUseTools ? "auto" : "none",
 abortSignal: timeoutController?.controller.signal,
+experimental_telemetry: this.getStreamTelemetryConfig(options),
 onStepFinish: ({ toolCalls, toolResults }) => {
 this.handleToolExecutionStorage(toolCalls, toolResults, options, new Date()).catch((error) => {
 logger.warn("[GoogleAiStudioProvider] Failed to store tool executions", {
@@ -599,7 +599,8 @@ export class GoogleVertexProvider extends BaseProvider {
 const hasMultimodalInput = !!(options.input?.images?.length ||
 options.input?.content?.length ||
 options.input?.files?.length ||
-options.input?.csvFiles?.length
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
 let messages;
 if (hasMultimodalInput) {
 logger.debug(`${functionTag}: Detected multimodal input, using multimodal message builder`, {
@@ -607,6 +608,8 @@ export class GoogleVertexProvider extends BaseProvider {
 imageCount: options.input?.images?.length || 0,
 hasContent: !!options.input?.content?.length,
 contentCount: options.input?.content?.length || 0,
+hasPDFs: !!options.input?.pdfFiles?.length,
+pdfCount: options.input?.pdfFiles?.length || 0,
 });
 // Create multimodal options for buildMultimodalMessagesArray
 const multimodalOptions = {
@@ -616,6 +619,7 @@ export class GoogleVertexProvider extends BaseProvider {
 content: options.input?.content,
 files: options.input?.files,
 csvFiles: options.input?.csvFiles,
+pdfFiles: options.input?.pdfFiles,
 },
 csvOptions: options.csvOptions,
 systemPrompt: options.systemPrompt,
@@ -666,6 +670,7 @@ export class GoogleVertexProvider extends BaseProvider {
 maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
 }),
 abortSignal: timeoutController?.controller.signal,
+experimental_telemetry: this.getStreamTelemetryConfig(options),
 onError: (event) => {
 const error = event.error;
 const errorMessage = error instanceof Error ? error.message : String(error);
@@ -1178,6 +1183,8 @@ export class GoogleVertexProvider extends BaseProvider {
 /^claude-sonnet-4@\d{8}$/,
 /^claude-sonnet-4-5@\d{8}$/,
 /^claude-opus-4@\d{8}$/,
+/^claude-opus-4-1@\d{8}$/,
+/^claude-3-7-sonnet@\d{8}$/,
 /^claude-3-5-sonnet-\d{8}$/,
 /^claude-3-5-haiku-\d{8}$/,
 /^claude-3-sonnet-\d{8}$/,
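The Vertex provider's allow-list of Anthropic model-ID patterns gains claude-opus-4-1@<date> and claude-3-7-sonnet@<date>. A quick illustration of how an ID is matched against these patterns; the concrete date suffixes below are placeholders, not release dates taken from this diff:

```js
// Patterns copied from the hunk above; the sample IDs use placeholder dates.
const vertexClaudePatterns = [
  /^claude-sonnet-4@\d{8}$/,
  /^claude-sonnet-4-5@\d{8}$/,
  /^claude-opus-4@\d{8}$/,
  /^claude-opus-4-1@\d{8}$/,
  /^claude-3-7-sonnet@\d{8}$/,
];

const isSupported = (modelId) => vertexClaudePatterns.some((re) => re.test(modelId));

console.log(isSupported("claude-opus-4-1@20250101")); // true (placeholder date)
console.log(isSupported("claude-opus-4-1"));          // false: the @YYYYMMDD suffix is required
```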
@@ -3,8 +3,10 @@ import { streamText, } from "ai";
 import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createHuggingFaceConfig, getProviderModel, } from "../utils/providerConfig.js";
-import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getHuggingFaceApiKey = () => {
@@ -113,13 +115,41 @@ export class HuggingFaceProvider extends BaseProvider {
 try {
 // Enhanced tool handling for HuggingFace models
 const streamOptions = this.prepareStreamOptions(options, analysisSchema);
-//
-const
+// Check for multimodal input (images, PDFs, CSVs, files)
+const hasMultimodalInput = !!(options.input?.images?.length ||
+options.input?.content?.length ||
+options.input?.files?.length ||
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
+let messages;
+if (hasMultimodalInput) {
+logger.debug(`HuggingFace: Detected multimodal input, using multimodal message builder`, {
+hasImages: !!options.input?.images?.length,
+imageCount: options.input?.images?.length || 0,
+hasContent: !!options.input?.content?.length,
+contentCount: options.input?.content?.length || 0,
+hasFiles: !!options.input?.files?.length,
+fileCount: options.input?.files?.length || 0,
+hasCSVFiles: !!options.input?.csvFiles?.length,
+csvFileCount: options.input?.csvFiles?.length || 0,
+hasPDFFiles: !!options.input?.pdfFiles?.length,
+pdfFileCount: options.input?.pdfFiles?.length || 0,
+});
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
+const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+// Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+messages = convertToCoreMessages(mm);
+}
+else {
+logger.debug(`HuggingFace: Text-only input, using standard message builder`);
+messages = await buildMessagesArray(options);
+}
 const result = await streamText({
 model: this.model,
 messages: messages,
 temperature: options.temperature,
 maxTokens: options.maxTokens, // No default limit - unlimited unless specified
+maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
 tools: streamOptions.tools, // Tools format conversion handled by prepareStreamOptions
 toolChoice: streamOptions.toolChoice, // Tool choice handled by prepareStreamOptions
 abortSignal: timeoutController?.controller.signal,
@@ -5,7 +5,9 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { getProviderModel } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
-import {
+import { DEFAULT_MAX_STEPS } from "../core/constants.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers
 const getLiteLLMConfig = () => {
@@ -120,14 +122,42 @@ export class LiteLLMProvider extends BaseProvider {
 const timeout = this.getTimeout(options);
 const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
 try {
-//
-const
+// Check for multimodal input (images, PDFs, CSVs, files)
+const hasMultimodalInput = !!(options.input?.images?.length ||
+options.input?.content?.length ||
+options.input?.files?.length ||
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
+let messages;
+if (hasMultimodalInput) {
+logger.debug(`LiteLLM: Detected multimodal input, using multimodal message builder`, {
+hasImages: !!options.input?.images?.length,
+imageCount: options.input?.images?.length || 0,
+hasContent: !!options.input?.content?.length,
+contentCount: options.input?.content?.length || 0,
+hasFiles: !!options.input?.files?.length,
+fileCount: options.input?.files?.length || 0,
+hasCSVFiles: !!options.input?.csvFiles?.length,
+csvFileCount: options.input?.csvFiles?.length || 0,
+hasPDFFiles: !!options.input?.pdfFiles?.length,
+pdfFileCount: options.input?.pdfFiles?.length || 0,
+});
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
+const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+// Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+messages = convertToCoreMessages(mm);
+}
+else {
+logger.debug(`LiteLLM: Text-only input, using standard message builder`);
+messages = await buildMessagesArray(options);
+}
 const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
 const result = streamText({
 model: model,
 messages: messages,
 temperature: options.temperature,
 maxTokens: options.maxTokens, // No default limit - unlimited unless specified
+maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
 tools: options.tools,
 toolChoice: "auto",
 abortSignal: timeoutController?.controller.signal,
@@ -6,7 +6,8 @@ import { createTimeoutController, TimeoutError } from "../utils/timeout.js";
 import { DEFAULT_MAX_STEPS } from "../core/constants.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
 import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
-import { buildMessagesArray } from "../utils/messageBuilder.js";
+import { buildMessagesArray, buildMultimodalMessagesArray, convertToCoreMessages, } from "../utils/messageBuilder.js";
+import { buildMultimodalOptions } from "../utils/multimodalOptionsBuilder.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
@@ -49,7 +50,35 @@ export class MistralProvider extends BaseProvider {
 // Get tools consistently with generate method
 const shouldUseTools = !options.disableTools && this.supportsTools();
 const tools = shouldUseTools ? await this.getAllTools() : {};
-
+// Check for multimodal input (images, PDFs, CSVs, files)
+const hasMultimodalInput = !!(options.input?.images?.length ||
+options.input?.content?.length ||
+options.input?.files?.length ||
+options.input?.csvFiles?.length ||
+options.input?.pdfFiles?.length);
+let messages;
+if (hasMultimodalInput) {
+logger.debug(`Mistral: Detected multimodal input, using multimodal message builder`, {
+hasImages: !!options.input?.images?.length,
+imageCount: options.input?.images?.length || 0,
+hasContent: !!options.input?.content?.length,
+contentCount: options.input?.content?.length || 0,
+hasFiles: !!options.input?.files?.length,
+fileCount: options.input?.files?.length || 0,
+hasCSVFiles: !!options.input?.csvFiles?.length,
+csvFileCount: options.input?.csvFiles?.length || 0,
+hasPDFFiles: !!options.input?.pdfFiles?.length,
+pdfFileCount: options.input?.pdfFiles?.length || 0,
+});
+const multimodalOptions = buildMultimodalOptions(options, this.providerName, this.modelName);
+const mm = await buildMultimodalMessagesArray(multimodalOptions, this.providerName, this.modelName);
+// Convert multimodal messages to Vercel AI SDK format (CoreMessage[])
+messages = convertToCoreMessages(mm);
+}
+else {
+logger.debug(`Mistral: Text-only input, using standard message builder`);
+messages = await buildMessagesArray(options);
+}
 const model = await this.getAISDKModelWithMiddleware(options); // This is where network connection happens!
 const result = await streamText({
 model,
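Taken together, the provider hunks (Bedrock, Anthropic, Azure OpenAI, Google AI Studio, Vertex, HuggingFace, LiteLLM, Mistral) converge on the same streaming behavior: detect image/CSV/PDF input, build multimodal messages through the shared builders, and bound the tool loop with maxSteps || DEFAULT_MAX_STEPS. As a rough end-to-end illustration only, since the top-level API surface is not shown in this diff, the kind of call these changes serve might look like the sketch below; the class, method, provider key, and file path are all assumptions:

```js
// Hypothetical top-level call exercising the new pdfFiles path; the NeuroLink
// class/method names are assumptions based on the package's exports, and the
// file path is illustrative.
import { NeuroLink } from "@juspay/neurolink";

const neurolink = new NeuroLink();
const result = await neurolink.stream({
  provider: "bedrock", // assumed provider key
  input: {
    text: "Summarize the attached PDF in three bullet points.",
    pdfFiles: ["./quarterly-report.pdf"],
  },
  maxSteps: 5, // bounds the tool-call loop, mirroring maxSteps || DEFAULT_MAX_STEPS above
});

for await (const chunk of result.stream) {
  // Chunk shape is provider-specific and not shown in this diff.
  process.stdout.write(typeof chunk === "string" ? chunk : JSON.stringify(chunk));
}
```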