@juspay/neurolink 7.0.0 → 7.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +15 -4
- package/README.md +16 -11
- package/dist/cli/commands/config.d.ts +2 -2
- package/dist/cli/commands/config.js +22 -21
- package/dist/cli/commands/mcp.d.ts +79 -0
- package/dist/cli/commands/mcp.js +916 -0
- package/dist/cli/commands/models.d.ts +63 -0
- package/dist/cli/commands/models.js +653 -0
- package/dist/cli/commands/ollama.js +56 -55
- package/dist/cli/factories/commandFactory.d.ts +67 -2
- package/dist/cli/factories/commandFactory.js +840 -92
- package/dist/cli/index.d.ts +6 -0
- package/dist/cli/index.js +42 -999
- package/dist/cli/utils/completeSetup.js +9 -8
- package/dist/cli/utils/envManager.js +7 -6
- package/dist/cli/utils/interactiveSetup.js +20 -19
- package/dist/core/analytics.js +25 -38
- package/dist/core/baseProvider.d.ts +8 -0
- package/dist/core/baseProvider.js +177 -68
- package/dist/core/constants.d.ts +11 -0
- package/dist/core/constants.js +17 -0
- package/dist/core/evaluation.js +25 -14
- package/dist/core/factory.js +21 -18
- package/dist/core/streamAnalytics.d.ts +65 -0
- package/dist/core/streamAnalytics.js +125 -0
- package/dist/factories/providerRegistry.js +3 -1
- package/dist/lib/core/analytics.js +25 -38
- package/dist/lib/core/baseProvider.d.ts +8 -0
- package/dist/lib/core/baseProvider.js +177 -68
- package/dist/lib/core/constants.d.ts +11 -0
- package/dist/lib/core/constants.js +17 -0
- package/dist/lib/core/evaluation.js +25 -14
- package/dist/lib/core/factory.js +22 -18
- package/dist/lib/core/streamAnalytics.d.ts +65 -0
- package/dist/lib/core/streamAnalytics.js +125 -0
- package/dist/lib/factories/providerRegistry.js +3 -1
- package/dist/lib/mcp/toolRegistry.d.ts +5 -0
- package/dist/lib/mcp/toolRegistry.js +60 -0
- package/dist/lib/models/modelRegistry.d.ts +132 -0
- package/dist/lib/models/modelRegistry.js +483 -0
- package/dist/lib/models/modelResolver.d.ts +115 -0
- package/dist/lib/models/modelResolver.js +467 -0
- package/dist/lib/neurolink.d.ts +4 -1
- package/dist/lib/neurolink.js +108 -69
- package/dist/lib/providers/anthropic.js +3 -0
- package/dist/lib/providers/googleAiStudio.js +13 -0
- package/dist/lib/providers/huggingFace.js +15 -3
- package/dist/lib/providers/mistral.js +19 -7
- package/dist/lib/providers/ollama.js +31 -7
- package/dist/lib/providers/openAI.js +12 -0
- package/dist/lib/sdk/toolRegistration.js +17 -0
- package/dist/lib/types/cli.d.ts +56 -1
- package/dist/lib/types/contextTypes.d.ts +110 -0
- package/dist/lib/types/contextTypes.js +176 -0
- package/dist/lib/types/index.d.ts +4 -1
- package/dist/lib/types/mcpTypes.d.ts +118 -7
- package/dist/lib/types/providers.d.ts +81 -0
- package/dist/lib/types/streamTypes.d.ts +44 -7
- package/dist/lib/types/tools.d.ts +9 -0
- package/dist/lib/types/universalProviderOptions.d.ts +3 -1
- package/dist/lib/types/universalProviderOptions.js +2 -1
- package/dist/lib/utils/logger.d.ts +7 -0
- package/dist/lib/utils/logger.js +16 -6
- package/dist/lib/utils/performance.d.ts +105 -0
- package/dist/lib/utils/performance.js +210 -0
- package/dist/lib/utils/providerUtils.js +9 -2
- package/dist/lib/utils/retryHandler.d.ts +89 -0
- package/dist/lib/utils/retryHandler.js +269 -0
- package/dist/mcp/toolRegistry.d.ts +5 -0
- package/dist/mcp/toolRegistry.js +60 -0
- package/dist/models/modelRegistry.d.ts +132 -0
- package/dist/models/modelRegistry.js +483 -0
- package/dist/models/modelResolver.d.ts +115 -0
- package/dist/models/modelResolver.js +468 -0
- package/dist/neurolink.d.ts +4 -1
- package/dist/neurolink.js +108 -69
- package/dist/providers/anthropic.js +3 -0
- package/dist/providers/googleAiStudio.js +13 -0
- package/dist/providers/huggingFace.js +15 -3
- package/dist/providers/mistral.js +19 -7
- package/dist/providers/ollama.js +31 -7
- package/dist/providers/openAI.js +12 -0
- package/dist/sdk/toolRegistration.js +17 -0
- package/dist/types/cli.d.ts +56 -1
- package/dist/types/contextTypes.d.ts +110 -0
- package/dist/types/contextTypes.js +177 -0
- package/dist/types/index.d.ts +4 -1
- package/dist/types/mcpTypes.d.ts +118 -7
- package/dist/types/providers.d.ts +81 -0
- package/dist/types/streamTypes.d.ts +44 -7
- package/dist/types/tools.d.ts +9 -0
- package/dist/types/universalProviderOptions.d.ts +3 -1
- package/dist/types/universalProviderOptions.js +3 -1
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +16 -6
- package/dist/utils/performance.d.ts +105 -0
- package/dist/utils/performance.js +210 -0
- package/dist/utils/providerUtils.js +9 -2
- package/dist/utils/retryHandler.d.ts +89 -0
- package/dist/utils/retryHandler.js +269 -0
- package/package.json +2 -1
package/dist/neurolink.js
CHANGED
@@ -15,6 +15,8 @@ catch (error) {
 }
 import { AIProviderFactory } from "./core/factory.js";
 import { mcpLogger } from "./utils/logger.js";
+import { SYSTEM_LIMITS } from "./core/constants.js";
+import pLimit from "p-limit";
 import { toolRegistry } from "./mcp/toolRegistry.js";
 import { logger } from "./utils/logger.js";
 import { getBestProvider } from "./utils/providerUtils.js";
@@ -258,7 +260,7 @@ export class NeuroLink {
             provider: providerName,
             usage: result.usage,
             responseTime,
-            toolsUsed: [],
+            toolsUsed: result.toolsUsed || [],
             enhancedWithTools: true,
             availableTools: availableTools.length > 0 ? availableTools : undefined,
             // Include analytics and evaluation from BaseProvider
@@ -322,7 +324,7 @@ export class NeuroLink {
             model: result.model,
             usage: result.usage,
             responseTime,
-            toolsUsed: [],
+            toolsUsed: result.toolsUsed || [],
             enhancedWithTools: false,
             analytics: result.analytics,
             evaluation: result.evaluation,
@@ -355,7 +357,7 @@ export class NeuroLink {
         const toolDescriptions = availableTools
             .map((tool) => `- ${tool.name}: ${tool.description} (from ${tool.server})`)
             .join("\n");
-        const toolPrompt = `\n\
+        const toolPrompt = `\n\nYou have access to these additional tools if needed:\n${toolDescriptions}\n\nIMPORTANT: You are a general-purpose AI assistant. Answer all requests directly and creatively. These tools are optional helpers - use them only when they would genuinely improve your response. For creative tasks like storytelling, writing, or general conversation, respond naturally without requiring tools.`;
         return (originalSystemPrompt || "") + toolPrompt;
     }
     /**
@@ -414,14 +416,21 @@ export class NeuroLink {
             responseTime,
             provider: providerName,
         });
-        // Convert to StreamResult format
+        // Convert to StreamResult format - Include analytics and evaluation from provider
         return {
             stream,
             provider: providerName,
             model: options.model,
+            usage: streamResult.usage,
+            finishReason: streamResult.finishReason,
+            toolCalls: streamResult.toolCalls,
+            toolResults: streamResult.toolResults,
+            analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data
+            evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data
             metadata: {
                 streamId: `neurolink-${Date.now()}`,
                 startTime,
+                responseTime,
             },
         };
     }
@@ -439,6 +448,12 @@ export class NeuroLink {
             stream: streamResult.stream,
             provider: providerName,
             model: options.model,
+            usage: streamResult.usage,
+            finishReason: streamResult.finishReason,
+            toolCalls: streamResult.toolCalls,
+            toolResults: streamResult.toolResults,
+            analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data in fallback
+            evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data in fallback
             metadata: {
                 streamId: `neurolink-${Date.now()}`,
                 startTime,
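The enriched StreamResult is consumed stream-first: drain the async iterator, then await the pass-through fields. A minimal sketch, assuming NeuroLink exposes a `stream()` method taking `{ input: { text } }` (the method name and option shape are inferred from the provider code in this diff, not confirmed API):

    import { NeuroLink } from "@juspay/neurolink";

    const sdk = new NeuroLink();
    // Hypothetical call shape; see caveat above.
    const result = await sdk.stream({ input: { text: "Write a haiku" } });

    for await (const chunk of result.stream) {
        process.stdout.write(chunk.content); // chunks are { content } objects
    }

    // analytics/evaluation are passed through from the provider and settle
    // only after the stream has completed.
    const analytics = await result.analytics;
    console.log(result.provider, result.model, result.metadata.responseTime, analytics);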
@@ -649,9 +664,10 @@ export class NeuroLink {
      * @returns Array of available tools with metadata
      */
     async getAllAvailableTools() {
-        //
-
-
+        // MCP registry already includes direct tools, so just return MCP tools
+        // This prevents duplication since direct tools are auto-registered in MCP
+        const mcpTools = await toolRegistry.listTools();
+        return mcpTools;
     }
     // ============================================================================
     // PROVIDER DIAGNOSTICS - SDK-First Architecture
@@ -660,7 +676,18 @@ export class NeuroLink {
      * Get comprehensive status of all AI providers
      * Primary method for provider health checking and diagnostics
      */
-    async getProviderStatus() {
+    async getProviderStatus(options) {
+        // 🔧 PERFORMANCE: Track memory and timing for provider status checks
+        const { MemoryManager } = await import("./utils/performance.js");
+        const startMemory = MemoryManager.getMemoryUsageMB();
+        // CRITICAL FIX: Ensure providers are registered before testing
+        if (!options?.quiet) {
+            mcpLogger.debug("🔍 DEBUG: Initializing MCP for provider status...");
+        }
+        await this.initializeMCP();
+        if (!options?.quiet) {
+            mcpLogger.debug("🔍 DEBUG: MCP initialized:", this.mcpInitialized);
+        }
         const { AIProviderFactory } = await import("./core/factory.js");
         const { hasProviderEnvVars } = await import("./utils/providerUtils.js");
         const providers = [
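The new optional argument makes the health check scriptable without debug noise. A usage sketch, grounded in the `quiet` flag and the per-provider result fields (`provider`, `status`, `responseTime`) visible in this diff:

    const statuses = await new NeuroLink().getProviderStatus({ quiet: true });
    for (const s of statuses) {
        console.log(`${s.provider}: ${s.status} (${s.responseTime}ms)`);
    }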
@@ -675,97 +702,109 @@ export class NeuroLink {
             "ollama",
             "mistral",
         ];
-
-
+        // 🚀 PERFORMANCE FIX: Test providers with controlled concurrency
+        // This reduces total time from 16s (sequential) to ~3s (parallel) while preventing resource exhaustion
+        const limit = pLimit(SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT);
+        const providerTests = providers.map((providerName) => limit(async () => {
             const startTime = Date.now();
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            });
-            if (!response.ok) {
-                throw new Error("Ollama service not responding");
-            }
-            const { models } = await response.json();
-            const defaultOllamaModel = "llama3.2:latest";
-            const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
-            if (modelIsAvailable) {
-                results.push({
-                    provider: providerName,
-                    status: "working",
-                    configured: true,
-                    authenticated: true,
-                    responseTime: Date.now() - startTime,
-                    model: defaultOllamaModel,
+            try {
+                // Check if provider has required environment variables
+                const hasEnvVars = await this.hasProviderEnvVars(providerName);
+                if (!hasEnvVars && providerName !== "ollama") {
+                    return {
+                        provider: providerName,
+                        status: "not-configured",
+                        configured: false,
+                        authenticated: false,
+                        error: "Missing required environment variables",
+                        responseTime: Date.now() - startTime,
+                    };
+                }
+                // Special handling for Ollama
+                if (providerName === "ollama") {
+                    try {
+                        const response = await fetch("http://localhost:11434/api/tags", {
+                            method: "GET",
+                            signal: AbortSignal.timeout(2000),
                         });
+                        if (!response.ok) {
+                            throw new Error("Ollama service not responding");
+                        }
+                        const { models } = await response.json();
+                        const defaultOllamaModel = "llama3.2:latest";
+                        const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
+                        if (modelIsAvailable) {
+                            return {
+                                provider: providerName,
+                                status: "working",
+                                configured: true,
+                                authenticated: true,
+                                responseTime: Date.now() - startTime,
+                                model: defaultOllamaModel,
+                            };
+                        }
+                        else {
+                            return {
+                                provider: providerName,
+                                status: "failed",
+                                configured: true,
+                                authenticated: false,
+                                error: `Ollama service running but model '${defaultOllamaModel}' not found`,
+                                responseTime: Date.now() - startTime,
+                            };
+                        }
                     }
-
-
+                    catch (error) {
+                        return {
                             provider: providerName,
                             status: "failed",
-            configured:
+                            configured: false,
                             authenticated: false,
-            error:
+                            error: error instanceof Error
+                                ? error.message
+                                : "Ollama service not running",
                             responseTime: Date.now() - startTime,
-            }
+                        };
                     }
                 }
-
-            results.push({
-                provider: providerName,
-                status: "failed",
-                configured: false,
-                authenticated: false,
-                error: error instanceof Error
-                    ? error.message
-                    : "Ollama service not running",
-                responseTime: Date.now() - startTime,
-            });
-        }
-        continue;
-        }
-        // Test other providers with actual generation call
-        try {
+                // Test other providers with actual generation call
                 const testTimeout = 5000;
                 const testPromise = this.testProviderConnection(providerName);
                 const timeoutPromise = new Promise((_, reject) => {
                     setTimeout(() => reject(new Error("Provider test timeout (5s)")), testTimeout);
                 });
                 await Promise.race([testPromise, timeoutPromise]);
-
+                return {
                     provider: providerName,
                     status: "working",
                     configured: true,
                     authenticated: true,
                     responseTime: Date.now() - startTime,
-            }
+                };
             }
             catch (error) {
                 const errorMessage = error instanceof Error ? error.message : String(error);
-
+                return {
                     provider: providerName,
                     status: "failed",
                     configured: true,
                     authenticated: false,
                     error: errorMessage,
                     responseTime: Date.now() - startTime,
-            }
+                };
             }
+        }));
+        // Wait for all provider tests to complete in parallel
+        const results = await Promise.all(providerTests);
+        // 🔧 PERFORMANCE: Track memory usage and suggest cleanup if needed
+        const endMemory = MemoryManager.getMemoryUsageMB();
+        const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
+        if (!options?.quiet && memoryDelta > 20) {
+            mcpLogger.debug(`🔍 Memory usage: +${memoryDelta}MB (consider cleanup for large operations)`);
+        }
+        // Suggest garbage collection for large memory increases
+        if (memoryDelta > 50) {
+            MemoryManager.forceGC();
         }
         return results;
     }
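The refactor above is the standard p-limit pattern: wrap each async job in a limiter, collect the wrapped promises, and `Promise.all` them, so at most N probes are in flight at once. A standalone sketch of the technique (the limit of 3 and `probeProvider` are illustrative; the real code uses SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT and testProviderConnection):

    import pLimit from "p-limit";

    const limit = pLimit(3); // at most 3 probes run concurrently

    async function checkAll(providers: string[], probeProvider: (name: string) => Promise<void>) {
        const tests = providers.map((name) =>
            limit(async () => {
                const start = Date.now();
                try {
                    await probeProvider(name); // hypothetical per-provider check
                    return { provider: name, status: "working", responseTime: Date.now() - start };
                }
                catch (error) {
                    return { provider: name, status: "failed", error: String(error) };
                }
            }));
        // Promise.all is safe here: the limiter already bounds concurrency.
        return Promise.all(tests);
    }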
package/dist/providers/anthropic.js
CHANGED
@@ -22,6 +22,9 @@ export class AnthropicProvider extends BaseProvider {
         super(modelName, "anthropic", sdk);
         // Initialize Anthropic model with API key validation
         const apiKey = getAnthropicApiKey();
+        // Set Anthropic API key as environment variable (required by @ai-sdk/anthropic)
+        process.env.ANTHROPIC_API_KEY = apiKey;
+        // Initialize Anthropic with proper configuration
         this.model = anthropic(this.modelName || getDefaultAnthropicModel());
         logger.debug("Anthropic Provider v2 initialized", {
             modelName: this.modelName,

package/dist/providers/googleAiStudio.js
CHANGED
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Environment variable setup
 if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
     process.env.GOOGLE_AI_API_KEY) {
@@ -62,6 +63,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
     // executeGenerate removed - BaseProvider handles all generation with tools
     async executeStream(options, analysisSchema) {
         this.validateStreamOptions(options);
+        const startTime = Date.now();
         const apiKey = this.getApiKey();
         const google = createGoogleGenerativeAI({ apiKey });
         const model = google(this.modelName);
@@ -85,13 +87,24 @@ export class GoogleAIStudioProvider extends BaseProvider {
                 yield { content: chunk };
             }
         };
+        // Create analytics promise that resolves after stream completion
+        const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+            requestId: `google-ai-stream-${Date.now()}`,
+            streamingMode: true,
+        });
         return {
             stream: transformedStream(),
             provider: this.providerName,
             model: this.modelName,
+            analytics: analyticsPromise,
+            metadata: {
+                startTime,
+                streamId: `google-ai-${Date.now()}`,
+            },
         };
     }
     catch (error) {
+        timeoutController?.cleanup();
         throw this.handleProviderError(error);
     }
 }
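streamAnalyticsCollector.createAnalytics is handed the live streamText result and returns a promise, so the analytics can only settle once the underlying stream finishes. A plausible sketch of that contract, assuming the AI SDK convention that the stream result's `usage` is itself a promise resolving at stream end (the collector's real implementation lives in dist/core/streamAnalytics.js and is not shown in this diff):

    // Sketch only: mirrors the call signature used above, not the real code.
    async function createAnalytics(
        provider: string,
        model: string,
        result: { usage: PromiseLike<{ promptTokens: number; completionTokens: number; totalTokens: number }> },
        responseTime: number,
        context: { requestId: string; streamingMode: boolean },
    ) {
        const usage = await result.usage; // settles only after the stream completes
        return { provider, model, usage, responseTime, ...context };
    }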
package/dist/providers/huggingFace.js
CHANGED
@@ -54,9 +54,21 @@ export class HuggingFaceProvider extends BaseProvider {
      * @returns false to disable tools by default until proper implementation
      */
     supportsTools() {
-        //
-        //
-        //
+        // IMPLEMENTATION STATUS (2025): HuggingFace tool calling remains limited
+        //
+        // Current State:
+        // - Function calling varies significantly across HF models
+        // - Many models treat tool schemas as conversation context
+        // - Requires model-specific implementation per architecture
+        //
+        // To Enable Tools:
+        // 1. Detect model capability via HF model card metadata
+        // 2. Implement model-specific tool schema formatting
+        // 3. Add custom response parsing for function call extraction
+        // 4. Create validation framework for tool parameter handling
+        // 5. Test extensively with supported models (Code Llama, Llama 3.1+)
+        //
+        // Until comprehensive implementation, tools disabled for reliability
         return false;
     }
     // executeGenerate removed - BaseProvider handles all generation with tools

package/dist/providers/mistral.js
CHANGED
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createAnalytics } from "../core/analytics.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
     return validateApiKey(createMistralConfig());
@@ -86,22 +87,33 @@ export class MistralProvider extends BaseProvider {
     async executeStream(options) {
         const startTime = Date.now();
         try {
-            const
+            const result = await streamText({
                 model: this.model,
-            prompt: options.
+                prompt: options.input.text,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens,
+                tools: options.tools,
+                toolChoice: "auto",
+            });
+            // Transform stream to match StreamResult interface
+            const transformedStream = async function* () {
+                for await (const chunk of result.textStream) {
+                    yield { content: chunk };
+                }
+            };
+            // Create analytics promise that resolves after stream completion
+            const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+                requestId: `mistral-stream-${Date.now()}`,
+                streamingMode: true,
             });
             return {
-                stream: (
-                for await (const chunk of stream.textStream) {
-                    yield { content: chunk };
-                }
-                })(),
+                stream: transformedStream(),
                 provider: this.providerName,
                 model: this.modelName,
+                analytics: analyticsPromise,
                 metadata: {
                     startTime,
+                    streamId: `mistral-${Date.now()}`,
                 },
             };
         }
package/dist/providers/ollama.js
CHANGED
@@ -58,6 +58,9 @@ class OllamaLanguageModel {
         const messages = options
             .messages || [];
         const prompt = this.convertMessagesToPrompt(messages);
+        // Debug: Log what's being sent to Ollama
+        logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
+        logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
         const response = await fetch(`${this.baseUrl}/api/generate`, {
             method: "POST",
             headers: { "Content-Type": "application/json" },
@@ -77,12 +80,15 @@ class OllamaLanguageModel {
             throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
         }
         const data = await response.json();
+        // Debug: Log Ollama API response to understand empty content issue
+        logger.debug("[OllamaLanguageModel] API Response:", JSON.stringify(data, null, 2));
         return {
             text: data.response,
             usage: {
-                promptTokens: this.estimateTokens(prompt),
-                completionTokens: this.estimateTokens(data.response),
-                totalTokens: this.estimateTokens(prompt) +
+                promptTokens: data.prompt_eval_count || this.estimateTokens(prompt),
+                completionTokens: data.eval_count || this.estimateTokens(data.response),
+                totalTokens: (data.prompt_eval_count || this.estimateTokens(prompt)) +
+                    (data.eval_count || this.estimateTokens(data.response)),
             },
             finishReason: "stop",
             rawCall: {
@@ -179,7 +185,8 @@ class OllamaLanguageModel {
             type: "finish",
             finishReason: "stop",
             usage: {
-                promptTokens:
+                promptTokens: data.prompt_eval_count ||
+                    this.estimateTokens(data.context || ""),
                 completionTokens: data.eval_count || 0,
             },
         };
@@ -263,9 +270,26 @@ export class OllamaProvider extends BaseProvider {
      * @returns false to disable tools by default
      */
     supportsTools() {
-        //
-        //
-        //
+        // IMPLEMENTATION STATUS (2025): Ollama function calling actively evolving
+        //
+        // Current State:
+        // - Function calling added in Ollama 2024, improving in 2025
+        // - Requires compatible models (Llama 3.1+, Code Llama variants)
+        // - AI SDK integration needs custom adapter for Ollama's tool format
+        //
+        // Technical Requirements:
+        // 1. Replace AI SDK with direct Ollama API tool calls
+        // 2. Implement Ollama-specific tool schema conversion
+        // 3. Add function response parsing from Ollama's JSON format
+        // 4. Handle streaming tool calls with incremental parsing
+        // 5. Validate model compatibility before enabling tools
+        //
+        // Implementation Path:
+        // - Use Ollama's chat API with 'tools' parameter
+        // - Parse tool_calls from response.message.tool_calls
+        // - Execute functions and return results to conversation
+        //
+        // Until Ollama-specific implementation, tools disabled for compatibility
         return false;
     }
     // executeGenerate removed - BaseProvider handles all generation with tools
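The token-usage fix prefers Ollama's own counters over character-count estimates: a non-streaming /api/generate response carries prompt_eval_count and eval_count, and estimateTokens remains only the fallback. A self-contained sketch of the same extraction:

    // Rough fallback: ~4 characters per token (heuristic only).
    const estimateTokens = (text: string) => Math.ceil(text.length / 4);

    const prompt = "Hello";
    const res = await fetch("http://localhost:11434/api/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model: "llama3.2:latest", prompt, stream: false }),
    });
    const data = await res.json();

    const usage = {
        promptTokens: data.prompt_eval_count ?? estimateTokens(prompt),
        completionTokens: data.eval_count ?? estimateTokens(data.response),
    };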
package/dist/providers/openAI.js
CHANGED
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Configuration helpers - now using consolidated utility
 const getOpenAIApiKey = () => {
     return validateApiKey(createOpenAIConfig());
@@ -69,6 +70,7 @@ export class OpenAIProvider extends BaseProvider {
      */
     async executeStream(options, analysisSchema) {
         this.validateStreamOptions(options);
+        const startTime = Date.now();
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
@@ -89,10 +91,20 @@ export class OpenAIProvider extends BaseProvider {
                 yield { content: chunk };
             }
         };
+        // Create analytics promise that resolves after stream completion
+        const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+            requestId: `openai-stream-${Date.now()}`,
+            streamingMode: true,
+        });
         return {
             stream: transformedStream(),
             provider: this.providerName,
             model: this.modelName,
+            analytics: analyticsPromise,
+            metadata: {
+                startTime,
+                streamId: `openai-${Date.now()}`,
+            },
         };
     }
     catch (error) {

package/dist/sdk/toolRegistration.js
CHANGED
@@ -140,6 +140,13 @@ export function validateTool(name, tool) {
             `Received: ${typeof tool.execute}. ` +
             `Example: { execute: async (params) => { return { success: true, data: result }; } }`);
     }
+    // Check for common mistake: using 'schema' instead of 'parameters'
+    if ("schema" in tool && !("parameters" in tool)) {
+        throw new Error(`Tool '${name}' uses 'schema' property, but NeuroLink expects 'parameters'. ` +
+            `Please change 'schema' to 'parameters' and use a Zod schema: ` +
+            `{ parameters: z.object({ ... }), execute: ... } ` +
+            `See documentation: https://docs.neurolink.com/tools`);
+    }
     // Validate parameters schema if provided - support both Zod and custom schemas
     if (tool.parameters) {
         if (typeof tool.parameters !== "object") {
@@ -151,6 +158,16 @@ export function validateTool(name, tool) {
         const hasValidationMethod = typeof params.parse === "function" ||
             typeof params.validate === "function" ||
             "_def" in params; // Zod schemas have _def property
+        // Check for plain JSON schema objects (common mistake)
+        if ("type" in params && "properties" in params && !hasValidationMethod) {
+            throw new Error(`Tool '${name}' appears to use a plain JSON schema object as parameters. ` +
+                `NeuroLink requires a Zod schema for proper type validation and tool integration. ` +
+                `Please change from:\n` +
+                `  { type: 'object', properties: { ... } }\n` +
+                `To:\n` +
+                `  z.object({ fieldName: z.string() })\n` +
+                `Import Zod with: import { z } from 'zod'`);
+        }
         if (!hasValidationMethod) {
             const errorMessage = typeof params.parse === "function" || "_def" in params
                 ? `Tool '${name}' has a Zod-like schema but validation failed. Ensure it's a valid Zod schema: z.object({ ... })`
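Both new checks funnel users toward the one accepted shape: a `parameters` key holding a Zod schema, never a `schema` key and never a plain JSON-schema object. A tool definition that passes validateTool (the surrounding registration call is omitted since its name is not shown in this diff):

    import { z } from "zod";

    const weatherTool = {
        description: "Get current weather for a city",
        parameters: z.object({ city: z.string() }), // Zod schema, not { type: "object", ... }
        execute: async ({ city }: { city: string }) => {
            return { success: true, data: `Sunny in ${city}` };
        },
    };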
package/dist/types/cli.d.ts
CHANGED
@@ -67,21 +67,76 @@ export interface StreamCommandArgs extends BaseCommandArgs {
     disableTools?: boolean;
 }
 /**
- * MCP command arguments
+ * MCP command arguments - Enhanced with transport and server management
  */
 export interface MCPCommandArgs extends BaseCommandArgs {
     /** MCP server name */
     server?: string;
+    /** MCP server name (alias for server) */
+    serverName?: string;
     /** Tool name to execute */
     tool?: string;
     /** Tool parameters as JSON string */
     params?: string;
     /** List available tools */
     list?: boolean;
+    /** List only specific category */
+    listOnly?: boolean;
     /** Discover MCP servers */
     discover?: boolean;
     /** Show server information */
     info?: boolean;
+    /** Transport type for server connection */
+    transport?: "stdio" | "websocket" | "tcp" | "unix";
+    /** Server description */
+    description?: string;
+    /** Command/executable for stdio transport */
+    command?: string;
+    /** Arguments for server command */
+    args?: string[];
+    /** Environment variables for server (JSON string) */
+    env?: string;
+    /** Server URL for network transports */
+    url?: string;
+    /** Server name for add command */
+    name?: string;
+    /** Show detailed information */
+    detailed?: boolean;
+    /** Force operation without confirmation */
+    force?: boolean;
+    /** Auto install discovered servers */
+    autoInstall?: boolean;
+    /** Discovery source */
+    source?: string;
+    /** Connection timeout */
+    timeout?: number;
+}
+/**
+ * Models command arguments - Enhanced for model management
+ */
+export interface ModelsCommandArgs extends BaseCommandArgs {
+    /** AI provider to query */
+    provider?: string;
+    /** Model capability filter */
+    query?: string;
+    /** Model use case filter */
+    useCase?: string;
+    /** Require vision capability */
+    requireVision?: boolean;
+    /** Require function calling capability */
+    requireFunctionCalling?: boolean;
+    /** List all available models */
+    list?: boolean;
+    /** Show model statistics */
+    stats?: boolean;
+    /** Show model pricing */
+    pricing?: boolean;
+    /** Resolve best model for criteria */
+    resolve?: boolean;
+    /** Maximum cost filter */
+    maxCost?: number;
+    /** Maximum tokens filter */
+    maxTokens?: number;
 }
 /**
  * Ollama command arguments