@juspay/neurolink 7.1.0 → 7.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -2
- package/README.md +16 -11
- package/dist/cli/commands/config.d.ts +2 -2
- package/dist/cli/commands/config.js +22 -21
- package/dist/cli/commands/mcp.d.ts +79 -0
- package/dist/cli/commands/mcp.js +916 -0
- package/dist/cli/commands/models.d.ts +63 -0
- package/dist/cli/commands/models.js +653 -0
- package/dist/cli/commands/ollama.js +56 -55
- package/dist/cli/factories/commandFactory.d.ts +14 -0
- package/dist/cli/factories/commandFactory.js +346 -47
- package/dist/cli/index.js +25 -10
- package/dist/cli/utils/completeSetup.js +9 -8
- package/dist/cli/utils/envManager.js +7 -6
- package/dist/cli/utils/interactiveSetup.js +20 -19
- package/dist/core/analytics.js +25 -38
- package/dist/core/baseProvider.d.ts +8 -0
- package/dist/core/baseProvider.js +177 -68
- package/dist/core/constants.d.ts +11 -0
- package/dist/core/constants.js +17 -0
- package/dist/core/evaluation.js +25 -14
- package/dist/core/factory.js +19 -18
- package/dist/core/streamAnalytics.d.ts +65 -0
- package/dist/core/streamAnalytics.js +125 -0
- package/dist/lib/core/analytics.js +25 -38
- package/dist/lib/core/baseProvider.d.ts +8 -0
- package/dist/lib/core/baseProvider.js +177 -68
- package/dist/lib/core/constants.d.ts +11 -0
- package/dist/lib/core/constants.js +17 -0
- package/dist/lib/core/evaluation.js +25 -14
- package/dist/lib/core/factory.js +19 -18
- package/dist/lib/core/streamAnalytics.d.ts +65 -0
- package/dist/lib/core/streamAnalytics.js +125 -0
- package/dist/lib/models/modelRegistry.d.ts +132 -0
- package/dist/lib/models/modelRegistry.js +483 -0
- package/dist/lib/models/modelResolver.d.ts +115 -0
- package/dist/lib/models/modelResolver.js +467 -0
- package/dist/lib/neurolink.d.ts +4 -1
- package/dist/lib/neurolink.js +101 -67
- package/dist/lib/providers/anthropic.js +3 -0
- package/dist/lib/providers/googleAiStudio.js +13 -0
- package/dist/lib/providers/huggingFace.js +15 -3
- package/dist/lib/providers/mistral.js +19 -7
- package/dist/lib/providers/ollama.js +31 -7
- package/dist/lib/providers/openAI.js +12 -0
- package/dist/lib/sdk/toolRegistration.js +2 -2
- package/dist/lib/types/cli.d.ts +56 -1
- package/dist/lib/types/contextTypes.d.ts +110 -0
- package/dist/lib/types/contextTypes.js +176 -0
- package/dist/lib/types/index.d.ts +4 -1
- package/dist/lib/types/mcpTypes.d.ts +118 -7
- package/dist/lib/types/providers.d.ts +81 -0
- package/dist/lib/types/streamTypes.d.ts +44 -7
- package/dist/lib/types/tools.d.ts +9 -0
- package/dist/lib/types/universalProviderOptions.d.ts +3 -1
- package/dist/lib/types/universalProviderOptions.js +2 -1
- package/dist/lib/utils/logger.d.ts +7 -0
- package/dist/lib/utils/logger.js +11 -0
- package/dist/lib/utils/performance.d.ts +105 -0
- package/dist/lib/utils/performance.js +210 -0
- package/dist/lib/utils/retryHandler.d.ts +89 -0
- package/dist/lib/utils/retryHandler.js +269 -0
- package/dist/models/modelRegistry.d.ts +132 -0
- package/dist/models/modelRegistry.js +483 -0
- package/dist/models/modelResolver.d.ts +115 -0
- package/dist/models/modelResolver.js +468 -0
- package/dist/neurolink.d.ts +4 -1
- package/dist/neurolink.js +101 -67
- package/dist/providers/anthropic.js +3 -0
- package/dist/providers/googleAiStudio.js +13 -0
- package/dist/providers/huggingFace.js +15 -3
- package/dist/providers/mistral.js +19 -7
- package/dist/providers/ollama.js +31 -7
- package/dist/providers/openAI.js +12 -0
- package/dist/sdk/toolRegistration.js +2 -2
- package/dist/types/cli.d.ts +56 -1
- package/dist/types/contextTypes.d.ts +110 -0
- package/dist/types/contextTypes.js +177 -0
- package/dist/types/index.d.ts +4 -1
- package/dist/types/mcpTypes.d.ts +118 -7
- package/dist/types/providers.d.ts +81 -0
- package/dist/types/streamTypes.d.ts +44 -7
- package/dist/types/tools.d.ts +9 -0
- package/dist/types/universalProviderOptions.d.ts +3 -1
- package/dist/types/universalProviderOptions.js +3 -1
- package/dist/utils/logger.d.ts +7 -0
- package/dist/utils/logger.js +11 -0
- package/dist/utils/performance.d.ts +105 -0
- package/dist/utils/performance.js +210 -0
- package/dist/utils/retryHandler.d.ts +89 -0
- package/dist/utils/retryHandler.js +269 -0
- package/package.json +2 -1
package/dist/neurolink.js
CHANGED
@@ -15,6 +15,8 @@ catch (error) {
 }
 import { AIProviderFactory } from "./core/factory.js";
 import { mcpLogger } from "./utils/logger.js";
+import { SYSTEM_LIMITS } from "./core/constants.js";
+import pLimit from "p-limit";
 import { toolRegistry } from "./mcp/toolRegistry.js";
 import { logger } from "./utils/logger.js";
 import { getBestProvider } from "./utils/providerUtils.js";
@@ -258,7 +260,7 @@ export class NeuroLink {
             provider: providerName,
             usage: result.usage,
             responseTime,
-            toolsUsed: [],
+            toolsUsed: result.toolsUsed || [],
             enhancedWithTools: true,
             availableTools: availableTools.length > 0 ? availableTools : undefined,
             // Include analytics and evaluation from BaseProvider
@@ -322,7 +324,7 @@ export class NeuroLink {
             model: result.model,
             usage: result.usage,
             responseTime,
-            toolsUsed: [],
+            toolsUsed: result.toolsUsed || [],
             enhancedWithTools: false,
             analytics: result.analytics,
             evaluation: result.evaluation,
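For SDK consumers, the effect of the toolsUsed change above is that generate results now surface the tools the provider actually invoked instead of an always-empty array. A minimal sketch, assuming the NeuroLink class and the input option shape that appear elsewhere in this diff:

    import { NeuroLink } from "@juspay/neurolink";

    const neurolink = new NeuroLink();
    const result = await neurolink.generate({ input: { text: "What time is it in Tokyo?" } });
    // Before 7.2.0 this was always []; it now reflects result.toolsUsed from the provider.
    console.log(result.toolsUsed);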
@@ -414,14 +416,21 @@ export class NeuroLink {
             responseTime,
             provider: providerName,
         });
-        // Convert to StreamResult format
+        // Convert to StreamResult format - Include analytics and evaluation from provider
         return {
             stream,
             provider: providerName,
             model: options.model,
+            usage: streamResult.usage,
+            finishReason: streamResult.finishReason,
+            toolCalls: streamResult.toolCalls,
+            toolResults: streamResult.toolResults,
+            analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data
+            evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data
             metadata: {
                 streamId: `neurolink-${Date.now()}`,
                 startTime,
+                responseTime,
             },
         };
     }
@@ -439,6 +448,12 @@ export class NeuroLink {
             stream: streamResult.stream,
             provider: providerName,
             model: options.model,
+            usage: streamResult.usage,
+            finishReason: streamResult.finishReason,
+            toolCalls: streamResult.toolCalls,
+            toolResults: streamResult.toolResults,
+            analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data in fallback
+            evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data in fallback
             metadata: {
                 streamId: `neurolink-${Date.now()}`,
                 startTime,
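With both the primary and fallback stream paths now forwarding usage, finishReason, tool calls, analytics, and evaluation, callers can read these off the returned StreamResult. A hedged sketch, assuming stream() is the public method that returns this shape and that analytics resolves once the stream finishes (as the provider changes below suggest):

    const streamResult = await neurolink.stream({ input: { text: "Summarize this repo" } });
    for await (const chunk of streamResult.stream) {
      process.stdout.write(chunk.content); // chunks are { content: string }
    }
    // Analytics arrive as a promise that settles after streaming completes.
    console.log(await streamResult.analytics, streamResult.metadata.responseTime);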
@@ -661,11 +676,18 @@ export class NeuroLink {
      * Get comprehensive status of all AI providers
      * Primary method for provider health checking and diagnostics
      */
-    async getProviderStatus() {
+    async getProviderStatus(options) {
+        // 🔧 PERFORMANCE: Track memory and timing for provider status checks
+        const { MemoryManager } = await import("./utils/performance.js");
+        const startMemory = MemoryManager.getMemoryUsageMB();
         // CRITICAL FIX: Ensure providers are registered before testing
-
+        if (!options?.quiet) {
+            mcpLogger.debug("🔍 DEBUG: Initializing MCP for provider status...");
+        }
         await this.initializeMCP();
-
+        if (!options?.quiet) {
+            mcpLogger.debug("🔍 DEBUG: MCP initialized:", this.mcpInitialized);
+        }
         const { AIProviderFactory } = await import("./core/factory.js");
         const { hasProviderEnvVars } = await import("./utils/providerUtils.js");
         const providers = [
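The new optional options parameter only gates logging, so existing callers are unaffected. A usage sketch (the quiet field name comes from the checks above):

    // Default: emits debug logs during MCP initialization and memory tracking.
    const statuses = await neurolink.getProviderStatus();
    // Quiet mode: suppress the debug chatter, e.g. for machine-readable CLI output.
    const quietStatuses = await neurolink.getProviderStatus({ quiet: true });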
@@ -680,97 +702,109 @@ export class NeuroLink {
         "ollama",
         "mistral",
     ];
-
-
+    // 🚀 PERFORMANCE FIX: Test providers with controlled concurrency
+    // This reduces total time from 16s (sequential) to ~3s (parallel) while preventing resource exhaustion
+    const limit = pLimit(SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT);
+    const providerTests = providers.map((providerName) => limit(async () => {
         const startTime = Date.now();
-        …
-        });
-        if (!response.ok) {
-            throw new Error("Ollama service not responding");
-        }
-        const { models } = await response.json();
-        const defaultOllamaModel = "llama3.2:latest";
-        const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
-        if (modelIsAvailable) {
-            results.push({
-                provider: providerName,
-                status: "working",
-                configured: true,
-                authenticated: true,
-                responseTime: Date.now() - startTime,
-                model: defaultOllamaModel,
+        try {
+            // Check if provider has required environment variables
+            const hasEnvVars = await this.hasProviderEnvVars(providerName);
+            if (!hasEnvVars && providerName !== "ollama") {
+                return {
+                    provider: providerName,
+                    status: "not-configured",
+                    configured: false,
+                    authenticated: false,
+                    error: "Missing required environment variables",
+                    responseTime: Date.now() - startTime,
+                };
+            }
+            // Special handling for Ollama
+            if (providerName === "ollama") {
+                try {
+                    const response = await fetch("http://localhost:11434/api/tags", {
+                        method: "GET",
+                        signal: AbortSignal.timeout(2000),
                     });
+                    if (!response.ok) {
+                        throw new Error("Ollama service not responding");
+                    }
+                    const { models } = await response.json();
+                    const defaultOllamaModel = "llama3.2:latest";
+                    const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
+                    if (modelIsAvailable) {
+                        return {
+                            provider: providerName,
+                            status: "working",
+                            configured: true,
+                            authenticated: true,
+                            responseTime: Date.now() - startTime,
+                            model: defaultOllamaModel,
+                        };
+                    }
+                    else {
+                        return {
+                            provider: providerName,
+                            status: "failed",
+                            configured: true,
+                            authenticated: false,
+                            error: `Ollama service running but model '${defaultOllamaModel}' not found`,
+                            responseTime: Date.now() - startTime,
+                        };
+                    }
                 }
-
-
+                catch (error) {
+                    return {
                         provider: providerName,
                         status: "failed",
-                        configured:
+                        configured: false,
                         authenticated: false,
-                        error:
+                        error: error instanceof Error
+                            ? error.message
+                            : "Ollama service not running",
                         responseTime: Date.now() - startTime,
-                    }
+                    };
                 }
             }
-
-            results.push({
-                provider: providerName,
-                status: "failed",
-                configured: false,
-                authenticated: false,
-                error: error instanceof Error
-                    ? error.message
-                    : "Ollama service not running",
-                responseTime: Date.now() - startTime,
-            });
-        }
-        continue;
-    }
-    // Test other providers with actual generation call
-    try {
+            // Test other providers with actual generation call
             const testTimeout = 5000;
             const testPromise = this.testProviderConnection(providerName);
             const timeoutPromise = new Promise((_, reject) => {
                 setTimeout(() => reject(new Error("Provider test timeout (5s)")), testTimeout);
             });
             await Promise.race([testPromise, timeoutPromise]);
-
+            return {
                 provider: providerName,
                 status: "working",
                 configured: true,
                 authenticated: true,
                 responseTime: Date.now() - startTime,
-            }
+            };
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);
-
+            return {
                 provider: providerName,
                 status: "failed",
                 configured: true,
                 authenticated: false,
                 error: errorMessage,
                 responseTime: Date.now() - startTime,
-            }
+            };
         }
+    }));
+    // Wait for all provider tests to complete in parallel
+    const results = await Promise.all(providerTests);
+    // 🔧 PERFORMANCE: Track memory usage and suggest cleanup if needed
+    const endMemory = MemoryManager.getMemoryUsageMB();
+    const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
+    if (!options?.quiet && memoryDelta > 20) {
+        mcpLogger.debug(`🔍 Memory usage: +${memoryDelta}MB (consider cleanup for large operations)`);
+    }
+    // Suggest garbage collection for large memory increases
+    if (memoryDelta > 50) {
+        MemoryManager.forceGC();
     }
     return results;
 }
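The core of this rewrite is bounding parallelism with p-limit rather than checking providers one at a time. A standalone sketch of the same pattern; the concurrency value 3 is an illustrative stand-in for SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT, whose real value lives in core/constants.js:

    import pLimit from "p-limit";

    // At most 3 health checks run concurrently; the rest queue.
    const limit = pLimit(3);

    async function checkProvider(name: string): Promise<{ name: string; ok: boolean }> {
      // Stand-in for a real connectivity test.
      return { name, ok: true };
    }

    const providers = ["openai", "anthropic", "mistral", "ollama"];
    const results = await Promise.all(
      providers.map((name) => limit(() => checkProvider(name))),
    );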
package/dist/providers/anthropic.js
CHANGED
@@ -22,6 +22,9 @@ export class AnthropicProvider extends BaseProvider {
         super(modelName, "anthropic", sdk);
         // Initialize Anthropic model with API key validation
         const apiKey = getAnthropicApiKey();
+        // Set Anthropic API key as environment variable (required by @ai-sdk/anthropic)
+        process.env.ANTHROPIC_API_KEY = apiKey;
+        // Initialize Anthropic with proper configuration
         this.model = anthropic(this.modelName || getDefaultAnthropicModel());
         logger.debug("Anthropic Provider v2 initialized", {
             modelName: this.modelName,
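This works because the default anthropic provider instance exported by @ai-sdk/anthropic reads ANTHROPIC_API_KEY from the environment when no explicit key is supplied. A sketch of the same idea (the key and model id are illustrative placeholders):

    import { anthropic } from "@ai-sdk/anthropic";

    // The default provider resolves process.env.ANTHROPIC_API_KEY, so setting it
    // before constructing the model is sufficient.
    process.env.ANTHROPIC_API_KEY = "sk-ant-...";
    const model = anthropic("claude-3-5-sonnet-20240620");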
package/dist/providers/googleAiStudio.js
CHANGED
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { createProxyFetch } from "../proxy/proxyFetch.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Environment variable setup
 if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
     process.env.GOOGLE_AI_API_KEY) {
@@ -62,6 +63,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
     // executeGenerate removed - BaseProvider handles all generation with tools
     async executeStream(options, analysisSchema) {
         this.validateStreamOptions(options);
+        const startTime = Date.now();
         const apiKey = this.getApiKey();
         const google = createGoogleGenerativeAI({ apiKey });
         const model = google(this.modelName);
@@ -85,13 +87,24 @@
                 yield { content: chunk };
             }
         };
+        // Create analytics promise that resolves after stream completion
+        const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+            requestId: `google-ai-stream-${Date.now()}`,
+            streamingMode: true,
+        });
         return {
             stream: transformedStream(),
             provider: this.providerName,
             model: this.modelName,
+            analytics: analyticsPromise,
+            metadata: {
+                startTime,
+                streamId: `google-ai-${Date.now()}`,
+            },
         };
     }
     catch (error) {
+        timeoutController?.cleanup();
         throw this.handleProviderError(error);
     }
 }
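The transformedStream pattern used here (and again in the Mistral and OpenAI changes below) is a plain async generator that re-wraps an AI SDK text stream into { content } chunks. A self-contained sketch of the technique, independent of any provider:

    // Re-wrap any async iterable of strings into { content } chunks.
    async function* toContentChunks(
      textStream: AsyncIterable<string>,
    ): AsyncGenerator<{ content: string }> {
      for await (const chunk of textStream) {
        yield { content: chunk };
      }
    }

    // Works with any source of string chunks:
    async function* fakeTextStream() {
      yield "Hello, ";
      yield "world";
    }
    for await (const piece of toContentChunks(fakeTextStream())) {
      process.stdout.write(piece.content);
    }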
package/dist/providers/huggingFace.js
CHANGED
@@ -54,9 +54,21 @@ export class HuggingFaceProvider extends BaseProvider {
      * @returns false to disable tools by default until proper implementation
      */
     supportsTools() {
-        //
-        //
-        //
+        // IMPLEMENTATION STATUS (2025): HuggingFace tool calling remains limited
+        //
+        // Current State:
+        // - Function calling varies significantly across HF models
+        // - Many models treat tool schemas as conversation context
+        // - Requires model-specific implementation per architecture
+        //
+        // To Enable Tools:
+        // 1. Detect model capability via HF model card metadata
+        // 2. Implement model-specific tool schema formatting
+        // 3. Add custom response parsing for function call extraction
+        // 4. Create validation framework for tool parameter handling
+        // 5. Test extensively with supported models (Code Llama, Llama 3.1+)
+        //
+        // Until comprehensive implementation, tools disabled for reliability
         return false;
     }
     // executeGenerate removed - BaseProvider handles all generation with tools
package/dist/providers/mistral.js
CHANGED
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
 import { logger } from "../utils/logger.js";
 import { createAnalytics } from "../core/analytics.js";
 import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Configuration helpers - now using consolidated utility
 const getMistralApiKey = () => {
     return validateApiKey(createMistralConfig());
@@ -86,22 +87,33 @@ export class MistralProvider extends BaseProvider {
     async executeStream(options) {
         const startTime = Date.now();
         try {
-            const
+            const result = await streamText({
                 model: this.model,
-            prompt: options.
+                prompt: options.input.text,
                 temperature: options.temperature,
                 maxTokens: options.maxTokens,
+                tools: options.tools,
+                toolChoice: "auto",
+            });
+            // Transform stream to match StreamResult interface
+            const transformedStream = async function* () {
+                for await (const chunk of result.textStream) {
+                    yield { content: chunk };
+                }
+            };
+            // Create analytics promise that resolves after stream completion
+            const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+                requestId: `mistral-stream-${Date.now()}`,
+                streamingMode: true,
             });
             return {
-                stream: (
-                for await (const chunk of stream.textStream) {
-                    yield { content: chunk };
-                }
-                })(),
+                stream: transformedStream(),
                 provider: this.providerName,
                 model: this.modelName,
+                analytics: analyticsPromise,
                 metadata: {
                     startTime,
+                    streamId: `mistral-${Date.now()}`,
                 },
             };
         }
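The rebuilt Mistral executeStream uses the Vercel AI SDK's streamText; passing tools with toolChoice: "auto" lets the model decide when to invoke them. A standalone sketch assuming AI SDK v3-style APIs, which the surrounding code appears to use (the model id and tool are illustrative):

    import { streamText, tool } from "ai";
    import { mistral } from "@ai-sdk/mistral";
    import { z } from "zod";

    const result = await streamText({
      model: mistral("mistral-large-latest"),
      prompt: "What's the weather in Paris?",
      toolChoice: "auto", // the model decides whether to call a tool
      tools: {
        getWeather: tool({
          parameters: z.object({ city: z.string() }),
          execute: async ({ city }) => ({ city, tempC: 18 }),
        }),
      },
    });
    for await (const chunk of result.textStream) {
      process.stdout.write(chunk);
    }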
package/dist/providers/ollama.js
CHANGED
@@ -58,6 +58,9 @@ class OllamaLanguageModel {
         const messages = options
             .messages || [];
         const prompt = this.convertMessagesToPrompt(messages);
+        // Debug: Log what's being sent to Ollama
+        logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
+        logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
         const response = await fetch(`${this.baseUrl}/api/generate`, {
             method: "POST",
             headers: { "Content-Type": "application/json" },
@@ -77,12 +80,15 @@ class OllamaLanguageModel {
             throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
         }
         const data = await response.json();
+        // Debug: Log Ollama API response to understand empty content issue
+        logger.debug("[OllamaLanguageModel] API Response:", JSON.stringify(data, null, 2));
         return {
             text: data.response,
             usage: {
-                promptTokens: this.estimateTokens(prompt),
-                completionTokens: this.estimateTokens(data.response),
-                totalTokens: this.estimateTokens(prompt) +
+                promptTokens: data.prompt_eval_count || this.estimateTokens(prompt),
+                completionTokens: data.eval_count || this.estimateTokens(data.response),
+                totalTokens: (data.prompt_eval_count || this.estimateTokens(prompt)) +
+                    (data.eval_count || this.estimateTokens(data.response)),
             },
             finishReason: "stop",
             rawCall: {
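Ollama's /api/generate response reports real token counts in prompt_eval_count and eval_count, which the change above now prefers over character-based estimates. A sketch of the fallback pattern; estimateTokens here is a stand-in for the class's private heuristic:

    // Fields returned by Ollama's /api/generate alongside `response`.
    interface OllamaGenerateResponse {
      response: string;
      prompt_eval_count?: number;
      eval_count?: number;
    }

    // Stand-in heuristic: roughly 4 characters per token.
    const estimateTokens = (text: string): number => Math.ceil(text.length / 4);

    function usageFrom(data: OllamaGenerateResponse, prompt: string) {
      const promptTokens = data.prompt_eval_count ?? estimateTokens(prompt);
      const completionTokens = data.eval_count ?? estimateTokens(data.response);
      return { promptTokens, completionTokens, totalTokens: promptTokens + completionTokens };
    }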
@@ -179,7 +185,8 @@ class OllamaLanguageModel {
             type: "finish",
             finishReason: "stop",
             usage: {
-                promptTokens:
+                promptTokens: data.prompt_eval_count ||
+                    this.estimateTokens(data.context || ""),
                 completionTokens: data.eval_count || 0,
             },
         };
@@ -263,9 +270,26 @@ export class OllamaProvider extends BaseProvider {
      * @returns false to disable tools by default
      */
     supportsTools() {
-        //
-        //
-        //
+        // IMPLEMENTATION STATUS (2025): Ollama function calling actively evolving
+        //
+        // Current State:
+        // - Function calling added in Ollama 2024, improving in 2025
+        // - Requires compatible models (Llama 3.1+, Code Llama variants)
+        // - AI SDK integration needs custom adapter for Ollama's tool format
+        //
+        // Technical Requirements:
+        // 1. Replace AI SDK with direct Ollama API tool calls
+        // 2. Implement Ollama-specific tool schema conversion
+        // 3. Add function response parsing from Ollama's JSON format
+        // 4. Handle streaming tool calls with incremental parsing
+        // 5. Validate model compatibility before enabling tools
+        //
+        // Implementation Path:
+        // - Use Ollama's chat API with 'tools' parameter
+        // - Parse tool_calls from response.message.tool_calls
+        // - Execute functions and return results to conversation
+        //
+        // Until Ollama-specific implementation, tools disabled for compatibility
         return false;
     }
     // executeGenerate removed - BaseProvider handles all generation with tools
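The implementation path in that comment maps onto Ollama's /api/chat endpoint, which accepts a tools array and reports requested calls in message.tool_calls. A hedged sketch of that flow (tool shape follows Ollama's documented JSON format; error handling omitted):

    const res = await fetch("http://localhost:11434/api/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: "llama3.1",
        stream: false,
        messages: [{ role: "user", content: "What's the weather in Paris?" }],
        tools: [{
          type: "function",
          function: {
            name: "get_weather",
            description: "Get current weather for a city",
            parameters: {
              type: "object",
              properties: { city: { type: "string" } },
              required: ["city"],
            },
          },
        }],
      }),
    });
    const data = await res.json();
    // Compatible models surface requested calls here; execute them and append the
    // results as `tool` role messages to continue the conversation.
    console.log(data.message?.tool_calls);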
package/dist/providers/openAI.js
CHANGED
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
 import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
 import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
 import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
+import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
 // Configuration helpers - now using consolidated utility
 const getOpenAIApiKey = () => {
     return validateApiKey(createOpenAIConfig());
@@ -69,6 +70,7 @@ export class OpenAIProvider extends BaseProvider {
      */
     async executeStream(options, analysisSchema) {
         this.validateStreamOptions(options);
+        const startTime = Date.now();
         const timeout = this.getTimeout(options);
         const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
         try {
@@ -89,10 +91,20 @@ export class OpenAIProvider extends BaseProvider {
                 yield { content: chunk };
             }
         };
+        // Create analytics promise that resolves after stream completion
+        const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+            requestId: `openai-stream-${Date.now()}`,
+            streamingMode: true,
+        });
         return {
             stream: transformedStream(),
             provider: this.providerName,
             model: this.modelName,
+            analytics: analyticsPromise,
+            metadata: {
+                startTime,
+                streamId: `openai-${Date.now()}`,
+            },
         };
     }
     catch (error) {
package/dist/sdk/toolRegistration.js
CHANGED
@@ -141,7 +141,7 @@ export function validateTool(name, tool) {
         `Example: { execute: async (params) => { return { success: true, data: result }; } }`);
     }
     // Check for common mistake: using 'schema' instead of 'parameters'
-    if (
+    if ("schema" in tool && !("parameters" in tool)) {
         throw new Error(`Tool '${name}' uses 'schema' property, but NeuroLink expects 'parameters'. ` +
             `Please change 'schema' to 'parameters' and use a Zod schema: ` +
             `{ parameters: z.object({ ... }), execute: ... } ` +
@@ -159,7 +159,7 @@ export function validateTool(name, tool) {
         typeof params.validate === "function" ||
         "_def" in params; // Zod schemas have _def property
     // Check for plain JSON schema objects (common mistake)
-    if (
+    if ("type" in params && "properties" in params && !hasValidationMethod) {
         throw new Error(`Tool '${name}' appears to use a plain JSON schema object as parameters. ` +
             `NeuroLink requires a Zod schema for proper type validation and tool integration. ` +
             `Please change from:\n` +
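The two guards above reject exactly the mistakes they describe: a 'schema' key instead of 'parameters', and a plain JSON schema object instead of a Zod schema. A sketch of the shape validateTool accepts (the tool itself is illustrative):

    import { z } from "zod";

    // Accepted: a Zod schema under `parameters` plus an async `execute`.
    const getTime = {
      description: "Get the current time for a timezone",
      parameters: z.object({ timezone: z.string() }),
      execute: async ({ timezone }: { timezone: string }) => ({
        success: true,
        data: new Date().toLocaleString("en-US", { timeZone: timezone }),
      }),
    };

    // Rejected: { schema: ... } or a raw { type: "object", properties: ... } object.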
package/dist/types/cli.d.ts
CHANGED
@@ -67,21 +67,76 @@ export interface StreamCommandArgs extends BaseCommandArgs {
     disableTools?: boolean;
 }
 /**
- * MCP command arguments
+ * MCP command arguments - Enhanced with transport and server management
  */
 export interface MCPCommandArgs extends BaseCommandArgs {
     /** MCP server name */
     server?: string;
+    /** MCP server name (alias for server) */
+    serverName?: string;
     /** Tool name to execute */
     tool?: string;
     /** Tool parameters as JSON string */
     params?: string;
     /** List available tools */
     list?: boolean;
+    /** List only specific category */
+    listOnly?: boolean;
     /** Discover MCP servers */
     discover?: boolean;
     /** Show server information */
     info?: boolean;
+    /** Transport type for server connection */
+    transport?: "stdio" | "websocket" | "tcp" | "unix";
+    /** Server description */
+    description?: string;
+    /** Command/executable for stdio transport */
+    command?: string;
+    /** Arguments for server command */
+    args?: string[];
+    /** Environment variables for server (JSON string) */
+    env?: string;
+    /** Server URL for network transports */
+    url?: string;
+    /** Server name for add command */
+    name?: string;
+    /** Show detailed information */
+    detailed?: boolean;
+    /** Force operation without confirmation */
+    force?: boolean;
+    /** Auto install discovered servers */
+    autoInstall?: boolean;
+    /** Discovery source */
+    source?: string;
+    /** Connection timeout */
+    timeout?: number;
+}
+/**
+ * Models command arguments - Enhanced for model management
+ */
+export interface ModelsCommandArgs extends BaseCommandArgs {
+    /** AI provider to query */
+    provider?: string;
+    /** Model capability filter */
+    query?: string;
+    /** Model use case filter */
+    useCase?: string;
+    /** Require vision capability */
+    requireVision?: boolean;
+    /** Require function calling capability */
+    requireFunctionCalling?: boolean;
+    /** List all available models */
+    list?: boolean;
+    /** Show model statistics */
+    stats?: boolean;
+    /** Show model pricing */
+    pricing?: boolean;
+    /** Resolve best model for criteria */
+    resolve?: boolean;
+    /** Maximum cost filter */
+    maxCost?: number;
+    /** Maximum tokens filter */
+    maxTokens?: number;
 }
 /**
  * Ollama command arguments
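Since ModelsCommandArgs is a plain interface, command handlers can be typed directly against it. An illustrative sketch, assuming the type is re-exported from the package root and that BaseCommandArgs adds no required fields:

    import type { ModelsCommandArgs } from "@juspay/neurolink";

    // Ask the models command to resolve the cheapest function-calling model.
    const args: ModelsCommandArgs = {
      provider: "openai",
      requireFunctionCalling: true,
      resolve: true,
      maxCost: 0.01,
    };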