@juspay/neurolink 7.0.0 → 7.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (101)
  1. package/CHANGELOG.md +15 -4
  2. package/README.md +16 -11
  3. package/dist/cli/commands/config.d.ts +2 -2
  4. package/dist/cli/commands/config.js +22 -21
  5. package/dist/cli/commands/mcp.d.ts +79 -0
  6. package/dist/cli/commands/mcp.js +916 -0
  7. package/dist/cli/commands/models.d.ts +63 -0
  8. package/dist/cli/commands/models.js +653 -0
  9. package/dist/cli/commands/ollama.js +56 -55
  10. package/dist/cli/factories/commandFactory.d.ts +67 -2
  11. package/dist/cli/factories/commandFactory.js +840 -92
  12. package/dist/cli/index.d.ts +6 -0
  13. package/dist/cli/index.js +42 -999
  14. package/dist/cli/utils/completeSetup.js +9 -8
  15. package/dist/cli/utils/envManager.js +7 -6
  16. package/dist/cli/utils/interactiveSetup.js +20 -19
  17. package/dist/core/analytics.js +25 -38
  18. package/dist/core/baseProvider.d.ts +8 -0
  19. package/dist/core/baseProvider.js +177 -68
  20. package/dist/core/constants.d.ts +11 -0
  21. package/dist/core/constants.js +17 -0
  22. package/dist/core/evaluation.js +25 -14
  23. package/dist/core/factory.js +21 -18
  24. package/dist/core/streamAnalytics.d.ts +65 -0
  25. package/dist/core/streamAnalytics.js +125 -0
  26. package/dist/factories/providerRegistry.js +3 -1
  27. package/dist/lib/core/analytics.js +25 -38
  28. package/dist/lib/core/baseProvider.d.ts +8 -0
  29. package/dist/lib/core/baseProvider.js +177 -68
  30. package/dist/lib/core/constants.d.ts +11 -0
  31. package/dist/lib/core/constants.js +17 -0
  32. package/dist/lib/core/evaluation.js +25 -14
  33. package/dist/lib/core/factory.js +22 -18
  34. package/dist/lib/core/streamAnalytics.d.ts +65 -0
  35. package/dist/lib/core/streamAnalytics.js +125 -0
  36. package/dist/lib/factories/providerRegistry.js +3 -1
  37. package/dist/lib/mcp/toolRegistry.d.ts +5 -0
  38. package/dist/lib/mcp/toolRegistry.js +60 -0
  39. package/dist/lib/models/modelRegistry.d.ts +132 -0
  40. package/dist/lib/models/modelRegistry.js +483 -0
  41. package/dist/lib/models/modelResolver.d.ts +115 -0
  42. package/dist/lib/models/modelResolver.js +467 -0
  43. package/dist/lib/neurolink.d.ts +4 -1
  44. package/dist/lib/neurolink.js +108 -69
  45. package/dist/lib/providers/anthropic.js +3 -0
  46. package/dist/lib/providers/googleAiStudio.js +13 -0
  47. package/dist/lib/providers/huggingFace.js +15 -3
  48. package/dist/lib/providers/mistral.js +19 -7
  49. package/dist/lib/providers/ollama.js +31 -7
  50. package/dist/lib/providers/openAI.js +12 -0
  51. package/dist/lib/sdk/toolRegistration.js +17 -0
  52. package/dist/lib/types/cli.d.ts +56 -1
  53. package/dist/lib/types/contextTypes.d.ts +110 -0
  54. package/dist/lib/types/contextTypes.js +176 -0
  55. package/dist/lib/types/index.d.ts +4 -1
  56. package/dist/lib/types/mcpTypes.d.ts +118 -7
  57. package/dist/lib/types/providers.d.ts +81 -0
  58. package/dist/lib/types/streamTypes.d.ts +44 -7
  59. package/dist/lib/types/tools.d.ts +9 -0
  60. package/dist/lib/types/universalProviderOptions.d.ts +3 -1
  61. package/dist/lib/types/universalProviderOptions.js +2 -1
  62. package/dist/lib/utils/logger.d.ts +7 -0
  63. package/dist/lib/utils/logger.js +16 -6
  64. package/dist/lib/utils/performance.d.ts +105 -0
  65. package/dist/lib/utils/performance.js +210 -0
  66. package/dist/lib/utils/providerUtils.js +9 -2
  67. package/dist/lib/utils/retryHandler.d.ts +89 -0
  68. package/dist/lib/utils/retryHandler.js +269 -0
  69. package/dist/mcp/toolRegistry.d.ts +5 -0
  70. package/dist/mcp/toolRegistry.js +60 -0
  71. package/dist/models/modelRegistry.d.ts +132 -0
  72. package/dist/models/modelRegistry.js +483 -0
  73. package/dist/models/modelResolver.d.ts +115 -0
  74. package/dist/models/modelResolver.js +468 -0
  75. package/dist/neurolink.d.ts +4 -1
  76. package/dist/neurolink.js +108 -69
  77. package/dist/providers/anthropic.js +3 -0
  78. package/dist/providers/googleAiStudio.js +13 -0
  79. package/dist/providers/huggingFace.js +15 -3
  80. package/dist/providers/mistral.js +19 -7
  81. package/dist/providers/ollama.js +31 -7
  82. package/dist/providers/openAI.js +12 -0
  83. package/dist/sdk/toolRegistration.js +17 -0
  84. package/dist/types/cli.d.ts +56 -1
  85. package/dist/types/contextTypes.d.ts +110 -0
  86. package/dist/types/contextTypes.js +177 -0
  87. package/dist/types/index.d.ts +4 -1
  88. package/dist/types/mcpTypes.d.ts +118 -7
  89. package/dist/types/providers.d.ts +81 -0
  90. package/dist/types/streamTypes.d.ts +44 -7
  91. package/dist/types/tools.d.ts +9 -0
  92. package/dist/types/universalProviderOptions.d.ts +3 -1
  93. package/dist/types/universalProviderOptions.js +3 -1
  94. package/dist/utils/logger.d.ts +7 -0
  95. package/dist/utils/logger.js +16 -6
  96. package/dist/utils/performance.d.ts +105 -0
  97. package/dist/utils/performance.js +210 -0
  98. package/dist/utils/providerUtils.js +9 -2
  99. package/dist/utils/retryHandler.d.ts +89 -0
  100. package/dist/utils/retryHandler.js +269 -0
  101. package/package.json +2 -1
@@ -15,6 +15,8 @@ catch (error) {
  }
  import { AIProviderFactory } from "./core/factory.js";
  import { mcpLogger } from "./utils/logger.js";
+ import { SYSTEM_LIMITS } from "./core/constants.js";
+ import pLimit from "p-limit";
  import { toolRegistry } from "./mcp/toolRegistry.js";
  import { logger } from "./utils/logger.js";
  import { getBestProvider } from "./utils/providerUtils.js";
@@ -258,7 +260,7 @@ export class NeuroLink {
  provider: providerName,
  usage: result.usage,
  responseTime,
- toolsUsed: [],
+ toolsUsed: result.toolsUsed || [],
  enhancedWithTools: true,
  availableTools: availableTools.length > 0 ? availableTools : undefined,
  // Include analytics and evaluation from BaseProvider
@@ -322,7 +324,7 @@ export class NeuroLink {
  model: result.model,
  usage: result.usage,
  responseTime,
- toolsUsed: [],
+ toolsUsed: result.toolsUsed || [],
  enhancedWithTools: false,
  analytics: result.analytics,
  evaluation: result.evaluation,
@@ -355,7 +357,7 @@ export class NeuroLink {
  const toolDescriptions = availableTools
  .map((tool) => `- ${tool.name}: ${tool.description} (from ${tool.server})`)
  .join("\n");
- const toolPrompt = `\n\nAvailable Tools:\n${toolDescriptions}\n\nYou can use these tools when appropriate to enhance your responses.`;
+ const toolPrompt = `\n\nYou have access to these additional tools if needed:\n${toolDescriptions}\n\nIMPORTANT: You are a general-purpose AI assistant. Answer all requests directly and creatively. These tools are optional helpers - use them only when they would genuinely improve your response. For creative tasks like storytelling, writing, or general conversation, respond naturally without requiring tools.`;
  return (originalSystemPrompt || "") + toolPrompt;
  }
  /**
@@ -414,14 +416,21 @@ export class NeuroLink {
  responseTime,
  provider: providerName,
  });
- // Convert to StreamResult format
+ // Convert to StreamResult format - Include analytics and evaluation from provider
  return {
  stream,
  provider: providerName,
  model: options.model,
+ usage: streamResult.usage,
+ finishReason: streamResult.finishReason,
+ toolCalls: streamResult.toolCalls,
+ toolResults: streamResult.toolResults,
+ analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data
+ evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data
  metadata: {
  streamId: `neurolink-${Date.now()}`,
  startTime,
+ responseTime,
  },
  };
  }
@@ -439,6 +448,12 @@ export class NeuroLink {
  stream: streamResult.stream,
  provider: providerName,
  model: options.model,
+ usage: streamResult.usage,
+ finishReason: streamResult.finishReason,
+ toolCalls: streamResult.toolCalls,
+ toolResults: streamResult.toolResults,
+ analytics: streamResult.analytics, // 🔧 FIX: Pass through analytics data in fallback
+ evaluation: streamResult.evaluation, // 🔧 FIX: Pass through evaluation data in fallback
  metadata: {
  streamId: `neurolink-${Date.now()}`,
  startTime,
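Note: both hunks above change the streaming path so that `usage`, `finishReason`, `toolCalls`, `toolResults`, `analytics`, and `evaluation` survive the conversion to StreamResult instead of being dropped. A minimal consumer sketch, assuming a `stream()` method on NeuroLink and an `input.text` option (assumptions; only the returned StreamResult fields come from this diff):

    import { NeuroLink } from "@juspay/neurolink";

    const neurolink = new NeuroLink();
    // Assumed call shape; the fields read off `result` are the ones passed through above.
    const result = await neurolink.stream({ input: { text: "Summarize the release notes." } });

    // Drain the async-iterable stream of { content } chunks first...
    for await (const chunk of result.stream) {
      process.stdout.write(chunk.content);
    }

    // ...then the passed-through fields are available on the same result object.
    console.log("\nprovider:", result.provider, "model:", result.model);
    console.log("analytics:", await result.analytics); // settles once the stream has finished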
@@ -649,9 +664,10 @@ export class NeuroLink {
  * @returns Array of available tools with metadata
  */
  async getAllAvailableTools() {
- // Simplified tool listing - removed initialize-tools dependency
- const tools = await toolRegistry.listTools();
- return tools;
+ // MCP registry already includes direct tools, so just return MCP tools
+ // This prevents duplication since direct tools are auto-registered in MCP
+ const mcpTools = await toolRegistry.listTools();
+ return mcpTools;
  }
  // ============================================================================
  // PROVIDER DIAGNOSTICS - SDK-First Architecture
@@ -660,7 +676,18 @@ export class NeuroLink {
  * Get comprehensive status of all AI providers
  * Primary method for provider health checking and diagnostics
  */
- async getProviderStatus() {
+ async getProviderStatus(options) {
+ // 🔧 PERFORMANCE: Track memory and timing for provider status checks
+ const { MemoryManager } = await import("./utils/performance.js");
+ const startMemory = MemoryManager.getMemoryUsageMB();
+ // CRITICAL FIX: Ensure providers are registered before testing
+ if (!options?.quiet) {
+ mcpLogger.debug("🔍 DEBUG: Initializing MCP for provider status...");
+ }
+ await this.initializeMCP();
+ if (!options?.quiet) {
+ mcpLogger.debug("🔍 DEBUG: MCP initialized:", this.mcpInitialized);
+ }
  const { AIProviderFactory } = await import("./core/factory.js");
  const { hasProviderEnvVars } = await import("./utils/providerUtils.js");
  const providers = [
@@ -675,97 +702,109 @@ export class NeuroLink {
  "ollama",
  "mistral",
  ];
- const results = [];
- for (const providerName of providers) {
+ // 🚀 PERFORMANCE FIX: Test providers with controlled concurrency
+ // This reduces total time from 16s (sequential) to ~3s (parallel) while preventing resource exhaustion
+ const limit = pLimit(SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT);
+ const providerTests = providers.map((providerName) => limit(async () => {
  const startTime = Date.now();
- // Check if provider has required environment variables
- const hasEnvVars = await this.hasProviderEnvVars(providerName);
- if (!hasEnvVars && providerName !== "ollama") {
- results.push({
- provider: providerName,
- status: "not-configured",
- configured: false,
- authenticated: false,
- error: "Missing required environment variables",
- responseTime: 0,
- });
- continue;
- }
- // Special handling for Ollama
- if (providerName === "ollama") {
- try {
- const response = await fetch("http://localhost:11434/api/tags", {
- method: "GET",
- signal: AbortSignal.timeout(2000),
- });
- if (!response.ok) {
- throw new Error("Ollama service not responding");
- }
- const { models } = await response.json();
- const defaultOllamaModel = "llama3.2:latest";
- const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
- if (modelIsAvailable) {
- results.push({
- provider: providerName,
- status: "working",
- configured: true,
- authenticated: true,
- responseTime: Date.now() - startTime,
- model: defaultOllamaModel,
+ try {
+ // Check if provider has required environment variables
+ const hasEnvVars = await this.hasProviderEnvVars(providerName);
+ if (!hasEnvVars && providerName !== "ollama") {
+ return {
+ provider: providerName,
+ status: "not-configured",
+ configured: false,
+ authenticated: false,
+ error: "Missing required environment variables",
+ responseTime: Date.now() - startTime,
+ };
+ }
+ // Special handling for Ollama
+ if (providerName === "ollama") {
+ try {
+ const response = await fetch("http://localhost:11434/api/tags", {
+ method: "GET",
+ signal: AbortSignal.timeout(2000),
  });
+ if (!response.ok) {
+ throw new Error("Ollama service not responding");
+ }
+ const { models } = await response.json();
+ const defaultOllamaModel = "llama3.2:latest";
+ const modelIsAvailable = models.some((m) => m.name === defaultOllamaModel);
+ if (modelIsAvailable) {
+ return {
+ provider: providerName,
+ status: "working",
+ configured: true,
+ authenticated: true,
+ responseTime: Date.now() - startTime,
+ model: defaultOllamaModel,
+ };
+ }
+ else {
+ return {
+ provider: providerName,
+ status: "failed",
+ configured: true,
+ authenticated: false,
+ error: `Ollama service running but model '${defaultOllamaModel}' not found`,
+ responseTime: Date.now() - startTime,
+ };
+ }
  }
- else {
- results.push({
+ catch (error) {
+ return {
  provider: providerName,
  status: "failed",
- configured: true,
+ configured: false,
  authenticated: false,
- error: `Ollama service running but model '${defaultOllamaModel}' not found`,
+ error: error instanceof Error
+ ? error.message
+ : "Ollama service not running",
  responseTime: Date.now() - startTime,
- });
+ };
  }
  }
- catch (error) {
- results.push({
- provider: providerName,
- status: "failed",
- configured: false,
- authenticated: false,
- error: error instanceof Error
- ? error.message
- : "Ollama service not running",
- responseTime: Date.now() - startTime,
- });
- }
- continue;
- }
- // Test other providers with actual generation call
- try {
+ // Test other providers with actual generation call
  const testTimeout = 5000;
  const testPromise = this.testProviderConnection(providerName);
  const timeoutPromise = new Promise((_, reject) => {
  setTimeout(() => reject(new Error("Provider test timeout (5s)")), testTimeout);
  });
  await Promise.race([testPromise, timeoutPromise]);
- results.push({
+ return {
  provider: providerName,
  status: "working",
  configured: true,
  authenticated: true,
  responseTime: Date.now() - startTime,
- });
+ };
  }
  catch (error) {
  const errorMessage = error instanceof Error ? error.message : String(error);
- results.push({
+ return {
  provider: providerName,
  status: "failed",
  configured: true,
  authenticated: false,
  error: errorMessage,
  responseTime: Date.now() - startTime,
- });
+ };
  }
+ }));
+ // Wait for all provider tests to complete in parallel
+ const results = await Promise.all(providerTests);
+ // 🔧 PERFORMANCE: Track memory usage and suggest cleanup if needed
+ const endMemory = MemoryManager.getMemoryUsageMB();
+ const memoryDelta = endMemory.heapUsed - startMemory.heapUsed;
+ if (!options?.quiet && memoryDelta > 20) {
+ mcpLogger.debug(`🔍 Memory usage: +${memoryDelta}MB (consider cleanup for large operations)`);
+ }
+ // Suggest garbage collection for large memory increases
+ if (memoryDelta > 50) {
+ MemoryManager.forceGC();
  }
  return results;
  }
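Note: the rewrite above replaces the sequential for-loop with p-limit-bounded parallel checks; each task returns its own status object and Promise.all collects them in input order. A minimal sketch of the same pattern, assuming a concurrency of 3 (the real value comes from SYSTEM_LIMITS.DEFAULT_CONCURRENCY_LIMIT):

    import pLimit from "p-limit";

    async function checkAll(providers: string[], check: (p: string) => Promise<string>) {
      const limit = pLimit(3); // at most 3 checks in flight at once
      const tasks = providers.map((name) =>
        limit(async () => {
          const started = Date.now();
          const status = await check(name);
          return { name, status, responseTime: Date.now() - started };
        }),
      );
      // Promise.all preserves input order, so results line up with `providers`.
      return Promise.all(tasks);
    }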
@@ -22,6 +22,9 @@ export class AnthropicProvider extends BaseProvider {
  super(modelName, "anthropic", sdk);
  // Initialize Anthropic model with API key validation
  const apiKey = getAnthropicApiKey();
+ // Set Anthropic API key as environment variable (required by @ai-sdk/anthropic)
+ process.env.ANTHROPIC_API_KEY = apiKey;
+ // Initialize Anthropic with proper configuration
  this.model = anthropic(this.modelName || getDefaultAnthropicModel());
  logger.debug("Anthropic Provider v2 initialized", {
  modelName: this.modelName,
@@ -5,6 +5,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  // Environment variable setup
  if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
  process.env.GOOGLE_AI_API_KEY) {
@@ -61,6 +62,7 @@ export class GoogleAIStudioProvider extends BaseProvider {
  // executeGenerate removed - BaseProvider handles all generation with tools
  async executeStream(options, analysisSchema) {
  this.validateStreamOptions(options);
+ const startTime = Date.now();
  const apiKey = this.getApiKey();
  const google = createGoogleGenerativeAI({ apiKey });
  const model = google(this.modelName);
@@ -84,13 +86,24 @@ export class GoogleAIStudioProvider extends BaseProvider {
  yield { content: chunk };
  }
  };
+ // Create analytics promise that resolves after stream completion
+ const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+ requestId: `google-ai-stream-${Date.now()}`,
+ streamingMode: true,
+ });
  return {
  stream: transformedStream(),
  provider: this.providerName,
  model: this.modelName,
+ analytics: analyticsPromise,
+ metadata: {
+ startTime,
+ streamId: `google-ai-${Date.now()}`,
+ },
  };
  }
  catch (error) {
+ timeoutController?.cleanup();
  throw this.handleProviderError(error);
  }
  }
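Note: this is the same deferred-analytics pattern that the Mistral and OpenAI hunks below adopt: executeStream now returns an `analytics` promise built by `streamAnalyticsCollector.createAnalytics(provider, model, result, elapsed, context)`. The collector's internals are not part of this diff; a plausible sketch, assuming it waits on the AI SDK's `usage` promise (field names are assumptions):

    type StreamLike = {
      usage: Promise<{ promptTokens: number; completionTokens: number; totalTokens: number }>;
    };

    async function collectStreamAnalytics(provider: string, model: string, result: StreamLike, elapsedMs: number) {
      // `usage` settles only after the last chunk has been emitted, so awaiting it here
      // defers analytics until the stream is complete without blocking the consumer.
      const usage = await result.usage;
      return { provider, model, tokenUsage: usage, responseTimeMs: elapsedMs };
    }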
@@ -54,9 +54,21 @@ export class HuggingFaceProvider extends BaseProvider {
  * @returns false to disable tools by default until proper implementation
  */
  supportsTools() {
- // TODO: Implement proper HuggingFace tool calling support
- // Requires: Custom tool schema formatting, response parsing, execution flow
- // Track models that support function calling: CodeLlama, Llama variants
+ // IMPLEMENTATION STATUS (2025): HuggingFace tool calling remains limited
+ //
+ // Current State:
+ // - Function calling varies significantly across HF models
+ // - Many models treat tool schemas as conversation context
+ // - Requires model-specific implementation per architecture
+ //
+ // To Enable Tools:
+ // 1. Detect model capability via HF model card metadata
+ // 2. Implement model-specific tool schema formatting
+ // 3. Add custom response parsing for function call extraction
+ // 4. Create validation framework for tool parameter handling
+ // 5. Test extensively with supported models (Code Llama, Llama 3.1+)
+ //
+ // Until comprehensive implementation, tools disabled for reliability
  return false;
  }
  // executeGenerate removed - BaseProvider handles all generation with tools
@@ -4,6 +4,7 @@ import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createAnalytics } from "../core/analytics.js";
  import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  // Configuration helpers - now using consolidated utility
  const getMistralApiKey = () => {
  return validateApiKey(createMistralConfig());
@@ -86,22 +87,33 @@ export class MistralProvider extends BaseProvider {
  async executeStream(options) {
  const startTime = Date.now();
  try {
- const stream = await streamText({
+ const result = await streamText({
  model: this.model,
- prompt: options.prompt || "",
+ prompt: options.input.text,
  temperature: options.temperature,
  maxTokens: options.maxTokens,
+ tools: options.tools,
+ toolChoice: "auto",
+ });
+ // Transform stream to match StreamResult interface
+ const transformedStream = async function* () {
+ for await (const chunk of result.textStream) {
+ yield { content: chunk };
+ }
+ };
+ // Create analytics promise that resolves after stream completion
+ const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+ requestId: `mistral-stream-${Date.now()}`,
+ streamingMode: true,
  });
  return {
- stream: (async function* () {
- for await (const chunk of stream.textStream) {
- yield { content: chunk };
- }
- })(),
+ stream: transformedStream(),
  provider: this.providerName,
  model: this.modelName,
+ analytics: analyticsPromise,
  metadata: {
  startTime,
+ streamId: `mistral-${Date.now()}`,
  },
  };
  }
@@ -57,6 +57,9 @@ class OllamaLanguageModel {
  const messages = options
  .messages || [];
  const prompt = this.convertMessagesToPrompt(messages);
+ // Debug: Log what's being sent to Ollama
+ logger.debug("[OllamaLanguageModel] Messages:", JSON.stringify(messages, null, 2));
+ logger.debug("[OllamaLanguageModel] Converted Prompt:", JSON.stringify(prompt));
  const response = await fetch(`${this.baseUrl}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
@@ -76,12 +79,15 @@ class OllamaLanguageModel {
  throw new Error(`Ollama API error: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
+ // Debug: Log Ollama API response to understand empty content issue
+ logger.debug("[OllamaLanguageModel] API Response:", JSON.stringify(data, null, 2));
  return {
  text: data.response,
  usage: {
- promptTokens: this.estimateTokens(prompt),
- completionTokens: this.estimateTokens(data.response),
- totalTokens: this.estimateTokens(prompt) + this.estimateTokens(data.response),
+ promptTokens: data.prompt_eval_count || this.estimateTokens(prompt),
+ completionTokens: data.eval_count || this.estimateTokens(data.response),
+ totalTokens: (data.prompt_eval_count || this.estimateTokens(prompt)) +
+ (data.eval_count || this.estimateTokens(data.response)),
  },
  finishReason: "stop",
  rawCall: {
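Note: the usage block above now prefers Ollama's own `prompt_eval_count` / `eval_count` counters from the /api/generate response and only falls back to estimation when they are missing. A standalone sketch of that accounting (the 4-characters-per-token estimator stands in for the package's estimateTokens, which is an assumption):

    // Rough fallback estimator, used only when Ollama omits its token counters.
    const estimateTokens = (text: string) => Math.ceil(text.length / 4);

    async function ollamaGenerate(baseUrl: string, model: string, prompt: string) {
      const res = await fetch(`${baseUrl}/api/generate`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model, prompt, stream: false }),
      });
      const data = await res.json();
      // Prefer the real counters reported by Ollama; estimate otherwise.
      const promptTokens = data.prompt_eval_count ?? estimateTokens(prompt);
      const completionTokens = data.eval_count ?? estimateTokens(data.response);
      return {
        text: data.response,
        usage: { promptTokens, completionTokens, totalTokens: promptTokens + completionTokens },
      };
    }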
@@ -178,7 +184,8 @@ class OllamaLanguageModel {
  type: "finish",
  finishReason: "stop",
  usage: {
- promptTokens: this.estimateTokens(data.context || ""),
+ promptTokens: data.prompt_eval_count ||
+ this.estimateTokens(data.context || ""),
  completionTokens: data.eval_count || 0,
  },
  };
@@ -262,9 +269,26 @@ export class OllamaProvider extends BaseProvider {
  * @returns false to disable tools by default
  */
  supportsTools() {
- // TODO: Fix the OllamaLanguageModel integration with BaseProvider for tool support.
- // Track progress on resolving this issue. See the detailed steps above.
- // Issue tracking required for enabling tool support
+ // IMPLEMENTATION STATUS (2025): Ollama function calling actively evolving
+ //
+ // Current State:
+ // - Function calling added in Ollama 2024, improving in 2025
+ // - Requires compatible models (Llama 3.1+, Code Llama variants)
+ // - AI SDK integration needs custom adapter for Ollama's tool format
+ //
+ // Technical Requirements:
+ // 1. Replace AI SDK with direct Ollama API tool calls
+ // 2. Implement Ollama-specific tool schema conversion
+ // 3. Add function response parsing from Ollama's JSON format
+ // 4. Handle streaming tool calls with incremental parsing
+ // 5. Validate model compatibility before enabling tools
+ //
+ // Implementation Path:
+ // - Use Ollama's chat API with 'tools' parameter
+ // - Parse tool_calls from response.message.tool_calls
+ // - Execute functions and return results to conversation
+ //
+ // Until Ollama-specific implementation, tools disabled for compatibility
  return false;
  }
  // executeGenerate removed - BaseProvider handles all generation with tools
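Note: the "Implementation Path" comments above point at Ollama's chat endpoint rather than the AI SDK adapter. A hedged sketch of that path, following Ollama's documented /api/chat `tools` parameter and `message.tool_calls` response field; none of this is implemented in the package yet, so treat the exact shapes as assumptions:

    async function ollamaChatWithTools(baseUrl: string, model: string, prompt: string) {
      // Hypothetical tool definition in the OpenAI-style function schema Ollama accepts.
      const tools = [{
        type: "function",
        function: {
          name: "get_time",
          description: "Return the current time",
          parameters: { type: "object", properties: {}, required: [] },
        },
      }];
      const res = await fetch(`${baseUrl}/api/chat`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ model, messages: [{ role: "user", content: prompt }], tools, stream: false }),
      });
      const data = await res.json();
      // Tool requests, if any, arrive on message.tool_calls; the caller would execute them
      // and feed the results back into the conversation as role:"tool" messages.
      return data.message?.tool_calls ?? [];
    }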
@@ -6,6 +6,7 @@ import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
+ import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  // Configuration helpers - now using consolidated utility
  const getOpenAIApiKey = () => {
  return validateApiKey(createOpenAIConfig());
@@ -69,6 +70,7 @@ export class OpenAIProvider extends BaseProvider {
  */
  async executeStream(options, analysisSchema) {
  this.validateStreamOptions(options);
+ const startTime = Date.now();
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
@@ -89,10 +91,20 @@ export class OpenAIProvider extends BaseProvider {
  yield { content: chunk };
  }
  };
+ // Create analytics promise that resolves after stream completion
+ const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
+ requestId: `openai-stream-${Date.now()}`,
+ streamingMode: true,
+ });
  return {
  stream: transformedStream(),
  provider: this.providerName,
  model: this.modelName,
+ analytics: analyticsPromise,
+ metadata: {
+ startTime,
+ streamId: `openai-${Date.now()}`,
+ },
  };
  }
  catch (error) {
@@ -139,6 +139,13 @@ export function validateTool(name, tool) {
  `Received: ${typeof tool.execute}. ` +
  `Example: { execute: async (params) => { return { success: true, data: result }; } }`);
  }
+ // Check for common mistake: using 'schema' instead of 'parameters'
+ if ("schema" in tool && !("parameters" in tool)) {
+ throw new Error(`Tool '${name}' uses 'schema' property, but NeuroLink expects 'parameters'. ` +
+ `Please change 'schema' to 'parameters' and use a Zod schema: ` +
+ `{ parameters: z.object({ ... }), execute: ... } ` +
+ `See documentation: https://docs.neurolink.com/tools`);
+ }
  // Validate parameters schema if provided - support both Zod and custom schemas
  if (tool.parameters) {
  if (typeof tool.parameters !== "object") {
@@ -150,6 +157,16 @@ export function validateTool(name, tool) {
  const hasValidationMethod = typeof params.parse === "function" ||
  typeof params.validate === "function" ||
  "_def" in params; // Zod schemas have _def property
+ // Check for plain JSON schema objects (common mistake)
+ if ("type" in params && "properties" in params && !hasValidationMethod) {
+ throw new Error(`Tool '${name}' appears to use a plain JSON schema object as parameters. ` +
+ `NeuroLink requires a Zod schema for proper type validation and tool integration. ` +
+ `Please change from:\n` +
+ ` { type: 'object', properties: { ... } }\n` +
+ `To:\n` +
+ ` z.object({ fieldName: z.string() })\n` +
+ `Import Zod with: import { z } from 'zod'`);
+ }
  if (!hasValidationMethod) {
  const errorMessage = typeof params.parse === "function" || "_def" in params
  ? `Tool '${name}' has a Zod-like schema but validation failed. Ensure it's a valid Zod schema: z.object({ ... })`
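Note: the two validateTool checks above reject a `schema` key and plain JSON-schema objects. A minimal definition that passes both checks looks like this (the weather tool itself is hypothetical):

    import { z } from "zod";

    // Passes validateTool: a Zod schema under `parameters` plus an async `execute`.
    const weatherTool = {
      description: "Look up the weather for a city",
      parameters: z.object({ city: z.string() }),
      execute: async ({ city }: { city: string }) => ({ success: true, data: `Sunny in ${city}` }),
    };

    // Rejected: `schema` instead of `parameters`.
    // const bad1 = { schema: z.object({ city: z.string() }), execute: async () => ({}) };

    // Rejected: a plain JSON schema object instead of a Zod schema.
    // const bad2 = { parameters: { type: "object", properties: { city: { type: "string" } } }, execute: async () => ({}) };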
@@ -67,21 +67,76 @@ export interface StreamCommandArgs extends BaseCommandArgs {
  disableTools?: boolean;
  }
  /**
- * MCP command arguments
+ * MCP command arguments - Enhanced with transport and server management
  */
  export interface MCPCommandArgs extends BaseCommandArgs {
  /** MCP server name */
  server?: string;
+ /** MCP server name (alias for server) */
+ serverName?: string;
  /** Tool name to execute */
  tool?: string;
  /** Tool parameters as JSON string */
  params?: string;
  /** List available tools */
  list?: boolean;
+ /** List only specific category */
+ listOnly?: boolean;
  /** Discover MCP servers */
  discover?: boolean;
  /** Show server information */
  info?: boolean;
+ /** Transport type for server connection */
+ transport?: "stdio" | "websocket" | "tcp" | "unix";
+ /** Server description */
+ description?: string;
+ /** Command/executable for stdio transport */
+ command?: string;
+ /** Arguments for server command */
+ args?: string[];
+ /** Environment variables for server (JSON string) */
+ env?: string;
+ /** Server URL for network transports */
+ url?: string;
+ /** Server name for add command */
+ name?: string;
+ /** Show detailed information */
+ detailed?: boolean;
+ /** Force operation without confirmation */
+ force?: boolean;
+ /** Auto install discovered servers */
+ autoInstall?: boolean;
+ /** Discovery source */
+ source?: string;
+ /** Connection timeout */
+ timeout?: number;
+ }
+ /**
+ * Models command arguments - Enhanced for model management
+ */
+ export interface ModelsCommandArgs extends BaseCommandArgs {
+ /** AI provider to query */
+ provider?: string;
+ /** Model capability filter */
+ query?: string;
+ /** Model use case filter */
+ useCase?: string;
+ /** Require vision capability */
+ requireVision?: boolean;
+ /** Require function calling capability */
+ requireFunctionCalling?: boolean;
+ /** List all available models */
+ list?: boolean;
+ /** Show model statistics */
+ stats?: boolean;
+ /** Show model pricing */
+ pricing?: boolean;
+ /** Resolve best model for criteria */
+ resolve?: boolean;
+ /** Maximum cost filter */
+ maxCost?: number;
+ /** Maximum tokens filter */
+ maxTokens?: number;
  }
  /**
  * Ollama command arguments
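Note: ModelsCommandArgs is the argument shape behind the new models command (dist/cli/commands/models.*). A hypothetical argument object that would satisfy the interface above; which CLI flags map onto which fields is an assumption not shown in this hunk:

    // Field names come from the interface above; the values are illustrative only.
    const modelsArgs /* : ModelsCommandArgs */ = {
      provider: "openai",
      requireFunctionCalling: true,
      maxCost: 0.01, // maximum-cost filter
      resolve: true, // ask the resolver to pick the best matching model
    };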