@juspay/neurolink 7.10.3 → 7.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/CHANGELOG.md +6 -0
  2. package/dist/config/types.d.ts +14 -0
  3. package/dist/config/types.js +6 -0
  4. package/dist/core/baseProvider.d.ts +45 -340
  5. package/dist/core/baseProvider.js +205 -30
  6. package/dist/core/types.d.ts +4 -0
  7. package/dist/factories/providerFactory.js +1 -1
  8. package/dist/factories/providerRegistry.js +8 -8
  9. package/dist/lib/config/types.d.ts +14 -0
  10. package/dist/lib/config/types.js +6 -0
  11. package/dist/lib/core/baseProvider.d.ts +45 -340
  12. package/dist/lib/core/baseProvider.js +205 -30
  13. package/dist/lib/core/types.d.ts +4 -0
  14. package/dist/lib/factories/providerFactory.js +1 -1
  15. package/dist/lib/factories/providerRegistry.js +8 -8
  16. package/dist/lib/mcp/servers/agent/directToolsServer.js +80 -68
  17. package/dist/lib/mcp/toolRegistry.js +8 -2
  18. package/dist/lib/neurolink.js +20 -0
  19. package/dist/lib/providers/amazonBedrock.d.ts +0 -1
  20. package/dist/lib/providers/amazonBedrock.js +0 -13
  21. package/dist/lib/providers/anthropic.js +8 -25
  22. package/dist/lib/providers/googleAiStudio.d.ts +0 -1
  23. package/dist/lib/providers/googleAiStudio.js +10 -15
  24. package/dist/lib/providers/googleVertex.d.ts +0 -1
  25. package/dist/lib/providers/googleVertex.js +17 -24
  26. package/dist/lib/providers/huggingFace.d.ts +0 -1
  27. package/dist/lib/providers/huggingFace.js +0 -8
  28. package/dist/lib/providers/litellm.d.ts +0 -1
  29. package/dist/lib/providers/litellm.js +0 -8
  30. package/dist/lib/providers/mistral.d.ts +9 -24
  31. package/dist/lib/providers/mistral.js +44 -82
  32. package/dist/lib/providers/ollama.d.ts +0 -1
  33. package/dist/lib/providers/ollama.js +0 -12
  34. package/dist/lib/providers/openAI.d.ts +2 -3
  35. package/dist/lib/providers/openAI.js +12 -20
  36. package/dist/lib/providers/openaiCompatible.d.ts +0 -1
  37. package/dist/lib/providers/openaiCompatible.js +0 -8
  38. package/dist/lib/utils/toolUtils.d.ts +32 -0
  39. package/dist/lib/utils/toolUtils.js +60 -0
  40. package/dist/mcp/servers/agent/directToolsServer.js +80 -68
  41. package/dist/mcp/toolRegistry.js +8 -2
  42. package/dist/neurolink.js +20 -0
  43. package/dist/providers/amazonBedrock.d.ts +0 -1
  44. package/dist/providers/amazonBedrock.js +0 -13
  45. package/dist/providers/anthropic.js +8 -25
  46. package/dist/providers/googleAiStudio.d.ts +0 -1
  47. package/dist/providers/googleAiStudio.js +10 -15
  48. package/dist/providers/googleVertex.d.ts +0 -1
  49. package/dist/providers/googleVertex.js +17 -24
  50. package/dist/providers/huggingFace.d.ts +0 -1
  51. package/dist/providers/huggingFace.js +0 -8
  52. package/dist/providers/litellm.d.ts +0 -1
  53. package/dist/providers/litellm.js +0 -8
  54. package/dist/providers/mistral.d.ts +9 -24
  55. package/dist/providers/mistral.js +44 -82
  56. package/dist/providers/ollama.d.ts +0 -1
  57. package/dist/providers/ollama.js +0 -12
  58. package/dist/providers/openAI.d.ts +2 -3
  59. package/dist/providers/openAI.js +12 -20
  60. package/dist/providers/openaiCompatible.d.ts +0 -1
  61. package/dist/providers/openaiCompatible.js +0 -8
  62. package/dist/utils/toolUtils.d.ts +32 -0
  63. package/dist/utils/toolUtils.js +60 -0
  64. package/package.json +1 -1
@@ -1,5 +1,6 @@
- import { type LanguageModelV1 } from "ai";
- import type { AIProviderName, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+ import type { ZodType, ZodTypeDef } from "zod";
+ import { type Schema, type LanguageModelV1 } from "ai";
+ import type { AIProviderName } from "../core/types.js";
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
  import { BaseProvider } from "../core/baseProvider.js";
  /**
@@ -9,30 +10,14 @@ import { BaseProvider } from "../core/baseProvider.js";
  export declare class MistralProvider extends BaseProvider {
  private model;
  constructor(modelName?: string, sdk?: unknown);
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+ protected getProviderName(): AIProviderName;
+ protected getDefaultModel(): string;
  /**
- * Generate text using Mistral API
+ * Returns the Vercel AI SDK model instance for Mistral
  */
- generate(options: TextGenerationOptions): Promise<EnhancedGenerateResult>;
- /**
- * Stream text generation using Mistral API
- */
- executeStream(options: StreamOptions): Promise<StreamResult>;
- /**
- * Get default model name for this provider
- */
- getDefaultModel(): string;
- /**
- * Get provider name
- */
- getProviderName(): AIProviderName;
- /**
- * Get AI SDK model instance
- */
- getAISDKModel(): LanguageModelV1;
- /**
- * Handle provider-specific errors
- */
- handleProviderError(error: unknown): Error;
+ protected getAISDKModel(): LanguageModelV1;
+ protected handleProviderError(error: unknown): Error;
  /**
  * Validate provider configuration
  */
@@ -2,7 +2,8 @@ import { createMistral } from "@ai-sdk/mistral";
  import { streamText } from "ai";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
- import { createAnalytics } from "../core/analytics.js";
+ import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
+ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createMistralConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  // Configuration helpers - now using consolidated utility
@@ -29,85 +30,43 @@ export class MistralProvider extends BaseProvider {
  const mistral = createMistral({
  apiKey: apiKey,
  });
- this.model = mistral(this.modelName || getDefaultMistralModel());
+ this.model = mistral(this.modelName);
  logger.debug("Mistral Provider v2 initialized", {
  modelName: this.modelName,
  providerName: this.providerName,
  });
  }
- /**
- * Generate text using Mistral API
- */
- async generate(options) {
- const startTime = Date.now();
- try {
- const result = await this.model.doGenerate({
- inputFormat: "prompt",
- mode: { type: "regular" },
- prompt: [
- {
- role: "user",
- content: [{ type: "text", text: options.prompt || "" }],
- },
- ],
- temperature: options.temperature,
- maxTokens: options.maxTokens,
- });
- const responseTime = Date.now() - startTime;
- // Extract token usage and text content
- const tokenUsage = result.usage;
- const textContent = result.text || "";
- // Create analytics data using helper
- const analytics = createAnalytics("mistral", this.modelName, { usage: tokenUsage, content: textContent }, responseTime, { requestId: `mistral-${Date.now()}` });
- return {
- content: textContent,
- usage: {
- inputTokens: tokenUsage?.promptTokens || 0,
- outputTokens: tokenUsage?.completionTokens || 0,
- totalTokens: (tokenUsage?.promptTokens || 0) +
- (tokenUsage?.completionTokens || 0),
- },
- provider: this.providerName,
- model: this.modelName,
- analytics,
- };
- }
- catch (error) {
- const responseTime = Date.now() - startTime;
- logger.error("Mistral generation failed", {
- error: error instanceof Error ? error.message : String(error),
- responseTime,
- });
- throw new Error(`Mistral generation failed: ${error instanceof Error ? error.message : String(error)}`);
- }
- }
- /**
- * Stream text generation using Mistral API
- */
- async executeStream(options) {
+ // generate() method is inherited from BaseProvider; this provider uses the base implementation for generation with tools
+ async executeStream(options, analysisSchema) {
+ this.validateStreamOptions(options);
  const startTime = Date.now();
+ const timeout = this.getTimeout(options);
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
+ // Get tools consistently with generate method
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ const tools = shouldUseTools ? await this.getAllTools() : {};
  const result = await streamText({
  model: this.model,
  prompt: options.input.text,
+ system: options.systemPrompt,
  temperature: options.temperature,
- maxTokens: options.maxTokens,
- tools: options.tools,
- toolChoice: "auto",
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ tools,
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+ toolChoice: shouldUseTools ? "auto" : "none",
+ abortSignal: timeoutController?.controller.signal,
  });
- // Transform stream to match StreamResult interface
- const transformedStream = async function* () {
- for await (const chunk of result.textStream) {
- yield { content: chunk };
- }
- };
+ timeoutController?.cleanup();
+ // Transform string stream to content object stream using BaseProvider method
+ const transformedStream = this.createTextStream(result);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
  requestId: `mistral-stream-${Date.now()}`,
  streamingMode: true,
  });
  return {
- stream: transformedStream(),
+ stream: transformedStream,
  provider: this.providerName,
  model: this.modelName,
  analytics: analyticsPromise,
@@ -118,38 +77,41 @@ export class MistralProvider extends BaseProvider {
  };
  }
  catch (error) {
- logger.error("Mistral streaming failed", {
- error: error instanceof Error ? error.message : String(error),
- });
- throw new Error(`Mistral streaming failed: ${error instanceof Error ? error.message : String(error)}`);
+ timeoutController?.cleanup();
+ throw this.handleProviderError(error);
  }
  }
- /**
- * Get default model name for this provider
- */
- getDefaultModel() {
- return getDefaultMistralModel();
- }
- /**
- * Get provider name
- */
+ // ===================
+ // ABSTRACT METHOD IMPLEMENTATIONS
+ // ===================
  getProviderName() {
  return this.providerName;
  }
+ getDefaultModel() {
+ return getDefaultMistralModel();
+ }
  /**
- * Get AI SDK model instance
+ * Returns the Vercel AI SDK model instance for Mistral
  */
  getAISDKModel() {
  return this.model;
  }
- /**
- * Handle provider-specific errors
- */
  handleProviderError(error) {
- if (error instanceof Error) {
- return error;
+ if (error instanceof TimeoutError) {
+ return new Error(`Mistral request timed out: ${error.message}`);
+ }
+ const errorRecord = error;
+ const message = typeof errorRecord?.message === "string"
+ ? errorRecord.message
+ : "Unknown error";
+ if (message.includes("API_KEY_INVALID") ||
+ message.includes("Invalid API key")) {
+ return new Error("Invalid Mistral API key. Please check your MISTRAL_API_KEY environment variable.");
+ }
+ if (message.includes("rate limit")) {
+ return new Error("Mistral rate limit exceeded. Please try again later.");
  }
- return new Error(`Mistral provider error: ${String(error)}`);
+ return new Error(`Mistral error: ${message}`);
  }
  /**
  * Validate provider configuration
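
The rewritten `executeStream` above follows the same pattern now used by every provider in this release: gate tool usage, guard the `streamText` call with an abort signal derived from a timeout, clean the timer up once the call returns, and hand the text stream back as `{ content }` chunks. A minimal standalone sketch of that pattern, using a plain `AbortController` in place of the package's `createTimeoutController` helper (the function below is illustrative, not part of the package):

```typescript
import { streamText, type LanguageModelV1 } from "ai";

// Illustrative sketch of the streaming pattern adopted in this release:
// a timeout-guarded streamText call, then re-shaping the text stream into
// { content } chunks the way the inherited createTextStream helper does.
async function streamWithTimeout(
  model: LanguageModelV1,
  prompt: string,
  timeoutMs = 30_000,
): Promise<AsyncIterable<{ content: string }>> {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const result = await streamText({
      model,
      prompt,
      abortSignal: controller.signal,
    });
    return (async function* () {
      for await (const chunk of result.textStream) {
        yield { content: chunk };
      }
    })();
  } finally {
    // Mirrors timeoutController?.cleanup() after streamText returns.
    clearTimeout(timer);
  }
}
```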
@@ -75,7 +75,6 @@ export declare class OllamaProvider extends BaseProvider {
  */
  private createOllamaStream;
  protected handleProviderError(error: unknown): Error;
- private validateStreamOptions;
  /**
  * Check if Ollama service is healthy and accessible
  */
@@ -560,18 +560,6 @@ export class OllamaProvider extends BaseProvider {
  }
  return new Error(`❌ Ollama Provider Error\n\n${error.message || "Unknown error occurred"}\n\n🔧 Troubleshooting:\n1. Check if Ollama service is running\n2. Verify model is installed: 'ollama list'\n3. Check network connectivity to ${this.baseUrl}\n4. Review Ollama logs for details`);
  }
- validateStreamOptions(options) {
- if (!options.input?.text?.trim()) {
- throw new Error("Prompt is required for streaming");
- }
- if (options.maxTokens && options.maxTokens < 1) {
- throw new Error("maxTokens must be greater than 0");
- }
- if (options.temperature &&
- (options.temperature < 0 || options.temperature > 2)) {
- throw new Error("temperature must be between 0 and 2");
- }
- }
  /**
  * Check if Ollama service is healthy and accessible
  */
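
The `validateStreamOptions` copy removed here (and the copies removed from the OpenAI and OpenAI-compatible providers below) is replaced by a single call to `this.validateStreamOptions(options)` on BaseProvider, as seen in the Mistral diff above. The shared implementation is not included in this diff; a sketch that simply reuses the checks deleted here might look like:

```typescript
// Hypothetical shared validator reusing the checks removed from the
// per-provider copies; the actual BaseProvider code is not in this diff.
interface StreamValidationOptions {
  input?: { text?: string };
  maxTokens?: number;
  temperature?: number;
}

function validateStreamOptions(options: StreamValidationOptions): void {
  if (!options.input?.text?.trim()) {
    throw new Error("Prompt is required for streaming");
  }
  if (options.maxTokens !== undefined && options.maxTokens < 1) {
    throw new Error("maxTokens must be greater than 0");
  }
  if (
    options.temperature !== undefined &&
    (options.temperature < 0 || options.temperature > 2)
  ) {
    throw new Error("temperature must be between 0 and 2");
  }
}
```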
@@ -2,14 +2,14 @@ import type { ZodType, ZodTypeDef } from "zod";
  import { type Schema, type LanguageModelV1 } from "ai";
  import { AIProviderName } from "../core/types.js";
  import type { StreamOptions, StreamResult } from "../types/streamTypes.js";
- import { BaseProvider } from "../core/baseProvider.js";
+ import { BaseProvider, type NeuroLinkSDK } from "../core/baseProvider.js";
  /**
  * OpenAI Provider v2 - BaseProvider Implementation
  * Migrated to use factory pattern with exact Google AI provider pattern
  */
  export declare class OpenAIProvider extends BaseProvider {
  private model;
- constructor(modelName?: string);
+ constructor(modelName?: string, sdk?: NeuroLinkSDK);
  protected getProviderName(): AIProviderName;
  protected getDefaultModel(): string;
  /**
@@ -23,6 +23,5 @@ export declare class OpenAIProvider extends BaseProvider {
  * and the migration guide in the project repository.
  */
  protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
- private validateStreamOptions;
  }
  export default OpenAIProvider;
@@ -4,7 +4,7 @@ import { AIProviderName } from "../core/types.js";
  import { BaseProvider } from "../core/baseProvider.js";
  import { logger } from "../utils/logger.js";
  import { createTimeoutController, TimeoutError, } from "../utils/timeout.js";
- import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
+ import { DEFAULT_MAX_TOKENS, DEFAULT_MAX_STEPS } from "../core/constants.js";
  import { validateApiKey, createOpenAIConfig, getProviderModel, } from "../utils/providerConfig.js";
  import { streamAnalyticsCollector } from "../core/streamAnalytics.js";
  // Configuration helpers - now using consolidated utility
@@ -20,8 +20,8 @@ const getOpenAIModel = () => {
  */
  export class OpenAIProvider extends BaseProvider {
  model;
- constructor(modelName) {
- super(modelName, AIProviderName.OPENAI);
+ constructor(modelName, sdk) {
+ super(modelName || getOpenAIModel(), AIProviderName.OPENAI, sdk);
  // Set OpenAI API key as environment variable (required by @ai-sdk/openai)
  process.env.OPENAI_API_KEY = getOpenAIApiKey();
  // Initialize model
@@ -74,30 +74,30 @@ export class OpenAIProvider extends BaseProvider {
  const timeout = this.getTimeout(options);
  const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
+ // Get tools consistently with generate method
+ const shouldUseTools = !options.disableTools && this.supportsTools();
+ const tools = shouldUseTools ? await this.getAllTools() : {};
  const result = await streamText({
  model: this.model,
  prompt: options.input.text,
  system: options.systemPrompt,
  temperature: options.temperature,
  maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
- tools: options.tools,
- toolChoice: "auto",
+ tools,
+ maxSteps: options.maxSteps || DEFAULT_MAX_STEPS,
+ toolChoice: shouldUseTools ? "auto" : "none",
  abortSignal: timeoutController?.controller.signal,
  });
  timeoutController?.cleanup();
- // Transform stream to match StreamResult interface
- const transformedStream = async function* () {
- for await (const chunk of result.textStream) {
- yield { content: chunk };
- }
- };
+ // Transform stream to match StreamResult interface using BaseProvider method
+ const transformedStream = this.createTextStream(result);
  // Create analytics promise that resolves after stream completion
  const analyticsPromise = streamAnalyticsCollector.createAnalytics(this.providerName, this.modelName, result, Date.now() - startTime, {
  requestId: `openai-stream-${Date.now()}`,
  streamingMode: true,
  });
  return {
- stream: transformedStream(),
+ stream: transformedStream,
  provider: this.providerName,
  model: this.modelName,
  analytics: analyticsPromise,
@@ -112,14 +112,6 @@ export class OpenAIProvider extends BaseProvider {
  throw this.handleProviderError(error);
  }
  }
- // ===================
- // PRIVATE VALIDATION METHODS
- // ===================
- validateStreamOptions(options) {
- if (!options.input?.text || options.input.text.trim().length === 0) {
- throw new Error("Input text is required and cannot be empty");
- }
- }
  }
  // Export for factory registration
  export default OpenAIProvider;
@@ -45,5 +45,4 @@ export declare class OpenAICompatibleProvider extends BaseProvider {
  * Fallback models when discovery fails
  */
  private getFallbackModels;
- private validateStreamOptions;
  }
@@ -249,12 +249,4 @@ export class OpenAICompatibleProvider extends BaseProvider {
  "gemini-pro",
  ];
  }
- // ===================
- // PRIVATE VALIDATION METHODS
- // ===================
- validateStreamOptions(options) {
- if (!options.input?.text || options.input.text.trim().length === 0) {
- throw new Error("Input text is required and cannot be empty");
- }
- }
  }
@@ -0,0 +1,32 @@
+ /**
+ * Tool Utilities - Centralized tool configuration access
+ *
+ * Consolidates environment variable access to avoid scattered process.env calls
+ */
+ import type { ToolConfig } from "../config/types.js";
+ /**
+ * Check if built-in tools should be disabled
+ * Centralized function to replace direct process.env access
+ *
+ * @param toolConfig - Optional tool configuration (if available from config)
+ * @returns true if built-in tools should be disabled
+ */
+ export declare function shouldDisableBuiltinTools(toolConfig?: ToolConfig): boolean;
+ /**
+ * Check if custom tools should be allowed
+ * @param toolConfig - Optional tool configuration
+ * @returns true if custom tools should be allowed
+ */
+ export declare function shouldAllowCustomTools(toolConfig?: ToolConfig): boolean;
+ /**
+ * Check if MCP tools should be enabled
+ * @param toolConfig - Optional tool configuration
+ * @returns true if MCP tools should be enabled
+ */
+ export declare function shouldEnableMCPTools(toolConfig?: ToolConfig): boolean;
+ /**
+ * Get maximum tools per provider
+ * @param toolConfig - Optional tool configuration
+ * @returns maximum number of tools per provider
+ */
+ export declare function getMaxToolsPerProvider(toolConfig?: ToolConfig): number;
@@ -0,0 +1,60 @@
+ /**
+ * Tool Utilities - Centralized tool configuration access
+ *
+ * Consolidates environment variable access to avoid scattered process.env calls
+ */
+ /**
+ * Check if built-in tools should be disabled
+ * Centralized function to replace direct process.env access
+ *
+ * @param toolConfig - Optional tool configuration (if available from config)
+ * @returns true if built-in tools should be disabled
+ */
+ export function shouldDisableBuiltinTools(toolConfig) {
+ // Priority: explicit config > environment variable > default (false)
+ if (toolConfig?.disableBuiltinTools !== undefined) {
+ return toolConfig.disableBuiltinTools;
+ }
+ // Single source of truth for environment variable access
+ return process.env.NEUROLINK_DISABLE_BUILTIN_TOOLS === "true";
+ }
+ /**
+ * Check if custom tools should be allowed
+ * @param toolConfig - Optional tool configuration
+ * @returns true if custom tools should be allowed
+ */
+ export function shouldAllowCustomTools(toolConfig) {
+ if (toolConfig?.allowCustomTools !== undefined) {
+ return toolConfig.allowCustomTools;
+ }
+ return process.env.NEUROLINK_DISABLE_CUSTOM_TOOLS !== "true";
+ }
+ /**
+ * Check if MCP tools should be enabled
+ * @param toolConfig - Optional tool configuration
+ * @returns true if MCP tools should be enabled
+ */
+ export function shouldEnableMCPTools(toolConfig) {
+ if (toolConfig?.enableMCPTools !== undefined) {
+ return toolConfig.enableMCPTools;
+ }
+ return process.env.NEUROLINK_DISABLE_MCP_TOOLS !== "true";
+ }
+ /**
+ * Get maximum tools per provider
+ * @param toolConfig - Optional tool configuration
+ * @returns maximum number of tools per provider
+ */
+ export function getMaxToolsPerProvider(toolConfig) {
+ if (toolConfig?.maxToolsPerProvider !== undefined) {
+ return toolConfig.maxToolsPerProvider;
+ }
+ const envMax = process.env.NEUROLINK_MAX_TOOLS_PER_PROVIDER;
+ if (envMax) {
+ const parsed = parseInt(envMax, 10);
+ if (!isNaN(parsed) && parsed > 0) {
+ return parsed;
+ }
+ }
+ return 100; // Default
+ }
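
These helpers establish one precedence order for tool configuration: an explicit ToolConfig field wins, then the corresponding NEUROLINK_* environment variable, then the default. A brief usage sketch (import path illustrative; it assumes ToolConfig's fields are optional, since the full type lives in config/types.ts and is not part of this diff):

```typescript
// Import path is illustrative; the helpers ship in dist/utils/toolUtils.js.
import {
  getMaxToolsPerProvider,
  shouldAllowCustomTools,
  shouldDisableBuiltinTools,
  shouldEnableMCPTools,
} from "./toolUtils.js";

// With no argument, each helper falls back to its environment variable.
process.env.NEUROLINK_DISABLE_BUILTIN_TOOLS = "true";
console.log(shouldDisableBuiltinTools()); // true (from the env var)

// An explicit config value takes precedence over the environment.
console.log(shouldDisableBuiltinTools({ disableBuiltinTools: false })); // false
console.log(shouldAllowCustomTools({ allowCustomTools: false })); // false
console.log(shouldEnableMCPTools()); // true unless NEUROLINK_DISABLE_MCP_TOOLS === "true"

// Numeric limit: config > NEUROLINK_MAX_TOOLS_PER_PROVIDER > default of 100.
console.log(getMaxToolsPerProvider()); // 100 when nothing is configured
```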
@@ -5,6 +5,7 @@
  import { createMCPServer } from "../../factory.js";
  import { directAgentTools } from "../../../agent/directTools.js";
  import { logger } from "../../../utils/logger.js";
+ import { shouldDisableBuiltinTools } from "../../../utils/toolUtils.js";
  /**
  * Direct Tools Server - Agent direct tools for immediate use
  */
@@ -17,58 +18,77 @@ export const directToolsServer = createMCPServer({
  });
  /**
  * Wrap each direct tool and register it with the server
+ * Only register if built-in tools are not disabled
  */
- Object.entries(directAgentTools).forEach(([toolName, toolDef]) => {
- // The toolDef is a Vercel AI SDK Tool object
- // Extract properties from the Tool object
- const toolSpec = toolDef._spec || toolDef;
- const description = typeof toolSpec === "object" &&
- toolSpec &&
- "description" in toolSpec &&
- typeof toolSpec.description === "string"
- ? toolSpec.description
- : `Direct tool: ${toolName}`;
- const inputSchema = typeof toolSpec === "object" && toolSpec && "parameters" in toolSpec
- ? toolSpec.parameters
- : undefined;
- const execute = typeof toolSpec === "object" && toolSpec && "execute" in toolSpec
- ? toolSpec.execute
- : undefined;
- directToolsServer.registerTool({
- name: toolName,
- description: description,
- category: getToolCategory(toolName),
- inputSchema: inputSchema,
- isImplemented: true,
- execute: async (params, context) => {
- const startTime = Date.now();
- try {
- logger.debug(`[Direct Tools] Executing ${toolName} with params:`, params);
- // Execute the direct tool
- if (!execute || typeof execute !== "function") {
- throw new Error(`Tool ${toolName} has no execute function`);
+ if (!shouldDisableBuiltinTools()) {
+ Object.entries(directAgentTools).forEach(([toolName, toolDef]) => {
+ // The toolDef is a Vercel AI SDK Tool object
+ // Extract properties from the Tool object
+ const toolSpec = toolDef._spec || toolDef;
+ const description = typeof toolSpec === "object" &&
+ toolSpec &&
+ "description" in toolSpec &&
+ typeof toolSpec.description === "string"
+ ? toolSpec.description
+ : `Direct tool: ${toolName}`;
+ const inputSchema = typeof toolSpec === "object" && toolSpec && "parameters" in toolSpec
+ ? toolSpec.parameters
+ : undefined;
+ const execute = typeof toolSpec === "object" && toolSpec && "execute" in toolSpec
+ ? toolSpec.execute
+ : undefined;
+ directToolsServer.registerTool({
+ name: toolName,
+ description: description,
+ category: getToolCategory(toolName),
+ inputSchema: inputSchema,
+ isImplemented: true,
+ execute: async (params, context) => {
+ const startTime = Date.now();
+ try {
+ logger.debug(`[Direct Tools] Executing ${toolName} with params:`, params);
+ // Execute the direct tool
+ if (!execute || typeof execute !== "function") {
+ throw new Error(`Tool ${toolName} has no execute function`);
+ }
+ const result = await execute(params);
+ // Convert direct tool result to ToolResult format
+ if (result?.success) {
+ return {
+ success: true,
+ data: result.data || result,
+ usage: {
+ executionTime: Date.now() - startTime,
+ },
+ metadata: {
+ toolName,
+ serverId: "neurolink-direct",
+ sessionId: context.sessionId,
+ },
+ };
+ }
+ else {
+ return {
+ success: false,
+ data: null,
+ error: String(result?.error) || "Unknown error",
+ usage: {
+ executionTime: Date.now() - startTime,
+ },
+ metadata: {
+ toolName,
+ serverId: "neurolink-direct",
+ sessionId: context.sessionId,
+ },
+ };
+ }
  }
- const result = await execute(params);
- // Convert direct tool result to ToolResult format
- if (result?.success) {
- return {
- success: true,
- data: result.data || result,
- usage: {
- executionTime: Date.now() - startTime,
- },
- metadata: {
- toolName,
- serverId: "neurolink-direct",
- sessionId: context.sessionId,
- },
- };
- }
- else {
+ catch (error) {
+ logger.error(`[Direct Tools] Error executing ${toolName}:`, error);
  return {
  success: false,
  data: null,
- error: String(result?.error) || "Unknown error",
+ error: error instanceof Error ? error.message : String(error),
  usage: {
  executionTime: Date.now() - startTime,
  },
@@ -79,26 +99,13 @@ Object.entries(directAgentTools).forEach(([toolName, toolDef]) => {
  },
  };
  }
- }
- catch (error) {
- logger.error(`[Direct Tools] Error executing ${toolName}:`, error);
- return {
- success: false,
- data: null,
- error: error instanceof Error ? error.message : String(error),
- usage: {
- executionTime: Date.now() - startTime,
- },
- metadata: {
- toolName,
- serverId: "neurolink-direct",
- sessionId: context.sessionId,
- },
- };
- }
- },
+ },
+ });
  });
- });
+ }
+ else {
+ logger.info("[Direct Tools] Built-in tools disabled via configuration");
+ }
  /**
  * Get tool category based on tool name
  */
@@ -117,5 +124,10 @@ function getToolCategory(toolName) {
  return "utility";
  }
  }
- // Log successful registration
- logger.info(`[Direct Tools] Registered ${Object.keys(directAgentTools).length} direct tools`);
+ // Log successful registration or disable status
+ if (!shouldDisableBuiltinTools()) {
+ logger.info(`[Direct Tools] Registered ${Object.keys(directAgentTools).length} direct tools`);
+ }
+ else {
+ logger.info("[Direct Tools] 0 direct tools registered (disabled via environment variable)");
+ }
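
Because both the registration block and the log above run at module load, built-in direct tools can now be switched off before the server module is imported. A minimal sketch (the environment variable comes from toolUtils.js above; the deep import path is illustrative and may not match the package's export map):

```typescript
// Set the flag before the module is evaluated; directToolsServer.js checks
// shouldDisableBuiltinTools() at import time when registering tools.
process.env.NEUROLINK_DISABLE_BUILTIN_TOOLS = "true";

// Illustrative deep import mirroring the dist layout shown in this diff.
const { directToolsServer } = await import(
  "@juspay/neurolink/dist/mcp/servers/agent/directToolsServer.js"
);
// With the flag set, the server starts with no built-in tools registered and
// logs "[Direct Tools] 0 direct tools registered (disabled via environment variable)".
console.log(directToolsServer);
```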