@juspay/neurolink 5.1.0 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (166)
  1. package/CHANGELOG.md +15 -9
  2. package/README.md +123 -126
  3. package/dist/agent/direct-tools.d.ts +6 -6
  4. package/dist/cli/commands/config.d.ts +3 -3
  5. package/dist/cli/commands/mcp.js +8 -7
  6. package/dist/cli/factories/command-factory.d.ts +4 -0
  7. package/dist/cli/factories/command-factory.js +57 -3
  8. package/dist/cli/index.js +87 -140
  9. package/dist/core/base-provider.d.ts +423 -0
  10. package/dist/core/base-provider.js +365 -0
  11. package/dist/core/constants.d.ts +1 -1
  12. package/dist/core/constants.js +1 -1
  13. package/dist/core/dynamic-models.d.ts +6 -6
  14. package/dist/core/evaluation.d.ts +19 -80
  15. package/dist/core/evaluation.js +185 -484
  16. package/dist/core/factory.d.ts +3 -3
  17. package/dist/core/factory.js +31 -91
  18. package/dist/core/service-registry.d.ts +47 -0
  19. package/dist/core/service-registry.js +112 -0
  20. package/dist/core/types.d.ts +8 -1
  21. package/dist/factories/compatibility-factory.js +1 -1
  22. package/dist/factories/provider-factory.d.ts +72 -0
  23. package/dist/factories/provider-factory.js +144 -0
  24. package/dist/factories/provider-registry.d.ts +38 -0
  25. package/dist/factories/provider-registry.js +107 -0
  26. package/dist/index.d.ts +4 -3
  27. package/dist/index.js +2 -4
  28. package/dist/lib/agent/direct-tools.d.ts +6 -6
  29. package/dist/lib/core/base-provider.d.ts +423 -0
  30. package/dist/lib/core/base-provider.js +365 -0
  31. package/dist/lib/core/constants.d.ts +1 -1
  32. package/dist/lib/core/constants.js +1 -1
  33. package/dist/lib/core/dynamic-models.d.ts +6 -6
  34. package/dist/lib/core/evaluation.d.ts +19 -80
  35. package/dist/lib/core/evaluation.js +185 -484
  36. package/dist/lib/core/factory.d.ts +3 -3
  37. package/dist/lib/core/factory.js +30 -91
  38. package/dist/lib/core/service-registry.d.ts +47 -0
  39. package/dist/lib/core/service-registry.js +112 -0
  40. package/dist/lib/core/types.d.ts +8 -1
  41. package/dist/lib/factories/compatibility-factory.js +1 -1
  42. package/dist/lib/factories/provider-factory.d.ts +72 -0
  43. package/dist/lib/factories/provider-factory.js +144 -0
  44. package/dist/lib/factories/provider-registry.d.ts +38 -0
  45. package/dist/lib/factories/provider-registry.js +107 -0
  46. package/dist/lib/index.d.ts +4 -3
  47. package/dist/lib/index.js +2 -4
  48. package/dist/lib/mcp/config.js +28 -3
  49. package/dist/lib/mcp/function-calling.js +1 -1
  50. package/dist/lib/mcp/initialize-tools.d.ts +1 -1
  51. package/dist/lib/mcp/initialize-tools.js +45 -1
  52. package/dist/lib/mcp/initialize.js +16 -6
  53. package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  54. package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
  55. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
  56. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  57. package/dist/lib/mcp/unified-registry.d.ts +4 -0
  58. package/dist/lib/mcp/unified-registry.js +42 -9
  59. package/dist/lib/neurolink.d.ts +156 -117
  60. package/dist/lib/neurolink.js +619 -404
  61. package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
  62. package/dist/lib/providers/amazon-bedrock.js +143 -0
  63. package/dist/lib/providers/analytics-helper.js +7 -4
  64. package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
  65. package/dist/lib/providers/anthropic-baseprovider.js +114 -0
  66. package/dist/lib/providers/anthropic.d.ts +19 -43
  67. package/dist/lib/providers/anthropic.js +82 -306
  68. package/dist/lib/providers/azure-openai.d.ts +20 -0
  69. package/dist/lib/providers/azure-openai.js +89 -0
  70. package/dist/lib/providers/google-ai-studio.d.ts +23 -0
  71. package/dist/lib/providers/google-ai-studio.js +107 -0
  72. package/dist/lib/providers/google-vertex.d.ts +47 -0
  73. package/dist/lib/providers/google-vertex.js +205 -0
  74. package/dist/lib/providers/huggingFace.d.ts +32 -25
  75. package/dist/lib/providers/huggingFace.js +97 -431
  76. package/dist/lib/providers/index.d.ts +9 -9
  77. package/dist/lib/providers/index.js +9 -9
  78. package/dist/lib/providers/mcp-provider.js +4 -0
  79. package/dist/lib/providers/mistral.d.ts +42 -0
  80. package/dist/lib/providers/mistral.js +160 -0
  81. package/dist/lib/providers/ollama.d.ts +52 -36
  82. package/dist/lib/providers/ollama.js +297 -520
  83. package/dist/lib/providers/openAI.d.ts +19 -18
  84. package/dist/lib/providers/openAI.js +76 -275
  85. package/dist/lib/sdk/tool-extension.d.ts +181 -0
  86. package/dist/lib/sdk/tool-extension.js +283 -0
  87. package/dist/lib/sdk/tool-registration.d.ts +95 -0
  88. package/dist/lib/sdk/tool-registration.js +167 -0
  89. package/dist/lib/types/generate-types.d.ts +1 -0
  90. package/dist/lib/types/mcp-types.d.ts +116 -0
  91. package/dist/lib/types/mcp-types.js +5 -0
  92. package/dist/lib/types/stream-types.d.ts +30 -18
  93. package/dist/lib/types/universal-provider-options.d.ts +87 -0
  94. package/dist/lib/types/universal-provider-options.js +53 -0
  95. package/dist/mcp/config.js +28 -3
  96. package/dist/mcp/function-calling.js +1 -1
  97. package/dist/mcp/initialize-tools.d.ts +1 -1
  98. package/dist/mcp/initialize-tools.js +45 -1
  99. package/dist/mcp/initialize.js +16 -6
  100. package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
  101. package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
  102. package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
  103. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  104. package/dist/mcp/unified-registry.d.ts +4 -0
  105. package/dist/mcp/unified-registry.js +42 -9
  106. package/dist/neurolink.d.ts +156 -117
  107. package/dist/neurolink.js +619 -404
  108. package/dist/providers/amazon-bedrock.d.ts +32 -0
  109. package/dist/providers/amazon-bedrock.js +143 -0
  110. package/dist/providers/analytics-helper.js +7 -4
  111. package/dist/providers/anthropic-baseprovider.d.ts +23 -0
  112. package/dist/providers/anthropic-baseprovider.js +114 -0
  113. package/dist/providers/anthropic.d.ts +19 -43
  114. package/dist/providers/anthropic.js +81 -305
  115. package/dist/providers/azure-openai.d.ts +20 -0
  116. package/dist/providers/azure-openai.js +89 -0
  117. package/dist/providers/google-ai-studio.d.ts +23 -0
  118. package/dist/providers/google-ai-studio.js +108 -0
  119. package/dist/providers/google-vertex.d.ts +47 -0
  120. package/dist/providers/google-vertex.js +205 -0
  121. package/dist/providers/huggingFace.d.ts +32 -25
  122. package/dist/providers/huggingFace.js +96 -430
  123. package/dist/providers/index.d.ts +9 -9
  124. package/dist/providers/index.js +9 -9
  125. package/dist/providers/mcp-provider.js +4 -0
  126. package/dist/providers/mistral.d.ts +42 -0
  127. package/dist/providers/mistral.js +160 -0
  128. package/dist/providers/ollama.d.ts +52 -36
  129. package/dist/providers/ollama.js +297 -519
  130. package/dist/providers/openAI.d.ts +19 -18
  131. package/dist/providers/openAI.js +76 -276
  132. package/dist/sdk/tool-extension.d.ts +181 -0
  133. package/dist/sdk/tool-extension.js +283 -0
  134. package/dist/sdk/tool-registration.d.ts +95 -0
  135. package/dist/sdk/tool-registration.js +168 -0
  136. package/dist/types/generate-types.d.ts +1 -0
  137. package/dist/types/mcp-types.d.ts +116 -0
  138. package/dist/types/mcp-types.js +5 -0
  139. package/dist/types/stream-types.d.ts +30 -18
  140. package/dist/types/universal-provider-options.d.ts +87 -0
  141. package/dist/types/universal-provider-options.js +53 -0
  142. package/package.json +15 -10
  143. package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
  144. package/dist/lib/providers/agent-enhanced-provider.js +0 -605
  145. package/dist/lib/providers/amazonBedrock.d.ts +0 -28
  146. package/dist/lib/providers/amazonBedrock.js +0 -364
  147. package/dist/lib/providers/azureOpenAI.d.ts +0 -42
  148. package/dist/lib/providers/azureOpenAI.js +0 -347
  149. package/dist/lib/providers/googleAIStudio.d.ts +0 -42
  150. package/dist/lib/providers/googleAIStudio.js +0 -364
  151. package/dist/lib/providers/googleVertexAI.d.ts +0 -34
  152. package/dist/lib/providers/googleVertexAI.js +0 -547
  153. package/dist/lib/providers/mistralAI.d.ts +0 -37
  154. package/dist/lib/providers/mistralAI.js +0 -325
  155. package/dist/providers/agent-enhanced-provider.d.ts +0 -93
  156. package/dist/providers/agent-enhanced-provider.js +0 -606
  157. package/dist/providers/amazonBedrock.d.ts +0 -28
  158. package/dist/providers/amazonBedrock.js +0 -364
  159. package/dist/providers/azureOpenAI.d.ts +0 -42
  160. package/dist/providers/azureOpenAI.js +0 -348
  161. package/dist/providers/googleAIStudio.d.ts +0 -42
  162. package/dist/providers/googleAIStudio.js +0 -364
  163. package/dist/providers/googleVertexAI.d.ts +0 -34
  164. package/dist/providers/googleVertexAI.js +0 -547
  165. package/dist/providers/mistralAI.d.ts +0 -37
  166. package/dist/providers/mistralAI.js +0 -325
@@ -1,27 +1,28 @@
  import type { ZodType, ZodTypeDef } from "zod";
  import { type Schema, type LanguageModelV1 } from "ai";
- import type { GenerateResult } from "../types/generate-types.js";
+ import { AIProviderName } from "../core/types.js";
  import type { StreamOptions, StreamResult } from "../types/stream-types.js";
- import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
- export declare class OpenAI implements AIProvider {
- private modelName;
+ import { BaseProvider } from "../core/base-provider.js";
+ /**
+ * OpenAI Provider v2 - BaseProvider Implementation
+ * Migrated to use factory pattern with exact Google AI provider pattern
+ */
+ export declare class OpenAIProvider extends BaseProvider {
  private model;
- constructor(modelName?: string | null);
+ constructor(modelName?: string);
+ protected getProviderName(): AIProviderName;
+ protected getDefaultModel(): string;
  /**
- * Get the underlying model for function calling
+ * Returns the Vercel AI SDK model instance for OpenAI
  */
- getModel(): LanguageModelV1;
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
+ protected getAISDKModel(): LanguageModelV1;
+ protected handleProviderError(error: any): Error;
  /**
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
- * Future-ready for multi-modal capabilities with current text focus
+ * executeGenerate method removed - generation is now handled by BaseProvider.
+ * For details on the changes and migration steps, refer to the BaseProvider documentation
+ * and the migration guide in the project repository.
  */
- stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
- /**
- * Short alias for generate() - CLI-SDK consistency
- * @param optionsOrPrompt - TextGenerationOptions object or prompt string
- * @param analysisSchema - Optional schema for output validation
- * @returns Promise resolving to GenerateResult or null
- */
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
+ protected executeStream(options: StreamOptions, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
+ private validateStreamOptions;
  }
+ export default OpenAIProvider;
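For code that previously imported the OpenAI class directly, the consumer-visible changes in this declaration are the rename to OpenAIProvider (now also the default export), the narrowed constructor signature (string instead of string | null), and the fact that the public generate/stream surface is now inherited from BaseProvider rather than declared here. A minimal instantiation sketch against this declaration; the import path is illustrative only:

import OpenAIProvider from "./providers/openAI.js"; // illustrative path into dist

// Before (5.1.0): new OpenAI(null) was accepted.
// After (5.2.0): the model name is an optional string; when omitted it is
// presumably resolved through getDefaultModel() (OPENAI_MODEL or "gpt-4o").
const provider = new OpenAIProvider();
const explicit = new OpenAIProvider("gpt-4o");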
@@ -1,312 +1,112 @@
  import { openai } from "@ai-sdk/openai";
- import { streamText, generateText, Output, } from "ai";
+ import { streamText, Output } from "ai";
+ import { AIProviderName } from "../core/types.js";
+ import { BaseProvider } from "../core/base-provider.js";
  import { logger } from "../utils/logger.js";
- import { createTimeoutController, getDefaultTimeout, TimeoutError, } from "../utils/timeout.js";
+ import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
  import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
- import { evaluateResponse } from "../core/evaluation.js";
- import { createAnalytics } from "../core/analytics.js";
- // Default system context
- const DEFAULT_SYSTEM_CONTEXT = {
- systemPrompt: "You are a helpful AI assistant.",
- };
  // Configuration helpers
  const getOpenAIApiKey = () => {
  const apiKey = process.env.OPENAI_API_KEY;
  if (!apiKey) {
- // 🔧 FIX: Enhanced error message with setup instructions
- throw new Error(`❌ OPENAI Provider Configuration Error
-
- Missing required environment variables: OPENAI_API_KEY
-
- 🔧 Step 1: Get Credentials
- Get your API key from https://platform.openai.com/api-keys
-
- 💡 Step 2: Add to your .env file (or export in CLI):
- OPENAI_API_KEY="sk-proj-your-openai-api-key"
- # Optional:
- OPENAI_MODEL="gpt-4o"
- OPENAI_BASE_URL="https://api.openai.com"
-
- 🚀 Step 3: Test the setup:
- npx neurolink generate "Hello" --provider openai
-
- 📖 Full setup guide: https://docs.neurolink.ai/providers/openai`);
+ throw new Error(`❌ OPENAI Provider Configuration Error\n\nMissing required environment variables: OPENAI_API_KEY\n\n🔧 Step 1: Get Credentials\n1. Visit: https://platform.openai.com/api-keys\n2. Create new API key\n3. Copy the key\n\n🔧 Step 2: Set Environment Variable\nAdd to your .env file:\nOPENAI_API_KEY=your_api_key_here\n\n🔧 Step 3: Restart Application\nRestart your application to load the new environment variables.`);
  }
  return apiKey;
  };
  const getOpenAIModel = () => {
  return process.env.OPENAI_MODEL || "gpt-4o";
  };
- // OpenAI class with enhanced error handling
- export class OpenAI {
- modelName;
+ /**
+ * OpenAI Provider v2 - BaseProvider Implementation
+ * Migrated to use factory pattern with exact Google AI provider pattern
+ */
+ export class OpenAIProvider extends BaseProvider {
  model;
  constructor(modelName) {
- const functionTag = "OpenAI.constructor";
- this.modelName = modelName || getOpenAIModel();
- try {
- logger.debug(`[${functionTag}] Function called`, {
- modelName: this.modelName,
- });
- // Set OpenAI API key as environment variable
- process.env.OPENAI_API_KEY = getOpenAIApiKey();
- this.model = openai(this.modelName);
- logger.debug(`[${functionTag}] Function result`, {
- modelName: this.modelName,
- success: true,
- });
- }
- catch (err) {
- logger.debug(`[${functionTag}] Exception`, {
- message: "Error in initializing OpenAI",
- modelName: this.modelName,
- err: String(err),
- });
- throw err;
- }
+ super(modelName, AIProviderName.OPENAI);
+ // Set OpenAI API key as environment variable (required by @ai-sdk/openai)
+ process.env.OPENAI_API_KEY = getOpenAIApiKey();
+ // Initialize model
+ this.model = openai(this.modelName);
+ logger.debug("OpenAIProviderV2 initialized", {
+ model: this.modelName,
+ provider: this.providerName,
+ });
+ }
+ // ===================
+ // ABSTRACT METHOD IMPLEMENTATIONS
+ // ===================
+ getProviderName() {
+ return AIProviderName.OPENAI;
+ }
+ getDefaultModel() {
+ return getOpenAIModel();
  }
  /**
- * Get the underlying model for function calling
+ * Returns the Vercel AI SDK model instance for OpenAI
  */
- getModel() {
+ getAISDKModel() {
  return this.model;
  }
- async generate(optionsOrPrompt, analysisSchema) {
- const functionTag = "OpenAI.generate";
- const provider = "openai";
- const startTime = Date.now();
- try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "generate"), } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- logger.debug(`[${functionTag}] Generate text started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt?.length || 0,
- temperature,
- maxTokens,
- timeout,
- });
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "generate");
- const generateOptions = {
- model: this.model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- // Add abort signal if available
- ...(timeoutController && {
- abortSignal: timeoutController.controller.signal,
- }),
- };
- if (finalSchema) {
- generateOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
- }
- try {
- const result = await generateText(generateOptions);
- // Clean up timeout if successful
- timeoutController?.cleanup();
- logger.debug(`[${functionTag}] Generate text completed`, {
- provider,
- modelName: this.modelName,
- usage: result.usage,
- finishReason: result.finishReason,
- responseLength: result.text?.length || 0,
- timeout,
- });
- // Add analytics if enabled
- if (options.enableAnalytics) {
- const { createAnalytics } = await import("./analytics-helper.js");
- result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
- }
- // Add evaluation if enabled
- if (options.enableEvaluation) {
- result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
- }
- return {
- content: result.text,
- provider: "openai",
- model: this.modelName,
- usage: result.usage
- ? {
- inputTokens: result.usage.promptTokens,
- outputTokens: result.usage.completionTokens,
- totalTokens: result.usage.totalTokens,
- }
- : undefined,
- responseTime: Date.now() - startTime,
- };
- }
- finally {
- // Always cleanup timeout
- timeoutController?.cleanup();
- }
+ handleProviderError(error) {
+ if (error instanceof TimeoutError) {
+ return new Error(`OpenAI request timed out: ${error.message}`);
  }
- catch (err) {
- // Log timeout errors specifically
- if (err instanceof TimeoutError) {
- logger.debug(`[${functionTag}] Timeout error`, {
- provider,
- modelName: this.modelName,
- timeout: err.timeout,
- message: err.message,
- });
- }
- else {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in generating text",
- err: String(err),
- });
- }
- throw err; // Re-throw error to trigger fallback
+ if (error?.message?.includes("API_KEY_INVALID") ||
+ error?.message?.includes("Invalid API key")) {
+ return new Error("Invalid OpenAI API key. Please check your OPENAI_API_KEY environment variable.");
  }
+ if (error?.message?.includes("rate limit")) {
+ return new Error("OpenAI rate limit exceeded. Please try again later.");
+ }
+ return new Error(`OpenAI error: ${error?.message || "Unknown error"}`);
  }
  /**
- * PRIMARY METHOD: Stream content using AI (recommended for new code)
- * Future-ready for multi-modal capabilities with current text focus
+ * executeGenerate method removed - generation is now handled by BaseProvider.
+ * For details on the changes and migration steps, refer to the BaseProvider documentation
+ * and the migration guide in the project repository.
  */
- async stream(optionsOrPrompt, analysisSchema) {
- const functionTag = "OpenAI.stream";
- const provider = "openai";
- let chunkCount = 0;
- const startTime = Date.now();
+ async executeStream(options, analysisSchema) {
+ this.validateStreamOptions(options);
+ const timeout = this.getTimeout(options);
+ const timeoutController = createTimeoutController(timeout, this.providerName, "stream");
  try {
- // Parse parameters - support both string and options object
- const options = typeof optionsOrPrompt === "string"
- ? { input: { text: optionsOrPrompt } }
- : optionsOrPrompt;
- // Validate input
- if (!options?.input?.text ||
- typeof options.input.text !== "string" ||
- options.input.text.trim() === "") {
- throw new Error("Stream options must include input.text as a non-empty string");
- }
- // Convert to internal parameters
- const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, "stream"), } = options;
- // Use schema from options or fallback parameter
- const finalSchema = schema || analysisSchema;
- logger.debug(`[${functionTag}] Stream request started`, {
- provider,
- modelName: this.modelName,
- promptLength: prompt?.length || 0,
- temperature,
- maxTokens,
- timeout,
- });
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "stream");
- const streamOptions = {
+ const result = await streamText({
  model: this.model,
- prompt: prompt,
- system: systemPrompt,
- temperature,
- maxTokens,
- // Add abort signal if available
- ...(timeoutController && {
- abortSignal: timeoutController.controller.signal,
- }),
- onError: (event) => {
- const error = event.error;
- const errorMessage = error instanceof Error ? error.message : String(error);
- const errorStack = error instanceof Error ? error.stack : undefined;
- logger.debug(`[${functionTag}] Stream error`, {
- provider,
- modelName: this.modelName,
- error: errorMessage,
- stack: errorStack,
- promptLength: prompt.length,
- chunkCount,
- });
- },
- onFinish: (event) => {
- logger.debug(`[${functionTag}] Stream finished`, {
- provider,
- modelName: this.modelName,
- finishReason: event.finishReason,
- usage: event.usage,
- totalChunks: chunkCount,
- promptLength: prompt.length,
- responseLength: event.text?.length || 0,
- });
- },
- onChunk: (event) => {
- chunkCount++;
- logger.debug(`[${functionTag}] Stream chunk`, {
- provider,
- modelName: this.modelName,
- chunkNumber: chunkCount,
- chunkLength: event.chunk.text?.length || 0,
- chunkType: event.chunk.type,
- });
- },
- };
- if (finalSchema) {
- streamOptions.experimental_output = Output.object({
- schema: finalSchema,
- });
- }
- const result = streamText(streamOptions);
- logger.debug(`[${functionTag}] Stream request completed`, {
- provider,
- modelName: this.modelName,
+ prompt: options.input.text,
+ system: options.systemPrompt,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens || DEFAULT_MAX_TOKENS,
+ tools: options.tools,
+ toolChoice: "auto",
+ abortSignal: timeoutController?.controller.signal,
  });
- // Convert to StreamResult format
+ timeoutController?.cleanup();
+ // Transform stream to match StreamResult interface
+ const transformedStream = async function* () {
+ for await (const chunk of result.textStream) {
+ yield { content: chunk };
+ }
+ };
  return {
- stream: result.textStream
- ? (async function* () {
- for await (const chunk of result.textStream) {
- yield { content: chunk };
- }
- })()
- : (async function* () {
- yield { content: "" };
- throw new Error("No textStream available from AI SDK");
- })(),
- provider: "openai",
+ stream: transformedStream(),
+ provider: this.providerName,
  model: this.modelName,
- metadata: {
- streamId: `openai-${Date.now()}`,
- startTime,
- },
  };
  }
- catch (err) {
- // Log timeout errors specifically
- if (err instanceof TimeoutError) {
- logger.debug(`[${functionTag}] Timeout error`, {
- provider,
- modelName: this.modelName,
- timeout: err.timeout,
- message: err.message,
- });
- }
- else {
- logger.debug(`[${functionTag}] Exception`, {
- provider,
- modelName: this.modelName,
- message: "Error in streaming content",
- err: String(err),
- });
- }
- throw err; // Re-throw error to trigger fallback
+ catch (error) {
+ timeoutController?.cleanup();
+ throw this.handleProviderError(error);
  }
  }
- /**
- * Short alias for generate() - CLI-SDK consistency
- * @param optionsOrPrompt - TextGenerationOptions object or prompt string
- * @param analysisSchema - Optional schema for output validation
- * @returns Promise resolving to GenerateResult or null
- */
- async gen(optionsOrPrompt, analysisSchema) {
- return this.generate(optionsOrPrompt, analysisSchema);
+ // ===================
+ // PRIVATE VALIDATION METHODS
+ // ===================
+ validateStreamOptions(options) {
+ if (!options.input?.text || options.input.text.trim().length === 0) {
+ throw new Error("Input text is required and cannot be empty");
+ }
  }
  }
+ // Export for factory registration
+ export default OpenAIProvider;
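The rewritten implementation returns the StreamResult shape shown above: an async iterable of { content } chunks plus provider and model fields, with errors normalized through handleProviderError. A minimal consumption sketch; it assumes BaseProvider exposes a public stream() wrapper that forwards to the protected executeStream(), which this diff does not show:

async function printStream(provider: OpenAIProvider, prompt: string) {
  // stream() is assumed to exist on BaseProvider; only executeStream() is visible here
  const result = await provider.stream({ input: { text: prompt } });
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content); // each chunk is { content: string }
  }
  console.log(`\n[${result.provider}/${result.model}]`);
}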
@@ -0,0 +1,181 @@
+ /**
+ * NeuroLink SDK Tool Extension System
+ * Allows developers to register custom tools that integrate with AI providers
+ */
+ import { z } from "zod";
+ import type { Tool } from "ai";
+ import { logger } from "../utils/logger.js";
+ /**
+ * Custom tool interface for SDK users
+ */
+ export interface CustomTool {
+ /**
+ * Tool description that helps AI understand when to use it
+ */
+ description: string;
+ /**
+ * Parameters schema using Zod or JSON Schema
+ */
+ parameters?: z.ZodSchema | Record<string, any>;
+ /**
+ * Tool execution function
+ */
+ execute: (args: any, context?: ToolContext) => Promise<any> | any;
+ /**
+ * Optional metadata
+ */
+ category?: string;
+ version?: string;
+ author?: string;
+ /**
+ * Optional configuration
+ */
+ config?: {
+ timeout?: number;
+ retries?: number;
+ rateLimit?: {
+ requests: number;
+ window: number;
+ };
+ };
+ }
+ /**
+ * Context provided to tools during execution
+ */
+ export interface ToolContext {
+ /**
+ * Call another tool
+ */
+ callTool: (name: string, args: any) => Promise<any>;
+ /**
+ * Current session information
+ */
+ session: {
+ id: string;
+ userId?: string;
+ provider?: string;
+ model?: string;
+ };
+ /**
+ * Logger instance
+ */
+ logger: typeof logger;
+ }
+ /**
+ * Tool middleware function
+ */
+ export type ToolMiddleware = (toolName: string, args: any, next: () => Promise<any>, context: ToolContext) => Promise<any>;
+ /**
+ * Tool permission configuration
+ */
+ export interface ToolPermissions {
+ allowlist?: string[];
+ denylist?: string[];
+ requireApproval?: string[];
+ customValidator?: (toolName: string, args: any) => boolean | Promise<boolean>;
+ }
+ /**
+ * Converts a custom tool to Vercel AI SDK format
+ */
+ export declare function convertToAISDKTool(name: string, customTool: CustomTool): Tool;
+ /**
+ * Tool registry for managing custom tools
+ */
+ export declare class ToolRegistry {
+ private tools;
+ private middleware;
+ private permissions;
+ private rateLimits;
+ /**
+ * Simple rate limiting check with automatic cleanup
+ */
+ private checkRateLimit;
+ /**
+ * Register a custom tool
+ */
+ register(name: string, tool: CustomTool): void;
+ /**
+ * Register multiple tools at once
+ */
+ registerMany(tools: Record<string, CustomTool>): void;
+ /**
+ * Unregister a tool
+ */
+ unregister(name: string): boolean;
+ /**
+ * Get a tool by name
+ */
+ get(name: string): CustomTool | undefined;
+ /**
+ * Get all registered tools
+ */
+ getAll(): Map<string, CustomTool>;
+ /**
+ * Convert all tools to AI SDK format
+ */
+ toAISDKTools(): Record<string, Tool>;
+ /**
+ * Add middleware
+ */
+ use(middleware: ToolMiddleware): void;
+ /**
+ * Set permissions
+ */
+ setPermissions(permissions: ToolPermissions): void;
+ /**
+ * Check if a tool is allowed
+ */
+ private isToolAllowed;
+ /**
+ * Execute a tool with middleware
+ */
+ execute(name: string, args: any, context: ToolContext): Promise<any>;
+ }
+ /**
+ * Create a simple tool helper
+ */
+ export declare function createTool(config: CustomTool): CustomTool;
+ /**
+ * Create an async tool helper
+ */
+ export declare function createAsyncTool(config: Omit<CustomTool, "execute"> & {
+ execute: (args: any, context?: ToolContext) => Promise<any>;
+ }): CustomTool;
+ /**
+ * Create a batch tool that processes multiple items
+ */
+ export declare function createBatchTool<T, R>(config: Omit<CustomTool, "execute" | "parameters"> & {
+ parameters: z.ZodSchema<{
+ items: T[];
+ }>;
+ processItem: (item: T, context?: ToolContext) => Promise<R> | R;
+ batchSize?: number;
+ }): CustomTool;
+ /**
+ * Tool testing utilities
+ */
+ export declare const TestUtils: {
+ /**
+ * Create a mock tool context
+ */
+ mockContext(overrides?: Partial<ToolContext>): ToolContext;
+ /**
+ * Test a tool with mock data
+ */
+ testTool(tool: CustomTool, testCases: Array<{
+ input: any;
+ expected?: any;
+ }>): Promise<({
+ input: any;
+ output: any;
+ success: boolean;
+ matches: boolean | undefined;
+ error?: undefined;
+ } | {
+ input: any;
+ error: string;
+ success: boolean;
+ output?: undefined;
+ matches?: undefined;
+ })[]>;
+ };
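This new declaration file defines the SDK tool-extension surface: a CustomTool shape, a ToolRegistry with middleware and permission hooks, and helpers such as createTool plus toAISDKTools for converting registered tools into the Vercel AI SDK format. A minimal registration sketch against these declarations; the weather tool, its Zod schema, and the import path are invented for illustration, and how the registry is wired into a NeuroLink provider call is not shown in this file:

import { z } from "zod";
import { createTool, ToolRegistry } from "./sdk/tool-extension.js"; // illustrative path

const registry = new ToolRegistry();

registry.register("getWeather", createTool({
  description: "Look up the current weather for a city",
  parameters: z.object({ city: z.string() }), // Zod schema, as allowed by CustomTool.parameters
  execute: async ({ city }) => ({ city, tempC: 21 }), // stub result for illustration
  category: "demo",
}));

// Convert every registered tool into Vercel AI SDK Tool objects
const aiSdkTools = registry.toAISDKTools();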