@juspay/neurolink 4.2.0 → 5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/CHANGELOG.md +47 -2
  2. package/README.md +51 -60
  3. package/dist/chat/sse-handler.js +5 -4
  4. package/dist/chat/websocket-chat-handler.js +9 -9
  5. package/dist/cli/commands/mcp.js +1 -1
  6. package/dist/cli/commands/ollama.js +3 -3
  7. package/dist/cli/factories/command-factory.d.ts +14 -0
  8. package/dist/cli/factories/command-factory.js +129 -0
  9. package/dist/cli/index.js +27 -29
  10. package/dist/cli/utils/interactive-setup.js +2 -2
  11. package/dist/core/evaluation.d.ts +9 -9
  12. package/dist/core/evaluation.js +14 -14
  13. package/dist/core/types.d.ts +41 -48
  14. package/dist/core/types.js +1 -0
  15. package/dist/factories/compatibility-factory.d.ts +20 -0
  16. package/dist/factories/compatibility-factory.js +69 -0
  17. package/dist/factories/provider-generate-factory.d.ts +20 -0
  18. package/dist/factories/provider-generate-factory.js +87 -0
  19. package/dist/index.d.ts +4 -2
  20. package/dist/index.js +3 -1
  21. package/dist/lib/chat/sse-handler.js +5 -4
  22. package/dist/lib/chat/websocket-chat-handler.js +9 -9
  23. package/dist/lib/core/evaluation.d.ts +9 -9
  24. package/dist/lib/core/evaluation.js +14 -14
  25. package/dist/lib/core/types.d.ts +41 -48
  26. package/dist/lib/core/types.js +1 -0
  27. package/dist/lib/factories/compatibility-factory.d.ts +20 -0
  28. package/dist/lib/factories/compatibility-factory.js +69 -0
  29. package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
  30. package/dist/lib/factories/provider-generate-factory.js +87 -0
  31. package/dist/lib/index.d.ts +4 -2
  32. package/dist/lib/index.js +3 -1
  33. package/dist/lib/mcp/client.js +5 -5
  34. package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
  35. package/dist/lib/mcp/external-client.js +2 -2
  36. package/dist/lib/mcp/factory.d.ts +1 -1
  37. package/dist/lib/mcp/factory.js +1 -1
  38. package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
  39. package/dist/lib/mcp/orchestrator.js +4 -4
  40. package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  41. package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
  42. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  43. package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  44. package/dist/lib/neurolink.d.ts +21 -73
  45. package/dist/lib/neurolink.js +230 -119
  46. package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
  47. package/dist/lib/providers/agent-enhanced-provider.js +87 -96
  48. package/dist/lib/providers/amazonBedrock.d.ts +17 -8
  49. package/dist/lib/providers/amazonBedrock.js +60 -30
  50. package/dist/lib/providers/anthropic.d.ts +14 -10
  51. package/dist/lib/providers/anthropic.js +84 -154
  52. package/dist/lib/providers/azureOpenAI.d.ts +9 -6
  53. package/dist/lib/providers/azureOpenAI.js +70 -159
  54. package/dist/lib/providers/function-calling-provider.d.ts +14 -12
  55. package/dist/lib/providers/function-calling-provider.js +114 -64
  56. package/dist/lib/providers/googleAIStudio.d.ts +12 -19
  57. package/dist/lib/providers/googleAIStudio.js +65 -34
  58. package/dist/lib/providers/googleVertexAI.d.ts +11 -15
  59. package/dist/lib/providers/googleVertexAI.js +146 -118
  60. package/dist/lib/providers/huggingFace.d.ts +10 -11
  61. package/dist/lib/providers/huggingFace.js +61 -24
  62. package/dist/lib/providers/mcp-provider.d.ts +13 -8
  63. package/dist/lib/providers/mcp-provider.js +59 -18
  64. package/dist/lib/providers/mistralAI.d.ts +14 -11
  65. package/dist/lib/providers/mistralAI.js +60 -29
  66. package/dist/lib/providers/ollama.d.ts +9 -8
  67. package/dist/lib/providers/ollama.js +134 -91
  68. package/dist/lib/providers/openAI.d.ts +11 -12
  69. package/dist/lib/providers/openAI.js +132 -97
  70. package/dist/lib/types/generate-types.d.ts +79 -0
  71. package/dist/lib/types/generate-types.js +1 -0
  72. package/dist/lib/types/stream-types.d.ts +83 -0
  73. package/dist/lib/types/stream-types.js +1 -0
  74. package/dist/lib/utils/providerUtils-fixed.js +1 -1
  75. package/dist/lib/utils/streaming-utils.d.ts +14 -2
  76. package/dist/lib/utils/streaming-utils.js +0 -3
  77. package/dist/mcp/client.js +5 -5
  78. package/dist/mcp/dynamic-orchestrator.js +8 -8
  79. package/dist/mcp/external-client.js +2 -2
  80. package/dist/mcp/factory.d.ts +1 -1
  81. package/dist/mcp/factory.js +1 -1
  82. package/dist/mcp/neurolink-mcp-client.js +10 -10
  83. package/dist/mcp/orchestrator.js +4 -4
  84. package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
  85. package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
  86. package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
  87. package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
  88. package/dist/neurolink.d.ts +21 -73
  89. package/dist/neurolink.js +230 -119
  90. package/dist/providers/agent-enhanced-provider.d.ts +12 -8
  91. package/dist/providers/agent-enhanced-provider.js +87 -95
  92. package/dist/providers/amazonBedrock.d.ts +17 -8
  93. package/dist/providers/amazonBedrock.js +60 -30
  94. package/dist/providers/anthropic.d.ts +14 -10
  95. package/dist/providers/anthropic.js +84 -154
  96. package/dist/providers/azureOpenAI.d.ts +9 -6
  97. package/dist/providers/azureOpenAI.js +70 -159
  98. package/dist/providers/function-calling-provider.d.ts +14 -12
  99. package/dist/providers/function-calling-provider.js +114 -64
  100. package/dist/providers/googleAIStudio.d.ts +12 -19
  101. package/dist/providers/googleAIStudio.js +65 -34
  102. package/dist/providers/googleVertexAI.d.ts +11 -15
  103. package/dist/providers/googleVertexAI.js +146 -118
  104. package/dist/providers/huggingFace.d.ts +10 -11
  105. package/dist/providers/huggingFace.js +61 -24
  106. package/dist/providers/mcp-provider.d.ts +13 -8
  107. package/dist/providers/mcp-provider.js +59 -18
  108. package/dist/providers/mistralAI.d.ts +14 -11
  109. package/dist/providers/mistralAI.js +60 -29
  110. package/dist/providers/ollama.d.ts +9 -8
  111. package/dist/providers/ollama.js +133 -90
  112. package/dist/providers/openAI.d.ts +11 -12
  113. package/dist/providers/openAI.js +132 -97
  114. package/dist/types/generate-types.d.ts +79 -0
  115. package/dist/types/generate-types.js +1 -0
  116. package/dist/types/stream-types.d.ts +83 -0
  117. package/dist/types/stream-types.js +1 -0
  118. package/dist/utils/providerUtils-fixed.js +1 -1
  119. package/dist/utils/streaming-utils.d.ts +14 -2
  120. package/dist/utils/streaming-utils.js +0 -3
  121. package/package.json +2 -3
  122. package/dist/cli/commands/agent-generate.d.ts +0 -1
  123. package/dist/cli/commands/agent-generate.js +0 -67
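
The dominant change across these files is an API rename: each provider's `generateText()`/`streamText()` pair becomes `generate()`/`stream()`, backed by new `GenerateResult`/`StreamResult` types under `dist/types/`. A minimal migration sketch follows, assuming the provider classes are importable from the package root (this diff only shows `dist/` internals, so the public import surface is an assumption):

```typescript
// Hedged 4.x -> 5.x migration sketch; the import path and exact option fields
// are inferred from the .d.ts hunks below, not from official documentation.
import { AnthropicProvider } from "@juspay/neurolink";

async function migrate(): Promise<void> {
  const provider = new AnthropicProvider();

  // 4.x (removed): provider.generateText("...") and provider.streamText("...")
  // 5.x generation still accepts a string or a { prompt, ... } options object:
  const result = await provider.generate({ prompt: "Hello", maxTokens: 50 });
  console.log(result);

  // 5.x streaming takes { input: { text } } instead of { prompt }:
  const streamed = await provider.stream({ input: { text: "Hello" } });
  for await (const chunk of streamed.stream) {
    process.stdout.write(chunk.content); // chunks are { content: string } objects
  }
}

migrate().catch(console.error);
```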
@@ -58,8 +58,83 @@ export class AnthropicProvider {
  }
  return response;
  }
- async generateText(optionsOrPrompt, schema) {
- const functionTag = "AnthropicProvider.generateText";
+ /**
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
+ * Future-ready for multi-modal capabilities with current text focus
+ */
+ async stream(optionsOrPrompt, analysisSchema) {
+ const functionTag = "AnthropicProvider.stream";
+ const provider = "anthropic";
+ const startTime = Date.now();
+ logger.debug(`[${functionTag}] Starting content streaming`);
+ // Parse parameters - support both string and options object
+ const options = typeof optionsOrPrompt === "string"
+ ? { input: { text: optionsOrPrompt } }
+ : optionsOrPrompt;
+ // Validate input
+ if (!options?.input?.text ||
+ typeof options.input.text !== "string" ||
+ options.input.text.trim() === "") {
+ throw new Error("Stream options must include input.text as a non-empty string");
+ }
+ // Extract parameters
+ const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
+ logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(timeout, provider, "stream");
+ try {
+ const body = {
+ model: this.getModel(),
+ max_tokens: maxTokens,
+ messages: [
+ ...(systemPrompt
+ ? [{ role: "assistant", content: systemPrompt }]
+ : []),
+ { role: "user", content: prompt },
+ ],
+ temperature,
+ stream: true,
+ };
+ const response = await this.makeRequest("messages", body, true, timeoutController?.controller.signal);
+ const streamIterable = this.createAsyncIterable(response.body, timeoutController?.controller.signal);
+ // Clean up timeout controller
+ timeoutController?.cleanup();
+ logger.debug(`[${functionTag}] Stream initialized successfully`);
+ // Convert to StreamResult format
+ return {
+ stream: (async function* () {
+ for await (const chunk of streamIterable) {
+ yield { content: chunk };
+ }
+ })(),
+ provider: "anthropic",
+ model: this.getModel(),
+ metadata: {
+ streamId: `anthropic-${Date.now()}`,
+ startTime,
+ },
+ };
+ }
+ catch (error) {
+ // Always cleanup timeout on error
+ timeoutController?.cleanup();
+ if (error.name === "AbortError" || error.message.includes("timeout")) {
+ const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
+ logger.error(`[${functionTag}] Timeout error`, {
+ provider,
+ timeout: timeoutController?.timeoutMs,
+ message: timeoutError.message,
+ });
+ throw timeoutError;
+ }
+ else {
+ logger.error(`[${functionTag}] Error:`, error);
+ }
+ throw error;
+ }
+ }
+ async generate(optionsOrPrompt, schema) {
+ const functionTag = "AnthropicProvider.generate";
  const provider = "anthropic";
  const startTime = Date.now();
  logger.debug(`[${functionTag}] Starting text generation`);
@@ -138,73 +213,10 @@ export class AnthropicProvider {
  throw error;
  }
  }
- async streamText(optionsOrPrompt, schema) {
- const functionTag = "AnthropicProvider.streamText";
- const provider = "anthropic";
- logger.debug(`[${functionTag}] Starting text streaming`);
- // Parse parameters with backward compatibility
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
- logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
- const requestBody = {
- model: this.getModel(),
- max_tokens: maxTokens,
- messages: [
- {
- role: "user",
- content: prompt,
- },
- ],
- temperature,
- system: systemPrompt,
- stream: true,
- };
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "stream");
- try {
- const response = await this.makeRequest("messages", requestBody, true, timeoutController?.controller.signal);
- if (!response.body) {
- throw new Error("No response body received");
- }
- // Return a StreamTextResult-like object with timeout signal
- return {
- textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
- text: "",
- usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
- finishReason: "end_turn",
- // Store timeout controller for external cleanup if needed
- _timeoutController: timeoutController,
- };
- }
- catch (error) {
- // Cleanup timeout on error
- timeoutController?.cleanup();
- // Log timeout errors specifically
- if (error instanceof TimeoutError) {
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: error.timeout,
- message: error.message,
- });
- }
- else if (error?.name === "AbortError") {
- // Convert AbortError to TimeoutError
- const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: timeoutController?.timeoutMs,
- message: timeoutError.message,
- });
- throw timeoutError;
- }
- else {
- logger.error(`[${functionTag}] Error:`, error);
- }
- throw error;
- }
- }
+ /**
+ * LEGACY METHOD: Use stream() instead for new code
+ * @deprecated Use stream() method instead
+ */
  async *createAsyncIterable(body, signal) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
@@ -250,87 +262,11 @@ export class AnthropicProvider {
  reader.releaseLock();
  }
  }
- async *generateTextStream(optionsOrPrompt) {
- logger.debug("[AnthropicProvider.generateTextStream] Starting text streaming");
- // Parse parameters with backward compatibility
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", } = options;
- logger.debug(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
- const requestBody = {
- model: this.getModel(),
- max_tokens: maxTokens,
- messages: [
- {
- role: "user",
- content: prompt,
- },
- ],
- temperature,
- system: systemPrompt,
- stream: true,
- };
- try {
- const response = await this.makeRequest("messages", requestBody, true);
- if (!response.body) {
- throw new Error("No response body received");
- }
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
- let buffer = "";
- try {
- while (true) {
- const { done, value } = await reader.read();
- if (done) {
- break;
- }
- buffer += decoder.decode(value, { stream: true });
- const lines = buffer.split("\n");
- buffer = lines.pop() || "";
- for (const line of lines) {
- if (line.trim() === "") {
- continue;
- }
- if (line.startsWith("data: ")) {
- const data = line.slice(6);
- if (data.trim() === "[DONE]") {
- continue;
- }
- try {
- const chunk = JSON.parse(data);
- // Extract text content from different chunk types
- if (chunk.type === "content_block_delta" && chunk.delta?.text) {
- yield {
- content: chunk.delta.text,
- provider: this.name,
- model: this.getModel(),
- };
- }
- }
- catch (parseError) {
- logger.warn("[AnthropicProvider.generateTextStream] Failed to parse chunk:", parseError);
- continue;
- }
- }
- }
- }
- }
- finally {
- reader.releaseLock();
- }
- logger.debug("[AnthropicProvider.generateTextStream] Streaming completed");
- }
- catch (error) {
- logger.error("[AnthropicProvider.generateTextStream] Error:", error);
- throw error;
- }
- }
  async testConnection() {
  logger.debug("[AnthropicProvider.testConnection] Testing connection to Anthropic API");
  const startTime = Date.now();
  try {
- await this.generateText({
+ await this.generate({
  prompt: "Hello",
  maxTokens: 5,
  });
@@ -391,15 +327,9 @@ export class AnthropicProvider {
  ];
  }
  /**
- * Alias for generateText() - CLI-SDK consistency
- */
- async generate(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
- }
- /**
- * Short alias for generateText() - CLI-SDK consistency
+ * Short alias for generate() - CLI-SDK consistency
  */
  async gen(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
+ return this.generate(optionsOrPrompt, analysisSchema);
  }
  }
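
The `stream()` method added above returns a `StreamResult`: an async-iterable `stream` of `{ content }` chunks plus `provider`, `model`, and (for Anthropic) a `metadata` block with `streamId` and `startTime`. A consumer sketch using those field names; the type's import path is an assumption based on the new `dist/types/stream-types.d.ts` file in this release:

```typescript
// Field names are taken from the Anthropic hunks above; the import path is an
// assumption based on the new dist/types/stream-types.d.ts in this diff.
import type { StreamResult } from "@juspay/neurolink/dist/types/stream-types";

async function collect(result: StreamResult): Promise<string> {
  let text = "";
  for await (const chunk of result.stream) {
    text += chunk.content; // each chunk is { content: string }
  }
  // Anthropic populates metadata ({ streamId, startTime }); the Azure hunks
  // below omit it, so treat the field as optional.
  console.log(`${result.provider}/${result.model} done`, result.metadata);
  return text;
}
```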
@@ -4,7 +4,8 @@
  * Enterprise-grade OpenAI integration through Microsoft Azure.
  * Supports all OpenAI models with enhanced security and compliance.
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
+ import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
  import { AIProviderName } from "../core/types.js";
  export declare class AzureOpenAIProvider implements AIProvider {
  readonly name: AIProviderName;
@@ -18,10 +19,13 @@ export declare class AzureOpenAIProvider implements AIProvider {
  private getDeploymentId;
  private getApiUrl;
  private makeRequest;
- generateText(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
- streamText(optionsOrPrompt: StreamTextOptions | string, schema?: any): Promise<any>;
+ /**
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
+ * Future-ready for multi-modal capabilities with current text focus
+ */
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
  private createAsyncIterable;
- generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
  testConnection(): Promise<{
  success: boolean;
  error?: string;
@@ -34,6 +38,5 @@ export declare class AzureOpenAIProvider implements AIProvider {
  supportsStreaming(): boolean;
  supportsSchema(): boolean;
  getCapabilities(): string[];
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
  }
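
Note that in both providers the new `stream()` rejects anything but a non-empty `input.text` and converts an aborted request into a `TimeoutError` carrying the provider name, operation, and `timeoutMs`. A caller-side sketch; the `TimeoutError` detection below is an assumption, since the diff shows the class being thrown but not its export path or whether it sets `error.name`:

```typescript
// Hedged timeout-handling sketch; export paths are assumptions.
import { AzureOpenAIProvider } from "@juspay/neurolink";

async function run(): Promise<void> {
  const provider = new AzureOpenAIProvider();
  try {
    const result = await provider.stream({
      input: { text: "Summarize the quarterly report" },
      timeout: 30_000, // TimeoutError's timeoutMs field suggests milliseconds
    });
    for await (const chunk of result.stream) {
      process.stdout.write(chunk.content);
    }
  } catch (error) {
    // Assumes TimeoutError sets its name; adjust if the class differs.
    if (error instanceof Error && error.name === "TimeoutError") {
      console.error("Stream timed out; retry or raise the timeout");
    } else {
      throw error; // invalid input.text also throws, with a descriptive message
    }
  }
}

run().catch(console.error);
```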
@@ -68,8 +68,73 @@ export class AzureOpenAIProvider {
  }
  return response;
  }
- async generateText(optionsOrPrompt, schema) {
- const functionTag = "AzureOpenAIProvider.generateText";
+ /**
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
+ * Future-ready for multi-modal capabilities with current text focus
+ */
+ async stream(optionsOrPrompt, analysisSchema) {
+ const functionTag = "AzureOpenAIProvider.stream";
+ const startTime = Date.now();
+ // Parse parameters - support both string and options object
+ const options = typeof optionsOrPrompt === "string"
+ ? { input: { text: optionsOrPrompt } }
+ : optionsOrPrompt;
+ // Validate input
+ if (!options?.input?.text ||
+ typeof options.input.text !== "string" ||
+ options.input.text.trim() === "") {
+ throw new Error("Stream options must include input.text as a non-empty string");
+ }
+ // Convert StreamOptions for internal use
+ const convertedOptions = {
+ prompt: options.input.text,
+ provider: options.provider,
+ model: options.model,
+ temperature: options.temperature,
+ maxTokens: options.maxTokens,
+ systemPrompt: options.systemPrompt,
+ timeout: options.timeout,
+ };
+ // Prepare Azure OpenAI messages
+ const messages = [];
+ if (convertedOptions.systemPrompt) {
+ messages.push({
+ role: "system",
+ content: convertedOptions.systemPrompt,
+ });
+ }
+ messages.push({
+ role: "user",
+ content: convertedOptions.prompt,
+ });
+ const requestBody = {
+ messages,
+ temperature: convertedOptions.temperature ?? 0.7,
+ max_tokens: convertedOptions.maxTokens ?? DEFAULT_MAX_TOKENS,
+ stream: true,
+ };
+ // Create timeout controller if timeout is specified
+ const timeoutController = createTimeoutController(convertedOptions.timeout, this.name, "stream");
+ try {
+ const response = await this.makeRequest(requestBody, true, timeoutController?.controller.signal);
+ // Clean up timeout if successful
+ timeoutController?.cleanup();
+ // Return an async iterable for streaming chunks
+ const streamIterable = this.createAsyncIterable(response.body, timeoutController?.controller.signal);
+ // Compose the StreamResult object
+ return {
+ stream: streamIterable,
+ provider: this.name,
+ model: convertedOptions.model,
+ };
+ }
+ catch (error) {
+ timeoutController?.cleanup();
+ throw error;
+ }
+ }
+ async generate(optionsOrPrompt, schema) {
+ const functionTag = "AzureOpenAIProvider.generate";
  const provider = "azure";
  const startTime = Date.now();
  logger.debug(`[${functionTag}] Starting text generation`);
@@ -158,77 +223,6 @@ export class AzureOpenAIProvider {
  throw error;
  }
  }
- async streamText(optionsOrPrompt, schema) {
- const functionTag = "AzureOpenAIProvider.streamText";
- const provider = "azure";
- logger.debug(`[${functionTag}] Starting text streaming`);
- // Parse parameters with backward compatibility
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "stream"), } = options;
- logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
- const messages = [];
- if (systemPrompt) {
- messages.push({
- role: "system",
- content: systemPrompt,
- });
- }
- messages.push({
- role: "user",
- content: prompt,
- });
- const requestBody = {
- messages,
- temperature,
- max_tokens: maxTokens,
- stream: true,
- };
- // Create timeout controller if timeout is specified
- const timeoutController = createTimeoutController(timeout, provider, "stream");
- try {
- const response = await this.makeRequest(requestBody, true, timeoutController?.controller.signal);
- if (!response.body) {
- throw new Error("No response body received");
- }
- // Return a StreamTextResult-like object with timeout signal
- return {
- textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
- text: "",
- usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
- finishReason: "stop",
- // Store timeout controller for external cleanup if needed
- _timeoutController: timeoutController,
- };
- }
- catch (error) {
- // Cleanup timeout on error
- timeoutController?.cleanup();
- // Log timeout errors specifically
- if (error instanceof TimeoutError) {
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: error.timeout,
- message: error.message,
- });
- }
- else if (error?.name === "AbortError") {
- // Convert AbortError to TimeoutError
- const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
- logger.error(`[${functionTag}] Timeout error`, {
- provider,
- timeout: timeoutController?.timeoutMs,
- message: timeoutError.message,
- });
- throw timeoutError;
- }
- else {
- logger.error(`[${functionTag}] Error:`, error);
- }
- throw error;
- }
- }
  async *createAsyncIterable(body, signal) {
  const reader = body.getReader();
  const decoder = new TextDecoder();
@@ -259,7 +253,7 @@ export class AzureOpenAIProvider {
  const chunk = JSON.parse(data);
  // Extract text content from chunk
  if (chunk.choices?.[0]?.delta?.content) {
- yield chunk.choices[0].delta.content;
+ yield { content: chunk.choices[0].delta.content };
  }
  }
  catch (parseError) {
@@ -274,91 +268,11 @@ export class AzureOpenAIProvider {
  reader.releaseLock();
  }
  }
- async *generateTextStream(optionsOrPrompt) {
- logger.debug("[AzureOpenAIProvider.generateTextStream] Starting text streaming");
- // Parse parameters with backward compatibility
- const options = typeof optionsOrPrompt === "string"
- ? { prompt: optionsOrPrompt }
- : optionsOrPrompt;
- const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", } = options;
- logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
- const messages = [];
- if (systemPrompt) {
- messages.push({
- role: "system",
- content: systemPrompt,
- });
- }
- messages.push({
- role: "user",
- content: prompt,
- });
- const requestBody = {
- messages,
- temperature,
- max_tokens: maxTokens,
- stream: true,
- };
- try {
- const response = await this.makeRequest(requestBody, true);
- if (!response.body) {
- throw new Error("No response body received");
- }
- const reader = response.body.getReader();
- const decoder = new TextDecoder();
- let buffer = "";
- try {
- while (true) {
- const { done, value } = await reader.read();
- if (done) {
- break;
- }
- buffer += decoder.decode(value, { stream: true });
- const lines = buffer.split("\n");
- buffer = lines.pop() || "";
- for (const line of lines) {
- if (line.trim() === "") {
- continue;
- }
- if (line.startsWith("data: ")) {
- const data = line.slice(6);
- if (data.trim() === "[DONE]") {
- continue;
- }
- try {
- const chunk = JSON.parse(data);
- // Extract text content from chunk
- if (chunk.choices?.[0]?.delta?.content) {
- yield {
- content: chunk.choices[0].delta.content,
- provider: this.name,
- model: chunk.model || this.deploymentId,
- };
- }
- }
- catch (parseError) {
- logger.warn("[AzureOpenAIProvider.generateTextStream] Failed to parse chunk:", parseError);
- continue;
- }
- }
- }
- }
- }
- finally {
- reader.releaseLock();
- }
- logger.debug("[AzureOpenAIProvider.generateTextStream] Streaming completed");
- }
- catch (error) {
- logger.error("[AzureOpenAIProvider.generateTextStream] Error:", error);
- throw error;
- }
- }
  async testConnection() {
  logger.debug("[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI");
  const startTime = Date.now();
  try {
- await this.generateText({
+ await this.generate({
  prompt: "Hello",
  maxTokens: 5,
  });
@@ -427,10 +341,7 @@ export class AzureOpenAIProvider {
  "content-filtering",
  ];
  }
- async generate(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
- }
  async gen(optionsOrPrompt, analysisSchema) {
- return this.generateText(optionsOrPrompt, analysisSchema);
+ return this.generate(optionsOrPrompt, analysisSchema);
  }
  }
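
One subtle behavioral change in the Azure hunks above: `createAsyncIterable` previously yielded bare strings, and now yields `{ content }` objects, so a 4.x caller that concatenated chunks directly needs a one-line change. A sketch (export path assumed, as before):

```typescript
// The chunk-shape change is visible in the createAsyncIterable hunk above.
import { AzureOpenAIProvider } from "@juspay/neurolink";

async function readAll(): Promise<string> {
  const provider = new AzureOpenAIProvider();
  const streamed = await provider.stream("Write a haiku about diffs"); // string form still accepted
  let text = "";
  for await (const chunk of streamed.stream) {
    // 4.x: text += chunk;   (chunks were bare strings)
    text += chunk.content; // 5.x: chunks are { content: string }
  }
  return text;
}
```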
@@ -3,9 +3,11 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
- import type { AIProvider, TextGenerationOptions, StreamTextOptions, EnhancedGenerateTextResult } from "../core/types.js";
- import { type GenerateTextResult, type StreamTextResult, type ToolSet, type Schema } from "ai";
+ import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+ import { type Schema } from "ai";
+ import type { GenerateResult } from "../types/generate-types.js";
  import type { ZodType, ZodTypeDef } from "zod";
+ import type { StreamOptions, StreamResult } from "../types/stream-types.js";
  /**
  * Enhanced provider that enables real function calling with MCP tools
  */
@@ -19,14 +21,19 @@ export declare class FunctionCallingProvider implements AIProvider {
  sessionId?: string;
  userId?: string;
  });
+ /**
+ * PRIMARY METHOD: Stream content using AI (recommended for new code)
+ * Future-ready for multi-modal capabilities with current text focus
+ */
+ stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
  /**
  * Generate text with real function calling support
  */
- generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
+ generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
  /**
  * Generate text using AI SDK's native function calling
  */
- private generateTextWithTools;
+ private generateWithTools;
  /**
  * Get the model from the base provider
  * This is a temporary solution - ideally we'd have a getModel() method on AIProvider
@@ -45,17 +52,12 @@ export declare class FunctionCallingProvider implements AIProvider {
  */
  private createFunctionAwareSystemPrompt;
  /**
- * Stream text with function calling support
- */
- streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
- /**
- * Alias for generateText() - CLI-SDK consistency
+ * Alias for generate() - CLI-SDK consistency
  */
- generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
  /**
- * Short alias for generateText() - CLI-SDK consistency
+ * Short alias for generate() - CLI-SDK consistency
  */
- gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+ gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
  }
  /**
  * Create a function-calling enhanced version of any AI provider
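
Per the declarations above, `generate()` is now the canonical method on `FunctionCallingProvider`, returning the new `GenerateResult`, while `gen()` survives as a short alias and the old `generate`-as-alias is gone. A usage sketch; the constructor shape (base provider plus `{ sessionId, userId }`) is only partially visible in this excerpt, and the export names are assumptions:

```typescript
// Hedged sketch; constructor arguments and export names are inferred from the
// partial d.ts context above, not from complete declarations.
import { FunctionCallingProvider, OpenAIProvider } from "@juspay/neurolink";

async function demo(): Promise<void> {
  const base = new OpenAIProvider();
  const fc = new FunctionCallingProvider(base, { sessionId: "demo-session" });

  // generate() is primary in 5.x and returns a GenerateResult;
  // gen() is the retained short alias that delegates to it.
  const viaGenerate = await fc.generate("List the MCP tools you can call.");
  const viaGen = await fc.gen("List the MCP tools you can call.");
  console.log(viaGenerate, viaGen);
}

demo().catch(console.error);
```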