@juspay/neurolink 4.2.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +47 -2
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -29
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +2 -3
- package/dist/cli/commands/agent-generate.d.ts +0 -1
- package/dist/cli/commands/agent-generate.js +0 -67
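
Taken together, the provider changes below amount to one API reshape, applied uniformly: `generateText()` is renamed `generate()`, the old `streamText()` / `generateTextStream()` pair is replaced by a single primary `stream()` method that accepts either a plain string or an options object with `input.text`, and stream chunks change from raw strings to `{ content }` objects. A minimal consumer-side sketch of that surface, using abbreviated stand-in types (the real declarations live in the new `dist/types/stream-types.d.ts` and `dist/types/generate-types.d.ts`, which this diff lists but does not show):

```ts
// Sketch only: stand-in types abbreviated from the shapes visible in the hunks below.
type StreamChunk = { content: string };
type StreamResult = {
  stream: AsyncIterable<StreamChunk>;
  provider: string;
  model?: string;
  metadata?: { streamId: string; startTime: number };
};
type StreamOptions = { input: { text: string }; maxTokens?: number; systemPrompt?: string };

async function consume(provider: {
  stream(o: StreamOptions | string): Promise<StreamResult>;
  generate(o: { prompt: string; maxTokens?: number } | string): Promise<unknown>;
}) {
  // stream() accepts a plain string or an options object with input.text
  const result = await provider.stream({ input: { text: "Hello" }, maxTokens: 100 });
  for await (const chunk of result.stream) {
    process.stdout.write(chunk.content); // 5.x chunks are { content } objects
  }
  // generate() replaces the 4.x generateText(); gen() remains a short alias
  await provider.generate("Summarize this text");
}
```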
package/dist/lib/providers/anthropic.js (the package/dist/providers copy carries the same change):

@@ -58,8 +58,83 @@ export class AnthropicProvider
         }
         return response;
     }
-    async generateText(optionsOrPrompt, schema) {
-        const functionTag = "AnthropicProvider.generateText";
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "AnthropicProvider.stream";
+        const provider = "anthropic";
+        const startTime = Date.now();
+        logger.debug(`[${functionTag}] Starting content streaming`);
+        // Parse parameters - support both string and options object
+        const options = typeof optionsOrPrompt === "string"
+            ? { input: { text: optionsOrPrompt } }
+            : optionsOrPrompt;
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
+        }
+        // Extract parameters
+        const { prompt = options.input.text, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
+        logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, "stream");
+        try {
+            const body = {
+                model: this.getModel(),
+                max_tokens: maxTokens,
+                messages: [
+                    ...(systemPrompt
+                        ? [{ role: "assistant", content: systemPrompt }]
+                        : []),
+                    { role: "user", content: prompt },
+                ],
+                temperature,
+                stream: true,
+            };
+            const response = await this.makeRequest("messages", body, true, timeoutController?.controller.signal);
+            const streamIterable = this.createAsyncIterable(response.body, timeoutController?.controller.signal);
+            // Clean up timeout controller
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Stream initialized successfully`);
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of streamIterable) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "anthropic",
+                model: this.getModel(),
+                metadata: {
+                    streamId: `anthropic-${Date.now()}`,
+                    startTime,
+                },
+            };
+        }
+        catch (error) {
+            // Always cleanup timeout on error
+            timeoutController?.cleanup();
+            if (error.name === "AbortError" || error.message.includes("timeout")) {
+                const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: timeoutController?.timeoutMs,
+                    message: timeoutError.message,
+                });
+                throw timeoutError;
+            }
+            else {
+                logger.error(`[${functionTag}] Error:`, error);
+            }
+            throw error;
+        }
+    }
+    async generate(optionsOrPrompt, schema) {
+        const functionTag = "AnthropicProvider.generate";
         const provider = "anthropic";
         const startTime = Date.now();
         logger.debug(`[${functionTag}] Starting text generation`);

@@ -138,73 +213,10 @@ export class AnthropicProvider
             throw error;
         }
     }
-    async streamText(optionsOrPrompt, schema) {
-        const functionTag = "AnthropicProvider.streamText";
-        const provider = "anthropic";
-        logger.debug(`[${functionTag}] Starting text streaming`);
-        // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", timeout = getDefaultTimeout(provider, "stream"), } = options;
-        logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
-        const requestBody = {
-            model: this.getModel(),
-            max_tokens: maxTokens,
-            messages: [
-                {
-                    role: "user",
-                    content: prompt,
-                },
-            ],
-            temperature,
-            system: systemPrompt,
-            stream: true,
-        };
-        // Create timeout controller if timeout is specified
-        const timeoutController = createTimeoutController(timeout, provider, "stream");
-        try {
-            const response = await this.makeRequest("messages", requestBody, true, timeoutController?.controller.signal);
-            if (!response.body) {
-                throw new Error("No response body received");
-            }
-            // Return a StreamTextResult-like object with timeout signal
-            return {
-                textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
-                text: "",
-                usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
-                finishReason: "end_turn",
-                // Store timeout controller for external cleanup if needed
-                _timeoutController: timeoutController,
-            };
-        }
-        catch (error) {
-            // Cleanup timeout on error
-            timeoutController?.cleanup();
-            // Log timeout errors specifically
-            if (error instanceof TimeoutError) {
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    timeout: error.timeout,
-                    message: error.message,
-                });
-            }
-            else if (error?.name === "AbortError") {
-                // Convert AbortError to TimeoutError
-                const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    timeout: timeoutController?.timeoutMs,
-                    message: timeoutError.message,
-                });
-                throw timeoutError;
-            }
-            else {
-                logger.error(`[${functionTag}] Error:`, error);
-            }
-            throw error;
-        }
-    }
+    /**
+     * LEGACY METHOD: Use stream() instead for new code
+     * @deprecated Use stream() method instead
+     */
     async *createAsyncIterable(body, signal) {
         const reader = body.getReader();
         const decoder = new TextDecoder();

@@ -250,87 +262,11 @@ export class AnthropicProvider
             reader.releaseLock();
         }
     }
-    async *generateTextStream(optionsOrPrompt) {
-        logger.debug("[AnthropicProvider.generateTextStream] Starting text streaming");
-        // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are Claude, an AI assistant created by Anthropic. You are helpful, harmless, and honest.", } = options;
-        logger.debug(`[AnthropicProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
-        const requestBody = {
-            model: this.getModel(),
-            max_tokens: maxTokens,
-            messages: [
-                {
-                    role: "user",
-                    content: prompt,
-                },
-            ],
-            temperature,
-            system: systemPrompt,
-            stream: true,
-        };
-        try {
-            const response = await this.makeRequest("messages", requestBody, true);
-            if (!response.body) {
-                throw new Error("No response body received");
-            }
-            const reader = response.body.getReader();
-            const decoder = new TextDecoder();
-            let buffer = "";
-            try {
-                while (true) {
-                    const { done, value } = await reader.read();
-                    if (done) {
-                        break;
-                    }
-                    buffer += decoder.decode(value, { stream: true });
-                    const lines = buffer.split("\n");
-                    buffer = lines.pop() || "";
-                    for (const line of lines) {
-                        if (line.trim() === "") {
-                            continue;
-                        }
-                        if (line.startsWith("data: ")) {
-                            const data = line.slice(6);
-                            if (data.trim() === "[DONE]") {
-                                continue;
-                            }
-                            try {
-                                const chunk = JSON.parse(data);
-                                // Extract text content from different chunk types
-                                if (chunk.type === "content_block_delta" && chunk.delta?.text) {
-                                    yield {
-                                        content: chunk.delta.text,
-                                        provider: this.name,
-                                        model: this.getModel(),
-                                    };
-                                }
-                            }
-                            catch (parseError) {
-                                logger.warn("[AnthropicProvider.generateTextStream] Failed to parse chunk:", parseError);
-                                continue;
-                            }
-                        }
-                    }
-                }
-            }
-            finally {
-                reader.releaseLock();
-            }
-            logger.debug("[AnthropicProvider.generateTextStream] Streaming completed");
-        }
-        catch (error) {
-            logger.error("[AnthropicProvider.generateTextStream] Error:", error);
-            throw error;
-        }
-    }
     async testConnection() {
         logger.debug("[AnthropicProvider.testConnection] Testing connection to Anthropic API");
         const startTime = Date.now();
         try {
-            await this.generateText({
+            await this.generate({
                 prompt: "Hello",
                 maxTokens: 5,
             });

@@ -391,15 +327,9 @@ export class AnthropicProvider
         ];
     }
     /**
-     *
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
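
Migration-wise, the removed `streamText()` / `generateTextStream()` paths map onto the new `stream()` roughly as follows. A hedged sketch in which `anthropic` stands for any provider instance; the 4.x and 5.x call shapes are read off the removed and added code above:

```ts
// `anthropic` is an assumed provider instance; types abbreviated from the hunks above.
declare const anthropic: {
  streamText(o: { prompt: string }): Promise<{ textStream: AsyncIterable<string> }>; // 4.x, removed
  stream(o: { input: { text: string } }): Promise<{ stream: AsyncIterable<{ content: string }> }>; // 5.x
};

// 4.x (removed): textStream yielded raw delta strings
const old = await anthropic.streamText({ prompt: "Hi" });
for await (const text of old.textStream) {
  process.stdout.write(text);
}

// 5.x: stream() takes input.text; chunks arrive as { content } objects
const next = await anthropic.stream({ input: { text: "Hi" } });
for await (const chunk of next.stream) {
  process.stdout.write(chunk.content);
}
```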
package/dist/lib/providers/azureOpenAI.d.ts (the package/dist/providers copy carries the same change; removed lines whose content was not captured by the diff viewer are marked `…`):

@@ -4,7 +4,8 @@
  * Enterprise-grade OpenAI integration through Microsoft Azure.
  * Supports all OpenAI models with enhanced security and compliance.
  */
-import type { AIProvider, TextGenerationOptions, …
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
 import { AIProviderName } from "../core/types.js";
 export declare class AzureOpenAIProvider implements AIProvider {
     readonly name: AIProviderName;

@@ -18,10 +19,13 @@ export declare class AzureOpenAIProvider implements AIProvider {
     private getDeploymentId;
     private getApiUrl;
     private makeRequest;
-    …
-    …
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
+    generate(optionsOrPrompt: TextGenerationOptions | string, schema?: any): Promise<any>;
     private createAsyncIterable;
-    generateTextStream(optionsOrPrompt: StreamTextOptions | string): AsyncGenerator<any, void, unknown>;
     testConnection(): Promise<{
         success: boolean;
         error?: string;

@@ -34,6 +38,5 @@ export declare class AzureOpenAIProvider implements AIProvider {
     supportsStreaming(): boolean;
     supportsSchema(): boolean;
     getCapabilities(): string[];
-    …
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
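
From the fields the Azure `stream()` implementation below copies out of its options argument, the `StreamOptions` shape can be inferred roughly as follows. This is an inference from usage, not the published declaration in `stream-types.d.ts`:

```ts
// Inferred sketch only - the authoritative type is the new
// dist/types/stream-types.d.ts (+83 lines), which this diff does not show.
interface StreamOptionsSketch {
  input: { text: string }; // required; validated as a non-empty string
  provider?: string;
  model?: string;
  temperature?: number;    // downstream default: ?? 0.7
  maxTokens?: number;      // downstream default: ?? DEFAULT_MAX_TOKENS
  systemPrompt?: string;   // emitted as a leading "system" message
  timeout?: number;        // passed to createTimeoutController
}
```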
package/dist/lib/providers/azureOpenAI.js (the package/dist/providers copy carries the same change):

@@ -69,8 +69,73 @@ export class AzureOpenAIProvider
         }
         return response;
     }
-    async generateText(optionsOrPrompt, schema) {
-        const functionTag = "AzureOpenAIProvider.generateText";
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "AzureOpenAIProvider.stream";
+        const startTime = Date.now();
+        // Parse parameters - support both string and options object
+        const options = typeof optionsOrPrompt === "string"
+            ? { input: { text: optionsOrPrompt } }
+            : optionsOrPrompt;
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
+        }
+        // Convert StreamOptions for internal use
+        const convertedOptions = {
+            prompt: options.input.text,
+            provider: options.provider,
+            model: options.model,
+            temperature: options.temperature,
+            maxTokens: options.maxTokens,
+            systemPrompt: options.systemPrompt,
+            timeout: options.timeout,
+        };
+        // Prepare Azure OpenAI messages
+        const messages = [];
+        if (convertedOptions.systemPrompt) {
+            messages.push({
+                role: "system",
+                content: convertedOptions.systemPrompt,
+            });
+        }
+        messages.push({
+            role: "user",
+            content: convertedOptions.prompt,
+        });
+        const requestBody = {
+            messages,
+            temperature: convertedOptions.temperature ?? 0.7,
+            max_tokens: convertedOptions.maxTokens ?? DEFAULT_MAX_TOKENS,
+            stream: true,
+        };
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(convertedOptions.timeout, this.name, "stream");
+        try {
+            const response = await this.makeRequest(requestBody, true, timeoutController?.controller.signal);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            // Return an async iterable for streaming chunks
+            const streamIterable = this.createAsyncIterable(response.body, timeoutController?.controller.signal);
+            // Compose the StreamResult object
+            return {
+                stream: streamIterable,
+                provider: this.name,
+                model: convertedOptions.model,
+            };
+        }
+        catch (error) {
+            timeoutController?.cleanup();
+            throw error;
+        }
+    }
+    async generate(optionsOrPrompt, schema) {
+        const functionTag = "AzureOpenAIProvider.generate";
         const provider = "azure";
         const startTime = Date.now();
         logger.debug(`[${functionTag}] Starting text generation`);
@@ -159,77 +224,6 @@ export class AzureOpenAIProvider
             throw error;
         }
     }
-    async streamText(optionsOrPrompt, schema) {
-        const functionTag = "AzureOpenAIProvider.streamText";
-        const provider = "azure";
-        logger.debug(`[${functionTag}] Starting text streaming`);
-        // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, "stream"), } = options;
-        logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
-        const messages = [];
-        if (systemPrompt) {
-            messages.push({
-                role: "system",
-                content: systemPrompt,
-            });
-        }
-        messages.push({
-            role: "user",
-            content: prompt,
-        });
-        const requestBody = {
-            messages,
-            temperature,
-            max_tokens: maxTokens,
-            stream: true,
-        };
-        // Create timeout controller if timeout is specified
-        const timeoutController = createTimeoutController(timeout, provider, "stream");
-        try {
-            const response = await this.makeRequest(requestBody, true, timeoutController?.controller.signal);
-            if (!response.body) {
-                throw new Error("No response body received");
-            }
-            // Return a StreamTextResult-like object with timeout signal
-            return {
-                textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
-                text: "",
-                usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
-                finishReason: "stop",
-                // Store timeout controller for external cleanup if needed
-                _timeoutController: timeoutController,
-            };
-        }
-        catch (error) {
-            // Cleanup timeout on error
-            timeoutController?.cleanup();
-            // Log timeout errors specifically
-            if (error instanceof TimeoutError) {
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    timeout: error.timeout,
-                    message: error.message,
-                });
-            }
-            else if (error?.name === "AbortError") {
-                // Convert AbortError to TimeoutError
-                const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, "stream");
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    timeout: timeoutController?.timeoutMs,
-                    message: timeoutError.message,
-                });
-                throw timeoutError;
-            }
-            else {
-                logger.error(`[${functionTag}] Error:`, error);
-            }
-            throw error;
-        }
-    }
     async *createAsyncIterable(body, signal) {
         const reader = body.getReader();
         const decoder = new TextDecoder();
@@ -260,7 +254,7 @@ export class AzureOpenAIProvider
                             const chunk = JSON.parse(data);
                             // Extract text content from chunk
                             if (chunk.choices?.[0]?.delta?.content) {
-                                yield chunk.choices[0].delta.content;
+                                yield { content: chunk.choices[0].delta.content };
                             }
                         }
                         catch (parseError) {
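
That one-line change is the breaking detail for anyone consuming the async iterable directly: chunks switch from plain strings to objects. A small sketch of the consumer-side adjustment:

```ts
// Collects a stream under the 5.x chunk shape shown in the hunk above.
async function collect(iterable: AsyncIterable<{ content: string }>): Promise<string> {
  let buffer = "";
  for await (const chunk of iterable) {
    buffer += chunk.content; // 4.x equivalent was: buffer += chunk (a plain string)
  }
  return buffer;
}
```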
@@ -275,91 +269,11 @@ export class AzureOpenAIProvider
             reader.releaseLock();
         }
     }
-    async *generateTextStream(optionsOrPrompt) {
-        logger.debug("[AzureOpenAIProvider.generateTextStream] Starting text streaming");
-        // Parse parameters with backward compatibility
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = "You are a helpful AI assistant.", } = options;
-        logger.debug(`[AzureOpenAIProvider.generateTextStream] Streaming prompt: "${prompt.substring(0, 100)}..."`);
-        const messages = [];
-        if (systemPrompt) {
-            messages.push({
-                role: "system",
-                content: systemPrompt,
-            });
-        }
-        messages.push({
-            role: "user",
-            content: prompt,
-        });
-        const requestBody = {
-            messages,
-            temperature,
-            max_tokens: maxTokens,
-            stream: true,
-        };
-        try {
-            const response = await this.makeRequest(requestBody, true);
-            if (!response.body) {
-                throw new Error("No response body received");
-            }
-            const reader = response.body.getReader();
-            const decoder = new TextDecoder();
-            let buffer = "";
-            try {
-                while (true) {
-                    const { done, value } = await reader.read();
-                    if (done) {
-                        break;
-                    }
-                    buffer += decoder.decode(value, { stream: true });
-                    const lines = buffer.split("\n");
-                    buffer = lines.pop() || "";
-                    for (const line of lines) {
-                        if (line.trim() === "") {
-                            continue;
-                        }
-                        if (line.startsWith("data: ")) {
-                            const data = line.slice(6);
-                            if (data.trim() === "[DONE]") {
-                                continue;
-                            }
-                            try {
-                                const chunk = JSON.parse(data);
-                                // Extract text content from chunk
-                                if (chunk.choices?.[0]?.delta?.content) {
-                                    yield {
-                                        content: chunk.choices[0].delta.content,
-                                        provider: this.name,
-                                        model: chunk.model || this.deploymentId,
-                                    };
-                                }
-                            }
-                            catch (parseError) {
-                                logger.warn("[AzureOpenAIProvider.generateTextStream] Failed to parse chunk:", parseError);
-                                continue;
-                            }
-                        }
-                    }
-                }
-            }
-            finally {
-                reader.releaseLock();
-            }
-            logger.debug("[AzureOpenAIProvider.generateTextStream] Streaming completed");
-        }
-        catch (error) {
-            logger.error("[AzureOpenAIProvider.generateTextStream] Error:", error);
-            throw error;
-        }
-    }
     async testConnection() {
         logger.debug("[AzureOpenAIProvider.testConnection] Testing connection to Azure OpenAI");
         const startTime = Date.now();
         try {
-            await this.generateText({
+            await this.generate({
                 prompt: "Hello",
                 maxTokens: 5,
             });
@@ -428,10 +342,7 @@ export class AzureOpenAIProvider
             "content-filtering",
         ];
     }
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
package/dist/lib/providers/function-calling-provider.d.ts (the package/dist/providers copy carries the same change; removed lines whose content was not captured by the diff viewer are marked `…`):

@@ -3,9 +3,11 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
-import type { AIProvider, TextGenerationOptions, …
-import { type …
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
+import { type Schema } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
 import type { ZodType, ZodTypeDef } from "zod";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
 /**
  * Enhanced provider that enables real function calling with MCP tools
  */

@@ -19,14 +21,19 @@ export declare class FunctionCallingProvider implements AIProvider {
     sessionId?: string;
     userId?: string;
     });
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: any): Promise<StreamResult>;
     /**
      * Generate text with real function calling support
      */
-    …
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
     /**
      * Generate text using AI SDK's native function calling
      */
-    private …
+    private generateWithTools;
     /**
      * Get the model from the base provider
      * This is a temporary solution - ideally we'd have a getModel() method on AIProvider

@@ -45,17 +52,12 @@ export declare class FunctionCallingProvider implements AIProvider {
      */
     private createFunctionAwareSystemPrompt;
     /**
-     *
-     */
-    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
-    /**
-     * Alias for generateText() - CLI-SDK consistency
+     * Alias for generate() - CLI-SDK consistency
      */
-    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
     /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateTextResult | null>;
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: any): Promise<EnhancedGenerateResult | null>;
 }
 /**
  * Create a function-calling enhanced version of any AI provider