@juspay/neurolink 5.0.0 → 5.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/README.md +51 -60
- package/dist/chat/sse-handler.js +5 -4
- package/dist/chat/websocket-chat-handler.js +9 -9
- package/dist/cli/commands/mcp.js +1 -1
- package/dist/cli/commands/ollama.js +3 -3
- package/dist/cli/factories/command-factory.d.ts +14 -0
- package/dist/cli/factories/command-factory.js +129 -0
- package/dist/cli/index.js +27 -26
- package/dist/cli/utils/interactive-setup.js +2 -2
- package/dist/core/evaluation.d.ts +9 -9
- package/dist/core/evaluation.js +14 -14
- package/dist/core/types.d.ts +41 -48
- package/dist/core/types.js +1 -0
- package/dist/factories/compatibility-factory.d.ts +20 -0
- package/dist/factories/compatibility-factory.js +69 -0
- package/dist/factories/provider-generate-factory.d.ts +20 -0
- package/dist/factories/provider-generate-factory.js +87 -0
- package/dist/index.d.ts +4 -2
- package/dist/index.js +3 -1
- package/dist/lib/chat/sse-handler.js +5 -4
- package/dist/lib/chat/websocket-chat-handler.js +9 -9
- package/dist/lib/core/evaluation.d.ts +9 -9
- package/dist/lib/core/evaluation.js +14 -14
- package/dist/lib/core/types.d.ts +41 -48
- package/dist/lib/core/types.js +1 -0
- package/dist/lib/factories/compatibility-factory.d.ts +20 -0
- package/dist/lib/factories/compatibility-factory.js +69 -0
- package/dist/lib/factories/provider-generate-factory.d.ts +20 -0
- package/dist/lib/factories/provider-generate-factory.js +87 -0
- package/dist/lib/index.d.ts +4 -2
- package/dist/lib/index.js +3 -1
- package/dist/lib/mcp/client.js +5 -5
- package/dist/lib/mcp/dynamic-orchestrator.js +8 -8
- package/dist/lib/mcp/external-client.js +2 -2
- package/dist/lib/mcp/factory.d.ts +1 -1
- package/dist/lib/mcp/factory.js +1 -1
- package/dist/lib/mcp/neurolink-mcp-client.js +10 -10
- package/dist/lib/mcp/orchestrator.js +4 -4
- package/dist/lib/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/lib/neurolink.d.ts +21 -73
- package/dist/lib/neurolink.js +230 -119
- package/dist/lib/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/lib/providers/agent-enhanced-provider.js +87 -96
- package/dist/lib/providers/amazonBedrock.d.ts +17 -8
- package/dist/lib/providers/amazonBedrock.js +60 -30
- package/dist/lib/providers/anthropic.d.ts +14 -10
- package/dist/lib/providers/anthropic.js +84 -154
- package/dist/lib/providers/azureOpenAI.d.ts +9 -6
- package/dist/lib/providers/azureOpenAI.js +70 -159
- package/dist/lib/providers/function-calling-provider.d.ts +14 -12
- package/dist/lib/providers/function-calling-provider.js +114 -64
- package/dist/lib/providers/googleAIStudio.d.ts +12 -19
- package/dist/lib/providers/googleAIStudio.js +65 -34
- package/dist/lib/providers/googleVertexAI.d.ts +11 -15
- package/dist/lib/providers/googleVertexAI.js +146 -118
- package/dist/lib/providers/huggingFace.d.ts +10 -11
- package/dist/lib/providers/huggingFace.js +61 -24
- package/dist/lib/providers/mcp-provider.d.ts +13 -8
- package/dist/lib/providers/mcp-provider.js +59 -18
- package/dist/lib/providers/mistralAI.d.ts +14 -11
- package/dist/lib/providers/mistralAI.js +60 -29
- package/dist/lib/providers/ollama.d.ts +9 -8
- package/dist/lib/providers/ollama.js +134 -91
- package/dist/lib/providers/openAI.d.ts +11 -12
- package/dist/lib/providers/openAI.js +132 -97
- package/dist/lib/types/generate-types.d.ts +79 -0
- package/dist/lib/types/generate-types.js +1 -0
- package/dist/lib/types/stream-types.d.ts +83 -0
- package/dist/lib/types/stream-types.js +1 -0
- package/dist/lib/utils/providerUtils-fixed.js +1 -1
- package/dist/lib/utils/streaming-utils.d.ts +14 -2
- package/dist/lib/utils/streaming-utils.js +0 -3
- package/dist/mcp/client.js +5 -5
- package/dist/mcp/dynamic-orchestrator.js +8 -8
- package/dist/mcp/external-client.js +2 -2
- package/dist/mcp/factory.d.ts +1 -1
- package/dist/mcp/factory.js +1 -1
- package/dist/mcp/neurolink-mcp-client.js +10 -10
- package/dist/mcp/orchestrator.js +4 -4
- package/dist/mcp/servers/ai-providers/ai-analysis-tools.js +10 -10
- package/dist/mcp/servers/ai-providers/ai-core-server.js +5 -5
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.js +16 -16
- package/dist/neurolink.d.ts +21 -73
- package/dist/neurolink.js +230 -119
- package/dist/providers/agent-enhanced-provider.d.ts +12 -8
- package/dist/providers/agent-enhanced-provider.js +87 -95
- package/dist/providers/amazonBedrock.d.ts +17 -8
- package/dist/providers/amazonBedrock.js +60 -30
- package/dist/providers/anthropic.d.ts +14 -10
- package/dist/providers/anthropic.js +84 -154
- package/dist/providers/azureOpenAI.d.ts +9 -6
- package/dist/providers/azureOpenAI.js +70 -159
- package/dist/providers/function-calling-provider.d.ts +14 -12
- package/dist/providers/function-calling-provider.js +114 -64
- package/dist/providers/googleAIStudio.d.ts +12 -19
- package/dist/providers/googleAIStudio.js +65 -34
- package/dist/providers/googleVertexAI.d.ts +11 -15
- package/dist/providers/googleVertexAI.js +146 -118
- package/dist/providers/huggingFace.d.ts +10 -11
- package/dist/providers/huggingFace.js +61 -24
- package/dist/providers/mcp-provider.d.ts +13 -8
- package/dist/providers/mcp-provider.js +59 -18
- package/dist/providers/mistralAI.d.ts +14 -11
- package/dist/providers/mistralAI.js +60 -29
- package/dist/providers/ollama.d.ts +9 -8
- package/dist/providers/ollama.js +133 -90
- package/dist/providers/openAI.d.ts +11 -12
- package/dist/providers/openAI.js +132 -97
- package/dist/types/generate-types.d.ts +79 -0
- package/dist/types/generate-types.js +1 -0
- package/dist/types/stream-types.d.ts +83 -0
- package/dist/types/stream-types.js +1 -0
- package/dist/utils/providerUtils-fixed.js +1 -1
- package/dist/utils/streaming-utils.d.ts +14 -2
- package/dist/utils/streaming-utils.js +0 -3
- package/package.json +1 -1
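
The bulk of this release replaces the provider-level generateText()/streamText() methods with generate()/stream() and introduces GenerateResult and StreamResult shapes (see the declaration hunks further down). Below is a minimal consumer-side sketch of the new call pattern; the interfaces are structural stand-ins mirrored from this diff, not the package's actual exports, and provider construction is assumed to happen elsewhere.

    // Sketch only: structural types copied from the shapes visible in this diff.
    interface GenerateResult {
      content: string;
      provider: string;
      model: string;
      usage?: { inputTokens: number; outputTokens: number; totalTokens: number };
      responseTime?: number;
    }
    interface StreamResult {
      stream: AsyncIterable<{ content: string }>;
      provider: string;
      model: string;
      metadata?: { streamId: string; startTime: number };
    }
    interface ProviderLike {
      generate(optionsOrPrompt: string | { prompt: string }): Promise<GenerateResult>;
      stream(optionsOrPrompt: string | { input: { text: string } }): Promise<StreamResult>;
    }

    // `provider` stands in for any 5.1.0 provider instance (e.g. GoogleAIStudio).
    async function demo(provider: ProviderLike): Promise<void> {
      const result = await provider.generate("Summarize MCP in one sentence.");
      console.log(result.content, result.usage?.totalTokens);

      const streamed = await provider.stream({ input: { text: "Explain function calling." } });
      for await (const chunk of streamed.stream) {
        process.stdout.write(chunk.content);
      }
    }

Both methods keep the string overload shown in the hunks: a bare prompt is mapped to { prompt: ... } for generate() and to { input: { text: ... } } for stream().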
@@ -3,7 +3,7 @@
  * Integrates MCP tools directly with AI SDK's function calling capabilities
  * This is the missing piece that enables true AI function calling!
  */
-import { generateText as
+import { generateText as aiGenerate, Output, } from "ai";
 import { getAvailableFunctionTools, executeFunctionCall, isFunctionCallingAvailable, } from "../mcp/function-calling.js";
 import { createExecutionContext } from "../mcp/context-manager.js";
 import { mcpLogger } from "../mcp/logging.js";
@@ -22,31 +22,88 @@ export class FunctionCallingProvider {
         this.sessionId = options.sessionId || `function-calling-${Date.now()}`;
         this.userId = options.userId || "function-calling-user";
     }
+    /**
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
+     */
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "FunctionCallingProvider.stream";
+        const startTime = Date.now();
+        // Parse parameters - support both string and options object
+        const options = typeof optionsOrPrompt === "string"
+            ? { input: { text: optionsOrPrompt } }
+            : optionsOrPrompt;
+        // Validate input
+        if (!options?.input?.text ||
+            typeof options.input.text !== "string" ||
+            options.input.text.trim() === "") {
+            throw new Error("Stream options must include input.text as a non-empty string");
+        }
+        // Use base provider's stream implementation
+        const baseResult = await this.baseProvider.stream(options);
+        if (!baseResult) {
+            throw new Error("No stream response received from provider");
+        }
+        // Return the result with function-calling metadata
+        return {
+            ...baseResult,
+            provider: "function-calling",
+            model: options.model || "unknown",
+            metadata: {
+                streamId: `function-calling-${Date.now()}`,
+                startTime,
+            },
+        };
+    }
     /**
      * Generate text with real function calling support
      */
-    async
+    async generate(optionsOrPrompt, analysisSchema) {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const functionTag = "FunctionCallingProvider.
+        const functionTag = "FunctionCallingProvider.generate";
         // If function calling is disabled, use base provider
         if (!this.enableFunctionCalling) {
             mcpLogger.debug(`[${functionTag}] Function calling disabled, using base provider`);
-
+            const result = await this.baseProvider.generate(options, analysisSchema);
+            if (!result) {
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
+            }
+            return result;
         }
         try {
             // Check if function calling is available
             const functionsAvailable = await isFunctionCallingAvailable();
             if (!functionsAvailable) {
                 mcpLogger.debug(`[${functionTag}] No functions available, using base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
             // Get available function tools
             const { tools, toolMap } = await getAvailableFunctionTools();
             if (tools.length === 0) {
                 mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
             mcpLogger.debug(`[${functionTag}] Function calling enabled with ${tools.length} tools`);
             // Create execution context
@@ -55,11 +112,15 @@ export class FunctionCallingProvider {
                 userId: this.userId,
                 aiProvider: this.baseProvider.constructor.name,
             });
-            // Use the AI SDK's native function calling by calling
+            // Use the AI SDK's native function calling by calling generate directly
             // We need to get the underlying model from the base provider
-            const result = await this.
+            const result = await this.generateWithTools(options, tools, toolMap, context, analysisSchema);
             if (!result) {
-                return
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
             }
             // Enhance result with function calling metadata
             const enhancedResult = {
@@ -77,18 +138,26 @@ export class FunctionCallingProvider {
         }
         catch (error) {
             mcpLogger.warn(`[${functionTag}] Function calling failed, using base provider:`, error);
-
+            const result = await this.baseProvider.generate(options, analysisSchema);
+            if (!result) {
+                return {
+                    content: "No response generated",
+                    provider: "function-calling",
+                    model: "unknown",
+                };
+            }
+            return result;
         }
     }
     /**
      * Generate text using AI SDK's native function calling
      */
-    async
-    const functionTag = "FunctionCallingProvider.
+    async generateWithTools(options, tools, toolMap, context, analysisSchema) {
+        const functionTag = "FunctionCallingProvider.generateWithTools";
         try {
             // Convert our tools to AI SDK format with proper execution
             const toolsWithExecution = this.convertToAISDKTools(tools, toolMap, context);
-            mcpLogger.debug(`[${functionTag}] Calling AI SDK
+            mcpLogger.debug(`[${functionTag}] Calling AI SDK generate with ${Object.keys(toolsWithExecution).length} tools and maxSteps: 5`);
             mcpLogger.debug(`[${functionTag}] Sanitized tool names:`, Object.keys(toolsWithExecution));
             // Log the first few tools to debug the issue
             const toolNames = Object.keys(toolsWithExecution);
@@ -99,9 +168,17 @@ export class FunctionCallingProvider {
             const modelInfo = await this.getModelFromProvider();
             if (!modelInfo) {
                 mcpLogger.warn(`[${functionTag}] Could not get model from provider, falling back to base provider`);
-
+                const result = await this.baseProvider.generate(options, analysisSchema);
+                if (!result) {
+                    return {
+                        content: "No response generated",
+                        provider: "function-calling",
+                        model: "unknown",
+                    };
+                }
+                return result;
             }
-            // Use AI SDK's
+            // Use AI SDK's generate directly with tools
             const generateOptions = {
                 model: modelInfo.model,
                 prompt: options.prompt,
@@ -118,13 +195,29 @@ export class FunctionCallingProvider {
                     schema: analysisSchema,
                 });
             }
-            const result = await
-            mcpLogger.debug(`[${functionTag}] AI SDK
+            const result = await aiGenerate(generateOptions);
+            mcpLogger.debug(`[${functionTag}] AI SDK generate completed`, {
                 toolCalls: result.toolCalls?.length || 0,
                 finishReason: result.finishReason,
                 usage: result.usage,
             });
-            return
+            return {
+                content: result.text,
+                provider: "function-calling",
+                model: "unknown",
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: 0,
+                toolsUsed: result.toolCalls?.map((tc) => tc.toolName) || [],
+                toolExecutions: [],
+                enhancedWithTools: (result.toolCalls?.length || 0) > 0,
+                availableTools: [],
+            };
         }
         catch (error) {
             mcpLogger.error(`[${functionTag}] Failed to generate text with tools:`, error);
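
The generateWithTools() hunk above maps the AI SDK's generateText() result onto the package's GenerateResult shape, renaming promptTokens/completionTokens to inputTokens/outputTokens. A standalone sketch of that mapping, assuming only the `ai` package API the hunk itself uses (the provider/model strings here are placeholders):

    import { generateText } from "ai";
    import type { LanguageModelV1 } from "ai";

    // Mirrors the usage-field mapping applied in the hunk above.
    async function toGenerateResult(model: LanguageModelV1, prompt: string) {
      const result = await generateText({ model, prompt });
      return {
        content: result.text,
        provider: "example",   // the real code reports its own provider name
        model: "unknown",      // as in the diff, the model id is not resolved here
        usage: result.usage
          ? {
              inputTokens: result.usage.promptTokens,
              outputTokens: result.usage.completionTokens,
              totalTokens: result.usage.totalTokens,
            }
          : undefined,
      };
    }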
@@ -299,56 +392,13 @@ CRITICAL INSTRUCTIONS:
 These functions provide accurate, real-time data. Use them actively to enhance your responses.`;
     }
     /**
-     *
+     * Alias for generate() - CLI-SDK consistency
      */
-    async streamText(optionsOrPrompt, analysisSchema) {
-        const options = typeof optionsOrPrompt === "string"
-            ? { prompt: optionsOrPrompt }
-            : optionsOrPrompt;
-        const functionTag = "FunctionCallingProvider.streamText";
-        // If function calling is disabled, use base provider
-        if (!this.enableFunctionCalling) {
-            mcpLogger.debug(`[${functionTag}] Function calling disabled, using base provider`);
-            return this.baseProvider.streamText(options, analysisSchema);
-        }
-        try {
-            // Check if function calling is available
-            const functionsAvailable = await isFunctionCallingAvailable();
-            if (!functionsAvailable) {
-                mcpLogger.debug(`[${functionTag}] No functions available, using base provider`);
-                return this.baseProvider.streamText(options, analysisSchema);
-            }
-            // Get available function tools
-            const { tools } = await getAvailableFunctionTools();
-            if (tools.length === 0) {
-                mcpLogger.debug(`[${functionTag}] No tools available, using base provider`);
-                return this.baseProvider.streamText(options, analysisSchema);
-            }
-            mcpLogger.debug(`[${functionTag}] Streaming with ${tools.length} functions available`);
-            // Enhance system prompt
-            const enhancedSystemPrompt = this.createFunctionAwareSystemPrompt(options.systemPrompt, tools);
-            // Stream with enhanced prompt
-            return this.baseProvider.streamText({
-                ...options,
-                systemPrompt: enhancedSystemPrompt,
-            }, analysisSchema);
-        }
-        catch (error) {
-            mcpLogger.warn(`[${functionTag}] Function calling failed, using base provider:`, error);
-            return this.baseProvider.streamText(options, analysisSchema);
-        }
-    }
-    /**
-     * Alias for generateText() - CLI-SDK consistency
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
 /**
@@ -1,6 +1,8 @@
 import type { ZodType, ZodTypeDef } from "zod";
-import { type
-import type {
+import { type Schema, type LanguageModelV1 } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
 export declare class GoogleAIStudio implements AIProvider {
     private modelName;
     /**
@@ -19,31 +21,22 @@ export declare class GoogleAIStudio implements AIProvider {
      */
     get model(): LanguageModelV1;
     /**
-     *
-     *
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
-     */
-    generateText(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateTextResult<ToolSet, unknown> | null>;
-    /**
-     * Alias for generateText() - CLI-SDK consistency
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      * @param optionsOrPrompt - TextGenerationOptions object or prompt string
      * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
 }
@@ -101,21 +101,38 @@ export class GoogleAIStudio {
         return this.getModel();
     }
     /**
-     *
-     *
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-    async
-    const functionTag = "GoogleAIStudio.
+    async stream(optionsOrPrompt, analysisSchema) {
+        const functionTag = "GoogleAIStudio.stream";
         const provider = "google-ai";
         let chunkCount = 0;
+        const startTime = Date.now();
         try {
             // Parse parameters - support both string and options object
             const options = typeof optionsOrPrompt === "string"
-                ? {
+                ? { input: { text: optionsOrPrompt } }
                 : optionsOrPrompt;
-
+            // Validate input
+            if (!options?.input?.text ||
+                typeof options.input.text !== "string" ||
+                options.input.text.trim() === "") {
+                throw new Error("Stream options must include input.text as a non-empty string");
+            }
+            // Convert StreamOptions for internal use
+            const convertedOptions = {
+                prompt: options.input.text,
+                provider: options.provider,
+                model: options.model,
+                temperature: options.temperature,
+                maxTokens: options.maxTokens,
+                systemPrompt: options.systemPrompt,
+                timeout: options.timeout,
+                schema: options.schema,
+                tools: options.tools,
+            };
+            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "stream"), } = convertedOptions;
             // Use schema from options or fallback parameter
             const finalSchema = schema || analysisSchema;
             logger.debug(`[${functionTag}] Stream request started`, {
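
The convertedOptions object in the stream() hunk above implies the shape of the new StreamOptions type; its full declaration lives in types/stream-types.d.ts, which this diff lists but does not display. A partial sketch limited to the fields read here (the optionality markers are assumptions):

    import type { ZodType } from "zod";
    import type { Schema } from "ai";

    // Partial sketch of StreamOptions, covering only the fields the hunk reads.
    interface StreamOptionsSketch {
      input: { text: string };
      provider?: string;
      model?: string;
      temperature?: number;
      maxTokens?: number;
      systemPrompt?: string;
      timeout?: number;
      schema?: ZodType<unknown> | Schema<unknown>;
      tools?: Record<string, unknown>; // exact tool shape is not visible in this diff
    }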
@@ -147,7 +164,7 @@ export class GoogleAIStudio {
                     const error = event.error;
                     const errorMessage = error instanceof Error ? error.message : String(error);
                     const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.error(`[${functionTag}] Stream
+                    logger.error(`[${functionTag}] Stream error`, {
                         provider,
                         modelName: this.modelName,
                         error: errorMessage,
@@ -157,7 +174,7 @@ export class GoogleAIStudio {
                     });
                 },
                 onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream finished`, {
                         provider,
                         modelName: this.modelName,
                         finishReason: event.finishReason,
@@ -169,7 +186,7 @@ export class GoogleAIStudio {
                 },
                 onChunk: (event) => {
                     chunkCount++;
-                    logger.debug(`[${functionTag}] Stream
+                    logger.debug(`[${functionTag}] Stream chunk`, {
                         provider,
                         modelName: this.modelName,
                         chunkNumber: chunkCount,
@@ -178,15 +195,26 @@ export class GoogleAIStudio {
                     });
                 },
             };
-            if (
+            if (finalSchema) {
                 streamOptions.experimental_output = Output.object({
-                    schema:
+                    schema: finalSchema,
                 });
             }
             const result = streamText(streamOptions);
-            //
-
-
+            // Convert to StreamResult format
+            return {
+                stream: (async function* () {
+                    for await (const chunk of result.textStream) {
+                        yield { content: chunk };
+                    }
+                })(),
+                provider: "google-ai",
+                model: this.modelName,
+                metadata: {
+                    streamId: `google-ai-${Date.now()}`,
+                    startTime,
+                },
+            };
         }
         catch (err) {
             // Log timeout errors specifically
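
The return value above wraps the AI SDK textStream (plain string chunks) into an async iterable of { content } objects. The same pattern in isolation; note the hunk calls streamText() without await, while the await below is a defensive no-op that also covers older `ai` versions where the call returned a Promise:

    import { streamText } from "ai";
    import type { LanguageModelV1 } from "ai";

    // Re-expose the AI SDK text stream as { content } chunks, as the hunk above does.
    async function wrapTextStream(model: LanguageModelV1, prompt: string) {
      const result = await streamText({ model, prompt });
      return {
        stream: (async function* () {
          for await (const chunk of result.textStream) {
            yield { content: chunk };
          }
        })(),
      };
    }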
@@ -202,11 +230,11 @@ export class GoogleAIStudio {
                 logger.error(`[${functionTag}] Exception`, {
                     provider,
                     modelName: this.modelName,
-                    message: "Error in streaming
+                    message: "Error in streaming content",
                     err: String(err),
                     promptLength: typeof optionsOrPrompt === "string"
                         ? optionsOrPrompt.length
-                        : optionsOrPrompt.
+                        : optionsOrPrompt.input?.text?.length || 0,
                 });
             }
             throw err; // Re-throw error to trigger fallback
@@ -216,10 +244,10 @@ export class GoogleAIStudio {
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-    async
-    const functionTag = "GoogleAIStudio.
+    async generate(optionsOrPrompt, analysisSchema) {
+        const functionTag = "GoogleAIStudio.generate";
         const provider = "google-ai";
         const startTime = Date.now();
         try {
@@ -284,7 +312,19 @@ export class GoogleAIStudio {
             if (options.enableEvaluation) {
                 result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
             }
-            return
+            return {
+                content: result.text,
+                provider: "google-ai",
+                model: this.modelName || "gemini-2.5-pro",
+                usage: result.usage
+                    ? {
+                        inputTokens: result.usage.promptTokens,
+                        outputTokens: result.usage.completionTokens,
+                        totalTokens: result.usage.totalTokens,
+                    }
+                    : undefined,
+                responseTime: Date.now() - startTime,
+            };
         }
         finally {
             // Always cleanup timeout
@@ -313,21 +353,12 @@ export class GoogleAIStudio {
         }
     }
     /**
-     *
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateTextResult or null
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        return this.generateText(optionsOrPrompt, analysisSchema);
-    }
-    /**
-     * Short alias for generateText() - CLI-SDK consistency
+     * Short alias for generate() - CLI-SDK consistency
      * @param optionsOrPrompt - TextGenerationOptions object or prompt string
      * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null
      */
     async gen(optionsOrPrompt, analysisSchema) {
-        return this.
+        return this.generate(optionsOrPrompt, analysisSchema);
     }
 }
@@ -1,6 +1,8 @@
 import type { ZodType, ZodTypeDef } from "zod";
-import { type
-import type {
+import { type Schema } from "ai";
+import type { GenerateResult } from "../types/generate-types.js";
+import type { StreamOptions, StreamResult } from "../types/stream-types.js";
+import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
 export declare class GoogleVertexAI implements AIProvider {
     private modelName;
     /**
@@ -13,26 +15,20 @@ export declare class GoogleVertexAI implements AIProvider {
      * @private
      */
     private getModel;
-    /**
-     * Processes text using streaming approach with enhanced error handling callbacks
-     * @param prompt - The input text prompt to analyze
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to StreamTextResult or null if operation fails
-     */
-    streamText(optionsOrPrompt: StreamTextOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamTextResult<ToolSet, unknown> | null>;
     /**
      * Processes text using non-streaming approach with optional schema validation
      * @param prompt - The input text prompt to analyze
      * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to
+     * @returns Promise resolving to GenerateResult or null if operation fails
      */
-
+    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
     /**
-     *
+     * PRIMARY METHOD: Stream content using AI (recommended for new code)
+     * Future-ready for multi-modal capabilities with current text focus
      */
-
+    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
     /**
-     * Short alias for
+     * Short alias for generate() - CLI-SDK consistency
      */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<
+    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
 }