@juspay/neurolink 5.1.0 → 5.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -9
- package/README.md +123 -126
- package/dist/agent/direct-tools.d.ts +6 -6
- package/dist/cli/commands/config.d.ts +3 -3
- package/dist/cli/commands/mcp.js +8 -7
- package/dist/cli/factories/command-factory.d.ts +4 -0
- package/dist/cli/factories/command-factory.js +63 -8
- package/dist/cli/index.js +87 -140
- package/dist/core/base-provider.d.ts +423 -0
- package/dist/core/base-provider.js +376 -0
- package/dist/core/constants.d.ts +2 -1
- package/dist/core/constants.js +2 -1
- package/dist/core/dynamic-models.d.ts +6 -6
- package/dist/core/evaluation.d.ts +19 -80
- package/dist/core/evaluation.js +185 -484
- package/dist/core/factory.d.ts +3 -3
- package/dist/core/factory.js +31 -91
- package/dist/core/service-registry.d.ts +47 -0
- package/dist/core/service-registry.js +112 -0
- package/dist/core/types.d.ts +8 -1
- package/dist/factories/compatibility-factory.js +1 -1
- package/dist/factories/provider-factory.d.ts +72 -0
- package/dist/factories/provider-factory.js +144 -0
- package/dist/factories/provider-registry.d.ts +38 -0
- package/dist/factories/provider-registry.js +107 -0
- package/dist/index.d.ts +4 -3
- package/dist/index.js +2 -4
- package/dist/lib/agent/direct-tools.d.ts +6 -6
- package/dist/lib/core/base-provider.d.ts +423 -0
- package/dist/lib/core/base-provider.js +376 -0
- package/dist/lib/core/constants.d.ts +2 -1
- package/dist/lib/core/constants.js +2 -1
- package/dist/lib/core/dynamic-models.d.ts +6 -6
- package/dist/lib/core/evaluation.d.ts +19 -80
- package/dist/lib/core/evaluation.js +185 -484
- package/dist/lib/core/factory.d.ts +3 -3
- package/dist/lib/core/factory.js +30 -91
- package/dist/lib/core/service-registry.d.ts +47 -0
- package/dist/lib/core/service-registry.js +112 -0
- package/dist/lib/core/types.d.ts +8 -1
- package/dist/lib/factories/compatibility-factory.js +1 -1
- package/dist/lib/factories/provider-factory.d.ts +72 -0
- package/dist/lib/factories/provider-factory.js +144 -0
- package/dist/lib/factories/provider-registry.d.ts +38 -0
- package/dist/lib/factories/provider-registry.js +107 -0
- package/dist/lib/index.d.ts +4 -3
- package/dist/lib/index.js +2 -4
- package/dist/lib/mcp/client.d.ts +1 -0
- package/dist/lib/mcp/client.js +1 -0
- package/dist/lib/mcp/config.js +28 -3
- package/dist/lib/mcp/context-manager.d.ts +1 -0
- package/dist/lib/mcp/context-manager.js +8 -4
- package/dist/lib/mcp/function-calling.d.ts +13 -0
- package/dist/lib/mcp/function-calling.js +134 -35
- package/dist/lib/mcp/initialize-tools.d.ts +1 -1
- package/dist/lib/mcp/initialize-tools.js +45 -1
- package/dist/lib/mcp/initialize.js +16 -6
- package/dist/lib/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/lib/mcp/neurolink-mcp-client.js +21 -5
- package/dist/lib/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/lib/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/lib/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/lib/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/lib/mcp/unified-registry.d.ts +4 -0
- package/dist/lib/mcp/unified-registry.js +42 -9
- package/dist/lib/neurolink.d.ts +156 -117
- package/dist/lib/neurolink.js +619 -404
- package/dist/lib/providers/amazon-bedrock.d.ts +32 -0
- package/dist/lib/providers/amazon-bedrock.js +143 -0
- package/dist/lib/providers/analytics-helper.js +7 -4
- package/dist/lib/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/lib/providers/anthropic-baseprovider.js +114 -0
- package/dist/lib/providers/anthropic.d.ts +19 -43
- package/dist/lib/providers/anthropic.js +82 -306
- package/dist/lib/providers/azure-openai.d.ts +20 -0
- package/dist/lib/providers/azure-openai.js +89 -0
- package/dist/lib/providers/function-calling-provider.d.ts +64 -2
- package/dist/lib/providers/function-calling-provider.js +208 -9
- package/dist/lib/providers/google-ai-studio.d.ts +23 -0
- package/dist/lib/providers/google-ai-studio.js +107 -0
- package/dist/lib/providers/google-vertex.d.ts +47 -0
- package/dist/lib/providers/google-vertex.js +205 -0
- package/dist/lib/providers/huggingFace.d.ts +32 -25
- package/dist/lib/providers/huggingFace.js +97 -431
- package/dist/lib/providers/index.d.ts +9 -9
- package/dist/lib/providers/index.js +9 -9
- package/dist/lib/providers/mcp-provider.js +24 -5
- package/dist/lib/providers/mistral.d.ts +42 -0
- package/dist/lib/providers/mistral.js +160 -0
- package/dist/lib/providers/ollama.d.ts +52 -36
- package/dist/lib/providers/ollama.js +297 -520
- package/dist/lib/providers/openAI.d.ts +19 -18
- package/dist/lib/providers/openAI.js +76 -275
- package/dist/lib/sdk/tool-extension.d.ts +181 -0
- package/dist/lib/sdk/tool-extension.js +283 -0
- package/dist/lib/sdk/tool-registration.d.ts +95 -0
- package/dist/lib/sdk/tool-registration.js +167 -0
- package/dist/lib/services/streaming/streaming-manager.js +11 -10
- package/dist/lib/services/websocket/websocket-server.js +12 -11
- package/dist/lib/telemetry/telemetry-service.js +8 -7
- package/dist/lib/types/generate-types.d.ts +1 -0
- package/dist/lib/types/mcp-types.d.ts +116 -0
- package/dist/lib/types/mcp-types.js +5 -0
- package/dist/lib/types/stream-types.d.ts +30 -18
- package/dist/lib/types/universal-provider-options.d.ts +87 -0
- package/dist/lib/types/universal-provider-options.js +53 -0
- package/dist/mcp/client.d.ts +1 -0
- package/dist/mcp/client.js +1 -0
- package/dist/mcp/config.js +28 -3
- package/dist/mcp/context-manager.d.ts +1 -0
- package/dist/mcp/context-manager.js +8 -4
- package/dist/mcp/function-calling.d.ts +13 -0
- package/dist/mcp/function-calling.js +134 -35
- package/dist/mcp/initialize-tools.d.ts +1 -1
- package/dist/mcp/initialize-tools.js +45 -1
- package/dist/mcp/initialize.js +16 -6
- package/dist/mcp/neurolink-mcp-client.d.ts +1 -0
- package/dist/mcp/neurolink-mcp-client.js +21 -5
- package/dist/mcp/servers/agent/direct-tools-server.d.ts +8 -0
- package/dist/mcp/servers/agent/direct-tools-server.js +109 -0
- package/dist/mcp/servers/ai-providers/ai-core-server.js +3 -1
- package/dist/mcp/servers/ai-providers/ai-workflow-tools.d.ts +2 -2
- package/dist/mcp/unified-registry.d.ts +4 -0
- package/dist/mcp/unified-registry.js +42 -9
- package/dist/neurolink.d.ts +156 -117
- package/dist/neurolink.js +619 -404
- package/dist/providers/amazon-bedrock.d.ts +32 -0
- package/dist/providers/amazon-bedrock.js +143 -0
- package/dist/providers/analytics-helper.js +7 -4
- package/dist/providers/anthropic-baseprovider.d.ts +23 -0
- package/dist/providers/anthropic-baseprovider.js +114 -0
- package/dist/providers/anthropic.d.ts +19 -43
- package/dist/providers/anthropic.js +81 -305
- package/dist/providers/azure-openai.d.ts +20 -0
- package/dist/providers/azure-openai.js +89 -0
- package/dist/providers/function-calling-provider.d.ts +64 -2
- package/dist/providers/function-calling-provider.js +208 -9
- package/dist/providers/google-ai-studio.d.ts +23 -0
- package/dist/providers/google-ai-studio.js +108 -0
- package/dist/providers/google-vertex.d.ts +47 -0
- package/dist/providers/google-vertex.js +205 -0
- package/dist/providers/huggingFace.d.ts +32 -25
- package/dist/providers/huggingFace.js +96 -430
- package/dist/providers/index.d.ts +9 -9
- package/dist/providers/index.js +9 -9
- package/dist/providers/mcp-provider.js +24 -5
- package/dist/providers/mistral.d.ts +42 -0
- package/dist/providers/mistral.js +160 -0
- package/dist/providers/ollama.d.ts +52 -36
- package/dist/providers/ollama.js +297 -519
- package/dist/providers/openAI.d.ts +19 -18
- package/dist/providers/openAI.js +76 -276
- package/dist/sdk/tool-extension.d.ts +181 -0
- package/dist/sdk/tool-extension.js +283 -0
- package/dist/sdk/tool-registration.d.ts +95 -0
- package/dist/sdk/tool-registration.js +168 -0
- package/dist/services/streaming/streaming-manager.js +11 -10
- package/dist/services/websocket/websocket-server.js +12 -11
- package/dist/telemetry/telemetry-service.js +8 -7
- package/dist/types/generate-types.d.ts +1 -0
- package/dist/types/mcp-types.d.ts +116 -0
- package/dist/types/mcp-types.js +5 -0
- package/dist/types/stream-types.d.ts +30 -18
- package/dist/types/universal-provider-options.d.ts +87 -0
- package/dist/types/universal-provider-options.js +53 -0
- package/package.json +12 -5
- package/dist/lib/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/lib/providers/agent-enhanced-provider.js +0 -605
- package/dist/lib/providers/amazonBedrock.d.ts +0 -28
- package/dist/lib/providers/amazonBedrock.js +0 -364
- package/dist/lib/providers/azureOpenAI.d.ts +0 -42
- package/dist/lib/providers/azureOpenAI.js +0 -347
- package/dist/lib/providers/googleAIStudio.d.ts +0 -42
- package/dist/lib/providers/googleAIStudio.js +0 -364
- package/dist/lib/providers/googleVertexAI.d.ts +0 -34
- package/dist/lib/providers/googleVertexAI.js +0 -547
- package/dist/lib/providers/mistralAI.d.ts +0 -37
- package/dist/lib/providers/mistralAI.js +0 -325
- package/dist/providers/agent-enhanced-provider.d.ts +0 -93
- package/dist/providers/agent-enhanced-provider.js +0 -606
- package/dist/providers/amazonBedrock.d.ts +0 -28
- package/dist/providers/amazonBedrock.js +0 -364
- package/dist/providers/azureOpenAI.d.ts +0 -42
- package/dist/providers/azureOpenAI.js +0 -348
- package/dist/providers/googleAIStudio.d.ts +0 -42
- package/dist/providers/googleAIStudio.js +0 -364
- package/dist/providers/googleVertexAI.d.ts +0 -34
- package/dist/providers/googleVertexAI.js +0 -547
- package/dist/providers/mistralAI.d.ts +0 -37
- package/dist/providers/mistralAI.js +0 -325
@@ -1,364 +0,0 @@
-import { createGoogleGenerativeAI } from "@ai-sdk/google";
-import { streamText, generateText, Output, } from "ai";
-import { logger } from "../utils/logger.js";
-import { createTimeoutController, TimeoutError, getDefaultTimeout, } from "../utils/timeout.js";
-import { DEFAULT_MAX_TOKENS } from "../core/constants.js";
-import { createProxyFetch } from "../proxy/proxy-fetch.js";
-import { evaluateResponse } from "../core/evaluation.js";
-// CRITICAL: Setup environment variables early for AI SDK compatibility
-// The AI SDK specifically looks for GOOGLE_GENERATIVE_AI_API_KEY
-// We need to ensure this is set before any AI SDK operations
-if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
-    process.env.GOOGLE_AI_API_KEY) {
-    process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GOOGLE_AI_API_KEY;
-}
-// Default system context
-const DEFAULT_SYSTEM_CONTEXT = {
-    systemPrompt: "You are a helpful AI assistant.",
-};
-// Configuration helpers
-const getGoogleAIApiKey = () => {
-    // Check for both possible environment variables
-    const apiKey = process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY;
-    if (!apiKey) {
-        throw new Error("GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set");
-    }
-    // Ensure GOOGLE_GENERATIVE_AI_API_KEY is set for @ai-sdk/google compatibility
-    // The AI SDK specifically looks for this variable name
-    if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY &&
-        process.env.GOOGLE_AI_API_KEY) {
-        process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GOOGLE_AI_API_KEY;
-    }
-    return apiKey;
-};
-const getGoogleAIModelId = () => {
-    return process.env.GOOGLE_AI_MODEL || "gemini-2.5-pro";
-};
-const hasValidAuth = () => {
-    return !!(process.env.GOOGLE_AI_API_KEY || process.env.GOOGLE_GENERATIVE_AI_API_KEY);
-};
-// Lazy initialization cache
-let _google = null;
-function getGoogleInstance() {
-    if (!_google) {
-        const apiKey = getGoogleAIApiKey();
-        const proxyFetch = createProxyFetch();
-        _google = createGoogleGenerativeAI({
-            apiKey: apiKey,
-            fetch: proxyFetch,
-            headers: {
-                "X-Powered-By": "NeuroLink",
-            },
-        });
-    }
-    return _google;
-}
-// Google AI Studio class with enhanced error handling
-export class GoogleAIStudio {
-    modelName;
-    /**
-     * Initializes a new instance of GoogleAIStudio
-     * @param modelName - Optional model name to override the default from config
-     */
-    constructor(modelName) {
-        const functionTag = "GoogleAIStudio.constructor";
-        this.modelName = modelName || getGoogleAIModelId();
-        try {
-            logger.debug(`[${functionTag}] Initialization started`, {
-                modelName: this.modelName,
-                hasApiKey: hasValidAuth(),
-            });
-            logger.debug(`[${functionTag}] Initialization completed`, {
-                modelName: this.modelName,
-                success: true,
-            });
-        }
-        catch (err) {
-            logger.error(`[${functionTag}] Initialization failed`, {
-                message: "Error in initializing Google AI Studio",
-                modelName: this.modelName,
-                error: err instanceof Error ? err.message : String(err),
-                stack: err instanceof Error ? err.stack : undefined,
-            });
-        }
-    }
-    /**
-     * Gets the appropriate model instance
-     * Made public to support FunctionCallingProvider integration
-     */
-    getModel() {
-        logger.debug("GoogleAIStudio.getModel - Google AI model selected", {
-            modelName: this.modelName,
-        });
-        const google = getGoogleInstance();
-        return google(this.modelName);
-    }
-    /**
-     * Expose model property for FunctionCallingProvider
-     * This allows the enhanced provider to access the underlying model
-     */
-    get model() {
-        return this.getModel();
-    }
-    /**
-     * PRIMARY METHOD: Stream content using AI (recommended for new code)
-     * Future-ready for multi-modal capabilities with current text focus
-     */
-    async stream(optionsOrPrompt, analysisSchema) {
-        const functionTag = "GoogleAIStudio.stream";
-        const provider = "google-ai";
-        let chunkCount = 0;
-        const startTime = Date.now();
-        try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { input: { text: optionsOrPrompt } }
-                : optionsOrPrompt;
-            // Validate input
-            if (!options?.input?.text ||
-                typeof options.input.text !== "string" ||
-                options.input.text.trim() === "") {
-                throw new Error("Stream options must include input.text as a non-empty string");
-            }
-            // Convert StreamOptions for internal use
-            const convertedOptions = {
-                prompt: options.input.text,
-                provider: options.provider,
-                model: options.model,
-                temperature: options.temperature,
-                maxTokens: options.maxTokens,
-                systemPrompt: options.systemPrompt,
-                timeout: options.timeout,
-                schema: options.schema,
-                tools: options.tools,
-            };
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "stream"), } = convertedOptions;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Stream request started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt.length,
-                temperature,
-                maxTokens,
-                hasSchema: !!finalSchema,
-                hasTools: !!tools,
-                toolCount: tools ? Object.keys(tools).length : 0,
-                timeout,
-            });
-            const model = this.getModel();
-            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "stream");
-            const streamOptions = {
-                model: model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-                ...(tools && { tools }), // Add tools if provided
-                // Add abort signal if available
-                ...(timeoutController && {
-                    abortSignal: timeoutController.controller.signal,
-                }),
-                onError: (event) => {
-                    const error = event.error;
-                    const errorMessage = error instanceof Error ? error.message : String(error);
-                    const errorStack = error instanceof Error ? error.stack : undefined;
-                    logger.error(`[${functionTag}] Stream error`, {
-                        provider,
-                        modelName: this.modelName,
-                        error: errorMessage,
-                        stack: errorStack,
-                        promptLength: prompt.length,
-                        chunkCount,
-                    });
-                },
-                onFinish: (event) => {
-                    logger.debug(`[${functionTag}] Stream finished`, {
-                        provider,
-                        modelName: this.modelName,
-                        finishReason: event.finishReason,
-                        usage: event.usage,
-                        totalChunks: chunkCount,
-                        promptLength: prompt.length,
-                        responseLength: event.text?.length || 0,
-                    });
-                },
-                onChunk: (event) => {
-                    chunkCount++;
-                    logger.debug(`[${functionTag}] Stream chunk`, {
-                        provider,
-                        modelName: this.modelName,
-                        chunkNumber: chunkCount,
-                        chunkLength: event.chunk.text?.length || 0,
-                        chunkType: event.chunk.type,
-                    });
-                },
-            };
-            if (finalSchema) {
-                streamOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
-            }
-            const result = streamText(streamOptions);
-            // Convert to StreamResult format
-            return {
-                stream: (async function* () {
-                    for await (const chunk of result.textStream) {
-                        yield { content: chunk };
-                    }
-                })(),
-                provider: "google-ai",
-                model: this.modelName,
-                metadata: {
-                    streamId: `google-ai-${Date.now()}`,
-                    startTime,
-                },
-            };
-        }
-        catch (err) {
-            // Log timeout errors specifically
-            if (err instanceof TimeoutError) {
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    modelName: this.modelName,
-                    timeout: err.timeout,
-                    message: err.message,
-                });
-            }
-            else {
-                logger.error(`[${functionTag}] Exception`, {
-                    provider,
-                    modelName: this.modelName,
-                    message: "Error in streaming content",
-                    err: String(err),
-                    promptLength: typeof optionsOrPrompt === "string"
-                        ? optionsOrPrompt.length
-                        : optionsOrPrompt.input?.text?.length || 0,
-                });
-            }
-            throw err; // Re-throw error to trigger fallback
-        }
-    }
-    /**
-     * Processes text using non-streaming approach with optional schema validation
-     * @param prompt - The input text prompt to analyze
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to GenerateResult or null if operation fails
-     */
-    async generate(optionsOrPrompt, analysisSchema) {
-        const functionTag = "GoogleAIStudio.generate";
-        const provider = "google-ai";
-        const startTime = Date.now();
-        try {
-            // Parse parameters - support both string and options object
-            const options = typeof optionsOrPrompt === "string"
-                ? { prompt: optionsOrPrompt }
-                : optionsOrPrompt;
-            const { prompt, temperature = 0.7, maxTokens = DEFAULT_MAX_TOKENS, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, "generate"), } = options;
-            // Use schema from options or fallback parameter
-            const finalSchema = schema || analysisSchema;
-            logger.debug(`[${functionTag}] Generate request started`, {
-                provider,
-                modelName: this.modelName,
-                promptLength: prompt.length,
-                temperature,
-                maxTokens,
-                hasTools: !!tools,
-                toolCount: tools ? Object.keys(tools).length : 0,
-                timeout,
-            });
-            const model = this.getModel();
-            // Create timeout controller if timeout is specified
-            const timeoutController = createTimeoutController(timeout, provider, "generate");
-            const generateOptions = {
-                model: model,
-                prompt: prompt,
-                system: systemPrompt,
-                temperature,
-                maxTokens,
-                ...(tools && {
-                    tools,
-                    maxSteps: 5, // Allow multiple steps for tool execution and response generation
-                }), // Add tools if provided
-                // Add abort signal if available
-                ...(timeoutController && {
-                    abortSignal: timeoutController.controller.signal,
-                }),
-            };
-            if (finalSchema) {
-                generateOptions.experimental_output = Output.object({
-                    schema: finalSchema,
-                });
-            }
-            try {
-                const result = await generateText(generateOptions);
-                // Clean up timeout if successful
-                timeoutController?.cleanup();
-                logger.debug(`[${functionTag}] Generate text completed`, {
-                    provider,
-                    modelName: this.modelName,
-                    usage: result.usage,
-                    finishReason: result.finishReason,
-                    responseLength: result.text?.length || 0,
-                    timeout,
-                });
-                // Add analytics if enabled
-                if (options.enableAnalytics) {
-                    const { createAnalytics } = await import("./analytics-helper.js");
-                    result.analytics = createAnalytics(provider, this.modelName, result, Date.now() - startTime, options.context);
-                }
-                // Add evaluation if enabled
-                if (options.enableEvaluation) {
-                    result.evaluation = await evaluateResponse(prompt, result.text, options.context, options.evaluationDomain, options.toolUsageContext, options.conversationHistory);
-                }
-                return {
-                    content: result.text,
-                    provider: "google-ai",
-                    model: this.modelName || "gemini-2.5-pro",
-                    usage: result.usage
-                        ? {
-                            inputTokens: result.usage.promptTokens,
-                            outputTokens: result.usage.completionTokens,
-                            totalTokens: result.usage.totalTokens,
-                        }
-                        : undefined,
-                    responseTime: Date.now() - startTime,
-                };
-            }
-            finally {
-                // Always cleanup timeout
-                timeoutController?.cleanup();
-            }
-        }
-        catch (err) {
-            // Log timeout errors specifically
-            if (err instanceof TimeoutError) {
-                logger.error(`[${functionTag}] Timeout error`, {
-                    provider,
-                    modelName: this.modelName,
-                    timeout: err.timeout,
-                    message: err.message,
-                });
-            }
-            else {
-                logger.error(`[${functionTag}] Exception`, {
-                    provider,
-                    modelName: this.modelName,
-                    message: "Error in generating text",
-                    err: String(err),
-                });
-            }
-            throw err; // Re-throw error to trigger fallback
-        }
-    }
-    /**
-     * Short alias for generate() - CLI-SDK consistency
-     * @param optionsOrPrompt - TextGenerationOptions object or prompt string
-     * @param analysisSchema - Optional schema for output validation
-     * @returns Promise resolving to GenerateResult or null
-     */
-    async gen(optionsOrPrompt, analysisSchema) {
-        return this.generate(optionsOrPrompt, analysisSchema);
-    }
-}
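For context, the hunk above is the removed standalone Google AI Studio provider. Its core pattern, aliasing GOOGLE_AI_API_KEY onto the GOOGLE_GENERATIVE_AI_API_KEY variable that @ai-sdk/google actually reads and then lazily creating a single client, can be sketched in isolation roughly as below. This is an illustrative TypeScript sketch distilled from the removed file, not code shipped in 5.3.0; the getGoogleClient name is a placeholder.

import { createGoogleGenerativeAI } from "@ai-sdk/google";

// Alias the custom env var onto the name the AI SDK looks for (done once at module load).
if (!process.env.GOOGLE_GENERATIVE_AI_API_KEY && process.env.GOOGLE_AI_API_KEY) {
  process.env.GOOGLE_GENERATIVE_AI_API_KEY = process.env.GOOGLE_AI_API_KEY;
}

let client: ReturnType<typeof createGoogleGenerativeAI> | null = null;

// Lazy initialization: missing credentials surface on first use, not at import time.
function getGoogleClient() {
  if (!client) {
    const apiKey = process.env.GOOGLE_AI_API_KEY ?? process.env.GOOGLE_GENERATIVE_AI_API_KEY;
    if (!apiKey) {
      throw new Error("GOOGLE_AI_API_KEY or GOOGLE_GENERATIVE_AI_API_KEY environment variable is not set");
    }
    client = createGoogleGenerativeAI({ apiKey });
  }
  return client;
}

// Usage: getGoogleClient()("gemini-2.5-pro") returns a model handle for generateText/streamText.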
@@ -1,34 +0,0 @@
-import type { ZodType, ZodTypeDef } from "zod";
-import { type Schema } from "ai";
-import type { GenerateResult } from "../types/generate-types.js";
-import type { StreamOptions, StreamResult } from "../types/stream-types.js";
-import type { AIProvider, TextGenerationOptions, EnhancedGenerateResult } from "../core/types.js";
-export declare class GoogleVertexAI implements AIProvider {
-    private modelName;
-    /**
-     * Initializes a new instance of GoogleVertexAI
-     * @param modelName - Optional model name to override the default from config
-     */
-    constructor(modelName?: string | null);
-    /**
-     * Gets the appropriate model instance (Google or Anthropic)
-     * @private
-     */
-    private getModel;
-    /**
-     * Processes text using non-streaming approach with optional schema validation
-     * @param prompt - The input text prompt to analyze
-     * @param analysisSchema - Optional Zod schema or Schema object for output validation
-     * @returns Promise resolving to GenerateResult or null if operation fails
-     */
-    generate(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<GenerateResult>;
-    /**
-     * PRIMARY METHOD: Stream content using AI (recommended for new code)
-     * Future-ready for multi-modal capabilities with current text focus
-     */
-    stream(optionsOrPrompt: StreamOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<StreamResult>;
-    /**
-     * Short alias for generate() - CLI-SDK consistency
-     */
-    gen(optionsOrPrompt: TextGenerationOptions | string, analysisSchema?: ZodType<unknown, ZodTypeDef, unknown> | Schema<unknown>): Promise<EnhancedGenerateResult | null>;
-}
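The declaration above is the removed GoogleVertexAI surface: generate() and stream() each accept either a bare prompt string or an options object, with an optional Zod schema as a second argument for structured output. Against those 5.1.0-era signatures, a caller would have looked roughly like the sketch below; the import path is hypothetical, the option shapes are taken from the removed implementation earlier in this diff, and 5.3.0 replaces these classes with the new base-provider and provider-factory modules.

import { z } from "zod";
// Hypothetical import path; the published entry points may differ.
import { GoogleVertexAI } from "@juspay/neurolink/dist/providers/googleVertexAI.js";

const provider = new GoogleVertexAI("gemini-2.5-pro");

// String overload: plain text generation.
const plain = await provider.generate("Summarize this release in one sentence.");

// Options overload with a Zod schema for validated, structured output.
const sentiment = z.object({ label: z.string(), confidence: z.number() });
const structured = await provider.generate({ prompt: "Classify: 'great release!'" }, sentiment);

// Streaming overload; the removed implementations yield { content } chunks.
const result = await provider.stream({ input: { text: "Explain provider registries briefly." } });
for await (const chunk of result.stream) {
  process.stdout.write(chunk.content);
}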