@juspay/neurolink 2.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/README.md +31 -5
- package/dist/cli/commands/config.d.ts +6 -6
- package/dist/cli/index.js +29 -30
- package/dist/core/types.d.ts +2 -0
- package/dist/lib/core/types.d.ts +2 -0
- package/dist/lib/neurolink.d.ts +2 -0
- package/dist/lib/neurolink.js +23 -2
- package/dist/lib/providers/agent-enhanced-provider.d.ts +1 -0
- package/dist/lib/providers/agent-enhanced-provider.js +59 -3
- package/dist/lib/providers/amazonBedrock.js +70 -24
- package/dist/lib/providers/anthropic.js +77 -15
- package/dist/lib/providers/azureOpenAI.js +77 -15
- package/dist/lib/providers/googleAIStudio.js +70 -26
- package/dist/lib/providers/googleVertexAI.js +70 -24
- package/dist/lib/providers/huggingFace.js +70 -26
- package/dist/lib/providers/mistralAI.js +70 -26
- package/dist/lib/providers/ollama.d.ts +1 -1
- package/dist/lib/providers/ollama.js +24 -10
- package/dist/lib/providers/openAI.js +67 -23
- package/dist/lib/providers/timeout-wrapper.d.ts +40 -0
- package/dist/lib/providers/timeout-wrapper.js +100 -0
- package/dist/lib/utils/timeout.d.ts +69 -0
- package/dist/lib/utils/timeout.js +130 -0
- package/dist/neurolink.d.ts +2 -0
- package/dist/neurolink.js +23 -2
- package/dist/providers/agent-enhanced-provider.d.ts +1 -0
- package/dist/providers/agent-enhanced-provider.js +59 -3
- package/dist/providers/amazonBedrock.js +70 -24
- package/dist/providers/anthropic.js +77 -15
- package/dist/providers/azureOpenAI.js +77 -15
- package/dist/providers/googleAIStudio.js +70 -26
- package/dist/providers/googleVertexAI.js +70 -24
- package/dist/providers/huggingFace.js +70 -26
- package/dist/providers/mistralAI.js +70 -26
- package/dist/providers/ollama.d.ts +1 -1
- package/dist/providers/ollama.js +24 -10
- package/dist/providers/openAI.js +67 -23
- package/dist/providers/timeout-wrapper.d.ts +40 -0
- package/dist/providers/timeout-wrapper.js +100 -0
- package/dist/utils/timeout.d.ts +69 -0
- package/dist/utils/timeout.js +130 -0
- package/package.json +1 -1
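Every provider diff below consumes three new exports from the added timeout utility (utils/timeout.js, +130 lines in the list above), whose own source is not reproduced in this excerpt. As a reading aid only, here is a minimal sketch of the interface those call sites imply; the shape is inferred purely from usage in the hunks below, and the real module's defaults and internals may differ.

// Hypothetical reconstruction of utils/timeout.js, inferred from its call
// sites in the provider diffs below. Not the published source.
export class TimeoutError extends Error {
    constructor(message, timeout, provider, operation) {
        super(message);
        this.name = "TimeoutError"; // assumption; the providers only read .timeout and .message
        this.timeout = timeout;     // read as `error.timeout` when logging
        this.provider = provider;
        this.operation = operation;
    }
}

// Illustrative defaults only; the real per-provider, per-operation values
// are not visible in this diff.
export function getDefaultTimeout(provider, operation) {
    return operation === "stream" ? 120000 : 30000;
}

// Called as createTimeoutController(timeout, provider, operation); every
// call site uses the result with optional chaining, so it plausibly
// returns undefined when no timeout applies.
export function createTimeoutController(timeout, provider, operation) {
    if (!timeout) {
        return undefined;
    }
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeout);
    return {
        controller,         // .controller.signal is passed to fetch / the AI SDK
        timeoutMs: timeout, // read when converting AbortError to TimeoutError
        cleanup: () => clearTimeout(timer),
    };
}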
providers/azureOpenAI.js

@@ -6,6 +6,7 @@
  */
 import { AIProviderName } from "../core/types.js";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 export class AzureOpenAIProvider {
     name = AIProviderName.AZURE;
     apiKey;
@@ -44,7 +45,7 @@ export class AzureOpenAIProvider {
     getApiUrl(stream = false) {
         return `${this.endpoint}/openai/deployments/${this.deploymentId}/chat/completions?api-version=${this.apiVersion}`;
     }
-    async makeRequest(body, stream = false) {
+    async makeRequest(body, stream = false, signal) {
         const url = this.getApiUrl(stream);
         const headers = {
             "Content-Type": "application/json",
@@ -56,6 +57,7 @@
             method: "POST",
             headers,
             body: JSON.stringify(body),
+            signal, // Add abort signal for timeout support
         });
         if (!response.ok) {
             const errorText = await response.text();
@@ -65,13 +67,15 @@
         return response;
     }
     async generateText(optionsOrPrompt, schema) {
-
+        const functionTag = "AzureOpenAIProvider.generateText";
+        const provider = "azure";
+        logger.debug(`[${functionTag}] Starting text generation`);
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", } = options;
-        logger.debug(`[
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, 'generate'), } = options;
+        logger.debug(`[${functionTag}] Prompt: "${prompt.substring(0, 100)}...", Temperature: ${temperature}, Max tokens: ${maxTokens}, Timeout: ${timeout}`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
@@ -88,10 +92,14 @@
             temperature,
             max_tokens: maxTokens,
         };
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
         try {
-            const response = await this.makeRequest(requestBody);
+            const response = await this.makeRequest(requestBody, false, timeoutController?.controller.signal);
             const data = await response.json();
-
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Success. Generated ${data.usage.completion_tokens} tokens`);
             const content = data.choices[0]?.message?.content || "";
             return {
                 content,
@@ -106,18 +114,42 @@
             };
         }
         catch (error) {
-
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+            // Log timeout errors specifically
+            if (error instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: error.timeout,
+                    message: error.message,
+                });
+            }
+            else if (error?.name === 'AbortError') {
+                // Convert AbortError to TimeoutError
+                const timeoutError = new TimeoutError(`${provider} generate operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, 'generate');
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: timeoutController?.timeoutMs,
+                    message: timeoutError.message,
+                });
+                throw timeoutError;
+            }
+            else {
+                logger.error(`[${functionTag}] Error:`, error);
+            }
             throw error;
         }
     }
     async streamText(optionsOrPrompt, schema) {
-
+        const functionTag = "AzureOpenAIProvider.streamText";
+        const provider = "azure";
+        logger.debug(`[${functionTag}] Starting text streaming`);
         // Parse parameters with backward compatibility
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", } = options;
-        logger.debug(`[
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = "You are a helpful AI assistant.", timeout = getDefaultTimeout(provider, 'stream'), } = options;
+        logger.debug(`[${functionTag}] Streaming prompt: "${prompt.substring(0, 100)}...", Timeout: ${timeout}`);
         const messages = [];
         if (systemPrompt) {
             messages.push({
@@ -135,30 +167,60 @@
             max_tokens: maxTokens,
             stream: true,
         };
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
         try {
-            const response = await this.makeRequest(requestBody, true);
+            const response = await this.makeRequest(requestBody, true, timeoutController?.controller.signal);
             if (!response.body) {
                 throw new Error("No response body received");
             }
-            // Return a StreamTextResult-like object
+            // Return a StreamTextResult-like object with timeout signal
             return {
-                textStream: this.createAsyncIterable(response.body),
+                textStream: this.createAsyncIterable(response.body, timeoutController?.controller.signal),
                 text: "",
                 usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 },
                 finishReason: "stop",
+                // Store timeout controller for external cleanup if needed
+                _timeoutController: timeoutController,
             };
         }
         catch (error) {
-
+            // Cleanup timeout on error
+            timeoutController?.cleanup();
+            // Log timeout errors specifically
+            if (error instanceof TimeoutError) {
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: error.timeout,
+                    message: error.message,
+                });
+            }
+            else if (error?.name === 'AbortError') {
+                // Convert AbortError to TimeoutError
+                const timeoutError = new TimeoutError(`${provider} stream operation timed out after ${timeout}`, timeoutController?.timeoutMs || 0, provider, 'stream');
+                logger.error(`[${functionTag}] Timeout error`, {
+                    provider,
+                    timeout: timeoutController?.timeoutMs,
+                    message: timeoutError.message,
+                });
+                throw timeoutError;
+            }
+            else {
+                logger.error(`[${functionTag}] Error:`, error);
+            }
             throw error;
         }
     }
-    async *createAsyncIterable(body) {
+    async *createAsyncIterable(body, signal) {
         const reader = body.getReader();
         const decoder = new TextDecoder();
         let buffer = "";
         try {
             while (true) {
+                // Check if aborted
+                if (signal?.aborted) {
+                    throw new Error('AbortError');
+                }
                 const { done, value } = await reader.read();
                 if (done) {
                     break;
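Taken together, the Azure hunks let a caller bound each request with a per-call timeout option and tell timeouts apart from other failures. A hypothetical consumer-side sketch follows; the option names and the { content } result shape come straight from the diff above, while detecting TimeoutError via its timeout field is an assumption, since the class definition itself is not shown here.

// Hypothetical caller; `provider` is an already-constructed AzureOpenAIProvider.
async function summarize(provider, text) {
    try {
        const result = await provider.generateText({
            prompt: `Summarize: ${text}`,
            maxTokens: 200,
            timeout: 15000, // aborts the underlying fetch after 15s
        });
        return result.content;
    }
    catch (error) {
        // The provider rethrows timeouts as TimeoutError, which carries the
        // timeout that fired (see the logging above); duck-type on that field.
        if (typeof error.timeout === "number") {
            console.warn(`azure generate timed out after ${error.timeout}ms`);
            return null;
        }
        throw error;
    }
}

One caveat in the streaming path above: createAsyncIterable signals aborts with throw new Error('AbortError'), which sets the error's message rather than its name, so a name === 'AbortError' check will not match errors raised from inside the stream iterator.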
providers/googleAIStudio.js

@@ -1,6 +1,7 @@
 import { createGoogleGenerativeAI } from "@ai-sdk/google";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 // CRITICAL: Setup environment variables early for AI SDK compatibility
 // The AI SDK specifically looks for GOOGLE_GENERATIVE_AI_API_KEY
 // We need to ensure this is set before any AI SDK operations
@@ -109,7 +110,7 @@ export class GoogleAIStudio {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, 'stream'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -121,8 +122,11 @@
             hasSchema: !!finalSchema,
             hasTools: !!tools,
             toolCount: tools ? Object.keys(tools).length : 0,
+            timeout,
         });
         const model = this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
         const streamOptions = {
             model: model,
             prompt: prompt,
@@ -130,6 +134,8 @@
             temperature,
             maxTokens,
             ...(tools && { tools }), // Add tools if provided
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
             onError: (event) => {
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
@@ -171,18 +177,31 @@
             });
         }
         const result = streamText(streamOptions);
+        // For streaming, we can't clean up immediately, but the timeout will auto-clean
+        // The user should handle the stream and any timeout errors
         return result;
     }
     catch (err) {
-
-
-
-
-
-
-
-
-        }
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in streaming text",
+                err: String(err),
+                promptLength: typeof optionsOrPrompt === "string"
+                    ? optionsOrPrompt.length
+                    : optionsOrPrompt.prompt.length,
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }
@@ -200,7 +219,7 @@ export class GoogleAIStudio {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, tools, timeout = getDefaultTimeout(provider, 'generate'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Generate request started`, {
@@ -211,8 +230,11 @@
             maxTokens,
             hasTools: !!tools,
             toolCount: tools ? Object.keys(tools).length : 0,
+            timeout,
         });
         const model = this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
         const generateOptions = {
             model: model,
             prompt: prompt,
@@ -223,29 +245,51 @@
                 tools,
                 maxSteps: 5, // Allow multiple steps for tool execution and response generation
             }), // Add tools if provided
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
         };
         if (finalSchema) {
             generateOptions.experimental_output = Output.object({
                 schema: finalSchema,
             });
         }
-
-
-
-
-
-
-
-
-
+        try {
+            const result = await generateText(generateOptions);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0,
+                timeout,
+            });
+            return result;
+        }
+        finally {
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+        }
     }
     catch (err) {
-
-
-
-
-
-
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in generating text",
+                err: String(err),
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }
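The AI-SDK-based providers (Google AI Studio above, Google Vertex AI and Hugging Face below) share an asymmetry the new comments call out: generateText awaits its result and clears the timer in a finally block, while streamText returns before the stream is drained and leaves the armed timer to auto-clean. A consumer therefore sees a mid-stream timeout as an error thrown from the iterator, not from the streamText call itself. A hypothetical consumption sketch (helper name and timeout value are illustrative):

// Hypothetical stream consumer for an AI-SDK-backed provider.
async function streamToStdout(provider, prompt) {
    const result = await provider.streamText({ prompt, timeout: 60000 });
    try {
        // textStream is the AI SDK's async-iterable text stream.
        for await (const chunk of result.textStream) {
            process.stdout.write(chunk);
        }
    }
    catch (error) {
        // Aborted requests typically surface as an error named "AbortError";
        // treat it (and any TimeoutError) as a timeout, mirroring the
        // providers' own logging above.
        if (error.name === "AbortError" || typeof error.timeout === "number") {
            console.warn("\nstream timed out before completion");
            return;
        }
        throw error;
    }
}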
providers/googleVertexAI.js

@@ -23,6 +23,7 @@ async function getCreateVertexAnthropic() {
 }
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -285,7 +286,7 @@ export class GoogleVertexAI {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'stream'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -296,14 +297,19 @@
             temperature,
             maxTokens,
             hasSchema: !!finalSchema,
+            timeout,
         });
         const model = await this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
         const streamOptions = {
             model: model,
             prompt: prompt,
             system: systemPrompt,
             temperature,
             maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
             onError: (event) => {
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
@@ -345,16 +351,30 @@
             });
         }
         const result = streamText(streamOptions);
+        // For streaming, we can't clean up immediately, but the timeout will auto-clean
+        // The user should handle the stream and any timeout errors
         return result;
     }
     catch (err) {
-
-
-
-
-
-
-
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                isAnthropic: isAnthropicModel(this.modelName),
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in streaming text",
+                err: String(err),
+                promptLength: prompt.length,
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }
@@ -372,7 +392,7 @@ export class GoogleVertexAI {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'generate'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Generate request started`, {
@@ -382,37 +402,63 @@
             promptLength: prompt.length,
             temperature,
             maxTokens,
+            timeout,
         });
         const model = await this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
         const generateOptions = {
             model: model,
             prompt: prompt,
             system: systemPrompt,
             temperature,
             maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
         };
         if (finalSchema) {
             generateOptions.experimental_output = Output.object({
                 schema: finalSchema,
             });
         }
-
-
-
-
-
-
-
-
-
+        try {
+            const result = await generateText(generateOptions);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0,
+                timeout,
+            });
+            return result;
+        }
+        finally {
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+        }
     }
     catch (err) {
-
-
-
-
-
-
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                isAnthropic: isAnthropicModel(this.modelName),
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in generating text",
+                err: String(err),
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }
providers/huggingFace.js

@@ -1,6 +1,7 @@
 import { HfInference } from "@huggingface/inference";
 import { streamText, generateText, Output, } from "ai";
 import { logger } from "../utils/logger.js";
+import { createTimeoutController, TimeoutError, getDefaultTimeout } from "../utils/timeout.js";
 // Default system context
 const DEFAULT_SYSTEM_CONTEXT = {
     systemPrompt: "You are a helpful AI assistant.",
@@ -228,7 +229,7 @@ export class HuggingFace {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'stream'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Stream request started`, {
@@ -238,14 +239,19 @@
             temperature,
             maxTokens,
             hasSchema: !!finalSchema,
+            timeout,
         });
         const model = this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'stream');
         const streamOptions = {
             model: model,
             prompt: prompt,
             system: systemPrompt,
             temperature,
             maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
             onError: (event) => {
                 const error = event.error;
                 const errorMessage = error instanceof Error ? error.message : String(error);
@@ -287,18 +293,31 @@
             });
         }
         const result = streamText(streamOptions);
+        // For streaming, we can't clean up immediately, but the timeout will auto-clean
+        // The user should handle the stream and any timeout errors
         return result;
     }
     catch (err) {
-
-
-
-
-
-
-
-
-        }
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in streaming text",
+                err: String(err),
+                promptLength: typeof optionsOrPrompt === "string"
+                    ? optionsOrPrompt.length
+                    : optionsOrPrompt.prompt.length,
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }
@@ -316,7 +335,7 @@ export class HuggingFace {
         const options = typeof optionsOrPrompt === "string"
             ? { prompt: optionsOrPrompt }
             : optionsOrPrompt;
-        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, } = options;
+        const { prompt, temperature = 0.7, maxTokens = 1000, systemPrompt = DEFAULT_SYSTEM_CONTEXT.systemPrompt, schema, timeout = getDefaultTimeout(provider, 'generate'), } = options;
         // Use schema from options or fallback parameter
         const finalSchema = schema || analysisSchema;
         logger.debug(`[${functionTag}] Generate request started`, {
@@ -325,37 +344,62 @@
             promptLength: prompt.length,
             temperature,
             maxTokens,
+            timeout,
         });
         const model = this.getModel();
+        // Create timeout controller if timeout is specified
+        const timeoutController = createTimeoutController(timeout, provider, 'generate');
         const generateOptions = {
             model: model,
             prompt: prompt,
             system: systemPrompt,
             temperature,
             maxTokens,
+            // Add abort signal if available
+            ...(timeoutController && { abortSignal: timeoutController.controller.signal }),
         };
         if (finalSchema) {
             generateOptions.experimental_output = Output.object({
                 schema: finalSchema,
             });
         }
-
-
-
-
-
-
-
-
-
+        try {
+            const result = await generateText(generateOptions);
+            // Clean up timeout if successful
+            timeoutController?.cleanup();
+            logger.debug(`[${functionTag}] Generate text completed`, {
+                provider,
+                modelName: this.modelName,
+                usage: result.usage,
+                finishReason: result.finishReason,
+                responseLength: result.text?.length || 0,
+                timeout,
+            });
+            return result;
+        }
+        finally {
+            // Always cleanup timeout
+            timeoutController?.cleanup();
+        }
     }
     catch (err) {
-
-
-
-
-
-
+        // Log timeout errors specifically
+        if (err instanceof TimeoutError) {
+            logger.error(`[${functionTag}] Timeout error`, {
+                provider,
+                modelName: this.modelName,
+                timeout: err.timeout,
+                message: err.message,
+            });
+        }
+        else {
+            logger.error(`[${functionTag}] Exception`, {
+                provider,
+                modelName: this.modelName,
+                message: "Error in generating text",
+                err: String(err),
+            });
+        }
         throw err; // Re-throw error to trigger fallback
     }
 }