@promptbook/openai 0.101.0-14 → 0.101.0-15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/esm/index.es.js +237 -95
- package/esm/index.es.js.map +1 -1
- package/esm/typings/src/book-2.0/agent-source/createAgentModelRequirements.d.ts +3 -5
- package/esm/typings/src/llm-providers/_common/utils/removeUnsupportedModelRequirements.d.ts +25 -0
- package/esm/typings/src/llm-providers/openai/OpenAiCompatibleExecutionTools.d.ts +15 -8
- package/esm/typings/src/version.d.ts +1 -1
- package/package.json +2 -2
- package/umd/index.umd.js +237 -95
- package/umd/index.umd.js.map +1 -1
package/umd/index.umd.js
CHANGED
@@ -25,7 +25,7 @@
      * @generated
      * @see https://github.com/webgptorg/promptbook
      */
-    const PROMPTBOOK_ENGINE_VERSION = '0.101.0-14';
+    const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
     /**
      * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
      * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2018,6 +2018,62 @@
      * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
      */
 
+    /**
+     * Parses an OpenAI error message to identify which parameter is unsupported
+     *
+     * @param errorMessage The error message from OpenAI API
+     * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+     * @private utility of LLM Tools
+     */
+    function parseUnsupportedParameterError(errorMessage) {
+        // Pattern to match "Unsupported value: 'parameter' does not support ..."
+        const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+        if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+            return unsupportedValueMatch[1];
+        }
+        // Pattern to match "'parameter' of type ... is not supported with this model"
+        const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+        if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+            return parameterTypeMatch[1];
+        }
+        return null;
+    }
+    /**
+     * Creates a copy of model requirements with the specified parameter removed
+     *
+     * @param modelRequirements Original model requirements
+     * @param unsupportedParameter The parameter to remove
+     * @returns New model requirements without the unsupported parameter
+     * @private utility of LLM Tools
+     */
+    function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+        const newRequirements = { ...modelRequirements };
+        // Map of parameter names that might appear in error messages to ModelRequirements properties
+        const parameterMap = {
+            'temperature': 'temperature',
+            'max_tokens': 'maxTokens',
+            'maxTokens': 'maxTokens',
+            'seed': 'seed',
+        };
+        const propertyToRemove = parameterMap[unsupportedParameter];
+        if (propertyToRemove && propertyToRemove in newRequirements) {
+            delete newRequirements[propertyToRemove];
+        }
+        return newRequirements;
+    }
+    /**
+     * Checks if an error is an "Unsupported value" error from OpenAI
+     * @param error The error to check
+     * @returns true if this is an unsupported parameter error
+     * @private utility of LLM Tools
+     */
+    function isUnsupportedParameterError(error) {
+        const errorMessage = error.message.toLowerCase();
+        return errorMessage.includes('unsupported value:') ||
+            errorMessage.includes('is not supported with this model') ||
+            errorMessage.includes('does not support');
+    }
+
     /**
      * Execution Tools for calling OpenAI API or other OpenAI compatible provider
      *
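Note: the three helpers added above drive a strip-and-retry flow for parameters that a given model rejects (for example, reasoning models that only accept the default `temperature`). Below is a minimal sketch of how they compose, assuming the bundle-private helpers are in scope; the sample error text mimics OpenAI's wording and the model name is illustrative:

```ts
// Illustrative sketch only; exercises the three helpers shown in the hunk above.
const error = new Error(
    "Unsupported value: 'temperature' does not support 0.7 with this model. " +
        'Only the default (1) value is supported.',
);

if (isUnsupportedParameterError(error)) {
    const parameter = parseUnsupportedParameterError(error.message); // -> 'temperature'
    if (parameter !== null) {
        const requirements = { modelVariant: 'CHAT', modelName: 'o1', temperature: 0.7 };
        const stripped = removeUnsupportedModelRequirement(requirements, parameter);
        console.log(stripped); // -> { modelVariant: 'CHAT', modelName: 'o1' }, temperature removed
    }
}
```

Note that `removeUnsupportedModelRequirement` only maps `temperature`, `max_tokens`/`maxTokens`, and `seed`; an error naming any other parameter falls through the map, so the retry proceeds once with unchanged requirements and the second rejection is then rethrown.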
@@ -2035,6 +2091,10 @@
              * OpenAI API client.
              */
             this.client = null;
+            /**
+             * Tracks models and parameters that have already been retried to prevent infinite loops
+             */
+            this.retriedUnsupportedParameters = new Set();
             // TODO: Allow configuring rate limits via options
             this.limiter = new Bottleneck__default["default"]({
                 minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
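Design note: the new guard set lives on the execution-tools instance rather than on the individual call, so each model/parameter pair is stripped and retried at most once per instance lifetime. A micro-sketch of the key scheme (the model name is a hypothetical example):

```ts
// Retry-guard keys are plain `${modelName}-${parameter}` strings.
const retriedUnsupportedParameters = new Set<string>();

const retryKey = 'gpt-4o-temperature'; // Hypothetical model/parameter pair
if (!retriedUnsupportedParameters.has(retryKey)) {
    retriedUnsupportedParameters.add(retryKey); // First rejection: strip the parameter and retry
}
else {
    // Second rejection of the same pair: the original error is rethrown instead of looping
}
```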
@@ -2096,21 +2156,27 @@
          * Calls OpenAI compatible API to use a chat model.
          */
         async callChatModel(prompt) {
+            return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+        }
+        /**
+         * Internal method that handles parameter retry for chat model calls
+         */
+        async callChatModelWithRetry(prompt, currentModelRequirements) {
             var _a;
             if (this.options.isVerbose) {
-                console.info(`💬 ${this.title} callChatModel call`, { prompt });
+                console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
             }
-            const { content, parameters,
+            const { content, parameters, format } = prompt;
             const client = await this.getClient();
             // TODO: [☂] Use here more modelRequirements
-            if (
+            if (currentModelRequirements.modelVariant !== 'CHAT') {
                 throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
             }
-            const modelName =
+            const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
             const modelSettings = {
                 model: modelName,
-                max_tokens:
-                temperature:
+                max_tokens: currentModelRequirements.maxTokens,
+                temperature: currentModelRequirements.temperature,
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                 // <- Note: [🧆]
             }; // <- TODO: [💩] Guard here types better
@@ -2125,12 +2191,12 @@
             const rawRequest = {
                 ...modelSettings,
                 messages: [
-                    ...(
+                    ...(currentModelRequirements.systemMessage === undefined
                         ? []
                         : [
                               {
                                   role: 'system',
-                                  content:
+                                  content: currentModelRequirements.systemMessage,
                               },
                           ]),
                     {
@@ -2144,69 +2210,110 @@
             if (this.options.isVerbose) {
                 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
             }
-
-
-
-
+            try {
+                const rawResponse = await this.limiter
+                    .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                    .catch((error) => {
+                    assertsError(error);
+                    if (this.options.isVerbose) {
+                        console.info(colors__default["default"].bgRed('error'), error);
+                    }
+                    throw error;
+                });
                 if (this.options.isVerbose) {
-                    console.info(colors__default["default"].
+                    console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                 }
-
-
-
-
-
-
-
-
-
-
-
-
+                const complete = $getCurrentDate();
+                if (!rawResponse.choices[0]) {
+                    throw new PipelineExecutionError(`No choises from ${this.title}`);
+                }
+                if (rawResponse.choices.length > 1) {
+                    // TODO: This should be maybe only warning
+                    throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+                }
+                const resultContent = rawResponse.choices[0].message.content;
+                const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+                if (resultContent === null) {
+                    throw new PipelineExecutionError(`No response message from ${this.title}`);
+                }
+                return exportJson({
+                    name: 'promptResult',
+                    message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                    order: [],
+                    value: {
+                        content: resultContent,
+                        modelName: rawResponse.model || modelName,
+                        timing: {
+                            start,
+                            complete,
+                        },
+                        usage,
+                        rawPromptContent,
+                        rawRequest,
+                        rawResponse,
+                        // <- [🗯]
+                    },
+                });
             }
-
-
-
-
+            catch (error) {
+                assertsError(error);
+                // Check if this is an unsupported parameter error
+                if (!isUnsupportedParameterError(error)) {
+                    throw error;
+                }
+                // Parse which parameter is unsupported
+                const unsupportedParameter = parseUnsupportedParameterError(error.message);
+                if (!unsupportedParameter) {
+                    if (this.options.isVerbose) {
+                        console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                    }
+                    throw error;
+                }
+                // Create a unique key for this model + parameter combination to prevent infinite loops
+                const retryKey = `${modelName}-${unsupportedParameter}`;
+                if (this.retriedUnsupportedParameters.has(retryKey)) {
+                    // Already retried this parameter, throw the error
+                    if (this.options.isVerbose) {
+                        console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                    }
+                    throw error;
+                }
+                // Mark this parameter as retried
+                this.retriedUnsupportedParameters.add(retryKey);
+                // Log warning in verbose mode
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+                }
+                // Remove the unsupported parameter and retry
+                const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+                return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
             }
-            return exportJson({
-                name: 'promptResult',
-                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-                order: [],
-                value: {
-                    content: resultContent,
-                    modelName: rawResponse.model || modelName,
-                    timing: {
-                        start,
-                        complete,
-                    },
-                    usage,
-                    rawPromptContent,
-                    rawRequest,
-                    rawResponse,
-                    // <- [🗯]
-                },
-            });
         }
         /**
          * Calls OpenAI API to use a complete model.
          */
         async callCompletionModel(prompt) {
+            return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+        }
+        /**
+         * Internal method that handles parameter retry for completion model calls
+         */
+        async callCompletionModelWithRetry(prompt, currentModelRequirements) {
             var _a;
             if (this.options.isVerbose) {
-                console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+                console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
             }
-            const { content, parameters
+            const { content, parameters } = prompt;
             const client = await this.getClient();
             // TODO: [☂] Use here more modelRequirements
-            if (
+            if (currentModelRequirements.modelVariant !== 'COMPLETION') {
                 throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
             }
-            const modelName =
+            const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
             const modelSettings = {
                 model: modelName,
-                max_tokens:
-                temperature:
+                max_tokens: currentModelRequirements.maxTokens,
+                temperature: currentModelRequirements.temperature,
                 // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
                 // <- Note: [🧆]
             };
@@ -2220,46 +2327,81 @@
             if (this.options.isVerbose) {
                 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
             }
-
-
-
-
+            try {
+                const rawResponse = await this.limiter
+                    .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+                    .catch((error) => {
+                    assertsError(error);
+                    if (this.options.isVerbose) {
+                        console.info(colors__default["default"].bgRed('error'), error);
+                    }
+                    throw error;
+                });
                 if (this.options.isVerbose) {
-                    console.info(colors__default["default"].
+                    console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
                 }
-
-
-
-
-
-
-
-
+                const complete = $getCurrentDate();
+                if (!rawResponse.choices[0]) {
+                    throw new PipelineExecutionError(`No choises from ${this.title}`);
+                }
+                if (rawResponse.choices.length > 1) {
+                    // TODO: This should be maybe only warning
+                    throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+                }
+                const resultContent = rawResponse.choices[0].text;
+                const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+                return exportJson({
+                    name: 'promptResult',
+                    message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+                    order: [],
+                    value: {
+                        content: resultContent,
+                        modelName: rawResponse.model || modelName,
+                        timing: {
+                            start,
+                            complete,
+                        },
+                        usage,
+                        rawPromptContent,
+                        rawRequest,
+                        rawResponse,
+                        // <- [🗯]
+                    },
+                });
             }
-
-
-
+            catch (error) {
+                assertsError(error);
+                // Check if this is an unsupported parameter error
+                if (!isUnsupportedParameterError(error)) {
+                    throw error;
+                }
+                // Parse which parameter is unsupported
+                const unsupportedParameter = parseUnsupportedParameterError(error.message);
+                if (!unsupportedParameter) {
+                    if (this.options.isVerbose) {
+                        console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                    }
+                    throw error;
+                }
+                // Create a unique key for this model + parameter combination to prevent infinite loops
+                const retryKey = `${modelName}-${unsupportedParameter}`;
+                if (this.retriedUnsupportedParameters.has(retryKey)) {
+                    // Already retried this parameter, throw the error
+                    if (this.options.isVerbose) {
+                        console.warn(colors__default["default"].bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+                    }
+                    throw error;
+                }
+                // Mark this parameter as retried
+                this.retriedUnsupportedParameters.add(retryKey);
+                // Log warning in verbose mode
+                if (this.options.isVerbose) {
+                    console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+                }
+                // Remove the unsupported parameter and retry
+                const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+                return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
             }
-            const resultContent = rawResponse.choices[0].text;
-            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-            return exportJson({
-                name: 'promptResult',
-                message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
-                order: [],
-                value: {
-                    content: resultContent,
-                    modelName: rawResponse.model || modelName,
-                    timing: {
-                        start,
-                        complete,
-                    },
-                    usage,
-                    rawPromptContent,
-                    rawRequest,
-                    rawResponse,
-                    // <- [🗯]
-                },
-            });
         }
         /**
          * Calls OpenAI compatible API to use a embedding model
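The chat and completion paths now share the same control flow: try the request once, and on an unsupported-parameter rejection strip the offending requirement and recurse. A condensed sketch of that shape, with `callOnce` as a hypothetical stand-in for one raw API invocation (the real methods inline this logic per model variant and keep the guard set on the instance, as shown earlier):

```ts
// Condensed sketch of the strip-and-retry flow in callChatModelWithRetry /
// callCompletionModelWithRetry; assumes the bundle-private helpers are in scope.
async function callWithParameterRetry(
    callOnce: (requirements: Record<string, unknown>) => Promise<unknown>,
    requirements: Record<string, unknown>,
    modelName: string,
    retried = new Set<string>(),
): Promise<unknown> {
    try {
        return await callOnce(requirements);
    } catch (error) {
        if (!(error instanceof Error) || !isUnsupportedParameterError(error)) {
            throw error; // Not an unsupported-parameter rejection, propagate as-is
        }
        const parameter = parseUnsupportedParameterError(error.message);
        const retryKey = `${modelName}-${parameter}`;
        if (parameter === null || retried.has(retryKey)) {
            throw error; // Unparseable, or this model/parameter pair was already retried
        }
        retried.add(retryKey);
        // Retry with the rejected requirement removed
        return callWithParameterRetry(
            callOnce,
            removeUnsupportedModelRequirement(requirements, parameter),
            modelName,
            retried,
        );
    }
}
```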
@@ -2285,7 +2427,7 @@
                 console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
             }
             const rawResponse = await this.limiter
-                .schedule(() => this.
+                .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
                 .catch((error) => {
                 assertsError(error);
                 if (this.options.isVerbose) {
@@ -2347,7 +2489,7 @@
         /**
          * Makes a request with retry logic for network errors like ECONNRESET
          */
-        async
+        async makeRequestWithNetworkRetry(requestFn) {
            let lastError;
            for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
                try {
@@ -2359,8 +2501,8 @@
                     // Check if this is a retryable network error
                     const isRetryableError = this.isRetryableNetworkError(error);
                     if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
-                        if (this.options.isVerbose) {
-                            console.info(colors__default["default"].bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+                        if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+                            console.info(colors__default["default"].bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
                         }
                         throw error;
                     }
@@ -2370,7 +2512,7 @@
                     const jitterDelay = Math.random() * 500; // Add some randomness
                     const totalDelay = backoffDelay + jitterDelay;
                     if (this.options.isVerbose) {
-                        console.info(colors__default["default"].bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+                        console.info(colors__default["default"].bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
                     }
                     // Wait before retrying
                     await new Promise((resolve) => setTimeout(resolve, totalDelay));