@promptbook/ollama 0.101.0-13 → 0.101.0-15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
package/esm/index.es.js CHANGED
@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '1.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.101.0-13';
+ const PROMPTBOOK_ENGINE_VERSION = '0.101.0-15';
  /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -1912,6 +1912,62 @@ function templateParameters(template, parameters) {
  return replacedTemplates;
  }

+ /**
+ * Parses an OpenAI error message to identify which parameter is unsupported
+ *
+ * @param errorMessage The error message from OpenAI API
+ * @returns The parameter name that is unsupported, or null if not an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function parseUnsupportedParameterError(errorMessage) {
+ // Pattern to match "Unsupported value: 'parameter' does not support ..."
+ const unsupportedValueMatch = errorMessage.match(/Unsupported value:\s*'([^']+)'\s*does not support/i);
+ if (unsupportedValueMatch === null || unsupportedValueMatch === void 0 ? void 0 : unsupportedValueMatch[1]) {
+ return unsupportedValueMatch[1];
+ }
+ // Pattern to match "'parameter' of type ... is not supported with this model"
+ const parameterTypeMatch = errorMessage.match(/'([^']+)'\s*of type.*is not supported with this model/i);
+ if (parameterTypeMatch === null || parameterTypeMatch === void 0 ? void 0 : parameterTypeMatch[1]) {
+ return parameterTypeMatch[1];
+ }
+ return null;
+ }
+ /**
+ * Creates a copy of model requirements with the specified parameter removed
+ *
+ * @param modelRequirements Original model requirements
+ * @param unsupportedParameter The parameter to remove
+ * @returns New model requirements without the unsupported parameter
+ * @private utility of LLM Tools
+ */
+ function removeUnsupportedModelRequirement(modelRequirements, unsupportedParameter) {
+ const newRequirements = { ...modelRequirements };
+ // Map of parameter names that might appear in error messages to ModelRequirements properties
+ const parameterMap = {
+ 'temperature': 'temperature',
+ 'max_tokens': 'maxTokens',
+ 'maxTokens': 'maxTokens',
+ 'seed': 'seed',
+ };
+ const propertyToRemove = parameterMap[unsupportedParameter];
+ if (propertyToRemove && propertyToRemove in newRequirements) {
+ delete newRequirements[propertyToRemove];
+ }
+ return newRequirements;
+ }
+ /**
+ * Checks if an error is an "Unsupported value" error from OpenAI
+ * @param error The error to check
+ * @returns true if this is an unsupported parameter error
+ * @private utility of LLM Tools
+ */
+ function isUnsupportedParameterError(error) {
+ const errorMessage = error.message.toLowerCase();
+ return errorMessage.includes('unsupported value:') ||
+ errorMessage.includes('is not supported with this model') ||
+ errorMessage.includes('does not support');
+ }
+
  /**
  * Execution Tools for calling OpenAI API or other OpenAI compatible provider
  *
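The three helpers added in this hunk (`parseUnsupportedParameterError`, `removeUnsupportedModelRequirement`, `isUnsupportedParameterError`) drive the new recover-from-unsupported-parameter retry. A quick sketch of how they compose; the error message below is a hypothetical example shaped like the first regex pattern:

    // Hypothetical OpenAI-style error, matching the "Unsupported value: ..." pattern above
    const message = "Unsupported value: 'temperature' does not support 0.7 with this model.";

    parseUnsupportedParameterError(message); // -> 'temperature'
    isUnsupportedParameterError(new Error(message)); // -> true

    // Stripping the offending requirement yields a shallow copy without it
    removeUnsupportedModelRequirement({ modelVariant: 'CHAT', temperature: 0.7, maxTokens: 512 }, 'temperature');
    // -> { modelVariant: 'CHAT', maxTokens: 512 }

Note that `parameterMap` only knows `temperature`, `max_tokens`/`maxTokens`, and `seed`; for an unsupported parameter outside that map, nothing is removed, so the retry resends the same request once and then gives up via the retry-key guard shown below.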
@@ -1929,6 +1985,10 @@ class OpenAiCompatibleExecutionTools {
  * OpenAI API client.
  */
  this.client = null;
+ /**
+ * Tracks models and parameters that have already been retried to prevent infinite loops
+ */
+ this.retriedUnsupportedParameters = new Set();
  // TODO: Allow configuring rate limits via options
  this.limiter = new Bottleneck({
  minTime: 60000 / (this.options.maxRequestsPerMinute || DEFAULT_MAX_REQUESTS_PER_MINUTE),
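The new `retriedUnsupportedParameters` set records one `${modelName}-${unsupportedParameter}` key per attempted recovery, so each parameter is stripped and retried at most once per tools instance. A minimal sketch of the bookkeeping (names are hypothetical):

    const retried = new Set(); // mirrors this.retriedUnsupportedParameters
    const retryKey = 'example-model-temperature'; // `${modelName}-${unsupportedParameter}`
    if (!retried.has(retryKey)) {
        retried.add(retryKey); // first failure for this pair: strip the parameter and retry once
    } else {
        // a second failure for the same pair rethrows instead of looping forever
    }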
@@ -1990,21 +2050,27 @@ class OpenAiCompatibleExecutionTools {
  * Calls OpenAI compatible API to use a chat model.
  */
  async callChatModel(prompt) {
+ return this.callChatModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for chat model calls
+ */
+ async callChatModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`💬 ${this.title} callChatModel call`, { prompt });
+ console.info(`💬 ${this.title} callChatModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements, format } = prompt;
+ const { content, parameters, format } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'CHAT') {
+ if (currentModelRequirements.modelVariant !== 'CHAT') {
  throw new PipelineExecutionError('Use callChatModel only for CHAT variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultChatModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultChatModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  }; // <- TODO: [💩] Guard here types better
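A note on why the `delete` in `removeUnsupportedModelRequirement` is enough: after a retry, the removed requirement reads back as `undefined` here, and `undefined` properties are dropped when the request body is serialized to JSON, so the rejected parameter simply disappears from the retried request. A quick illustration (model name hypothetical):

    const requirements = { modelVariant: 'CHAT', temperature: 0.7 }; // maxTokens already stripped
    const modelSettings = {
        model: 'example-model',
        max_tokens: requirements.maxTokens, // undefined
        temperature: requirements.temperature,
    };
    JSON.stringify(modelSettings); // '{"model":"example-model","temperature":0.7}'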
@@ -2019,12 +2085,12 @@ class OpenAiCompatibleExecutionTools {
  const rawRequest = {
  ...modelSettings,
  messages: [
- ...(modelRequirements.systemMessage === undefined
+ ...(currentModelRequirements.systemMessage === undefined
  ? []
  : [
  {
  role: 'system',
- content: modelRequirements.systemMessage,
+ content: currentModelRequirements.systemMessage,
  },
  ]),
  {
@@ -2038,69 +2104,110 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.chat.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
- }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].message.content;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ if (resultContent === null) {
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
+ }
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- const resultContent = rawResponse.choices[0].message.content;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- if (resultContent === null) {
- throw new PipelineExecutionError(`No response message from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callChatModelWithRetry(prompt, modifiedModelRequirements);
  }
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI API to use a complete model.
  */
  async callCompletionModel(prompt) {
+ return this.callCompletionModelWithRetry(prompt, prompt.modelRequirements);
+ }
+ /**
+ * Internal method that handles parameter retry for completion model calls
+ */
+ async callCompletionModelWithRetry(prompt, currentModelRequirements) {
  var _a;
  if (this.options.isVerbose) {
- console.info(`🖋 ${this.title} callCompletionModel call`, { prompt });
+ console.info(`🖋 ${this.title} callCompletionModel call`, { prompt, currentModelRequirements });
  }
- const { content, parameters, modelRequirements } = prompt;
+ const { content, parameters } = prompt;
  const client = await this.getClient();
  // TODO: [☂] Use here more modelRequirements
- if (modelRequirements.modelVariant !== 'COMPLETION') {
+ if (currentModelRequirements.modelVariant !== 'COMPLETION') {
  throw new PipelineExecutionError('Use callCompletionModel only for COMPLETION variant');
  }
- const modelName = modelRequirements.modelName || this.getDefaultCompletionModel().modelName;
+ const modelName = currentModelRequirements.modelName || this.getDefaultCompletionModel().modelName;
  const modelSettings = {
  model: modelName,
- max_tokens: modelRequirements.maxTokens,
- temperature: modelRequirements.temperature,
+ max_tokens: currentModelRequirements.maxTokens,
+ temperature: currentModelRequirements.temperature,
  // <- TODO: [🈁] Use `seed` here AND/OR use is `isDeterministic` for entire execution tools
  // <- Note: [🧆]
  };
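`callChatModelWithRetry` above and `callCompletionModelWithRetry` (continued in the next hunk) share the same recover-once control flow. Stripped of logging, it is roughly this sketch, where `makeRequest` is a hypothetical stand-in for the limiter-plus-client call:

    const retried = new Set(); // mirrors this.retriedUnsupportedParameters
    async function makeRequest(requirements) { /* hypothetical stand-in for the API call */ }
    async function callWithRetry(prompt, requirements) { // simplified sketch, not the real method
        try {
            return await makeRequest(requirements);
        } catch (error) {
            if (!isUnsupportedParameterError(error)) throw error;
            const parameter = parseUnsupportedParameterError(error.message);
            if (!parameter) throw error;
            const retryKey = `${requirements.modelName}-${parameter}`;
            if (retried.has(retryKey)) throw error; // give up after one retry per model+parameter pair
            retried.add(retryKey);
            return callWithRetry(prompt, removeUnsupportedModelRequirement(requirements, parameter));
        }
    }

One subtlety: the real code keys the retry on the resolved `modelName` (which falls back to the default model), and the set lives on the instance, so the once-only guard persists across calls.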
@@ -2114,46 +2221,81 @@ class OpenAiCompatibleExecutionTools {
  if (this.options.isVerbose) {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
- const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.completions.create(rawRequest)))
- .catch((error) => {
- assertsError(error);
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.completions.create(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors.bgRed('error'), error);
+ }
+ throw error;
+ });
  if (this.options.isVerbose) {
- console.info(colors.bgRed('error'), error);
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
  }
- throw error;
- });
- if (this.options.isVerbose) {
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
- }
- const complete = $getCurrentDate();
- if (!rawResponse.choices[0]) {
- throw new PipelineExecutionError(`No choises from ${this.title}`);
+ const complete = $getCurrentDate();
+ if (!rawResponse.choices[0]) {
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
+ }
+ if (rawResponse.choices.length > 1) {
+ // TODO: This should be maybe only warning
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ }
+ const resultContent = rawResponse.choices[0].text;
+ const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: rawResponse.model || modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage,
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ // <- [🗯]
+ },
+ });
  }
- if (rawResponse.choices.length > 1) {
- // TODO: This should be maybe only warning
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
+ catch (error) {
+ assertsError(error);
+ // Check if this is an unsupported parameter error
+ if (!isUnsupportedParameterError(error)) {
+ throw error;
+ }
+ // Parse which parameter is unsupported
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ // Create a unique key for this model + parameter combination to prevent infinite loops
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (this.retriedUnsupportedParameters.has(retryKey)) {
+ // Already retried this parameter, throw the error
+ if (this.options.isVerbose) {
+ console.warn(colors.bgRed('Error'), `Parameter '${unsupportedParameter}' for model '${modelName}' already retried once, throwing error:`, error.message);
+ }
+ throw error;
+ }
+ // Mark this parameter as retried
+ this.retriedUnsupportedParameters.add(retryKey);
+ // Log warning in verbose mode
+ if (this.options.isVerbose) {
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ // Remove the unsupported parameter and retry
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callCompletionModelWithRetry(prompt, modifiedModelRequirements);
  }
- const resultContent = rawResponse.choices[0].text;
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
- return exportJson({
- name: 'promptResult',
- message: `Result of \`OpenAiCompatibleExecutionTools.callCompletionModel\``,
- order: [],
- value: {
- content: resultContent,
- modelName: rawResponse.model || modelName,
- timing: {
- start,
- complete,
- },
- usage,
- rawPromptContent,
- rawRequest,
- rawResponse,
- // <- [🗯]
- },
- });
  }
  /**
  * Calls OpenAI compatible API to use a embedding model
@@ -2179,7 +2321,7 @@ class OpenAiCompatibleExecutionTools {
  console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
  }
  const rawResponse = await this.limiter
- .schedule(() => this.makeRequestWithRetry(() => client.embeddings.create(rawRequest)))
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.embeddings.create(rawRequest)))
  .catch((error) => {
  assertsError(error);
  if (this.options.isVerbose) {
@@ -2241,7 +2383,7 @@ class OpenAiCompatibleExecutionTools {
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
- async makeRequestWithRetry(requestFn) {
+ async makeRequestWithNetworkRetry(requestFn) {
  let lastError;
  for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
  try {
@@ -2253,8 +2395,8 @@ class OpenAiCompatibleExecutionTools {
  // Check if this is a retryable network error
  const isRetryableError = this.isRetryableNetworkError(error);
  if (!isRetryableError || attempt === CONNECTION_RETRIES_LIMIT) {
- if (this.options.isVerbose) {
- console.info(colors.bgRed('Final error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
+ if (this.options.isVerbose && this.isRetryableNetworkError(error)) {
+ console.info(colors.bgRed('Final network error after retries'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}:`, error);
  }
  throw error;
  }
@@ -2264,7 +2406,7 @@ class OpenAiCompatibleExecutionTools {
  const jitterDelay = Math.random() * 500; // Add some randomness
  const totalDelay = backoffDelay + jitterDelay;
  if (this.options.isVerbose) {
- console.info(colors.bgYellow('Retrying request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
+ console.info(colors.bgYellow('Retrying network request'), `Attempt ${attempt}/${CONNECTION_RETRIES_LIMIT}, waiting ${Math.round(totalDelay)}ms:`, error.message);
  }
  // Wait before retrying
  await new Promise((resolve) => setTimeout(resolve, totalDelay));
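The `backoffDelay` referenced above is computed a few lines before this hunk, so its formula is not visible here; assuming a typical exponential shape, the retry schedule looks roughly like this sketch (base and cap are hypothetical):

    const CONNECTION_RETRIES_LIMIT = 5; // assumed value; the real constant is defined elsewhere in the file
    for (let attempt = 1; attempt <= CONNECTION_RETRIES_LIMIT; attempt++) {
        const backoffDelay = Math.min(2 ** (attempt - 1) * 1000, 30000); // hypothetical exponential backoff
        const jitterDelay = Math.random() * 500; // same jitter as in the hunk above
        console.info(`attempt ${attempt}: wait ~${Math.round(backoffDelay + jitterDelay)}ms`);
    }

The random jitter decorrelates concurrent clients: when many callers hit the same transient fault, the offset keeps their retries from landing on the API at the same instant.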