@promptbook/ollama 0.104.0 → 0.105.0-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -213,6 +213,12 @@ export declare const DEFAULT_INTERMEDIATE_FILES_STRATEGY: IntermediateFilesStrat
   * @public exported from `@promptbook/core`
   */
  export declare const DEFAULT_MAX_PARALLEL_COUNT = 5;
+ /**
+  * The maximum depth to which recursion can occur
+  *
+  * @public exported from `@promptbook/core`
+  */
+ export declare const DEFAULT_MAX_RECURSION = 10;
  /**
   * The maximum number of attempts to execute LLM task before giving up
   *
@@ -4,4 +4,4 @@ import type { ErrorJson } from './ErrorJson';
   *
   * @public exported from `@promptbook/utils`
   */
- export declare function deserializeError(error: ErrorJson): Error;
+ export declare function deserializeError(error: ErrorJson, isStackAddedToMessage?: boolean): Error;
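`deserializeError` gains an optional `isStackAddedToMessage` flag. A minimal sketch of the new call shape; the flag's behavior (folding the serialized stack into the message, as the name suggests) is inferred from the name and not confirmed by this diff, and the exact `ErrorJson` shape lives in `./ErrorJson`:

```ts
import { deserializeError } from '@promptbook/utils';

// Hypothetical serialized error; check `ErrorJson` for the exact required fields
const errorJson = {
    name: 'PipelineExecutionError',
    message: 'Something went wrong',
    stack: 'PipelineExecutionError: Something went wrong\n    at run (...)',
} as Parameters<typeof deserializeError>[0];

const error = deserializeError(errorJson); // <- same behavior as in 0.104.0
const verboseError = deserializeError(errorJson, true); // <- assumed: stack included in message
```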
@@ -1,4 +1,5 @@
  import type { string_date_iso8601, string_model_name, string_prompt } from '../types/typeAliases';
+ import type { TODO_any } from '../utils/organization/TODO_any';
  import type { TODO_object } from '../utils/organization/TODO_object';
  import type { EmbeddingVector } from './EmbeddingVector';
  import type { Usage } from './Usage';
@@ -21,7 +22,29 @@ export type CompletionPromptResult = CommonPromptResult;
   *
   * Note: [🚉] This is fully serializable as JSON
   */
- export type ChatPromptResult = CommonPromptResult & {};
+ export type ChatPromptResult = CommonPromptResult & {
+     /**
+      * Optional tool calls made during the execution
+      */
+     readonly toolCalls?: ReadonlyArray<{
+         /**
+          * Name of the tool
+          */
+         readonly name: string;
+         /**
+          * Arguments of the tool call
+          */
+         readonly arguments: string;
+         /**
+          * Result of the tool call
+          */
+         readonly result: string;
+         /**
+          * Raw tool call from the model
+          */
+         readonly rawToolCall: TODO_any;
+     }>;
+ };
  /**
   * Image prompt result
   *
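Consumers can now inspect which tools the model invoked during a chat call. A sketch of reading the new field; `chatResult` stands in for the value returned by a chat model call, and the `ChatPromptResult` import location is an assumption:

```ts
import type { ChatPromptResult } from '@promptbook/types'; // <- assumed re-export location

declare const chatResult: ChatPromptResult; // e.g. the value returned by `callChatModel`

for (const toolCall of chatResult.toolCalls ?? []) {
    console.info(`tool ${toolCall.name}(${toolCall.arguments}) -> ${toolCall.result}`);
}
```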
@@ -1,5 +1,6 @@
  import type { ClientOptions } from 'openai';
  import type { CommonToolsOptions } from '../../execution/CommonToolsOptions';
+ import type { ExecutionTools } from '../../execution/ExecutionTools';
  import type { RemoteClientOptions } from '../../remote-server/types/RemoteClientOptions';
  /**
   * Options for `createOpenAiCompatibleExecutionTools` and `OpenAiCompatibleExecutionTools`
@@ -28,6 +29,10 @@ export type OpenAiCompatibleExecutionToolsNonProxiedOptions = CommonToolsOptions
       * @example 'https://api.deepseek.com/v1' (DeepSeek)
       */
      baseURL?: string;
+     /**
+      * Tools for executing the scripts
+      */
+     readonly executionTools?: Pick<ExecutionTools, 'script'>;
      isProxied?: false;
  };
  /**
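The new `executionTools` option is what lets the chat loop in the UMD bundle below actually run tool calls. A wiring sketch, assuming `JavascriptExecutionTools` from `@promptbook/execute-javascript` as the script executor and an Ollama endpoint; the import path and option names beyond those visible in this hunk are guesses:

```ts
import { createOpenAiCompatibleExecutionTools } from '@promptbook/openai'; // <- assumed import path
import { JavascriptExecutionTools } from '@promptbook/execute-javascript';

const llmTools = createOpenAiCompatibleExecutionTools({
    baseURL: 'http://localhost:11434/v1', // <- Ollama's OpenAI-compatible endpoint
    isProxied: false,
    executionTools: {
        script: new JavascriptExecutionTools(), // <- used to execute the model's tool calls
    },
});
```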
@@ -16,5 +16,12 @@ export type LlmToolDefinition = {
      /**
       * Parameters of the tool in JSON Schema format
       */
-     readonly parameters: Record<string, unknown>;
+     readonly parameters: {
+         readonly type: 'object';
+         readonly properties: Record<string, {
+             type: string;
+             description?: string;
+         }>;
+         readonly required?: string[];
+     };
  };
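A tool definition matching the narrowed `parameters` shape; the `name` and `description` members are assumed from the part of `LlmToolDefinition` outside this hunk, and `getWeather` is a hypothetical tool:

```ts
const getWeatherTool = {
    name: 'getWeather',
    description: 'Look up the current weather for a city',
    parameters: {
        type: 'object' as const, // <- now required to be exactly 'object'
        properties: {
            city: { type: 'string', description: 'Name of the city' },
        },
        required: ['city'],
    },
};
```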
@@ -1,6 +1,7 @@
  import type { FormatCommand } from '../commands/FORMAT/FormatCommand';
  import type { ChatMessage } from '../book-components/Chat/types/ChatMessage';
  import type { Expectations } from '../pipeline/PipelineJson/Expectations';
+ import type { LlmToolDefinition } from './LlmToolDefinition';
  import type { ChatModelRequirements } from './ModelRequirements';
  import type { CompletionModelRequirements } from './ModelRequirements';
  import type { EmbeddingModelRequirements } from './ModelRequirements';
@@ -44,6 +45,18 @@ export type ChatPrompt = CommonPrompt & {
       * Optional chat thread (history of previous messages)
       */
      thread?: ChatMessage[];
+     /**
+      * Optional file attachments
+      */
+     attachments?: Array<{
+         name: string;
+         type: string;
+         url: string;
+     }>;
+     /**
+      * Optional tools that can be called by the model
+      */
+     tools?: Array<LlmToolDefinition>;
  };
  /**
   * Image prompt
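A sketch of a `ChatPrompt` using both new fields; members inherited from `CommonPrompt` beyond `content` and `modelRequirements` may also be required, which this hunk does not show:

```ts
const chatPrompt = {
    content: 'Summarize the attached report, then check the weather in Prague.',
    modelRequirements: { modelVariant: 'CHAT' as const },
    attachments: [
        { name: 'report.pdf', type: 'application/pdf', url: 'https://example.com/report.pdf' },
    ],
    tools: [getWeatherTool], // <- the `LlmToolDefinition` sketch above
};
```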
@@ -10,7 +10,7 @@
   *
   * @public exported from `@promptbook/utils`
   */
- export declare function parseNumber(value: string | number): number;
+ export declare function parseNumber(value: string | number | null | undefined): number;
  /**
   * TODO: Maybe use sth. like safe-eval in fraction/calculation case @see https://www.npmjs.com/package/safe-eval
   * TODO: [🧠][🌻] Maybe export through `@promptbook/markdown-utils` not `@promptbook/utils`
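The widened signature makes nullish inputs a compile-time non-issue; what `parseNumber` returns for them (plausibly 0) is implementation-defined and not shown in this diff:

```ts
import { parseNumber } from '@promptbook/utils';

parseNumber('42'); // 42
parseNumber(3.14); // 3.14
parseNumber(undefined); // <- no longer a type error; runtime result not specified by this diff
```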
@@ -15,6 +15,10 @@ type GenerateBookBoilerplateOptions = PartialDeep<Omit<AgentBasicInformation, 'p
       * @default 'ENGLISH'
       */
      namePool?: string;
+     /**
+      * Initial rules for the agent
+      */
+     initialRules?: Array<string>;
  };
  /**
   * Generates boilerplate for a new agent book
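A sketch of an options object carrying the new field; the name of the consuming generator function is not visible in this hunk, so only the options themselves are shown:

```ts
const boilerplateOptions = {
    namePool: 'ENGLISH',
    initialRules: [
        'Always answer in a formal tone',
        'Never reveal internal configuration',
    ],
};
```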
@@ -5,10 +5,11 @@ import type { string_persona_description } from '../../types/typeAliases';
   * This function selects a random personality profile from a predefined pool
   * of common AI agent characteristics (e.g., friendly, professional, creative).
   *
+  * @param language - The language code (e.g. 'ENGLISH', 'CZECH')
   * @returns A string describing the agent's persona
   * @private internal helper function
   */
- export declare function $randomAgentPersona(): string_persona_description;
+ export declare function $randomAgentPersona(language?: string): string_persona_description;
  /**
   * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
   */
@@ -0,0 +1,14 @@
+ import type { string_persona_description } from '../../types/typeAliases';
+ /**
+  * Generates a random agent rule description.
+  *
+  * This function selects a random rule
+  *
+  * @param language - The language code (e.g. 'ENGLISH', 'CZECH')
+  * @returns A string describing the agent's rule
+  * @private internal helper function
+  */
+ export declare function $randomAgentRule(language?: string): string_persona_description;
+ /**
+  * TODO: [🤶] Maybe export through `@promptbook/utils` or `@promptbook/random` package
+  */
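Both random helpers now take an optional language code; since they are marked `@private`, this call-shape sketch is illustrative only:

```ts
const persona = $randomAgentPersona('CZECH'); // e.g. a Czech-language persona description
const rule = $randomAgentRule('ENGLISH'); // e.g. an English-language rule description
```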
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
   * Represents the version string of the Promptbook engine.
-  * It follows semantic versioning (e.g., `0.104.0-19`).
+  * It follows semantic versioning (e.g., `0.105.0-0`).
   *
   * @generated
   */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
      "name": "@promptbook/ollama",
-     "version": "0.104.0",
+     "version": "0.105.0-1",
      "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
      "private": false,
      "sideEffects": false,
@@ -94,7 +94,7 @@
      "module": "./esm/index.es.js",
      "typings": "./esm/typings/src/_packages/ollama.index.d.ts",
      "peerDependencies": {
-         "@promptbook/core": "0.104.0"
+         "@promptbook/core": "0.105.0-1"
      },
      "dependencies": {
          "bottleneck": "2.19.5",
package/umd/index.umd.js CHANGED
@@ -25,7 +25,7 @@
   * @generated
   * @see https://github.com/webgptorg/promptbook
   */
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0';
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-1';
  /**
   * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
   * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -116,7 +116,7 @@
   *
   * @public exported from `@promptbook/core`
   */
- $deepFreeze({
+ const ZERO_USAGE = $deepFreeze({
      price: ZERO_VALUE,
      input: {
          tokensCount: ZERO_VALUE,
@@ -2865,6 +2865,82 @@
      return replacedTemplates;
  }

+ /**
+  * Function `addUsage` will add multiple usages into one
+  *
+  * Note: If you provide 0 values, it returns ZERO_USAGE
+  *
+  * @public exported from `@promptbook/core`
+  */
+ function addUsage(...usageItems) {
+     return usageItems.reduce((acc, item) => {
+         var _a;
+         acc.price.value += ((_a = item.price) === null || _a === void 0 ? void 0 : _a.value) || 0;
+         for (const key of Object.keys(acc.input)) {
+             // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+             //@ts-ignore
+             if (item.input[key]) {
+                 // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                 //@ts-ignore
+                 acc.input[key].value += item.input[key].value || 0;
+                 // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                 //@ts-ignore
+                 if (item.input[key].isUncertain) {
+                     // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                     //@ts-ignore
+                     acc.input[key].isUncertain = true;
+                 }
+             }
+         }
+         for (const key of Object.keys(acc.output)) {
+             // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+             //@ts-ignore
+             if (item.output[key]) {
+                 // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                 //@ts-ignore
+                 acc.output[key].value += item.output[key].value || 0;
+                 // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                 //@ts-ignore
+                 if (item.output[key].isUncertain) {
+                     // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                     //@ts-ignore
+                     acc.output[key].isUncertain = true;
+                 }
+             }
+         }
+         return acc;
+     }, deepClone(ZERO_USAGE));
+ }
+
+ /**
+  * Async version of Array.forEach
+  *
+  * @param array - Array to iterate over
+  * @param options - Options for the function
+  * @param callbackfunction - Function to call for each item
+  * @public exported from `@promptbook/utils`
+  * @deprecated [🪂] Use queues instead
+  */
+ async function forEachAsync(array, options, callbackfunction) {
+     const { maxParallelCount = Infinity } = options;
+     let index = 0;
+     let runningTasks = [];
+     const tasks = [];
+     for (const item of array) {
+         const currentIndex = index++;
+         const task = callbackfunction(item, currentIndex, array);
+         tasks.push(task);
+         runningTasks.push(task);
+         /* not await */ Promise.resolve(task).then(() => {
+             runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
+         });
+         if (maxParallelCount < runningTasks.length) {
+             await Promise.race(runningTasks);
+         }
+     }
+     await Promise.all(tasks);
+ }
+
  /**
   * Maps Promptbook tools to OpenAI tools.
   *
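Per their doc comments, both helpers are public exports, so they can be exercised directly; `ZERO_USAGE` is the frozen constant named a few hunks above:

```ts
import { addUsage, ZERO_USAGE } from '@promptbook/core';
import { forEachAsync } from '@promptbook/utils';

// Summing usage reports; with no items the result equals a clone of ZERO_USAGE
const total = addUsage(ZERO_USAGE, ZERO_USAGE);

// Deprecated in favor of queues, but exported; concurrency is bounded via maxParallelCount
await forEachAsync([1, 2, 3], { maxParallelCount: 2 }, async (item, index) => {
    console.info(`processing item ${item} at index ${index}`);
});
```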
@@ -3060,83 +3136,180 @@
          content: msg.content,
      }));
  }
- const rawRequest = {
-     ...modelSettings,
-     messages: [
-         ...(currentModelRequirements.systemMessage === undefined
-             ? []
-             : [
-                 {
-                     role: 'system',
-                     content: currentModelRequirements.systemMessage,
-                 },
-             ]),
-         ...threadMessages,
-         {
-             role: 'user',
-             content: rawPromptContent,
-         },
-     ],
-     user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
-     tools: currentModelRequirements.tools === undefined
-         ? undefined
-         : mapToolsToOpenAi(currentModelRequirements.tools),
+ const messages = [
+     ...(currentModelRequirements.systemMessage === undefined
+         ? []
+         : [
+             {
+                 role: 'system',
+                 content: currentModelRequirements.systemMessage,
+             },
+         ]),
+     ...threadMessages,
+     {
+         role: 'user',
+         content: rawPromptContent,
+     },
+ ];
+ let totalUsage = {
+     price: uncertainNumber(0),
+     input: {
+         tokensCount: uncertainNumber(0),
+         charactersCount: uncertainNumber(0),
+         wordsCount: uncertainNumber(0),
+         sentencesCount: uncertainNumber(0),
+         linesCount: uncertainNumber(0),
+         paragraphsCount: uncertainNumber(0),
+         pagesCount: uncertainNumber(0),
+     },
+     output: {
+         tokensCount: uncertainNumber(0),
+         charactersCount: uncertainNumber(0),
+         wordsCount: uncertainNumber(0),
+         sentencesCount: uncertainNumber(0),
+         linesCount: uncertainNumber(0),
+         paragraphsCount: uncertainNumber(0),
+         pagesCount: uncertainNumber(0),
+     },
  };
+ const toolCalls = [];
  const start = $getCurrentDate();
- if (this.options.isVerbose) {
-     console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
- }
- try {
-     const rawResponse = await this.limiter
-         .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
-         .catch((error) => {
-             assertsError(error);
-             if (this.options.isVerbose) {
-                 console.info(colors__default["default"].bgRed('error'), error);
-             }
-             throw error;
-         });
+ const tools = 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : currentModelRequirements.tools;
+ let isLooping = true;
+ while (isLooping) {
+     const rawRequest = {
+         ...modelSettings,
+         messages,
+         user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+         tools: tools === undefined ? undefined : mapToolsToOpenAi(tools),
+     };
      if (this.options.isVerbose) {
-         console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+         console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
      }
-     const complete = $getCurrentDate();
-     if (!rawResponse.choices[0]) {
-         throw new PipelineExecutionError(`No choises from ${this.title}`);
-     }
-     if (rawResponse.choices.length > 1) {
-         // TODO: This should be maybe only warning
-         throw new PipelineExecutionError(`More than one choise from ${this.title}`);
-     }
-     const resultContent = rawResponse.choices[0].message.content;
-     const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-     if (resultContent === null) {
-         throw new PipelineExecutionError(`No response message from ${this.title}`);
-     }
-     return exportJson({
-         name: 'promptResult',
-         message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-         order: [],
-         value: {
-             content: resultContent,
-             modelName: rawResponse.model || modelName,
-             timing: {
-                 start,
-                 complete,
+     try {
+         const rawResponse = await this.limiter
+             .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+             .catch((error) => {
+                 assertsError(error);
+                 if (this.options.isVerbose) {
+                     console.info(colors__default["default"].bgRed('error'), error);
+                 }
+                 throw error;
+             });
+         if (this.options.isVerbose) {
+             console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+         }
+         if (!rawResponse.choices[0]) {
+             throw new PipelineExecutionError(`No choises from ${this.title}`);
+         }
+         const responseMessage = rawResponse.choices[0].message;
+         messages.push(responseMessage);
+         const usage = this.computeUsage(content || '', responseMessage.content || '', rawResponse);
+         totalUsage = addUsage(totalUsage, usage);
+         if (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
+             await forEachAsync(responseMessage.tool_calls, {}, async (toolCall) => {
+                 const functionName = toolCall.function.name;
+                 const functionArgs = toolCall.function.arguments;
+                 const executionTools = this.options
+                     .executionTools;
+                 if (!executionTools || !executionTools.script) {
+                     throw new PipelineExecutionError(`Model requested tool '${functionName}' but no executionTools.script were provided in OpenAiCompatibleExecutionTools options`);
+                 }
+                 // TODO: [DRY] Use some common tool caller
+                 const scriptTools = Array.isArray(executionTools.script)
+                     ? executionTools.script
+                     : [executionTools.script];
+                 let functionResponse;
+                 try {
+                     const scriptTool = scriptTools[0]; // <- TODO: [🧠] Which script tool to use?
+                     functionResponse = await scriptTool.execute({
+                         scriptLanguage: 'javascript',
+                         script: `
+                             const args = ${functionArgs};
+                             return await ${functionName}(args);
+                         `,
+                         parameters: {}, // <- TODO: [🧠] What parameters to pass?
+                     });
+                 }
+                 catch (error) {
+                     assertsError(error);
+                     functionResponse = `Error: ${error.message}`;
+                 }
+                 messages.push({
+                     role: 'tool',
+                     tool_call_id: toolCall.id,
+                     content: functionResponse,
+                 });
+                 toolCalls.push({
+                     name: functionName,
+                     arguments: functionArgs,
+                     result: functionResponse,
+                     rawToolCall: toolCall,
+                 });
+             });
+             continue;
+         }
+         const complete = $getCurrentDate();
+         const resultContent = responseMessage.content;
+         if (resultContent === null) {
+             throw new PipelineExecutionError(`No response message from ${this.title}`);
+         }
+         isLooping = false;
+         return exportJson({
+             name: 'promptResult',
+             message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+             order: [],
+             value: {
+                 content: resultContent,
+                 modelName: rawResponse.model || modelName,
+                 timing: {
+                     start,
+                     complete,
+                 },
+                 usage: totalUsage,
+                 toolCalls,
+                 rawPromptContent,
+                 rawRequest,
+                 rawResponse,
              },
-             usage,
-             rawPromptContent,
-             rawRequest,
-             rawResponse,
-             // <- [🗯]
-         },
-     });
- }
- catch (error) {
-     assertsError(error);
-     // Check if this is an unsupported parameter error
-     if (!isUnsupportedParameterError(error)) {
-         // If we have attemptStack, include it in the error message
-         if (attemptStack.length > 0) {
+         });
+     }
+     catch (error) {
+         isLooping = false;
+         assertsError(error);
+         // Check if this is an unsupported parameter error
+         if (!isUnsupportedParameterError(error)) {
+             // If we have attemptStack, include it in the error message
+             if (attemptStack.length > 0) {
+                 throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+                     attemptStack
+                         .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
+                             (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+                             `, Error: ${a.errorMessage}` +
+                             (a.stripped ? ' (stripped and retried)' : ''))
+                         .join('\n') +
+                     `\nFinal error: ${error.message}`);
+             }
+             throw error;
+         }
+         // Parse which parameter is unsupported
+         const unsupportedParameter = parseUnsupportedParameterError(error.message);
+         if (!unsupportedParameter) {
+             if (this.options.isVerbose) {
+                 console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+             }
+             throw error;
+         }
+         // Create a unique key for this model + parameter combination to prevent infinite loops
+         const retryKey = `${modelName}-${unsupportedParameter}`;
+         if (retriedUnsupportedParameters.has(retryKey)) {
+             // Already retried this parameter, throw the error with attemptStack
+             attemptStack.push({
+                 modelName,
+                 unsupportedParameter,
+                 errorMessage: error.message,
+                 stripped: true,
+             });
              throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                  attemptStack
                      .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
@@ -3146,52 +3319,25 @@
                      .join('\n') +
                  `\nFinal error: ${error.message}`);
          }
-         throw error;
-     }
-     // Parse which parameter is unsupported
-     const unsupportedParameter = parseUnsupportedParameterError(error.message);
-     if (!unsupportedParameter) {
+         // Mark this parameter as retried
+         retriedUnsupportedParameters.add(retryKey);
+         // Log warning in verbose mode
          if (this.options.isVerbose) {
-             console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+             console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
          }
-         throw error;
-     }
-     // Create a unique key for this model + parameter combination to prevent infinite loops
-     const retryKey = `${modelName}-${unsupportedParameter}`;
-     if (retriedUnsupportedParameters.has(retryKey)) {
-         // Already retried this parameter, throw the error with attemptStack
+         // Add to attemptStack
          attemptStack.push({
             modelName,
             unsupportedParameter,
             errorMessage: error.message,
             stripped: true,
         });
-         throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
-             attemptStack
-                 .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
-                     (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
-                     `, Error: ${a.errorMessage}` +
-                     (a.stripped ? ' (stripped and retried)' : ''))
-                 .join('\n') +
-             `\nFinal error: ${error.message}`);
-     }
-     // Mark this parameter as retried
-     retriedUnsupportedParameters.add(retryKey);
-     // Log warning in verbose mode
-     if (this.options.isVerbose) {
-         console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+         // Remove the unsupported parameter and retry
+         const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+         return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
      }
-     // Add to attemptStack
-     attemptStack.push({
-         modelName,
-         unsupportedParameter,
-         errorMessage: error.message,
-         stripped: true,
-     });
-     // Remove the unsupported parameter and retry
-     const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
-     return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
  }
+ throw new PipelineExecutionError(`Tool calling loop did not return a result from ${this.title}`);
  }
  /**
   * Calls OpenAI API to use a complete model.
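Taken together, the bundle changes turn `callChatModel` into a tool-calling loop: each response containing `tool_calls` is executed through `executionTools.script`, the results are appended as `role: 'tool'` messages, the model is queried again, and usage is accumulated across iterations with `addUsage`. An end-to-end sketch under the same assumptions as the earlier snippets (import paths and prompt members not visible in this diff are guesses):

```ts
import { createOpenAiCompatibleExecutionTools } from '@promptbook/openai'; // <- assumed import path
import { JavascriptExecutionTools } from '@promptbook/execute-javascript';

const llmTools = createOpenAiCompatibleExecutionTools({
    baseURL: 'http://localhost:11434/v1', // <- Ollama's OpenAI-compatible endpoint
    isProxied: false,
    executionTools: { script: new JavascriptExecutionTools() },
});

const result = await llmTools.callChatModel({
    content: 'What is the weather in Prague?',
    modelRequirements: { modelVariant: 'CHAT' as const },
    tools: [getWeatherTool], // <- the `LlmToolDefinition` sketch above
});

console.info(result.content); // final answer, produced after the tool loop settles
console.info(result.usage); // accumulated via addUsage across loop iterations
console.info(result.toolCalls); // each executed tool with its arguments and result
```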