@promptbook/openai 0.104.0 → 0.105.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -27,6 +27,10 @@ Turn your company's scattered knowledge into AI ready Books
 
 
 
+<blockquote style="color: #ff8811">
+<b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please look at <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a>.
+</blockquote>
+
 ## 📦 Package `@promptbook/openai`
 
 - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -19,7 +19,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0';
+const PROMPTBOOK_ENGINE_VERSION = '0.105.0-0';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -182,7 +182,7 @@ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
  *
  * @public exported from `@promptbook/core`
  */
-$deepFreeze({
+const ZERO_USAGE = $deepFreeze({
     price: ZERO_VALUE,
     input: {
         tokensCount: ZERO_VALUE,
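
Note: this hunk binds the previously anonymous deep-frozen zero-usage object to the name `ZERO_USAGE`, which the new `addUsage` helper (added later in this diff) clones as its reducer seed. A minimal sketch of why the clone matters, assuming `$deepFreeze` recursively freezes as its name suggests:

```js
// Sketch only: ZERO_USAGE is deep-frozen, so it must be cloned before mutation.
const acc = deepClone(ZERO_USAGE); // mutable working copy, as in addUsage below
acc.price.value += 0.001;          // fine on the clone
// ZERO_USAGE.price.value += 0.001; // would silently fail, or throw in strict mode
```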
@@ -2901,6 +2901,82 @@ resultContent, rawResponse) {
  * TODO: [🤝] DRY Maybe some common abstraction between `computeOpenAiUsage` and `computeAnthropicClaudeUsage`
  */
 
+/**
+ * Function `addUsage` will add multiple usages into one
+ *
+ * Note: If you provide 0 values, it returns ZERO_USAGE
+ *
+ * @public exported from `@promptbook/core`
+ */
+function addUsage(...usageItems) {
+    return usageItems.reduce((acc, item) => {
+        var _a;
+        acc.price.value += ((_a = item.price) === null || _a === void 0 ? void 0 : _a.value) || 0;
+        for (const key of Object.keys(acc.input)) {
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            //@ts-ignore
+            if (item.input[key]) {
+                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                //@ts-ignore
+                acc.input[key].value += item.input[key].value || 0;
+                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                //@ts-ignore
+                if (item.input[key].isUncertain) {
+                    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                    //@ts-ignore
+                    acc.input[key].isUncertain = true;
+                }
+            }
+        }
+        for (const key of Object.keys(acc.output)) {
+            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+            //@ts-ignore
+            if (item.output[key]) {
+                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                //@ts-ignore
+                acc.output[key].value += item.output[key].value || 0;
+                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                //@ts-ignore
+                if (item.output[key].isUncertain) {
+                    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+                    //@ts-ignore
+                    acc.output[key].isUncertain = true;
+                }
+            }
+        }
+        return acc;
+    }, deepClone(ZERO_USAGE));
+}
+
+/**
+ * Async version of Array.forEach
+ *
+ * @param array - Array to iterate over
+ * @param options - Options for the function
+ * @param callbackfunction - Function to call for each item
+ * @public exported from `@promptbook/utils`
+ * @deprecated [🪂] Use queues instead
+ */
+async function forEachAsync(array, options, callbackfunction) {
+    const { maxParallelCount = Infinity } = options;
+    let index = 0;
+    let runningTasks = [];
+    const tasks = [];
+    for (const item of array) {
+        const currentIndex = index++;
+        const task = callbackfunction(item, currentIndex, array);
+        tasks.push(task);
+        runningTasks.push(task);
+        /* not await */ Promise.resolve(task).then(() => {
+            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
+        });
+        if (maxParallelCount < runningTasks.length) {
+            await Promise.race(runningTasks);
+        }
+    }
+    await Promise.all(tasks);
+}
+
 /**
  * Maps Promptbook tools to OpenAI tools.
  *
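
Both helpers above are newly inlined into this bundle: `addUsage` folds per-call usage reports into a running total, and `forEachAsync` is a bounded-concurrency `forEach`. A hedged usage sketch, where the usage shapes follow `ZERO_USAGE` and `fetchOneCompletion` is a hypothetical worker:

```js
// Summing usages: fields missing from an item are skipped, and isUncertain is
// sticky for the input/output counters (note: not for price, per the code above).
const total = addUsage(
    { price: { value: 0.002 }, input: { tokensCount: { value: 120, isUncertain: true } }, output: { tokensCount: { value: 45 } } },
    { price: { value: 0.003 }, input: { tokensCount: { value: 180 } }, output: { tokensCount: { value: 55 } } },
);
// total.price.value === 0.005
// total.input.tokensCount → { value: 300, isUncertain: true }

// Bounded concurrency: the length check runs after pushing the new task, so up
// to maxParallelCount + 1 callbacks can briefly be in flight.
await forEachAsync(prompts, { maxParallelCount: 2 }, async (prompt, index) => {
    await fetchOneCompletion(prompt); // hypothetical worker
});
```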
@@ -3096,83 +3172,180 @@ class OpenAiCompatibleExecutionTools {
                 content: msg.content,
             }));
         }
-        const rawRequest = {
-            ...modelSettings,
-            messages: [
-                ...(currentModelRequirements.systemMessage === undefined
-                    ? []
-                    : [
-                          {
-                              role: 'system',
-                              content: currentModelRequirements.systemMessage,
-                          },
-                      ]),
-                ...threadMessages,
-                {
-                    role: 'user',
-                    content: rawPromptContent,
-                },
-            ],
-            user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
-            tools: currentModelRequirements.tools === undefined
-                ? undefined
-                : mapToolsToOpenAi(currentModelRequirements.tools),
+        const messages = [
+            ...(currentModelRequirements.systemMessage === undefined
+                ? []
+                : [
+                      {
+                          role: 'system',
+                          content: currentModelRequirements.systemMessage,
+                      },
+                  ]),
+            ...threadMessages,
+            {
+                role: 'user',
+                content: rawPromptContent,
+            },
+        ];
+        let totalUsage = {
+            price: uncertainNumber(0),
+            input: {
+                tokensCount: uncertainNumber(0),
+                charactersCount: uncertainNumber(0),
+                wordsCount: uncertainNumber(0),
+                sentencesCount: uncertainNumber(0),
+                linesCount: uncertainNumber(0),
+                paragraphsCount: uncertainNumber(0),
+                pagesCount: uncertainNumber(0),
+            },
+            output: {
+                tokensCount: uncertainNumber(0),
+                charactersCount: uncertainNumber(0),
+                wordsCount: uncertainNumber(0),
+                sentencesCount: uncertainNumber(0),
+                linesCount: uncertainNumber(0),
+                paragraphsCount: uncertainNumber(0),
+                pagesCount: uncertainNumber(0),
+            },
         };
+        const toolCalls = [];
         const start = $getCurrentDate();
-        if (this.options.isVerbose) {
-            console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
-        }
-        try {
-            const rawResponse = await this.limiter
-                .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
-                .catch((error) => {
-                assertsError(error);
-                if (this.options.isVerbose) {
-                    console.info(colors.bgRed('error'), error);
-                }
-                throw error;
-            });
+        const tools = 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : currentModelRequirements.tools;
+        let isLooping = true;
+        while (isLooping) {
+            const rawRequest = {
+                ...modelSettings,
+                messages,
+                user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+                tools: tools === undefined ? undefined : mapToolsToOpenAi(tools),
+            };
             if (this.options.isVerbose) {
-                console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+                console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
             }
-            const complete = $getCurrentDate();
-            if (!rawResponse.choices[0]) {
-                throw new PipelineExecutionError(`No choises from ${this.title}`);
-            }
-            if (rawResponse.choices.length > 1) {
-                // TODO: This should be maybe only warning
-                throw new PipelineExecutionError(`More than one choise from ${this.title}`);
-            }
-            const resultContent = rawResponse.choices[0].message.content;
-            const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
-            if (resultContent === null) {
-                throw new PipelineExecutionError(`No response message from ${this.title}`);
-            }
-            return exportJson({
-                name: 'promptResult',
-                message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
-                order: [],
-                value: {
-                    content: resultContent,
-                    modelName: rawResponse.model || modelName,
-                    timing: {
-                        start,
-                        complete,
+            try {
+                const rawResponse = await this.limiter
+                    .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
+                    .catch((error) => {
+                    assertsError(error);
+                    if (this.options.isVerbose) {
+                        console.info(colors.bgRed('error'), error);
+                    }
+                    throw error;
+                });
+                if (this.options.isVerbose) {
+                    console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+                }
+                if (!rawResponse.choices[0]) {
+                    throw new PipelineExecutionError(`No choises from ${this.title}`);
+                }
+                const responseMessage = rawResponse.choices[0].message;
+                messages.push(responseMessage);
+                const usage = this.computeUsage(content || '', responseMessage.content || '', rawResponse);
+                totalUsage = addUsage(totalUsage, usage);
+                if (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
+                    await forEachAsync(responseMessage.tool_calls, {}, async (toolCall) => {
+                        const functionName = toolCall.function.name;
+                        const functionArgs = toolCall.function.arguments;
+                        const executionTools = this.options
+                            .executionTools;
+                        if (!executionTools || !executionTools.script) {
+                            throw new PipelineExecutionError(`Model requested tool '${functionName}' but no executionTools.script were provided in OpenAiCompatibleExecutionTools options`);
+                        }
+                        // TODO: [DRY] Use some common tool caller
+                        const scriptTools = Array.isArray(executionTools.script)
+                            ? executionTools.script
+                            : [executionTools.script];
+                        let functionResponse;
+                        try {
+                            const scriptTool = scriptTools[0]; // <- TODO: [🧠] Which script tool to use?
+                            functionResponse = await scriptTool.execute({
+                                scriptLanguage: 'javascript',
+                                script: `
+                                    const args = ${functionArgs};
+                                    return await ${functionName}(args);
+                                `,
+                                parameters: {}, // <- TODO: [🧠] What parameters to pass?
+                            });
+                        }
+                        catch (error) {
+                            assertsError(error);
+                            functionResponse = `Error: ${error.message}`;
+                        }
+                        messages.push({
+                            role: 'tool',
+                            tool_call_id: toolCall.id,
+                            content: functionResponse,
+                        });
+                        toolCalls.push({
+                            name: functionName,
+                            arguments: functionArgs,
+                            result: functionResponse,
+                            rawToolCall: toolCall,
+                        });
+                    });
+                    continue;
+                }
+                const complete = $getCurrentDate();
+                const resultContent = responseMessage.content;
+                if (resultContent === null) {
+                    throw new PipelineExecutionError(`No response message from ${this.title}`);
+                }
+                isLooping = false;
+                return exportJson({
+                    name: 'promptResult',
+                    message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
+                    order: [],
+                    value: {
+                        content: resultContent,
+                        modelName: rawResponse.model || modelName,
+                        timing: {
+                            start,
+                            complete,
+                        },
+                        usage: totalUsage,
+                        toolCalls,
+                        rawPromptContent,
+                        rawRequest,
+                        rawResponse,
                     },
-                    usage,
-                    rawPromptContent,
-                    rawRequest,
-                    rawResponse,
-                    // <- [🗯]
-                },
-            });
-        }
-        catch (error) {
-            assertsError(error);
-            // Check if this is an unsupported parameter error
-            if (!isUnsupportedParameterError(error)) {
-                // If we have attemptStack, include it in the error message
-                if (attemptStack.length > 0) {
+                });
+            }
+            catch (error) {
+                isLooping = false;
+                assertsError(error);
+                // Check if this is an unsupported parameter error
+                if (!isUnsupportedParameterError(error)) {
+                    // If we have attemptStack, include it in the error message
+                    if (attemptStack.length > 0) {
+                        throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+                            attemptStack
+                                .map((a, i) => `  ${i + 1}. Model: ${a.modelName}` +
+                                    (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+                                    `, Error: ${a.errorMessage}` +
+                                    (a.stripped ? ' (stripped and retried)' : ''))
+                                .join('\n') +
+                            `\nFinal error: ${error.message}`);
+                    }
+                    throw error;
+                }
+                // Parse which parameter is unsupported
+                const unsupportedParameter = parseUnsupportedParameterError(error.message);
+                if (!unsupportedParameter) {
+                    if (this.options.isVerbose) {
+                        console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                    }
+                    throw error;
+                }
+                // Create a unique key for this model + parameter combination to prevent infinite loops
+                const retryKey = `${modelName}-${unsupportedParameter}`;
+                if (retriedUnsupportedParameters.has(retryKey)) {
+                    // Already retried this parameter, throw the error with attemptStack
+                    attemptStack.push({
+                        modelName,
+                        unsupportedParameter,
+                        errorMessage: error.message,
+                        stripped: true,
+                    });
                     throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
                         attemptStack
                             .map((a, i) => `  ${i + 1}. Model: ${a.modelName}` +
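
The net effect of this hunk: `callChatModel` is now an agentic loop rather than a single request/response exchange. When the model answers with `tool_calls`, each call is executed through the configured `executionTools.script`, the result is appended as a `role: 'tool'` message, and the loop re-queries the model; a plain-content answer ends the loop. A condensed, hedged sketch of the control flow (retry handling and verbose logging omitted; `client`, `scriptTool`, `modelSettings`, and `tools` stand in for the bundle's locals):

```js
// Condensed sketch of the new control flow; not the verbatim bundle code.
async function chatWithTools(client, scriptTool, modelSettings, messages, tools) {
    while (true) {
        const rawResponse = await client.chat.completions.create({ ...modelSettings, messages, tools });
        const responseMessage = rawResponse.choices[0].message;
        messages.push(responseMessage); // keep the assistant turn in the thread

        if (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
            for (const toolCall of responseMessage.tool_calls) {
                // The bundle builds a tiny script from the model-provided function
                // name and JSON arguments and runs it via the script tool:
                const result = await scriptTool.execute({
                    scriptLanguage: 'javascript',
                    script: `const args = ${toolCall.function.arguments}; return await ${toolCall.function.name}(args);`,
                    parameters: {},
                });
                messages.push({ role: 'tool', tool_call_id: toolCall.id, content: result });
            }
            continue; // ask the model again, now with the tool outputs in context
        }

        return responseMessage.content; // plain content ends the loop
    }
}
```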
@@ -3182,52 +3355,25 @@ class OpenAiCompatibleExecutionTools {
                                 .join('\n') +
                             `\nFinal error: ${error.message}`);
                 }
-                throw error;
-            }
-            // Parse which parameter is unsupported
-            const unsupportedParameter = parseUnsupportedParameterError(error.message);
-            if (!unsupportedParameter) {
+                // Mark this parameter as retried
+                retriedUnsupportedParameters.add(retryKey);
+                // Log warning in verbose mode
                 if (this.options.isVerbose) {
-                    console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+                    console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
                 }
-                throw error;
-            }
-            // Create a unique key for this model + parameter combination to prevent infinite loops
-            const retryKey = `${modelName}-${unsupportedParameter}`;
-            if (retriedUnsupportedParameters.has(retryKey)) {
-                // Already retried this parameter, throw the error with attemptStack
+                // Add to attemptStack
                 attemptStack.push({
                     modelName,
                     unsupportedParameter,
                     errorMessage: error.message,
                     stripped: true,
                 });
-                throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
-                    attemptStack
-                        .map((a, i) => `  ${i + 1}. Model: ${a.modelName}` +
-                            (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
-                            `, Error: ${a.errorMessage}` +
-                            (a.stripped ? ' (stripped and retried)' : ''))
-                        .join('\n') +
-                    `\nFinal error: ${error.message}`);
-            }
-            // Mark this parameter as retried
-            retriedUnsupportedParameters.add(retryKey);
-            // Log warning in verbose mode
-            if (this.options.isVerbose) {
-                console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+                // Remove the unsupported parameter and retry
+                const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+                return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
             }
-            // Add to attemptStack
-            attemptStack.push({
-                modelName,
-                unsupportedParameter,
-                errorMessage: error.message,
-                stripped: true,
-            });
-            // Remove the unsupported parameter and retry
-            const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
-            return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
         }
+        throw new PipelineExecutionError(`Tool calling loop did not return a result from ${this.title}`);
     }
     /**
      * Calls OpenAI API to use a complete model.
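
This hunk is mostly a re-indent of the unsupported-parameter retry logic into the new loop's `catch` block; its substance is unchanged. The one genuinely new line is the trailing safety net after the `while (isLooping)` loop, which fails loudly instead of returning `undefined` if the loop ever falls through. A hedged, self-contained sketch of the retry-once-per-parameter pattern this hunk preserves (`callModel` and `requirements.modelName` are stand-ins for the bundle's internals; the parse and strip helpers are the bundle's own):

```js
// Hedged sketch of the strip-and-retry guard kept by this hunk.
async function callWithParameterStripping(callModel, requirements, retried = new Set()) {
    try {
        return await callModel(requirements);
    }
    catch (error) {
        const unsupportedParameter = parseUnsupportedParameterError(error.message);
        const retryKey = `${requirements.modelName}-${unsupportedParameter}`;
        if (!unsupportedParameter || retried.has(retryKey)) {
            throw error; // nothing to strip, or already stripped once: give up
        }
        retried.add(retryKey); // prevents an infinite strip-and-retry loop
        const modified = removeUnsupportedModelRequirement(requirements, unsupportedParameter);
        return callWithParameterStripping(callModel, modified, retried);
    }
}
```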
@@ -4543,7 +4689,7 @@ const ALL_ERRORS = {
  *
  * @public exported from `@promptbook/utils`
  */
-function deserializeError(error) {
+function deserializeError(error, isStackAddedToMessage = true) {
     const { name, stack, id } = error; // Added id
     let { message } = error;
     let ErrorClass = ALL_ERRORS[error.name];
@@ -4551,7 +4697,7 @@ function deserializeError(error) {
         ErrorClass = Error;
         message = `${name}: ${message}`;
     }
-    if (stack !== undefined && stack !== '') {
+    if (isStackAddedToMessage && stack !== undefined && stack !== '') {
         message = spaceTrim$2((block) => `
             ${block(message)}
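
Finally, `deserializeError` gains an `isStackAddedToMessage` flag. It defaults to `true`, so existing callers keep the old behavior of folding the serialized stack into the message; passing `false` leaves the message untouched. A hedged usage sketch, with a serialized-error shape assumed from the destructuring above:

```js
// Assumed serialized shape, per `const { name, stack, id } = error` above.
const serialized = {
    name: 'PipelineExecutionError',
    message: 'Pipeline failed',
    stack: 'PipelineExecutionError: Pipeline failed\n    at run (...)',
};

const verbose = deserializeError(serialized);        // message includes the stack (old default)
const compact = deserializeError(serialized, false); // message left as-is
```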