@promptbook/ollama 0.104.0 → 0.105.0-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -27,6 +27,10 @@ Turn your company's scattered knowledge into AI ready Books
27
27
 
28
28
 
29
29
 
30
+ <blockquote style="color: #ff8811">
31
+ <b>⚠ Warning:</b> This is a pre-release version of the library. It is not yet ready for production use. Please use the <a href="https://www.npmjs.com/package/@promptbook/core?activeTab=versions">latest stable release</a> instead.
32
+ </blockquote>
33
+
30
34
  ## 📦 Package `@promptbook/ollama`
31
35
 
32
36
  - Promptbooks are [divided into several](#-packages) packages, all are published from [single monorepo](https://github.com/webgptorg/promptbook).
package/esm/index.es.js CHANGED
@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
18
18
  * @generated
19
19
  * @see https://github.com/webgptorg/promptbook
20
20
  */
21
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0';
21
+ const PROMPTBOOK_ENGINE_VERSION = '0.105.0-0';
22
22
  /**
23
23
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
24
24
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -109,7 +109,7 @@ const UNCERTAIN_ZERO_VALUE = $deepFreeze({ value: 0, isUncertain: true });
109
109
  *
110
110
  * @public exported from `@promptbook/core`
111
111
  */
112
- $deepFreeze({
112
+ const ZERO_USAGE = $deepFreeze({
113
113
  price: ZERO_VALUE,
114
114
  input: {
115
115
  tokensCount: ZERO_VALUE,
@@ -2858,6 +2858,82 @@ function templateParameters(template, parameters) {
2858
2858
  return replacedTemplates;
2859
2859
  }
2860
2860
 
2861
/**
 * Function `addUsage` will add multiple usages into one
 *
 * Note: If you provide 0 values, it returns ZERO_USAGE
 *
 * @public exported from `@promptbook/core`
 */
function addUsage(...usageItems) {
    // Start from a mutable copy of ZERO_USAGE and fold every item into it
    const total = deepClone(ZERO_USAGE);
    for (const usageItem of usageItems) {
        // `price` may be absent on a partial usage record
        const price = usageItem.price;
        total.price.value += (price ? price.value : 0) || 0;
        // Both metric sections (`input` and `output`) are merged the same way
        for (const section of ['input', 'output']) {
            for (const metricName of Object.keys(total[section])) {
                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
                //@ts-ignore
                const metric = usageItem[section][metricName];
                if (!metric) {
                    continue;
                }
                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
                //@ts-ignore
                total[section][metricName].value += metric.value || 0;
                // Any uncertain addend makes the whole sum uncertain
                // eslint-disable-next-line @typescript-eslint/ban-ts-comment
                //@ts-ignore
                if (metric.isUncertain) {
                    // eslint-disable-next-line @typescript-eslint/ban-ts-comment
                    //@ts-ignore
                    total[section][metricName].isUncertain = true;
                }
            }
        }
    }
    return total;
}
2907
+
2908
/**
 * Async version of Array.forEach with an optional concurrency cap
 *
 * @param array - Array to iterate over
 * @param options - Options for the function; `maxParallelCount` limits how many
 *                  callbacks may run at the same time (default `Infinity`)
 * @param callbackfunction - Function to call for each item
 * @public exported from `@promptbook/utils`
 * @deprecated [🪂] Use queues instead
 */
async function forEachAsync(array, options, callbackfunction) {
    const { maxParallelCount = Infinity } = options;
    let index = 0;
    let runningTasks = [];
    const tasks = [];
    for (const item of array) {
        // Fix: wait for a free slot BEFORE starting the next task.
        // The original checked only after the task was already launched
        // (and only when the count *exceeded* the limit), so up to
        // `maxParallelCount + 1` callbacks could run concurrently.
        if (runningTasks.length >= maxParallelCount) {
            await Promise.race(runningTasks);
        }
        const currentIndex = index++;
        const task = callbackfunction(item, currentIndex, array);
        tasks.push(task);
        runningTasks.push(task);
        /* not await */ Promise.resolve(task).then(() => {
            runningTasks = runningTasks.filter((runningTask) => runningTask !== task);
        });
    }
    // Await everything so rejections propagate and all work is finished
    await Promise.all(tasks);
}
2936
+
2861
2937
  /**
2862
2938
  * Maps Promptbook tools to OpenAI tools.
2863
2939
  *
@@ -3053,83 +3129,180 @@ class OpenAiCompatibleExecutionTools {
3053
3129
  content: msg.content,
3054
3130
  }));
3055
3131
  }
3056
- const rawRequest = {
3057
- ...modelSettings,
3058
- messages: [
3059
- ...(currentModelRequirements.systemMessage === undefined
3060
- ? []
3061
- : [
3062
- {
3063
- role: 'system',
3064
- content: currentModelRequirements.systemMessage,
3065
- },
3066
- ]),
3067
- ...threadMessages,
3068
- {
3069
- role: 'user',
3070
- content: rawPromptContent,
3071
- },
3072
- ],
3073
- user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
3074
- tools: currentModelRequirements.tools === undefined
3075
- ? undefined
3076
- : mapToolsToOpenAi(currentModelRequirements.tools),
3132
+ const messages = [
3133
+ ...(currentModelRequirements.systemMessage === undefined
3134
+ ? []
3135
+ : [
3136
+ {
3137
+ role: 'system',
3138
+ content: currentModelRequirements.systemMessage,
3139
+ },
3140
+ ]),
3141
+ ...threadMessages,
3142
+ {
3143
+ role: 'user',
3144
+ content: rawPromptContent,
3145
+ },
3146
+ ];
3147
+ let totalUsage = {
3148
+ price: uncertainNumber(0),
3149
+ input: {
3150
+ tokensCount: uncertainNumber(0),
3151
+ charactersCount: uncertainNumber(0),
3152
+ wordsCount: uncertainNumber(0),
3153
+ sentencesCount: uncertainNumber(0),
3154
+ linesCount: uncertainNumber(0),
3155
+ paragraphsCount: uncertainNumber(0),
3156
+ pagesCount: uncertainNumber(0),
3157
+ },
3158
+ output: {
3159
+ tokensCount: uncertainNumber(0),
3160
+ charactersCount: uncertainNumber(0),
3161
+ wordsCount: uncertainNumber(0),
3162
+ sentencesCount: uncertainNumber(0),
3163
+ linesCount: uncertainNumber(0),
3164
+ paragraphsCount: uncertainNumber(0),
3165
+ pagesCount: uncertainNumber(0),
3166
+ },
3077
3167
  };
3168
+ const toolCalls = [];
3078
3169
  const start = $getCurrentDate();
3079
- if (this.options.isVerbose) {
3080
- console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
3081
- }
3082
- try {
3083
- const rawResponse = await this.limiter
3084
- .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
3085
- .catch((error) => {
3086
- assertsError(error);
3087
- if (this.options.isVerbose) {
3088
- console.info(colors.bgRed('error'), error);
3089
- }
3090
- throw error;
3091
- });
3170
+ const tools = 'tools' in prompt && Array.isArray(prompt.tools) ? prompt.tools : currentModelRequirements.tools;
3171
+ let isLooping = true;
3172
+ while (isLooping) {
3173
+ const rawRequest = {
3174
+ ...modelSettings,
3175
+ messages,
3176
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
3177
+ tools: tools === undefined ? undefined : mapToolsToOpenAi(tools),
3178
+ };
3092
3179
  if (this.options.isVerbose) {
3093
- console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
3180
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
3094
3181
  }
3095
- const complete = $getCurrentDate();
3096
- if (!rawResponse.choices[0]) {
3097
- throw new PipelineExecutionError(`No choises from ${this.title}`);
3098
- }
3099
- if (rawResponse.choices.length > 1) {
3100
- // TODO: This should be maybe only warning
3101
- throw new PipelineExecutionError(`More than one choise from ${this.title}`);
3102
- }
3103
- const resultContent = rawResponse.choices[0].message.content;
3104
- const usage = this.computeUsage(content || '', resultContent || '', rawResponse);
3105
- if (resultContent === null) {
3106
- throw new PipelineExecutionError(`No response message from ${this.title}`);
3107
- }
3108
- return exportJson({
3109
- name: 'promptResult',
3110
- message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
3111
- order: [],
3112
- value: {
3113
- content: resultContent,
3114
- modelName: rawResponse.model || modelName,
3115
- timing: {
3116
- start,
3117
- complete,
3182
+ try {
3183
+ const rawResponse = await this.limiter
3184
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.chat.completions.create(rawRequest)))
3185
+ .catch((error) => {
3186
+ assertsError(error);
3187
+ if (this.options.isVerbose) {
3188
+ console.info(colors.bgRed('error'), error);
3189
+ }
3190
+ throw error;
3191
+ });
3192
+ if (this.options.isVerbose) {
3193
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
3194
+ }
3195
+ if (!rawResponse.choices[0]) {
3196
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
3197
+ }
3198
+ const responseMessage = rawResponse.choices[0].message;
3199
+ messages.push(responseMessage);
3200
+ const usage = this.computeUsage(content || '', responseMessage.content || '', rawResponse);
3201
+ totalUsage = addUsage(totalUsage, usage);
3202
+ if (responseMessage.tool_calls && responseMessage.tool_calls.length > 0) {
3203
+ await forEachAsync(responseMessage.tool_calls, {}, async (toolCall) => {
3204
+ const functionName = toolCall.function.name;
3205
+ const functionArgs = toolCall.function.arguments;
3206
+ const executionTools = this.options
3207
+ .executionTools;
3208
+ if (!executionTools || !executionTools.script) {
3209
+ throw new PipelineExecutionError(`Model requested tool '${functionName}' but no executionTools.script were provided in OpenAiCompatibleExecutionTools options`);
3210
+ }
3211
+ // TODO: [DRY] Use some common tool caller
3212
+ const scriptTools = Array.isArray(executionTools.script)
3213
+ ? executionTools.script
3214
+ : [executionTools.script];
3215
+ let functionResponse;
3216
+ try {
3217
+ const scriptTool = scriptTools[0]; // <- TODO: [🧠] Which script tool to use?
3218
+ functionResponse = await scriptTool.execute({
3219
+ scriptLanguage: 'javascript',
3220
+ script: `
3221
+ const args = ${functionArgs};
3222
+ return await ${functionName}(args);
3223
+ `,
3224
+ parameters: {}, // <- TODO: [🧠] What parameters to pass?
3225
+ });
3226
+ }
3227
+ catch (error) {
3228
+ assertsError(error);
3229
+ functionResponse = `Error: ${error.message}`;
3230
+ }
3231
+ messages.push({
3232
+ role: 'tool',
3233
+ tool_call_id: toolCall.id,
3234
+ content: functionResponse,
3235
+ });
3236
+ toolCalls.push({
3237
+ name: functionName,
3238
+ arguments: functionArgs,
3239
+ result: functionResponse,
3240
+ rawToolCall: toolCall,
3241
+ });
3242
+ });
3243
+ continue;
3244
+ }
3245
+ const complete = $getCurrentDate();
3246
+ const resultContent = responseMessage.content;
3247
+ if (resultContent === null) {
3248
+ throw new PipelineExecutionError(`No response message from ${this.title}`);
3249
+ }
3250
+ isLooping = false;
3251
+ return exportJson({
3252
+ name: 'promptResult',
3253
+ message: `Result of \`OpenAiCompatibleExecutionTools.callChatModel\``,
3254
+ order: [],
3255
+ value: {
3256
+ content: resultContent,
3257
+ modelName: rawResponse.model || modelName,
3258
+ timing: {
3259
+ start,
3260
+ complete,
3261
+ },
3262
+ usage: totalUsage,
3263
+ toolCalls,
3264
+ rawPromptContent,
3265
+ rawRequest,
3266
+ rawResponse,
3118
3267
  },
3119
- usage,
3120
- rawPromptContent,
3121
- rawRequest,
3122
- rawResponse,
3123
- // <- [🗯]
3124
- },
3125
- });
3126
- }
3127
- catch (error) {
3128
- assertsError(error);
3129
- // Check if this is an unsupported parameter error
3130
- if (!isUnsupportedParameterError(error)) {
3131
- // If we have attemptStack, include it in the error message
3132
- if (attemptStack.length > 0) {
3268
+ });
3269
+ }
3270
+ catch (error) {
3271
+ isLooping = false;
3272
+ assertsError(error);
3273
+ // Check if this is an unsupported parameter error
3274
+ if (!isUnsupportedParameterError(error)) {
3275
+ // If we have attemptStack, include it in the error message
3276
+ if (attemptStack.length > 0) {
3277
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
3278
+ attemptStack
3279
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
3280
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
3281
+ `, Error: ${a.errorMessage}` +
3282
+ (a.stripped ? ' (stripped and retried)' : ''))
3283
+ .join('\n') +
3284
+ `\nFinal error: ${error.message}`);
3285
+ }
3286
+ throw error;
3287
+ }
3288
+ // Parse which parameter is unsupported
3289
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
3290
+ if (!unsupportedParameter) {
3291
+ if (this.options.isVerbose) {
3292
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
3293
+ }
3294
+ throw error;
3295
+ }
3296
+ // Create a unique key for this model + parameter combination to prevent infinite loops
3297
+ const retryKey = `${modelName}-${unsupportedParameter}`;
3298
+ if (retriedUnsupportedParameters.has(retryKey)) {
3299
+ // Already retried this parameter, throw the error with attemptStack
3300
+ attemptStack.push({
3301
+ modelName,
3302
+ unsupportedParameter,
3303
+ errorMessage: error.message,
3304
+ stripped: true,
3305
+ });
3133
3306
  throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
3134
3307
  attemptStack
3135
3308
  .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
@@ -3139,52 +3312,25 @@ class OpenAiCompatibleExecutionTools {
3139
3312
  .join('\n') +
3140
3313
  `\nFinal error: ${error.message}`);
3141
3314
  }
3142
- throw error;
3143
- }
3144
- // Parse which parameter is unsupported
3145
- const unsupportedParameter = parseUnsupportedParameterError(error.message);
3146
- if (!unsupportedParameter) {
3315
+ // Mark this parameter as retried
3316
+ retriedUnsupportedParameters.add(retryKey);
3317
+ // Log warning in verbose mode
3147
3318
  if (this.options.isVerbose) {
3148
- console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
3319
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
3149
3320
  }
3150
- throw error;
3151
- }
3152
- // Create a unique key for this model + parameter combination to prevent infinite loops
3153
- const retryKey = `${modelName}-${unsupportedParameter}`;
3154
- if (retriedUnsupportedParameters.has(retryKey)) {
3155
- // Already retried this parameter, throw the error with attemptStack
3321
+ // Add to attemptStack
3156
3322
  attemptStack.push({
3157
3323
  modelName,
3158
3324
  unsupportedParameter,
3159
3325
  errorMessage: error.message,
3160
3326
  stripped: true,
3161
3327
  });
3162
- throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
3163
- attemptStack
3164
- .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
3165
- (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
3166
- `, Error: ${a.errorMessage}` +
3167
- (a.stripped ? ' (stripped and retried)' : ''))
3168
- .join('\n') +
3169
- `\nFinal error: ${error.message}`);
3170
- }
3171
- // Mark this parameter as retried
3172
- retriedUnsupportedParameters.add(retryKey);
3173
- // Log warning in verbose mode
3174
- if (this.options.isVerbose) {
3175
- console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
3328
+ // Remove the unsupported parameter and retry
3329
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
3330
+ return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
3176
3331
  }
3177
- // Add to attemptStack
3178
- attemptStack.push({
3179
- modelName,
3180
- unsupportedParameter,
3181
- errorMessage: error.message,
3182
- stripped: true,
3183
- });
3184
- // Remove the unsupported parameter and retry
3185
- const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
3186
- return this.callChatModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
3187
3332
  }
3333
+ throw new PipelineExecutionError(`Tool calling loop did not return a result from ${this.title}`);
3188
3334
  }
3189
3335
  /**
3190
3336
  * Calls OpenAI API to use a complete model.