@promptbook/pdf 0.104.0-3 → 0.104.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -24,7 +24,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
24
24
  * @generated
25
25
  * @see https://github.com/webgptorg/promptbook
26
26
  */
27
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
27
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-5';
28
28
  /**
29
29
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
30
30
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3885,6 +3885,15 @@ function countUsage(llmTools) {
3885
3885
  return promptResult;
3886
3886
  };
3887
3887
  }
3888
+ if (llmTools.callImageGenerationModel !== undefined) {
3889
+ proxyTools.callImageGenerationModel = async (prompt) => {
3890
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
3891
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
3892
+ totalUsage = addUsage(totalUsage, promptResult.usage);
3893
+ spending.next(promptResult.usage);
3894
+ return promptResult;
3895
+ };
3896
+ }
3888
3897
  // <- Note: [🤖]
3889
3898
  return proxyTools;
3890
3899
  }
@@ -3994,6 +4003,12 @@ class MultipleLlmExecutionTools {
3994
4003
  callEmbeddingModel(prompt) {
3995
4004
  return this.callCommonModel(prompt);
3996
4005
  }
4006
+ /**
4007
  * Calls the best available image generation model
4008
+ */
4009
+ callImageGenerationModel(prompt) {
4010
+ return this.callCommonModel(prompt);
4011
+ }
3997
4012
  // <- Note: [🤖]
3998
4013
  /**
3999
4014
  * Calls the best available model
@@ -4020,6 +4035,11 @@ class MultipleLlmExecutionTools {
4020
4035
  continue llm;
4021
4036
  }
4022
4037
  return await llmExecutionTools.callEmbeddingModel(prompt);
4038
+ case 'IMAGE_GENERATION':
4039
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
4040
+ continue llm;
4041
+ }
4042
+ return await llmExecutionTools.callImageGenerationModel(prompt);
4023
4043
  // <- case [🤖]:
4024
4044
  default:
4025
4045
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6154,8 +6174,9 @@ async function executeAttempts(options) {
6154
6174
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
6155
6175
  break variant;
6156
6176
  case 'EMBEDDING':
6177
+ case 'IMAGE_GENERATION':
6157
6178
  throw new PipelineExecutionError(spaceTrim$1((block) => `
6158
- Embedding model can not be used in pipeline
6179
+ ${modelRequirements.modelVariant} model can not be used in pipeline
6159
6180
 
6160
6181
  This should be caught during parsing
6161
6182