@promptbook/website-crawler 0.104.0-2 → 0.104.0-4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -27,7 +27,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
27
27
  * @generated
28
28
  * @see https://github.com/webgptorg/promptbook
29
29
  */
30
- const PROMPTBOOK_ENGINE_VERSION = '0.104.0-2';
30
+ const PROMPTBOOK_ENGINE_VERSION = '0.104.0-4';
31
31
  /**
32
32
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
33
33
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -4004,6 +4004,15 @@ function countUsage(llmTools) {
4004
4004
  return promptResult;
4005
4005
  };
4006
4006
  }
4007
+ if (llmTools.callImageGenerationModel !== undefined) {
4008
+ proxyTools.callImageGenerationModel = async (prompt) => {
4009
+ // console.info('[🚕] callImageGenerationModel through countTotalUsage');
4010
+ const promptResult = await llmTools.callImageGenerationModel(prompt);
4011
+ totalUsage = addUsage(totalUsage, promptResult.usage);
4012
+ spending.next(promptResult.usage);
4013
+ return promptResult;
4014
+ };
4015
+ }
4007
4016
  // <- Note: [🤖]
4008
4017
  return proxyTools;
4009
4018
  }
@@ -4113,6 +4122,12 @@ class MultipleLlmExecutionTools {
4113
4122
  callEmbeddingModel(prompt) {
4114
4123
  return this.callCommonModel(prompt);
4115
4124
  }
4125
+ /**
4126
+  * Calls the best available image generation model
4127
+ */
4128
+ callImageGenerationModel(prompt) {
4129
+ return this.callCommonModel(prompt);
4130
+ }
4116
4131
  // <- Note: [🤖]
4117
4132
  /**
4118
4133
  * Calls the best available model
@@ -4139,6 +4154,11 @@ class MultipleLlmExecutionTools {
4139
4154
  continue llm;
4140
4155
  }
4141
4156
  return await llmExecutionTools.callEmbeddingModel(prompt);
4157
+ case 'IMAGE_GENERATION':
4158
+ if (llmExecutionTools.callImageGenerationModel === undefined) {
4159
+ continue llm;
4160
+ }
4161
+ return await llmExecutionTools.callImageGenerationModel(prompt);
4142
4162
  // <- case [🤖]:
4143
4163
  default:
4144
4164
  throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
@@ -6154,8 +6174,9 @@ async function executeAttempts(options) {
6154
6174
  $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
6155
6175
  break variant;
6156
6176
  case 'EMBEDDING':
6177
+ case 'IMAGE_GENERATION':
6157
6178
  throw new PipelineExecutionError(spaceTrim$1((block) => `
6158
- Embedding model can not be used in pipeline
6179
+ ${modelRequirements.modelVariant} model can not be used in pipeline
6159
6180
 
6160
6181
  This should be catched during parsing
6161
6182