@promptbook/wizard 0.104.0-3 → 0.104.0-5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
-const PROMPTBOOK_ENGINE_VERSION = '0.104.0-3';
+const PROMPTBOOK_ENGINE_VERSION = '0.104.0-5';
 /**
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2526,6 +2526,7 @@ class RemoteLlmExecutionTools {
     }
 }
 /**
+ * TODO: !!!! Deprecate pipeline server and all of its components
  * TODO: Maybe use `$exportJson`
  * TODO: [🧠][🛍] Maybe not `isAnonymous: boolean` BUT `mode: 'ANONYMOUS'|'COLLECTION'`
  * TODO: [🍓] Allow to list compatible models with each variant
@@ -9417,6 +9418,15 @@ function countUsage(llmTools) {
             return promptResult;
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            // console.info('[🚕] callImageGenerationModel through countTotalUsage');
+            const promptResult = await llmTools.callImageGenerationModel(prompt);
+            totalUsage = addUsage(totalUsage, promptResult.usage);
+            spending.next(promptResult.usage);
+            return promptResult;
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
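The hunk above extends the usage-counting proxy in countUsage so that image generation calls are metered the same way as chat and embedding calls: the wrapped method is forwarded, and its reported usage is folded into the running total and pushed to the spending stream. The following standalone sketch illustrates the pattern; the names countUsageSketch and getCallCount are hypothetical and the usage accounting is simplified (the real code accumulates structured usage via addUsage):

// Minimal standalone sketch of the metering-proxy pattern used above.
// Hypothetical names; not the Promptbook API.
function countUsageSketch(llmTools) {
    let callCount = 0; // simplified stand-in for the structured `totalUsage` object
    const proxyTools = { ...llmTools };
    if (llmTools.callImageGenerationModel !== undefined) {
        proxyTools.callImageGenerationModel = async (prompt) => {
            const promptResult = await llmTools.callImageGenerationModel(prompt);
            callCount += 1; // real code: totalUsage = addUsage(totalUsage, promptResult.usage)
            return promptResult;
        };
    }
    proxyTools.getCallCount = () => callCount; // hypothetical accessor, for the sketch only
    return proxyTools;
}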
@@ -9526,6 +9536,12 @@ class MultipleLlmExecutionTools {
     callEmbeddingModel(prompt) {
         return this.callCommonModel(prompt);
     }
+    /**
+     * Calls the best available image generation model
+     */
+    callImageGenerationModel(prompt) {
+        return this.callCommonModel(prompt);
+    }
     // <- Note: [🤖]
     /**
      * Calls the best available model
@@ -9552,6 +9568,11 @@ class MultipleLlmExecutionTools {
                     continue llm;
                 }
                 return await llmExecutionTools.callEmbeddingModel(prompt);
+            case 'IMAGE_GENERATION':
+                if (llmExecutionTools.callImageGenerationModel === undefined) {
+                    continue llm;
+                }
+                return await llmExecutionTools.callImageGenerationModel(prompt);
             // <- case [🤖]:
             default:
                 throw new UnexpectedError(`Unknown model variant "${prompt.modelRequirements.modelVariant}" in ${llmExecutionTools.title}`);
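These two hunks teach MultipleLlmExecutionTools the new IMAGE_GENERATION variant: the public method delegates to callCommonModel, and the dispatch loop skips any underlying tools object that lacks callImageGenerationModel via a labeled continue, falling through to the next provider. A stripped-down sketch of that fallback dispatch, with hypothetical names and only the IMAGE_GENERATION case shown:

// Sketch of the labeled-continue fallback: try each tools object in order
// and skip the ones that cannot serve the requested variant.
async function callFirstCapable(toolsList, prompt) {
    llm: for (const tools of toolsList) {
        switch (prompt.modelRequirements.modelVariant) {
            case 'IMAGE_GENERATION':
                if (tools.callImageGenerationModel === undefined) {
                    continue llm; // this provider cannot generate images; try the next one
                }
                return await tools.callImageGenerationModel(prompt);
            default:
                throw new Error(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
        }
    }
    throw new Error('No underlying tools implement the requested model variant');
}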
@@ -11255,8 +11276,9 @@ async function executeAttempts(options) {
                 $ongoingTaskResult.$resultString = $ongoingTaskResult.$completionResult.content;
                 break variant;
             case 'EMBEDDING':
+            case 'IMAGE_GENERATION':
                 throw new PipelineExecutionError(spaceTrim$1((block) => `
-                    Embedding model can not be used in pipeline
+                    ${modelRequirements.modelVariant} model can not be used in pipeline

                     This should be catched during parsing

@@ -19109,6 +19131,9 @@ function cacheLlmTools(llmTools, options = {}) {
             case 'EMBEDDING':
                 promptResult = await llmTools.callEmbeddingModel(prompt);
                 break variant;
+            case 'IMAGE_GENERATION':
+                promptResult = await llmTools.callImageGenerationModel(prompt);
+                break variant;
             // <- case [🤖]:
             default:
                 throw new PipelineExecutionError(`Unknown model variant "${prompt.modelRequirements.modelVariant}"`);
@@ -19145,12 +19170,13 @@ function cacheLlmTools(llmTools, options = {}) {
                 }
             }
             catch (error) {
+                assertsError(error);
                 // If validation throws an unexpected error, don't cache
                 shouldCache = false;
                 if (isVerbose) {
                     console.info('Not caching result due to validation error for key:', key, {
                         content: promptResult.content,
-                        validationError: error instanceof Error ? error.message : String(error),
+                        validationError: serializeError(error),
                     });
                 }
             }
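Besides the new model variant, this hunk hardens the cache's error path: assertsError presumably narrows the caught value to a real Error before it is used, and serializeError replaces the ad-hoc `error instanceof Error ? error.message : String(error)` expression with a structured representation. A rough sketch of what such helpers typically do; this is assumed behavior inferred from the call sites, not the actual Promptbook implementations:

// Assumed-behavior sketches of the two helpers, inferred from their call sites.
function assertsErrorSketch(whatWasThrown) {
    // Narrow an unknown thrown value to Error; reject anything else loudly.
    if (!(whatWasThrown instanceof Error)) {
        throw new TypeError(`Expected an Error, but caught: ${String(whatWasThrown)}`);
    }
}
function serializeErrorSketch(error) {
    // Flatten an Error into a plain, JSON-serializable object for logging.
    return { name: error.name, message: error.message, stack: error.stack };
}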
@@ -19196,6 +19222,11 @@ function cacheLlmTools(llmTools, options = {}) {
             return /* not await */ callCommonModel(prompt);
         };
     }
+    if (llmTools.callImageGenerationModel !== undefined) {
+        proxyTools.callImageGenerationModel = async (prompt) => {
+            return /* not await */ callCommonModel(prompt);
+        };
+    }
     // <- Note: [🤖]
     return proxyTools;
 }
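Finally, the caching proxy gains the same conditional passthrough as countUsage: if the wrapped tools expose callImageGenerationModel, the call is routed through the shared callCommonModel path, which consults the cache before hitting the model (per the variant switch in the earlier hunk). A minimal sketch of the idea, with all names hypothetical and only the image-generation path shown; the real callCommonModel dispatches on the model variant and derives a stable cache key:

// Hypothetical sketch of the conditional cache passthrough above.
function cacheLlmToolsSketch(llmTools) {
    const cache = new Map();
    const callCommonModel = async (prompt) => {
        const key = JSON.stringify(prompt); // real code derives a stable cache key
        if (!cache.has(key)) {
            cache.set(key, await llmTools.callImageGenerationModel(prompt));
        }
        return cache.get(key);
    };
    const proxyTools = { ...llmTools };
    if (llmTools.callImageGenerationModel !== undefined) {
        proxyTools.callImageGenerationModel = async (prompt) => {
            return /* not await */ callCommonModel(prompt);
        };
    }
    return proxyTools;
}

// Usage (composing with the metering sketch shown earlier):
// const tools = countUsageSketch(cacheLlmToolsSketch(rawTools));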