@promptbook/wizard 0.103.0-67 โ†’ 0.103.0-68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -36,7 +36,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
36
36
  * @generated
37
37
  * @see https://github.com/webgptorg/promptbook
38
38
  */
39
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-67';
39
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-68';
40
40
  /**
41
41
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
42
42
  * Note: [๐Ÿ’ž] Ignore a discrepancy between file name and entity name
@@ -3898,12 +3898,18 @@ const OPENAI_MODELS = exportJson({
3898
3898
  },
3899
3899
  },
3900
3900
  /**/
3901
- /*/
3902
- {
3903
- modelTitle: 'dall-e-3',
3904
- modelName: 'dall-e-3',
3905
- },
3906
- /**/
3901
+ /**/
3902
+ {
3903
+ modelVariant: 'IMAGE_GENERATION',
3904
+ modelTitle: 'dall-e-3',
3905
+ modelName: 'dall-e-3',
3906
+ modelDescription: 'DALLยทE 3 is the latest version of the DALLยทE art generation model. It understands significantly more nuance and detail than our previous systems, allowing you to easily translate your ideas into exceptionally accurate images.',
3907
+ pricing: {
3908
+ prompt: 0,
3909
+ output: 0.04,
3910
+ },
3911
+ },
3912
+ /**/
3907
3913
  /*/
3908
3914
  {
3909
3915
  modelTitle: 'whisper-1',
@@ -3922,12 +3928,18 @@ const OPENAI_MODELS = exportJson({
3922
3928
  },
3923
3929
  },
3924
3930
  /**/
3925
- /*/
3926
- {
3927
- modelTitle: 'dall-e-2',
3928
- modelName: 'dall-e-2',
3929
- },
3930
- /**/
3931
+ /**/
3932
+ {
3933
+ modelVariant: 'IMAGE_GENERATION',
3934
+ modelTitle: 'dall-e-2',
3935
+ modelName: 'dall-e-2',
3936
+ modelDescription: 'DALLยทE 2 is an AI system that can create realistic images and art from a description in natural language.',
3937
+ pricing: {
3938
+ prompt: 0,
3939
+ output: 0.02,
3940
+ },
3941
+ },
3942
+ /**/
3931
3943
  /**/
3932
3944
  {
3933
3945
  modelVariant: 'CHAT',
@@ -6200,6 +6212,151 @@ class OpenAiCompatibleExecutionTools {
6200
6212
  return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
6201
6213
  }
6202
6214
  }
6215
+ /**
6216
+ * Calls OpenAI compatible API to use an image generation model
6217
+ */
6218
+ async callImageGenerationModel(prompt) {
6219
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
6220
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
6221
+ const retriedUnsupportedParameters = new Set();
6222
+ return this.callImageGenerationModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
6223
+ }
6224
+ /**
6225
+ * Internal method that handles parameter retry for image generation model calls
6226
+ */
6227
+ async callImageGenerationModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
6228
+ var _a, _b;
6229
+ if (this.options.isVerbose) {
6230
+ console.info(`๐ŸŽจ ${this.title} callImageGenerationModel call`, { prompt, currentModelRequirements });
6231
+ }
6232
+ const { content, parameters } = prompt;
6233
+ const client = await this.getClient();
6234
+ // TODO: [โ˜‚] Use here more modelRequirements
6235
+ if (currentModelRequirements.modelVariant !== 'IMAGE_GENERATION') {
6236
+ throw new PipelineExecutionError('Use callImageGenerationModel only for IMAGE_GENERATION variant');
6237
+ }
6238
+ const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
6239
+ const modelSettings = {
6240
+ model: modelName,
6241
+ // size: currentModelRequirements.size,
6242
+ // quality: currentModelRequirements.quality,
6243
+ // style: currentModelRequirements.style,
6244
+ };
6245
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
6246
+ const rawRequest = {
6247
+ ...modelSettings,
6248
+ prompt: rawPromptContent,
6249
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
6250
+ response_format: 'url', // TODO: [๐Ÿง ] Maybe allow b64_json
6251
+ };
6252
+ const start = $getCurrentDate();
6253
+ if (this.options.isVerbose) {
6254
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
6255
+ }
6256
+ try {
6257
+ const rawResponse = await this.limiter
6258
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.images.generate(rawRequest)))
6259
+ .catch((error) => {
6260
+ assertsError(error);
6261
+ if (this.options.isVerbose) {
6262
+ console.info(colors.bgRed('error'), error);
6263
+ }
6264
+ throw error;
6265
+ });
6266
+ if (this.options.isVerbose) {
6267
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
6268
+ }
6269
+ const complete = $getCurrentDate();
6270
+ if (!rawResponse.data[0]) {
6271
+ throw new PipelineExecutionError(`No choices from ${this.title}`);
6272
+ }
6273
+ if (rawResponse.data.length > 1) {
6274
+ throw new PipelineExecutionError(`More than one choice from ${this.title}`);
6275
+ }
6276
+ const resultContent = rawResponse.data[0].url;
6277
+ const modelInfo = this.HARDCODED_MODELS.find((model) => model.modelName === modelName);
6278
+ const price = ((_b = modelInfo === null || modelInfo === void 0 ? void 0 : modelInfo.pricing) === null || _b === void 0 ? void 0 : _b.output) ? uncertainNumber(modelInfo.pricing.output) : uncertainNumber();
6279
+ return exportJson({
6280
+ name: 'promptResult',
6281
+ message: `Result of \`OpenAiCompatibleExecutionTools.callImageGenerationModel\``,
6282
+ order: [],
6283
+ value: {
6284
+ content: resultContent,
6285
+ modelName: modelName,
6286
+ timing: {
6287
+ start,
6288
+ complete,
6289
+ },
6290
+ usage: {
6291
+ price,
6292
+ input: {
6293
+ tokensCount: uncertainNumber(0),
6294
+ ...computeUsageCounts(rawPromptContent),
6295
+ },
6296
+ output: {
6297
+ tokensCount: uncertainNumber(0),
6298
+ ...computeUsageCounts(''),
6299
+ },
6300
+ },
6301
+ rawPromptContent,
6302
+ rawRequest,
6303
+ rawResponse,
6304
+ },
6305
+ });
6306
+ }
6307
+ catch (error) {
6308
+ assertsError(error);
6309
+ if (!isUnsupportedParameterError(error)) {
6310
+ if (attemptStack.length > 0) {
6311
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
6312
+ attemptStack
6313
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
6314
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
6315
+ `, Error: ${a.errorMessage}` +
6316
+ (a.stripped ? ' (stripped and retried)' : ''))
6317
+ .join('\n') +
6318
+ `\nFinal error: ${error.message}`);
6319
+ }
6320
+ throw error;
6321
+ }
6322
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
6323
+ if (!unsupportedParameter) {
6324
+ if (this.options.isVerbose) {
6325
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
6326
+ }
6327
+ throw error;
6328
+ }
6329
+ const retryKey = `${modelName}-${unsupportedParameter}`;
6330
+ if (retriedUnsupportedParameters.has(retryKey)) {
6331
+ attemptStack.push({
6332
+ modelName,
6333
+ unsupportedParameter,
6334
+ errorMessage: error.message,
6335
+ stripped: true,
6336
+ });
6337
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
6338
+ attemptStack
6339
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
6340
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
6341
+ `, Error: ${a.errorMessage}` +
6342
+ (a.stripped ? ' (stripped and retried)' : ''))
6343
+ .join('\n') +
6344
+ `\nFinal error: ${error.message}`);
6345
+ }
6346
+ retriedUnsupportedParameters.add(retryKey);
6347
+ if (this.options.isVerbose) {
6348
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
6349
+ }
6350
+ attemptStack.push({
6351
+ modelName,
6352
+ unsupportedParameter,
6353
+ errorMessage: error.message,
6354
+ stripped: true,
6355
+ });
6356
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
6357
+ return this.callImageGenerationModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
6358
+ }
6359
+ }
6203
6360
  // <- Note: [๐Ÿค–] callXxxModel
6204
6361
  /**
6205
6362
  * Get the model that should be used as default
@@ -6623,6 +6780,13 @@ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
6623
6780
  return this.getDefaultModel('text-embedding-3-large'); // <- TODO: [๐Ÿง ] Pick the best default model
6624
6781
  // <- TODO: [๐Ÿ›„]
6625
6782
  }
6783
+ /**
6784
+ * Default model for image generation variant.
6785
+ */
6786
+ getDefaultImageGenerationModel() {
6787
+ return this.getDefaultModel('!!!'); // <- TODO: [๐Ÿง ] Pick the best default model
6788
+ // <- TODO: [๐Ÿ›„]
6789
+ }
6626
6790
  }
6627
6791
  /**
6628
6792
  * TODO: [๐Ÿ›„] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -6858,6 +7022,12 @@ class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools {
6858
7022
  getDefaultEmbeddingModel() {
6859
7023
  return this.getDefaultModel('text-embedding-3-large');
6860
7024
  }
7025
+ /**
7026
+ * Default model for image generation variant.
7027
+ */
7028
+ getDefaultImageGenerationModel() {
7029
+ return this.getDefaultModel('dall-e-3');
7030
+ }
6861
7031
  }
6862
7032
 
6863
7033
  /**
@@ -7428,6 +7598,13 @@ class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompatibleExecutionT
7428
7598
  getDefaultEmbeddingModel() {
7429
7599
  throw new PipelineExecutionError(`${this.title} does not support EMBEDDING model variant`);
7430
7600
  }
7601
+ /**
7602
+ * Default model for image generation variant.
7603
+ */
7604
+ getDefaultImageGenerationModel() {
7605
+ return this.getDefaultModel('!!!'); // <- TODO: [๐Ÿง ] Pick the best default model
7606
+ // <- TODO: [๐Ÿ›„]
7607
+ }
7431
7608
  }
7432
7609
  /**
7433
7610
  * TODO: [๐Ÿฆบ] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
@@ -20857,7 +21034,7 @@ const jokerCommandParser = {
20857
21034
  * @see {@link ModelVariant}
20858
21035
  * @public exported from `@promptbook/core`
20859
21036
  */
20860
- const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [๐Ÿณ] */ /* <- [๐Ÿค–] */];
21037
+ const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'IMAGE_GENERATION', 'EMBEDDING' /* <- TODO [๐Ÿณ] */ /* <- [๐Ÿค–] */];
20861
21038
 
20862
21039
  /**
20863
21040
  * Parses the model command