@promptbook/ollama 0.103.0-67 → 0.103.0-68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/esm/index.es.js CHANGED
@@ -18,7 +18,7 @@ const BOOK_LANGUAGE_VERSION = '2.0.0';
18
18
  * @generated
19
19
  * @see https://github.com/webgptorg/promptbook
20
20
  */
21
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-67';
21
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-68';
22
22
  /**
23
23
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
24
24
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -2167,12 +2167,18 @@ const OPENAI_MODELS = exportJson({
2167
2167
  },
2168
2168
  },
2169
2169
  /**/
2170
- /*/
2171
- {
2172
- modelTitle: 'dall-e-3',
2173
- modelName: 'dall-e-3',
2174
- },
2175
- /**/
2170
+ /**/
2171
+ {
2172
+ modelVariant: 'IMAGE_GENERATION',
2173
+ modelTitle: 'dall-e-3',
2174
+ modelName: 'dall-e-3',
2175
+ modelDescription: 'DALL·E 3 is the latest version of the DALL·E art generation model. It understands significantly more nuance and detail than our previous systems, allowing you to easily translate your ideas into exceptionally accurate images.',
2176
+ pricing: {
2177
+ prompt: 0,
2178
+ output: 0.04,
2179
+ },
2180
+ },
2181
+ /**/
2176
2182
  /*/
2177
2183
  {
2178
2184
  modelTitle: 'whisper-1',
@@ -2191,12 +2197,18 @@ const OPENAI_MODELS = exportJson({
2191
2197
  },
2192
2198
  },
2193
2199
  /**/
2194
- /*/
2195
- {
2196
- modelTitle: 'dall-e-2',
2197
- modelName: 'dall-e-2',
2198
- },
2199
- /**/
2200
+ /**/
2201
+ {
2202
+ modelVariant: 'IMAGE_GENERATION',
2203
+ modelTitle: 'dall-e-2',
2204
+ modelName: 'dall-e-2',
2205
+ modelDescription: 'DALL·E 2 is an AI system that can create realistic images and art from a description in natural language.',
2206
+ pricing: {
2207
+ prompt: 0,
2208
+ output: 0.02,
2209
+ },
2210
+ },
2211
+ /**/
2200
2212
  /**/
2201
2213
  {
2202
2214
  modelVariant: 'CHAT',
@@ -3407,6 +3419,151 @@ class OpenAiCompatibleExecutionTools {
3407
3419
  return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
3408
3420
  }
3409
3421
  }
3422
+ /**
3423
+ * Calls OpenAI compatible API to use an image generation model
3424
+ */
3425
+ async callImageGenerationModel(prompt) {
3426
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
3427
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
3428
+ const retriedUnsupportedParameters = new Set();
3429
+ return this.callImageGenerationModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
3430
+ }
3431
+ /**
3432
+ * Internal method that handles parameter retry for image generation model calls
3433
+ */
3434
+ async callImageGenerationModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
3435
+ var _a, _b;
3436
+ if (this.options.isVerbose) {
3437
+ console.info(`🎨 ${this.title} callImageGenerationModel call`, { prompt, currentModelRequirements });
3438
+ }
3439
+ const { content, parameters } = prompt;
3440
+ const client = await this.getClient();
3441
+ // TODO: [☂] Use here more modelRequirements
3442
+ if (currentModelRequirements.modelVariant !== 'IMAGE_GENERATION') {
3443
+ throw new PipelineExecutionError('Use callImageGenerationModel only for IMAGE_GENERATION variant');
3444
+ }
3445
+ const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
3446
+ const modelSettings = {
3447
+ model: modelName,
3448
+ // size: currentModelRequirements.size,
3449
+ // quality: currentModelRequirements.quality,
3450
+ // style: currentModelRequirements.style,
3451
+ };
3452
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
3453
+ const rawRequest = {
3454
+ ...modelSettings,
3455
+ prompt: rawPromptContent,
3456
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
3457
+ response_format: 'url', // TODO: [🧠] Maybe allow b64_json
3458
+ };
3459
+ const start = $getCurrentDate();
3460
+ if (this.options.isVerbose) {
3461
+ console.info(colors.bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
3462
+ }
3463
+ try {
3464
+ const rawResponse = await this.limiter
3465
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.images.generate(rawRequest)))
3466
+ .catch((error) => {
3467
+ assertsError(error);
3468
+ if (this.options.isVerbose) {
3469
+ console.info(colors.bgRed('error'), error);
3470
+ }
3471
+ throw error;
3472
+ });
3473
+ if (this.options.isVerbose) {
3474
+ console.info(colors.bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
3475
+ }
3476
+ const complete = $getCurrentDate();
3477
+ if (!rawResponse.data[0]) {
3478
+ throw new PipelineExecutionError(`No choices from ${this.title}`);
3479
+ }
3480
+ if (rawResponse.data.length > 1) {
3481
+ throw new PipelineExecutionError(`More than one choice from ${this.title}`);
3482
+ }
3483
+ const resultContent = rawResponse.data[0].url;
3484
+ const modelInfo = this.HARDCODED_MODELS.find((model) => model.modelName === modelName);
3485
+ const price = ((_b = modelInfo === null || modelInfo === void 0 ? void 0 : modelInfo.pricing) === null || _b === void 0 ? void 0 : _b.output) ? uncertainNumber(modelInfo.pricing.output) : uncertainNumber();
3486
+ return exportJson({
3487
+ name: 'promptResult',
3488
+ message: `Result of \`OpenAiCompatibleExecutionTools.callImageGenerationModel\``,
3489
+ order: [],
3490
+ value: {
3491
+ content: resultContent,
3492
+ modelName: modelName,
3493
+ timing: {
3494
+ start,
3495
+ complete,
3496
+ },
3497
+ usage: {
3498
+ price,
3499
+ input: {
3500
+ tokensCount: uncertainNumber(0),
3501
+ ...computeUsageCounts(rawPromptContent),
3502
+ },
3503
+ output: {
3504
+ tokensCount: uncertainNumber(0),
3505
+ ...computeUsageCounts(''),
3506
+ },
3507
+ },
3508
+ rawPromptContent,
3509
+ rawRequest,
3510
+ rawResponse,
3511
+ },
3512
+ });
3513
+ }
3514
+ catch (error) {
3515
+ assertsError(error);
3516
+ if (!isUnsupportedParameterError(error)) {
3517
+ if (attemptStack.length > 0) {
3518
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
3519
+ attemptStack
3520
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
3521
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
3522
+ `, Error: ${a.errorMessage}` +
3523
+ (a.stripped ? ' (stripped and retried)' : ''))
3524
+ .join('\n') +
3525
+ `\nFinal error: ${error.message}`);
3526
+ }
3527
+ throw error;
3528
+ }
3529
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
3530
+ if (!unsupportedParameter) {
3531
+ if (this.options.isVerbose) {
3532
+ console.warn(colors.bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
3533
+ }
3534
+ throw error;
3535
+ }
3536
+ const retryKey = `${modelName}-${unsupportedParameter}`;
3537
+ if (retriedUnsupportedParameters.has(retryKey)) {
3538
+ attemptStack.push({
3539
+ modelName,
3540
+ unsupportedParameter,
3541
+ errorMessage: error.message,
3542
+ stripped: true,
3543
+ });
3544
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
3545
+ attemptStack
3546
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
3547
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
3548
+ `, Error: ${a.errorMessage}` +
3549
+ (a.stripped ? ' (stripped and retried)' : ''))
3550
+ .join('\n') +
3551
+ `\nFinal error: ${error.message}`);
3552
+ }
3553
+ retriedUnsupportedParameters.add(retryKey);
3554
+ if (this.options.isVerbose) {
3555
+ console.warn(colors.bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
3556
+ }
3557
+ attemptStack.push({
3558
+ modelName,
3559
+ unsupportedParameter,
3560
+ errorMessage: error.message,
3561
+ stripped: true,
3562
+ });
3563
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
3564
+ return this.callImageGenerationModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
3565
+ }
3566
+ }
3410
3567
  // <- Note: [🤖] callXxxModel
3411
3568
  /**
3412
3569
  * Get the model that should be used as default
@@ -3837,6 +3994,13 @@ class OllamaExecutionTools extends OpenAiCompatibleExecutionTools {
3837
3994
  return this.getDefaultModel('text-embedding-3-large'); // <- TODO: [🧠] Pick the best default model
3838
3995
  // <- TODO: [🛄]
3839
3996
  }
3997
+ /**
3998
+ * Default model for image generation variant.
3999
+ */
4000
+ getDefaultImageGenerationModel() {
4001
+ return this.getDefaultModel('!!!'); // <- TODO: [🧠] Pick the best default model
4002
+ // <- TODO: [🛄]
4003
+ }
3840
4004
  }
3841
4005
  /**
3842
4006
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`