@promptbook/wizard 0.103.0-66 → 0.103.0-68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,6 +45,7 @@ import type { MessageButton } from '../book-components/Chat/utils/parseMessageBu
  import { parseMessageButtons } from '../book-components/Chat/utils/parseMessageButtons';
  import { ArrowIcon } from '../book-components/icons/ArrowIcon';
  import { AttachmentIcon } from '../book-components/icons/AttachmentIcon';
+ import { CameraIcon } from '../book-components/icons/CameraIcon';
  import { MicIcon } from '../book-components/icons/MicIcon';
  import { PauseIcon } from '../book-components/icons/PauseIcon';
  import { PlayIcon } from '../book-components/icons/PlayIcon';
@@ -105,6 +106,7 @@ export type { MessageButton };
  export { parseMessageButtons };
  export { ArrowIcon };
  export { AttachmentIcon };
+ export { CameraIcon };
  export { MicIcon };
  export { PauseIcon };
  export { PlayIcon };
@@ -88,6 +88,7 @@ import type { PromptbookFetch } from '../execution/PromptbookFetch';
  import type { PromptResult } from '../execution/PromptResult';
  import type { CompletionPromptResult } from '../execution/PromptResult';
  import type { ChatPromptResult } from '../execution/PromptResult';
+ import type { ImagePromptResult } from '../execution/PromptResult';
  import type { EmbeddingPromptResult } from '../execution/PromptResult';
  import type { ScriptExecutionTools } from '../execution/ScriptExecutionTools';
  import type { ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
@@ -187,6 +188,7 @@ import type { LlmCall } from '../types/LlmCall';
  import type { ModelRequirements } from '../types/ModelRequirements';
  import type { CompletionModelRequirements } from '../types/ModelRequirements';
  import type { ChatModelRequirements } from '../types/ModelRequirements';
+ import type { ImageGenerationModelRequirements } from '../types/ModelRequirements';
  import type { EmbeddingModelRequirements } from '../types/ModelRequirements';
  import type { ModelVariant } from '../types/ModelVariant';
  import type { NonEmptyArray } from '../types/NonEmptyArray';
@@ -194,6 +196,7 @@ import type { NonEmptyReadonlyArray } from '../types/NonEmptyArray';
  import type { Prompt } from '../types/Prompt';
  import type { CompletionPrompt } from '../types/Prompt';
  import type { ChatPrompt } from '../types/Prompt';
+ import type { ImagePrompt } from '../types/Prompt';
  import type { EmbeddingPrompt } from '../types/Prompt';
  import type { ScriptLanguage } from '../types/ScriptLanguage';
  import type { SectionType } from '../types/SectionType';
@@ -449,6 +452,7 @@ export type { PromptbookFetch };
  export type { PromptResult };
  export type { CompletionPromptResult };
  export type { ChatPromptResult };
+ export type { ImagePromptResult };
  export type { EmbeddingPromptResult };
  export type { ScriptExecutionTools };
  export type { ScriptExecutionToolsExecuteOptions };
@@ -548,6 +552,7 @@ export type { LlmCall };
  export type { ModelRequirements };
  export type { CompletionModelRequirements };
  export type { ChatModelRequirements };
+ export type { ImageGenerationModelRequirements };
  export type { EmbeddingModelRequirements };
  export type { ModelVariant };
  export type { NonEmptyArray };
@@ -555,6 +560,7 @@ export type { NonEmptyReadonlyArray };
  export type { Prompt };
  export type { CompletionPrompt };
  export type { ChatPrompt };
+ export type { ImagePrompt };
  export type { EmbeddingPrompt };
  export type { ScriptLanguage };
  export type { SectionType };
@@ -81,6 +81,16 @@ export type BookEditorProps = {
  */
  readonly readonlyMessage?: string;
  };
+ /**
+ * If true, shows the upload button in the action bar.
+ * By default, the upload button is shown.
+ */
+ readonly isUploadButtonShown?: boolean;
+ /**
+ * If true, shows the camera button in the action bar.
+ * By default, the camera button is shown on mobile devices.
+ */
+ readonly isCameraButtonShown?: boolean;
  /**
  * If true, shows the download button in the action bar.
  * By default, the download button is shown.
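
A quick sketch of how a consumer might use the two new props — assuming `BookEditor` is the component exported from `@promptbook/components` (its export is not part of this diff), with all other props omitted:

    import { BookEditor } from '@promptbook/components'; // <- Assumed entry point

    export function EditorWithCapture() {
        return (
            <BookEditor
                isUploadButtonShown={false} // <- New: hide the upload button (shown by default)
                isCameraButtonShown={true} // <- New: force the camera button (by default shown on mobile only)
            />
        );
    }

Internally, the action bar receives these flags together with the `onUploadDocument` and `onTakePhoto` callbacks added in the next hunk.
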
@@ -1,9 +1,13 @@
  type BookEditorActionbarProps = {
  value: string | undefined;
  isDownloadButtonShown?: boolean;
+ isUploadButtonShown?: boolean;
+ isCameraButtonShown?: boolean;
  isAboutButtonShown?: boolean;
  isFullscreenButtonShown?: boolean;
  onFullscreenClick?: () => void;
+ onUploadDocument?: () => void;
+ onTakePhoto?: () => void;
  isFullscreen?: boolean;
  };
  /**
@@ -0,0 +1,11 @@
+ type CameraIconProps = {
+ size?: number;
+ color?: string;
+ };
+ /**
+ * @@@
+ *
+ * @public exported from `@promptbook/components`
+ */
+ export declare function CameraIcon({ size, color }: CameraIconProps): import("react/jsx-runtime").JSX.Element;
+ export {};
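
For illustration, the new icon is used like its siblings (`MicIcon`, `AttachmentIcon`); the default values for `size` and `color` are not visible in this diff, so both are passed explicitly here:

    import { CameraIcon } from '@promptbook/components';

    // Renders the camera glyph at 20px in the current text color
    export const TakePhotoLabel = () => (
        <span>
            <CameraIcon size={20} color="currentColor" /> Take photo
        </span>
    );
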
@@ -3,7 +3,7 @@ import type { ChatParticipant } from '../book-components/Chat/types/ChatParticip
  import type { Prompt } from '../types/Prompt';
  import type { string_markdown, string_markdown_text, string_title } from '../types/typeAliases';
  import type { AvailableModel } from './AvailableModel';
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from './PromptResult';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult } from './PromptResult';
  /**
  * Container for all the tools needed to execute prompts to large language models like GPT-4
  * On its interface it exposes common methods for prompt execution.
@@ -70,6 +70,10 @@ export type LlmExecutionTools = {
  * Calls a completion model
  */
  callCompletionModel?(prompt: Prompt): Promise<CompletionPromptResult>;
+ /**
+ * Calls an image generation model
+ */
+ callImageGenerationModel?(prompt: Prompt): Promise<ImagePromptResult>;
  /**
  * Calls an embedding model
  */
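
Like the other `callXxxModel` methods, `callImageGenerationModel` is optional, so callers should feature-detect before dispatching an `IMAGE_GENERATION` prompt. A minimal sketch (the `@promptbook/types` entry point is assumed; the types themselves are shown in this diff):

    import type { ImagePromptResult, LlmExecutionTools, Prompt } from '@promptbook/types';

    async function generateImage(tools: LlmExecutionTools, prompt: Prompt): Promise<ImagePromptResult> {
        // The method is optional - providers without image support simply omit it
        if (tools.callImageGenerationModel === undefined) {
            throw new Error(`${tools.title} does not support IMAGE_GENERATION`);
        }
        return tools.callImageGenerationModel(prompt);
    }
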
@@ -8,7 +8,7 @@ import type { Usage } from './Usage';
  *
  * @see https://github.com/webgptorg/promptbook#prompt-result
  */
- export type PromptResult = CompletionPromptResult | ChatPromptResult | EmbeddingPromptResult;
+ export type PromptResult = CompletionPromptResult | ChatPromptResult | ImagePromptResult | EmbeddingPromptResult;
  /**
  * Completion prompt result
  *
@@ -22,6 +22,12 @@ export type CompletionPromptResult = CommonPromptResult;
  * Note: [🚉] This is fully serializable as JSON
  */
  export type ChatPromptResult = CommonPromptResult & {};
+ /**
+ * Image prompt result
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+ export type ImagePromptResult = CommonPromptResult;
  /**
  * Embedding prompt result
  *
@@ -38,6 +38,10 @@ export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools
  * Default model for embedding variant.
  */
  protected getDefaultEmbeddingModel(): AvailableModel;
+ /**
+ * Default model for image generation variant.
+ */
+ protected getDefaultImageGenerationModel(): AvailableModel;
  }
  /**
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -1,7 +1,7 @@
  import OpenAI from 'openai';
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult } from '../../execution/PromptResult';
  import type { Usage } from '../../execution/Usage';
  import type { Prompt } from '../../types/Prompt';
  import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
@@ -63,6 +63,14 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  * Internal method that handles parameter retry for embedding model calls
  */
  private callEmbeddingModelWithRetry;
+ /**
+ * Calls OpenAI compatible API to use an image generation model
+ */
+ callImageGenerationModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ImagePromptResult>;
+ /**
+ * Internal method that handles parameter retry for image generation model calls
+ */
+ private callImageGenerationModelWithRetry;
  /**
  * Get the model that should be used as default
  */
@@ -89,6 +97,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
  * Default model for embedding variant.
  */
  protected abstract getDefaultEmbeddingModel(): AvailableModel;
+ /**
+ * Default model for image generation variant.
+ */
+ protected abstract getDefaultImageGenerationModel(): AvailableModel;
  /**
  * Makes a request with retry logic for network errors like ECONNRESET
  */
@@ -35,4 +35,8 @@ export declare class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools
  * Default model for embedding variant.
  */
  protected getDefaultEmbeddingModel(): AvailableModel;
+ /**
+ * Default model for image generation variant.
+ */
+ protected getDefaultImageGenerationModel(): AvailableModel;
  }
@@ -1,15 +1,11 @@
  import type { AvailableModel } from '../../execution/AvailableModel';
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
  import type { Usage } from '../../execution/Usage';
- import type { string_markdown } from '../../types/typeAliases';
- import type { string_markdown_text } from '../../types/typeAliases';
- import type { string_model_name } from '../../types/typeAliases';
- import type { string_title } from '../../types/typeAliases';
+ import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
  import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';
  import { computeOpenAiUsage } from './computeOpenAiUsage';
  import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools';
- import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
- import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
+ import type { OpenAiCompatibleExecutionToolsNonProxiedOptions, OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
  /**
  * Execution Tools for calling OpenAI compatible API
  *
@@ -67,6 +63,10 @@ export declare class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompa
  * Default model for embedding variant.
  */
  protected getDefaultEmbeddingModel(): AvailableModel;
+ /**
+ * Default model for image generation variant.
+ */
+ protected getDefaultImageGenerationModel(): AvailableModel;
  }
  /**
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
@@ -7,7 +7,7 @@ import type { number_model_temperature, number_seed, string_model_name, string_s
  * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook#model-requirements
  */
- export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | EmbeddingModelRequirements;
+ export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | ImageGenerationModelRequirements | EmbeddingModelRequirements;
  /**
  * Model requirements for the completion variant
  *
@@ -34,6 +34,17 @@ export type ChatModelRequirements = CommonModelRequirements & {
  */
  readonly systemMessage?: string_system_message;
  };
+ /**
+ * Model requirements for the image generation variant
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+ export type ImageGenerationModelRequirements = CommonModelRequirements & {
+ /**
+ * Image generation model variant
+ */
+ modelVariant: 'IMAGE_GENERATION';
+ };
  /**
  * Model requirements for the embedding variant
  *
@@ -59,6 +70,7 @@ export type CommonModelRequirements = {
  * There are 4 variants:
  * - **COMPLETION** - model that takes prompt and writes the rest of the text
  * - **CHAT** - model that takes prompt and previous messages and returns response
+ * - **IMAGE_GENERATION** - model that takes prompt and returns image
  * - **EMBEDDING** - model that takes prompt and returns embedding
  * <- [🤖]
  */
@@ -12,4 +12,4 @@ export type ModelVariant = TupleToUnion<typeof MODEL_VARIANTS>;
  * @see {@link ModelVariant}
  * @public exported from `@promptbook/core`
  */
- export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
+ export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "IMAGE_GENERATION", "EMBEDDING"];
@@ -4,6 +4,7 @@ import type { Expectations } from '../pipeline/PipelineJson/Expectations';
  import type { ChatModelRequirements } from './ModelRequirements';
  import type { CompletionModelRequirements } from './ModelRequirements';
  import type { EmbeddingModelRequirements } from './ModelRequirements';
+ import type { ImageGenerationModelRequirements } from './ModelRequirements';
  import type { ModelRequirements } from './ModelRequirements';
  import type { Parameters } from './typeAliases';
  import type { string_pipeline_url_with_task_hash } from './typeAliases';
@@ -17,7 +18,7 @@ import type { string_title } from './typeAliases';
  * Note: [🚉] This is fully serializable as JSON
  * @see https://github.com/webgptorg/promptbook#prompt
  */
- export type Prompt = CompletionPrompt | ChatPrompt | EmbeddingPrompt;
+ export type Prompt = CompletionPrompt | ChatPrompt | ImagePrompt | EmbeddingPrompt;
  /**
  * Completion prompt
  *
@@ -44,6 +45,17 @@ export type ChatPrompt = CommonPrompt & {
  */
  thread?: ChatMessage[];
  };
+ /**
+ * Image prompt
+ *
+ * Note: [🚉] This is fully serializable as JSON
+ */
+ export type ImagePrompt = CommonPrompt & {
+ /**
+ * Requirements for image generation model
+ */
+ modelRequirements: ImageGenerationModelRequirements;
+ };
  /**
  * Embedding prompt
  *
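
Putting the new types together: an `ImagePrompt` is a `CommonPrompt` whose `modelRequirements.modelVariant` is `'IMAGE_GENERATION'`. A hand-built sketch — `content` and `parameters` come from `CommonPrompt` (confirmed by the implementation further below), while `title` is an assumed `CommonPrompt` field:

    import type { ImagePrompt } from '@promptbook/types';

    const imagePrompt: ImagePrompt = {
        title: 'Cover illustration', // <- Assumed CommonPrompt field
        content: 'A watercolor lighthouse at dawn, {style}',
        parameters: { style: 'minimalist' },
        modelRequirements: {
            modelVariant: 'IMAGE_GENERATION',
            modelName: 'dall-e-3', // <- Optional; omit to use the provider's default
        },
    };
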
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
  /**
  * Represents the version string of the Promptbook engine.
- * It follows semantic versioning (e.g., `0.103.0-65`).
+ * It follows semantic versioning (e.g., `0.103.0-67`).
  *
  * @generated
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@promptbook/wizard",
- "version": "0.103.0-66",
+ "version": "0.103.0-68",
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
  "private": false,
  "sideEffects": false,
@@ -95,7 +95,7 @@
  "module": "./esm/index.es.js",
  "typings": "./esm/typings/src/_packages/wizard.index.d.ts",
  "peerDependencies": {
- "@promptbook/core": "0.103.0-66"
+ "@promptbook/core": "0.103.0-68"
  },
  "dependencies": {
  "@ai-sdk/deepseek": "0.1.17",
package/umd/index.umd.js CHANGED
@@ -48,7 +48,7 @@
  * @generated
  * @see https://github.com/webgptorg/promptbook
  */
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-66';
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-68';
  /**
  * TODO: string_promptbook_version should be constrained to all versions of the Promptbook engine
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -3910,12 +3910,18 @@
  },
  },
  /**/
- /*/
- {
- modelTitle: 'dall-e-3',
- modelName: 'dall-e-3',
- },
- /**/
+ /**/
+ {
+ modelVariant: 'IMAGE_GENERATION',
+ modelTitle: 'dall-e-3',
+ modelName: 'dall-e-3',
+ modelDescription: 'DALL·E 3 is the latest version of the DALL·E art generation model. It understands significantly more nuance and detail than our previous systems, allowing you to easily translate your ideas into exceptionally accurate images.',
+ pricing: {
+ prompt: 0,
+ output: 0.04,
+ },
+ },
+ /**/
  /*/
  {
  modelTitle: 'whisper-1',
@@ -3934,12 +3940,18 @@
  },
  },
  /**/
- /*/
- {
- modelTitle: 'dall-e-2',
- modelName: 'dall-e-2',
- },
- /**/
+ /**/
+ {
+ modelVariant: 'IMAGE_GENERATION',
+ modelTitle: 'dall-e-2',
+ modelName: 'dall-e-2',
+ modelDescription: 'DALL·E 2 is an AI system that can create realistic images and art from a description in natural language.',
+ pricing: {
+ prompt: 0,
+ output: 0.02,
+ },
+ },
+ /**/
  /**/
  {
  modelVariant: 'CHAT',
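
Note that unlike the token-priced chat models, the re-enabled DALL·E entries carry `prompt: 0` and a flat `output` price. Judging from `callImageGenerationModel` below, which charges `pricing.output` once per call and returns exactly one image, this reads as a per-image price in USD (an interpretation; the unit is not stated in the code). A sketch of the implied cost math:

    // Hypothetical helper mirroring the hardcoded pricing above
    const IMAGE_PRICING_USD = { 'dall-e-3': 0.04, 'dall-e-2': 0.02 } as const;

    function imageBatchCost(modelName: keyof typeof IMAGE_PRICING_USD, imageCount: number): number {
        return IMAGE_PRICING_USD[modelName] * imageCount; // e.g. 10 DALL·E 3 images -> $0.40
    }
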
@@ -6212,6 +6224,151 @@
  return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
  }
  }
+ /**
+ * Calls OpenAI compatible API to use an image generation model
+ */
+ async callImageGenerationModel(prompt) {
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
+ const retriedUnsupportedParameters = new Set();
+ return this.callImageGenerationModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
+ }
+ /**
+ * Internal method that handles parameter retry for image generation model calls
+ */
+ async callImageGenerationModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
+ var _a, _b;
+ if (this.options.isVerbose) {
+ console.info(`🎨 ${this.title} callImageGenerationModel call`, { prompt, currentModelRequirements });
+ }
+ const { content, parameters } = prompt;
+ const client = await this.getClient();
+ // TODO: [☂] Use more modelRequirements here
+ if (currentModelRequirements.modelVariant !== 'IMAGE_GENERATION') {
+ throw new PipelineExecutionError('Use callImageGenerationModel only for IMAGE_GENERATION variant');
+ }
+ const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
+ const modelSettings = {
+ model: modelName,
+ // size: currentModelRequirements.size,
+ // quality: currentModelRequirements.quality,
+ // style: currentModelRequirements.style,
+ };
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
+ const rawRequest = {
+ ...modelSettings,
+ prompt: rawPromptContent,
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
+ response_format: 'url', // TODO: [🧠] Maybe allow b64_json
+ };
+ const start = $getCurrentDate();
+ if (this.options.isVerbose) {
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
+ }
+ try {
+ const rawResponse = await this.limiter
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.images.generate(rawRequest)))
+ .catch((error) => {
+ assertsError(error);
+ if (this.options.isVerbose) {
+ console.info(colors__default["default"].bgRed('error'), error);
+ }
+ throw error;
+ });
+ if (this.options.isVerbose) {
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
+ }
+ const complete = $getCurrentDate();
+ if (!rawResponse.data[0]) {
+ throw new PipelineExecutionError(`No choices from ${this.title}`);
+ }
+ if (rawResponse.data.length > 1) {
+ throw new PipelineExecutionError(`More than one choice from ${this.title}`);
+ }
+ const resultContent = rawResponse.data[0].url;
+ const modelInfo = this.HARDCODED_MODELS.find((model) => model.modelName === modelName);
+ const price = ((_b = modelInfo === null || modelInfo === void 0 ? void 0 : modelInfo.pricing) === null || _b === void 0 ? void 0 : _b.output) ? uncertainNumber(modelInfo.pricing.output) : uncertainNumber();
+ return exportJson({
+ name: 'promptResult',
+ message: `Result of \`OpenAiCompatibleExecutionTools.callImageGenerationModel\``,
+ order: [],
+ value: {
+ content: resultContent,
+ modelName: modelName,
+ timing: {
+ start,
+ complete,
+ },
+ usage: {
+ price,
+ input: {
+ tokensCount: uncertainNumber(0),
+ ...computeUsageCounts(rawPromptContent),
+ },
+ output: {
+ tokensCount: uncertainNumber(0),
+ ...computeUsageCounts(''),
+ },
+ },
+ rawPromptContent,
+ rawRequest,
+ rawResponse,
+ },
+ });
+ }
+ catch (error) {
+ assertsError(error);
+ if (!isUnsupportedParameterError(error)) {
+ if (attemptStack.length > 0) {
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+ attemptStack
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+ `, Error: ${a.errorMessage}` +
+ (a.stripped ? ' (stripped and retried)' : ''))
+ .join('\n') +
+ `\nFinal error: ${error.message}`);
+ }
+ throw error;
+ }
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
+ if (!unsupportedParameter) {
+ if (this.options.isVerbose) {
+ console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
+ }
+ throw error;
+ }
+ const retryKey = `${modelName}-${unsupportedParameter}`;
+ if (retriedUnsupportedParameters.has(retryKey)) {
+ attemptStack.push({
+ modelName,
+ unsupportedParameter,
+ errorMessage: error.message,
+ stripped: true,
+ });
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
+ attemptStack
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
+ `, Error: ${a.errorMessage}` +
+ (a.stripped ? ' (stripped and retried)' : ''))
+ .join('\n') +
+ `\nFinal error: ${error.message}`);
+ }
+ retriedUnsupportedParameters.add(retryKey);
+ if (this.options.isVerbose) {
+ console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
+ }
+ attemptStack.push({
+ modelName,
+ unsupportedParameter,
+ errorMessage: error.message,
+ stripped: true,
+ });
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
+ return this.callImageGenerationModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
+ }
+ }
  // <- Note: [🤖] callXxxModel
  /**
  * Get the model that should be used as default
@@ -6635,6 +6792,13 @@
  return this.getDefaultModel('text-embedding-3-large'); // <- TODO: [🧠] Pick the best default model
  // <- TODO: [🛄]
  }
+ /**
+ * Default model for image generation variant.
+ */
+ getDefaultImageGenerationModel() {
+ return this.getDefaultModel('!!!'); // <- TODO: [🧠] Pick the best default model
+ // <- TODO: [🛄]
+ }
  }
  /**
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -6870,6 +7034,12 @@
  getDefaultEmbeddingModel() {
  return this.getDefaultModel('text-embedding-3-large');
  }
+ /**
+ * Default model for image generation variant.
+ */
+ getDefaultImageGenerationModel() {
+ return this.getDefaultModel('dall-e-3');
+ }
  }

  /**
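
Since `OpenAiExecutionTools` now defaults to `dall-e-3`, a prompt can omit `modelName` entirely. An end-to-end sketch — the constructor options follow the usual Promptbook pattern and are assumed, not shown in this diff:

    import { OpenAiExecutionTools } from '@promptbook/openai'; // <- Assumed entry point

    const tools = new OpenAiExecutionTools({ apiKey: process.env.OPENAI_API_KEY! }); // <- Assumed options shape

    const result = await tools.callImageGenerationModel({
        content: 'A watercolor lighthouse at dawn',
        parameters: {},
        modelRequirements: { modelVariant: 'IMAGE_GENERATION' }, // <- No modelName: falls back to dall-e-3
    });

    console.info(result.content); // <- An image URL, since response_format is hardcoded to 'url' above
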
@@ -7440,6 +7610,13 @@
  getDefaultEmbeddingModel() {
  throw new PipelineExecutionError(`${this.title} does not support EMBEDDING model variant`);
  }
+ /**
+ * Default model for image generation variant.
+ */
+ getDefaultImageGenerationModel() {
+ return this.getDefaultModel('!!!'); // <- TODO: [🧠] Pick the best default model
+ // <- TODO: [🛄]
+ }
  }
  /**
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
@@ -20869,7 +21046,7 @@
  * @see {@link ModelVariant}
  * @public exported from `@promptbook/core`
  */
- const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
+ const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'IMAGE_GENERATION', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];

  /**
  * Parses the model command