@promptbook/core 0.103.0-66 → 0.103.0-68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -45,6 +45,7 @@ import type { MessageButton } from '../book-components/Chat/utils/parseMessageBu
45
45
  import { parseMessageButtons } from '../book-components/Chat/utils/parseMessageButtons';
46
46
  import { ArrowIcon } from '../book-components/icons/ArrowIcon';
47
47
  import { AttachmentIcon } from '../book-components/icons/AttachmentIcon';
48
+ import { CameraIcon } from '../book-components/icons/CameraIcon';
48
49
  import { MicIcon } from '../book-components/icons/MicIcon';
49
50
  import { PauseIcon } from '../book-components/icons/PauseIcon';
50
51
  import { PlayIcon } from '../book-components/icons/PlayIcon';
@@ -105,6 +106,7 @@ export type { MessageButton };
105
106
  export { parseMessageButtons };
106
107
  export { ArrowIcon };
107
108
  export { AttachmentIcon };
109
+ export { CameraIcon };
108
110
  export { MicIcon };
109
111
  export { PauseIcon };
110
112
  export { PlayIcon };
@@ -88,6 +88,7 @@ import type { PromptbookFetch } from '../execution/PromptbookFetch';
88
88
  import type { PromptResult } from '../execution/PromptResult';
89
89
  import type { CompletionPromptResult } from '../execution/PromptResult';
90
90
  import type { ChatPromptResult } from '../execution/PromptResult';
91
+ import type { ImagePromptResult } from '../execution/PromptResult';
91
92
  import type { EmbeddingPromptResult } from '../execution/PromptResult';
92
93
  import type { ScriptExecutionTools } from '../execution/ScriptExecutionTools';
93
94
  import type { ScriptExecutionToolsExecuteOptions } from '../execution/ScriptExecutionTools';
@@ -187,6 +188,7 @@ import type { LlmCall } from '../types/LlmCall';
187
188
  import type { ModelRequirements } from '../types/ModelRequirements';
188
189
  import type { CompletionModelRequirements } from '../types/ModelRequirements';
189
190
  import type { ChatModelRequirements } from '../types/ModelRequirements';
191
+ import type { ImageGenerationModelRequirements } from '../types/ModelRequirements';
190
192
  import type { EmbeddingModelRequirements } from '../types/ModelRequirements';
191
193
  import type { ModelVariant } from '../types/ModelVariant';
192
194
  import type { NonEmptyArray } from '../types/NonEmptyArray';
@@ -194,6 +196,7 @@ import type { NonEmptyReadonlyArray } from '../types/NonEmptyArray';
194
196
  import type { Prompt } from '../types/Prompt';
195
197
  import type { CompletionPrompt } from '../types/Prompt';
196
198
  import type { ChatPrompt } from '../types/Prompt';
199
+ import type { ImagePrompt } from '../types/Prompt';
197
200
  import type { EmbeddingPrompt } from '../types/Prompt';
198
201
  import type { ScriptLanguage } from '../types/ScriptLanguage';
199
202
  import type { SectionType } from '../types/SectionType';
@@ -449,6 +452,7 @@ export type { PromptbookFetch };
449
452
  export type { PromptResult };
450
453
  export type { CompletionPromptResult };
451
454
  export type { ChatPromptResult };
455
+ export type { ImagePromptResult };
452
456
  export type { EmbeddingPromptResult };
453
457
  export type { ScriptExecutionTools };
454
458
  export type { ScriptExecutionToolsExecuteOptions };
@@ -548,6 +552,7 @@ export type { LlmCall };
548
552
  export type { ModelRequirements };
549
553
  export type { CompletionModelRequirements };
550
554
  export type { ChatModelRequirements };
555
+ export type { ImageGenerationModelRequirements };
551
556
  export type { EmbeddingModelRequirements };
552
557
  export type { ModelVariant };
553
558
  export type { NonEmptyArray };
@@ -555,6 +560,7 @@ export type { NonEmptyReadonlyArray };
555
560
  export type { Prompt };
556
561
  export type { CompletionPrompt };
557
562
  export type { ChatPrompt };
563
+ export type { ImagePrompt };
558
564
  export type { EmbeddingPrompt };
559
565
  export type { ScriptLanguage };
560
566
  export type { SectionType };
@@ -81,6 +81,16 @@ export type BookEditorProps = {
81
81
  */
82
82
  readonly readonlyMessage?: string;
83
83
  };
84
+ /**
85
+ * If true, shows the upload button in the action bar.
86
+ * By default, the upload button is shown.
87
+ */
88
+ readonly isUploadButtonShown?: boolean;
89
+ /**
90
+ * If true, shows the camera button in the action bar.
91
+ * By default, the camera button is shown on mobile devices.
92
+ */
93
+ readonly isCameraButtonShown?: boolean;
84
94
  /**
85
95
  * If true, shows the download button in the action bar.
86
96
  * By default, the download button is shown.
@@ -1,9 +1,13 @@
1
1
  type BookEditorActionbarProps = {
2
2
  value: string | undefined;
3
3
  isDownloadButtonShown?: boolean;
4
+ isUploadButtonShown?: boolean;
5
+ isCameraButtonShown?: boolean;
4
6
  isAboutButtonShown?: boolean;
5
7
  isFullscreenButtonShown?: boolean;
6
8
  onFullscreenClick?: () => void;
9
+ onUploadDocument?: () => void;
10
+ onTakePhoto?: () => void;
7
11
  isFullscreen?: boolean;
8
12
  };
9
13
  /**
@@ -0,0 +1,11 @@
1
+ type CameraIconProps = {
2
+ size?: number;
3
+ color?: string;
4
+ };
5
+ /**
6
+ * @@@
7
+ *
8
+ * @public exported from `@promptbook/components`
9
+ */
10
+ export declare function CameraIcon({ size, color }: CameraIconProps): import("react/jsx-runtime").JSX.Element;
11
+ export {};
@@ -3,7 +3,7 @@ import type { ChatParticipant } from '../book-components/Chat/types/ChatParticip
3
3
  import type { Prompt } from '../types/Prompt';
4
4
  import type { string_markdown, string_markdown_text, string_title } from '../types/typeAliases';
5
5
  import type { AvailableModel } from './AvailableModel';
6
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from './PromptResult';
6
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult } from './PromptResult';
7
7
  /**
8
8
  * Container for all the tools needed to execute prompts to large language models like GPT-4
9
9
  * On its interface it exposes common methods for prompt execution.
@@ -70,6 +70,10 @@ export type LlmExecutionTools = {
70
70
  * Calls a completion model
71
71
  */
72
72
  callCompletionModel?(prompt: Prompt): Promise<CompletionPromptResult>;
73
+ /**
74
+ * Calls an image generation model
75
+ */
76
+ callImageGenerationModel?(prompt: Prompt): Promise<ImagePromptResult>;
73
77
  /**
74
78
  * Calls an embedding model
75
79
  */
@@ -8,7 +8,7 @@ import type { Usage } from './Usage';
8
8
  *
9
9
  * @see https://github.com/webgptorg/promptbook#prompt-result
10
10
  */
11
- export type PromptResult = CompletionPromptResult | ChatPromptResult | EmbeddingPromptResult;
11
+ export type PromptResult = CompletionPromptResult | ChatPromptResult | ImagePromptResult | EmbeddingPromptResult;
12
12
  /**
13
13
  * Completion prompt result
14
14
  *
@@ -22,6 +22,12 @@ export type CompletionPromptResult = CommonPromptResult;
22
22
  * Note: [🚉] This is fully serializable as JSON
23
23
  */
24
24
  export type ChatPromptResult = CommonPromptResult & {};
25
+ /**
26
+ * Image prompt result
27
+ *
28
+ * Note: [🚉] This is fully serializable as JSON
29
+ */
30
+ export type ImagePromptResult = CommonPromptResult;
25
31
  /**
26
32
  * Embedding prompt result
27
33
  *
@@ -38,6 +38,10 @@ export declare class OllamaExecutionTools extends OpenAiCompatibleExecutionTools
38
38
  * Default model for embedding variant.
39
39
  */
40
40
  protected getDefaultEmbeddingModel(): AvailableModel;
41
+ /**
42
+ * Default model for image generation variant.
43
+ */
44
+ protected getDefaultImageGenerationModel(): AvailableModel;
41
45
  }
42
46
  /**
43
47
  * TODO: [🛄] Some way how to re-wrap the errors from `OpenAiCompatibleExecutionTools`
@@ -1,7 +1,7 @@
1
1
  import OpenAI from 'openai';
2
2
  import type { AvailableModel } from '../../execution/AvailableModel';
3
3
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
4
- import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult } from '../../execution/PromptResult';
4
+ import type { ChatPromptResult, CompletionPromptResult, EmbeddingPromptResult, ImagePromptResult } from '../../execution/PromptResult';
5
5
  import type { Usage } from '../../execution/Usage';
6
6
  import type { Prompt } from '../../types/Prompt';
7
7
  import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
@@ -63,6 +63,14 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
63
63
  * Internal method that handles parameter retry for embedding model calls
64
64
  */
65
65
  private callEmbeddingModelWithRetry;
66
+ /**
67
+ * Calls OpenAI compatible API to use an image generation model
68
+ */
69
+ callImageGenerationModel(prompt: Pick<Prompt, 'content' | 'parameters' | 'modelRequirements'>): Promise<ImagePromptResult>;
70
+ /**
71
+ * Internal method that handles parameter retry for image generation model calls
72
+ */
73
+ private callImageGenerationModelWithRetry;
66
74
  /**
67
75
  * Get the model that should be used as default
68
76
  */
@@ -89,6 +97,10 @@ export declare abstract class OpenAiCompatibleExecutionTools implements LlmExecu
89
97
  * Default model for completion variant.
90
98
  */
91
99
  protected abstract getDefaultEmbeddingModel(): AvailableModel;
100
+ /**
101
+ * Default model for image generation variant.
102
+ */
103
+ protected abstract getDefaultImageGenerationModel(): AvailableModel;
92
104
  /**
93
105
  * Makes a request with retry logic for network errors like ECONNRESET
94
106
  */
@@ -35,4 +35,8 @@ export declare class OpenAiExecutionTools extends OpenAiCompatibleExecutionTools
35
35
  * Default model for completion variant.
36
36
  */
37
37
  protected getDefaultEmbeddingModel(): AvailableModel;
38
+ /**
39
+ * Default model for image generation variant.
40
+ */
41
+ protected getDefaultImageGenerationModel(): AvailableModel;
38
42
  }
@@ -1,15 +1,11 @@
1
1
  import type { AvailableModel } from '../../execution/AvailableModel';
2
2
  import type { LlmExecutionTools } from '../../execution/LlmExecutionTools';
3
3
  import type { Usage } from '../../execution/Usage';
4
- import type { string_markdown } from '../../types/typeAliases';
5
- import type { string_markdown_text } from '../../types/typeAliases';
6
- import type { string_model_name } from '../../types/typeAliases';
7
- import type { string_title } from '../../types/typeAliases';
4
+ import type { string_markdown, string_markdown_text, string_model_name, string_title } from '../../types/typeAliases';
8
5
  import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';
9
6
  import { computeOpenAiUsage } from './computeOpenAiUsage';
10
7
  import { OpenAiCompatibleExecutionTools } from './OpenAiCompatibleExecutionTools';
11
- import type { OpenAiCompatibleExecutionToolsNonProxiedOptions } from './OpenAiCompatibleExecutionToolsOptions';
12
- import type { OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
8
+ import type { OpenAiCompatibleExecutionToolsNonProxiedOptions, OpenAiCompatibleExecutionToolsOptions } from './OpenAiCompatibleExecutionToolsOptions';
13
9
  /**
14
10
  * Execution Tools for calling OpenAI compatible API
15
11
  *
@@ -67,6 +63,10 @@ export declare class HardcodedOpenAiCompatibleExecutionTools extends OpenAiCompa
67
63
  * Default model for completion variant.
68
64
  */
69
65
  protected getDefaultEmbeddingModel(): AvailableModel;
66
+ /**
67
+ * Default model for image generation variant.
68
+ */
69
+ protected getDefaultImageGenerationModel(): AvailableModel;
70
70
  }
71
71
  /**
72
72
  * TODO: [🦺] Is there some way how to put `packageName` and `className` on top and function definition on bottom?
@@ -7,7 +7,7 @@ import type { number_model_temperature, number_seed, string_model_name, string_s
7
7
  * Note: [🚉] This is fully serializable as JSON
8
8
  * @see https://github.com/webgptorg/promptbook#model-requirements
9
9
  */
10
- export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | EmbeddingModelRequirements;
10
+ export type ModelRequirements = CompletionModelRequirements | ChatModelRequirements | ImageGenerationModelRequirements | EmbeddingModelRequirements;
11
11
  /**
12
12
  * Model requirements for the completion variant
13
13
  *
@@ -34,6 +34,17 @@ export type ChatModelRequirements = CommonModelRequirements & {
34
34
  */
35
35
  readonly systemMessage?: string_system_message;
36
36
  };
37
+ /**
38
+ * Model requirements for the image generation variant
39
+ *
40
+ * Note: [🚉] This is fully serializable as JSON
41
+ */
42
+ export type ImageGenerationModelRequirements = CommonModelRequirements & {
43
+ /**
44
+ * Image generation model variant
45
+ */
46
+ modelVariant: 'IMAGE_GENERATION';
47
+ };
37
48
  /**
38
49
  * Model requirements for the embedding variant
39
50
  *
@@ -59,6 +70,7 @@ export type CommonModelRequirements = {
59
70
  * There are 3 variants:
60
71
  * - **COMPLETION** - model that takes prompt and writes the rest of the text
61
72
  * - **CHAT** - model that takes prompt and previous messages and returns response
73
+ * - **IMAGE_GENERATION** - model that takes prompt and returns image
62
74
  * - **EMBEDDING** - model that takes prompt and returns embedding
63
75
  * <- [🤖]
64
76
  */
@@ -12,4 +12,4 @@ export type ModelVariant = TupleToUnion<typeof MODEL_VARIANTS>;
12
12
  * @see {@link ModelVariant}
13
13
  * @public exported from `@promptbook/core`
14
14
  */
15
- export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
15
+ export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "IMAGE_GENERATION", "EMBEDDING"];
@@ -4,6 +4,7 @@ import type { Expectations } from '../pipeline/PipelineJson/Expectations';
4
4
  import type { ChatModelRequirements } from './ModelRequirements';
5
5
  import type { CompletionModelRequirements } from './ModelRequirements';
6
6
  import type { EmbeddingModelRequirements } from './ModelRequirements';
7
+ import type { ImageGenerationModelRequirements } from './ModelRequirements';
7
8
  import type { ModelRequirements } from './ModelRequirements';
8
9
  import type { Parameters } from './typeAliases';
9
10
  import type { string_pipeline_url_with_task_hash } from './typeAliases';
@@ -17,7 +18,7 @@ import type { string_title } from './typeAliases';
17
18
  * Note: [🚉] This is fully serializable as JSON
18
19
  * @see https://github.com/webgptorg/promptbook#prompt
19
20
  */
20
- export type Prompt = CompletionPrompt | ChatPrompt | EmbeddingPrompt;
21
+ export type Prompt = CompletionPrompt | ChatPrompt | ImagePrompt | EmbeddingPrompt;
21
22
  /**
22
23
  * Completion prompt
23
24
  *
@@ -44,6 +45,17 @@ export type ChatPrompt = CommonPrompt & {
44
45
  */
45
46
  thread?: ChatMessage[];
46
47
  };
48
+ /**
49
+ * Image prompt
50
+ *
51
+ * Note: [🚉] This is fully serializable as JSON
52
+ */
53
+ export type ImagePrompt = CommonPrompt & {
54
+ /**
55
+ * Requirements for image generation model
56
+ */
57
+ modelRequirements: ImageGenerationModelRequirements;
58
+ };
47
59
  /**
48
60
  * Embedding prompt
49
61
  *
@@ -15,7 +15,7 @@ export declare const BOOK_LANGUAGE_VERSION: string_semantic_version;
15
15
  export declare const PROMPTBOOK_ENGINE_VERSION: string_promptbook_version;
16
16
  /**
17
17
  * Represents the version string of the Promptbook engine.
18
- * It follows semantic versioning (e.g., `0.103.0-65`).
18
+ * It follows semantic versioning (e.g., `0.103.0-67`).
19
19
  *
20
20
  * @generated
21
21
  */
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@promptbook/core",
3
- "version": "0.103.0-66",
3
+ "version": "0.103.0-68",
4
4
  "description": "Promptbook: Turn your company's scattered knowledge into AI ready books",
5
5
  "private": false,
6
6
  "sideEffects": false,
package/umd/index.umd.js CHANGED
@@ -28,7 +28,7 @@
28
28
  * @generated
29
29
  * @see https://github.com/webgptorg/promptbook
30
30
  */
31
- const PROMPTBOOK_ENGINE_VERSION = '0.103.0-66';
31
+ const PROMPTBOOK_ENGINE_VERSION = '0.103.0-68';
32
32
  /**
33
33
  * TODO: string_promptbook_version should be constrained to the all versions of Promptbook engine
34
34
  * Note: [💞] Ignore a discrepancy between file name and entity name
@@ -13809,7 +13809,7 @@
13809
13809
  * @see {@link ModelVariant}
13810
13810
  * @public exported from `@promptbook/core`
13811
13811
  */
13812
- const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
13812
+ const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'IMAGE_GENERATION', 'EMBEDDING' /* <- TODO [🏳] */ /* <- [🤖] */];
13813
13813
 
13814
13814
  /**
13815
13815
  * Parses the model command
@@ -17181,12 +17181,18 @@
17181
17181
  },
17182
17182
  },
17183
17183
  /**/
17184
- /*/
17185
- {
17186
- modelTitle: 'dall-e-3',
17187
- modelName: 'dall-e-3',
17188
- },
17189
- /**/
17184
+ /**/
17185
+ {
17186
+ modelVariant: 'IMAGE_GENERATION',
17187
+ modelTitle: 'dall-e-3',
17188
+ modelName: 'dall-e-3',
17189
+ modelDescription: 'DALL·E 3 is the latest version of the DALL·E art generation model. It understands significantly more nuance and detail than our previous systems, allowing you to easily translate your ideas into exceptionally accurate images.',
17190
+ pricing: {
17191
+ prompt: 0,
17192
+ output: 0.04,
17193
+ },
17194
+ },
17195
+ /**/
17190
17196
  /*/
17191
17197
  {
17192
17198
  modelTitle: 'whisper-1',
@@ -17205,12 +17211,18 @@
17205
17211
  },
17206
17212
  },
17207
17213
  /**/
17208
- /*/
17209
- {
17210
- modelTitle: 'dall-e-2',
17211
- modelName: 'dall-e-2',
17212
- },
17213
- /**/
17214
+ /**/
17215
+ {
17216
+ modelVariant: 'IMAGE_GENERATION',
17217
+ modelTitle: 'dall-e-2',
17218
+ modelName: 'dall-e-2',
17219
+ modelDescription: 'DALL·E 2 is an AI system that can create realistic images and art from a description in natural language.',
17220
+ pricing: {
17221
+ prompt: 0,
17222
+ output: 0.02,
17223
+ },
17224
+ },
17225
+ /**/
17214
17226
  /**/
17215
17227
  {
17216
17228
  modelVariant: 'CHAT',
@@ -18244,6 +18256,151 @@
18244
18256
  return this.callEmbeddingModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
18245
18257
  }
18246
18258
  }
18259
+ /**
18260
+ * Calls OpenAI compatible API to use an image generation model
18261
+ */
18262
+ async callImageGenerationModel(prompt) {
18263
+ // Deep clone prompt and modelRequirements to avoid mutation across calls
18264
+ const clonedPrompt = JSON.parse(JSON.stringify(prompt));
18265
+ const retriedUnsupportedParameters = new Set();
18266
+ return this.callImageGenerationModelWithRetry(clonedPrompt, clonedPrompt.modelRequirements, [], retriedUnsupportedParameters);
18267
+ }
18268
+ /**
18269
+ * Internal method that handles parameter retry for image generation model calls
18270
+ */
18271
+ async callImageGenerationModelWithRetry(prompt, currentModelRequirements, attemptStack = [], retriedUnsupportedParameters = new Set()) {
18272
+ var _a, _b;
18273
+ if (this.options.isVerbose) {
18274
+ console.info(`🎨 ${this.title} callImageGenerationModel call`, { prompt, currentModelRequirements });
18275
+ }
18276
+ const { content, parameters } = prompt;
18277
+ const client = await this.getClient();
18278
+ // TODO: [☂] Use here more modelRequirements
18279
+ if (currentModelRequirements.modelVariant !== 'IMAGE_GENERATION') {
18280
+ throw new PipelineExecutionError('Use callImageGenerationModel only for IMAGE_GENERATION variant');
18281
+ }
18282
+ const modelName = currentModelRequirements.modelName || this.getDefaultImageGenerationModel().modelName;
18283
+ const modelSettings = {
18284
+ model: modelName,
18285
+ // size: currentModelRequirements.size,
18286
+ // quality: currentModelRequirements.quality,
18287
+ // style: currentModelRequirements.style,
18288
+ };
18289
+ const rawPromptContent = templateParameters(content, { ...parameters, modelName });
18290
+ const rawRequest = {
18291
+ ...modelSettings,
18292
+ prompt: rawPromptContent,
18293
+ user: (_a = this.options.userId) === null || _a === void 0 ? void 0 : _a.toString(),
18294
+ response_format: 'url', // TODO: [🧠] Maybe allow b64_json
18295
+ };
18296
+ const start = $getCurrentDate();
18297
+ if (this.options.isVerbose) {
18298
+ console.info(colors__default["default"].bgWhite('rawRequest'), JSON.stringify(rawRequest, null, 4));
18299
+ }
18300
+ try {
18301
+ const rawResponse = await this.limiter
18302
+ .schedule(() => this.makeRequestWithNetworkRetry(() => client.images.generate(rawRequest)))
18303
+ .catch((error) => {
18304
+ assertsError(error);
18305
+ if (this.options.isVerbose) {
18306
+ console.info(colors__default["default"].bgRed('error'), error);
18307
+ }
18308
+ throw error;
18309
+ });
18310
+ if (this.options.isVerbose) {
18311
+ console.info(colors__default["default"].bgWhite('rawResponse'), JSON.stringify(rawResponse, null, 4));
18312
+ }
18313
+ const complete = $getCurrentDate();
18314
+ if (!rawResponse.data[0]) {
18315
+ throw new PipelineExecutionError(`No choises from ${this.title}`);
18316
+ }
18317
+ if (rawResponse.data.length > 1) {
18318
+ throw new PipelineExecutionError(`More than one choise from ${this.title}`);
18319
+ }
18320
+ const resultContent = rawResponse.data[0].url;
18321
+ const modelInfo = this.HARDCODED_MODELS.find((model) => model.modelName === modelName);
18322
+ const price = ((_b = modelInfo === null || modelInfo === void 0 ? void 0 : modelInfo.pricing) === null || _b === void 0 ? void 0 : _b.output) ? uncertainNumber(modelInfo.pricing.output) : uncertainNumber();
18323
+ return exportJson({
18324
+ name: 'promptResult',
18325
+ message: `Result of \`OpenAiCompatibleExecutionTools.callImageGenerationModel\``,
18326
+ order: [],
18327
+ value: {
18328
+ content: resultContent,
18329
+ modelName: modelName,
18330
+ timing: {
18331
+ start,
18332
+ complete,
18333
+ },
18334
+ usage: {
18335
+ price,
18336
+ input: {
18337
+ tokensCount: uncertainNumber(0),
18338
+ ...computeUsageCounts(rawPromptContent),
18339
+ },
18340
+ output: {
18341
+ tokensCount: uncertainNumber(0),
18342
+ ...computeUsageCounts(''),
18343
+ },
18344
+ },
18345
+ rawPromptContent,
18346
+ rawRequest,
18347
+ rawResponse,
18348
+ },
18349
+ });
18350
+ }
18351
+ catch (error) {
18352
+ assertsError(error);
18353
+ if (!isUnsupportedParameterError(error)) {
18354
+ if (attemptStack.length > 0) {
18355
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
18356
+ attemptStack
18357
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
18358
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
18359
+ `, Error: ${a.errorMessage}` +
18360
+ (a.stripped ? ' (stripped and retried)' : ''))
18361
+ .join('\n') +
18362
+ `\nFinal error: ${error.message}`);
18363
+ }
18364
+ throw error;
18365
+ }
18366
+ const unsupportedParameter = parseUnsupportedParameterError(error.message);
18367
+ if (!unsupportedParameter) {
18368
+ if (this.options.isVerbose) {
18369
+ console.warn(colors__default["default"].bgYellow('Warning'), 'Could not parse unsupported parameter from error:', error.message);
18370
+ }
18371
+ throw error;
18372
+ }
18373
+ const retryKey = `${modelName}-${unsupportedParameter}`;
18374
+ if (retriedUnsupportedParameters.has(retryKey)) {
18375
+ attemptStack.push({
18376
+ modelName,
18377
+ unsupportedParameter,
18378
+ errorMessage: error.message,
18379
+ stripped: true,
18380
+ });
18381
+ throw new PipelineExecutionError(`All attempts failed. Attempt history:\n` +
18382
+ attemptStack
18383
+ .map((a, i) => ` ${i + 1}. Model: ${a.modelName}` +
18384
+ (a.unsupportedParameter ? `, Stripped: ${a.unsupportedParameter}` : '') +
18385
+ `, Error: ${a.errorMessage}` +
18386
+ (a.stripped ? ' (stripped and retried)' : ''))
18387
+ .join('\n') +
18388
+ `\nFinal error: ${error.message}`);
18389
+ }
18390
+ retriedUnsupportedParameters.add(retryKey);
18391
+ if (this.options.isVerbose) {
18392
+ console.warn(colors__default["default"].bgYellow('Warning'), `Removing unsupported parameter '${unsupportedParameter}' for model '${modelName}' and retrying request`);
18393
+ }
18394
+ attemptStack.push({
18395
+ modelName,
18396
+ unsupportedParameter,
18397
+ errorMessage: error.message,
18398
+ stripped: true,
18399
+ });
18400
+ const modifiedModelRequirements = removeUnsupportedModelRequirement(currentModelRequirements, unsupportedParameter);
18401
+ return this.callImageGenerationModelWithRetry(prompt, modifiedModelRequirements, attemptStack, retriedUnsupportedParameters);
18402
+ }
18403
+ }
18247
18404
  // <- Note: [🤖] callXxxModel
18248
18405
  /**
18249
18406
  * Get the model that should be used as default
@@ -18413,6 +18570,12 @@
18413
18570
  getDefaultEmbeddingModel() {
18414
18571
  return this.getDefaultModel('text-embedding-3-large');
18415
18572
  }
18573
+ /**
18574
+ * Default model for image generation variant.
18575
+ */
18576
+ getDefaultImageGenerationModel() {
18577
+ return this.getDefaultModel('dall-e-3');
18578
+ }
18416
18579
  }
18417
18580
 
18418
18581
  /**