modelfusion 0.101.0 → 0.102.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. package/CHANGELOG.md +21 -0
  2. package/model-function/generate-image/ImageGenerationModel.d.ts +12 -2
  3. package/model-function/generate-image/PromptTemplateImageGenerationModel.cjs +3 -3
  4. package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts +2 -2
  5. package/model-function/generate-image/PromptTemplateImageGenerationModel.js +3 -3
  6. package/model-function/generate-image/generateImage.cjs +9 -7
  7. package/model-function/generate-image/generateImage.d.ts +2 -0
  8. package/model-function/generate-image/generateImage.js +9 -7
  9. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +7 -5
  10. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +16 -2
  11. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +7 -5
  12. package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts +0 -1
  13. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +6 -6
  14. package/model-provider/openai/OpenAIImageGenerationModel.cjs +25 -31
  15. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +2 -3
  16. package/model-provider/openai/OpenAIImageGenerationModel.js +25 -31
  17. package/model-provider/stability/StabilityApiConfiguration.cjs +12 -5
  18. package/model-provider/stability/StabilityApiConfiguration.d.ts +7 -8
  19. package/model-provider/stability/StabilityApiConfiguration.js +12 -5
  20. package/model-provider/stability/StabilityError.cjs +7 -31
  21. package/model-provider/stability/StabilityError.d.ts +2 -11
  22. package/model-provider/stability/StabilityError.js +6 -28
  23. package/model-provider/stability/StabilityFacade.cjs +11 -3
  24. package/model-provider/stability/StabilityFacade.d.ts +10 -2
  25. package/model-provider/stability/StabilityFacade.js +9 -2
  26. package/model-provider/stability/StabilityImageGenerationModel.cjs +39 -50
  27. package/model-provider/stability/StabilityImageGenerationModel.d.ts +37 -22
  28. package/model-provider/stability/StabilityImageGenerationModel.js +39 -50
  29. package/model-provider/stability/index.cjs +1 -3
  30. package/model-provider/stability/index.d.ts +1 -1
  31. package/model-provider/stability/index.js +0 -1
  32. package/package.json +1 -1
package/CHANGELOG.md CHANGED
@@ -1,5 +1,26 @@
1
1
  # Changelog
2
2
 
3
+ ## v0.102.0 - 2023-12-22
4
+
5
+ ### Added
6
+
7
+ - You can specify `numberOfGenerations` on image generation models and create multiple images by using the `fullResponse: true` option. Example:
8
+
9
+ ```ts
10
+ // generate 2 images:
11
+ const { images } = await generateImage(
12
+ openai.ImageGenerator({
13
+ model: "dall-e-3",
14
+ numberOfGenerations: 2,
15
+ size: "1024x1024",
16
+ }),
17
+ "the wicked witch of the west in the style of early 19th century painting",
18
+ { fullResponse: true }
19
+ );
20
+ ```
21
+
22
+ - **breaking change**: Image generation models use a generalized `numberOfGenerations` parameter (instead of model specific parameters) to specify the number of generations.
23
+
3
24
  ## v0.101.0 - 2023-12-22
4
25
 
5
26
  ### Changed
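The breaking change recorded above replaces the provider-specific image-count settings (`n` on the OpenAI model, `samples` on the Stability model) with the shared `numberOfGenerations` setting. The following is a minimal migration sketch rather than an excerpt from the package docs; it assumes the usual top-level `generateImage` and `stability` exports and uses a placeholder prompt:

```ts
import { generateImage, stability } from "modelfusion";

// 0.101.0: the image count was the Stability-specific `samples` setting.
// 0.102.0: use the shared `numberOfGenerations` setting instead.
const { images } = await generateImage(
  stability
    .ImageGenerator({
      model: "stable-diffusion-v1-6",
      height: 512,
      width: 512,
      numberOfGenerations: 2, // was: samples: 2
    })
    .withTextPrompt(),
  "the wicked witch of the west in the style of early 19th century painting",
  { fullResponse: true } // required to read the new `images` / `imagesBase64` arrays
);
```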
package/model-function/generate-image/ImageGenerationModel.d.ts CHANGED
@@ -2,11 +2,21 @@ import { FunctionOptions } from "../../core/FunctionOptions.js";
2
2
  import { Model, ModelSettings } from "../Model.js";
3
3
  import { PromptTemplate } from "../PromptTemplate.js";
4
4
  export interface ImageGenerationModelSettings extends ModelSettings {
5
+ /**
6
+ * Number of images to generate.
7
+ *
8
+ * Specifies the number of images the model should generate for a given prompt.
9
+ *
10
+ * Does nothing if the model does not support this setting.
11
+ *
12
+ * Example: `numberOfGenerations: 2` // The model will produce 2 images
13
+ */
14
+ numberOfGenerations?: number;
5
15
  }
6
16
  export interface ImageGenerationModel<PROMPT, SETTINGS extends ImageGenerationModelSettings = ImageGenerationModelSettings> extends Model<SETTINGS> {
7
- doGenerateImage(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
17
+ doGenerateImages(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
8
18
  response: unknown;
9
- base64Image: string;
19
+ base64Images: string[];
10
20
  }>;
11
21
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, PROMPT>): ImageGenerationModel<INPUT_PROMPT, SETTINGS>;
12
22
  }
package/model-function/generate-image/PromptTemplateImageGenerationModel.cjs CHANGED
@@ -24,15 +24,15 @@ class PromptTemplateImageGenerationModel {
24
24
  get settings() {
25
25
  return this.model.settings;
26
26
  }
27
- doGenerateImage(prompt, options) {
27
+ doGenerateImages(prompt, options) {
28
28
  const mappedPrompt = this.promptTemplate.format(prompt);
29
- return this.model.doGenerateImage(mappedPrompt, options);
29
+ return this.model.doGenerateImages(mappedPrompt, options);
30
30
  }
31
31
  get settingsForEvent() {
32
32
  return this.model.settingsForEvent;
33
33
  }
34
34
  withPromptTemplate(promptTemplate) {
35
- return new PromptTemplateImageGenerationModel({ model: this, promptTemplate: promptTemplate });
35
+ return new PromptTemplateImageGenerationModel({ model: this, promptTemplate });
36
36
  }
37
37
  withSettings(additionalSettings) {
38
38
  return new PromptTemplateImageGenerationModel({
package/model-function/generate-image/PromptTemplateImageGenerationModel.d.ts CHANGED
@@ -10,9 +10,9 @@ export declare class PromptTemplateImageGenerationModel<PROMPT, MODEL_PROMPT, SE
10
10
  });
11
11
  get modelInformation(): import("../ModelInformation.js").ModelInformation;
12
12
  get settings(): SETTINGS;
13
- doGenerateImage(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
13
+ doGenerateImages(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
14
14
  response: unknown;
15
- base64Image: string;
15
+ base64Images: string[];
16
16
  }>;
17
17
  get settingsForEvent(): Partial<SETTINGS>;
18
18
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateImageGenerationModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
package/model-function/generate-image/PromptTemplateImageGenerationModel.js CHANGED
@@ -21,15 +21,15 @@ export class PromptTemplateImageGenerationModel {
21
21
  get settings() {
22
22
  return this.model.settings;
23
23
  }
24
- doGenerateImage(prompt, options) {
24
+ doGenerateImages(prompt, options) {
25
25
  const mappedPrompt = this.promptTemplate.format(prompt);
26
- return this.model.doGenerateImage(mappedPrompt, options);
26
+ return this.model.doGenerateImages(mappedPrompt, options);
27
27
  }
28
28
  get settingsForEvent() {
29
29
  return this.model.settingsForEvent;
30
30
  }
31
31
  withPromptTemplate(promptTemplate) {
32
- return new PromptTemplateImageGenerationModel({ model: this, promptTemplate: promptTemplate });
32
+ return new PromptTemplateImageGenerationModel({ model: this, promptTemplate });
33
33
  }
34
34
  withSettings(additionalSettings) {
35
35
  return new PromptTemplateImageGenerationModel({
package/model-function/generate-image/generateImage.cjs CHANGED
@@ -9,22 +9,24 @@ async function generateImage(model, prompt, options) {
9
9
  model,
10
10
  options,
11
11
  generateResponse: async (options) => {
12
- const result = await model.doGenerateImage(prompt, options);
12
+ const result = await model.doGenerateImages(prompt, options);
13
13
  return {
14
14
  response: result.response,
15
- extractedValue: result.base64Image,
15
+ extractedValue: result.base64Images,
16
16
  };
17
17
  },
18
18
  });
19
- const imageBase64 = fullResponse.value;
20
- const image = Buffer.from(imageBase64, "base64");
19
+ const imagesBase64 = fullResponse.value;
20
+ const images = imagesBase64.map((imageBase64) => Buffer.from(imageBase64, "base64"));
21
21
  return options?.fullResponse
22
22
  ? {
23
- image,
24
- imageBase64,
23
+ image: images[0],
24
+ imageBase64: imagesBase64[0],
25
+ images,
26
+ imagesBase64,
25
27
  response: fullResponse.response,
26
28
  metadata: fullResponse.metadata,
27
29
  }
28
- : image;
30
+ : images[0];
29
31
  }
30
32
  exports.generateImage = generateImage;
package/model-function/generate-image/generateImage.d.ts CHANGED
@@ -34,6 +34,8 @@ export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT
34
34
  }): Promise<{
35
35
  image: Buffer;
36
36
  imageBase64: string;
37
+ images: Buffer[];
38
+ imagesBase64: string[];
37
39
  response: unknown;
38
40
  metadata: ModelCallMetadata;
39
41
  }>;
package/model-function/generate-image/generateImage.js CHANGED
@@ -6,21 +6,23 @@ export async function generateImage(model, prompt, options) {
6
6
  model,
7
7
  options,
8
8
  generateResponse: async (options) => {
9
- const result = await model.doGenerateImage(prompt, options);
9
+ const result = await model.doGenerateImages(prompt, options);
10
10
  return {
11
11
  response: result.response,
12
- extractedValue: result.base64Image,
12
+ extractedValue: result.base64Images,
13
13
  };
14
14
  },
15
15
  });
16
- const imageBase64 = fullResponse.value;
17
- const image = Buffer.from(imageBase64, "base64");
16
+ const imagesBase64 = fullResponse.value;
17
+ const images = imagesBase64.map((imageBase64) => Buffer.from(imageBase64, "base64"));
18
18
  return options?.fullResponse
19
19
  ? {
20
- image,
21
- imageBase64,
20
+ image: images[0],
21
+ imageBase64: imagesBase64[0],
22
+ images,
23
+ imagesBase64,
22
24
  response: fullResponse.response,
23
25
  metadata: fullResponse.metadata,
24
26
  }
25
- : image;
27
+ : images[0];
26
28
  }
package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs CHANGED
@@ -37,13 +37,15 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
37
37
  url: api.assembleUrl(`/txt2img`),
38
38
  headers: api.headers,
39
39
  body: {
40
- height: this.settings.height,
41
- width: this.settings.width,
42
40
  prompt: input.prompt,
43
41
  negative_prompt: input.negativePrompt,
42
+ seed: this.settings.seed,
43
+ batch_size: this.settings.numberOfGenerations,
44
+ height: this.settings.height,
45
+ width: this.settings.width,
46
+ cfg_scale: this.settings.cfgScale,
44
47
  sampler_index: this.settings.sampler,
45
48
  steps: this.settings.steps,
46
- seed: input.seed,
47
49
  override_settings: {
48
50
  sd_model_checkpoint: this.settings.model,
49
51
  },
@@ -62,11 +64,11 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
62
64
  steps: this.settings.steps,
63
65
  };
64
66
  }
65
- async doGenerateImage(prompt, options) {
67
+ async doGenerateImages(prompt, options) {
66
68
  const response = await this.callAPI(prompt, options);
67
69
  return {
68
70
  response,
69
- base64Image: response.images[0],
71
+ base64Images: response.images,
70
72
  };
71
73
  }
72
74
  withTextPrompt() {
package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts CHANGED
@@ -8,11 +8,25 @@ import { PromptTemplateImageGenerationModel } from "../../model-function/generat
8
8
  import { Automatic1111ImageGenerationPrompt } from "./Automatic1111ImageGenerationPrompt.js";
9
9
  export interface Automatic1111ImageGenerationSettings extends ImageGenerationModelSettings {
10
10
  api?: ApiConfiguration;
11
+ /**
12
+ * Stable Diffusion checkpoint.
13
+ */
11
14
  model: string;
12
15
  height?: number;
13
16
  width?: number;
17
+ /**
18
+ * Sampling method.
19
+ */
14
20
  sampler?: string;
21
+ /**
22
+ * Sampling steps.
23
+ */
15
24
  steps?: number;
25
+ /**
26
+ * CFG Scale.
27
+ */
28
+ cfgScale?: number;
29
+ seed?: number;
16
30
  }
17
31
  /**
18
32
  * Create an image generation model that calls the AUTOMATIC1111 Stable Diffusion Web UI API.
@@ -25,13 +39,13 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
25
39
  get modelName(): string;
26
40
  callAPI(input: Automatic1111ImageGenerationPrompt, options?: FunctionOptions): Promise<Automatic1111ImageGenerationResponse>;
27
41
  get settingsForEvent(): Partial<Automatic1111ImageGenerationSettings>;
28
- doGenerateImage(prompt: Automatic1111ImageGenerationPrompt, options?: FunctionOptions): Promise<{
42
+ doGenerateImages(prompt: Automatic1111ImageGenerationPrompt, options?: FunctionOptions): Promise<{
29
43
  response: {
30
44
  images: string[];
31
45
  parameters: {};
32
46
  info: string;
33
47
  };
34
- base64Image: string;
48
+ base64Images: string[];
35
49
  }>;
36
50
  withTextPrompt(): PromptTemplateImageGenerationModel<string, Automatic1111ImageGenerationPrompt, Automatic1111ImageGenerationSettings, this>;
37
51
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, Automatic1111ImageGenerationPrompt>): PromptTemplateImageGenerationModel<INPUT_PROMPT, Automatic1111ImageGenerationPrompt, Automatic1111ImageGenerationSettings, this>;
package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js CHANGED
@@ -34,13 +34,15 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
34
34
  url: api.assembleUrl(`/txt2img`),
35
35
  headers: api.headers,
36
36
  body: {
37
- height: this.settings.height,
38
- width: this.settings.width,
39
37
  prompt: input.prompt,
40
38
  negative_prompt: input.negativePrompt,
39
+ seed: this.settings.seed,
40
+ batch_size: this.settings.numberOfGenerations,
41
+ height: this.settings.height,
42
+ width: this.settings.width,
43
+ cfg_scale: this.settings.cfgScale,
41
44
  sampler_index: this.settings.sampler,
42
45
  steps: this.settings.steps,
43
- seed: input.seed,
44
46
  override_settings: {
45
47
  sd_model_checkpoint: this.settings.model,
46
48
  },
@@ -59,11 +61,11 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
59
61
  steps: this.settings.steps,
60
62
  };
61
63
  }
62
- async doGenerateImage(prompt, options) {
64
+ async doGenerateImages(prompt, options) {
63
65
  const response = await this.callAPI(prompt, options);
64
66
  return {
65
67
  response,
66
- base64Image: response.images[0],
68
+ base64Images: response.images,
67
69
  };
68
70
  }
69
71
  withTextPrompt() {
package/model-provider/automatic1111/Automatic1111ImageGenerationPrompt.d.ts CHANGED
@@ -2,7 +2,6 @@ import { PromptTemplate } from "../../model-function/PromptTemplate.js";
2
2
  export type Automatic1111ImageGenerationPrompt = {
3
3
  prompt: string;
4
4
  negativePrompt?: string;
5
- seed?: number;
6
5
  };
7
6
  /**
8
7
  * Formats a basic text prompt as an Automatic1111 prompt.
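For the Automatic1111 provider, `seed` moves from the prompt (see the field removed above) to the model settings, `cfgScale` becomes configurable, and `numberOfGenerations` is forwarded as the web UI's `batch_size`. A hedged sketch of the updated settings; the checkpoint name is a placeholder and the class is assumed to be importable from the package root:

```ts
import { Automatic1111ImageGenerationModel, generateImage } from "modelfusion";

const model = new Automatic1111ImageGenerationModel({
  model: "sd_xl_base_1.0.safetensors", // hypothetical Stable Diffusion checkpoint
  width: 512,
  height: 512,
  steps: 30,
  cfgScale: 7, // new setting
  seed: 42, // moved here from the prompt
  numberOfGenerations: 2, // sent to the web UI as batch_size
});

// The prompt now only carries prompt / negativePrompt.
const image = await generateImage(model, {
  prompt: "a lighthouse on a cliff at sunset",
  negativePrompt: "blurry, low quality",
});
```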
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -65,8 +65,8 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
65
65
  generation_settings: {
66
66
  model: string;
67
67
  stream: boolean;
68
- mirostat: number;
69
68
  seed: number;
69
+ mirostat: number;
70
70
  stop: string[];
71
71
  frequency_penalty: number;
72
72
  ignore_eos: boolean;
@@ -155,8 +155,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
155
155
  }, "strip", z.ZodTypeAny, {
156
156
  model: string;
157
157
  stream: boolean;
158
- mirostat: number;
159
158
  seed: number;
159
+ mirostat: number;
160
160
  stop: string[];
161
161
  frequency_penalty: number;
162
162
  ignore_eos: boolean;
@@ -179,8 +179,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
179
179
  }, {
180
180
  model: string;
181
181
  stream: boolean;
182
- mirostat: number;
183
182
  seed: number;
183
+ mirostat: number;
184
184
  stop: string[];
185
185
  frequency_penalty: number;
186
186
  ignore_eos: boolean;
@@ -247,8 +247,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
247
247
  generation_settings: {
248
248
  model: string;
249
249
  stream: boolean;
250
- mirostat: number;
251
250
  seed: number;
251
+ mirostat: number;
252
252
  stop: string[];
253
253
  frequency_penalty: number;
254
254
  ignore_eos: boolean;
@@ -295,8 +295,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
295
295
  generation_settings: {
296
296
  model: string;
297
297
  stream: boolean;
298
- mirostat: number;
299
298
  seed: number;
299
+ mirostat: number;
300
300
  stop: string[];
301
301
  frequency_penalty: number;
302
302
  ignore_eos: boolean;
@@ -360,8 +360,8 @@ export declare const LlamaCppTextGenerationResponseFormat: {
360
360
  generation_settings: {
361
361
  model: string;
362
362
  stream: boolean;
363
- mirostat: number;
364
363
  seed: number;
364
+ mirostat: number;
365
365
  stop: string[];
366
366
  frequency_penalty: number;
367
367
  ignore_eos: boolean;
package/model-provider/openai/OpenAIImageGenerationModel.cjs CHANGED
@@ -60,7 +60,7 @@ const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) =>
60
60
  if (cost == null) {
61
61
  return null;
62
62
  }
63
- return (settings.n ?? 1) * cost;
63
+ return (settings.numberOfGenerations ?? 1) * cost;
64
64
  };
65
65
  exports.calculateOpenAIImageGenerationCostInMillicents = calculateOpenAIImageGenerationCostInMillicents;
66
66
  /**
@@ -88,38 +88,48 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
88
88
  return this.settings.model;
89
89
  }
90
90
  async callAPI(prompt, options) {
91
- const run = options?.run;
91
+ const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
92
+ const abortSignal = options?.run?.abortSignal;
93
+ const userId = options?.run?.userId;
92
94
  const responseFormat = options?.responseFormat;
93
- const callSettings = {
94
- ...this.settings,
95
- user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
96
- abortSignal: run?.abortSignal,
97
- responseFormat,
98
- prompt,
99
- };
100
95
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
101
- retry: callSettings.api?.retry,
102
- throttle: callSettings.api?.throttle,
103
- call: async () => callOpenAIImageGenerationAPI(callSettings),
96
+ retry: api.retry,
97
+ throttle: api.throttle,
98
+ call: async () => {
99
+ return (0, postToApi_js_1.postJsonToApi)({
100
+ url: api.assembleUrl("/images/generations"),
101
+ headers: api.headers,
102
+ body: {
103
+ prompt,
104
+ n: this.settings.numberOfGenerations,
105
+ size: this.settings.size,
106
+ response_format: responseFormat.type,
107
+ user: this.settings.isUserIdForwardingEnabled ? userId : undefined,
108
+ },
109
+ failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
110
+ successfulResponseHandler: responseFormat?.handler,
111
+ abortSignal,
112
+ });
113
+ },
104
114
  });
105
115
  }
106
116
  get settingsForEvent() {
107
117
  const eventSettingProperties = [
108
- "n",
118
+ "numberOfGenerations",
109
119
  "size",
110
120
  "quality",
111
121
  "style",
112
122
  ];
113
123
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
114
124
  }
115
- async doGenerateImage(prompt, options) {
125
+ async doGenerateImages(prompt, options) {
116
126
  const response = await this.callAPI(prompt, {
117
127
  responseFormat: exports.OpenAIImageGenerationResponseFormat.base64Json,
118
128
  ...options,
119
129
  });
120
130
  return {
121
131
  response,
122
- base64Image: response.data[0].b64_json,
132
+ base64Images: response.data.map((item) => item.b64_json),
123
133
  };
124
134
  }
125
135
  withPromptTemplate(promptTemplate) {
@@ -155,19 +165,3 @@ exports.OpenAIImageGenerationResponseFormat = {
155
165
  handler: (0, postToApi_js_1.createJsonResponseHandler)(openAIImageGenerationBase64JsonSchema),
156
166
  },
157
167
  };
158
- async function callOpenAIImageGenerationAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, prompt, n, size, responseFormat, user, }) {
159
- return (0, postToApi_js_1.postJsonToApi)({
160
- url: api.assembleUrl("/images/generations"),
161
- headers: api.headers,
162
- body: {
163
- prompt,
164
- n,
165
- size,
166
- response_format: responseFormat.type,
167
- user,
168
- },
169
- failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
170
- successfulResponseHandler: responseFormat?.handler,
171
- abortSignal,
172
- });
173
- }
package/model-provider/openai/OpenAIImageGenerationModel.d.ts CHANGED
@@ -24,7 +24,6 @@ export declare const calculateOpenAIImageGenerationCostInMillicents: ({ model, s
24
24
  export type OpenAIImageModelType = keyof typeof OPENAI_IMAGE_MODELS;
25
25
  export interface OpenAIImageGenerationCallSettings {
26
26
  model: OpenAIImageModelType;
27
- n?: number;
28
27
  size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
29
28
  quality?: "standard" | "hd";
30
29
  style?: "vivid" | "natural";
@@ -52,14 +51,14 @@ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImag
52
51
  responseFormat: OpenAIImageGenerationResponseFormatType<RESULT>;
53
52
  } & FunctionOptions): Promise<RESULT>;
54
53
  get settingsForEvent(): Partial<OpenAIImageGenerationSettings>;
55
- doGenerateImage(prompt: string, options?: FunctionOptions): Promise<{
54
+ doGenerateImages(prompt: string, options?: FunctionOptions): Promise<{
56
55
  response: {
57
56
  data: {
58
57
  b64_json: string;
59
58
  }[];
60
59
  created: number;
61
60
  };
62
- base64Image: string;
61
+ base64Images: string[];
63
62
  }>;
64
63
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, string>): PromptTemplateImageGenerationModel<INPUT_PROMPT, string, OpenAIImageGenerationSettings, this>;
65
64
  withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
package/model-provider/openai/OpenAIImageGenerationModel.js CHANGED
@@ -57,7 +57,7 @@ export const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings
57
57
  if (cost == null) {
58
58
  return null;
59
59
  }
60
- return (settings.n ?? 1) * cost;
60
+ return (settings.numberOfGenerations ?? 1) * cost;
61
61
  };
62
62
  /**
63
63
  * Create an image generation model that calls the OpenAI AI image creation API.
@@ -84,38 +84,48 @@ export class OpenAIImageGenerationModel extends AbstractModel {
84
84
  return this.settings.model;
85
85
  }
86
86
  async callAPI(prompt, options) {
87
- const run = options?.run;
87
+ const api = this.settings.api ?? new OpenAIApiConfiguration();
88
+ const abortSignal = options?.run?.abortSignal;
89
+ const userId = options?.run?.userId;
88
90
  const responseFormat = options?.responseFormat;
89
- const callSettings = {
90
- ...this.settings,
91
- user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
92
- abortSignal: run?.abortSignal,
93
- responseFormat,
94
- prompt,
95
- };
96
91
  return callWithRetryAndThrottle({
97
- retry: callSettings.api?.retry,
98
- throttle: callSettings.api?.throttle,
99
- call: async () => callOpenAIImageGenerationAPI(callSettings),
92
+ retry: api.retry,
93
+ throttle: api.throttle,
94
+ call: async () => {
95
+ return postJsonToApi({
96
+ url: api.assembleUrl("/images/generations"),
97
+ headers: api.headers,
98
+ body: {
99
+ prompt,
100
+ n: this.settings.numberOfGenerations,
101
+ size: this.settings.size,
102
+ response_format: responseFormat.type,
103
+ user: this.settings.isUserIdForwardingEnabled ? userId : undefined,
104
+ },
105
+ failedResponseHandler: failedOpenAICallResponseHandler,
106
+ successfulResponseHandler: responseFormat?.handler,
107
+ abortSignal,
108
+ });
109
+ },
100
110
  });
101
111
  }
102
112
  get settingsForEvent() {
103
113
  const eventSettingProperties = [
104
- "n",
114
+ "numberOfGenerations",
105
115
  "size",
106
116
  "quality",
107
117
  "style",
108
118
  ];
109
119
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
110
120
  }
111
- async doGenerateImage(prompt, options) {
121
+ async doGenerateImages(prompt, options) {
112
122
  const response = await this.callAPI(prompt, {
113
123
  responseFormat: OpenAIImageGenerationResponseFormat.base64Json,
114
124
  ...options,
115
125
  });
116
126
  return {
117
127
  response,
118
- base64Image: response.data[0].b64_json,
128
+ base64Images: response.data.map((item) => item.b64_json),
119
129
  };
120
130
  }
121
131
  withPromptTemplate(promptTemplate) {
@@ -150,19 +160,3 @@ export const OpenAIImageGenerationResponseFormat = {
150
160
  handler: createJsonResponseHandler(openAIImageGenerationBase64JsonSchema),
151
161
  },
152
162
  };
153
- async function callOpenAIImageGenerationAPI({ api = new OpenAIApiConfiguration(), abortSignal, prompt, n, size, responseFormat, user, }) {
154
- return postJsonToApi({
155
- url: api.assembleUrl("/images/generations"),
156
- headers: api.headers,
157
- body: {
158
- prompt,
159
- n,
160
- size,
161
- response_format: responseFormat.type,
162
- user,
163
- },
164
- failedResponseHandler: failedOpenAICallResponseHandler,
165
- successfulResponseHandler: responseFormat?.handler,
166
- abortSignal,
167
- });
168
- }
package/model-provider/stability/StabilityApiConfiguration.cjs CHANGED
@@ -1,13 +1,20 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
3
  exports.StabilityApiConfiguration = void 0;
4
- const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
4
+ const BaseUrlPartsApiConfiguration_js_1 = require("../../core/api/BaseUrlPartsApiConfiguration.cjs");
5
5
  const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
6
- class StabilityApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
7
- constructor({ baseUrl = "https://api.stability.ai/v1", apiKey, retry, throttle, } = {}) {
6
+ /**
7
+ * Creates an API configuration for the Stability AI API.
8
+ * It calls the API at https://api.stability.ai/v1 by default.
9
+ */
10
+ class StabilityApiConfiguration extends BaseUrlPartsApiConfiguration_js_1.BaseUrlPartsApiConfiguration {
11
+ constructor({ protocol = "https", host = "api.stability.ai", port = "443", path = "/v1", apiKey, headers, retry, throttle, } = {}) {
8
12
  super({
9
- baseUrl,
10
- headers: {
13
+ protocol,
14
+ host,
15
+ port,
16
+ path,
17
+ headers: headers ?? {
11
18
  Authorization: `Bearer ${(0, loadApiKey_js_1.loadApiKey)({
12
19
  apiKey,
13
20
  environmentVariableName: "STABILITY_API_KEY",
package/model-provider/stability/StabilityApiConfiguration.d.ts CHANGED
@@ -1,11 +1,10 @@
1
- import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
2
- import { RetryFunction } from "../../core/api/RetryFunction.js";
3
- import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
4
- export declare class StabilityApiConfiguration extends BaseUrlApiConfiguration {
5
- constructor({ baseUrl, apiKey, retry, throttle, }?: {
6
- baseUrl?: string;
1
+ import { BaseUrlPartsApiConfiguration, BaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlPartsApiConfiguration.js";
2
+ /**
3
+ * Creates an API configuration for the Stability AI API.
4
+ * It calls the API at https://api.stability.ai/v1 by default.
5
+ */
6
+ export declare class StabilityApiConfiguration extends BaseUrlPartsApiConfiguration {
7
+ constructor({ protocol, host, port, path, apiKey, headers, retry, throttle, }?: Partial<BaseUrlPartsApiConfigurationOptions> & {
7
8
  apiKey?: string;
8
- retry?: RetryFunction;
9
- throttle?: ThrottleFunction;
10
9
  });
11
10
  }
package/model-provider/stability/StabilityApiConfiguration.js CHANGED
@@ -1,10 +1,17 @@
1
- import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
1
+ import { BaseUrlPartsApiConfiguration, } from "../../core/api/BaseUrlPartsApiConfiguration.js";
2
2
  import { loadApiKey } from "../../core/api/loadApiKey.js";
3
- export class StabilityApiConfiguration extends BaseUrlApiConfiguration {
4
- constructor({ baseUrl = "https://api.stability.ai/v1", apiKey, retry, throttle, } = {}) {
3
+ /**
4
+ * Creates an API configuration for the Stability AI API.
5
+ * It calls the API at https://api.stability.ai/v1 by default.
6
+ */
7
+ export class StabilityApiConfiguration extends BaseUrlPartsApiConfiguration {
8
+ constructor({ protocol = "https", host = "api.stability.ai", port = "443", path = "/v1", apiKey, headers, retry, throttle, } = {}) {
5
9
  super({
6
- baseUrl,
7
- headers: {
10
+ protocol,
11
+ host,
12
+ port,
13
+ path,
14
+ headers: headers ?? {
8
15
  Authorization: `Bearer ${loadApiKey({
9
16
  apiKey,
10
17
  environmentVariableName: "STABILITY_API_KEY",
package/model-provider/stability/StabilityError.cjs CHANGED
@@ -1,37 +1,13 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.failedStabilityCallResponseHandler = exports.StabilityError = exports.stabilityErrorDataSchema = void 0;
3
+ exports.failedStabilityCallResponseHandler = void 0;
4
4
  const zod_1 = require("zod");
5
- const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
5
+ const postToApi_js_1 = require("../../core/api/postToApi.cjs");
6
6
  const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
7
- const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
8
- exports.stabilityErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
7
+ const stabilityErrorDataSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
9
8
  message: zod_1.z.string(),
10
9
  }));
11
- class StabilityError extends ApiCallError_js_1.ApiCallError {
12
- constructor({ data, statusCode, url, requestBodyValues, message = data.message, }) {
13
- super({ message, statusCode, requestBodyValues, url });
14
- Object.defineProperty(this, "data", {
15
- enumerable: true,
16
- configurable: true,
17
- writable: true,
18
- value: void 0
19
- });
20
- this.data = data;
21
- }
22
- }
23
- exports.StabilityError = StabilityError;
24
- const failedStabilityCallResponseHandler = async ({ response, url, requestBodyValues }) => {
25
- const responseBody = await response.text();
26
- const parsedError = (0, parseJSON_js_1.parseJSON)({
27
- text: responseBody,
28
- schema: exports.stabilityErrorDataSchema,
29
- });
30
- return new StabilityError({
31
- url,
32
- requestBodyValues,
33
- statusCode: response.status,
34
- data: parsedError,
35
- });
36
- };
37
- exports.failedStabilityCallResponseHandler = failedStabilityCallResponseHandler;
10
+ exports.failedStabilityCallResponseHandler = (0, postToApi_js_1.createJsonErrorResponseHandler)({
11
+ errorSchema: stabilityErrorDataSchema,
12
+ errorToMessage: (error) => error.message,
13
+ });
package/model-provider/stability/StabilityError.d.ts CHANGED
@@ -1,18 +1,9 @@
1
1
  import { ApiCallError } from "../../core/api/ApiCallError.js";
2
2
  import { ResponseHandler } from "../../core/api/postToApi.js";
3
3
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
- export declare const stabilityErrorDataSchema: ZodSchema<{
4
+ declare const stabilityErrorDataSchema: ZodSchema<{
5
5
  message: string;
6
6
  }>;
7
7
  export type StabilityErrorData = (typeof stabilityErrorDataSchema)["_type"];
8
- export declare class StabilityError extends ApiCallError {
9
- readonly data: StabilityErrorData;
10
- constructor({ data, statusCode, url, requestBodyValues, message, }: {
11
- message?: string;
12
- statusCode: number;
13
- url: string;
14
- requestBodyValues: unknown;
15
- data: StabilityErrorData;
16
- });
17
- }
18
8
  export declare const failedStabilityCallResponseHandler: ResponseHandler<ApiCallError>;
9
+ export {};
package/model-provider/stability/StabilityError.js CHANGED
@@ -1,32 +1,10 @@
1
1
  import { z } from "zod";
2
- import { ApiCallError } from "../../core/api/ApiCallError.js";
2
+ import { createJsonErrorResponseHandler, } from "../../core/api/postToApi.js";
3
3
  import { ZodSchema } from "../../core/schema/ZodSchema.js";
4
- import { parseJSON } from "../../core/schema/parseJSON.js";
5
- export const stabilityErrorDataSchema = new ZodSchema(z.object({
4
+ const stabilityErrorDataSchema = new ZodSchema(z.object({
6
5
  message: z.string(),
7
6
  }));
8
- export class StabilityError extends ApiCallError {
9
- constructor({ data, statusCode, url, requestBodyValues, message = data.message, }) {
10
- super({ message, statusCode, requestBodyValues, url });
11
- Object.defineProperty(this, "data", {
12
- enumerable: true,
13
- configurable: true,
14
- writable: true,
15
- value: void 0
16
- });
17
- this.data = data;
18
- }
19
- }
20
- export const failedStabilityCallResponseHandler = async ({ response, url, requestBodyValues }) => {
21
- const responseBody = await response.text();
22
- const parsedError = parseJSON({
23
- text: responseBody,
24
- schema: stabilityErrorDataSchema,
25
- });
26
- return new StabilityError({
27
- url,
28
- requestBodyValues,
29
- statusCode: response.status,
30
- data: parsedError,
31
- });
32
- };
7
+ export const failedStabilityCallResponseHandler = createJsonErrorResponseHandler({
8
+ errorSchema: stabilityErrorDataSchema,
9
+ errorToMessage: (error) => error.message,
10
+ });
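Since the dedicated `StabilityError` class is removed here (and dropped from the stability index exports below), failed Stability calls now surface as the generic `ApiCallError` produced by `createJsonErrorResponseHandler`. A hedged migration sketch, assuming `ApiCallError` remains exported from the package root:

```ts
import { ApiCallError } from "modelfusion";

try {
  // ... generateImage(...) call against a Stability model ...
} catch (error) {
  // 0.101.0: error instanceof StabilityError
  if (error instanceof ApiCallError) {
    console.error(error.url, error.message);
  }
}
```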
package/model-provider/stability/StabilityFacade.cjs CHANGED
@@ -1,6 +1,7 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.ImageGenerator = void 0;
3
+ exports.Api = exports.ImageGenerator = void 0;
4
+ const StabilityApiConfiguration_js_1 = require("./StabilityApiConfiguration.cjs");
4
5
  const StabilityImageGenerationModel_js_1 = require("./StabilityImageGenerationModel.cjs");
5
6
  /**
6
7
  * Create an image generation model that calls the Stability AI image generation API.
@@ -10,12 +11,11 @@ const StabilityImageGenerationModel_js_1 = require("./StabilityImageGenerationMo
10
11
  * @example
11
12
  * const image = await generateImage(
12
13
  * stability.ImageGenerator({
13
- * model: "stable-diffusion-512-v2-1",
14
+ * model: "stable-diffusion-v1-6",
14
15
  * cfgScale: 7,
15
16
  * clipGuidancePreset: "FAST_BLUE",
16
17
  * height: 512,
17
18
  * width: 512,
18
- * samples: 1,
19
19
  * steps: 30,
20
20
  * })
21
21
  * [
@@ -30,3 +30,11 @@ function ImageGenerator(settings) {
30
30
  return new StabilityImageGenerationModel_js_1.StabilityImageGenerationModel(settings);
31
31
  }
32
32
  exports.ImageGenerator = ImageGenerator;
33
+ /**
34
+ * Creates an API configuration for the Stability AI API.
35
+ * It calls the API at https://api.stability.ai/v1 by default.
36
+ */
37
+ function Api(settings) {
38
+ return new StabilityApiConfiguration_js_1.StabilityApiConfiguration(settings);
39
+ }
40
+ exports.Api = Api;
package/model-provider/stability/StabilityFacade.d.ts CHANGED
@@ -1,3 +1,5 @@
1
+ import { BaseUrlPartsApiConfigurationOptions } from "../../core/api/BaseUrlPartsApiConfiguration.js";
2
+ import { StabilityApiConfiguration } from "./StabilityApiConfiguration.js";
1
3
  import { StabilityImageGenerationModel, StabilityImageGenerationSettings } from "./StabilityImageGenerationModel.js";
2
4
  /**
3
5
  * Create an image generation model that calls the Stability AI image generation API.
@@ -7,12 +9,11 @@ import { StabilityImageGenerationModel, StabilityImageGenerationSettings } from
7
9
  * @example
8
10
  * const image = await generateImage(
9
11
  * stability.ImageGenerator({
10
- * model: "stable-diffusion-512-v2-1",
12
+ * model: "stable-diffusion-v1-6",
11
13
  * cfgScale: 7,
12
14
  * clipGuidancePreset: "FAST_BLUE",
13
15
  * height: 512,
14
16
  * width: 512,
15
- * samples: 1,
16
17
  * steps: 30,
17
18
  * })
18
19
  * [
@@ -24,3 +25,10 @@ import { StabilityImageGenerationModel, StabilityImageGenerationSettings } from
24
25
  * @returns A new instance of {@link StabilityImageGenerationModel}.
25
26
  */
26
27
  export declare function ImageGenerator(settings: StabilityImageGenerationSettings): StabilityImageGenerationModel;
28
+ /**
29
+ * Creates an API configuration for the Stability AI API.
30
+ * It calls the API at https://api.stability.ai/v1 by default.
31
+ */
32
+ export declare function Api(settings: Partial<BaseUrlPartsApiConfigurationOptions> & {
33
+ apiKey?: string;
34
+ }): StabilityApiConfiguration;
package/model-provider/stability/StabilityFacade.js CHANGED
@@ -1,3 +1,4 @@
1
+ import { StabilityApiConfiguration } from "./StabilityApiConfiguration.js";
1
2
  import { StabilityImageGenerationModel, } from "./StabilityImageGenerationModel.js";
2
3
  /**
3
4
  * Create an image generation model that calls the Stability AI image generation API.
@@ -7,12 +8,11 @@ import { StabilityImageGenerationModel, } from "./StabilityImageGenerationModel.
7
8
  * @example
8
9
  * const image = await generateImage(
9
10
  * stability.ImageGenerator({
10
- * model: "stable-diffusion-512-v2-1",
11
+ * model: "stable-diffusion-v1-6",
11
12
  * cfgScale: 7,
12
13
  * clipGuidancePreset: "FAST_BLUE",
13
14
  * height: 512,
14
15
  * width: 512,
15
- * samples: 1,
16
16
  * steps: 30,
17
17
  * })
18
18
  * [
@@ -26,3 +26,10 @@ import { StabilityImageGenerationModel, } from "./StabilityImageGenerationModel.
26
26
  export function ImageGenerator(settings) {
27
27
  return new StabilityImageGenerationModel(settings);
28
28
  }
29
+ /**
30
+ * Creates an API configuration for the Stability AI API.
31
+ * It calls the API at https://api.stability.ai/v1 by default.
32
+ */
33
+ export function Api(settings) {
34
+ return new StabilityApiConfiguration(settings);
35
+ }
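The new `stability.Api` factory above returns a `StabilityApiConfiguration`, which (per the configuration changes earlier in this diff) is now assembled from URL parts instead of a single `baseUrl`. A minimal sketch of pointing the image model at a custom endpoint; the host is a placeholder:

```ts
import { generateImage, stability } from "modelfusion";

const api = stability.Api({
  apiKey: process.env.STABILITY_API_KEY, // omit to load STABILITY_API_KEY automatically
  host: "stability-proxy.example.com", // hypothetical proxy in front of api.stability.ai
  path: "/v1",
});

const image = await generateImage(
  stability.ImageGenerator({ api, model: "stable-diffusion-v1-6" }).withTextPrompt(),
  "a painting of a lighthouse in the style of Claude Monet"
);
```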
package/model-provider/stability/StabilityImageGenerationModel.cjs CHANGED
@@ -9,6 +9,10 @@ const PromptTemplateImageGenerationModel_js_1 = require("../../model-function/ge
9
9
  const StabilityApiConfiguration_js_1 = require("./StabilityApiConfiguration.cjs");
10
10
  const StabilityError_js_1 = require("./StabilityError.cjs");
11
11
  const StabilityImageGenerationPrompt_js_1 = require("./StabilityImageGenerationPrompt.cjs");
12
+ const stabilityImageGenerationModels = [
13
+ "stable-diffusion-v1-6",
14
+ "stable-diffusion-xl-1024-v1-0",
15
+ ];
12
16
  /**
13
17
  * Create an image generation model that calls the Stability AI image generation API.
14
18
  *
@@ -16,13 +20,12 @@ const StabilityImageGenerationPrompt_js_1 = require("./StabilityImageGenerationP
16
20
  *
17
21
  * @example
18
22
  * const image = await generateImage(
19
- * new StabilityImageGenerationModel({
20
- * model: "stable-diffusion-512-v2-1",
23
+ * stability.ImageGenerator({
24
+ * model: "stable-diffusion-v1-6",
21
25
  * cfgScale: 7,
22
26
  * clipGuidancePreset: "FAST_BLUE",
23
27
  * height: 512,
24
28
  * width: 512,
25
- * samples: 1,
26
29
  * steps: 30,
27
30
  * })
28
31
  * [
@@ -45,37 +48,50 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
45
48
  return this.settings.model;
46
49
  }
47
50
  async callAPI(input, options) {
51
+ const api = this.settings.api ?? new StabilityApiConfiguration_js_1.StabilityApiConfiguration();
52
+ const abortSignal = options?.run?.abortSignal;
48
53
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
49
54
  retry: this.settings.api?.retry,
50
55
  throttle: this.settings.api?.throttle,
51
- call: async () => callStabilityImageGenerationAPI({
52
- ...this.settings,
53
- abortSignal: options?.run?.abortSignal,
54
- engineId: this.settings.model,
55
- textPrompts: input,
56
+ call: async () => (0, postToApi_js_1.postJsonToApi)({
57
+ url: api.assembleUrl(`/generation/${this.settings.model}/text-to-image`),
58
+ headers: api.headers,
59
+ body: {
60
+ height: this.settings.height,
61
+ width: this.settings.width,
62
+ text_prompts: input,
63
+ cfg_scale: this.settings.cfgScale,
64
+ clip_guidance_preset: this.settings.clipGuidancePreset,
65
+ sampler: this.settings.sampler,
66
+ samples: this.settings.numberOfGenerations,
67
+ seed: this.settings.seed,
68
+ steps: this.settings.steps,
69
+ style_preset: this.settings.stylePreset,
70
+ },
71
+ failedResponseHandler: StabilityError_js_1.failedStabilityCallResponseHandler,
72
+ successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(stabilityImageGenerationResponseSchema),
73
+ abortSignal,
56
74
  }),
57
75
  });
58
76
  }
59
77
  get settingsForEvent() {
60
- const eventSettingProperties = [
61
- "baseUrl",
62
- "height",
63
- "width",
64
- "cfgScale",
65
- "clipGuidancePreset",
66
- "sampler",
67
- "samples",
68
- "seed",
69
- "steps",
70
- "stylePreset",
71
- ];
72
- return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
78
+ return {
79
+ numberOfGenerations: this.settings.numberOfGenerations,
80
+ height: this.settings.height,
81
+ width: this.settings.width,
82
+ cfgScale: this.settings.cfgScale,
83
+ clipGuidancePreset: this.settings.clipGuidancePreset,
84
+ sampler: this.settings.sampler,
85
+ seed: this.settings.seed,
86
+ steps: this.settings.steps,
87
+ stylePreset: this.settings.stylePreset,
88
+ };
73
89
  }
74
- async doGenerateImage(prompt, options) {
90
+ async doGenerateImages(prompt, options) {
75
91
  const response = await this.callAPI(prompt, options);
76
92
  return {
77
93
  response,
78
- base64Image: response.artifacts[0].base64,
94
+ base64Images: response.artifacts.map((artifact) => artifact.base64),
79
95
  };
80
96
  }
81
97
  withTextPrompt() {
@@ -92,12 +108,6 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
92
108
  }
93
109
  }
94
110
  exports.StabilityImageGenerationModel = StabilityImageGenerationModel;
95
- const stabilityImageGenerationModels = [
96
- "stable-diffusion-v1-5",
97
- "stable-diffusion-512-v2-1",
98
- "stable-diffusion-xl-1024-v0-9",
99
- "stable-diffusion-xl-1024-v1-0",
100
- ];
101
111
  const stabilityImageGenerationResponseSchema = zod_1.z.object({
102
112
  artifacts: zod_1.z.array(zod_1.z.object({
103
113
  base64: zod_1.z.string(),
@@ -105,24 +115,3 @@ const stabilityImageGenerationResponseSchema = zod_1.z.object({
105
115
  finishReason: zod_1.z.enum(["SUCCESS", "ERROR", "CONTENT_FILTERED"]),
106
116
  })),
107
117
  });
108
- async function callStabilityImageGenerationAPI({ api = new StabilityApiConfiguration_js_1.StabilityApiConfiguration(), abortSignal, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
109
- return (0, postToApi_js_1.postJsonToApi)({
110
- url: api.assembleUrl(`/generation/${engineId}/text-to-image`),
111
- headers: api.headers,
112
- body: {
113
- height,
114
- width,
115
- text_prompts: textPrompts,
116
- cfg_scale: cfgScale,
117
- clip_guidance_preset: clipGuidancePreset,
118
- sampler,
119
- samples,
120
- seed,
121
- steps,
122
- style_preset: stylePreset,
123
- },
124
- failedResponseHandler: StabilityError_js_1.failedStabilityCallResponseHandler,
125
- successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(stabilityImageGenerationResponseSchema),
126
- abortSignal,
127
- });
128
- }
package/model-provider/stability/StabilityImageGenerationModel.d.ts CHANGED
@@ -6,6 +6,39 @@ import { PromptTemplate } from "../../model-function/PromptTemplate.js";
6
6
  import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
7
7
  import { PromptTemplateImageGenerationModel } from "../../model-function/generate-image/PromptTemplateImageGenerationModel.js";
8
8
  import { StabilityImageGenerationPrompt } from "./StabilityImageGenerationPrompt.js";
9
+ declare const stabilityImageGenerationModels: readonly ["stable-diffusion-v1-6", "stable-diffusion-xl-1024-v1-0"];
10
+ export type StabilityImageGenerationModelType = (typeof stabilityImageGenerationModels)[number] | (string & {});
11
+ export type StabilityImageGenerationStylePreset = "3d-model" | "analog-film" | "anime" | "cinematic" | "comic-book" | "digital-art" | "enhance" | "fantasy-art" | "isometric" | "line-art" | "low-poly" | "modeling-compound" | "neon-punk" | "origami" | "photographic" | "pixel-art" | "tile-texture";
12
+ export type StabilityImageGenerationSampler = "DDIM" | "DDPM" | "K_DPMPP_2M" | "K_DPMPP_2S_ANCESTRAL" | "K_DPM_2" | "K_DPM_2_ANCESTRAL" | "K_EULER" | "K_EULER_ANCESTRAL" | "K_HEUN" | "K_LMS";
13
+ export type StabilityClipGuidancePreset = "FAST_BLUE" | "FAST_GREEN" | "NONE" | "SIMPLE" | "SLOW" | "SLOWER" | "SLOWEST";
14
+ export interface StabilityImageGenerationSettings extends ImageGenerationModelSettings {
15
+ api?: ApiConfiguration;
16
+ model: StabilityImageGenerationModelType;
17
+ height?: number;
18
+ width?: number;
19
+ /**
20
+ * How strictly the diffusion process adheres to the prompt text (higher values keep your image closer to your prompt)
21
+ */
22
+ cfgScale?: number;
23
+ clipGuidancePreset?: StabilityClipGuidancePreset;
24
+ /**
25
+ * Which sampler to use for the diffusion process.
26
+ * If this value is omitted we'll automatically select an appropriate sampler for you.
27
+ */
28
+ sampler?: StabilityImageGenerationSampler;
29
+ /**
30
+ * Random noise seed (omit this option or use 0 for a random seed).
31
+ */
32
+ seed?: number;
33
+ /**
34
+ * Number of diffusion steps to run.
35
+ */
36
+ steps?: number;
37
+ /**
38
+ * Pass in a style preset to guide the image model towards a particular style.
39
+ */
40
+ stylePreset?: StabilityImageGenerationStylePreset;
41
+ }
9
42
  /**
10
43
  * Create an image generation model that calls the Stability AI image generation API.
11
44
  *
@@ -13,13 +46,12 @@ import { StabilityImageGenerationPrompt } from "./StabilityImageGenerationPrompt
13
46
  *
14
47
  * @example
15
48
  * const image = await generateImage(
16
- * new StabilityImageGenerationModel({
17
- * model: "stable-diffusion-512-v2-1",
49
+ * stability.ImageGenerator({
50
+ * model: "stable-diffusion-v1-6",
18
51
  * cfgScale: 7,
19
52
  * clipGuidancePreset: "FAST_BLUE",
20
53
  * height: 512,
21
54
  * width: 512,
22
- * samples: 1,
23
55
  * steps: 30,
24
56
  * })
25
57
  * [
@@ -34,7 +66,7 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
34
66
  get modelName(): StabilityImageGenerationModelType;
35
67
  callAPI(input: StabilityImageGenerationPrompt, options?: FunctionOptions): Promise<StabilityImageGenerationResponse>;
36
68
  get settingsForEvent(): Partial<StabilityImageGenerationSettings>;
37
- doGenerateImage(prompt: StabilityImageGenerationPrompt, options?: FunctionOptions): Promise<{
69
+ doGenerateImages(prompt: StabilityImageGenerationPrompt, options?: FunctionOptions): Promise<{
38
70
  response: {
39
71
  artifacts: {
40
72
  base64: string;
@@ -42,27 +74,12 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
42
74
  finishReason: "SUCCESS" | "ERROR" | "CONTENT_FILTERED";
43
75
  }[];
44
76
  };
45
- base64Image: string;
77
+ base64Images: string[];
46
78
  }>;
47
79
  withTextPrompt(): PromptTemplateImageGenerationModel<string, StabilityImageGenerationPrompt, StabilityImageGenerationSettings, this>;
48
80
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: PromptTemplate<INPUT_PROMPT, StabilityImageGenerationPrompt>): PromptTemplateImageGenerationModel<INPUT_PROMPT, StabilityImageGenerationPrompt, StabilityImageGenerationSettings, this>;
49
81
  withSettings(additionalSettings: StabilityImageGenerationSettings): this;
50
82
  }
51
- declare const stabilityImageGenerationModels: readonly ["stable-diffusion-v1-5", "stable-diffusion-512-v2-1", "stable-diffusion-xl-1024-v0-9", "stable-diffusion-xl-1024-v1-0"];
52
- export type StabilityImageGenerationModelType = (typeof stabilityImageGenerationModels)[number] | (string & {});
53
- export interface StabilityImageGenerationSettings extends ImageGenerationModelSettings {
54
- api?: ApiConfiguration;
55
- model: StabilityImageGenerationModelType;
56
- height?: number;
57
- width?: number;
58
- cfgScale?: number;
59
- clipGuidancePreset?: string;
60
- sampler?: StabilityImageGenerationSampler;
61
- samples?: number;
62
- seed?: number;
63
- steps?: number;
64
- stylePreset?: StabilityImageGenerationStylePreset;
65
- }
66
83
  declare const stabilityImageGenerationResponseSchema: z.ZodObject<{
67
84
  artifacts: z.ZodArray<z.ZodObject<{
68
85
  base64: z.ZodString;
@@ -91,6 +108,4 @@ declare const stabilityImageGenerationResponseSchema: z.ZodObject<{
91
108
  }[];
92
109
  }>;
93
110
  export type StabilityImageGenerationResponse = z.infer<typeof stabilityImageGenerationResponseSchema>;
94
- export type StabilityImageGenerationStylePreset = "enhance" | "anime" | "photographic" | "digital-art" | "comic-book" | "fantasy-art" | "line-art" | "analog-film" | "neon-punk" | "isometric" | "low-poly" | "origami" | "modeling-compound" | "cinematic" | "3d-model" | "pixel-art" | "tile-texture";
95
- export type StabilityImageGenerationSampler = "DDIM" | "DDPM" | "K_DPMPP_2M" | "K_DPMPP_2S_ANCESTRAL" | "K_DPM_2" | "K_DPM_2_ANCESTRAL" | "K_EULER" | "K_EULER_ANCESTRAL" | "K_HEUN" | "K_LMS";
96
111
  export {};
package/model-provider/stability/StabilityImageGenerationModel.js CHANGED
@@ -6,6 +6,10 @@ import { PromptTemplateImageGenerationModel } from "../../model-function/generat
6
6
  import { StabilityApiConfiguration } from "./StabilityApiConfiguration.js";
7
7
  import { failedStabilityCallResponseHandler } from "./StabilityError.js";
8
8
  import { mapBasicPromptToStabilityFormat, } from "./StabilityImageGenerationPrompt.js";
9
+ const stabilityImageGenerationModels = [
10
+ "stable-diffusion-v1-6",
11
+ "stable-diffusion-xl-1024-v1-0",
12
+ ];
9
13
  /**
10
14
  * Create an image generation model that calls the Stability AI image generation API.
11
15
  *
@@ -13,13 +17,12 @@ import { mapBasicPromptToStabilityFormat, } from "./StabilityImageGenerationProm
13
17
  *
14
18
  * @example
15
19
  * const image = await generateImage(
16
- * new StabilityImageGenerationModel({
17
- * model: "stable-diffusion-512-v2-1",
20
+ * stability.ImageGenerator({
21
+ * model: "stable-diffusion-v1-6",
18
22
  * cfgScale: 7,
19
23
  * clipGuidancePreset: "FAST_BLUE",
20
24
  * height: 512,
21
25
  * width: 512,
22
- * samples: 1,
23
26
  * steps: 30,
24
27
  * })
25
28
  * [
@@ -42,37 +45,50 @@ export class StabilityImageGenerationModel extends AbstractModel {
42
45
  return this.settings.model;
43
46
  }
44
47
  async callAPI(input, options) {
48
+ const api = this.settings.api ?? new StabilityApiConfiguration();
49
+ const abortSignal = options?.run?.abortSignal;
45
50
  return callWithRetryAndThrottle({
46
51
  retry: this.settings.api?.retry,
47
52
  throttle: this.settings.api?.throttle,
48
- call: async () => callStabilityImageGenerationAPI({
49
- ...this.settings,
50
- abortSignal: options?.run?.abortSignal,
51
- engineId: this.settings.model,
52
- textPrompts: input,
53
+ call: async () => postJsonToApi({
54
+ url: api.assembleUrl(`/generation/${this.settings.model}/text-to-image`),
55
+ headers: api.headers,
56
+ body: {
57
+ height: this.settings.height,
58
+ width: this.settings.width,
59
+ text_prompts: input,
60
+ cfg_scale: this.settings.cfgScale,
61
+ clip_guidance_preset: this.settings.clipGuidancePreset,
62
+ sampler: this.settings.sampler,
63
+ samples: this.settings.numberOfGenerations,
64
+ seed: this.settings.seed,
65
+ steps: this.settings.steps,
66
+ style_preset: this.settings.stylePreset,
67
+ },
68
+ failedResponseHandler: failedStabilityCallResponseHandler,
69
+ successfulResponseHandler: createJsonResponseHandler(stabilityImageGenerationResponseSchema),
70
+ abortSignal,
53
71
  }),
54
72
  });
55
73
  }
56
74
  get settingsForEvent() {
57
- const eventSettingProperties = [
58
- "baseUrl",
59
- "height",
60
- "width",
61
- "cfgScale",
62
- "clipGuidancePreset",
63
- "sampler",
64
- "samples",
65
- "seed",
66
- "steps",
67
- "stylePreset",
68
- ];
69
- return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
75
+ return {
76
+ numberOfGenerations: this.settings.numberOfGenerations,
77
+ height: this.settings.height,
78
+ width: this.settings.width,
79
+ cfgScale: this.settings.cfgScale,
80
+ clipGuidancePreset: this.settings.clipGuidancePreset,
81
+ sampler: this.settings.sampler,
82
+ seed: this.settings.seed,
83
+ steps: this.settings.steps,
84
+ stylePreset: this.settings.stylePreset,
85
+ };
70
86
  }
71
- async doGenerateImage(prompt, options) {
87
+ async doGenerateImages(prompt, options) {
72
88
  const response = await this.callAPI(prompt, options);
73
89
  return {
74
90
  response,
75
- base64Image: response.artifacts[0].base64,
91
+ base64Images: response.artifacts.map((artifact) => artifact.base64),
76
92
  };
77
93
  }
78
94
  withTextPrompt() {
@@ -88,12 +104,6 @@ export class StabilityImageGenerationModel extends AbstractModel {
88
104
  return new StabilityImageGenerationModel(Object.assign({}, this.settings, additionalSettings));
89
105
  }
90
106
  }
91
- const stabilityImageGenerationModels = [
92
- "stable-diffusion-v1-5",
93
- "stable-diffusion-512-v2-1",
94
- "stable-diffusion-xl-1024-v0-9",
95
- "stable-diffusion-xl-1024-v1-0",
96
- ];
97
107
  const stabilityImageGenerationResponseSchema = z.object({
98
108
  artifacts: z.array(z.object({
99
109
  base64: z.string(),
@@ -101,24 +111,3 @@ const stabilityImageGenerationResponseSchema = z.object({
101
111
  finishReason: z.enum(["SUCCESS", "ERROR", "CONTENT_FILTERED"]),
102
112
  })),
103
113
  });
104
- async function callStabilityImageGenerationAPI({ api = new StabilityApiConfiguration(), abortSignal, engineId, height, width, textPrompts, cfgScale, clipGuidancePreset, sampler, samples, seed, steps, stylePreset, }) {
105
- return postJsonToApi({
106
- url: api.assembleUrl(`/generation/${engineId}/text-to-image`),
107
- headers: api.headers,
108
- body: {
109
- height,
110
- width,
111
- text_prompts: textPrompts,
112
- cfg_scale: cfgScale,
113
- clip_guidance_preset: clipGuidancePreset,
114
- sampler,
115
- samples,
116
- seed,
117
- steps,
118
- style_preset: stylePreset,
119
- },
120
- failedResponseHandler: failedStabilityCallResponseHandler,
121
- successfulResponseHandler: createJsonResponseHandler(stabilityImageGenerationResponseSchema),
122
- abortSignal,
123
- });
124
- }
package/model-provider/stability/index.cjs CHANGED
@@ -26,10 +26,8 @@ var __importStar = (this && this.__importStar) || function (mod) {
26
26
  return result;
27
27
  };
28
28
  Object.defineProperty(exports, "__esModule", { value: true });
29
- exports.stability = exports.StabilityError = void 0;
29
+ exports.stability = void 0;
30
30
  __exportStar(require("./StabilityApiConfiguration.cjs"), exports);
31
- var StabilityError_js_1 = require("./StabilityError.cjs");
32
- Object.defineProperty(exports, "StabilityError", { enumerable: true, get: function () { return StabilityError_js_1.StabilityError; } });
33
31
  exports.stability = __importStar(require("./StabilityFacade.cjs"));
34
32
  __exportStar(require("./StabilityImageGenerationModel.cjs"), exports);
35
33
  __exportStar(require("./StabilityImageGenerationPrompt.cjs"), exports);
package/model-provider/stability/index.d.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  export * from "./StabilityApiConfiguration.js";
2
- export { StabilityError, StabilityErrorData } from "./StabilityError.js";
2
+ export { StabilityErrorData } from "./StabilityError.js";
3
3
  export * as stability from "./StabilityFacade.js";
4
4
  export * from "./StabilityImageGenerationModel.js";
5
5
  export * from "./StabilityImageGenerationPrompt.js";
package/model-provider/stability/index.js CHANGED
@@ -1,5 +1,4 @@
1
1
  export * from "./StabilityApiConfiguration.js";
2
- export { StabilityError } from "./StabilityError.js";
3
2
  export * as stability from "./StabilityFacade.js";
4
3
  export * from "./StabilityImageGenerationModel.js";
5
4
  export * from "./StabilityImageGenerationPrompt.js";
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "The TypeScript library for building multi-modal AI applications.",
4
- "version": "0.101.0",
4
+ "version": "0.102.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [