modelfusion 0.31.0 → 0.32.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -45,7 +45,9 @@ You can use [prompt formats](https://modelfusion.dev/guide/function/generate-tex
 
 ```ts
 const text = await generateText(
-  new OpenAITextGenerationModel({ model: "text-davinci-003" }),
+  new OpenAITextGenerationModel({
+    model: "gpt-3.5-turbo-instruct",
+  }),
   "Write a short story about a robot learning to love:\n\n"
 );
 ```
@@ -56,14 +58,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 
 ```ts
 const textStream = await streamText(
-  new OpenAIChatModel({
-    model: "gpt-3.5-turbo",
-    maxCompletionTokens: 1000,
+  new OpenAITextGenerationModel({
+    model: "gpt-3.5-turbo-instruct",
   }),
-  [
-    OpenAIChatMessage.system("You are a story writer."),
-    OpenAIChatMessage.user("Write a story about a robot learning to love"),
-  ]
+  "Write a short story about a robot learning to love:\n\n"
 );
 
 for await (const textFragment of textStream) {
@@ -121,7 +119,7 @@ ModelFusion model functions return rich results that include the original respon
 // the response type is specific to the model that's being used
 const { output, response, metadata } = await generateText(
   new OpenAITextGenerationModel({
-    model: "text-davinci-003",
+    model: "gpt-3.5-turbo-instruct",
     maxCompletionTokens: 1000,
     n: 2, // generate 2 completions
   }),
@@ -19,7 +19,7 @@ export interface Model<SETTINGS extends ModelSettings> {
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   maxCompletionTokens: 500,
  * });
  *
@@ -21,55 +21,72 @@ const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
  * @see https://openai.com/pricing
  */
 exports.OPENAI_TEXT_GENERATION_MODELS = {
+  "gpt-3.5-turbo-instruct": {
+    contextWindowSize: 4097,
+    promptTokenCostInMillicents: 0.15,
+    completionTokenCostInMillicents: 0.2,
+  },
   "davinci-002": {
     contextWindowSize: 16384,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
     fineTunedTokenCostInMillicents: 1.2,
   },
   "babbage-002": {
     contextWindowSize: 16384,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
     fineTunedTokenCostInMillicents: 0.16,
   },
   "text-davinci-003": {
     contextWindowSize: 4096,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "text-davinci-002": {
     contextWindowSize: 4096,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "code-davinci-002": {
     contextWindowSize: 8000,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   davinci: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "text-curie-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
   },
   curie: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
   },
   "text-babbage-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.05,
+    promptTokenCostInMillicents: 0.05,
+    completionTokenCostInMillicents: 0.05,
   },
   babbage: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.05,
+    promptTokenCostInMillicents: 0.05,
+    completionTokenCostInMillicents: 0.05,
   },
   "text-ada-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
   },
   ada: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
   },
 };
 function getOpenAITextGenerationModelInformation(model) {
@@ -80,7 +97,8 @@ function getOpenAITextGenerationModelInformation(model) {
     baseModel: model,
     isFineTuned: false,
     contextWindowSize: baseModelInformation.contextWindowSize,
-    tokenCostInMillicents: baseModelInformation.tokenCostInMillicents,
+    promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
+    completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
   };
 }
 // Extract the base model from the fine-tuned model:
@@ -92,7 +110,8 @@ function getOpenAITextGenerationModelInformation(model) {
     baseModel: baseModel,
     isFineTuned: true,
     contextWindowSize: baseModelInformation.contextWindowSize,
-    tokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+    promptTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+    completionTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
   };
 }
 throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
@@ -102,8 +121,13 @@ const isOpenAITextGenerationModel = (model) => model in exports.OPENAI_TEXT_GENE
   model.startsWith("ft:davinci-002:") ||
   model.startsWith("ft:babbage-002:");
 exports.isOpenAITextGenerationModel = isOpenAITextGenerationModel;
-const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => response.usage.total_tokens *
-  getOpenAITextGenerationModelInformation(model).tokenCostInMillicents;
+const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
+  const modelInformation = getOpenAITextGenerationModelInformation(model);
+  return (response.usage.prompt_tokens *
+    modelInformation.promptTokenCostInMillicents +
+    response.usage.completion_tokens *
+    modelInformation.completionTokenCostInMillicents);
+};
 exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGenerationCostInMillicents;
 /**
  * Create a text generation model that calls the OpenAI text completion API.
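The hunk above replaces the single `tokenCostInMillicents` rate with separate prompt and completion rates, since gpt-3.5-turbo-instruct prices the two differently. As a worked illustration of the new formula (a standalone sketch, not the library's API; the rates are the gpt-3.5-turbo-instruct values from the pricing table above, in millicents, i.e. thousandths of a cent, per token):

```ts
// Sketch of the new cost math, assuming the rates from the table above.
const promptTokenCostInMillicents = 0.15;
const completionTokenCostInMillicents = 0.2;

// Example OpenAI `usage` payload as returned by the completion API:
const usage = { prompt_tokens: 1000, completion_tokens: 500 };

const costInMillicents =
  usage.prompt_tokens * promptTokenCostInMillicents +
  usage.completion_tokens * completionTokenCostInMillicents;

console.log(costInMillicents); // 250 millicents = 0.25 cents = $0.0025
```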
@@ -112,7 +136,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
@@ -14,62 +14,80 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export declare const OPENAI_TEXT_GENERATION_MODELS: {
+  "gpt-3.5-turbo-instruct": {
+    contextWindowSize: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
+  };
   "davinci-002": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
     fineTunedTokenCostInMillicents: number;
   };
   "babbage-002": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
     fineTunedTokenCostInMillicents: number;
   };
   "text-davinci-003": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   "text-davinci-002": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   "code-davinci-002": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   davinci: {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   "text-curie-001": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   curie: {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   "text-babbage-001": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   babbage: {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   "text-ada-001": {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
   ada: {
     contextWindowSize: number;
-    tokenCostInMillicents: number;
+    promptTokenCostInMillicents: number;
+    completionTokenCostInMillicents: number;
   };
 };
 export declare function getOpenAITextGenerationModelInformation(model: OpenAITextGenerationModelType): {
   baseModel: OpenAITextGenerationBaseModelType;
   isFineTuned: boolean;
   contextWindowSize: number;
-  tokenCostInMillicents: number;
+  promptTokenCostInMillicents: number;
+  completionTokenCostInMillicents: number;
 };
 type FineTuneableOpenAITextGenerationModelType = "davinci-002" | "babbage-002";
 type FineTunedOpenAITextGenerationModelType = `ft:${FineTuneableOpenAITextGenerationModelType}:${string}:${string}:${string}`;
@@ -106,7 +124,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
@@ -15,55 +15,72 @@ import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  * @see https://openai.com/pricing
  */
 export const OPENAI_TEXT_GENERATION_MODELS = {
+  "gpt-3.5-turbo-instruct": {
+    contextWindowSize: 4097,
+    promptTokenCostInMillicents: 0.15,
+    completionTokenCostInMillicents: 0.2,
+  },
   "davinci-002": {
     contextWindowSize: 16384,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
     fineTunedTokenCostInMillicents: 1.2,
   },
   "babbage-002": {
     contextWindowSize: 16384,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
     fineTunedTokenCostInMillicents: 0.16,
   },
   "text-davinci-003": {
     contextWindowSize: 4096,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "text-davinci-002": {
     contextWindowSize: 4096,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "code-davinci-002": {
     contextWindowSize: 8000,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   davinci: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 2,
+    promptTokenCostInMillicents: 2,
+    completionTokenCostInMillicents: 2,
   },
   "text-curie-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
   },
   curie: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.2,
+    promptTokenCostInMillicents: 0.2,
+    completionTokenCostInMillicents: 0.2,
   },
   "text-babbage-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.05,
+    promptTokenCostInMillicents: 0.05,
+    completionTokenCostInMillicents: 0.05,
   },
   babbage: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.05,
+    promptTokenCostInMillicents: 0.05,
+    completionTokenCostInMillicents: 0.05,
   },
   "text-ada-001": {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
   },
   ada: {
     contextWindowSize: 2048,
-    tokenCostInMillicents: 0.04,
+    promptTokenCostInMillicents: 0.04,
+    completionTokenCostInMillicents: 0.04,
   },
 };
 export function getOpenAITextGenerationModelInformation(model) {
@@ -74,7 +91,8 @@ export function getOpenAITextGenerationModelInformation(model) {
     baseModel: model,
     isFineTuned: false,
     contextWindowSize: baseModelInformation.contextWindowSize,
-    tokenCostInMillicents: baseModelInformation.tokenCostInMillicents,
+    promptTokenCostInMillicents: baseModelInformation.promptTokenCostInMillicents,
+    completionTokenCostInMillicents: baseModelInformation.completionTokenCostInMillicents,
   };
 }
 // Extract the base model from the fine-tuned model:
@@ -86,7 +104,8 @@ export function getOpenAITextGenerationModelInformation(model) {
     baseModel: baseModel,
     isFineTuned: true,
     contextWindowSize: baseModelInformation.contextWindowSize,
-    tokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+    promptTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
+    completionTokenCostInMillicents: baseModelInformation.fineTunedTokenCostInMillicents,
   };
 }
 throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
@@ -94,8 +113,13 @@ export function getOpenAITextGenerationModelInformation(model) {
 export const isOpenAITextGenerationModel = (model) => model in OPENAI_TEXT_GENERATION_MODELS ||
   model.startsWith("ft:davinci-002:") ||
   model.startsWith("ft:babbage-002:");
-export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => response.usage.total_tokens *
-  getOpenAITextGenerationModelInformation(model).tokenCostInMillicents;
+export const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
+  const modelInformation = getOpenAITextGenerationModelInformation(model);
+  return (response.usage.prompt_tokens *
+    modelInformation.promptTokenCostInMillicents +
+    response.usage.completion_tokens *
+    modelInformation.completionTokenCostInMillicents);
+};
 /**
  * Create a text generation model that calls the OpenAI text completion API.
  *
@@ -103,7 +127,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  *
  * @example
  * const model = new OpenAITextGenerationModel({
- *   model: "text-davinci-003",
+ *   model: "gpt-3.5-turbo-instruct",
  *   temperature: 0.7,
  *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
@@ -57,8 +57,6 @@ function getEncodingNameForModel(model) {
     case "text-davinci-003": {
       return "p50k_base";
     }
-    case "babbage-002":
-    case "davinci-002":
     case "ada":
     case "babbage":
     case "curie":
@@ -68,11 +66,14 @@ function getEncodingNameForModel(model) {
     case "text-curie-001": {
       return "r50k_base";
     }
+    case "babbage-002":
+    case "davinci-002":
     case "gpt-3.5-turbo":
     case "gpt-3.5-turbo-0301":
     case "gpt-3.5-turbo-0613":
     case "gpt-3.5-turbo-16k":
     case "gpt-3.5-turbo-16k-0613":
+    case "gpt-3.5-turbo-instruct":
     case "gpt-4":
     case "gpt-4-0314":
     case "gpt-4-0613":
@@ -53,8 +53,6 @@ function getEncodingNameForModel(model) {
     case "text-davinci-003": {
       return "p50k_base";
     }
-    case "babbage-002":
-    case "davinci-002":
     case "ada":
     case "babbage":
     case "curie":
@@ -64,11 +62,14 @@ function getEncodingNameForModel(model) {
     case "text-curie-001": {
       return "r50k_base";
     }
+    case "babbage-002":
+    case "davinci-002":
     case "gpt-3.5-turbo":
     case "gpt-3.5-turbo-0301":
     case "gpt-3.5-turbo-0613":
     case "gpt-3.5-turbo-16k":
     case "gpt-3.5-turbo-16k-0613":
+    case "gpt-3.5-turbo-instruct":
     case "gpt-4":
     case "gpt-4-0314":
     case "gpt-4-0613":
@@ -86,6 +86,12 @@ class StabilityImageGenerationModel extends AbstractModel_js_1.AbstractModel {
   }
 }
 exports.StabilityImageGenerationModel = StabilityImageGenerationModel;
+const stabilityImageGenerationModels = [
+  "stable-diffusion-v1-5",
+  "stable-diffusion-512-v2-1",
+  "stable-diffusion-xl-1024-v0-9",
+  "stable-diffusion-xl-1024-v1-0",
+];
 const stabilityImageGenerationResponseSchema = zod_1.z.object({
   artifacts: zod_1.z.array(zod_1.z.object({
     base64: zod_1.z.string(),
@@ -28,7 +28,7 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-
 export declare class StabilityImageGenerationModel extends AbstractModel<StabilityImageGenerationModelSettings> implements ImageGenerationModel<StabilityImageGenerationPrompt, StabilityImageGenerationResponse, StabilityImageGenerationModelSettings> {
   constructor(settings: StabilityImageGenerationModelSettings);
   readonly provider: "stability";
-  get modelName(): string;
+  get modelName(): StabilityImageGenerationModelType;
   callAPI(input: StabilityImageGenerationPrompt, options?: ModelFunctionOptions<StabilityImageGenerationModelSettings>): Promise<StabilityImageGenerationResponse>;
   get settingsForEvent(): Partial<StabilityImageGenerationModelSettings>;
   generateImageResponse(prompt: StabilityImageGenerationPrompt, options?: ModelFunctionOptions<StabilityImageGenerationModelSettings>): Promise<{
@@ -41,9 +41,11 @@ export declare class StabilityImageGenerationModel extends AbstractModel<Stabili
   extractBase64Image(response: StabilityImageGenerationResponse): string;
   withSettings(additionalSettings: StabilityImageGenerationModelSettings): this;
 }
+declare const stabilityImageGenerationModels: readonly ["stable-diffusion-v1-5", "stable-diffusion-512-v2-1", "stable-diffusion-xl-1024-v0-9", "stable-diffusion-xl-1024-v1-0"];
+export type StabilityImageGenerationModelType = (typeof stabilityImageGenerationModels)[number] | (string & {});
 export interface StabilityImageGenerationModelSettings extends ImageGenerationModelSettings {
   api?: ApiConfiguration;
-  model: string;
+  model: StabilityImageGenerationModelType;
   height?: number;
   width?: number;
   cfgScale?: number;
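The `(string & {})` member of the new `StabilityImageGenerationModelType` is the standard TypeScript idiom for a string-literal union with an escape hatch: editors still suggest the four known model names, but any other string remains assignable, so newly released Stability models work without a library update. A small illustration (assuming the type is re-exported from the package root; the second model id is hypothetical):

```ts
import type { StabilityImageGenerationModelType } from "modelfusion";

// Editors can autocomplete the known literals...
const known: StabilityImageGenerationModelType = "stable-diffusion-xl-1024-v1-0";

// ...while arbitrary strings still type-check, e.g. a hypothetical future model id:
const future: StabilityImageGenerationModelType = "stable-diffusion-xl-1024-v2-0";
```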
@@ -82,6 +82,12 @@ export class StabilityImageGenerationModel extends AbstractModel {
     return new StabilityImageGenerationModel(Object.assign({}, this.settings, additionalSettings));
   }
 }
+const stabilityImageGenerationModels = [
+  "stable-diffusion-v1-5",
+  "stable-diffusion-512-v2-1",
+  "stable-diffusion-xl-1024-v0-9",
+  "stable-diffusion-xl-1024-v1-0",
+];
 const stabilityImageGenerationResponseSchema = z.object({
   artifacts: z.array(z.object({
     base64: z.string(),
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.31.0",
+  "version": "0.32.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -55,7 +55,7 @@
     "js-tiktoken": "1.0.7",
     "nanoid": "3.3.6",
     "secure-json-parse": "2.7.0",
-    "zod": "3.22.2",
+    "zod": "3.21.4",
     "zod-to-json-schema": "3.21.4"
   },
   "devDependencies": {