modelfusion 0.59.0 → 0.61.0

package/README.md CHANGED
@@ -39,8 +39,7 @@ You can provide API keys for the different [integrations](https://modelfusion.de
  
  ### [Generate Text](https://modelfusion.dev/guide/function/generate-text)
  
- Generate text using a language model and a prompt.
- You can stream the text if it is supported by the model.
+ Generate text using a language model and a prompt. You can stream the text if it is supported by the model. You can use images for multi-modal prompting if the model supports it (e.g. with [llama.cpp](https://modelfusion.dev/guide/)).
  
  You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text#prompt-format) to change the prompt format of a model.
  
  #### generateText
@@ -79,7 +78,10 @@ Generate an image from a prompt.
  
  ```ts
  const image = await generateImage(
-   new OpenAIImageGenerationModel({ size: "512x512" }),
+   new OpenAIImageGenerationModel({
+     model: "dall-e-3",
+     size: "1024x1024",
+   }),
    "the wicked witch of the west in the style of early 19th century painting"
  );
  ```
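
The new required `model` setting pairs with the optional `quality` and `style` settings added to `OpenAIImageGenerationCallSettings` later in this diff. A minimal sketch of how the updated example could use them (the chosen `quality`/`style` values are just the allowed literals, not taken from the README):

```ts
import { OpenAIImageGenerationModel, generateImage } from "modelfusion";

// Sketch: DALL-E 3 with the new optional settings from this release.
const image = await generateImage(
  new OpenAIImageGenerationModel({
    model: "dall-e-3",
    size: "1024x1024",
    quality: "hd", // "standard" | "hd"
    style: "natural", // "vivid" | "natural"
  }),
  "the wicked witch of the west in the style of early 19th century painting"
);
```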
@@ -461,7 +463,9 @@ const text = await generateText(
    new LlamaCppTextGenerationModel({
      contextWindowSize: 4096, // Llama 2 context window size
      maxCompletionTokens: 1000,
-   }).withPromptFormat(mapInstructionPromptToLlama2Format()),
+   })
+     .withTextPrompt()
+     .withPromptFormat(mapInstructionPromptToLlama2Format()),
    {
      system: "You are a story writer.",
      instruction: "Write a short story about a robot learning to love.",
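
The added `.withTextPrompt()` call is needed because, as the type changes below show, the llama.cpp model's native prompt type is no longer `string` but `LlamaCppTextGenerationPrompt`; `withTextPrompt()` maps a plain string back onto that shape. A minimal sketch of plain-string usage, assuming a local llama.cpp server with default settings:

```ts
import { LlamaCppTextGenerationModel, streamText } from "modelfusion";

// withTextPrompt() wraps a plain string as { text: prompt } before the call.
const textStream = await streamText(
  new LlamaCppTextGenerationModel({ maxCompletionTokens: 256 }).withTextPrompt(),
  "Write a short story about a robot learning to love."
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```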
@@ -75,7 +75,7 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
          return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
      }
      async countPromptTokens(prompt) {
-         const tokens = await this.tokenizer.tokenize(prompt);
+         const tokens = await this.tokenizer.tokenize(prompt.text);
          return tokens.length;
      }
      async doGenerateText(prompt, options) {
@@ -99,6 +99,14 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
              responseFormat: exports.LlamaCppTextGenerationResponseFormat.deltaIterable,
          });
      }
+     withTextPrompt() {
+         return this.withPromptFormat({
+             format(prompt) {
+                 return { text: prompt };
+             },
+             stopSequences: [],
+         });
+     }
      withPromptFormat(promptFormat) {
          return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
              model: this.withSettings({
@@ -177,7 +185,7 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
          headers: api.headers,
          body: {
              stream: responseFormat.stream,
-             prompt,
+             prompt: prompt.text,
              temperature,
              top_k: topK,
              top_p: topP,
@@ -195,6 +203,12 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
              seed,
              ignore_eos: ignoreEos,
              logit_bias: logitBias,
+             image_data: prompt.images != null
+                 ? Object.entries(prompt.images).map(([id, data]) => ({
+                     id: +id,
+                     data,
+                 }))
+                 : undefined,
          },
          failedResponseHandler: LlamaCppError_js_1.failedLlamaCppCallResponseHandler,
          successfulResponseHandler: responseFormat.handler,
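
The `image_data` mapping above converts the prompt's `images` record into the array format the llama.cpp server expects. A sketch of the transformation (the `base64Image` value is a placeholder):

```ts
const base64Image = "..."; // base64-encoded image bytes (placeholder)

// Prompt as passed to callAPI:
const prompt = {
  text: "[img-1] Describe the image in detail.",
  images: { 1: base64Image },
};

// Request body fields produced by the mapping above:
//   prompt: "[img-1] Describe the image in detail."
//   image_data: [{ id: 1, data: base64Image }]
```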
@@ -31,18 +31,28 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
      ignoreEos?: boolean;
      logitBias?: Array<[number, number | false]>;
  }
- export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
+ export interface LlamaCppTextGenerationPrompt {
+     /**
+      * Text prompt. Images can be included through references such as `[img-ID]`, e.g. `[img-1]`.
+      */
+     text: string;
+     /**
+      * Maps image id to image base data.
+      */
+     images?: Record<number, string>;
+ }
+ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
      constructor(settings?: LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
      readonly provider = "llamacpp";
      get modelName(): null;
      get contextWindowSize(): CONTEXT_WINDOW_SIZE;
      readonly tokenizer: LlamaCppTokenizer;
-     callAPI<RESPONSE>(prompt: string, options: {
+     callAPI<RESPONSE>(prompt: LlamaCppTextGenerationPrompt, options: {
          responseFormat: LlamaCppTextGenerationResponseFormatType<RESPONSE>;
      } & FunctionOptions): Promise<RESPONSE>;
      get settingsForEvent(): Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
-     countPromptTokens(prompt: string): Promise<number>;
-     doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
+     countPromptTokens(prompt: LlamaCppTextGenerationPrompt): Promise<number>;
+     doGenerateText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<{
          response: {
              model: string;
              prompt: string;
@@ -99,8 +109,9 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
              totalTokens: number;
          };
      }>;
-     doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
-     withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+     withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+     withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
      withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
  declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
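
Taken together, these type changes are what enable the multi-modal prompting mentioned in the README. A hedged end-to-end sketch, assuming a multi-modal model (e.g. a LLaVA variant) is being served by the llama.cpp server; the file name and token limit are illustrative:

```ts
import { readFileSync } from "node:fs";
import { LlamaCppTextGenerationModel, generateText } from "modelfusion";

const image = readFileSync("example.png").toString("base64");

// The model's native prompt type is now LlamaCppTextGenerationPrompt:
const description = await generateText(
  new LlamaCppTextGenerationModel({ maxCompletionTokens: 1024 }),
  {
    text: "[img-1] Describe the image in detail.",
    images: { 1: image },
  }
);
```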
@@ -72,7 +72,7 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
          return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
      }
      async countPromptTokens(prompt) {
-         const tokens = await this.tokenizer.tokenize(prompt);
+         const tokens = await this.tokenizer.tokenize(prompt.text);
          return tokens.length;
      }
      async doGenerateText(prompt, options) {
@@ -96,6 +96,14 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
              responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
          });
      }
+     withTextPrompt() {
+         return this.withPromptFormat({
+             format(prompt) {
+                 return { text: prompt };
+             },
+             stopSequences: [],
+         });
+     }
      withPromptFormat(promptFormat) {
          return new PromptFormatTextStreamingModel({
              model: this.withSettings({
@@ -173,7 +181,7 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
          headers: api.headers,
          body: {
              stream: responseFormat.stream,
-             prompt,
+             prompt: prompt.text,
              temperature,
              top_k: topK,
              top_p: topP,
@@ -191,6 +199,12 @@ async function callLlamaCppTextGenerationAPI({ api = new LlamaCppApiConfiguratio
              seed,
              ignore_eos: ignoreEos,
              logit_bias: logitBias,
+             image_data: prompt.images != null
+                 ? Object.entries(prompt.images).map(([id, data]) => ({
+                     id: +id,
+                     data,
+                 }))
+                 : undefined,
          },
          failedResponseHandler: failedLlamaCppCallResponseHandler,
          successfulResponseHandler: responseFormat.handler,
@@ -1,6 +1,6 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.OpenAIImageGenerationResponseFormat = exports.OpenAIImageGenerationModel = exports.calculateOpenAIImageGenerationCostInMillicents = void 0;
+ exports.OpenAIImageGenerationResponseFormat = exports.OpenAIImageGenerationModel = exports.calculateOpenAIImageGenerationCostInMillicents = exports.OPENAI_IMAGE_MODELS = void 0;
  const zod_1 = require("zod");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -8,15 +8,61 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
  const PromptFormatImageGenerationModel_js_1 = require("../../model-function/generate-image/PromptFormatImageGenerationModel.cjs");
  const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
  const OpenAIError_js_1 = require("./OpenAIError.cjs");
+ exports.OPENAI_IMAGE_MODELS = {
+     "dall-e-2": {
+         getCost(settings) {
+             switch (settings.size ?? "1024x1024") {
+                 case "1024x1024":
+                     return 2000;
+                 case "512x512":
+                     return 1800;
+                 case "256x256":
+                     return 1600;
+                 default:
+                     return null;
+             }
+         },
+     },
+     "dall-e-3": {
+         getCost(settings) {
+             switch (settings.quality ?? "standard") {
+                 case "standard": {
+                     switch (settings.size ?? "1024x1024") {
+                         case "1024x1024":
+                             return 4000;
+                         case "1024x1792":
+                         case "1792x1024":
+                             return 8000;
+                         default:
+                             return null;
+                     }
+                 }
+                 case "hd": {
+                     switch (settings.size ?? "1024x1024") {
+                         case "1024x1024":
+                             return 8000;
+                         case "1024x1792":
+                         case "1792x1024":
+                             return 12000;
+                         default:
+                             return null;
+                     }
+                 }
+             }
+         },
+     },
+ };
  /**
   * @see https://openai.com/pricing
   */
- const sizeToCostInMillicents = {
-     "1024x1024": 2000,
-     "512x512": 1800,
-     "256x256": 1600,
+ const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
+     console.log(settings);
+     const cost = exports.OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+     if (cost == null) {
+         return null;
+     }
+     return (settings.n ?? 1) * cost;
  };
- const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => (settings.n ?? 1) * sizeToCostInMillicents[settings.size ?? "1024x1024"];
  exports.calculateOpenAIImageGenerationCostInMillicents = calculateOpenAIImageGenerationCostInMillicents;
  /**
   * Create an image generation model that calls the OpenAI AI image creation API.
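
A worked example of the new per-model cost table, assuming the helper is re-exported from the package root like the other OpenAI utilities:

```ts
import { calculateOpenAIImageGenerationCostInMillicents } from "modelfusion";

// Two HD 1792x1024 DALL-E 3 images: 2 * 12000 millicents = 24000 millicents,
// i.e. 24 cents ($0.24).
const cost = calculateOpenAIImageGenerationCostInMillicents({
  settings: { model: "dall-e-3", quality: "hd", size: "1792x1024", n: 2 },
});
```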
@@ -38,12 +84,9 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
              writable: true,
              value: "openai"
          });
-         Object.defineProperty(this, "modelName", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: null
-         });
+     }
+     get modelName() {
+         return this.settings.model;
      }
      async callAPI(prompt, options) {
          const run = options?.run;
@@ -63,8 +106,11 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
+             "model",
              "n",
              "size",
+             "quality",
+             "style",
          ];
          return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
      }
@@ -6,13 +6,28 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptFormat } from "../../model-function/PromptFormat.js";
  import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
  import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
+ export declare const OPENAI_IMAGE_MODELS: {
+     "dall-e-2": {
+         getCost(settings: OpenAIImageGenerationSettings): 2000 | 1800 | 1600 | null;
+     };
+     "dall-e-3": {
+         getCost(settings: OpenAIImageGenerationSettings): 4000 | 8000 | 12000 | null;
+     };
+ };
+ /**
+  * @see https://openai.com/pricing
+  */
+ export declare const calculateOpenAIImageGenerationCostInMillicents: ({ settings, }: {
+     settings: OpenAIImageGenerationSettings;
+ }) => number | null;
+ export type OpenAIImageModelType = keyof typeof OPENAI_IMAGE_MODELS;
  export interface OpenAIImageGenerationCallSettings {
+     model: OpenAIImageModelType;
      n?: number;
-     size?: "256x256" | "512x512" | "1024x1024";
+     size?: "256x256" | "512x512" | "1024x1024" | "1792x1024" | "1024x1792";
+     quality?: "standard" | "hd";
+     style?: "vivid" | "natural";
  }
- export declare const calculateOpenAIImageGenerationCostInMillicents: ({ settings, }: {
-     settings: OpenAIImageGenerationSettings;
- }) => number;
  export interface OpenAIImageGenerationSettings extends ImageGenerationModelSettings, OpenAIImageGenerationCallSettings {
      api?: ApiConfiguration;
      isUserIdForwardingEnabled?: boolean;
@@ -31,7 +46,7 @@ export interface OpenAIImageGenerationSettings extends ImageGenerationModelSetti
  export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImageGenerationSettings> implements ImageGenerationModel<string, OpenAIImageGenerationSettings> {
      constructor(settings: OpenAIImageGenerationSettings);
      readonly provider: "openai";
-     readonly modelName: null;
+     get modelName(): "dall-e-2" | "dall-e-3";
      callAPI<RESULT>(prompt: string, options: {
          responseFormat: OpenAIImageGenerationResponseFormatType<RESULT>;
      } & FunctionOptions): Promise<RESULT>;
@@ -5,15 +5,61 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { PromptFormatImageGenerationModel } from "../../model-function/generate-image/PromptFormatImageGenerationModel.js";
  import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
  import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
+ export const OPENAI_IMAGE_MODELS = {
+     "dall-e-2": {
+         getCost(settings) {
+             switch (settings.size ?? "1024x1024") {
+                 case "1024x1024":
+                     return 2000;
+                 case "512x512":
+                     return 1800;
+                 case "256x256":
+                     return 1600;
+                 default:
+                     return null;
+             }
+         },
+     },
+     "dall-e-3": {
+         getCost(settings) {
+             switch (settings.quality ?? "standard") {
+                 case "standard": {
+                     switch (settings.size ?? "1024x1024") {
+                         case "1024x1024":
+                             return 4000;
+                         case "1024x1792":
+                         case "1792x1024":
+                             return 8000;
+                         default:
+                             return null;
+                     }
+                 }
+                 case "hd": {
+                     switch (settings.size ?? "1024x1024") {
+                         case "1024x1024":
+                             return 8000;
+                         case "1024x1792":
+                         case "1792x1024":
+                             return 12000;
+                         default:
+                             return null;
+                     }
+                 }
+             }
+         },
+     },
+ };
  /**
   * @see https://openai.com/pricing
   */
- const sizeToCostInMillicents = {
-     "1024x1024": 2000,
-     "512x512": 1800,
-     "256x256": 1600,
+ export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
+     console.log(settings);
+     const cost = OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+     if (cost == null) {
+         return null;
+     }
+     return (settings.n ?? 1) * cost;
  };
- export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => (settings.n ?? 1) * sizeToCostInMillicents[settings.size ?? "1024x1024"];
  /**
   * Create an image generation model that calls the OpenAI AI image creation API.
   *
@@ -34,12 +80,9 @@ export class OpenAIImageGenerationModel extends AbstractModel {
              writable: true,
              value: "openai"
          });
-         Object.defineProperty(this, "modelName", {
-             enumerable: true,
-             configurable: true,
-             writable: true,
-             value: null
-         });
+     }
+     get modelName() {
+         return this.settings.model;
      }
      async callAPI(prompt, options) {
          const run = options?.run;
@@ -59,8 +102,11 @@ export class OpenAIImageGenerationModel extends AbstractModel {
      }
      get settingsForEvent() {
          const eventSettingProperties = [
+             "model",
              "n",
              "size",
+             "quality",
+             "style",
          ];
          return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
      }
@@ -75,12 +75,15 @@ function getTiktokenBPE(model) {
          case "gpt-3.5-turbo":
          case "gpt-3.5-turbo-0301":
          case "gpt-3.5-turbo-0613":
+         case "gpt-3.5-turbo-1106":
          case "gpt-3.5-turbo-16k":
          case "gpt-3.5-turbo-16k-0613":
          case "gpt-3.5-turbo-instruct":
          case "gpt-4":
          case "gpt-4-0314":
          case "gpt-4-0613":
+         case "gpt-4-1106-preview":
+         case "gpt-4-vision-preview":
          case "gpt-4-32k":
          case "gpt-4-32k-0314":
          case "gpt-4-32k-0613":
@@ -68,12 +68,15 @@ function getTiktokenBPE(model) {
          case "gpt-3.5-turbo":
          case "gpt-3.5-turbo-0301":
          case "gpt-3.5-turbo-0613":
+         case "gpt-3.5-turbo-1106":
          case "gpt-3.5-turbo-16k":
          case "gpt-3.5-turbo-16k-0613":
          case "gpt-3.5-turbo-instruct":
          case "gpt-4":
          case "gpt-4-0314":
          case "gpt-4-0613":
+         case "gpt-4-1106-preview":
+         case "gpt-4-vision-preview":
          case "gpt-4-32k":
          case "gpt-4-32k-0314":
          case "gpt-4-32k-0613":
@@ -40,6 +40,16 @@ exports.OPENAI_CHAT_MODELS = {
          promptTokenCostInMillicents: 3,
          completionTokenCostInMillicents: 6,
      },
+     "gpt-4-1106-preview": {
+         contextWindowSize: 128000,
+         promptTokenCostInMillicents: 1,
+         completionTokenCostInMillicents: 3,
+     },
+     "gpt-4-vision-preview": {
+         contextWindowSize: 128000,
+         promptTokenCostInMillicents: 1,
+         completionTokenCostInMillicents: 3,
+     },
      "gpt-4-32k": {
          contextWindowSize: 32768,
          promptTokenCostInMillicents: 6,
@@ -59,8 +69,13 @@ exports.OPENAI_CHAT_MODELS = {
          contextWindowSize: 4096,
          promptTokenCostInMillicents: 0.15,
          completionTokenCostInMillicents: 0.2,
-         fineTunedPromptTokenCostInMillicents: 1.2,
-         fineTunedCompletionTokenCostInMillicents: 1.6,
+         fineTunedPromptTokenCostInMillicents: 0.3,
+         fineTunedCompletionTokenCostInMillicents: 0.6,
+     },
+     "gpt-3.5-turbo-1106": {
+         contextWindowSize: 16385,
+         promptTokenCostInMillicents: 0.1,
+         completionTokenCostInMillicents: 0.2,
      },
      "gpt-3.5-turbo-0301": {
          contextWindowSize: 4096,
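
A worked example with the updated price table (costs are in millicents, i.e. thousandths of a cent per token):

```ts
import { OPENAI_CHAT_MODELS } from "modelfusion";

const { promptTokenCostInMillicents, completionTokenCostInMillicents } =
  OPENAI_CHAT_MODELS["gpt-4-1106-preview"];

// 1000 prompt tokens and 500 completion tokens:
// 1000 * 1 + 500 * 3 = 2500 millicents = 2.5 cents.
const costInMillicents =
  1000 * promptTokenCostInMillicents + 500 * completionTokenCostInMillicents;
```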
@@ -28,6 +28,16 @@ export declare const OPENAI_CHAT_MODELS: {
          promptTokenCostInMillicents: number;
          completionTokenCostInMillicents: number;
      };
+     "gpt-4-1106-preview": {
+         contextWindowSize: number;
+         promptTokenCostInMillicents: number;
+         completionTokenCostInMillicents: number;
+     };
+     "gpt-4-vision-preview": {
+         contextWindowSize: number;
+         promptTokenCostInMillicents: number;
+         completionTokenCostInMillicents: number;
+     };
      "gpt-4-32k": {
          contextWindowSize: number;
          promptTokenCostInMillicents: number;
@@ -50,6 +60,11 @@ export declare const OPENAI_CHAT_MODELS: {
          fineTunedPromptTokenCostInMillicents: number;
          fineTunedCompletionTokenCostInMillicents: number;
      };
+     "gpt-3.5-turbo-1106": {
+         contextWindowSize: number;
+         promptTokenCostInMillicents: number;
+         completionTokenCostInMillicents: number;
+     };
      "gpt-3.5-turbo-0301": {
          contextWindowSize: number;
          promptTokenCostInMillicents: number;
@@ -34,6 +34,16 @@ export const OPENAI_CHAT_MODELS = {
          promptTokenCostInMillicents: 3,
          completionTokenCostInMillicents: 6,
      },
+     "gpt-4-1106-preview": {
+         contextWindowSize: 128000,
+         promptTokenCostInMillicents: 1,
+         completionTokenCostInMillicents: 3,
+     },
+     "gpt-4-vision-preview": {
+         contextWindowSize: 128000,
+         promptTokenCostInMillicents: 1,
+         completionTokenCostInMillicents: 3,
+     },
      "gpt-4-32k": {
          contextWindowSize: 32768,
          promptTokenCostInMillicents: 6,
@@ -53,8 +63,13 @@ export const OPENAI_CHAT_MODELS = {
          contextWindowSize: 4096,
          promptTokenCostInMillicents: 0.15,
          completionTokenCostInMillicents: 0.2,
-         fineTunedPromptTokenCostInMillicents: 1.2,
-         fineTunedCompletionTokenCostInMillicents: 1.6,
+         fineTunedPromptTokenCostInMillicents: 0.3,
+         fineTunedCompletionTokenCostInMillicents: 0.6,
+     },
+     "gpt-3.5-turbo-1106": {
+         contextWindowSize: 16385,
+         promptTokenCostInMillicents: 0.1,
+         completionTokenCostInMillicents: 0.2,
      },
      "gpt-3.5-turbo-0301": {
          contextWindowSize: 4096,
@@ -17,7 +17,7 @@ const chatResponseStreamEventSchema = zod_1.z.object({
          })
              .optional(),
      }),
-     finish_reason: zod_1.z.enum(["stop", "length"]).nullable(),
+     finish_reason: zod_1.z.enum(["stop", "length"]).nullable().optional(),
      index: zod_1.z.number(),
  })),
  created: zod_1.z.number(),
@@ -14,7 +14,7 @@ const chatResponseStreamEventSchema = z.object({
          })
              .optional(),
      }),
-     finish_reason: z.enum(["stop", "length"]).nullable(),
+     finish_reason: z.enum(["stop", "length"]).nullable().optional(),
      index: z.number(),
  })),
  created: z.number(),
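
The schema change makes `finish_reason` tolerant of being absent entirely, not just `null`. A minimal zod sketch of the difference:

```ts
import { z } from "zod";

const oldSchema = z.enum(["stop", "length"]).nullable();
const newSchema = z.enum(["stop", "length"]).nullable().optional();

oldSchema.safeParse(undefined).success; // false: chunks without the field failed
newSchema.safeParse(undefined).success; // true: such chunks now parse
```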
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "modelfusion",
    "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-   "version": "0.59.0",
+   "version": "0.61.0",
    "author": "Lars Grammel",
    "license": "MIT",
    "keywords": [