modelfusion 0.62.0 → 0.63.0

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
@@ -1,12 +1,12 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAICostCalculator = void 0;
+const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
+const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
 const OpenAITextEmbeddingModel_js_1 = require("./OpenAITextEmbeddingModel.cjs");
-const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAITranscriptionModel_js_1 = require("./OpenAITranscriptionModel.cjs");
 const OpenAIChatModel_js_1 = require("./chat/OpenAIChatModel.cjs");
-const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
 class OpenAICostCalculator {
     constructor() {
         Object.defineProperty(this, "provider", {
@@ -21,7 +21,11 @@ class OpenAICostCalculator {
         const model = call.model.modelName;
         switch (type) {
             case "generate-image": {
+                if (model == null) {
+                    return null;
+                }
                 return (0, OpenAIImageGenerationModel_js_1.calculateOpenAIImageGenerationCostInMillicents)({
+                    model: model,
                     settings: call.settings,
                 });
             }
@@ -1,9 +1,9 @@
+import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
 import { calculateOpenAIImageGenerationCostInMillicents, } from "./OpenAIImageGenerationModel.js";
+import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
 import { calculateOpenAIEmbeddingCostInMillicents, isOpenAIEmbeddingModel, } from "./OpenAITextEmbeddingModel.js";
-import { calculateOpenAICompletionCostInMillicents, isOpenAICompletionModel, } from "./OpenAICompletionModel.js";
 import { calculateOpenAITranscriptionCostInMillicents, } from "./OpenAITranscriptionModel.js";
 import { calculateOpenAIChatCostInMillicents, isOpenAIChatModel, } from "./chat/OpenAIChatModel.js";
-import { calculateOpenAISpeechCostInMillicents, } from "./OpenAISpeechModel.js";
 export class OpenAICostCalculator {
     constructor() {
         Object.defineProperty(this, "provider", {
@@ -18,7 +18,11 @@ export class OpenAICostCalculator {
         const model = call.model.modelName;
         switch (type) {
            case "generate-image": {
+                if (model == null) {
+                    return null;
+                }
                return calculateOpenAIImageGenerationCostInMillicents({
+                    model: model,
                    settings: call.settings,
                });
            }
@@ -55,9 +55,8 @@ exports.OPENAI_IMAGE_MODELS = {
 /**
  * @see https://openai.com/pricing
  */
-const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
-    console.log(settings);
-    const cost = exports.OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) => {
+    const cost = exports.OPENAI_IMAGE_MODELS[model]?.getCost(settings);
     if (cost == null) {
         return null;
     }
@@ -106,7 +105,6 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
     }
     get settingsForEvent() {
        const eventSettingProperties = [
-            "model",
            "n",
            "size",
            "quality",
@@ -17,7 +17,8 @@ export declare const OPENAI_IMAGE_MODELS: {
 /**
  * @see https://openai.com/pricing
  */
-export declare const calculateOpenAIImageGenerationCostInMillicents: ({ settings, }: {
+export declare const calculateOpenAIImageGenerationCostInMillicents: ({ model, settings, }: {
+    model: OpenAIImageModelType;
     settings: OpenAIImageGenerationSettings;
 }) => number | null;
 export type OpenAIImageModelType = keyof typeof OPENAI_IMAGE_MODELS;
@@ -52,9 +52,8 @@ export const OPENAI_IMAGE_MODELS = {
 /**
  * @see https://openai.com/pricing
  */
-export const calculateOpenAIImageGenerationCostInMillicents = ({ settings, }) => {
-    console.log(settings);
-    const cost = OPENAI_IMAGE_MODELS[settings.model]?.getCost(settings);
+export const calculateOpenAIImageGenerationCostInMillicents = ({ model, settings, }) => {
+    const cost = OPENAI_IMAGE_MODELS[model]?.getCost(settings);
     if (cost == null) {
         return null;
     }
@@ -102,7 +101,6 @@ export class OpenAIImageGenerationModel extends AbstractModel {
     }
     get settingsForEvent() {
        const eventSettingProperties = [
-            "model",
            "n",
            "size",
            "quality",
@@ -211,6 +211,7 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
            // map to OpenAI API names:
            stop: this.settings.stopSequences,
            maxTokens: this.settings.maxCompletionTokens,
+            openAIResponseFormat: this.settings.responseFormat,
            // other settings:
            user: this.settings.isUserIdForwardingEnabled
                ? options.run?.userId
@@ -390,6 +391,7 @@ const openAIChatResponseSchema = zod_1.z.object({
    object: zod_1.z.literal("chat.completion"),
    created: zod_1.z.number(),
    model: zod_1.z.string(),
+    system_fingerprint: zod_1.z.string(),
    choices: zod_1.z.array(zod_1.z.object({
        message: zod_1.z.object({
            role: zod_1.z.literal("assistant"),
@@ -403,7 +405,13 @@ const openAIChatResponseSchema = zod_1.z.object({
        }),
        index: zod_1.z.number(),
        logprobs: zod_1.z.nullable(zod_1.z.any()),
-        finish_reason: zod_1.z.string(),
+        finish_reason: zod_1.z.enum([
+            "stop",
+            "length",
+            "tool_calls",
+            "content_filter",
+            "function_call",
+        ]),
    })),
    usage: zod_1.z.object({
        prompt_tokens: zod_1.z.number(),
@@ -411,7 +419,7 @@ const openAIChatResponseSchema = zod_1.z.object({
        total_tokens: zod_1.z.number(),
    }),
 });
-async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
+async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
    // empty arrays are not allowed for stop:
    if (stop != null && Array.isArray(stop) && stop.length === 0) {
        stop = undefined;
@@ -433,6 +441,8 @@ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js
            presence_penalty: presencePenalty,
            frequency_penalty: frequencyPenalty,
            logit_bias: logitBias,
+            seed,
+            response_format: openAIResponseFormat,
            user,
        },
        failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
@@ -119,6 +119,10 @@ export interface OpenAIChatCallSettings {
    maxTokens?: number;
    temperature?: number;
    topP?: number;
+    seed?: number | null;
+    responseFormat?: {
+        type?: "text" | "json_object";
+    };
    n?: number;
    presencePenalty?: number;
    frequencyPenalty?: number;
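OpenAIChatCallSettings gains seed and responseFormat, which the chat model forwards to the OpenAI API as seed and response_format (see the callOpenAIChatCompletionAPI hunks). A minimal usage sketch; the import path and model name are assumptions, not part of this diff:

```ts
import { OpenAIChatModel } from "modelfusion"; // import path is an assumption

const chatModel = new OpenAIChatModel({
  model: "gpt-4-1106-preview", // example model name
  // new in 0.63.0, sent as `response_format` (ask the model to return a JSON object):
  responseFormat: { type: "json_object" },
  // new in 0.63.0, sent as `seed` for best-effort reproducible sampling:
  seed: 42,
});
```

Since seeded sampling is only best-effort deterministic, the system_fingerprint field that this release adds to the response schema is the value to compare when checking whether the backend configuration changed between seeded runs.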
@@ -189,11 +193,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
                     arguments: string;
                 } | undefined;
             };
-            finish_reason: string;
+            finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
             index: number;
             logprobs?: any;
         }[];
         created: number;
+        system_fingerprint: string;
     };
     text: string;
     usage: {
@@ -229,11 +234,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
                     arguments: string;
                 } | undefined;
             };
-            finish_reason: string;
+            finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
             index: number;
             logprobs?: any;
         }[];
         created: number;
+        system_fingerprint: string;
     };
     valueText: string;
     value: any;
@@ -263,11 +269,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
                     arguments: string;
                 } | undefined;
             };
-            finish_reason: string;
+            finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
             index: number;
             logprobs?: any;
         }[];
         created: number;
+        system_fingerprint: string;
     };
     structureAndText: {
         structure: null;
@@ -299,11 +306,12 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
                     arguments: string;
                 } | undefined;
             };
-            finish_reason: string;
+            finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
             index: number;
             logprobs?: any;
         }[];
         created: number;
+        system_fingerprint: string;
     };
     structureAndText: {
         structure: string;
@@ -338,6 +346,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
     object: z.ZodLiteral<"chat.completion">;
     created: z.ZodNumber;
     model: z.ZodString;
+    system_fingerprint: z.ZodString;
     choices: z.ZodArray<z.ZodObject<{
         message: z.ZodObject<{
             role: z.ZodLiteral<"assistant">;
@@ -369,7 +378,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
         }>;
         index: z.ZodNumber;
         logprobs: z.ZodNullable<z.ZodAny>;
-        finish_reason: z.ZodString;
+        finish_reason: z.ZodEnum<["stop", "length", "tool_calls", "content_filter", "function_call"]>;
     }, "strip", z.ZodTypeAny, {
         message: {
             content: string | null;
@@ -379,7 +388,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
                 arguments: string;
             } | undefined;
         };
-        finish_reason: string;
+        finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
         index: number;
         logprobs?: any;
     }, {
@@ -391,7 +400,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
                 arguments: string;
             } | undefined;
         };
-        finish_reason: string;
+        finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
         index: number;
         logprobs?: any;
     }>, "many">;
@@ -426,11 +435,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
                 arguments: string;
             } | undefined;
         };
-        finish_reason: string;
+        finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
         index: number;
         logprobs?: any;
     }[];
     created: number;
+    system_fingerprint: string;
 }, {
     object: "chat.completion";
     usage: {
@@ -449,11 +459,12 @@ declare const openAIChatResponseSchema: z.ZodObject<{
                 arguments: string;
             } | undefined;
         };
-        finish_reason: string;
+        finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
         index: number;
         logprobs?: any;
     }[];
     created: number;
+    system_fingerprint: string;
 }>;
 export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
 export type OpenAIChatResponseFormatType<T> = {
@@ -484,11 +495,12 @@ export declare const OpenAIChatResponseFormat: {
                 arguments: string;
             } | undefined;
         };
-        finish_reason: string;
+        finish_reason: "length" | "stop" | "function_call" | "tool_calls" | "content_filter";
         index: number;
         logprobs?: any;
     }[];
     created: number;
+    system_fingerprint: string;
 }>;
 };
 /**
@@ -202,6 +202,7 @@ export class OpenAIChatModel extends AbstractModel {
            // map to OpenAI API names:
            stop: this.settings.stopSequences,
            maxTokens: this.settings.maxCompletionTokens,
+            openAIResponseFormat: this.settings.responseFormat,
            // other settings:
            user: this.settings.isUserIdForwardingEnabled
                ? options.run?.userId
@@ -380,6 +381,7 @@ const openAIChatResponseSchema = z.object({
    object: z.literal("chat.completion"),
    created: z.number(),
    model: z.string(),
+    system_fingerprint: z.string(),
    choices: z.array(z.object({
        message: z.object({
            role: z.literal("assistant"),
@@ -393,7 +395,13 @@ const openAIChatResponseSchema = z.object({
        }),
        index: z.number(),
        logprobs: z.nullable(z.any()),
-        finish_reason: z.string(),
+        finish_reason: z.enum([
+            "stop",
+            "length",
+            "tool_calls",
+            "content_filter",
+            "function_call",
+        ]),
    })),
    usage: z.object({
        prompt_tokens: z.number(),
@@ -401,7 +409,7 @@ const openAIChatResponseSchema = z.object({
        total_tokens: z.number(),
    }),
 });
-async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
+async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
    // empty arrays are not allowed for stop:
    if (stop != null && Array.isArray(stop) && stop.length === 0) {
        stop = undefined;
@@ -423,6 +431,8 @@ async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(),
            presence_penalty: presencePenalty,
            frequency_penalty: frequencyPenalty,
            logit_bias: logitBias,
+            seed,
+            response_format: openAIResponseFormat,
            user,
        },
        failedResponseHandler: failedOpenAICallResponseHandler,
@@ -17,7 +17,16 @@ const chatResponseStreamEventSchema = zod_1.z.object({
            })
                .optional(),
        }),
-        finish_reason: zod_1.z.enum(["stop", "length"]).nullable().optional(),
+        finish_reason: zod_1.z
+            .enum([
+                "stop",
+                "length",
+                "tool_calls",
+                "content_filter",
+                "function_call",
+            ])
+            .nullable()
+            .optional(),
        index: zod_1.z.number(),
    })),
    created: zod_1.z.number(),
@@ -14,7 +14,16 @@ const chatResponseStreamEventSchema = z.object({
            })
                .optional(),
        }),
-        finish_reason: z.enum(["stop", "length"]).nullable().optional(),
+        finish_reason: z
+            .enum([
+                "stop",
+                "length",
+                "tool_calls",
+                "content_filter",
+                "function_call",
+            ])
+            .nullable()
+            .optional(),
        index: z.number(),
    })),
    created: z.number(),
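Both the chat response schema and the streaming event schema now validate finish_reason against the full set of OpenAI finish reasons; the response schema previously accepted any string, and the streaming schema accepted only "stop" and "length". A standalone zod sketch of the widened, still nullable and optional, stream-side validator:

```ts
import { z } from "zod";

// mirrors the widened finish_reason validator in the stream event schema:
const finishReason = z
  .enum(["stop", "length", "tool_calls", "content_filter", "function_call"])
  .nullable()
  .optional();

finishReason.parse("tool_calls"); // accepted in 0.63.0; 0.62.0 allowed only "stop" / "length"
finishReason.parse(null); // still accepted for in-progress chunks
console.log(finishReason.safeParse("other").success); // false – unknown values are still rejected
```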
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build multimodal applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.62.0",
+  "version": "0.63.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [