modelfusion 0.83.0 → 0.84.0

This diff shows the changes between publicly released versions of the package, as published to their respective public registries, and is provided for informational purposes only.
Files changed (33)
  1. package/README.md +3 -3
  2. package/model-function/AbstractModel.d.ts +1 -1
  3. package/model-provider/index.cjs +1 -0
  4. package/model-provider/index.d.ts +1 -0
  5. package/model-provider/index.js +1 -0
  6. package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +1 -1
  7. package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +1 -1
  8. package/model-provider/openai/AzureOpenAIApiConfiguration.js +1 -1
  9. package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +228 -0
  10. package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +467 -0
  11. package/model-provider/openai/chat/AbstractOpenAIChatModel.js +224 -0
  12. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.cjs +3 -3
  13. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +6 -6
  14. package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.js +1 -1
  15. package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -218
  16. package/model-provider/openai/chat/OpenAIChatModel.d.ts +11 -460
  17. package/model-provider/openai/chat/OpenAIChatModel.js +4 -217
  18. package/model-provider/openai/index.cjs +1 -0
  19. package/model-provider/openai/index.d.ts +1 -0
  20. package/model-provider/openai/index.js +1 -0
  21. package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +29 -0
  22. package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +18 -0
  23. package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +25 -0
  24. package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +100 -0
  25. package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +45 -0
  26. package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +96 -0
  27. package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +30 -0
  28. package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +24 -0
  29. package/model-provider/openai-compatible/OpenAICompatibleFacade.js +26 -0
  30. package/model-provider/openai-compatible/index.cjs +32 -0
  31. package/model-provider/openai-compatible/index.d.ts +3 -0
  32. package/model-provider/openai-compatible/index.js +3 -0
  33. package/package.json +1 -1
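The headline change in this release is the new openai-compatible provider (files 21–32 above): a FireworksAIApiConfiguration, an OpenAICompatibleChatModel, and an OpenAICompatibleFacade, all built on the AbstractOpenAIChatModel that this diff extracts from OpenAIChatModel. Below is a minimal TypeScript sketch of how these pieces plausibly fit together; the class names come from the file list, but the exact exports, settings shape, and model id are assumptions inferred from the diff, not confirmed API.

// Hedged sketch, assuming the new classes are re-exported from the package root
// and that OpenAICompatibleChatModel takes the shared settings (api + model)
// shown later in this diff via AbstractOpenAIChatCallSettings.
import {
  FireworksAIApiConfiguration, // assumption: re-exported via model-provider/openai-compatible/index
  OpenAICompatibleChatModel,
  streamText,
} from "modelfusion";

const model = new OpenAICompatibleChatModel({
  api: new FireworksAIApiConfiguration(), // assumption: reads the Fireworks API key from the environment
  model: "accounts/fireworks/models/llama-v2-7b-chat", // placeholder model id
});

const textStream = await streamText(
  model.withTextPrompt(), // assumption: same prompt-format helpers as OpenAIChatModel
  "Write a haiku about package diffs."
);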
@@ -1,20 +1,13 @@
- import { z } from "zod";
- import { FunctionOptions } from "../../../core/FunctionOptions.js";
- import { ApiConfiguration } from "../../../core/api/ApiConfiguration.js";
- import { ResponseHandler } from "../../../core/api/postToApi.js";
- import { AbstractModel } from "../../../model-function/AbstractModel.js";
- import { Delta } from "../../../model-function/Delta.js";
  import { StructureFromTextPromptFormat } from "../../../model-function/generate-structure/StructureFromTextPromptFormat.js";
  import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
  import { PromptFormatTextStreamingModel } from "../../../model-function/generate-text/PromptFormatTextStreamingModel.js";
  import { TextGenerationModelSettings, TextStreamingModel } from "../../../model-function/generate-text/TextGenerationModel.js";
  import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
- import { ToolDefinition } from "../../../tool/ToolDefinition.js";
  import { ToolCallGenerationModel } from "../../../tool/generate-tool-call/ToolCallGenerationModel.js";
  import { ToolCallsOrTextGenerationModel } from "../../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
  import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+ import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt, OpenAIChatResponse } from "./AbstractOpenAIChatModel.js";
  import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
- import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
  export declare const OPENAI_CHAT_MODELS: {
  "gpt-4": {
  contextWindowSize: number;
@@ -109,49 +102,14 @@ export declare const calculateOpenAIChatCostInMillicents: ({ model, response, }:
  model: OpenAIChatModelType;
  response: OpenAIChatResponse;
  }) => number | null;
- export interface OpenAIChatCallSettings {
- api?: ApiConfiguration;
+ export interface OpenAIChatCallSettings extends AbstractOpenAIChatCallSettings {
  model: OpenAIChatModelType;
- functions?: Array<{
- name: string;
- description?: string;
- parameters: unknown;
- }>;
- functionCall?: "none" | "auto" | {
- name: string;
- };
- tools?: Array<{
- type: "function";
- function: {
- name: string;
- description?: string;
- parameters: unknown;
- };
- }>;
- toolChoice?: "none" | "auto" | {
- type: "function";
- function: {
- name: string;
- };
- };
- stop?: string | string[];
- maxTokens?: number;
- temperature?: number;
- topP?: number;
- seed?: number | null;
- responseFormat?: {
- type?: "text" | "json_object";
- };
- n?: number;
- presencePenalty?: number;
- frequencyPenalty?: number;
- logitBias?: Record<number, number>;
  }
  export interface OpenAIChatSettings extends TextGenerationModelSettings, Omit<OpenAIChatCallSettings, "stop" | "maxTokens"> {
  isUserIdForwardingEnabled?: boolean;
  }
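This hunk is the core of the refactor: every provider-agnostic call setting in the removed block (functions, functionCall, tools, toolChoice, stop, maxTokens, temperature, topP, seed, responseFormat, n, presencePenalty, frequencyPenalty, logitBias) moves into AbstractOpenAIChatCallSettings, and OpenAIChatCallSettings now only pins model to OpenAIChatModelType. A small sketch of a settings object that should still type-check after the change; the field names are taken verbatim from the declarations above, the values are illustrative:

const settings: OpenAIChatSettings = {
  model: "gpt-4", // OpenAIChatModelType; "gpt-4" appears in OPENAI_CHAT_MODELS above
  temperature: 0.7,
  seed: 42,
  responseFormat: { type: "json_object" },
  isUserIdForwardingEnabled: false,
  // "stop" and "maxTokens" are unavailable here: OpenAIChatSettings omits them
  // via Omit<OpenAIChatCallSettings, "stop" | "maxTokens">.
};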
  /**
- * Create a text generation model that calls the OpenAI chat completion API.
+ * Create a text generation model that calls the OpenAI chat API.
  *
  * @see https://platform.openai.com/docs/api-reference/chat/create
  *
@@ -169,7 +127,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, Omit<Op
  * ),
  * ]);
  */
- export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> implements TextStreamingModel<OpenAIChatMessage[], OpenAIChatSettings>, ToolCallGenerationModel<OpenAIChatMessage[], OpenAIChatSettings>, ToolCallsOrTextGenerationModel<OpenAIChatMessage[], OpenAIChatSettings> {
+ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatSettings> implements TextStreamingModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallGenerationModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallsOrTextGenerationModel<OpenAIChatPrompt, OpenAIChatSettings> {
  constructor(settings: OpenAIChatSettings);
  readonly provider: "openai";
  get modelName(): OpenAIChatModelType;
@@ -179,433 +137,26 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
  * Counts the prompt tokens required for the messages. This includes the message base tokens
  * and the prompt base tokens.
  */
- countPromptTokens(messages: OpenAIChatMessage[]): Promise<number>;
- callAPI<RESULT>(messages: Array<OpenAIChatMessage>, options: {
- responseFormat: OpenAIChatResponseFormatType<RESULT>;
- } & FunctionOptions & {
- functions?: OpenAIChatCallSettings["functions"];
- functionCall?: OpenAIChatCallSettings["functionCall"];
- tools?: OpenAIChatCallSettings["tools"];
- toolChoice?: OpenAIChatCallSettings["toolChoice"];
- }): Promise<RESULT>;
+ countPromptTokens(messages: OpenAIChatPrompt): Promise<number>;
  get settingsForEvent(): Partial<OpenAIChatSettings>;
- doGenerateText(prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
- response: {
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- };
- text: string;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- }>;
- doStreamText(prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
- doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
- response: {
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- };
- toolCall: {
- id: string;
- args: unknown;
- } | null;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- }>;
- doGenerateToolCallsOrText(tools: Array<ToolDefinition<string, unknown>>, prompt: OpenAIChatMessage[], options?: FunctionOptions): Promise<{
- response: {
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- };
- text: string | null;
- toolCalls: {
- id: string;
- name: string;
- args: unknown;
- }[] | null;
- usage: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
- }>;
- extractUsage(response: OpenAIChatResponse): {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
  asFunctionCallStructureGenerationModel({ fnName, fnDescription, }: {
  fnName: string;
  fnDescription?: string;
- }): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptFormat<OpenAIChatMessage[], OpenAIChatMessage[]>>;
- asStructureGenerationModel<INPUT_PROMPT>(promptFormat: StructureFromTextPromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatMessage[], this>;
+ }): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptFormat<import("./OpenAIChatMessage.js").OpenAIChatMessage[], import("./OpenAIChatMessage.js").OpenAIChatMessage[]>>;
+ asStructureGenerationModel<INPUT_PROMPT>(promptFormat: StructureFromTextPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, this>;
  /**
  * Returns this model with a text prompt format.
  */
- withTextPrompt(): PromptFormatTextStreamingModel<string, OpenAIChatMessage[], OpenAIChatSettings, this>;
+ withTextPrompt(): PromptFormatTextStreamingModel<string, OpenAIChatPrompt, OpenAIChatSettings, this>;
  /**
  * Returns this model with an instruction prompt format.
  */
- withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").MultiModalInstructionPrompt | import("../../../index.js").TextInstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
+ withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").MultiModalInstructionPrompt | import("../../../index.js").TextInstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
  /**
  * Returns this model with a chat prompt format.
  */
- withChatPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
- withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatSettings, this>;
+ withChatPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
+ withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
  withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
  }
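The class body shrinks by roughly 400 lines because callAPI, doGenerateText, doStreamText, doGenerateToolCall, doGenerateToolCallsOrText, and extractUsage move into AbstractOpenAIChatModel; at the remaining call sites only the prompt type changes, from OpenAIChatMessage[] to the OpenAIChatPrompt alias imported from AbstractOpenAIChatModel.js. A usage sketch of the surviving surface (streamText is an existing modelfusion helper; the model id and prompt are illustrative):

import { OpenAIChatModel, streamText } from "modelfusion";

// withTextPrompt() narrows the prompt type from OpenAIChatPrompt to string,
// per the declaration above.
const model = new OpenAIChatModel({ model: "gpt-4" }).withTextPrompt();

const textStream = await streamText(model, "Explain tokenizers in one sentence.");
for await (const delta of textStream) {
  process.stdout.write(delta);
}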
- declare const openAIChatResponseSchema: z.ZodObject<{
- id: z.ZodString;
- choices: z.ZodArray<z.ZodObject<{
- message: z.ZodObject<{
- role: z.ZodLiteral<"assistant">;
- content: z.ZodNullable<z.ZodString>;
- function_call: z.ZodOptional<z.ZodObject<{
- name: z.ZodString;
- arguments: z.ZodString;
- }, "strip", z.ZodTypeAny, {
- name: string;
- arguments: string;
- }, {
- name: string;
- arguments: string;
- }>>;
- tool_calls: z.ZodOptional<z.ZodArray<z.ZodObject<{
- id: z.ZodString;
- type: z.ZodLiteral<"function">;
- function: z.ZodObject<{
- name: z.ZodString;
- arguments: z.ZodString;
- }, "strip", z.ZodTypeAny, {
- name: string;
- arguments: string;
- }, {
- name: string;
- arguments: string;
- }>;
- }, "strip", z.ZodTypeAny, {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }, {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }>, "many">>;
- }, "strip", z.ZodTypeAny, {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- }, {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- }>;
- index: z.ZodNumber;
- logprobs: z.ZodNullable<z.ZodAny>;
- finish_reason: z.ZodNullable<z.ZodOptional<z.ZodEnum<["stop", "length", "tool_calls", "content_filter", "function_call"]>>>;
- }, "strip", z.ZodTypeAny, {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }, {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }>, "many">;
- created: z.ZodNumber;
- model: z.ZodString;
- system_fingerprint: z.ZodOptional<z.ZodString>;
- object: z.ZodLiteral<"chat.completion">;
- usage: z.ZodObject<{
- prompt_tokens: z.ZodNumber;
- completion_tokens: z.ZodNumber;
- total_tokens: z.ZodNumber;
- }, "strip", z.ZodTypeAny, {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- }, {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- }>;
- }, "strip", z.ZodTypeAny, {
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- }, {
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- }>;
- export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
- export type OpenAIChatResponseFormatType<T> = {
- stream: boolean;
- handler: ResponseHandler<T>;
- };
- export declare const OpenAIChatResponseFormat: {
- /**
- * Returns the response as a JSON object.
- */
- json: {
- stream: false;
- handler: ResponseHandler<{
- object: "chat.completion";
- usage: {
- prompt_tokens: number;
- total_tokens: number;
- completion_tokens: number;
- };
- model: string;
- id: string;
- choices: {
- message: {
- role: "assistant";
- content: string | null;
- function_call?: {
- name: string;
- arguments: string;
- } | undefined;
- tool_calls?: {
- function: {
- name: string;
- arguments: string;
- };
- type: "function";
- id: string;
- }[] | undefined;
- };
- index: number;
- logprobs?: any;
- finish_reason?: "length" | "stop" | "tool_calls" | "function_call" | "content_filter" | null | undefined;
- }[];
- created: number;
- system_fingerprint?: string | undefined;
- }>;
- };
- /**
- * Returns an async iterable over the text deltas (only the text delta of the first choice).
- */
- textDeltaIterable: {
- stream: true;
- handler: ({ response }: {
- response: Response;
- }) => Promise<AsyncIterable<Delta<string>>>;
- };
- structureDeltaIterable: {
- stream: true;
- handler: ({ response }: {
- response: Response;
- }) => Promise<AsyncIterable<Delta<unknown>>>;
- };
- };
  export {};
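The removed openAIChatResponseSchema, OpenAIChatResponse, and OpenAIChatResponseFormat declarations are not deleted from the package; they reappear in the new AbstractOpenAIChatModel.d.ts (file 10, +467 lines), where the openai-compatible providers can share them. The pattern they implement pairs a stream flag with a response handler. A stripped-down, self-contained illustration of that pattern, not modelfusion's actual code (parseSseTextDeltas is hypothetical):

// Hypothetical sketch of the response-format pattern shown above.
type ResponseFormat<T> = {
  stream: boolean;
  handler: (args: { response: Response }) => Promise<T>;
};

// Non-streaming: parse the whole body as JSON (cf. OpenAIChatResponseFormat.json).
const json: ResponseFormat<unknown> = {
  stream: false,
  handler: async ({ response }) => response.json(),
};

// Streaming: hand the body to an async-iterable SSE parser (cf. textDeltaIterable).
declare function parseSseTextDeltas(response: Response): AsyncIterable<string>;
const textDeltas: ResponseFormat<AsyncIterable<string>> = {
  stream: true,
  handler: async ({ response }) => parseSseTextDeltas(response),
};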