modelfusion 0.24.0 → 0.24.1
This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
- package/model-provider/openai/OpenAITextGenerationModel.cjs +3 -1
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +7 -8
- package/model-provider/openai/OpenAITextGenerationModel.js +3 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +3 -2
- package/model-provider/openai/chat/OpenAIChatModel.js +5 -1
- package/package.json +1 -1
package/model-provider/openai/OpenAITextGenerationModel.cjs:

@@ -201,6 +201,7 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
             "presencePenalty",
             "frequencyPenalty",
             "bestOf",
+            "logitBias",
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
@@ -275,7 +276,7 @@ const openAITextGenerationResponseSchema = zod_1.default.object({
  *
  * console.log(response.choices[0].text);
  */
-async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
+async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
     return (0, postToApi_js_1.postJsonToApi)({
         url: `${baseUrl}/completions`,
         headers: {
@@ -297,6 +298,7 @@ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v
             presence_penalty: presencePenalty,
             frequency_penalty: frequencyPenalty,
             best_of: bestOf,
+            logit_bias: logitBias,
             user,
         },
         failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
package/model-provider/openai/OpenAITextGenerationModel.d.ts:

@@ -5,8 +5,6 @@ import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
 import { PromptFormat } from "../../prompt/PromptFormat.js";
 import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
-import { RetryFunction } from "../../util/api/RetryFunction.js";
-import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
 import { OpenAIImageGenerationCallSettings } from "./OpenAIImageGenerationModel.js";
 import { OpenAIModelSettings } from "./OpenAIModelSettings.js";
@@ -82,23 +80,24 @@ export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, re
     model: OpenAITextGenerationModelType;
     response: OpenAITextGenerationResponse;
 }) => number;
-export interface
+export interface OpenAITextGenerationCallSettings {
     model: OpenAITextGenerationModelType;
     headers?: Record<string, string>;
-    baseUrl?: string;
-    apiKey?: string;
-    retry?: RetryFunction;
-    throttle?: ThrottleFunction;
-    isUserIdForwardingEnabled?: boolean;
     suffix?: string;
+    maxTokens?: number;
     temperature?: number;
     topP?: number;
     n?: number;
     logprobs?: number;
     echo?: boolean;
+    stop?: string | string[];
     presencePenalty?: number;
     frequencyPenalty?: number;
     bestOf?: number;
+    logitBias?: Record<number, number>;
+}
+export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings, OpenAIModelSettings, Omit<OpenAITextGenerationCallSettings, "stop" | "maxTokens"> {
+    isUserIdForwardingEnabled?: boolean;
 }
 /**
  * Create a text generation model that calls the OpenAI text completion API.
package/model-provider/openai/OpenAITextGenerationModel.js:

@@ -192,6 +192,7 @@ export class OpenAITextGenerationModel extends AbstractModel {
             "presencePenalty",
             "frequencyPenalty",
             "bestOf",
+            "logitBias",
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
@@ -265,7 +266,7 @@ const openAITextGenerationResponseSchema = z.object({
  *
  * console.log(response.choices[0].text);
  */
-async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, user, }) {
+async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
     return postJsonToApi({
         url: `${baseUrl}/completions`,
         headers: {
@@ -287,6 +288,7 @@ async function callOpenAITextGenerationAPI({ baseUrl = "https://api.openai.com/v
             presence_penalty: presencePenalty,
             frequency_penalty: frequencyPenalty,
             best_of: bestOf,
+            logit_bias: logitBias,
             user,
         },
         failedResponseHandler: failedOpenAICallResponseHandler,
package/model-provider/openai/chat/OpenAIChatModel.cjs:

@@ -219,6 +219,9 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
             "temperature",
             "topP",
             "n",
+            "presencePenalty",
+            "frequencyPenalty",
+            "logitBias",
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
@@ -307,7 +310,7 @@ const openAIChatResponseSchema = zod_1.default.object({
         total_tokens: zod_1.default.number(),
     }),
 });
-async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
+async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
     return (0, postToApi_js_1.postJsonToApi)({
         url: `${baseUrl}/chat/completions`,
         headers: {
@@ -327,6 +330,7 @@ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v
             max_tokens: maxTokens,
             presence_penalty: presencePenalty,
             frequency_penalty: frequencyPenalty,
+            logit_bias: logitBias,
             user,
         },
         failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
|
@@ -101,13 +101,14 @@ export interface OpenAIChatCallSettings {
|
|
101
101
|
functionCall?: "none" | "auto" | {
|
102
102
|
name: string;
|
103
103
|
};
|
104
|
+
stop?: string | string[];
|
105
|
+
maxTokens?: number;
|
104
106
|
temperature?: number;
|
105
107
|
topP?: number;
|
106
108
|
n?: number;
|
107
|
-
stop?: string | string[];
|
108
|
-
maxTokens?: number;
|
109
109
|
presencePenalty?: number;
|
110
110
|
frequencyPenalty?: number;
|
111
|
+
logitBias?: Record<number, number>;
|
111
112
|
}
|
112
113
|
export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIModelSettings, Omit<OpenAIChatCallSettings, "stop" | "maxTokens"> {
|
113
114
|
isUserIdForwardingEnabled?: boolean;
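With this change, `OpenAIChatCallSettings` gains `logitBias`, and `stop`/`maxTokens` move ahead of the sampling parameters; the `.cjs`/`.js` hunks also start reporting `presencePenalty` and `frequencyPenalty` as event setting properties. A sketch of configuring the chat model follows, assuming the `generateText` and `OpenAIChatMessage` helpers behave as in modelfusion examples of this era; the token id is illustrative only:

```ts
import { generateText, OpenAIChatMessage, OpenAIChatModel } from "modelfusion";

// presencePenalty, frequencyPenalty, and logitBias are all forwarded to the
// chat completions API as of this release.
const chatModel = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  presencePenalty: 0.5,
  frequencyPenalty: 0.5,
  logitBias: { 1734: -100 }, // hypothetical token id -> bias
});

const answer = await generateText(chatModel, [
  OpenAIChatMessage.system("You are a terse assistant."),
  OpenAIChatMessage.user("Name three citrus fruits."),
]);
console.log(answer);
```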
|
@@ -210,6 +210,9 @@ export class OpenAIChatModel extends AbstractModel {
|
|
210
210
|
"temperature",
|
211
211
|
"topP",
|
212
212
|
"n",
|
213
|
+
"presencePenalty",
|
214
|
+
"frequencyPenalty",
|
215
|
+
"logitBias",
|
213
216
|
];
|
214
217
|
return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
|
215
218
|
}
|
@@ -297,7 +300,7 @@ const openAIChatResponseSchema = z.object({
|
|
297
300
|
total_tokens: z.number(),
|
298
301
|
}),
|
299
302
|
});
|
300
|
-
async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, user, }) {
|
303
|
+
async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v1", headers, abortSignal, responseFormat, apiKey, model, messages, functions, functionCall, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, }) {
|
301
304
|
return postJsonToApi({
|
302
305
|
url: `${baseUrl}/chat/completions`,
|
303
306
|
headers: {
|
@@ -317,6 +320,7 @@ async function callOpenAIChatCompletionAPI({ baseUrl = "https://api.openai.com/v
|
|
317
320
|
max_tokens: maxTokens,
|
318
321
|
presence_penalty: presencePenalty,
|
319
322
|
frequency_penalty: frequencyPenalty,
|
323
|
+
logit_bias: logitBias,
|
320
324
|
user,
|
321
325
|
},
|
322
326
|
failedResponseHandler: failedOpenAICallResponseHandler,
|