modelfusion 0.83.0 → 0.85.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- package/README.md +9 -9
- package/guard/fixStructure.cjs +3 -5
- package/guard/fixStructure.d.ts +3 -5
- package/guard/fixStructure.js +3 -5
- package/model-function/AbstractModel.d.ts +1 -1
- package/model-provider/index.cjs +1 -0
- package/model-provider/index.d.ts +1 -0
- package/model-provider/index.js +1 -0
- package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +1 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +1 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.js +1 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +228 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +467 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +224 -0
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.cjs +3 -3
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +7 -7
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatMessage.cjs +23 -1
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +23 -2
- package/model-provider/openai/chat/OpenAIChatMessage.js +23 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -218
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +11 -460
- package/model-provider/openai/chat/OpenAIChatModel.js +4 -217
- package/model-provider/openai/index.cjs +1 -0
- package/model-provider/openai/index.d.ts +1 -0
- package/model-provider/openai/index.js +1 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +29 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +18 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +25 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +100 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +45 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +96 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +30 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +24 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +26 -0
- package/model-provider/openai-compatible/index.cjs +32 -0
- package/model-provider/openai-compatible/index.d.ts +3 -0
- package/model-provider/openai-compatible/index.js +3 -0
- package/package.json +1 -1
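Taken together, the file list shows two changes: the HTTP and streaming plumbing of `OpenAIChatModel` is extracted into a reusable `AbstractOpenAIChatModel` base class, and a new `openai-compatible` provider builds on it, adding a `FireworksAIApiConfiguration` and an `openaicompatible.ChatTextGenerator` facade. A minimal usage sketch, assuming the new classes are re-exported from the package root and that the base call settings accept an `api` configuration (suggested by the removed `this.settings.api?.retry` code below, but not shown directly in this diff):

```ts
import {
  FireworksAIApiConfiguration,
  openaicompatible,
  OpenAIChatMessage,
  generateText,
} from "modelfusion";

// Sketch only: the model id and the `api` setting are assumptions.
const model = openaicompatible.ChatTextGenerator({
  api: new FireworksAIApiConfiguration(), // reads FIREWORKS_API_KEY
  model: "accounts/fireworks/models/llama-v2-13b-chat", // hypothetical Fireworks model id
  temperature: 0.7,
  maxCompletionTokens: 500,
});

// Mirrors the call shape used in the facade's own JSDoc example below.
const text = await generateText([
  model,
  OpenAIChatMessage.user("Say hello."),
]);
```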
--- package/model-provider/openai/chat/OpenAIChatModel.js
+++ package/model-provider/openai/chat/OpenAIChatModel.js
@@ -1,17 +1,9 @@
-import { z } from "zod";
-import { callWithRetryAndThrottle } from "../../../core/api/callWithRetryAndThrottle.js";
-import { createJsonResponseHandler, postJsonToApi, } from "../../../core/api/postToApi.js";
-import { parseJSON } from "../../../core/schema/parseJSON.js";
-import { AbstractModel } from "../../../model-function/AbstractModel.js";
 import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
-import { parsePartialJson } from "../../../model-function/generate-structure/parsePartialJson.js";
 import { PromptFormatTextStreamingModel } from "../../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
-import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+import { AbstractOpenAIChatModel, } from "./AbstractOpenAIChatModel.js";
 import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
 import { chat, identity, instruction, text } from "./OpenAIChatPromptFormat.js";
-import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
 import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
 /*
  * Available OpenAI chat models, their token limits, and pricing.
@@ -138,7 +130,7 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => {
     response.usage.completion_tokens * completionTokenCostInMillicents);
 };
 /**
- * Create a text generation model that calls the OpenAI chat
+ * Create a text generation model that calls the OpenAI chat API.
  *
  * @see https://platform.openai.com/docs/api-reference/chat/create
  *
@@ -156,9 +148,9 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => {
  * ),
  * ]);
  */
-export class OpenAIChatModel extends AbstractModel {
+export class OpenAIChatModel extends AbstractOpenAIChatModel {
     constructor(settings) {
-        super({ settings });
+        super(settings);
         Object.defineProperty(this, "provider", {
             enumerable: true,
             configurable: true,
@@ -196,31 +188,6 @@ export class OpenAIChatModel extends AbstractModel {
             model: this.modelName,
         });
     }
-    async callAPI(messages, options) {
-        return callWithRetryAndThrottle({
-            retry: this.settings.api?.retry,
-            throttle: this.settings.api?.throttle,
-            call: async () => callOpenAIChatCompletionAPI({
-                ...this.settings,
-                // function & tool calling:
-                functions: options.functions ?? this.settings.functions,
-                functionCall: options.functionCall ?? this.settings.functionCall,
-                tools: options.tools ?? this.settings.tools,
-                toolChoice: options.toolChoice ?? this.settings.toolChoice,
-                // map to OpenAI API names:
-                stop: this.settings.stopSequences,
-                maxTokens: this.settings.maxCompletionTokens,
-                openAIResponseFormat: this.settings.responseFormat,
-                // other settings:
-                user: this.settings.isUserIdForwardingEnabled
-                    ? options.run?.userId
-                    : undefined,
-                abortSignal: options.run?.abortSignal,
-                responseFormat: options.responseFormat,
-                messages,
-            }),
-        });
-    }
     get settingsForEvent() {
         const eventSettingProperties = [
             "stopSequences",
@@ -238,87 +205,6 @@ export class OpenAIChatModel extends AbstractModel {
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-    async doGenerateText(prompt, options) {
-        const response = await this.callAPI(prompt, {
-            ...options,
-            responseFormat: OpenAIChatResponseFormat.json,
-        });
-        return {
-            response,
-            text: response.choices[0].message.content,
-            usage: this.extractUsage(response),
-        };
-    }
-    doStreamText(prompt, options) {
-        return this.callAPI(prompt, {
-            ...options,
-            responseFormat: OpenAIChatResponseFormat.textDeltaIterable,
-        });
-    }
-    async doGenerateToolCall(tool, prompt, options) {
-        const response = await this.callAPI(prompt, {
-            ...options,
-            responseFormat: OpenAIChatResponseFormat.json,
-            toolChoice: {
-                type: "function",
-                function: { name: tool.name },
-            },
-            tools: [
-                {
-                    type: "function",
-                    function: {
-                        name: tool.name,
-                        description: tool.description,
-                        parameters: tool.parameters.getJsonSchema(),
-                    },
-                },
-            ],
-        });
-        const toolCalls = response.choices[0]?.message.tool_calls;
-        return {
-            response,
-            toolCall: toolCalls == null || toolCalls.length === 0
-                ? null
-                : {
-                    id: toolCalls[0].id,
-                    args: parseJSON({ text: toolCalls[0].function.arguments }),
-                },
-            usage: this.extractUsage(response),
-        };
-    }
-    async doGenerateToolCallsOrText(tools, prompt, options) {
-        const response = await this.callAPI(prompt, {
-            ...options,
-            responseFormat: OpenAIChatResponseFormat.json,
-            toolChoice: "auto",
-            tools: tools.map((tool) => ({
-                type: "function",
-                function: {
-                    name: tool.name,
-                    description: tool.description,
-                    parameters: tool.parameters.getJsonSchema(),
-                },
-            })),
-        });
-        const message = response.choices[0]?.message;
-        return {
-            response,
-            text: message.content ?? null,
-            toolCalls: message.tool_calls?.map((toolCall) => ({
-                id: toolCall.id,
-                name: toolCall.function.name,
-                args: parseJSON({ text: toolCall.function.arguments }),
-            })) ?? null,
-            usage: this.extractUsage(response),
-        };
-    }
-    extractUsage(response) {
-        return {
-            promptTokens: response.usage.prompt_tokens,
-            completionTokens: response.usage.completion_tokens,
-            totalTokens: response.usage.total_tokens,
-        };
-    }
     asFunctionCallStructureGenerationModel({ fnName, fnDescription, }) {
         return new OpenAIChatFunctionCallStructureGenerationModel({
             model: this,
@@ -366,102 +252,3 @@ export class OpenAIChatModel extends AbstractModel {
         return new OpenAIChatModel(Object.assign({}, this.settings, additionalSettings));
     }
 }
-const openAIChatResponseSchema = z.object({
-    id: z.string(),
-    choices: z.array(z.object({
-        message: z.object({
-            role: z.literal("assistant"),
-            content: z.string().nullable(),
-            function_call: z
-                .object({
-                name: z.string(),
-                arguments: z.string(),
-            })
-                .optional(),
-            tool_calls: z
-                .array(z.object({
-                id: z.string(),
-                type: z.literal("function"),
-                function: z.object({
-                    name: z.string(),
-                    arguments: z.string(),
-                }),
-            }))
-                .optional(),
-        }),
-        index: z.number(),
-        logprobs: z.nullable(z.any()),
-        finish_reason: z
-            .enum([
-            "stop",
-            "length",
-            "tool_calls",
-            "content_filter",
-            "function_call",
-        ])
-            .optional()
-            .nullable(),
-    })),
-    created: z.number(),
-    model: z.string(),
-    system_fingerprint: z.string().optional(),
-    object: z.literal("chat.completion"),
-    usage: z.object({
-        prompt_tokens: z.number(),
-        completion_tokens: z.number(),
-        total_tokens: z.number(),
-    }),
-});
-async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, tools, toolChoice, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
-    // empty arrays are not allowed for stop:
-    if (stop != null && Array.isArray(stop) && stop.length === 0) {
-        stop = undefined;
-    }
-    return postJsonToApi({
-        url: api.assembleUrl("/chat/completions"),
-        headers: api.headers,
-        body: {
-            stream: responseFormat.stream,
-            model,
-            messages,
-            functions,
-            function_call: functionCall,
-            tools,
-            tool_choice: toolChoice,
-            temperature,
-            top_p: topP,
-            n,
-            stop,
-            max_tokens: maxTokens,
-            presence_penalty: presencePenalty,
-            frequency_penalty: frequencyPenalty,
-            logit_bias: logitBias,
-            seed,
-            response_format: openAIResponseFormat,
-            user,
-        },
-        failedResponseHandler: failedOpenAICallResponseHandler,
-        successfulResponseHandler: responseFormat.handler,
-        abortSignal,
-    });
-}
-export const OpenAIChatResponseFormat = {
-    /**
-     * Returns the response as a JSON object.
-     */
-    json: {
-        stream: false,
-        handler: createJsonResponseHandler(openAIChatResponseSchema),
-    },
-    /**
-     * Returns an async iterable over the text deltas (only the tex different of the first choice).
-     */
-    textDeltaIterable: {
-        stream: true,
-        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta.content ?? ""),
-    },
-    structureDeltaIterable: {
-        stream: true,
-        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => parsePartialJson(delta[0]?.function_call?.arguments)),
-    },
-};
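Net effect for this file: `callAPI`, the `doGenerate*`/`doStream*` methods, the zod response schema, and `OpenAIChatResponseFormat` all move into `AbstractOpenAIChatModel`, so `OpenAIChatModel`'s public surface is unchanged. Existing caller code in the style of the file's own JSDoc example should keep working as-is (a sketch; the model id is illustrative):

```ts
import { OpenAIChatModel, OpenAIChatMessage, generateText } from "modelfusion";

// Same constructor and settings as before the refactor; only the
// implementation moved into the AbstractOpenAIChatModel base class.
const model = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  temperature: 0.7,
  maxCompletionTokens: 500,
});

const text = await generateText([
  model,
  OpenAIChatMessage.system("Write a short story about a robot learning to love:"),
]);
```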
--- package/model-provider/openai/index.cjs
+++ package/model-provider/openai/index.cjs
@@ -39,6 +39,7 @@ __exportStar(require("./OpenAISpeechModel.cjs"), exports);
 __exportStar(require("./OpenAITextEmbeddingModel.cjs"), exports);
 __exportStar(require("./OpenAITranscriptionModel.cjs"), exports);
 __exportStar(require("./TikTokenTokenizer.cjs"), exports);
+__exportStar(require("./chat/AbstractOpenAIChatModel.cjs"), exports);
 __exportStar(require("./chat/OpenAIChatMessage.cjs"), exports);
 __exportStar(require("./chat/OpenAIChatModel.cjs"), exports);
 exports.OpenAIChatPromptFormat = __importStar(require("./chat/OpenAIChatPromptFormat.cjs"));
--- package/model-provider/openai/index.d.ts
+++ package/model-provider/openai/index.d.ts
@@ -9,6 +9,7 @@ export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
+export * from "./chat/AbstractOpenAIChatModel.js";
 export * from "./chat/OpenAIChatMessage.js";
 export * from "./chat/OpenAIChatModel.js";
 export * as OpenAIChatPromptFormat from "./chat/OpenAIChatPromptFormat.js";
--- package/model-provider/openai/index.js
+++ package/model-provider/openai/index.js
@@ -9,6 +9,7 @@ export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
 export * from "./OpenAITranscriptionModel.js";
 export * from "./TikTokenTokenizer.js";
+export * from "./chat/AbstractOpenAIChatModel.js";
 export * from "./chat/OpenAIChatMessage.js";
 export * from "./chat/OpenAIChatModel.js";
 export * as OpenAIChatPromptFormat from "./chat/OpenAIChatPromptFormat.js";
--- /dev/null
+++ package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs
@@ -0,0 +1,29 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FireworksAIApiConfiguration = void 0;
+const BaseUrlApiConfiguration_js_1 = require("../../core/api/BaseUrlApiConfiguration.cjs");
+const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
+/**
+ * Configuration for the Fireworks.ai API.
+ *
+ * It uses the `FIREWORKS_API_KEY` api key environment variable.
+ *
+ * @see https://readme.fireworks.ai/docs/openai-compatibility
+ */
+class FireworksAIApiConfiguration extends BaseUrlApiConfiguration_js_1.BaseUrlApiConfiguration {
+    constructor({ baseUrl = "https://api.fireworks.ai/inference/v1", apiKey, retry, throttle, } = {}) {
+        super({
+            baseUrl,
+            headers: {
+                Authorization: `Bearer ${(0, loadApiKey_js_1.loadApiKey)({
+                    apiKey,
+                    environmentVariableName: "FIREWORKS_API_KEY",
+                    description: "Fireworks AI",
+                })}`,
+            },
+            retry,
+            throttle,
+        });
+    }
+}
+exports.FireworksAIApiConfiguration = FireworksAIApiConfiguration;
--- /dev/null
+++ package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts
@@ -0,0 +1,18 @@
+import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+import { RetryFunction } from "../../core/api/RetryFunction.js";
+import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
+/**
+ * Configuration for the Fireworks.ai API.
+ *
+ * It uses the `FIREWORKS_API_KEY` api key environment variable.
+ *
+ * @see https://readme.fireworks.ai/docs/openai-compatibility
+ */
+export declare class FireworksAIApiConfiguration extends BaseUrlApiConfiguration {
+    constructor({ baseUrl, apiKey, retry, throttle, }?: {
+        baseUrl?: string;
+        apiKey?: string;
+        retry?: RetryFunction;
+        throttle?: ThrottleFunction;
+    });
+}
--- /dev/null
+++ package/model-provider/openai-compatible/FireworksAIApiConfiguration.js
@@ -0,0 +1,25 @@
+import { BaseUrlApiConfiguration } from "../../core/api/BaseUrlApiConfiguration.js";
+import { loadApiKey } from "../../core/api/loadApiKey.js";
+/**
+ * Configuration for the Fireworks.ai API.
+ *
+ * It uses the `FIREWORKS_API_KEY` api key environment variable.
+ *
+ * @see https://readme.fireworks.ai/docs/openai-compatibility
+ */
+export class FireworksAIApiConfiguration extends BaseUrlApiConfiguration {
+    constructor({ baseUrl = "https://api.fireworks.ai/inference/v1", apiKey, retry, throttle, } = {}) {
+        super({
+            baseUrl,
+            headers: {
+                Authorization: `Bearer ${loadApiKey({
+                    apiKey,
+                    environmentVariableName: "FIREWORKS_API_KEY",
+                    description: "Fireworks AI",
+                })}`,
+            },
+            retry,
+            throttle,
+        });
+    }
+}
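Both module variants implement the same configuration. A short sketch of how it is constructed, assuming the class is re-exported from the package root (the default `baseUrl` is the one shown above; the explicit key is a placeholder):

```ts
import { FireworksAIApiConfiguration } from "modelfusion";

// Default: the API key is loaded from the FIREWORKS_API_KEY environment variable.
const api = new FireworksAIApiConfiguration();

// All constructor fields are optional overrides.
const apiExplicit = new FireworksAIApiConfiguration({
  apiKey: "<your-fireworks-api-key>", // placeholder
  baseUrl: "https://api.fireworks.ai/inference/v1", // the default shown above
});
```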
--- /dev/null
+++ package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs
@@ -0,0 +1,100 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.OpenAICompatibleChatModel = void 0;
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
+const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
+const AbstractOpenAIChatModel_js_1 = require("../openai/chat/AbstractOpenAIChatModel.cjs");
+const OpenAIChatPromptFormat_js_1 = require("../openai/chat/OpenAIChatPromptFormat.cjs");
+/**
+ * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/chat/create
+ */
+class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpenAIChatModel {
+    constructor(settings) {
+        super(settings);
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "tokenizer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "countPromptTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+    }
+    get provider() {
+        return this.settings.provider ?? "openaicompatible";
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "stopSequences",
+            "maxCompletionTokens",
+            "functions",
+            "functionCall",
+            "temperature",
+            "topP",
+            "n",
+            "presencePenalty",
+            "frequencyPenalty",
+            "logitBias",
+            "seed",
+            "responseFormat",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
+    asStructureGenerationModel(promptFormat) {
+        return new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+            model: this,
+            format: promptFormat,
+        });
+    }
+    /**
+     * Returns this model with a text prompt format.
+     */
+    withTextPrompt() {
+        return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.text)());
+    }
+    /**
+     * Returns this model with an instruction prompt format.
+     */
+    withInstructionPrompt() {
+        return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.instruction)());
+    }
+    /**
+     * Returns this model with a chat prompt format.
+     */
+    withChatPrompt() {
+        return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.chat)());
+    }
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptFormat.stopSequences,
+                ],
+            }),
+            promptFormat,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new OpenAICompatibleChatModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
+exports.OpenAICompatibleChatModel = OpenAICompatibleChatModel;
--- /dev/null
+++ package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts
@@ -0,0 +1,45 @@
+import { StructureFromTextPromptFormat } from "../../model-function/generate-structure/StructureFromTextPromptFormat.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
+import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
+import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt } from "../openai/chat/AbstractOpenAIChatModel.js";
+export type OpenAICompatibleProviderName = `openaicompatible` | `openaicompatible-${string}`;
+export interface OpenAICompatibleChatSettings extends TextGenerationModelSettings, Omit<AbstractOpenAIChatCallSettings, "stop" | "maxTokens"> {
+    provider?: OpenAICompatibleProviderName;
+    isUserIdForwardingEnabled?: boolean;
+}
+/**
+ * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/chat/create
+ */
+export declare class OpenAICompatibleChatModel extends AbstractOpenAIChatModel<OpenAICompatibleChatSettings> implements TextStreamingModel<OpenAIChatPrompt, OpenAICompatibleChatSettings>, ToolCallGenerationModel<OpenAIChatPrompt, OpenAICompatibleChatSettings>, ToolCallsOrTextGenerationModel<OpenAIChatPrompt, OpenAICompatibleChatSettings> {
+    constructor(settings: OpenAICompatibleChatSettings);
+    get provider(): OpenAICompatibleProviderName;
+    get modelName(): string;
+    readonly contextWindowSize: undefined;
+    readonly tokenizer: undefined;
+    readonly countPromptTokens: undefined;
+    get settingsForEvent(): Partial<OpenAICompatibleChatSettings>;
+    asStructureGenerationModel<INPUT_PROMPT>(promptFormat: StructureFromTextPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, this>;
+    /**
+     * Returns this model with a text prompt format.
+     */
+    withTextPrompt(): PromptFormatTextStreamingModel<string, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+    /**
+     * Returns this model with an instruction prompt format.
+     */
+    withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").MultiModalInstructionPrompt | import("../../index.js").TextInstructionPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+    /**
+     * Returns this model with a chat prompt format.
+     */
+    withChatPrompt(): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, OpenAIChatPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
+    withSettings(additionalSettings: Partial<OpenAICompatibleChatSettings>): this;
+}
--- /dev/null
+++ package/model-provider/openai-compatible/OpenAICompatibleChatModel.js
@@ -0,0 +1,96 @@
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
+import { AbstractOpenAIChatModel, } from "../openai/chat/AbstractOpenAIChatModel.js";
+import { chat, instruction, text, } from "../openai/chat/OpenAIChatPromptFormat.js";
+/**
+ * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/chat/create
+ */
+export class OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
+    constructor(settings) {
+        super(settings);
+        Object.defineProperty(this, "contextWindowSize", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "tokenizer", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+        Object.defineProperty(this, "countPromptTokens", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: undefined
+        });
+    }
+    get provider() {
+        return this.settings.provider ?? "openaicompatible";
+    }
+    get modelName() {
+        return this.settings.model;
+    }
+    get settingsForEvent() {
+        const eventSettingProperties = [
+            "stopSequences",
+            "maxCompletionTokens",
+            "functions",
+            "functionCall",
+            "temperature",
+            "topP",
+            "n",
+            "presencePenalty",
+            "frequencyPenalty",
+            "logitBias",
+            "seed",
+            "responseFormat",
+        ];
+        return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
+    }
+    asStructureGenerationModel(promptFormat) {
+        return new StructureFromTextStreamingModel({
+            model: this,
+            format: promptFormat,
+        });
+    }
+    /**
+     * Returns this model with a text prompt format.
+     */
+    withTextPrompt() {
+        return this.withPromptFormat(text());
+    }
+    /**
+     * Returns this model with an instruction prompt format.
+     */
+    withInstructionPrompt() {
+        return this.withPromptFormat(instruction());
+    }
+    /**
+     * Returns this model with a chat prompt format.
+     */
+    withChatPrompt() {
+        return this.withPromptFormat(chat());
+    }
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextStreamingModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptFormat.stopSequences,
+                ],
+            }),
+            promptFormat,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new OpenAICompatibleChatModel(Object.assign({}, this.settings, additionalSettings));
+    }
+}
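The `with*Prompt()` helpers wrap the model in a `PromptFormatTextStreamingModel`, merging the format's stop sequences into the settings. A sketch, assuming `streamText` from the package root accepts the wrapped model and yields text deltas (not shown in this diff):

```ts
import { openaicompatible, streamText } from "modelfusion";

// withTextPrompt() lets callers pass a plain string instead of an
// OpenAI-style chat message array.
const model = openaicompatible
  .ChatTextGenerator({ model: "provider-specific-model-name" })
  .withTextPrompt();

const textStream = await streamText(model, "Explain the refactoring briefly.");
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```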
--- /dev/null
+++ package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs
@@ -0,0 +1,30 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ChatTextGenerator = void 0;
+const OpenAICompatibleChatModel_js_1 = require("./OpenAICompatibleChatModel.cjs");
+/**
+ * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/chat/create
+ *
+ * @example
+ * const model = openaicompatible.ChatTextGenerator({
+ *   model: "provider-specific-model-name",
+ *   temperature: 0.7,
+ *   maxCompletionTokens: 500,
+ * });
+ *
+ * const text = await generateText([
+ *   model,
+ *   OpenAIChatMessage.system(
+ *     "Write a short story about a robot learning to love:"
+ *   ),
+ * ]);
+ */
+function ChatTextGenerator(settings) {
+    return new OpenAICompatibleChatModel_js_1.OpenAICompatibleChatModel(settings);
+}
+exports.ChatTextGenerator = ChatTextGenerator;
--- /dev/null
+++ package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts
@@ -0,0 +1,24 @@
+import { OpenAICompatibleChatModel, OpenAICompatibleChatSettings } from "./OpenAICompatibleChatModel.js";
+/**
+ * Create a text generation model that calls an API that is compatible with OpenAI's chat API.
+ *
+ * Please note that many providers implement the API with slight differences, which can cause
+ * unexpected errors and different behavior in less common scenarios.
+ *
+ * @see https://platform.openai.com/docs/api-reference/chat/create
+ *
+ * @example
+ * const model = openaicompatible.ChatTextGenerator({
+ *   model: "provider-specific-model-name",
+ *   temperature: 0.7,
+ *   maxCompletionTokens: 500,
+ * });
+ *
+ * const text = await generateText([
+ *   model,
+ *   OpenAIChatMessage.system(
+ *     "Write a short story about a robot learning to love:"
+ *   ),
+ * ]);
+ */
+export declare function ChatTextGenerator(settings: OpenAICompatibleChatSettings): OpenAICompatibleChatModel;