modelfusion 0.82.0 → 0.84.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -10
- package/core/schema/UncheckedSchema.cjs +5 -1
- package/core/schema/UncheckedSchema.d.ts +1 -0
- package/core/schema/UncheckedSchema.js +3 -0
- package/model-function/AbstractModel.d.ts +1 -1
- package/model-function/generate-text/prompt-format/InstructionPrompt.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +1 -1
- package/model-provider/index.cjs +1 -0
- package/model-provider/index.d.ts +1 -0
- package/model-provider/index.js +1 -0
- package/model-provider/llamacpp/LlamaCppBakLLaVA1Format.d.ts +2 -2
- package/model-provider/openai/AzureOpenAIApiConfiguration.cjs +1 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.d.ts +1 -1
- package/model-provider/openai/AzureOpenAIApiConfiguration.js +1 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +228 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +467 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +224 -0
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.cjs +3 -3
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +6 -6
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -218
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +11 -460
- package/model-provider/openai/chat/OpenAIChatModel.js +4 -217
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
- package/model-provider/openai/index.cjs +1 -0
- package/model-provider/openai/index.d.ts +1 -0
- package/model-provider/openai/index.js +1 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.cjs +29 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.d.ts +18 -0
- package/model-provider/openai-compatible/FireworksAIApiConfiguration.js +25 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +100 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +45 -0
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +96 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +30 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +24 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +26 -0
- package/model-provider/openai-compatible/index.cjs +32 -0
- package/model-provider/openai-compatible/index.d.ts +3 -0
- package/model-provider/openai-compatible/index.js +3 -0
- package/model-provider/stability/StabilityImageGenerationModel.cjs +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/model-provider/stability/StabilityImageGenerationModel.js +1 -1
- package/package.json +1 -1
package/README.md
CHANGED
@@ -58,7 +58,7 @@ const text = await generateText(
|
|
58
58
|
);
|
59
59
|
```
|
60
60
|
|
61
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
|
61
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Hugging Face](https://modelfusion.dev/integration/model-provider/huggingface), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
|
62
62
|
|
63
63
|
#### streamText
|
64
64
|
|
@@ -75,7 +75,7 @@ for await (const textPart of textStream) {
|
|
75
75
|
}
|
76
76
|
```
|
77
77
|
|
78
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
|
78
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic)
|
79
79
|
|
80
80
|
#### streamText with multi-modal prompt
|
81
81
|
|
@@ -95,7 +95,7 @@ const textStream = await streamText(
|
|
95
95
|
);
|
96
96
|
```
|
97
97
|
|
98
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
|
98
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
|
99
99
|
|
100
100
|
### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
|
101
101
|
|
@@ -196,6 +196,7 @@ Generate a structure that matches a schema.
|
|
196
196
|
import { zodSchema, generateStructure, openai } from "modelfusion";
|
197
197
|
|
198
198
|
const sentiment = await generateStructure(
|
199
|
+
// model:
|
199
200
|
openai
|
200
201
|
.ChatTextGenerator({
|
201
202
|
model: "gpt-3.5-turbo",
|
@@ -205,6 +206,7 @@ const sentiment = await generateStructure(
|
|
205
206
|
.asFunctionCallStructureGenerationModel({ fnName: "sentiment" })
|
206
207
|
.withInstructionPrompt(),
|
207
208
|
|
209
|
+
// schema:
|
208
210
|
zodSchema(
|
209
211
|
z.object({
|
210
212
|
sentiment: z
|
@@ -213,6 +215,7 @@ const sentiment = await generateStructure(
|
|
213
215
|
})
|
214
216
|
),
|
215
217
|
|
218
|
+
// prompt:
|
216
219
|
{
|
217
220
|
system:
|
218
221
|
"You are a sentiment evaluator. " +
|
@@ -224,7 +227,7 @@ const sentiment = await generateStructure(
|
|
224
227
|
);
|
225
228
|
```
|
226
229
|
|
227
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
|
230
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)
|
228
231
|
|
229
232
|
#### streamStructure
|
230
233
|
|
@@ -270,7 +273,7 @@ for await (const part of structureStream) {
|
|
270
273
|
}
|
271
274
|
```
|
272
275
|
|
273
|
-
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai)
|
276
|
+
Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)
|
274
277
|
|
275
278
|
### [Embed Value](https://modelfusion.dev/guide/function/embed)
|
276
279
|
|
@@ -591,15 +594,15 @@ const image = await generateImage(
|
|
591
594
|
.ImageGenerator({
|
592
595
|
//...
|
593
596
|
})
|
594
|
-
.
|
597
|
+
.withTextPrompt(),
|
595
598
|
"the wicked witch of the west in the style of early 19th century painting"
|
596
599
|
);
|
597
600
|
```
|
598
601
|
|
599
|
-
| Prompt Format |
|
600
|
-
| ------------- |
|
601
|
-
| Automatic1111 | ✅
|
602
|
-
| Stability | ✅
|
602
|
+
| Prompt Format | Text Prompt |
|
603
|
+
| ------------- | ----------- |
|
604
|
+
| Automatic1111 | ✅ |
|
605
|
+
| Stability | ✅ |
|
603
606
|
|
604
607
|
### Metadata and original responses
|
605
608
|
|
@@ -1,6 +1,10 @@
|
|
1
1
|
"use strict";
|
2
2
|
Object.defineProperty(exports, "__esModule", { value: true });
|
3
|
-
exports.UncheckedSchema = void 0;
|
3
|
+
exports.UncheckedSchema = exports.uncheckedSchema = void 0;
|
4
|
+
function uncheckedSchema(jsonSchema) {
|
5
|
+
return new UncheckedSchema(jsonSchema);
|
6
|
+
}
|
7
|
+
exports.uncheckedSchema = uncheckedSchema;
|
4
8
|
class UncheckedSchema {
|
5
9
|
constructor(jsonSchema) {
|
6
10
|
Object.defineProperty(this, "jsonSchema", {
|
@@ -1,5 +1,6 @@
|
|
1
1
|
import { JsonSchemaProducer } from "./JsonSchemaProducer.js";
|
2
2
|
import { Schema } from "./Schema.js";
|
3
|
+
export declare function uncheckedSchema<STRUCTURE>(jsonSchema?: unknown): UncheckedSchema<STRUCTURE>;
|
3
4
|
export declare class UncheckedSchema<STRUCTURE> implements Schema<STRUCTURE>, JsonSchemaProducer {
|
4
5
|
private readonly jsonSchema?;
|
5
6
|
constructor(jsonSchema?: unknown);
|
@@ -1,6 +1,7 @@
|
|
1
1
|
import { ModelInformation } from "./ModelInformation.js";
|
2
2
|
import { Model, ModelSettings } from "./Model.js";
|
3
3
|
export declare abstract class AbstractModel<SETTINGS extends ModelSettings> implements Model<SETTINGS> {
|
4
|
+
readonly settings: SETTINGS;
|
4
5
|
constructor({ settings }: {
|
5
6
|
settings: SETTINGS;
|
6
7
|
});
|
@@ -8,6 +9,5 @@ export declare abstract class AbstractModel<SETTINGS extends ModelSettings> impl
|
|
8
9
|
abstract readonly modelName: string | null;
|
9
10
|
get modelInformation(): ModelInformation;
|
10
11
|
abstract get settingsForEvent(): Partial<SETTINGS>;
|
11
|
-
readonly settings: SETTINGS;
|
12
12
|
abstract withSettings(additionalSettings: Partial<SETTINGS>): this;
|
13
13
|
}
|
@@ -4,7 +4,7 @@ import { MultiModalInput } from "./Content.js";
|
|
4
4
|
* the role and behavior of the language model.
|
5
5
|
* The instruction is a multi-model input (`array` of content).
|
6
6
|
*/
|
7
|
-
export interface
|
7
|
+
export interface MultiModalInstructionPrompt {
|
8
8
|
/**
|
9
9
|
* Optional system message to provide context for the language model. Note that for some models,
|
10
10
|
* changing the system message can impact the results, because the model may be trained on the default system message.
|
@@ -53,7 +53,7 @@ class Automatic1111ImageGenerationModel extends AbstractModel_js_1.AbstractModel
|
|
53
53
|
base64Image: response.images[0],
|
54
54
|
};
|
55
55
|
}
|
56
|
-
|
56
|
+
withTextPrompt() {
|
57
57
|
return this.withPromptFormat((0, Automatic1111ImageGenerationPrompt_js_1.mapBasicPromptToAutomatic1111Format)());
|
58
58
|
}
|
59
59
|
withPromptFormat(promptFormat) {
|
@@ -25,7 +25,7 @@ export declare class Automatic1111ImageGenerationModel extends AbstractModel<Aut
|
|
25
25
|
};
|
26
26
|
base64Image: string;
|
27
27
|
}>;
|
28
|
-
|
28
|
+
withTextPrompt(): PromptFormatImageGenerationModel<string, Automatic1111ImageGenerationPrompt, Automatic1111ImageGenerationSettings, this>;
|
29
29
|
withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, Automatic1111ImageGenerationPrompt>): PromptFormatImageGenerationModel<INPUT_PROMPT, Automatic1111ImageGenerationPrompt, Automatic1111ImageGenerationSettings, this>;
|
30
30
|
withSettings(additionalSettings: Automatic1111ImageGenerationSettings): this;
|
31
31
|
}
|
@@ -50,7 +50,7 @@ export class Automatic1111ImageGenerationModel extends AbstractModel {
|
|
50
50
|
base64Image: response.images[0],
|
51
51
|
};
|
52
52
|
}
|
53
|
-
|
53
|
+
withTextPrompt() {
|
54
54
|
return this.withPromptFormat(mapBasicPromptToAutomatic1111Format());
|
55
55
|
}
|
56
56
|
withPromptFormat(promptFormat) {
|
package/model-provider/index.cjs
CHANGED
@@ -23,4 +23,5 @@ __exportStar(require("./llamacpp/index.cjs"), exports);
|
|
23
23
|
__exportStar(require("./lmnt/index.cjs"), exports);
|
24
24
|
__exportStar(require("./ollama/index.cjs"), exports);
|
25
25
|
__exportStar(require("./openai/index.cjs"), exports);
|
26
|
+
__exportStar(require("./openai-compatible/index.cjs"), exports);
|
26
27
|
__exportStar(require("./stability/index.cjs"), exports);
|
package/model-provider/index.js
CHANGED
@@ -1,4 +1,4 @@
|
|
1
|
-
import {
|
1
|
+
import { MultiModalInstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
|
2
2
|
import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
|
3
3
|
import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
|
4
4
|
/**
|
@@ -6,4 +6,4 @@ import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
|
|
6
6
|
*
|
7
7
|
* @see https://github.com/SkunkworksAI/BakLLaVA
|
8
8
|
*/
|
9
|
-
export declare function instruction(): TextGenerationPromptFormat<
|
9
|
+
export declare function instruction(): TextGenerationPromptFormat<MultiModalInstructionPrompt, LlamaCppTextGenerationPrompt>;
|
@@ -4,7 +4,7 @@ exports.AzureOpenAIApiConfiguration = void 0;
|
|
4
4
|
const AbstractApiConfiguration_js_1 = require("../../core/api/AbstractApiConfiguration.cjs");
|
5
5
|
const loadApiKey_js_1 = require("../../core/api/loadApiKey.cjs");
|
6
6
|
/**
|
7
|
-
*
|
7
|
+
* Configuration for the Azure OpenAI API. This class is responsible for constructing URLs specific to the Azure OpenAI deployment.
|
8
8
|
* It creates URLs of the form
|
9
9
|
* `https://[resourceName].openai.azure.com/openai/deployments/[deploymentId]/[path]?api-version=[apiVersion]`
|
10
10
|
*
|
@@ -2,7 +2,7 @@ import { AbstractApiConfiguration } from "../../core/api/AbstractApiConfiguratio
|
|
2
2
|
import { RetryFunction } from "../../core/api/RetryFunction.js";
|
3
3
|
import { ThrottleFunction } from "../../core/api/ThrottleFunction.js";
|
4
4
|
/**
|
5
|
-
*
|
5
|
+
* Configuration for the Azure OpenAI API. This class is responsible for constructing URLs specific to the Azure OpenAI deployment.
|
6
6
|
* It creates URLs of the form
|
7
7
|
* `https://[resourceName].openai.azure.com/openai/deployments/[deploymentId]/[path]?api-version=[apiVersion]`
|
8
8
|
*
|
@@ -1,7 +1,7 @@
|
|
1
1
|
import { AbstractApiConfiguration } from "../../core/api/AbstractApiConfiguration.js";
|
2
2
|
import { loadApiKey } from "../../core/api/loadApiKey.js";
|
3
3
|
/**
|
4
|
-
*
|
4
|
+
* Configuration for the Azure OpenAI API. This class is responsible for constructing URLs specific to the Azure OpenAI deployment.
|
5
5
|
* It creates URLs of the form
|
6
6
|
* `https://[resourceName].openai.azure.com/openai/deployments/[deploymentId]/[path]?api-version=[apiVersion]`
|
7
7
|
*
|
@@ -0,0 +1,228 @@
|
|
1
|
+
"use strict";
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
3
|
+
exports.OpenAIChatResponseFormat = exports.AbstractOpenAIChatModel = void 0;
|
4
|
+
const zod_1 = require("zod");
|
5
|
+
const callWithRetryAndThrottle_js_1 = require("../../../core/api/callWithRetryAndThrottle.cjs");
|
6
|
+
const postToApi_js_1 = require("../../../core/api/postToApi.cjs");
|
7
|
+
const parseJSON_js_1 = require("../../../core/schema/parseJSON.cjs");
|
8
|
+
const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
|
9
|
+
const parsePartialJson_js_1 = require("../../../model-function/generate-structure/parsePartialJson.cjs");
|
10
|
+
const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
|
11
|
+
const OpenAIError_js_1 = require("../OpenAIError.cjs");
|
12
|
+
const OpenAIChatStreamIterable_js_1 = require("./OpenAIChatStreamIterable.cjs");
|
13
|
+
/**
|
14
|
+
* Abstract text generation model that calls an API that is compatible with the OpenAI chat API.
|
15
|
+
*
|
16
|
+
* @see https://platform.openai.com/docs/api-reference/chat/create
|
17
|
+
*/
|
18
|
+
class AbstractOpenAIChatModel extends AbstractModel_js_1.AbstractModel {
|
19
|
+
constructor(settings) {
|
20
|
+
super({ settings });
|
21
|
+
}
|
22
|
+
async callAPI(messages, options) {
|
23
|
+
return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
|
24
|
+
retry: this.settings.api?.retry,
|
25
|
+
throttle: this.settings.api?.throttle,
|
26
|
+
call: async () => callOpenAIChatCompletionAPI({
|
27
|
+
...this.settings,
|
28
|
+
// function & tool calling:
|
29
|
+
functions: options.functions ?? this.settings.functions,
|
30
|
+
functionCall: options.functionCall ?? this.settings.functionCall,
|
31
|
+
tools: options.tools ?? this.settings.tools,
|
32
|
+
toolChoice: options.toolChoice ?? this.settings.toolChoice,
|
33
|
+
// map to OpenAI API names:
|
34
|
+
stop: this.settings.stopSequences,
|
35
|
+
maxTokens: this.settings.maxCompletionTokens,
|
36
|
+
openAIResponseFormat: this.settings.responseFormat,
|
37
|
+
// other settings:
|
38
|
+
user: this.settings.isUserIdForwardingEnabled
|
39
|
+
? options.run?.userId
|
40
|
+
: undefined,
|
41
|
+
abortSignal: options.run?.abortSignal,
|
42
|
+
responseFormat: options.responseFormat,
|
43
|
+
messages,
|
44
|
+
}),
|
45
|
+
});
|
46
|
+
}
|
47
|
+
async doGenerateText(prompt, options) {
|
48
|
+
const response = await this.callAPI(prompt, {
|
49
|
+
...options,
|
50
|
+
responseFormat: exports.OpenAIChatResponseFormat.json,
|
51
|
+
});
|
52
|
+
return {
|
53
|
+
response,
|
54
|
+
text: response.choices[0].message.content,
|
55
|
+
usage: this.extractUsage(response),
|
56
|
+
};
|
57
|
+
}
|
58
|
+
doStreamText(prompt, options) {
|
59
|
+
return this.callAPI(prompt, {
|
60
|
+
...options,
|
61
|
+
responseFormat: exports.OpenAIChatResponseFormat.textDeltaIterable,
|
62
|
+
});
|
63
|
+
}
|
64
|
+
async doGenerateToolCall(tool, prompt, options) {
|
65
|
+
const response = await this.callAPI(prompt, {
|
66
|
+
...options,
|
67
|
+
responseFormat: exports.OpenAIChatResponseFormat.json,
|
68
|
+
toolChoice: {
|
69
|
+
type: "function",
|
70
|
+
function: { name: tool.name },
|
71
|
+
},
|
72
|
+
tools: [
|
73
|
+
{
|
74
|
+
type: "function",
|
75
|
+
function: {
|
76
|
+
name: tool.name,
|
77
|
+
description: tool.description,
|
78
|
+
parameters: tool.parameters.getJsonSchema(),
|
79
|
+
},
|
80
|
+
},
|
81
|
+
],
|
82
|
+
});
|
83
|
+
const toolCalls = response.choices[0]?.message.tool_calls;
|
84
|
+
return {
|
85
|
+
response,
|
86
|
+
toolCall: toolCalls == null || toolCalls.length === 0
|
87
|
+
? null
|
88
|
+
: {
|
89
|
+
id: toolCalls[0].id,
|
90
|
+
args: (0, parseJSON_js_1.parseJSON)({ text: toolCalls[0].function.arguments }),
|
91
|
+
},
|
92
|
+
usage: this.extractUsage(response),
|
93
|
+
};
|
94
|
+
}
|
95
|
+
async doGenerateToolCallsOrText(tools, prompt, options) {
|
96
|
+
const response = await this.callAPI(prompt, {
|
97
|
+
...options,
|
98
|
+
responseFormat: exports.OpenAIChatResponseFormat.json,
|
99
|
+
toolChoice: "auto",
|
100
|
+
tools: tools.map((tool) => ({
|
101
|
+
type: "function",
|
102
|
+
function: {
|
103
|
+
name: tool.name,
|
104
|
+
description: tool.description,
|
105
|
+
parameters: tool.parameters.getJsonSchema(),
|
106
|
+
},
|
107
|
+
})),
|
108
|
+
});
|
109
|
+
const message = response.choices[0]?.message;
|
110
|
+
return {
|
111
|
+
response,
|
112
|
+
text: message.content ?? null,
|
113
|
+
toolCalls: message.tool_calls?.map((toolCall) => ({
|
114
|
+
id: toolCall.id,
|
115
|
+
name: toolCall.function.name,
|
116
|
+
args: (0, parseJSON_js_1.parseJSON)({ text: toolCall.function.arguments }),
|
117
|
+
})) ?? null,
|
118
|
+
usage: this.extractUsage(response),
|
119
|
+
};
|
120
|
+
}
|
121
|
+
extractUsage(response) {
|
122
|
+
return {
|
123
|
+
promptTokens: response.usage.prompt_tokens,
|
124
|
+
completionTokens: response.usage.completion_tokens,
|
125
|
+
totalTokens: response.usage.total_tokens,
|
126
|
+
};
|
127
|
+
}
|
128
|
+
}
|
129
|
+
exports.AbstractOpenAIChatModel = AbstractOpenAIChatModel;
|
130
|
+
const openAIChatResponseSchema = zod_1.z.object({
|
131
|
+
id: zod_1.z.string(),
|
132
|
+
choices: zod_1.z.array(zod_1.z.object({
|
133
|
+
message: zod_1.z.object({
|
134
|
+
role: zod_1.z.literal("assistant"),
|
135
|
+
content: zod_1.z.string().nullable(),
|
136
|
+
function_call: zod_1.z
|
137
|
+
.object({
|
138
|
+
name: zod_1.z.string(),
|
139
|
+
arguments: zod_1.z.string(),
|
140
|
+
})
|
141
|
+
.optional(),
|
142
|
+
tool_calls: zod_1.z
|
143
|
+
.array(zod_1.z.object({
|
144
|
+
id: zod_1.z.string(),
|
145
|
+
type: zod_1.z.literal("function"),
|
146
|
+
function: zod_1.z.object({
|
147
|
+
name: zod_1.z.string(),
|
148
|
+
arguments: zod_1.z.string(),
|
149
|
+
}),
|
150
|
+
}))
|
151
|
+
.optional(),
|
152
|
+
}),
|
153
|
+
index: zod_1.z.number(),
|
154
|
+
logprobs: zod_1.z.nullable(zod_1.z.any()),
|
155
|
+
finish_reason: zod_1.z
|
156
|
+
.enum([
|
157
|
+
"stop",
|
158
|
+
"length",
|
159
|
+
"tool_calls",
|
160
|
+
"content_filter",
|
161
|
+
"function_call",
|
162
|
+
])
|
163
|
+
.optional()
|
164
|
+
.nullable(),
|
165
|
+
})),
|
166
|
+
created: zod_1.z.number(),
|
167
|
+
model: zod_1.z.string(),
|
168
|
+
system_fingerprint: zod_1.z.string().optional(),
|
169
|
+
object: zod_1.z.literal("chat.completion"),
|
170
|
+
usage: zod_1.z.object({
|
171
|
+
prompt_tokens: zod_1.z.number(),
|
172
|
+
completion_tokens: zod_1.z.number(),
|
173
|
+
total_tokens: zod_1.z.number(),
|
174
|
+
}),
|
175
|
+
});
|
176
|
+
async function callOpenAIChatCompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, messages, functions, functionCall, tools, toolChoice, temperature, topP, n, stop, maxTokens, presencePenalty, frequencyPenalty, logitBias, user, openAIResponseFormat, seed, }) {
|
177
|
+
// empty arrays are not allowed for stop:
|
178
|
+
if (stop != null && Array.isArray(stop) && stop.length === 0) {
|
179
|
+
stop = undefined;
|
180
|
+
}
|
181
|
+
return (0, postToApi_js_1.postJsonToApi)({
|
182
|
+
url: api.assembleUrl("/chat/completions"),
|
183
|
+
headers: api.headers,
|
184
|
+
body: {
|
185
|
+
stream: responseFormat.stream,
|
186
|
+
model,
|
187
|
+
messages,
|
188
|
+
functions,
|
189
|
+
function_call: functionCall,
|
190
|
+
tools,
|
191
|
+
tool_choice: toolChoice,
|
192
|
+
temperature,
|
193
|
+
top_p: topP,
|
194
|
+
n,
|
195
|
+
stop,
|
196
|
+
max_tokens: maxTokens,
|
197
|
+
presence_penalty: presencePenalty,
|
198
|
+
frequency_penalty: frequencyPenalty,
|
199
|
+
logit_bias: logitBias,
|
200
|
+
seed,
|
201
|
+
response_format: openAIResponseFormat,
|
202
|
+
user,
|
203
|
+
},
|
204
|
+
failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
|
205
|
+
successfulResponseHandler: responseFormat.handler,
|
206
|
+
abortSignal,
|
207
|
+
});
|
208
|
+
}
|
209
|
+
exports.OpenAIChatResponseFormat = {
|
210
|
+
/**
|
211
|
+
* Returns the response as a JSON object.
|
212
|
+
*/
|
213
|
+
json: {
|
214
|
+
stream: false,
|
215
|
+
handler: (0, postToApi_js_1.createJsonResponseHandler)(openAIChatResponseSchema),
|
216
|
+
},
|
217
|
+
/**
|
218
|
+
* Returns an async iterable over the text deltas (only the text delta of the first choice).
|
219
|
+
*/
|
220
|
+
textDeltaIterable: {
|
221
|
+
stream: true,
|
222
|
+
handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => delta[0]?.delta.content ?? ""),
|
223
|
+
},
|
224
|
+
structureDeltaIterable: {
|
225
|
+
stream: true,
|
226
|
+
handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => (0, parsePartialJson_js_1.parsePartialJson)(delta[0]?.function_call?.arguments)),
|
227
|
+
},
|
228
|
+
};
|