modelfusion 0.102.0 → 0.104.0
This diff shows the content of publicly available package versions as released to their public registries. The information is provided for informational purposes only and reflects the changes between the two versions.
- package/CHANGELOG.md +27 -0
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +1 -1
- package/model-function/generate-text/TextGenerationModel.cjs +7 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +3 -1
- package/model-function/generate-text/TextGenerationModel.js +6 -1
- package/model-function/generate-text/TextGenerationResult.cjs +2 -0
- package/model-function/generate-text/TextGenerationResult.d.ts +11 -0
- package/model-function/generate-text/TextGenerationResult.js +1 -0
- package/model-function/generate-text/generateText.cjs +14 -9
- package/model-function/generate-text/generateText.d.ts +3 -0
- package/model-function/generate-text/generateText.js +14 -9
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +5 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +5 -4
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +0 -24
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +11 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +1 -22
- package/model-function/generate-text/prompt-template/Content.cjs +9 -0
- package/model-function/generate-text/prompt-template/Content.d.ts +9 -4
- package/model-function/generate-text/prompt-template/Content.js +7 -1
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +6 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +36 -5
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +34 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +5 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +5 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +3 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +3 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +3 -3
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +3 -3
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +5 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +5 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +23 -8
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +8 -3
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +24 -9
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +22 -6
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +8 -3
- package/model-provider/cohere/CohereTextGenerationModel.js +22 -6
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +2 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +2 -2
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +9 -8
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -5
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +9 -8
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +23 -16
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +23 -16
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +51 -51
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +14 -11
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +51 -51
- package/model-provider/mistral/MistralChatModel.cjs +19 -2
- package/model-provider/mistral/MistralChatModel.d.ts +8 -3
- package/model-provider/mistral/MistralChatModel.js +19 -2
- package/model-provider/mistral/MistralPromptTemplate.cjs +5 -4
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +5 -4
- package/model-provider/ollama/OllamaChatModel.cjs +8 -3
- package/model-provider/ollama/OllamaChatModel.d.ts +6 -3
- package/model-provider/ollama/OllamaChatModel.js +8 -3
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +9 -13
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +9 -13
- package/model-provider/ollama/OllamaCompletionModel.cjs +8 -3
- package/model-provider/ollama/OllamaCompletionModel.d.ts +4 -1
- package/model-provider/ollama/OllamaCompletionModel.js +8 -3
- package/model-provider/openai/OpenAICompletionModel.cjs +20 -4
- package/model-provider/openai/OpenAICompletionModel.d.ts +8 -3
- package/model-provider/openai/OpenAICompletionModel.js +20 -4
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +19 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +6 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +19 -1
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatModel.cjs +2 -3
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatModel.js +2 -3
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -2
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +4 -4
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -2
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -3
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +2 -2
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -3
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +6 -6
- package/package.json +2 -2
package/model-provider/ollama/OllamaChatModel.js

```diff
@@ -6,6 +6,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { safeParseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -89,8 +90,7 @@ export class OllamaChatModel extends AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
+            ...textGenerationModelProperties,
             "temperature",
             "mirostat",
             "mirostatEta",
@@ -116,7 +116,12 @@ export class OllamaChatModel extends AbstractModel {
         });
         return {
             response,
-            text: response.message.content,
+            textGenerationResults: [
+                {
+                    text: response.message.content,
+                    finishReason: "unknown",
+                },
+            ],
         };
     }
     doStreamText(prompt, options) {
```
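Across these hunks the pattern is the same: `doGenerateText` no longer returns a bare `text` field but a `textGenerationResults` array that pairs each generated text with a finish reason (see the new `TextGenerationResult` files in the list above). A minimal sketch of the shape, inferred from the hunks in this diff; the full `TextGenerationFinishReason` union in `TextGenerationResult.d.ts` may contain more members than the values visible here:

```ts
// Inferred from this diff; the real union may include further members.
type TextGenerationFinishReason =
  | "stop"           // the model emitted a stop sequence
  | "length"         // the token limit was reached
  | "content-filter" // the provider filtered the output
  | "tool-calls"     // the model decided to call tools
  | "unknown";       // the provider reports nothing (e.g. Ollama)

interface TextGenerationResult {
  text: string;
  finishReason: TextGenerationFinishReason;
}

// Ollama's API reports no finish reason, so the model above hard-codes "unknown":
const ollamaShaped: TextGenerationResult[] = [
  { text: "generated text", finishReason: "unknown" },
];
```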
package/model-provider/ollama/OllamaChatPromptTemplate.cjs

```diff
@@ -1,7 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = exports.identity = void 0;
-const ChatPrompt_js_1 = require("../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
 /**
  * OllamaChatPrompt identity chat format.
  */
@@ -42,7 +41,6 @@ exports.instruction = instruction;
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
@@ -60,17 +58,15 @@ function extractContent(input) {
     if (typeof input === "string") {
         return { content: input, images: undefined };
     }
-    else {
-        const images = [];
-        let content = "";
-        for (const part of input) {
-            if (part.type === "text") {
-                content += part.text;
-            }
-            else {
-                images.push(part.base64Image);
-            }
+    const images = [];
+    let content = "";
+    for (const part of input) {
+        if (part.type === "text") {
+            content += part.text;
+        }
+        else {
+            images.push(part.base64Image);
         }
-        return { content, images };
     }
+    return { content, images };
 }
```
package/model-provider/ollama/OllamaChatPromptTemplate.d.ts

```diff
@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { OllamaChatPrompt } from "./OllamaChatModel.js";
 /**
  * OllamaChatPrompt identity chat format.
@@ -13,8 +13,8 @@ export declare function text(): TextGenerationPromptTemplate<string, OllamaChatP
 /**
  * Formats an instruction prompt as an Ollama chat prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<
+export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, OllamaChatPrompt>;
 /**
  * Formats a chat prompt as an Ollama chat prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, OllamaChatPrompt>;
```
package/model-provider/ollama/OllamaChatPromptTemplate.js

```diff
@@ -1,4 +1,3 @@
-import { validateChatPrompt, } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 /**
  * OllamaChatPrompt identity chat format.
  */
@@ -36,7 +35,6 @@ export function instruction() {
 export function chat() {
     return {
         format(prompt) {
-            validateChatPrompt(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
@@ -53,17 +51,15 @@ function extractContent(input) {
     if (typeof input === "string") {
         return { content: input, images: undefined };
     }
-    else {
-        const images = [];
-        let content = "";
-        for (const part of input) {
-            if (part.type === "text") {
-                content += part.text;
-            }
-            else {
-                images.push(part.base64Image);
-            }
+    const images = [];
+    let content = "";
+    for (const part of input) {
+        if (part.type === "text") {
+            content += part.text;
+        }
+        else {
+            images.push(part.base64Image);
         }
-        return { content, images };
     }
+    return { content, images };
 }
```
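The `extractContent` helper in both builds of this prompt template was flattened: the `else` wrapper is gone, and the multi-modal loop now runs directly after the early return for plain strings. Reassembled from the hunk above as typed TypeScript (the part types are a sketch; the real definitions live in the `Content` module listed at the top):

```ts
// Part types are a sketch based on the property accesses in the hunk;
// the real Content module may carry extra fields (e.g. a mimeType).
type ContentPart =
  | { type: "text"; text: string }
  | { type: "image"; base64Image: string };

function extractContent(input: string | ContentPart[]) {
  // Plain text prompts pass through unchanged, with no image list.
  if (typeof input === "string") {
    return { content: input, images: undefined };
  }
  // Multi-modal prompts: concatenate text parts, collect base64 images.
  const images: string[] = [];
  let content = "";
  for (const part of input) {
    if (part.type === "text") {
      content += part.text;
    } else {
      images.push(part.base64Image);
    }
  }
  return { content, images };
}
```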
package/model-provider/ollama/OllamaCompletionModel.cjs

```diff
@@ -9,6 +9,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
 const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -90,8 +91,7 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
+            ...TextGenerationModel_js_1.textGenerationModelProperties,
             "contextWindowSize",
             "temperature",
             "mirostat",
@@ -121,7 +121,12 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
         });
         return {
             response,
-            text: response.response,
+            textGenerationResults: [
+                {
+                    text: response.response,
+                    finishReason: "unknown",
+                },
+            ],
         };
     }
     doStreamText(prompt, options) {
```
package/model-provider/ollama/OllamaCompletionModel.d.ts

```diff
@@ -65,7 +65,10 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
             prompt_eval_duration?: number | undefined;
             context?: number[] | undefined;
         };
-        text: string;
+        textGenerationResults: {
+            text: string;
+            finishReason: "unknown";
+        }[];
     }>;
     doStreamText(prompt: OllamaCompletionPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
```
package/model-provider/ollama/OllamaCompletionModel.js

```diff
@@ -6,6 +6,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { safeParseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -87,8 +88,7 @@ export class OllamaCompletionModel extends AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
+            ...textGenerationModelProperties,
             "contextWindowSize",
             "temperature",
             "mirostat",
@@ -118,7 +118,12 @@ export class OllamaCompletionModel extends AbstractModel {
         });
         return {
             response,
-            text: response.response,
+            textGenerationResults: [
+                {
+                    text: response.response,
+                    finishReason: "unknown",
+                },
+            ],
         };
     }
     doStreamText(prompt, options) {
```
package/model-provider/openai/OpenAICompletionModel.cjs

```diff
@@ -8,6 +8,7 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextPromptTemplate_js_1 = require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
@@ -228,9 +229,7 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
-            "numberOfGenerations",
+            ...TextGenerationModel_js_1.textGenerationModelProperties,
             "suffix",
             "temperature",
             "topP",
@@ -251,7 +250,12 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
         });
         return {
             response,
-            text: response.choices[0].text,
+            textGenerationResults: response.choices.map((choice) => {
+                return {
+                    finishReason: this.translateFinishReason(choice.finish_reason),
+                    text: choice.text,
+                };
+            }),
             usage: {
                 promptTokens: response.usage.prompt_tokens,
                 completionTokens: response.usage.completion_tokens,
@@ -259,6 +263,18 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
             },
         };
     }
+    translateFinishReason(finishReason) {
+        switch (finishReason) {
+            case "stop":
+                return "stop";
+            case "length":
+                return "length";
+            case "content_filter":
+                return "content-filter";
+            default:
+                return "unknown";
+        }
+    }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
```
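The new `translateFinishReason` method maps OpenAI's snake_case `finish_reason` values onto the library's kebab-case vocabulary, defaulting to `"unknown"` for anything unrecognized. A typed rendering of the compiled method above (the parameter type is an assumption, since the emitted JavaScript is untyped):

```ts
// Typed rendering of the compiled method above; the parameter type is an
// assumption (OpenAI may also return null for finish_reason).
function translateFinishReason(
  finishReason: string | null | undefined
): "stop" | "length" | "content-filter" | "unknown" {
  switch (finishReason) {
    case "stop":
      return "stop";
    case "length":
      return "length";
    case "content_filter":
      return "content-filter";
    default:
      return "unknown";
  }
}
```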
package/model-provider/openai/OpenAICompletionModel.d.ts

```diff
@@ -7,6 +7,7 @@ import { Delta } from "../../model-function/Delta.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { TextGenerationFinishReason } from "../../model-function/generate-text/TextGenerationResult.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 /**
  * @see https://platform.openai.com/docs/models/
@@ -162,25 +163,29 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
             }[];
             system_fingerprint?: string | undefined;
         };
-        text: string;
+        textGenerationResults: {
+            finishReason: TextGenerationFinishReason;
+            text: string;
+        }[];
         usage: {
             promptTokens: number;
             completionTokens: number;
             totalTokens: number;
         };
     }>;
+    private translateFinishReason;
     doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAICompletionModelSettings, this>;
     /**
      * Returns this model with a chat prompt template.
      */
     withChatPrompt(options?: {
         user?: string;
         assistant?: string;
-    }): PromptTemplateTextStreamingModel<import("../../index.js").
+    }): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
 }
```
package/model-provider/openai/OpenAICompletionModel.js

```diff
@@ -5,6 +5,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { chat, instruction, } from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
@@ -222,9 +223,7 @@ export class OpenAICompletionModel extends AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
-            "numberOfGenerations",
+            ...textGenerationModelProperties,
             "suffix",
             "temperature",
             "topP",
@@ -245,7 +244,12 @@ export class OpenAICompletionModel extends AbstractModel {
         });
         return {
             response,
-            text: response.choices[0].text,
+            textGenerationResults: response.choices.map((choice) => {
+                return {
+                    finishReason: this.translateFinishReason(choice.finish_reason),
+                    text: choice.text,
+                };
+            }),
             usage: {
                 promptTokens: response.usage.prompt_tokens,
                 completionTokens: response.usage.completion_tokens,
@@ -253,6 +257,18 @@ export class OpenAICompletionModel extends AbstractModel {
             },
         };
     }
+    translateFinishReason(finishReason) {
+        switch (finishReason) {
+            case "stop":
+                return "stop";
+            case "length":
+                return "length";
+            case "content_filter":
+                return "content-filter";
+            default:
+                return "unknown";
+        }
+    }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
```
package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs

```diff
@@ -80,10 +80,28 @@ class AbstractOpenAIChatModel extends AbstractModel_js_1.AbstractModel {
         });
         return {
             response,
-            text: response.choices[0].message.content ?? "",
+            textGenerationResults: response.choices.map((choice) => ({
+                text: choice.message.content ?? "",
+                finishReason: this.translateFinishReason(choice.finish_reason),
+            })),
             usage: this.extractUsage(response),
         };
     }
+    translateFinishReason(finishReason) {
+        switch (finishReason) {
+            case "stop":
+                return "stop";
+            case "length":
+                return "length";
+            case "content_filter":
+                return "content-filter";
+            case "function_call":
+            case "tool_calls":
+                return "tool-calls";
+            default:
+                return "unknown";
+        }
+    }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
```
package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts

```diff
@@ -5,6 +5,7 @@ import { ResponseHandler } from "../../../core/api/postToApi.js";
 import { AbstractModel } from "../../../model-function/AbstractModel.js";
 import { Delta } from "../../../model-function/Delta.js";
 import { TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
+import { TextGenerationFinishReason } from "../../../model-function/generate-text/TextGenerationResult.js";
 import { ToolDefinition } from "../../../tool/ToolDefinition.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 export interface AbstractOpenAIChatCallSettings {
@@ -126,13 +127,17 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
             }[];
             system_fingerprint?: string | null | undefined;
         };
-        text: string;
+        textGenerationResults: {
+            text: string;
+            finishReason: TextGenerationFinishReason;
+        }[];
         usage: {
             promptTokens: number;
             completionTokens: number;
             totalTokens: number;
         };
     }>;
+    private translateFinishReason;
     doStreamText(prompt: OpenAIChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: OpenAIChatPrompt, options?: FunctionOptions): Promise<{
         response: {
```
package/model-provider/openai/chat/AbstractOpenAIChatModel.js

```diff
@@ -77,10 +77,28 @@ export class AbstractOpenAIChatModel extends AbstractModel {
         });
         return {
             response,
-            text: response.choices[0].message.content ?? "",
+            textGenerationResults: response.choices.map((choice) => ({
+                text: choice.message.content ?? "",
+                finishReason: this.translateFinishReason(choice.finish_reason),
+            })),
             usage: this.extractUsage(response),
         };
     }
+    translateFinishReason(finishReason) {
+        switch (finishReason) {
+            case "stop":
+                return "stop";
+            case "length":
+                return "length";
+            case "content_filter":
+                return "content-filter";
+            case "function_call":
+            case "tool_calls":
+                return "tool-calls";
+            default:
+                return "unknown";
+        }
+    }
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
```
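The chat variant extends the same mapping with `function_call` and `tool_calls`, both collapsing to `"tool-calls"`. Per the `generateText` entries in the file list, the new result shape also surfaces through `generateText`. A consumption sketch, assuming modelfusion's `fullResponse: true` option exposes the new `finishReason` field:

```ts
import { generateText, openai } from "modelfusion";

// Sketch under the assumption that `fullResponse: true` now returns the
// finish reason alongside the generated text (per the generateText diff stats).
const { text, finishReason } = await generateText(
  openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
  "Write a haiku about unified diffs.",
  { fullResponse: true }
);

if (finishReason === "length") {
  // The completion was truncated by the token limit; a retry with a larger
  // maxGenerationTokens setting may be warranted.
}
```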
package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts

```diff
@@ -27,11 +27,11 @@ OpenAIChatSettings> {
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").
+    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").InstructionPrompt, OpenAIChatPrompt>>;
     /**
      * Returns this model with a chat prompt template.
      */
-    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").
+    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").ChatPrompt, OpenAIChatPrompt>>;
     withPromptTemplate<TARGET_PROMPT_FORMAT extends TextGenerationPromptTemplate<unknown, OpenAIChatPrompt>>(promptTemplate: TARGET_PROMPT_FORMAT): OpenAIChatFunctionCallStructureGenerationModel<TARGET_PROMPT_FORMAT>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
     /**
```
package/model-provider/openai/chat/OpenAIChatMessage.d.ts

```diff
@@ -1,4 +1,4 @@
-import {
+import { Content } from "../../../model-function/generate-text/prompt-template/Content.js";
 import { ToolCall } from "../../../tool/ToolCall.js";
 export type OpenAIChatMessage = {
     role: "system";
@@ -50,7 +50,7 @@ export declare const OpenAIChatMessage: {
     /**
     * Creates a user chat message. The message can be a string or a multi-modal input.
     */
-    user(content:
+    user(content: Content, options?: {
         name?: string;
     }): OpenAIChatMessage;
     /**
```
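`OpenAIChatMessage.user` now accepts the shared `Content` type from the prompt-template layer instead of a local union (note `Content.d.ts` +9 -4 in the file list). A sketch of the shape implied by the hunks in this diff; the `extractContent` loops read `part.type`, `part.text`, and `part.base64Image`:

```ts
// Sketch of the shared Content type implied by this diff; the real module
// may define additional fields on the image part (e.g. a mimeType).
type TextPart = { type: "text"; text: string };
type ImagePart = { type: "image"; base64Image: string };
type Content = string | Array<TextPart | ImagePart>;

// A multi-modal user message would then look like:
const userContent: Content = [
  { type: "text", text: "Describe this image:" },
  { type: "image", base64Image: "<base64-encoded image data>" },
];
```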
package/model-provider/openai/chat/OpenAIChatModel.cjs

```diff
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.getOpenAIChatModelInformation = exports.OPENAI_CHAT_MODELS = void 0;
 const StructureFromTextStreamingModel_js_1 = require("../../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+const TextGenerationModel_js_1 = require("../../../model-function/generate-text/TextGenerationModel.cjs");
 const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
 const AbstractOpenAIChatModel_js_1 = require("./AbstractOpenAIChatModel.cjs");
 const OpenAIChatFunctionCallStructureGenerationModel_js_1 = require("./OpenAIChatFunctionCallStructureGenerationModel.cjs");
@@ -196,9 +197,7 @@ class OpenAIChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpenAIChatMod
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
-            "numberOfGenerations",
+            ...TextGenerationModel_js_1.textGenerationModelProperties,
             "functions",
             "functionCall",
             "temperature",
```
package/model-provider/openai/chat/OpenAIChatModel.d.ts

```diff
@@ -151,11 +151,11 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
     /**
      * Returns this model with a chat prompt template.
      */
-    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").
+    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
 }
```
package/model-provider/openai/chat/OpenAIChatModel.js

```diff
@@ -1,5 +1,6 @@
 import { StructureFromTextStreamingModel } from "../../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { textGenerationModelProperties, } from "../../../model-function/generate-text/TextGenerationModel.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
 import { AbstractOpenAIChatModel, } from "./AbstractOpenAIChatModel.js";
 import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
@@ -190,9 +191,7 @@ export class OpenAIChatModel extends AbstractOpenAIChatModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "maxGenerationTokens",
-            "stopSequences",
-            "numberOfGenerations",
+            ...textGenerationModelProperties,
             "functions",
             "functionCall",
             "temperature",
```
package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs

```diff
@@ -1,7 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = exports.identity = void 0;
-const ChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-template/ChatPrompt.cjs");
 const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
 /**
  * OpenAIMessage[] identity chat format.
@@ -43,7 +42,6 @@ exports.instruction = instruction;
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
```
package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts

```diff
@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../../../model-function/generate-text/TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
 /**
  * OpenAIMessage[] identity chat format.
@@ -13,8 +13,8 @@ export declare function text(): TextGenerationPromptTemplate<string, OpenAIChatP
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<
+export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, OpenAIChatPrompt>;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, OpenAIChatPrompt>;
```
package/model-provider/openai/chat/OpenAIChatPromptTemplate.js

```diff
@@ -1,4 +1,3 @@
-import { validateChatPrompt, } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 /**
  * OpenAIMessage[] identity chat format.
@@ -37,7 +36,6 @@ export function instruction() {
 export function chat() {
     return {
         format(prompt) {
-            validateChatPrompt(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push(OpenAIChatMessage.system(prompt.system));
```
package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs

```diff
@@ -3,6 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAICompatibleChatModel = void 0;
 const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
 const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
+const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const AbstractOpenAIChatModel_js_1 = require("../openai/chat/AbstractOpenAIChatModel.cjs");
 const OpenAIChatPromptTemplate_js_1 = require("../openai/chat/OpenAIChatPromptTemplate.cjs");
 /**
@@ -43,9 +44,7 @@ class OpenAICompatibleChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpe
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "stopSequences",
-            "maxGenerationTokens",
-            "numberOfGenerations",
+            ...TextGenerationModel_js_1.textGenerationModelProperties,
             "functions",
             "functionCall",
             "temperature",
```
package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts

```diff
@@ -35,11 +35,11 @@ export declare class OpenAICompatibleChatModel extends AbstractOpenAIChatModel<O
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
     /**
      * Returns this model with a chat prompt template.
      */
-    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAICompatibleChatSettings, this>;
     withSettings(additionalSettings: Partial<OpenAICompatibleChatSettings>): this;
 }
```
package/model-provider/openai-compatible/OpenAICompatibleChatModel.js

```diff
@@ -1,5 +1,6 @@
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { AbstractOpenAIChatModel, } from "../openai/chat/AbstractOpenAIChatModel.js";
 import { chat, instruction, text, } from "../openai/chat/OpenAIChatPromptTemplate.js";
 /**
@@ -40,9 +41,7 @@ export class OpenAICompatibleChatModel extends AbstractOpenAIChatModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-            "stopSequences",
-            "maxGenerationTokens",
-            "numberOfGenerations",
+            ...textGenerationModelProperties,
             "functions",
             "functionCall",
             "temperature",
```
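Every `settingsForEvent` getter in this release spreads a shared `textGenerationModelProperties` list instead of repeating the common setting names per provider. Based on the literals it replaces across these hunks, the shared constant plausibly looks like the sketch below; the actual export in `TextGenerationModel.js` may list additional entries:

```ts
// Sketch based on the literals removed across these hunks; the real export
// in TextGenerationModel.js may include further shared settings.
export const textGenerationModelProperties = [
  "maxGenerationTokens",
  "stopSequences",
  "numberOfGenerations",
] as const;

// Each model then composes its event settings from the shared base:
const eventSettingProperties: readonly string[] = [
  ...textGenerationModelProperties,
  "temperature", // plus further provider-specific settings
];
```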