modelfusion 0.103.0 → 0.105.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +56 -0
- package/model-function/Delta.d.ts +1 -2
- package/model-function/executeStreamCall.cjs +6 -4
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +6 -4
- package/model-function/generate-speech/streamSpeech.cjs +1 -2
- package/model-function/generate-speech/streamSpeech.js +1 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
- package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
- package/model-function/generate-structure/streamStructure.cjs +7 -8
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.js +7 -8
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
- package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
- package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
- package/model-function/generate-text/prompt-template/index.cjs +1 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -1
- package/model-function/generate-text/prompt-template/index.js +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-function/generate-text/streamText.cjs +27 -28
- package/model-function/generate-text/streamText.d.ts +1 -0
- package/model-function/generate-text/streamText.js +27 -28
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
- package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
- package/model-provider/mistral/MistralChatModel.cjs +30 -104
- package/model-provider/mistral/MistralChatModel.d.ts +49 -16
- package/model-provider/mistral/MistralChatModel.js +30 -104
- package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
- package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
- package/model-provider/mistral/MistralChatModel.test.js +49 -0
- package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +13 -5
- package/model-provider/ollama/OllamaChatModel.cjs +7 -43
- package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
- package/model-provider/ollama/OllamaChatModel.js +7 -43
- package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
- package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
- package/model-provider/ollama/OllamaChatModel.test.js +25 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
- package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
- package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
- package/model-provider/ollama/OllamaCompletionModel.js +23 -44
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
- package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
- package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
- package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
- package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
- package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
- package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
- package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAIChatModel.test.js +92 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
- package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
- package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
- package/model-provider/openai/OpenAICompletionModel.js +33 -85
- package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
- package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
- package/model-provider/openai/OpenAICostCalculator.js +1 -1
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -3
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
- package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
- package/model-provider/openai/index.cjs +6 -6
- package/model-provider/openai/index.d.ts +5 -6
- package/model-provider/openai/index.js +5 -5
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/package.json +5 -5
- package/test/JsonTestServer.cjs +33 -0
- package/test/JsonTestServer.d.ts +7 -0
- package/test/JsonTestServer.js +29 -0
- package/test/StreamingTestServer.cjs +55 -0
- package/test/StreamingTestServer.d.ts +7 -0
- package/test/StreamingTestServer.js +51 -0
- package/test/arrayFromAsync.cjs +13 -0
- package/test/arrayFromAsync.d.ts +1 -0
- package/test/arrayFromAsync.js +9 -0
- package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
- package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
- package/util/streaming/createEventSourceResponseHandler.js +5 -0
- package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
- package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
- package/util/streaming/createJsonStreamResponseHandler.js +5 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
- package/model-function/generate-text/prompt-template/Content.cjs +0 -2
- package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
- package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
- /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
- /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs

@@ -5,7 +5,8 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIChatFunctionCallStructureGenerationModel = void 0;
 const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
-const StructureParseError_js_1 = require("…
+const StructureParseError_js_1 = require("../../model-function/generate-structure/StructureParseError.cjs");
+const parsePartialJson_js_1 = require("../../model-function/generate-structure/parsePartialJson.cjs");
 const AbstractOpenAIChatModel_js_1 = require("./AbstractOpenAIChatModel.cjs");
 const OpenAIChatPromptTemplate_js_1 = require("./OpenAIChatPromptTemplate.cjs");
 class OpenAIChatFunctionCallStructureGenerationModel {
@@ -132,7 +133,7 @@ class OpenAIChatFunctionCallStructureGenerationModel {
         const expandedPrompt = this.promptTemplate.format(prompt);
         return this.model.callAPI(expandedPrompt, {
             ...options,
-            responseFormat: AbstractOpenAIChatModel_js_1.OpenAIChatResponseFormat.…
+            responseFormat: AbstractOpenAIChatModel_js_1.OpenAIChatResponseFormat.deltaIterable,
             functionCall: { name: this.fnName },
             functions: [
                 {
@@ -143,5 +144,20 @@ class OpenAIChatFunctionCallStructureGenerationModel {
             ],
         });
     }
+    extractStructureTextDelta(delta) {
+        const chunk = delta;
+        if (chunk.object !== "chat.completion.chunk") {
+            return undefined;
+        }
+        const chatChunk = chunk;
+        const firstChoice = chatChunk.choices[0];
+        if (firstChoice.index > 0) {
+            return undefined;
+        }
+        return firstChoice.delta.function_call?.arguments;
+    }
+    parseAccumulatedStructureText(accumulatedText) {
+        return (0, parsePartialJson_js_1.parsePartialJson)(accumulatedText);
+    }
 }
 exports.OpenAIChatFunctionCallStructureGenerationModel = OpenAIChatFunctionCallStructureGenerationModel;
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts

@@ -1,11 +1,11 @@
-import { FunctionOptions } from "…
-import { JsonSchemaProducer } from "…
-import { Schema } from "…
-import {…
-import { TextGenerationPromptTemplate } from "…
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
+import { Schema } from "../../core/schema/Schema.js";
+import { StructureStreamingModel } from "../../model-function/generate-structure/StructureGenerationModel.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
-import { OpenAIChatModel, OpenAIChatSettings } from "./OpenAIChatModel";
-export declare class OpenAIChatFunctionCallStructureGenerationModel<PROMPT_TEMPLATE extends TextGenerationPromptTemplate<unknown, OpenAIChatPrompt>> implements…
+import { OpenAIChatModel, OpenAIChatSettings } from "./OpenAIChatModel.js";
+export declare class OpenAIChatFunctionCallStructureGenerationModel<PROMPT_TEMPLATE extends TextGenerationPromptTemplate<unknown, OpenAIChatPrompt>> implements StructureStreamingModel<Parameters<PROMPT_TEMPLATE["format"]>[0], // first argument of the function
 OpenAIChatSettings> {
     readonly model: OpenAIChatModel;
     readonly fnName: string;
@@ -17,7 +17,7 @@ OpenAIChatSettings> {
         fnDescription?: string;
         promptTemplate: PROMPT_TEMPLATE;
     });
-    get modelInformation(): import("…
+    get modelInformation(): import("../../index.js").ModelInformation;
    get settings(): OpenAIChatSettings;
    get settingsForEvent(): Partial<OpenAIChatSettings>;
    /**
@@ -27,11 +27,11 @@ OpenAIChatSettings> {
    /**
     * Returns this model with an instruction prompt template.
     */
-    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("…
+    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../index.js").InstructionPrompt, OpenAIChatPrompt>>;
    /**
     * Returns this model with a chat prompt template.
     */
-    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("…
+    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../index.js").ChatPrompt, OpenAIChatPrompt>>;
    withPromptTemplate<TARGET_PROMPT_FORMAT extends TextGenerationPromptTemplate<unknown, OpenAIChatPrompt>>(promptTemplate: TARGET_PROMPT_FORMAT): OpenAIChatFunctionCallStructureGenerationModel<TARGET_PROMPT_FORMAT>;
    withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
    /**
@@ -85,5 +85,35 @@ OpenAIChatSettings> {
        };
    }>;
    doStreamStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: Parameters<PROMPT_TEMPLATE["format"]>[0], // first argument of the function
-    options?: FunctionOptions): Promise<AsyncIterable<import("…
+    options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
+        object: "chat.completion.chunk";
+        model: string;
+        id: string;
+        created: number;
+        choices: {
+            delta: {
+                role?: "user" | "assistant" | undefined;
+                content?: string | null | undefined;
+                function_call?: {
+                    name?: string | undefined;
+                    arguments?: string | undefined;
+                } | undefined;
+                tool_calls?: {
+                    function: {
+                        name: string;
+                        arguments: string;
+                    };
+                    type: "function";
+                    id: string;
+                }[] | undefined;
+            };
+            index: number;
+            finish_reason?: "length" | "stop" | "function_call" | "tool_calls" | "content_filter" | null | undefined;
+        }[];
+        system_fingerprint?: string | null | undefined;
+    } | {
+        object: string;
+    }>>>;
+    extractStructureTextDelta(delta: unknown): string | undefined;
+    parseAccumulatedStructureText(accumulatedText: string): unknown;
 }
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js

@@ -1,5 +1,6 @@
 import SecureJSON from "secure-json-parse";
-import { StructureParseError } from "…
+import { StructureParseError } from "../../model-function/generate-structure/StructureParseError.js";
+import { parsePartialJson } from "../../model-function/generate-structure/parsePartialJson.js";
 import { OpenAIChatResponseFormat, } from "./AbstractOpenAIChatModel.js";
 import { chat, instruction, text } from "./OpenAIChatPromptTemplate.js";
 export class OpenAIChatFunctionCallStructureGenerationModel {
@@ -126,7 +127,7 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
         const expandedPrompt = this.promptTemplate.format(prompt);
         return this.model.callAPI(expandedPrompt, {
             ...options,
-            responseFormat: OpenAIChatResponseFormat.…
+            responseFormat: OpenAIChatResponseFormat.deltaIterable,
             functionCall: { name: this.fnName },
             functions: [
                 {
@@ -137,4 +138,19 @@ export class OpenAIChatFunctionCallStructureGenerationModel {
             ],
         });
     }
+    extractStructureTextDelta(delta) {
+        const chunk = delta;
+        if (chunk.object !== "chat.completion.chunk") {
+            return undefined;
+        }
+        const chatChunk = chunk;
+        const firstChoice = chatChunk.choices[0];
+        if (firstChoice.index > 0) {
+            return undefined;
+        }
+        return firstChoice.delta.function_call?.arguments;
+    }
+    parseAccumulatedStructureText(accumulatedText) {
+        return parsePartialJson(accumulatedText);
+    }
 }
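
The pattern above is the core of this release's structure-streaming rework: instead of each provider re-implementing the streaming loop, a model now only exposes `extractStructureTextDelta` (pull the raw `function_call.arguments` fragment out of a chunk, ignoring chunks that are not `chat.completion.chunk` or that belong to other choices) and `parseAccumulatedStructureText` (re-parse everything received so far as partial JSON). A minimal sketch of a consuming loop under those assumptions; this is illustrative, not the library's internal `streamStructure` implementation:

```ts
// Illustrative consumption loop, not modelfusion's internal code.
// Assumes `model` implements the two methods added in this release and
// `deltas` is the AsyncIterable produced by doStreamStructure().
async function* streamPartialStructures(
  model: {
    extractStructureTextDelta(delta: unknown): string | undefined;
    parseAccumulatedStructureText(accumulatedText: string): unknown;
  },
  deltas: AsyncIterable<unknown>
): AsyncIterable<unknown> {
  let accumulated = "";
  for await (const delta of deltas) {
    const textDelta = model.extractStructureTextDelta(delta);
    if (textDelta == null) continue; // e.g. chunks from non-first choices
    accumulated += textDelta;
    // parsePartialJson tolerates incomplete JSON, so every growing prefix
    // yields the best-effort structure parsed so far:
    yield model.parseAccumulatedStructureText(accumulated);
  }
}
```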
package/model-provider/openai/OpenAIChatMessage.d.ts

@@ -1,5 +1,5 @@
-import {…
-import { ToolCall } from "…
+import { ImagePart, TextPart } from "../../model-function/generate-text/prompt-template/ContentPart.js";
+import { ToolCall } from "../../tool/ToolCall.js";
 export type OpenAIChatMessage = {
     role: "system";
     content: string;
@@ -50,7 +50,7 @@ export declare const OpenAIChatMessage: {
    /**
     * Creates a user chat message. The message can be a string or a multi-modal input.
     */
-    user(content: string |…
+    user(content: string | Array<TextPart | ImagePart>, options?: {
        name?: string;
    }): OpenAIChatMessage;
    /**
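
`OpenAIChatMessage.user` now accepts multi-modal content built from the new `ContentPart` types. A hedged sketch of the call shapes: the `TextPart` form `{ type: "text", text }` is confirmed by the prompt-template code later in this diff, while the exact `ImagePart` fields shown are an assumption, not confirmed by the diff:

```ts
import { OpenAIChatMessage } from "./OpenAIChatMessage.js";

// String content, unchanged:
const plain = OpenAIChatMessage.user("Describe this image.", { name: "alice" });

// Multi-modal content as an array of parts. The TextPart shape matches the
// template code in this diff; the ImagePart fields (base64Image, mimeType)
// are assumed here for illustration.
const multiModal = OpenAIChatMessage.user([
  { type: "text", text: "Describe this image." },
  { type: "image", base64Image: "iVBORw0...", mimeType: "image/png" },
]);
```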
package/model-provider/openai/OpenAIChatModel.cjs

@@ -1,13 +1,13 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.OpenAIChatModel = exports.calculateOpenAIChatCostInMillicents = exports.isOpenAIChatModel = exports.getOpenAIChatModelInformation = exports.OPENAI_CHAT_MODELS = void 0;
-const StructureFromTextStreamingModel_js_1 = require("…
-const …
-const TextGenerationModel_js_1 = require("…
-const TikTokenTokenizer_js_1 = require("../TikTokenTokenizer.cjs");
+const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
+const PromptTemplateFullTextModel_js_1 = require("../../model-function/generate-text/PromptTemplateFullTextModel.cjs");
+const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const AbstractOpenAIChatModel_js_1 = require("./AbstractOpenAIChatModel.cjs");
 const OpenAIChatFunctionCallStructureGenerationModel_js_1 = require("./OpenAIChatFunctionCallStructureGenerationModel.cjs");
 const OpenAIChatPromptTemplate_js_1 = require("./OpenAIChatPromptTemplate.cjs");
+const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
 const countOpenAIChatMessageTokens_js_1 = require("./countOpenAIChatMessageTokens.cjs");
 /*
  * Available OpenAI chat models, their token limits, and pricing.
@@ -243,7 +243,7 @@ class OpenAIChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpenAIChatMod
         return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.chat)());
     }
     withPromptTemplate(promptTemplate) {
-        return new …
+        return new PromptTemplateFullTextModel_js_1.PromptTemplateFullTextModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
package/model-provider/openai/OpenAIChatModel.d.ts

@@ -1,13 +1,13 @@
-import { StructureFromTextPromptTemplate } from "…
-import { StructureFromTextStreamingModel } from "…
-import {…
-import { TextGenerationModelSettings, TextStreamingModel } from "…
-import { TextGenerationPromptTemplate } from "…
-import { ToolCallGenerationModel } from "…
-import { ToolCallsOrTextGenerationModel } from "…
-import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+import { StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
+import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
+import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
 import { AbstractOpenAIChatCallSettings, AbstractOpenAIChatModel, OpenAIChatPrompt, OpenAIChatResponse } from "./AbstractOpenAIChatModel.js";
 import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
+import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 export declare const OPENAI_CHAT_MODELS: {
     "gpt-4": {
         contextWindowSize: number;
@@ -147,16 +147,16 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
    /**
     * Returns this model with a text prompt template.
     */
-    withTextPrompt(): …
+    withTextPrompt(): PromptTemplateFullTextModel<string, OpenAIChatPrompt, OpenAIChatSettings, this>;
    /**
     * Returns this model with an instruction prompt template.
     */
-    withInstructionPrompt(): …
+    withInstructionPrompt(): PromptTemplateFullTextModel<import("../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
    /**
     * Returns this model with a chat prompt template.
     */
-    withChatPrompt(): …
-    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): …
+    withChatPrompt(): PromptTemplateFullTextModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateFullTextModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
    withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
 }
 export {};
package/model-provider/openai/OpenAIChatModel.js

@@ -1,10 +1,10 @@
-import { StructureFromTextStreamingModel } from "…
-import {…
-import { textGenerationModelProperties, } from "…
-import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
+import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
+import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
+import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { AbstractOpenAIChatModel, } from "./AbstractOpenAIChatModel.js";
 import { OpenAIChatFunctionCallStructureGenerationModel } from "./OpenAIChatFunctionCallStructureGenerationModel.js";
 import { chat, identity, instruction, text, } from "./OpenAIChatPromptTemplate.js";
+import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
 import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
 /*
  * Available OpenAI chat models, their token limits, and pricing.
@@ -237,7 +237,7 @@ export class OpenAIChatModel extends AbstractOpenAIChatModel {
         return this.withPromptTemplate(chat());
     }
     withPromptTemplate(promptTemplate) {
-        return new …
+        return new PromptTemplateFullTextModel({
             model: this.withSettings({
                 stopSequences: [
                     ...(this.settings.stopSequences ?? []),
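
This is the change that the new `PromptTemplateFullTextModel` files in the list above (`+35/+41/+31` lines) support: `withPromptTemplate` now wraps the chat model in a mapped model that keeps the full text-generation surface, including the tool-call interfaces declared in `OpenAIChatModel.d.ts` above, rather than a streaming-only wrapper. A usage sketch, assuming `OpenAIChatModel` and `streamText` are re-exported from the package root as in earlier versions:

```ts
import { OpenAIChatModel, streamText } from "modelfusion";

// withTextPrompt() now returns PromptTemplateFullTextModel<string, ...>,
// per the signature in OpenAIChatModel.d.ts above.
const model = new OpenAIChatModel({ model: "gpt-3.5-turbo" }).withTextPrompt();

const textStream = await streamText(model, "Write a haiku about version diffs.");
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```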
package/model-provider/openai/OpenAIChatModel.test.cjs

@@ -0,0 +1,94 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const zod_1 = require("zod");
+const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+const streamStructure_js_1 = require("../../model-function/generate-structure/streamStructure.cjs");
+const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
+const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
+const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
+const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
+const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
+describe("streamText", () => {
+    const server = new StreamingTestServer_js_1.StreamingTestServer("https://api.openai.com/v1/chat/completions");
+    server.setupTestEnvironment();
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
+            api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo",
+            numberOfGenerations: 2,
+        }).withTextPrompt(), "test prompt");
+        expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual(["A"]);
+    });
+});
+describe("streamStructure", () => {
+    const server = new StreamingTestServer_js_1.StreamingTestServer("https://api.openai.com/v1/chat/completions");
+    server.setupTestEnvironment();
+    it("should return a text stream", async () => {
+        server.responseChunks = [
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"role":"assistant","content":null,` +
+                `"function_call":{"name":"generateCharacter","arguments":""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"{\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":" "}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":" \\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"name"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"\\":\\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"M"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"ike\\"\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"}"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+            `data: [DONE]\n\n`,
+        ];
+        const stream = await (0, streamStructure_js_1.streamStructure)(new OpenAIChatModel_js_1.OpenAIChatModel({
+            api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo",
+        })
+            .asFunctionCallStructureGenerationModel({
+            fnName: "generateCharacter",
+            fnDescription: "Generate character descriptions.",
+        })
+            .withTextPrompt(), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
+        // note: space moved to last chunk bc of trimming
+        expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
+            { isComplete: false, value: {} },
+            { isComplete: false, value: { name: "" } },
+            { isComplete: false, value: { name: "M" } },
+            { isComplete: false, value: { name: "Mike" } },
+            { isComplete: true, value: { name: "Mike" } },
+        ]);
+    });
+});
package/model-provider/openai/OpenAIChatModel.test.d.ts

@@ -0,0 +1 @@
+export {};
package/model-provider/openai/OpenAIChatModel.test.js

@@ -0,0 +1,92 @@
+import { z } from "zod";
+import { zodSchema } from "../../core/schema/ZodSchema.js";
+import { streamStructure } from "../../model-function/generate-structure/streamStructure.js";
+import { streamText } from "../../model-function/generate-text/streamText.js";
+import { StreamingTestServer } from "../../test/StreamingTestServer.js";
+import { arrayFromAsync } from "../../test/arrayFromAsync.js";
+import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+import { OpenAIChatModel } from "./OpenAIChatModel.js";
+describe("streamText", () => {
+    const server = new StreamingTestServer("https://api.openai.com/v1/chat/completions");
+    server.setupTestEnvironment();
+    it("should return only values from the first choice when using streamText", async () => {
+        server.responseChunks = [
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613",` +
+                `"system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await streamText(new OpenAIChatModel({
+            api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo",
+            numberOfGenerations: 2,
+        }).withTextPrompt(), "test prompt");
+        expect(await arrayFromAsync(stream)).toStrictEqual(["A"]);
+    });
+});
+describe("streamStructure", () => {
+    const server = new StreamingTestServer("https://api.openai.com/v1/chat/completions");
+    server.setupTestEnvironment();
+    it("should return a text stream", async () => {
+        server.responseChunks = [
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"role":"assistant","content":null,` +
+                `"function_call":{"name":"generateCharacter","arguments":""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"{\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":" "}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":" \\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"name"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"\\":\\""}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"M"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"ike\\"\\n"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{"function_call":{"arguments":"}"}},"logprobs":null,"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-8ZhZtizjTCGmZaPRwyUiuDJ1DYUD0","object":"chat.completion.chunk",` +
+                `"created":1703519685,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,` +
+                `"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+            `data: [DONE]\n\n`,
+        ];
+        const stream = await streamStructure(new OpenAIChatModel({
+            api: new OpenAIApiConfiguration({ apiKey: "test-key" }),
+            model: "gpt-3.5-turbo",
+        })
+            .asFunctionCallStructureGenerationModel({
+            fnName: "generateCharacter",
+            fnDescription: "Generate character descriptions.",
+        })
+            .withTextPrompt(), zodSchema(z.object({ name: z.string() })), "generate a name");
+        // note: space moved to last chunk bc of trimming
+        expect(await arrayFromAsync(stream)).toStrictEqual([
+            { isComplete: false, value: {} },
+            { isComplete: false, value: { name: "" } },
+            { isComplete: false, value: { name: "M" } },
+            { isComplete: false, value: { name: "Mike" } },
+            { isComplete: true, value: { name: "Mike" } },
+        ]);
+    });
+});
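
These tests lean on two new helpers under `package/test/` from the file list above: `StreamingTestServer`, which serves the `responseChunks` fixture as a server-sent-event stream at the given URL, and `arrayFromAsync`, which drains an async iterable into an array for assertions. The latter is small enough to sketch from its usage; this is a plausible reconstruction, not the shipped source:

```ts
// Plausible reconstruction of package/test/arrayFromAsync based on its usage
// in the tests above; the shipped implementation is not shown in this diff.
export async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
  const result: T[] = [];
  for await (const item of iterable) {
    result.push(item);
  }
  return result;
}
```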
package/model-provider/openai/OpenAIChatPromptTemplate.cjs

@@ -0,0 +1,114 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.chat = exports.instruction = exports.text = exports.identity = void 0;
+const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
+/**
+ * OpenAIMessage[] identity chat format.
+ */
+function identity() {
+    return { format: (prompt) => prompt, stopSequences: [] };
+}
+exports.identity = identity;
+/**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+function text() {
+    return {
+        format: (prompt) => [OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt)],
+        stopSequences: [],
+    };
+}
+exports.text = text;
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+function instruction() {
+    return {
+        format(prompt) {
+            const messages = [];
+            if (prompt.system != null) {
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
+            }
+            messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(prompt.instruction));
+            return messages;
+        },
+        stopSequences: [],
+    };
+}
+exports.instruction = instruction;
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+function chat() {
+    return {
+        format(prompt) {
+            const messages = [];
+            if (prompt.system != null) {
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(prompt.system));
+            }
+            for (const { role, content } of prompt.messages) {
+                switch (role) {
+                    case "user": {
+                        messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(content));
+                        break;
+                    }
+                    case "assistant": {
+                        if (typeof content === "string") {
+                            messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.assistant(content));
+                        }
+                        else {
+                            let text = "";
+                            const toolCalls = [];
+                            for (const part of content) {
+                                switch (part.type) {
+                                    case "text": {
+                                        text += part.text;
+                                        break;
+                                    }
+                                    case "tool-call": {
+                                        toolCalls.push({
+                                            id: part.id,
+                                            type: "function",
+                                            function: {
+                                                name: part.name,
+                                                arguments: JSON.stringify(part.args),
+                                            },
+                                        });
+                                        break;
+                                    }
+                                    default: {
+                                        const _exhaustiveCheck = part;
+                                        throw new Error(`Unsupported part: ${_exhaustiveCheck}`);
+                                    }
+                                }
+                            }
+                            messages.push({
+                                role: "assistant",
+                                content: text,
+                                tool_calls: toolCalls,
+                            });
+                        }
+                        break;
+                    }
+                    case "tool": {
+                        for (const toolResponse of content) {
+                            messages.push({
+                                role: "tool",
+                                tool_call_id: toolResponse.id,
+                                content: JSON.stringify(toolResponse.response),
+                            });
+                        }
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
+            }
+            return messages;
+        },
+        stopSequences: [],
+    };
+}
+exports.chat = chat;
package/model-provider/openai/OpenAIChatPromptTemplate.d.ts

@@ -0,0 +1,20 @@
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
+import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
+/**
+ * OpenAIMessage[] identity chat format.
+ */
+export declare function identity(): TextGenerationPromptTemplate<OpenAIChatPrompt, OpenAIChatPrompt>;
+/**
+ * Formats a text prompt as an OpenAI chat prompt.
+ */
+export declare function text(): TextGenerationPromptTemplate<string, OpenAIChatPrompt>;
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, OpenAIChatPrompt>;
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, OpenAIChatPrompt>;