modelfusion 0.103.0 → 0.105.0
This diff shows the changes between publicly available package versions as they appear in their public registry. It is provided for informational purposes only.
- package/CHANGELOG.md +56 -0
- package/model-function/Delta.d.ts +1 -2
- package/model-function/executeStreamCall.cjs +6 -4
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +6 -4
- package/model-function/generate-speech/streamSpeech.cjs +1 -2
- package/model-function/generate-speech/streamSpeech.js +1 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
- package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
- package/model-function/generate-structure/streamStructure.cjs +7 -8
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.js +7 -8
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
- package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
- package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
- package/model-function/generate-text/prompt-template/index.cjs +1 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -1
- package/model-function/generate-text/prompt-template/index.js +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-function/generate-text/streamText.cjs +27 -28
- package/model-function/generate-text/streamText.d.ts +1 -0
- package/model-function/generate-text/streamText.js +27 -28
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
- package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
- package/model-provider/mistral/MistralChatModel.cjs +30 -104
- package/model-provider/mistral/MistralChatModel.d.ts +49 -16
- package/model-provider/mistral/MistralChatModel.js +30 -104
- package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
- package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
- package/model-provider/mistral/MistralChatModel.test.js +49 -0
- package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +13 -5
- package/model-provider/ollama/OllamaChatModel.cjs +7 -43
- package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
- package/model-provider/ollama/OllamaChatModel.js +7 -43
- package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
- package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
- package/model-provider/ollama/OllamaChatModel.test.js +25 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
- package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
- package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
- package/model-provider/ollama/OllamaCompletionModel.js +23 -44
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
- package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
- package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
- package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
- package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
- package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
- package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
- package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAIChatModel.test.js +92 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
- package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
- package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
- package/model-provider/openai/OpenAICompletionModel.js +33 -85
- package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
- package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
- package/model-provider/openai/OpenAICostCalculator.js +1 -1
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -3
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
- package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
- package/model-provider/openai/index.cjs +6 -6
- package/model-provider/openai/index.d.ts +5 -6
- package/model-provider/openai/index.js +5 -5
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/package.json +5 -5
- package/test/JsonTestServer.cjs +33 -0
- package/test/JsonTestServer.d.ts +7 -0
- package/test/JsonTestServer.js +29 -0
- package/test/StreamingTestServer.cjs +55 -0
- package/test/StreamingTestServer.d.ts +7 -0
- package/test/StreamingTestServer.js +51 -0
- package/test/arrayFromAsync.cjs +13 -0
- package/test/arrayFromAsync.d.ts +1 -0
- package/test/arrayFromAsync.js +9 -0
- package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
- package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
- package/util/streaming/createEventSourceResponseHandler.js +5 -0
- package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
- package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
- package/util/streaming/createJsonStreamResponseHandler.js +5 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
- package/model-function/generate-text/prompt-template/Content.cjs +0 -2
- package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
- package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
- /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
- /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/model-provider/mistral/MistralPromptTemplate.cjs

@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = void 0;
-const
+const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
+const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
 /**
  * Formats a text prompt as a Mistral prompt.
  */
@@ -22,7 +23,8 @@ function instruction() {
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
             }
-            …
+            const instruction = (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
+            messages.push({ role: "user", content: instruction });
             return messages;
         },
         stopSequences: [],
@@ -35,7 +37,6 @@ exports.instruction = instruction;
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
@@ -43,13 +44,20 @@ function chat() {
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-                        …
+                        const textContent = (0, ContentPart_js_1.validateContentIsString)(content, prompt);
+                        messages.push({ role: "user", content: textContent });
                         break;
                     }
                     case "assistant": {
-                        messages.push({
+                        messages.push({
+                            role: "assistant",
+                            content: (0, ContentPart_js_1.validateContentIsString)(content, prompt),
+                        });
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);

package/model-provider/mistral/MistralPromptTemplate.d.ts

@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { MistralChatPrompt } from "./MistralChatModel.js";
 /**
  * Formats a text prompt as a Mistral prompt.
@@ -9,8 +9,8 @@ export declare function text(): TextGenerationPromptTemplate<string, MistralChat
 /**
  * Formats an instruction prompt as a Mistral prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<
+export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, MistralChatPrompt>;
 /**
  * Formats a chat prompt as a Mistral prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, MistralChatPrompt>;

package/model-provider/mistral/MistralPromptTemplate.js

@@ -1,4 +1,5 @@
-import {
+import { validateContentIsString } from "../../model-function/generate-text/prompt-template/ContentPart.js";
+import { InvalidPromptError } from "../../model-function/generate-text/prompt-template/InvalidPromptError.js";
 /**
  * Formats a text prompt as a Mistral prompt.
  */
@@ -18,7 +19,8 @@ export function instruction() {
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
             }
-            …
+            const instruction = validateContentIsString(prompt.instruction, prompt);
+            messages.push({ role: "user", content: instruction });
             return messages;
         },
         stopSequences: [],
@@ -30,7 +32,6 @@ export function instruction() {
 export function chat() {
     return {
         format(prompt) {
-            validateChatPrompt(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
@@ -38,13 +39,20 @@ export function chat() {
             for (const { role, content } of prompt.messages) {
                 switch (role) {
                     case "user": {
-                        …
+                        const textContent = validateContentIsString(content, prompt);
+                        messages.push({ role: "user", content: textContent });
                         break;
                     }
                     case "assistant": {
-                        messages.push({
+                        messages.push({
+                            role: "assistant",
+                            content: validateContentIsString(content, prompt),
+                        });
                         break;
                     }
+                    case "tool": {
+                        throw new InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
                     default: {
                         const _exhaustiveCheck = role;
                         throw new Error(`Unsupported role: ${_exhaustiveCheck}`);

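Both Mistral prompt templates now consume the shared `InstructionPrompt`/`ChatPrompt` types and route message content through `validateContentIsString`, which appears to reject multi-modal content for this text-only API. A rough sketch of the shapes involved (illustrative only; the image-part field names and the exact error behavior are inferred from the hunks above and from the new `ContentPart.d.ts` in the file list):

```ts
// Illustrative shapes, not the library's exact declarations.
type TextPart = { type: "text"; text: string };
type ImagePart = { type: "image"; base64Image: string }; // tag name is an assumption

type UserContent = string | Array<TextPart | ImagePart>;

type ChatPrompt = {
  system?: string;
  messages: Array<
    | { role: "user"; content: UserContent }
    | { role: "assistant"; content: UserContent }
    | { role: "tool"; content: unknown } // rejected by the Mistral template above
  >;
};

// validateContentIsString(content, prompt) returns the plain string, or throws an
// InvalidPromptError when the content contains non-text parts — so image parts and
// tool messages never reach the Mistral chat API.
```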
package/model-provider/ollama/OllamaChatModel.cjs

@@ -12,8 +12,7 @@ const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/gene
 const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
 const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
 const TextGenerationToolCallsOrGenerateTextModel_js_1 = require("../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs");
-const
-const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs");
+const createJsonStreamResponseHandler_js_1 = require("../../util/streaming/createJsonStreamResponseHandler.cjs");
 const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
 const OllamaChatPromptTemplate_js_1 = require("./OllamaChatPromptTemplate.cjs");
 const OllamaError_js_1 = require("./OllamaError.cjs");
@@ -133,6 +132,10 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
             responseFormat: exports.OllamaChatResponseFormat.deltaIterable,
         });
     }
+    extractTextDelta(delta) {
+        const chunk = delta;
+        return chunk.done === true ? undefined : chunk.message.content;
+    }
     asToolCallGenerationModel(promptTemplate) {
         return new TextGenerationToolCallModel_js_1.TextGenerationToolCallModel({
             model: this,
@@ -194,7 +197,7 @@ const ollamaChatResponseSchema = zod_1.z.object({
     eval_count: zod_1.z.number(),
     eval_duration: zod_1.z.number(),
 });
-const
+const ollamaChatStreamChunkSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.discriminatedUnion("done", [
     zod_1.z.object({
         done: zod_1.z.literal(false),
         model: zod_1.z.string(),
@@ -216,45 +219,6 @@ const ollamaChatStreamSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.discriminate
         eval_duration: zod_1.z.number(),
     }),
 ]));
-async function createOllamaFullDeltaIterableQueue(stream) {
-    const queue = new AsyncQueue_js_1.AsyncQueue();
-    let accumulatedText = "";
-    // process the stream asynchonously (no 'await' on purpose):
-    (0, parseJsonStream_js_1.parseJsonStream)({
-        stream,
-        schema: ollamaChatStreamSchema,
-        process(event) {
-            if (event.done === true) {
-                queue.push({
-                    type: "delta",
-                    fullDelta: {
-                        content: accumulatedText,
-                        isComplete: true,
-                        delta: "",
-                    },
-                    valueDelta: "",
-                });
-            }
-            else {
-                const deltaText = event.message.content;
-                accumulatedText += deltaText;
-                queue.push({
-                    type: "delta",
-                    fullDelta: {
-                        content: accumulatedText,
-                        isComplete: false,
-                        delta: deltaText,
-                    },
-                    valueDelta: deltaText,
-                });
-            }
-        },
-        onDone() {
-            queue.close();
-        },
-    });
-    return queue;
-}
 exports.OllamaChatResponseFormat = {
     /**
      * Returns the response as a JSON object.
@@ -303,6 +267,6 @@ exports.OllamaChatResponseFormat = {
      */
     deltaIterable: {
         stream: true,
-        handler:
+        handler: (0, createJsonStreamResponseHandler_js_1.createJsonStreamResponseHandler)(ollamaChatStreamChunkSchema),
     },
 };

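The hand-rolled `AsyncQueue`/`parseJsonStream` plumbing removed above is replaced by `createJsonStreamResponseHandler(ollamaChatStreamChunkSchema)`. The general idea behind such a handler is to treat the HTTP response body as newline-delimited JSON and validate each line against the chunk schema. A generic sketch of that technique (not the library's implementation; the `parse` callback stands in for the `ZodSchema`-based validation):

```ts
// Generic sketch: parse a newline-delimited JSON response body into typed chunks.
async function* parseJsonLines<T>(
  response: Response,
  parse: (value: unknown) => T // e.g. schema.parse — validator is an assumption
): AsyncGenerator<T> {
  const reader = response
    .body! // assumes a streaming, non-null body
    .pipeThrough(new TextDecoderStream())
    .getReader();
  let buffer = "";
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += value;
    const lines = buffer.split("\n");
    buffer = lines.pop() ?? ""; // keep the trailing partial line for the next read
    for (const line of lines) {
      if (line.trim().length > 0) {
        yield parse(JSON.parse(line));
      }
    }
  }
  if (buffer.trim().length > 0) {
    yield parse(JSON.parse(buffer)); // flush any final line without a trailing newline
  }
}
```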
package/model-provider/ollama/OllamaChatModel.d.ts

@@ -2,8 +2,8 @@ import { z } from "zod";
 import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { ResponseHandler } from "../../core/api/postToApi.js";
+import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { Delta } from "../../model-function/Delta.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
 import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
@@ -57,7 +57,26 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
             finishReason: "unknown";
         }[];
     }>;
-    doStreamText(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<
+    doStreamText(prompt: OllamaChatPrompt, options?: FunctionOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
+        model: string;
+        message: {
+            role: string;
+            content: string;
+        };
+        done: false;
+        created_at: string;
+    } | {
+        model: string;
+        done: true;
+        created_at: string;
+        total_duration: number;
+        prompt_eval_count: number;
+        eval_count: number;
+        eval_duration: number;
+        load_duration?: number | undefined;
+        prompt_eval_duration?: number | undefined;
+    }>>>;
+    extractTextDelta(delta: unknown): string | undefined;
     asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaChatPrompt, this>;
     asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, OllamaChatPrompt, this>;
     /**
@@ -67,11 +86,11 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
     /**
      * Returns this model with a chat prompt template.
      */
-    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").
+    withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, OllamaChatModelSettings, this>;
     withSettings(additionalSettings: Partial<OllamaChatModelSettings>): this;
 }
@@ -125,11 +144,26 @@ declare const ollamaChatResponseSchema: z.ZodObject<{
     prompt_eval_duration?: number | undefined;
 }>;
 export type OllamaChatResponse = z.infer<typeof ollamaChatResponseSchema>;
-…
-…
-…
-…
-…
+declare const ollamaChatStreamChunkSchema: ZodSchema<{
+    model: string;
+    message: {
+        role: string;
+        content: string;
+    };
+    done: false;
+    created_at: string;
+} | {
+    model: string;
+    done: true;
+    created_at: string;
+    total_duration: number;
+    prompt_eval_count: number;
+    eval_count: number;
+    eval_duration: number;
+    load_duration?: number | undefined;
+    prompt_eval_duration?: number | undefined;
+}>;
+export type OllamaChatStreamChunk = (typeof ollamaChatStreamChunkSchema)["_type"];
 export type OllamaChatResponseFormatType<T> = {
     stream: boolean;
     handler: ResponseHandler<T>;
@@ -165,10 +199,28 @@ export declare const OllamaChatResponseFormat: {
      * of the response stream.
      */
     deltaIterable: {
-        stream:
+        stream: boolean;
         handler: ({ response }: {
             response: Response;
-        }) => Promise<AsyncIterable<Delta<
+        }) => Promise<AsyncIterable<import("../../index.js").Delta<{
+            model: string;
+            message: {
+                role: string;
+                content: string;
+            };
+            done: false;
+            created_at: string;
+        } | {
+            model: string;
+            done: true;
+            created_at: string;
+            total_duration: number;
+            prompt_eval_count: number;
+            eval_count: number;
+            eval_duration: number;
+            load_duration?: number | undefined;
+            prompt_eval_duration?: number | undefined;
+        }>>>;
     };
 };
 export {};

package/model-provider/ollama/OllamaChatModel.js

@@ -9,8 +9,7 @@ import { PromptTemplateTextStreamingModel } from "../../model-function/generate-
 import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsOrGenerateTextModel, } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
-import {
-import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
+import { createJsonStreamResponseHandler } from "../../util/streaming/createJsonStreamResponseHandler.js";
 import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
 import { chat, instruction, text } from "./OllamaChatPromptTemplate.js";
 import { failedOllamaCallResponseHandler } from "./OllamaError.js";
@@ -130,6 +129,10 @@ export class OllamaChatModel extends AbstractModel {
             responseFormat: OllamaChatResponseFormat.deltaIterable,
         });
     }
+    extractTextDelta(delta) {
+        const chunk = delta;
+        return chunk.done === true ? undefined : chunk.message.content;
+    }
     asToolCallGenerationModel(promptTemplate) {
         return new TextGenerationToolCallModel({
             model: this,
@@ -190,7 +193,7 @@ const ollamaChatResponseSchema = z.object({
     eval_count: z.number(),
     eval_duration: z.number(),
 });
-const
+const ollamaChatStreamChunkSchema = new ZodSchema(z.discriminatedUnion("done", [
     z.object({
         done: z.literal(false),
         model: z.string(),
@@ -212,45 +215,6 @@ const ollamaChatStreamSchema = new ZodSchema(z.discriminatedUnion("done", [
         eval_duration: z.number(),
     }),
 ]));
-async function createOllamaFullDeltaIterableQueue(stream) {
-    const queue = new AsyncQueue();
-    let accumulatedText = "";
-    // process the stream asynchonously (no 'await' on purpose):
-    parseJsonStream({
-        stream,
-        schema: ollamaChatStreamSchema,
-        process(event) {
-            if (event.done === true) {
-                queue.push({
-                    type: "delta",
-                    fullDelta: {
-                        content: accumulatedText,
-                        isComplete: true,
-                        delta: "",
-                    },
-                    valueDelta: "",
-                });
-            }
-            else {
-                const deltaText = event.message.content;
-                accumulatedText += deltaText;
-                queue.push({
-                    type: "delta",
-                    fullDelta: {
-                        content: accumulatedText,
-                        isComplete: false,
-                        delta: deltaText,
-                    },
-                    valueDelta: deltaText,
-                });
-            }
-        },
-        onDone() {
-            queue.close();
-        },
-    });
-    return queue;
-}
 export const OllamaChatResponseFormat = {
     /**
      * Returns the response as a JSON object.
@@ -299,6 +263,6 @@ export const OllamaChatResponseFormat = {
      */
     deltaIterable: {
         stream: true,
-        handler:
+        handler: createJsonStreamResponseHandler(ollamaChatStreamChunkSchema),
     },
 };

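With the schema renamed to `ollamaChatStreamChunkSchema`, each stream chunk is a discriminated union on `done`, and the new `extractTextDelta` maps a chunk to the text fragment to emit. Restated with the (abridged) types from the declaration file above:

```ts
// Abridged from OllamaChatModel.d.ts above.
type OllamaChatStreamChunk =
  | { done: false; model: string; created_at: string; message: { role: string; content: string } }
  | { done: true; model: string; created_at: string; total_duration: number;
      prompt_eval_count: number; eval_count: number; eval_duration: number;
      load_duration?: number; prompt_eval_duration?: number };

// Mirrors the extractTextDelta added in the diff: the final "done" chunk carries
// timing statistics rather than text, so it produces no delta.
function extractTextDelta(chunk: OllamaChatStreamChunk): string | undefined {
  return chunk.done ? undefined : chunk.message.content;
}
```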
package/model-provider/ollama/OllamaChatModel.test.cjs

@@ -0,0 +1,27 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
+const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
+const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
+const OllamaChatModel_js_1 = require("./OllamaChatModel.cjs");
+describe("streamText", () => {
+    const server = new StreamingTestServer_js_1.StreamingTestServer("http://127.0.0.1:11434/api/chat");
+    server.setupTestEnvironment();
+    it("should return a text stream", async () => {
+        server.responseChunks = [
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":"Hello"},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":", "},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":"world!"},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:19.927399Z","message":{"role":"assistant","content":""},` +
+                `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
+                `"eval_count":317,"eval_duration":4639772000}\n`,
+        ];
+        const stream = await (0, streamText_js_1.streamText)(new OllamaChatModel_js_1.OllamaChatModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+        // note: space moved to last chunk bc of trimming
+        expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
+            "Hello",
+            ",",
+            " world!",
+        ]);
+    });
+});

package/model-provider/ollama/OllamaChatModel.test.d.ts

@@ -0,0 +1 @@
+export {};

package/model-provider/ollama/OllamaChatModel.test.js

@@ -0,0 +1,25 @@
+import { streamText } from "../../model-function/generate-text/streamText.js";
+import { StreamingTestServer } from "../../test/StreamingTestServer.js";
+import { arrayFromAsync } from "../../test/arrayFromAsync.js";
+import { OllamaChatModel } from "./OllamaChatModel.js";
+describe("streamText", () => {
+    const server = new StreamingTestServer("http://127.0.0.1:11434/api/chat");
+    server.setupTestEnvironment();
+    it("should return a text stream", async () => {
+        server.responseChunks = [
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":"Hello"},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":", "},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:17.948267Z","message":{"role":"assistant","content":"world!"},"done":false}\n`,
+            `{"model":"mistral:text","created_at":"2023-12-24T16:49:19.927399Z","message":{"role":"assistant","content":""},` +
+                `"done":true,"total_duration":4843619375,"load_duration":1101458,"prompt_eval_count":5,"prompt_eval_duration":199339000,` +
+                `"eval_count":317,"eval_duration":4639772000}\n`,
+        ];
+        const stream = await streamText(new OllamaChatModel({ model: "mistral:text" }).withTextPrompt(), "hello");
+        // note: space moved to last chunk bc of trimming
+        expect(await arrayFromAsync(stream)).toStrictEqual([
+            "Hello",
+            ",",
+            " world!",
+        ]);
+    });
+});

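These tests lean on two new helpers under `package/test/`: `StreamingTestServer`, which serves the `responseChunks` fixture at the given URL for the duration of the test, and `arrayFromAsync`, which drains an async iterable into an array for assertions. A minimal sketch of the latter (the shipped helper may differ in detail):

```ts
// Drain an AsyncIterable into an array — the behavior the assertions above rely on.
export async function arrayFromAsync<T>(iterable: AsyncIterable<T>): Promise<T[]> {
  const result: T[] = [];
  for await (const item of iterable) {
    result.push(item);
  }
  return result;
}
```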
package/model-provider/ollama/OllamaChatPromptTemplate.cjs

@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.chat = exports.instruction = exports.text = exports.identity = void 0;
-const
+const ContentPart_js_1 = require("../../model-function/generate-text/prompt-template/ContentPart.cjs");
+const InvalidPromptError_js_1 = require("../../model-function/generate-text/prompt-template/InvalidPromptError.cjs");
 /**
  * OllamaChatPrompt identity chat format.
  */
@@ -27,9 +28,15 @@ function instruction() {
         format(prompt) {
             const messages = [];
             if (prompt.system != null) {
-                messages.push({
+                messages.push({
+                    role: "system",
+                    content: prompt.system,
+                });
             }
-            messages.push({
+            messages.push({
+                role: "user",
+                ...extractUserContent(prompt.instruction),
+            });
             return messages;
         },
         stopSequences: [],
@@ -42,13 +49,34 @@ exports.instruction = instruction;
 function chat() {
     return {
         format(prompt) {
-            (0, ChatPrompt_js_1.validateChatPrompt)(prompt);
             const messages = [];
             if (prompt.system != null) {
                 messages.push({ role: "system", content: prompt.system });
             }
             for (const { role, content } of prompt.messages) {
-                …
+                switch (role) {
+                    case "user": {
+                        messages.push({
+                            role: "user",
+                            ...extractUserContent(content),
+                        });
+                        break;
+                    }
+                    case "assistant": {
+                        messages.push({
+                            role: "assistant",
+                            content: (0, ContentPart_js_1.validateContentIsString)(content, prompt),
+                        });
+                        break;
+                    }
+                    case "tool": {
+                        throw new InvalidPromptError_js_1.InvalidPromptError("Tool messages are not supported.", prompt);
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
             }
             return messages;
         },
@@ -56,21 +84,19 @@ function chat() {
     };
 }
 exports.chat = chat;
-function
+function extractUserContent(input) {
     if (typeof input === "string") {
         return { content: input, images: undefined };
     }
-…
-…
-…
-…
-…
-…
-…
-…
-        images.push(part.base64Image);
-    }
+    const images = [];
+    let content = "";
+    for (const part of input) {
+        if (part.type === "text") {
+            content += part.text;
+        }
+        else {
+            images.push(part.base64Image);
+        }
         }
-    return { content, images };
     }
+    return { content, images };
 }

package/model-provider/ollama/OllamaChatPromptTemplate.d.ts

@@ -1,6 +1,6 @@
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
-import {
-import {
+import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
 import { OllamaChatPrompt } from "./OllamaChatModel.js";
 /**
  * OllamaChatPrompt identity chat format.
@@ -13,8 +13,8 @@ export declare function text(): TextGenerationPromptTemplate<string, OllamaChatP
 /**
  * Formats an instruction prompt as an Ollama chat prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<
+export declare function instruction(): TextGenerationPromptTemplate<InstructionPrompt, OllamaChatPrompt>;
 /**
  * Formats a chat prompt as an Ollama chat prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<
+export declare function chat(): TextGenerationPromptTemplate<ChatPrompt, OllamaChatPrompt>;

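Unlike the Mistral template, the Ollama chat template keeps image parts: the new `extractUserContent` helper splits a user message into a `content` string plus an `images` array of base64 payloads, matching the Ollama chat API's message shape. A TypeScript restatement of that helper with the part types it appears to expect (type names are assumptions; the logic follows the hunk above):

```ts
// Part types as the helper appears to expect them (names are assumptions).
type UserTextPart = { type: "text"; text: string };
type UserImagePart = { type: "image"; base64Image: string };

function extractUserContent(input: string | Array<UserTextPart | UserImagePart>) {
  if (typeof input === "string") {
    return { content: input, images: undefined };
  }
  const images: string[] = [];
  let content = "";
  for (const part of input) {
    if (part.type === "text") {
      content += part.text;
    } else {
      images.push(part.base64Image);
    }
  }
  return { content, images };
}

// extractUserContent([{ type: "text", text: "Describe this:" },
//                     { type: "image", base64Image: "<base64 data>" }])
// → { content: "Describe this:", images: ["<base64 data>"] }
```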