modelfusion 0.103.0 → 0.105.0
- package/CHANGELOG.md +56 -0
- package/model-function/Delta.d.ts +1 -2
- package/model-function/executeStreamCall.cjs +6 -4
- package/model-function/executeStreamCall.d.ts +2 -2
- package/model-function/executeStreamCall.js +6 -4
- package/model-function/generate-speech/streamSpeech.cjs +1 -2
- package/model-function/generate-speech/streamSpeech.js +1 -2
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +25 -29
- package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts +3 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +25 -29
- package/model-function/generate-structure/StructureGenerationModel.d.ts +2 -0
- package/model-function/generate-structure/streamStructure.cjs +7 -8
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.js +7 -8
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +35 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +41 -0
- package/model-function/generate-text/PromptTemplateFullTextModel.js +31 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +3 -0
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +2 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +3 -0
- package/model-function/generate-text/TextGenerationModel.d.ts +2 -1
- package/model-function/generate-text/index.cjs +1 -0
- package/model-function/generate-text/index.d.ts +1 -0
- package/model-function/generate-text/index.js +1 -0
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs +2 -1
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.js +2 -1
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs +9 -5
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/ChatMLPromptTemplate.js +9 -5
- package/model-function/generate-text/prompt-template/ChatPrompt.cjs +38 -20
- package/model-function/generate-text/prompt-template/ChatPrompt.d.ts +33 -34
- package/model-function/generate-text/prompt-template/ChatPrompt.js +37 -18
- package/model-function/generate-text/prompt-template/ContentPart.cjs +11 -0
- package/model-function/generate-text/prompt-template/ContentPart.d.ts +30 -0
- package/model-function/generate-text/prompt-template/ContentPart.js +7 -0
- package/model-function/generate-text/prompt-template/InstructionPrompt.d.ts +7 -22
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs +40 -6
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.d.ts +16 -4
- package/model-function/generate-text/prompt-template/Llama2PromptTemplate.js +38 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs +10 -5
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js +10 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.cjs +8 -5
- package/model-function/generate-text/prompt-template/TextPromptTemplate.d.ts +4 -4
- package/model-function/generate-text/prompt-template/TextPromptTemplate.js +8 -5
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs +8 -4
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.d.ts +2 -2
- package/model-function/generate-text/prompt-template/VicunaPromptTemplate.js +8 -4
- package/model-function/generate-text/prompt-template/index.cjs +1 -1
- package/model-function/generate-text/prompt-template/index.d.ts +1 -1
- package/model-function/generate-text/prompt-template/index.js +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-template/trimChatPrompt.d.ts +4 -4
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +0 -2
- package/model-function/generate-text/streamText.cjs +27 -28
- package/model-function/generate-text/streamText.d.ts +1 -0
- package/model-function/generate-text/streamText.js +27 -28
- package/model-provider/anthropic/AnthropicPromptTemplate.cjs +9 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.d.ts +4 -4
- package/model-provider/anthropic/AnthropicPromptTemplate.js +9 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +13 -4
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +8 -14
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.cjs +44 -0
- package/model-provider/anthropic/AnthropicTextGenerationModel.test.js +42 -0
- package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -44
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +47 -13
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -45
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +33 -0
- package/model-provider/cohere/CohereTextGenerationModel.test.js +31 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -2
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -2
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.cjs +29 -17
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.d.ts +4 -4
- package/model-provider/llamacpp/LlamaCppBakLLaVA1PromptTemplate.js +29 -17
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +7 -14
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +157 -6
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +8 -15
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.cjs +37 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.d.ts +1 -0
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.test.js +35 -0
- package/model-provider/mistral/MistralChatModel.cjs +30 -104
- package/model-provider/mistral/MistralChatModel.d.ts +49 -16
- package/model-provider/mistral/MistralChatModel.js +30 -104
- package/model-provider/mistral/MistralChatModel.test.cjs +51 -0
- package/model-provider/mistral/MistralChatModel.test.d.ts +1 -0
- package/model-provider/mistral/MistralChatModel.test.js +49 -0
- package/model-provider/mistral/MistralPromptTemplate.cjs +13 -5
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/MistralPromptTemplate.js +13 -5
- package/model-provider/ollama/OllamaChatModel.cjs +7 -43
- package/model-provider/ollama/OllamaChatModel.d.ts +63 -11
- package/model-provider/ollama/OllamaChatModel.js +7 -43
- package/model-provider/ollama/OllamaChatModel.test.cjs +27 -0
- package/model-provider/ollama/OllamaChatModel.test.d.ts +1 -0
- package/model-provider/ollama/OllamaChatModel.test.js +25 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +43 -17
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +4 -4
- package/model-provider/ollama/OllamaChatPromptTemplate.js +43 -17
- package/model-provider/ollama/OllamaCompletionModel.cjs +22 -43
- package/model-provider/ollama/OllamaCompletionModel.d.ts +65 -9
- package/model-provider/ollama/OllamaCompletionModel.js +23 -44
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +101 -13
- package/model-provider/ollama/OllamaCompletionModel.test.js +78 -13
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.cjs → AbstractOpenAIChatModel.cjs} +71 -15
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.d.ts → AbstractOpenAIChatModel.d.ts} +273 -19
- package/model-provider/openai/{chat/AbstractOpenAIChatModel.js → AbstractOpenAIChatModel.js} +71 -15
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.cjs → OpenAIChatFunctionCallStructureGenerationModel.cjs} +18 -2
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts → OpenAIChatFunctionCallStructureGenerationModel.d.ts} +41 -11
- package/model-provider/openai/{chat/OpenAIChatFunctionCallStructureGenerationModel.js → OpenAIChatFunctionCallStructureGenerationModel.js} +18 -2
- package/model-provider/openai/{chat/OpenAIChatMessage.d.ts → OpenAIChatMessage.d.ts} +3 -3
- package/model-provider/openai/{chat/OpenAIChatModel.cjs → OpenAIChatModel.cjs} +5 -5
- package/model-provider/openai/{chat/OpenAIChatModel.d.ts → OpenAIChatModel.d.ts} +12 -12
- package/model-provider/openai/{chat/OpenAIChatModel.js → OpenAIChatModel.js} +5 -5
- package/model-provider/openai/OpenAIChatModel.test.cjs +94 -0
- package/model-provider/openai/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAIChatModel.test.js +92 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.cjs +114 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.d.ts +20 -0
- package/model-provider/openai/OpenAIChatPromptTemplate.js +107 -0
- package/model-provider/openai/OpenAICompletionModel.cjs +32 -84
- package/model-provider/openai/OpenAICompletionModel.d.ts +29 -12
- package/model-provider/openai/OpenAICompletionModel.js +33 -85
- package/model-provider/openai/OpenAICompletionModel.test.cjs +53 -0
- package/model-provider/openai/OpenAICompletionModel.test.d.ts +1 -0
- package/model-provider/openai/OpenAICompletionModel.test.js +51 -0
- package/model-provider/openai/OpenAICostCalculator.cjs +1 -1
- package/model-provider/openai/OpenAICostCalculator.js +1 -1
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -3
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/OpenAITranscriptionModel.d.ts +6 -6
- package/model-provider/openai/TikTokenTokenizer.d.ts +1 -1
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.cjs → countOpenAIChatMessageTokens.cjs} +2 -2
- package/model-provider/openai/{chat/countOpenAIChatMessageTokens.js → countOpenAIChatMessageTokens.js} +2 -2
- package/model-provider/openai/index.cjs +6 -6
- package/model-provider/openai/index.d.ts +5 -6
- package/model-provider/openai/index.js +5 -5
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +4 -4
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +6 -6
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +4 -4
- package/model-provider/stability/StabilityImageGenerationModel.d.ts +1 -1
- package/package.json +5 -5
- package/test/JsonTestServer.cjs +33 -0
- package/test/JsonTestServer.d.ts +7 -0
- package/test/JsonTestServer.js +29 -0
- package/test/StreamingTestServer.cjs +55 -0
- package/test/StreamingTestServer.d.ts +7 -0
- package/test/StreamingTestServer.js +51 -0
- package/test/arrayFromAsync.cjs +13 -0
- package/test/arrayFromAsync.d.ts +1 -0
- package/test/arrayFromAsync.js +9 -0
- package/util/streaming/createEventSourceResponseHandler.cjs +9 -0
- package/util/streaming/createEventSourceResponseHandler.d.ts +4 -0
- package/util/streaming/createEventSourceResponseHandler.js +5 -0
- package/util/streaming/createJsonStreamResponseHandler.cjs +9 -0
- package/util/streaming/createJsonStreamResponseHandler.d.ts +4 -0
- package/util/streaming/createJsonStreamResponseHandler.js +5 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.cjs +52 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseEventSourceStreamAsAsyncIterable.js +48 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.cjs +21 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.d.ts +6 -0
- package/util/streaming/parseJsonStreamAsAsyncIterable.js +17 -0
- package/model-function/generate-text/prompt-template/Content.cjs +0 -2
- package/model-function/generate-text/prompt-template/Content.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +0 -61
- package/model-provider/openai/chat/OpenAIChatModel.test.js +0 -59
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.cjs +0 -72
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +0 -20
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +0 -65
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +0 -156
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +0 -19
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +0 -152
- /package/{model-function/generate-text/prompt-template/Content.js → model-provider/anthropic/AnthropicTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/{openai/chat/OpenAIChatModel.test.d.ts → cohere/CohereTextGenerationModel.test.d.ts} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.cjs → OpenAIChatMessage.cjs} +0 -0
- /package/model-provider/openai/{chat/OpenAIChatMessage.js → OpenAIChatMessage.js} +0 -0
- /package/model-provider/openai/{chat/countOpenAIChatMessageTokens.d.ts → countOpenAIChatMessageTokens.d.ts} +0 -0
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,61 @@
 # Changelog
 
+## v0.105.0 - 2023-12-26
+
+### Added
+
+- Tool call support for chat prompts. Assistant messages can contain tool calls, and tool messages can contain tool call results. Tool calls can be used to implement e.g. agents:
+
+  ```ts
+  const chat: ChatPrompt = {
+    system: "You are ...",
+    messages: [ChatMessage.user({ text: instruction })],
+  };
+
+  while (true) {
+    const { text, toolResults } = await useToolsOrGenerateText(
+      openai
+        .ChatTextGenerator({ model: "gpt-4-1106-preview" })
+        .withChatPrompt(),
+      tools, // array of tools
+      chat
+    );
+
+    // add the assistant and tool messages to the chat:
+    chat.messages.push(
+      ChatMessage.assistant({ text, toolResults }),
+      ChatMessage.tool({ toolResults })
+    );
+
+    if (toolResults == null) {
+      return; // no more actions, break loop
+    }
+
+    // ... (handle tool results)
+  }
+  ```
+
+- `streamText` returns a `text` promise when invoked with `fullResponse: true`. After the streaming has finished, the promise resolves with the full text.
+
+  ```ts
+  const { text, textStream } = await streamText(
+    openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }).withTextPrompt(),
+    "Write a short story about a robot learning to love:",
+    { fullResponse: true }
+  );
+
+  // ... (handle streaming)
+
+  console.log(await text); // full text
+  ```
+
+## v0.104.0 - 2023-12-24
+
+### Changed
+
+- **breaking change**: Unified text and multimodal prompt templates. `[Text/MultiModal]InstructionPrompt` is now `InstructionPrompt`, and `[Text/MultiModal]ChatPrompt` is now `ChatPrompt`.
+- More flexible chat prompts: The chat prompt validation is now chat-template specific and validated at runtime. E.g. the Llama2 prompt template only supports turns of user and assistant messages, whereas other formats are more flexible.
+
 ## v0.103.0 - 2023-12-23
 
 ### Added
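
A note on the v0.104.0 entry: a minimal sketch of the unified prompt shapes, assuming `InstructionPrompt`, `ChatPrompt`, and the `ChatMessage` helpers (used in the v0.105.0 example above) are exported from the package root:

```ts
import { ChatMessage, ChatPrompt, InstructionPrompt } from "modelfusion";

// One InstructionPrompt type now covers text and multimodal instructions:
const instructionPrompt: InstructionPrompt = {
  system: "You are a helpful assistant.",
  instruction: "Summarize the following article.", // string or content parts
};

// One ChatPrompt type replaces the Text/MultiModal chat prompt variants:
const chatPrompt: ChatPrompt = {
  system: "You are a helpful assistant.",
  messages: [ChatMessage.user({ text: "Hello!" })],
};
```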
package/model-function/executeStreamCall.cjs
CHANGED
@@ -11,7 +11,7 @@ const getRun_js_1 = require("../core/getRun.cjs");
 const AsyncQueue_js_1 = require("../util/AsyncQueue.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
-async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished,
+async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, onDone, }) {
     const run = await (0, getRun_js_1.getRun)(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource_js_1.FunctionEventSource({
@@ -121,6 +121,7 @@ async function executeStreamCall({ model, options, input, functionType, startStr
             responseQueue.error(loopResult.error);
             return; // error is handled through queue
         }
+        onDone?.();
         const finishMetadata = {
             eventType: "finished",
             ...startMetadata,
@@ -131,7 +132,6 @@ async function executeStreamCall({ model, options, input, functionType, startStr
             ...finishMetadata,
             result: {
                 status: "success",
-                ...getResult(),
             },
         });
     }
@@ -140,7 +140,9 @@ async function executeStreamCall({ model, options, input, functionType, startStr
                 responseQueue.close();
             }
         })();
-        return
+        return {
+            stream: responseQueue,
+        };
     });
     if (!result.ok) {
         const finishMetadata = {
@@ -170,7 +172,7 @@ async function executeStreamCall({ model, options, input, functionType, startStr
         throw result.error;
     }
     return {
-        value: result.value,
+        value: result.value.stream,
         metadata: startMetadata,
     };
 }
package/model-function/executeStreamCall.d.ts
CHANGED
@@ -3,7 +3,7 @@ import { Delta } from "./Delta.js";
 import { Model, ModelSettings } from "./Model.js";
 import { ModelCallStartedEvent } from "./ModelCallEvent.js";
 import { ModelCallMetadata } from "./ModelCallMetadata.js";
-export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, startStream, processDelta, processFinished,
+export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Model<ModelSettings>>({ model, options, input, functionType, startStream, processDelta, processFinished, onDone, }: {
     model: MODEL;
     options?: FunctionOptions;
     input: unknown;
@@ -13,7 +13,7 @@ export declare function executeStreamCall<DELTA_VALUE, VALUE, MODEL extends Mode
         type: "delta";
     }) => VALUE | undefined;
     processFinished?: () => VALUE | undefined;
-
+    onDone?: () => void;
 }): Promise<{
     value: AsyncIterable<VALUE>;
     metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
package/model-function/executeStreamCall.js
CHANGED
@@ -8,7 +8,7 @@ import { getRun } from "../core/getRun.js";
 import { AsyncQueue } from "../util/AsyncQueue.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { runSafe } from "../util/runSafe.js";
-export async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished,
+export async function executeStreamCall({ model, options, input, functionType, startStream, processDelta, processFinished, onDone, }) {
     const run = await getRun(options?.run);
     const settings = model.settings;
     const eventSource = new FunctionEventSource({
@@ -118,6 +118,7 @@ export async function executeStreamCall({ model, options, input, functionType, s
         responseQueue.error(loopResult.error);
         return; // error is handled through queue
     }
+    onDone?.();
     const finishMetadata = {
         eventType: "finished",
         ...startMetadata,
@@ -128,7 +129,6 @@ export async function executeStreamCall({ model, options, input, functionType, s
         ...finishMetadata,
         result: {
             status: "success",
-            ...getResult(),
         },
     });
 }
@@ -137,7 +137,9 @@ export async function executeStreamCall({ model, options, input, functionType, s
             responseQueue.close();
         }
     })();
-    return
+    return {
+        stream: responseQueue,
+    };
 });
 if (!result.ok) {
     const finishMetadata = {
@@ -167,7 +169,7 @@ export async function executeStreamCall({ model, options, input, functionType, s
     throw result.error;
 }
 return {
-    value: result.value,
+    value: result.value.stream,
     metadata: startMetadata,
 };
 }
package/model-function/generate-speech/streamSpeech.cjs
CHANGED
@@ -21,8 +21,7 @@ async function streamSpeech(model, text, options) {
         model,
         options,
         startStream: async (options) => model.doGenerateSpeechStreamDuplex(textStream, options),
-        processDelta: (delta) => delta.
-        getResult: () => ({}),
+        processDelta: (delta) => delta.deltaValue,
     });
     return options?.fullResponse
         ? {
package/model-function/generate-speech/streamSpeech.js
CHANGED
@@ -18,8 +18,7 @@ export async function streamSpeech(model, text, options) {
         model,
         options,
         startStream: async (options) => model.doGenerateSpeechStreamDuplex(textStream, options),
-        processDelta: (delta) => delta.
-        getResult: () => ({}),
+        processDelta: (delta) => delta.deltaValue,
     });
     return options?.fullResponse
         ? {
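
Both `streamSpeech` variants now read `delta.deltaValue` because the `Delta` union was slimmed down (`Delta.d.ts +1 -2` in the file list). Its shape, as far as it can be inferred from the hunks in this diff (queues push `{ type: "delta", deltaValue }` and `{ type: "error", error }`):

```ts
// Inferred from this diff; see package/model-function/Delta.d.ts.
type Delta<T> =
  | { type: "delta"; deltaValue: T }
  | { type: "error"; error: unknown };
```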
package/model-function/generate-structure/StructureFromTextStreamingModel.cjs
CHANGED
@@ -11,35 +11,6 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
     constructor(options) {
         super(options);
     }
-    async doStreamStructure(schema, prompt, options) {
-        const textStream = await (0, streamText_js_1.streamText)(this.model, this.template.createPrompt(prompt, schema), options);
-        const queue = new AsyncQueue_js_1.AsyncQueue();
-        // run async on purpose:
-        (async () => {
-            try {
-                let fullText = "";
-                for await (const deltaText of textStream) {
-                    fullText += deltaText;
-                    const deltaStructure = (0, parsePartialJson_js_1.parsePartialJson)(fullText);
-                    // only publish parsable structures
-                    if (deltaStructure != null) {
-                        queue.push({
-                            type: "delta",
-                            fullDelta: fullText,
-                            valueDelta: deltaStructure,
-                        });
-                    }
-                }
-            }
-            catch (error) {
-                queue.push({ type: "error", error });
-            }
-            finally {
-                queue.close();
-            }
-        })();
-        return queue;
-    }
     async doGenerateStructure(schema, prompt, options) {
         const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
@@ -59,6 +30,31 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
             });
         }
     }
+    async doStreamStructure(schema, prompt, options) {
+        const textStream = await (0, streamText_js_1.streamText)(this.model, this.template.createPrompt(prompt, schema), options);
+        const queue = new AsyncQueue_js_1.AsyncQueue();
+        // run async on purpose:
+        (async () => {
+            try {
+                for await (const deltaText of textStream) {
+                    queue.push({ type: "delta", deltaValue: deltaText });
+                }
+            }
+            catch (error) {
+                queue.push({ type: "error", error });
+            }
+            finally {
+                queue.close();
+            }
+        })();
+        return queue;
+    }
+    extractStructureTextDelta(delta) {
+        return delta;
+    }
+    parseAccumulatedStructureText(accumulatedText) {
+        return (0, parsePartialJson_js_1.parsePartialJson)(accumulatedText);
+    }
     withSettings(additionalSettings) {
         return new StructureFromTextStreamingModel({
             model: this.model.withSettings(additionalSettings),
package/model-function/generate-structure/StructureFromTextStreamingModel.d.ts
CHANGED
@@ -12,11 +12,13 @@ export declare class StructureFromTextStreamingModel<SOURCE_PROMPT, TARGET_PROMP
         model: MODEL;
         template: StructureFromTextPromptTemplate<SOURCE_PROMPT, TARGET_PROMPT>;
     });
-    doStreamStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: SOURCE_PROMPT, options?: FunctionOptions): Promise<AsyncQueue<Delta<unknown>>>;
     doGenerateStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: SOURCE_PROMPT, options?: FunctionOptions): Promise<{
         response: unknown;
         value: unknown;
         valueText: string;
     }>;
+    doStreamStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: SOURCE_PROMPT, options?: FunctionOptions): Promise<AsyncQueue<Delta<string>>>;
+    extractStructureTextDelta(delta: unknown): string;
+    parseAccumulatedStructureText(accumulatedText: string): unknown;
     withSettings(additionalSettings: Partial<MODEL["settings"]>): this;
 }
package/model-function/generate-structure/StructureFromTextStreamingModel.js
CHANGED
@@ -8,35 +8,6 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
     constructor(options) {
         super(options);
     }
-    async doStreamStructure(schema, prompt, options) {
-        const textStream = await streamText(this.model, this.template.createPrompt(prompt, schema), options);
-        const queue = new AsyncQueue();
-        // run async on purpose:
-        (async () => {
-            try {
-                let fullText = "";
-                for await (const deltaText of textStream) {
-                    fullText += deltaText;
-                    const deltaStructure = parsePartialJson(fullText);
-                    // only publish parsable structures
-                    if (deltaStructure != null) {
-                        queue.push({
-                            type: "delta",
-                            fullDelta: fullText,
-                            valueDelta: deltaStructure,
-                        });
-                    }
-                }
-            }
-            catch (error) {
-                queue.push({ type: "error", error });
-            }
-            finally {
-                queue.close();
-            }
-        })();
-        return queue;
-    }
     async doGenerateStructure(schema, prompt, options) {
         const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
             ...options,
@@ -56,6 +27,31 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
             });
         }
     }
+    async doStreamStructure(schema, prompt, options) {
+        const textStream = await streamText(this.model, this.template.createPrompt(prompt, schema), options);
+        const queue = new AsyncQueue();
+        // run async on purpose:
+        (async () => {
+            try {
+                for await (const deltaText of textStream) {
+                    queue.push({ type: "delta", deltaValue: deltaText });
+                }
+            }
+            catch (error) {
+                queue.push({ type: "error", error });
+            }
+            finally {
+                queue.close();
+            }
+        })();
+        return queue;
+    }
+    extractStructureTextDelta(delta) {
+        return delta;
+    }
+    parseAccumulatedStructureText(accumulatedText) {
+        return parsePartialJson(accumulatedText);
+    }
     withSettings(additionalSettings) {
         return new StructureFromTextStreamingModel({
             model: this.model.withSettings(additionalSettings),
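
The accumulate-and-parse logic removed from `doStreamStructure` above does not disappear: the model now just streams raw text deltas and exposes the two hooks `extractStructureTextDelta` and `parseAccumulatedStructureText`, which `streamStructure` (below) drives. A standalone sketch of that division of labor, using the inferred `Delta` shape from earlier (simplified; `streamStructure` additionally deduplicates snapshots with `isDeepEqualData` before publishing):

```ts
// Sketch of the consumer-side loop, not the library source:
async function collectPartialStructures(
  model: StructureStreamingModel<string>,
  deltas: AsyncIterable<Delta<unknown>>
): Promise<unknown[]> {
  const snapshots: unknown[] = [];
  let accumulatedText = "";
  for await (const delta of deltas) {
    if (delta.type !== "delta") continue;
    const textDelta = model.extractStructureTextDelta(delta.deltaValue);
    if (textDelta == null) continue; // no text in this delta
    accumulatedText += textDelta;
    // best-effort parse of the partial JSON seen so far:
    snapshots.push(model.parseAccumulatedStructureText(accumulatedText));
  }
  return snapshots;
}
```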
package/model-function/generate-structure/StructureGenerationModel.d.ts
CHANGED
@@ -19,4 +19,6 @@ export interface StructureGenerationModel<PROMPT, SETTINGS extends StructureGene
 }
 export interface StructureStreamingModel<PROMPT, SETTINGS extends StructureGenerationModelSettings = StructureGenerationModelSettings> extends StructureGenerationModel<PROMPT, SETTINGS> {
     doStreamStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
+    extractStructureTextDelta(delta: unknown): string | undefined;
+    parseAccumulatedStructureText(accumulatedText: string): unknown;
 }
package/model-function/generate-structure/streamStructure.cjs
CHANGED
@@ -8,8 +8,8 @@ async function streamStructure(model, schema, prompt, options) {
     const expandedPrompt = typeof prompt === "function"
         ? prompt(schema)
         : prompt;
+    let accumulatedText = "";
     let lastStructure;
-    let lastFullDelta;
     const fullResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
         functionType: "stream-structure",
         input: prompt,
@@ -17,11 +17,14 @@ async function streamStructure(model, schema, prompt, options) {
         options,
         startStream: async (options) => model.doStreamStructure(schema, expandedPrompt, options),
         processDelta: (delta) => {
-            const
-
+            const textDelta = model.extractStructureTextDelta(delta.deltaValue);
+            if (textDelta == null) {
+                return undefined;
+            }
+            accumulatedText += textDelta;
+            const latestStructure = model.parseAccumulatedStructureText(accumulatedText);
             // only send a new part into the stream when the partial structure has changed:
             if (!(0, isDeepEqualData_js_1.isDeepEqualData)(lastStructure, latestStructure)) {
-                lastFullDelta = latestFullDelta;
                 lastStructure = latestStructure;
                 return {
                     isComplete: false,
@@ -42,10 +45,6 @@ async function streamStructure(model, schema, prompt, options) {
                 value: parseResult.data,
             };
         },
-        getResult: () => ({
-            response: lastFullDelta,
-            value: lastStructure,
-        }),
     });
     return options?.fullResponse
         ? {
package/model-function/generate-structure/streamStructure.d.ts
CHANGED
@@ -27,7 +27,7 @@ export type StructureStreamPart<STRUCTURE> = {
  * @example
  * const structureStream = await streamStructure(
  *   openai.ChatTextGenerator(...).asFunctionCallStructureGenerationModel(...),
- *
+ *   zodSchema(
  *   z.array(
  *     z.object({
  *       name: z.string(),
package/model-function/generate-structure/streamStructure.js
CHANGED
@@ -5,8 +5,8 @@ export async function streamStructure(model, schema, prompt, options) {
     const expandedPrompt = typeof prompt === "function"
         ? prompt(schema)
         : prompt;
+    let accumulatedText = "";
     let lastStructure;
-    let lastFullDelta;
     const fullResponse = await executeStreamCall({
         functionType: "stream-structure",
         input: prompt,
@@ -14,11 +14,14 @@ export async function streamStructure(model, schema, prompt, options) {
         options,
         startStream: async (options) => model.doStreamStructure(schema, expandedPrompt, options),
         processDelta: (delta) => {
-            const
-
+            const textDelta = model.extractStructureTextDelta(delta.deltaValue);
+            if (textDelta == null) {
+                return undefined;
+            }
+            accumulatedText += textDelta;
+            const latestStructure = model.parseAccumulatedStructureText(accumulatedText);
             // only send a new part into the stream when the partial structure has changed:
             if (!isDeepEqualData(lastStructure, latestStructure)) {
-                lastFullDelta = latestFullDelta;
                 lastStructure = latestStructure;
                 return {
                     isComplete: false,
@@ -39,10 +42,6 @@ export async function streamStructure(model, schema, prompt, options) {
                 value: parseResult.data,
            };
         },
-        getResult: () => ({
-            response: lastFullDelta,
-            value: lastStructure,
-        }),
     });
     return options?.fullResponse
         ? {
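
Caller-facing `streamStructure` usage is unchanged by this refactor. A hedged completion of the truncated `@example` from `streamStructure.d.ts` above (the model id, `fnName`, and schema fields are illustrative, not taken from the source):

```ts
import { openai, streamStructure, zodSchema } from "modelfusion";
import { z } from "zod";

const structureStream = await streamStructure(
  openai
    .ChatTextGenerator({ model: "gpt-4-1106-preview" })
    .asFunctionCallStructureGenerationModel({ fnName: "generateCharacter" })
    .withTextPrompt(),
  zodSchema(
    z.object({
      name: z.string(),
      hometown: z.string(),
    })
  ),
  "Generate a fantasy character."
);

for await (const part of structureStream) {
  if (!part.isComplete) {
    console.log("partial:", part.value); // unknown; may change between parts
  } else {
    console.log("final:", part.value); // validated against the schema
  }
}
```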
package/model-function/generate-text/PromptTemplateFullTextModel.cjs
ADDED
@@ -0,0 +1,35 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.PromptTemplateFullTextModel = void 0;
+const PromptTemplateTextStreamingModel_js_1 = require("./PromptTemplateTextStreamingModel.cjs");
+class PromptTemplateFullTextModel extends PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel {
+    constructor(options) {
+        super(options);
+    }
+    doGenerateToolCall(tool, prompt, options) {
+        const mappedPrompt = this.promptTemplate.format(prompt);
+        return this.model.doGenerateToolCall(tool, mappedPrompt, options);
+    }
+    doGenerateToolCallsOrText(tools, prompt, options) {
+        const mappedPrompt = this.promptTemplate.format(prompt);
+        return this.model.doGenerateToolCallsOrText(tools, mappedPrompt, options);
+    }
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateFullTextModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptTemplate.stopSequences,
+                ],
+            }),
+            promptTemplate,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new PromptTemplateFullTextModel({
+            model: this.model.withSettings(additionalSettings),
+            promptTemplate: this.promptTemplate,
+        });
+    }
+}
+exports.PromptTemplateFullTextModel = PromptTemplateFullTextModel;
package/model-function/generate-text/PromptTemplateFullTextModel.d.ts
ADDED
@@ -0,0 +1,41 @@
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { ToolDefinition } from "../../tool/ToolDefinition.js";
+import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
+import { ToolCallsOrTextGenerationModel } from "../../tool/generate-tool-calls-or-text/ToolCallsOrTextGenerationModel.js";
+import { PromptTemplateTextStreamingModel } from "./PromptTemplateTextStreamingModel.js";
+import { TextGenerationModelSettings, TextStreamingModel } from "./TextGenerationModel.js";
+import { TextGenerationPromptTemplate } from "./TextGenerationPromptTemplate.js";
+export declare class PromptTemplateFullTextModel<PROMPT, MODEL_PROMPT, SETTINGS extends TextGenerationModelSettings, MODEL extends TextStreamingModel<MODEL_PROMPT, SETTINGS> & ToolCallGenerationModel<MODEL_PROMPT, SETTINGS> & ToolCallsOrTextGenerationModel<MODEL_PROMPT, SETTINGS>> extends PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETTINGS, MODEL> implements TextStreamingModel<PROMPT, SETTINGS>, ToolCallGenerationModel<PROMPT, SETTINGS>, ToolCallsOrTextGenerationModel<PROMPT, SETTINGS> {
+    constructor(options: {
+        model: MODEL;
+        promptTemplate: TextGenerationPromptTemplate<PROMPT, MODEL_PROMPT>;
+    });
+    doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: PROMPT, options?: FunctionOptions | undefined): PromiseLike<{
+        response: unknown;
+        toolCall: {
+            id: string;
+            args: unknown;
+        } | null;
+        usage?: {
+            promptTokens: number;
+            completionTokens: number;
+            totalTokens: number;
+        } | undefined;
+    }>;
+    doGenerateToolCallsOrText(tools: ToolDefinition<string, unknown>[], prompt: PROMPT, options?: FunctionOptions | undefined): PromiseLike<{
+        response: unknown;
+        text: string | null;
+        toolCalls: {
+            id: string;
+            name: string;
+            args: unknown;
+        }[] | null;
+        usage?: {
+            promptTokens: number;
+            completionTokens: number;
+            totalTokens: number;
+        } | undefined;
+    }>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateFullTextModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
+    withSettings(additionalSettings: Partial<SETTINGS>): this;
+}
package/model-function/generate-text/PromptTemplateFullTextModel.js
ADDED
@@ -0,0 +1,31 @@
+import { PromptTemplateTextStreamingModel } from "./PromptTemplateTextStreamingModel.js";
+export class PromptTemplateFullTextModel extends PromptTemplateTextStreamingModel {
+    constructor(options) {
+        super(options);
+    }
+    doGenerateToolCall(tool, prompt, options) {
+        const mappedPrompt = this.promptTemplate.format(prompt);
+        return this.model.doGenerateToolCall(tool, mappedPrompt, options);
+    }
+    doGenerateToolCallsOrText(tools, prompt, options) {
+        const mappedPrompt = this.promptTemplate.format(prompt);
+        return this.model.doGenerateToolCallsOrText(tools, mappedPrompt, options);
+    }
+    withPromptTemplate(promptTemplate) {
+        return new PromptTemplateFullTextModel({
+            model: this.withSettings({
+                stopSequences: [
+                    ...(this.settings.stopSequences ?? []),
+                    ...promptTemplate.stopSequences,
+                ],
+            }),
+            promptTemplate,
+        });
+    }
+    withSettings(additionalSettings) {
+        return new PromptTemplateFullTextModel({
+            model: this.model.withSettings(additionalSettings),
+            promptTemplate: this.promptTemplate,
+        });
+    }
+}
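
`PromptTemplateFullTextModel` is the piece that lets a prompt-templated model keep its tool-call capabilities: it forwards `doGenerateToolCall` / `doGenerateToolCallsOrText` through the same `promptTemplate.format(...)` mapping used for text generation. This is what makes the agent loop in the v0.105.0 changelog entry work. A hedged usage sketch (that `.withChatPrompt()` on a tool-capable chat model yields this class is an assumption based on the changelog example):

```ts
import { openai, useToolsOrGenerateText } from "modelfusion";

const model = openai
  .ChatTextGenerator({ model: "gpt-4-1106-preview" })
  .withChatPrompt(); // wrapped model still supports tool calls

// `tools` and `chat` as in the v0.105.0 changelog example above:
const { text, toolResults } = await useToolsOrGenerateText(model, tools, chat);
```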
package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs
CHANGED
@@ -11,6 +11,9 @@ class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerationModel
         const mappedPrompt = this.promptTemplate.format(prompt);
         return this.model.doStreamText(mappedPrompt, options);
     }
+    extractTextDelta(delta) {
+        return this.model.extractTextDelta(delta);
+    }
     asStructureGenerationModel(promptTemplate) {
         return new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
             model: this,
package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts
CHANGED
@@ -9,7 +9,8 @@ export declare class PromptTemplateTextStreamingModel<PROMPT, MODEL_PROMPT, SETT
         model: MODEL;
         promptTemplate: TextGenerationPromptTemplate<PROMPT, MODEL_PROMPT>;
     });
-    doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<import("../Delta.js").Delta<
+    doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<import("../Delta.js").Delta<unknown>>>;
+    extractTextDelta(delta: unknown): string | undefined;
     asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, PROMPT>): StructureFromTextStreamingModel<INPUT_PROMPT, PROMPT, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): PromptTemplateTextStreamingModel<INPUT_PROMPT, PROMPT, SETTINGS, this>;
     withSettings(additionalSettings: Partial<SETTINGS>): this;
package/model-function/generate-text/PromptTemplateTextStreamingModel.js
CHANGED
@@ -8,6 +8,9 @@ export class PromptTemplateTextStreamingModel extends PromptTemplateTextGenerati
         const mappedPrompt = this.promptTemplate.format(prompt);
         return this.model.doStreamText(mappedPrompt, options);
     }
+    extractTextDelta(delta) {
+        return this.model.extractTextDelta(delta);
+    }
     asStructureGenerationModel(promptTemplate) {
         return new StructureFromTextStreamingModel({
             model: this,
package/model-function/generate-text/TextGenerationModel.d.ts
CHANGED
@@ -74,6 +74,7 @@ export interface TextGenerationModel<PROMPT, SETTINGS extends TextGenerationMode
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextGenerationModel<INPUT_PROMPT, SETTINGS>;
 }
 export interface TextStreamingModel<PROMPT, SETTINGS extends TextGenerationModelSettings = TextGenerationModelSettings> extends TextGenerationModel<PROMPT, SETTINGS> {
-    doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<Delta<
+    doStreamText(prompt: PROMPT, options?: FunctionOptions): PromiseLike<AsyncIterable<Delta<unknown>>>;
+    extractTextDelta(delta: unknown): string | undefined;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, PROMPT>): TextStreamingModel<INPUT_PROMPT, SETTINGS>;
 }
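
The `TextStreamingModel` interface change splits streaming into a transport step (`doStreamText`, yielding provider-specific `Delta<unknown>` chunks) and a pure extraction step (`extractTextDelta`). A sketch of the provider side, using the `Delta` shape sketched earlier and an entirely hypothetical raw chunk format:

```ts
// Hypothetical raw chunk shape for an imaginary provider API:
type RawChunk = { choices: Array<{ text?: string }> };

class ExampleStreamingModelSketch {
  async doStreamText(prompt: string): Promise<AsyncIterable<Delta<RawChunk>>> {
    // transport: call the provider and adapt its wire format to Delta<RawChunk>
    throw new Error("transport omitted in this sketch");
  }

  extractTextDelta(delta: unknown): string | undefined {
    // extraction: pull plain text out of one raw chunk; undefined skips it
    return (delta as RawChunk).choices[0]?.text;
  }
}
```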
package/model-function/generate-text/index.cjs
CHANGED
@@ -14,6 +14,7 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./PromptTemplateFullTextModel.cjs"), exports);
 __exportStar(require("./PromptTemplateTextGenerationModel.cjs"), exports);
 __exportStar(require("./PromptTemplateTextStreamingModel.cjs"), exports);
 __exportStar(require("./TextGenerationEvent.cjs"), exports);
package/model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs
CHANGED
@@ -1,6 +1,7 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.instruction = exports.text = void 0;
+const ContentPart_js_1 = require("./ContentPart.cjs");
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
@@ -67,7 +68,7 @@ function instruction() {
     if (prompt.system != null) {
         text += `${prompt.system}\n`;
     }
-    text += prompt.instruction;
+    text += (0, ContentPart_js_1.validateContentIsString)(prompt.instruction, prompt);
     if (prompt.input != null) {
         text += `\n\n### Input:\n${prompt.input}`;
     }
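
The switch from `text += prompt.instruction` to `validateContentIsString(...)` reflects the unified `InstructionPrompt`: `instruction` may now be multimodal content, and a text-only template like Alpaca has to reject it at runtime instead of silently coercing it. A sketch of the expected behavior (the content-part shape and the fact that an error is thrown are assumptions; see `ContentPart.d.ts` in the file list):

```ts
const template = instruction(); // the Alpaca template from the hunk above

// string instruction: formatted as before
template.format({ instruction: "Summarize this article." });

// multimodal instruction: now rejected by validateContentIsString
template.format({
  instruction: [{ type: "image", base64Image: "..." }], // assumed part shape
}); // throws at runtime: Alpaca supports only string instructions
```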
|