modelfusion 0.97.0 → 0.99.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +15 -8
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
- package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
- package/model-function/Model.d.ts +2 -2
- package/model-function/embed/embed.cjs +14 -2
- package/model-function/embed/embed.d.ts +6 -6
- package/model-function/embed/embed.js +14 -2
- package/model-function/generate-image/generateImage.cjs +10 -9
- package/model-function/generate-image/generateImage.d.ts +4 -6
- package/model-function/generate-image/generateImage.js +10 -9
- package/model-function/generate-speech/generateSpeech.cjs +7 -1
- package/model-function/generate-speech/generateSpeech.d.ts +3 -3
- package/model-function/generate-speech/generateSpeech.js +7 -1
- package/model-function/generate-speech/streamSpeech.cjs +6 -1
- package/model-function/generate-speech/streamSpeech.d.ts +3 -3
- package/model-function/generate-speech/streamSpeech.js +6 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -5
- package/model-function/generate-structure/generateStructure.cjs +7 -1
- package/model-function/generate-structure/generateStructure.d.ts +3 -3
- package/model-function/generate-structure/generateStructure.js +7 -1
- package/model-function/generate-structure/streamStructure.cjs +6 -1
- package/model-function/generate-structure/streamStructure.d.ts +3 -3
- package/model-function/generate-structure/streamStructure.js +6 -1
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +2 -2
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +2 -2
- package/model-function/generate-text/TextGenerationModel.d.ts +31 -5
- package/model-function/generate-text/generateText.cjs +15 -3
- package/model-function/generate-text/generateText.d.ts +4 -3
- package/model-function/generate-text/generateText.js +15 -3
- package/model-function/generate-text/prompt-template/trimChatPrompt.cjs +1 -1
- package/model-function/generate-text/prompt-template/trimChatPrompt.js +1 -1
- package/model-function/generate-text/streamText.cjs +6 -1
- package/model-function/generate-text/streamText.d.ts +3 -3
- package/model-function/generate-text/streamText.js +6 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +2 -2
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +27 -31
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +27 -31
- package/model-provider/cohere/CohereFacade.cjs +1 -1
- package/model-provider/cohere/CohereFacade.d.ts +1 -1
- package/model-provider/cohere/CohereFacade.js +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +5 -5
- package/model-provider/cohere/CohereTextGenerationModel.cjs +34 -43
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +3 -4
- package/model-provider/cohere/CohereTextGenerationModel.js +34 -43
- package/model-provider/huggingface/HuggingFaceFacade.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceFacade.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceFacade.js +1 -1
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +31 -41
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +31 -41
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +4 -4
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +4 -4
- package/model-provider/mistral/MistralTextGenerationModel.cjs +5 -5
- package/model-provider/mistral/MistralTextGenerationModel.d.ts +2 -2
- package/model-provider/mistral/MistralTextGenerationModel.js +5 -5
- package/model-provider/ollama/OllamaTextGenerationModel.cjs +4 -4
- package/model-provider/ollama/OllamaTextGenerationModel.d.ts +2 -2
- package/model-provider/ollama/OllamaTextGenerationModel.js +4 -4
- package/model-provider/openai/OpenAICompletionModel.cjs +48 -53
- package/model-provider/openai/OpenAICompletionModel.d.ts +3 -6
- package/model-provider/openai/OpenAICompletionModel.js +48 -53
- package/model-provider/openai/OpenAIFacade.cjs +2 -2
- package/model-provider/openai/OpenAIFacade.d.ts +2 -2
- package/model-provider/openai/OpenAIFacade.js +2 -2
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +51 -55
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +36 -8
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +51 -55
- package/model-provider/openai/chat/OpenAIChatModel.cjs +3 -3
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.js +3 -3
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +61 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.js +59 -0
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +8 -3
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +8 -3
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +2 -2
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +2 -2
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +1 -1
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +1 -1
- package/tool/execute-tool/executeTool.d.ts +2 -2
- package/tool/execute-tool/executeTool.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -4
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -4
- package/tool/generate-tool-call/generateToolCall.cjs +7 -1
- package/tool/generate-tool-call/generateToolCall.d.ts +3 -3
- package/tool/generate-tool-call/generateToolCall.js +7 -1
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +4 -4
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +4 -4
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs +1 -1
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts +2 -2
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js +1 -1
package/README.md
CHANGED
@@ -85,7 +85,10 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.

 ```ts
-import { streamText, openai } from "modelfusion";
+import { streamText, openai, OpenAIChatMessage } from "modelfusion";
+import { readFileSync } from "fs";
+
+const image = readFileSync("./image.png").toString("base64");

 const textStream = await streamText(
 openai.ChatTextGenerator({ model: "gpt-4-vision-preview" }),
@@ -96,9 +99,13 @@ const textStream = await streamText(
 ]),
 ]
 );
+
+for await (const textPart of textStream) {
+process.stdout.write(textPart);
+}
 ```

-Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [OpenAI compatible](https://modelfusion.dev/integration/model-provider/openaicompatible), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)

 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)

@@ -204,7 +211,7 @@ const sentiment = await generateStructure(
 .ChatTextGenerator({
 model: "gpt-3.5-turbo",
 temperature: 0,
-
+maxGenerationTokens: 50,
 })
 .asFunctionCallStructureGenerationModel({ fnName: "sentiment" })
 .withInstructionPrompt(),
@@ -534,7 +541,7 @@ const text = await generateText(
 llamacpp
 .TextGenerator({
 contextWindowSize: 4096, // Llama 2 context window size
-
+maxGenerationTokens: 1000,
 })
 .withTextPromptTemplate(Llama2Prompt.instruction()),
 {
@@ -608,18 +615,18 @@ const image = await generateImage(

 ### Metadata and original responses

-ModelFusion model functions return rich
+ModelFusion model functions return rich responses that include the original response and metadata when you set the `fullResponse` option to `true`.

 ```ts
 // access the full response (needs to be typed) and the metadata:
-const {
+const { text, response, metadata } = await generateText(
 openai.CompletionTextGenerator({
 model: "gpt-3.5-turbo-instruct",
-
+maxGenerationTokens: 1000,
 n: 2, // generate 2 completions
 }),
 "Write a short story about a robot learning to love:\n\n",
-{
+{ fullResponse: true }
 );

 console.log(metadata);
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs
CHANGED
@@ -10,7 +10,7 @@ const summarizeRecursively_js_1 = require("./summarizeRecursively.cjs");
 * while leaving enough space for the model to generate text.
 */
 async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({ text, model, prompt, tokenLimit = model.contextWindowSize -
-(model.settings.
+(model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), join, }, options) {
 const emptyPromptTokens = await model.countPromptTokens(await prompt({ text: "" }));
 return (0, summarizeRecursively_js_1.summarizeRecursively)({
 split: (0, splitRecursively_js_1.splitAtToken)({
package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js
CHANGED
@@ -7,7 +7,7 @@ import { summarizeRecursively } from "./summarizeRecursively.js";
 * while leaving enough space for the model to generate text.
 */
 export async function summarizeRecursivelyWithTextGenerationAndTokenSplitting({ text, model, prompt, tokenLimit = model.contextWindowSize -
-(model.settings.
+(model.settings.maxGenerationTokens ?? model.contextWindowSize / 4), join, }, options) {
 const emptyPromptTokens = await model.countPromptTokens(await prompt({ text: "" }));
 return summarizeRecursively({
 split: splitAtToken({
package/model-function/Model.d.ts
CHANGED
@@ -20,11 +20,11 @@ export interface Model<SETTINGS extends ModelSettings> {
 * @example
 * const model = new OpenAICompletionModel({
 * model: "gpt-3.5-turbo-instruct",
-*
+* maxGenerationTokens: 500,
 * });
 *
 * const modelWithMoreTokens = model.withSettings({
-*
+* maxGenerationTokens: 1000,
 * });
 */
 withSettings(additionalSettings: Partial<SETTINGS>): this;
package/model-function/embed/embed.cjs
CHANGED
@@ -43,7 +43,13 @@ async function embedMany(model, values, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+embeddings: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.embedMany = embedMany;
 async function embed(model, value, options) {
@@ -60,6 +66,12 @@ async function embed(model, value, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+embedding: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.embed = embed;
package/model-function/embed/embed.d.ts
CHANGED
@@ -23,12 +23,12 @@ import { EmbeddingModel, EmbeddingModelSettings } from "./EmbeddingModel.js";
 * @returns {Promise<Vector[]>} - A promise that resolves to an array of vectors representing the embeddings.
 */
 export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<Vector[]>;
 export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, values: VALUE[], options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+embeddings: Vector[];
 response: unknown;
 metadata: ModelCallMetadata;
 }>;
@@ -50,12 +50,12 @@ export declare function embedMany<VALUE>(model: EmbeddingModel<VALUE, EmbeddingM
 * @returns {Promise<Vector>} - A promise that resolves to a vector representing the embedding.
 */
 export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<Vector>;
 export declare function embed<VALUE>(model: EmbeddingModel<VALUE, EmbeddingModelSettings>, value: VALUE, options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+embedding: Vector;
 response: unknown;
 metadata: ModelCallMetadata;
 }>;
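Taken together, the overloads above gate the richer result behind a single `fullResponse` flag. A minimal usage sketch; the `openai.TextEmbedder` facade and model name are used purely for illustration and are not part of this diff:

```ts
import { embed, embedMany, openai } from "modelfusion";

// illustrative embedding model; any EmbeddingModel works the same way
const embeddingModel = openai.TextEmbedder({ model: "text-embedding-ada-002" });

// default (fullResponse omitted or false): plain vector(s)
const embedding = await embed(embeddingModel, "sample text");
const embeddings = await embedMany(embeddingModel, ["first text", "second text"]);

// fullResponse: true additionally exposes the raw provider response and call metadata
const { embedding: vector, response, metadata } = await embed(embeddingModel, "sample text", {
  fullResponse: true,
});
```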
package/model-function/embed/embed.js
CHANGED
@@ -40,7 +40,13 @@ export async function embedMany(model, values, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+embeddings: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 export async function embed(model, value, options) {
 const fullResponse = await executeStandardCall({
@@ -56,5 +62,11 @@ export async function embed(model, value, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+embedding: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
package/model-function/generate-image/generateImage.cjs
CHANGED
@@ -16,14 +16,15 @@ async function generateImage(model, prompt, options) {
 };
 },
 });
-
-
-
-
-
-
-
-
-
+const imageBase64 = fullResponse.value;
+const image = Buffer.from(imageBase64, "base64");
+return options?.fullResponse
+? {
+image,
+imageBase64,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: image;
 }
 exports.generateImage = generateImage;
package/model-function/generate-image/generateImage.d.ts
CHANGED
@@ -27,15 +27,13 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
 * The image is a Buffer containing the image data in PNG format.
 */
 export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<Buffer>;
 export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
-
-}): Promise<string>;
-export declare function generateImage<PROMPT>(model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
-returnType: "full";
+fullResponse: true;
 }): Promise<{
-
+image: Buffer;
+imageBase64: string;
 response: unknown;
 metadata: ModelCallMetadata;
 }>;
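These overloads replace the separate base64-string and `returnType: "full"` variants with a single `fullResponse: true` form that returns both the Buffer and the base64 string. A sketch of the new call shape; the `openai.ImageGenerator` facade and model name are illustrative assumptions, not part of this diff:

```ts
import { generateImage, openai } from "modelfusion";

// illustrative image model; any ImageGenerationModel works the same way
const imageModel = openai.ImageGenerator({ model: "dall-e-3" });

// default: a Buffer with the generated image data
const imageBuffer = await generateImage(imageModel, "a lighthouse in a storm");

// fullResponse: true also exposes the base64 string, raw response, and metadata
const { image, imageBase64, response, metadata } = await generateImage(
  imageModel,
  "a lighthouse in a storm",
  { fullResponse: true }
);
```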
package/model-function/generate-image/generateImage.js
CHANGED
@@ -13,13 +13,14 @@ export async function generateImage(model, prompt, options) {
 };
 },
 });
-
-
-
-
-
-
-
-
-
+const imageBase64 = fullResponse.value;
+const image = Buffer.from(imageBase64, "base64");
+return options?.fullResponse
+? {
+image,
+imageBase64,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: image;
 }
package/model-function/generate-speech/generateSpeech.cjs
CHANGED
@@ -16,6 +16,12 @@ async function generateSpeech(model, text, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+speech: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.generateSpeech = generateSpeech;
package/model-function/generate-speech/generateSpeech.d.ts
CHANGED
@@ -21,12 +21,12 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
 * @returns {Promise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
 */
 export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenerationModelSettings>, text: string, options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<Buffer>;
 export declare function generateSpeech(model: SpeechGenerationModel<SpeechGenerationModelSettings>, text: string, options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+speech: Buffer;
 response: unknown;
 metadata: ModelCallMetadata;
 }>;
package/model-function/generate-speech/generateSpeech.js
CHANGED
@@ -13,5 +13,11 @@ export async function generateSpeech(model, text, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+speech: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
package/model-function/generate-speech/streamSpeech.cjs
CHANGED
@@ -24,6 +24,11 @@ async function streamSpeech(model, text, options) {
 processDelta: (delta) => delta.valueDelta,
 getResult: () => ({}),
 });
-return options?.
+return options?.fullResponse
+? {
+speechStream: fullResponse.value,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.streamSpeech = streamSpeech;
package/model-function/generate-speech/streamSpeech.d.ts
CHANGED
@@ -27,11 +27,11 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
 * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
 */
 export declare function streamSpeech(model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>, text: AsyncIterable<string> | string, options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<AsyncIterable<Buffer>>;
 export declare function streamSpeech(model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>, text: AsyncIterable<string> | string, options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+speechStream: AsyncIterable<Buffer>;
 metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
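For the streaming variant, `fullResponse: true` wraps the chunk stream together with partial call metadata; there is no `response` field, since the call is still in flight. A sketch assuming the `elevenlabs.SpeechGenerator` facade with a placeholder voice id, both illustrative and not part of this diff:

```ts
import { streamSpeech, elevenlabs } from "modelfusion";

// illustrative streaming speech model; any StreamingSpeechGenerationModel works the same way
const speechModel = elevenlabs.SpeechGenerator({ voice: "pNInz6obpgDQGcFmaJgB" });

// fullResponse: true returns the audio chunk stream plus partial metadata
const { speechStream, metadata } = await streamSpeech(speechModel, "Hello, world!", {
  fullResponse: true,
});

for await (const audioChunk of speechStream) {
  // forward each Buffer chunk to a player, file, or HTTP response
}
```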
package/model-function/generate-speech/streamSpeech.js
CHANGED
@@ -21,5 +21,10 @@ export async function streamSpeech(model, text, options) {
 processDelta: (delta) => delta.valueDelta,
 getResult: () => ({}),
 });
-return options?.
+return options?.fullResponse
+? {
+speechStream: fullResponse.value,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
package/model-function/generate-structure/StructureFromTextGenerationModel.cjs
CHANGED
@@ -30,20 +30,20 @@ class StructureFromTextGenerationModel {
 return this.model.settingsForEvent;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { response,
+const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
 ...options,
-
+fullResponse: true,
 });
 try {
 return {
 response,
-value: this.template.extractStructure(
-valueText:
+value: this.template.extractStructure(text),
+valueText: text,
 };
 }
 catch (error) {
 throw new StructureParseError_js_1.StructureParseError({
-valueText:
+valueText: text,
 cause: error,
 });
 }
package/model-function/generate-structure/StructureFromTextGenerationModel.js
CHANGED
@@ -27,20 +27,20 @@ export class StructureFromTextGenerationModel {
 return this.model.settingsForEvent;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { response,
+const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
 ...options,
-
+fullResponse: true,
 });
 try {
 return {
 response,
-value: this.template.extractStructure(
-valueText:
+value: this.template.extractStructure(text),
+valueText: text,
 };
 }
 catch (error) {
 throw new StructureParseError({
-valueText:
+valueText: text,
 cause: error,
 });
 }
package/model-function/generate-structure/StructureFromTextStreamingModel.cjs
CHANGED
@@ -41,20 +41,20 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
 return queue;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { response,
+const { response, text } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, schema), {
 ...options,
-
+fullResponse: true,
 });
 try {
 return {
 response,
-value: this.template.extractStructure(
-valueText:
+value: this.template.extractStructure(text),
+valueText: text,
 };
 }
 catch (error) {
 throw new StructureParseError_js_1.StructureParseError({
-valueText:
+valueText: text,
 cause: error,
 });
 }
package/model-function/generate-structure/StructureFromTextStreamingModel.js
CHANGED
@@ -38,20 +38,20 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
 return queue;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { response,
+const { response, text } = await generateText(this.model, this.template.createPrompt(prompt, schema), {
 ...options,
-
+fullResponse: true,
 });
 try {
 return {
 response,
-value: this.template.extractStructure(
-valueText:
+value: this.template.extractStructure(text),
+valueText: text,
 };
 }
 catch (error) {
 throw new StructureParseError({
-valueText:
+valueText: text,
 cause: error,
 });
 }
package/model-function/generate-structure/generateStructure.cjs
CHANGED
@@ -32,6 +32,12 @@ async function generateStructure(model, schema, prompt, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+structure: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.generateStructure = generateStructure;
package/model-function/generate-structure/generateStructure.d.ts
CHANGED
@@ -38,12 +38,12 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
 * @returns {Promise<STRUCTURE>} - Returns a promise that resolves to the generated structure.
 */
 export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(model: StructureGenerationModel<PROMPT, SETTINGS>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<STRUCTURE>;
 export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(model: StructureGenerationModel<PROMPT, SETTINGS>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+structure: STRUCTURE;
 response: unknown;
 metadata: ModelCallMetadata;
 }>;
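The same pattern applies to structure generation: the default overload resolves to the typed structure, while `fullResponse: true` adds the raw response and call metadata. A sketch mirroring the sentiment example from the README hunk above; the `zodSchema` helper and the instruction-prompt shape are assumptions made for illustration:

```ts
import { generateStructure, openai, zodSchema } from "modelfusion";
import { z } from "zod";

const sentimentSchema = zodSchema(
  z.object({ sentiment: z.enum(["positive", "neutral", "negative"]) })
);

// chat model turned into a structure generation model, as in the README example
const structureModel = openai
  .ChatTextGenerator({ model: "gpt-3.5-turbo", temperature: 0, maxGenerationTokens: 50 })
  .asFunctionCallStructureGenerationModel({ fnName: "sentiment" })
  .withInstructionPrompt();

// fullResponse: true adds the raw response and call metadata to the typed structure
const { structure, response, metadata } = await generateStructure(
  structureModel,
  sentimentSchema,
  { instruction: "Classify the sentiment: I love this library!" },
  { fullResponse: true }
);
```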
package/model-function/generate-structure/generateStructure.js
CHANGED
@@ -29,5 +29,11 @@ export async function generateStructure(model, schema, prompt, options) {
 };
 },
 });
-return options?.
+return options?.fullResponse
+? {
+structure: fullResponse.value,
+response: fullResponse.response,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
package/model-function/generate-structure/streamStructure.cjs
CHANGED
@@ -47,6 +47,11 @@ async function streamStructure(model, schema, prompt, options) {
 value: lastStructure,
 }),
 });
-return options?.
+return options?.fullResponse
+? {
+structureStream: fullResponse.value,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
 exports.streamStructure = streamStructure;
package/model-function/generate-structure/streamStructure.d.ts
CHANGED
@@ -69,11 +69,11 @@ export type StructureStreamPart<STRUCTURE> = {
 * and a value that is either the partial structure or the final structure.
 */
 export declare function streamStructure<STRUCTURE, PROMPT>(model: StructureStreamingModel<PROMPT>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options?: FunctionOptions & {
-
+fullResponse?: false;
 }): Promise<AsyncIterable<StructureStreamPart<STRUCTURE>>>;
 export declare function streamStructure<STRUCTURE, PROMPT>(model: StructureStreamingModel<PROMPT>, schema: Schema<STRUCTURE> & JsonSchemaProducer, prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT), options: FunctionOptions & {
-
+fullResponse: true;
 }): Promise<{
-
+structureStream: AsyncIterable<StructureStreamPart<STRUCTURE>>;
 metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
package/model-function/generate-structure/streamStructure.js
CHANGED
@@ -44,5 +44,10 @@ export async function streamStructure(model, schema, prompt, options) {
 value: lastStructure,
 }),
 });
-return options?.
+return options?.fullResponse
+? {
+structureStream: fullResponse.value,
+metadata: fullResponse.metadata,
+}
+: fullResponse.value;
 }
package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs
CHANGED
@@ -40,9 +40,9 @@ class PromptTemplateTextGenerationModel {
 }
 return ((prompt) => originalCountPromptTokens(this.promptTemplate.format(prompt)));
 }
-
+doGenerateTexts(prompt, options) {
 const mappedPrompt = this.promptTemplate.format(prompt);
-return this.model.
+return this.model.doGenerateTexts(mappedPrompt, options);
 }
 get settingsForEvent() {
 return this.model.settingsForEvent;
package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts
CHANGED
@@ -17,9 +17,9 @@ export declare class PromptTemplateTextGenerationModel<PROMPT, MODEL_PROMPT, SET
 get tokenizer(): MODEL["tokenizer"];
 get contextWindowSize(): MODEL["contextWindowSize"];
 get countPromptTokens(): MODEL["countPromptTokens"] extends undefined ? undefined : (prompt: PROMPT) => PromiseLike<number>;
-
+doGenerateTexts(prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
 response: unknown;
-
+texts: string[];
 usage?: {
 promptTokens: number;
 completionTokens: number;
package/model-function/generate-text/PromptTemplateTextGenerationModel.js
CHANGED
@@ -37,9 +37,9 @@ export class PromptTemplateTextGenerationModel {
 }
 return ((prompt) => originalCountPromptTokens(this.promptTemplate.format(prompt)));
 }
-
+doGenerateTexts(prompt, options) {
 const mappedPrompt = this.promptTemplate.format(prompt);
-return this.model.
+return this.model.doGenerateTexts(mappedPrompt, options);
 }
 get settingsForEvent() {
 return this.model.settingsForEvent;