modelfusion 0.121.2 → 0.122.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +41 -1
- package/README.md +86 -84
- package/classifier/SemanticClassifier.cjs +8 -2
- package/classifier/SemanticClassifier.js +8 -2
- package/model-function/ModelCallEvent.d.ts +3 -0
- package/model-function/embed/embed.cjs +14 -14
- package/model-function/embed/embed.d.ts +24 -18
- package/model-function/embed/embed.js +14 -14
- package/model-function/generate-image/generateImage.cjs +6 -6
- package/model-function/generate-image/generateImage.d.ts +12 -9
- package/model-function/generate-image/generateImage.js +6 -6
- package/model-function/generate-speech/generateSpeech.cjs +7 -7
- package/model-function/generate-speech/generateSpeech.d.ts +12 -9
- package/model-function/generate-speech/generateSpeech.js +7 -7
- package/model-function/generate-speech/streamSpeech.cjs +6 -6
- package/model-function/generate-speech/streamSpeech.d.ts +12 -8
- package/model-function/generate-speech/streamSpeech.js +6 -6
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -3
- package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -3
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -1
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -1
- package/model-function/generate-structure/StructureGenerationModel.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.cjs +8 -8
- package/model-function/generate-structure/generateStructure.d.ts +17 -10
- package/model-function/generate-structure/generateStructure.js +8 -8
- package/model-function/generate-structure/streamStructure.cjs +6 -6
- package/model-function/generate-structure/streamStructure.d.ts +16 -10
- package/model-function/generate-structure/streamStructure.js +6 -6
- package/model-function/generate-text/generateText.cjs +6 -6
- package/model-function/generate-text/generateText.d.ts +12 -9
- package/model-function/generate-text/generateText.js +6 -6
- package/model-function/generate-text/streamText.cjs +6 -6
- package/model-function/generate-text/streamText.d.ts +12 -8
- package/model-function/generate-text/streamText.js +6 -6
- package/model-function/generate-transcription/generateTranscription.cjs +3 -3
- package/model-function/generate-transcription/generateTranscription.d.ts +12 -9
- package/model-function/generate-transcription/generateTranscription.js +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +12 -12
- package/model-provider/cohere/CohereTextGenerationModel.test.cjs +7 -4
- package/model-provider/cohere/CohereTextGenerationModel.test.js +7 -4
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +10 -10
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.cjs +4 -1
- package/model-provider/llamacpp/LlamaCppCompletionModel.test.js +4 -1
- package/model-provider/mistral/MistralChatModel.test.cjs +15 -8
- package/model-provider/mistral/MistralChatModel.test.js +15 -8
- package/model-provider/ollama/OllamaChatModel.test.cjs +6 -1
- package/model-provider/ollama/OllamaChatModel.test.js +6 -1
- package/model-provider/ollama/OllamaCompletionModel.test.cjs +31 -16
- package/model-provider/ollama/OllamaCompletionModel.test.js +31 -16
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.cjs +4 -4
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.js +4 -4
- package/model-provider/openai/OpenAIChatModel.test.cjs +21 -14
- package/model-provider/openai/OpenAIChatModel.test.js +21 -14
- package/model-provider/openai/OpenAICompletionModel.test.cjs +15 -9
- package/model-provider/openai/OpenAICompletionModel.test.js +15 -9
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +5 -5
- package/tool/execute-tool/executeTool.d.ts +8 -4
- package/tool/execute-tool/executeTool.js +5 -5
- package/tool/execute-tool/safeExecuteToolCall.cjs +1 -1
- package/tool/execute-tool/safeExecuteToolCall.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -2
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -2
- package/tool/generate-tool-call/generateToolCall.cjs +7 -7
- package/tool/generate-tool-call/generateToolCall.d.ts +11 -5
- package/tool/generate-tool-call/generateToolCall.js +7 -7
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.cjs +4 -2
- package/tool/generate-tool-calls/TextGenerationToolCallsModel.js +4 -2
- package/tool/generate-tool-calls/generateToolCalls.cjs +3 -3
- package/tool/generate-tool-calls/generateToolCalls.d.ts +11 -5
- package/tool/generate-tool-calls/generateToolCalls.js +3 -3
- package/tool/use-tool/useTool.cjs +2 -2
- package/tool/use-tool/useTool.d.ts +5 -1
- package/tool/use-tool/useTool.js +2 -2
- package/tool/use-tools/useTools.cjs +8 -2
- package/tool/use-tools/useTools.d.ts +5 -1
- package/tool/use-tools/useTools.js +8 -2
- package/vector-index/VectorIndexRetriever.cjs +5 -1
- package/vector-index/VectorIndexRetriever.js +5 -1
- package/vector-index/upsertIntoVectorIndex.cjs +5 -1
- package/vector-index/upsertIntoVectorIndex.js +5 -1
package/model-function/generate-image/generateImage.cjs

```diff
@@ -2,8 +2,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateImage = void 0;
 const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
-async function generateImage(model, prompt, options) {
-const
+async function generateImage({ model, prompt, fullResponse, ...options }) {
+const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
 functionType: "generate-image",
 input: prompt,
 model,
@@ -16,16 +16,16 @@ async function generateImage(model, prompt, options) {
 };
 },
 });
-const imagesBase64 =
+const imagesBase64 = callResponse.value;
 const images = imagesBase64.map((imageBase64) => Buffer.from(imageBase64, "base64"));
-return
+return fullResponse
 ? {
 image: images[0],
 imageBase64: imagesBase64[0],
 images,
 imagesBase64,
-rawResponse:
-metadata:
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
 : images[0];
 }
```
package/model-function/generate-image/generateImage.d.ts

```diff
@@ -11,27 +11,30 @@ import { ImageGenerationModel, ImageGenerationModelSettings } from "./ImageGener
 * @see https://modelfusion.dev/guide/function/generate-image
 *
 * @example
-* const image = await generateImage(
-* stability.ImageGenerator(...),
-* [
+* const image = await generateImage({
+* imageGenerator: stability.ImageGenerator(...),
+* prompt: [
 * { text: "the wicked witch of the west" },
 * { text: "style of early 19th century painting", weight: 0.5 },
 * ]
-* );
+* });
 *
 * @param {ImageGenerationModel<PROMPT, ImageGenerationModelSettings>} model - The image generation model to be used.
 * @param {PROMPT} prompt - The prompt to be used for image generation.
-* @param {FunctionOptions} [options] - Optional settings for the function.
 *
 * @returns {Promise} - Returns a promise that resolves to the generated image.
 * The image is a Buffer containing the image data in PNG format.
 */
-export declare function generateImage<PROMPT>(
+export declare function generateImage<PROMPT>(args: {
+model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>;
+prompt: PROMPT;
 fullResponse?: false;
-}): Promise<Buffer>;
-export declare function generateImage<PROMPT>(
+} & FunctionOptions): Promise<Buffer>;
+export declare function generateImage<PROMPT>(args: {
+model: ImageGenerationModel<PROMPT, ImageGenerationModelSettings>;
+prompt: PROMPT;
 fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
 image: Buffer;
 imageBase64: string;
 images: Buffer[];
```
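For orientation, a minimal sketch of the new object-argument call style that 0.122.0 introduces for `generateImage`, based on the declarations and JSDoc above. This is not part of the published diff, and the `stability.ImageGenerator` settings are illustrative placeholders. Note that the declaration names the argument `model`, while the updated JSDoc example uses `imageGenerator` for the same property.

```ts
import { generateImage, stability } from "modelfusion";

// Default call: resolves to a Buffer with the first generated image.
const image = await generateImage({
  model: stability.ImageGenerator({ model: "stable-diffusion-v1-6" }), // placeholder settings
  prompt: [
    { text: "the wicked witch of the west" },
    { text: "style of early 19th century painting", weight: 0.5 },
  ],
});

// fullResponse: true selects the second overload and returns the full result object.
const { images, imagesBase64, rawResponse, metadata } = await generateImage({
  model: stability.ImageGenerator({ model: "stable-diffusion-v1-6" }), // placeholder settings
  prompt: [{ text: "the wicked witch of the west" }],
  fullResponse: true,
});
```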
package/model-function/generate-image/generateImage.js

```diff
@@ -1,6 +1,6 @@
 import { executeStandardCall } from "../executeStandardCall.js";
-export async function generateImage(model, prompt, options) {
-const
+export async function generateImage({ model, prompt, fullResponse, ...options }) {
+const callResponse = await executeStandardCall({
 functionType: "generate-image",
 input: prompt,
 model,
@@ -13,16 +13,16 @@ export async function generateImage(model, prompt, options) {
 };
 },
 });
-const imagesBase64 =
+const imagesBase64 = callResponse.value;
 const images = imagesBase64.map((imageBase64) => Buffer.from(imageBase64, "base64"));
-return
+return fullResponse
 ? {
 image: images[0],
 imageBase64: imagesBase64[0],
 images,
 imagesBase64,
-rawResponse:
-metadata:
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
 : images[0];
 }
```
package/model-function/generate-speech/generateSpeech.cjs

```diff
@@ -2,8 +2,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateSpeech = void 0;
 const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
-async function generateSpeech(model, text, options) {
-const
+async function generateSpeech({ model, text, fullResponse, ...options }) {
+const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
 functionType: "generate-speech",
 input: text,
 model,
@@ -16,12 +16,12 @@ async function generateSpeech(model, text, options) {
 };
 },
 });
-return
+return fullResponse
 ? {
-speech:
-rawResponse:
-metadata:
+speech: callResponse.value,
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
 exports.generateSpeech = generateSpeech;
```
package/model-function/generate-speech/generateSpeech.d.ts

```diff
@@ -8,24 +8,27 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
 * @see https://modelfusion.dev/guide/function/generate-speech
 *
 * @example
-* const speech = await generateSpeech(
-* lmnt.SpeechGenerator(...),
-* "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
+* const speech = await generateSpeech({
+* model: lmnt.SpeechGenerator(...),
+* text: "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
 * "as The Rolling Stones unveil 'Hackney Diamonds.'
-* );
+* });
 *
 * @param {SpeechGenerationModel<SpeechGenerationModelSettings>} model - The speech generation model.
 * @param {string} text - The text to be converted to speech.
-* @param {FunctionOptions} [options] - Optional function options.
 *
 * @returns {Promise<Buffer>} - A promise that resolves to a buffer containing the synthesized speech.
 */
-export declare function generateSpeech(
+export declare function generateSpeech(args: {
+model: SpeechGenerationModel<SpeechGenerationModelSettings>;
+text: string;
 fullResponse?: false;
-}): Promise<Buffer>;
-export declare function generateSpeech(
+} & FunctionOptions): Promise<Buffer>;
+export declare function generateSpeech(args: {
+model: SpeechGenerationModel<SpeechGenerationModelSettings>;
+text: string;
 fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
 speech: Buffer;
 rawResponse: unknown;
 metadata: ModelCallMetadata;
```
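A minimal sketch of the corresponding `generateSpeech` call in 0.122.0, following the declarations above. It is not part of the diff; the `lmnt.SpeechGenerator` voice setting is a placeholder.

```ts
import { generateSpeech, lmnt } from "modelfusion";

// New object-style call; resolves to a Buffer with the synthesized audio.
const speech = await generateSpeech({
  model: lmnt.SpeechGenerator({ voice: "your-voice-id" }), // placeholder voice id
  text: "Good evening, ladies and gentlemen!",
});

// fullResponse: true also exposes the provider's raw response and call metadata.
const { speech: audio, rawResponse, metadata } = await generateSpeech({
  model: lmnt.SpeechGenerator({ voice: "your-voice-id" }), // placeholder voice id
  text: "Good evening, ladies and gentlemen!",
  fullResponse: true,
});
```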
package/model-function/generate-speech/generateSpeech.js

```diff
@@ -1,6 +1,6 @@
 import { executeStandardCall } from "../executeStandardCall.js";
-export async function generateSpeech(model, text, options) {
-const
+export async function generateSpeech({ model, text, fullResponse, ...options }) {
+const callResponse = await executeStandardCall({
 functionType: "generate-speech",
 input: text,
 model,
@@ -13,11 +13,11 @@ export async function generateSpeech(model, text, options) {
 };
 },
 });
-return
+return fullResponse
 ? {
-speech:
-rawResponse:
-metadata:
+speech: callResponse.value,
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
```
package/model-function/generate-speech/streamSpeech.cjs

```diff
@@ -3,7 +3,7 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.streamSpeech = void 0;
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
 const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
-async function streamSpeech(model, text, options) {
+async function streamSpeech({ model, text, fullResponse, ...options }) {
 let textStream;
 // simulate a stream with a single value for a string input:
 if (typeof text === "string") {
@@ -15,7 +15,7 @@ async function streamSpeech(model, text, options) {
 else {
 textStream = text;
 }
-const
+const callResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
 functionType: "stream-speech",
 input: text,
 model,
@@ -23,11 +23,11 @@ async function streamSpeech(model, text, options) {
 startStream: async (options) => model.doGenerateSpeechStreamDuplex(textStream, options),
 processDelta: (delta) => delta.deltaValue,
 });
-return
+return fullResponse
 ? {
-speechStream:
-metadata:
+speechStream: callResponse.value,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
 exports.streamSpeech = streamSpeech;
```
package/model-function/generate-speech/streamSpeech.d.ts

```diff
@@ -11,10 +11,10 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
 * @example
 * const textStream = await streamText(...);
 *
-* const speechStream = await streamSpeech(
-* elevenlabs.SpeechGenerator(...),
-* textStream
-* );
+* const speechStream = await streamSpeech({
+* model: elevenlabs.SpeechGenerator(...),
+* text: textStream
+* });
 *
 * for await (const speechPart of speechStream) {
 * // ...
@@ -26,12 +26,16 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
 *
 * @returns {AsyncIterableResultPromise<Buffer>} An async iterable promise that contains the synthesized speech chunks.
 */
-export declare function streamSpeech(
+export declare function streamSpeech(args: {
+model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>;
+text: AsyncIterable<string> | string;
 fullResponse?: false;
-}): Promise<AsyncIterable<Buffer>>;
-export declare function streamSpeech(
+} & FunctionOptions): Promise<AsyncIterable<Buffer>>;
+export declare function streamSpeech(args: {
+model: StreamingSpeechGenerationModel<SpeechGenerationModelSettings>;
+text: AsyncIterable<string> | string;
 fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
 speechStream: AsyncIterable<Buffer>;
 metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
```
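A sketch of how `streamSpeech` might be chained with `streamText` under the new object-argument API, following the declarations above (`text` accepts a string or an `AsyncIterable<string>`). The OpenAI model id and ElevenLabs voice are placeholders, not taken from this diff:

```ts
import { elevenlabs, openai, streamSpeech, streamText } from "modelfusion";

// streamText also uses object arguments as of this release.
const textStream = await streamText({
  model: openai.CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" }), // placeholder model
  prompt: "Write a short story about a robot learning to love:",
});

const speechStream = await streamSpeech({
  model: elevenlabs.SpeechGenerator({ voice: "your-voice-id" }), // placeholder voice
  text: textStream,
});

const chunks: Buffer[] = [];
for await (const part of speechStream) {
  chunks.push(part); // each part is a Buffer with an audio chunk
}
```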
package/model-function/generate-speech/streamSpeech.js

```diff
@@ -1,6 +1,6 @@
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { executeStreamCall } from "../executeStreamCall.js";
-export async function streamSpeech(model, text, options) {
+export async function streamSpeech({ model, text, fullResponse, ...options }) {
 let textStream;
 // simulate a stream with a single value for a string input:
 if (typeof text === "string") {
@@ -12,7 +12,7 @@ export async function streamSpeech(model, text, options) {
 else {
 textStream = text;
 }
-const
+const callResponse = await executeStreamCall({
 functionType: "stream-speech",
 input: text,
 model,
@@ -20,10 +20,10 @@ export async function streamSpeech(model, text, options) {
 startStream: async (options) => model.doGenerateSpeechStreamDuplex(textStream, options),
 processDelta: (delta) => delta.deltaValue,
 });
-return
+return fullResponse
 ? {
-speechStream:
-metadata:
+speechStream: callResponse.value,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
```
package/model-function/generate-structure/StructureFromTextGenerationModel.cjs

```diff
@@ -39,13 +39,15 @@ class StructureFromTextGenerationModel {
 return this.model;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { rawResponse
-
+const { rawResponse, text } = await (0, generateText_js_1.generateText)({
+model: this.model,
+prompt: this.template.createPrompt(prompt, schema),
 fullResponse: true,
+...options,
 });
 try {
 return {
-
+rawResponse,
 value: this.template.extractStructure(text),
 valueText: text,
 };
```
package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts

```diff
@@ -16,7 +16,7 @@ export declare class StructureFromTextGenerationModel<SOURCE_PROMPT, TARGET_PROM
 get settingsForEvent(): Partial<MODEL["settings"]>;
 getModelWithJsonOutput(schema: Schema<unknown> & JsonSchemaProducer): MODEL;
 doGenerateStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: SOURCE_PROMPT, options?: FunctionOptions): Promise<{
-
+rawResponse: unknown;
 value: unknown;
 valueText: string;
 }>;
```
package/model-function/generate-structure/StructureFromTextGenerationModel.js

```diff
@@ -36,13 +36,15 @@ export class StructureFromTextGenerationModel {
 return this.model;
 }
 async doGenerateStructure(schema, prompt, options) {
-const { rawResponse
-
+const { rawResponse, text } = await generateText({
+model: this.model,
+prompt: this.template.createPrompt(prompt, schema),
 fullResponse: true,
+...options,
 });
 try {
 return {
-
+rawResponse,
 value: this.template.extractStructure(text),
 valueText: text,
 };
```
package/model-function/generate-structure/StructureFromTextStreamingModel.cjs

```diff
@@ -10,7 +10,11 @@ class StructureFromTextStreamingModel extends StructureFromTextGenerationModel_j
 super(options);
 }
 async doStreamStructure(schema, prompt, options) {
-const textStream = await (0, streamText_js_1.streamText)(
+const textStream = await (0, streamText_js_1.streamText)({
+model: this.model,
+prompt: this.template.createPrompt(prompt, schema),
+...options,
+});
 const queue = new AsyncQueue_js_1.AsyncQueue();
 // run async on purpose:
 (async () => {
```
package/model-function/generate-structure/StructureFromTextStreamingModel.js

```diff
@@ -7,7 +7,11 @@ export class StructureFromTextStreamingModel extends StructureFromTextGeneration
 super(options);
 }
 async doStreamStructure(schema, prompt, options) {
-const textStream = await streamText(
+const textStream = await streamText({
+model: this.model,
+prompt: this.template.createPrompt(prompt, schema),
+...options,
+});
 const queue = new AsyncQueue();
 // run async on purpose:
 (async () => {
```
package/model-function/generate-structure/StructureGenerationModel.d.ts

```diff
@@ -7,7 +7,7 @@ export interface StructureGenerationModelSettings extends ModelSettings {
 }
 export interface StructureGenerationModel<PROMPT, SETTINGS extends StructureGenerationModelSettings = StructureGenerationModelSettings> extends Model<SETTINGS> {
 doGenerateStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
-
+rawResponse: unknown;
 valueText: string;
 value: unknown;
 usage?: {
```
package/model-function/generate-structure/generateStructure.cjs

```diff
@@ -3,12 +3,12 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateStructure = void 0;
 const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
 const StructureValidationError_js_1 = require("./StructureValidationError.cjs");
-async function generateStructure(model, schema, prompt, options) {
+async function generateStructure({ model, schema, prompt, fullResponse, ...options }) {
 // Note: PROMPT must not be a function.
 const expandedPrompt = typeof prompt === "function"
 ? prompt(schema)
 : prompt;
-const
+const callResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
 functionType: "generate-structure",
 input: {
 schema,
@@ -29,18 +29,18 @@ async function generateStructure(model, schema, prompt, options) {
 }
 const value = parseResult.data;
 return {
-rawResponse: result.
+rawResponse: result.rawResponse,
 extractedValue: value,
 usage: result.usage,
 };
 },
 });
-return
+return fullResponse
 ? {
-structure:
-rawResponse:
-metadata:
+structure: callResponse.value,
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
 exports.generateStructure = generateStructure;
```
package/model-function/generate-structure/generateStructure.d.ts

```diff
@@ -9,14 +9,16 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
 * @see https://modelfusion.dev/guide/function/generate-structure
 *
 * @example
-* const sentiment = await generateStructure(
-* openai.ChatTextGenerator(...).asFunctionCallStructureGenerationModel(...),
-*
+* const sentiment = await generateStructure({
+* model: openai.ChatTextGenerator(...).asFunctionCallStructureGenerationModel(...),
+*
+* schema: zodSchema(z.object({
 * sentiment: z
 * .enum(["positive", "neutral", "negative"])
 * .describe("Sentiment."),
 * })),
-*
+*
+* prompt: [
 * openai.ChatMessage.system(
 * "You are a sentiment evaluator. " +
 * "Analyze the sentiment of the following product review:"
@@ -26,23 +28,28 @@ import { StructureGenerationModel, StructureGenerationModelSettings } from "./St
 * "that did not disappear even after washing. Never again!"
 * ),
 * ]
-* );
+* });
 *
 * @param {StructureGenerationModel<PROMPT, SETTINGS>} model - The model to generate the structure.
 * @param {Schema<STRUCTURE>} schema - The schema to be used.
 * @param {PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT)} prompt
 * The prompt to be used.
 * You can also pass a function that takes the schema as an argument and returns the prompt.
-* @param {FunctionOptions} [options] - Optional function options.
 *
 * @returns {Promise<STRUCTURE>} - Returns a promise that resolves to the generated structure.
 */
-export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(
+export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(args: {
+model: StructureGenerationModel<PROMPT, SETTINGS>;
+schema: Schema<STRUCTURE> & JsonSchemaProducer;
+prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
 fullResponse?: false;
-}): Promise<STRUCTURE>;
-export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(
+} & FunctionOptions): Promise<STRUCTURE>;
+export declare function generateStructure<STRUCTURE, PROMPT, SETTINGS extends StructureGenerationModelSettings>(args: {
+model: StructureGenerationModel<PROMPT, SETTINGS>;
+schema: Schema<STRUCTURE> & JsonSchemaProducer;
+prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
 fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
 structure: STRUCTURE;
 rawResponse: unknown;
 metadata: ModelCallMetadata;
```
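A minimal sketch of the new `generateStructure` call, following the declarations and JSDoc above. It is not part of the diff; the OpenAI model settings, the `fnName`, and the review text are placeholders:

```ts
import { generateStructure, openai, zodSchema } from "modelfusion";
import { z } from "zod";

// New object-style call: model, schema, and prompt are named properties.
const sentiment = await generateStructure({
  model: openai
    .ChatTextGenerator({ model: "gpt-3.5-turbo" }) // placeholder model settings
    .asFunctionCallStructureGenerationModel({ fnName: "sentiment" }), // placeholder function name
  schema: zodSchema(
    z.object({
      sentiment: z.enum(["positive", "neutral", "negative"]).describe("Sentiment."),
    })
  ),
  prompt: [
    openai.ChatMessage.system(
      "You are a sentiment evaluator. Analyze the sentiment of the following product review:"
    ),
    openai.ChatMessage.user("Example review text goes here."), // placeholder review
  ],
});
```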
package/model-function/generate-structure/generateStructure.js

```diff
@@ -1,11 +1,11 @@
 import { executeStandardCall } from "../executeStandardCall.js";
 import { StructureValidationError } from "./StructureValidationError.js";
-export async function generateStructure(model, schema, prompt, options) {
+export async function generateStructure({ model, schema, prompt, fullResponse, ...options }) {
 // Note: PROMPT must not be a function.
 const expandedPrompt = typeof prompt === "function"
 ? prompt(schema)
 : prompt;
-const
+const callResponse = await executeStandardCall({
 functionType: "generate-structure",
 input: {
 schema,
@@ -26,17 +26,17 @@ export async function generateStructure(model, schema, prompt, options) {
 }
 const value = parseResult.data;
 return {
-rawResponse: result.
+rawResponse: result.rawResponse,
 extractedValue: value,
 usage: result.usage,
 };
 },
 });
-return
+return fullResponse
 ? {
-structure:
-rawResponse:
-metadata:
+structure: callResponse.value,
+rawResponse: callResponse.rawResponse,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
```
package/model-function/generate-structure/streamStructure.cjs

```diff
@@ -3,14 +3,14 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.streamStructure = void 0;
 const isDeepEqualData_js_1 = require("../../util/isDeepEqualData.cjs");
 const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
-async function streamStructure(model, schema, prompt, options) {
+async function streamStructure({ model, schema, prompt, fullResponse, ...options }) {
 // Note: PROMPT must not be a function.
 const expandedPrompt = typeof prompt === "function"
 ? prompt(schema)
 : prompt;
 let accumulatedText = "";
 let lastStructure;
-const
+const callResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
 functionType: "stream-structure",
 input: {
 schema,
@@ -49,11 +49,11 @@ async function streamStructure(model, schema, prompt, options) {
 };
 },
 });
-return
+return fullResponse
 ? {
-structureStream:
-metadata:
+structureStream: callResponse.value,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
 exports.streamStructure = streamStructure;
```
package/model-function/generate-structure/streamStructure.d.ts

```diff
@@ -25,9 +25,9 @@ export type StructureStreamPart<STRUCTURE> = {
 * @see https://modelfusion.dev/guide/function/generate-structure
 *
 * @example
-* const structureStream = await streamStructure(
-* openai.ChatTextGenerator(...).asFunctionCallStructureGenerationModel(...),
-* zodSchema(
+* const structureStream = await streamStructure({
+* structureGenerator: openai.ChatTextGenerator(...).asFunctionCallStructureGenerationModel(...),
+* schema: zodSchema(
 * z.array(
 * z.object({
 * name: z.string(),
@@ -37,12 +37,12 @@ export type StructureStreamPart<STRUCTURE> = {
 * description: z.string(),
 * })
 * ),
-* [
+* prompt: [
 * openai.ChatMessage.user(
 * "Generate 3 character descriptions for a fantasy role playing game."
 * ),
 * ]
-* );
+* });
 *
 * for await (const part of structureStream) {
 * if (!part.isComplete) {
@@ -55,7 +55,7 @@ export type StructureStreamPart<STRUCTURE> = {
 * }
 * }
 *
-* @param {StructureStreamingModel<PROMPT>}
+* @param {StructureStreamingModel<PROMPT>} structureGenerator - The model to use for streaming
 * @param {Schema<STRUCTURE>} schema - The schema to be used.
 * @param {PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT)} prompt
 * The prompt to be used.
@@ -68,12 +68,18 @@ export type StructureStreamPart<STRUCTURE> = {
 * It contains a isComplete flag to indicate whether the structure is complete,
 * and a value that is either the partial structure or the final structure.
 */
-export declare function streamStructure<STRUCTURE, PROMPT>(
+export declare function streamStructure<STRUCTURE, PROMPT>(args: {
+model: StructureStreamingModel<PROMPT>;
+schema: Schema<STRUCTURE> & JsonSchemaProducer;
+prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
 fullResponse?: false;
-}): Promise<AsyncIterable<StructureStreamPart<STRUCTURE>>>;
-export declare function streamStructure<STRUCTURE, PROMPT>(
+} & FunctionOptions): Promise<AsyncIterable<StructureStreamPart<STRUCTURE>>>;
+export declare function streamStructure<STRUCTURE, PROMPT>(args: {
+model: StructureStreamingModel<PROMPT>;
+schema: Schema<STRUCTURE> & JsonSchemaProducer;
+prompt: PROMPT | ((schema: Schema<STRUCTURE>) => PROMPT);
 fullResponse: true;
-}): Promise<{
+} & FunctionOptions): Promise<{
 structureStream: AsyncIterable<StructureStreamPart<STRUCTURE>>;
 metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
 }>;
```
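A sketch of the new `streamStructure` call, following the declarations above. Note that the declaration names the argument `model` while the updated JSDoc example uses `structureGenerator`. This is not part of the diff; the OpenAI model settings and `fnName` are placeholders:

```ts
import { openai, streamStructure, zodSchema } from "modelfusion";
import { z } from "zod";

const structureStream = await streamStructure({
  model: openai
    .ChatTextGenerator({ model: "gpt-4" }) // placeholder model settings
    .asFunctionCallStructureGenerationModel({ fnName: "characters" }), // placeholder function name
  schema: zodSchema(
    z.array(
      z.object({
        name: z.string(),
        description: z.string(),
      })
    )
  ),
  prompt: [
    openai.ChatMessage.user("Generate 3 character descriptions for a fantasy role playing game."),
  ],
});

for await (const part of structureStream) {
  if (part.isComplete) {
    console.log("final structure:", part.value); // partial structures arrive with isComplete === false
  }
}
```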
package/model-function/generate-structure/streamStructure.js

```diff
@@ -1,13 +1,13 @@
 import { isDeepEqualData } from "../../util/isDeepEqualData.js";
 import { executeStreamCall } from "../executeStreamCall.js";
-export async function streamStructure(model, schema, prompt, options) {
+export async function streamStructure({ model, schema, prompt, fullResponse, ...options }) {
 // Note: PROMPT must not be a function.
 const expandedPrompt = typeof prompt === "function"
 ? prompt(schema)
 : prompt;
 let accumulatedText = "";
 let lastStructure;
-const
+const callResponse = await executeStreamCall({
 functionType: "stream-structure",
 input: {
 schema,
@@ -46,10 +46,10 @@ export async function streamStructure(model, schema, prompt, options) {
 };
 },
 });
-return
+return fullResponse
 ? {
-structureStream:
-metadata:
+structureStream: callResponse.value,
+metadata: callResponse.metadata,
 }
-:
+: callResponse.value;
 }
```