modelfusion 0.57.2 → 0.59.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -4
- package/core/api/retryWithExponentialBackoff.cjs +5 -4
- package/core/api/retryWithExponentialBackoff.js +5 -4
- package/model-function/embed/embed.cjs +8 -44
- package/model-function/embed/embed.d.ts +23 -5
- package/model-function/embed/embed.js +8 -44
- package/model-function/generate-image/generateImage.cjs +12 -28
- package/model-function/generate-image/generateImage.d.ts +16 -3
- package/model-function/generate-image/generateImage.js +12 -28
- package/model-function/generate-speech/generateSpeech.cjs +4 -22
- package/model-function/generate-speech/generateSpeech.d.ts +12 -3
- package/model-function/generate-speech/generateSpeech.js +4 -22
- package/model-function/generate-speech/streamSpeech.cjs +5 -31
- package/model-function/generate-speech/streamSpeech.d.ts +11 -3
- package/model-function/generate-speech/streamSpeech.js +5 -31
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +4 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +4 -1
- package/model-function/generate-structure/generateStructure.cjs +4 -45
- package/model-function/generate-structure/generateStructure.d.ts +12 -3
- package/model-function/generate-structure/generateStructure.js +4 -45
- package/model-function/generate-structure/generateStructureOrText.cjs +4 -66
- package/model-function/generate-structure/generateStructureOrText.d.ts +16 -3
- package/model-function/generate-structure/generateStructureOrText.js +4 -66
- package/model-function/generate-structure/streamStructure.cjs +4 -71
- package/model-function/generate-structure/streamStructure.d.ts +10 -2
- package/model-function/generate-structure/streamStructure.js +4 -71
- package/model-function/generate-text/generateText.cjs +5 -28
- package/model-function/generate-text/generateText.d.ts +12 -3
- package/model-function/generate-text/generateText.js +5 -28
- package/model-function/generate-text/streamText.cjs +4 -29
- package/model-function/generate-text/streamText.d.ts +10 -2
- package/model-function/generate-text/streamText.js +4 -29
- package/model-function/generate-transcription/generateTranscription.cjs +4 -23
- package/model-function/generate-transcription/generateTranscription.d.ts +12 -3
- package/model-function/generate-transcription/generateTranscription.js +4 -23
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.cjs +1 -0
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts +1 -1
- package/model-provider/elevenlabs/ElevenLabsSpeechModel.js +1 -0
- package/model-provider/ollama/OllamaTextGenerationModel.cjs +18 -2
- package/model-provider/ollama/OllamaTextGenerationModel.js +18 -2
- package/package.json +1 -1
- package/tool/NoSuchToolError.cjs +8 -0
- package/tool/NoSuchToolError.d.ts +6 -0
- package/tool/NoSuchToolError.js +8 -0
- package/tool/ToolExecutionError.cjs +10 -0
- package/tool/ToolExecutionError.d.ts +8 -0
- package/tool/ToolExecutionError.js +10 -0
- package/tool/executeTool.cjs +5 -41
- package/tool/executeTool.d.ts +11 -16
- package/tool/executeTool.js +4 -39
- package/tool/useTool.cjs +4 -1
- package/tool/useTool.js +4 -1
- package/tool/useToolOrGenerateText.cjs +4 -1
- package/tool/useToolOrGenerateText.js +4 -1
- package/util/JSONParseError.cjs +1 -0
- package/util/JSONParseError.d.ts +1 -0
- package/util/JSONParseError.js +1 -0
- package/util/runSafe.test.cjs +10 -1
- package/util/runSafe.test.js +10 -1
- package/model-function/AsyncIterableResultPromise.cjs +0 -37
- package/model-function/AsyncIterableResultPromise.d.ts +0 -16
- package/model-function/AsyncIterableResultPromise.js +0 -33
- package/model-function/ModelFunctionPromise.cjs +0 -37
- package/model-function/ModelFunctionPromise.d.ts +0 -18
- package/model-function/ModelFunctionPromise.js +0 -33
- package/model-function/generate-image/ImageGenerationPromise.cjs +0 -50
- package/model-function/generate-image/ImageGenerationPromise.d.ts +0 -22
- package/model-function/generate-image/ImageGenerationPromise.js +0 -46
package/model-function/generate-text/generateText.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
-import {
+import { ModelCallMetadata } from "../ModelCallMetadata.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerationModel.js";
 /**
  * Generate text for a prompt and return it as a string.
@@ -20,6 +20,15 @@ import { TextGenerationModel, TextGenerationModelSettings } from "./TextGenerati
  * @param {PROMPT} prompt - The prompt to use for text generation.
  * @param {FunctionOptions} [options] - Optional parameters for the function.
  *
- * @returns {
+ * @returns {Promise<string>} - A promise that resolves to the generated text.
  */
-export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions
+export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options?: FunctionOptions & {
+    returnType?: "text";
+}): Promise<string>;
+export declare function generateText<PROMPT>(model: TextGenerationModel<PROMPT, TextGenerationModelSettings>, prompt: PROMPT, options: FunctionOptions & {
+    returnType: "full";
+}): Promise<{
+    value: string;
+    response: unknown;
+    metadata: ModelCallMetadata;
+}>;
package/model-function/generate-text/generateText.js
CHANGED
@@ -1,28 +1,6 @@
 import { executeStandardCall } from "../executeStandardCall.js";
-
-/**
- * Generate text for a prompt and return it as a string.
- *
- * The prompt depends on the model used.
- * For instance, OpenAI completion models expect a string prompt,
- * whereas OpenAI chat models expect an array of chat messages.
- *
- * @see https://modelfusion.dev/guide/function/generate-text
- *
- * @example
- * const text = await generateText(
- *   new OpenAICompletionModel(...),
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- *
- * @param {TextGenerationModel<PROMPT, TextGenerationModelSettings>} model - The text generation model to use.
- * @param {PROMPT} prompt - The prompt to use for text generation.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
- *
- * @returns {ModelFunctionPromise<string>} - A promise that resolves to the generated text.
- */
-export function generateText(model, prompt, options) {
-    return new ModelFunctionPromise(executeStandardCall({
+export async function generateText(model, prompt, options) {
+    const fullResponse = await executeStandardCall({
         functionType: "generate-text",
         input: prompt,
         model,
@@ -32,11 +10,10 @@ export function generateText(model, prompt, options) {
             const shouldTrimWhitespace = model.settings.trimWhitespace ?? true;
             return {
                 response: result.response,
-                extractedValue: shouldTrimWhitespace
-                    ? result.text.trim()
-                    : result.text,
+                extractedValue: shouldTrimWhitespace ? result.text.trim() : result.text,
                 usage: result.usage,
             };
         },
-    })
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
 }
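Taken together, the two hunks above replace the ModelFunctionPromise-based return with plain async overloads: by default `generateText` resolves to the generated string, while `returnType: "full"` exposes `value`, the raw provider `response`, and the call `metadata`. A minimal usage sketch of the new overloads; the `OpenAICompletionModel` settings are illustrative assumptions, mirroring the JSDoc example removed above:

```ts
import { generateText, OpenAICompletionModel } from "modelfusion";

// Assumed model settings, following the removed JSDoc example.
const model = new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" });

// Default overload: resolves directly to the (trimmed) text.
const text = await generateText(
  model,
  "Write a short story about a robot learning to love:\n\n"
);

// "full" overload: also exposes the raw response and call metadata.
const { value, response, metadata } = await generateText(
  model,
  "Write a short story about a robot learning to love:\n\n",
  { returnType: "full" }
);
```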
package/model-function/generate-text/streamText.cjs
CHANGED
@@ -1,37 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.streamText = void 0;
-const AsyncIterableResultPromise_js_1 = require("../AsyncIterableResultPromise.cjs");
 const executeStreamCall_js_1 = require("../executeStreamCall.cjs");
-/**
- * Stream the generated text for a prompt as an async iterable.
- *
- * The prompt depends on the model used.
- * For instance, OpenAI completion models expect a string prompt,
- * whereas OpenAI chat models expect an array of chat messages.
- *
- * @see https://modelfusion.dev/guide/function/generate-text
- *
- * @example
- * const textStream = await streamText(
- *   new OpenAICompletionModel(...),
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- *
- * for await (const textPart of textStream) {
- *   // ...
- * }
- *
- * @param {TextStreamingModel<PROMPT>} model - The model to stream text from.
- * @param {PROMPT} prompt - The prompt to use for text generation.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
- *
- * @returns {AsyncIterableResultPromise<string>} An async iterable promise that yields the generated text.
- */
-function streamText(model, prompt, options) {
+async function streamText(model, prompt, options) {
     let accumulatedText = "";
     let lastFullDelta;
-
+    const fullResponse = await (0, executeStreamCall_js_1.executeStreamCall)({
         functionType: "stream-text",
         input: prompt,
         model,
@@ -50,6 +24,7 @@ function streamText(model, prompt, options) {
             response: lastFullDelta,
             value: accumulatedText,
         }),
-    })
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
 }
 exports.streamText = streamText;
package/model-function/generate-text/streamText.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
-import {
+import { ModelCallMetadata } from "../ModelCallMetadata.js";
 import { TextStreamingModel } from "./TextGenerationModel.js";
 /**
  * Stream the generated text for a prompt as an async iterable.
@@ -26,4 +26,12 @@ import { TextStreamingModel } from "./TextGenerationModel.js";
  *
  * @returns {AsyncIterableResultPromise<string>} An async iterable promise that yields the generated text.
  */
-export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options?: FunctionOptions
+export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options?: FunctionOptions & {
+    returnType?: "text-stream";
+}): Promise<AsyncIterable<string>>;
+export declare function streamText<PROMPT>(model: TextStreamingModel<PROMPT>, prompt: PROMPT, options: FunctionOptions & {
+    returnType: "full";
+}): Promise<{
+    value: AsyncIterable<string>;
+    metadata: Omit<ModelCallMetadata, "durationInMs" | "finishTimestamp">;
+}>;
package/model-function/generate-text/streamText.js
CHANGED
@@ -1,34 +1,8 @@
-import { AsyncIterableResultPromise } from "../AsyncIterableResultPromise.js";
 import { executeStreamCall } from "../executeStreamCall.js";
-/**
- * Stream the generated text for a prompt as an async iterable.
- *
- * The prompt depends on the model used.
- * For instance, OpenAI completion models expect a string prompt,
- * whereas OpenAI chat models expect an array of chat messages.
- *
- * @see https://modelfusion.dev/guide/function/generate-text
- *
- * @example
- * const textStream = await streamText(
- *   new OpenAICompletionModel(...),
- *   "Write a short story about a robot learning to love:\n\n"
- * );
- *
- * for await (const textPart of textStream) {
- *   // ...
- * }
- *
- * @param {TextStreamingModel<PROMPT>} model - The model to stream text from.
- * @param {PROMPT} prompt - The prompt to use for text generation.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
- *
- * @returns {AsyncIterableResultPromise<string>} An async iterable promise that yields the generated text.
- */
-export function streamText(model, prompt, options) {
+export async function streamText(model, prompt, options) {
     let accumulatedText = "";
     let lastFullDelta;
-
+    const fullResponse = await executeStreamCall({
         functionType: "stream-text",
         input: prompt,
         model,
@@ -47,5 +21,6 @@ export function streamText(model, prompt, options) {
             response: lastFullDelta,
             value: accumulatedText,
         }),
-    })
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
 }
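The streaming variant follows the same pattern: `streamText` is now an async function that resolves to the `AsyncIterable<string>` itself, or, with `returnType: "full"`, to the iterable plus partial call metadata (duration and finish timestamp are omitted, presumably because the stream has not completed when the promise resolves). A sketch, again with assumed model settings:

```ts
import { streamText, OpenAICompletionModel } from "modelfusion";

const model = new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }); // assumed settings

// Default overload: the awaited value is the text stream.
const textStream = await streamText(
  model,
  "Write a short story about a robot learning to love:\n\n"
);
for await (const textPart of textStream) {
  process.stdout.write(textPart);
}

// "full" overload: the stream plus the metadata available at call start.
const { value, metadata } = await streamText(model, "Hello, ", { returnType: "full" });
for await (const textPart of value) {
  process.stdout.write(textPart);
}
```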
package/model-function/generate-transcription/generateTranscription.cjs
CHANGED
@@ -2,28 +2,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.generateTranscription = void 0;
 const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
-
-/**
- * Transcribe audio data into text. Also called speech-to-text (STT) or automatic speech recognition (ASR).
- *
- * @see https://modelfusion.dev/guide/function/generate-transcription
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const transcription = await generateTranscription(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   { type: "mp3", data }
- * );
- *
- * @param {TranscriptionModel<DATA, TranscriptionModelSettings>} model - The model to use for transcription.
- * @param {DATA} data - The data to transcribe.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
- *
- * @returns {ModelFunctionPromise<string>} A promise that resolves to the transcribed text.
- */
-function generateTranscription(model, data, options) {
-    return new ModelFunctionPromise_js_1.ModelFunctionPromise((0, executeStandardCall_js_1.executeStandardCall)({
+async function generateTranscription(model, data, options) {
+    const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
         functionType: "generate-transcription",
         input: data,
         model,
@@ -35,6 +15,7 @@ function generateTranscription(model, data, options) {
                 extractedValue: result.transcription,
             };
         },
-    })
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
 }
 exports.generateTranscription = generateTranscription;
package/model-function/generate-transcription/generateTranscription.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
-import {
+import { ModelCallMetadata } from "../ModelCallMetadata.js";
 import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionModel.js";
 /**
  * Transcribe audio data into text. Also called speech-to-text (STT) or automatic speech recognition (ASR).
@@ -18,6 +18,15 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * @param {DATA} data - The data to transcribe.
  * @param {FunctionOptions} [options] - Optional parameters for the function.
  *
- * @returns {
+ * @returns {Promise<string>} A promise that resolves to the transcribed text.
  */
-export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options?: FunctionOptions
+export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options?: FunctionOptions & {
+    returnType?: "text";
+}): Promise<string>;
+export declare function generateTranscription<DATA>(model: TranscriptionModel<DATA, TranscriptionModelSettings>, data: DATA, options: FunctionOptions & {
+    returnType: "full";
+}): Promise<{
+    value: string;
+    response: unknown;
+    metadata: ModelCallMetadata;
+}>;
package/model-function/generate-transcription/generateTranscription.js
CHANGED
@@ -1,26 +1,6 @@
 import { executeStandardCall } from "../executeStandardCall.js";
-
-/**
- * Transcribe audio data into text. Also called speech-to-text (STT) or automatic speech recognition (ASR).
- *
- * @see https://modelfusion.dev/guide/function/generate-transcription
- *
- * @example
- * const data = await fs.promises.readFile("data/test.mp3");
- *
- * const transcription = await generateTranscription(
- *   new OpenAITranscriptionModel({ model: "whisper-1" }),
- *   { type: "mp3", data }
- * );
- *
- * @param {TranscriptionModel<DATA, TranscriptionModelSettings>} model - The model to use for transcription.
- * @param {DATA} data - The data to transcribe.
- * @param {FunctionOptions} [options] - Optional parameters for the function.
- *
- * @returns {ModelFunctionPromise<string>} A promise that resolves to the transcribed text.
- */
-export function generateTranscription(model, data, options) {
-    return new ModelFunctionPromise(executeStandardCall({
+export async function generateTranscription(model, data, options) {
+    const fullResponse = await executeStandardCall({
         functionType: "generate-transcription",
         input: data,
         model,
@@ -32,5 +12,6 @@ export function generateTranscription(model, data, options) {
             extractedValue: result.transcription,
         };
     },
-    })
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
 }
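`generateTranscription` gets the same treatment. The call shape from the removed JSDoc example still applies and now resolves straight to the transcript string; `returnType: "full"` adds the raw response and call metadata. A sketch (the root import path is assumed):

```ts
import fs from "node:fs";
import { generateTranscription, OpenAITranscriptionModel } from "modelfusion";

const data = await fs.promises.readFile("data/test.mp3");

// Default overload: resolves to the transcribed text.
const transcription = await generateTranscription(
  new OpenAITranscriptionModel({ model: "whisper-1" }),
  { type: "mp3", data }
);

// "full" overload: transcript plus raw response and call metadata.
const { value, response, metadata } = await generateTranscription(
  new OpenAITranscriptionModel({ model: "whisper-1" }),
  { type: "mp3", data },
  { returnType: "full" }
);
```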
package/model-provider/elevenlabs/ElevenLabsSpeechModel.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { Delta } from "../../model-function/Delta.js";
 import { StreamingSpeechGenerationModel, SpeechGenerationModelSettings } from "../../model-function/generate-speech/SpeechGenerationModel.js";
-declare const elevenLabsModels: readonly ["eleven_multilingual_v2", "eleven_multilingual_v1", "eleven_monolingual_v1"];
+declare const elevenLabsModels: readonly ["eleven_multilingual_v2", "eleven_multilingual_v1", "eleven_monolingual_v1", "eleven_turbo_v2"];
 export interface ElevenLabsSpeechModelSettings extends SpeechGenerationModelSettings {
     api?: ApiConfiguration & {
         apiKey: string;
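The only change in this file is the new "eleven_turbo_v2" entry in the allowed model IDs. A hedged sketch of selecting it; the `voice` value and the surrounding `generateSpeech` call are assumptions for illustration, not part of this diff:

```ts
import { generateSpeech, ElevenLabsSpeechModel } from "modelfusion";

// "eleven_turbo_v2" is newly accepted by the model id union in this release.
const speech = await generateSpeech(
  new ElevenLabsSpeechModel({
    voice: "21m00Tcm4TlvDq8ikWAM", // hypothetical voice id, for illustration only
    model: "eleven_turbo_v2",
  }),
  "Hello, world!"
);
```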
package/model-provider/ollama/OllamaTextGenerationModel.cjs
CHANGED
@@ -53,9 +53,25 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-
-
+            "maxCompletionTokens",
+            "stopSequences",
             "contextWindowSize",
+            "temperature",
+            "mirostat",
+            "mirostat_eta",
+            "mirostat_tau",
+            "num_gqa",
+            "num_gpu",
+            "num_threads",
+            "repeat_last_n",
+            "repeat_penalty",
+            "seed",
+            "tfs_z",
+            "top_k",
+            "top_p",
+            "system",
+            "template",
+            "context",
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
package/model-provider/ollama/OllamaTextGenerationModel.js
CHANGED
@@ -50,9 +50,25 @@ export class OllamaTextGenerationModel extends AbstractModel {
     }
     get settingsForEvent() {
         const eventSettingProperties = [
-
-
+            "maxCompletionTokens",
+            "stopSequences",
             "contextWindowSize",
+            "temperature",
+            "mirostat",
+            "mirostat_eta",
+            "mirostat_tau",
+            "num_gqa",
+            "num_gpu",
+            "num_threads",
+            "repeat_last_n",
+            "repeat_penalty",
+            "seed",
+            "tfs_z",
+            "top_k",
+            "top_p",
+            "system",
+            "template",
+            "context",
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
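Functionally, `settingsForEvent` keeps filtering `this.settings` against `eventSettingProperties`; the change is that the Ollama-specific sampling and runtime options are now on the allow-list, so they show up in function-observer events. A standalone sketch of the filtering behavior, with a made-up settings object:

```ts
// Same filtering logic as in the diff, extracted for illustration.
const eventSettingProperties = [
  "maxCompletionTokens",
  "stopSequences",
  "contextWindowSize",
  "temperature",
  "top_k",
  "top_p",
  // ...remaining entries from the list above
];

function settingsForEvent(settings: Record<string, unknown>) {
  return Object.fromEntries(
    Object.entries(settings).filter(([key]) => eventSettingProperties.includes(key))
  );
}

// Keys not on the allow-list are dropped from the event payload.
console.log(settingsForEvent({ model: "llama2", temperature: 0.7, top_p: 0.9 }));
// => { temperature: 0.7, top_p: 0.9 }
```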
package/package.json
CHANGED
package/tool/NoSuchToolError.cjs
CHANGED
@@ -13,5 +13,13 @@ class NoSuchToolError extends Error {
         this.name = "NoSuchToolError";
         this.toolName = toolName;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
 exports.NoSuchToolError = NoSuchToolError;
package/tool/NoSuchToolError.js
CHANGED
package/tool/ToolExecutionError.cjs
CHANGED
@@ -27,5 +27,15 @@ class ToolExecutionError extends Error {
         this.input = input;
         this.cause = cause;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            input: this.input,
+            cause: this.cause,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
 exports.ToolExecutionError = ToolExecutionError;
package/tool/ToolExecutionError.d.ts
CHANGED
@@ -8,4 +8,12 @@ export declare class ToolExecutionError extends Error {
         message: string | undefined;
         cause: unknown | undefined;
     });
+    toJSON(): {
+        name: string;
+        toolName: string;
+        input: unknown;
+        cause: unknown;
+        message: string;
+        stack: string | undefined;
+    };
 }
package/tool/ToolExecutionError.js
CHANGED
@@ -24,4 +24,14 @@ export class ToolExecutionError extends Error {
         this.input = input;
         this.cause = cause;
     }
+    toJSON() {
+        return {
+            name: this.name,
+            toolName: this.toolName,
+            input: this.input,
+            cause: this.cause,
+            message: this.message,
+            stack: this.stack,
+        };
+    }
 }
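With `toJSON()` in place, `JSON.stringify` (and structured loggers that rely on it) now serialize these errors with `message` and `stack` included; those fields are non-enumerable on `Error` and used to be dropped. A short sketch; the constructor argument shape is inferred from the `.d.ts` context above and partially assumed:

```ts
import { ToolExecutionError } from "modelfusion";

const error = new ToolExecutionError({
  toolName: "calculator",            // assumed field, matches the toJSON() output
  input: { a: 1, b: 2 },             // assumed field
  message: "Tool execution failed",
  cause: new Error("division by zero"),
});

// JSON.stringify calls toJSON() automatically, so the log line now carries
// name, toolName, input, cause, message, and stack.
console.log(JSON.stringify(error));
```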
package/tool/executeTool.cjs
CHANGED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.executeTool =
+exports.executeTool = void 0;
 const nanoid_1 = require("nanoid");
 const FunctionEventSource_js_1 = require("../core/FunctionEventSource.cjs");
 const GlobalFunctionLogging_js_1 = require("../core/GlobalFunctionLogging.cjs");
@@ -11,46 +11,10 @@ const getRun_js_1 = require("../core/getRun.cjs");
 const DurationMeasurement_js_1 = require("../util/DurationMeasurement.cjs");
 const runSafe_js_1 = require("../util/runSafe.cjs");
 const ToolExecutionError_js_1 = require("./ToolExecutionError.cjs");
-
-
-
-
-            resolve(null); // we override the resolve function
-        });
-        Object.defineProperty(this, "fullPromise", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: fullPromise
-        });
-        Object.defineProperty(this, "outputPromise", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.outputPromise = fullPromise.then((result) => result.output);
-    }
-    asFullResponse() {
-        return this.fullPromise;
-    }
-    then(onfulfilled, onrejected) {
-        return this.outputPromise.then(onfulfilled, onrejected);
-    }
-    catch(onrejected) {
-        return this.outputPromise.catch(onrejected);
-    }
-    finally(onfinally) {
-        return this.outputPromise.finally(onfinally);
-    }
-}
-exports.ExecuteToolPromise = ExecuteToolPromise;
-/**
- * `executeTool` directly executes a tool with the given parameters.
- */
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-function executeTool(tool, input, options) {
-    return new ExecuteToolPromise(doExecuteTool(tool, input, options));
+async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
+tool, input, options) {
+    const fullResponse = await doExecuteTool(tool, input, options);
+    return options?.returnType === "full" ? fullResponse : fullResponse.output;
 }
 exports.executeTool = executeTool;
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/tool/executeTool.d.ts
CHANGED
@@ -10,22 +10,17 @@ export type ExecuteToolMetadata = {
     finishTimestamp: Date;
     durationInMs: number;
 };
-export declare class ExecuteToolPromise<OUTPUT> extends Promise<OUTPUT> {
-    private fullPromise;
-    private outputPromise;
-    constructor(fullPromise: Promise<{
-        output: OUTPUT;
-        metadata: ExecuteToolMetadata;
-    }>);
-    asFullResponse(): Promise<{
-        output: OUTPUT;
-        metadata: ExecuteToolMetadata;
-    }>;
-    then<TResult1 = OUTPUT, TResult2 = never>(onfulfilled?: ((value: OUTPUT) => TResult1 | PromiseLike<TResult1>) | undefined | null, onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | undefined | null): Promise<TResult1 | TResult2>;
-    catch<TResult = never>(onrejected?: ((reason: unknown) => TResult | PromiseLike<TResult>) | undefined | null): Promise<OUTPUT | TResult>;
-    finally(onfinally?: (() => void) | undefined | null): Promise<OUTPUT>;
-}
 /**
  * `executeTool` directly executes a tool with the given parameters.
  */
-export declare function executeTool<TOOL extends Tool<any, any, any>>(
+export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
+tool: TOOL, input: TOOL["inputSchema"]["_type"], options?: FunctionOptions & {
+    returnType?: "output";
+}): Promise<ReturnType<TOOL["execute"]>>;
+export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
+tool: TOOL, input: TOOL["inputSchema"]["_type"], options: FunctionOptions & {
+    returnType: "full";
+}): Promise<{
+    output: ReturnType<TOOL["execute"]>;
+    metadata: ExecuteToolMetadata;
+}>;
package/tool/executeTool.js
CHANGED
@@ -8,45 +8,10 @@ import { getRun } from "../core/getRun.js";
 import { startDurationMeasurement } from "../util/DurationMeasurement.js";
 import { runSafe } from "../util/runSafe.js";
 import { ToolExecutionError } from "./ToolExecutionError.js";
-export
-
-
-
-            resolve(null); // we override the resolve function
-        });
-        Object.defineProperty(this, "fullPromise", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: fullPromise
-        });
-        Object.defineProperty(this, "outputPromise", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: void 0
-        });
-        this.outputPromise = fullPromise.then((result) => result.output);
-    }
-    asFullResponse() {
-        return this.fullPromise;
-    }
-    then(onfulfilled, onrejected) {
-        return this.outputPromise.then(onfulfilled, onrejected);
-    }
-    catch(onrejected) {
-        return this.outputPromise.catch(onrejected);
-    }
-    finally(onfinally) {
-        return this.outputPromise.finally(onfinally);
-    }
-}
-/**
- * `executeTool` directly executes a tool with the given parameters.
- */
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-export function executeTool(tool, input, options) {
-    return new ExecuteToolPromise(doExecuteTool(tool, input, options));
+export async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
+tool, input, options) {
+    const fullResponse = await doExecuteTool(tool, input, options);
+    return options?.returnType === "full" ? fullResponse : fullResponse.output;
 }
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 async function doExecuteTool(tool, input, options) {
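`executeTool` now mirrors the model functions: the default call resolves to the tool's output, and `returnType: "full"` returns `{ output, metadata }` with the `ExecuteToolMetadata` timings. A sketch with a hypothetical calculator tool; the `Tool` and `ZodSchema` constructors are assumptions and not part of this diff:

```ts
import { executeTool, Tool, ZodSchema } from "modelfusion";
import { z } from "zod";

// Hypothetical tool, for illustration only.
const calculator = new Tool({
  name: "calculator",
  description: "Add two numbers.",
  inputSchema: new ZodSchema(z.object({ a: z.number(), b: z.number() })),
  execute: async ({ a, b }) => a + b,
});

// Default overload: resolves to the tool output (here: 3).
const sum = await executeTool(calculator, { a: 1, b: 2 });

// "full" overload: output plus timing metadata (durationInMs, finishTimestamp, ...).
const { output, metadata } = await executeTool(calculator, { a: 1, b: 2 }, {
  returnType: "full",
});
```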
package/tool/useTool.cjs
CHANGED
@@ -25,7 +25,10 @@ async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, expandedPrompt,
+    }, expandedPrompt, {
+        ...options,
+        returnType: "full",
+    });
     return {
         tool: tool.name,
         parameters: value,
package/tool/useTool.js
CHANGED
@@ -22,7 +22,10 @@ export async function useTool(model, tool, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    }, expandedPrompt,
+    }, expandedPrompt, {
+        ...options,
+        returnType: "full",
+    });
     return {
         tool: tool.name,
         parameters: value,
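Both `useTool` variants keep their public signatures; the fix is that the internal `generateStructure` call now spreads the caller's options and pins `returnType: "full"`, so the full result object can be destructured from a plain awaited value. The same forward-options-but-force-full pattern is useful for user code that wraps these functions; a minimal sketch built on `generateText` (the type imports and the logging are assumptions):

```ts
import {
  generateText,
  type FunctionOptions,
  type TextGenerationModel,
  type TextGenerationModelSettings,
} from "modelfusion";

// Hypothetical wrapper: forward the caller's options, but request the full
// result internally so the wrapper can inspect metadata while still
// returning the plain text to its own caller.
async function generateTextWithTiming<PROMPT>(
  model: TextGenerationModel<PROMPT, TextGenerationModelSettings>,
  prompt: PROMPT,
  options?: FunctionOptions
): Promise<string> {
  const { value, metadata } = await generateText(model, prompt, {
    ...options,
    returnType: "full",
  });
  console.log(`generate-text took ${metadata.durationInMs} ms`); // assumes durationInMs on ModelCallMetadata
  return value;
}
```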
package/tool/useToolOrGenerateText.cjs
CHANGED
@@ -19,7 +19,10 @@ async function useToolOrGenerateText(model, tools, prompt, options) {
         name: tool.name,
         description: tool.description,
         schema: tool.inputSchema,
-    })), expandedPrompt,
+    })), expandedPrompt, {
+        ...options,
+        returnType: "structure",
+    });
     const { structure, text } = modelResponse;
     if (structure == null) {
         return {