modelfusion 0.97.0 → 0.98.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -4
- package/model-function/embed/embed.cjs +14 -2
- package/model-function/embed/embed.d.ts +6 -6
- package/model-function/embed/embed.js +14 -2
- package/model-function/generate-image/generateImage.cjs +10 -9
- package/model-function/generate-image/generateImage.d.ts +4 -6
- package/model-function/generate-image/generateImage.js +10 -9
- package/model-function/generate-speech/generateSpeech.cjs +7 -1
- package/model-function/generate-speech/generateSpeech.d.ts +3 -3
- package/model-function/generate-speech/generateSpeech.js +7 -1
- package/model-function/generate-speech/streamSpeech.cjs +6 -1
- package/model-function/generate-speech/streamSpeech.d.ts +3 -3
- package/model-function/generate-speech/streamSpeech.js +6 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -5
- package/model-function/generate-structure/generateStructure.cjs +7 -1
- package/model-function/generate-structure/generateStructure.d.ts +3 -3
- package/model-function/generate-structure/generateStructure.js +7 -1
- package/model-function/generate-structure/streamStructure.cjs +6 -1
- package/model-function/generate-structure/streamStructure.d.ts +3 -3
- package/model-function/generate-structure/streamStructure.js +6 -1
- package/model-function/generate-text/generateText.cjs +7 -1
- package/model-function/generate-text/generateText.d.ts +3 -3
- package/model-function/generate-text/generateText.js +7 -1
- package/model-function/generate-text/streamText.cjs +6 -1
- package/model-function/generate-text/streamText.d.ts +3 -3
- package/model-function/generate-text/streamText.js +6 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +2 -2
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +49 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +61 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.js +59 -0
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +8 -3
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +8 -3
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +1 -1
- package/tool/execute-tool/executeTool.d.ts +2 -2
- package/tool/execute-tool/executeTool.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -4
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -4
- package/tool/generate-tool-call/generateToolCall.cjs +7 -1
- package/tool/generate-tool-call/generateToolCall.d.ts +3 -3
- package/tool/generate-tool-call/generateToolCall.js +7 -1
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +4 -4
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +4 -4
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs +1 -1
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts +2 -2
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js +1 -1
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts CHANGED
@@ -74,6 +74,7 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
     get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
     doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
         response: {
+            embeddings: number[][];
             id: string;
             meta: {
                 api_version: {
@@ -81,7 +82,6 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
                 };
             };
             texts: string[];
-            embeddings: number[][];
         };
         embeddings: number[][];
     }>;
@@ -109,6 +109,7 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
        };
    }>;
}, "strip", z.ZodTypeAny, {
+    embeddings: number[][];
    id: string;
    meta: {
        api_version: {
@@ -116,8 +117,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
        };
    };
    texts: string[];
-    embeddings: number[][];
}, {
+    embeddings: number[][];
    id: string;
    meta: {
        api_version: {
@@ -125,7 +126,6 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
        };
    };
    texts: string[];
-    embeddings: number[][];
}>;
export type CohereTextEmbeddingResponse = z.infer<typeof cohereTextEmbeddingResponseSchema>;
export {};
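The only change in these declarations is that `embeddings` now appears at the top of the raw `response` type; the result shape of `doEmbedValues` itself is unchanged. A minimal sketch of reading it, assuming a constructed model instance (the root import path and constructor options are not taken from this diff):

```ts
import { CohereTextEmbeddingModel } from "modelfusion";

async function embedExample() {
  // Constructor options are illustrative; only doEmbedValues and its
  // { response, embeddings: number[][] } result come from the declaration above.
  const model = new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" });

  const { embeddings, response } = await model.doEmbedValues(["first text", "second text"]);
  console.log(embeddings.length);          // one vector per input text
  console.log(response.embeddings.length); // the same vectors in the raw Cohere response
}
```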
package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs CHANGED
@@ -219,7 +219,7 @@ exports.OpenAIChatResponseFormat = {
      */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => delta[0]?.delta
+        handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => delta[0]?.delta?.content ?? ""),
     },
     structureDeltaIterable: {
         stream: true,
package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts CHANGED
@@ -32,16 +32,65 @@ export interface AbstractOpenAIChatCallSettings {
             name: string;
         };
     };
+    /**
+     * An array of strings or a single string that the model will recognize as end-of-text indicators.
+     * The model stops generating more content when it encounters any of these strings.
+     * This is particularly useful in scripted or formatted text generation, where a specific end point is required.
+     * Example: stop: ['\n', 'END']
+     */
     stop?: string | string[];
+    /**
+     * Specifies the maximum number of tokens (words, punctuation, parts of words) that the model can generate in a single response.
+     * It helps to control the length of the output, this can help prevent wasted time and tokens when tweaker topP or temperature.
+     * Example: maxTokens: 1000
+     */
     maxTokens?: number;
+    /**
+     * `temperature`: Controls the randomness and creativity in the model's responses.
+     * A lower temperature (close to 0) results in more predictable, conservative text, while a higher temperature (close to 1) produces more varied and creative output.
+     * Adjust this to balance between consistency and creativity in the model's replies.
+     * Example: temperature: 0.5
+     */
     temperature?: number;
+    /**
+     * This parameter sets a threshold for token selection based on probability.
+     * The model will only consider the most likely tokens that cumulatively exceed this threshold while generating a response.
+     * It's a way to control the randomness of the output, balancing between diverse responses and sticking to more likely words.
+     * This means a topP of .1 will be far less random than one at .9
+     * Example: topP: 0.2
+     */
     topP?: number;
+    /**
+     * Used to set the initial state for the random number generator in the model.
+     * Providing a specific seed value ensures consistent outputs for the same inputs across different runs - useful for testing and reproducibility.
+     * A `null` value (or not setting it) results in varied, non-repeatable outputs each time.
+     * Example: seed: 89 (or) seed: null
+     */
     seed?: number | null;
     responseFormat?: {
         type?: "text" | "json_object";
     };
+    /**
+     * Specifies the number of responses or completions the model should generate for a given prompt.
+     * This is useful when you need multiple different outputs or ideas for a single prompt.
+     * The model will generate 'n' distinct responses, each based on the same initial prompt.
+     * In a streaming model this will result in both responses streamed back in real time.
+     * Example: n: 3 // The model will produce 3 different responses.
+     */
     n?: number;
+    /**
+     * Discourages the model from repeating the same information or context already mentioned in the conversation or prompt.
+     * Increasing this value encourages the model to introduce new topics or ideas, rather than reiterating what has been said.
+     * This is useful for maintaining a diverse and engaging conversation or for brainstorming sessions where varied ideas are needed.
+     * Example: presencePenalty: 1.0 // Strongly discourages repeating the same content.
+     */
     presencePenalty?: number;
+    /**
+     * This parameter reduces the likelihood of the model repeatedly using the same words or phrases in its responses.
+     * A higher frequency penalty promotes a wider variety of language and expressions in the output.
+     * This is particularly useful in creative writing or content generation tasks where diversity in language is desirable.
+     * Example: frequencyPenalty: 0.5 // Moderately discourages repetitive language.
+     */
     frequencyPenalty?: number;
     logitBias?: Record<number, number>;
 }
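The new JSDoc above documents the call settings that `OpenAIChatModel` forwards to the OpenAI API. A configuration sketch using the documented fields; the values are illustrative and the root import path is assumed rather than shown in this diff:

```ts
import { OpenAIChatModel } from "modelfusion";

// Field names come from AbstractOpenAIChatCallSettings above; values are examples only.
const chatModel = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  temperature: 0.5,      // balance between consistent and creative output
  topP: 0.2,             // only sample from high-probability tokens
  maxTokens: 1000,       // cap the length of a single completion
  stop: ["\n", "END"],   // stop generating at these markers
  seed: 89,              // reproducible sampling for identical inputs
  n: 2,                  // request two completions per prompt
  presencePenalty: 1.0,  // discourage revisiting topics already mentioned
  frequencyPenalty: 0.5, // discourage repeating the same wording
});
```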
package/model-provider/openai/chat/AbstractOpenAIChatModel.js CHANGED
@@ -215,7 +215,7 @@ export const OpenAIChatResponseFormat = {
      */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta
+        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta?.content ?? ""),
     },
     structureDeltaIterable: {
         stream: true,
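Both the CJS and ESM builds now extract `delta[0]?.delta?.content ?? ""`, so the text delta stream emits content from the first choice only and tolerates deltas without a `content` field. A usage sketch mirroring the new test added below (root import path assumed):

```ts
import { OpenAIChatModel, streamText } from "modelfusion";

async function streamFirstChoice() {
  // Even with n: 2, the handler above only reads delta[0], so this stream
  // yields the first choice's text parts.
  const textStream = await streamText(
    new OpenAIChatModel({ model: "gpt-3.5-turbo", n: 2 }).withTextPrompt(),
    "Write a haiku about diffs."
  );

  for await (const textPart of textStream) {
    process.stdout.write(textPart);
  }
}
```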
package/model-provider/openai/chat/OpenAIChatModel.test.cjs ADDED
@@ -0,0 +1,61 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+const msw_1 = require("msw");
+const node_1 = require("msw/node");
+const streamText_js_1 = require("../../../model-function/generate-text/streamText.cjs");
+const OpenAIChatModel_js_1 = require("./OpenAIChatModel.cjs");
+const OpenAIApiConfiguration_js_1 = require("../OpenAIApiConfiguration.cjs");
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+let responseChunks = [];
+const server = (0, node_1.setupServer)(msw_1.http.post("https://api.openai.com/v1/chat/completions", () => {
+    const encoder = new TextEncoder();
+    const stream = new ReadableStream({
+        async start(controller) {
+            try {
+                for (const chunk of responseChunks) {
+                    controller.enqueue(encoder.encode(chunk));
+                }
+            }
+            finally {
+                controller.close();
+            }
+        },
+    });
+    return new msw_1.HttpResponse(stream, {
+        status: 200,
+        headers: {
+            "Content-Type": "text/event-stream",
+            "Cache-Control": "no-cache",
+            Connection: "keep-alive",
+        },
+    });
+}));
+beforeAll(() => server.listen());
+beforeEach(() => {
+    responseChunks = [];
+});
+afterEach(() => server.resetHandlers());
+afterAll(() => server.close());
+describe("streamText", () => {
+    it("should return only values from the first choice when using streamText", async () => {
+        responseChunks = [
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await (0, streamText_js_1.streamText)(new OpenAIChatModel_js_1.OpenAIChatModel({
+            api: new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration({ apiKey: "test" }),
+            model: "gpt-3.5-turbo",
+            n: 2,
+        }).withTextPrompt(), "test prompt");
+        const chunks = [];
+        for await (const part of stream) {
+            chunks.push(part);
+        }
+        expect(chunks).toStrictEqual(["A"]);
+    });
+});
package/model-provider/openai/chat/OpenAIChatModel.test.d.ts ADDED
@@ -0,0 +1 @@
+export {};
package/model-provider/openai/chat/OpenAIChatModel.test.js ADDED
@@ -0,0 +1,59 @@
+import { HttpResponse, http } from "msw";
+import { setupServer } from "msw/node";
+import { streamText } from "../../../model-function/generate-text/streamText.js";
+import { OpenAIChatModel } from "./OpenAIChatModel.js";
+import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
+// eslint-disable-next-line @typescript-eslint/no-explicit-any
+let responseChunks = [];
+const server = setupServer(http.post("https://api.openai.com/v1/chat/completions", () => {
+    const encoder = new TextEncoder();
+    const stream = new ReadableStream({
+        async start(controller) {
+            try {
+                for (const chunk of responseChunks) {
+                    controller.enqueue(encoder.encode(chunk));
+                }
+            }
+            finally {
+                controller.close();
+            }
+        },
+    });
+    return new HttpResponse(stream, {
+        status: 200,
+        headers: {
+            "Content-Type": "text/event-stream",
+            "Cache-Control": "no-cache",
+            Connection: "keep-alive",
+        },
+    });
+}));
+beforeAll(() => server.listen());
+beforeEach(() => {
+    responseChunks = [];
+});
+afterEach(() => server.resetHandlers());
+afterAll(() => server.close());
+describe("streamText", () => {
+    it("should return only values from the first choice when using streamText", async () => {
+        responseChunks = [
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"A"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"role":"assistant","content":""},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{"content":"B"},"finish_reason":null}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            `data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1702657020,"model":"gpt-3.5-turbo-0613","system_fingerprint":null,"choices":[{"index":1,"delta":{},"finish_reason":"stop"}]}\n\n`,
+            "data: [DONE]\n\n",
+        ];
+        const stream = await streamText(new OpenAIChatModel({
+            api: new OpenAIApiConfiguration({ apiKey: "test" }),
+            model: "gpt-3.5-turbo",
+            n: 2,
+        }).withTextPrompt(), "test prompt");
+        const chunks = [];
+        for await (const part of stream) {
+            chunks.push(part);
+        }
+        expect(chunks).toStrictEqual(["A"]);
+    });
+});
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs CHANGED
@@ -87,18 +87,23 @@ async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
                 continue;
             }
             const completionChunk = eventData;
+            // reset delta for all existing streamDeltas
+            for (const delta of streamDelta) {
+                delta.delta = undefined;
+            }
             for (let i = 0; i < completionChunk.choices.length; i++) {
                 const eventChoice = completionChunk.choices[i];
+                const index = eventChoice.index;
                 const delta = eventChoice.delta;
-                if (streamDelta[
-                    streamDelta[
+                if (streamDelta[index] == null) {
+                    streamDelta[index] = {
                         role: undefined,
                         content: "",
                         isComplete: false,
                         delta,
                     };
                 }
-                const choice = streamDelta[
+                const choice = streamDelta[index];
                 choice.delta = delta;
                 if (eventChoice.finish_reason != null) {
                     choice.isComplete = true;
package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts CHANGED
@@ -14,6 +14,6 @@ export type OpenAIChatDelta = Array<{
            name?: string;
            arguments?: string;
        };
-    };
+    } | undefined;
 }>;
 export declare function createOpenAIChatDeltaIterableQueue<VALUE>(stream: ReadableStream<Uint8Array>, extractDeltaValue: (delta: OpenAIChatDelta) => VALUE): Promise<AsyncIterable<Delta<VALUE>>>;
package/model-provider/openai/chat/OpenAIChatStreamIterable.js CHANGED
@@ -84,18 +84,23 @@ export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaVal
                 continue;
             }
             const completionChunk = eventData;
+            // reset delta for all existing streamDeltas
+            for (const delta of streamDelta) {
+                delta.delta = undefined;
+            }
             for (let i = 0; i < completionChunk.choices.length; i++) {
                 const eventChoice = completionChunk.choices[i];
+                const index = eventChoice.index;
                 const delta = eventChoice.delta;
-                if (streamDelta[
-                    streamDelta[
+                if (streamDelta[index] == null) {
+                    streamDelta[index] = {
                         role: undefined,
                         content: "",
                         isComplete: false,
                         delta,
                     };
                 }
-                const choice = streamDelta[
+                const choice = streamDelta[index];
                 choice.delta = delta;
                 if (eventChoice.finish_reason != null) {
                     choice.isComplete = true;
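The fix keys `streamDelta` by `eventChoice.index` instead of the loop position and clears each entry's `delta` before a chunk is processed, so chunks that carry only one of several choices update the right slot. A reduced, standalone sketch of that bookkeeping (an illustration, not the library code):

```ts
type ChoiceState = {
  content: string;
  isComplete: boolean;
  delta?: { content?: string };
};

type ChunkChoice = {
  index: number;
  delta: { content?: string };
  finish_reason: string | null;
};

// Accumulates one entry per choice, keyed by the index reported in the chunk.
function applyChunk(streamDelta: ChoiceState[], chunk: { choices: ChunkChoice[] }) {
  // reset delta for all existing entries (mirrors the added loop in the diff)
  for (const entry of streamDelta) {
    entry.delta = undefined;
  }
  for (const choice of chunk.choices) {
    const index = choice.index; // may differ from the position within chunk.choices
    streamDelta[index] ??= { content: "", isComplete: false };
    streamDelta[index].delta = choice.delta;
    streamDelta[index].content += choice.delta.content ?? "";
    if (choice.finish_reason != null) {
      streamDelta[index].isComplete = true;
    }
  }
}
```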
package/package.json CHANGED
package/tool/execute-tool/executeTool.cjs CHANGED
@@ -13,7 +13,7 @@ const ToolExecutionError_js_1 = require("../ToolExecutionError.cjs");
 async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
 tool, args, options) {
     const fullResponse = await doExecuteTool(tool, args, options);
-    return options?.
+    return options?.fullResponse ? fullResponse : fullResponse.output;
 }
 exports.executeTool = executeTool;
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
package/tool/execute-tool/executeTool.d.ts CHANGED
@@ -15,11 +15,11 @@ export type ExecuteToolMetadata = {
  */
 export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
 tool: TOOL, args: TOOL["parameters"]["_type"], options?: FunctionOptions & {
-
+    fullResponse?: false;
 }): Promise<ReturnType<TOOL["execute"]>>;
 export declare function executeTool<TOOL extends Tool<any, any, any>>(// eslint-disable-line @typescript-eslint/no-explicit-any
 tool: TOOL, args: TOOL["parameters"]["_type"], options: FunctionOptions & {
-
+    fullResponse: true;
 }): Promise<{
     output: Awaited<ReturnType<TOOL["execute"]>>;
     metadata: ExecuteToolMetadata;
package/tool/execute-tool/executeTool.js CHANGED
@@ -10,7 +10,7 @@ import { ToolExecutionError } from "../ToolExecutionError.js";
 export async function executeTool(// eslint-disable-line @typescript-eslint/no-explicit-any
 tool, args, options) {
     const fullResponse = await doExecuteTool(tool, args, options);
-    return options?.
+    return options?.fullResponse ? fullResponse : fullResponse.output;
 }
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 async function doExecuteTool(tool, args, options) {
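The tool helpers in this release all use a `fullResponse` flag to switch between the plain result and the result plus metadata. For `executeTool`, a sketch against the overloads above; the `calculator` tool and the root import path are placeholders, not taken from this diff:

```ts
import { executeTool, type Tool } from "modelfusion";

// Placeholder: stands in for any constructed Tool instance.
declare const calculator: Tool<any, any, any>;

async function runTool() {
  // Default: resolves to the tool's output only.
  const output = await executeTool(calculator, { a: 1, b: 2 });

  // fullResponse: true also returns ExecuteToolMetadata for the call.
  const { output: sum, metadata } = await executeTool(calculator, { a: 1, b: 2 }, {
    fullResponse: true,
  });

  console.log(output, sum, metadata);
}
```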
package/tool/generate-tool-call/TextGenerationToolCallModel.cjs CHANGED
@@ -30,21 +30,21 @@ class TextGenerationToolCallModel {
         return this.model.settingsForEvent;
     }
     async doGenerateToolCall(tool, prompt, options) {
-        const { response,
+        const { response, text, metadata } = await (0, generateText_js_1.generateText)(this.model, this.format.createPrompt(prompt, tool), {
             ...options,
-
+            fullResponse: true,
         });
         try {
             return {
                 response,
-                toolCall: this.format.extractToolCall(
+                toolCall: this.format.extractToolCall(text),
                 usage: metadata?.usage,
             };
         }
         catch (error) {
             throw new ToolCallParseError_js_1.ToolCallParseError({
                 toolName: tool.name,
-                valueText:
+                valueText: text,
                 cause: error,
             });
         }
package/tool/generate-tool-call/TextGenerationToolCallModel.js CHANGED
@@ -27,21 +27,21 @@ export class TextGenerationToolCallModel {
         return this.model.settingsForEvent;
     }
     async doGenerateToolCall(tool, prompt, options) {
-        const { response,
+        const { response, text, metadata } = await generateText(this.model, this.format.createPrompt(prompt, tool), {
             ...options,
-
+            fullResponse: true,
         });
         try {
             return {
                 response,
-                toolCall: this.format.extractToolCall(
+                toolCall: this.format.extractToolCall(text),
                 usage: metadata?.usage,
             };
         }
         catch (error) {
             throw new ToolCallParseError({
                 toolName: tool.name,
-                valueText:
+                valueText: text,
                 cause: error,
             });
         }
package/tool/generate-tool-call/generateToolCall.cjs CHANGED
@@ -54,6 +54,12 @@ async function generateToolCall(model, tool, prompt, options) {
            }
        },
    });
-    return options?.
+    return options?.fullResponse
+        ? {
+            toolCall: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
 exports.generateToolCall = generateToolCall;
package/tool/generate-tool-call/generateToolCall.d.ts CHANGED
@@ -4,12 +4,12 @@ import { ToolCall } from "../ToolCall.js";
 import { ToolDefinition } from "../ToolDefinition.js";
 import { ToolCallGenerationModel, ToolCallGenerationModelSettings } from "./ToolCallGenerationModel.js";
 export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options?: FunctionOptions & {
-
+    fullResponse?: false;
 }): Promise<ToolCall<NAME, PARAMETERS>>;
 export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options: FunctionOptions & {
-
+    fullResponse: true;
 }): Promise<{
-
+    toolCall: ToolCall<NAME, PARAMETERS>;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;
package/tool/generate-tool-call/generateToolCall.js CHANGED
@@ -51,5 +51,11 @@ export async function generateToolCall(model, tool, prompt, options) {
            }
        },
    });
-    return options?.
+    return options?.fullResponse
+        ? {
+            toolCall: fullResponse.value,
+            response: fullResponse.response,
+            metadata: fullResponse.metadata,
+        }
+        : fullResponse.value;
 }
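`generateToolCall` follows the same pattern: by default it resolves to the `ToolCall`, and with `fullResponse: true` it now returns `{ toolCall, response, metadata }`. A sketch against the declarations above; the model, tool definition, and import path are placeholders:

```ts
import { generateToolCall, type ToolDefinition } from "modelfusion";

// Placeholders: any ToolCallGenerationModel / ToolDefinition pair works here.
declare const model: any;
declare const weatherTool: ToolDefinition<"getWeather", { city: string }>;

async function callTool() {
  // Default: resolves to the ToolCall (tool name plus parsed arguments).
  const toolCall = await generateToolCall(model, weatherTool, "Weather in Berlin?");

  // fullResponse: true additionally exposes the raw response and ModelCallMetadata.
  const { toolCall: call, response, metadata } = await generateToolCall(
    model,
    weatherTool,
    "Weather in Berlin?",
    { fullResponse: true }
  );

  console.log(toolCall, call, response, metadata);
}
```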
package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs CHANGED
@@ -30,12 +30,12 @@ class TextGenerationToolCallsOrGenerateTextModel {
         return this.model.settingsForEvent;
     }
     async doGenerateToolCallsOrText(tools, prompt, options) {
-        const { response,
+        const { response, text: generatedText, metadata, } = await (0, generateText_js_1.generateText)(this.model, this.template.createPrompt(prompt, tools), {
             ...options,
-
+            fullResponse: true,
         });
         try {
-            const { text, toolCalls } = this.template.extractToolCallsAndText(
+            const { text, toolCalls } = this.template.extractToolCallsAndText(generatedText);
             return {
                 response,
                 text,
@@ -45,7 +45,7 @@ class TextGenerationToolCallsOrGenerateTextModel {
         }
         catch (error) {
             throw new ToolCallsOrTextParseError_js_1.ToolCallsOrTextParseError({
-                valueText:
+                valueText: generatedText,
                 cause: error,
             });
         }
package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js CHANGED
@@ -27,12 +27,12 @@ export class TextGenerationToolCallsOrGenerateTextModel {
         return this.model.settingsForEvent;
     }
     async doGenerateToolCallsOrText(tools, prompt, options) {
-        const { response,
+        const { response, text: generatedText, metadata, } = await generateText(this.model, this.template.createPrompt(prompt, tools), {
             ...options,
-
+            fullResponse: true,
         });
         try {
-            const { text, toolCalls } = this.template.extractToolCallsAndText(
+            const { text, toolCalls } = this.template.extractToolCallsAndText(generatedText);
             return {
                 response,
                 text,
@@ -42,7 +42,7 @@ export class TextGenerationToolCallsOrGenerateTextModel {
         }
         catch (error) {
             throw new ToolCallsOrTextParseError({
-                valueText:
+                valueText: generatedText,
                 cause: error,
             });
         }
package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs CHANGED
@@ -58,6 +58,6 @@ async function generateToolCallsOrText(model, tools, prompt, options) {
            };
        },
    });
-    return options?.
+    return options?.fullResponse ? fullResponse : fullResponse.value;
 }
 exports.generateToolCallsOrText = generateToolCallsOrText;
package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts CHANGED
@@ -15,13 +15,13 @@ type ToToolCallUnion<T> = {
 }[keyof T];
 type ToOutputValue<TOOL_CALLS extends ToolCallDefinitionArray<ToolDefinition<any, any>[]>> = ToToolCallUnion<ToToolCallDefinitionMap<TOOL_CALLS>>;
 export declare function generateToolCallsOrText<TOOLS extends Array<ToolDefinition<any, any>>, PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((tools: TOOLS) => PROMPT), options?: FunctionOptions & {
-
+    fullResponse?: false;
 }): Promise<{
     text: string | null;
     toolCalls: Array<ToOutputValue<TOOLS>> | null;
 }>;
 export declare function generateToolCallsOrText<TOOLS extends ToolDefinition<any, any>[], PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((tools: TOOLS) => PROMPT), options: FunctionOptions & {
-
+    fullResponse?: boolean;
 }): Promise<{
     value: {
         text: string | null;
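`generateToolCallsOrText` gets the matching option: the first overload resolves directly to `{ text, toolCalls }`, which is what `useToolsOrGenerateText` relies on below by passing `fullResponse: false`. A placeholder sketch (model, tools, and import path are assumptions):

```ts
import { generateToolCallsOrText } from "modelfusion";

// Placeholders: a ToolCallsOrTextGenerationModel and its tool definitions.
declare const model: any;
declare const tools: any[];

async function answerOrCallTools() {
  const { text, toolCalls } = await generateToolCallsOrText(
    model,
    tools,
    "What is the weather in Berlin, and what is 2 + 2?"
  );

  if (toolCalls == null) {
    console.log("plain text answer:", text);
  } else {
    console.log("requested tool calls:", toolCalls);
  }
}
```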
package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs CHANGED
@@ -15,7 +15,7 @@ async function useToolsOrGenerateText(model, tools, prompt, options) {
         input: expandedPrompt,
         functionType: "use-tools-or-generate-text",
         execute: async (options) => {
-            const modelResponse = await (0, generateToolCallsOrText_js_1.generateToolCallsOrText)(model, tools, expandedPrompt, { ...options,
+            const modelResponse = await (0, generateToolCallsOrText_js_1.generateToolCallsOrText)(model, tools, expandedPrompt, { ...options, fullResponse: false });
             const { toolCalls, text } = modelResponse;
             // no tool calls:
             if (toolCalls == null) {
package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js CHANGED
@@ -12,7 +12,7 @@ export async function useToolsOrGenerateText(model, tools, prompt, options) {
         input: expandedPrompt,
         functionType: "use-tools-or-generate-text",
         execute: async (options) => {
-            const modelResponse = await generateToolCallsOrText(model, tools, expandedPrompt, { ...options,
+            const modelResponse = await generateToolCallsOrText(model, tools, expandedPrompt, { ...options, fullResponse: false });
             const { toolCalls, text } = modelResponse;
             // no tool calls:
             if (toolCalls == null) {