modelfusion 0.69.0 → 0.71.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -13
- package/model-function/ModelCallEvent.d.ts +3 -2
- package/model-function/generate-text/index.cjs +1 -8
- package/model-function/generate-text/index.d.ts +1 -8
- package/model-function/generate-text/index.js +1 -8
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +31 -3
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +29 -1
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +29 -1
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +79 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +31 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +74 -0
- package/model-function/generate-text/prompt-format/ChatPrompt.d.ts +28 -23
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs +17 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.d.ts +8 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.js +13 -0
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +41 -27
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +20 -2
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +38 -24
- package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +27 -30
- package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +7 -5
- package/model-function/generate-text/prompt-format/TextPromptFormat.js +24 -27
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +21 -29
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts +2 -2
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +19 -27
- package/model-function/generate-text/prompt-format/index.cjs +39 -0
- package/model-function/generate-text/prompt-format/index.d.ts +10 -0
- package/model-function/generate-text/prompt-format/index.js +10 -0
- package/model-function/generate-text/prompt-format/trimChatPrompt.cjs +17 -22
- package/model-function/generate-text/prompt-format/trimChatPrompt.js +17 -22
- package/model-function/generate-text/prompt-format/validateChatPrompt.cjs +12 -24
- package/model-function/generate-text/prompt-format/validateChatPrompt.d.ts +0 -3
- package/model-function/generate-text/prompt-format/validateChatPrompt.js +10 -21
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.cjs +41 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.d.ts +17 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.js +37 -0
- package/model-function/generate-tool-call/ToolCall.d.ts +5 -0
- package/model-function/generate-tool-call/ToolCallGenerationModel.d.ts +3 -3
- package/model-function/generate-tool-call/ToolCallParametersValidationError.cjs +1 -1
- package/model-function/generate-tool-call/ToolCallParametersValidationError.js +1 -1
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.d.ts +23 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.js +1 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.d.ts +21 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.js +1 -0
- package/model-function/generate-tool-call/ToolDefinition.cjs +2 -0
- package/model-function/generate-tool-call/{ToolCallDefinition.d.ts → ToolDefinition.d.ts} +1 -1
- package/model-function/generate-tool-call/ToolDefinition.js +1 -0
- package/model-function/generate-tool-call/generateToolCall.cjs +2 -1
- package/model-function/generate-tool-call/generateToolCall.d.ts +6 -11
- package/model-function/generate-tool-call/generateToolCall.js +2 -1
- package/model-function/generate-tool-call/generateToolCallsOrText.cjs +63 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.d.ts +33 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.js +59 -0
- package/model-function/generate-tool-call/index.cjs +7 -2
- package/model-function/generate-tool-call/index.d.ts +7 -2
- package/model-function/generate-tool-call/index.js +7 -2
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +22 -26
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -2
- package/model-provider/anthropic/AnthropicPromptFormat.js +19 -23
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +3 -3
- package/model-provider/anthropic/index.cjs +14 -2
- package/model-provider/anthropic/index.d.ts +1 -1
- package/model-provider/anthropic/index.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs} +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts} +2 -2
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js} +2 -2
- package/model-provider/llamacpp/index.cjs +14 -2
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/openai/OpenAICompletionModel.cjs +4 -4
- package/model-provider/openai/OpenAICompletionModel.d.ts +1 -1
- package/model-provider/openai/OpenAICompletionModel.js +5 -5
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +4 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +29 -3
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +63 -16
- package/model-provider/openai/chat/OpenAIChatModel.js +30 -4
- package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +22 -34
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatPromptFormat.js +19 -31
- package/model-provider/openai/index.cjs +14 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +2 -2
- package/tool/Tool.cjs +1 -1
- package/tool/Tool.d.ts +1 -1
- package/tool/Tool.js +1 -1
- /package/model-function/generate-tool-call/{ToolCallDefinition.cjs → ToolCall.cjs} +0 -0
- /package/model-function/generate-tool-call/{ToolCallDefinition.js → ToolCall.js} +0 -0
package/model-function/generate-text/prompt-format/trimChatPrompt.cjs

@@ -15,39 +15,34 @@ const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
 async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     (0, validateChatPrompt_js_1.validateChatPrompt)(prompt);
-
-
-
-
-    messages.push(prompt[prompt.length - 1]);
+    let minimalPrompt = {
+        system: prompt.system,
+        messages: [prompt.messages[prompt.messages.length - 1]], // last user message
+    };
     // check if the minimal prompt is already too long
-    const promptTokenCount = await model.countPromptTokens(
-        ...systemMessage,
-        ...messages,
-    ]);
+    const promptTokenCount = await model.countPromptTokens(minimalPrompt);
     // the minimal chat prompt is already over the token limit and cannot be trimmed further:
     if (promptTokenCount > tokenLimit) {
-        return
+        return minimalPrompt;
     }
     // inner messages
-    const innerMessages = prompt.slice(
+    const innerMessages = prompt.messages.slice(0, -1);
     // taking always a pair of user-message and ai-message from the end, moving backwards
     for (let i = innerMessages.length - 1; i >= 0; i -= 2) {
-        const
+        const assistantMessage = innerMessages[i];
         const userMessage = innerMessages[i - 1];
-        // create a temporary
-        const
-
-            userMessage,
-
-
-        ]);
+        // create a temporary prompt and check if it fits within the token limit
+        const attemptedPrompt = {
+            system: prompt.system,
+            messages: [userMessage, assistantMessage, ...minimalPrompt.messages],
+        };
+        const tokenCount = await model.countPromptTokens(attemptedPrompt);
         if (tokenCount > tokenLimit) {
             break;
         }
-        // if it fits,
-
+        // if it fits, its the new minimal prompt
+        minimalPrompt = attemptedPrompt;
     }
-    return
+    return minimalPrompt;
 }
 exports.trimChatPrompt = trimChatPrompt;

package/model-function/generate-text/prompt-format/trimChatPrompt.js

@@ -12,38 +12,33 @@ import { validateChatPrompt } from "./validateChatPrompt.js";
 export async function trimChatPrompt({ prompt, model, tokenLimit = model.contextWindowSize -
     (model.settings.maxCompletionTokens ?? model.contextWindowSize / 4), }) {
     validateChatPrompt(prompt);
-
-
-
-
-    messages.push(prompt[prompt.length - 1]);
+    let minimalPrompt = {
+        system: prompt.system,
+        messages: [prompt.messages[prompt.messages.length - 1]], // last user message
+    };
     // check if the minimal prompt is already too long
-    const promptTokenCount = await model.countPromptTokens(
-        ...systemMessage,
-        ...messages,
-    ]);
+    const promptTokenCount = await model.countPromptTokens(minimalPrompt);
     // the minimal chat prompt is already over the token limit and cannot be trimmed further:
     if (promptTokenCount > tokenLimit) {
-        return
+        return minimalPrompt;
     }
     // inner messages
-    const innerMessages = prompt.slice(
+    const innerMessages = prompt.messages.slice(0, -1);
     // taking always a pair of user-message and ai-message from the end, moving backwards
     for (let i = innerMessages.length - 1; i >= 0; i -= 2) {
-        const
+        const assistantMessage = innerMessages[i];
         const userMessage = innerMessages[i - 1];
-        // create a temporary
-        const
-
-            userMessage,
-
-
-        ]);
+        // create a temporary prompt and check if it fits within the token limit
+        const attemptedPrompt = {
+            system: prompt.system,
+            messages: [userMessage, assistantMessage, ...minimalPrompt.messages],
+        };
+        const tokenCount = await model.countPromptTokens(attemptedPrompt);
         if (tokenCount > tokenLimit) {
             break;
         }
-        // if it fits,
-
+        // if it fits, its the new minimal prompt
+        minimalPrompt = attemptedPrompt;
     }
-    return
+    return minimalPrompt;
 }

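The two trimChatPrompt hunks above show the move from the old array-based `ChatPrompt` to the new object shape with a `system` string and a `messages` array. A minimal TypeScript sketch of the two shapes for comparison; the `content` field name on the new messages is an assumption, since the updated `ChatPrompt.d.ts` is not expanded in this diff:

```ts
// Old ChatPrompt (<= 0.69): an array of single-key message objects, which the
// old code inspected with `"system" in ...` / `"user" in ...` checks.
const oldChatPrompt = [
  { system: "You are a helpful assistant." },
  { user: "Hello" },
  { ai: "Hi, how can I help?" },
  { user: "Summarize this release for me." },
];

// New ChatPrompt (0.70+): an optional system string plus role-tagged messages.
// trimChatPrompt keeps the system entry and the last user message, then re-adds
// user/assistant pairs from the end until the token limit would be exceeded.
const chatPrompt = {
  system: "You are a helpful assistant.",
  messages: [
    { role: "user" as const, content: "Hello" },
    { role: "assistant" as const, content: "Hi, how can I help?" },
    { role: "user" as const, content: "Summarize this release for me." },
  ],
};
```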
package/model-function/generate-text/prompt-format/validateChatPrompt.cjs

@@ -1,36 +1,24 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.validateChatPrompt =
-
-    constructor(message) {
-        super(message);
-        this.name = "ChatPromptValidationError";
-    }
-}
-exports.ChatPromptValidationError = ChatPromptValidationError;
+exports.validateChatPrompt = void 0;
+const ChatPromptValidationError_js_1 = require("./ChatPromptValidationError.cjs");
 /**
  * Checks if a chat prompt is valid. Throws a `ChatPromptValidationError` if it's not.
  */
 function validateChatPrompt(chatPrompt) {
-
-
-
-    const initialType = "system" in chatPrompt[0] ? "system" : "user";
-    if (initialType === "system" && chatPrompt.length === 1) {
-        throw new ChatPromptValidationError("A system message should be followed by a user message.");
+    const messages = chatPrompt.messages;
+    if (messages.length < 1) {
+        throw new ChatPromptValidationError_js_1.ChatPromptValidationError("ChatPrompt should have at least one message.");
     }
-    let
-
-    const
-    if (
-        throw new ChatPromptValidationError(`Message at index ${i} should
+    for (let i = 0; i < messages.length; i++) {
+        const expectedRole = i % 2 === 0 ? "user" : "assistant";
+        const role = messages[i].role;
+        if (role !== expectedRole) {
+            throw new ChatPromptValidationError_js_1.ChatPromptValidationError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`);
         }
-        // Flip the expected type for the next iteration.
-        expectedType = expectedType === "user" ? "ai" : "user";
     }
-
-
-        throw new ChatPromptValidationError("The last message should be a user message.");
+    if (messages.length % 2 === 0) {
+        throw new ChatPromptValidationError_js_1.ChatPromptValidationError("The last message must be a user message.");
     }
 }
 exports.validateChatPrompt = validateChatPrompt;

package/model-function/generate-text/prompt-format/validateChatPrompt.js

@@ -1,31 +1,20 @@
-
-    constructor(message) {
-        super(message);
-        this.name = "ChatPromptValidationError";
-    }
-}
+import { ChatPromptValidationError } from "./ChatPromptValidationError.js";
 /**
  * Checks if a chat prompt is valid. Throws a `ChatPromptValidationError` if it's not.
  */
 export function validateChatPrompt(chatPrompt) {
-
+    const messages = chatPrompt.messages;
+    if (messages.length < 1) {
         throw new ChatPromptValidationError("ChatPrompt should have at least one message.");
     }
-
-
-
-
-
-    for (let i = 1; i < chatPrompt.length; i++) {
-        const messageType = "user" in chatPrompt[i] ? "user" : "ai";
-        if (messageType !== expectedType) {
-            throw new ChatPromptValidationError(`Message at index ${i} should be a ${expectedType} message, but it's a ${messageType} message.`);
+    for (let i = 0; i < messages.length; i++) {
+        const expectedRole = i % 2 === 0 ? "user" : "assistant";
+        const role = messages[i].role;
+        if (role !== expectedRole) {
+            throw new ChatPromptValidationError(`Message at index ${i} should have role '${expectedRole}', but has role '${role}'.`);
         }
-        // Flip the expected type for the next iteration.
-        expectedType = expectedType === "user" ? "ai" : "user";
     }
-
-
-        throw new ChatPromptValidationError("The last message should be a user message.");
+    if (messages.length % 2 === 0) {
+        throw new ChatPromptValidationError("The last message must be a user message.");
    }
 }

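The rewritten validator enforces the new structure directly: roles must alternate starting with `user`, and the message count must be odd so the prompt ends on a user turn. A small sketch of prompts it accepts and rejects, assuming `validateChatPrompt` is re-exported from the package root (otherwise import it from the prompt-format module) and again assuming a `content` field on the messages:

```ts
import { validateChatPrompt } from "modelfusion"; // assumed re-export

// Passes: roles alternate user/assistant and the last message is a user message.
validateChatPrompt({
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi!" },
    { role: "user", content: "What changed in 0.70?" },
  ],
});

// Throws ChatPromptValidationError: message at index 0 should have role 'user'.
validateChatPrompt({
  system: "You are a helpful assistant.",
  messages: [{ role: "assistant", content: "I speak first." }],
});

// Throws ChatPromptValidationError: the last message must be a user message
// (an even message count means the prompt ends on an assistant turn).
validateChatPrompt({
  system: "You are a helpful assistant.",
  messages: [
    { role: "user", content: "Hello" },
    { role: "assistant", content: "Hi!" },
  ],
});
```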
package/model-function/generate-tool-call/NoSuchToolDefinitionError.cjs

@@ -0,0 +1,41 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.NoSuchToolDefinitionError = void 0;
+class NoSuchToolDefinitionError extends Error {
+    constructor({ toolName, parameters, }) {
+        super(`Tool definition '${toolName}' not found. ` +
+            `Parameters: ${JSON.stringify(parameters)}.`);
+        Object.defineProperty(this, "toolName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "cause", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "parameters", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.name = "NoSuchToolDefinitionError";
+        this.toolName = toolName;
+        this.parameters = parameters;
+    }
+    toJSON() {
+        return {
+            name: this.name,
+            message: this.message,
+            cause: this.cause,
+            stack: this.stack,
+            toolName: this.toolName,
+            parameter: this.parameters,
+        };
+    }
+}
+exports.NoSuchToolDefinitionError = NoSuchToolDefinitionError;

package/model-function/generate-tool-call/NoSuchToolDefinitionError.d.ts

@@ -0,0 +1,17 @@
+export declare class NoSuchToolDefinitionError extends Error {
+    readonly toolName: string;
+    readonly cause: unknown;
+    readonly parameters: unknown;
+    constructor({ toolName, parameters, }: {
+        toolName: string;
+        parameters: unknown;
+    });
+    toJSON(): {
+        name: string;
+        message: string;
+        cause: unknown;
+        stack: string | undefined;
+        toolName: string;
+        parameter: unknown;
+    };
+}

package/model-function/generate-tool-call/NoSuchToolDefinitionError.js

@@ -0,0 +1,37 @@
+export class NoSuchToolDefinitionError extends Error {
+    constructor({ toolName, parameters, }) {
+        super(`Tool definition '${toolName}' not found. ` +
+            `Parameters: ${JSON.stringify(parameters)}.`);
+        Object.defineProperty(this, "toolName", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "cause", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        Object.defineProperty(this, "parameters", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: void 0
+        });
+        this.name = "NoSuchToolDefinitionError";
+        this.toolName = toolName;
+        this.parameters = parameters;
+    }
+    toJSON() {
+        return {
+            name: this.name,
+            message: this.message,
+            cause: this.cause,
+            stack: this.stack,
+            toolName: this.toolName,
+            parameter: this.parameters,
+        };
+    }
+}

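`NoSuchToolDefinitionError` is thrown by the new `generateToolCallsOrText` when the model requests a tool name that is not in the supplied tool list. A hedged handling sketch, assuming the class is re-exported from the package root (otherwise import it from the generate-tool-call module); `runAgentStep` is a hypothetical stand-in for code that ends in a `generateToolCallsOrText` call:

```ts
import { NoSuchToolDefinitionError } from "modelfusion"; // assumed re-export

// Hypothetical wrapper around a generateToolCallsOrText call
// (see the usage sketch at the end of this diff).
async function runAgentStep(): Promise<void> {
  // ...
}

try {
  await runAgentStep();
} catch (error) {
  if (error instanceof NoSuchToolDefinitionError) {
    // toJSON() exposes name, message, cause, stack, toolName and the raw
    // parameters, which makes the error easy to log in a structured way.
    console.error(JSON.stringify(error.toJSON()));
  } else {
    throw error;
  }
}
```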
package/model-function/generate-tool-call/ToolCallGenerationModel.d.ts

@@ -1,12 +1,12 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { Model, ModelSettings } from "../Model.js";
-import {
+import { ToolDefinition } from "./ToolDefinition.js";
 export interface ToolCallGenerationModelSettings extends ModelSettings {
 }
 export interface ToolCallGenerationModel<PROMPT, SETTINGS extends ToolCallGenerationModelSettings = ToolCallGenerationModelSettings> extends Model<SETTINGS> {
-    doGenerateToolCall(tool:
+    doGenerateToolCall(tool: ToolDefinition<string, unknown>, prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
         response: unknown;
-
+        toolCall: {
             id: string;
             parameters: unknown;
         } | null;

package/model-function/generate-tool-call/ToolCallParametersValidationError.cjs

@@ -5,7 +5,7 @@ const getErrorMessage_js_1 = require("../../util/getErrorMessage.cjs");
 class ToolCallParametersValidationError extends Error {
     constructor({ toolName, parameters, cause, }) {
         super(`Parameter validation failed for tool '${toolName}'. ` +
-            `
+            `Parameters: ${JSON.stringify(parameters)}.\n` +
             `Error message: ${(0, getErrorMessage_js_1.getErrorMessage)(cause)}`);
         Object.defineProperty(this, "toolName", {
             enumerable: true,

package/model-function/generate-tool-call/ToolCallParametersValidationError.js

@@ -2,7 +2,7 @@ import { getErrorMessage } from "../../util/getErrorMessage.js";
 export class ToolCallParametersValidationError extends Error {
     constructor({ toolName, parameters, cause, }) {
         super(`Parameter validation failed for tool '${toolName}'. ` +
-            `
+            `Parameters: ${JSON.stringify(parameters)}.\n` +
             `Error message: ${getErrorMessage(cause)}`);
         Object.defineProperty(this, "toolName", {
             enumerable: true,

package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.d.ts

@@ -0,0 +1,23 @@
+import { BaseModelCallFinishedEvent, BaseModelCallStartedEvent } from "../ModelCallEvent.js";
+export interface ToolCallsOrTextGenerationStartedEvent extends BaseModelCallStartedEvent {
+    functionType: "generate-tool-calls-or-text";
+}
+export type ToolCallsOrTextGenerationFinishedEventResult = {
+    status: "success";
+    response: unknown;
+    value: unknown;
+    usage?: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+} | {
+    status: "error";
+    error: unknown;
+} | {
+    status: "abort";
+};
+export type ToolCallsOrTextGenerationFinishedEvent = BaseModelCallFinishedEvent & {
+    functionType: "generate-tool-calls-or-text";
+    result: ToolCallsOrTextGenerationFinishedEventResult;
+};

package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.js

@@ -0,0 +1 @@
+export {};

package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.d.ts

@@ -0,0 +1,21 @@
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { Model, ModelSettings } from "../Model.js";
+import { ToolDefinition } from "./ToolDefinition.js";
+export interface ToolCallsOrTextGenerationModelSettings extends ModelSettings {
+}
+export interface ToolCallsOrTextGenerationModel<PROMPT, SETTINGS extends ToolCallsOrTextGenerationModelSettings> extends Model<SETTINGS> {
+    doGenerateToolCallsOrText(tools: Array<ToolDefinition<string, unknown>>, prompt: PROMPT, options?: FunctionOptions): PromiseLike<{
+        response: unknown;
+        text: string | null;
+        toolCalls: Array<{
+            id: string;
+            name: string;
+            parameters: unknown;
+        }> | null;
+        usage?: {
+            promptTokens: number;
+            completionTokens: number;
+            totalTokens: number;
+        };
+    }>;
+}

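This is the interface a provider implements to participate in `generateToolCallsOrText`. The sketch below only illustrates the shape of the value `doGenerateToolCallsOrText` is expected to resolve with (raw response, extracted text, unvalidated tool calls, optional usage); it is not a complete `Model` implementation, and all concrete values are invented:

```ts
// Example result of a doGenerateToolCallsOrText call, before generateToolCallsOrText
// validates the parameters of each tool call against the matching ToolDefinition schema.
const exampleResult = {
  response: { raw: "provider-specific response body" }, // passed through unchanged
  text: null as string | null,                          // plain-text answer, if any
  toolCalls: [
    {
      id: "call_1",
      name: "getTemperature",
      parameters: { city: "Berlin" } as unknown, // still unvalidated at this point
    },
  ],
  usage: { promptTokens: 12, completionTokens: 9, totalTokens: 21 },
};
```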
package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.js

@@ -0,0 +1 @@
+export {};

package/model-function/generate-tool-call/ToolDefinition.d.ts (renamed from ToolCallDefinition.d.ts)

@@ -1,6 +1,6 @@
 import { JsonSchemaProducer } from "../../core/schema/JsonSchemaProducer.js";
 import { Schema } from "../../core/schema/Schema.js";
-export interface
+export interface ToolDefinition<NAME extends string, PARAMETERS> {
     name: NAME;
     description?: string;
     parameters: Schema<PARAMETERS> & JsonSchemaProducer;

package/model-function/generate-tool-call/ToolDefinition.js

@@ -0,0 +1 @@
+export {};

package/model-function/generate-tool-call/generateToolCall.cjs

@@ -17,7 +17,7 @@ async function generateToolCall(model, tool, prompt, options) {
         generateResponse: async (options) => {
             try {
                 const result = await model.doGenerateToolCall(tool, expandedPrompt, options);
-                const toolCall = result.
+                const toolCall = result.toolCall;
                 if (toolCall === null) {
                     throw new ToolCallGenerationError_js_1.ToolCallsGenerationError({
                         toolName: tool.name,
@@ -36,6 +36,7 @@ async function generateToolCall(model, tool, prompt, options) {
                     response: result.response,
                     extractedValue: {
                         id: toolCall.id,
+                        name: tool.name,
                         parameters: parseResult.data,
                     },
                     usage: result.usage,

package/model-function/generate-tool-call/generateToolCall.d.ts

@@ -1,20 +1,15 @@
 import { FunctionOptions } from "../../core/FunctionOptions.js";
 import { ModelCallMetadata } from "../ModelCallMetadata.js";
-import {
+import { ToolDefinition } from "./ToolDefinition.js";
 import { ToolCallGenerationModel, ToolCallGenerationModelSettings } from "./ToolCallGenerationModel.js";
-
+import { ToolCall } from "./ToolCall.js";
+export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options?: FunctionOptions & {
     returnType?: "structure";
-}): Promise<
-
-    parameters: PARAMETERS;
-}>;
-export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolCallDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolCallDefinition<NAME, PARAMETERS>) => PROMPT), options: FunctionOptions & {
+}): Promise<ToolCall<NAME, PARAMETERS>>;
+export declare function generateToolCall<PARAMETERS, PROMPT, NAME extends string, SETTINGS extends ToolCallGenerationModelSettings>(model: ToolCallGenerationModel<PROMPT, SETTINGS>, tool: ToolDefinition<NAME, PARAMETERS>, prompt: PROMPT | ((tool: ToolDefinition<NAME, PARAMETERS>) => PROMPT), options: FunctionOptions & {
     returnType: "full";
 }): Promise<{
-    value:
-        id: string;
-        parameters: PARAMETERS;
-    };
+    value: ToolCall<NAME, PARAMETERS>;
     response: unknown;
     metadata: ModelCallMetadata;
 }>;

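`generateToolCall` now takes a `ToolDefinition` and resolves to a typed `ToolCall`. A usage sketch under several assumptions: that `generateToolCall`, `ZodSchema`, `OpenAIChatModel`, and `OpenAIChatMessage` are exported from the package root as in neighbouring versions, that the `OpenAIChatModel` changes in this diff add support for the tool-call interface, and that the chosen model name supports the OpenAI tools API; adjust imports and model to your setup:

```ts
import { generateToolCall, OpenAIChatModel, OpenAIChatMessage, ZodSchema } from "modelfusion";
import { z } from "zod";

// A tool definition: name, optional description, and a parameters schema that is
// both a Schema<PARAMETERS> and a JsonSchemaProducer (ZodSchema assumed here).
const getTemperature = {
  name: "getTemperature" as const,
  description: "Get the current temperature for a city.",
  parameters: new ZodSchema(z.object({ city: z.string() })),
};

const toolCall = await generateToolCall(
  new OpenAIChatModel({ model: "gpt-3.5-turbo-1106" }),
  getTemperature,
  [OpenAIChatMessage.user("How warm is it in Boston right now?")]
);

// Typed as ToolCall<"getTemperature", { city: string }>:
console.log(toolCall.id, toolCall.name, toolCall.parameters.city);
```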
package/model-function/generate-tool-call/generateToolCall.js

@@ -14,7 +14,7 @@ export async function generateToolCall(model, tool, prompt, options) {
         generateResponse: async (options) => {
             try {
                 const result = await model.doGenerateToolCall(tool, expandedPrompt, options);
-                const toolCall = result.
+                const toolCall = result.toolCall;
                 if (toolCall === null) {
                     throw new ToolCallsGenerationError({
                         toolName: tool.name,
@@ -33,6 +33,7 @@ export async function generateToolCall(model, tool, prompt, options) {
                     response: result.response,
                     extractedValue: {
                         id: toolCall.id,
+                        name: tool.name,
                         parameters: parseResult.data,
                     },
                     usage: result.usage,

package/model-function/generate-tool-call/generateToolCallsOrText.cjs

@@ -0,0 +1,63 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.generateToolCallsOrText = void 0;
+const executeStandardCall_js_1 = require("../executeStandardCall.cjs");
+const NoSuchToolDefinitionError_js_1 = require("./NoSuchToolDefinitionError.cjs");
+const ToolCallParametersValidationError_js_1 = require("./ToolCallParametersValidationError.cjs");
+async function generateToolCallsOrText(model, tools, prompt, options) {
+    // Note: PROMPT must not be a function.
+    const expandedPrompt = typeof prompt === "function"
+        ? prompt(tools)
+        : prompt;
+    const fullResponse = await (0, executeStandardCall_js_1.executeStandardCall)({
+        functionType: "generate-tool-calls-or-text",
+        input: expandedPrompt,
+        model,
+        options,
+        generateResponse: async (options) => {
+            const result = await model.doGenerateToolCallsOrText(tools, expandedPrompt, options);
+            const { text, toolCalls: rawToolCalls } = result;
+            // no tool calls:
+            if (rawToolCalls == null) {
+                return {
+                    response: result.response,
+                    extractedValue: { text, toolCalls: null },
+                    usage: result.usage,
+                };
+            }
+            // map tool calls:
+            const toolCalls = rawToolCalls.map((rawToolCall) => {
+                const tool = tools.find((tool) => tool.name === rawToolCall.name);
+                if (tool == undefined) {
+                    throw new NoSuchToolDefinitionError_js_1.NoSuchToolDefinitionError({
+                        toolName: rawToolCall.name,
+                        parameters: rawToolCall.parameters,
+                    });
+                }
+                const parseResult = tool.parameters.validate(rawToolCall.parameters);
+                if (!parseResult.success) {
+                    throw new ToolCallParametersValidationError_js_1.ToolCallParametersValidationError({
+                        toolName: tool.name,
+                        parameters: rawToolCall.parameters,
+                        cause: parseResult.error,
+                    });
+                }
+                return {
+                    id: rawToolCall.id,
+                    name: tool.name,
+                    parameters: parseResult.data,
+                };
+            });
+            return {
+                response: result.response,
+                extractedValue: {
+                    text,
+                    toolCalls: toolCalls,
+                },
+                usage: result.usage,
+            };
+        },
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+}
+exports.generateToolCallsOrText = generateToolCallsOrText;

package/model-function/generate-tool-call/generateToolCallsOrText.d.ts

@@ -0,0 +1,33 @@
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { ModelCallMetadata } from "../ModelCallMetadata.js";
+import { ToolCallsOrTextGenerationModel, ToolCallsOrTextGenerationModelSettings } from "./ToolCallsOrTextGenerationModel.js";
+import { ToolDefinition } from "./ToolDefinition.js";
+type ToolCallDefinitionArray<T extends ToolDefinition<any, any>[]> = T;
+type ToToolCallDefinitionMap<T extends ToolCallDefinitionArray<ToolDefinition<any, any>[]>> = {
+    [K in T[number]["name"]]: Extract<T[number], ToolDefinition<K, any>>;
+};
+type ToToolCallUnion<T> = {
+    [KEY in keyof T]: T[KEY] extends ToolDefinition<any, infer PARAMETERS> ? {
+        id: string;
+        name: KEY;
+        parameters: PARAMETERS;
+    } : never;
+}[keyof T];
+type ToOutputValue<TOOL_CALLS extends ToolCallDefinitionArray<ToolDefinition<any, any>[]>> = ToToolCallUnion<ToToolCallDefinitionMap<TOOL_CALLS>>;
+export declare function generateToolCallsOrText<TOOLS extends Array<ToolDefinition<any, any>>, PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((structureDefinitions: TOOLS) => PROMPT), options?: FunctionOptions & {
+    returnType?: "structure";
+}): Promise<{
+    text: string | null;
+    toolCalls: Array<ToOutputValue<TOOLS>> | null;
+}>;
+export declare function generateToolCallsOrText<TOOLS extends ToolDefinition<any, any>[], PROMPT>(model: ToolCallsOrTextGenerationModel<PROMPT, ToolCallsOrTextGenerationModelSettings>, tools: TOOLS, prompt: PROMPT | ((structureDefinitions: TOOLS) => PROMPT), options: FunctionOptions & {
+    returnType?: "full";
+}): Promise<{
+    value: {
+        text: string | null;
+        toolCalls: Array<ToOutputValue<TOOLS>>;
+    };
+    response: unknown;
+    metadata: ModelCallMetadata;
+}>;
+export {};

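`generateToolCallsOrText` is the headline addition of this release: the model may answer with plain text, with one or more tool calls, or with both, and the mapped types above give `toolCalls` a discriminated union keyed by tool name. A usage sketch under the same assumptions as the `generateToolCall` example above (root exports, `ZodSchema` wrapper, a tools-capable OpenAI chat model):

```ts
import {
  generateToolCallsOrText,
  OpenAIChatModel,
  OpenAIChatMessage,
  ZodSchema,
} from "modelfusion";
import { z } from "zod";

const getTemperature = {
  name: "getTemperature" as const,
  description: "Get the current temperature for a city.",
  parameters: new ZodSchema(z.object({ city: z.string() })),
};
const sendMessage = {
  name: "sendMessage" as const,
  description: "Send a final answer to the user.",
  parameters: new ZodSchema(z.object({ text: z.string() })),
};

const { text, toolCalls } = await generateToolCallsOrText(
  new OpenAIChatModel({ model: "gpt-3.5-turbo-1106" }),
  [getTemperature, sendMessage],
  [OpenAIChatMessage.user("Is it warmer in Rome or in Oslo right now?")]
);

// The model may return plain text, tool calls, or both:
if (text != null) {
  console.log("text answer:", text);
}
for (const toolCall of toolCalls ?? []) {
  switch (toolCall.name) {
    case "getTemperature":
      console.log("look up temperature for", toolCall.parameters.city);
      break;
    case "sendMessage":
      console.log("send message:", toolCall.parameters.text);
      break;
  }
}
```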