@ai-sdk-tool/parser 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +23 -0
- package/src/hermes-middleware.ts +270 -0
- package/src/index.ts +83 -0
- package/src/utils.ts +31 -0
package/package.json
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@ai-sdk-tool/parser",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "",
|
|
5
|
+
"main": "index.js",
|
|
6
|
+
"scripts": {
|
|
7
|
+
"test": "echo \"Error: no test specified\" && exit 1"
|
|
8
|
+
},
|
|
9
|
+
"keywords": [],
|
|
10
|
+
"author": "",
|
|
11
|
+
"license": "ISC",
|
|
12
|
+
"packageManager": "pnpm@9.14.4+sha1.64b6e81e79630419b675c555ef3b65607cfd6315",
|
|
13
|
+
"dependencies": {
|
|
14
|
+
"@ai-sdk/openai-compatible": "^0.2.9",
|
|
15
|
+
"ai": "^4.3.4",
|
|
16
|
+
"relaxed-json": "^1.0.3",
|
|
17
|
+
"tsx": "^4.19.3",
|
|
18
|
+
"zod": "^3.24.2"
|
|
19
|
+
},
|
|
20
|
+
"devDependencies": {
|
|
21
|
+
"@types/node": "^22.14.0"
|
|
22
|
+
}
|
|
23
|
+
}
|
|
@@ -0,0 +1,270 @@
|
|
|
1
|
+
import {
|
|
2
|
+
generateId,
|
|
3
|
+
LanguageModelV1Middleware,
|
|
4
|
+
LanguageModelV1Prompt,
|
|
5
|
+
LanguageModelV1StreamPart,
|
|
6
|
+
} from "ai";
|
|
7
|
+
import * as RJSON from "relaxed-json";
|
|
8
|
+
import { getPotentialStartIndex } from "./utils";
|
|
9
|
+
|
|
10
|
+
// Default Hermes-style tool-calling system prompt. `tools` is the
// JSON-serialized tool definitions that get injected between the
// <tools></tools> tags; the model is instructed to answer tool invocations
// as JSON wrapped in <tool_call></tool_call> XML tags, which the middleware
// below parses back out of the response.
const defaultTemplate = (tools: string) =>
  `You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags.
You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions.
Here are the available tools: <tools>${tools}</tools>
Use the following pydantic model json schema for each tool call you will make: {'title': 'FunctionCall', 'type': 'object', 'properties': {'arguments': {'title': 'Arguments', 'type': 'object'}, 'name': {'title': 'Name', 'type': 'string'}}, 'required': ['arguments', 'name']}
For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:
<tool_call>
{'arguments': <args-dict>, 'name': <function-name>}
</tool_call>`;
|
|
19
|
+
|
|
20
|
+
export function hermesToolMiddleware({
|
|
21
|
+
toolCallTag = "<tool_call>",
|
|
22
|
+
toolCallEndTag = "</tool_call>",
|
|
23
|
+
toolResponseTag = "<tool_response>",
|
|
24
|
+
toolResponseEndTag = "</tool_response>",
|
|
25
|
+
toolSystemPromptTemplate = defaultTemplate,
|
|
26
|
+
}: {
|
|
27
|
+
toolCallTag?: string;
|
|
28
|
+
toolCallEndTag?: string;
|
|
29
|
+
toolResponseTag?: string;
|
|
30
|
+
toolResponseEndTag?: string;
|
|
31
|
+
toolSystemPromptTemplate?: (tools: string) => string;
|
|
32
|
+
}): LanguageModelV1Middleware {
|
|
33
|
+
return {
|
|
34
|
+
middlewareVersion: "v1",
|
|
35
|
+
wrapStream: async ({ doStream }) => {
|
|
36
|
+
const { stream, ...rest } = await doStream();
|
|
37
|
+
|
|
38
|
+
let isFirstToolCall = true;
|
|
39
|
+
let isFirstText = true;
|
|
40
|
+
let afterSwitch = false;
|
|
41
|
+
let isToolCall = false;
|
|
42
|
+
let buffer = "";
|
|
43
|
+
|
|
44
|
+
let toolCallIndex = -1;
|
|
45
|
+
let toolCallBuffer: string[] = [];
|
|
46
|
+
|
|
47
|
+
const transformStream = new TransformStream<
|
|
48
|
+
LanguageModelV1StreamPart,
|
|
49
|
+
LanguageModelV1StreamPart
|
|
50
|
+
>({
|
|
51
|
+
transform(chunk, controller) {
|
|
52
|
+
if (chunk.type === "finish") {
|
|
53
|
+
if (toolCallBuffer.length > 0) {
|
|
54
|
+
toolCallBuffer.forEach((toolCall) => {
|
|
55
|
+
try {
|
|
56
|
+
const parsedToolCall = RJSON.parse(toolCall) as {
|
|
57
|
+
name: string;
|
|
58
|
+
arguments: string;
|
|
59
|
+
};
|
|
60
|
+
|
|
61
|
+
controller.enqueue({
|
|
62
|
+
type: "tool-call",
|
|
63
|
+
toolCallType: "function",
|
|
64
|
+
toolCallId: generateId(),
|
|
65
|
+
toolName: parsedToolCall.name,
|
|
66
|
+
args: JSON.stringify(parsedToolCall.arguments),
|
|
67
|
+
});
|
|
68
|
+
} catch (e) {
|
|
69
|
+
console.error(`Error parsing tool call: ${toolCall}`, e);
|
|
70
|
+
|
|
71
|
+
controller.enqueue({
|
|
72
|
+
type: "text-delta",
|
|
73
|
+
textDelta: `Failed to parse tool call: ${e.message}`,
|
|
74
|
+
});
|
|
75
|
+
}
|
|
76
|
+
});
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
// stop token
|
|
80
|
+
controller.enqueue(chunk);
|
|
81
|
+
|
|
82
|
+
return;
|
|
83
|
+
} else if (chunk.type !== "text-delta") {
|
|
84
|
+
controller.enqueue(chunk);
|
|
85
|
+
return;
|
|
86
|
+
}
|
|
87
|
+
|
|
88
|
+
buffer += chunk.textDelta;
|
|
89
|
+
|
|
90
|
+
function publish(text: string) {
|
|
91
|
+
if (text.length > 0) {
|
|
92
|
+
const prefix =
|
|
93
|
+
afterSwitch && (isToolCall ? !isFirstToolCall : !isFirstText)
|
|
94
|
+
? "\n" // separator
|
|
95
|
+
: "";
|
|
96
|
+
|
|
97
|
+
if (isToolCall) {
|
|
98
|
+
if (!toolCallBuffer[toolCallIndex]) {
|
|
99
|
+
toolCallBuffer[toolCallIndex] = "";
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
toolCallBuffer[toolCallIndex] += text;
|
|
103
|
+
} else {
|
|
104
|
+
controller.enqueue({
|
|
105
|
+
type: "text-delta",
|
|
106
|
+
textDelta: prefix + text,
|
|
107
|
+
});
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
afterSwitch = false;
|
|
111
|
+
|
|
112
|
+
if (isToolCall) {
|
|
113
|
+
isFirstToolCall = false;
|
|
114
|
+
} else {
|
|
115
|
+
isFirstText = false;
|
|
116
|
+
}
|
|
117
|
+
}
|
|
118
|
+
}
|
|
119
|
+
|
|
120
|
+
do {
|
|
121
|
+
const nextTag = isToolCall ? toolCallEndTag : toolCallTag;
|
|
122
|
+
const startIndex = getPotentialStartIndex(buffer, nextTag);
|
|
123
|
+
|
|
124
|
+
// no opening or closing tag found, publish the buffer
|
|
125
|
+
if (startIndex == null) {
|
|
126
|
+
publish(buffer);
|
|
127
|
+
buffer = "";
|
|
128
|
+
break;
|
|
129
|
+
}
|
|
130
|
+
|
|
131
|
+
// publish text before the tag
|
|
132
|
+
publish(buffer.slice(0, startIndex));
|
|
133
|
+
|
|
134
|
+
const foundFullMatch = startIndex + nextTag.length <= buffer.length;
|
|
135
|
+
|
|
136
|
+
if (foundFullMatch) {
|
|
137
|
+
buffer = buffer.slice(startIndex + nextTag.length);
|
|
138
|
+
toolCallIndex++;
|
|
139
|
+
isToolCall = !isToolCall;
|
|
140
|
+
afterSwitch = true;
|
|
141
|
+
} else {
|
|
142
|
+
buffer = buffer.slice(startIndex);
|
|
143
|
+
break;
|
|
144
|
+
}
|
|
145
|
+
} while (true);
|
|
146
|
+
},
|
|
147
|
+
});
|
|
148
|
+
|
|
149
|
+
return {
|
|
150
|
+
stream: stream.pipeThrough(transformStream),
|
|
151
|
+
...rest,
|
|
152
|
+
};
|
|
153
|
+
},
|
|
154
|
+
wrapGenerate: async ({ doGenerate }) => {
|
|
155
|
+
const result = await doGenerate();
|
|
156
|
+
|
|
157
|
+
if (!result.text?.includes(toolCallTag)) {
|
|
158
|
+
return result;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
const toolCallRegex = new RegExp(
|
|
162
|
+
`${toolCallTag}(.*?)(?:${toolCallEndTag}|$)`,
|
|
163
|
+
"gs"
|
|
164
|
+
);
|
|
165
|
+
const matches = [...result.text.matchAll(toolCallRegex)];
|
|
166
|
+
const function_call_tuples = matches.map((match) => match[1] || match[2]);
|
|
167
|
+
|
|
168
|
+
return {
|
|
169
|
+
...result,
|
|
170
|
+
// TODO: Return the remaining value after extracting the tool call tag.
|
|
171
|
+
text: "",
|
|
172
|
+
toolCalls: function_call_tuples.map((toolCall) => {
|
|
173
|
+
const parsedToolCall = RJSON.parse(toolCall) as {
|
|
174
|
+
name: string;
|
|
175
|
+
arguments: string;
|
|
176
|
+
};
|
|
177
|
+
|
|
178
|
+
const toolName = parsedToolCall.name;
|
|
179
|
+
const args = parsedToolCall.arguments;
|
|
180
|
+
|
|
181
|
+
return {
|
|
182
|
+
toolCallType: "function",
|
|
183
|
+
toolCallId: generateId(),
|
|
184
|
+
toolName: toolName,
|
|
185
|
+
args: RJSON.stringify(args),
|
|
186
|
+
};
|
|
187
|
+
}),
|
|
188
|
+
};
|
|
189
|
+
},
|
|
190
|
+
|
|
191
|
+
transformParams: async ({ params }) => {
|
|
192
|
+
const processedPrompt = params.prompt.map((message) => {
|
|
193
|
+
if (message.role === "assistant") {
|
|
194
|
+
return {
|
|
195
|
+
role: "assistant",
|
|
196
|
+
content: message.content.map((content) => {
|
|
197
|
+
if (content.type === "tool-call") {
|
|
198
|
+
return {
|
|
199
|
+
type: "text",
|
|
200
|
+
text: `${toolCallTag}${JSON.stringify({
|
|
201
|
+
arguments: content.args,
|
|
202
|
+
name: content.toolName,
|
|
203
|
+
})}${toolCallEndTag}`,
|
|
204
|
+
};
|
|
205
|
+
}
|
|
206
|
+
|
|
207
|
+
return content;
|
|
208
|
+
}),
|
|
209
|
+
};
|
|
210
|
+
} else if (message.role === "tool") {
|
|
211
|
+
return {
|
|
212
|
+
role: "user",
|
|
213
|
+
content: [
|
|
214
|
+
{
|
|
215
|
+
type: "text",
|
|
216
|
+
text: message.content
|
|
217
|
+
.map(
|
|
218
|
+
(content) =>
|
|
219
|
+
`${toolResponseTag}${JSON.stringify({
|
|
220
|
+
toolName: content.toolName,
|
|
221
|
+
result: content.result,
|
|
222
|
+
})}${toolResponseEndTag}`
|
|
223
|
+
)
|
|
224
|
+
.join("\n"),
|
|
225
|
+
},
|
|
226
|
+
],
|
|
227
|
+
};
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
return message;
|
|
231
|
+
}) as LanguageModelV1Prompt;
|
|
232
|
+
|
|
233
|
+
// Appropriate fixes are needed as they are disappearing in LanguageModelV2
|
|
234
|
+
const originalToolDefinitions =
|
|
235
|
+
params.mode.type === "regular" && params.mode.tools
|
|
236
|
+
? params.mode.tools
|
|
237
|
+
: {};
|
|
238
|
+
|
|
239
|
+
const HermesPrompt = toolSystemPromptTemplate(
|
|
240
|
+
JSON.stringify(Object.entries(originalToolDefinitions))
|
|
241
|
+
);
|
|
242
|
+
|
|
243
|
+
const toolSystemPrompt: LanguageModelV1Prompt =
|
|
244
|
+
processedPrompt[0].role === "system"
|
|
245
|
+
? [
|
|
246
|
+
{
|
|
247
|
+
role: "system",
|
|
248
|
+
content: HermesPrompt + "\n\n" + processedPrompt[0].content,
|
|
249
|
+
},
|
|
250
|
+
...processedPrompt.slice(1),
|
|
251
|
+
]
|
|
252
|
+
: [
|
|
253
|
+
{
|
|
254
|
+
role: "system",
|
|
255
|
+
content: HermesPrompt,
|
|
256
|
+
},
|
|
257
|
+
...processedPrompt,
|
|
258
|
+
];
|
|
259
|
+
|
|
260
|
+
return {
|
|
261
|
+
...params,
|
|
262
|
+
mode: {
|
|
263
|
+
// set the mode back to regular and remove the default tools.
|
|
264
|
+
type: "regular",
|
|
265
|
+
},
|
|
266
|
+
prompt: toolSystemPrompt,
|
|
267
|
+
};
|
|
268
|
+
},
|
|
269
|
+
};
|
|
270
|
+
}
|
package/src/index.ts
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
1
|
+
import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
|
|
2
|
+
import { generateText, streamText, wrapLanguageModel } from "ai";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
import { hermesToolMiddleware } from "./hermes-middleware";
|
|
5
|
+
|
|
6
|
+
// OpenAI-compatible provider client pointed at OpenRouter.
// Reads the API key from the OPENROUTER_API_KEY environment variable.
const openrouter = createOpenAICompatible({
  name: "openrouter",
  apiKey: process.env.OPENROUTER_API_KEY,
  baseURL: "https://openrouter.ai/api/v1",
});
|
|
11
|
+
|
|
12
|
+
// Demo: stream a multi-step tool-calling conversation through a model
// without native tool support (gemma-3-27b-it), using hermesToolMiddleware
// with custom markdown-fence tags instead of the default XML tags.
// Performs live network calls to OpenRouter; requires OPENROUTER_API_KEY.
async function main() {
  const result = streamText({
    // model: openrouter("openai/gpt-4o"),
    model: wrapLanguageModel({
      model: openrouter("google/gemma-3-27b-it"),
      // model: openrouter("nousresearch/hermes-3-llama-3.1-70b"),
      middleware: hermesToolMiddleware({
        // Custom system prompt asking for ```tool_call fenced JSON instead
        // of the default <tool_call> XML tags.
        toolSystemPromptTemplate(tools) {
          return `You have access to functions. If you decide to invoke any of the function(s),
you MUST put it in the format of
\`\`\`tool_call
{'name': <function-name>, 'arguments': <args-dict>}
\`\`\`
You SHOULD NOT include any other text in the response if you call a function
${tools}`;
        },
        // Tags must match the fence format requested in the prompt above.
        toolCallTag: "```tool_call\n",
        toolCallEndTag: "```",
        toolResponseTag: "```tool_response\n",
        toolResponseEndTag: "\n```",
      }),
    }),
    system: "You are a helpful assistant.",
    // prompt: "What is the weather in New York and Los Angeles?",
    prompt: "What is the weather in my city?",
    // Allow up to 4 steps so the model can chain get_location -> get_weather.
    maxSteps: 4,
    tools: {
      get_location: {
        description: "Get the User's location.",
        parameters: z.object({}),
        execute: async () => {
          // Simulate a location API call
          return {
            city: "New York",
            country: "USA",
          };
        },
      },
      get_weather: {
        description:
          "Get the weather for a given city. " +
          "Example cities: 'New York', 'Los Angeles', 'Paris'.",
        parameters: z.object({ city: z.string() }),
        execute: async ({ city }) => {
          // Simulate a weather API call
          const temperature = Math.floor(Math.random() * 100);
          return {
            city,
            temperature,
            condition: "sunny",
          };
        },
      },
    },
  });

  // Print streamed text as it arrives; log each tool result as it completes.
  for await (const part of result.fullStream) {
    if (part.type === "text-delta") {
      process.stdout.write(part.textDelta);
    } else if (part.type === "tool-result") {
      console.log({
        name: part.toolName,
        args: part.args,
        result: part.result,
      });
    }
  }

  console.log("\n\n[done]");
}

main().catch(console.error);
|
package/src/utils.ts
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Returns the index of the start of the searchedText in the text, or null if it
|
|
3
|
+
* is not found.
|
|
4
|
+
* ref: https://github.com/vercel/ai/blob/452bf12f0be9cb398d4af85a006bca13c8ce36d8/packages/ai/core/util/get-potential-start-index.ts
|
|
5
|
+
*/
|
|
6
|
+
export function getPotentialStartIndex(
|
|
7
|
+
text: string,
|
|
8
|
+
searchedText: string
|
|
9
|
+
): number | null {
|
|
10
|
+
// Return null immediately if searchedText is empty.
|
|
11
|
+
if (searchedText.length === 0) {
|
|
12
|
+
return null;
|
|
13
|
+
}
|
|
14
|
+
|
|
15
|
+
// Check if the searchedText exists as a direct substring of text.
|
|
16
|
+
const directIndex = text.indexOf(searchedText);
|
|
17
|
+
if (directIndex !== -1) {
|
|
18
|
+
return directIndex;
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
// Otherwise, look for the largest suffix of "text" that matches
|
|
22
|
+
// a prefix of "searchedText". We go from the end of text inward.
|
|
23
|
+
for (let i = text.length - 1; i >= 0; i--) {
|
|
24
|
+
const suffix = text.substring(i);
|
|
25
|
+
if (searchedText.startsWith(suffix)) {
|
|
26
|
+
return i;
|
|
27
|
+
}
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
return null;
|
|
31
|
+
}
|