toolpick 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +140 -0
- package/dist/cache.d.ts +17 -0
- package/dist/cache.js +38 -0
- package/dist/eval/index.d.ts +23 -0
- package/dist/eval/index.js +59 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +3 -0
- package/dist/integrations/middleware.d.ts +10 -0
- package/dist/integrations/middleware.js +58 -0
- package/dist/integrations/prepare-step.d.ts +12 -0
- package/dist/integrations/prepare-step.js +49 -0
- package/dist/integrations/search-tool.d.ts +18 -0
- package/dist/integrations/search-tool.js +32 -0
- package/dist/query-extractor.d.ts +9 -0
- package/dist/query-extractor.js +44 -0
- package/dist/reranker.d.ts +13 -0
- package/dist/reranker.js +92 -0
- package/dist/search/combined.d.ts +9 -0
- package/dist/search/combined.js +28 -0
- package/dist/search/fusion.d.ts +7 -0
- package/dist/search/fusion.js +26 -0
- package/dist/search/hybrid.d.ts +7 -0
- package/dist/search/hybrid.js +150 -0
- package/dist/search/semantic.d.ts +13 -0
- package/dist/search/semantic.js +55 -0
- package/dist/search/types.d.ts +49 -0
- package/dist/search/types.js +0 -0
- package/dist/tool-index.d.ts +39 -0
- package/dist/tool-index.js +159 -0
- package/dist/utils.d.ts +2 -0
- package/dist/utils.js +35 -0
- package/package.json +62 -0
package/README.md
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
# toolpick
|
|
2
|
+
|
|
3
|
+
Your agent has 30 tools. The model sees all of them on every step — tool definitions eat thousands of tokens, the model picks the wrong one, and you're paying for context it doesn't need.
|
|
4
|
+
|
|
5
|
+
toolpick fixes this. It picks the right 5 tools per step so the model only sees what matters.
|
|
6
|
+
|
|
7
|
+
## Install
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
npm install toolpick
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Usage
|
|
14
|
+
|
|
15
|
+
```ts
|
|
16
|
+
import { generateText } from "ai";
|
|
17
|
+
import { openai } from "@ai-sdk/openai";
|
|
18
|
+
import { createToolIndex } from "toolpick";
|
|
19
|
+
|
|
20
|
+
const index = createToolIndex(allTools, {
|
|
21
|
+
embeddingModel: openai.embeddingModel("text-embedding-3-small"),
|
|
22
|
+
});
|
|
23
|
+
|
|
24
|
+
const result = await generateText({
|
|
25
|
+
model: openai("gpt-4o"),
|
|
26
|
+
tools: allTools,
|
|
27
|
+
prepareStep: index.prepareStep(),
|
|
28
|
+
prompt: "ship it to prod",
|
|
29
|
+
maxSteps: 10,
|
|
30
|
+
});
|
|
31
|
+
```
|
|
32
|
+
|
|
33
|
+
Every step, toolpick selects the most relevant tools and sets `activeTools` automatically. The model only sees what it needs. All tools remain available for execution.
|
|
34
|
+
|
|
35
|
+
Works with `generateText`, `streamText`, and `Experimental_Agent`.
|
|
36
|
+
|
|
37
|
+
## How it works
|
|
38
|
+
|
|
39
|
+
Pass an embedding model and toolpick indexes your tool names, descriptions, and parameters. On each step it runs a combined search — fast keyword matching plus semantic embeddings — and returns the best matches.
|
|
40
|
+
|
|
41
|
+
If the model can't find a useful tool, the next step automatically pages to a fresh set it hasn't seen yet. Two misses in a row? All tools get exposed as a fallback. Your agent never gets stuck.
|
|
42
|
+
|
|
43
|
+
If the embedding API goes down, it silently falls back to keyword-only search. No crashes.
|
|
44
|
+
|
|
45
|
+
## LLM re-ranking
|
|
46
|
+
|
|
47
|
+
For maximum accuracy, add a `rerankerModel`. The combined search fetches candidates, then a cheap LLM picks the best ones using reasoning — handling slang, abbreviations, and context that embeddings alone miss.
|
|
48
|
+
|
|
49
|
+
```ts
|
|
50
|
+
const index = createToolIndex(allTools, {
|
|
51
|
+
embeddingModel: openai.embeddingModel("text-embedding-3-small"),
|
|
52
|
+
rerankerModel: openai("gpt-4o-mini"),
|
|
53
|
+
});
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
This takes accuracy from 84% to 100% on blind user queries. Cost: ~$0.0001 per step (gpt-4o-mini).
|
|
57
|
+
|
|
58
|
+
### Description enrichment
|
|
59
|
+
|
|
60
|
+
For an extra boost, `enrichDescriptions` expands your tool descriptions with synonyms and alternative phrasings at startup. This is a one-time LLM call during `warmUp()`:
|
|
61
|
+
|
|
62
|
+
```ts
|
|
63
|
+
const index = createToolIndex(allTools, {
|
|
64
|
+
embeddingModel: openai.embeddingModel("text-embedding-3-small"),
|
|
65
|
+
rerankerModel: openai("gpt-4o-mini"),
|
|
66
|
+
enrichDescriptions: true,
|
|
67
|
+
});
|
|
68
|
+
|
|
69
|
+
await index.warmUp();
|
|
70
|
+
```
|
|
71
|
+
|
|
72
|
+
## Model-driven discovery
|
|
73
|
+
|
|
74
|
+
For agents that need to discover tools outside the current selection, expose the built-in search tool:
|
|
75
|
+
|
|
76
|
+
```ts
|
|
77
|
+
const result = await generateText({
|
|
78
|
+
model: openai("gpt-4o"),
|
|
79
|
+
tools: {
|
|
80
|
+
...allTools,
|
|
81
|
+
search_tools: index.searchTool(),
|
|
82
|
+
},
|
|
83
|
+
prepareStep: index.prepareStep({ alwaysActive: ["search_tools"] }),
|
|
84
|
+
maxSteps: 10,
|
|
85
|
+
prompt: "find and use the right tool",
|
|
86
|
+
});
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
The model can call `search_tools` to browse the full catalog and request specific tools be activated. This mirrors how Anthropic's tool search works — but for any model.
|
|
90
|
+
|
|
91
|
+
## Caching
|
|
92
|
+
|
|
93
|
+
Tool description embeddings are computed once at startup. Cache them to skip the API call on restarts:
|
|
94
|
+
|
|
95
|
+
```ts
|
|
96
|
+
import { createToolIndex, fileCache } from "toolpick";
|
|
97
|
+
|
|
98
|
+
const index = createToolIndex(allTools, {
|
|
99
|
+
embeddingModel: openai.embeddingModel("text-embedding-3-small"),
|
|
100
|
+
embeddingCache: fileCache(".toolpick-cache.json"),
|
|
101
|
+
});
|
|
102
|
+
|
|
103
|
+
await index.warmUp();
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
## Accuracy
|
|
107
|
+
|
|
108
|
+
Tested with 19 blind user queries — slang, vague language, no keyword overlap — against 30 real SaaS tools:
|
|
109
|
+
|
|
110
|
+
| Strategy | Top-5 accuracy | Cost per query |
|
|
111
|
+
|---|---|---|
|
|
112
|
+
| Keyword only | 31% | Free |
|
|
113
|
+
| Embeddings only | 79% | ~$0.00002 |
|
|
114
|
+
| Combined (default) | 84% | ~$0.00002 |
|
|
115
|
+
| **Combined + LLM re-ranking** | **100%** | **~$0.0001** |
|
|
116
|
+
|
|
117
|
+
Queries like "ship it", "ping the team", "file a bug", "get the CI running", and "compress this photo" all resolve to the correct tool.
|
|
118
|
+
|
|
119
|
+
## Options
|
|
120
|
+
|
|
121
|
+
```ts
|
|
122
|
+
const index = createToolIndex(tools, {
|
|
123
|
+
embeddingModel: model, // enables combined search (recommended)
|
|
124
|
+
rerankerModel: model, // LLM re-ranking for max accuracy
|
|
125
|
+
enrichDescriptions: true, // expand descriptions with synonyms at warmUp
|
|
126
|
+
embeddingCache: fileCache(path), // persist embeddings to disk
|
|
127
|
+
strategy: "hybrid", // force keyword-only (free, no API calls)
|
|
128
|
+
});
|
|
129
|
+
|
|
130
|
+
await index.select(query, {
|
|
131
|
+
maxTools: 5, // ceiling, not fixed count
|
|
132
|
+
adaptive: true, // cut early when scores drop off
|
|
133
|
+
alwaysActive: ["search_tools"], // always include these
|
|
134
|
+
threshold: 0.3, // minimum relevance score
|
|
135
|
+
});
|
|
136
|
+
```
|
|
137
|
+
|
|
138
|
+
## License
|
|
139
|
+
|
|
140
|
+
MIT
|
package/dist/cache.d.ts
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import type { EmbeddingCacheOptions } from "./search/types";
|
|
2
|
+
/**
|
|
3
|
+
* File-based embedding cache. Saves/loads tool embeddings as JSON
|
|
4
|
+
* so they survive restarts without re-calling the embedding API.
|
|
5
|
+
*
|
|
6
|
+
* @example
|
|
7
|
+
* ```ts
|
|
8
|
+
* import { createToolIndex, fileCache } from "toolpick";
|
|
9
|
+
*
|
|
10
|
+
* const index = createToolIndex(tools, {
|
|
11
|
+
* embeddingModel: openai.embeddingModel("text-embedding-3-small"),
|
|
12
|
+
* embeddingCache: fileCache(".toolpick-cache.json"),
|
|
13
|
+
* });
|
|
14
|
+
* await index.warmUp();
|
|
15
|
+
* ```
|
|
16
|
+
*/
|
|
17
|
+
export declare function fileCache(path: string): EmbeddingCacheOptions;
|
package/dist/cache.js
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { readFile, writeFile, mkdir } from "node:fs/promises";
|
|
2
|
+
import { dirname } from "node:path";
|
|
3
|
+
/**
 * File-backed embedding cache: persists tool embeddings as a JSON
 * array-of-arrays at `path` so restarts can skip the embedding API.
 *
 * `load()` resolves to the cached embeddings, or `null` when the file
 * is missing, unreadable, or not shaped like an array of arrays.
 * `save()` creates the parent directory if needed, then writes the file.
 *
 * @example
 * ```ts
 * import { createToolIndex, fileCache } from "toolpick";
 *
 * const index = createToolIndex(tools, {
 *   embeddingModel: openai.embeddingModel("text-embedding-3-small"),
 *   embeddingCache: fileCache(".toolpick-cache.json"),
 * });
 * await index.warmUp();
 * ```
 */
export function fileCache(path) {
    const load = async () => {
        try {
            const parsed = JSON.parse(await readFile(path, "utf-8"));
            const looksValid = Array.isArray(parsed) && parsed.every(Array.isArray);
            return looksValid ? parsed : null;
        }
        catch {
            // Missing or corrupt cache file — caller re-embeds from scratch.
            return null;
        }
    };
    const save = async (embeddings) => {
        await mkdir(dirname(path), { recursive: true });
        await writeFile(path, JSON.stringify(embeddings), "utf-8");
    };
    return { load, save };
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { ToolIndex } from "../tool-index";
|
|
2
|
+
import type { EvalTestCase, EvalResult } from "../search/types";
|
|
3
|
+
/**
|
|
4
|
+
* Evaluates a ToolIndex against test cases.
|
|
5
|
+
*
|
|
6
|
+
* Measures Top-1, Top-3, Top-5 accuracy and average latency.
|
|
7
|
+
* Returns misses for debugging.
|
|
8
|
+
*
|
|
9
|
+
* @example
|
|
10
|
+
* ```ts
|
|
11
|
+
* import { evalToolIndex } from "toolpick/eval";
|
|
12
|
+
*
|
|
13
|
+
* const result = evalToolIndex(index, [
|
|
14
|
+
* { query: "create a ticket", expected: "createJiraTicket" },
|
|
15
|
+
* { query: "send an email", expected: "sendEmail" },
|
|
16
|
+
* ]);
|
|
17
|
+
* console.log(`Top-1: ${result.top1}%, Top-3: ${result.top3}%`);
|
|
18
|
+
* ```
|
|
19
|
+
*/
|
|
20
|
+
export declare function evalToolIndex(index: ToolIndex, testCases: EvalTestCase[], options?: {
|
|
21
|
+
maxTools?: number;
|
|
22
|
+
}): Promise<EvalResult>;
|
|
23
|
+
export type { EvalTestCase, EvalResult } from "../search/types";
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
/**
 * Runs a ToolIndex against labeled test cases and reports Top-1 / Top-3 /
 * Top-5 accuracy (whole-number percentages), average `select()` latency
 * in milliseconds, and the queries that missed at Top-5 (for debugging).
 *
 * A case counts as a hit at rank N when `expected` — or any entry in its
 * `alternatives` — appears within the first N returned tool names.
 *
 * @example
 * ```ts
 * import { evalToolIndex } from "toolpick/eval";
 *
 * const result = evalToolIndex(index, [
 *   { query: "create a ticket", expected: "createJiraTicket" },
 *   { query: "send an email", expected: "sendEmail" },
 * ]);
 * console.log(`Top-1: ${result.top1}%, Top-3: ${result.top3}%`);
 * ```
 */
export async function evalToolIndex(index, testCases, options = {}) {
    const { maxTools = 5 } = options;
    const total = testCases.length;
    if (total === 0) {
        return { top1: 0, top3: 0, top5: 0, avgLatencyMs: 0, total: 0, misses: [] };
    }
    let hit1 = 0;
    let hit3 = 0;
    let hit5 = 0;
    let latencySum = 0;
    const misses = [];
    for (const testCase of testCases) {
        const began = performance.now();
        // Always fetch at least 5 results so Top-5 can be measured.
        const names = await index.select(testCase.query, {
            maxTools: Math.max(maxTools, 5),
        });
        latencySum += performance.now() - began;
        const wanted = new Set([testCase.expected, ...(testCase.alternatives ?? [])]);
        // Rank of the first acceptable tool, or -1 if none was returned.
        const rank = names.findIndex((name) => wanted.has(name));
        if (rank === 0) hit1++;
        if (rank >= 0 && rank < 3) hit3++;
        if (rank >= 0 && rank < 5) hit5++;
        if (rank < 0 || rank >= 5) {
            misses.push({
                query: testCase.query,
                expected: testCase.expected,
                got: names.slice(0, 5),
            });
        }
    }
    const toPercent = (count) => Math.round((count / total) * 100);
    return {
        top1: toPercent(hit1),
        top3: toPercent(hit3),
        top5: toPercent(hit5),
        avgLatencyMs: Math.round((latencySum / total) * 100) / 100,
        total,
        misses,
    };
}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
1
|
+
export { createToolIndex } from "./tool-index";
|
|
2
|
+
export type { ToolIndex } from "./tool-index";
|
|
3
|
+
export type { SearchStrategy, SelectOptions, ToolIndexOptions, SearchResult, ToolDescription, EmbeddingCacheOptions, } from "./search/types";
|
|
4
|
+
export { extractQuery } from "./query-extractor";
|
|
5
|
+
export { fileCache } from "./cache";
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
import type { LanguageModelMiddleware } from "ai";
|
|
2
|
+
import type { SearchEngine, SelectOptions } from "../search/types";
|
|
3
|
+
/**
|
|
4
|
+
* Creates a LanguageModelMiddleware that filters tools via transformParams.
|
|
5
|
+
*
|
|
6
|
+
* Secondary integration path. Operates at the provider level where the prompt
|
|
7
|
+
* is in internal format, making query extraction less precise than prepareStep.
|
|
8
|
+
* Best for single-step generateText/streamText calls.
|
|
9
|
+
*/
|
|
10
|
+
export declare function createMiddleware(engine: SearchEngine, toolNames: string[], options?: SelectOptions): LanguageModelMiddleware;
|
|
@@ -0,0 +1,58 @@
|
|
|
1
|
+
/**
 * Builds a LanguageModelMiddleware whose `transformParams` hook narrows
 * `params.tools` to the engine's best matches for the latest user query.
 *
 * Secondary integration path: at this level the prompt is already in the
 * provider-internal format, so query extraction is coarser than the
 * prepareStep integration. Best suited to single-step generateText /
 * streamText calls.
 */
export function createMiddleware(engine, toolNames, options = {}) {
    const { maxTools = 5, alwaysActive = [] } = options;
    return {
        specificationVersion: "v3",
        transformParams: async ({ params }) => {
            const query = extractQueryFromPrompt(params.prompt);
            // Nothing to search with, or nothing to filter — pass through.
            if (!query || !params.tools?.length) {
                return params;
            }
            const hits = await engine.search(query, maxTools);
            const keep = new Set(alwaysActive);
            for (const hit of hits) {
                keep.add(hit.name);
            }
            const remaining = params.tools.filter((candidate) => {
                const name = "name" in candidate ? candidate.name : undefined;
                // Unnamed tool entries can't be matched — keep them as-is.
                return name ? keep.has(name) : true;
            });
            return { ...params, tools: remaining };
        },
    };
}
/**
 * Pulls the most recent user text out of a provider-level prompt.
 * Walks messages newest-first, returning string content directly or the
 * joined text parts of structured content. Returns "" when no usable
 * user text is found (non-array prompt, no user message, no text parts).
 */
function extractQueryFromPrompt(prompt) {
    if (!Array.isArray(prompt)) {
        return "";
    }
    for (let i = prompt.length - 1; i >= 0; i--) {
        const message = prompt[i];
        const isUserMessage = message &&
            typeof message === "object" &&
            "role" in message &&
            message.role === "user";
        if (!isUserMessage || !("content" in message)) {
            continue;
        }
        const { content } = message;
        if (typeof content === "string") {
            return content;
        }
        if (Array.isArray(content)) {
            const textParts = content
                .filter((part) => part && typeof part === "object" && "type" in part && part.type === "text")
                .map((part) => part.text);
            if (textParts.length > 0) {
                return textParts.join(" ");
            }
        }
    }
    return "";
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
import type { PrepareStepFunction, ToolSet } from "ai";
|
|
2
|
+
import type { SearchEngine, SelectOptions } from "../search/types";
|
|
3
|
+
/**
|
|
4
|
+
* Creates a prepareStep function that dynamically selects tools per step.
|
|
5
|
+
* Returns `{ activeTools }` to limit which tools the model sees.
|
|
6
|
+
*
|
|
7
|
+
* Includes automatic escalation: if the model produced no tool calls in the
|
|
8
|
+
* previous step, the next step shifts to the *next page* of ranked tools
|
|
9
|
+
* (positions maxTools+1 through maxTools*2) instead of repeating the same set.
|
|
10
|
+
* After two consecutive misses, all tools are exposed.
|
|
11
|
+
*/
|
|
12
|
+
export declare function createPrepareStep<TOOLS extends ToolSet>(engine: SearchEngine, toolNames: (keyof TOOLS & string)[], options?: SelectOptions): PrepareStepFunction<TOOLS>;
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import { extractQuery } from "../query-extractor";
|
|
2
|
+
// Identity cast helper — keeps the string[] -> activeTools typing in one place.
function asActiveTools(names) {
    return names;
}
/**
 * Builds a prepareStep callback that narrows `activeTools` each step to
 * the engine's best matches for the current query.
 *
 * Escalation: each consecutive trailing step in which the model made no
 * tool call advances one "page" deeper into the ranked results, so the
 * model is shown tools it hasn't seen yet (positions maxTools+1 through
 * maxTools*2, and so on). After two consecutive misses, every tool is
 * exposed as a last resort.
 */
export function createPrepareStep(engine, toolNames, options = {}) {
    const { maxTools = 5, alwaysActive = [] } = options;
    const known = new Set(toolNames);
    return async ({ messages, steps, stepNumber }) => {
        const query = extractQuery(messages, steps, stepNumber);
        if (!query) {
            // No query to search with: either pin the always-active tools
            // or leave the step untouched.
            if (alwaysActive.length === 0) {
                return undefined;
            }
            return { activeTools: asActiveTools(alwaysActive) };
        }
        // Count the run of most-recent steps that ended without a tool call.
        let misses = 0;
        if (stepNumber > 0) {
            for (let i = steps.length - 1; i >= 0 && steps[i].toolCalls.length === 0; i--) {
                misses++;
            }
        }
        if (misses >= 2) {
            // Two strikes in a row — expose the full catalog.
            const everything = [...new Set([...toolNames, ...alwaysActive])];
            return { activeTools: asActiveTools(everything) };
        }
        // Page `misses` of the ranking: positions [misses*maxTools, (misses+1)*maxTools).
        const ranked = await engine.search(query, maxTools * (misses + 1));
        const pageStart = misses * maxTools;
        const chosen = ranked.slice(pageStart, pageStart + maxTools).map((r) => r.name);
        const active = [...new Set([...chosen, ...alwaysActive])].filter((name) => known.has(name));
        return { activeTools: asActiveTools(active) };
    };
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
import type { SearchEngine, ToolDescription } from "../search/types";
|
|
2
|
+
/**
|
|
3
|
+
* Creates a meta-tool that agents can call to discover tools
|
|
4
|
+
* not in the current activeTools selection.
|
|
5
|
+
*
|
|
6
|
+
* When the agent can't find a tool it needs, it calls search_tools
|
|
7
|
+
* to discover relevant tools by name and description.
|
|
8
|
+
*/
|
|
9
|
+
export declare function createSearchTool(engine: SearchEngine, toolDescriptions: ToolDescription[]): import("ai").Tool<{
|
|
10
|
+
query: string;
|
|
11
|
+
}, {
|
|
12
|
+
tools: {
|
|
13
|
+
name: string;
|
|
14
|
+
description: string;
|
|
15
|
+
relevance: number;
|
|
16
|
+
}[];
|
|
17
|
+
hint: string;
|
|
18
|
+
}>;
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
import { tool, zodSchema } from "ai";
|
|
2
|
+
import { z } from "zod";
|
|
3
|
+
const SEARCH_RESULTS = 5;
|
|
4
|
+
/**
 * Builds the `search_tools` meta-tool: lets the model query the full
 * tool catalog by describing what it needs, returning the top matches
 * (name, original description, relevance rounded to 2 decimals) plus a
 * hint nudging it to call the best match. Useful when the agent needs a
 * capability outside its current activeTools selection.
 */
export function createSearchTool(engine, toolDescriptions) {
    const textByName = new Map();
    for (const { name, text } of toolDescriptions) {
        textByName.set(name, text);
    }
    return tool({
        description: "Search for available tools by describing what you need. " +
            "Use this when you need a capability that isn't in your current tools.",
        inputSchema: zodSchema(z.object({
            query: z.string().describe("Natural language description of the tool you need"),
        })),
        execute: async ({ query }) => {
            const matches = await engine.search(query, SEARCH_RESULTS);
            const tools = matches.map((match) => ({
                name: match.name,
                description: textByName.get(match.name) ?? "",
                relevance: Math.round(match.score * 100) / 100,
            }));
            return {
                tools,
                hint: "The tools listed above are available. Call the one that best matches your need.",
            };
        },
    });
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { ModelMessage, StepResult, ToolSet } from "ai";
|
|
2
|
+
/**
|
|
3
|
+
* Extracts the best search query from conversation context for tool selection.
|
|
4
|
+
*
|
|
5
|
+
* - Step 0: uses the original user prompt
|
|
6
|
+
* - Step N with assistant text: uses the assistant's last text (contains next-action intent)
|
|
7
|
+
* - Step N without text: combines original prompt with completed tool names for context shift
|
|
8
|
+
*/
|
|
9
|
+
export declare function extractQuery<TOOLS extends ToolSet>(messages: ModelMessage[], steps: ReadonlyArray<StepResult<TOOLS>>, stepNumber: number): string;
|
|
@@ -0,0 +1,44 @@
|
|
|
1
|
+
/**
 * Derives the search query used for tool selection from conversation state.
 *
 * - First step (or no completed steps yet): the latest user message.
 * - Later steps where the model wrote text: that text, since it usually
 *   states the next intended action.
 * - Later steps with only tool calls: the user message combined with the
 *   names of the tools that just ran, shifting the query toward the
 *   remaining work.
 */
export function extractQuery(messages, steps, stepNumber) {
    if (stepNumber === 0 || steps.length === 0) {
        return getLastUserMessage(messages);
    }
    const previous = steps[steps.length - 1];
    // Assistant text usually describes its next intent,
    // e.g. "Ticket created. Now I'll notify the team on Slack."
    const hasText = (previous.text?.trim() ?? "").length > 0;
    if (hasText) {
        return previous.text;
    }
    // Only tool calls happened: fold what already executed into the
    // original prompt so the next selection shifts context.
    if (previous.toolCalls.length > 0) {
        const ranTools = previous.toolCalls.map((call) => call.toolName).join(" ");
        return `${getLastUserMessage(messages)} — ${ranTools}`;
    }
    return getLastUserMessage(messages);
}
/**
 * Walks messages newest-first and returns the last user message's text:
 * string content directly, or the joined text parts of structured
 * content. Returns "" when no user text is found.
 */
function getLastUserMessage(messages) {
    for (let i = messages.length - 1; i >= 0; i--) {
        const message = messages[i];
        if (message.role !== "user") {
            continue;
        }
        const { content } = message;
        if (typeof content === "string") {
            return content;
        }
        if (Array.isArray(content)) {
            const textParts = content
                .filter((part) => part.type === "text")
                .map((part) => part.text);
            if (textParts.length > 0) {
                return textParts.join(" ");
            }
        }
    }
    return "";
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { LanguageModel } from "ai";
|
|
2
|
+
import type { SearchResult, ToolDescription } from "./search/types";
|
|
3
|
+
/**
|
|
4
|
+
* Re-ranks tools using a cheap LLM. When there are <=50 tools,
|
|
5
|
+
* the LLM sees all of them (not just pre-filtered candidates).
|
|
6
|
+
* This adds reasoning that pure embedding similarity can't do.
|
|
7
|
+
*/
|
|
8
|
+
export declare function rerank(model: LanguageModel, query: string, candidates: SearchResult[], descriptions: Map<string, string>, maxResults: number): Promise<SearchResult[]>;
|
|
9
|
+
/**
|
|
10
|
+
* Enriches tool descriptions with synonyms and common phrasings
|
|
11
|
+
* using a cheap LLM. Called once during warmUp, results are cached.
|
|
12
|
+
*/
|
|
13
|
+
export declare function enrichDescriptions(model: LanguageModel, descriptions: ToolDescription[]): Promise<ToolDescription[]>;
|
package/dist/reranker.js
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
import { generateText, Output } from "ai";
|
|
2
|
+
import { z } from "zod";
|
|
3
|
+
const RERANK_TOOL_LIMIT = 50;
|
|
4
|
+
/**
 * Re-ranks tools using a cheap LLM. When there are <=50 tools,
 * the LLM sees all of them (not just pre-filtered candidates).
 * This adds reasoning that pure embedding similarity can't do.
 *
 * @param model        language model used for the ranking call
 * @param query        raw user query (may be informal/slang/abbreviated)
 * @param candidates   pre-filtered search results ({ name, score })
 * @param descriptions map of tool name -> description text
 * @param maxResults   number of tools to return
 * @returns re-ranked results with synthetic descending scores; on LLM
 *          failure, the first `maxResults` candidates unchanged
 */
export async function rerank(model, query, candidates, descriptions, maxResults) {
    // Nothing to narrow down — skip the LLM call entirely.
    if (candidates.length <= maxResults)
        return candidates;
    const allNames = Array.from(descriptions.keys());
    // Small catalogs fit in one prompt, so show everything;
    // larger ones fall back to the pre-filtered candidates only.
    const useFullSet = allNames.length <= RERANK_TOOL_LIMIT;
    const toolList = useFullSet
        ? allNames.map((n) => `- ${n}: ${descriptions.get(n) ?? ""}`).join("\n")
        : candidates.map((c) => `- ${c.name}: ${descriptions.get(c.name) ?? ""}`).join("\n");
    // Only names actually shown to the LLM are accepted back
    // (guards against hallucinated tool names).
    const validNames = new Set(useFullSet ? allNames : candidates.map((c) => c.name));
    try {
        const result = await generateText({
            model,
            output: Output.object({
                schema: z.object({
                    tools: z.array(z.string()).describe("Tool names ranked by relevance, most relevant first"),
                }),
            }),
            prompt: `A user said: "${query}"

This may be informal, slang, or abbreviated. Pick the ${maxResults} tools most likely to fulfill what the user actually wants, ranked best-first:

${toolList}

Return ONLY tool names from the list above. Think about what the user means, not just keyword matches.`,
        });
        const ranked = [];
        const seen = new Set();
        for (const name of result.output.tools) {
            if (seen.has(name))
                continue;
            seen.add(name);
            if (validNames.has(name)) {
                // Synthetic score by rank position: 1.0, 0.9, 0.8, ...
                ranked.push({ name, score: 1 - ranked.length * 0.1 });
            }
            if (ranked.length >= maxResults)
                break;
        }
        return ranked;
    }
    catch (err) {
        // Fail open: re-ranking is an accuracy boost, never a hard dependency.
        // The error is only surfaced when TOOLPICK_DEBUG is set.
        if (process.env.TOOLPICK_DEBUG) {
            console.error("[toolpick rerank] LLM re-ranking failed, falling back to search results:", err);
        }
        return candidates.slice(0, maxResults);
    }
}
|
|
55
|
+
/**
 * Enriches tool descriptions with synonyms and common phrasings
 * using a cheap LLM. Called once during warmUp, results are cached.
 *
 * @param model        language model used for the enrichment call
 * @param descriptions original { name, text } tool descriptions
 * @returns descriptions with expanded text; any tool the LLM omitted
 *          keeps its original text, and on failure the input is
 *          returned unchanged
 */
export async function enrichDescriptions(model, descriptions) {
    const toolList = descriptions
        .map((d) => `- ${d.name}: ${d.text}`)
        .join("\n");
    try {
        const result = await generateText({
            model,
            output: Output.object({
                schema: z.object({
                    tools: z.array(z.object({
                        name: z.string(),
                        text: z.string(),
                    })),
                }),
            }),
            prompt: `For each tool below, expand the description by adding common synonyms, slang, and alternative phrasings that users might say when they want this tool. Keep the original description and append the alternatives. Be concise — just add key synonyms, not full sentences.

${toolList}

Return every tool with its expanded description.`,
        });
        // Merge by name; fall back to the original text for any tool the
        // model dropped from its response.
        const enriched = new Map(result.output.tools.map((t) => [t.name, t.text]));
        return descriptions.map((d) => ({
            name: d.name,
            text: enriched.get(d.name) ?? d.text,
        }));
    }
    catch (err) {
        // Fail open: enrichment is optional — the originals still work.
        if (process.env.TOOLPICK_DEBUG) {
            console.error("[toolpick enrich] Description enrichment failed, using originals:", err);
        }
        return descriptions;
    }
}
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
import type { EmbeddingCacheOptions, SearchEngine, ToolDescription } from "./types";
|
|
2
|
+
import type { EmbeddingModel } from "ai";
|
|
3
|
+
export declare class CombinedSearch implements SearchEngine {
|
|
4
|
+
private hybrid;
|
|
5
|
+
private semantic;
|
|
6
|
+
constructor(tools: ToolDescription[], model: EmbeddingModel, cache?: EmbeddingCacheOptions);
|
|
7
|
+
init(): Promise<void>;
|
|
8
|
+
search(query: string, maxResults: number): Promise<import("./types").SearchResult[]>;
|
|
9
|
+
}
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import { HybridSearch } from "./hybrid";
|
|
2
|
+
import { SemanticSearch } from "./semantic";
|
|
3
|
+
import { fuseResults } from "./fusion";
|
|
4
|
+
const SEMANTIC_WEIGHT = 0.7;
|
|
5
|
+
const HYBRID_WEIGHT = 0.3;
|
|
6
|
+
/**
 * Combined search engine: runs keyword search (BM25 + TF-IDF hybrid)
 * and semantic search (embeddings) in parallel, then fuses the two
 * rankings with a weighted sum (HYBRID_WEIGHT vs SEMANTIC_WEIGHT).
 * If the semantic side throws (e.g. the embedding API is down), it
 * degrades to keyword-only results instead of failing the step.
 */
export class CombinedSearch {
    hybrid;
    semantic;
    constructor(tools, model, cache) {
        this.hybrid = new HybridSearch(tools);
        this.semantic = new SemanticSearch(tools, model, cache);
    }
    /** Pre-computes embeddings; the keyword side needs no warm-up. */
    async init() {
        await this.semantic.init();
    }
    async search(query, maxResults) {
        // Over-fetch 3x from each side so fusion has enough overlap to rank.
        const fetchCount = maxResults * 3;
        try {
            const [keywordHits, semanticHits] = await Promise.all([
                this.hybrid.search(query, fetchCount),
                this.semantic.search(query, fetchCount),
            ]);
            return fuseResults(keywordHits, semanticHits, HYBRID_WEIGHT, SEMANTIC_WEIGHT, maxResults);
        }
        catch {
            // Embedding failure — fall back silently to keyword-only search.
            return this.hybrid.search(query, maxResults);
        }
    }
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import type { SearchResult } from "./types";
|
|
2
|
+
/**
|
|
3
|
+
* Normalizes two scored result sets to [0,1] and fuses them
|
|
4
|
+
* with a weighted sum. Used by both HybridSearch (BM25 + TF-IDF)
|
|
5
|
+
* and CombinedSearch (hybrid + semantic).
|
|
6
|
+
*/
|
|
7
|
+
export declare function fuseResults(resultsA: SearchResult[], resultsB: SearchResult[], weightA: number, weightB: number, maxResults: number): SearchResult[];
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
/**
 * Fuses two scored result lists into one ranking. Each list is
 * normalized by its own top score (lists are assumed sorted best-first),
 * then each tool's fused score is `weightA * normA + weightB * normB`.
 * The result is sorted descending and truncated to `maxResults`.
 * Used by both HybridSearch (BM25 + TF-IDF) and CombinedSearch
 * (hybrid + semantic).
 */
export function fuseResults(resultsA, resultsB, weightA, weightB, maxResults) {
    // Top score of each list is the normalization denominator; the ?? 1
    // default keeps empty lists harmless.
    const topA = resultsA[0]?.score ?? 1;
    const topB = resultsB[0]?.score ?? 1;
    const normalized = new Map();
    const accumulate = (results, top, side) => {
        for (const { name, score } of results) {
            const entry = normalized.get(name) ?? { a: 0, b: 0 };
            entry[side] = top > 0 ? score / top : 0;
            normalized.set(name, entry);
        }
    };
    accumulate(resultsA, topA, "a");
    accumulate(resultsB, topB, "b");
    return [...normalized.entries()]
        .map(([name, { a, b }]) => ({ name, score: weightA * a + weightB * b }))
        .sort((x, y) => y.score - x.score)
        .slice(0, maxResults);
}
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
import type { SearchEngine, SearchResult, ToolDescription } from "./types";
/**
 * Keyword search engine fusing BM25 and TF-IDF cosine-similarity
 * rankings. Fully local — no API calls or embeddings required.
 */
export declare class HybridSearch implements SearchEngine {
    private bm25;
    private tfidf;
    constructor(tools: ToolDescription[]);
    search(query: string, maxResults: number): SearchResult[];
}
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import { tokenize } from "../utils";
|
|
2
|
+
import { fuseResults } from "./fusion";
|
|
3
|
+
// ── BM25 ────────────────────────────────────────────────────────
const BM25_K1 = 1.2;
const BM25_B = 0.75;
/**
 * BM25 ranking over tokenized tool descriptions.
 * Uses the log(1 + ...) idf form so scores never go negative.
 */
class BM25Index {
    docs = [];
    df = new Map();
    avgDl = 0;
    totalLength = 0;
    n = 0;

    /** Indexes one document: records term and document frequencies. */
    add(name, text) {
        const tokens = tokenize(text);
        const termFreq = new Map();
        for (const token of tokens) {
            termFreq.set(token, (termFreq.get(token) ?? 0) + 1);
        }
        // Document frequency counts each distinct term once per document.
        for (const token of new Set(tokens)) {
            this.df.set(token, (this.df.get(token) ?? 0) + 1);
        }
        this.docs.push({ name, tokens, length: tokens.length, tf: termFreq });
        this.n += 1;
        this.totalLength += tokens.length;
        this.avgDl = this.totalLength / this.n;
    }

    /** Scores every indexed document against the query; returns the top matches. */
    search(query, maxResults) {
        const queryTokens = tokenize(query);
        const ranked = [];
        for (const doc of this.docs) {
            let total = 0;
            for (const term of queryTokens) {
                const docFreq = this.df.get(term) ?? 0;
                const idf = Math.log(1 + (this.n - docFreq + 0.5) / (docFreq + 0.5));
                const freq = doc.tf.get(term) ?? 0;
                // Standard BM25 term saturation with document-length normalization.
                const lengthNorm = 1 - BM25_B + BM25_B * (doc.length / this.avgDl);
                total += idf * ((freq * (BM25_K1 + 1)) / (freq + BM25_K1 * lengthNorm));
            }
            if (total > 0) {
                ranked.push({ name: doc.name, score: total });
            }
        }
        ranked.sort((x, y) => y.score - x.score);
        return ranked.slice(0, maxResults);
    }
}
|
|
48
|
+
// ── TF-IDF with cosine similarity ───────────────────────────────
/**
 * TF-IDF index scored with cosine similarity. Terms are interned to
 * numeric ids; document vectors store length-normalized raw term
 * frequency, and idf is applied lazily at query time so stored
 * vectors never go stale as more documents are added.
 */
class TfidfIndex {
    termToId = new Map();
    nextTermId = 0;
    docVectors = [];
    df = new Map();
    n = 0;

    /** Interns a term, assigning a fresh numeric id on first sight. */
    getTermId(term) {
        const existing = this.termToId.get(term);
        if (existing !== undefined) {
            return existing;
        }
        const assigned = this.nextTermId++;
        this.termToId.set(term, assigned);
        return assigned;
    }

    /** Indexes one document as a length-normalized term-frequency vector. */
    add(name, text) {
        const tokens = tokenize(text);
        const counts = new Map();
        const countedOnce = new Set();
        for (const token of tokens) {
            const id = this.getTermId(token);
            counts.set(id, (counts.get(id) ?? 0) + 1);
            if (!countedOnce.has(id)) {
                countedOnce.add(id);
                this.df.set(id, (this.df.get(id) ?? 0) + 1);
            }
        }
        const vector = new Map();
        const docLength = tokens.length || 1;
        for (const [termId, count] of counts) {
            vector.set(termId, count / docLength);
        }
        this.docVectors.push({ name, vector });
        this.n += 1;
    }

    /** Ranks documents by cosine similarity between query and doc TF-IDF vectors. */
    search(query, maxResults) {
        const tokens = tokenize(query);
        const queryCounts = new Map();
        for (const token of tokens) {
            const id = this.termToId.get(token);
            if (id !== undefined) {
                queryCounts.set(id, (queryCounts.get(id) ?? 0) + 1);
            }
        }
        if (queryCounts.size === 0) {
            return [];
        }
        // Build the query TF-IDF vector (smoothed idf, never negative).
        const queryVector = new Map();
        const queryLength = tokens.length || 1;
        for (const [termId, count] of queryCounts) {
            const docFreq = this.df.get(termId) ?? 0;
            const idf = Math.log((this.n + 1) / (docFreq + 1)) + 1;
            queryVector.set(termId, (count / queryLength) * idf);
        }
        const ranked = [];
        for (const doc of this.docVectors) {
            // Apply idf to the stored tf values on the fly while accumulating
            // both the dot product and the document magnitude in one pass.
            let dot = 0;
            let docMagSq = 0;
            for (const [termId, tfValue] of doc.vector) {
                const docFreq = this.df.get(termId) ?? 0;
                const idf = Math.log((this.n + 1) / (docFreq + 1)) + 1;
                const weighted = tfValue * idf;
                docMagSq += weighted * weighted;
                const queryWeight = queryVector.get(termId);
                if (queryWeight !== undefined) {
                    dot += weighted * queryWeight;
                }
            }
            if (dot <= 0) {
                continue;
            }
            let queryMagSq = 0;
            for (const weight of queryVector.values()) {
                queryMagSq += weight * weight;
            }
            const magnitude = Math.sqrt(docMagSq) * Math.sqrt(queryMagSq);
            if (magnitude === 0) {
                continue;
            }
            ranked.push({ name: doc.name, score: dot / magnitude });
        }
        ranked.sort((x, y) => y.score - x.score);
        return ranked.slice(0, maxResults);
    }
}
|
|
132
|
+
// ── Hybrid (BM25 + TF-IDF) ─────────────────────────────────────
const HYBRID_ALPHA = 0.2; // 20% BM25, 80% TF-IDF
const NAME_BOOST_REPEAT = 3;
/**
 * Keyword engine fusing BM25 and TF-IDF rankings. The tool name is
 * repeated in the indexed text so name matches outrank body matches.
 */
export class HybridSearch {
    bm25 = new BM25Index();
    tfidf = new TfidfIndex();

    constructor(tools) {
        for (const tool of tools) {
            const nameBoost = Array.from({ length: NAME_BOOST_REPEAT }, () => tool.name).join(" ");
            const indexedText = `${nameBoost} ${tool.text}`;
            this.bm25.add(tool.name, indexedText);
            this.tfidf.add(tool.name, indexedText);
        }
    }

    /** Fetches an enlarged pool from both indexes and fuses the rankings. */
    search(query, maxResults) {
        const poolSize = maxResults * 3;
        const keywordRanking = this.bm25.search(query, poolSize);
        const vectorRanking = this.tfidf.search(query, poolSize);
        return fuseResults(keywordRanking, vectorRanking, HYBRID_ALPHA, 1 - HYBRID_ALPHA, maxResults);
    }
}
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
import type { EmbeddingModel } from "ai";
import type { EmbeddingCacheOptions, SearchEngine, SearchResult, ToolDescription } from "./types";
/**
 * Embedding-based search engine. Embeds every tool description once
 * (optionally through a user-supplied cache) and ranks tools by
 * cosine similarity against the embedded query.
 */
export declare class SemanticSearch implements SearchEngine {
    private tools;
    private embeddings;
    private toolNames;
    private model;
    private cache?;
    private initPromise;
    constructor(tools: ToolDescription[], model: EmbeddingModel, cache?: EmbeddingCacheOptions);
    init(): Promise<void>;
    search(query: string, maxResults: number): Promise<SearchResult[]>;
}
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
import { embed, embedMany, cosineSimilarity } from "ai";
|
|
2
|
+
/**
 * Embedding-based search over tool descriptions. Tool embeddings are
 * computed once, lazily (on first init()/search()), and queries are
 * ranked by cosine similarity against them.
 */
export class SemanticSearch {
    tools;
    embeddings = [];
    toolNames = [];
    model;
    cache;
    // In-flight/completed init promise; doubles as the "initialized" flag.
    initPromise = null;
    constructor(tools, model, cache) {
        this.tools = tools;
        this.model = model;
        this.cache = cache;
        this.toolNames = tools.map((t) => t.name);
    }
    // Embeds all tool descriptions exactly once. Concurrent callers share
    // the same promise; on failure the promise is cleared so a later call
    // can retry instead of replaying a rejected promise forever.
    async init() {
        if (this.initPromise)
            return this.initPromise;
        this.initPromise = (async () => {
            if (this.cache) {
                const cached = await this.cache.load();
                // NOTE(review): cache validity is checked only by length — stale
                // embeddings are reused when tool descriptions change but the
                // tool count stays the same. Confirm this is acceptable.
                if (cached && cached.length === this.tools.length) {
                    this.embeddings = cached;
                    return;
                }
            }
            // Embed "name: text" so the tool name contributes to similarity.
            const values = this.tools.map((t) => `${t.name}: ${t.text}`);
            const { embeddings } = await embedMany({
                model: this.model,
                values,
            });
            this.embeddings = embeddings;
            if (this.cache) {
                await this.cache.save(embeddings);
            }
        })().catch((err) => {
            this.initPromise = null;
            throw err;
        });
        return this.initPromise;
    }
    // Embeds the query and ranks all tools by cosine similarity, descending.
    async search(query, maxResults) {
        await this.init();
        const { embedding: queryEmbedding } = await embed({
            model: this.model,
            value: query,
        });
        const scores = [];
        for (let i = 0; i < this.embeddings.length; i++) {
            const score = cosineSimilarity(queryEmbedding, this.embeddings[i]);
            scores.push({ name: this.toolNames[i], score });
        }
        scores.sort((a, b) => b.score - a.score);
        return scores.slice(0, maxResults);
    }
}
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
import type { EmbeddingModel, LanguageModel } from "ai";
export type SearchStrategy = "hybrid" | "semantic" | "combined";
/** Options for a single tool-selection call. */
export interface SelectOptions {
    /** Maximum number of tools to return. Default: 5 */
    maxTools?: number;
    /** Tool names always merged into the selection regardless of score. */
    alwaysActive?: string[];
    /** Minimum score a result must reach to be considered. */
    threshold?: number;
    /** When true, returns fewer than maxTools if there's a large score gap. Default: true */
    adaptive?: boolean;
}
/** One ranked search hit: a tool name with its relevance score. */
export interface SearchResult {
    name: string;
    score: number;
}
/** Common interface implemented by all search strategies. */
export interface SearchEngine {
    search(query: string, maxResults: number): SearchResult[] | Promise<SearchResult[]>;
    /** Optional eager initialization (e.g. pre-computing embeddings). */
    init?(): Promise<void>;
}
/** User-supplied persistence for tool embeddings. */
export interface EmbeddingCacheOptions {
    /** Returns previously saved embeddings, or null when absent. */
    load(): Promise<number[][] | null>;
    save(embeddings: number[][]): Promise<void>;
}
/** Configuration for createToolIndex. */
export interface ToolIndexOptions {
    /** Defaults to "combined" when embeddingModel is set, else "hybrid". */
    strategy?: SearchStrategy;
    embeddingModel?: EmbeddingModel;
    embeddingCache?: EmbeddingCacheOptions;
    /** LLM used to rerank candidates and (optionally) enrich descriptions. */
    rerankerModel?: LanguageModel;
    /** When true (and rerankerModel is set), rewrite descriptions during warmUp. */
    enrichDescriptions?: boolean;
}
/** A tool's name plus its searchable text. */
export interface ToolDescription {
    name: string;
    text: string;
}
/** One evaluation case: a query and the tool it should retrieve. */
export interface EvalTestCase {
    query: string;
    expected: string;
    /** Other tool names also counted as correct. */
    alternatives?: string[];
}
/** Aggregate evaluation metrics. */
export interface EvalResult {
    top1: number;
    top3: number;
    top5: number;
    avgLatencyMs: number;
    total: number;
    misses: Array<{
        query: string;
        expected: string;
        got: string[];
    }>;
}
|
|
File without changes
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import type { LanguageModelMiddleware, PrepareStepFunction, ToolSet } from "ai";
import type { SelectOptions, ToolIndexOptions } from "./search/types";
import { createSearchTool } from "./integrations/search-tool";
/**
 * Handle returned by createToolIndex: exposes one-shot selection plus
 * ready-made AI SDK integration points (prepareStep, middleware, and a
 * discovery meta-tool).
 */
export interface ToolIndex<TOOLS extends ToolSet = ToolSet> {
    /** Pre-compute embeddings eagerly so the first select() is fast. Also runs description enrichment when configured. */
    warmUp(): Promise<void>;
    /** Select the most relevant tool names for a query. Returns string[] for activeTools. */
    select(query: string, options?: SelectOptions): Promise<string[]>;
    /** Returns a prepareStep function for ToolLoopAgent / generateText / streamText. */
    prepareStep(options?: SelectOptions): PrepareStepFunction<TOOLS>;
    /** Returns a LanguageModelMiddleware for transparent integration via wrapLanguageModel. */
    middleware(options?: SelectOptions): LanguageModelMiddleware;
    /** Returns a meta-tool agents can call to discover tools outside the current selection. */
    searchTool(): ReturnType<typeof createSearchTool>;
    /** The tool names in this index. */
    readonly toolNames: (keyof TOOLS & string)[];
}
/**
 * Creates a tool index for dynamic tool selection.
 *
 * When an embeddingModel is provided, defaults to "combined" strategy
 * (hybrid keyword + semantic embeddings) for the best accuracy at minimal cost.
 * Falls back to "hybrid" (free, no API calls) when no model is given.
 *
 * @param tools - A ToolSet (Record<string, Tool>) to index
 * @param options - Strategy configuration
 *
 * @example
 * ```ts
 * import { createToolIndex } from "toolpick";
 * import { openai } from "@ai-sdk/openai";
 *
 * const index = createToolIndex(allTools, {
 *   embeddingModel: openai.embeddingModel("text-embedding-3-small"),
 * });
 * const activeTools = await index.select("ship it to prod", { maxTools: 5 });
 * ```
 */
export declare function createToolIndex<TOOLS extends ToolSet>(tools: TOOLS, options?: ToolIndexOptions): ToolIndex<TOOLS>;
|
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
import { HybridSearch } from "./search/hybrid";
|
|
2
|
+
import { SemanticSearch } from "./search/semantic";
|
|
3
|
+
import { CombinedSearch } from "./search/combined";
|
|
4
|
+
import { createPrepareStep } from "./integrations/prepare-step";
|
|
5
|
+
import { createMiddleware } from "./integrations/middleware";
|
|
6
|
+
import { createSearchTool } from "./integrations/search-tool";
|
|
7
|
+
import { rerank, enrichDescriptions } from "./reranker";
|
|
8
|
+
/**
 * Best-effort extraction of parameter names from a tool definition's
 * input schema. Handles plain JSON Schema ("properties"), the AI SDK's
 * lazy schema wrapper ("jsonSchema", skipped while still a Promise),
 * and Zod object schemas ("shape"). Returns [] when nothing matches.
 */
function extractParamNames(toolDef) {
    if (typeof toolDef !== "object" || toolDef === null) {
        return [];
    }
    const schema = toolDef.inputSchema;
    if (typeof schema !== "object" || !schema) {
        return [];
    }
    const { properties } = schema;
    if (properties && typeof properties === "object") {
        return Object.keys(properties);
    }
    const lazy = schema.jsonSchema;
    if (lazy && typeof lazy === "object" && !(lazy instanceof Promise)) {
        if (lazy.properties && typeof lazy.properties === "object") {
            return Object.keys(lazy.properties);
        }
    }
    if ("shape" in schema && schema.shape && typeof schema.shape === "object") {
        return Object.keys(schema.shape);
    }
    return [];
}
|
|
31
|
+
const GAP_RATIO = 0.4;
const MIN_ADAPTIVE = 2;
/**
 * Finds a natural cutoff point in a ranked result list.
 * Scans left to right and cuts at the first score drop that is at
 * least GAP_RATIO of the top score, provided at least MIN_ADAPTIVE
 * results remain. Falls back to a plain slice when the list is tiny,
 * the top score is non-positive, or no large-enough gap exists.
 */
function findElbow(results, max) {
    if (results.length <= MIN_ADAPTIVE) {
        return results.slice(0, max);
    }
    const best = results[0].score;
    if (best <= 0) {
        return results.slice(0, max);
    }
    const limit = Math.min(results.length - 1, max - 1);
    for (let i = 0; i < limit; i++) {
        const gap = results[i].score - results[i + 1].score;
        if (gap / best >= GAP_RATIO && i + 1 >= MIN_ADAPTIVE) {
            return results.slice(0, i + 1);
        }
    }
    return results.slice(0, max);
}
|
|
52
|
+
/**
 * Builds the searchable text for one tool: its description (when set)
 * followed by its parameter names. The `name` parameter is currently
 * unused here (the search engines boost the name separately) but is
 * kept for interface stability.
 */
function buildToolDescription(name, toolDef) {
    const pieces = [];
    if (toolDef.description) {
        pieces.push(toolDef.description);
    }
    const params = extractParamNames(toolDef);
    if (params.length > 0) {
        pieces.push(params.join(" "));
    }
    return pieces.join(" ");
}
|
|
63
|
+
/**
 * Creates a tool index for dynamic tool selection.
 *
 * When an embeddingModel is provided, defaults to "combined" strategy
 * (hybrid keyword + semantic embeddings) for the best accuracy at minimal cost.
 * Falls back to "hybrid" (free, no API calls) when no model is given.
 *
 * @param tools - A ToolSet (Record<string, Tool>) to index
 * @param options - Strategy configuration
 *
 * @example
 * ```ts
 * import { createToolIndex } from "toolpick";
 * import { openai } from "@ai-sdk/openai";
 *
 * const index = createToolIndex(allTools, {
 *   embeddingModel: openai.embeddingModel("text-embedding-3-small"),
 * });
 * const activeTools = await index.select("ship it to prod", { maxTools: 5 });
 * ```
 */
export function createToolIndex(tools, options = {}) {
    const { embeddingModel, embeddingCache, rerankerModel, enrichDescriptions: shouldEnrich = false, } = options;
    // Pick the strongest strategy the supplied options can support.
    const strategy = options.strategy
        ?? (embeddingModel ? "combined" : "hybrid");
    const toolNames = Object.keys(tools);
    const toolNameSet = new Set(toolNames);
    // `descriptions` is reassigned by warmUp() when LLM enrichment runs,
    // so everything below must read it through this closure binding.
    let descriptions = toolNames.map((name) => ({
        name,
        text: buildToolDescription(name, tools[name]),
    }));
    const descriptionMap = new Map();
    for (const d of descriptions) {
        descriptionMap.set(d.name, d.text);
    }
    let engine;
    let enriched = false;
    // Builds the search engine for the chosen strategy; throws early with
    // an actionable message when the strategy needs an embedding model.
    function buildEngine(descs) {
        if (strategy === "combined") {
            if (!embeddingModel) {
                throw new Error('toolpick: embeddingModel is required when using strategy "combined". ' +
                    'Example: createToolIndex(tools, { embeddingModel: openai.embeddingModel("text-embedding-3-small") })');
            }
            return new CombinedSearch(descs, embeddingModel, embeddingCache);
        }
        else if (strategy === "semantic") {
            if (!embeddingModel) {
                throw new Error('toolpick: embeddingModel is required when using strategy "semantic". ' +
                    'Example: createToolIndex(tools, { strategy: "semantic", embeddingModel: openai.embeddingModel("text-embedding-3-small") })');
            }
            return new SemanticSearch(descs, embeddingModel, embeddingCache);
        }
        return new HybridSearch(descs);
    }
    engine = buildEngine(descriptions);
    return {
        toolNames,
        // Optionally enriches descriptions via the reranker LLM (once), then
        // rebuilds the engine over the new text and pre-computes embeddings.
        // NOTE(review): prepareStep/middleware created before warmUp() keep a
        // reference to the pre-enrichment engine — confirm that is intended.
        async warmUp() {
            if (shouldEnrich && rerankerModel && !enriched) {
                descriptions = await enrichDescriptions(rerankerModel, descriptions);
                for (const d of descriptions) {
                    descriptionMap.set(d.name, d.text);
                }
                engine = buildEngine(descriptions);
                enriched = true;
            }
            if (engine.init)
                await engine.init();
        },
        // Pipeline: search (over-fetch when reranking) -> threshold filter ->
        // LLM rerank -> adaptive elbow cutoff -> merge alwaysActive ->
        // drop any names not actually in the index.
        async select(query, selectOptions = {}) {
            const { maxTools = 5, alwaysActive = [], threshold, adaptive = true } = selectOptions;
            const fetchCount = rerankerModel ? maxTools * 3 : maxTools;
            let results = await engine.search(query, fetchCount);
            if (threshold !== undefined) {
                results = results.filter((r) => r.score >= threshold);
            }
            if (rerankerModel) {
                results = await rerank(rerankerModel, query, results, descriptionMap, maxTools);
            }
            if (adaptive) {
                results = findElbow(results, maxTools);
            }
            const selected = results.map((r) => r.name);
            const merged = [...new Set([...selected, ...alwaysActive])];
            return merged.filter((name) => toolNameSet.has(name));
        },
        prepareStep(stepOptions) {
            return createPrepareStep(engine, toolNames, stepOptions);
        },
        middleware(mwOptions) {
            return createMiddleware(engine, toolNames, mwOptions);
        },
        searchTool() {
            return createSearchTool(engine, descriptions);
        },
    };
}
|
package/dist/utils.d.ts
ADDED
package/dist/utils.js
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
const STOPWORDS = new Set([
    "a", "an", "the", "and", "or", "but", "if", "then", "else",
    "for", "of", "in", "on", "to", "from", "by", "with", "as",
    "at", "is", "are", "was", "were", "be", "been", "it", "this",
    "that", "these", "those", "not", "no", "can", "could", "should",
    "would", "may", "might", "do", "does", "did", "have", "has",
    "had", "you", "your",
]);
/**
 * Splits a compound identifier into lowercase words.
 * Handles snake_case, camelCase, PascalCase, acronym runs
 * ("HTTPServer" -> ["http", "server"]) and — fixing a prior gap —
 * mixed forms such as "get_userName" -> ["get", "user", "name"],
 * where camelCase splitting was previously skipped on underscore parts.
 * Returns [] for inputs with no word characters (e.g. "__").
 */
export function splitCompound(word) {
    const underscoreParts = word.includes("_")
        ? word.split("_").filter((s) => s.length > 0)
        : [word];
    const result = [];
    for (const part of underscoreParts) {
        const camelParts = part
            .replace(/([a-z])([A-Z])/g, "$1 $2")       // fooBar  -> foo Bar
            .replace(/([A-Z]+)([A-Z][a-z])/g, "$1 $2") // HTTPServer -> HTTP Server
            .toLowerCase()
            .split(/\s+/)
            .filter((s) => s.length > 0);
        result.push(...camelParts);
    }
    return result;
}
/**
 * Tokenizes free text for keyword indexing: strips punctuation
 * (Unicode-aware, keeps letters/digits/underscores), splits compound
 * identifiers, lowercases, and removes common English stopwords.
 */
export function tokenize(text) {
    const rawWords = text
        .replace(/[^\p{L}\p{N}_\s]/gu, " ")
        .split(/\s+/)
        .filter((t) => t.length > 0);
    const tokens = [];
    for (const word of rawWords) {
        for (const part of splitCompound(word)) {
            // splitCompound already lowercases every part it returns.
            if (!STOPWORDS.has(part)) {
                tokens.push(part);
            }
        }
    }
    return tokens;
}
|
package/package.json
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "toolpick",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Dynamic tool selection for the Vercel AI SDK. Automatically picks the right tools per step.",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"repository": {
|
|
8
|
+
"type": "git",
|
|
9
|
+
"url": "https://github.com/pontusab/directories"
|
|
10
|
+
},
|
|
11
|
+
"keywords": [
|
|
12
|
+
"ai",
|
|
13
|
+
"ai-sdk",
|
|
14
|
+
"tools",
|
|
15
|
+
"tool-selection",
|
|
16
|
+
"activeTools",
|
|
17
|
+
"vercel",
|
|
18
|
+
"llm",
|
|
19
|
+
"agent"
|
|
20
|
+
],
|
|
21
|
+
"main": "dist/index.js",
|
|
22
|
+
"types": "dist/index.d.ts",
|
|
23
|
+
"exports": {
|
|
24
|
+
".": {
|
|
25
|
+
"types": "./dist/index.d.ts",
|
|
26
|
+
"import": "./dist/index.js"
|
|
27
|
+
},
|
|
28
|
+
"./eval": {
|
|
29
|
+
"types": "./dist/eval/index.d.ts",
|
|
30
|
+
"import": "./dist/eval/index.js"
|
|
31
|
+
}
|
|
32
|
+
},
|
|
33
|
+
"files": [
|
|
34
|
+
"dist"
|
|
35
|
+
],
|
|
36
|
+
"engines": {
|
|
37
|
+
"node": ">=18"
|
|
38
|
+
},
|
|
39
|
+
"scripts": {
|
|
40
|
+
"build": "tsc",
|
|
41
|
+
"dev": "tsc --watch",
|
|
42
|
+
"test": "bun test src/test"
|
|
43
|
+
},
|
|
44
|
+
"peerDependencies": {
|
|
45
|
+
"ai": ">=4.0",
|
|
46
|
+
"zod": ">=3.25 || >=4.0"
|
|
47
|
+
},
|
|
48
|
+
"peerDependenciesMeta": {
|
|
49
|
+
"ai": {
|
|
50
|
+
"optional": false
|
|
51
|
+
},
|
|
52
|
+
"zod": {
|
|
53
|
+
"optional": false
|
|
54
|
+
}
|
|
55
|
+
},
|
|
56
|
+
"devDependencies": {
|
|
57
|
+
"@ai-sdk/openai": "^3.0.47",
|
|
58
|
+
"@types/node": "^25.5.0",
|
|
59
|
+
"ai": "^6.0.134",
|
|
60
|
+
"typescript": "^5.9.3"
|
|
61
|
+
}
|
|
62
|
+
}
|