@parallel-web/ai-sdk-tools 0.1.5 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +142 -131
- package/dist/index.cjs +106 -101
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +103 -24
- package/dist/index.d.ts +103 -24
- package/dist/index.js +105 -102
- package/dist/index.js.map +1 -1
- package/package.json +3 -3
package/README.md
CHANGED
@@ -12,7 +12,7 @@ pnpm add ai @parallel-web/ai-sdk-tools
 yarn add ai @parallel-web/ai-sdk-tools
 ```
 
-> **Note:** This package requires AI SDK v5.
+> **Note:** This package requires AI SDK v5. For AI SDK v4, use `parameters` instead of `inputSchema` when defining tools manually with the `parallel-web` SDK.
 
 ## Usage
 
@@ -20,17 +20,26 @@ Add `PARALLEL_API_KEY` obtained from [Parallel Platform](https://platform.parall
 
 ### Search Tool
 
-`searchTool` uses [Parallel's
+`searchTool` uses [Parallel's Search API](https://docs.parallel.ai/api-reference/search-beta/search) to perform web searches and return LLM-optimized results.
+
+**Input schema:**
+- `objective` (required): Natural-language description of what the web search is trying to find
+- `search_queries` (optional): List of keyword search queries (1-6 words each)
+- `mode` (optional): `'agentic'` (default) for concise results in agentic loops, or `'one-shot'` for comprehensive single-response results
 
 ### Extract Tool
 
-`extractTool` uses [Parallel's
+`extractTool` uses [Parallel's Extract API](https://docs.parallel.ai/api-reference/extract-beta/extract) to fetch and extract relevant content from specific URLs.
+
+**Input schema:**
+- `urls` (required): List of URLs to extract content from (max 10)
+- `objective` (optional): Natural-language description of what information you're looking for
 
 ### Basic Example
 
 ```typescript
 import { openai } from '@ai-sdk/openai';
-import { streamText
+import { streamText } from 'ai';
 import { searchTool, extractTool } from '@parallel-web/ai-sdk-tools';
 
 const result = streamText({
@@ -46,16 +55,47 @@ const result = streamText({
 });
 
 // Stream the response
-return result.
+return result.toUIMessageStreamResponse();
 ```
 
-
+## Factory Functions
+
+For more control over the tool configuration, use the factory functions to create tools with custom defaults:
+
+### createSearchTool
 
-
+Create a search tool with custom defaults for mode, max_results, excerpts, source_policy, or fetch_policy.
 
 ```typescript
-import {
-
+import { createSearchTool } from '@parallel-web/ai-sdk-tools';
+
+const myCustomSearchTool = createSearchTool({
+  mode: 'one-shot', // 'one-shot' returns more comprehensive results and longer excerpts to answer questions from a single response.
+  max_results: 5, // Limit to 5 results
+});
+```
+
+### createExtractTool
+
+Create an extract tool with custom defaults for excerpts, full_content, or fetch_policy.
+
+```typescript
+import { createExtractTool } from '@parallel-web/ai-sdk-tools';
+
+const myExtractTool = createExtractTool({
+  full_content: true, // Include full page content
+  excerpts: {
+    max_chars_per_result: 10000,
+  },
+});
+```
+
+## Direct API Usage
+
+You can also use the `parallel-web` SDK directly for maximum flexibility:
+
+```typescript
+import { tool } from 'ai';
 import { z } from 'zod';
 import { Parallel } from 'parallel-web';
 
@@ -64,149 +104,120 @@ const parallel = new Parallel({
 });
 
 const webSearch = tool({
-  description: '
+  description: 'Search the web for information.',
   inputSchema: z.object({
-
-    usersQuestion: z.string().describe("The user's question"),
+    query: z.string().describe("The user's question"),
   }),
-  execute: async ({
-    const
-      objective:
-
-      max_results:
-      max_chars_per_result: 1000,
+  execute: async ({ query }) => {
+    const result = await parallel.beta.search({
+      objective: query,
+      mode: 'agentic',
+      max_results: 5,
     });
-    return
+    return result;
   },
 });
 ```
 
-##
+## API Reference
 
-
+- [Search API Documentation](https://docs.parallel.ai/search/search-quickstart)
+- [Extract API Documentation](https://docs.parallel.ai/extract/extract-quickstart)
+- [Search API Best Practices](https://docs.parallel.ai/search/best-practices)
 
-
+## Response Format
+
+Both tools return the raw API response from Parallel:
+
+### Search Response
 
 ```typescript
-
-
-
+{
+  search_id: string;
+  results: Array<{
+    url: string;
+    title?: string;
+    publish_date?: string;
+    excerpts: string[];
+  }>;
+  usage?: Array<{ name: string; count: number }>;
+  warnings?: Array<{ code: string; message: string }>;
+}
+```
 
-
-  apiKey: process.env.PARALLEL_API_KEY,
-});
+### Extract Response
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  };
-    case 'list':
-    default:
-      return {
-        max_results: 20,
-        max_chars_per_result: 1500
-      };
-  }
+```typescript
+{
+  extract_id: string;
+  results: Array<{
+    url: string;
+    title?: string;
+    excerpts?: string[];
+    full_content?: string;
+    publish_date?: string;
+  }>;
+  errors: Array<{
+    url: string;
+    error_type: string;
+    http_status_code?: number;
+    content?: string;
+  }>;
+  usage?: Array<{ name: string; count: number }>;
+  warnings?: Array<{ code: string; message: string }>;
 }
+```
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      .optional()
-      .describe('List of keyword search queries of 1-6 words.'),
-    include_domains: z
-      .array(z.string())
-      .optional()
-      .describe('List of valid URL domains to restrict search results.'),
-  }),
-  execute: async (
-    { ...args },
-    { abortSignal }: { abortSignal?: AbortSignal }
-  ) => {
-    const results = const results = await search(
-      { ...args, ...getSearchParams(args.search_type) },
-      { abortSignal }
-    );
-    return {
-      searchParams: { objective, search_type, search_queries, include_domains },
-      answer: results,
-    };
-  },
+## Migration from v0.1.x
+
+Version 0.2.0 introduces an updated API that conforms with Parallel's Search and Extract MCP tools:
+
+### searchTool changes
+
+- **Input schema changed**: Removed `search_type` and `include_domains`. Added `mode` parameter.
+- **Return value changed**: Now returns raw API response (`{ search_id, results, ... }`) instead of `{ searchParams, answer }`.
+
+**Before (v0.1.x):**
+```typescript
+const result = await searchTool.execute({
+  objective: 'Find TypeScript info',
+  search_type: 'list',
+  search_queries: ['TypeScript'],
+  include_domains: ['typescriptlang.org'],
 });
+console.log(result.answer.results);
 ```
 
-
-
+**After (v0.2.0):**
 ```typescript
-
-
-
+const result = await searchTool.execute({
+  objective: 'Find TypeScript info',
+  search_queries: ['TypeScript'],
+  mode: 'agentic', // optional, defaults to 'agentic'
+});
+console.log(result.results);
+```
 
-
-
+### extractTool changes
+
+- **Input schema changed**: `urls` is now first, `objective` is optional.
+- **Return value changed**: Now returns raw API response (`{ extract_id, results, errors, ... }`) instead of `{ searchParams, answer }`.
+
+**Before (v0.1.x):**
+```typescript
+const result = await extractTool.execute({
+  objective: 'Extract content',
+  urls: ['https://example.com'],
+  search_queries: ['keyword'],
 });
+console.log(result.answer.results);
+```
 
-
-
-
-
-
-- Exploring URLs returned by a web search in greater depth`,
-  parameters: z.object({
-    // v4 uses parameters instead of inputSchema
-    objective: z
-      .string()
-      .describe(
-        "Natural-language description of what information you're looking for from the URLs."
-      ),
-    urls: z
-      .array(z.string())
-      .describe(
-        'List of URLs to extract content from. Maximum 10 URLs per request.'
-      ),
-    search_queries: z
-      .array(z.string())
-      .optional()
-      .describe('Optional keyword search queries related to the objective.'),
-  }),
-  execute: async ({ objective, urls, search_queries }) => {
-    const results = await parallel.beta.extract({
-      objective,
-      urls,
-      search_queries,
-    });
-    return {
-      searchParams: { objective, urls, search_queries },
-      answer: results,
-    };
-  },
+**After (v0.2.0):**
+```typescript
+const result = await extractTool.execute({
+  urls: ['https://example.com'],
+  objective: 'Extract content', // optional
 });
+console.log(result.results);
 ```
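Note on the migration above: the LLM-facing `include_domains` input is gone, and per the new Factory Functions section (and the type declarations later in this diff) domain and freshness restrictions move into code-supplied defaults. A minimal sketch of that wiring against v0.2.0; the `source_policy` field names are assumed from the `parallel-web` SDK types rather than shown in this diff:

```typescript
import { openai } from '@ai-sdk/openai';
import { streamText } from 'ai';
import { createSearchTool, extractTool } from '@parallel-web/ai-sdk-tools';

// v0.1.x let the model pass include_domains per call; in v0.2.0 the restriction
// is configured once in code via source_policy on a factory-created tool.
const docsSearch = createSearchTool({
  mode: 'agentic',
  max_results: 10,
  // Assumed SourcePolicy shape -- verify against parallel-web's exported types.
  source_policy: { include_domains: ['typescriptlang.org'] },
});

const result = streamText({
  model: openai('gpt-4o'),
  prompt: 'Summarize what changed in the latest TypeScript release.',
  tools: { webSearch: docsSearch, webExtract: extractTool },
});
```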
package/dist/index.cjs
CHANGED
@@ -10,7 +10,10 @@ var parallelClient = new Proxy({}, {
   get(_target, prop) {
     if (!_parallelClient) {
       _parallelClient = new parallelWeb.Parallel({
-        apiKey: process.env["PARALLEL_API_KEY"]
+        apiKey: process.env["PARALLEL_API_KEY"],
+        defaultHeaders: {
+          "X-Tool-Calling-Package": `npm:@parallel-web/ai-sdk-tools/v${"0.2.0"}`
+        }
       });
     }
     return _parallelClient[prop];
@@ -18,90 +21,70 @@ var parallelClient = new Proxy({}, {
 });
 
 // src/tools/search.ts
-
-
-
-
-    case "general":
-      return { max_results: 10, max_chars_per_result: 9e3 };
-    case "single_page":
-      return { max_results: 2, max_chars_per_result: 3e4 };
-    case "list":
-    default:
-      return { max_results: 20, max_chars_per_result: 1500 };
-  }
-}
-var search = async (searchArgs, { abortSignal }) => {
-  return await parallelClient.beta.search(
-    {
-      ...searchArgs
-    },
-    {
-      signal: abortSignal,
-      headers: { "parallel-beta": "search-extract-2025-10-10" }
-    }
-  );
-};
+var objectiveDescription = `Natural-language description of what the web search is trying to find.
+Try to make the search objective atomic, looking for a specific piece of information. May include guidance about preferred sources or freshness.`;
+var searchQueriesDescription = `(optional) List of keyword search queries of 1-6 words, which may include search operators. The search queries should be related to the objective. Limited to 5 entries of 200 characters each.`;
+var modeDescription = `Presets default values for different use cases. "one-shot" returns more comprehensive results and longer excerpts to answer questions from a single response, while "agentic" returns more concise, token-efficient results for use in an agentic loop. Defaults to "agentic".`;
 var searchTool = ai.tool({
-  description: `
-web_search_parallel tool returns ranked, extended web excerpts optimized for LLMs.
-Intelligently scale the number of web_search_parallel tool calls to get more information
-when needed, from a single call for simple factual questions to five or more calls for
-complex research questions.
-
-* Keep queries concise - 1-6 words for best results. Start broad with very short
-  queries and medium context, then add words to narrow results or use high context
-  if needed.
-* Include broader context about what the search is trying to accomplish in the
-  \`objective\` field. This helps the search engine understand the user's intent and
-  provide relevant results and excerpts.
-* Never repeat similar search queries - make every query unique. If initial results are
-  insufficient, reformulate queries to obtain new and better results.
+  description: `Purpose: Perform web searches and return results in an LLM-friendly format.
 
-
-- For simple queries, a one-shot call to depth is usually sufficient.
-- For complex multi-hop queries, first try to use breadth to narrow down sources. Then
-use other search types with include_domains to get more detailed results.`,
+Use the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`,
   inputSchema: zod.z.object({
-    objective: zod.z.string().describe(
-
-
-freshness guidance here. Limit to 200 characters. This should reflect the end goal so
-that the tool can better understand the intent and return the best results. Do not
-dump long texts.`
-    ),
-    search_type: zod.z.enum(["list", "general", "single_page", "targeted"]).describe(
-      `Can be "list", "general", "single_page" or "targeted".
-"list" should be used for searching for data broadly, like aggregating data or
-considering multiple sources or doing broad initial research. "targeted" should be
-used for searching for data from a specific source set. "general" is a catch all case
-if there is no specific use case from list or targeted. "single_page" extracts data
-from a single page - extremely targeted. If there is a specific webpage you want the
-data from, use "single_page" and mention the URL in the objective.
-Use search_type appropriately.`
-    ).optional().default("list"),
-    search_queries: zod.z.array(zod.z.string()).optional().describe(
-      `(optional) List of keyword search queries of 1-6
-words, which may include search operators. The search queries should be related to the
-objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are
-ideal.`
-    ),
-    include_domains: zod.z.array(zod.z.string()).optional().describe(`(optional) List of valid URL domains to explicitly
-focus on for the search. This will restrict all search results to only include results
-from the provided list. This is useful when you want to only use a specific set of
-sources. example: ["google.com", "wikipedia.org"]. Maximum 10 entries.`)
+    objective: zod.z.string().describe(objectiveDescription),
+    search_queries: zod.z.array(zod.z.string()).optional().describe(searchQueriesDescription),
+    mode: zod.z.enum(["agentic", "one-shot"]).optional().default("agentic").describe(modeDescription)
   }),
-  execute: async function({
-
-    {
-
+  execute: async function({ objective, search_queries, mode }, { abortSignal }) {
+    return await parallelClient.beta.search(
+      {
+        objective,
+        search_queries,
+        mode
+      },
+      {
+        signal: abortSignal
+      }
     );
-    return {
-      searchParams: args,
-      answer: results
-    };
   }
 });
+var defaultSearchDescription = `Purpose: Perform web searches and return results in an LLM-friendly format.
+
+Use the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`;
+function createSearchTool(options = {}) {
+  const {
+    mode: defaultMode = "agentic",
+    max_results,
+    excerpts,
+    source_policy,
+    fetch_policy,
+    description = defaultSearchDescription
+  } = options;
+  return ai.tool({
+    description,
+    inputSchema: zod.z.object({
+      objective: zod.z.string().describe(objectiveDescription),
+      search_queries: zod.z.array(zod.z.string()).optional().describe(searchQueriesDescription)
+    }),
+    execute: async function({ objective, search_queries }, { abortSignal }) {
+      return await parallelClient.beta.search(
+        {
+          objective,
+          search_queries,
+          mode: defaultMode,
+          max_results,
+          excerpts,
+          source_policy,
+          fetch_policy
+        },
+        {
+          signal: abortSignal
+        }
+      );
+    }
+  });
+}
+var urlsDescription = `List of URLs to extract content from. Must be valid HTTP/HTTPS URLs. Maximum 10 URLs per request.`;
+var objectiveDescription2 = `Natural-language description of what information you're looking for from the URLs.`;
 var extractTool = ai.tool({
   description: `Purpose: Fetch and extract relevant content from specific web URLs.
 
@@ -109,36 +92,58 @@ Ideal Use Cases:
 - Extracting content from specific URLs you've already identified
 - Exploring URLs returned by a web search in greater depth`,
   inputSchema: zod.z.object({
-
-
-Limit to 200 characters.`
-    ),
-    urls: zod.z.array(zod.z.string()).describe(
-      `List of URLs to extract content from. Must be valid
-HTTP/HTTPS URLs. Maximum 10 URLs per request.`
-    ),
-    search_queries: zod.z.array(zod.z.string()).optional().describe(
-      `(optional) List of keyword search queries of 1-6
-words, which may include search operators. The search queries should be related to the
-objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are
-ideal.`
-    )
+    urls: zod.z.array(zod.z.string()).describe(urlsDescription),
+    objective: zod.z.string().optional().describe(objectiveDescription2)
   }),
-  execute: async function({
-
-    { ...args },
+  execute: async function({ urls, objective }, { abortSignal }) {
+    return await parallelClient.beta.extract(
       {
-
-
+        urls,
+        objective
+      },
+      {
+        signal: abortSignal
       }
     );
-    return {
-      searchParams: args,
-      answer: results
-    };
   }
 });
+var defaultExtractDescription = `Purpose: Fetch and extract relevant content from specific web URLs.
+
+Ideal Use Cases:
+- Extracting content from specific URLs you've already identified
+- Exploring URLs returned by a web search in greater depth`;
+function createExtractTool(options = {}) {
+  const {
+    excerpts,
+    full_content,
+    fetch_policy,
+    description = defaultExtractDescription
+  } = options;
+  return ai.tool({
+    description,
+    inputSchema: zod.z.object({
+      urls: zod.z.array(zod.z.string()).describe(urlsDescription),
+      objective: zod.z.string().optional().describe(objectiveDescription2)
+    }),
+    execute: async function({ urls, objective }, { abortSignal }) {
+      return await parallelClient.beta.extract(
+        {
+          urls,
+          objective,
+          excerpts,
+          full_content,
+          fetch_policy
+        },
+        {
+          signal: abortSignal
+        }
+      );
+    }
+  });
+}
 
+exports.createExtractTool = createExtractTool;
+exports.createSearchTool = createSearchTool;
 exports.extractTool = extractTool;
 exports.searchTool = searchTool;
 //# sourceMappingURL=index.cjs.map
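The compiled output above initializes the shared `Parallel` client behind a `Proxy`, so `PARALLEL_API_KEY` is only read on first use rather than at import time, and every request is tagged with an `X-Tool-Calling-Package` header. A stripped-down sketch of that lazy-client pattern, simplified from the bundled code shown here:

```typescript
import { Parallel } from 'parallel-web';

let _client: Parallel | null = null;

// The Proxy defers construction until the first property access, so merely
// importing the tools does not fail when PARALLEL_API_KEY is not yet set.
export const parallelClient = new Proxy({} as Parallel, {
  get(_target, prop: keyof Parallel) {
    if (!_client) {
      _client = new Parallel({
        apiKey: process.env['PARALLEL_API_KEY'],
        defaultHeaders: {
          'X-Tool-Calling-Package': 'npm:@parallel-web/ai-sdk-tools/v0.2.0',
        },
      });
    }
    return _client[prop];
  },
});
```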
package/dist/index.cjs.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/client.ts","../src/tools/search.ts","../src/tools/extract.ts"],"names":["Parallel","tool","z"],"mappings":";;;;;;;AAMA,IAAI,eAAA,GAAmC,IAAA;AAEhC,IAAM,cAAA,GAAiB,IAAI,KAAA,CAAM,EAAC,EAAe;AAAA,EACtD,GAAA,CAAI,SAAS,IAAA,EAAM;AACjB,IAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,MAAA,eAAA,GAAkB,IAAIA,oBAAA,CAAS;AAAA,QAC7B,MAAA,EAAQ,OAAA,CAAQ,GAAA,CAAI,kBAAkB;AAAA,OACvC,CAAA;AAAA,IACH;AACA,IAAA,OAAQ,gBAAwB,IAAI,CAAA;AAAA,EACtC;AACF,CAAC,CAAA;;;ACRD,SAAS,gBACP,WAAA,EACgE;AAChE,EAAA,QAAQ,WAAA;AAAa,IACnB,KAAK,UAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,CAAA,EAAG,oBAAA,EAAsB,IAAA,EAAM;AAAA,IACvD,KAAK,SAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,EAAA,EAAI,oBAAA,EAAsB,GAAA,EAAK;AAAA,IACvD,KAAK,aAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,CAAA,EAAG,oBAAA,EAAsB,GAAA,EAAM;AAAA,IACvD,KAAK,MAAA;AAAA,IACL;AACE,MAAA,OAAO,EAAE,WAAA,EAAa,EAAA,EAAI,oBAAA,EAAsB,IAAA,EAAK;AAAA;AAE3D;AAEA,IAAM,MAAA,GAAS,OACb,UAAA,EACA,EAAE,aAAY,KACX;AACH,EAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,IAC/B;AAAA,MACE,GAAG;AAAA,KACL;AAAA,IACA;AAAA,MACE,MAAA,EAAQ,WAAA;AAAA,MACR,OAAA,EAAS,EAAE,eAAA,EAAiB,2BAAA;AAA4B;AAC1D,GACF;AACF,CAAA;AAEO,IAAM,aAAaC,OAAA,CAAK;AAAA,EAC7B,WAAA,EAAa,CAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA,yEAAA,CAAA;AAAA,EAmBb,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAWA,KAAA,CAAE,MAAA,EAAO,CAAE,QAAA;AAAA,MACpB,CAAA;AAAA;AAAA;AAAA;AAAA,iBAAA;AAAA,KAKF;AAAA,IACA,WAAA,EAAaA,MACV,IAAA,CAAK,CAAC,QAAQ,SAAA,EAAW,aAAA,EAAe,UAAU,CAAC,CAAA,CACnD,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAAA;AAAA,KAQF,CACC,QAAA,EAAS,CACT,OAAA,CAAQ,MAAM,CAAA;AAAA,IACjB,cAAA,EAAgBA,MACb,KAAA,CAAMA,KAAA,CAAE,QAAQ,CAAA,CAChB,UAAS,CACT,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA,OAAA;AAAA,KAIF;AAAA,IACF,eAAA,EAAiBA,MAAE,KAAA,CAAMA,KAAA,CAAE,QAAQ,CAAA,CAAE,QAAA,EAAS,CAC3C,QAAA,CAAS,CAAA;AAAA;AAAA;AAAA,uEAAA,CAGwD;AAAA,GACrE,CAAA;AAAA,EAED,OAAA,EAAS,eAAgB,EAAE,GAAG,MAAK,EAAG,EAAE,aAAY,EAAG;AACrD,IAAA,MAAM,UAAU,MAAM,MAAA;AAAA,MACpB,EAAE,GAAG,IAAA,EAAM,GAAG,eAAA,CAAgB,IAAA,CAAK,WAAW,CAAA,EAAE;AAAA,MAChD,EAAE,WAAA;AAAY,KAChB;AAEA,IAAA,OAAO;AAAA,MACL,YAAA,EAAc,IAAA;AAAA,MACd,MAAA,EAAQ;AAAA,KACV;AAAA,EACF;AACF,CAAC;ACrGM,IAAM,cAAcD,OAAAA,CAAK;AAAA,EAC9B,WAAA,EAAa,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAAA,EAKb,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAWA,KAAAA,CAAE,MAAA,EAAO,CAAE,QAAA;AAAA,MACpB,CAAA;AAAA,yBAAA;AAAA,KAEF;AAAA,IAEA,MAAMA,KAAAA,CAAE,KAAA,CAAMA,KAAAA,CAAE,MAAA,EAAQ,CAAA,CAAE,QAAA;AAAA,MACxB,CAAA;AAAA,6CAAA;AAAA,KAEF;AAAA,IACA,cAAA,EAAgBA,MACb,KAAA,CAAMA,KAAAA,CAAE,QAAQ,CAAA,CAChB,UAAS,CACT,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA,OAAA;AAAA;AAIF,GACH,CAAA;AAAA,EAED,OAAA,EAAS,eAAgB,EAAE,GAAG,MAAK,EAAG,EAAE,aAAY,EAAG;AACrD,IAAA,MAAM,OAAA,GAAU,MAAM,cAAA,CAAe,IAAA,CAAK,OAAA;AAAA,MACxC,EAAE,GAAG,IAAA,EAAK;AAAA,MACV;AAAA,QACE,MAAA,EAAQ,WAAA;AAAA,QACR,OAAA,EAAS,EAAE,eAAA,EAAiB,2BAAA;AAA4B;AAC1D,KACF;AAEA,IAAA,OAAO;AAAA,MACL,YAAA,EAAc,IAAA;AAAA,MACd,MAAA,EAAQ;AAAA,KACV;AAAA,EACF;AACF,CAAC","file":"index.cjs","sourcesContent":["/**\n * Shared Parallel Web client instance\n */\n\nimport { Parallel } from 'parallel-web';\n\nlet _parallelClient: Parallel | null = null;\n\nexport const parallelClient = new Proxy({} as Parallel, {\n get(_target, prop) {\n if (!_parallelClient) {\n _parallelClient = new Parallel({\n apiKey: process.env['PARALLEL_API_KEY'],\n });\n }\n return (_parallelClient as any)[prop];\n },\n});\n","/**\n * Search tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport { BetaSearchParams } from 'parallel-web/resources/beta/beta.mjs';\nimport { parallelClient } from 
'../client.js';\n\nfunction getSearchParams(\n search_type: 'list' | 'targeted' | 'general' | 'single_page'\n): Pick<BetaSearchParams, 'max_results' | 'max_chars_per_result'> {\n switch (search_type) {\n case 'targeted':\n return { max_results: 5, max_chars_per_result: 16000 };\n case 'general':\n return { max_results: 10, max_chars_per_result: 9000 };\n case 'single_page':\n return { max_results: 2, max_chars_per_result: 30000 };\n case 'list':\n default:\n return { max_results: 20, max_chars_per_result: 1500 };\n }\n}\n\nconst search = async (\n searchArgs: BetaSearchParams,\n { abortSignal }: { abortSignal: AbortSignal | undefined }\n) => {\n return await parallelClient.beta.search(\n {\n ...searchArgs,\n },\n {\n signal: abortSignal,\n headers: { 'parallel-beta': 'search-extract-2025-10-10' },\n }\n );\n};\n\nexport const searchTool = tool({\n description: `Use the web_search_parallel tool to access information from the web. The\nweb_search_parallel tool returns ranked, extended web excerpts optimized for LLMs.\nIntelligently scale the number of web_search_parallel tool calls to get more information\nwhen needed, from a single call for simple factual questions to five or more calls for\ncomplex research questions.\n\n* Keep queries concise - 1-6 words for best results. Start broad with very short\n queries and medium context, then add words to narrow results or use high context\n if needed.\n* Include broader context about what the search is trying to accomplish in the\n \\`objective\\` field. This helps the search engine understand the user's intent and\n provide relevant results and excerpts.\n* Never repeat similar search queries - make every query unique. If initial results are\n insufficient, reformulate queries to obtain new and better results.\n\nHow to use:\n- For simple queries, a one-shot call to depth is usually sufficient.\n- For complex multi-hop queries, first try to use breadth to narrow down sources. Then\nuse other search types with include_domains to get more detailed results.`,\n inputSchema: z.object({\n objective: z.string().describe(\n `Natural-language description of what the web research goal\n is. Specify the broad intent of the search query here. Also include any source or\n freshness guidance here. Limit to 200 characters. This should reflect the end goal so\n that the tool can better understand the intent and return the best results. Do not\n dump long texts.`\n ),\n search_type: z\n .enum(['list', 'general', 'single_page', 'targeted'])\n .describe(\n `Can be \"list\", \"general\", \"single_page\" or \"targeted\".\n \"list\" should be used for searching for data broadly, like aggregating data or\n considering multiple sources or doing broad initial research. \"targeted\" should be\n used for searching for data from a specific source set. \"general\" is a catch all case\n if there is no specific use case from list or targeted. \"single_page\" extracts data\n from a single page - extremely targeted. If there is a specific webpage you want the\n data from, use \"single_page\" and mention the URL in the objective.\n Use search_type appropriately.`\n )\n .optional()\n .default('list'),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(\n `(optional) List of keyword search queries of 1-6\n words, which may include search operators. The search queries should be related to the\n objective. Limited to 5 entries of 200 characters each. 
Usually 1-3 queries are\n ideal.`\n ),\n include_domains: z.array(z.string()).optional()\n .describe(`(optional) List of valid URL domains to explicitly\n focus on for the search. This will restrict all search results to only include results\n from the provided list. This is useful when you want to only use a specific set of\n sources. example: [\"google.com\", \"wikipedia.org\"]. Maximum 10 entries.`),\n }),\n\n execute: async function ({ ...args }, { abortSignal }) {\n const results = await search(\n { ...args, ...getSearchParams(args.search_type) },\n { abortSignal }\n );\n\n return {\n searchParams: args,\n answer: results,\n };\n },\n});\n","/**\n * Extract tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport { parallelClient } from '../client.js';\n\nexport const extractTool = tool({\n description: `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`,\n inputSchema: z.object({\n objective: z.string().describe(\n `Natural-language description of what information you're looking for from the URLs. \n Limit to 200 characters.`\n ),\n\n urls: z.array(z.string()).describe(\n `List of URLs to extract content from. Must be valid\nHTTP/HTTPS URLs. Maximum 10 URLs per request.`\n ),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(\n `(optional) List of keyword search queries of 1-6\n words, which may include search operators. The search queries should be related to the\n objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are\n ideal.`\n ),\n }),\n\n execute: async function ({ ...args }, { abortSignal }) {\n const results = await parallelClient.beta.extract(\n { ...args },\n {\n signal: abortSignal,\n headers: { 'parallel-beta': 'search-extract-2025-10-10' },\n }\n );\n\n return {\n searchParams: args,\n answer: results,\n };\n },\n});\n"]}
+
{"version":3,"sources":["../src/client.ts","../src/tools/search.ts","../src/tools/extract.ts"],"names":["Parallel","tool","z","objectiveDescription"],"mappings":";;;;;;;AAQA,IAAI,eAAA,GAAmC,IAAA;AAEhC,IAAM,cAAA,GAAiB,IAAI,KAAA,CAAM,EAAC,EAAe;AAAA,EACtD,GAAA,CAAI,SAAS,IAAA,EAAsB;AACjC,IAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,MAAA,eAAA,GAAkB,IAAIA,oBAAA,CAAS;AAAA,QAC7B,MAAA,EAAQ,OAAA,CAAQ,GAAA,CAAI,kBAAkB,CAAA;AAAA,QACtC,cAAA,EAAgB;AAAA,UACd,wBAAA,EAA0B,mCAAmC,OAA8B,CAAA;AAAA;AAC7F,OACD,CAAA;AAAA,IACH;AACA,IAAA,OAAO,gBAAgB,IAAI,CAAA;AAAA,EAC7B;AACF,CAAC,CAAA;;;AC4BD,IAAM,oBAAA,GAAuB,CAAA;AAAA,gJAAA,CAAA;AAG7B,IAAM,wBAAA,GAA2B,CAAA,+LAAA,CAAA;AAEjC,IAAM,eAAA,GAAkB,CAAA,8QAAA,CAAA;AAMjB,IAAM,aAAaC,OAAA,CAAK;AAAA,EAC7B,WAAA,EAAa,CAAA;;AAAA,iJAAA,CAAA;AAAA,EAGb,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAWA,KAAA,CAAE,MAAA,EAAO,CAAE,SAAS,oBAAoB,CAAA;AAAA,IACnD,cAAA,EAAgBA,KAAA,CACb,KAAA,CAAMA,KAAA,CAAE,MAAA,EAAQ,CAAA,CAChB,QAAA,EAAS,CACT,QAAA,CAAS,wBAAwB,CAAA;AAAA,IACpC,IAAA,EAAMA,KAAA,CACH,IAAA,CAAK,CAAC,WAAW,UAAU,CAAC,CAAA,CAC5B,QAAA,EAAS,CACT,OAAA,CAAQ,SAAS,CAAA,CACjB,SAAS,eAAe;AAAA,GAC5B,CAAA;AAAA,EAED,OAAA,EAAS,eACP,EAAE,SAAA,EAAW,gBAAgB,IAAA,EAAK,EAClC,EAAE,WAAA,EAAY,EACd;AACA,IAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,MAC/B;AAAA,QACE,SAAA;AAAA,QACA,cAAA;AAAA,QACA;AAAA,OACF;AAAA,MACA;AAAA,QACE,MAAA,EAAQ;AAAA;AACV,KACF;AAAA,EACF;AACF,CAAC;AAED,IAAM,wBAAA,GAA2B,CAAA;;AAAA,iJAAA,CAAA;AAoB1B,SAAS,gBAAA,CAAiB,OAAA,GAAmC,EAAC,EAAG;AACtE,EAAA,MAAM;AAAA,IACJ,MAAM,WAAA,GAAc,SAAA;AAAA,IACpB,WAAA;AAAA,IACA,QAAA;AAAA,IACA,aAAA;AAAA,IACA,YAAA;AAAA,IACA,WAAA,GAAc;AAAA,GAChB,GAAI,OAAA;AAEJ,EAAA,OAAOD,OAAA,CAAK;AAAA,IACV,WAAA;AAAA,IACA,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,MACpB,SAAA,EAAWA,KAAA,CAAE,MAAA,EAAO,CAAE,SAAS,oBAAoB,CAAA;AAAA,MACnD,cAAA,EAAgBA,KAAA,CACb,KAAA,CAAMA,KAAA,CAAE,MAAA,EAAQ,CAAA,CAChB,QAAA,EAAS,CACT,QAAA,CAAS,wBAAwB;AAAA,KACrC,CAAA;AAAA,IAED,OAAA,EAAS,eAAgB,EAAE,SAAA,EAAW,gBAAe,EAAG,EAAE,aAAY,EAAG;AACvE,MAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,QAC/B;AAAA,UACE,SAAA;AAAA,UACA,cAAA;AAAA,UACA,IAAA,EAAM,WAAA;AAAA,UACN,WAAA;AAAA,UACA,QAAA;AAAA,UACA,aAAA;AAAA,UACA;AAAA,SACF;AAAA,QACA;AAAA,UACE,MAAA,EAAQ;AAAA;AACV,OACF;AAAA,IACF;AAAA,GACD,CAAA;AACH;AChHA,IAAM,eAAA,GAAkB,CAAA,iGAAA,CAAA;AAExB,IAAMC,qBAAAA,GAAuB,CAAA,kFAAA,CAAA;AAMtB,IAAM,cAAcF,OAAAA,CAAK;AAAA,EAC9B,WAAA,EAAa,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAAA,EAKb,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,IACpB,IAAA,EAAMA,MAAE,KAAA,CAAMA,KAAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,eAAe,CAAA;AAAA,IAClD,WAAWA,KAAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAASC,qBAAoB;AAAA,GAC/D,CAAA;AAAA,EAED,OAAA,EAAS,eACP,EAAE,IAAA,EAAM,WAAU,EAClB,EAAE,aAAY,EACd;AACA,IAAA,OAAO,MAAM,eAAe,IAAA,CAAK,OAAA;AAAA,MAC/B;AAAA,QACE,IAAA;AAAA,QACA;AAAA,OACF;AAAA,MACA;AAAA,QACE,MAAA,EAAQ;AAAA;AACV,KACF;AAAA,EACF;AACF,CAAC;AAED,IAAM,yBAAA,GAA4B,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAoB3B,SAAS,iBAAA,CAAkB,OAAA,GAAoC,EAAC,EAAG;AACxE,EAAA,MAAM;AAAA,IACJ,QAAA;AAAA,IACA,YAAA;AAAA,IACA,YAAA;AAAA,IACA,WAAA,GAAc;AAAA,GAChB,GAAI,OAAA;AAEJ,EAAA,OAAOF,OAAAA,CAAK;AAAA,IACV,WAAA;AAAA,IACA,WAAA,EAAaC,MAAE,MAAA,CAAO;AAAA,MACpB,IAAA,EAAMA,MAAE,KAAA,CAAMA,KAAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,eAAe,CAAA;AAAA,MAClD,WAAWA,KAAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAASC,qBAAoB;AAAA,KAC/D,CAAA;AAAA,IAED,OAAA,EAAS,eACP,EAAE,IAAA,EAAM,WAAU,EAClB,EAAE,aAAY,EACd;AACA,MAAA,OAAO,MAAM,eAAe,IAAA,CAAK,OAAA;AAAA,QAC/B;AAAA,UACE,IAAA;AAAA,UACA,SAAA;AAAA,UACA,QAAA;AAAA,UACA,YAAA;AAAA,UACA;AAAA,SACF;AAAA,QACA;AAAA,UACE,MAAA,EAAQ;AAAA;AACV,OACF;AAAA,IACF;AAAA,GACD,CAAA;AACH","file":"index.cjs","sourcesContent":["/**\n * Shared Parallel Web client instance\n */\n\ndeclare const 
__PACKAGE_VERSION__: string;\n\nimport { Parallel } from 'parallel-web';\n\nlet _parallelClient: Parallel | null = null;\n\nexport const parallelClient = new Proxy({} as Parallel, {\n get(_target, prop: keyof Parallel) {\n if (!_parallelClient) {\n _parallelClient = new Parallel({\n apiKey: process.env['PARALLEL_API_KEY'],\n defaultHeaders: {\n 'X-Tool-Calling-Package': `npm:@parallel-web/ai-sdk-tools/v${__PACKAGE_VERSION__ ?? '0.0.0'}`,\n },\n });\n }\n return _parallelClient[prop];\n },\n});\n","/**\n * Search tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport type {\n ExcerptSettings,\n FetchPolicy,\n} from 'parallel-web/resources/beta/beta.mjs';\nimport type { SourcePolicy } from 'parallel-web/resources/shared.mjs';\nimport { parallelClient } from '../client.js';\n\n/**\n * Options for creating a custom search tool with code-supplied defaults.\n */\nexport interface CreateSearchToolOptions {\n /**\n * Default mode for search. 'agentic' returns concise, token-efficient results\n * for multi-step workflows. 'one-shot' returns comprehensive results with\n * longer excerpts. Defaults to 'agentic'.\n */\n mode?: 'agentic' | 'one-shot';\n\n /**\n * Maximum number of search results to return. Defaults to 10.\n */\n max_results?: number;\n\n /**\n * Excerpt settings for controlling excerpt length.\n */\n excerpts?: ExcerptSettings;\n\n /**\n * Source policy for controlling which domains to include/exclude and freshness.\n */\n source_policy?: SourcePolicy | null;\n\n /**\n * Fetch policy for controlling cached vs fresh content.\n */\n fetch_policy?: FetchPolicy | null;\n\n /**\n * Custom tool description. If not provided, uses the default description.\n */\n description?: string;\n}\n\nconst objectiveDescription = `Natural-language description of what the web search is trying to find.\nTry to make the search objective atomic, looking for a specific piece of information. May include guidance about preferred sources or freshness.`;\n\nconst searchQueriesDescription = `(optional) List of keyword search queries of 1-6 words, which may include search operators. The search queries should be related to the objective. Limited to 5 entries of 200 characters each.`;\n\nconst modeDescription = `Presets default values for different use cases. \"one-shot\" returns more comprehensive results and longer excerpts to answer questions from a single response, while \"agentic\" returns more concise, token-efficient results for use in an agentic loop. Defaults to \"agentic\".`;\n\n/**\n * Search tool that mirrors the MCP web_search_preview tool.\n * Takes objective and optional search_queries/mode, returns raw search response.\n */\nexport const searchTool = tool({\n description: `Purpose: Perform web searches and return results in an LLM-friendly format.\n\nUse the web search tool to search the web and access information from the web. 
The tool returns ranked, extended web excerpts optimized for LLMs.`,\n inputSchema: z.object({\n objective: z.string().describe(objectiveDescription),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(searchQueriesDescription),\n mode: z\n .enum(['agentic', 'one-shot'])\n .optional()\n .default('agentic')\n .describe(modeDescription),\n }),\n\n execute: async function (\n { objective, search_queries, mode },\n { abortSignal }\n ) {\n return await parallelClient.beta.search(\n {\n objective,\n search_queries,\n mode,\n },\n {\n signal: abortSignal,\n }\n );\n },\n});\n\nconst defaultSearchDescription = `Purpose: Perform web searches and return results in an LLM-friendly format.\n\nUse the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`;\n\n/**\n * Factory function to create a search tool with custom defaults.\n *\n * Use this when you want to set defaults for mode, max_results, excerpts,\n * source_policy, or fetch_policy in your code, so the LLM only needs to\n * provide objective and search_queries.\n *\n * @example\n * ```ts\n * const mySearchTool = createSearchTool({\n * mode: 'one-shot',\n * max_results: 5,\n * excerpts: { max_chars_per_result: 5000 },\n * });\n * ```\n */\nexport function createSearchTool(options: CreateSearchToolOptions = {}) {\n const {\n mode: defaultMode = 'agentic',\n max_results,\n excerpts,\n source_policy,\n fetch_policy,\n description = defaultSearchDescription,\n } = options;\n\n return tool({\n description,\n inputSchema: z.object({\n objective: z.string().describe(objectiveDescription),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(searchQueriesDescription),\n }),\n\n execute: async function ({ objective, search_queries }, { abortSignal }) {\n return await parallelClient.beta.search(\n {\n objective,\n search_queries,\n mode: defaultMode,\n max_results,\n excerpts,\n source_policy,\n fetch_policy,\n },\n {\n signal: abortSignal,\n }\n );\n },\n });\n}\n","/**\n * Extract tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport type {\n ExcerptSettings,\n FetchPolicy,\n BetaExtractParams,\n} from 'parallel-web/resources/beta/beta.mjs';\nimport { parallelClient } from '../client.js';\n\n/**\n * Options for creating a custom extract tool with code-supplied defaults.\n */\nexport interface CreateExtractToolOptions {\n /**\n * Include excerpts from each URL relevant to the search objective and queries.\n * Can be a boolean or ExcerptSettings object. Defaults to true.\n */\n excerpts?: boolean | ExcerptSettings;\n\n /**\n * Include full content from each URL. Can be a boolean or FullContentSettings object.\n * Defaults to false.\n */\n full_content?: BetaExtractParams['full_content'];\n\n /**\n * Fetch policy for controlling cached vs fresh content.\n */\n fetch_policy?: FetchPolicy | null;\n\n /**\n * Custom tool description. If not provided, uses the default description.\n */\n description?: string;\n}\n\nconst urlsDescription = `List of URLs to extract content from. Must be valid HTTP/HTTPS URLs. 
Maximum 10 URLs per request.`;\n\nconst objectiveDescription = `Natural-language description of what information you're looking for from the URLs.`;\n\n/**\n * Extract tool that mirrors the MCP web_fetch tool.\n * Takes urls and optional objective, returns raw extract response.\n */\nexport const extractTool = tool({\n description: `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`,\n inputSchema: z.object({\n urls: z.array(z.string()).describe(urlsDescription),\n objective: z.string().optional().describe(objectiveDescription),\n }),\n\n execute: async function (\n { urls, objective }: { urls: string[]; objective?: string },\n { abortSignal }: { abortSignal?: AbortSignal }\n ) {\n return await parallelClient.beta.extract(\n {\n urls,\n objective,\n },\n {\n signal: abortSignal,\n }\n );\n },\n});\n\nconst defaultExtractDescription = `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`;\n\n/**\n * Factory function to create an extract tool with custom defaults.\n *\n * Use this when you want to set defaults for excerpts, full_content, or\n * fetch_policy in your code, so the LLM only needs to provide urls and objective.\n *\n * @example\n * ```ts\n * const myExtractTool = createExtractTool({\n * excerpts: { max_chars_per_result: 5000 },\n * full_content: true,\n * });\n * ```\n */\nexport function createExtractTool(options: CreateExtractToolOptions = {}) {\n const {\n excerpts,\n full_content,\n fetch_policy,\n description = defaultExtractDescription,\n } = options;\n\n return tool({\n description,\n inputSchema: z.object({\n urls: z.array(z.string()).describe(urlsDescription),\n objective: z.string().optional().describe(objectiveDescription),\n }),\n\n execute: async function (\n { urls, objective }: { urls: string[]; objective?: string },\n { abortSignal }: { abortSignal?: AbortSignal }\n ) {\n return await parallelClient.beta.extract(\n {\n urls,\n objective,\n excerpts,\n full_content,\n fetch_policy,\n },\n {\n signal: abortSignal,\n }\n );\n },\n });\n}\n"]}
package/dist/index.d.cts
CHANGED
@@ -1,38 +1,117 @@
 import * as ai from 'ai';
 import * as parallel_web_resources_beta_beta_mjs from 'parallel-web/resources/beta/beta.mjs';
+import { ExcerptSettings, FetchPolicy, BetaExtractParams } from 'parallel-web/resources/beta/beta.mjs';
+import { SourcePolicy } from 'parallel-web/resources/shared.mjs';
 
 /**
- *
+ * Options for creating a custom search tool with code-supplied defaults.
+ */
+interface CreateSearchToolOptions {
+    /**
+     * Default mode for search. 'agentic' returns concise, token-efficient results
+     * for multi-step workflows. 'one-shot' returns comprehensive results with
+     * longer excerpts. Defaults to 'agentic'.
+     */
+    mode?: 'agentic' | 'one-shot';
+    /**
+     * Maximum number of search results to return. Defaults to 10.
+     */
+    max_results?: number;
+    /**
+     * Excerpt settings for controlling excerpt length.
+     */
+    excerpts?: ExcerptSettings;
+    /**
+     * Source policy for controlling which domains to include/exclude and freshness.
+     */
+    source_policy?: SourcePolicy | null;
+    /**
+     * Fetch policy for controlling cached vs fresh content.
+     */
+    fetch_policy?: FetchPolicy | null;
+    /**
+     * Custom tool description. If not provided, uses the default description.
+     */
+    description?: string;
+}
+/**
+ * Search tool that mirrors the MCP web_search_preview tool.
+ * Takes objective and optional search_queries/mode, returns raw search response.
 */
 declare const searchTool: ai.Tool<{
     objective: string;
-
+    mode: "agentic" | "one-shot";
     search_queries?: string[] | undefined;
-
-
-
-
-
-
-
-
-
-
+}, parallel_web_resources_beta_beta_mjs.SearchResult>;
+/**
+ * Factory function to create a search tool with custom defaults.
+ *
+ * Use this when you want to set defaults for mode, max_results, excerpts,
+ * source_policy, or fetch_policy in your code, so the LLM only needs to
+ * provide objective and search_queries.
+ *
+ * @example
+ * ```ts
+ * const mySearchTool = createSearchTool({
+ *   mode: 'one-shot',
+ *   max_results: 5,
+ *   excerpts: { max_chars_per_result: 5000 },
+ * });
+ * ```
+ */
+declare function createSearchTool(options?: CreateSearchToolOptions): ai.Tool<{
+    objective: string;
+    search_queries?: string[] | undefined;
+}, parallel_web_resources_beta_beta_mjs.SearchResult>;
 
 /**
- *
+ * Options for creating a custom extract tool with code-supplied defaults.
+ */
+interface CreateExtractToolOptions {
+    /**
+     * Include excerpts from each URL relevant to the search objective and queries.
+     * Can be a boolean or ExcerptSettings object. Defaults to true.
+     */
+    excerpts?: boolean | ExcerptSettings;
+    /**
+     * Include full content from each URL. Can be a boolean or FullContentSettings object.
+     * Defaults to false.
+     */
+    full_content?: BetaExtractParams['full_content'];
+    /**
+     * Fetch policy for controlling cached vs fresh content.
+     */
+    fetch_policy?: FetchPolicy | null;
+    /**
+     * Custom tool description. If not provided, uses the default description.
+     */
+    description?: string;
+}
+/**
+ * Extract tool that mirrors the MCP web_fetch tool.
+ * Takes urls and optional objective, returns raw extract response.
 */
 declare const extractTool: ai.Tool<{
-    objective: string;
     urls: string[];
-
-},
-
-
-
-
-
-
-
+    objective?: string | undefined;
+}, parallel_web_resources_beta_beta_mjs.ExtractResponse>;
+/**
+ * Factory function to create an extract tool with custom defaults.
+ *
+ * Use this when you want to set defaults for excerpts, full_content, or
+ * fetch_policy in your code, so the LLM only needs to provide urls and objective.
+ *
+ * @example
+ * ```ts
+ * const myExtractTool = createExtractTool({
+ *   excerpts: { max_chars_per_result: 5000 },
+ *   full_content: true,
+ * });
+ * ```
+ */
+declare function createExtractTool(options?: CreateExtractToolOptions): ai.Tool<{
+    urls: string[];
+    objective?: string | undefined;
+}, parallel_web_resources_beta_beta_mjs.ExtractResponse>;
 
-export { extractTool, searchTool };
+export { type CreateExtractToolOptions, type CreateSearchToolOptions, createExtractTool, createSearchTool, extractTool, searchTool };
package/dist/index.d.ts
CHANGED
@@ -1,38 +1,117 @@
 import * as ai from 'ai';
 import * as parallel_web_resources_beta_beta_mjs from 'parallel-web/resources/beta/beta.mjs';
+import { ExcerptSettings, FetchPolicy, BetaExtractParams } from 'parallel-web/resources/beta/beta.mjs';
+import { SourcePolicy } from 'parallel-web/resources/shared.mjs';
 
 /**
- *
+ * Options for creating a custom search tool with code-supplied defaults.
+ */
+interface CreateSearchToolOptions {
+    /**
+     * Default mode for search. 'agentic' returns concise, token-efficient results
+     * for multi-step workflows. 'one-shot' returns comprehensive results with
+     * longer excerpts. Defaults to 'agentic'.
+     */
+    mode?: 'agentic' | 'one-shot';
+    /**
+     * Maximum number of search results to return. Defaults to 10.
+     */
+    max_results?: number;
+    /**
+     * Excerpt settings for controlling excerpt length.
+     */
+    excerpts?: ExcerptSettings;
+    /**
+     * Source policy for controlling which domains to include/exclude and freshness.
+     */
+    source_policy?: SourcePolicy | null;
+    /**
+     * Fetch policy for controlling cached vs fresh content.
+     */
+    fetch_policy?: FetchPolicy | null;
+    /**
+     * Custom tool description. If not provided, uses the default description.
+     */
+    description?: string;
+}
+/**
+ * Search tool that mirrors the MCP web_search_preview tool.
+ * Takes objective and optional search_queries/mode, returns raw search response.
 */
 declare const searchTool: ai.Tool<{
     objective: string;
-
+    mode: "agentic" | "one-shot";
     search_queries?: string[] | undefined;
-
-
-
-
-
-
-
-
-
-
+}, parallel_web_resources_beta_beta_mjs.SearchResult>;
+/**
+ * Factory function to create a search tool with custom defaults.
+ *
+ * Use this when you want to set defaults for mode, max_results, excerpts,
+ * source_policy, or fetch_policy in your code, so the LLM only needs to
+ * provide objective and search_queries.
+ *
+ * @example
+ * ```ts
+ * const mySearchTool = createSearchTool({
+ *   mode: 'one-shot',
+ *   max_results: 5,
+ *   excerpts: { max_chars_per_result: 5000 },
+ * });
+ * ```
+ */
+declare function createSearchTool(options?: CreateSearchToolOptions): ai.Tool<{
+    objective: string;
+    search_queries?: string[] | undefined;
+}, parallel_web_resources_beta_beta_mjs.SearchResult>;
 
 /**
- *
+ * Options for creating a custom extract tool with code-supplied defaults.
+ */
+interface CreateExtractToolOptions {
+    /**
+     * Include excerpts from each URL relevant to the search objective and queries.
+     * Can be a boolean or ExcerptSettings object. Defaults to true.
+     */
+    excerpts?: boolean | ExcerptSettings;
+    /**
+     * Include full content from each URL. Can be a boolean or FullContentSettings object.
+     * Defaults to false.
+     */
+    full_content?: BetaExtractParams['full_content'];
+    /**
+     * Fetch policy for controlling cached vs fresh content.
+     */
+    fetch_policy?: FetchPolicy | null;
+    /**
+     * Custom tool description. If not provided, uses the default description.
+     */
+    description?: string;
+}
+/**
+ * Extract tool that mirrors the MCP web_fetch tool.
+ * Takes urls and optional objective, returns raw extract response.
 */
 declare const extractTool: ai.Tool<{
-    objective: string;
     urls: string[];
-
-},
-
-
-
-
-
-
-
+    objective?: string | undefined;
+}, parallel_web_resources_beta_beta_mjs.ExtractResponse>;
+/**
+ * Factory function to create an extract tool with custom defaults.
+ *
+ * Use this when you want to set defaults for excerpts, full_content, or
+ * fetch_policy in your code, so the LLM only needs to provide urls and objective.
+ *
+ * @example
+ * ```ts
+ * const myExtractTool = createExtractTool({
+ *   excerpts: { max_chars_per_result: 5000 },
+ *   full_content: true,
+ * });
+ * ```
+ */
+declare function createExtractTool(options?: CreateExtractToolOptions): ai.Tool<{
+    urls: string[];
+    objective?: string | undefined;
+}, parallel_web_resources_beta_beta_mjs.ExtractResponse>;
 
-export { extractTool, searchTool };
+export { type CreateExtractToolOptions, type CreateSearchToolOptions, createExtractTool, createSearchTool, extractTool, searchTool };
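Because both tools now return the raw Parallel responses (see the Response Format section in the README diff), code that previously read `result.answer.results` reads `result.results` directly. A small, hypothetical helper illustrating that; the field names follow the documented Search Response shape:

```typescript
// Hypothetical helper: flatten a v0.2.0 search response into prompt-ready text.
// Field names (search_id, results, url, title, excerpts) follow the Search
// Response shape documented in the README diff above.
interface SearchResponseLike {
  search_id: string;
  results: Array<{ url: string; title?: string; excerpts: string[] }>;
}

export function formatSearchResults(response: SearchResponseLike): string {
  return response.results
    .map((r, i) => `[${i + 1}] ${r.title ?? r.url}\n${r.url}\n${r.excerpts.join('\n')}`)
    .join('\n\n');
}
```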
package/dist/index.js
CHANGED
@@ -8,7 +8,10 @@ var parallelClient = new Proxy({}, {
   get(_target, prop) {
     if (!_parallelClient) {
       _parallelClient = new Parallel({
-        apiKey: process.env["PARALLEL_API_KEY"]
+        apiKey: process.env["PARALLEL_API_KEY"],
+        defaultHeaders: {
+          "X-Tool-Calling-Package": `npm:@parallel-web/ai-sdk-tools/v${"0.2.0"}`
+        }
       });
     }
     return _parallelClient[prop];
@@ -16,90 +19,70 @@ var parallelClient = new Proxy({}, {
 });

 // src/tools/search.ts
-    case "general":
-      return { max_results: 10, max_chars_per_result: 9e3 };
-    case "single_page":
-      return { max_results: 2, max_chars_per_result: 3e4 };
-    case "list":
-    default:
-      return { max_results: 20, max_chars_per_result: 1500 };
-  }
-}
-var search = async (searchArgs, { abortSignal }) => {
-  return await parallelClient.beta.search(
-    {
-      ...searchArgs
-    },
-    {
-      signal: abortSignal,
-      headers: { "parallel-beta": "search-extract-2025-10-10" }
-    }
-  );
-};
+var objectiveDescription = `Natural-language description of what the web search is trying to find.
+Try to make the search objective atomic, looking for a specific piece of information. May include guidance about preferred sources or freshness.`;
+var searchQueriesDescription = `(optional) List of keyword search queries of 1-6 words, which may include search operators. The search queries should be related to the objective. Limited to 5 entries of 200 characters each.`;
+var modeDescription = `Presets default values for different use cases. "one-shot" returns more comprehensive results and longer excerpts to answer questions from a single response, while "agentic" returns more concise, token-efficient results for use in an agentic loop. Defaults to "agentic".`;
 var searchTool = tool({
-  description: `
-web_search_parallel tool returns ranked, extended web excerpts optimized for LLMs.
-Intelligently scale the number of web_search_parallel tool calls to get more information
-when needed, from a single call for simple factual questions to five or more calls for
-complex research questions.
-* Keep queries concise - 1-6 words for best results. Start broad with very short
-  queries and medium context, then add words to narrow results or use high context
-  if needed.
-* Include broader context about what the search is trying to accomplish in the
-  \`objective\` field. This helps the search engine understand the user's intent and
-  provide relevant results and excerpts.
-* Never repeat similar search queries - make every query unique. If initial results are
-  insufficient, reformulate queries to obtain new and better results.
+  description: `Purpose: Perform web searches and return results in an LLM-friendly format.

-- For simple queries, a one-shot call to depth is usually sufficient.
-- For complex multi-hop queries, first try to use breadth to narrow down sources. Then
-use other search types with include_domains to get more detailed results.`,
+Use the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`,
   inputSchema: z.object({
-    objective: z.string().describe(
-      freshness guidance here. Limit to 200 characters. This should reflect the end goal so
-      that the tool can better understand the intent and return the best results. Do not
-      dump long texts.`
-    ),
-    search_type: z.enum(["list", "general", "single_page", "targeted"]).describe(
-      `Can be "list", "general", "single_page" or "targeted".
-      "list" should be used for searching for data broadly, like aggregating data or
-      considering multiple sources or doing broad initial research. "targeted" should be
-      used for searching for data from a specific source set. "general" is a catch all case
-      if there is no specific use case from list or targeted. "single_page" extracts data
-      from a single page - extremely targeted. If there is a specific webpage you want the
-      data from, use "single_page" and mention the URL in the objective.
-      Use search_type appropriately.`
-    ).optional().default("list"),
-    search_queries: z.array(z.string()).optional().describe(
-      `(optional) List of keyword search queries of 1-6
-      words, which may include search operators. The search queries should be related to the
-      objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are
-      ideal.`
-    ),
-    include_domains: z.array(z.string()).optional().describe(`(optional) List of valid URL domains to explicitly
-      focus on for the search. This will restrict all search results to only include results
-      from the provided list. This is useful when you want to only use a specific set of
-      sources. example: ["google.com", "wikipedia.org"]. Maximum 10 entries.`)
+    objective: z.string().describe(objectiveDescription),
+    search_queries: z.array(z.string()).optional().describe(searchQueriesDescription),
+    mode: z.enum(["agentic", "one-shot"]).optional().default("agentic").describe(modeDescription)
   }),
-  execute: async function({
-    {
+  execute: async function({ objective, search_queries, mode }, { abortSignal }) {
+    return await parallelClient.beta.search(
+      {
+        objective,
+        search_queries,
+        mode
+      },
+      {
+        signal: abortSignal
+      }
     );
-    return {
-      searchParams: args,
-      answer: results
-    };
   }
 });
+var defaultSearchDescription = `Purpose: Perform web searches and return results in an LLM-friendly format.
+
+Use the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`;
+function createSearchTool(options = {}) {
+  const {
+    mode: defaultMode = "agentic",
+    max_results,
+    excerpts,
+    source_policy,
+    fetch_policy,
+    description = defaultSearchDescription
+  } = options;
+  return tool({
+    description,
+    inputSchema: z.object({
+      objective: z.string().describe(objectiveDescription),
+      search_queries: z.array(z.string()).optional().describe(searchQueriesDescription)
+    }),
+    execute: async function({ objective, search_queries }, { abortSignal }) {
+      return await parallelClient.beta.search(
+        {
+          objective,
+          search_queries,
+          mode: defaultMode,
+          max_results,
+          excerpts,
+          source_policy,
+          fetch_policy
+        },
+        {
+          signal: abortSignal
+        }
+      );
+    }
+  });
+}
+var urlsDescription = `List of URLs to extract content from. Must be valid HTTP/HTTPS URLs. Maximum 10 URLs per request.`;
+var objectiveDescription2 = `Natural-language description of what information you're looking for from the URLs.`;
 var extractTool = tool({
   description: `Purpose: Fetch and extract relevant content from specific web URLs.

@@ -107,36 +90,56 @@ Ideal Use Cases:
 - Extracting content from specific URLs you've already identified
 - Exploring URLs returned by a web search in greater depth`,
   inputSchema: z.object({
-      Limit to 200 characters.`
-    ),
-    urls: z.array(z.string()).describe(
-      `List of URLs to extract content from. Must be valid
-HTTP/HTTPS URLs. Maximum 10 URLs per request.`
-    ),
-    search_queries: z.array(z.string()).optional().describe(
-      `(optional) List of keyword search queries of 1-6
-      words, which may include search operators. The search queries should be related to the
-      objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are
-      ideal.`
-    )
+    urls: z.array(z.string()).describe(urlsDescription),
+    objective: z.string().optional().describe(objectiveDescription2)
   }),
-  execute: async function({
-      { ...args },
+  execute: async function({ urls, objective }, { abortSignal }) {
+    return await parallelClient.beta.extract(
       {
+        urls,
+        objective
+      },
+      {
+        signal: abortSignal
       }
     );
-    return {
-      searchParams: args,
-      answer: results
-    };
   }
 });
+var defaultExtractDescription = `Purpose: Fetch and extract relevant content from specific web URLs.
+
+Ideal Use Cases:
+- Extracting content from specific URLs you've already identified
+- Exploring URLs returned by a web search in greater depth`;
+function createExtractTool(options = {}) {
+  const {
+    excerpts,
+    full_content,
+    fetch_policy,
+    description = defaultExtractDescription
+  } = options;
+  return tool({
+    description,
+    inputSchema: z.object({
+      urls: z.array(z.string()).describe(urlsDescription),
+      objective: z.string().optional().describe(objectiveDescription2)
+    }),
+    execute: async function({ urls, objective }, { abortSignal }) {
+      return await parallelClient.beta.extract(
+        {
+          urls,
+          objective,
+          excerpts,
+          full_content,
+          fetch_policy
+        },
+        {
+          signal: abortSignal
+        }
+      );
+    }
+  });
+}

-export { extractTool, searchTool };
+export { createExtractTool, createSearchTool, extractTool, searchTool };
 //# sourceMappingURL=index.js.map
 //# sourceMappingURL=index.js.map
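Note that the compiled output above removes the 0.1.5 input fields `search_type` and `include_domains`, along with their result-size presets, from the LLM-facing schema; comparable behaviour now has to be supplied as code-side defaults through `createSearchTool`. A rough migration sketch, where the numbers mirror the removed "targeted" preset and the `include_domains` field on `source_policy` is an assumption not confirmed by this diff:

```typescript
import { createSearchTool } from '@parallel-web/ai-sdk-tools';

// Roughly reproduces the removed search_type: "targeted" preset (few results, long excerpts).
const targetedSearchTool = createSearchTool({
  max_results: 5,
  excerpts: { max_chars_per_result: 16000 },
  // Assumed field name; check the parallel-web SourcePolicy type before relying on it.
  source_policy: { include_domains: ['wikipedia.org'] },
});
```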
package/dist/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/client.ts","../src/tools/search.ts","../src/tools/extract.ts"],"names":["tool","z"],"mappings":";;;;;AAMA,IAAI,eAAA,GAAmC,IAAA;AAEhC,IAAM,cAAA,GAAiB,IAAI,KAAA,CAAM,EAAC,EAAe;AAAA,EACtD,GAAA,CAAI,SAAS,IAAA,EAAM;AACjB,IAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,MAAA,eAAA,GAAkB,IAAI,QAAA,CAAS;AAAA,QAC7B,MAAA,EAAQ,OAAA,CAAQ,GAAA,CAAI,kBAAkB;AAAA,OACvC,CAAA;AAAA,IACH;AACA,IAAA,OAAQ,gBAAwB,IAAI,CAAA;AAAA,EACtC;AACF,CAAC,CAAA;;;ACRD,SAAS,gBACP,WAAA,EACgE;AAChE,EAAA,QAAQ,WAAA;AAAa,IACnB,KAAK,UAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,CAAA,EAAG,oBAAA,EAAsB,IAAA,EAAM;AAAA,IACvD,KAAK,SAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,EAAA,EAAI,oBAAA,EAAsB,GAAA,EAAK;AAAA,IACvD,KAAK,aAAA;AACH,MAAA,OAAO,EAAE,WAAA,EAAa,CAAA,EAAG,oBAAA,EAAsB,GAAA,EAAM;AAAA,IACvD,KAAK,MAAA;AAAA,IACL;AACE,MAAA,OAAO,EAAE,WAAA,EAAa,EAAA,EAAI,oBAAA,EAAsB,IAAA,EAAK;AAAA;AAE3D;AAEA,IAAM,MAAA,GAAS,OACb,UAAA,EACA,EAAE,aAAY,KACX;AACH,EAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,IAC/B;AAAA,MACE,GAAG;AAAA,KACL;AAAA,IACA;AAAA,MACE,MAAA,EAAQ,WAAA;AAAA,MACR,OAAA,EAAS,EAAE,eAAA,EAAiB,2BAAA;AAA4B;AAC1D,GACF;AACF,CAAA;AAEO,IAAM,aAAa,IAAA,CAAK;AAAA,EAC7B,WAAA,EAAa,CAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;AAAA;AAAA;AAAA;AAAA,yEAAA,CAAA;AAAA,EAmBb,WAAA,EAAa,EAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAW,CAAA,CAAE,MAAA,EAAO,CAAE,QAAA;AAAA,MACpB,CAAA;AAAA;AAAA;AAAA;AAAA,iBAAA;AAAA,KAKF;AAAA,IACA,WAAA,EAAa,EACV,IAAA,CAAK,CAAC,QAAQ,SAAA,EAAW,aAAA,EAAe,UAAU,CAAC,CAAA,CACnD,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,+BAAA;AAAA,KAQF,CACC,QAAA,EAAS,CACT,OAAA,CAAQ,MAAM,CAAA;AAAA,IACjB,cAAA,EAAgB,EACb,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAChB,UAAS,CACT,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA,OAAA;AAAA,KAIF;AAAA,IACF,eAAA,EAAiB,EAAE,KAAA,CAAM,CAAA,CAAE,QAAQ,CAAA,CAAE,QAAA,EAAS,CAC3C,QAAA,CAAS,CAAA;AAAA;AAAA;AAAA,uEAAA,CAGwD;AAAA,GACrE,CAAA;AAAA,EAED,OAAA,EAAS,eAAgB,EAAE,GAAG,MAAK,EAAG,EAAE,aAAY,EAAG;AACrD,IAAA,MAAM,UAAU,MAAM,MAAA;AAAA,MACpB,EAAE,GAAG,IAAA,EAAM,GAAG,eAAA,CAAgB,IAAA,CAAK,WAAW,CAAA,EAAE;AAAA,MAChD,EAAE,WAAA;AAAY,KAChB;AAEA,IAAA,OAAO;AAAA,MACL,YAAA,EAAc,IAAA;AAAA,MACd,MAAA,EAAQ;AAAA,KACV;AAAA,EACF;AACF,CAAC;ACrGM,IAAM,cAAcA,IAAAA,CAAK;AAAA,EAC9B,WAAA,EAAa,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAAA,EAKb,WAAA,EAAaC,EAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAWA,CAAAA,CAAE,MAAA,EAAO,CAAE,QAAA;AAAA,MACpB,CAAA;AAAA,yBAAA;AAAA,KAEF;AAAA,IAEA,MAAMA,CAAAA,CAAE,KAAA,CAAMA,CAAAA,CAAE,MAAA,EAAQ,CAAA,CAAE,QAAA;AAAA,MACxB,CAAA;AAAA,6CAAA;AAAA,KAEF;AAAA,IACA,cAAA,EAAgBA,EACb,KAAA,CAAMA,CAAAA,CAAE,QAAQ,CAAA,CAChB,UAAS,CACT,QAAA;AAAA,MACC,CAAA;AAAA;AAAA;AAAA,OAAA;AAAA;AAIF,GACH,CAAA;AAAA,EAED,OAAA,EAAS,eAAgB,EAAE,GAAG,MAAK,EAAG,EAAE,aAAY,EAAG;AACrD,IAAA,MAAM,OAAA,GAAU,MAAM,cAAA,CAAe,IAAA,CAAK,OAAA;AAAA,MACxC,EAAE,GAAG,IAAA,EAAK;AAAA,MACV;AAAA,QACE,MAAA,EAAQ,WAAA;AAAA,QACR,OAAA,EAAS,EAAE,eAAA,EAAiB,2BAAA;AAA4B;AAC1D,KACF;AAEA,IAAA,OAAO;AAAA,MACL,YAAA,EAAc,IAAA;AAAA,MACd,MAAA,EAAQ;AAAA,KACV;AAAA,EACF;AACF,CAAC","file":"index.js","sourcesContent":["/**\n * Shared Parallel Web client instance\n */\n\nimport { Parallel } from 'parallel-web';\n\nlet _parallelClient: Parallel | null = null;\n\nexport const parallelClient = new Proxy({} as Parallel, {\n get(_target, prop) {\n if (!_parallelClient) {\n _parallelClient = new Parallel({\n apiKey: process.env['PARALLEL_API_KEY'],\n });\n }\n return (_parallelClient as any)[prop];\n },\n});\n","/**\n * Search tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport { BetaSearchParams } from 'parallel-web/resources/beta/beta.mjs';\nimport { parallelClient } from '../client.js';\n\nfunction 
getSearchParams(\n search_type: 'list' | 'targeted' | 'general' | 'single_page'\n): Pick<BetaSearchParams, 'max_results' | 'max_chars_per_result'> {\n switch (search_type) {\n case 'targeted':\n return { max_results: 5, max_chars_per_result: 16000 };\n case 'general':\n return { max_results: 10, max_chars_per_result: 9000 };\n case 'single_page':\n return { max_results: 2, max_chars_per_result: 30000 };\n case 'list':\n default:\n return { max_results: 20, max_chars_per_result: 1500 };\n }\n}\n\nconst search = async (\n searchArgs: BetaSearchParams,\n { abortSignal }: { abortSignal: AbortSignal | undefined }\n) => {\n return await parallelClient.beta.search(\n {\n ...searchArgs,\n },\n {\n signal: abortSignal,\n headers: { 'parallel-beta': 'search-extract-2025-10-10' },\n }\n );\n};\n\nexport const searchTool = tool({\n description: `Use the web_search_parallel tool to access information from the web. The\nweb_search_parallel tool returns ranked, extended web excerpts optimized for LLMs.\nIntelligently scale the number of web_search_parallel tool calls to get more information\nwhen needed, from a single call for simple factual questions to five or more calls for\ncomplex research questions.\n\n* Keep queries concise - 1-6 words for best results. Start broad with very short\n queries and medium context, then add words to narrow results or use high context\n if needed.\n* Include broader context about what the search is trying to accomplish in the\n \\`objective\\` field. This helps the search engine understand the user's intent and\n provide relevant results and excerpts.\n* Never repeat similar search queries - make every query unique. If initial results are\n insufficient, reformulate queries to obtain new and better results.\n\nHow to use:\n- For simple queries, a one-shot call to depth is usually sufficient.\n- For complex multi-hop queries, first try to use breadth to narrow down sources. Then\nuse other search types with include_domains to get more detailed results.`,\n inputSchema: z.object({\n objective: z.string().describe(\n `Natural-language description of what the web research goal\n is. Specify the broad intent of the search query here. Also include any source or\n freshness guidance here. Limit to 200 characters. This should reflect the end goal so\n that the tool can better understand the intent and return the best results. Do not\n dump long texts.`\n ),\n search_type: z\n .enum(['list', 'general', 'single_page', 'targeted'])\n .describe(\n `Can be \"list\", \"general\", \"single_page\" or \"targeted\".\n \"list\" should be used for searching for data broadly, like aggregating data or\n considering multiple sources or doing broad initial research. \"targeted\" should be\n used for searching for data from a specific source set. \"general\" is a catch all case\n if there is no specific use case from list or targeted. \"single_page\" extracts data\n from a single page - extremely targeted. If there is a specific webpage you want the\n data from, use \"single_page\" and mention the URL in the objective.\n Use search_type appropriately.`\n )\n .optional()\n .default('list'),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(\n `(optional) List of keyword search queries of 1-6\n words, which may include search operators. The search queries should be related to the\n objective. Limited to 5 entries of 200 characters each. 
Usually 1-3 queries are\n ideal.`\n ),\n include_domains: z.array(z.string()).optional()\n .describe(`(optional) List of valid URL domains to explicitly\n focus on for the search. This will restrict all search results to only include results\n from the provided list. This is useful when you want to only use a specific set of\n sources. example: [\"google.com\", \"wikipedia.org\"]. Maximum 10 entries.`),\n }),\n\n execute: async function ({ ...args }, { abortSignal }) {\n const results = await search(\n { ...args, ...getSearchParams(args.search_type) },\n { abortSignal }\n );\n\n return {\n searchParams: args,\n answer: results,\n };\n },\n});\n","/**\n * Extract tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport { parallelClient } from '../client.js';\n\nexport const extractTool = tool({\n description: `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`,\n inputSchema: z.object({\n objective: z.string().describe(\n `Natural-language description of what information you're looking for from the URLs. \n Limit to 200 characters.`\n ),\n\n urls: z.array(z.string()).describe(\n `List of URLs to extract content from. Must be valid\nHTTP/HTTPS URLs. Maximum 10 URLs per request.`\n ),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(\n `(optional) List of keyword search queries of 1-6\n words, which may include search operators. The search queries should be related to the\n objective. Limited to 5 entries of 200 characters each. Usually 1-3 queries are\n ideal.`\n ),\n }),\n\n execute: async function ({ ...args }, { abortSignal }) {\n const results = await parallelClient.beta.extract(\n { ...args },\n {\n signal: abortSignal,\n headers: { 'parallel-beta': 'search-extract-2025-10-10' },\n }\n );\n\n return {\n searchParams: args,\n answer: results,\n };\n },\n});\n"]}
+
{"version":3,"sources":["../src/client.ts","../src/tools/search.ts","../src/tools/extract.ts"],"names":["objectiveDescription","tool","z"],"mappings":";;;;;AAQA,IAAI,eAAA,GAAmC,IAAA;AAEhC,IAAM,cAAA,GAAiB,IAAI,KAAA,CAAM,EAAC,EAAe;AAAA,EACtD,GAAA,CAAI,SAAS,IAAA,EAAsB;AACjC,IAAA,IAAI,CAAC,eAAA,EAAiB;AACpB,MAAA,eAAA,GAAkB,IAAI,QAAA,CAAS;AAAA,QAC7B,MAAA,EAAQ,OAAA,CAAQ,GAAA,CAAI,kBAAkB,CAAA;AAAA,QACtC,cAAA,EAAgB;AAAA,UACd,wBAAA,EAA0B,mCAAmC,OAA8B,CAAA;AAAA;AAC7F,OACD,CAAA;AAAA,IACH;AACA,IAAA,OAAO,gBAAgB,IAAI,CAAA;AAAA,EAC7B;AACF,CAAC,CAAA;;;AC4BD,IAAM,oBAAA,GAAuB,CAAA;AAAA,gJAAA,CAAA;AAG7B,IAAM,wBAAA,GAA2B,CAAA,+LAAA,CAAA;AAEjC,IAAM,eAAA,GAAkB,CAAA,8QAAA,CAAA;AAMjB,IAAM,aAAa,IAAA,CAAK;AAAA,EAC7B,WAAA,EAAa,CAAA;;AAAA,iJAAA,CAAA;AAAA,EAGb,WAAA,EAAa,EAAE,MAAA,CAAO;AAAA,IACpB,SAAA,EAAW,CAAA,CAAE,MAAA,EAAO,CAAE,SAAS,oBAAoB,CAAA;AAAA,IACnD,cAAA,EAAgB,CAAA,CACb,KAAA,CAAM,CAAA,CAAE,MAAA,EAAQ,CAAA,CAChB,QAAA,EAAS,CACT,QAAA,CAAS,wBAAwB,CAAA;AAAA,IACpC,IAAA,EAAM,CAAA,CACH,IAAA,CAAK,CAAC,WAAW,UAAU,CAAC,CAAA,CAC5B,QAAA,EAAS,CACT,OAAA,CAAQ,SAAS,CAAA,CACjB,SAAS,eAAe;AAAA,GAC5B,CAAA;AAAA,EAED,OAAA,EAAS,eACP,EAAE,SAAA,EAAW,gBAAgB,IAAA,EAAK,EAClC,EAAE,WAAA,EAAY,EACd;AACA,IAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,MAC/B;AAAA,QACE,SAAA;AAAA,QACA,cAAA;AAAA,QACA;AAAA,OACF;AAAA,MACA;AAAA,QACE,MAAA,EAAQ;AAAA;AACV,KACF;AAAA,EACF;AACF,CAAC;AAED,IAAM,wBAAA,GAA2B,CAAA;;AAAA,iJAAA,CAAA;AAoB1B,SAAS,gBAAA,CAAiB,OAAA,GAAmC,EAAC,EAAG;AACtE,EAAA,MAAM;AAAA,IACJ,MAAM,WAAA,GAAc,SAAA;AAAA,IACpB,WAAA;AAAA,IACA,QAAA;AAAA,IACA,aAAA;AAAA,IACA,YAAA;AAAA,IACA,WAAA,GAAc;AAAA,GAChB,GAAI,OAAA;AAEJ,EAAA,OAAO,IAAA,CAAK;AAAA,IACV,WAAA;AAAA,IACA,WAAA,EAAa,EAAE,MAAA,CAAO;AAAA,MACpB,SAAA,EAAW,CAAA,CAAE,MAAA,EAAO,CAAE,SAAS,oBAAoB,CAAA;AAAA,MACnD,cAAA,EAAgB,CAAA,CACb,KAAA,CAAM,CAAA,CAAE,MAAA,EAAQ,CAAA,CAChB,QAAA,EAAS,CACT,QAAA,CAAS,wBAAwB;AAAA,KACrC,CAAA;AAAA,IAED,OAAA,EAAS,eAAgB,EAAE,SAAA,EAAW,gBAAe,EAAG,EAAE,aAAY,EAAG;AACvE,MAAA,OAAO,MAAM,eAAe,IAAA,CAAK,MAAA;AAAA,QAC/B;AAAA,UACE,SAAA;AAAA,UACA,cAAA;AAAA,UACA,IAAA,EAAM,WAAA;AAAA,UACN,WAAA;AAAA,UACA,QAAA;AAAA,UACA,aAAA;AAAA,UACA;AAAA,SACF;AAAA,QACA;AAAA,UACE,MAAA,EAAQ;AAAA;AACV,OACF;AAAA,IACF;AAAA,GACD,CAAA;AACH;AChHA,IAAM,eAAA,GAAkB,CAAA,iGAAA,CAAA;AAExB,IAAMA,qBAAAA,GAAuB,CAAA,kFAAA,CAAA;AAMtB,IAAM,cAAcC,IAAAA,CAAK;AAAA,EAC9B,WAAA,EAAa,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAAA,EAKb,WAAA,EAAaC,EAAE,MAAA,CAAO;AAAA,IACpB,IAAA,EAAMA,EAAE,KAAA,CAAMA,CAAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,eAAe,CAAA;AAAA,IAClD,WAAWA,CAAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAASF,qBAAoB;AAAA,GAC/D,CAAA;AAAA,EAED,OAAA,EAAS,eACP,EAAE,IAAA,EAAM,WAAU,EAClB,EAAE,aAAY,EACd;AACA,IAAA,OAAO,MAAM,eAAe,IAAA,CAAK,OAAA;AAAA,MAC/B;AAAA,QACE,IAAA;AAAA,QACA;AAAA,OACF;AAAA,MACA;AAAA,QACE,MAAA,EAAQ;AAAA;AACV,KACF;AAAA,EACF;AACF,CAAC;AAED,IAAM,yBAAA,GAA4B,CAAA;;AAAA;AAAA;AAAA,0DAAA,CAAA;AAoB3B,SAAS,iBAAA,CAAkB,OAAA,GAAoC,EAAC,EAAG;AACxE,EAAA,MAAM;AAAA,IACJ,QAAA;AAAA,IACA,YAAA;AAAA,IACA,YAAA;AAAA,IACA,WAAA,GAAc;AAAA,GAChB,GAAI,OAAA;AAEJ,EAAA,OAAOC,IAAAA,CAAK;AAAA,IACV,WAAA;AAAA,IACA,WAAA,EAAaC,EAAE,MAAA,CAAO;AAAA,MACpB,IAAA,EAAMA,EAAE,KAAA,CAAMA,CAAAA,CAAE,QAAQ,CAAA,CAAE,SAAS,eAAe,CAAA;AAAA,MAClD,WAAWA,CAAAA,CAAE,MAAA,GAAS,QAAA,EAAS,CAAE,SAASF,qBAAoB;AAAA,KAC/D,CAAA;AAAA,IAED,OAAA,EAAS,eACP,EAAE,IAAA,EAAM,WAAU,EAClB,EAAE,aAAY,EACd;AACA,MAAA,OAAO,MAAM,eAAe,IAAA,CAAK,OAAA;AAAA,QAC/B;AAAA,UACE,IAAA;AAAA,UACA,SAAA;AAAA,UACA,QAAA;AAAA,UACA,YAAA;AAAA,UACA;AAAA,SACF;AAAA,QACA;AAAA,UACE,MAAA,EAAQ;AAAA;AACV,OACF;AAAA,IACF;AAAA,GACD,CAAA;AACH","file":"index.js","sourcesContent":["/**\n * Shared Parallel Web client instance\n */\n\ndeclare const __PACKAGE_VERSION__: 
string;\n\nimport { Parallel } from 'parallel-web';\n\nlet _parallelClient: Parallel | null = null;\n\nexport const parallelClient = new Proxy({} as Parallel, {\n get(_target, prop: keyof Parallel) {\n if (!_parallelClient) {\n _parallelClient = new Parallel({\n apiKey: process.env['PARALLEL_API_KEY'],\n defaultHeaders: {\n 'X-Tool-Calling-Package': `npm:@parallel-web/ai-sdk-tools/v${__PACKAGE_VERSION__ ?? '0.0.0'}`,\n },\n });\n }\n return _parallelClient[prop];\n },\n});\n","/**\n * Search tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport type {\n ExcerptSettings,\n FetchPolicy,\n} from 'parallel-web/resources/beta/beta.mjs';\nimport type { SourcePolicy } from 'parallel-web/resources/shared.mjs';\nimport { parallelClient } from '../client.js';\n\n/**\n * Options for creating a custom search tool with code-supplied defaults.\n */\nexport interface CreateSearchToolOptions {\n /**\n * Default mode for search. 'agentic' returns concise, token-efficient results\n * for multi-step workflows. 'one-shot' returns comprehensive results with\n * longer excerpts. Defaults to 'agentic'.\n */\n mode?: 'agentic' | 'one-shot';\n\n /**\n * Maximum number of search results to return. Defaults to 10.\n */\n max_results?: number;\n\n /**\n * Excerpt settings for controlling excerpt length.\n */\n excerpts?: ExcerptSettings;\n\n /**\n * Source policy for controlling which domains to include/exclude and freshness.\n */\n source_policy?: SourcePolicy | null;\n\n /**\n * Fetch policy for controlling cached vs fresh content.\n */\n fetch_policy?: FetchPolicy | null;\n\n /**\n * Custom tool description. If not provided, uses the default description.\n */\n description?: string;\n}\n\nconst objectiveDescription = `Natural-language description of what the web search is trying to find.\nTry to make the search objective atomic, looking for a specific piece of information. May include guidance about preferred sources or freshness.`;\n\nconst searchQueriesDescription = `(optional) List of keyword search queries of 1-6 words, which may include search operators. The search queries should be related to the objective. Limited to 5 entries of 200 characters each.`;\n\nconst modeDescription = `Presets default values for different use cases. \"one-shot\" returns more comprehensive results and longer excerpts to answer questions from a single response, while \"agentic\" returns more concise, token-efficient results for use in an agentic loop. Defaults to \"agentic\".`;\n\n/**\n * Search tool that mirrors the MCP web_search_preview tool.\n * Takes objective and optional search_queries/mode, returns raw search response.\n */\nexport const searchTool = tool({\n description: `Purpose: Perform web searches and return results in an LLM-friendly format.\n\nUse the web search tool to search the web and access information from the web. 
The tool returns ranked, extended web excerpts optimized for LLMs.`,\n inputSchema: z.object({\n objective: z.string().describe(objectiveDescription),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(searchQueriesDescription),\n mode: z\n .enum(['agentic', 'one-shot'])\n .optional()\n .default('agentic')\n .describe(modeDescription),\n }),\n\n execute: async function (\n { objective, search_queries, mode },\n { abortSignal }\n ) {\n return await parallelClient.beta.search(\n {\n objective,\n search_queries,\n mode,\n },\n {\n signal: abortSignal,\n }\n );\n },\n});\n\nconst defaultSearchDescription = `Purpose: Perform web searches and return results in an LLM-friendly format.\n\nUse the web search tool to search the web and access information from the web. The tool returns ranked, extended web excerpts optimized for LLMs.`;\n\n/**\n * Factory function to create a search tool with custom defaults.\n *\n * Use this when you want to set defaults for mode, max_results, excerpts,\n * source_policy, or fetch_policy in your code, so the LLM only needs to\n * provide objective and search_queries.\n *\n * @example\n * ```ts\n * const mySearchTool = createSearchTool({\n * mode: 'one-shot',\n * max_results: 5,\n * excerpts: { max_chars_per_result: 5000 },\n * });\n * ```\n */\nexport function createSearchTool(options: CreateSearchToolOptions = {}) {\n const {\n mode: defaultMode = 'agentic',\n max_results,\n excerpts,\n source_policy,\n fetch_policy,\n description = defaultSearchDescription,\n } = options;\n\n return tool({\n description,\n inputSchema: z.object({\n objective: z.string().describe(objectiveDescription),\n search_queries: z\n .array(z.string())\n .optional()\n .describe(searchQueriesDescription),\n }),\n\n execute: async function ({ objective, search_queries }, { abortSignal }) {\n return await parallelClient.beta.search(\n {\n objective,\n search_queries,\n mode: defaultMode,\n max_results,\n excerpts,\n source_policy,\n fetch_policy,\n },\n {\n signal: abortSignal,\n }\n );\n },\n });\n}\n","/**\n * Extract tool for Parallel Web\n */\n\nimport { tool } from 'ai';\nimport { z } from 'zod';\nimport type {\n ExcerptSettings,\n FetchPolicy,\n BetaExtractParams,\n} from 'parallel-web/resources/beta/beta.mjs';\nimport { parallelClient } from '../client.js';\n\n/**\n * Options for creating a custom extract tool with code-supplied defaults.\n */\nexport interface CreateExtractToolOptions {\n /**\n * Include excerpts from each URL relevant to the search objective and queries.\n * Can be a boolean or ExcerptSettings object. Defaults to true.\n */\n excerpts?: boolean | ExcerptSettings;\n\n /**\n * Include full content from each URL. Can be a boolean or FullContentSettings object.\n * Defaults to false.\n */\n full_content?: BetaExtractParams['full_content'];\n\n /**\n * Fetch policy for controlling cached vs fresh content.\n */\n fetch_policy?: FetchPolicy | null;\n\n /**\n * Custom tool description. If not provided, uses the default description.\n */\n description?: string;\n}\n\nconst urlsDescription = `List of URLs to extract content from. Must be valid HTTP/HTTPS URLs. 
Maximum 10 URLs per request.`;\n\nconst objectiveDescription = `Natural-language description of what information you're looking for from the URLs.`;\n\n/**\n * Extract tool that mirrors the MCP web_fetch tool.\n * Takes urls and optional objective, returns raw extract response.\n */\nexport const extractTool = tool({\n description: `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`,\n inputSchema: z.object({\n urls: z.array(z.string()).describe(urlsDescription),\n objective: z.string().optional().describe(objectiveDescription),\n }),\n\n execute: async function (\n { urls, objective }: { urls: string[]; objective?: string },\n { abortSignal }: { abortSignal?: AbortSignal }\n ) {\n return await parallelClient.beta.extract(\n {\n urls,\n objective,\n },\n {\n signal: abortSignal,\n }\n );\n },\n});\n\nconst defaultExtractDescription = `Purpose: Fetch and extract relevant content from specific web URLs.\n\nIdeal Use Cases:\n- Extracting content from specific URLs you've already identified\n- Exploring URLs returned by a web search in greater depth`;\n\n/**\n * Factory function to create an extract tool with custom defaults.\n *\n * Use this when you want to set defaults for excerpts, full_content, or\n * fetch_policy in your code, so the LLM only needs to provide urls and objective.\n *\n * @example\n * ```ts\n * const myExtractTool = createExtractTool({\n * excerpts: { max_chars_per_result: 5000 },\n * full_content: true,\n * });\n * ```\n */\nexport function createExtractTool(options: CreateExtractToolOptions = {}) {\n const {\n excerpts,\n full_content,\n fetch_policy,\n description = defaultExtractDescription,\n } = options;\n\n return tool({\n description,\n inputSchema: z.object({\n urls: z.array(z.string()).describe(urlsDescription),\n objective: z.string().optional().describe(objectiveDescription),\n }),\n\n execute: async function (\n { urls, objective }: { urls: string[]; objective?: string },\n { abortSignal }: { abortSignal?: AbortSignal }\n ) {\n return await parallelClient.beta.extract(\n {\n urls,\n objective,\n excerpts,\n full_content,\n fetch_policy,\n },\n {\n signal: abortSignal,\n }\n );\n },\n });\n}\n"]}
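The new source map still refers to a `__PACKAGE_VERSION__` placeholder, while the compiled bundle carries the literal `"0.2.0"`, which implies the identifier is substituted at build time. The build configuration itself is not part of this diff; a hypothetical tsup setup that would produce that substitution:

```typescript
// tsup.config.ts (hypothetical; the package's real build config is not published here)
import { defineConfig } from 'tsup';
import pkg from './package.json';

export default defineConfig({
  entry: ['src/index.ts'],
  format: ['esm', 'cjs'],
  dts: true,
  define: {
    // esbuild replaces the bare identifier with a string literal in the dist output
    __PACKAGE_VERSION__: JSON.stringify(pkg.version),
  },
});
```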
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@parallel-web/ai-sdk-tools",
-  "version": "0.1.5",
+  "version": "0.2.0",
   "description": "AI SDK tools for Parallel Web",
   "author": "Parallel Web",
   "license": "MIT",
@@ -41,8 +41,8 @@
     "access": "public"
   },
   "dependencies": {
-    "parallel-web": "^0.
-    "zod": "^3.
+    "parallel-web": "^0.3.1",
+    "zod": "^4.3.6"
   },
   "peerDependencies": {
     "ai": "^5.0.0"