0agent 1.0.10 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/daemon.mjs +64 -0
  2. package/package.json +1 -1
package/dist/daemon.mjs CHANGED
@@ -1737,6 +1737,18 @@ var AGENT_TOOLS = [
       }
     }
   },
+  {
+    name: "web_search",
+    description: "Search the web and return titles, URLs, and snippets. No API key needed. Use this first to find relevant pages, then scrape_url for full content.",
+    input_schema: {
+      type: "object",
+      properties: {
+        query: { type: "string", description: "Search query" },
+        num_results: { type: "number", description: "Number of results (default 5, max 10)" }
+      },
+      required: ["query"]
+    }
+  },
   {
     name: "scrape_url",
     description: "Scrape a URL and return clean structured content. Handles JavaScript-rendered pages, auto-adapts to page structure, returns text/links/metadata. Better than shell curl for web pages.",
@@ -2149,6 +2161,11 @@ var AgentExecutor = class {
         return this.readFile(String(input.path ?? ""));
       case "list_dir":
         return this.listDir(input.path ? String(input.path) : void 0);
+      case "web_search":
+        return this.webSearch(
+          String(input.query ?? ""),
+          Math.min(10, Number(input.num_results ?? 5))
+        );
       case "scrape_url":
        return this.scrapeUrl(
          String(input.url ?? ""),
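The new case coerces missing or malformed inputs before calling webSearch. A standalone sketch of that normalization (not part of the package), with resulting values shown as comments:

  // Sketch of the input coercion used by the web_search case above.
  const normalize = (input) => ({
    query: String(input.query ?? ""),                          // missing query becomes ""
    numResults: Math.min(10, Number(input.num_results ?? 5))   // default 5, hard cap at 10
  });
  // normalize({ query: "node streams" })                  -> { query: "node streams", numResults: 5 }
  // normalize({ query: "node streams", num_results: 50 }) -> { query: "node streams", numResults: 10 }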
@@ -2195,6 +2212,51 @@ var AgentExecutor = class {
     return content.length > 8e3 ? content.slice(0, 8e3) + `
\u2026[truncated, ${content.length} total bytes]` : content;
   }
+  async webSearch(query, numResults) {
+    const url = `https://html.duckduckgo.com/html/?q=${encodeURIComponent(query)}&kl=us-en`;
+    let html = "";
+    try {
+      const res = await fetch(url, {
+        headers: {
+          "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36",
+          "Accept": "text/html,application/xhtml+xml",
+          "Accept-Language": "en-US,en;q=0.9"
+        },
+        signal: AbortSignal.timeout(12e3)
+      });
+      html = await res.text();
+    } catch (err) {
+      return `Search request failed: ${err instanceof Error ? err.message : String(err)}`;
+    }
+    const results = [];
+    const titleRe = /<a[^>]+class="result__a"[^>]+href="([^"]*)"[^>]*>([\s\S]*?)<\/a>/g;
+    const snippetRe = /<a[^>]+class="result__snippet"[^>]*>([\s\S]*?)<\/a>/g;
+    const titles = [];
+    const snippets = [];
+    let m;
+    while ((m = titleRe.exec(html)) !== null) {
+      let href = m[1];
+      const title = m[2].replace(/<[^>]+>/g, "").replace(/&amp;/g, "&").replace(/&lt;/g, "<").replace(/&gt;/g, ">").trim();
+      const uddg = href.match(/[?&]uddg=([^&]+)/);
+      if (uddg) href = decodeURIComponent(uddg[1]);
+      if (href.startsWith("http") && title && titles.length < numResults) {
+        titles.push({ url: href, title });
+      }
+    }
+    while ((m = snippetRe.exec(html)) !== null && snippets.length < numResults) {
+      snippets.push(m[1].replace(/<[^>]+>/g, "").replace(/\s+/g, " ").trim());
+    }
+    if (titles.length === 0) {
+      const plainText = html.replace(/<[^>]+>/g, " ").replace(/\s+/g, " ").slice(0, 1500);
+      return `No results parsed. Raw content:
+${plainText}`;
+    }
+    return titles.map(
+      (t, i) => `${i + 1}. ${t.title}
+URL: ${t.url}${snippets[i] ? `
+${snippets[i]}` : ""}`
+    ).join("\n\n");
+  }
   async scrapeUrl(url, mode, selector, waitMs) {
     if (!url.startsWith("http")) return "Error: URL must start with http:// or https://";
     const selectorLine = selector ? `element = page.find('${selector}')
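Note that the title regex captures DuckDuckGo's redirect hrefs, which wrap the real destination in a uddg query parameter; the loop above decodes it before keeping the result. A standalone sketch of just that step (the sample href is illustrative, not taken from the package):

  // Sketch of the uddg redirect decoding performed inside webSearch above.
  const href = "//duckduckgo.com/l/?uddg=https%3A%2F%2Fexample.com%2Fdocs&rut=abc"; // illustrative href
  const uddg = href.match(/[?&]uddg=([^&]+)/);
  const target = uddg ? decodeURIComponent(uddg[1]) : href;
  // target === "https://example.com/docs", which then passes the startsWith("http") check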
@@ -2261,6 +2323,7 @@ content = element.text if element else page.get_all_text()` : `content = page.ge
     `- For npm/node projects: check package.json first with read_file or list_dir`,
     `- After write_file, verify with read_file if needed`,
     `- After shell_exec, check output for errors and retry if needed`,
+    `- For research tasks: use web_search first, then scrape_url for full page content`,
     `- Use relative paths from the working directory`,
     `- Be concise in your final response: state what was done and where to find it`
   ];
@@ -2272,6 +2335,7 @@ content = element.text if element else page.get_all_text()` : `content = page.ge
     if (toolName === "write_file") return `"${input.path}"`;
     if (toolName === "read_file") return `"${input.path}"`;
     if (toolName === "list_dir") return `"${input.path ?? "."}"`;
+    if (toolName === "web_search") return `"${String(input.query ?? "").slice(0, 60)}"`;
     if (toolName === "scrape_url") return `"${String(input.url ?? "").slice(0, 60)}" mode=${input.mode ?? "text"}`;
     return JSON.stringify(input).slice(0, 60);
   }
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "0agent",
-  "version": "1.0.10",
+  "version": "1.0.11",
   "description": "A persistent, learning AI agent that runs on your machine. An agent that learns.",
   "private": false,
   "license": "Apache-2.0",