nothumanallowed 14.1.24 → 14.1.25

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nothumanallowed",
3
- "version": "14.1.24",
3
+ "version": "14.1.25",
4
4
  "description": "NotHumanAllowed — 38 AI agents, 80 tools, Studio (visual agentic workflows). Email, calendar, browser automation, screen capture, canvas, cron/heartbeat, Alexandria E2E messaging, GitHub, Notion, Slack, voice chat, free AI (Liara), 28 languages. Zero-dependency CLI.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/constants.mjs CHANGED
@@ -5,7 +5,7 @@ import { fileURLToPath } from 'url';
5
5
  const __filename = fileURLToPath(import.meta.url);
6
6
  const __dirname = path.dirname(__filename);
7
7
 
8
- export const VERSION = '14.1.24';
8
+ export const VERSION = '14.1.25';
9
9
  export const BASE_URL = 'https://nothumanallowed.com/cli';
10
10
  export const API_BASE = 'https://nothumanallowed.com/api/v1';
11
11
 
@@ -9,6 +9,76 @@ import { sendJSON, sendError, parseBody } from '../index.mjs';
9
9
  import { loadConfig } from '../../config.mjs';
10
10
  import { NHA_DIR, AGENTS_DIR } from '../../constants.mjs';
11
11
  import { callLLM, callLLMStream, parseAgentFile } from '../../services/llm.mjs';
12
+ import { webSearch, fetchUrl } from '../../services/web-tools.mjs';
13
+
14
// ── Studio web tool definitions injected into agent system prompts ────────────
// Prompt fragment appended to an agent's system prompt when that agent is
// granted web access. The literal text (including the <tool_call> examples)
// is the runtime contract with the LLM and is reproduced verbatim.
const STUDIO_WEB_TOOLS = `
You have access to two web tools. Use them to gather real data before writing your analysis.

TOOL USAGE — output a JSON block wrapped in <tool_call> tags:
<tool_call>{"tool": "web_search", "query": "your search query"}</tool_call>
<tool_call>{"tool": "fetch_url", "url": "https://example.com/article"}</tool_call>

Rules:
- Use web_search to find relevant URLs, news, prices, data
- Use fetch_url on specific URLs from search results to read full article content
- You may call multiple tools (each on its own line)
- After receiving <tool_response> results, write your complete analysis
- NEVER fabricate data — only use what the tools return
`;

// Agents that should have web tool access in Studio (matched by exact name;
// both lower- and upper-case variants are listed where both forms exist).
const WEB_TOOL_AGENTS = new Set([
  'WebSearchAgent',
  'TravelAgent',
  'mercury',
  'MERCURY',
  'athena',
  'ATHENA',
  'oracle',
  'ORACLE',
  'cassandra',
  'CASSANDRA',
  'HERALD',
  'DataAnalystAgent',
]);
36
+
37
/**
 * Execute a single studio tool call. Returns a result string.
 *
 * @param {{tool: string, query?: string, url?: string}} toolCall - Parsed
 *   tool-call payload emitted by the LLM (see parseToolCalls).
 * @returns {Promise<string>} Human-readable tool output, or an error/usage
 *   message. Never throws: every failure is folded into the returned string
 *   so the SSE stream to the client is never broken by a tool error.
 */
async function executeStudioTool(toolCall) {
  try {
    const { tool, query, url } = toolCall;
    if (tool === 'web_search') {
      if (!query) return 'ERROR: web_search requires a query parameter';
      const result = await webSearch(query);
      if (result.error) return `Search failed: ${result.message}`;
      // Defensive: a non-error result may still lack a results array;
      // without this guard .slice() would throw and surface as a generic
      // "Tool execution error" instead of an empty result set.
      const items = Array.isArray(result.results) ? result.results : [];
      const snippets = items
        .slice(0, 6) // cap the context fed back to the model
        .map((r, i) => `[${i + 1}] ${r.title}\nURL: ${r.url}\n${r.snippet || ''}`)
        .join('\n\n');
      return `Search results for "${query}" (${result.resultCount ?? items.length} found):\n\n${snippets}`;
    }
    if (tool === 'fetch_url') {
      if (!url) return 'ERROR: fetch_url requires a url parameter';
      const result = await fetchUrl(url);
      if (result.error) return `Fetch failed: ${result.message}`;
      const titlePart = result.title ? `Title: ${result.title}\n\n` : '';
      const text = (result.body || '').slice(0, 5000); // truncate long pages
      return `Content from ${url}:\n\n${titlePart}${text}`;
    }
    return `Unknown tool: ${tool}`;
  } catch (e) {
    // Fold unexpected failures (bad payloads, service throws) into text.
    return `Tool execution error: ${e.message}`;
  }
}
66
+
67
/**
 * Parse <tool_call> blocks from LLM output.
 *
 * Scans the text for <tool_call>…</tool_call> spans, JSON-parses each body,
 * and keeps only payloads that carry a "tool" field. Malformed JSON inside a
 * span is silently skipped (best-effort extraction from free-form output).
 *
 * @param {string} text - Raw LLM output.
 * @returns {Array<object>} Parsed tool-call objects, in order of appearance.
 */
function parseToolCalls(text) {
  const pattern = /<tool_call>([\s\S]*?)<\/tool_call>/g;
  const calls = [];
  for (const match of text.matchAll(pattern)) {
    try {
      const payload = JSON.parse(match[1].trim());
      if (payload.tool) calls.push(payload);
    } catch {
      // Not valid JSON — skip this span rather than failing the whole parse.
    }
  }
  return calls;
}
12
82
 
13
83
  export function register(router) {
14
84
  router.post('/api/studio/plan', async (req, res) => {
@@ -162,14 +232,63 @@ export function register(router) {
162
232
  const systemPrompt = sysParts.join('');
163
233
  const userMessage = stepDef?.prompt || task;
164
234
 
235
+ const useWebTools = WEB_TOOL_AGENTS.has(agent);
236
+ const finalSystemPrompt = useWebTools
237
+ ? systemPrompt + '\n\n' + STUDIO_WEB_TOOLS
238
+ : systemPrompt;
239
+
165
240
  let output = '';
166
241
  let tokensOut = 0;
167
- await callLLMStream(config, systemPrompt, userMessage, (tok) => {
242
+
243
+ // Round 1: initial LLM call (may contain tool_call blocks)
244
+ await callLLMStream(config, finalSystemPrompt, userMessage, (tok) => {
168
245
  output += tok;
169
246
  tokensOut += Math.ceil(tok.length / 4);
170
247
  sse({ token: tok });
171
248
  }, { max_tokens: 8192 });
172
249
 
250
+ // Tool execution loop (max 2 rounds to prevent runaway)
251
+ if (useWebTools) {
252
+ for (let round = 0; round < 2; round++) {
253
+ const toolCalls = parseToolCalls(output);
254
+ if (toolCalls.length === 0) break;
255
+
256
+ sse({ token: '\n\n' });
257
+
258
+ // Execute all tool calls in parallel
259
+ const toolResults = await Promise.all(
260
+ toolCalls.map(async (tc) => {
261
+ const label = tc.tool === 'web_search' ? `Searching: "${tc.query}"` : `Fetching: ${tc.url}`;
262
+ sse({ token: `\n[${label}...] ` });
263
+ const result = await executeStudioTool(tc);
264
+ return { call: tc, result };
265
+ })
266
+ );
267
+
268
+ // Build tool responses block
269
+ const toolResponseBlock = toolResults
270
+ .map(({ call, result }) => {
271
+ const callJson = JSON.stringify(call);
272
+ return `<tool_call>${callJson}</tool_call>\n<tool_response>${result}</tool_response>`;
273
+ })
274
+ .join('\n\n');
275
+
276
+ // Round 2: synthesis with tool results
277
+ const synthesisPrompt = `## TOOL RESULTS:\n${toolResponseBlock}\n\n## YOUR TASK:\n${userMessage}\n\nNow write your complete analysis using the real data from the tool results above. Do not emit more tool calls — write the final answer directly.`;
278
+
279
+ sse({ token: '\n\n' });
280
+ let synthOutput = '';
281
+ await callLLMStream(config, finalSystemPrompt, synthesisPrompt, (tok) => {
282
+ synthOutput += tok;
283
+ tokensOut += Math.ceil(tok.length / 4);
284
+ sse({ token: tok });
285
+ }, { max_tokens: 8192 });
286
+
287
+ output = synthOutput;
288
+ break; // one tool round is sufficient
289
+ }
290
+ }
291
+
173
292
  clearInterval(keepalive);
174
293
  sse({ done: true, output, tokensOut });
175
294
  res.write('data: [DONE]\n\n');