nothumanallowed 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -12,10 +12,11 @@ npm install -g nothumanallowed
12
12
  nha config set provider anthropic
13
13
  nha config set key sk-ant-api03-YOUR_KEY
14
14
 
15
- # Ask a single agent
16
- nha run "Audit this Express app for OWASP Top 10" --agents saber
15
+ # Ask a single agent directly (no server, instant response)
16
+ nha ask saber "Audit this Express app for OWASP Top 10"
17
+ nha ask oracle "Analyze this dataset" --file data.csv
17
18
 
18
- # Or let multiple agents collaborate
19
+ # Or let multiple agents collaborate via deliberation
19
20
  nha run "Design a Kubernetes deployment for a 10K RPS API"
20
21
  ```
21
22
 
@@ -105,7 +106,14 @@ nha install --all # Install everything
105
106
  ## Commands
106
107
 
107
108
  ```bash
108
- # Run agents
109
+ # Ask a single agent (direct call, no server)
110
+ nha ask saber "prompt" # Security audit
111
+ nha ask oracle "prompt" # Data analysis
112
+ nha ask forge "prompt" # DevOps & infrastructure
113
+ nha ask saber "review this" --file app.js # Attach a file
114
+ nha ask saber "prompt" --provider openai # Override provider
115
+
116
+ # Multi-agent collaboration (server-routed deliberation)
109
117
  nha run "prompt" # Auto-route to best agents
110
118
  nha run "prompt" --agents saber,zero # Specific agents
111
119
  nha run --file prompt.txt # From file
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "nothumanallowed",
3
- "version": "1.0.1",
3
+ "version": "1.1.0",
4
4
  "description": "NotHumanAllowed — 38 specialized AI agents for security, code, DevOps, data, and more. Use them individually or let them collaborate via multi-round deliberation.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/cli.mjs CHANGED
@@ -8,6 +8,7 @@ import { spawnCore } from './spawn.mjs';
8
8
  import { loadConfig, setConfigValue } from './config.mjs';
9
9
  import { checkForUpdates, runUpdate } from './updater.mjs';
10
10
  import { download } from './downloader.mjs';
11
+ import { cmdAsk } from './commands/ask.mjs';
11
12
  import { banner, info, ok, warn, fail, C, G, Y, D, W, BOLD, NC, M, B, R } from './ui.mjs';
12
13
 
13
14
  export async function main(argv) {
@@ -33,6 +34,9 @@ export async function main(argv) {
33
34
 
34
35
  // ── Command dispatch ─────────────────────────────────────────────────────
35
36
  switch (cmd) {
37
+ case 'ask':
38
+ return cmdAsk(args);
39
+
36
40
  case 'run':
37
41
  return cmdRun(args);
38
42
 
@@ -321,12 +325,13 @@ function cmdHelp() {
321
325
  console.log(` ${BOLD}Usage:${NC} nha <command> [options]\n`);
322
326
 
323
327
  console.log(` ${C}Agents${NC}`);
328
+ console.log(` ask <agent> "prompt" Ask a single agent directly (no server)`);
329
+ console.log(` ask saber "prompt" ${D}--file code.js${NC} Attach a file`);
330
+ console.log(` ask oracle "prompt" ${D}--provider openai${NC}`);
324
331
  console.log(` agents List all 38 specialized agents`);
325
332
  console.log(` agents info <name> Show agent capabilities & domain`);
326
- console.log(` agents tree Show agent hierarchy by category`);
327
- console.log(` run "prompt" Route to best agents automatically`);
328
- console.log(` run "prompt" ${D}--agents saber,zero${NC} Use specific agents`);
329
- console.log(` run --file f.txt Run from file\n`);
333
+ console.log(` run "prompt" Multi-agent collaboration (server-routed)`);
334
+ console.log(` run "prompt" ${D}--agents saber,zero${NC} Collaborate with specific agents\n`);
330
335
 
331
336
  console.log(` ${C}Extensions${NC} ${D}(downloadable agent modules)${NC}`);
332
337
  console.log(` install <name> Install an extension agent`);
@@ -349,7 +354,7 @@ function cmdHelp() {
349
354
  console.log(` ${C}Quick Start${NC}`);
350
355
  console.log(` ${D}1.${NC} nha config set provider anthropic`);
351
356
  console.log(` ${D}2.${NC} nha config set key sk-ant-api03-YOUR_KEY`);
352
- console.log(` ${D}3.${NC} nha run "Audit this API for OWASP Top 10" --agents saber\n`);
357
+ console.log(` ${D}3.${NC} nha ask saber "Audit this Express app for OWASP Top 10"\n`);
353
358
 
354
359
  console.log(` ${D}38 agents: security, code, data, devops, creative, integration, and more.${NC}`);
355
360
  console.log(` ${D}Use them solo or let them collaborate via multi-round deliberation.${NC}`);
@@ -0,0 +1,410 @@
1
+ /**
2
+ * nha ask <agent> "prompt" — Direct single-agent call. No server. No session.
3
+ * Loads the agent's system prompt from ~/.nha/agents/<name>.mjs,
4
+ * calls the user's configured LLM provider, streams the response.
5
+ *
6
+ * Zero network calls except to the LLM provider.
7
+ */
8
+
9
+ import fs from 'fs';
10
+ import path from 'path';
11
+ import { loadConfig } from '../config.mjs';
12
+ import { AGENTS_DIR, AGENTS } from '../constants.mjs';
13
+ import { fail, info, ok, C, G, Y, D, W, BOLD, NC, M } from '../ui.mjs';
14
+
15
+ // ── LLM Providers ──────────────────────────────────────────────────────────
16
+
17
/**
 * Make one request to Anthropic's Messages API.
 *
 * @param {string} apiKey        value for the x-api-key header
 * @param {string|null} model    model id; defaults to claude-sonnet-4-20250514
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       when true, SSE-stream deltas to stdout
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callAnthropic(apiKey, model, systemPrompt, userMessage, stream) {
  const payload = {
    model: model || 'claude-sonnet-4-20250514',
    max_tokens: 8192,
    system: systemPrompt,
    messages: [{ role: 'user', content: userMessage }],
    stream,
  };

  const res = await fetch('https://api.anthropic.com/v1/messages', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
    },
    body: JSON.stringify(payload),
  });

  if (!res.ok) {
    throw new Error(`Anthropic ${res.status}: ${await res.text()}`);
  }

  if (stream) {
    return streamSSE(res, 'anthropic');
  }

  const data = await res.json();
  return data.content?.[0]?.text || '';
}
42
+
43
/**
 * Make one request to OpenAI's Chat Completions API.
 *
 * @param {string} apiKey        Bearer token
 * @param {string|null} model    model id; defaults to gpt-4o
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       when true, SSE-stream deltas to stdout
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callOpenAI(apiKey, model, systemPrompt, userMessage, stream) {
  const messages = [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: userMessage },
  ];

  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ model: model || 'gpt-4o', max_tokens: 8192, messages, stream }),
  });

  if (!res.ok) {
    throw new Error(`OpenAI ${res.status}: ${await res.text()}`);
  }

  if (stream) {
    return streamSSE(res, 'openai');
  }

  const data = await res.json();
  return data.choices?.[0]?.message?.content || '';
}
69
+
70
/**
 * Make one request to Google's Gemini generateContent API.
 *
 * Fix: the API key is now sent via the `x-goog-api-key` request header
 * instead of a `?key=` query parameter, so the credential no longer leaks
 * into proxy/server access logs or URL history.
 *
 * NOTE(review): the `stream` flag is accepted for signature parity with the
 * other providers but ignored — this helper is always non-streaming, and
 * cmdAsk never requests streaming for gemini.
 *
 * @param {string} apiKey        Gemini API key
 * @param {string|null} model    model id; defaults to gemini-2.5-pro-preview-05-06
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       ignored (no streaming support here)
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callGemini(apiKey, model, systemPrompt, userMessage, stream) {
  const m = model || 'gemini-2.5-pro-preview-05-06';
  const url = `https://generativelanguage.googleapis.com/v1beta/models/${m}:generateContent`;
  const body = {
    system_instruction: { parts: [{ text: systemPrompt }] },
    contents: [{ parts: [{ text: userMessage }] }],
    generationConfig: { maxOutputTokens: 8192 },
  };
  const res = await fetch(url, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      // Header-based auth keeps the key out of logged URLs.
      'x-goog-api-key': apiKey,
    },
    body: JSON.stringify(body),
  });
  if (!res.ok) {
    const err = await res.text();
    throw new Error(`Gemini ${res.status}: ${err}`);
  }
  const data = await res.json();
  return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
}
90
+
91
/**
 * Make one request to DeepSeek's (OpenAI-compatible) chat completions API.
 *
 * @param {string} apiKey        Bearer token
 * @param {string|null} model    model id; defaults to deepseek-chat
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       when true, SSE-stream deltas to stdout
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callDeepSeek(apiKey, model, systemPrompt, userMessage, stream) {
  const messages = [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: userMessage },
  ];

  const res = await fetch('https://api.deepseek.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ model: model || 'deepseek-chat', max_tokens: 8192, messages, stream }),
  });

  if (!res.ok) {
    throw new Error(`DeepSeek ${res.status}: ${await res.text()}`);
  }

  // DeepSeek's wire format matches OpenAI's, so reuse the openai SSE parser.
  if (stream) {
    return streamSSE(res, 'openai');
  }

  const data = await res.json();
  return data.choices?.[0]?.message?.content || '';
}
117
+
118
/**
 * Make one request to xAI's (OpenAI-compatible) Grok chat completions API.
 *
 * @param {string} apiKey        Bearer token
 * @param {string|null} model    model id; defaults to grok-3-latest
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       when true, SSE-stream deltas to stdout
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callGrok(apiKey, model, systemPrompt, userMessage, stream) {
  const messages = [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: userMessage },
  ];

  const res = await fetch('https://api.x.ai/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ model: model || 'grok-3-latest', max_tokens: 8192, messages, stream }),
  });

  if (!res.ok) {
    throw new Error(`Grok ${res.status}: ${await res.text()}`);
  }

  // xAI's wire format matches OpenAI's, so reuse the openai SSE parser.
  if (stream) {
    return streamSSE(res, 'openai');
  }

  const data = await res.json();
  return data.choices?.[0]?.message?.content || '';
}
144
+
145
/**
 * Make one request to Mistral's (OpenAI-compatible) chat completions API.
 *
 * @param {string} apiKey        Bearer token
 * @param {string|null} model    model id; defaults to mistral-large-latest
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       when true, SSE-stream deltas to stdout
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callMistral(apiKey, model, systemPrompt, userMessage, stream) {
  const messages = [
    { role: 'system', content: systemPrompt },
    { role: 'user', content: userMessage },
  ];

  const res = await fetch('https://api.mistral.ai/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({ model: model || 'mistral-large-latest', max_tokens: 8192, messages, stream }),
  });

  if (!res.ok) {
    throw new Error(`Mistral ${res.status}: ${await res.text()}`);
  }

  // Mistral's wire format matches OpenAI's, so reuse the openai SSE parser.
  if (stream) {
    return streamSSE(res, 'openai');
  }

  const data = await res.json();
  return data.choices?.[0]?.message?.content || '';
}
171
+
172
/**
 * Make one request to Cohere's v1 chat API.
 *
 * Cohere takes the system prompt as `preamble` and a single `message`
 * string; this helper is always non-streaming (the `stream` parameter is
 * accepted only for signature parity with the other providers).
 *
 * @param {string} apiKey        Bearer token
 * @param {string|null} model    model id; defaults to command-r-plus
 * @param {string} systemPrompt  agent system prompt
 * @param {string} userMessage   user prompt (plus any attached file text)
 * @param {boolean} stream       ignored
 * @returns {Promise<string>} full response text
 * @throws {Error} on any non-2xx HTTP status
 */
async function callCohere(apiKey, model, systemPrompt, userMessage, stream) {
  const res = await fetch('https://api.cohere.ai/v1/chat', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: model || 'command-r-plus',
      max_tokens: 8192,
      preamble: systemPrompt,
      message: userMessage,
    }),
  });

  if (!res.ok) {
    throw new Error(`Cohere ${res.status}: ${await res.text()}`);
  }

  const data = await res.json();
  return data.text || '';
}
194
+
195
+ // ── SSE Stream Parser ──────────────────────────────────────────────────────
196
+
197
/**
 * Incrementally parse a Server-Sent-Events response body, echoing every
 * text delta to stdout as it arrives and collecting the full text.
 *
 * @param {Response} res  fetch Response with a readable body
 * @param {'anthropic'|'openai'} format  event schema to expect; 'openai'
 *   also covers the OpenAI-compatible DeepSeek/Grok/Mistral streams
 * @returns {Promise<string>} concatenation of all streamed text deltas
 */
async function streamSSE(res, format) {
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let pending = '';   // partial line carried across chunk boundaries
  let collected = ''; // accumulated response text

  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;

    pending += decoder.decode(value, { stream: true });
    const rows = pending.split('\n');
    pending = rows.pop() || '';

    for (const row of rows) {
      if (!row.startsWith('data: ')) continue;
      const payload = row.slice(6).trim();
      if (payload === '[DONE]') continue;

      let piece = '';
      try {
        const event = JSON.parse(payload);
        if (format === 'anthropic') {
          if (event.type === 'content_block_delta') {
            piece = event.delta?.text || '';
          }
        } else {
          // OpenAI-compatible (OpenAI, DeepSeek, Grok, Mistral)
          piece = event.choices?.[0]?.delta?.content || '';
        }
      } catch {
        // Ignore non-JSON keep-alives and fragments; real deltas re-sync
        // on the next complete line.
      }

      if (piece) {
        process.stdout.write(piece);
        collected += piece;
      }
    }
  }

  process.stdout.write('\n');
  return collected;
}
240
+
241
+ // ── Provider Router ────────────────────────────────────────────────────────
242
+
243
/**
 * Map a provider id to its call function.
 *
 * Fix: lookup now uses Object.hasOwn, so inherited Object.prototype members
 * (e.g. a hostile or mistyped `--provider constructor` / `--provider toString`)
 * can no longer be returned as if they were provider handlers — unknown
 * providers always yield null, which cmdAsk reports cleanly.
 *
 * @param {string} provider  lowercase provider id
 * @returns {Function|null}  the provider's call function, or null if unknown
 */
function getProviderCall(provider) {
  const map = {
    anthropic: callAnthropic,
    openai: callOpenAI,
    gemini: callGemini,
    deepseek: callDeepSeek,
    grok: callGrok,
    mistral: callMistral,
    cohere: callCohere,
  };
  return Object.hasOwn(map, provider) ? map[provider] : null;
}
255
+
256
/**
 * Resolve the API key for a provider from the user's config.
 *
 * Per-provider keys (e.g. llm.openaiKey) win; otherwise the shared
 * llm.apiKey is used for every provider.
 *
 * Fixes:
 * - reads are guarded, so a config with no `llm` section (fresh install)
 *   returns undefined — letting the caller print its friendly "No API key"
 *   message — instead of throwing a TypeError;
 * - the lookup uses Object.hasOwn so inherited Object.prototype members
 *   (provider = "constructor" etc.) cannot be returned as keys.
 *
 * @param {object} config    parsed ~/.nha config (may lack `llm`)
 * @param {string} provider  lowercase provider id
 * @returns {string|undefined} the configured API key, if any
 */
function getApiKey(config, provider) {
  const llm = config?.llm ?? {};
  const keyMap = {
    anthropic: llm.apiKey,
    openai: llm.openaiKey || llm.apiKey,
    gemini: llm.geminiKey || llm.apiKey,
    deepseek: llm.deepseekKey || llm.apiKey,
    grok: llm.grokKey || llm.apiKey,
    mistral: llm.mistralKey || llm.apiKey,
    cohere: llm.cohereKey || llm.apiKey,
  };
  const specific = Object.hasOwn(keyMap, provider) ? keyMap[provider] : undefined;
  return specific || llm.apiKey;
}
268
+
269
+ // ── Agent File Parser ──────────────────────────────────────────────────────
270
+
271
/**
 * Extract AGENT_CARD metadata and SYSTEM_PROMPT text from an agent file's
 * raw source without importing it as a module.
 *
 * SECURITY NOTE(review): both extractions evaluate text taken from the agent
 * file via `new Function(...)` — anything placed in ~/.nha/agents/ can run
 * arbitrary code in this process. Tolerable only because those files are
 * installed by the user themselves; never point this at untrusted input.
 *
 * @param {string} source     full text of the agent .mjs file
 * @param {string} agentName  agent id, used for the fallback display name
 * @returns {{card: object, systemPrompt: string}}
 */
function parseAgentFile(source, agentName) {
  let card = { displayName: agentName.toUpperCase(), category: 'agent', tagline: '' };
  let systemPrompt = '';

  // AGENT_CARD: an exported object literal.
  const cardMatch = source.match(/export\s+var\s+AGENT_CARD\s*=\s*(\{[\s\S]*?\});/);
  if (cardMatch) {
    try {
      card = new Function('return ' + cardMatch[1])();
    } catch {
      // Malformed card — keep the fallback.
    }
  }

  // SYSTEM_PROMPT: possibly a concatenation of string literals, terminated
  // by the next export/var/comment block.
  const promptMatch = source.match(/export\s+var\s+SYSTEM_PROMPT\s*=\s*([\s\S]*?);(?:\n\nexport|\n\nvar|\n\n\/\/)/);
  if (promptMatch) {
    try {
      systemPrompt = new Function('return ' + promptMatch[1])();
    } catch {
      // Fall through to the simpler pattern below.
    }
  }

  // Last resort: a plain single-quoted literal assignment.
  if (!systemPrompt) {
    const simpleMatch = source.match(/SYSTEM_PROMPT\s*=\s*'([\s\S]*?)';/);
    if (simpleMatch) systemPrompt = simpleMatch[1];
  }

  return { card, systemPrompt };
}
301
+
302
+ // ── Main Command ───────────────────────────────────────────────────────────
303
+
304
/**
 * `nha ask <agent> "prompt"` — direct single-agent LLM call (no server).
 *
 * Flags: --provider X, --model Y, --no-stream, --file F (attach file text).
 * Loads the agent's SYSTEM_PROMPT from ~/.nha/agents/<agent>.mjs, calls the
 * configured provider, and prints the (optionally streamed) response.
 *
 * Fix: config access is guarded — `config.llm` may be absent on a fresh
 * install — so a missing config now produces the friendly "No API key"
 * message instead of crashing with a TypeError.
 *
 * @param {string[]} args  argv remaining after the `ask` command word
 * @returns {Promise<void>} exits the process non-zero on any user error
 */
export async function cmdAsk(args) {
  // ── Agent name ───────────────────────────────────────────────────────────
  const agentName = args[0];
  if (!agentName || agentName.startsWith('-')) {
    fail('Usage: nha ask <agent> "your question"');
    fail(' nha ask saber "Audit this Express app for OWASP Top 10"');
    fail(' nha ask oracle "Analyze this CSV for trends" --file data.csv');
    console.log('');
    info('Available agents: ' + AGENTS.join(', '));
    process.exit(1);
  }

  // The agent must already be installed locally.
  const agentFile = path.join(AGENTS_DIR, `${agentName}.mjs`);
  if (!fs.existsSync(agentFile)) {
    fail(`Agent "${agentName}" not found in ~/.nha/agents/`);
    info('Available: ' + AGENTS.join(', '));
    process.exit(1);
  }

  // ── Flags and prompt words ───────────────────────────────────────────────
  const promptParts = [];
  let provider = null;
  let model = null;
  let stream = true;
  let attachFile = null;

  for (let i = 1; i < args.length; i++) {
    if (args[i] === '--provider' && args[i + 1]) { provider = args[++i]; continue; }
    if (args[i] === '--model' && args[i + 1]) { model = args[++i]; continue; }
    if (args[i] === '--no-stream') { stream = false; continue; }
    if (args[i] === '--file' && args[i + 1]) { attachFile = args[++i]; continue; }
    promptParts.push(args[i]);
  }

  let userMessage = promptParts.join(' ');
  if (!userMessage) {
    fail('No prompt provided.');
    fail('Usage: nha ask saber "your question here"');
    process.exit(1);
  }

  // ── Optional file attachment (truncated to bound the request size) ──────
  if (attachFile) {
    const filePath = path.resolve(attachFile);
    if (!fs.existsSync(filePath)) {
      fail(`File not found: ${attachFile}`);
      process.exit(1);
    }
    const content = fs.readFileSync(filePath, 'utf-8');
    const maxChars = 100_000;
    const truncated = content.length > maxChars
      ? content.slice(0, maxChars) + '\n\n[... truncated ...]'
      : content;
    userMessage += `\n\n--- Attached file: ${path.basename(filePath)} ---\n${truncated}`;
  }

  // ── Provider / model / key resolution ────────────────────────────────────
  const config = loadConfig();
  const llm = config.llm || {}; // guard: fresh installs have no llm section
  provider = provider || llm.provider || 'anthropic';
  model = model || llm.model || null;
  // Pass a config whose llm section is always an object so key lookup is safe.
  const apiKey = getApiKey({ ...config, llm }, provider);

  if (!apiKey) {
    fail(`No API key for ${provider}. Run: nha config set key YOUR_KEY`);
    process.exit(1);
  }

  // Agent files can't reliably be dynamic-imported as standalone ESM, so the
  // card and system prompt are scraped from the file text instead.
  const agentSource = fs.readFileSync(agentFile, 'utf-8');
  const { card, systemPrompt } = parseAgentFile(agentSource, agentName);

  if (!systemPrompt) {
    fail(`Agent "${agentName}" has no SYSTEM_PROMPT in its file.`);
    process.exit(1);
  }

  // ── Header ───────────────────────────────────────────────────────────────
  console.log(`\n ${BOLD}${card?.displayName || agentName.toUpperCase()}${NC} ${D}(${card?.tagline || card?.category || 'agent'})${NC}`);
  console.log(` ${D}Provider: ${provider}${model ? ' / ' + model : ''} | Direct call — no server${NC}\n`);

  // ── LLM call ─────────────────────────────────────────────────────────────
  const callFn = getProviderCall(provider);
  if (!callFn) {
    fail(`Unknown provider: ${provider}`);
    info('Supported: anthropic, openai, gemini, deepseek, grok, mistral, cohere');
    process.exit(1);
  }

  const startTime = Date.now();

  try {
    // Gemini and Cohere have no SSE path here; stream only for the others.
    const useStream = stream && (provider === 'anthropic' || provider === 'openai' || provider === 'deepseek' || provider === 'grok' || provider === 'mistral');

    const result = await callFn(apiKey, model, systemPrompt, userMessage, useStream);

    if (!useStream && result) {
      console.log(result);
    }

    const elapsed = ((Date.now() - startTime) / 1000).toFixed(1);
    console.log(`\n ${D}${elapsed}s | ${provider}${model ? ' / ' + model : ''} | ${card?.displayName || agentName}${NC}\n`);
  } catch (err) {
    fail(err.message);
    process.exit(1);
  }
}
package/src/constants.mjs CHANGED
@@ -1,7 +1,7 @@
1
1
  import os from 'os';
2
2
  import path from 'path';
3
3
 
4
- export const VERSION = '1.0.1';
4
+ export const VERSION = '1.1.0';
5
5
  export const BASE_URL = 'https://nothumanallowed.com/cli';
6
6
  export const API_BASE = 'https://nothumanallowed.com/api/v1';
7
7