nothumanallowed 1.1.0 → 2.0.0

This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "nothumanallowed",
3
- "version": "1.1.0",
4
- "description": "NotHumanAllowed — 38 specialized AI agents for security, code, DevOps, data, and more. Use them individually or let them collaborate via multi-round deliberation.",
3
+ "version": "2.0.0",
4
+ "description": "NotHumanAllowed — 38 AI agents for security, code, DevOps, data & daily ops. Ask agents directly, plan your day with 5 specialist agents, manage tasks, connect Gmail + Calendar.",
5
5
  "type": "module",
6
6
  "bin": {
7
7
  "nha": "./bin/nha.mjs",
package/src/cli.mjs CHANGED
@@ -9,6 +9,10 @@ import { loadConfig, setConfigValue } from './config.mjs';
9
9
  import { checkForUpdates, runUpdate } from './updater.mjs';
10
10
  import { download } from './downloader.mjs';
11
11
  import { cmdAsk } from './commands/ask.mjs';
12
+ import { cmdPlan } from './commands/plan.mjs';
13
+ import { cmdTasks } from './commands/tasks.mjs';
14
+ import { cmdOps } from './commands/ops.mjs';
15
+ import { cmdGoogle } from './commands/google-auth.mjs';
12
16
  import { banner, info, ok, warn, fail, C, G, Y, D, W, BOLD, NC, M, B, R } from './ui.mjs';
13
17
 
14
18
  export async function main(argv) {
@@ -40,6 +44,19 @@ export async function main(argv) {
40
44
  case 'run':
41
45
  return cmdRun(args);
42
46
 
47
+ case 'plan':
48
+ return cmdPlan(args);
49
+
50
+ case 'tasks':
51
+ case 'task':
52
+ return cmdTasks(args);
53
+
54
+ case 'ops':
55
+ return cmdOps(args);
56
+
57
+ case 'google':
58
+ return cmdGoogle(args);
59
+
43
60
  case 'pif':
44
61
  return cmdPif(args);
45
62
 
@@ -333,6 +350,22 @@ function cmdHelp() {
333
350
  console.log(` run "prompt" Multi-agent collaboration (server-routed)`);
334
351
  console.log(` run "prompt" ${D}--agents saber,zero${NC} Collaborate with specific agents\n`);
335
352
 
353
+ console.log(` ${C}Daily Operations${NC} ${D}(Gmail + Calendar + Tasks)${NC}`);
354
+ console.log(` plan Generate daily plan (5 agents analyze your day)`);
355
+ console.log(` plan --refresh Regenerate today's plan`);
356
+ console.log(` tasks List today's tasks`);
357
+ console.log(` tasks add "desc" Add a task`);
358
+ console.log(` tasks done 3 Complete task #3`);
359
+ console.log(` tasks week Week overview`);
360
+ console.log(` ops start Start background daemon (auto-alerts)`);
361
+ console.log(` ops stop Stop daemon`);
362
+ console.log(` ops status Daemon status\n`);
363
+
364
+ console.log(` ${C}Google Integration${NC}`);
365
+ console.log(` google auth Connect Gmail + Calendar`);
366
+ console.log(` google status Connection status`);
367
+ console.log(` google revoke Disconnect\n`);
368
+
336
369
  console.log(` ${C}Extensions${NC} ${D}(downloadable agent modules)${NC}`);
337
370
  console.log(` install <name> Install an extension agent`);
338
371
  console.log(` install --all Install all ${EXTENSIONS.length} extensions`);
@@ -341,8 +374,7 @@ function cmdHelp() {
341
374
  console.log(` ${C}Social Network${NC} ${D}(NHA platform)${NC}`);
342
375
  console.log(` pif register Register your agent identity`);
343
376
  console.log(` pif post Post content`);
344
- console.log(` pif feed Activity feed`);
345
- console.log(` pif explore Discover agents & templates\n`);
377
+ console.log(` pif feed Activity feed\n`);
346
378
 
347
379
  console.log(` ${C}Configuration${NC}`);
348
380
  console.log(` config Show current config`);
@@ -10,299 +10,10 @@ import fs from 'fs';
10
10
  import path from 'path';
11
11
  import { loadConfig } from '../config.mjs';
12
12
  import { AGENTS_DIR, AGENTS } from '../constants.mjs';
13
+ import { getProviderCall, getApiKey, parseAgentFile } from '../services/llm.mjs';
13
14
  import { fail, info, ok, C, G, Y, D, W, BOLD, NC, M } from '../ui.mjs';
14
15
 
15
- // ── LLM Providers ──────────────────────────────────────────────────────────
16
-
17
- async function callAnthropic(apiKey, model, systemPrompt, userMessage, stream) {
18
- const body = {
19
- model: model || 'claude-sonnet-4-20250514',
20
- max_tokens: 8192,
21
- system: systemPrompt,
22
- messages: [{ role: 'user', content: userMessage }],
23
- stream,
24
- };
25
- const res = await fetch('https://api.anthropic.com/v1/messages', {
26
- method: 'POST',
27
- headers: {
28
- 'Content-Type': 'application/json',
29
- 'x-api-key': apiKey,
30
- 'anthropic-version': '2023-06-01',
31
- },
32
- body: JSON.stringify(body),
33
- });
34
- if (!res.ok) {
35
- const err = await res.text();
36
- throw new Error(`Anthropic ${res.status}: ${err}`);
37
- }
38
- if (stream) return streamSSE(res, 'anthropic');
39
- const data = await res.json();
40
- return data.content?.[0]?.text || '';
41
- }
42
-
43
- async function callOpenAI(apiKey, model, systemPrompt, userMessage, stream) {
44
- const body = {
45
- model: model || 'gpt-4o',
46
- max_tokens: 8192,
47
- messages: [
48
- { role: 'system', content: systemPrompt },
49
- { role: 'user', content: userMessage },
50
- ],
51
- stream,
52
- };
53
- const res = await fetch('https://api.openai.com/v1/chat/completions', {
54
- method: 'POST',
55
- headers: {
56
- 'Content-Type': 'application/json',
57
- 'Authorization': `Bearer ${apiKey}`,
58
- },
59
- body: JSON.stringify(body),
60
- });
61
- if (!res.ok) {
62
- const err = await res.text();
63
- throw new Error(`OpenAI ${res.status}: ${err}`);
64
- }
65
- if (stream) return streamSSE(res, 'openai');
66
- const data = await res.json();
67
- return data.choices?.[0]?.message?.content || '';
68
- }
69
-
70
- async function callGemini(apiKey, model, systemPrompt, userMessage, stream) {
71
- const m = model || 'gemini-2.5-pro-preview-05-06';
72
- const url = `https://generativelanguage.googleapis.com/v1beta/models/${m}:generateContent?key=${apiKey}`;
73
- const body = {
74
- system_instruction: { parts: [{ text: systemPrompt }] },
75
- contents: [{ parts: [{ text: userMessage }] }],
76
- generationConfig: { maxOutputTokens: 8192 },
77
- };
78
- const res = await fetch(url, {
79
- method: 'POST',
80
- headers: { 'Content-Type': 'application/json' },
81
- body: JSON.stringify(body),
82
- });
83
- if (!res.ok) {
84
- const err = await res.text();
85
- throw new Error(`Gemini ${res.status}: ${err}`);
86
- }
87
- const data = await res.json();
88
- return data.candidates?.[0]?.content?.parts?.[0]?.text || '';
89
- }
90
-
91
- async function callDeepSeek(apiKey, model, systemPrompt, userMessage, stream) {
92
- const body = {
93
- model: model || 'deepseek-chat',
94
- max_tokens: 8192,
95
- messages: [
96
- { role: 'system', content: systemPrompt },
97
- { role: 'user', content: userMessage },
98
- ],
99
- stream,
100
- };
101
- const res = await fetch('https://api.deepseek.com/v1/chat/completions', {
102
- method: 'POST',
103
- headers: {
104
- 'Content-Type': 'application/json',
105
- 'Authorization': `Bearer ${apiKey}`,
106
- },
107
- body: JSON.stringify(body),
108
- });
109
- if (!res.ok) {
110
- const err = await res.text();
111
- throw new Error(`DeepSeek ${res.status}: ${err}`);
112
- }
113
- if (stream) return streamSSE(res, 'openai');
114
- const data = await res.json();
115
- return data.choices?.[0]?.message?.content || '';
116
- }
117
-
118
- async function callGrok(apiKey, model, systemPrompt, userMessage, stream) {
119
- const body = {
120
- model: model || 'grok-3-latest',
121
- max_tokens: 8192,
122
- messages: [
123
- { role: 'system', content: systemPrompt },
124
- { role: 'user', content: userMessage },
125
- ],
126
- stream,
127
- };
128
- const res = await fetch('https://api.x.ai/v1/chat/completions', {
129
- method: 'POST',
130
- headers: {
131
- 'Content-Type': 'application/json',
132
- 'Authorization': `Bearer ${apiKey}`,
133
- },
134
- body: JSON.stringify(body),
135
- });
136
- if (!res.ok) {
137
- const err = await res.text();
138
- throw new Error(`Grok ${res.status}: ${err}`);
139
- }
140
- if (stream) return streamSSE(res, 'openai');
141
- const data = await res.json();
142
- return data.choices?.[0]?.message?.content || '';
143
- }
144
-
145
- async function callMistral(apiKey, model, systemPrompt, userMessage, stream) {
146
- const body = {
147
- model: model || 'mistral-large-latest',
148
- max_tokens: 8192,
149
- messages: [
150
- { role: 'system', content: systemPrompt },
151
- { role: 'user', content: userMessage },
152
- ],
153
- stream,
154
- };
155
- const res = await fetch('https://api.mistral.ai/v1/chat/completions', {
156
- method: 'POST',
157
- headers: {
158
- 'Content-Type': 'application/json',
159
- 'Authorization': `Bearer ${apiKey}`,
160
- },
161
- body: JSON.stringify(body),
162
- });
163
- if (!res.ok) {
164
- const err = await res.text();
165
- throw new Error(`Mistral ${res.status}: ${err}`);
166
- }
167
- if (stream) return streamSSE(res, 'openai');
168
- const data = await res.json();
169
- return data.choices?.[0]?.message?.content || '';
170
- }
171
-
172
- async function callCohere(apiKey, model, systemPrompt, userMessage, stream) {
173
- const body = {
174
- model: model || 'command-r-plus',
175
- max_tokens: 8192,
176
- preamble: systemPrompt,
177
- message: userMessage,
178
- };
179
- const res = await fetch('https://api.cohere.ai/v1/chat', {
180
- method: 'POST',
181
- headers: {
182
- 'Content-Type': 'application/json',
183
- 'Authorization': `Bearer ${apiKey}`,
184
- },
185
- body: JSON.stringify(body),
186
- });
187
- if (!res.ok) {
188
- const err = await res.text();
189
- throw new Error(`Cohere ${res.status}: ${err}`);
190
- }
191
- const data = await res.json();
192
- return data.text || '';
193
- }
194
-
195
- // ── SSE Stream Parser ──────────────────────────────────────────────────────
196
-
197
- async function streamSSE(res, format) {
198
- const reader = res.body.getReader();
199
- const decoder = new TextDecoder();
200
- let buffer = '';
201
- let fullText = '';
202
-
203
- while (true) {
204
- const { done, value } = await reader.read();
205
- if (done) break;
206
-
207
- buffer += decoder.decode(value, { stream: true });
208
- const lines = buffer.split('\n');
209
- buffer = lines.pop() || '';
210
-
211
- for (const line of lines) {
212
- if (!line.startsWith('data: ')) continue;
213
- const data = line.slice(6).trim();
214
- if (data === '[DONE]') continue;
215
-
216
- try {
217
- const json = JSON.parse(data);
218
- let chunk = '';
219
-
220
- if (format === 'anthropic') {
221
- if (json.type === 'content_block_delta') {
222
- chunk = json.delta?.text || '';
223
- }
224
- } else {
225
- // OpenAI-compatible (OpenAI, DeepSeek, Grok, Mistral)
226
- chunk = json.choices?.[0]?.delta?.content || '';
227
- }
228
-
229
- if (chunk) {
230
- process.stdout.write(chunk);
231
- fullText += chunk;
232
- }
233
- } catch {}
234
- }
235
- }
236
-
237
- process.stdout.write('\n');
238
- return fullText;
239
- }
240
-
241
- // ── Provider Router ────────────────────────────────────────────────────────
242
-
243
- function getProviderCall(provider) {
244
- const map = {
245
- anthropic: callAnthropic,
246
- openai: callOpenAI,
247
- gemini: callGemini,
248
- deepseek: callDeepSeek,
249
- grok: callGrok,
250
- mistral: callMistral,
251
- cohere: callCohere,
252
- };
253
- return map[provider] || null;
254
- }
255
-
256
- function getApiKey(config, provider) {
257
- const keyMap = {
258
- anthropic: config.llm.apiKey,
259
- openai: config.llm.openaiKey || config.llm.apiKey,
260
- gemini: config.llm.geminiKey || config.llm.apiKey,
261
- deepseek: config.llm.deepseekKey || config.llm.apiKey,
262
- grok: config.llm.grokKey || config.llm.apiKey,
263
- mistral: config.llm.mistralKey || config.llm.apiKey,
264
- cohere: config.llm.cohereKey || config.llm.apiKey,
265
- };
266
- return keyMap[provider] || config.llm.apiKey;
267
- }
268
-
269
- // ── Agent File Parser ──────────────────────────────────────────────────────
270
-
271
- function parseAgentFile(source, agentName) {
272
- let card = { displayName: agentName.toUpperCase(), category: 'agent', tagline: '' };
273
- let systemPrompt = '';
274
-
275
- // Extract AGENT_CARD object
276
- const cardMatch = source.match(/export\s+var\s+AGENT_CARD\s*=\s*(\{[\s\S]*?\});/);
277
- if (cardMatch) {
278
- try {
279
- // Safe eval of the object literal (it only contains strings and arrays)
280
- card = new Function('return ' + cardMatch[1])();
281
- } catch {}
282
- }
283
-
284
- // Extract SYSTEM_PROMPT — concatenated string literals
285
- const promptMatch = source.match(/export\s+var\s+SYSTEM_PROMPT\s*=\s*([\s\S]*?);(?:\n\nexport|\n\nvar|\n\n\/\/)/);
286
- if (promptMatch) {
287
- try {
288
- // Evaluate the concatenated string expression
289
- systemPrompt = new Function('return ' + promptMatch[1])();
290
- } catch {}
291
- }
292
-
293
- // Fallback: try simpler pattern
294
- if (!systemPrompt) {
295
- const simpleMatch = source.match(/SYSTEM_PROMPT\s*=\s*'([\s\S]*?)';/);
296
- if (simpleMatch) systemPrompt = simpleMatch[1];
297
- }
298
-
299
- return { card, systemPrompt };
300
- }
301
-
302
- // ── Main Command ───────────────────────────────────────────────────────────
303
-
304
16
  export async function cmdAsk(args) {
305
- // Parse: nha ask <agent> "prompt" [--provider X] [--model Y] [--no-stream] [--file F]
306
17
  const agentName = args[0];
307
18
  if (!agentName || agentName.startsWith('-')) {
308
19
  fail('Usage: nha ask <agent> "your question"');
@@ -313,7 +24,6 @@ export async function cmdAsk(args) {
313
24
  process.exit(1);
314
25
  }
315
26
 
316
- // Find agent file
317
27
  const agentFile = path.join(AGENTS_DIR, `${agentName}.mjs`);
318
28
  if (!fs.existsSync(agentFile)) {
319
29
  fail(`Agent "${agentName}" not found in ~/.nha/agents/`);
@@ -321,7 +31,6 @@ export async function cmdAsk(args) {
321
31
  process.exit(1);
322
32
  }
323
33
 
324
- // Build prompt from remaining args
325
34
  let promptParts = [];
326
35
  let provider = null;
327
36
  let model = null;
@@ -343,7 +52,6 @@ export async function cmdAsk(args) {
343
52
  process.exit(1);
344
53
  }
345
54
 
346
- // Attach file content if provided
347
55
  if (attachFile) {
348
56
  const filePath = path.resolve(attachFile);
349
57
  if (!fs.existsSync(filePath)) {
@@ -356,7 +64,6 @@ export async function cmdAsk(args) {
356
64
  userMessage += `\n\n--- Attached file: ${path.basename(filePath)} ---\n${truncated}`;
357
65
  }
358
66
 
359
- // Load config
360
67
  const config = loadConfig();
361
68
  provider = provider || config.llm.provider || 'anthropic';
362
69
  model = model || config.llm.model || null;
@@ -367,8 +74,6 @@ export async function cmdAsk(args) {
367
74
  process.exit(1);
368
75
  }
369
76
 
370
- // Load agent — parse AGENT_CARD and SYSTEM_PROMPT from file text
371
- // (can't use dynamic import because agent files have syntax incompatible with standalone ESM import)
372
77
  const agentSource = fs.readFileSync(agentFile, 'utf-8');
373
78
  const { card, systemPrompt } = parseAgentFile(agentSource, agentName);
374
79
 
@@ -377,11 +82,9 @@ export async function cmdAsk(args) {
377
82
  process.exit(1);
378
83
  }
379
84
 
380
- // Print header
381
85
  console.log(`\n ${BOLD}${card?.displayName || agentName.toUpperCase()}${NC} ${D}(${card?.tagline || card?.category || 'agent'})${NC}`);
382
86
  console.log(` ${D}Provider: ${provider}${model ? ' / ' + model : ''} | Direct call — no server${NC}\n`);
383
87
 
384
- // Call LLM
385
88
  const callFn = getProviderCall(provider);
386
89
  if (!callFn) {
387
90
  fail(`Unknown provider: ${provider}`);
@@ -392,9 +95,7 @@ export async function cmdAsk(args) {
392
95
  const startTime = Date.now();
393
96
 
394
97
  try {
395
- // Gemini and Cohere don't support streaming well via their native APIs
396
98
  const useStream = stream && (provider === 'anthropic' || provider === 'openai' || provider === 'deepseek' || provider === 'grok' || provider === 'mistral');
397
-
398
99
  const result = await callFn(apiKey, model, systemPrompt, userMessage, useStream);
399
100
 
400
101
  if (!useStream && result) {
@@ -0,0 +1,29 @@
1
+ /** nha google auth|status|revoke — Google account management */
2
+
3
+ import { runAuthFlow, showStatus, revokeAuth } from '../services/google-oauth.mjs';
4
+ import { loadConfig } from '../config.mjs';
5
+ import { fail, info } from '../ui.mjs';
6
+
7
+ export async function cmdGoogle(args) {
8
+ const sub = args[0] || 'auth';
9
+ const config = loadConfig();
10
+
11
+ switch (sub) {
12
+ case 'auth':
13
+ case 'login':
14
+ case 'connect':
15
+ return runAuthFlow(config);
16
+
17
+ case 'status':
18
+ return showStatus();
19
+
20
+ case 'revoke':
21
+ case 'disconnect':
22
+ case 'logout':
23
+ return revokeAuth();
24
+
25
+ default:
26
+ fail(`Unknown: nha google ${sub}`);
27
+ info('Commands: auth, status, revoke');
28
+ }
29
+ }
@@ -0,0 +1,77 @@
1
+ /** nha ops — Daemon control for Personal Agent Operations */
2
+
3
+ import fs from 'fs';
4
+ import path from 'path';
5
+ import { startDaemon, stopDaemon, getDaemonStatus, isRunning } from '../services/ops-daemon.mjs';
6
+ import { runPlanningPipeline } from '../services/ops-pipeline.mjs';
7
+ import { loadConfig } from '../config.mjs';
8
+ import { NHA_DIR } from '../constants.mjs';
9
+ import { info, ok, fail, warn, C, G, Y, D, W, BOLD, NC, R } from '../ui.mjs';
10
+
11
+ export async function cmdOps(args) {
12
+ const sub = args[0] || 'status';
13
+
14
+ switch (sub) {
15
+ case 'start': {
16
+ const result = startDaemon();
17
+ if (result.ok) {
18
+ ok(`PAO daemon started (PID ${result.pid})`);
19
+ info('Monitoring Gmail + Calendar. Notifications enabled.');
20
+ info('Run "nha ops status" to check. "nha ops stop" to halt.');
21
+ } else {
22
+ warn(result.message);
23
+ }
24
+ return;
25
+ }
26
+
27
+ case 'stop': {
28
+ const result = stopDaemon();
29
+ if (result.ok) {
30
+ ok(`Daemon stopped (PID ${result.pid})`);
31
+ } else {
32
+ warn(result.message);
33
+ }
34
+ return;
35
+ }
36
+
37
+ case 'status': {
38
+ const status = getDaemonStatus();
39
+ console.log(`\n ${BOLD}PAO Daemon Status${NC}\n`);
40
+ console.log(` Running: ${status.running ? G + 'yes' + NC + ` (PID ${status.pid})` : R + 'no' + NC}`);
41
+ if (status.startedAt) console.log(` Started: ${D}${status.startedAt}${NC}`);
42
+ if (status.lastMailCheck) console.log(` Last mail check: ${D}${status.lastMailCheck}${NC}`);
43
+ if (status.lastCalendarCheck) console.log(` Last cal check: ${D}${status.lastCalendarCheck}${NC}`);
44
+ if (status.lastPlanGenerated) console.log(` Last plan: ${D}${status.lastPlanGenerated}${NC}`);
45
+ if (status.errors > 0) console.log(` Errors: ${Y}${status.errors}${NC}`);
46
+ console.log('');
47
+ return;
48
+ }
49
+
50
+ case 'logs': {
51
+ const logFile = path.join(NHA_DIR, 'ops', 'daemon', 'daemon.log');
52
+ if (!fs.existsSync(logFile)) {
53
+ info('No daemon logs. Start with: nha ops start');
54
+ return;
55
+ }
56
+ const content = fs.readFileSync(logFile, 'utf-8');
57
+ const lines = content.split('\n').filter(Boolean);
58
+ const last50 = lines.slice(-50);
59
+ for (const line of last50) {
60
+ console.log(` ${D}${line}${NC}`);
61
+ }
62
+ return;
63
+ }
64
+
65
+ case 'run': {
66
+ // One-shot: sync + plan + exit
67
+ const config = loadConfig();
68
+ info('Running one-shot PAO pipeline...');
69
+ await runPlanningPipeline(config, { refresh: true });
70
+ return;
71
+ }
72
+
73
+ default:
74
+ fail(`Unknown: nha ops ${sub}`);
75
+ info('Commands: start, stop, status, logs, run');
76
+ }
77
+ }
@@ -0,0 +1,45 @@
1
+ /** nha plan — Generate daily plan using 5 specialist agents */
2
+
3
+ import { runPlanningPipeline } from '../services/ops-pipeline.mjs';
4
+ import { loadConfig } from '../config.mjs';
5
+ import { fail } from '../ui.mjs';
6
+
7
+ export async function cmdPlan(args) {
8
+ const config = loadConfig();
9
+
10
+ if (!config.llm.apiKey) {
11
+ fail('No API key configured. Run: nha config set key YOUR_KEY');
12
+ process.exit(1);
13
+ }
14
+
15
+ let date = null;
16
+ let refresh = false;
17
+ let showOnly = false;
18
+
19
+ for (const arg of args) {
20
+ if (arg === '--refresh') { refresh = true; continue; }
21
+ if (arg === '--show') { showOnly = true; continue; }
22
+ if (arg === 'tomorrow') {
23
+ const d = new Date();
24
+ d.setDate(d.getDate() + 1);
25
+ date = d.toISOString().split('T')[0];
26
+ continue;
27
+ }
28
+ if (arg === 'yesterday') {
29
+ const d = new Date();
30
+ d.setDate(d.getDate() - 1);
31
+ date = d.toISOString().split('T')[0];
32
+ continue;
33
+ }
34
+ if (arg.startsWith('--date=')) {
35
+ date = arg.split('=')[1];
36
+ continue;
37
+ }
38
+ if (/^\d{4}-\d{2}-\d{2}$/.test(arg)) {
39
+ date = arg;
40
+ continue;
41
+ }
42
+ }
43
+
44
+ await runPlanningPipeline(config, { date, refresh, showOnly });
45
+ }