@harbinger-ai/harbinger 0.1.3 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -98,6 +98,51 @@ export async function getFindingCounts() {
98
98
  return dbCounts();
99
99
  }
100
100
 
101
+ // ── Target Counts ───────────────────────────────────────────────────────────
102
+
103
/**
 * Aggregate target counts per program.
 * Returns an object keyed by program_id, each value holding
 * { total, inScope, outOfScope } tallies for that program's targets.
 */
export async function getProgramTargetCounts() {
  await requireAuth();
  const { getDb } = await import('../db/index.js');
  const rows = getDb().prepare(`
    SELECT program_id, COUNT(*) as total,
      SUM(CASE WHEN status = 'in_scope' THEN 1 ELSE 0 END) as in_scope,
      SUM(CASE WHEN status = 'out_of_scope' THEN 1 ELSE 0 END) as out_of_scope
    FROM targets WHERE program_id IS NOT NULL
    GROUP BY program_id
  `).all();
  // Re-key the grouped rows into a program_id → counts lookup.
  return Object.fromEntries(
    rows.map((row) => [
      row.program_id,
      { total: row.total, inScope: row.in_scope, outOfScope: row.out_of_scope },
    ]),
  );
}
120
+
121
/**
 * Bulk-import a list of targets into a program.
 *
 * @param {string|number} programId - Program the targets belong to.
 * @param {Array<{value: string, type?: string, status?: string, notes?: string}>} items
 * @returns {Promise<{imported: number}>} number of targets actually created.
 */
export async function bulkImportTargets(programId, items) {
  await requireAuth();
  if (!programId || !Array.isArray(items) || items.length === 0) return { imported: 0 };
  const { createTarget: dbCreate } = await import('./targets.js');
  let imported = 0;
  for (const item of items) {
    // Optional chaining: a null/undefined entry is skipped instead of throwing.
    if (!item?.value) continue;
    // BUG FIX: the create call was not awaited, so any failure surfaced as an
    // unhandled promise rejection and the function could report `imported`
    // before the rows actually existed. (Harmless no-op if dbCreate is sync.)
    await dbCreate({
      programId,
      type: item.type || 'domain',
      value: item.value.trim(),
      status: item.status || 'in_scope',
      notes: item.notes || null,
    });
    imported++;
  }
  return { imported };
}
139
+
140
/**
 * Export the targets of a program as returned by the targets store.
 *
 * @param {string|number} programId - Program whose targets to export.
 */
export async function exportTargets(programId) {
  await requireAuth();
  const targetsModule = await import('./targets.js');
  return targetsModule.getTargets(programId);
}
145
+
101
146
  // ── Target Sync ─────────────────────────────────────────────────────────────
102
147
 
103
148
  export async function syncTargetsFromPlatform(platform, options = {}) {
@@ -291,8 +291,11 @@ export async function getLlmProviders() {
291
291
  for (const row of rows) {
292
292
  try {
293
293
  const val = JSON.parse(row.value);
294
- // Mask the API key for client display
295
- result[row.key] = { ...val, apiKey: val.apiKey ? val.apiKey : '' };
294
+ // Mask the API key for client — show first 8 + last 4 chars
295
+ const masked = val.apiKey
296
+ ? val.apiKey.slice(0, 8) + '***' + val.apiKey.slice(-4)
297
+ : '';
298
+ result[row.key] = { ...val, apiKey: masked, hasKey: !!val.apiKey };
296
299
  } catch {}
297
300
  }
298
301
  return result;
@@ -323,8 +326,10 @@ export async function saveLlmProvider(provider, config) {
323
326
  let merged = { ...config };
324
327
  if (existing) {
325
328
  const prev = JSON.parse(existing.value);
326
- // If apiKey looks masked or empty, keep existing
327
- if (!config.apiKey && prev.apiKey) merged.apiKey = prev.apiKey;
329
+ // If apiKey looks masked (contains ***) or empty, keep existing
330
+ if ((!config.apiKey || config.apiKey.includes('***')) && prev.apiKey) {
331
+ merged.apiKey = prev.apiKey;
332
+ }
328
333
  }
329
334
 
330
335
  const value = JSON.stringify(merged);
@@ -334,20 +339,32 @@ export async function saveLlmProvider(provider, config) {
334
339
  db.insert(settings).values({ type: 'llm_provider', key: provider, value }).run();
335
340
  }
336
341
 
337
- // Also write to .env for persistence across container restarts
342
+ // Map provider env var names
338
343
  const envMap = {
339
- anthropic: { key: 'ANTHROPIC_API_KEY', model: 'LLM_MODEL', provider: 'anthropic' },
340
- openai: { key: 'OPENAI_API_KEY', model: 'LLM_MODEL', provider: 'openai' },
341
- google: { key: 'GOOGLE_API_KEY', model: 'LLM_MODEL', provider: 'google' },
342
- custom: { key: 'CUSTOM_API_KEY', model: 'LLM_MODEL', provider: 'custom' },
344
+ anthropic: { key: 'ANTHROPIC_API_KEY' },
345
+ openai: { key: 'OPENAI_API_KEY' },
346
+ google: { key: 'GOOGLE_API_KEY' },
347
+ custom: { key: 'CUSTOM_API_KEY' },
348
+ ollama: { key: null },
349
+ lmstudio: { key: null },
343
350
  };
344
351
  const mapping = envMap[provider];
345
- if (mapping && merged.apiKey) {
352
+
353
+ // CRITICAL: Update process.env so the running process can use the key immediately
354
+ if (mapping?.key && merged.apiKey) {
355
+ process.env[mapping.key] = merged.apiKey;
356
+ }
357
+ // For local providers, set the base URL in process.env
358
+ if (merged.baseUrl) {
359
+ process.env.OPENAI_BASE_URL = merged.baseUrl;
360
+ }
361
+
362
+ // Write to .env for persistence across container restarts
363
+ if (mapping?.key && merged.apiKey) {
346
364
  try {
347
365
  const envPath = path.join(process.cwd(), '.env');
348
366
  let envContent = '';
349
367
  try { envContent = fs.readFileSync(envPath, 'utf8'); } catch {}
350
- // Update or append the API key line
351
368
  const keyPattern = new RegExp(`^${mapping.key}=.*$`, 'm');
352
369
  if (keyPattern.test(envContent)) {
353
370
  envContent = envContent.replace(keyPattern, `${mapping.key}=${merged.apiKey}`);
@@ -359,6 +376,21 @@ export async function saveLlmProvider(provider, config) {
359
376
  console.error('Failed to write .env:', envErr);
360
377
  }
361
378
  }
379
+ // Also persist base URL for local providers
380
+ if (merged.baseUrl) {
381
+ try {
382
+ const envPath = path.join(process.cwd(), '.env');
383
+ let envContent = '';
384
+ try { envContent = fs.readFileSync(envPath, 'utf8'); } catch {}
385
+ const pat = /^OPENAI_BASE_URL=.*$/m;
386
+ if (pat.test(envContent)) {
387
+ envContent = envContent.replace(pat, `OPENAI_BASE_URL=${merged.baseUrl}`);
388
+ } else {
389
+ envContent += `\nOPENAI_BASE_URL=${merged.baseUrl}`;
390
+ }
391
+ fs.writeFileSync(envPath, envContent);
392
+ } catch {}
393
+ }
362
394
 
363
395
  return { success: true };
364
396
  } catch (err) {
@@ -380,6 +412,9 @@ export async function setActiveProvider(provider, model) {
380
412
  const path = await import('path');
381
413
  const db = getDb();
382
414
 
415
+ // For local providers (ollama, lmstudio), map to 'custom' for model.js
416
+ const effectiveProvider = (provider === 'ollama' || provider === 'lmstudio') ? 'custom' : provider;
417
+
383
418
  const value = JSON.stringify({ provider, model });
384
419
  const existing = db.select().from(settings)
385
420
  .where(and(eq(settings.type, 'llm_config'), eq(settings.key, 'active_provider')))
@@ -391,13 +426,52 @@ export async function setActiveProvider(provider, model) {
391
426
  db.insert(settings).values({ type: 'llm_config', key: 'active_provider', value }).run();
392
427
  }
393
428
 
429
+ // Also read the provider's API key from DB and apply it to process.env
430
+ const provRow = db.select().from(settings)
431
+ .where(and(eq(settings.type, 'llm_provider'), eq(settings.key, provider)))
432
+ .get();
433
+
434
+ const envKeyMap = {
435
+ anthropic: 'ANTHROPIC_API_KEY',
436
+ openai: 'OPENAI_API_KEY',
437
+ google: 'GOOGLE_API_KEY',
438
+ custom: 'CUSTOM_API_KEY',
439
+ };
440
+
441
+ if (provRow) {
442
+ const provConfig = JSON.parse(provRow.value);
443
+ const envKey = envKeyMap[effectiveProvider];
444
+ if (envKey && provConfig.apiKey) {
445
+ process.env[envKey] = provConfig.apiKey;
446
+ }
447
+ // Set base URL for local/custom providers
448
+ if (provConfig.baseUrl) {
449
+ process.env.OPENAI_BASE_URL = provConfig.baseUrl;
450
+ } else if (provider === 'ollama') {
451
+ process.env.OPENAI_BASE_URL = 'http://host.docker.internal:11434/v1';
452
+ } else if (provider === 'lmstudio') {
453
+ process.env.OPENAI_BASE_URL = 'http://host.docker.internal:1234/v1';
454
+ } else {
455
+ // Cloud provider — clear any local base URL
456
+ delete process.env.OPENAI_BASE_URL;
457
+ }
458
+ }
459
+
394
460
  // Write to .env
395
461
  try {
396
462
  const envPath = path.join(process.cwd(), '.env');
397
463
  let envContent = '';
398
464
  try { envContent = fs.readFileSync(envPath, 'utf8'); } catch {}
399
465
 
400
- const updates = { LLM_PROVIDER: provider, LLM_MODEL: model || '' };
466
+ const updates = { LLM_PROVIDER: effectiveProvider, LLM_MODEL: model || '' };
467
+
468
+ // For local providers set the base URL
469
+ if (provider === 'ollama') {
470
+ updates.OPENAI_BASE_URL = 'http://host.docker.internal:11434/v1';
471
+ } else if (provider === 'lmstudio') {
472
+ updates.OPENAI_BASE_URL = 'http://host.docker.internal:1234/v1';
473
+ }
474
+
401
475
  for (const [envKey, envVal] of Object.entries(updates)) {
402
476
  const pat = new RegExp(`^${envKey}=.*$`, 'm');
403
477
  if (pat.test(envContent)) {
@@ -410,7 +484,7 @@ export async function setActiveProvider(provider, model) {
410
484
  } catch {}
411
485
 
412
486
  // Update process.env for immediate effect
413
- process.env.LLM_PROVIDER = provider;
487
+ process.env.LLM_PROVIDER = effectiveProvider;
414
488
  if (model) process.env.LLM_MODEL = model;
415
489
 
416
490
  return { success: true };
@@ -420,6 +494,122 @@ export async function setActiveProvider(provider, model) {
420
494
  }
421
495
  }
422
496
 
497
/**
 * Scan for locally running LLM providers (Ollama, LM Studio, etc.)
 * Probes each known endpoint (localhost and host.docker.internal) with a
 * 3s timeout and returns an array of detected providers with their models.
 */
export async function scanLocalProviders() {
  await requireAuth();
  const results = [];

  // Known local provider endpoints. `modelsPath` is either a service-root
  // path (/api/..., /v1/...) or a path relative to an OpenAI /v1 base.
  const localEndpoints = [
    { id: 'ollama', name: 'Ollama', urls: ['http://localhost:11434', 'http://host.docker.internal:11434'], modelsPath: '/api/tags' },
    { id: 'lmstudio', name: 'LM Studio', urls: ['http://localhost:1234/v1', 'http://host.docker.internal:1234/v1'], modelsPath: '/models' },
    { id: 'localai', name: 'LocalAI', urls: ['http://localhost:8080/v1', 'http://host.docker.internal:8080/v1'], modelsPath: '/models' },
    { id: 'llamacpp', name: 'llama.cpp', urls: ['http://localhost:8081', 'http://host.docker.internal:8081'], modelsPath: '/v1/models' },
    { id: 'vllm', name: 'vLLM', urls: ['http://localhost:8000/v1', 'http://host.docker.internal:8000/v1'], modelsPath: '/models' },
    { id: 'jan', name: 'Jan', urls: ['http://localhost:1337/v1', 'http://host.docker.internal:1337/v1'], modelsPath: '/models' },
  ];

  for (const endpoint of localEndpoints) {
    for (const baseUrl of endpoint.urls) {
      const controller = new AbortController();
      const timeout = setTimeout(() => controller.abort(), 3000);
      try {
        // BUG FIX: `/v1` was stripped unconditionally, which broke every
        // OpenAI-compatible provider (e.g. LM Studio: .../v1 + /models must
        // stay /v1/models, but became http://host:1234/models). Only strip
        // the base's /v1 when modelsPath is a service-root path.
        const root = endpoint.modelsPath.startsWith('/api') || endpoint.modelsPath.startsWith('/v1')
          ? baseUrl.replace(/\/v1$/, '')
          : baseUrl;
        const res = await fetch(root + endpoint.modelsPath, { signal: controller.signal });

        if (res.ok) {
          const data = await res.json();
          let models = [];

          // Parse models based on provider format
          if (endpoint.id === 'ollama') {
            models = (data.models || []).map((m) => ({
              id: m.name || m.model,
              name: m.name || m.model,
              size: m.size,
              modified: m.modified_at,
              family: m.details?.family,
              paramSize: m.details?.parameter_size,
              quantization: m.details?.quantization_level,
            }));
          } else {
            // OpenAI-compatible format
            models = (data.data || data || []).map((m) => ({
              id: m.id,
              name: m.id,
              owned_by: m.owned_by,
            }));
          }

          results.push({
            id: endpoint.id,
            name: endpoint.name,
            baseUrl: baseUrl,
            available: true,
            models,
          });
          break; // Found this provider, skip remaining URLs
        }
      } catch {
        // Not available at this URL, try next
      } finally {
        // BUG FIX: previously the timer was only cleared on the success path,
        // leaving a pending timeout whenever the fetch threw.
        clearTimeout(timeout);
      }
    }
  }

  return results;
}
565
+
566
/**
 * Get models from a specific local provider endpoint.
 * Tries the Ollama native API (/api/tags) first, then falls back to the
 * OpenAI-compatible /models endpoint. Returns [] on any failure.
 *
 * @param {string} providerUrl - Base URL of the local provider (may include /v1).
 * @returns {Promise<Array<object>>} normalized model descriptors.
 */
export async function getLocalModels(providerUrl) {
  await requireAuth();
  try {
    // Try Ollama format first
    const controller = new AbortController();
    const timeout = setTimeout(() => controller.abort(), 5000);
    let res;
    try {
      const url = providerUrl.replace(/\/v1$/, '') + '/api/tags';
      res = await fetch(url, { signal: controller.signal }).catch(() => null);
    } finally {
      // BUG FIX: the timer was only cleared on the ok path, so a failed or
      // non-ok probe leaked a pending 5s timeout.
      clearTimeout(timeout);
    }

    if (res?.ok) {
      const data = await res.json();
      return (data.models || []).map((m) => ({
        id: m.name || m.model,
        name: m.name || m.model,
        size: m.size,
        family: m.details?.family,
        paramSize: m.details?.parameter_size,
      }));
    }

    // Try OpenAI-compatible format
    const url = providerUrl.replace(/\/$/, '') + (providerUrl.includes('/v1') ? '' : '/v1') + '/models';
    const controller2 = new AbortController();
    const timeout2 = setTimeout(() => controller2.abort(), 5000);
    try {
      res = await fetch(url, { signal: controller2.signal }).catch(() => null);
    } finally {
      clearTimeout(timeout2);
    }

    if (res?.ok) {
      const data = await res.json();
      return (data.data || data || []).map((m) => ({
        id: m.id,
        name: m.id,
        owned_by: m.owned_by,
      }));
    }

    return [];
  } catch {
    return [];
  }
}
612
+
423
613
  /**
424
614
  * Test an LLM provider connection by sending a simple message.
425
615
  */
@@ -507,6 +697,37 @@ export async function testLlmConnection(provider) {
507
697
  return { success: true, response: data.candidates?.[0]?.content?.parts?.[0]?.text || 'OK' };
508
698
  }
509
699
 
700
+ // Local providers (Ollama, LM Studio, etc.) — test via OpenAI-compatible endpoint
701
+ if (provider === 'ollama' || provider === 'lmstudio' || provider === 'localai' || provider === 'llamacpp' || provider === 'vllm' || provider === 'jan') {
702
+ const baseUrls = {
703
+ ollama: 'http://host.docker.internal:11434/v1',
704
+ lmstudio: 'http://host.docker.internal:1234/v1',
705
+ localai: 'http://host.docker.internal:8080/v1',
706
+ llamacpp: 'http://host.docker.internal:8081/v1',
707
+ vllm: 'http://host.docker.internal:8000/v1',
708
+ jan: 'http://host.docker.internal:1337/v1',
709
+ };
710
+ const base = config.baseUrl || baseUrls[provider] || baseUrls.ollama;
711
+ const testModel = config.model || 'llama3';
712
+
713
+ // First try to just list models (lightweight check)
714
+ try {
715
+ const controller = new AbortController();
716
+ const timeout = setTimeout(() => controller.abort(), 5000);
717
+ const modelsUrl = base.replace(/\/v1$/, '') + (provider === 'ollama' ? '/api/tags' : '/models');
718
+ const modelsRes = await fetch(modelsUrl, { signal: controller.signal });
719
+ clearTimeout(timeout);
720
+ if (modelsRes.ok) {
721
+ const data = await modelsRes.json();
722
+ const count = data.models?.length || data.data?.length || 0;
723
+ return { success: true, response: `Connected. ${count} model(s) available.` };
724
+ }
725
+ return { error: `Cannot reach ${provider} at ${base}` };
726
+ } catch (e) {
727
+ return { error: `Cannot reach ${provider}: ${e.message}` };
728
+ }
729
+ }
730
+
510
731
  return { error: 'Unknown provider' };
511
732
  } catch (err) {
512
733
  return { error: err.message };
@@ -528,12 +749,28 @@ export async function getActiveProvider() {
528
749
  .where(and(eq(settings.type, 'llm_config'), eq(settings.key, 'active_provider')))
529
750
  .get();
530
751
 
531
- if (row) return JSON.parse(row.value);
752
+ if (row) {
753
+ const active = JSON.parse(row.value);
754
+ // Also return whether the API key is loaded in process.env
755
+ const envKey = {
756
+ anthropic: 'ANTHROPIC_API_KEY',
757
+ openai: 'OPENAI_API_KEY',
758
+ google: 'GOOGLE_API_KEY',
759
+ custom: 'CUSTOM_API_KEY',
760
+ }[active.provider === 'ollama' || active.provider === 'lmstudio' ? 'custom' : active.provider];
761
+ active.keyLoaded = envKey ? !!process.env[envKey] : true;
762
+ active.envProvider = process.env.LLM_PROVIDER;
763
+ active.envModel = process.env.LLM_MODEL;
764
+ return active;
765
+ }
532
766
 
533
767
  // Fallback: read from env
534
768
  return {
535
769
  provider: process.env.LLM_PROVIDER || 'anthropic',
536
770
  model: process.env.LLM_MODEL || '',
771
+ keyLoaded: !!process.env.ANTHROPIC_API_KEY,
772
+ envProvider: process.env.LLM_PROVIDER,
773
+ envModel: process.env.LLM_MODEL,
537
774
  };
538
775
  } catch (err) {
539
776
  return { provider: process.env.LLM_PROVIDER || 'anthropic', model: process.env.LLM_MODEL || '' };
@@ -615,7 +852,7 @@ export async function updateAgentFile(agentId, filename, content) {
615
852
  }
616
853
 
617
854
  /**
618
- * Create a new agent with identity files.
855
+ * Create a new agent with identity files and optional extended configuration.
619
856
  */
620
857
  export async function createAgent(identity) {
621
858
  await requireAuth();
@@ -630,16 +867,83 @@ export async function createAgent(identity) {
630
867
  fs.mkdirSync(agentPath, { recursive: true });
631
868
 
632
869
  // IDENTITY.md
633
- const identityContent = `Name: ${identity.name || dirName}\nCodename: ${identity.codename || dirName.toUpperCase()}\nRole: ${identity.role || 'General Agent'}\nSpecialization: ${identity.specialization || 'General'}`;
634
- fs.writeFileSync(path.join(agentPath, 'IDENTITY.md'), identityContent, 'utf8');
870
+ const identityLines = [
871
+ `Name: ${identity.name || dirName}`,
872
+ `Codename: ${identity.codename || dirName.toUpperCase()}`,
873
+ `Role: ${identity.role || 'General Agent'}`,
874
+ `Specialization: ${identity.specialization || 'General'}`,
875
+ ];
876
+ if (identity.description) identityLines.push(`Description: ${identity.description}`);
877
+ if (identity.tags?.length) identityLines.push(`Tags: ${identity.tags.join(', ')}`);
878
+ if (identity.goal) identityLines.push(`Goal: ${identity.goal}`);
879
+ fs.writeFileSync(path.join(agentPath, 'IDENTITY.md'), identityLines.join('\n'), 'utf8');
635
880
 
636
881
  // SOUL.md
637
882
  const soulContent = identity.soul || `# ${identity.codename || dirName.toUpperCase()}\n\nYou are ${identity.name || dirName}, a specialized AI agent.\n\n## Role\n${identity.role || 'General Agent'}\n\n## Specialization\n${identity.specialization || 'General purpose tasks'}`;
638
883
  fs.writeFileSync(path.join(agentPath, 'SOUL.md'), soulContent, 'utf8');
639
884
 
640
- // CONFIG.yaml
641
- const configContent = `codename: ${identity.codename || dirName.toUpperCase()}\nrole: ${identity.role || 'General Agent'}\nactive: true`;
642
- fs.writeFileSync(path.join(agentPath, 'CONFIG.yaml'), configContent, 'utf8');
885
+ // SKILLS.md
886
+ if (identity.skills) {
887
+ fs.writeFileSync(path.join(agentPath, 'SKILLS.md'), identity.skills, 'utf8');
888
+ }
889
+
890
+ // TOOLS.md
891
+ if (identity.tools) {
892
+ fs.writeFileSync(path.join(agentPath, 'TOOLS.md'), identity.tools, 'utf8');
893
+ }
894
+
895
+ // HEARTBEAT.md
896
+ if (identity.heartbeat) {
897
+ fs.writeFileSync(path.join(agentPath, 'HEARTBEAT.md'), identity.heartbeat, 'utf8');
898
+ }
899
+
900
+ // CONFIG.yaml — extended with LLM, scheduling, resources
901
+ const configLines = [
902
+ `codename: ${identity.codename || dirName.toUpperCase()}`,
903
+ `role: ${identity.role || 'General Agent'}`,
904
+ `active: true`,
905
+ ];
906
+ if (identity.llm) {
907
+ configLines.push(`\n# LLM Configuration`);
908
+ if (identity.llm.provider) configLines.push(`llm_provider: ${identity.llm.provider}`);
909
+ if (identity.llm.model) configLines.push(`llm_model: ${identity.llm.model}`);
910
+ if (identity.llm.temperature != null) configLines.push(`llm_temperature: ${identity.llm.temperature}`);
911
+ if (identity.llm.maxTokens) configLines.push(`llm_max_tokens: ${identity.llm.maxTokens}`);
912
+ }
913
+ if (identity.scheduling) {
914
+ configLines.push(`\n# Scheduling`);
915
+ if (identity.scheduling.mode) configLines.push(`run_mode: ${identity.scheduling.mode}`);
916
+ if (identity.scheduling.schedule) configLines.push(`schedule: "${identity.scheduling.schedule}"`);
917
+ if (identity.scheduling.maxConcurrent) configLines.push(`max_concurrent: ${identity.scheduling.maxConcurrent}`);
918
+ if (identity.scheduling.timeout) configLines.push(`timeout: ${identity.scheduling.timeout}`);
919
+ }
920
+ if (identity.resources) {
921
+ configLines.push(`\n# Resources`);
922
+ if (identity.resources.cpu) configLines.push(`cpu: ${identity.resources.cpu}`);
923
+ if (identity.resources.memory) configLines.push(`memory: "${identity.resources.memory}"`);
924
+ if (identity.resources.disk) configLines.push(`disk: "${identity.resources.disk}"`);
925
+ if (identity.resources.network) configLines.push(`network: ${identity.resources.network}`);
926
+ }
927
+ if (identity.envVars && Object.keys(identity.envVars).length > 0) {
928
+ configLines.push(`\n# Environment Variables`);
929
+ configLines.push(`env:`);
930
+ for (const [k, v] of Object.entries(identity.envVars)) {
931
+ configLines.push(` ${k}: "${v}"`);
932
+ }
933
+ }
934
+ if (identity.mcpTools?.length) {
935
+ configLines.push(`\n# MCP Tools`);
936
+ configLines.push(`tools:`);
937
+ for (const t of identity.mcpTools) {
938
+ configLines.push(` - ${t}`);
939
+ }
940
+ }
941
+ if (identity.team) {
942
+ configLines.push(`\n# Team`);
943
+ if (identity.team.swarm) configLines.push(`swarm: ${identity.team.swarm}`);
944
+ if (identity.team.supervisor) configLines.push(`supervisor: ${identity.team.supervisor}`);
945
+ }
946
+ fs.writeFileSync(path.join(agentPath, 'CONFIG.yaml'), configLines.join('\n'), 'utf8');
643
947
 
644
948
  return { success: true, id: dirName };
645
949
  } catch (err) {
@@ -648,6 +952,20 @@ export async function createAgent(identity) {
648
952
  }
649
953
  }
650
954
 
955
/**
 * Get available MCP tool names for agent tool assignment.
 * Falls back to an empty list when the MCP client is unavailable.
 */
export async function getAvailableMcpTools() {
  await requireAuth();
  try {
    const mcp = await import('../mcp/client.js');
    const tools = await mcp.loadMcpTools();
    return tools.map(({ name, description }) => ({ name, description }));
  } catch {
    return [];
  }
}
968
+
651
969
  /**
652
970
  * Create a job for a specific agent.
653
971
  */