opc-agent 4.2.0 → 4.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119)
  1. package/.opc/memory.db +0 -0
  2. package/COMPETITIVE-GAP.md +92 -92
  3. package/CONTRIBUTING.md +36 -36
  4. package/README.md +290 -290
  5. package/README.zh-CN.md +269 -269
  6. package/STUDIO-REWRITE-TASK.md +76 -0
  7. package/dist/channels/telegram.d.ts +5 -0
  8. package/dist/channels/telegram.d.ts.map +1 -1
  9. package/dist/channels/telegram.js +108 -0
  10. package/dist/channels/telegram.js.map +1 -1
  11. package/dist/channels/voice.d.ts +71 -97
  12. package/dist/channels/voice.d.ts.map +1 -1
  13. package/dist/channels/voice.js +369 -347
  14. package/dist/channels/voice.js.map +1 -1
  15. package/dist/channels/web.d.ts.map +1 -1
  16. package/dist/channels/web.js +8 -2
  17. package/dist/channels/web.js.map +1 -1
  18. package/dist/channels/wechat.js +6 -6
  19. package/dist/cli/chat.d.ts +4 -1
  20. package/dist/cli/chat.d.ts.map +1 -1
  21. package/dist/cli/chat.js +680 -73
  22. package/dist/cli/chat.js.map +1 -1
  23. package/dist/cli/setup.js +1 -1
  24. package/dist/cli/setup.js.map +1 -1
  25. package/dist/cli.js +373 -280
  26. package/dist/cli.js.map +1 -1
  27. package/dist/core/a2a-http.d.ts +75 -0
  28. package/dist/core/a2a-http.d.ts.map +1 -0
  29. package/dist/core/a2a-http.js +217 -0
  30. package/dist/core/a2a-http.js.map +1 -0
  31. package/dist/core/a2a.d.ts +2 -0
  32. package/dist/core/a2a.d.ts.map +1 -1
  33. package/dist/core/a2a.js +6 -1
  34. package/dist/core/a2a.js.map +1 -1
  35. package/dist/core/agent.d.ts +1 -0
  36. package/dist/core/agent.d.ts.map +1 -1
  37. package/dist/core/agent.js +3 -0
  38. package/dist/core/agent.js.map +1 -1
  39. package/dist/core/gateway-registry.d.ts +116 -0
  40. package/dist/core/gateway-registry.d.ts.map +1 -0
  41. package/dist/core/gateway-registry.js +280 -0
  42. package/dist/core/gateway-registry.js.map +1 -0
  43. package/dist/core/model-recommender.d.ts +40 -0
  44. package/dist/core/model-recommender.d.ts.map +1 -0
  45. package/dist/core/model-recommender.js +186 -0
  46. package/dist/core/model-recommender.js.map +1 -0
  47. package/dist/core/priority-queue.d.ts +100 -0
  48. package/dist/core/priority-queue.d.ts.map +1 -0
  49. package/dist/core/priority-queue.js +181 -0
  50. package/dist/core/priority-queue.js.map +1 -0
  51. package/dist/core/runtime.d.ts.map +1 -1
  52. package/dist/core/runtime.js +192 -22
  53. package/dist/core/runtime.js.map +1 -1
  54. package/dist/deploy/index.js +56 -56
  55. package/dist/doctor.d.ts +1 -0
  56. package/dist/doctor.d.ts.map +1 -1
  57. package/dist/doctor.js +155 -10
  58. package/dist/doctor.js.map +1 -1
  59. package/dist/index.d.ts +10 -3
  60. package/dist/index.d.ts.map +1 -1
  61. package/dist/index.js +24 -13
  62. package/dist/index.js.map +1 -1
  63. package/dist/memory/deepbrain.d.ts +1 -1
  64. package/dist/memory/deepbrain.d.ts.map +1 -1
  65. package/dist/memory/deepbrain.js +95 -4
  66. package/dist/memory/deepbrain.js.map +1 -1
  67. package/dist/memory/evolve-engine.d.ts +113 -0
  68. package/dist/memory/evolve-engine.d.ts.map +1 -0
  69. package/dist/memory/evolve-engine.js +549 -0
  70. package/dist/memory/evolve-engine.js.map +1 -0
  71. package/dist/memory/index.d.ts +2 -0
  72. package/dist/memory/index.d.ts.map +1 -1
  73. package/dist/memory/index.js +3 -1
  74. package/dist/memory/index.js.map +1 -1
  75. package/dist/memory/sqlite-store.d.ts +40 -0
  76. package/dist/memory/sqlite-store.d.ts.map +1 -0
  77. package/dist/memory/sqlite-store.js +269 -0
  78. package/dist/memory/sqlite-store.js.map +1 -0
  79. package/dist/memory/user-profiler.d.ts +8 -0
  80. package/dist/memory/user-profiler.d.ts.map +1 -1
  81. package/dist/memory/user-profiler.js +89 -0
  82. package/dist/memory/user-profiler.js.map +1 -1
  83. package/dist/scheduler/cron-engine.d.ts.map +1 -1
  84. package/dist/scheduler/cron-engine.js +3 -36
  85. package/dist/scheduler/cron-engine.js.map +1 -1
  86. package/dist/scheduler/proactive.d.ts +62 -0
  87. package/dist/scheduler/proactive.d.ts.map +1 -0
  88. package/dist/scheduler/proactive.js +185 -0
  89. package/dist/scheduler/proactive.js.map +1 -0
  90. package/dist/skills/auto-learn.d.ts.map +1 -1
  91. package/dist/skills/auto-learn.js +65 -11
  92. package/dist/skills/auto-learn.js.map +1 -1
  93. package/dist/skills/builtin/index.d.ts.map +1 -1
  94. package/dist/skills/builtin/index.js +163 -30
  95. package/dist/skills/builtin/index.js.map +1 -1
  96. package/dist/skills/types.d.ts +1 -1
  97. package/dist/skills/types.d.ts.map +1 -1
  98. package/dist/skills/types.js +1 -0
  99. package/dist/skills/types.js.map +1 -1
  100. package/dist/studio/server.d.ts +1 -0
  101. package/dist/studio/server.d.ts.map +1 -1
  102. package/dist/studio/server.js +148 -17
  103. package/dist/studio/server.js.map +1 -1
  104. package/dist/studio-ui/index.html +867 -2630
  105. package/dist/ui/components.js +105 -105
  106. package/examples/README.md +22 -22
  107. package/examples/basic-agent.ts +90 -90
  108. package/examples/brain-integration.ts +71 -71
  109. package/examples/multi-channel.ts +74 -74
  110. package/install.ps1 +127 -127
  111. package/install.sh +154 -154
  112. package/models.json +164 -164
  113. package/package.json +5 -2
  114. package/scripts/install.ps1 +31 -31
  115. package/scripts/install.sh +40 -40
  116. package/templates/ecommerce-assistant/README.md +45 -45
  117. package/templates/ecommerce-assistant/oad.yaml +47 -47
  118. package/templates/tech-support/README.md +43 -43
  119. package/templates/tech-support/oad.yaml +45 -45
package/dist/cli.js CHANGED
@@ -40,6 +40,7 @@ const path = __importStar(require("path"));
  const yaml = __importStar(require("js-yaml"));
  const readline = __importStar(require("readline"));
  const runtime_1 = require("./core/runtime");
+ const model_recommender_1 = require("./core/model-recommender");
  const customer_service_1 = require("./templates/customer-service");
  const sales_assistant_1 = require("./templates/sales-assistant");
  const knowledge_base_1 = require("./templates/knowledge-base");
@@ -60,7 +61,6 @@ const hermes_1 = require("./deploy/hermes");
  const index_1 = require("./deploy/index");
  const workflow_1 = require("./core/workflow");
  const versioning_1 = require("./core/versioning");
- const providers_1 = require("./providers");
  const knowledge_1 = require("./core/knowledge");
  const doctor_1 = require("./doctor");
  const child_process_1 = require("child_process");
@@ -124,7 +124,7 @@ async function select(question, options) {
  program
  .name('opc')
  .description('OPC Agent - Open Agent Framework for business workstations')
- .version('2.0.0');
+ .version(require('../package.json').version);
  // ── Init command ─────────────────────────────────────────────
  program
  .command('init')
@@ -186,7 +186,7 @@ program
  }
  const roleDisplayName = roleMeta.name || matched.role;
  const roleDescription = roleMeta.name_zh ? `${roleMeta.name} (${roleMeta.name_zh})` : (roleMeta.name || matched.role);
- console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)} ${roleDisplayName}`);
+ console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)} - ${roleDisplayName}`);
  // Create directories
  fs.mkdirSync(dir, { recursive: true });
  fs.mkdirSync(path.join(dir, 'src', 'skills'), { recursive: true });
@@ -205,9 +205,9 @@ program
  // Company-specific knowledge belongs to Desk (closed-source), not here.
  const workstationSeedFromRole = workstationMatch?.[0]?.trim() || '';
  fs.writeFileSync(path.join(dir, 'brain-seeds', 'workstation.md'), workstationSeedFromRole || `# Workstation Knowledge\n\n## Tools & Environment\n\nCommon tools and setup for this workstation role.\n\n## Workflows\n\nStandard operating procedures and workflows.\n\n## Best Practices\n\nIndustry best practices for this role.\n`);
- // agent.yaml with role system prompt and brain seeds
+ // oad.yaml with role system prompt and brain seeds (agent.yaml is no longer generated)
  const firstLine = systemPromptContent.split('\n').find((l) => l.trim() && !l.startsWith('#'))?.trim() || 'You are a helpful AI assistant.';
- fs.writeFileSync(path.join(dir, 'agent.yaml'), `apiVersion: opc/v1
+ fs.writeFileSync(path.join(dir, 'oad.yaml'), `apiVersion: opc/v1
  kind: Agent
  metadata:
  name: ${name}
@@ -251,14 +251,14 @@ spec:
  if (roleData.files['oad.yaml']) {
  fs.writeFileSync(path.join(dir, 'oad.yaml'), roleData.files['oad.yaml']);
  }
- // src/index.ts entry point (same as generic)
+ // src/index.ts - entry point (same as generic)
  fs.writeFileSync(path.join(dir, 'src', 'index.ts'), `import { AgentRuntime } from 'opc-agent';
  import { EchoSkill } from './skills/echo';
  import { readFileSync, existsSync } from 'fs';

  async function main() {
  const runtime = new AgentRuntime();
- const config = await runtime.loadConfig('./agent.yaml');
+ const config = await runtime.loadConfig('./oad.yaml');

  const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
  const context = existsSync('./CONTEXT.md') ? readFileSync('./CONTEXT.md', 'utf-8') : '';
@@ -305,15 +305,15 @@ export class EchoSkill extends BaseSkill {
  fs.writeFileSync(path.join(dir, 'package.json'), JSON.stringify({ name, version: '1.0.0', private: true, scripts: { start: 'opc run', dev: 'opc dev', chat: 'opc chat', build: 'tsc' }, dependencies: { 'opc-agent': '^1.3.0' }, devDependencies: { typescript: '^5.5.0', tsx: '^4.0.0' } }, null, 2));
  // .gitignore, .env.example, .env
  fs.writeFileSync(path.join(dir, '.gitignore'), 'node_modules\ndist\n.env\n.opc-knowledge.json\ndata/\n');
- fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\nOPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
- fs.writeFileSync(path.join(dir, '.env'), `OPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
+ fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\n# Ollama (free local, default, no API key needed):\nOPC_LLM_BASE_URL=http://localhost:11434/v1\nOPC_LLM_MODEL=qwen2.5\n\n# To use a commercial model, uncomment the following:\n# OPC_LLM_API_KEY=your-api-key-here\n# OPC_LLM_BASE_URL=https://api.deepseek.com/v1\n# OPC_LLM_MODEL=deepseek-chat\n`);
+ fs.writeFileSync(path.join(dir, '.env'), `# Ollama (free local) - no API key needed\nOPC_LLM_BASE_URL=http://localhost:11434/v1\nOPC_LLM_MODEL=qwen2.5\n`);
  // README.md
  fs.writeFileSync(path.join(dir, 'README.md'), `# ${name}\n\nCreated with [OPC Agent](https://github.com/Deepleaper/opc-agent) using the \`${matched.category}/${matched.role}\` workstation role.\n\n## Quick Start\n\n\`\`\`bash\nnpm install\nollama pull qwen2.5\nnpx tsx src/index.ts\n\`\`\`\n\nOpen [http://localhost:3000](http://localhost:3000)\n`);
  // Dockerfile + docker-compose
- fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml agent.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
- fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./agent.yaml:/app/agent.yaml:ro\n restart: unless-stopped\n`);
+ fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
+ fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./oad.yaml:/app/oad.yaml:ro\n restart: unless-stopped\n`);
  console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')} from role ${color.cyan(matched.category + '/' + matched.role)}`);
- console.log(` ${icon.file} agent.yaml - Agent definition with role system prompt`);
+ console.log(` ${icon.file} oad.yaml - Agent definition with role system prompt`);
  console.log(` ${icon.file} SOUL.md - Role personality (${systemPromptContent.split('\n').length} lines)`);
  console.log(` ${icon.file} CONTEXT.md - Role context & documentation`);
  console.log(` ${icon.file} brain-seeds/ - 3-tier brain seed knowledge`);
@@ -344,7 +344,7 @@ export class EchoSkill extends BaseSkill {
  }
  }
  catch {
- // Hub unreachable fall back to bundled templates
+ // Hub unreachable - fall back to bundled templates
  }
  let template;
  let selectedHubTemplate;
@@ -364,6 +364,156 @@ export class EchoSkill extends BaseSkill {
  else {
  template = await select('Select a template:', Object.entries(TEMPLATES).map(([value, { label }]) => ({ value, label })));
  }
+ // ── Hardware detection + smart model recommendation ──
+ // ── Hardware detection + remote model recommendation ──
+ const sys = (0, model_recommender_1.detectSystem)();
+ const allModels = await (0, model_recommender_1.fetchModelList)();
+ // ── LLM provider selection (Ollama-first) ──
+ let llmProvider = 'ollama';
+ let llmModel = 'qwen2.5';
+ let llmBaseUrl = 'http://localhost:11434/v1';
+ let llmApiKey = '';
+ let ollamaRunning = false;
+ let modelNames = [];
+ // Probe Ollama first, whether running with --yes or interactively
+ try {
+ const controller = new AbortController();
+ const ollamaTimeout = setTimeout(() => controller.abort(), 3000);
+ const ollamaRes = await fetch('http://localhost:11434/api/tags', { signal: controller.signal });
+ clearTimeout(ollamaTimeout);
+ const ollamaData = await ollamaRes.json();
+ modelNames = (ollamaData.models || []).map((m) => m.name || m.model);
+ ollamaRunning = true;
+ if (opts.yes && modelNames.length > 0) {
+ const rec = (0, model_recommender_1.recommendModels)(allModels, sys, modelNames);
+ // --yes: prefer best installed recommended model
+ const bestInstalled = rec.installed.length > 0 ? rec.installed[rec.installed.length - 1] : null;
+ // Filter out embedding-only models (can't chat)
+ const chatModels = modelNames.filter(m => !m.includes('embed'));
+ llmModel = bestInstalled ? bestInstalled.name : (chatModels[0] || 'qwen2.5:7b');
+ }
+ }
+ catch {
+ ollamaRunning = false;
+ }
+ // Compute recommendation (used by both interactive branches)
+ const rec = (0, model_recommender_1.recommendModels)(allModels, sys, modelNames);
+ if (!opts.yes) {
+ if (ollamaRunning) {
+ console.log(`\n ${icon.info} ${color.dim('Detecting Ollama...')}`);
+ console.log(` ${icon.success} Ollama is running, found ${modelNames.length} models`);
+ console.log(` ${icon.info} System: ${sys.totalRAM}GB RAM (${sys.freeRAM}GB free), ${sys.cpuCount} CPU cores`);
+ console.log(` ${icon.info} Recommended model: ${color.cyan(rec.best.name)} (${rec.best.size}) - ${rec.best.desc}`);
+ // Choose a provider
+ llmProvider = await select('Choose an LLM engine:', [
+ { value: 'ollama', label: '🟢 Ollama (free local, recommended) - detected running' },
+ { value: 'deepseek', label: '🔵 DeepSeek - cost-effective Chinese model' },
+ { value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
+ { value: 'anthropic', label: '🟣 Anthropic (Claude)' },
+ { value: 'custom', label: '⚙️ Custom (enter a Base URL manually)' },
+ ]);
+ if (llmProvider === 'ollama') {
+ // Installed models plus recommended models not yet downloaded
+ const modelOptions = [
+ ...rec.installed.map((m) => {
+ const isBest = m.name === rec.best.name ? ' ⭐ recommended' : '';
+ return { value: m.name, label: `${m.name} (${m.size}, ${m.desc})${isBest} [installed]` };
+ }),
+ // Also show installed models not in recommendation list
+ ...modelNames.filter(n => !rec.installed.find(m => m.name === n)).map(n => ({ value: n, label: `${n} [installed]` })),
+ ...rec.toDownload.map((m) => ({
+ value: `pull:${m.name}`,
+ label: `${m.name} (${m.size}, ${m.desc}) [needs download]`,
+ })),
+ ];
+ if (modelOptions.length > 0) {
+ const chosen = await select('Choose an Ollama model:', modelOptions);
+ if (chosen.startsWith('pull:')) {
+ const pullModel = chosen.slice(5);
+ console.log(`\n ${icon.info} Downloading ${color.cyan(pullModel)}...`);
+ console.log(` Run ${color.cyan(`ollama pull ${pullModel}`)} to download`);
+ console.log(` Once the download finishes, run ${color.cyan('opc run')} to start\n`);
+ llmModel = pullModel;
+ }
+ else {
+ llmModel = chosen;
+ }
+ }
+ else {
+ // No local models; suggest downloads
+ console.log(` ${color.yellow('⚠️')} No downloaded models found`);
+ console.log(` ${icon.info} Based on your hardware (${sys.freeRAM}GB free), recommended downloads:`);
+ for (const m of rec.suitable.slice(-3)) {
+ console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
+ }
+ llmModel = rec.best.name;
+ }
+ }
+ }
+ else {
+ // Ollama not running
+ console.log(`\n ${icon.info} ${color.dim('Detecting Ollama...')}`);
+ console.log(` ${color.yellow('⚠️')} Ollama is not running or not installed`);
+ llmProvider = await select('Choose an LLM engine:', [
+ { value: 'ollama', label: '🟢 Ollama (free local, recommended) - install first: https://ollama.ai' },
+ { value: 'deepseek', label: '🔵 DeepSeek - cost-effective Chinese model' },
+ { value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
+ { value: 'anthropic', label: '🟣 Anthropic (Claude)' },
+ { value: 'custom', label: '⚙️ Custom (enter a Base URL manually)' },
+ ]);
+ if (llmProvider === 'ollama') {
+ console.log(`\n ${icon.info} Ollama installation guide:`);
+ console.log(` 1. Visit ${color.cyan('https://ollama.ai')} to download and install`);
+ console.log(` ${icon.info} Based on your hardware (${sys.totalRAM}GB RAM, ${sys.freeRAM}GB free), recommended:`);
+ for (const m of rec.suitable.slice(-3)) {
+ console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
+ }
+ console.log(` 3. Then run ${color.cyan('opc run')} to start chatting\n`);
+ llmModel = rec.best.name;
+ }
+ }
+ // Commercial models need an API key
+ if (llmProvider === 'deepseek') {
+ llmBaseUrl = 'https://api.deepseek.com/v1';
+ llmModel = 'deepseek-chat';
+ llmApiKey = await promptUser('Enter your DeepSeek API Key (can be set later in .env; press Enter to skip)');
+ if (!llmApiKey) {
+ console.log(` ${icon.info} Set ${color.bold('OPC_LLM_API_KEY')} later in the ${color.cyan('.env')} file`);
+ }
+ }
+ else if (llmProvider === 'openai') {
+ llmBaseUrl = 'https://api.openai.com/v1';
+ llmModel = 'gpt-4o-mini';
+ llmApiKey = await promptUser('Enter your OpenAI API Key (can be set later in .env; press Enter to skip)');
+ if (!llmApiKey) {
+ console.log(` ${icon.info} Set ${color.bold('OPC_LLM_API_KEY')} later in the ${color.cyan('.env')} file`);
+ }
+ }
+ else if (llmProvider === 'anthropic') {
+ llmBaseUrl = 'https://api.anthropic.com/v1';
+ llmModel = 'claude-sonnet-4-20250514';
+ llmApiKey = await promptUser('Enter your Anthropic API Key (can be set later in .env; press Enter to skip)');
+ if (!llmApiKey) {
+ console.log(` ${icon.info} Set ${color.bold('OPC_LLM_API_KEY')} later in the ${color.cyan('.env')} file`);
+ }
+ }
+ else if (llmProvider === 'custom') {
+ llmBaseUrl = await promptUser('Enter the Base URL', 'http://localhost:11434/v1');
+ llmModel = await promptUser('Enter the model name', 'qwen2.5');
+ llmApiKey = await promptUser('Enter an API Key (optional; press Enter to skip)');
+ // Try to infer the provider
+ if (llmBaseUrl.includes('deepseek.com'))
+ llmProvider = 'deepseek';
+ else if (llmBaseUrl.includes('openai.com'))
+ llmProvider = 'openai';
+ else if (llmBaseUrl.includes('anthropic.com'))
+ llmProvider = 'anthropic';
+ else if (llmBaseUrl.includes('localhost:11434'))
+ llmProvider = 'ollama';
+ else
+ llmProvider = 'openai'; // OpenAI-compatible fallback
+ }
+ }
  const dir = path.resolve(name);
  if (fs.existsSync(dir)) {
  console.error(`\n${icon.error} Directory ${color.bold(name)} already exists.`);
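
Taken together, the added block follows one sequence: detect hardware, fetch the model catalog, probe the local Ollama daemon, then recommend. A minimal standalone sketch of that sequence follows; the import path is an assumption, and the detectSystem/fetchModelList/recommendModels signatures are taken from their uses in this diff:

const { detectSystem, fetchModelList, recommendModels } = require('opc-agent/dist/core/model-recommender'); // path is an assumption

async function pickModel() {
  const sys = detectSystem(); // { totalRAM, freeRAM, cpuCount, platform, arch }
  const catalog = await fetchModelList(); // remote or cached recommendation list
  let installed = [];
  try {
    // Same 3-second probe the CLI uses against the local Ollama daemon
    const ctrl = new AbortController();
    const t = setTimeout(() => ctrl.abort(), 3000);
    const res = await fetch('http://localhost:11434/api/tags', { signal: ctrl.signal });
    clearTimeout(t);
    installed = ((await res.json()).models || []).map((m) => m.name || m.model);
  } catch { /* Ollama not running; recommend from the catalog alone */ }
  const rec = recommendModels(catalog, sys, installed);
  return rec.best.name; // e.g. 'qwen2.5:7b', depending on free RAM
}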
@@ -374,37 +524,16 @@ export class EchoSkill extends BaseSkill {
  const factory = TEMPLATES[template]?.factory ?? customer_service_1.createCustomerServiceConfig;
  const config = factory();
  config.metadata.name = name;
+ // Override the template defaults with the user-selected provider and model
+ config.spec.model = llmModel;
+ config.spec.provider = { default: llmProvider };
  // Ensure web channel exists
  if (!config.spec.channels.some((c) => c.type === 'web')) {
  config.spec.channels.push({ type: 'web', port: 3000 });
  }
+ // Generate only oad.yaml, not agent.yaml
  fs.writeFileSync(path.join(dir, 'oad.yaml'), yaml.dump(config, { lineWidth: 120 }));
- // agent.yaml standalone OAD config for runtime usage
- fs.writeFileSync(path.join(dir, 'agent.yaml'), `apiVersion: opc/v1
- kind: Agent
- metadata:
- name: ${name}
- version: 1.0.0
- description: My AI Agent
- spec:
- model: qwen2.5
- provider:
- default: ollama
- systemPrompt: |
- You are a helpful AI assistant named ${name}.
- Be concise, helpful, and friendly.
- channels:
- - type: web
- port: 3000
- memory:
- shortTerm: true
- longTerm:
- provider: deepbrain
- skills:
- - name: echo
- description: Echo test skill
- `);
- // src/index.ts — entry point
+ // src/index.ts - entry point
  fs.writeFileSync(path.join(dir, 'src', 'index.ts'), `import { AgentRuntime } from 'opc-agent';
  import { EchoSkill } from './skills/echo';
  import { readFileSync, existsSync } from 'fs';
@@ -413,7 +542,7 @@ async function main() {
  const runtime = new AgentRuntime();

  // Load OAD config
- const config = await runtime.loadConfig('./agent.yaml');
+ const config = await runtime.loadConfig('./oad.yaml');

  // Load personality and context files
  const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
@@ -439,7 +568,7 @@ async function main() {

  main().catch(console.error);
  `);
- // src/skills/echo.ts example skill
+ // src/skills/echo.ts - example skill
  fs.writeFileSync(path.join(dir, 'src', 'skills', 'echo.ts'), `import { BaseSkill } from 'opc-agent';
  import type { AgentContext, Message, SkillResult } from 'opc-agent';

@@ -477,23 +606,39 @@ export class EchoSkill extends BaseSkill {
  }, null, 2));
  // .env.example
  fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration
- OPC_LLM_API_KEY=your-api-key-here
- OPC_LLM_BASE_URL=https://api.openai.com/v1
- OPC_LLM_MODEL=gpt-4o-mini
+ # Ollama (free local, default):
+ # OPC_LLM_BASE_URL=http://localhost:11434/v1
+ # OPC_LLM_MODEL=qwen2.5
+ # (Ollama needs no API key)

- # For DeepSeek:
+ # DeepSeek:
+ # OPC_LLM_API_KEY=your-deepseek-key
  # OPC_LLM_BASE_URL=https://api.deepseek.com/v1
  # OPC_LLM_MODEL=deepseek-chat

- # For local Ollama (default in agent.yaml):
- # OPC_LLM_BASE_URL=http://localhost:11434/v1
- # OPC_LLM_MODEL=qwen2.5
- `);
- // .env (copy of example)
- fs.writeFileSync(path.join(dir, '.env'), `OPC_LLM_API_KEY=your-api-key-here
- OPC_LLM_BASE_URL=https://api.openai.com/v1
- OPC_LLM_MODEL=gpt-4o-mini
+ # OpenAI:
+ # OPC_LLM_API_KEY=your-openai-key
+ # OPC_LLM_BASE_URL=https://api.openai.com/v1
+ # OPC_LLM_MODEL=gpt-4o-mini
+
+ # Anthropic:
+ # OPC_LLM_API_KEY=your-anthropic-key
+ # OPC_LLM_BASE_URL=https://api.anthropic.com/v1
+ # OPC_LLM_MODEL=claude-sonnet-4-20250514
  `);
+ // .env - generate the right config for the user's choice
+ const envLines = [];
+ if (llmProvider === 'ollama') {
+ envLines.push('# Ollama (free local) - no API key needed');
+ envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
+ envLines.push(`OPC_LLM_MODEL=${llmModel}`);
+ }
+ else {
+ envLines.push(`OPC_LLM_API_KEY=${llmApiKey || 'your-api-key-here'}`);
+ envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
+ envLines.push(`OPC_LLM_MODEL=${llmModel}`);
+ }
+ fs.writeFileSync(path.join(dir, '.env'), envLines.join('\n') + '\n');
  // package.json
  fs.writeFileSync(path.join(dir, 'package.json'), JSON.stringify({
  name,
@@ -520,7 +665,7 @@ OPC_LLM_MODEL=gpt-4o-mini
  WORKDIR /app
  COPY package.json package-lock.json* ./
  RUN npm ci --production 2>/dev/null || npm install --production
- COPY oad.yaml agent.yaml .env* ./
+ COPY oad.yaml .env* ./
  COPY src/ ./src/
  COPY prompts/ ./prompts/ 2>/dev/null || true
  EXPOSE 3000
@@ -536,7 +681,7 @@ services:
  env_file:
  - .env
  volumes:
- - ./agent.yaml:/app/agent.yaml:ro
+ - ./oad.yaml:/app/oad.yaml:ro
  restart: unless-stopped
  `);
  // README.md
@@ -577,8 +722,7 @@ npx opc chat # CLI chat

  \`\`\`
  ${name}/
- ├── agent.yaml # OAD agent config (used by src/index.ts)
- ├── oad.yaml # OAD config (used by opc CLI)
+ ├── oad.yaml # Agent config (the only config file)
  ├── src/
  │ ├── index.ts # Entry point
  │ └── skills/
@@ -589,9 +733,9 @@ ${name}/

  ## Configuration

- Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
+ Edit \`oad.yaml\` to customize your agent's personality, skills, and behavior.
  `);
- // SOUL.md agent personality
+ // SOUL.md - agent personality
  const createdDate = new Date().toISOString().split('T')[0];
  fs.writeFileSync(path.join(dir, 'SOUL.md'), `# ${name} Personality

@@ -607,7 +751,7 @@ Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.

  ## Communication Style
  - Use clear, simple language
- - Be direct answer the question first, then explain
+ - Be direct - answer the question first, then explain
  - Use markdown formatting when helpful

  ## Rules
@@ -615,7 +759,7 @@ Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
  - Ask for clarification when the request is ambiguous
  - Never make up information
  `);
- // CONTEXT.md project context
+ // CONTEXT.md - project context
  fs.writeFileSync(path.join(dir, 'CONTEXT.md'), `# Project Context

  ## About This Agent
@@ -631,7 +775,8 @@ on startup to understand the project context.
  - Add company policies here
  `);
  console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')}`);
- console.log(` ${icon.file} agent.yaml - Agent definition (OAD)`);
+ console.log(` ${icon.file} oad.yaml - Agent config (${llmProvider}/${llmModel})`);
+ console.log(` ${icon.file} .env - Environment variables${llmProvider === 'ollama' ? '' : ' (API Key)'}`);
  console.log(` ${icon.file} src/index.ts - Entry point`);
  console.log(` ${icon.file} src/skills/echo.ts - Example skill`);
  console.log(` ${icon.file} SOUL.md - Agent personality`);
@@ -656,178 +801,37 @@ on startup to understand the project context.
  }
  }
  catch {
- // Brain-seed download failed non-fatal, project still usable
+ // Brain-seed download failed - non-fatal, project still usable
  }
  }
  console.log(`\n${color.bold('Next steps:')}`);
  console.log(` 1. cd ${name}`);
  console.log(` 2. npm install`);
- console.log(` 3. npx tsx src/index.ts ${color.dim('# or: npx opc run')}`);
- console.log(` 4. Open http://localhost:3000\n`);
+ if (llmProvider === 'ollama' && !ollamaRunning) {
+ console.log(` 3. ollama pull ${llmModel} ${color.dim('# download the model')}`);
+ console.log(` 4. npx opc run ${color.dim('# start the agent')}`);
+ }
+ else if (llmProvider !== 'ollama' && !llmApiKey) {
+ console.log(` 3. Edit .env and set OPC_LLM_API_KEY`);
+ console.log(` 4. npx opc run`);
+ }
+ else {
+ console.log(` 3. npx opc run ${color.dim('# start the agent')}`);
+ }
+ console.log(` Open http://localhost:3000\n`);
  console.log(`${color.dim('💡 Tip: Use --role to start from a workstation template:')}`);
  console.log(`${color.dim(' opc init my-agent --role customer-service')}`);
  console.log(`${color.dim(' opc init --list-roles (see all roles)')}\n`);
  });
  // ── Chat command ─────────────────────────────────────────────
+ const chat_1 = require("./cli/chat");
  program
  .command('chat')
- .description('Interactive CLI chat with the agent')
+ .description('Interactive TUI chat with the agent')
  .option('-f, --file <file>', 'OAD file', 'oad.yaml')
  .action(async (opts) => {
- // Load .env if present
  loadDotEnv();
- let systemPrompt = 'You are a helpful AI agent.';
- let model;
- let agentName = 'Agent';
- let agentVersion = '1.0.0';
- let providerName = 'auto';
- let skillNames = [];
- // Try loading SOUL.md and CONTEXT.md for enriched system prompt
- const soulPath = path.resolve('SOUL.md');
- const contextPath = path.resolve('CONTEXT.md');
- const soulContent = fs.existsSync(soulPath) ? fs.readFileSync(soulPath, 'utf-8') : '';
- const contextContent = fs.existsSync(contextPath) ? fs.readFileSync(contextPath, 'utf-8') : '';
- try {
- const raw = fs.readFileSync(opts.file, 'utf-8');
- const config = yaml.load(raw);
- if (config?.spec?.systemPrompt)
- systemPrompt = config.spec.systemPrompt;
- if (config?.spec?.model)
- model = config.spec.model;
- if (config?.metadata?.name)
- agentName = config.metadata.name;
- if (config?.metadata?.version)
- agentVersion = config.metadata.version;
- if (config?.spec?.provider?.default)
- providerName = config.spec.provider.default;
- if (config?.spec?.skills)
- skillNames = config.spec.skills.map((s) => s.name);
- }
- catch {
- // No config file, use defaults
- }
- // Prepend SOUL.md and CONTEXT.md to system prompt
- systemPrompt = [soulContent, contextContent, systemPrompt].filter(Boolean).join('\n\n');
- const provider = (0, providers_1.createProvider)(providerName, model);
- const history = [];
- // Print startup banner
- const bannerLines = [
- '╔══════════════════════════════════════╗',
- '║ 🤖 OPC Agent — Interactive Chat ║',
- `║ Agent: ${(agentName + ' v' + agentVersion).padEnd(27)}║`,
- `║ Model: ${((providerName + '/' + (model ?? 'default')).slice(0, 27)).padEnd(27)}║`,
- `║ Skills: ${(String(skillNames.length) + ' loaded').padEnd(26)}║`,
- '║ Type /help for commands ║',
- '╚══════════════════════════════════════╝',
- ];
- console.log('\n' + color.cyan(bannerLines.join('\n')) + '\n');
- if (soulContent)
- console.log(` ${icon.info} Loaded SOUL.md`);
- if (contextContent)
- console.log(` ${icon.info} Loaded CONTEXT.md`);
- if (soulContent || contextContent)
- console.log();
- const rl = readline.createInterface({
- input: process.stdin,
- output: process.stdout,
- historySize: 100,
- });
- const handleSlashCommand = (cmd) => {
- const lower = cmd.toLowerCase().trim();
- if (lower === '/quit' || lower === '/exit') {
- console.log(`\n${color.dim('Goodbye! 👋')}`);
- process.exit(0);
- }
- if (lower === '/help') {
- console.log(`\n ${color.bold('Available commands:')}`);
- console.log(` ${color.cyan('/help')} — Show this help`);
- console.log(` ${color.cyan('/quit')} — Exit chat (/exit also works)`);
- console.log(` ${color.cyan('/clear')} — Clear conversation history`);
- console.log(` ${color.cyan('/skills')} — List registered skills`);
- console.log(` ${color.cyan('/memory')} — Show memory stats`);
- console.log(` ${color.cyan('/info')} — Show agent info\n`);
- return true;
- }
- if (lower === '/clear') {
- history.length = 0;
- console.log(`\n ${icon.success} Conversation history cleared.\n`);
- return true;
- }
- if (lower === '/skills') {
- if (skillNames.length === 0) {
- console.log(`\n ${icon.info} No skills registered.\n`);
- }
- else {
- console.log(`\n ${color.bold('Registered skills:')}`);
- skillNames.forEach((s) => console.log(` • ${color.cyan(s)}`));
- console.log();
- }
- return true;
- }
- if (lower === '/memory') {
- console.log(`\n ${color.bold('Memory stats:')}`);
- console.log(` Messages in history: ${color.cyan(String(history.length))}`);
- console.log(` Characters: ${color.cyan(String(history.reduce((a, m) => a + m.content.length, 0)))}\n`);
- return true;
- }
- if (lower === '/info') {
- console.log(`\n ${color.bold('Agent Info:')}`);
- console.log(` Name: ${color.cyan(agentName)}`);
- console.log(` Version: ${color.cyan(agentVersion)}`);
- console.log(` Provider: ${color.cyan(providerName)}`);
- console.log(` Model: ${color.cyan(model ?? 'default')}`);
- console.log(` Skills: ${color.cyan(String(skillNames.length))}\n`);
- return true;
- }
- return false;
- };
- const ask = () => {
- rl.question(color.cyan('You: '), async (input) => {
- const text = input.trim();
- if (!text) {
- ask();
- return;
- }
- // Handle slash commands
- if (text.startsWith('/') && handleSlashCommand(text)) {
- ask();
- return;
- }
- history.push({ role: 'user', content: text });
- // Build messages for provider
- const messages = history.map((m) => ({
- id: 'x',
- role: m.role,
- content: m.content,
- timestamp: Date.now(),
- }));
- process.stdout.write(color.green('Agent: '));
- let full = '';
- try {
- for await (const chunk of provider.chatStream(messages, systemPrompt)) {
- process.stdout.write(chunk);
- full += chunk;
- }
- }
- catch (err) {
- const msg = err instanceof Error ? err.message : String(err);
- process.stdout.write(color.red(`\n[Error: ${msg}]`));
- full = `[Error: ${msg}]`;
- }
- console.log('\n');
- history.push({ role: 'assistant', content: full });
- // Trim history if too long (keep last 40 messages)
- if (history.length > 40) {
- history.splice(0, history.length - 40);
- }
- ask();
- });
- };
- rl.on('close', () => {
- console.log(`\n${color.dim('Goodbye! 👋')}`);
- process.exit(0);
- });
- ask();
+ await (0, chat_1.runChat)({ file: opts.file });
  });
  // ── Run command ──────────────────────────────────────────────
  program
@@ -858,6 +862,8 @@ program
  console.log(` ${color.dim('Studio:')} ${studioUrl}`);
  console.log(` ${color.dim('API:')} POST http://localhost:3000/api/chat`);
  console.log(`\n ${color.dim('Press Ctrl+C to stop.')}\n`);
+ // Keep the process alive — HTTP server refs may not suffice with Commander
+ await new Promise(() => { });
  });
  // ── Serve command (OpenAI-compatible API) ────────────────────
  program
@@ -894,6 +900,8 @@ program
  console.log(` GET /health`);
  console.log(` GET /v1/agent/status`);
  console.log(`\n ${color.dim('Press Ctrl+C to stop.')}\n`);
+ // Keep the process alive
+ await new Promise(() => { });
  });
  // ── Info command ─────────────────────────────────────────────
  program
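
Both insertions use the same idiom: awaiting a promise that never settles keeps the Node event loop from draining once the Commander action returns. A minimal sketch of the pattern, with startServer() as a hypothetical stand-in for the runtime startup above:

async function runAction() {
  startServer(); // hypothetical: binds the HTTP server and returns immediately
  // The executor never calls resolve/reject, so this await never completes
  // and the process stays alive until the user interrupts it (Ctrl+C).
  await new Promise(() => { });
}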
@@ -1460,7 +1468,7 @@ protocolCmd.command('list')
  const protocols = config?.spec?.protocols || {};
  const items = [
  { name: 'a2a', description: 'Agent-to-Agent protocol', enabled: !!protocols.a2a?.enabled, detail: protocols.a2a?.port ? `port ${protocols.a2a.port}` : '' },
- { name: 'agui', description: 'AG-UI Agent-User Interaction (SSE)', enabled: !!protocols.agui?.enabled, detail: protocols.agui?.path || '/agui' },
+ { name: 'agui', description: 'AG-UI - Agent-User Interaction (SSE)', enabled: !!protocols.agui?.enabled, detail: protocols.agui?.path || '/agui' },
  ];
  console.log(`\n${icon.gear} ${color.bold('Protocols')}\n`);
  for (const p of items) {
@@ -1590,43 +1598,29 @@ const brainCmd = program
  .description('Manage agent brain (memory, seeds, evolve)');
  brainCmd
  .command('status')
- .description('Show brain stats (pages, tiers, last evolve)')
- .option('--url <url>', 'DeepBrain server URL', 'http://localhost:3333')
- .action(async (opts) => {
- console.log(`\n${icon.gear} ${color.bold('DeepBrain Status')} — ${color.dim(opts.url)}\n`);
+ .description('Show brain stats (knowledge tiers, evolve history)')
+ .action(async () => {
+ console.log(`\n${icon.gear} ${color.bold('Knowledge Brain Status')}\n`);
  try {
- const res = await fetch(`${opts.url}/api/stats`);
- if (!res.ok)
- throw new Error(`HTTP ${res.status} ${res.statusText}`);
- const stats = (await res.json());
- const rows = [
- ['Total Pages', String(stats.totalPages ?? stats.pages ?? '-')],
- ['Total Chunks', String(stats.totalChunks ?? stats.chunks ?? '-')],
- ['Memory Tiers', String(stats.memoryTiers ?? stats.tiers ?? '-')],
- ['Index Size', stats.indexSize ?? '-'],
- ['Last Updated', stats.lastUpdated ?? stats.updatedAt ?? '-'],
- ];
- const maxKey = Math.max(...rows.map(([k]) => k.length));
- for (const [key, val] of rows) {
- console.log(` ${color.cyan(key.padEnd(maxKey))} ${val}`);
- }
+ const { KnowledgeEvolveEngine } = require('./memory/evolve-engine');
+ const engine = new KnowledgeEvolveEngine(process.cwd());
+ const model = await engine.detectLocalModel();
+ const stats = engine.getStats();
+ console.log(` ${color.cyan('Evolve Model'.padEnd(16))} ${model} (local, free)`);
+ console.log(` ${color.cyan('Workstation'.padEnd(16))} ${stats.workstation} pages`);
+ console.log(` ${color.cyan('Job'.padEnd(16))} ${stats.job} pages`);
+ console.log(` ${color.cyan('Industry'.padEnd(16))} ${stats.industry} pages`);
+ console.log(` ${color.cyan('Last Evolve'.padEnd(16))} ${stats.lastEvolve || 'never'}`);
  console.log();
  }
- catch (err) {
- const msg = err instanceof Error ? err.message : String(err);
- if (msg.includes('ECONNREFUSED') || msg.includes('fetch failed')) {
- console.log(` ${icon.warn} Cannot connect to DeepBrain at ${opts.url}`);
- console.log(` ${color.dim('Is the server running? Start with: deepbrain serve')}\n`);
- }
- else {
- console.error(` ${icon.error} ${msg}\n`);
- }
+ catch (e) {
+ console.log(` ${icon.warn} ${e.message}\n`);
  }
  });
  brainCmd
  .command('seed')
  .description('Import brain seed files into memory')
- .option('-f, --file <file>', 'OAD file', 'agent.yaml')
+ .option('-f, --file <file>', 'OAD file', 'oad.yaml')
  .option('--status', 'Check if seeds have been imported')
  .option('--reset', 'Re-import seeds (clear marker and re-seed)')
  .action(async (opts) => {
@@ -1679,29 +1673,32 @@ brainCmd
  });
  brainCmd
  .command('evolve')
- .description('Trigger manual knowledge evolution cycle')
- .option('--dry-run', 'Show what would be promoted without doing it')
+ .description('Trigger manual knowledge evolution cycle (uses local Ollama model)')
+ .option('--dry-run', 'Show stats without evolving')
  .action(async (opts) => {
- const { KnowledgeEvolver } = require('./memory/seed-loader');
- const evolver = new KnowledgeEvolver();
- console.log(`\n${icon.gear} ${color.bold('Knowledge Evolution')}\n`);
- console.log(` ${icon.info} Checking for promotion candidates...`);
- // Would connect to real brain in production
- const result = await evolver.checkPromotion(null);
- if (result.candidates.length === 0) {
- console.log(` ${icon.info} No knowledge ready for promotion yet.\n`);
+ const { KnowledgeEvolveEngine } = require('./memory/evolve-engine');
+ const engine = new KnowledgeEvolveEngine(process.cwd());
+ console.log(`\n${icon.gear} ${color.bold('Knowledge Evolution')} ${color.dim('(local Ollama model, zero cost)')}\n`);
+ const model = await engine.detectLocalModel();
+ console.log(` Model: ${color.cyan(model)}`);
+ const stats = engine.getStats();
+ console.log(` Knowledge pages: workstation=${stats.workstation} job=${stats.job} industry=${stats.industry}`);
+ if (stats.lastEvolve)
+ console.log(` Last evolve: ${stats.lastEvolve}`);
+ if (opts.dryRun) {
+ console.log(`\n ${icon.info} Dry run — no changes.\n`);
+ return;
  }
- else {
- for (const c of result.candidates) {
- console.log(` ${color.cyan(c.slug)} ${c.fromTier} ${c.toTier} (confidence: ${(c.confidence * 100).toFixed(0)}%)`);
- }
- if (opts.dryRun) {
- console.log(`\n ${icon.info} Dry run — no changes made.\n`);
- }
- else {
- console.log(`\n ${icon.success} Promoted ${result.promoted} knowledge entries.\n`);
- }
+ console.log(`\n ${icon.info} Running evolve cycle...`);
+ const result = await engine.evolve();
+ console.log(` ${icon.success} Extracted: ${result.extracted}, Deduplicated: ${result.deduplicated}, Promoted: ${result.promoted}`);
+ if (result.compacted)
+ console.log(` ${icon.success} Memory compacted (refined & written back)`);
+ if (result.errors.length > 0) {
+ for (const e of result.errors)
+ console.log(` ${icon.warn} ${e}`);
  }
+ console.log();
  });
  // ── Logs command ─────────────────────────────────────────────
  program
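
A sketch of driving the new evolve engine programmatically, assuming the import path and the constructor/method surface the CLI exercises above (KnowledgeEvolveEngine, detectLocalModel, getStats, evolve):

const { KnowledgeEvolveEngine } = require('opc-agent/dist/memory/evolve-engine'); // path is an assumption

async function evolveOnce() {
  const engine = new KnowledgeEvolveEngine(process.cwd());
  console.log('Evolve model:', await engine.detectLocalModel()); // local Ollama model, zero cost
  const stats = engine.getStats(); // { workstation, job, industry, lastEvolve }
  console.log(`Pages: workstation=${stats.workstation} job=${stats.job} industry=${stats.industry}`);
  const result = await engine.evolve();
  console.log(`Extracted=${result.extracted} deduplicated=${result.deduplicated} promoted=${result.promoted}`);
  for (const e of result.errors) console.warn('Evolve warning:', e);
}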
@@ -2145,7 +2142,7 @@ program
  return;
  }
  // Create a minimal mock agent for eval (real usage would load from OAD)
- const oadPath = path.resolve('agent.yaml');
+ const oadPath = path.resolve(fs.existsSync('oad.yaml') ? 'oad.yaml' : 'agent.yaml');
  let agent;
  if (fs.existsSync(oadPath)) {
  const runtime = new runtime_1.AgentRuntime();
@@ -2154,7 +2151,7 @@ program
  agent = runtime.agent;
  }
  if (!agent) {
- console.log(`${icon.warn} No agent.yaml found running with dry-run mock agent.`);
+ console.log(`${icon.warn} No oad.yaml or agent.yaml found - running with dry-run mock agent.`);
  agent = { chat: async (input) => `[mock response to: ${input}]` };
  }
  const evaluator = new eval_1.AgentEvaluator(agent);
@@ -2214,7 +2211,7 @@ guardrailsCmd
  console.log();
  const result = await manager.checkInput(message);
  if (result.passed) {
- console.log(color.green('✓ PASSED no violations'));
+ console.log(color.green('✓ PASSED - no violations'));
  }
  else {
  if (result.blocked)
@@ -2243,7 +2240,7 @@ program
  .action(async (opts) => {
  console.log(color.bold('🎤 Voice Conversation Mode'));
  console.log(` STT: ${opts.stt} | TTS: ${opts.tts} | Voice: ${opts.voice ?? 'default'} | Language: ${opts.language}`);
- console.log(color.dim(' (Voice conversation requires audio input integration use as library)'));
+ console.log(color.dim(' (Voice conversation requires audio input integration - use as library)'));
  console.log();
  console.log('To use voice in your agent:');
  console.log(color.cyan(`
@@ -2260,7 +2257,101 @@ program
  await voice.start();
  `));
  });
- program.parse();
+ // ── Models command ──────────────────────────────────────────────
+ program
+ .command('models')
+ .description('Show recommended Ollama models for your system')
+ .option('--refresh', 'Force refresh model list from remote')
+ .option('--json', 'Output as JSON')
+ .action(async (opts) => {
+ if (opts.refresh) {
+ (0, model_recommender_1.clearModelCache)();
+ console.log(`${icon.success} Model recommendation cache cleared`);
+ }
+ const sys = (0, model_recommender_1.detectSystem)();
+ const models = await (0, model_recommender_1.fetchModelList)();
+ const cache = (0, model_recommender_1.cacheInfo)();
+ // Detect Ollama
+ let installedModels = [];
+ try {
+ const ctrl = new AbortController();
+ const t = setTimeout(() => ctrl.abort(), 3000);
+ const res = await fetch('http://localhost:11434/api/tags', { signal: ctrl.signal });
+ clearTimeout(t);
+ const data = await res.json();
+ installedModels = (data.models || []).map((m) => m.name || m.model);
+ }
+ catch { /* Ollama not running */ }
+ const rec = (0, model_recommender_1.recommendModels)(models, sys, installedModels);
+ if (opts.json) {
+ console.log(JSON.stringify({ system: sys, cache, recommendation: rec }, null, 2));
+ return;
+ }
+ console.log(`\n${icon.rocket} ${color.bold('OPC Model Recommendations')}\n`);
+ console.log(` System: ${sys.totalRAM}GB RAM (${sys.freeRAM}GB free), ${sys.cpuCount} cores, ${sys.platform}/${sys.arch}`);
+ if (cache.exists) {
+ console.log(` Recommendation list: v${cache.version} (${cache.age})`);
+ }
+ else {
+ console.log(` Recommendation list: built-in (run ${color.cyan('opc models --refresh')} for the latest)`);
+ }
+ console.log(` Ollama: ${installedModels.length > 0 ? color.green(`running, ${installedModels.length} models`) : color.yellow('not running')}`);
+ console.log(`\n ${color.bold('⭐ Recommended:')} ${color.cyan(rec.best.name)} (${rec.best.size}) - ${rec.best.desc}\n`);
+ // Table
+ console.log(` ${'Model'.padEnd(28)} ${'Size'.padEnd(8)} ${'Min RAM'.padEnd(8)} ${'Status'.padEnd(10)} Notes`);
+ console.log(` ${'─'.repeat(28)} ${'─'.repeat(8)} ${'─'.repeat(8)} ${'─'.repeat(10)} ${'─'.repeat(20)}`);
+ for (const m of rec.suitable) {
+ const installed = installedModels.includes(m.name);
+ const isBest = m.name === rec.best.name;
+ const status = installed ? color.green('installed') : color.dim('not installed');
+ const star = isBest ? ' ⭐' : (m.recommended ? ' 💎' : '');
+ console.log(` ${(m.name + star).padEnd(28)} ${m.size.padEnd(8)} ${(m.minRAM + 'GB').padEnd(8)} ${status.padEnd(10)} ${m.desc}`);
+ }
+ if (rec.toDownload.length > 0) {
+ console.log(`\n ${color.bold('Recommended downloads:')}`);
+ for (const m of rec.toDownload) {
+ console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
+ }
+ }
+ console.log();
+ });
+ // ── Search command ──────────────────────────────────────────
+ program
+ .command('memory-search <query>')
+ .description('Search conversation history (requires SQLite memory)')
+ .option('-n, --limit <n>', 'Max results', '10')
+ .option('--json', 'Output as JSON')
+ .action(async (query, opts) => {
+ try {
+ const { SQLiteStore } = await Promise.resolve().then(() => __importStar(require('./memory/sqlite-store')));
+ const store = new SQLiteStore();
+ const results = await store.search(query, parseInt(opts.limit, 10));
+ const stats = await store.stats();
+ if (opts.json) {
+ console.log(JSON.stringify({ query, results, stats }, null, 2));
+ await store.close();
+ return;
+ }
+ console.log(`\n🔍 Search: "${query}" (${results.length} results out of ${stats.totalMessages} messages)\n`);
+ if (results.length === 0) {
+ console.log(' No matching results found.\n');
+ await store.close();
+ return;
+ }
+ for (const msg of results) {
+ const time = new Date(msg.timestamp).toLocaleString();
+ const role = msg.role === 'user' ? '👤' : '🤖';
+ const preview = msg.content.length > 120 ? msg.content.slice(0, 120) + '...' : msg.content;
+ console.log(` ${role} [${time}] ${preview}`);
+ }
+ console.log(`\n 💾 Database: ${stats.dbSizeKB}KB, ${stats.sessions} sessions\n`);
+ await store.close();
+ }
+ catch (err) {
+ console.error(`${icon.error} Search failed: ${err instanceof Error ? err.message : err}`);
+ console.log(' Hint: sql.js support is required. Run: npm install sql.js');
+ }
+ });
  // ── Keys command ──────────────────────────────────────────────
  const keys_1 = require("./security/keys");
  const approval_1 = require("./security/approval");
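
For completeness, a sketch of the SQLiteStore surface the memory-search command exercises (search, stats, close), assuming the store is importable from the built package; the sql.js requirement noted in the error hint applies here too:

async function searchHistory(query, limit = 10) {
  const { SQLiteStore } = require('opc-agent/dist/memory/sqlite-store'); // path is an assumption
  const store = new SQLiteStore();
  const results = await store.search(query, limit); // messages with role/content/timestamp
  const stats = await store.stats(); // { totalMessages, sessions, dbSizeKB }
  console.log(`${results.length}/${stats.totalMessages} messages, ${stats.dbSizeKB}KB on disk`);
  await store.close(); // release the database handle
  return results;
}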
@@ -2307,7 +2398,7 @@ keysCmd
  });
  // ── Approve command ───────────────────────────────────────────
  const approveCmd = program.command('approve').description('Manage command approvals');
- // Singleton for CLI in real usage this would be loaded from daemon state
+ // Singleton for CLI - in real usage this would be loaded from daemon state
  const approvalManager = new approval_1.ApprovalManager();
  approveCmd
  .command('list')
@@ -2439,7 +2530,7 @@ a2aCmd
  const { oadToAgentCard } = require('./protocols/a2a');
  const oad = loadOADFile();
  if (!oad) {
- console.log(`${icon.error} No agent.yaml found`);
+ console.log(`${icon.error} No oad.yaml or agent.yaml found`);
  return;
  }
  const card = oadToAgentCard(oad, 'http://localhost:3001');
@@ -2479,7 +2570,7 @@ a2aCmd
  function loadOADFile() {
  const fs = require('fs');
  const yaml = require('js-yaml');
- for (const name of ['agent.yaml', 'agent.yml']) {
+ for (const name of ['oad.yaml', 'agent.yaml', 'agent.yml']) {
  if (fs.existsSync(name)) {
  return yaml.load(fs.readFileSync(name, 'utf-8'));
  }
@@ -2487,7 +2578,7 @@ function loadOADFile() {
  return null;
  }
  // ── MCP Server Commands ────────────────────────────────────
- const mcpCmd = program.command('mcp').description('MCP server commands expose agent as MCP tools');
+ const mcpCmd = program.command('mcp').description('MCP server commands - expose agent as MCP tools');
  mcpCmd
  .command('serve')
  .option('--http <port>', 'Start HTTP+SSE mode on given port')
@@ -2516,7 +2607,7 @@ mcpCmd
  console.log(`${icon.info} Tools: ${server.getToolCount()}`);
  }
  else {
- console.error(`${icon.success} MCP server (stdio) started ${server.getToolCount()} tools`);
+ console.error(`${icon.success} MCP server (stdio) started - ${server.getToolCount()} tools`);
  await server.serveStdio();
  }
  });
@@ -2565,8 +2656,10 @@ mcpCmd
  console.log(`${icon.info} Tools: ${server.getToolCount()}`);
  }
  else {
- console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio) ${server.getToolCount()} tools`);
+ console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio) - ${server.getToolCount()} tools`);
  await server.serveStdio();
  }
  });
+ // ── Parse CLI ────────────────────────────────────────────────
+ program.parse();
  //# sourceMappingURL=cli.js.map