opc-agent 4.1.1 → 4.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/cli.ts CHANGED
@@ -103,7 +103,7 @@ async function select(question: string, options: { value: string; label: string
103
103
  program
104
104
  .name('opc')
105
105
  .description('OPC Agent - Open Agent Framework for business workstations')
106
- .version('2.0.0');
106
+ .version(require('../package.json').version);
107
107
 
108
108
  // ── Init command ─────────────────────────────────────────────
109
109
 
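With this hunk the CLI stops hardcoding its version string and reads it from the package manifest, so `opc --version` reports the installed release (4.1.2 for this diff) without further edits to cli.ts. A minimal sketch of the pattern, assuming a commander-style `program` object like the one above and a CommonJS build where `../package.json` resolves from the compiled output:

    import { Command } from 'commander';

    const program = new Command();
    // Keep --version in sync with the published package instead of a hardcoded string.
    const pkg: { version: string } = require('../package.json');
    program
      .name('opc')
      .version(pkg.version);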
@@ -168,7 +168,7 @@ program
168
168
  const roleDisplayName = roleMeta.name || matched.role;
169
169
  const roleDescription = roleMeta.name_zh ? `${roleMeta.name} (${roleMeta.name_zh})` : (roleMeta.name || matched.role);
170
170
 
171
- console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)} ${roleDisplayName}`);
171
+ console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)} - ${roleDisplayName}`);
172
172
 
173
173
  // Create directories
174
174
  fs.mkdirSync(dir, { recursive: true });
@@ -192,10 +192,10 @@ program
192
192
  const workstationSeedFromRole = workstationMatch?.[0]?.trim() || '';
193
193
  fs.writeFileSync(path.join(dir, 'brain-seeds', 'workstation.md'), workstationSeedFromRole || `# Workstation Knowledge\n\n## Tools & Environment\n\nCommon tools and setup for this workstation role.\n\n## Workflows\n\nStandard operating procedures and workflows.\n\n## Best Practices\n\nIndustry best practices for this role.\n`);
194
194
 
195
- // agent.yaml with role system prompt and brain seeds
195
+ // oad.yaml with role system prompt and brain seeds (agent.yaml is no longer generated)
196
196
  const firstLine = systemPromptContent.split('\n').find((l: string) => l.trim() && !l.startsWith('#'))?.trim() || 'You are a helpful AI assistant.';
197
197
  fs.writeFileSync(
198
- path.join(dir, 'agent.yaml'),
198
+ path.join(dir, 'oad.yaml'),
199
199
  `apiVersion: opc/v1
200
200
  kind: Agent
201
201
  metadata:
@@ -249,7 +249,7 @@ spec:
249
249
  fs.writeFileSync(path.join(dir, 'oad.yaml'), roleData.files['oad.yaml']);
250
250
  }
251
251
 
252
- // src/index.ts entry point (same as generic)
252
+ // src/index.ts - entry point (same as generic)
253
253
  fs.writeFileSync(
254
254
  path.join(dir, 'src', 'index.ts'),
255
255
  `import { AgentRuntime } from 'opc-agent';
@@ -258,7 +258,7 @@ import { readFileSync, existsSync } from 'fs';
258
258
 
259
259
  async function main() {
260
260
  const runtime = new AgentRuntime();
261
- const config = await runtime.loadConfig('./agent.yaml');
261
+ const config = await runtime.loadConfig('./oad.yaml');
262
262
 
263
263
  const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
264
264
  const context = existsSync('./CONTEXT.md') ? readFileSync('./CONTEXT.md', 'utf-8') : '';
@@ -327,8 +327,8 @@ export class EchoSkill extends BaseSkill {
327
327
 
328
328
  // .gitignore, .env.example, .env
329
329
  fs.writeFileSync(path.join(dir, '.gitignore'), 'node_modules\ndist\n.env\n.opc-knowledge.json\ndata/\n');
330
- fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\nOPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
331
- fs.writeFileSync(path.join(dir, '.env'), `OPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
330
+ fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\n# Ollama (免费本地,默认,无需 API key):\nOPC_LLM_BASE_URL=http://localhost:11434/v1\nOPC_LLM_MODEL=qwen2.5\n\n# 如需使用商业模型,取消以下注释:\n# OPC_LLM_API_KEY=your-api-key-here\n# OPC_LLM_BASE_URL=https://api.deepseek.com/v1\n# OPC_LLM_MODEL=deepseek-chat\n`);
331
+ fs.writeFileSync(path.join(dir, '.env'), `# Ollama (免费本地) - 无需 API key\nOPC_LLM_BASE_URL=http://localhost:11434/v1\nOPC_LLM_MODEL=qwen2.5\n`);
332
332
 
333
333
  // README.md
334
334
  fs.writeFileSync(
@@ -337,11 +337,11 @@ export class EchoSkill extends BaseSkill {
337
337
  );
338
338
 
339
339
  // Dockerfile + docker-compose
340
- fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml agent.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
341
- fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./agent.yaml:/app/agent.yaml:ro\n restart: unless-stopped\n`);
340
+ fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
341
+ fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./oad.yaml:/app/oad.yaml:ro\n restart: unless-stopped\n`);
342
342
 
343
343
  console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')} from role ${color.cyan(matched.category + '/' + matched.role)}`);
344
- console.log(` ${icon.file} agent.yaml - Agent definition with role system prompt`);
344
+ console.log(` ${icon.file} oad.yaml - Agent definition with role system prompt`);
345
345
  console.log(` ${icon.file} SOUL.md - Role personality (${systemPromptContent.split('\n').length} lines)`);
346
346
  console.log(` ${icon.file} CONTEXT.md - Role context & documentation`);
347
347
  console.log(` ${icon.file} brain-seeds/ - 3-tier brain seed knowledge`);
@@ -372,7 +372,7 @@ export class EchoSkill extends BaseSkill {
372
372
  if (hubTemplates.length > 0) useHub = true;
373
373
  }
374
374
  } catch {
375
- // Hub unreachable fall back to bundled templates
375
+ // Hub unreachable - fall back to bundled templates
376
376
  }
377
377
 
378
378
  let template: string;
@@ -390,6 +390,113 @@ export class EchoSkill extends BaseSkill {
390
390
  template = await select('Select a template:', Object.entries(TEMPLATES).map(([value, { label }]) => ({ value, label })));
391
391
  }
392
392
 
393
+ // ── LLM provider selection (Ollama-first) ──
394
+ let llmProvider = 'ollama';
395
+ let llmModel = 'qwen2.5';
396
+ let llmBaseUrl = 'http://localhost:11434/v1';
397
+ let llmApiKey = '';
398
+ let ollamaRunning = false;
399
+ let modelNames: string[] = [];
400
+
401
+ // Probe Ollama first, in both --yes and interactive modes
402
+ try {
403
+ const controller = new AbortController();
404
+ const ollamaTimeout = setTimeout(() => controller.abort(), 3000);
405
+ const ollamaRes = await fetch('http://localhost:11434/api/tags', { signal: controller.signal });
406
+ clearTimeout(ollamaTimeout);
407
+ const ollamaData = await ollamaRes.json() as any;
408
+ modelNames = (ollamaData.models || []).map((m: any) => m.name || m.model);
409
+ ollamaRunning = true;
410
+ if (opts.yes && modelNames.length > 0) {
411
+ // --yes mode: automatically pick the first available model
412
+ llmModel = modelNames[0];
413
+ }
414
+ } catch {
415
+ ollamaRunning = false;
416
+ }
417
+
418
+ if (!opts.yes) {
419
+ if (ollamaRunning) {
420
+ console.log(`\n ${icon.info} ${color.dim('正在检测 Ollama...')}`);
421
+ console.log(` ${icon.success} Ollama 已运行,发现 ${modelNames.length} 个模型`);
422
+
423
+ // Choose a provider
424
+ llmProvider = await select('选择 LLM 引擎:', [
425
+ { value: 'ollama', label: '🟢 Ollama (免费本地,推荐) - 已检测到运行中' },
426
+ { value: 'deepseek', label: '🔵 DeepSeek - 高性价比国产模型' },
427
+ { value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
428
+ { value: 'anthropic', label: '🟣 Anthropic (Claude)' },
429
+ { value: 'custom', label: '⚙️ 自定义 (手动输入 Base URL)' },
430
+ ]);
431
+
432
+ if (llmProvider === 'ollama') {
433
+ // Choose a local model
434
+ const defaultModel = modelNames.includes('qwen2.5') ? 'qwen2.5' : (modelNames.includes('llama3') ? 'llama3' : (modelNames[0] || 'qwen2.5'));
435
+ if (modelNames.length > 0) {
436
+ llmModel = await select('选择 Ollama 模型:', modelNames.map((m: string) => ({ value: m, label: m + (m === defaultModel ? ' (推荐)' : '') })));
437
+ } else {
438
+ console.log(` ${color.yellow('⚠️')} 没有发现已下载的模型,将使用默认 qwen2.5`);
439
+ console.log(` 运行 ${color.cyan('ollama pull qwen2.5')} 下载模型`);
440
+ llmModel = 'qwen2.5';
441
+ }
442
+ }
443
+ } else {
444
+ // Ollama not running
445
+ console.log(`\n ${icon.info} ${color.dim('正在检测 Ollama...')}`);
446
+ console.log(` ${color.yellow('⚠️')} Ollama 未运行或未安装`);
447
+
448
+ llmProvider = await select('选择 LLM 引擎:', [
449
+ { value: 'ollama', label: '🟢 Ollama (免费本地,推荐) - 需先安装: https://ollama.ai' },
450
+ { value: 'deepseek', label: '🔵 DeepSeek - 高性价比国产模型' },
451
+ { value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
452
+ { value: 'anthropic', label: '🟣 Anthropic (Claude)' },
453
+ { value: 'custom', label: '⚙️ 自定义 (手动输入 Base URL)' },
454
+ ]);
455
+
456
+ if (llmProvider === 'ollama') {
457
+ console.log(`\n ${icon.info} Ollama 安装指南:`);
458
+ console.log(` 1. 访问 ${color.cyan('https://ollama.ai')} 下载并安装`);
459
+ console.log(` 2. 运行 ${color.cyan('ollama pull qwen2.5')} 下载推荐模型`);
460
+ console.log(` 3. 然后 ${color.cyan('opc run')} 即可开始对话\n`);
461
+ llmModel = 'qwen2.5';
462
+ }
463
+ }
464
+
465
+ // Commercial providers require an API key
466
+ if (llmProvider === 'deepseek') {
467
+ llmBaseUrl = 'https://api.deepseek.com/v1';
468
+ llmModel = 'deepseek-chat';
469
+ llmApiKey = await promptUser('输入 DeepSeek API Key (可稍后在 .env 中配置,直接回车跳过)');
470
+ if (!llmApiKey) {
471
+ console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
472
+ }
473
+ } else if (llmProvider === 'openai') {
474
+ llmBaseUrl = 'https://api.openai.com/v1';
475
+ llmModel = 'gpt-4o-mini';
476
+ llmApiKey = await promptUser('输入 OpenAI API Key (可稍后在 .env 中配置,直接回车跳过)');
477
+ if (!llmApiKey) {
478
+ console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
479
+ }
480
+ } else if (llmProvider === 'anthropic') {
481
+ llmBaseUrl = 'https://api.anthropic.com/v1';
482
+ llmModel = 'claude-sonnet-4-20250514';
483
+ llmApiKey = await promptUser('输入 Anthropic API Key (可稍后在 .env 中配置,直接回车跳过)');
484
+ if (!llmApiKey) {
485
+ console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
486
+ }
487
+ } else if (llmProvider === 'custom') {
488
+ llmBaseUrl = await promptUser('输入 Base URL', 'http://localhost:11434/v1');
489
+ llmModel = await promptUser('输入模型名称', 'qwen2.5');
490
+ llmApiKey = await promptUser('输入 API Key (可选,直接回车跳过)');
491
+ // Try to infer the provider from the base URL
492
+ if (llmBaseUrl.includes('deepseek.com')) llmProvider = 'deepseek';
493
+ else if (llmBaseUrl.includes('openai.com')) llmProvider = 'openai';
494
+ else if (llmBaseUrl.includes('anthropic.com')) llmProvider = 'anthropic';
495
+ else if (llmBaseUrl.includes('localhost:11434')) llmProvider = 'ollama';
496
+ else llmProvider = 'openai'; // OpenAI-compatible fallback
497
+ }
498
+ }
499
+
393
500
  const dir = path.resolve(name);
394
501
  if (fs.existsSync(dir)) {
395
502
  console.error(`\n${icon.error} Directory ${color.bold(name)} already exists.`);
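The provider-selection block added in this hunk probes the local Ollama API (`/api/tags`) with a 3-second abort before prompting. A standalone sketch of that detection pattern, usable outside `opc init` (the helper name is illustrative, not part of the package's public API):

    // Detect a locally running Ollama instance and list its installed models.
    // Returns null when Ollama is unreachable within the timeout.
    async function detectOllama(timeoutMs = 3000): Promise<string[] | null> {
      const controller = new AbortController();
      const timer = setTimeout(() => controller.abort(), timeoutMs);
      try {
        const res = await fetch('http://localhost:11434/api/tags', { signal: controller.signal });
        const data = (await res.json()) as { models?: Array<{ name?: string; model?: string }> };
        return (data.models ?? []).map((m) => m.name || m.model || '').filter(Boolean);
      } catch {
        return null;
      } finally {
        clearTimeout(timer);
      }
    }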
@@ -403,43 +510,19 @@ export class EchoSkill extends BaseSkill {
403
510
  const config = factory();
404
511
  config.metadata.name = name;
405
512
 
513
+ // Override template defaults with the user-selected provider and model
514
+ config.spec.model = llmModel;
515
+ config.spec.provider = { default: llmProvider };
516
+
406
517
  // Ensure web channel exists
407
518
  if (!config.spec.channels.some((c: any) => c.type === 'web')) {
408
519
  config.spec.channels.push({ type: 'web', port: 3000 });
409
520
  }
410
521
 
522
+ // Generate only oad.yaml; agent.yaml is no longer written
411
523
  fs.writeFileSync(path.join(dir, 'oad.yaml'), yaml.dump(config, { lineWidth: 120 }));
412
524
 
413
- // agent.yaml standalone OAD config for runtime usage
414
- fs.writeFileSync(
415
- path.join(dir, 'agent.yaml'),
416
- `apiVersion: opc/v1
417
- kind: Agent
418
- metadata:
419
- name: ${name}
420
- version: 1.0.0
421
- description: My AI Agent
422
- spec:
423
- model: qwen2.5
424
- provider:
425
- default: ollama
426
- systemPrompt: |
427
- You are a helpful AI assistant named ${name}.
428
- Be concise, helpful, and friendly.
429
- channels:
430
- - type: web
431
- port: 3000
432
- memory:
433
- shortTerm: true
434
- longTerm:
435
- provider: deepbrain
436
- skills:
437
- - name: echo
438
- description: Echo test skill
439
- `,
440
- );
441
-
442
- // src/index.ts — entry point
525
+ // src/index.ts - entry point
443
526
  fs.writeFileSync(
444
527
  path.join(dir, 'src', 'index.ts'),
445
528
  `import { AgentRuntime } from 'opc-agent';
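Because the selected provider and model overwrite the template defaults before the config is dumped, the generated oad.yaml now reflects the interactive choice. For example, picking DeepSeek in the prompt above produces a spec that begins roughly like this (exact field order depends on the template factory):

    spec:
      model: deepseek-chat
      provider:
        default: deepseek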
@@ -450,7 +533,7 @@ async function main() {
450
533
  const runtime = new AgentRuntime();
451
534
 
452
535
  // Load OAD config
453
- const config = await runtime.loadConfig('./agent.yaml');
536
+ const config = await runtime.loadConfig('./oad.yaml');
454
537
 
455
538
  // Load personality and context files
456
539
  const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
@@ -478,7 +561,7 @@ main().catch(console.error);
478
561
  `,
479
562
  );
480
563
 
481
- // src/skills/echo.ts example skill
564
+ // src/skills/echo.ts - example skill
482
565
  fs.writeFileSync(
483
566
  path.join(dir, 'src', 'skills', 'echo.ts'),
484
567
  `import { BaseSkill } from 'opc-agent';
@@ -530,29 +613,41 @@ export class EchoSkill extends BaseSkill {
530
613
  fs.writeFileSync(
531
614
  path.join(dir, '.env.example'),
532
615
  `# LLM API Configuration
533
- OPC_LLM_API_KEY=your-api-key-here
534
- OPC_LLM_BASE_URL=https://api.openai.com/v1
535
- OPC_LLM_MODEL=gpt-4o-mini
616
+ # Ollama (免费本地,默认):
617
+ # OPC_LLM_BASE_URL=http://localhost:11434/v1
618
+ # OPC_LLM_MODEL=qwen2.5
619
+ # (Ollama 无需 API key)
536
620
 
537
- # For DeepSeek:
621
+ # DeepSeek:
622
+ # OPC_LLM_API_KEY=your-deepseek-key
538
623
  # OPC_LLM_BASE_URL=https://api.deepseek.com/v1
539
624
  # OPC_LLM_MODEL=deepseek-chat
540
625
 
541
- # For local Ollama (default in agent.yaml):
542
- # OPC_LLM_BASE_URL=http://localhost:11434/v1
543
- # OPC_LLM_MODEL=qwen2.5
544
- `,
545
- );
626
+ # OpenAI:
627
+ # OPC_LLM_API_KEY=your-openai-key
628
+ # OPC_LLM_BASE_URL=https://api.openai.com/v1
629
+ # OPC_LLM_MODEL=gpt-4o-mini
546
630
 
547
- // .env (copy of example)
548
- fs.writeFileSync(
549
- path.join(dir, '.env'),
550
- `OPC_LLM_API_KEY=your-api-key-here
551
- OPC_LLM_BASE_URL=https://api.openai.com/v1
552
- OPC_LLM_MODEL=gpt-4o-mini
631
+ # Anthropic:
632
+ # OPC_LLM_API_KEY=your-anthropic-key
633
+ # OPC_LLM_BASE_URL=https://api.anthropic.com/v1
634
+ # OPC_LLM_MODEL=claude-sonnet-4-20250514
553
635
  `,
554
636
  );
555
637
 
638
+ // .env - generated to match the user's provider selection
639
+ const envLines: string[] = [];
640
+ if (llmProvider === 'ollama') {
641
+ envLines.push('# Ollama (免费本地) - 无需 API key');
642
+ envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
643
+ envLines.push(`OPC_LLM_MODEL=${llmModel}`);
644
+ } else {
645
+ envLines.push(`OPC_LLM_API_KEY=${llmApiKey || 'your-api-key-here'}`);
646
+ envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
647
+ envLines.push(`OPC_LLM_MODEL=${llmModel}`);
648
+ }
649
+ fs.writeFileSync(path.join(dir, '.env'), envLines.join('\n') + '\n');
650
+
556
651
  // package.json
557
652
  fs.writeFileSync(
558
653
  path.join(dir, 'package.json'),
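The generated .env now mirrors the interactive selection instead of always pointing at OpenAI. For example, choosing DeepSeek and skipping the API-key prompt writes roughly:

    OPC_LLM_API_KEY=your-api-key-here
    OPC_LLM_BASE_URL=https://api.deepseek.com/v1
    OPC_LLM_MODEL=deepseek-chat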
@@ -590,7 +685,7 @@ OPC_LLM_MODEL=gpt-4o-mini
590
685
  WORKDIR /app
591
686
  COPY package.json package-lock.json* ./
592
687
  RUN npm ci --production 2>/dev/null || npm install --production
593
- COPY oad.yaml agent.yaml .env* ./
688
+ COPY oad.yaml .env* ./
594
689
  COPY src/ ./src/
595
690
  COPY prompts/ ./prompts/ 2>/dev/null || true
596
691
  EXPOSE 3000
@@ -610,7 +705,7 @@ services:
610
705
  env_file:
611
706
  - .env
612
707
  volumes:
613
- - ./agent.yaml:/app/agent.yaml:ro
708
+ - ./oad.yaml:/app/oad.yaml:ro
614
709
  restart: unless-stopped
615
710
  `,
616
711
  );
@@ -655,8 +750,7 @@ npx opc chat # CLI chat
655
750
 
656
751
  \`\`\`
657
752
  ${name}/
658
- ├── agent.yaml # OAD agent config (used by src/index.ts)
659
- ├── oad.yaml # OAD config (used by opc CLI)
753
+ ├── oad.yaml # Agent 配置 (唯一配置文件)
660
754
  ├── src/
661
755
  │ ├── index.ts # Entry point
662
756
  │ └── skills/
@@ -667,11 +761,11 @@ ${name}/
667
761
 
668
762
  ## Configuration
669
763
 
670
- Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
764
+ Edit \`oad.yaml\` to customize your agent's personality, skills, and behavior.
671
765
  `,
672
766
  );
673
767
 
674
- // SOUL.md agent personality
768
+ // SOUL.md - agent personality
675
769
  const createdDate = new Date().toISOString().split('T')[0];
676
770
  fs.writeFileSync(
677
771
  path.join(dir, 'SOUL.md'),
@@ -689,7 +783,7 @@ Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
689
783
 
690
784
  ## Communication Style
691
785
  - Use clear, simple language
692
- - Be direct answer the question first, then explain
786
+ - Be direct - answer the question first, then explain
693
787
  - Use markdown formatting when helpful
694
788
 
695
789
  ## Rules
@@ -699,7 +793,7 @@ Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
699
793
  `,
700
794
  );
701
795
 
702
- // CONTEXT.md project context
796
+ // CONTEXT.md - project context
703
797
  fs.writeFileSync(
704
798
  path.join(dir, 'CONTEXT.md'),
705
799
  `# Project Context
@@ -719,7 +813,8 @@ on startup to understand the project context.
719
813
  );
720
814
 
721
815
  console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')}`);
722
- console.log(` ${icon.file} agent.yaml - Agent definition (OAD)`);
816
+ console.log(` ${icon.file} oad.yaml - Agent 配置 (${llmProvider}/${llmModel})`);
817
+ console.log(` ${icon.file} .env - 环境变量${llmProvider === 'ollama' ? '' : ' (API Key)'}`);
723
818
  console.log(` ${icon.file} src/index.ts - Entry point`);
724
819
  console.log(` ${icon.file} src/skills/echo.ts - Example skill`);
725
820
  console.log(` ${icon.file} SOUL.md - Agent personality`);
@@ -744,14 +839,22 @@ on startup to understand the project context.
744
839
  }
745
840
  }
746
841
  } catch {
747
- // Brain-seed download failed non-fatal, project still usable
842
+ // Brain-seed download failed - non-fatal, project still usable
748
843
  }
749
844
  }
750
845
  console.log(`\n${color.bold('Next steps:')}`);
751
846
  console.log(` 1. cd ${name}`);
752
847
  console.log(` 2. npm install`);
753
- console.log(` 3. npx tsx src/index.ts ${color.dim('# or: npx opc run')}`);
754
- console.log(` 4. Open http://localhost:3000\n`);
848
+ if (llmProvider === 'ollama' && !ollamaRunning) {
849
+ console.log(` 3. ollama pull ${llmModel} ${color.dim('# 下载模型')}`);
850
+ console.log(` 4. npx opc run ${color.dim('# 启动 Agent')}`);
851
+ } else if (llmProvider !== 'ollama' && !llmApiKey) {
852
+ console.log(` 3. 编辑 .env 设置 OPC_LLM_API_KEY`);
853
+ console.log(` 4. npx opc run`);
854
+ } else {
855
+ console.log(` 3. npx opc run ${color.dim('# 启动 Agent')}`);
856
+ }
857
+ console.log(` Open http://localhost:3000\n`);
755
858
  console.log(`${color.dim('💡 Tip: Use --role to start from a workstation template:')}`);
756
859
  console.log(`${color.dim(' opc init my-agent --role customer-service')}`);
757
860
  console.log(`${color.dim(' opc init --list-roles (see all roles)')}\n`);
@@ -802,7 +905,7 @@ program
802
905
  // Print startup banner
803
906
  const bannerLines = [
804
907
  '╔══════════════════════════════════════╗',
805
- '║ 🤖 OPC Agent Interactive Chat ║',
908
+ '║ 🤖 OPC Agent - Interactive Chat ║',
806
909
  `║ Agent: ${(agentName + ' v' + agentVersion).padEnd(27)}║`,
807
910
  `║ Model: ${((providerName + '/' + (model ?? 'default')).slice(0, 27)).padEnd(27)}║`,
808
911
  `║ Skills: ${(String(skillNames.length) + ' loaded').padEnd(26)}║`,
@@ -829,12 +932,12 @@ program
829
932
  }
830
933
  if (lower === '/help') {
831
934
  console.log(`\n ${color.bold('Available commands:')}`);
832
- console.log(` ${color.cyan('/help')} Show this help`);
833
- console.log(` ${color.cyan('/quit')} Exit chat (/exit also works)`);
834
- console.log(` ${color.cyan('/clear')} Clear conversation history`);
835
- console.log(` ${color.cyan('/skills')} List registered skills`);
836
- console.log(` ${color.cyan('/memory')} Show memory stats`);
837
- console.log(` ${color.cyan('/info')} Show agent info\n`);
935
+ console.log(` ${color.cyan('/help')} - Show this help`);
936
+ console.log(` ${color.cyan('/quit')} - Exit chat (/exit also works)`);
937
+ console.log(` ${color.cyan('/clear')} - Clear conversation history`);
938
+ console.log(` ${color.cyan('/skills')} - List registered skills`);
939
+ console.log(` ${color.cyan('/memory')} - Show memory stats`);
940
+ console.log(` ${color.cyan('/info')} - Show agent info\n`);
838
941
  return true;
839
942
  }
840
943
  if (lower === '/clear') {
@@ -1579,7 +1682,7 @@ protocolCmd.command('list')
1579
1682
  const protocols = config?.spec?.protocols || {};
1580
1683
  const items = [
1581
1684
  { name: 'a2a', description: 'Agent-to-Agent protocol', enabled: !!protocols.a2a?.enabled, detail: protocols.a2a?.port ? `port ${protocols.a2a.port}` : '' },
1582
- { name: 'agui', description: 'AG-UI Agent-User Interaction (SSE)', enabled: !!protocols.agui?.enabled, detail: protocols.agui?.path || '/agui' },
1685
+ { name: 'agui', description: 'AG-UI - Agent-User Interaction (SSE)', enabled: !!protocols.agui?.enabled, detail: protocols.agui?.path || '/agui' },
1583
1686
  ];
1584
1687
  console.log(`\n${icon.gear} ${color.bold('Protocols')}\n`);
1585
1688
  for (const p of items) {
@@ -1708,7 +1811,7 @@ brainCmd
1708
1811
  .description('Show brain stats (pages, tiers, last evolve)')
1709
1812
  .option('--url <url>', 'DeepBrain server URL', 'http://localhost:3333')
1710
1813
  .action(async (opts: { url: string }) => {
1711
- console.log(`\n${icon.gear} ${color.bold('DeepBrain Status')} ${color.dim(opts.url)}\n`);
1814
+ console.log(`\n${icon.gear} ${color.bold('DeepBrain Status')} - ${color.dim(opts.url)}\n`);
1712
1815
  try {
1713
1816
  const res = await fetch(`${opts.url}/api/stats`);
1714
1817
  if (!res.ok) throw new Error(`HTTP ${res.status} ${res.statusText}`);
@@ -1739,7 +1842,7 @@ brainCmd
1739
1842
  brainCmd
1740
1843
  .command('seed')
1741
1844
  .description('Import brain seed files into memory')
1742
- .option('-f, --file <file>', 'OAD file', 'agent.yaml')
1845
+ .option('-f, --file <file>', 'OAD file', 'oad.yaml')
1743
1846
  .option('--status', 'Check if seeds have been imported')
1744
1847
  .option('--reset', 'Re-import seeds (clear marker and re-seed)')
1745
1848
  .action(async (opts: { file: string; status?: boolean; reset?: boolean }) => {
@@ -1812,7 +1915,7 @@ brainCmd
1812
1915
  console.log(` ${color.cyan(c.slug)} → ${c.fromTier} → ${c.toTier} (confidence: ${(c.confidence * 100).toFixed(0)}%)`);
1813
1916
  }
1814
1917
  if (opts.dryRun) {
1815
- console.log(`\n ${icon.info} Dry run no changes made.\n`);
1918
+ console.log(`\n ${icon.info} Dry run - no changes made.\n`);
1816
1919
  } else {
1817
1920
  console.log(`\n ${icon.success} Promoted ${result.promoted} knowledge entries.\n`);
1818
1921
  }
@@ -2271,7 +2374,7 @@ program
2271
2374
  }
2272
2375
 
2273
2376
  // Create a minimal mock agent for eval (real usage would load from OAD)
2274
- const oadPath = path.resolve('agent.yaml');
2377
+ const oadPath = path.resolve(fs.existsSync('oad.yaml') ? 'oad.yaml' : 'agent.yaml');
2275
2378
  let agent: any;
2276
2379
  if (fs.existsSync(oadPath)) {
2277
2380
  const runtime = new AgentRuntime();
@@ -2281,7 +2384,7 @@ program
2281
2384
  }
2282
2385
 
2283
2386
  if (!agent) {
2284
- console.log(`${icon.warn} No agent.yaml found running with dry-run mock agent.`);
2387
+ console.log(`${icon.warn} No oad.yaml or agent.yaml found - running with dry-run mock agent.`);
2285
2388
  agent = { chat: async (input: string) => `[mock response to: ${input}]` };
2286
2389
  }
2287
2390
 
@@ -2351,7 +2454,7 @@ guardrailsCmd
2351
2454
 
2352
2455
  const result = await manager.checkInput(message);
2353
2456
  if (result.passed) {
2354
- console.log(color.green('✓ PASSED no violations'));
2457
+ console.log(color.green('✓ PASSED - no violations'));
2355
2458
  } else {
2356
2459
  if (result.blocked) console.log(color.red('✗ BLOCKED'));
2357
2460
  if (result.warned) console.log(color.yellow('⚠ WARNING'));
@@ -2379,7 +2482,7 @@ program
2379
2482
  .action(async (opts: any) => {
2380
2483
  console.log(color.bold('🎤 Voice Conversation Mode'));
2381
2484
  console.log(` STT: ${opts.stt} | TTS: ${opts.tts} | Voice: ${opts.voice ?? 'default'} | Language: ${opts.language}`);
2382
- console.log(color.dim(' (Voice conversation requires audio input integration use as library)'));
2485
+ console.log(color.dim(' (Voice conversation requires audio input integration - use as library)'));
2383
2486
  console.log();
2384
2487
  console.log('To use voice in your agent:');
2385
2488
  console.log(color.cyan(`
@@ -2452,7 +2555,7 @@ keysCmd
2452
2555
 
2453
2556
  const approveCmd = program.command('approve').description('Manage command approvals');
2454
2557
 
2455
- // Singleton for CLI in real usage this would be loaded from daemon state
2558
+ // Singleton for CLI - in real usage this would be loaded from daemon state
2456
2559
  const approvalManager = new ApprovalManager();
2457
2560
 
2458
2561
  approveCmd
@@ -2589,7 +2692,7 @@ a2aCmd
2589
2692
  .action(() => {
2590
2693
  const { oadToAgentCard } = require('./protocols/a2a');
2591
2694
  const oad = loadOADFile();
2592
- if (!oad) { console.log(`${icon.error} No agent.yaml found`); return; }
2695
+ if (!oad) { console.log(`${icon.error} No oad.yaml or agent.yaml found`); return; }
2593
2696
  const card = oadToAgentCard(oad, 'http://localhost:3001');
2594
2697
  console.log(JSON.stringify(card, null, 2));
2595
2698
  });
@@ -2628,7 +2731,7 @@ a2aCmd
2628
2731
  function loadOADFile(): any {
2629
2732
  const fs = require('fs');
2630
2733
  const yaml = require('js-yaml');
2631
- for (const name of ['agent.yaml', 'agent.yml']) {
2734
+ for (const name of ['oad.yaml', 'agent.yaml', 'agent.yml']) {
2632
2735
  if (fs.existsSync(name)) {
2633
2736
  return yaml.load(fs.readFileSync(name, 'utf-8'));
2634
2737
  }
@@ -2637,7 +2740,7 @@ function loadOADFile(): any {
2637
2740
  }
2638
2741
 
2639
2742
  // ── MCP Server Commands ────────────────────────────────────
2640
- const mcpCmd = program.command('mcp').description('MCP server commands expose agent as MCP tools');
2743
+ const mcpCmd = program.command('mcp').description('MCP server commands - expose agent as MCP tools');
2641
2744
 
2642
2745
  mcpCmd
2643
2746
  .command('serve')
@@ -2666,7 +2769,7 @@ mcpCmd
2666
2769
  console.log(`${icon.info} Message endpoint: http://localhost:${port}/message`);
2667
2770
  console.log(`${icon.info} Tools: ${server.getToolCount()}`);
2668
2771
  } else {
2669
- console.error(`${icon.success} MCP server (stdio) started ${server.getToolCount()} tools`);
2772
+ console.error(`${icon.success} MCP server (stdio) started - ${server.getToolCount()} tools`);
2670
2773
  await server.serveStdio();
2671
2774
  }
2672
2775
  });
@@ -2717,7 +2820,7 @@ mcpCmd
2717
2820
  console.log(`${icon.success} MCP server ${color.cyan(name)} running on http://localhost:${port}`);
2718
2821
  console.log(`${icon.info} Tools: ${server.getToolCount()}`);
2719
2822
  } else {
2720
- console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio) ${server.getToolCount()} tools`);
2823
+ console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio) - ${server.getToolCount()} tools`);
2721
2824
  await server.serveStdio();
2722
2825
  }
2723
2826
  });
@@ -45,7 +45,16 @@ export class AgentRuntime {
45
45
 
46
46
  async loadConfig(filePath: string): Promise<OADDocument> {
47
47
  const fs = require('fs');
48
+ const path = require('path');
49
+
50
+ // If the requested file does not exist, try a fallback
48
51
  if (!fs.existsSync(filePath)) {
52
+ // If a legacy agent.yaml is found, suggest migrating
53
+ if (filePath === 'oad.yaml' && fs.existsSync('agent.yaml')) {
54
+ this.logger.warn('⚠️ 发现 agent.yaml 但未找到 oad.yaml。建议运行 `opc migrate` 统一为 oad.yaml。');
55
+ this.logger.info('暂时使用 agent.yaml 加载配置...');
56
+ filePath = 'agent.yaml';
57
+ } else {
49
58
  // Auto-create a minimal oad.yaml with auto-detect provider
50
59
  const yaml = require('js-yaml');
51
60
  const defaultOAD = {
@@ -61,9 +70,16 @@ export class AgentRuntime {
61
70
  };
62
71
  fs.writeFileSync(filePath, yaml.dump(defaultOAD, { lineWidth: 120 }));
63
72
  this.logger.info('Created default oad.yaml (no config file found)');
73
+ }
64
74
  }
65
75
  this.config = loadOAD(filePath);
66
76
  this.logger.info('Config loaded', { name: this.config.metadata.name });
77
+
78
+ // If both agent.yaml and oad.yaml exist, ask the user to clean up
79
+ if (fs.existsSync('agent.yaml') && fs.existsSync('oad.yaml')) {
80
+ this.logger.warn('⚠️ 同时存在 agent.yaml 和 oad.yaml。建议删除 agent.yaml,统一使用 oad.yaml。');
81
+ }
82
+
67
83
  return this.config;
68
84
  }
69
85
 
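After this hunk, loadConfig resolves configuration in a fixed order: the requested file, then a legacy agent.yaml (with a migration warning), then an auto-created minimal oad.yaml. A compact sketch of that resolution order (the helper is illustrative, not the package's actual API):

    import * as fs from 'fs';

    // Mirrors the fallback logic added to AgentRuntime.loadConfig above.
    function resolveConfigPath(requested = 'oad.yaml'): { path: string; legacy: boolean; autoCreate: boolean } {
      if (fs.existsSync(requested)) return { path: requested, legacy: false, autoCreate: false };
      if (requested === 'oad.yaml' && fs.existsSync('agent.yaml')) {
        // Legacy layout: load agent.yaml for now, but `opc migrate` is recommended.
        return { path: 'agent.yaml', legacy: true, autoCreate: false };
      }
      // loadConfig writes a default oad.yaml in this case and then loads it.
      return { path: requested, legacy: false, autoCreate: true };
    }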
@@ -99,6 +115,15 @@ export class AgentRuntime {
99
115
  const cfg = config ?? this.config;
100
116
  if (!cfg) throw new Error('No config loaded. Call loadConfig() first.');
101
117
 
118
+ // Warn at startup if the API key is missing or still a placeholder
119
+ const apiKey = process.env.OPC_LLM_API_KEY;
120
+ const cfgProvider = cfg.spec.provider?.default;
121
+ if (cfgProvider !== 'ollama' && cfgProvider !== 'auto') {
122
+ if (!apiKey || apiKey === 'your-api-key-here') {
123
+ this.logger.warn('⚠️ API Key 未配置或仍是占位符。请编辑 .env 文件设置 OPC_LLM_API_KEY。');
124
+ }
125
+ }
126
+
102
127
  let memory: MemoryStore | undefined;
103
128
  const memCfg = cfg.spec.memory;
104
129
  if (memCfg && typeof memCfg.longTerm === 'object' && memCfg.longTerm.provider === 'deepbrain') {