opc-agent 4.1.23 → 4.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +59 -119
- package/COMPETITIVE-GAP.md +92 -92
- package/CONTRIBUTING.md +36 -36
- package/README.md +290 -290
- package/README.zh-CN.md +269 -269
- package/dist/channels/telegram.d.ts +0 -5
- package/dist/channels/telegram.d.ts.map +1 -1
- package/dist/channels/telegram.js +0 -108
- package/dist/channels/telegram.js.map +1 -1
- package/dist/channels/voice.d.ts +97 -71
- package/dist/channels/voice.d.ts.map +1 -1
- package/dist/channels/voice.js +347 -369
- package/dist/channels/voice.js.map +1 -1
- package/dist/channels/web.d.ts.map +1 -1
- package/dist/channels/web.js +2 -8
- package/dist/channels/web.js.map +1 -1
- package/dist/channels/wechat.js +6 -6
- package/dist/cli/chat.d.ts +1 -4
- package/dist/cli/chat.d.ts.map +1 -1
- package/dist/cli/chat.js +73 -680
- package/dist/cli/chat.js.map +1 -1
- package/dist/cli/setup.js +1 -1
- package/dist/cli/setup.js.map +1 -1
- package/dist/cli.js +280 -373
- package/dist/cli.js.map +1 -1
- package/dist/core/agent.d.ts +0 -1
- package/dist/core/agent.d.ts.map +1 -1
- package/dist/core/agent.js +0 -3
- package/dist/core/agent.js.map +1 -1
- package/dist/core/runtime.d.ts.map +1 -1
- package/dist/core/runtime.js +22 -192
- package/dist/core/runtime.js.map +1 -1
- package/dist/deploy/index.js +56 -56
- package/dist/doctor.d.ts +0 -1
- package/dist/doctor.d.ts.map +1 -1
- package/dist/doctor.js +10 -155
- package/dist/doctor.js.map +1 -1
- package/dist/index.d.ts +3 -4
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +9 -9
- package/dist/index.js.map +1 -1
- package/dist/memory/deepbrain.d.ts +1 -1
- package/dist/memory/deepbrain.d.ts.map +1 -1
- package/dist/memory/deepbrain.js +4 -95
- package/dist/memory/deepbrain.js.map +1 -1
- package/dist/memory/index.d.ts +0 -2
- package/dist/memory/index.d.ts.map +1 -1
- package/dist/memory/index.js +1 -3
- package/dist/memory/index.js.map +1 -1
- package/dist/memory/user-profiler.d.ts +0 -8
- package/dist/memory/user-profiler.d.ts.map +1 -1
- package/dist/memory/user-profiler.js +0 -89
- package/dist/memory/user-profiler.js.map +1 -1
- package/dist/scheduler/cron-engine.d.ts.map +1 -1
- package/dist/scheduler/cron-engine.js +36 -3
- package/dist/scheduler/cron-engine.js.map +1 -1
- package/dist/skills/auto-learn.d.ts.map +1 -1
- package/dist/skills/auto-learn.js +11 -65
- package/dist/skills/auto-learn.js.map +1 -1
- package/dist/skills/builtin/index.d.ts.map +1 -1
- package/dist/skills/builtin/index.js +30 -163
- package/dist/skills/builtin/index.js.map +1 -1
- package/dist/skills/types.d.ts +1 -1
- package/dist/skills/types.d.ts.map +1 -1
- package/dist/skills/types.js +0 -1
- package/dist/skills/types.js.map +1 -1
- package/dist/studio/server.d.ts +0 -1
- package/dist/studio/server.d.ts.map +1 -1
- package/dist/studio/server.js +12 -142
- package/dist/studio/server.js.map +1 -1
- package/dist/studio-ui/index.html +26 -365
- package/dist/ui/components.js +105 -105
- package/examples/README.md +22 -22
- package/examples/basic-agent.ts +90 -90
- package/examples/brain-integration.ts +71 -71
- package/examples/multi-channel.ts +74 -74
- package/install.ps1 +127 -127
- package/install.sh +154 -154
- package/models.json +164 -164
- package/package.json +63 -66
- package/scripts/install.ps1 +31 -31
- package/scripts/install.sh +40 -40
- package/templates/ecommerce-assistant/README.md +45 -45
- package/templates/ecommerce-assistant/oad.yaml +47 -47
- package/templates/tech-support/README.md +43 -43
- package/templates/tech-support/oad.yaml +45 -45
- package/.opc/memory.db +0 -0
- package/dist/core/model-recommender.d.ts +0 -40
- package/dist/core/model-recommender.d.ts.map +0 -1
- package/dist/core/model-recommender.js +0 -186
- package/dist/core/model-recommender.js.map +0 -1
- package/dist/memory/evolve-engine.d.ts +0 -113
- package/dist/memory/evolve-engine.d.ts.map +0 -1
- package/dist/memory/evolve-engine.js +0 -549
- package/dist/memory/evolve-engine.js.map +0 -1
- package/dist/memory/sqlite-store.d.ts +0 -40
- package/dist/memory/sqlite-store.d.ts.map +0 -1
- package/dist/memory/sqlite-store.js +0 -269
- package/dist/memory/sqlite-store.js.map +0 -1
- package/dist/scheduler/proactive.d.ts +0 -62
- package/dist/scheduler/proactive.d.ts.map +0 -1
- package/dist/scheduler/proactive.js +0 -185
- package/dist/scheduler/proactive.js.map +0 -1
package/dist/cli.js
CHANGED
@@ -40,7 +40,6 @@ const path = __importStar(require("path"));
 const yaml = __importStar(require("js-yaml"));
 const readline = __importStar(require("readline"));
 const runtime_1 = require("./core/runtime");
-const model_recommender_1 = require("./core/model-recommender");
 const customer_service_1 = require("./templates/customer-service");
 const sales_assistant_1 = require("./templates/sales-assistant");
 const knowledge_base_1 = require("./templates/knowledge-base");
@@ -61,6 +60,7 @@ const hermes_1 = require("./deploy/hermes");
 const index_1 = require("./deploy/index");
 const workflow_1 = require("./core/workflow");
 const versioning_1 = require("./core/versioning");
+const providers_1 = require("./providers");
 const knowledge_1 = require("./core/knowledge");
 const doctor_1 = require("./doctor");
 const child_process_1 = require("child_process");
@@ -124,7 +124,7 @@ async function select(question, options) {
 program
 .name('opc')
 .description('OPC Agent - Open Agent Framework for business workstations')
-.version(
+.version('2.0.0');
 // ── Init command ─────────────────────────────────────────────
 program
 .command('init')
@@ -186,7 +186,7 @@ program
 }
 const roleDisplayName = roleMeta.name || matched.role;
 const roleDescription = roleMeta.name_zh ? `${roleMeta.name} (${roleMeta.name_zh})` : (roleMeta.name || matched.role);
-console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)}
+console.log(` ${icon.info} Matched role: ${color.cyan(matched.category + '/' + matched.role)} — ${roleDisplayName}`);
 // Create directories
 fs.mkdirSync(dir, { recursive: true });
 fs.mkdirSync(path.join(dir, 'src', 'skills'), { recursive: true });
@@ -205,9 +205,9 @@ program
 // Company-specific knowledge belongs to Desk (closed-source), not here.
 const workstationSeedFromRole = workstationMatch?.[0]?.trim() || '';
 fs.writeFileSync(path.join(dir, 'brain-seeds', 'workstation.md'), workstationSeedFromRole || `# Workstation Knowledge\n\n## Tools & Environment\n\nCommon tools and setup for this workstation role.\n\n## Workflows\n\nStandard operating procedures and workflows.\n\n## Best Practices\n\nIndustry best practices for this role.\n`);
-//
+// agent.yaml with role system prompt and brain seeds
 const firstLine = systemPromptContent.split('\n').find((l) => l.trim() && !l.startsWith('#'))?.trim() || 'You are a helpful AI assistant.';
-fs.writeFileSync(path.join(dir, '
+fs.writeFileSync(path.join(dir, 'agent.yaml'), `apiVersion: opc/v1
 kind: Agent
 metadata:
 name: ${name}
@@ -251,14 +251,14 @@ spec:
 if (roleData.files['oad.yaml']) {
 fs.writeFileSync(path.join(dir, 'oad.yaml'), roleData.files['oad.yaml']);
 }
-// src/index.ts
+// src/index.ts — entry point (same as generic)
 fs.writeFileSync(path.join(dir, 'src', 'index.ts'), `import { AgentRuntime } from 'opc-agent';
 import { EchoSkill } from './skills/echo';
 import { readFileSync, existsSync } from 'fs';

 async function main() {
 const runtime = new AgentRuntime();
-const config = await runtime.loadConfig('./
+const config = await runtime.loadConfig('./agent.yaml');

 const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
 const context = existsSync('./CONTEXT.md') ? readFileSync('./CONTEXT.md', 'utf-8') : '';
@@ -305,15 +305,15 @@ export class EchoSkill extends BaseSkill {
 fs.writeFileSync(path.join(dir, 'package.json'), JSON.stringify({ name, version: '1.0.0', private: true, scripts: { start: 'opc run', dev: 'opc dev', chat: 'opc chat', build: 'tsc' }, dependencies: { 'opc-agent': '^1.3.0' }, devDependencies: { typescript: '^5.5.0', tsx: '^4.0.0' } }, null, 2));
 // .gitignore, .env.example, .env
 fs.writeFileSync(path.join(dir, '.gitignore'), 'node_modules\ndist\n.env\n.opc-knowledge.json\ndata/\n');
-fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\
-fs.writeFileSync(path.join(dir, '.env'),
+fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration\nOPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
+fs.writeFileSync(path.join(dir, '.env'), `OPC_LLM_API_KEY=your-api-key-here\nOPC_LLM_BASE_URL=https://api.openai.com/v1\nOPC_LLM_MODEL=gpt-4o-mini\n`);
 // README.md
 fs.writeFileSync(path.join(dir, 'README.md'), `# ${name}\n\nCreated with [OPC Agent](https://github.com/Deepleaper/opc-agent) using the \`${matched.category}/${matched.role}\` workstation role.\n\n## Quick Start\n\n\`\`\`bash\nnpm install\nollama pull qwen2.5\nnpx tsx src/index.ts\n\`\`\`\n\nOpen [http://localhost:3000](http://localhost:3000)\n`);
 // Dockerfile + docker-compose
-fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
-fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./
+fs.writeFileSync(path.join(dir, 'Dockerfile'), `FROM node:22-alpine\nWORKDIR /app\nCOPY package.json package-lock.json* ./\nRUN npm ci --production 2>/dev/null || npm install --production\nCOPY oad.yaml agent.yaml .env* ./\nCOPY src/ ./src/\nCOPY prompts/ ./prompts/ 2>/dev/null || true\nEXPOSE 3000\nCMD ["npx", "opc", "run"]\n`);
+fs.writeFileSync(path.join(dir, 'docker-compose.yml'), `version: '3.8'\nservices:\n agent:\n build: .\n ports:\n - "3000:3000"\n env_file:\n - .env\n volumes:\n - ./agent.yaml:/app/agent.yaml:ro\n restart: unless-stopped\n`);
 console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')} from role ${color.cyan(matched.category + '/' + matched.role)}`);
-console.log(` ${icon.file}
+console.log(` ${icon.file} agent.yaml - Agent definition with role system prompt`);
 console.log(` ${icon.file} SOUL.md - Role personality (${systemPromptContent.split('\n').length} lines)`);
 console.log(` ${icon.file} CONTEXT.md - Role context & documentation`);
 console.log(` ${icon.file} brain-seeds/ - 3-tier brain seed knowledge`);
@@ -344,7 +344,7 @@ export class EchoSkill extends BaseSkill {
 }
 }
 catch {
-// Hub unreachable
+// Hub unreachable — fall back to bundled templates
 }
 let template;
 let selectedHubTemplate;
@@ -364,156 +364,6 @@ export class EchoSkill extends BaseSkill {
 else {
 template = await select('Select a template:', Object.entries(TEMPLATES).map(([value, { label }]) => ({ value, label })));
 }
-// ── 硬件检测 + 智能模型推荐 ──
-// ── 硬件检测 + 远程模型推荐 ──
-const sys = (0, model_recommender_1.detectSystem)();
-const allModels = await (0, model_recommender_1.fetchModelList)();
-// ── LLM Provider 选择(Ollama-first)──
-let llmProvider = 'ollama';
-let llmModel = 'qwen2.5';
-let llmBaseUrl = 'http://localhost:11434/v1';
-let llmApiKey = '';
-let ollamaRunning = false;
-let modelNames = [];
-// 无论 --yes 还是交互式,都先检测 Ollama
-try {
-const controller = new AbortController();
-const ollamaTimeout = setTimeout(() => controller.abort(), 3000);
-const ollamaRes = await fetch('http://localhost:11434/api/tags', { signal: controller.signal });
-clearTimeout(ollamaTimeout);
-const ollamaData = await ollamaRes.json();
-modelNames = (ollamaData.models || []).map((m) => m.name || m.model);
-ollamaRunning = true;
-if (opts.yes && modelNames.length > 0) {
-const rec = (0, model_recommender_1.recommendModels)(allModels, sys, modelNames);
-// --yes: prefer best installed recommended model
-const bestInstalled = rec.installed.length > 0 ? rec.installed[rec.installed.length - 1] : null;
-// Filter out embedding-only models (can't chat)
-const chatModels = modelNames.filter(m => !m.includes('embed'));
-llmModel = bestInstalled ? bestInstalled.name : (chatModels[0] || 'qwen2.5:7b');
-}
-}
-catch {
-ollamaRunning = false;
-}
-// Compute recommendation (used by both interactive branches)
-const rec = (0, model_recommender_1.recommendModels)(allModels, sys, modelNames);
-if (!opts.yes) {
-if (ollamaRunning) {
-console.log(`\n ${icon.info} ${color.dim('正在检测 Ollama...')}`);
-console.log(` ${icon.success} Ollama 已运行,发现 ${modelNames.length} 个模型`);
-console.log(` ${icon.info} 系统: ${sys.totalRAM}GB RAM (${sys.freeRAM}GB 可用), ${sys.cpuCount} CPU cores`);
-console.log(` ${icon.info} 推荐模型: ${color.cyan(rec.best.name)} (${rec.best.size}) - ${rec.best.desc}`);
-// 选择 provider
-llmProvider = await select('选择 LLM 引擎:', [
-{ value: 'ollama', label: '🟢 Ollama (免费本地,推荐) - 已检测到运行中' },
-{ value: 'deepseek', label: '🔵 DeepSeek - 高性价比国产模型' },
-{ value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
-{ value: 'anthropic', label: '🟣 Anthropic (Claude)' },
-{ value: 'custom', label: '⚙️ 自定义 (手动输入 Base URL)' },
-]);
-if (llmProvider === 'ollama') {
-// 已有模型 + 推荐未下载的模型
-const modelOptions = [
-...rec.installed.map((m) => {
-const isBest = m.name === rec.best.name ? ' ⭐推荐' : '';
-return { value: m.name, label: `${m.name} (${m.size}, ${m.desc})${isBest} [已安装]` };
-}),
-// Also show installed models not in recommendation list
-...modelNames.filter(n => !rec.installed.find(m => m.name === n)).map(n => ({ value: n, label: `${n} [已安装]` })),
-...rec.toDownload.map((m) => ({
-value: `pull:${m.name}`,
-label: `${m.name} (${m.size}, ${m.desc}) [需下载]`,
-})),
-];
-if (modelOptions.length > 0) {
-const chosen = await select('选择 Ollama 模型:', modelOptions);
-if (chosen.startsWith('pull:')) {
-const pullModel = chosen.slice(5);
-console.log(`\n ${icon.info} 正在下载 ${color.cyan(pullModel)}...`);
-console.log(` 运行 ${color.cyan(`ollama pull ${pullModel}`)} 下载`);
-console.log(` 下载完成后运行 ${color.cyan('opc run')} 启动\n`);
-llmModel = pullModel;
-}
-else {
-llmModel = chosen;
-}
-}
-else {
-// 没有本地模型,推荐下载
-console.log(` ${color.yellow('⚠️')} 没有发现已下载的模型`);
-console.log(` ${icon.info} 根据你的硬件 (${sys.freeRAM}GB 可用),推荐下载:`);
-for (const m of rec.suitable.slice(-3)) {
-console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
-}
-llmModel = rec.best.name;
-}
-}
-}
-else {
-// Ollama not running
-console.log(`\n ${icon.info} ${color.dim('正在检测 Ollama...')}`);
-console.log(` ${color.yellow('⚠️')} Ollama 未运行或未安装`);
-llmProvider = await select('选择 LLM 引擎:', [
-{ value: 'ollama', label: '🟢 Ollama (免费本地,推荐) - 需先安装: https://ollama.ai' },
-{ value: 'deepseek', label: '🔵 DeepSeek - 高性价比国产模型' },
-{ value: 'openai', label: '⚪ OpenAI (GPT-4o)' },
-{ value: 'anthropic', label: '🟣 Anthropic (Claude)' },
-{ value: 'custom', label: '⚙️ 自定义 (手动输入 Base URL)' },
-]);
-if (llmProvider === 'ollama') {
-console.log(`\n ${icon.info} Ollama 安装指南:`);
-console.log(` 1. 访问 ${color.cyan('https://ollama.ai')} 下载并安装`);
-console.log(` ${icon.info} 根据你的硬件 (${sys.totalRAM}GB RAM, ${sys.freeRAM}GB 可用),推荐:`);
-for (const m of rec.suitable.slice(-3)) {
-console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
-}
-console.log(` 3. 然后 ${color.cyan('opc run')} 即可开始对话\n`);
-llmModel = rec.best.name;
-}
-}
-// 商业模型需要 API key
-if (llmProvider === 'deepseek') {
-llmBaseUrl = 'https://api.deepseek.com/v1';
-llmModel = 'deepseek-chat';
-llmApiKey = await promptUser('输入 DeepSeek API Key (可稍后在 .env 中配置,直接回车跳过)');
-if (!llmApiKey) {
-console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
-}
-}
-else if (llmProvider === 'openai') {
-llmBaseUrl = 'https://api.openai.com/v1';
-llmModel = 'gpt-4o-mini';
-llmApiKey = await promptUser('输入 OpenAI API Key (可稍后在 .env 中配置,直接回车跳过)');
-if (!llmApiKey) {
-console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
-}
-}
-else if (llmProvider === 'anthropic') {
-llmBaseUrl = 'https://api.anthropic.com/v1';
-llmModel = 'claude-sonnet-4-20250514';
-llmApiKey = await promptUser('输入 Anthropic API Key (可稍后在 .env 中配置,直接回车跳过)');
-if (!llmApiKey) {
-console.log(` ${icon.info} 稍后在 ${color.cyan('.env')} 文件中设置 ${color.bold('OPC_LLM_API_KEY')}`);
-}
-}
-else if (llmProvider === 'custom') {
-llmBaseUrl = await promptUser('输入 Base URL', 'http://localhost:11434/v1');
-llmModel = await promptUser('输入模型名称', 'qwen2.5');
-llmApiKey = await promptUser('输入 API Key (可选,直接回车跳过)');
-// 尝试推断 provider
-if (llmBaseUrl.includes('deepseek.com'))
-llmProvider = 'deepseek';
-else if (llmBaseUrl.includes('openai.com'))
-llmProvider = 'openai';
-else if (llmBaseUrl.includes('anthropic.com'))
-llmProvider = 'anthropic';
-else if (llmBaseUrl.includes('localhost:11434'))
-llmProvider = 'ollama';
-else
-llmProvider = 'openai'; // OpenAI-compatible fallback
-}
-}
 const dir = path.resolve(name);
 if (fs.existsSync(dir)) {
 console.error(`\n${icon.error} Directory ${color.bold(name)} already exists.`);
@@ -524,16 +374,37 @@ export class EchoSkill extends BaseSkill {
 const factory = TEMPLATES[template]?.factory ?? customer_service_1.createCustomerServiceConfig;
 const config = factory();
 config.metadata.name = name;
-// 用用户选择的 provider 和 model 覆盖模板默认值
-config.spec.model = llmModel;
-config.spec.provider = { default: llmProvider };
 // Ensure web channel exists
 if (!config.spec.channels.some((c) => c.type === 'web')) {
 config.spec.channels.push({ type: 'web', port: 3000 });
 }
-// 只生成 oad.yaml,不生成 agent.yaml
 fs.writeFileSync(path.join(dir, 'oad.yaml'), yaml.dump(config, { lineWidth: 120 }));
-//
+// agent.yaml — standalone OAD config for runtime usage
+fs.writeFileSync(path.join(dir, 'agent.yaml'), `apiVersion: opc/v1
+kind: Agent
+metadata:
+name: ${name}
+version: 1.0.0
+description: My AI Agent
+spec:
+model: qwen2.5
+provider:
+default: ollama
+systemPrompt: |
+You are a helpful AI assistant named ${name}.
+Be concise, helpful, and friendly.
+channels:
+- type: web
+port: 3000
+memory:
+shortTerm: true
+longTerm:
+provider: deepbrain
+skills:
+- name: echo
+description: Echo test skill
+`);
+// src/index.ts — entry point
 fs.writeFileSync(path.join(dir, 'src', 'index.ts'), `import { AgentRuntime } from 'opc-agent';
 import { EchoSkill } from './skills/echo';
 import { readFileSync, existsSync } from 'fs';
@@ -542,7 +413,7 @@ async function main() {
 const runtime = new AgentRuntime();

 // Load OAD config
-const config = await runtime.loadConfig('./
+const config = await runtime.loadConfig('./agent.yaml');

 // Load personality and context files
 const soul = existsSync('./SOUL.md') ? readFileSync('./SOUL.md', 'utf-8') : '';
@@ -568,7 +439,7 @@ async function main() {

 main().catch(console.error);
 `);
-// src/skills/echo.ts
+// src/skills/echo.ts — example skill
 fs.writeFileSync(path.join(dir, 'src', 'skills', 'echo.ts'), `import { BaseSkill } from 'opc-agent';
 import type { AgentContext, Message, SkillResult } from 'opc-agent';

@@ -606,39 +477,23 @@ export class EchoSkill extends BaseSkill {
 }, null, 2));
 // .env.example
 fs.writeFileSync(path.join(dir, '.env.example'), `# LLM API Configuration
-
-
-
-# (Ollama 无需 API key)
+OPC_LLM_API_KEY=your-api-key-here
+OPC_LLM_BASE_URL=https://api.openai.com/v1
+OPC_LLM_MODEL=gpt-4o-mini

-# DeepSeek:
-# OPC_LLM_API_KEY=your-deepseek-key
+# For DeepSeek:
 # OPC_LLM_BASE_URL=https://api.deepseek.com/v1
 # OPC_LLM_MODEL=deepseek-chat

-#
-#
-#
-
-
-
-
-
-# OPC_LLM_MODEL=claude-sonnet-4-20250514
+# For local Ollama (default in agent.yaml):
+# OPC_LLM_BASE_URL=http://localhost:11434/v1
+# OPC_LLM_MODEL=qwen2.5
+`);
+// .env (copy of example)
+fs.writeFileSync(path.join(dir, '.env'), `OPC_LLM_API_KEY=your-api-key-here
+OPC_LLM_BASE_URL=https://api.openai.com/v1
+OPC_LLM_MODEL=gpt-4o-mini
 `);
-// .env - 根据用户选择生成正确的配置
-const envLines = [];
-if (llmProvider === 'ollama') {
-envLines.push('# Ollama (免费本地) - 无需 API key');
-envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
-envLines.push(`OPC_LLM_MODEL=${llmModel}`);
-}
-else {
-envLines.push(`OPC_LLM_API_KEY=${llmApiKey || 'your-api-key-here'}`);
-envLines.push(`OPC_LLM_BASE_URL=${llmBaseUrl}`);
-envLines.push(`OPC_LLM_MODEL=${llmModel}`);
-}
-fs.writeFileSync(path.join(dir, '.env'), envLines.join('\n') + '\n');
 // package.json
 fs.writeFileSync(path.join(dir, 'package.json'), JSON.stringify({
 name,
@@ -665,7 +520,7 @@ export class EchoSkill extends BaseSkill {
 WORKDIR /app
 COPY package.json package-lock.json* ./
 RUN npm ci --production 2>/dev/null || npm install --production
-COPY oad.yaml .env* ./
+COPY oad.yaml agent.yaml .env* ./
 COPY src/ ./src/
 COPY prompts/ ./prompts/ 2>/dev/null || true
 EXPOSE 3000
@@ -681,7 +536,7 @@ services:
 env_file:
 - .env
 volumes:
-- ./
+- ./agent.yaml:/app/agent.yaml:ro
 restart: unless-stopped
 `);
 // README.md
@@ -722,7 +577,8 @@ npx opc chat # CLI chat

 ```
 ${name}/
-├──
+├── agent.yaml # OAD agent config (used by src/index.ts)
+├── oad.yaml # OAD config (used by opc CLI)
 ├── src/
 │ ├── index.ts # Entry point
 │ └── skills/
@@ -733,9 +589,9 @@ ${name}/

 ## Configuration

-Edit \`
+Edit \`agent.yaml\` to customize your agent's personality, skills, and behavior.
 `);
-// SOUL.md
+// SOUL.md — agent personality
 const createdDate = new Date().toISOString().split('T')[0];
 fs.writeFileSync(path.join(dir, 'SOUL.md'), `# ${name} Personality

@@ -751,7 +607,7 @@ Edit \`oad.yaml\` to customize your agent's personality, skills, and behavior.

 ## Communication Style
 - Use clear, simple language
-- Be direct
+- Be direct — answer the question first, then explain
 - Use markdown formatting when helpful

 ## Rules
@@ -759,7 +615,7 @@ Edit \`oad.yaml\` to customize your agent's personality, skills, and behavior.
 - Ask for clarification when the request is ambiguous
 - Never make up information
 `);
-// CONTEXT.md
+// CONTEXT.md — project context
 fs.writeFileSync(path.join(dir, 'CONTEXT.md'), `# Project Context

 ## About This Agent
@@ -775,8 +631,7 @@ on startup to understand the project context.
 - Add company policies here
 `);
 console.log(`\n${icon.success} Created agent project: ${color.bold(name + '/')}`);
-console.log(` ${icon.file}
-console.log(` ${icon.file} .env - 环境变量${llmProvider === 'ollama' ? '' : ' (API Key)'}`);
+console.log(` ${icon.file} agent.yaml - Agent definition (OAD)`);
 console.log(` ${icon.file} src/index.ts - Entry point`);
 console.log(` ${icon.file} src/skills/echo.ts - Example skill`);
 console.log(` ${icon.file} SOUL.md - Agent personality`);
@@ -801,37 +656,178 @@ on startup to understand the project context.
 }
 }
 catch {
-// Brain-seed download failed
+// Brain-seed download failed — non-fatal, project still usable
 }
 }
 console.log(`\n${color.bold('Next steps:')}`);
 console.log(` 1. cd ${name}`);
 console.log(` 2. npm install`);
-
-
-console.log(` 4. npx opc run ${color.dim('# 启动 Agent')}`);
-}
-else if (llmProvider !== 'ollama' && !llmApiKey) {
-console.log(` 3. 编辑 .env 设置 OPC_LLM_API_KEY`);
-console.log(` 4. npx opc run`);
-}
-else {
-console.log(` 3. npx opc run ${color.dim('# 启动 Agent')}`);
-}
-console.log(` Open http://localhost:3000\n`);
+console.log(` 3. npx tsx src/index.ts ${color.dim('# or: npx opc run')}`);
+console.log(` 4. Open http://localhost:3000\n`);
 console.log(`${color.dim('💡 Tip: Use --role to start from a workstation template:')}`);
 console.log(`${color.dim(' opc init my-agent --role customer-service')}`);
 console.log(`${color.dim(' opc init --list-roles (see all roles)')}\n`);
 });
 // ── Chat command ─────────────────────────────────────────────
-const chat_1 = require("./cli/chat");
 program
 .command('chat')
-.description('Interactive
+.description('Interactive CLI chat with the agent')
 .option('-f, --file <file>', 'OAD file', 'oad.yaml')
 .action(async (opts) => {
+// Load .env if present
 loadDotEnv();
-
+let systemPrompt = 'You are a helpful AI agent.';
+let model;
+let agentName = 'Agent';
+let agentVersion = '1.0.0';
+let providerName = 'auto';
+let skillNames = [];
+// Try loading SOUL.md and CONTEXT.md for enriched system prompt
+const soulPath = path.resolve('SOUL.md');
+const contextPath = path.resolve('CONTEXT.md');
+const soulContent = fs.existsSync(soulPath) ? fs.readFileSync(soulPath, 'utf-8') : '';
+const contextContent = fs.existsSync(contextPath) ? fs.readFileSync(contextPath, 'utf-8') : '';
+try {
+const raw = fs.readFileSync(opts.file, 'utf-8');
+const config = yaml.load(raw);
+if (config?.spec?.systemPrompt)
+systemPrompt = config.spec.systemPrompt;
+if (config?.spec?.model)
+model = config.spec.model;
+if (config?.metadata?.name)
+agentName = config.metadata.name;
+if (config?.metadata?.version)
+agentVersion = config.metadata.version;
+if (config?.spec?.provider?.default)
+providerName = config.spec.provider.default;
+if (config?.spec?.skills)
+skillNames = config.spec.skills.map((s) => s.name);
+}
+catch {
+// No config file, use defaults
+}
+// Prepend SOUL.md and CONTEXT.md to system prompt
+systemPrompt = [soulContent, contextContent, systemPrompt].filter(Boolean).join('\n\n');
+const provider = (0, providers_1.createProvider)(providerName, model);
+const history = [];
+// Print startup banner
+const bannerLines = [
+'╔══════════════════════════════════════╗',
+'║ 🤖 OPC Agent — Interactive Chat ║',
+`║ Agent: ${(agentName + ' v' + agentVersion).padEnd(27)}║`,
+`║ Model: ${((providerName + '/' + (model ?? 'default')).slice(0, 27)).padEnd(27)}║`,
+`║ Skills: ${(String(skillNames.length) + ' loaded').padEnd(26)}║`,
+'║ Type /help for commands ║',
+'╚══════════════════════════════════════╝',
+];
+console.log('\n' + color.cyan(bannerLines.join('\n')) + '\n');
+if (soulContent)
+console.log(` ${icon.info} Loaded SOUL.md`);
+if (contextContent)
+console.log(` ${icon.info} Loaded CONTEXT.md`);
+if (soulContent || contextContent)
+console.log();
+const rl = readline.createInterface({
+input: process.stdin,
+output: process.stdout,
+historySize: 100,
+});
+const handleSlashCommand = (cmd) => {
+const lower = cmd.toLowerCase().trim();
+if (lower === '/quit' || lower === '/exit') {
+console.log(`\n${color.dim('Goodbye! 👋')}`);
+process.exit(0);
+}
+if (lower === '/help') {
+console.log(`\n ${color.bold('Available commands:')}`);
+console.log(` ${color.cyan('/help')} — Show this help`);
+console.log(` ${color.cyan('/quit')} — Exit chat (/exit also works)`);
+console.log(` ${color.cyan('/clear')} — Clear conversation history`);
+console.log(` ${color.cyan('/skills')} — List registered skills`);
+console.log(` ${color.cyan('/memory')} — Show memory stats`);
+console.log(` ${color.cyan('/info')} — Show agent info\n`);
+return true;
+}
+if (lower === '/clear') {
+history.length = 0;
+console.log(`\n ${icon.success} Conversation history cleared.\n`);
+return true;
+}
+if (lower === '/skills') {
+if (skillNames.length === 0) {
+console.log(`\n ${icon.info} No skills registered.\n`);
+}
+else {
+console.log(`\n ${color.bold('Registered skills:')}`);
+skillNames.forEach((s) => console.log(` • ${color.cyan(s)}`));
+console.log();
+}
+return true;
+}
+if (lower === '/memory') {
+console.log(`\n ${color.bold('Memory stats:')}`);
+console.log(` Messages in history: ${color.cyan(String(history.length))}`);
+console.log(` Characters: ${color.cyan(String(history.reduce((a, m) => a + m.content.length, 0)))}\n`);
+return true;
+}
+if (lower === '/info') {
+console.log(`\n ${color.bold('Agent Info:')}`);
+console.log(` Name: ${color.cyan(agentName)}`);
+console.log(` Version: ${color.cyan(agentVersion)}`);
+console.log(` Provider: ${color.cyan(providerName)}`);
+console.log(` Model: ${color.cyan(model ?? 'default')}`);
+console.log(` Skills: ${color.cyan(String(skillNames.length))}\n`);
+return true;
+}
+return false;
+};
+const ask = () => {
+rl.question(color.cyan('You: '), async (input) => {
+const text = input.trim();
+if (!text) {
+ask();
+return;
+}
+// Handle slash commands
+if (text.startsWith('/') && handleSlashCommand(text)) {
+ask();
+return;
+}
+history.push({ role: 'user', content: text });
+// Build messages for provider
+const messages = history.map((m) => ({
+id: 'x',
+role: m.role,
+content: m.content,
+timestamp: Date.now(),
+}));
+process.stdout.write(color.green('Agent: '));
+let full = '';
+try {
+for await (const chunk of provider.chatStream(messages, systemPrompt)) {
+process.stdout.write(chunk);
+full += chunk;
+}
+}
+catch (err) {
+const msg = err instanceof Error ? err.message : String(err);
+process.stdout.write(color.red(`\n[Error: ${msg}]`));
+full = `[Error: ${msg}]`;
+}
+console.log('\n');
+history.push({ role: 'assistant', content: full });
+// Trim history if too long (keep last 40 messages)
+if (history.length > 40) {
+history.splice(0, history.length - 40);
+}
+ask();
+});
+};
+rl.on('close', () => {
+console.log(`\n${color.dim('Goodbye! 👋')}`);
+process.exit(0);
+});
+ask();
 });
 // ── Run command ──────────────────────────────────────────────
 program
@@ -862,8 +858,6 @@ program
 console.log(` ${color.dim('Studio:')} ${studioUrl}`);
 console.log(` ${color.dim('API:')} POST http://localhost:3000/api/chat`);
 console.log(`\n ${color.dim('Press Ctrl+C to stop.')}\n`);
-// Keep the process alive — HTTP server refs may not suffice with Commander
-await new Promise(() => { });
 });
 // ── Serve command (OpenAI-compatible API) ────────────────────
 program
@@ -900,8 +894,6 @@ program
 console.log(` GET /health`);
 console.log(` GET /v1/agent/status`);
 console.log(`\n ${color.dim('Press Ctrl+C to stop.')}\n`);
-// Keep the process alive
-await new Promise(() => { });
 });
 // ── Info command ─────────────────────────────────────────────
 program
@@ -1468,7 +1460,7 @@ protocolCmd.command('list')
 const protocols = config?.spec?.protocols || {};
 const items = [
 { name: 'a2a', description: 'Agent-to-Agent protocol', enabled: !!protocols.a2a?.enabled, detail: protocols.a2a?.port ? `port ${protocols.a2a.port}` : '' },
-{ name: 'agui', description: 'AG-UI
+{ name: 'agui', description: 'AG-UI — Agent-User Interaction (SSE)', enabled: !!protocols.agui?.enabled, detail: protocols.agui?.path || '/agui' },
 ];
 console.log(`\n${icon.gear} ${color.bold('Protocols')}\n`);
 for (const p of items) {
@@ -1598,29 +1590,43 @@ const brainCmd = program
 .description('Manage agent brain (memory, seeds, evolve)');
 brainCmd
 .command('status')
-.description('Show brain stats (
-.
-
+.description('Show brain stats (pages, tiers, last evolve)')
+.option('--url <url>', 'DeepBrain server URL', 'http://localhost:3333')
+.action(async (opts) => {
+console.log(`\n${icon.gear} ${color.bold('DeepBrain Status')} — ${color.dim(opts.url)}\n`);
 try {
-const
-
-
-const stats =
-
-
-
-
-
+const res = await fetch(`${opts.url}/api/stats`);
+if (!res.ok)
+throw new Error(`HTTP ${res.status} ${res.statusText}`);
+const stats = (await res.json());
+const rows = [
+['Total Pages', String(stats.totalPages ?? stats.pages ?? '-')],
+['Total Chunks', String(stats.totalChunks ?? stats.chunks ?? '-')],
+['Memory Tiers', String(stats.memoryTiers ?? stats.tiers ?? '-')],
+['Index Size', stats.indexSize ?? '-'],
+['Last Updated', stats.lastUpdated ?? stats.updatedAt ?? '-'],
+];
+const maxKey = Math.max(...rows.map(([k]) => k.length));
+for (const [key, val] of rows) {
+console.log(` ${color.cyan(key.padEnd(maxKey))} ${val}`);
+}
 console.log();
 }
-catch (
-
+catch (err) {
+const msg = err instanceof Error ? err.message : String(err);
+if (msg.includes('ECONNREFUSED') || msg.includes('fetch failed')) {
+console.log(` ${icon.warn} Cannot connect to DeepBrain at ${opts.url}`);
+console.log(` ${color.dim('Is the server running? Start with: deepbrain serve')}\n`);
+}
+else {
+console.error(` ${icon.error} ${msg}\n`);
+}
 }
 });
 brainCmd
 .command('seed')
 .description('Import brain seed files into memory')
-.option('-f, --file <file>', 'OAD file', '
+.option('-f, --file <file>', 'OAD file', 'agent.yaml')
 .option('--status', 'Check if seeds have been imported')
 .option('--reset', 'Re-import seeds (clear marker and re-seed)')
 .action(async (opts) => {
@@ -1673,32 +1679,29 @@ brainCmd
 });
 brainCmd
 .command('evolve')
-.description('Trigger manual knowledge evolution cycle
-.option('--dry-run', 'Show
+.description('Trigger manual knowledge evolution cycle')
+.option('--dry-run', 'Show what would be promoted without doing it')
 .action(async (opts) => {
-const {
-const
-console.log(`\n${icon.gear} ${color.bold('Knowledge Evolution')}
-
-
-const
-
-
-console.log(` Last evolve: ${stats.lastEvolve}`);
-if (opts.dryRun) {
-console.log(`\n ${icon.info} Dry run — no changes.\n`);
-return;
+const { KnowledgeEvolver } = require('./memory/seed-loader');
+const evolver = new KnowledgeEvolver();
+console.log(`\n${icon.gear} ${color.bold('Knowledge Evolution')}\n`);
+console.log(` ${icon.info} Checking for promotion candidates...`);
+// Would connect to real brain in production
+const result = await evolver.checkPromotion(null);
+if (result.candidates.length === 0) {
+console.log(` ${icon.info} No knowledge ready for promotion yet.\n`);
 }
-
-
-
-
-
-
-
-
+else {
+for (const c of result.candidates) {
+console.log(` ${color.cyan(c.slug)} → ${c.fromTier} → ${c.toTier} (confidence: ${(c.confidence * 100).toFixed(0)}%)`);
+}
+if (opts.dryRun) {
+console.log(`\n ${icon.info} Dry run — no changes made.\n`);
+}
+else {
+console.log(`\n ${icon.success} Promoted ${result.promoted} knowledge entries.\n`);
+}
 }
-console.log();
 });
 // ── Logs command ─────────────────────────────────────────────
 program
@@ -2142,7 +2145,7 @@ program
 return;
 }
 // Create a minimal mock agent for eval (real usage would load from OAD)
-const oadPath = path.resolve(
+const oadPath = path.resolve('agent.yaml');
 let agent;
 if (fs.existsSync(oadPath)) {
 const runtime = new runtime_1.AgentRuntime();
@@ -2151,7 +2154,7 @@ program
 agent = runtime.agent;
 }
 if (!agent) {
-console.log(`${icon.warn} No
+console.log(`${icon.warn} No agent.yaml found — running with dry-run mock agent.`);
 agent = { chat: async (input) => `[mock response to: ${input}]` };
 }
 const evaluator = new eval_1.AgentEvaluator(agent);
@@ -2211,7 +2214,7 @@ guardrailsCmd
 console.log();
 const result = await manager.checkInput(message);
 if (result.passed) {
-console.log(color.green('✓ PASSED
+console.log(color.green('✓ PASSED — no violations'));
 }
 else {
 if (result.blocked)
@@ -2240,7 +2243,7 @@ program
 .action(async (opts) => {
 console.log(color.bold('🎤 Voice Conversation Mode'));
 console.log(` STT: ${opts.stt} | TTS: ${opts.tts} | Voice: ${opts.voice ?? 'default'} | Language: ${opts.language}`);
-console.log(color.dim(' (Voice conversation requires audio input integration
+console.log(color.dim(' (Voice conversation requires audio input integration — use as library)'));
 console.log();
 console.log('To use voice in your agent:');
 console.log(color.cyan(`
@@ -2257,101 +2260,7 @@ program
 await voice.start();
 `));
 });
-
-program
-.command('models')
-.description('Show recommended Ollama models for your system')
-.option('--refresh', 'Force refresh model list from remote')
-.option('--json', 'Output as JSON')
-.action(async (opts) => {
-if (opts.refresh) {
-(0, model_recommender_1.clearModelCache)();
-console.log(`${icon.success} 模型推荐缓存已清除`);
-}
-const sys = (0, model_recommender_1.detectSystem)();
-const models = await (0, model_recommender_1.fetchModelList)();
-const cache = (0, model_recommender_1.cacheInfo)();
-// Detect Ollama
-let installedModels = [];
-try {
-const ctrl = new AbortController();
-const t = setTimeout(() => ctrl.abort(), 3000);
-const res = await fetch('http://localhost:11434/api/tags', { signal: ctrl.signal });
-clearTimeout(t);
-const data = await res.json();
-installedModels = (data.models || []).map((m) => m.name || m.model);
-}
-catch { /* Ollama not running */ }
-const rec = (0, model_recommender_1.recommendModels)(models, sys, installedModels);
-if (opts.json) {
-console.log(JSON.stringify({ system: sys, cache, recommendation: rec }, null, 2));
-return;
-}
-console.log(`\n${icon.rocket} ${color.bold('OPC 模型推荐')}\n`);
-console.log(` 系统: ${sys.totalRAM}GB RAM (${sys.freeRAM}GB 可用), ${sys.cpuCount} cores, ${sys.platform}/${sys.arch}`);
-if (cache.exists) {
-console.log(` 推荐列表: v${cache.version} (${cache.age})`);
-}
-else {
-console.log(` 推荐列表: 内置 (运行 ${color.cyan('opc models --refresh')} 获取最新)`);
-}
-console.log(` Ollama: ${installedModels.length > 0 ? color.green(`运行中, ${installedModels.length} 个模型`) : color.yellow('未运行')}`);
-console.log(`\n ${color.bold('⭐ 推荐:')} ${color.cyan(rec.best.name)} (${rec.best.size}) - ${rec.best.desc}\n`);
-// Table
-console.log(` ${'模型'.padEnd(28)} ${'大小'.padEnd(8)} ${'最低RAM'.padEnd(8)} ${'状态'.padEnd(10)} 说明`);
-console.log(` ${'─'.repeat(28)} ${'─'.repeat(8)} ${'─'.repeat(8)} ${'─'.repeat(10)} ${'─'.repeat(20)}`);
-for (const m of rec.suitable) {
-const installed = installedModels.includes(m.name);
-const isBest = m.name === rec.best.name;
-const status = installed ? color.green('已安装') : color.dim('未安装');
-const star = isBest ? ' ⭐' : (m.recommended ? ' 💎' : '');
-console.log(` ${(m.name + star).padEnd(28)} ${m.size.padEnd(8)} ${(m.minRAM + 'GB').padEnd(8)} ${status.padEnd(10)} ${m.desc}`);
-}
-if (rec.toDownload.length > 0) {
-console.log(`\n ${color.bold('推荐下载:')}`);
-for (const m of rec.toDownload) {
-console.log(` ${color.cyan(`ollama pull ${m.name}`)} (${m.size}, ${m.desc})`);
-}
-}
-console.log();
-});
-// ── Search command ──────────────────────────────────────────
-program
-.command('memory-search <query>')
-.description('Search conversation history (requires SQLite memory)')
-.option('-n, --limit <n>', 'Max results', '10')
-.option('--json', 'Output as JSON')
-.action(async (query, opts) => {
-try {
-const { SQLiteStore } = await Promise.resolve().then(() => __importStar(require('./memory/sqlite-store')));
-const store = new SQLiteStore();
-const results = await store.search(query, parseInt(opts.limit, 10));
-const stats = await store.stats();
-if (opts.json) {
-console.log(JSON.stringify({ query, results, stats }, null, 2));
-await store.close();
-return;
-}
-console.log(`\n🔍 搜索: "${query}" (${results.length} 条结果, 共 ${stats.totalMessages} 条消息)\n`);
-if (results.length === 0) {
-console.log(' 未找到匹配结果。\n');
-await store.close();
-return;
-}
-for (const msg of results) {
-const time = new Date(msg.timestamp).toLocaleString();
-const role = msg.role === 'user' ? '👤' : '🤖';
-const preview = msg.content.length > 120 ? msg.content.slice(0, 120) + '...' : msg.content;
-console.log(` ${role} [${time}] ${preview}`);
-}
-console.log(`\n 💾 数据库: ${stats.dbSizeKB}KB, ${stats.sessions} 个会话\n`);
-await store.close();
-}
-catch (err) {
-console.error(`${icon.error} 搜索失败: ${err instanceof Error ? err.message : err}`);
-console.log(' 提示: 需要 sql.js 支持。运行: npm install sql.js');
-}
-});
+program.parse();
 // ── Keys command ──────────────────────────────────────────────
 const keys_1 = require("./security/keys");
 const approval_1 = require("./security/approval");
@@ -2398,7 +2307,7 @@ keysCmd
 });
 // ── Approve command ───────────────────────────────────────────
 const approveCmd = program.command('approve').description('Manage command approvals');
-// Singleton for CLI
+// Singleton for CLI — in real usage this would be loaded from daemon state
 const approvalManager = new approval_1.ApprovalManager();
 approveCmd
 .command('list')
@@ -2530,7 +2439,7 @@ a2aCmd
 const { oadToAgentCard } = require('./protocols/a2a');
 const oad = loadOADFile();
 if (!oad) {
-console.log(`${icon.error} No
+console.log(`${icon.error} No agent.yaml found`);
 return;
 }
 const card = oadToAgentCard(oad, 'http://localhost:3001');
@@ -2570,7 +2479,7 @@ a2aCmd
 function loadOADFile() {
 const fs = require('fs');
 const yaml = require('js-yaml');
-for (const name of ['
+for (const name of ['agent.yaml', 'agent.yml']) {
 if (fs.existsSync(name)) {
 return yaml.load(fs.readFileSync(name, 'utf-8'));
 }
@@ -2578,7 +2487,7 @@ function loadOADFile() {
 return null;
 }
 // ── MCP Server Commands ────────────────────────────────────
-const mcpCmd = program.command('mcp').description('MCP server commands
+const mcpCmd = program.command('mcp').description('MCP server commands — expose agent as MCP tools');
 mcpCmd
 .command('serve')
 .option('--http <port>', 'Start HTTP+SSE mode on given port')
@@ -2607,7 +2516,7 @@ mcpCmd
 console.log(`${icon.info} Tools: ${server.getToolCount()}`);
 }
 else {
-console.error(`${icon.success} MCP server (stdio) started
+console.error(`${icon.success} MCP server (stdio) started — ${server.getToolCount()} tools`);
 await server.serveStdio();
 }
 });
@@ -2656,10 +2565,8 @@ mcpCmd
 console.log(`${icon.info} Tools: ${server.getToolCount()}`);
 }
 else {
-console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio)
+console.error(`${icon.success} MCP server ${color.cyan(name)} (stdio) — ${server.getToolCount()} tools`);
 await server.serveStdio();
 }
 });
-// ── Parse CLI ────────────────────────────────────────────────
-program.parse();
 //# sourceMappingURL=cli.js.map