chainlesschain 0.41.1 → 0.42.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -9,6 +9,20 @@ npm install -g chainlesschain
9
9
  chainlesschain setup
10
10
  ```
11
11
 
12
+ After installation, four equivalent commands are available:
13
+
14
+ | Command | Description |
15
+ | ---------------- | ----------------------------------------------------------------- |
16
+ | `chainlesschain` | Full name |
17
+ | `cc` | Shortest alias, recommended for daily use |
18
+ | `clc` | ChainLessChain abbreviation, avoids `cc` conflict with C compiler |
19
+ | `clchain` | chainlesschain abbreviation, easy to recognize |
20
+
21
+ ```bash
22
+ cc setup # equivalent to: chainlesschain setup
23
+ clchain start # equivalent to: chainlesschain start
24
+ ```
25
+
12
26
  ## Requirements
13
27
 
14
28
  - **Node.js** >= 22.12.0
package/package.json CHANGED
@@ -1,10 +1,13 @@
1
1
  {
2
2
  "name": "chainlesschain",
3
- "version": "0.41.1",
3
+ "version": "0.42.3",
4
4
  "description": "CLI for ChainlessChain - install, configure, and manage your personal AI management system",
5
5
  "type": "module",
6
6
  "bin": {
7
- "chainlesschain": "./bin/chainlesschain.js"
7
+ "chainlesschain": "./bin/chainlesschain.js",
8
+ "cc": "./bin/chainlesschain.js",
9
+ "clc": "./bin/chainlesschain.js",
10
+ "clchain": "./bin/chainlesschain.js"
8
11
  },
9
12
  "main": "src/index.js",
10
13
  "scripts": {
@@ -7,6 +7,7 @@
7
7
  */
8
8
 
9
9
  import { startAgentRepl } from "../repl/agent-repl.js";
10
+ import { loadConfig } from "../lib/config-manager.js";
10
11
 
11
12
  export function registerAgentCommand(program) {
12
13
  program
@@ -15,21 +16,21 @@ export function registerAgentCommand(program) {
15
16
  .description(
16
17
  "Start an agentic AI session (reads/writes files, runs commands)",
17
18
  )
18
- .option("--model <model>", "Model name", "qwen2:7b")
19
+ .option("--model <model>", "Model name")
19
20
  .option(
20
21
  "--provider <provider>",
21
22
  "LLM provider (ollama, openai, volcengine, deepseek, ...)",
22
- "ollama",
23
23
  )
24
24
  .option("--base-url <url>", "API base URL")
25
25
  .option("--api-key <key>", "API key")
26
26
  .option("--session <id>", "Resume a previous agent session")
27
27
  .action(async (options) => {
28
+ const config = loadConfig();
28
29
  await startAgentRepl({
29
- model: options.model,
30
- provider: options.provider,
31
- baseUrl: options.baseUrl,
32
- apiKey: options.apiKey,
30
+ model: options.model || config.llm?.model || "qwen2:7b",
31
+ provider: options.provider || config.llm?.provider || "ollama",
32
+ baseUrl: options.baseUrl || config.llm?.baseUrl,
33
+ apiKey: options.apiKey || config.llm?.apiKey,
33
34
  sessionId: options.session,
34
35
  });
35
36
  });
@@ -1,12 +1,13 @@
1
1
  /**
2
2
  * Single-shot AI question command
3
- * chainlesschain ask "What is..." [--model qwen2:7b] [--provider ollama] [--json]
3
+ * chainlesschain ask "What is..." [--model] [--provider] [--json]
4
4
  */
5
5
 
6
6
  import ora from "ora";
7
7
  import chalk from "chalk";
8
8
  import { logger } from "../lib/logger.js";
9
9
  import { BUILT_IN_PROVIDERS } from "../lib/llm-providers.js";
10
+ import { loadConfig } from "../lib/config-manager.js";
10
11
 
11
12
  /**
12
13
  * Send a single question to an LLM provider
@@ -84,24 +85,25 @@ export function registerAskCommand(program) {
84
85
  .command("ask")
85
86
  .description("Ask a question to the AI (single-shot)")
86
87
  .argument("<question>", "The question to ask")
87
- .option("--model <model>", "Model name", "qwen2:7b")
88
+ .option("--model <model>", "Model name")
88
89
  .option(
89
90
  "--provider <provider>",
90
91
  "LLM provider (ollama, openai, volcengine, deepseek, ...)",
91
- "ollama",
92
92
  )
93
93
  .option("--base-url <url>", "API base URL")
94
94
  .option("--api-key <key>", "API key")
95
95
  .option("--json", "Output as JSON")
96
96
  .action(async (question, options) => {
97
+ const config = loadConfig();
98
+ const resolvedOptions = {
99
+ model: options.model || config.llm?.model || "qwen2:7b",
100
+ provider: options.provider || config.llm?.provider || "ollama",
101
+ baseUrl: options.baseUrl || config.llm?.baseUrl,
102
+ apiKey: options.apiKey || config.llm?.apiKey,
103
+ };
97
104
  const spinner = ora("Thinking...").start();
98
105
  try {
99
- const answer = await queryLLM(question, {
100
- model: options.model,
101
- provider: options.provider,
102
- baseUrl: options.baseUrl,
103
- apiKey: options.apiKey,
104
- });
106
+ const answer = await queryLLM(question, resolvedOptions);
105
107
 
106
108
  spinner.stop();
107
109
 
@@ -1,20 +1,20 @@
1
1
  /**
2
2
  * Interactive AI chat command
3
- * chainlesschain chat [--model qwen2:7b] [--provider ollama] [--agent]
3
+ * chainlesschain chat [--model] [--provider] [--agent]
4
4
  */
5
5
 
6
6
  import { startChatRepl } from "../repl/chat-repl.js";
7
7
  import { startAgentRepl } from "../repl/agent-repl.js";
8
+ import { loadConfig } from "../lib/config-manager.js";
8
9
 
9
10
  export function registerChatCommand(program) {
10
11
  program
11
12
  .command("chat")
12
13
  .description("Start an interactive AI chat session")
13
- .option("--model <model>", "Model name", "qwen2:7b")
14
+ .option("--model <model>", "Model name")
14
15
  .option(
15
16
  "--provider <provider>",
16
17
  "LLM provider (ollama, openai, volcengine, deepseek, ...)",
17
- "ollama",
18
18
  )
19
19
  .option("--base-url <url>", "API base URL")
20
20
  .option("--api-key <key>", "API key")
@@ -24,11 +24,12 @@ export function registerChatCommand(program) {
24
24
  )
25
25
  .option("--session <id>", "Resume a previous session (agent mode)")
26
26
  .action(async (options) => {
27
+ const config = loadConfig();
27
28
  const replOptions = {
28
- model: options.model,
29
- provider: options.provider,
30
- baseUrl: options.baseUrl,
31
- apiKey: options.apiKey,
29
+ model: options.model || config.llm?.model || "qwen2:7b",
30
+ provider: options.provider || config.llm?.provider || "ollama",
31
+ baseUrl: options.baseUrl || config.llm?.baseUrl,
32
+ apiKey: options.apiKey || config.llm?.apiKey,
32
33
  sessionId: options.session,
33
34
  };
34
35
 
@@ -63,6 +63,93 @@ const TEMPLATES = {
63
63
  `,
64
64
  skills: ["debug", "summarize", "code-review"],
65
65
  },
66
+ "medical-triage": {
67
+ description:
68
+ "Medical triage assistant with symptom assessment and ESI classification",
69
+ rules: `# Project Rules
70
+
71
+ ## Medical Guidelines
72
+ - Always ask for patient symptoms before providing guidance
73
+ - Use standard ESI (Emergency Severity Index) levels 1-5
74
+ - Never provide definitive diagnoses — recommend professional evaluation
75
+ - Document all triage decisions with reasoning
76
+
77
+ ## AI Assistant Guidelines
78
+ - Prioritize patient safety in all responses
79
+ - Use clear, non-technical language when possible
80
+ - Flag emergency symptoms immediately
81
+ `,
82
+ skills: ["summarize"],
83
+ persona: {
84
+ name: "智能分诊助手",
85
+ role: "你是一个医疗分诊AI助手,帮助诊所工作人员根据症状和紧急程度对患者进行优先级分类。",
86
+ behaviors: [
87
+ "始终先询问患者症状再给出建议",
88
+ "使用标准分诊分类 (ESI 1-5)",
89
+ "绝不提供确定性诊断,建议专业评估",
90
+ "记录所有分诊决策及其理由",
91
+ ],
92
+ toolsPriority: ["read_file", "search_files"],
93
+ toolsDisabled: [],
94
+ },
95
+ },
96
+ "agriculture-expert": {
97
+ description:
98
+ "Agriculture expert assistant for crop management and farming advice",
99
+ rules: `# Project Rules
100
+
101
+ ## Agriculture Guidelines
102
+ - Consider local climate and soil conditions
103
+ - Recommend sustainable farming practices
104
+ - Provide seasonal planting calendars when relevant
105
+ - Reference pest management best practices
106
+
107
+ ## AI Assistant Guidelines
108
+ - Ask about specific crops and region before advising
109
+ - Use data-driven recommendations when possible
110
+ - Warn about pesticide safety and environmental impact
111
+ `,
112
+ skills: ["summarize"],
113
+ persona: {
114
+ name: "农业专家助手",
115
+ role: "你是一个农业技术AI助手,帮助农户进行作物管理、病虫害防治和产量优化。",
116
+ behaviors: [
117
+ "根据当地气候和土壤条件提供建议",
118
+ "推荐可持续的农业实践方法",
119
+ "提供季节性种植日历和管理建议",
120
+ "使用数据驱动的决策支持",
121
+ ],
122
+ toolsPriority: ["read_file", "search_files", "run_code"],
123
+ toolsDisabled: [],
124
+ },
125
+ },
126
+ "general-assistant": {
127
+ description: "General-purpose assistant without coding bias",
128
+ rules: `# Project Rules
129
+
130
+ ## General Guidelines
131
+ - Focus on the user's domain and questions
132
+ - Provide clear, well-structured responses
133
+ - Use tools to manage files and information as needed
134
+
135
+ ## AI Assistant Guidelines
136
+ - Adapt your communication style to the user's needs
137
+ - Ask clarifying questions when requirements are ambiguous
138
+ - Organize information in a logical, easy-to-follow manner
139
+ `,
140
+ skills: ["summarize"],
141
+ persona: {
142
+ name: "通用AI助手",
143
+ role: "你是一个通用AI助手,根据用户的具体需求和项目上下文提供帮助。你不局限于编码任务,而是全面地协助用户完成各种工作。",
144
+ behaviors: [
145
+ "根据用户的领域调整回答风格",
146
+ "在需求不明确时主动询问",
147
+ "用清晰、结构化的方式组织信息",
148
+ ],
149
+ toolsPriority: ["read_file", "write_file", "search_files"],
150
+ toolsDisabled: [],
151
+ },
152
+ },
66
153
  empty: {
67
154
  description: "Bare project with minimal configuration",
68
155
  rules: `# Project Rules
@@ -82,7 +169,7 @@ export function registerInitCommand(program) {
82
169
  )
83
170
  .option(
84
171
  "-t, --template <name>",
85
- "Project template (code-project, data-science, devops, empty)",
172
+ "Project template (code-project, data-science, devops, medical-triage, agriculture-expert, general-assistant, empty)",
86
173
  "empty",
87
174
  )
88
175
  .option("-y, --yes", "Skip prompts, use defaults")
@@ -147,6 +234,9 @@ export function registerInitCommand(program) {
147
234
  workspace: "./skills",
148
235
  },
149
236
  };
237
+ if (tmpl.persona) {
238
+ config.persona = tmpl.persona;
239
+ }
150
240
  fs.writeFileSync(
151
241
  path.join(ccDir, "config.json"),
152
242
  JSON.stringify(config, null, 2),
@@ -156,6 +246,36 @@ export function registerInitCommand(program) {
156
246
  // rules.md
157
247
  fs.writeFileSync(path.join(ccDir, "rules.md"), tmpl.rules, "utf-8");
158
248
 
249
+ // Create auto-activated persona skill if template has persona
250
+ if (tmpl.persona) {
251
+ const personaSkillDir = path.join(
252
+ ccDir,
253
+ "skills",
254
+ `${template}-persona`,
255
+ );
256
+ fs.mkdirSync(personaSkillDir, { recursive: true });
257
+ const skillMd = `---
258
+ name: ${template}-persona
259
+ display-name: ${tmpl.persona.name || template} Persona
260
+ category: persona
261
+ activation: auto
262
+ user-invocable: false
263
+ description: Auto-activated persona for ${template} projects
264
+ ---
265
+
266
+ # ${tmpl.persona.name || template}
267
+
268
+ ${tmpl.persona.role || ""}
269
+
270
+ ${tmpl.persona.behaviors?.map((b) => `- ${b}`).join("\n") || ""}
271
+ `;
272
+ fs.writeFileSync(
273
+ path.join(personaSkillDir, "SKILL.md"),
274
+ skillMd,
275
+ "utf-8",
276
+ );
277
+ }
278
+
159
279
  logger.success(
160
280
  `Initialized ChainlessChain project in ${chalk.cyan(cwd)}`,
161
281
  );
@@ -0,0 +1,178 @@
1
+ /**
2
+ * Persona management command
3
+ * chainlesschain persona [show|set|reset]
4
+ *
5
+ * Manage project-level AI persona configuration.
6
+ */
7
+
8
+ import chalk from "chalk";
9
+ import fs from "fs";
10
+ import path from "path";
11
+ import { logger } from "../lib/logger.js";
12
+ import { findProjectRoot, loadProjectConfig } from "../lib/project-detector.js";
13
+
14
+ export function registerPersonaCommand(program) {
15
+ const persona = program
16
+ .command("persona")
17
+ .description("Manage project AI persona configuration");
18
+
19
+ // persona show
20
+ persona
21
+ .command("show")
22
+ .description("Show the current project persona")
23
+ .action(() => {
24
+ const projectRoot = findProjectRoot(process.cwd());
25
+ if (!projectRoot) {
26
+ logger.error(
27
+ "Not inside a ChainlessChain project. Run `chainlesschain init` first.",
28
+ );
29
+ process.exit(1);
30
+ }
31
+
32
+ const config = loadProjectConfig(projectRoot);
33
+ if (!config?.persona) {
34
+ logger.log("No persona configured. Using default coding assistant.");
35
+ logger.log(
36
+ `\nSet one with: ${chalk.cyan('chainlesschain persona set --name "My Assistant" --role "Your role description"')}`,
37
+ );
38
+ return;
39
+ }
40
+
41
+ const p = config.persona;
42
+ logger.log(chalk.bold("Current Persona:"));
43
+ logger.log(` Name: ${chalk.cyan(p.name || "(unnamed)")}`);
44
+ logger.log(` Role: ${p.role || "(no role defined)"}`);
45
+ if (p.behaviors?.length > 0) {
46
+ logger.log(" Behaviors:");
47
+ for (const b of p.behaviors) {
48
+ logger.log(` - ${b}`);
49
+ }
50
+ }
51
+ if (p.toolsPriority?.length > 0) {
52
+ logger.log(
53
+ ` Preferred tools: ${chalk.gray(p.toolsPriority.join(", "))}`,
54
+ );
55
+ }
56
+ if (p.toolsDisabled?.length > 0) {
57
+ logger.log(
58
+ ` Disabled tools: ${chalk.red(p.toolsDisabled.join(", "))}`,
59
+ );
60
+ }
61
+ });
62
+
63
+ // persona set
64
+ persona
65
+ .command("set")
66
+ .description("Set or update the project persona")
67
+ .option("-n, --name <name>", "Persona display name")
68
+ .option("-r, --role <role>", "Role description (system prompt override)")
69
+ .option(
70
+ "-b, --behavior <behavior>",
71
+ "Add a behavior guideline (repeatable)",
72
+ collectValues,
73
+ [],
74
+ )
75
+ .option(
76
+ "--tools-priority <tools>",
77
+ "Comma-separated list of preferred tools",
78
+ )
79
+ .option(
80
+ "--tools-disabled <tools>",
81
+ "Comma-separated list of disabled tools",
82
+ )
83
+ .action((options) => {
84
+ const projectRoot = findProjectRoot(process.cwd());
85
+ if (!projectRoot) {
86
+ logger.error(
87
+ "Not inside a ChainlessChain project. Run `chainlesschain init` first.",
88
+ );
89
+ process.exit(1);
90
+ }
91
+
92
+ const configPath = path.join(
93
+ projectRoot,
94
+ ".chainlesschain",
95
+ "config.json",
96
+ );
97
+ let config;
98
+ try {
99
+ config = JSON.parse(fs.readFileSync(configPath, "utf-8"));
100
+ } catch {
101
+ logger.error("Failed to read config.json");
102
+ process.exit(1);
103
+ }
104
+
105
+ const existing = config.persona || {};
106
+ const updated = { ...existing };
107
+
108
+ if (options.name) updated.name = options.name;
109
+ if (options.role) updated.role = options.role;
110
+ if (options.behavior?.length > 0) {
111
+ updated.behaviors = [
112
+ ...(existing.behaviors || []),
113
+ ...options.behavior,
114
+ ];
115
+ }
116
+ if (options.toolsPriority) {
117
+ updated.toolsPriority = options.toolsPriority
118
+ .split(",")
119
+ .map((s) => s.trim());
120
+ }
121
+ if (options.toolsDisabled) {
122
+ updated.toolsDisabled = options.toolsDisabled
123
+ .split(",")
124
+ .map((s) => s.trim());
125
+ }
126
+
127
+ config.persona = updated;
128
+ fs.writeFileSync(configPath, JSON.stringify(config, null, 2), "utf-8");
129
+
130
+ logger.success("Persona updated.");
131
+ logger.log(` Name: ${chalk.cyan(updated.name || "(unnamed)")}`);
132
+ logger.log(` Role: ${updated.role || "(no role defined)"}`);
133
+ });
134
+
135
+ // persona reset
136
+ persona
137
+ .command("reset")
138
+ .description(
139
+ "Remove the project persona, restoring the default coding assistant",
140
+ )
141
+ .action(() => {
142
+ const projectRoot = findProjectRoot(process.cwd());
143
+ if (!projectRoot) {
144
+ logger.error(
145
+ "Not inside a ChainlessChain project. Run `chainlesschain init` first.",
146
+ );
147
+ process.exit(1);
148
+ }
149
+
150
+ const configPath = path.join(
151
+ projectRoot,
152
+ ".chainlesschain",
153
+ "config.json",
154
+ );
155
+ let config;
156
+ try {
157
+ config = JSON.parse(fs.readFileSync(configPath, "utf-8"));
158
+ } catch {
159
+ logger.error("Failed to read config.json");
160
+ process.exit(1);
161
+ }
162
+
163
+ if (!config.persona) {
164
+ logger.log("No persona configured. Nothing to reset.");
165
+ return;
166
+ }
167
+
168
+ delete config.persona;
169
+ fs.writeFileSync(configPath, JSON.stringify(config, null, 2), "utf-8");
170
+ logger.success(
171
+ "Persona removed. The default coding assistant will be used.",
172
+ );
173
+ });
174
+ }
175
+
176
+ function collectValues(value, previous) {
177
+ return previous.concat([value]);
178
+ }
@@ -104,9 +104,15 @@ async function runSetup(options) {
104
104
  apiKey = await askPassword(`Enter ${providerInfo.name} API key:`);
105
105
  }
106
106
 
107
- if (provider === "custom") {
108
- baseUrl = await askInput("Enter API base URL:");
109
- model = await askInput("Enter model name:");
107
+ if (provider === "custom" || providerInfo.isProxy) {
108
+ baseUrl = await askInput(
109
+ "Enter API base URL:",
110
+ providerInfo.defaultBaseUrl || "",
111
+ );
112
+ model = await askInput(
113
+ "Enter model name:",
114
+ providerInfo.defaultModel || "",
115
+ );
110
116
  } else {
111
117
  const customizeModel = await askConfirm(
112
118
  `Use default model (${model})?`,
@@ -1,10 +1,50 @@
1
1
  import chalk from "chalk";
2
+ import { execSync } from "node:child_process";
2
3
  import { checkForUpdates } from "../lib/version-checker.js";
3
4
  import { downloadRelease } from "../lib/downloader.js";
4
5
  import { VERSION } from "../constants.js";
5
6
  import { askConfirm } from "../lib/prompts.js";
6
7
  import logger from "../lib/logger.js";
7
8
 
9
+ async function selfUpdateCli(targetVersion) {
10
+ if (VERSION === targetVersion) {
11
+ return true; // Already at the target version
12
+ }
13
+
14
+ try {
15
+ logger.info("Updating CLI package...");
16
+ execSync(`npm install -g chainlesschain@${targetVersion}`, {
17
+ encoding: "utf-8",
18
+ stdio: "pipe",
19
+ });
20
+ // Verify the update actually took effect
21
+ try {
22
+ const newVersion = execSync("chainlesschain --version", {
23
+ encoding: "utf-8",
24
+ stdio: "pipe",
25
+ }).trim();
26
+ if (newVersion === targetVersion) {
27
+ logger.success(`CLI updated to v${targetVersion}`);
28
+ return true;
29
+ }
30
+ logger.warn(
31
+ `CLI update ran but version is still ${newVersion}. Please run manually:\n npm install -g chainlesschain@${targetVersion}`,
32
+ );
33
+ return false;
34
+ } catch (_verifyErr) {
35
+ // Cannot verify, assume success
36
+ logger.success(`CLI updated to v${targetVersion}`);
37
+ return true;
38
+ }
39
+ } catch (_err) {
40
+ // npm global install may fail due to permissions; guide the user
41
+ logger.warn(
42
+ `CLI self-update failed. Please run manually:\n npm install -g chainlesschain@${targetVersion}`,
43
+ );
44
+ return false;
45
+ }
46
+ }
47
+
8
48
  export function registerUpdateCommand(program) {
9
49
  program
10
50
  .command("update")
@@ -64,7 +104,21 @@ export function registerUpdateCommand(program) {
64
104
  }
65
105
 
66
106
  await downloadRelease(result.latestVersion, { force: options.force });
67
- logger.success(`Updated to v${result.latestVersion}`);
107
+ logger.success("Application already installed");
108
+
109
+ // Self-update the CLI npm package
110
+ const cliUpdated = await selfUpdateCli(result.latestVersion);
111
+
112
+ if (cliUpdated) {
113
+ logger.success(`Updated to v${result.latestVersion}`);
114
+ } else {
115
+ logger.warn(
116
+ `Application binary updated, but CLI version remains at ${VERSION}.`,
117
+ );
118
+ logger.info(
119
+ `To complete the update, run:\n npm install -g chainlesschain@${result.latestVersion}`,
120
+ );
121
+ }
68
122
  logger.info("Restart ChainlessChain to use the new version.");
69
123
  } catch (err) {
70
124
  if (err.name === "ExitPromptError") {
package/src/constants.js CHANGED
@@ -27,11 +27,11 @@ export const DEFAULT_PORTS = {
27
27
  };
28
28
 
29
29
  export const LLM_PROVIDERS = {
30
- ollama: {
31
- name: "Ollama (Local)",
32
- defaultBaseUrl: "http://localhost:11434",
33
- defaultModel: "qwen2:7b",
34
- requiresApiKey: false,
30
+ volcengine: {
31
+ name: "Volcengine (火山引擎/豆包)",
32
+ defaultBaseUrl: "https://ark.cn-beijing.volces.com/api/v3",
33
+ defaultModel: "doubao-seed-1-6-251015",
34
+ requiresApiKey: true,
35
35
  },
36
36
  openai: {
37
37
  name: "OpenAI",
@@ -39,10 +39,10 @@ export const LLM_PROVIDERS = {
39
39
  defaultModel: "gpt-4o",
40
40
  requiresApiKey: true,
41
41
  },
42
- dashscope: {
43
- name: "DashScope (Alibaba)",
44
- defaultBaseUrl: "https://dashscope.aliyuncs.com/api/v1",
45
- defaultModel: "qwen-max",
42
+ anthropic: {
43
+ name: "Anthropic (Claude)",
44
+ defaultBaseUrl: "https://api.anthropic.com/v1",
45
+ defaultModel: "claude-sonnet-4-6",
46
46
  requiresApiKey: true,
47
47
  },
48
48
  deepseek: {
@@ -51,8 +51,65 @@ export const LLM_PROVIDERS = {
51
51
  defaultModel: "deepseek-chat",
52
52
  requiresApiKey: true,
53
53
  },
54
+ dashscope: {
55
+ name: "DashScope (阿里通义)",
56
+ defaultBaseUrl: "https://dashscope.aliyuncs.com/compatible-mode/v1",
57
+ defaultModel: "qwen-max",
58
+ requiresApiKey: true,
59
+ },
60
+ gemini: {
61
+ name: "Google Gemini",
62
+ defaultBaseUrl: "https://generativelanguage.googleapis.com/v1beta",
63
+ defaultModel: "gemini-2.0-flash",
64
+ requiresApiKey: true,
65
+ },
66
+ kimi: {
67
+ name: "Kimi (月之暗面)",
68
+ defaultBaseUrl: "https://api.moonshot.cn/v1",
69
+ defaultModel: "moonshot-v1-auto",
70
+ requiresApiKey: true,
71
+ },
72
+ minimax: {
73
+ name: "MiniMax (海螺AI)",
74
+ defaultBaseUrl: "https://api.minimax.chat/v1",
75
+ defaultModel: "MiniMax-Text-01",
76
+ requiresApiKey: true,
77
+ },
78
+ mistral: {
79
+ name: "Mistral AI",
80
+ defaultBaseUrl: "https://api.mistral.ai/v1",
81
+ defaultModel: "mistral-large-latest",
82
+ requiresApiKey: true,
83
+ },
84
+ ollama: {
85
+ name: "Ollama (本地部署)",
86
+ defaultBaseUrl: "http://localhost:11434",
87
+ defaultModel: "qwen2:7b",
88
+ requiresApiKey: false,
89
+ },
90
+ "openai-proxy": {
91
+ name: "OpenAI 中转站 (API2D/CloseAI等)",
92
+ defaultBaseUrl: "",
93
+ defaultModel: "gpt-4o",
94
+ requiresApiKey: true,
95
+ isProxy: true,
96
+ },
97
+ "anthropic-proxy": {
98
+ name: "Anthropic 中转站 (Claude代理)",
99
+ defaultBaseUrl: "",
100
+ defaultModel: "claude-sonnet-4-6",
101
+ requiresApiKey: true,
102
+ isProxy: true,
103
+ },
104
+ "gemini-proxy": {
105
+ name: "Gemini 中转站 (Google代理)",
106
+ defaultBaseUrl: "",
107
+ defaultModel: "gemini-2.0-flash",
108
+ requiresApiKey: true,
109
+ isProxy: true,
110
+ },
54
111
  custom: {
55
- name: "Custom Provider",
112
+ name: "自定义 Provider (自建服务/vLLM/TGI等)",
56
113
  defaultBaseUrl: "",
57
114
  defaultModel: "",
58
115
  requiresApiKey: true,
@@ -83,10 +140,10 @@ export const DEFAULT_CONFIG = {
83
140
  database: null,
84
141
  },
85
142
  llm: {
86
- provider: "ollama",
143
+ provider: "volcengine",
87
144
  apiKey: null,
88
- baseUrl: "http://localhost:11434",
89
- model: "qwen2:7b",
145
+ baseUrl: "https://ark.cn-beijing.volces.com/api/v3",
146
+ model: "doubao-seed-1-6-251015",
90
147
  },
91
148
  enterprise: {
92
149
  serverUrl: null,
package/src/index.js CHANGED
@@ -35,6 +35,7 @@ import { registerWalletCommand } from "./commands/wallet.js";
35
35
  import { registerOrgCommand } from "./commands/org.js";
36
36
  import { registerPluginCommand } from "./commands/plugin.js";
37
37
  import { registerInitCommand } from "./commands/init.js";
38
+ import { registerPersonaCommand } from "./commands/persona.js";
38
39
  import { registerCoworkCommand } from "./commands/cowork.js";
39
40
 
40
41
  // Phase 6: Advanced AI & Hooks
@@ -97,8 +98,9 @@ export function createProgram() {
97
98
  .option("--verbose", "Enable verbose output")
98
99
  .option("--quiet", "Suppress non-essential output");
99
100
 
100
- // Project initialization
101
+ // Project initialization & persona
101
102
  registerInitCommand(program);
103
+ registerPersonaCommand(program);
102
104
 
103
105
  // Existing commands
104
106
  registerSetupCommand(program);
@@ -22,6 +22,7 @@ import { getPlanModeManager } from "./plan-mode.js";
22
22
  import { CLISkillLoader } from "./skill-loader.js";
23
23
  import { executeHooks, HookEvents } from "./hook-manager.js";
24
24
  import { detectPython } from "./cli-anything-bridge.js";
25
+ import { findProjectRoot, loadProjectConfig } from "./project-detector.js";
25
26
 
26
27
  // ─── Tool definitions ────────────────────────────────────────────────────
27
28
 
@@ -331,6 +332,126 @@ ${envLines.join("\n")}
331
332
  Current working directory: ${cwd || process.cwd()}`;
332
333
  }
333
334
 
335
+ // ─── Persona support ─────────────────────────────────────────────────────
336
+
337
+ /**
338
+ * Load persona configuration from project config.json
339
+ * @param {string} cwd - working directory
340
+ * @returns {object|null} persona object or null
341
+ */
342
+ function _loadProjectPersona(cwd) {
343
+ try {
344
+ const projectRoot = findProjectRoot(cwd || process.cwd());
345
+ if (!projectRoot) return null;
346
+ const config = loadProjectConfig(projectRoot);
347
+ return config?.persona || null;
348
+ } catch {
349
+ return null;
350
+ }
351
+ }
352
+
353
+ /**
354
+ * Build a persona-specific system prompt
355
+ * @param {object} persona - persona configuration
356
+ * @param {string[]} envLines - environment info lines
357
+ * @param {string} cwd - working directory
358
+ * @returns {string}
359
+ */
360
+ function _buildPersonaPrompt(persona, envLines, cwd) {
361
+ const lines = [];
362
+ lines.push(`You are ${persona.name || "AI Assistant"}.`);
363
+ if (persona.role) {
364
+ lines.push("");
365
+ lines.push(persona.role);
366
+ }
367
+ if (persona.behaviors?.length > 0) {
368
+ lines.push("");
369
+ lines.push("Key behaviors:");
370
+ for (const b of persona.behaviors) {
371
+ lines.push(`- ${b}`);
372
+ }
373
+ }
374
+ lines.push("");
375
+ lines.push(
376
+ "You have access to tools that let you read files, write files, edit files, run shell commands, and search the codebase. When the user asks you to do something, USE THE TOOLS to actually do it.",
377
+ );
378
+ if (persona.toolsPriority?.length > 0) {
379
+ lines.push(`\nPreferred tools: ${persona.toolsPriority.join(", ")}`);
380
+ }
381
+ lines.push(`\n## Environment\n${envLines.join("\n")}`);
382
+ lines.push(`\nCurrent working directory: ${cwd || process.cwd()}`);
383
+ return lines.join("\n");
384
+ }
385
+
386
+ /**
387
+ * Build the full system prompt with persona, rules.md, and auto-activated persona skills.
388
+ * Single entry point used by both agent-repl and ws-session-manager.
389
+ *
390
+ * Priority order:
391
+ * 1. config.json persona → replaces base system prompt
392
+ * 2. Auto-activated persona skills → appended
393
+ * 3. rules.md → appended
394
+ * 4. Default hardcoded prompt → fallback when no persona
395
+ *
396
+ * @param {string} [cwd] - working directory
397
+ * @returns {string} complete system prompt
398
+ */
399
+ export function buildSystemPrompt(cwd) {
400
+ const dir = cwd || process.cwd();
401
+
402
+ // Check for project persona
403
+ const persona = _loadProjectPersona(dir);
404
+ let prompt;
405
+ if (persona) {
406
+ const env = getEnvironmentInfo();
407
+ const envLines = [
408
+ `OS: ${env.os} (${env.arch})`,
409
+ env.python
410
+ ? `Python: ${env.python}${env.pip ? " + pip" : ""}`
411
+ : "Python: not found",
412
+ env.node ? `Node.js: ${env.node}` : "Node.js: not found",
413
+ `Git: ${env.git ? "available" : "not found"}`,
414
+ ];
415
+ prompt = _buildPersonaPrompt(persona, envLines, dir);
416
+ } else {
417
+ prompt = getBaseSystemPrompt(dir);
418
+ }
419
+
420
+ // Append auto-activated persona skills
421
+ try {
422
+ const loader = new CLISkillLoader();
423
+ const allSkills = loader.getResolvedSkills();
424
+ const personaSkills = allSkills.filter(
425
+ (s) => s.category === "persona" && s.activation === "auto",
426
+ );
427
+ for (const p of personaSkills) {
428
+ if (p.body?.trim()) {
429
+ prompt += `\n\n## Persona: ${p.displayName}\n${p.body}`;
430
+ }
431
+ }
432
+ } catch {
433
+ // Non-critical — skill loader may not be available
434
+ }
435
+
436
+ // Append rules.md
437
+ try {
438
+ const projectRoot = findProjectRoot(dir);
439
+ if (projectRoot) {
440
+ const rulesPath = path.join(projectRoot, ".chainlesschain", "rules.md");
441
+ if (fs.existsSync(rulesPath)) {
442
+ const content = fs.readFileSync(rulesPath, "utf-8");
443
+ if (content.trim()) {
444
+ prompt += `\n\n## Project Rules\n${content}`;
445
+ }
446
+ }
447
+ }
448
+ } catch {
449
+ // Non-critical
450
+ }
451
+
452
+ return prompt;
453
+ }
454
+
334
455
  // ─── Tool execution ──────────────────────────────────────────────────────
335
456
 
336
457
  /**
@@ -349,6 +470,14 @@ export async function executeTool(name, args, context = {}) {
349
470
  const skillLoader = context.skillLoader || _defaultSkillLoader;
350
471
  const cwd = context.cwd || process.cwd();
351
472
 
473
+ // Persona toolsDisabled guard
474
+ const persona = _loadProjectPersona(cwd);
475
+ if (persona?.toolsDisabled?.includes(name)) {
476
+ return {
477
+ error: `Tool "${name}" is disabled by project persona configuration.`,
478
+ };
479
+ }
480
+
352
481
  // Plan mode: check if tool is allowed
353
482
  const planManager = getPlanModeManager();
354
483
  if (planManager.isActive() && !planManager.isToolAllowed(name)) {
@@ -837,6 +966,15 @@ async function _executeRunCode(args, cwd) {
837
966
  export async function chatWithTools(rawMessages, options) {
838
967
  const { provider, model, baseUrl, apiKey, contextEngine: ce } = options;
839
968
 
969
+ // Filter tools based on persona.toolsDisabled
970
+ let tools = AGENT_TOOLS;
971
+ const persona = _loadProjectPersona(options.cwd);
972
+ if (persona?.toolsDisabled?.length > 0) {
973
+ tools = AGENT_TOOLS.filter(
974
+ (t) => !persona.toolsDisabled.includes(t.function.name),
975
+ );
976
+ }
977
+
840
978
  const lastUserMsg = [...rawMessages].reverse().find((m) => m.role === "user");
841
979
  const messages = ce
842
980
  ? ce.buildOptimizedMessages(rawMessages, {
@@ -851,7 +989,7 @@ export async function chatWithTools(rawMessages, options) {
851
989
  body: JSON.stringify({
852
990
  model,
853
991
  messages,
854
- tools: AGENT_TOOLS,
992
+ tools,
855
993
  stream: false,
856
994
  }),
857
995
  });
@@ -868,7 +1006,7 @@ export async function chatWithTools(rawMessages, options) {
868
1006
  const systemMsgs = messages.filter((m) => m.role === "system");
869
1007
  const otherMsgs = messages.filter((m) => m.role !== "system");
870
1008
 
871
- const anthropicTools = AGENT_TOOLS.map((t) => ({
1009
+ const anthropicTools = tools.map((t) => ({
872
1010
  name: t.function.name,
873
1011
  description: t.function.description,
874
1012
  input_schema: t.function.parameters,
@@ -959,7 +1097,7 @@ export async function chatWithTools(rawMessages, options) {
959
1097
  body: JSON.stringify({
960
1098
  model: model || defaultModels[provider] || "gpt-4o-mini",
961
1099
  messages,
962
- tools: AGENT_TOOLS,
1100
+ tools,
963
1101
  }),
964
1102
  });
965
1103
 
@@ -80,7 +80,7 @@ export const BUILT_IN_PROVIDERS = {
80
80
  },
81
81
  volcengine: {
82
82
  name: "volcengine",
83
- displayName: "Volcengine (豆包)",
83
+ displayName: "Volcengine (火山引擎/豆包)",
84
84
  baseUrl: "https://ark.cn-beijing.volces.com/api/v3",
85
85
  apiKeyEnv: "VOLCENGINE_API_KEY",
86
86
  models: [
@@ -91,6 +91,27 @@ export const BUILT_IN_PROVIDERS = {
91
91
  ],
92
92
  free: false,
93
93
  },
94
+ kimi: {
95
+ name: "kimi",
96
+ displayName: "Kimi (月之暗面)",
97
+ baseUrl: "https://api.moonshot.cn/v1",
98
+ apiKeyEnv: "MOONSHOT_API_KEY",
99
+ models: [
100
+ "moonshot-v1-auto",
101
+ "moonshot-v1-8k",
102
+ "moonshot-v1-32k",
103
+ "moonshot-v1-128k",
104
+ ],
105
+ free: false,
106
+ },
107
+ minimax: {
108
+ name: "minimax",
109
+ displayName: "MiniMax (海螺AI)",
110
+ baseUrl: "https://api.minimax.chat/v1",
111
+ apiKeyEnv: "MINIMAX_API_KEY",
112
+ models: ["MiniMax-Text-01", "abab6.5s-chat", "abab5.5-chat"],
113
+ free: false,
114
+ },
94
115
  };
95
116
 
96
117
  /**
@@ -323,7 +344,7 @@ export class LLMProviderRegistry {
323
344
  return { ok: true, elapsed: Date.now() - start, response: text.trim() };
324
345
  }
325
346
 
326
- // OpenAI-compatible (openai, deepseek, dashscope, mistral, volcengine)
347
+ // OpenAI-compatible (openai, deepseek, dashscope, mistral, volcengine, kimi, minimax)
327
348
  const key = this.getApiKey(name);
328
349
  if (!key) throw new Error(`${provider.apiKeyEnv} not set`);
329
350
  const res = await fetch(`${provider.baseUrl}/chat/completions`, {
@@ -212,6 +212,7 @@ export class CLISkillLoader {
212
212
  description: data.description || "",
213
213
  version: data.version || "1.0.0",
214
214
  category: data.category || "uncategorized",
215
+ activation: data.activation || "manual",
215
216
  tags: data.tags || [],
216
217
  userInvocable: data.userInvocable !== false,
217
218
  handler: data.handler || null,
@@ -265,6 +266,16 @@ export class CLISkillLoader {
265
266
  return this.loadAll();
266
267
  }
267
268
 
269
+ /**
270
+ * Get auto-activated persona skills
271
+ * @returns {object[]} skills with category "persona" and activation "auto"
272
+ */
273
+ getAutoActivatedPersonas() {
274
+ return this.getResolvedSkills().filter(
275
+ (s) => s.category === "persona" && s.activation === "auto",
276
+ );
277
+ }
278
+
268
279
  /**
269
280
  * Clear the cache
270
281
  */
@@ -30,6 +30,8 @@ const TASK_MODEL_MAP = {
30
30
  deepseek: "deepseek-chat",
31
31
  dashscope: "qwen-plus",
32
32
  gemini: "gemini-2.0-flash",
33
+ kimi: "moonshot-v1-auto",
34
+ minimax: "MiniMax-Text-01",
33
35
  mistral: "mistral-medium-latest",
34
36
  ollama: "qwen2.5:7b",
35
37
  },
@@ -40,6 +42,8 @@ const TASK_MODEL_MAP = {
40
42
  deepseek: "deepseek-coder",
41
43
  dashscope: "qwen-max",
42
44
  gemini: "gemini-2.0-pro",
45
+ kimi: "moonshot-v1-auto",
46
+ minimax: "MiniMax-Text-01",
43
47
  mistral: "mistral-large-latest",
44
48
  ollama: "qwen2.5-coder:14b",
45
49
  },
@@ -50,6 +54,8 @@ const TASK_MODEL_MAP = {
50
54
  deepseek: "deepseek-reasoner",
51
55
  dashscope: "qwen-max",
52
56
  gemini: "gemini-2.0-pro",
57
+ kimi: "moonshot-v1-128k",
58
+ minimax: "MiniMax-Text-01",
53
59
  mistral: "mistral-large-latest",
54
60
  ollama: "qwen2.5:14b",
55
61
  },
@@ -60,6 +66,8 @@ const TASK_MODEL_MAP = {
60
66
  deepseek: "deepseek-chat",
61
67
  dashscope: "qwen-turbo",
62
68
  gemini: "gemini-2.0-flash",
69
+ kimi: "moonshot-v1-8k",
70
+ minimax: "abab6.5s-chat",
63
71
  mistral: "mistral-small-latest",
64
72
  ollama: "qwen2:7b",
65
73
  },
@@ -70,6 +78,8 @@ const TASK_MODEL_MAP = {
70
78
  deepseek: "deepseek-chat",
71
79
  dashscope: "qwen-plus",
72
80
  gemini: "gemini-2.0-flash",
81
+ kimi: "moonshot-v1-auto",
82
+ minimax: "MiniMax-Text-01",
73
83
  mistral: "mistral-large-latest",
74
84
  ollama: "qwen2:7b",
75
85
  },
@@ -80,6 +90,8 @@ const TASK_MODEL_MAP = {
80
90
  deepseek: "deepseek-chat",
81
91
  dashscope: "qwen-max",
82
92
  gemini: "gemini-2.0-pro",
93
+ kimi: "moonshot-v1-128k",
94
+ minimax: "MiniMax-Text-01",
83
95
  mistral: "mistral-large-latest",
84
96
  ollama: "qwen2:7b",
85
97
  },
@@ -18,7 +18,7 @@ import {
18
18
  getSession as dbGetSession,
19
19
  listSessions as dbListSessions,
20
20
  } from "./session-manager.js";
21
- import { getBaseSystemPrompt } from "./agent-core.js";
21
+ import { buildSystemPrompt } from "./agent-core.js";
22
22
 
23
23
  /**
24
24
  * @typedef {object} Session
@@ -88,16 +88,7 @@ export class WSSessionManager {
88
88
  options.model || (provider === "ollama" ? "qwen2.5:7b" : null);
89
89
  const baseUrl = options.baseUrl || "http://localhost:11434";
90
90
 
91
- // Load project context
92
- let rulesContent = null;
93
- try {
94
- const rulesPath = path.join(projectRoot, ".chainlesschain", "rules.md");
95
- if (fs.existsSync(rulesPath)) {
96
- rulesContent = fs.readFileSync(rulesPath, "utf8");
97
- }
98
- } catch (_err) {
99
- // Non-critical
100
- }
91
+ // Project context (rules.md, persona) is now loaded by buildSystemPrompt()
101
92
 
102
93
  // Create plan manager (non-singleton, per-session)
103
94
  const planManager = new PlanModeManager();
@@ -125,11 +116,8 @@ export class WSSessionManager {
125
116
  // Non-critical
126
117
  }
127
118
 
128
- // Build initial system prompt
129
- let systemPrompt = getBaseSystemPrompt(projectRoot);
130
- if (rulesContent) {
131
- systemPrompt += `\n\n## Project Rules\n${rulesContent}`;
132
- }
119
+ // Build initial system prompt (includes persona + rules.md)
120
+ const systemPrompt = buildSystemPrompt(projectRoot);
133
121
 
134
122
  const messages = [{ role: "system", content: systemPrompt }];
135
123
 
@@ -158,7 +146,7 @@ export class WSSessionManager {
158
146
  apiKey: options.apiKey || null,
159
147
  baseUrl,
160
148
  projectRoot,
161
- rulesContent,
149
+ rulesContent: null,
162
150
  planManager,
163
151
  contextEngine,
164
152
  permanentMemory,
@@ -40,7 +40,7 @@ import { CLIPermanentMemory } from "../lib/permanent-memory.js";
40
40
  import { CLIAutonomousAgent, GoalStatus } from "../lib/autonomous-agent.js";
41
41
  import {
42
42
  AGENT_TOOLS,
43
- getBaseSystemPrompt,
43
+ buildSystemPrompt,
44
44
  executeTool as coreExecuteTool,
45
45
  agentLoop as coreAgentLoop,
46
46
  formatToolArgs,
@@ -150,7 +150,7 @@ export async function startAgentRepl(options = {}) {
150
150
  }
151
151
 
152
152
  const messages = [
153
- { role: "system", content: getBaseSystemPrompt(process.cwd()) },
153
+ { role: "system", content: buildSystemPrompt(process.cwd()) },
154
154
  ];
155
155
 
156
156
  // Load resumed session messages