cc-claw 0.20.3 → 0.20.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli.js +183 -12
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -33,7 +33,7 @@ var VERSION;
33
33
  var init_version = __esm({
34
34
  "src/version.ts"() {
35
35
  "use strict";
36
- VERSION = true ? "0.20.3" : (() => {
36
+ VERSION = true ? "0.20.5" : (() => {
37
37
  try {
38
38
  return JSON.parse(readFileSync(join(process.cwd(), "package.json"), "utf-8")).version ?? "unknown";
39
39
  } catch {
@@ -6085,6 +6085,7 @@ async function healthCheck(serverName) {
6085
6085
  return [];
6086
6086
  }
6087
6087
  const results = [];
6088
+ let needsCatalogRefresh = false;
6088
6089
  for (const server of servers) {
6089
6090
  const baseUrl = getBaseUrl(server);
6090
6091
  const ok = await ping(baseUrl, {
@@ -6097,6 +6098,35 @@ async function healthCheck(serverName) {
6097
6098
  if (newStatus !== server.status) {
6098
6099
  log(`[ollama] Server ${server.name}: ${server.status} \u2192 ${newStatus}`);
6099
6100
  }
6101
+ if (ok) {
6102
+ try {
6103
+ const tags = await listModels(baseUrl, { apiKey: server.apiKey });
6104
+ const remoteNames = new Set(tags.models.map((m) => m.name));
6105
+ let hasNew = false;
6106
+ for (const name of remoteNames) {
6107
+ if (!getModelByName(name, server.id)) {
6108
+ hasNew = true;
6109
+ break;
6110
+ }
6111
+ }
6112
+ if (hasNew) {
6113
+ await discoverModels(server.name);
6114
+ needsCatalogRefresh = true;
6115
+ log(`[ollama] New models detected on ${server.name} \u2014 refreshed`);
6116
+ }
6117
+ } catch {
6118
+ }
6119
+ }
6120
+ }
6121
+ if (needsCatalogRefresh) {
6122
+ try {
6123
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
6124
+ const adapter = getAdapter4("ollama");
6125
+ if ("refreshModelCatalog" in adapter) {
6126
+ adapter.refreshModelCatalog();
6127
+ }
6128
+ } catch {
6129
+ }
6100
6130
  }
6101
6131
  return results;
6102
6132
  }
@@ -8716,7 +8746,8 @@ var init_helpers = __esm({
8716
8746
  { cmd: "/mcps", desc: "List MCP servers" },
8717
8747
  { cmd: "/mcp", desc: "Manage MCP servers" },
8718
8748
  { cmd: "/evolve", desc: "Self-learning controls" },
8719
- { cmd: "/intent", desc: "Test intent classifier" }
8749
+ { cmd: "/intent", desc: "Test intent classifier" },
8750
+ { cmd: "/info", desc: "Current chat/topic context" }
8720
8751
  ]
8721
8752
  };
8722
8753
  USAGE_WINDOW_MAP = { "24h": "daily", "7d": "weekly" };
@@ -10592,6 +10623,7 @@ var init_chat = __esm({
10592
10623
  const { getMode: getMode3, getCwd: getCwd3, getModel: getModel3, addUsage: addUsage3, getBackend: getBackend3 } = await Promise.resolve().then(() => (init_store5(), store_exports5));
10593
10624
  const { getAdapterForChat: getAdapterForChat2 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
10594
10625
  const chatId = body.chatId;
10626
+ const backend2 = body.backend;
10595
10627
  const PERM_LEVEL = { plan: 0, safe: 1, yolo: 2 };
10596
10628
  const storedMode = getMode3(chatId);
10597
10629
  const requestedMode = body.mode ?? storedMode;
@@ -10629,7 +10661,8 @@ data: ${JSON.stringify(data)}
10629
10661
  sendSSE("text", partial);
10630
10662
  },
10631
10663
  model: model2,
10632
- permMode: mode
10664
+ permMode: mode,
10665
+ backend: backend2
10633
10666
  });
10634
10667
  if (response.usage) addUsage3(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, model2 ?? "unknown", void 0, response.usage.contextSize);
10635
10668
  sendSSE("done", JSON.stringify({ text: response.text, usage: response.usage }));
@@ -10639,7 +10672,7 @@ data: ${JSON.stringify(data)}
10639
10672
  if (!res.writableEnded) res.end();
10640
10673
  }
10641
10674
  } else {
10642
- const response = await askAgent3(chatId, body.message, { cwd, model: model2, permMode: mode });
10675
+ const response = await askAgent3(chatId, body.message, { cwd, model: model2, permMode: mode, backend: backend2 });
10643
10676
  if (response.usage) addUsage3(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, model2 ?? "unknown", void 0, response.usage.contextSize);
10644
10677
  jsonResponse(res, { text: response.text, usage: response.usage, sessionId: response.sessionId });
10645
10678
  }
@@ -15620,6 +15653,14 @@ async function handleAdd(chatId, channel, name, host, port) {
15620
15653
  return;
15621
15654
  }
15622
15655
  const models = await OllamaService.discoverModels(name);
15656
+ try {
15657
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
15658
+ const adapter = getAdapter4("ollama");
15659
+ if ("refreshModelCatalog" in adapter) {
15660
+ adapter.refreshModelCatalog();
15661
+ }
15662
+ } catch {
15663
+ }
15623
15664
  const lines = [
15624
15665
  `\u2705 Added "${name}" (${host}:${actualPort})`,
15625
15666
  "",
@@ -15666,6 +15707,14 @@ async function sendDiscover(chatId, channel, serverName) {
15666
15707
  try {
15667
15708
  const { OllamaService } = await Promise.resolve().then(() => (init_ollama(), ollama_exports));
15668
15709
  const models = await OllamaService.discoverModels(serverName);
15710
+ try {
15711
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
15712
+ const adapter = getAdapter4("ollama");
15713
+ if ("refreshModelCatalog" in adapter) {
15714
+ adapter.refreshModelCatalog();
15715
+ }
15716
+ } catch {
15717
+ }
15669
15718
  if (models.length === 0) {
15670
15719
  await channel.sendText(chatId, "No models found. Check server connectivity.", { parseMode: "plain" });
15671
15720
  return;
@@ -17192,12 +17241,28 @@ async function handleVoice(msg, channel) {
17192
17241
  await channel.sendText(chatId, vLimitMsg, { parseMode: "plain" });
17193
17242
  return;
17194
17243
  }
17195
- await channel.sendText(chatId, "Thinking...", { parseMode: "plain" });
17196
17244
  const mode = getMode(chatId);
17197
17245
  const vModel = resolveModel(chatId);
17198
17246
  const vVerbose = getVerboseLevel(chatId);
17199
- const vToolCb = vVerbose !== "off" ? makeToolActionCallback(chatId, channel, vVerbose) : void 0;
17247
+ const adapter = getAdapterForChat(chatId);
17248
+ const modelLabel = formatModelShort(vModel ?? adapter.defaultModel);
17249
+ const showThinking = getShowThinkingUi(chatId);
17250
+ const needsLiveStatus = vVerbose !== "off" || showThinking;
17251
+ let vToolCb;
17252
+ let vLiveStatus = null;
17253
+ if (needsLiveStatus) {
17254
+ const effectiveVerbose = vVerbose === "off" ? "normal" : vVerbose;
17255
+ const ls = makeLiveStatus(chatId, channel, modelLabel, effectiveVerbose, showThinking);
17256
+ vLiveStatus = ls.liveStatus;
17257
+ vToolCb = vVerbose !== "off" ? ls.toolCb : void 0;
17258
+ await vLiveStatus.init();
17259
+ if (showThinking && adapter.id !== "claude") {
17260
+ vLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${adapter.displayName}`);
17261
+ }
17262
+ }
17263
+ const sigT0 = Date.now();
17200
17264
  const response = await askAgent(chatId, transcript, { cwd: getCwd(chatId), model: vModel, permMode: mode, onToolAction: vToolCb });
17265
+ if (vLiveStatus) await vLiveStatus.finalize(Date.now() - sigT0);
17201
17266
  if (response.usage) addUsage(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, vModel, void 0, response.usage.contextSize);
17202
17267
  if (await handleResponseExhaustion(response.text, chatId, msg, channel)) return;
17203
17268
  const voiceResponse = ensureReaction(response.text, transcript);
@@ -17262,8 +17327,25 @@ Acknowledge receipt. Do NOT analyze the video unless they ask you to.`;
17262
17327
  const vidModel = resolveModel(chatId);
17263
17328
  const vMode = getMode(chatId);
17264
17329
  const vidVerbose = getVerboseLevel(chatId);
17265
- const vidToolCb = vidVerbose !== "off" ? makeToolActionCallback(chatId, channel, vidVerbose) : void 0;
17330
+ const vidAdapter = getAdapterForChat(chatId);
17331
+ const vidModelLabel = formatModelShort(vidModel ?? vidAdapter.defaultModel);
17332
+ const vidShowThinking = getShowThinkingUi(chatId);
17333
+ const vidNeedsLiveStatus = vidVerbose !== "off" || vidShowThinking;
17334
+ let vidToolCb;
17335
+ let vidLiveStatus = null;
17336
+ if (vidNeedsLiveStatus) {
17337
+ const effectiveVerbose = vidVerbose === "off" ? "normal" : vidVerbose;
17338
+ const ls = makeLiveStatus(chatId, channel, vidModelLabel, effectiveVerbose, vidShowThinking);
17339
+ vidLiveStatus = ls.liveStatus;
17340
+ vidToolCb = vidVerbose !== "off" ? ls.toolCb : void 0;
17341
+ await vidLiveStatus.init();
17342
+ if (vidShowThinking && vidAdapter.id !== "claude") {
17343
+ vidLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${vidAdapter.displayName}`);
17344
+ }
17345
+ }
17346
+ const vidT0 = Date.now();
17266
17347
  const response2 = await askAgent(chatId, prompt2, { cwd: getCwd(chatId), model: vidModel, permMode: vMode, onToolAction: vidToolCb });
17348
+ if (vidLiveStatus) await vidLiveStatus.finalize(Date.now() - vidT0);
17267
17349
  if (response2.usage) addUsage(chatId, response2.usage.input, response2.usage.output, response2.usage.cacheRead, vidModel, void 0, response2.usage.contextSize);
17268
17350
  if (await handleResponseExhaustion(response2.text, chatId, msg, channel)) return;
17269
17351
  const vidResponse = ensureReaction(response2.text, caption || "video");
@@ -17319,8 +17401,25 @@ ${content}
17319
17401
  const mediaModel = resolveModel(chatId);
17320
17402
  const mMode = getMode(chatId);
17321
17403
  const mVerbose = getVerboseLevel(chatId);
17322
- const mToolCb = mVerbose !== "off" ? makeToolActionCallback(chatId, channel, mVerbose) : void 0;
17404
+ const mAdapter = getAdapterForChat(chatId);
17405
+ const mModelLabel = formatModelShort(mediaModel ?? mAdapter.defaultModel);
17406
+ const mShowThinking = getShowThinkingUi(chatId);
17407
+ const mNeedsLiveStatus = mVerbose !== "off" || mShowThinking;
17408
+ let mToolCb;
17409
+ let mLiveStatus = null;
17410
+ if (mNeedsLiveStatus) {
17411
+ const effectiveVerbose = mVerbose === "off" ? "normal" : mVerbose;
17412
+ const ls = makeLiveStatus(chatId, channel, mModelLabel, effectiveVerbose, mShowThinking);
17413
+ mLiveStatus = ls.liveStatus;
17414
+ mToolCb = mVerbose !== "off" ? ls.toolCb : void 0;
17415
+ await mLiveStatus.init();
17416
+ if (mShowThinking && mAdapter.id !== "claude") {
17417
+ mLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${mAdapter.displayName}`);
17418
+ }
17419
+ }
17420
+ const mT0 = Date.now();
17323
17421
  const response = await askAgent(chatId, prompt, { cwd: getCwd(chatId), model: mediaModel, permMode: mMode, onToolAction: mToolCb });
17422
+ if (mLiveStatus) await mLiveStatus.finalize(Date.now() - mT0);
17324
17423
  if (response.usage) addUsage(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, mediaModel, void 0, response.usage.contextSize);
17325
17424
  if (await handleResponseExhaustion(response.text, chatId, msg, channel)) return;
17326
17425
  const mediaResponse = ensureReaction(response.text, caption || "file");
@@ -17340,8 +17439,11 @@ var init_media = __esm({
17340
17439
  init_stt();
17341
17440
  init_video();
17342
17441
  init_store5();
17442
+ init_chat_settings();
17443
+ init_backends();
17343
17444
  init_helpers();
17344
17445
  init_response();
17446
+ init_live_status();
17345
17447
  MEDIA_INCOMING_PATH = join17(MEDIA_PATH, "incoming");
17346
17448
  }
17347
17449
  });
@@ -20746,7 +20848,7 @@ function buildSelectKeyboard(chatId) {
20746
20848
  const checkmark = isSelected ? "\u2713 " : " ";
20747
20849
  const row = [{
20748
20850
  label: `${checkmark}${modelInfo.label}`,
20749
- data: `council:toggle:${adapter.id}:${modelId}:${modelInfo.label}`,
20851
+ data: `council:toggle:${adapter.id}:${modelId}`,
20750
20852
  ...isSelected ? { style: "success" } : {}
20751
20853
  }];
20752
20854
  buttons.push(row);
@@ -21310,7 +21412,7 @@ async function handleStatusCommand(chatId, commandArgs, msg, channel) {
21310
21412
  `\u{1F507} Voice: ${voice2 ? "on" : "off"} \xB7 Sig: ${modelSig}`,
21311
21413
  ``,
21312
21414
  buildSectionHeader("Session"),
21313
- `\u{1F4CB} ${sessionId ? sessionId.slice(0, 12) + "..." : "no active session"}`,
21415
+ `\u{1F4CB} ${sessionId ?? "no active session"}`,
21314
21416
  `\u{1F4C1} ${cwd ?? "default workspace"}`,
21315
21417
  `\u{1F4D0} Context: ${ctxBar} ${usedK}K/${maxK}K (${contextPct.toFixed(1)}%)`,
21316
21418
  ...sqCount > 0 ? [`\u{1F5FA} Side quests: ${sqCount} active`] : [],
@@ -21591,6 +21693,48 @@ async function handleRunsCommand(chatId, commandArgs, msg, channel) {
21591
21693
  await channel.sendText(chatId, lines.join("\n\n"), { parseMode: "plain" });
21592
21694
  }
21593
21695
  }
21696
+ async function handleInfoCommand(chatId, commandArgs, msg, channel) {
21697
+ const lines = ["\u2139\uFE0F Chat Info", "\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501"];
21698
+ if (msg.senderId) lines.push(`User ID: ${msg.senderId}`);
21699
+ if (msg.senderUsername) lines.push(`Username: ${msg.senderUsername}`);
21700
+ if (msg.senderName) lines.push(`Name: ${msg.senderName}`);
21701
+ lines.push("");
21702
+ lines.push(`Chat ID: ${chatId}`);
21703
+ if (msg.chatTitle) lines.push(`Group: ${msg.chatTitle}`);
21704
+ if (msg.threadId) lines.push(`Topic thread: ${msg.threadId}`);
21705
+ const aliases = getAllChatAliases();
21706
+ const alias = aliases.find((a) => a.chatId === chatId);
21707
+ if (alias) lines.push(`Alias: ${alias.alias}`);
21708
+ const sessionId = getSessionId(chatId);
21709
+ if (sessionId) lines.push(`Session: ${sessionId}`);
21710
+ const backendId = getBackend(chatId) ?? "claude";
21711
+ const currentModel = getModel(chatId);
21712
+ lines.push(`Backend: ${backendId}`);
21713
+ if (currentModel) lines.push(`Model: ${currentModel}`);
21714
+ const execMode = getExecMode(chatId);
21715
+ lines.push(`Exec mode: ${execMode}`);
21716
+ const { getSkillSuggestionsEnabled: getSkillSuggestionsEnabled2 } = await Promise.resolve().then(() => (init_store5(), store_exports5));
21717
+ const skillSuggestions = getSkillSuggestionsEnabled2(chatId);
21718
+ lines.push(`Skill suggestions: ${skillSuggestions ? "on" : "off"}`);
21719
+ const cwd = getCwd(chatId);
21720
+ if (cwd) lines.push(`CWD: ${cwd}`);
21721
+ const { getMessagePairCount: getMessagePairCount2 } = await Promise.resolve().then(() => (init_session_log(), session_log_exports));
21722
+ const pairCount = getMessagePairCount2(chatId);
21723
+ if (pairCount > 0) lines.push(`Messages this session: ${pairCount} pairs`);
21724
+ try {
21725
+ const { getReflectionStatus: getReflectionStatus2 } = await Promise.resolve().then(() => (init_store4(), store_exports4));
21726
+ const db3 = getDb();
21727
+ const reflectionStatus = getReflectionStatus2(db3, chatId);
21728
+ lines.push(`Reflection: ${reflectionStatus}`);
21729
+ } catch {
21730
+ }
21731
+ if (msg.source) lines.push(`Channel: ${msg.source}`);
21732
+ if (msg.threadId) {
21733
+ lines.push("");
21734
+ lines.push(`\u{1F4CB} Target for cron: ${chatId}:topic:${msg.threadId}`);
21735
+ }
21736
+ await channel.sendText(chatId, lines.join("\n"), { parseMode: "plain" });
21737
+ }
21594
21738
  async function handleSkillsCommand(chatId, commandArgs, msg, channel) {
21595
21739
  const skills2 = await discoverAllSkills();
21596
21740
  if (skills2.length === 0) {
@@ -22529,6 +22673,9 @@ async function handleCommand(msg, channel) {
22529
22673
  case "debate":
22530
22674
  await handleCouncilCommand(chatId, commandArgs, msg, channel);
22531
22675
  break;
22676
+ case "info":
22677
+ await handleInfoCommand(chatId, commandArgs, msg, channel);
22678
+ break;
22532
22679
  case "evolve":
22533
22680
  await handleEvolveCommandWrapper(chatId, commandArgs, msg, channel);
22534
22681
  break;
@@ -23395,7 +23542,9 @@ ${rotationNote}`, { parseMode: "html" });
23395
23542
  if (action === "toggle") {
23396
23543
  const backend2 = parts[2];
23397
23544
  const model2 = parts[3];
23398
- const label2 = parts.slice(4).join(":");
23545
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
23546
+ const toggleAdapter = getAdapter4(backend2);
23547
+ const label2 = toggleAdapter.availableModels[model2]?.label ?? model2;
23399
23548
  const { toggleParticipant: toggleParticipant2, buildSelectKeyboard: buildSelectKeyboard2, hasPendingCouncil: hasPendingCouncil2 } = await Promise.resolve().then(() => (init_wizard2(), wizard_exports));
23400
23549
  if (!hasPendingCouncil2(chatId)) {
23401
23550
  await channel.sendText(chatId, "No council wizard active. Use /council to start.", { parseMode: "plain" });
@@ -26205,7 +26354,10 @@ var init_telegram2 = __esm({
26205
26354
  { command: "reflect", description: "Trigger reflection analysis" },
26206
26355
  { command: "optimize", description: "Audit identity files and skills" },
26207
26356
  // Ollama
26208
- { command: "ollama", description: "Manage Ollama local LLM servers" }
26357
+ { command: "ollama", description: "Manage Ollama local LLM servers" },
26358
+ // Context & info
26359
+ { command: "info", description: "Current chat context (ID, topic, sender, settings)" },
26360
+ { command: "council", description: "Multi-model debate (select models, anonymous rounds)" }
26209
26361
  ]);
26210
26362
  this.bot.on("message", async (ctx) => {
26211
26363
  const chatId = ctx.chat.id.toString();
@@ -26608,6 +26760,8 @@ var init_telegram2 = __esm({
26608
26760
  const chatId = ctx.chat.id.toString();
26609
26761
  const messageId = ctx.message?.message_id?.toString() ?? "";
26610
26762
  const senderName = ctx.from?.first_name ?? "User";
26763
+ const senderId = ctx.from?.id?.toString();
26764
+ const senderUsername = ctx.from?.username ? `@${ctx.from.username}` : void 0;
26611
26765
  const chatTitle = ctx.chat?.title;
26612
26766
  const replyTo = ctx.message?.reply_to_message;
26613
26767
  const replyToRaw = replyTo ? replyTo.text ?? replyTo.caption ?? "" : "";
@@ -26622,6 +26776,8 @@ var init_telegram2 = __esm({
26622
26776
  messageId,
26623
26777
  text: "",
26624
26778
  senderName,
26779
+ senderId,
26780
+ senderUsername,
26625
26781
  type: "voice",
26626
26782
  source: "telegram",
26627
26783
  fileName: ctx.message.voice.file_id,
@@ -26641,6 +26797,8 @@ var init_telegram2 = __esm({
26641
26797
  messageId,
26642
26798
  text: "",
26643
26799
  senderName,
26800
+ senderId,
26801
+ senderUsername,
26644
26802
  type: "photo",
26645
26803
  source: "telegram",
26646
26804
  caption: ctx.message.caption ?? "",
@@ -26659,6 +26817,8 @@ var init_telegram2 = __esm({
26659
26817
  messageId,
26660
26818
  text: "",
26661
26819
  senderName,
26820
+ senderId,
26821
+ senderUsername,
26662
26822
  type: "document",
26663
26823
  source: "telegram",
26664
26824
  caption: ctx.message.caption ?? "",
@@ -26678,6 +26838,8 @@ var init_telegram2 = __esm({
26678
26838
  messageId,
26679
26839
  text: "",
26680
26840
  senderName,
26841
+ senderId,
26842
+ senderUsername,
26681
26843
  type: "video",
26682
26844
  source: "telegram",
26683
26845
  caption: ctx.message.caption ?? "",
@@ -26709,6 +26871,8 @@ var init_telegram2 = __esm({
26709
26871
  messageId,
26710
26872
  text,
26711
26873
  senderName,
26874
+ senderId,
26875
+ senderUsername,
26712
26876
  type: "command",
26713
26877
  source: "telegram",
26714
26878
  command,
@@ -26725,6 +26889,8 @@ var init_telegram2 = __esm({
26725
26889
  messageId,
26726
26890
  text,
26727
26891
  senderName,
26892
+ senderId,
26893
+ senderUsername,
26728
26894
  type: "text",
26729
26895
  source: "telegram",
26730
26896
  chatTitle,
@@ -27183,6 +27349,8 @@ You are running inside CC-Claw. Your current chat ID, group name, and forum topi
27183
27349
  - \`/skills\` \u2014 List skills
27184
27350
  - \`/skill-install <url>\` \u2014 Install skill from GitHub
27185
27351
  - \`/evolve\` \u2014 Self-learning interactive keyboard (includes quick analyze)
27352
+ - \`/info\` \u2014 Current chat context (chatId, topic thread, sender, session, settings, copyable cron target)
27353
+ - \`/council\` \u2014 Multi-model debate wizard (select backends/models, anonymous rounds)
27186
27354
  - \`/gemini_accounts\` \u2014 Gemini credential rotation management
27187
27355
  - \`/setup-profile\` \u2014 User profile setup wizard
27188
27356
 
@@ -31950,6 +32118,7 @@ async function chatSend(globalOpts, message, cmdOpts) {
31950
32118
  chatId,
31951
32119
  message,
31952
32120
  stream: !!cmdOpts.stream,
32121
+ backend: cmdOpts.backend,
31953
32122
  model: cmdOpts.model,
31954
32123
  cwd: cmdOpts.cwd,
31955
32124
  mode: void 0
@@ -32349,6 +32518,8 @@ var init_completion = __esm({
32349
32518
  "db",
32350
32519
  "gemini",
32351
32520
  "evolve",
32521
+ "info",
32522
+ "council",
32352
32523
  "setup",
32353
32524
  "version",
32354
32525
  "help",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cc-claw",
3
- "version": "0.20.3",
3
+ "version": "0.20.5",
4
4
  "description": "CC-Claw: Personal AI assistant on Telegram — multi-backend (Claude, Gemini, Codex, Cursor), sub-agent orchestration, MCP management",
5
5
  "type": "module",
6
6
  "main": "dist/cli.js",