cc-claw 0.20.4 → 0.20.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/dist/cli.js +123 -12
  2. package/package.json +1 -1
package/dist/cli.js CHANGED
@@ -33,7 +33,7 @@ var VERSION;
33
33
  var init_version = __esm({
34
34
  "src/version.ts"() {
35
35
  "use strict";
36
- VERSION = true ? "0.20.4" : (() => {
36
+ VERSION = true ? "0.20.5" : (() => {
37
37
  try {
38
38
  return JSON.parse(readFileSync(join(process.cwd(), "package.json"), "utf-8")).version ?? "unknown";
39
39
  } catch {
@@ -6085,6 +6085,7 @@ async function healthCheck(serverName) {
6085
6085
  return [];
6086
6086
  }
6087
6087
  const results = [];
6088
+ let needsCatalogRefresh = false;
6088
6089
  for (const server of servers) {
6089
6090
  const baseUrl = getBaseUrl(server);
6090
6091
  const ok = await ping(baseUrl, {
@@ -6097,6 +6098,35 @@ async function healthCheck(serverName) {
6097
6098
  if (newStatus !== server.status) {
6098
6099
  log(`[ollama] Server ${server.name}: ${server.status} \u2192 ${newStatus}`);
6099
6100
  }
6101
+ if (ok) {
6102
+ try {
6103
+ const tags = await listModels(baseUrl, { apiKey: server.apiKey });
6104
+ const remoteNames = new Set(tags.models.map((m) => m.name));
6105
+ let hasNew = false;
6106
+ for (const name of remoteNames) {
6107
+ if (!getModelByName(name, server.id)) {
6108
+ hasNew = true;
6109
+ break;
6110
+ }
6111
+ }
6112
+ if (hasNew) {
6113
+ await discoverModels(server.name);
6114
+ needsCatalogRefresh = true;
6115
+ log(`[ollama] New models detected on ${server.name} \u2014 refreshed`);
6116
+ }
6117
+ } catch {
6118
+ }
6119
+ }
6120
+ }
6121
+ if (needsCatalogRefresh) {
6122
+ try {
6123
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
6124
+ const adapter = getAdapter4("ollama");
6125
+ if ("refreshModelCatalog" in adapter) {
6126
+ adapter.refreshModelCatalog();
6127
+ }
6128
+ } catch {
6129
+ }
6100
6130
  }
6101
6131
  return results;
6102
6132
  }
@@ -10593,6 +10623,7 @@ var init_chat = __esm({
10593
10623
  const { getMode: getMode3, getCwd: getCwd3, getModel: getModel3, addUsage: addUsage3, getBackend: getBackend3 } = await Promise.resolve().then(() => (init_store5(), store_exports5));
10594
10624
  const { getAdapterForChat: getAdapterForChat2 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
10595
10625
  const chatId = body.chatId;
10626
+ const backend2 = body.backend;
10596
10627
  const PERM_LEVEL = { plan: 0, safe: 1, yolo: 2 };
10597
10628
  const storedMode = getMode3(chatId);
10598
10629
  const requestedMode = body.mode ?? storedMode;
@@ -10630,7 +10661,8 @@ data: ${JSON.stringify(data)}
10630
10661
  sendSSE("text", partial);
10631
10662
  },
10632
10663
  model: model2,
10633
- permMode: mode
10664
+ permMode: mode,
10665
+ backend: backend2
10634
10666
  });
10635
10667
  if (response.usage) addUsage3(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, model2 ?? "unknown", void 0, response.usage.contextSize);
10636
10668
  sendSSE("done", JSON.stringify({ text: response.text, usage: response.usage }));
@@ -10640,7 +10672,7 @@ data: ${JSON.stringify(data)}
10640
10672
  if (!res.writableEnded) res.end();
10641
10673
  }
10642
10674
  } else {
10643
- const response = await askAgent3(chatId, body.message, { cwd, model: model2, permMode: mode });
10675
+ const response = await askAgent3(chatId, body.message, { cwd, model: model2, permMode: mode, backend: backend2 });
10644
10676
  if (response.usage) addUsage3(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, model2 ?? "unknown", void 0, response.usage.contextSize);
10645
10677
  jsonResponse(res, { text: response.text, usage: response.usage, sessionId: response.sessionId });
10646
10678
  }
@@ -15621,6 +15653,14 @@ async function handleAdd(chatId, channel, name, host, port) {
15621
15653
  return;
15622
15654
  }
15623
15655
  const models = await OllamaService.discoverModels(name);
15656
+ try {
15657
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
15658
+ const adapter = getAdapter4("ollama");
15659
+ if ("refreshModelCatalog" in adapter) {
15660
+ adapter.refreshModelCatalog();
15661
+ }
15662
+ } catch {
15663
+ }
15624
15664
  const lines = [
15625
15665
  `\u2705 Added "${name}" (${host}:${actualPort})`,
15626
15666
  "",
@@ -15667,6 +15707,14 @@ async function sendDiscover(chatId, channel, serverName) {
15667
15707
  try {
15668
15708
  const { OllamaService } = await Promise.resolve().then(() => (init_ollama(), ollama_exports));
15669
15709
  const models = await OllamaService.discoverModels(serverName);
15710
+ try {
15711
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
15712
+ const adapter = getAdapter4("ollama");
15713
+ if ("refreshModelCatalog" in adapter) {
15714
+ adapter.refreshModelCatalog();
15715
+ }
15716
+ } catch {
15717
+ }
15670
15718
  if (models.length === 0) {
15671
15719
  await channel.sendText(chatId, "No models found. Check server connectivity.", { parseMode: "plain" });
15672
15720
  return;
@@ -17193,12 +17241,28 @@ async function handleVoice(msg, channel) {
17193
17241
  await channel.sendText(chatId, vLimitMsg, { parseMode: "plain" });
17194
17242
  return;
17195
17243
  }
17196
- await channel.sendText(chatId, "Thinking...", { parseMode: "plain" });
17197
17244
  const mode = getMode(chatId);
17198
17245
  const vModel = resolveModel(chatId);
17199
17246
  const vVerbose = getVerboseLevel(chatId);
17200
- const vToolCb = vVerbose !== "off" ? makeToolActionCallback(chatId, channel, vVerbose) : void 0;
17247
+ const adapter = getAdapterForChat(chatId);
17248
+ const modelLabel = formatModelShort(vModel ?? adapter.defaultModel);
17249
+ const showThinking = getShowThinkingUi(chatId);
17250
+ const needsLiveStatus = vVerbose !== "off" || showThinking;
17251
+ let vToolCb;
17252
+ let vLiveStatus = null;
17253
+ if (needsLiveStatus) {
17254
+ const effectiveVerbose = vVerbose === "off" ? "normal" : vVerbose;
17255
+ const ls = makeLiveStatus(chatId, channel, modelLabel, effectiveVerbose, showThinking);
17256
+ vLiveStatus = ls.liveStatus;
17257
+ vToolCb = vVerbose !== "off" ? ls.toolCb : void 0;
17258
+ await vLiveStatus.init();
17259
+ if (showThinking && adapter.id !== "claude") {
17260
+ vLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${adapter.displayName}`);
17261
+ }
17262
+ }
17263
+ const sigT0 = Date.now();
17201
17264
  const response = await askAgent(chatId, transcript, { cwd: getCwd(chatId), model: vModel, permMode: mode, onToolAction: vToolCb });
17265
+ if (vLiveStatus) await vLiveStatus.finalize(Date.now() - sigT0);
17202
17266
  if (response.usage) addUsage(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, vModel, void 0, response.usage.contextSize);
17203
17267
  if (await handleResponseExhaustion(response.text, chatId, msg, channel)) return;
17204
17268
  const voiceResponse = ensureReaction(response.text, transcript);
@@ -17263,8 +17327,25 @@ Acknowledge receipt. Do NOT analyze the video unless they ask you to.`;
17263
17327
  const vidModel = resolveModel(chatId);
17264
17328
  const vMode = getMode(chatId);
17265
17329
  const vidVerbose = getVerboseLevel(chatId);
17266
- const vidToolCb = vidVerbose !== "off" ? makeToolActionCallback(chatId, channel, vidVerbose) : void 0;
17330
+ const vidAdapter = getAdapterForChat(chatId);
17331
+ const vidModelLabel = formatModelShort(vidModel ?? vidAdapter.defaultModel);
17332
+ const vidShowThinking = getShowThinkingUi(chatId);
17333
+ const vidNeedsLiveStatus = vidVerbose !== "off" || vidShowThinking;
17334
+ let vidToolCb;
17335
+ let vidLiveStatus = null;
17336
+ if (vidNeedsLiveStatus) {
17337
+ const effectiveVerbose = vidVerbose === "off" ? "normal" : vidVerbose;
17338
+ const ls = makeLiveStatus(chatId, channel, vidModelLabel, effectiveVerbose, vidShowThinking);
17339
+ vidLiveStatus = ls.liveStatus;
17340
+ vidToolCb = vidVerbose !== "off" ? ls.toolCb : void 0;
17341
+ await vidLiveStatus.init();
17342
+ if (vidShowThinking && vidAdapter.id !== "claude") {
17343
+ vidLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${vidAdapter.displayName}`);
17344
+ }
17345
+ }
17346
+ const vidT0 = Date.now();
17267
17347
  const response2 = await askAgent(chatId, prompt2, { cwd: getCwd(chatId), model: vidModel, permMode: vMode, onToolAction: vidToolCb });
17348
+ if (vidLiveStatus) await vidLiveStatus.finalize(Date.now() - vidT0);
17268
17349
  if (response2.usage) addUsage(chatId, response2.usage.input, response2.usage.output, response2.usage.cacheRead, vidModel, void 0, response2.usage.contextSize);
17269
17350
  if (await handleResponseExhaustion(response2.text, chatId, msg, channel)) return;
17270
17351
  const vidResponse = ensureReaction(response2.text, caption || "video");
@@ -17320,8 +17401,25 @@ ${content}
17320
17401
  const mediaModel = resolveModel(chatId);
17321
17402
  const mMode = getMode(chatId);
17322
17403
  const mVerbose = getVerboseLevel(chatId);
17323
- const mToolCb = mVerbose !== "off" ? makeToolActionCallback(chatId, channel, mVerbose) : void 0;
17404
+ const mAdapter = getAdapterForChat(chatId);
17405
+ const mModelLabel = formatModelShort(mediaModel ?? mAdapter.defaultModel);
17406
+ const mShowThinking = getShowThinkingUi(chatId);
17407
+ const mNeedsLiveStatus = mVerbose !== "off" || mShowThinking;
17408
+ let mToolCb;
17409
+ let mLiveStatus = null;
17410
+ if (mNeedsLiveStatus) {
17411
+ const effectiveVerbose = mVerbose === "off" ? "normal" : mVerbose;
17412
+ const ls = makeLiveStatus(chatId, channel, mModelLabel, effectiveVerbose, mShowThinking);
17413
+ mLiveStatus = ls.liveStatus;
17414
+ mToolCb = mVerbose !== "off" ? ls.toolCb : void 0;
17415
+ await mLiveStatus.init();
17416
+ if (mShowThinking && mAdapter.id !== "claude") {
17417
+ mLiveStatus.addInfo(`\u{1F4AD} Thinking display not available for ${mAdapter.displayName}`);
17418
+ }
17419
+ }
17420
+ const mT0 = Date.now();
17324
17421
  const response = await askAgent(chatId, prompt, { cwd: getCwd(chatId), model: mediaModel, permMode: mMode, onToolAction: mToolCb });
17422
+ if (mLiveStatus) await mLiveStatus.finalize(Date.now() - mT0);
17325
17423
  if (response.usage) addUsage(chatId, response.usage.input, response.usage.output, response.usage.cacheRead, mediaModel, void 0, response.usage.contextSize);
17326
17424
  if (await handleResponseExhaustion(response.text, chatId, msg, channel)) return;
17327
17425
  const mediaResponse = ensureReaction(response.text, caption || "file");
@@ -17341,8 +17439,11 @@ var init_media = __esm({
17341
17439
  init_stt();
17342
17440
  init_video();
17343
17441
  init_store5();
17442
+ init_chat_settings();
17443
+ init_backends();
17344
17444
  init_helpers();
17345
17445
  init_response();
17446
+ init_live_status();
17346
17447
  MEDIA_INCOMING_PATH = join17(MEDIA_PATH, "incoming");
17347
17448
  }
17348
17449
  });
@@ -20747,7 +20848,7 @@ function buildSelectKeyboard(chatId) {
20747
20848
  const checkmark = isSelected ? "\u2713 " : " ";
20748
20849
  const row = [{
20749
20850
  label: `${checkmark}${modelInfo.label}`,
20750
- data: `council:toggle:${adapter.id}:${modelId}:${modelInfo.label}`,
20851
+ data: `council:toggle:${adapter.id}:${modelId}`,
20751
20852
  ...isSelected ? { style: "success" } : {}
20752
20853
  }];
20753
20854
  buttons.push(row);
@@ -21311,7 +21412,7 @@ async function handleStatusCommand(chatId, commandArgs, msg, channel) {
21311
21412
  `\u{1F507} Voice: ${voice2 ? "on" : "off"} \xB7 Sig: ${modelSig}`,
21312
21413
  ``,
21313
21414
  buildSectionHeader("Session"),
21314
- `\u{1F4CB} ${sessionId ? sessionId.slice(0, 12) + "..." : "no active session"}`,
21415
+ `\u{1F4CB} ${sessionId ?? "no active session"}`,
21315
21416
  `\u{1F4C1} ${cwd ?? "default workspace"}`,
21316
21417
  `\u{1F4D0} Context: ${ctxBar} ${usedK}K/${maxK}K (${contextPct.toFixed(1)}%)`,
21317
21418
  ...sqCount > 0 ? [`\u{1F5FA} Side quests: ${sqCount} active`] : [],
@@ -21605,7 +21706,7 @@ async function handleInfoCommand(chatId, commandArgs, msg, channel) {
21605
21706
  const alias = aliases.find((a) => a.chatId === chatId);
21606
21707
  if (alias) lines.push(`Alias: ${alias.alias}`);
21607
21708
  const sessionId = getSessionId(chatId);
21608
- if (sessionId) lines.push(`Session: ${sessionId.slice(0, 13)}\u2026`);
21709
+ if (sessionId) lines.push(`Session: ${sessionId}`);
21609
21710
  const backendId = getBackend(chatId) ?? "claude";
21610
21711
  const currentModel = getModel(chatId);
21611
21712
  lines.push(`Backend: ${backendId}`);
@@ -23441,7 +23542,9 @@ ${rotationNote}`, { parseMode: "html" });
23441
23542
  if (action === "toggle") {
23442
23543
  const backend2 = parts[2];
23443
23544
  const model2 = parts[3];
23444
- const label2 = parts.slice(4).join(":");
23545
+ const { getAdapter: getAdapter4 } = await Promise.resolve().then(() => (init_backends(), backends_exports));
23546
+ const toggleAdapter = getAdapter4(backend2);
23547
+ const label2 = toggleAdapter.availableModels[model2]?.label ?? model2;
23445
23548
  const { toggleParticipant: toggleParticipant2, buildSelectKeyboard: buildSelectKeyboard2, hasPendingCouncil: hasPendingCouncil2 } = await Promise.resolve().then(() => (init_wizard2(), wizard_exports));
23446
23549
  if (!hasPendingCouncil2(chatId)) {
23447
23550
  await channel.sendText(chatId, "No council wizard active. Use /council to start.", { parseMode: "plain" });
@@ -26251,7 +26354,10 @@ var init_telegram2 = __esm({
26251
26354
  { command: "reflect", description: "Trigger reflection analysis" },
26252
26355
  { command: "optimize", description: "Audit identity files and skills" },
26253
26356
  // Ollama
26254
- { command: "ollama", description: "Manage Ollama local LLM servers" }
26357
+ { command: "ollama", description: "Manage Ollama local LLM servers" },
26358
+ // Context & info
26359
+ { command: "info", description: "Current chat context (ID, topic, sender, settings)" },
26360
+ { command: "council", description: "Multi-model debate (select models, anonymous rounds)" }
26255
26361
  ]);
26256
26362
  this.bot.on("message", async (ctx) => {
26257
26363
  const chatId = ctx.chat.id.toString();
@@ -27243,6 +27349,8 @@ You are running inside CC-Claw. Your current chat ID, group name, and forum topi
27243
27349
  - \`/skills\` \u2014 List skills
27244
27350
  - \`/skill-install <url>\` \u2014 Install skill from GitHub
27245
27351
  - \`/evolve\` \u2014 Self-learning interactive keyboard (includes quick analyze)
27352
+ - \`/info\` \u2014 Current chat context (chatId, topic thread, sender, session, settings, copyable cron target)
27353
+ - \`/council\` \u2014 Multi-model debate wizard (select backends/models, anonymous rounds)
27246
27354
  - \`/gemini_accounts\` \u2014 Gemini credential rotation management
27247
27355
  - \`/setup-profile\` \u2014 User profile setup wizard
27248
27356
 
@@ -32010,6 +32118,7 @@ async function chatSend(globalOpts, message, cmdOpts) {
32010
32118
  chatId,
32011
32119
  message,
32012
32120
  stream: !!cmdOpts.stream,
32121
+ backend: cmdOpts.backend,
32013
32122
  model: cmdOpts.model,
32014
32123
  cwd: cmdOpts.cwd,
32015
32124
  mode: void 0
@@ -32409,6 +32518,8 @@ var init_completion = __esm({
32409
32518
  "db",
32410
32519
  "gemini",
32411
32520
  "evolve",
32521
+ "info",
32522
+ "council",
32412
32523
  "setup",
32413
32524
  "version",
32414
32525
  "help",
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cc-claw",
3
- "version": "0.20.4",
3
+ "version": "0.20.5",
4
4
  "description": "CC-Claw: Personal AI assistant on Telegram — multi-backend (Claude, Gemini, Codex, Cursor), sub-agent orchestration, MCP management",
5
5
  "type": "module",
6
6
  "main": "dist/cli.js",