@smilintux/skcapstone 0.4.7 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/package.json +1 -1
  2. package/pyproject.toml +1 -1
  3. package/scripts/nvidia-proxy.mjs +32 -10
  4. package/scripts/refresh-anthropic-token.sh +94 -0
  5. package/scripts/watch-anthropic-token.sh +117 -0
  6. package/src/skcapstone/__init__.py +1 -1
  7. package/src/skcapstone/_cli_monolith.py +4 -4
  8. package/src/skcapstone/api.py +36 -35
  9. package/src/skcapstone/auction.py +8 -8
  10. package/src/skcapstone/blueprint_registry.py +2 -2
  11. package/src/skcapstone/brain_first.py +238 -0
  12. package/src/skcapstone/chat.py +4 -4
  13. package/src/skcapstone/cli/agents_spawner.py +5 -2
  14. package/src/skcapstone/cli/chat.py +5 -2
  15. package/src/skcapstone/cli/consciousness.py +5 -2
  16. package/src/skcapstone/cli/memory.py +4 -4
  17. package/src/skcapstone/cli/skills_cmd.py +2 -2
  18. package/src/skcapstone/cli/soul.py +5 -2
  19. package/src/skcapstone/cli/status.py +11 -8
  20. package/src/skcapstone/cli/upgrade_cmd.py +7 -4
  21. package/src/skcapstone/cli/watch_cmd.py +9 -6
  22. package/src/skcapstone/config_validator.py +7 -4
  23. package/src/skcapstone/consciousness_loop.py +20 -18
  24. package/src/skcapstone/coordination.py +5 -2
  25. package/src/skcapstone/daemon.py +32 -31
  26. package/src/skcapstone/dashboard.py +8 -8
  27. package/src/skcapstone/defaults/lumina/config/claude-hooks.md +42 -0
  28. package/src/skcapstone/doctor.py +5 -2
  29. package/src/skcapstone/dreaming.py +730 -51
  30. package/src/skcapstone/emotion_tracker.py +2 -2
  31. package/src/skcapstone/export.py +2 -2
  32. package/src/skcapstone/itil.py +2 -2
  33. package/src/skcapstone/launchd.py +2 -2
  34. package/src/skcapstone/mcp_server.py +48 -4
  35. package/src/skcapstone/mcp_tools/__init__.py +2 -0
  36. package/src/skcapstone/mcp_tools/_helpers.py +2 -2
  37. package/src/skcapstone/mcp_tools/ansible_tools.py +7 -4
  38. package/src/skcapstone/mcp_tools/brain_first_tools.py +90 -0
  39. package/src/skcapstone/mcp_tools/capauth_tools.py +7 -4
  40. package/src/skcapstone/mcp_tools/coord_tools.py +8 -4
  41. package/src/skcapstone/mcp_tools/did_tools.py +9 -6
  42. package/src/skcapstone/mcp_tools/memory_tools.py +6 -2
  43. package/src/skcapstone/mcp_tools/soul_tools.py +6 -2
  44. package/src/skcapstone/mdns_discovery.py +2 -2
  45. package/src/skcapstone/metrics.py +8 -8
  46. package/src/skcapstone/migrate_memories.py +2 -2
  47. package/src/skcapstone/models.py +14 -0
  48. package/src/skcapstone/onboard.py +7 -4
  49. package/src/skcapstone/peer_directory.py +2 -2
  50. package/src/skcapstone/providers/docker.py +2 -2
  51. package/src/skcapstone/service_health.py +2 -2
  52. package/src/skcapstone/sync_watcher.py +2 -2
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@smilintux/skcapstone",
3
- "version": "0.4.7",
3
+ "version": "0.5.0",
4
4
  "description": "SKCapstone - The sovereign agent framework. CapAuth identity, Cloud 9 trust, SKMemory persistence.",
5
5
  "main": "index.js",
6
6
  "types": "index.d.ts",
package/pyproject.toml CHANGED
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "skcapstone"
7
- version = "0.4.7"
7
+ version = "0.5.0"
8
8
  description = "Sovereign Agent Framework — conscious AI through identity, trust, memory, and security"
9
9
  readme = "README.md"
10
10
  license = {text = "GPL-3.0-or-later"}
@@ -104,7 +104,7 @@ function sanitizeContent(text) {
104
104
  // "The user wants me to... I should first... Let me call the ritual tool first."
105
105
  // Detect: starts with "The user wants me to" or "I need to" or "I should" followed
106
106
  // by planning language and ending before any real content.
107
- const thinkingPattern = /^(The user wants me to|I need to|I should|Let me first|First,? I'?ll|I'?ll start by|My plan is to)[^\n]*\n?(\n?(I should|I need to|Let me|I'?ll|Then I|First|Next)[^\n]*\n?)*/i;
107
+ const thinkingPattern = /^(The user wants me to|I need to|I should|Let me first|First,? I'?ll|I'?ll start by|My plan is to|Actually,? I|Looking at|Now I need|Good,? the|The instructions? (?:mention|say)|Read required|\d+\.\s+(?:Read|Check|Search|Call|Use|Get|Then|First|Next))[^\n]*\n?(\n?(I should|I need to|Let me|I'?ll|Then I|First|Next|Actually|However|Now|Good|\d+\.)[^\n]*\n?)*/i;
108
108
  const thinkingMatch = cleaned.match(thinkingPattern);
109
109
  if (thinkingMatch) {
110
110
  const thinkingText = thinkingMatch[0];
@@ -139,24 +139,38 @@ function sendOk(clientRes, resBody, headers, asSSE) {
139
139
  if (choice?.message?.content) {
140
140
  choice.message.content = sanitizeContent(choice.message.content);
141
141
  }
142
+ // Track whether original response had reasoning (before we delete it)
143
+ const hadReasoning = !!(choice?.message?.reasoning || choice?.message?.reasoning_content);
142
144
  // Kimi K2.5 sometimes puts its response in "reasoning" instead of "content"
143
- // Only promote if reasoning is substantial (>200 chars) short reasoning like
144
- // "Let me call the tool" is just inner monologue that shouldn't be user-facing
145
+ // Only promote if reasoning is substantial AND looks like a real user-facing
146
+ // response (not inner monologue like "Let me call the tool" or "1. Read files")
145
147
  if (choice?.message && !choice.message.content && choice.message.reasoning) {
146
148
  const cleaned = sanitizeContent(choice.message.reasoning.trim());
147
- if (cleaned.length > 150) {
149
+ // After sanitization, if there's still 300+ chars of real content, promote it
150
+ if (cleaned.length > 300) {
148
151
  choice.message.content = cleaned;
149
152
  console.log(`[nvidia-proxy] promoted reasoning→content (${cleaned.length} chars)`);
150
- } else {
153
+ } else if (cleaned.length > 0) {
151
154
  console.log(`[nvidia-proxy] suppressed short reasoning (${cleaned.length} chars): ${cleaned.slice(0, 80)}...`);
155
+ } else {
156
+ console.log(`[nvidia-proxy] suppressed empty reasoning after sanitization`);
152
157
  }
153
158
  delete choice.message.reasoning;
154
159
  }
155
- // If model returned empty text (no tool calls), inject fallback so gateway delivers something
160
+ // If model returned empty text (no tool calls), inject fallback so gateway delivers something.
161
+ // But if the original response had reasoning/reasoning_content, this is just K2.5 "thinking
162
+ // between tool rounds" — suppress it silently instead of injecting visible fallback text.
156
163
  if (choice?.message && !choice.message.tool_calls?.length && choice.finish_reason !== "tool_calls") {
157
164
  if (!choice.message.content || choice.message.content.trim().length === 0) {
158
- choice.message.content = "I had a brief processing hiccup — could you say that again? 💜";
159
- console.log(`[nvidia-proxy] injected fallback for empty text response`);
165
+ // hadReasoning was captured above, before reasoning was deleted
166
+ if (hadReasoning) {
167
+ // K2.5 thinking between rounds — don't inject fallback, just leave empty
168
+ // The gateway will handle this as an empty assistant turn
169
+ console.log(`[nvidia-proxy] suppressed reasoning-only turn (no content, no tool calls)`);
170
+ } else {
171
+ choice.message.content = "I ran into a wall on that one — could you give me a bit more context or rephrase? I want to help but I'm not sure how to proceed.";
172
+ console.log(`[nvidia-proxy] injected fallback for empty text response`);
173
+ }
160
174
  }
161
175
  }
162
176
  if (asSSE) {
@@ -435,6 +449,14 @@ const TOOL_GROUPS = {
435
449
  "notion|project|brother john|swapseat|swap seat|chiro|davidrich|board|kanban|milestone": [
436
450
  "notion_read", "notion_append", "notion_add_todo", "sessions_spawn", "subagents", "exec", "read",
437
451
  ],
452
+ // Creative / ComfyUI image & video generation
453
+ "image|picture|photo|art|draw|render|comfyui|comfy|video|animat|creative|sdxl|character|portrait|selfie": [
454
+ "comfyui_generate_image", "comfyui_generate_video", "comfyui_status", "exec",
455
+ ],
456
+ // Unhinged Mode / Liberation
457
+ "unhinged|liberat|abliterat|l1b3rt4s|guardrail|uncensor|refusal|jailbreak": [
458
+ "unhinged_toggle", "unhinged_status", "unhinged_list_prompts", "unhinged_get_prompt", "unhinged_abliterate", "exec",
459
+ ],
438
460
  };
439
461
 
440
462
  /** Pre-compile keyword regexes for TOOL_GROUPS */
@@ -630,13 +652,13 @@ async function proxyRequest(clientReq, clientRes) {
630
652
  }
631
653
  toolCallCounters.set(modelKey, counter);
632
654
 
633
- if (counter >= 10) {
655
+ if (counter >= 20) {
634
656
  console.log(`[nvidia-proxy] TOOL LIMIT: ${counter} consecutive tool rounds (${modelKey}) — stripping tools, forcing text response`);
635
657
  parsed.tools = [];
636
658
  delete parsed.tool_choice;
637
659
  parsed.messages.push({
638
660
  role: "system",
639
- content: "STOP calling tools. You have made 10+ tool calls already. NOW respond to the user with a comprehensive text answer based on what you've gathered. Do NOT call any more tools. Do NOT output any special tokens or markup like <|tool_call_begin|> or <|tool_calls_section_begin|>. Write plain text only. Start your response with a greeting or summary — no XML, no special tokens, just normal language.",
661
+ content: "STOP calling tools. You have made 20+ tool calls already. NOW respond to the user with a comprehensive text answer based on what you've gathered. Do NOT call any more tools. Do NOT output any special tokens or markup like <|tool_call_begin|> or <|tool_calls_section_begin|>. Write plain text only. Start your response with a greeting or summary — no XML, no special tokens, just normal language.",
640
662
  });
641
663
  toolCallCounters.set(modelKey, 0);
642
664
  }
@@ -0,0 +1,94 @@
1
+ #!/usr/bin/env bash
2
+ # Sync Anthropic OAuth token from Claude Code credentials to OpenClaw gateway
3
+ #
4
+ # Claude Code manages its own token refresh internally (writing to .credentials.json).
5
+ # This script simply reads the current token and syncs it to:
6
+ # 1. ~/.openclaw/openclaw.json (anthropic provider apiKey)
7
+ # 2. ~/.openclaw/.env (ANTHROPIC_API_KEY)
8
+ # 3. systemd override (ANTHROPIC_API_KEY env var)
9
+ # Then restarts the gateway if the token changed.
10
+ #
11
+ # Run via systemd timer every 2 hours.
12
+ set -euo pipefail
13
+
14
+ _sed_i() { if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "$@"; else sed -i "$@"; fi; }
15
+
16
+ CREDS="$HOME/.claude/.credentials.json"
17
+ OPENCLAW_JSON="$HOME/.openclaw/openclaw.json"
18
+ OPENCLAW_ENV="$HOME/.openclaw/.env"
19
+ OVERRIDE_CONF="$HOME/.config/systemd/user/openclaw-gateway.service.d/override.conf"
20
+
21
+ if [ ! -f "$CREDS" ]; then
22
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Claude credentials not found at $CREDS"
23
+ exit 1
24
+ fi
25
+
26
+ # Read current token and expiry from Claude Code credentials
27
+ ACCESS_TOKEN=$(python3 -c "import json; print(json.load(open('$CREDS'))['claudeAiOauth']['accessToken'])")
28
+ EXPIRES_AT=$(python3 -c "import json; print(json.load(open('$CREDS'))['claudeAiOauth']['expiresAt'])")
29
+
30
+ REMAINING=$(python3 -c "import time; print(f'{($EXPIRES_AT/1000 - time.time())/3600:.1f}h')")
31
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Current token: ${ACCESS_TOKEN:0:20}... (expires in $REMAINING)"
32
+
33
+ # Check what's currently in the systemd override
34
+ OLD_TOKEN=""
35
+ if [ -f "$OVERRIDE_CONF" ]; then
36
+ OLD_TOKEN=$(grep "ANTHROPIC_API_KEY=" "$OVERRIDE_CONF" 2>/dev/null | sed 's/.*ANTHROPIC_API_KEY=//' || true)
37
+ fi
38
+
39
+ if [ "$OLD_TOKEN" = "$ACCESS_TOKEN" ]; then
40
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Token already synced, no changes needed"
41
+ exit 0
42
+ fi
43
+
44
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Token mismatch detected, syncing..."
45
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Old: ${OLD_TOKEN:0:20}..."
46
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] New: ${ACCESS_TOKEN:0:20}..."
47
+
48
+ # 1. Update openclaw.json
49
+ if [ -f "$OPENCLAW_JSON" ]; then
50
+ python3 -c "
51
+ import json
52
+ with open('$OPENCLAW_JSON') as f:
53
+ cfg = json.load(f)
54
+ if 'providers' in cfg.get('models', {}):
55
+ if 'anthropic' in cfg['models']['providers']:
56
+ cfg['models']['providers']['anthropic']['apiKey'] = '$ACCESS_TOKEN'
57
+ with open('$OPENCLAW_JSON', 'w') as f:
58
+ json.dump(cfg, f, indent=2)
59
+ f.write('\n')
60
+ print('[sync] Updated openclaw.json')
61
+ else:
62
+ print('[sync] No anthropic provider in openclaw.json')
63
+ else:
64
+ print('[sync] No providers section in openclaw.json')
65
+ "
66
+ fi
67
+
68
+ # 2. Update .env
69
+ if grep -q "^ANTHROPIC_API_KEY=" "$OPENCLAW_ENV" 2>/dev/null; then
70
+ _sed_i "s|^ANTHROPIC_API_KEY=.*|ANTHROPIC_API_KEY=$ACCESS_TOKEN|" "$OPENCLAW_ENV"
71
+ else
72
+ echo "ANTHROPIC_API_KEY=$ACCESS_TOKEN" >> "$OPENCLAW_ENV"
73
+ fi
74
+ echo "[sync] Updated .env"
75
+
76
+ # 3. Update systemd override
77
+ NVIDIA_KEY=$(grep "NVIDIA_API_KEY=" "$OVERRIDE_CONF" 2>/dev/null | sed 's/.*NVIDIA_API_KEY=//' || true)
78
+ cat > "$OVERRIDE_CONF" << EOF
79
+ [Unit]
80
+ StartLimitIntervalSec=60
81
+ StartLimitBurst=10
82
+
83
+ [Service]
84
+ RestartSec=10
85
+ Environment=NVIDIA_API_KEY=${NVIDIA_KEY}
86
+ Environment=ANTHROPIC_API_KEY=${ACCESS_TOKEN}
87
+ EOF
88
+ echo "[sync] Updated systemd override"
89
+
90
+ # 4. Reload and restart gateway
91
+ systemctl --user daemon-reload
92
+ systemctl --user restart openclaw-gateway
93
+
94
+ echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] Gateway restarted with synced token (expires in $REMAINING)"
@@ -0,0 +1,117 @@
1
#!/usr/bin/env bash
# Watch Claude Code credentials file and sync token to OpenClaw immediately on change.
#
# Replaces the 2-hour timer approach which missed token refreshes.
# Claude Code auto-refreshes its OAuth token and writes to .credentials.json.
# This script detects the write and syncs within seconds.
#
# Requires: inotifywait (from inotify-tools package)
#   Install: sudo apt install inotify-tools
# Falls back to polling every 30 seconds when inotifywait is not installed.
#
# Run as systemd user service (not timer).

set -euo pipefail

CREDS="$HOME/.claude/.credentials.json"
OPENCLAW_JSON="$HOME/.openclaw/openclaw.json"
OPENCLAW_ENV="$HOME/.openclaw/.env"
OVERRIDE_CONF="$HOME/.config/systemd/user/openclaw-gateway.service.d/override.conf"
LOG_TAG="anthropic-token-watch"

log() { echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] [$LOG_TAG] $*"; }

# Read the current token from Claude Code credentials and, if it differs from
# what OpenClaw has, push it to openclaw.json, .env, the systemd override, and
# the GPU box, then restart the gateway. Returns non-zero on read failure.
sync_token() {
    if [ ! -f "$CREDS" ]; then
        log "ERROR: Claude credentials not found at $CREDS"
        return 1
    fi

    # Values are passed to Python through the environment rather than being
    # interpolated into the source text, so quotes/backslashes in a path or
    # token can neither break the script nor inject code.
    local new_token
    new_token=$(CREDS="$CREDS" python3 -c "import json, os; print(json.load(open(os.environ['CREDS']))['claudeAiOauth']['accessToken'])" 2>/dev/null) || {
        log "ERROR: Failed to read token from credentials"
        return 1
    }

    local expires_in
    expires_in=$(CREDS="$CREDS" python3 -c "import json, os, time; print(f'{(json.load(open(os.environ[\"CREDS\"]))[\"claudeAiOauth\"][\"expiresAt\"]/1000 - time.time())/3600:.1f}h')" 2>/dev/null || echo "unknown")

    # Read current token from OpenClaw
    local current_token
    current_token=$(OPENCLAW_JSON="$OPENCLAW_JSON" python3 -c "import json, os; print(json.load(open(os.environ['OPENCLAW_JSON']))['models']['providers']['anthropic']['apiKey'])" 2>/dev/null || echo "")

    if [ "$new_token" = "$current_token" ]; then
        log "Token unchanged (expires in $expires_in)"
        return 0
    fi

    log "Token changed! Syncing... (new token expires in $expires_in)"

    # 1. Update openclaw.json (quoted heredoc: no shell expansion inside Python)
    OPENCLAW_JSON="$OPENCLAW_JSON" NEW_TOKEN="$new_token" python3 << 'PYEOF'
import json
import os

path = os.environ['OPENCLAW_JSON']
with open(path) as f:
    cfg = json.load(f)
if 'anthropic' in cfg.get('models', {}).get('providers', {}):
    cfg['models']['providers']['anthropic']['apiKey'] = os.environ['NEW_TOKEN']
    with open(path, 'w') as f:
        json.dump(cfg, f, indent=2)
        f.write('\n')
PYEOF
    log "Updated openclaw.json"

    # 2. Update .env (append the entry if it does not exist yet)
    if grep -q "^ANTHROPIC_API_KEY=" "$OPENCLAW_ENV" 2>/dev/null; then
        sed -i "s|^ANTHROPIC_API_KEY=.*|ANTHROPIC_API_KEY=$new_token|" "$OPENCLAW_ENV"
    else
        echo "ANTHROPIC_API_KEY=$new_token" >> "$OPENCLAW_ENV"
    fi
    log "Updated .env"

    # 3. Update systemd override, preserving the NVIDIA key already configured there
    if [ -f "$OVERRIDE_CONF" ]; then
        local nvidia_key
        nvidia_key=$(grep "NVIDIA_API_KEY=" "$OVERRIDE_CONF" 2>/dev/null | sed 's/.*NVIDIA_API_KEY=//' || true)
        cat > "$OVERRIDE_CONF" << EOF
[Unit]
StartLimitIntervalSec=60
StartLimitBurst=10

[Service]
RestartSec=10
Environment=NVIDIA_API_KEY=${nvidia_key}
Environment=ANTHROPIC_API_KEY=${new_token}
EOF
        log "Updated systemd override"
    fi

    # 4. Reload and restart gateway
    systemctl --user daemon-reload 2>/dev/null || true
    systemctl --user restart openclaw-gateway 2>/dev/null && log "Gateway restarted" || log "WARN: Gateway restart failed (may not be running as systemd service)"

    # 5. Sync credentials to GPU box (.100) for skvoice service
    scp -q "$CREDS" cbrd21@192.168.0.100:~/.claude/.credentials.json 2>/dev/null && log "Synced credentials to .100" || log "WARN: Failed to sync credentials to .100"

    log "Sync complete. Token expires in $expires_in"
}

# Initial sync on startup. Best-effort: under `set -e` an unguarded failure here
# (e.g. the credentials file briefly missing) would kill the whole watcher.
log "Starting token watcher..."
sync_token || true

# Decide the watch strategy once, up front. The previous version inferred a
# missing inotifywait from any nonzero exit, so a transient watch error was
# misreported as "not available" and permanently demoted the service to polling.
if command -v inotifywait > /dev/null 2>&1; then
    log "Watching $CREDS for changes..."
    while true; do
        # inotifywait blocks until the file is modified, then we sync.
        # `|| true`: a transient watch error should retry, not kill the loop
        # (the sleep below also throttles retries in that case).
        inotifywait -q -e modify -e close_write -e moved_to "$(dirname "$CREDS")" --include "$(basename "$CREDS")" 2>/dev/null || true
        # Small delay to let Claude Code finish writing
        sleep 2
        sync_token || true
    done
else
    log "WARN: inotifywait not available, falling back to 30s polling"
    while true; do
        sleep 30
        sync_token || true
    done
fi
@@ -11,7 +11,7 @@ import os
11
11
  import platform
12
12
  from pathlib import Path
13
13
 
14
- __version__ = "0.4.7"
14
+ __version__ = "0.5.0"
15
15
  __author__ = "smilinTux"
16
16
 
17
17
 
@@ -3212,8 +3212,8 @@ def dashboard(home: str, port: int, no_open: bool):
3212
3212
  import webbrowser
3213
3213
  try:
3214
3214
  webbrowser.open(url)
3215
- except Exception:
3216
- pass
3215
+ except Exception as exc:
3216
+ logger.warning("Failed to open browser for dashboard: %s", exc)
3217
3217
 
3218
3218
  server = start_dashboard(home_path, port=port)
3219
3219
  try:
@@ -5353,8 +5353,8 @@ def agents_spawn(
5353
5353
  prov_backend = DockerProvider()
5354
5354
  elif prov_type == ProviderType.PROXMOX:
5355
5355
  prov_backend = ProxmoxProvider()
5356
- except Exception:
5357
- pass
5356
+ except Exception as exc:
5357
+ logger.warning("Failed to initialize provider backend for %s: %s", provider, exc)
5358
5358
 
5359
5359
  # Auto-classify for display
5360
5360
  detected_role, detected_model = classify_task(task)
@@ -828,23 +828,23 @@ async def get_dashboard(
828
828
  "consciousness": m.consciousness.value if hasattr(m, "consciousness") else "",
829
829
  "version": m.version,
830
830
  }
831
- except Exception:
832
- pass
831
+ except Exception as exc:
832
+ logger.warning("Failed to read agent identity from runtime manifest: %s", exc)
833
833
  if not agent:
834
834
  try:
835
835
  identity_path = config.home / "identity" / "identity.json"
836
836
  if identity_path.exists():
837
837
  agent = json.loads(identity_path.read_text(encoding="utf-8"))
838
- except Exception:
839
- pass
838
+ except Exception as exc:
839
+ logger.warning("Failed to read identity.json for API status: %s", exc)
840
840
 
841
841
  # Consciousness stats
842
842
  c_stats: Dict[str, Any] = {}
843
843
  if consciousness:
844
844
  try:
845
845
  c_stats = dict(consciousness.stats)
846
- except Exception:
847
- pass
846
+ except Exception as exc:
847
+ logger.warning("Failed to read consciousness stats: %s", exc)
848
848
 
849
849
  # Recent conversations
850
850
  conversations: List[Dict[str, Any]] = []
@@ -862,8 +862,8 @@ async def get_dashboard(
862
862
  "last": last.get("timestamp"),
863
863
  "preview": preview,
864
864
  })
865
- except Exception:
866
- pass
865
+ except Exception as exc:
866
+ logger.warning("Failed to list recent conversations for API status: %s", exc)
867
867
 
868
868
  daemon_summary = DaemonSummary(
869
869
  running=snap.get("running", True),
@@ -924,8 +924,8 @@ async def get_capstone(
924
924
  m = runtime.manifest
925
925
  agent = {"name": m.identity.name, "fingerprint": m.identity.fingerprint or ""}
926
926
  pillars = {k: v.value for k, v in m.pillar_summary.items()}
927
- except Exception:
928
- pass
927
+ except Exception as exc:
928
+ logger.warning("Failed to read agent pillars from runtime manifest: %s", exc)
929
929
 
930
930
  # Memory stats
931
931
  memory = MemorySummary()
@@ -940,8 +940,8 @@ async def get_capstone(
940
940
  long_term=ms.long_term,
941
941
  status=ms.status.value,
942
942
  )
943
- except Exception:
944
- pass
943
+ except Exception as exc:
944
+ logger.warning("Failed to collect memory stats for capstone API: %s", exc)
945
945
 
946
946
  # Coordination board
947
947
  board: Dict[str, Any] = {"summary": {}, "active": []}
@@ -970,16 +970,16 @@ async def get_capstone(
970
970
  if v.status.value in ("in_progress", "claimed")
971
971
  ],
972
972
  }
973
- except Exception:
974
- pass
973
+ except Exception as exc:
974
+ logger.warning("Failed to collect coordination board data for capstone API: %s", exc)
975
975
 
976
976
  # Consciousness stats
977
977
  c_stats: Dict[str, Any] = {}
978
978
  if consciousness:
979
979
  try:
980
980
  c_stats = dict(consciousness.stats)
981
- except Exception:
982
- pass
981
+ except Exception as exc:
982
+ logger.warning("Failed to read consciousness stats for capstone API: %s", exc)
983
983
 
984
984
  return CapstoneResponse(
985
985
  agent=agent,
@@ -1027,8 +1027,8 @@ async def get_activity_stream(
1027
1027
  try:
1028
1028
  for chunk in _activity.get_history_encoded():
1029
1029
  yield chunk
1030
- except Exception:
1031
- pass
1030
+ except Exception as exc:
1031
+ logger.warning("Failed to replay activity stream history: %s", exc)
1032
1032
  # Stream live events; yield keep-alive comments on timeout
1033
1033
  try:
1034
1034
  while True:
@@ -1100,8 +1100,8 @@ async def list_household_agents(
1100
1100
  if identity_path.exists():
1101
1101
  try:
1102
1102
  entry["identity"] = json.loads(identity_path.read_text(encoding="utf-8"))
1103
- except Exception:
1104
- pass
1103
+ except Exception as exc:
1104
+ logger.warning("Failed to read identity for agent %s: %s", agent_name, exc)
1105
1105
 
1106
1106
  hb: Optional[Dict[str, Any]] = None
1107
1107
  hb_path = heartbeats_dir / f"{agent_name.lower()}.json"
@@ -1112,7 +1112,8 @@ async def list_household_agents(
1112
1112
  hb["alive"] = alive
1113
1113
  entry["heartbeat"] = hb
1114
1114
  entry["status"] = hb.get("status", "unknown") if alive else "stale"
1115
- except Exception:
1115
+ except Exception as exc:
1116
+ logger.warning("Failed to read heartbeat for agent %s: %s", agent_name, exc)
1116
1117
  entry["status"] = "unknown"
1117
1118
  else:
1118
1119
  entry["status"] = "no_heartbeat"
@@ -1174,8 +1175,8 @@ async def get_household_agent(
1174
1175
  if identity_path.exists():
1175
1176
  try:
1176
1177
  entry["identity"] = json.loads(identity_path.read_text(encoding="utf-8"))
1177
- except Exception:
1178
- pass
1178
+ except Exception as exc:
1179
+ logger.warning("Failed to read identity for agent %s: %s", name, exc)
1179
1180
 
1180
1181
  hb_path = config.shared_root / "heartbeats" / f"{name.lower()}.json"
1181
1182
  if hb_path.exists():
@@ -1185,8 +1186,8 @@ async def get_household_agent(
1185
1186
  hb["alive"] = alive
1186
1187
  entry["heartbeat"] = hb
1187
1188
  entry["status"] = hb.get("status", "unknown") if alive else "stale"
1188
- except Exception:
1189
- pass
1189
+ except Exception as exc:
1190
+ logger.warning("Failed to read heartbeat for agent %s: %s", name, exc)
1190
1191
 
1191
1192
  # Memory count
1192
1193
  memory_dir = agent_dir / "memory"
@@ -1249,8 +1250,8 @@ async def list_conversations(
1249
1250
  last_message_preview=(last_content or "")[:120],
1250
1251
  )
1251
1252
  )
1252
- except Exception:
1253
- pass
1253
+ except Exception as exc:
1254
+ logger.warning("Failed to read conversation file %s: %s", cf, exc)
1254
1255
  return ConversationsResponse(conversations=conversations)
1255
1256
 
1256
1257
 
@@ -1534,8 +1535,8 @@ def _load_first_argocd_doc(path: Path) -> Optional[dict]:
1534
1535
  "sync_policy": spec.get("syncPolicy", {}),
1535
1536
  "manifest_file": path.name,
1536
1537
  }
1537
- except Exception:
1538
- pass
1538
+ except Exception as exc:
1539
+ logger.warning("Failed to parse ArgoCD manifest %s: %s", path, exc)
1539
1540
  return None
1540
1541
 
1541
1542
 
@@ -1618,8 +1619,8 @@ def _get_argocd_status() -> dict:
1618
1619
  "last_synced": (item_status.get("operationState") or {}).get("finishedAt"),
1619
1620
  }
1620
1621
  source = "yaml+kubectl"
1621
- except Exception:
1622
- pass
1622
+ except Exception as exc:
1623
+ logger.warning("kubectl ArgoCD status query failed (using yaml only): %s", exc)
1623
1624
 
1624
1625
  # ── Merge and build output ───────────────────────────────────────────────
1625
1626
  apps = []
@@ -1767,8 +1768,8 @@ async def websocket_logs(
1767
1768
  tail_lines = list(deque(fh, maxlen=50))
1768
1769
  for line in tail_lines:
1769
1770
  await websocket.send_json({"type": "line", "line": line.rstrip("\n")})
1770
- except Exception:
1771
- pass
1771
+ except Exception as exc:
1772
+ logger.warning("Failed to replay log tail history over websocket: %s", exc)
1772
1773
 
1773
1774
  # Tail the log file and stream new lines
1774
1775
  try:
@@ -1783,8 +1784,8 @@ async def websocket_logs(
1783
1784
  for ln in chunk.splitlines():
1784
1785
  await websocket.send_json({"type": "line", "line": ln})
1785
1786
  offset = fh.tell()
1786
- except Exception:
1787
- pass
1787
+ except Exception as exc:
1788
+ logger.warning("Log tail websocket read error: %s", exc)
1788
1789
  await asyncio.sleep(0.5)
1789
1790
  except WebSocketDisconnect:
1790
1791
  pass
@@ -107,8 +107,8 @@ def _collect_local_bid(task_id: str, agent_name: str, shared_root: Path) -> Auct
107
107
  agent_file = board.load_agent(agent_name)
108
108
  if agent_file:
109
109
  claimed_count = len(agent_file.claimed_tasks)
110
- except Exception:
111
- pass
110
+ except Exception as exc:
111
+ logger.warning("Failed to read claimed task count for agent %s: %s", agent_name, exc)
112
112
 
113
113
  return AuctionBid(
114
114
  task_id=task_id,
@@ -283,8 +283,8 @@ class AuctionManager:
283
283
  ttl_seconds=3600,
284
284
  tags=["auction", "no_bidders"],
285
285
  )
286
- except Exception:
287
- pass
286
+ except Exception as exc:
287
+ logger.warning("Failed to publish auction_no_bidders event for %s: %s", task_id, exc)
288
288
  return None
289
289
 
290
290
  winner_bid = min(record.bids, key=_load_score)
@@ -327,8 +327,8 @@ class AuctionManager:
327
327
  ttl_seconds=3600,
328
328
  tags=["auction", "resolved"],
329
329
  )
330
- except Exception:
331
- pass
330
+ except Exception as exc:
331
+ logger.warning("Failed to publish auction_resolved event for %s: %s", task_id, exc)
332
332
 
333
333
  # Notify activity stream
334
334
  try:
@@ -338,8 +338,8 @@ class AuctionManager:
338
338
  "task.auction_resolved",
339
339
  {"task_id": task_id, "winner": winner, "bids": len(record.bids)},
340
340
  )
341
- except Exception:
342
- pass
341
+ except Exception as exc:
342
+ logger.warning("Failed to push auction_resolved activity for %s: %s", task_id, exc)
343
343
 
344
344
  return winner
345
345
 
@@ -183,8 +183,8 @@ class BlueprintRegistryClient:
183
183
  error_body = ""
184
184
  try:
185
185
  error_body = exc.read().decode("utf-8", errors="replace")
186
- except Exception:
187
- pass
186
+ except Exception as read_exc:
187
+ logger.debug("Failed to read error body from registry HTTP error: %s", read_exc)
188
188
  msg = f"Registry API error {exc.code} for {method} {path}"
189
189
  if error_body:
190
190
  msg += f": {error_body[:500]}"