@hitechclaw/clawspark 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +35 -0
  2. package/LICENSE +21 -0
  3. package/README.md +378 -0
  4. package/clawspark +2715 -0
  5. package/configs/models.yaml +108 -0
  6. package/configs/skill-packs.yaml +44 -0
  7. package/configs/skills.yaml +37 -0
  8. package/install.sh +387 -0
  9. package/lib/common.sh +249 -0
  10. package/lib/detect-hardware.sh +156 -0
  11. package/lib/diagnose.sh +636 -0
  12. package/lib/render-diagram.sh +47 -0
  13. package/lib/sandbox-commands.sh +415 -0
  14. package/lib/secure.sh +244 -0
  15. package/lib/select-model.sh +442 -0
  16. package/lib/setup-browser.sh +138 -0
  17. package/lib/setup-dashboard.sh +228 -0
  18. package/lib/setup-inference.sh +128 -0
  19. package/lib/setup-mcp.sh +142 -0
  20. package/lib/setup-messaging.sh +242 -0
  21. package/lib/setup-models.sh +121 -0
  22. package/lib/setup-openclaw.sh +808 -0
  23. package/lib/setup-sandbox.sh +188 -0
  24. package/lib/setup-skills.sh +113 -0
  25. package/lib/setup-systemd.sh +224 -0
  26. package/lib/setup-tailscale.sh +188 -0
  27. package/lib/setup-voice.sh +101 -0
  28. package/lib/skill-audit.sh +449 -0
  29. package/lib/verify.sh +177 -0
  30. package/package.json +57 -0
  31. package/scripts/release.sh +133 -0
  32. package/uninstall.sh +161 -0
  33. package/v2/README.md +50 -0
  34. package/v2/configs/providers.yaml +79 -0
  35. package/v2/configs/skills.yaml +36 -0
  36. package/v2/install.sh +116 -0
  37. package/v2/lib/common.sh +285 -0
  38. package/v2/lib/detect-hardware.sh +119 -0
  39. package/v2/lib/select-runtime.sh +273 -0
  40. package/v2/lib/setup-extras.sh +95 -0
  41. package/v2/lib/setup-openclaw.sh +187 -0
  42. package/v2/lib/setup-provider.sh +131 -0
  43. package/v2/lib/verify.sh +133 -0
  44. package/web/index.html +1835 -0
  45. package/web/install.sh +387 -0
  46. package/web/logo-hero.svg +11 -0
  47. package/web/logo-icon.svg +12 -0
  48. package/web/logo.svg +17 -0
  49. package/web/vercel.json +8 -0
@@ -0,0 +1,228 @@
1
#!/usr/bin/env bash
# lib/setup-dashboard.sh -- Installs ClawMetry observability dashboard for OpenClaw.
# Provides metrics, logs, and health monitoring via a local web UI.
set -o errexit -o nounset -o pipefail
6
# Install, configure, and launch the ClawMetry web dashboard.
# Best-effort: every failure path logs a warning and returns 0 so the
# overall installer keeps going. Reads CLAWSPARK_DIR / CLAWSPARK_LOG.
setup_dashboard() {
  log_info "Setting up ClawMetry observability dashboard..."
  hr

  # python3 is the only hard prerequisite.
  if ! check_command python3; then
    log_warn "python3 not found -- skipping dashboard."
    return 0
  fi

  local installed=false

  log_info "Installing ClawMetry via pip..."
  if _pip_install clawmetry; then
    installed=true
    log_success "ClawMetry installed via pip."
  else
    log_warn "pip install clawmetry failed -- falling back to git clone."

    # Fallback: fetch the source tree from GitHub.
    local src_dir="${CLAWSPARK_DIR}/clawmetry"
    if [[ -d "${src_dir}" ]]; then
      log_info "Existing clone found at ${src_dir} -- pulling latest..."
      (cd "${src_dir}" && git pull) >> "${CLAWSPARK_LOG}" 2>&1 || true
    else
      log_info "Cloning ClawMetry from GitHub..."
      (git clone https://github.com/vivekchand/clawmetry.git "${src_dir}") \
        >> "${CLAWSPARK_LOG}" 2>&1 &
      spinner $! "Cloning clawmetry..."
    fi

    if [[ ! -d "${src_dir}" ]]; then
      log_warn "Failed to clone ClawMetry -- skipping dashboard."
      return 0
    fi

    # A source checkout only needs Flask installed to run.
    log_info "Installing Flask dependency..."
    if _pip_install flask; then
      installed=true
      log_success "ClawMetry installed from source."
    else
      log_warn "Flask install failed -- dashboard may not work."
    fi
  fi

  if [[ "${installed}" != "true" ]]; then
    log_warn "ClawMetry installation incomplete -- skipping dashboard."
    return 0
  fi

  # Point ClawMetry at the OpenClaw workspace.
  local openclaw_dir="${HOME}/.openclaw"
  local cfg_dir="${CLAWSPARK_DIR}/clawmetry-config"
  mkdir -p "${cfg_dir}"

  cat > "${cfg_dir}/config.json" <<CMEOF
{
  "workspace": "${openclaw_dir}",
  "host": "127.0.0.1",
  "port": 8900,
  "log_file": "${CLAWSPARK_DIR}/dashboard.log"
}
CMEOF
  log_info "ClawMetry configured to use OpenClaw workspace at ${openclaw_dir}"

  # Launch as a background service.
  _start_dashboard

  # Poll the HTTP endpoint for up to 5 seconds.
  local up=false
  local attempt
  for attempt in 1 2 3 4 5; do
    if curl -sf --max-time 2 http://127.0.0.1:8900 &>/dev/null; then
      up=true
      break
    fi
    sleep 1
  done

  if [[ "${up}" == "true" ]]; then
    log_success "ClawMetry dashboard is running at http://127.0.0.1:8900"
  else
    log_warn "ClawMetry dashboard did not respond -- it may still be starting."
    log_info "Check logs at ${CLAWSPARK_DIR}/dashboard.log"
  fi

  printf '\n'
  print_box \
    "${BOLD}Dashboard URLs${RESET}" \
    "" \
    "ClawMetry (observability): http://127.0.0.1:8900" \
    "OpenClaw Control UI: http://127.0.0.1:18789/__openclaw__/canvas/" \
    "" \
    "The Control UI is built into the OpenClaw gateway" \
    "and requires no additional setup."
  printf '\n'

  log_success "Dashboard setup complete."
}
108
+
109
# ── Internal helpers ─────────────────────────────────────────────────────

# _pip_install PKG -- install a Python package, trying progressively more
# permissive strategies so it works on classic and PEP 668 "externally
# managed" Pythons (Ubuntu 23.04+, Debian 12+, Homebrew).
# All output is appended to ${CLAWSPARK_LOG}.
# Returns 0 on the first strategy that succeeds, 1 if all fail.
_pip_install() {
  local pkg="$1"
  # Try normal pip first.
  if pip3 install "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
    return 0
  fi
  # Per-user site-packages (no root required).
  if pip3 install --user "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
    return 0
  fi
  # PEP 668 workaround for externally managed environments.
  if pip3 install --break-system-packages "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
    return 0
  fi
  # Some systems ship python3 without a pip3 shim; go through the module.
  # Plain invocation first: older pips reject the (then-unknown)
  # --break-system-packages flag outright, so trying only the flagged form
  # made this last resort fail where a plain install would have worked.
  if python3 -m pip install "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
    return 0
  fi
  if python3 -m pip install --break-system-packages "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
    return 0
  fi
  return 1
}
132
+
133
# Launch ClawMetry on 127.0.0.1:8900 in the background, recording its PID in
# ${CLAWSPARK_DIR}/dashboard.pid. Tries the CLI entry point, then
# `python3 -m clawmetry`, then a raw Flask-app import, and logs a warning
# only when every method dies.
_start_dashboard() {
  local log_file="${CLAWSPARK_DIR}/dashboard.log"
  local pid_file="${CLAWSPARK_DIR}/dashboard.pid"

  # Stop a previously started instance, if one is still alive.
  if [[ -f "${pid_file}" ]]; then
    local previous
    previous=$(cat "${pid_file}")
    if kill -0 "${previous}" 2>/dev/null; then
      log_info "Stopping existing dashboard (PID ${previous})..."
      kill "${previous}" 2>/dev/null || true
      sleep 1
    fi
  fi

  log_info "Starting ClawMetry dashboard..."

  # Candidate CLI locations: PATH lookup plus the pip --user bin dirs that
  # are often missing from PATH (~/.local/bin on Linux,
  # ~/Library/Python/X.Y/bin on macOS).
  local -a candidates=(
    "$(command -v clawmetry 2>/dev/null || true)"
    "${HOME}/.local/bin/clawmetry"
  )
  local pyver
  pyver=$(python3 -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")' 2>/dev/null || echo "")
  if [[ -n "${pyver}" ]]; then
    candidates+=("${HOME}/Library/Python/${pyver}/bin/clawmetry")
  fi

  local pid=""
  local launched=false

  # Method 1: a clawmetry executable.
  local candidate
  for candidate in "${candidates[@]}"; do
    [[ -n "${candidate}" && -x "${candidate}" ]] || continue
    nohup "${candidate}" --port 8900 --host 127.0.0.1 > "${log_file}" 2>&1 &
    pid=$!
    echo "${pid}" > "${pid_file}"
    sleep 2
    if kill -0 "${pid}" 2>/dev/null; then
      log_success "ClawMetry running (PID ${pid}). Logs: ${log_file}"
      launched=true
      break
    fi
  done

  # Method 2: run the package as a module.
  if [[ "${launched}" != "true" ]]; then
    log_info "Trying python3 -m clawmetry..."
    nohup python3 -m clawmetry --port 8900 --host 127.0.0.1 > "${log_file}" 2>&1 &
    pid=$!
    echo "${pid}" > "${pid_file}"
    sleep 2
    if kill -0 "${pid}" 2>/dev/null; then
      log_success "ClawMetry running (PID ${pid}). Logs: ${log_file}"
      launched=true
    fi
  fi

  # Method 3: import the Flask app directly and serve it.
  if [[ "${launched}" != "true" ]]; then
    log_info "Trying alternative launch method..."
    nohup python3 -c "
import sys
# Try multiple known entry points
for factory in ['create_app', 'app', 'make_app']:
    try:
        mod = __import__('clawmetry')
        fn = getattr(mod, factory, None)
        if callable(fn):
            app = fn() if factory != 'app' else fn
            try:
                from waitress import serve
                serve(app, host='127.0.0.1', port=8900)
            except ImportError:
                app.run(host='127.0.0.1', port=8900)
            sys.exit(0)
    except Exception:
        continue
print('No working entry point found in clawmetry', file=sys.stderr)
sys.exit(1)
" > "${log_file}" 2>&1 &
    pid=$!
    echo "${pid}" > "${pid_file}"
    sleep 2
    if kill -0 "${pid}" 2>/dev/null; then
      log_success "ClawMetry running via fallback (PID ${pid})."
      launched=true
    else
      log_warn "All ClawMetry launch methods failed. Check ${log_file}."
    fi
  fi
}
@@ -0,0 +1,128 @@
1
#!/usr/bin/env bash
# lib/setup-inference.sh — Installs Ollama, pulls the chosen model, and
# waits for the inference API to become ready.
# Exports: INFERENCE_API_URL
set -o errexit -o nounset -o pipefail
7
# Install Ollama (if missing), start its service, pull ${SELECTED_MODEL_ID},
# and export INFERENCE_API_URL pointing at the local OpenAI-compatible API.
# Returns non-zero when Ollama cannot be installed or the model pull fails.
setup_inference() {
  log_info "Setting up inference engine (Ollama)..."
  hr

  # ── Jetson: set CUDA library path so Ollama can find GPU ─────────────────
  if [[ "${HW_PLATFORM:-}" == "jetson" ]] && [[ -d "/usr/local/cuda/lib64" ]]; then
    if [[ "${LD_LIBRARY_PATH:-}" != */usr/local/cuda/lib64* ]]; then
      export LD_LIBRARY_PATH="/usr/local/cuda/lib64:/usr/lib/aarch64-linux-gnu/tegra:${LD_LIBRARY_PATH:-}"
      log_info "Set LD_LIBRARY_PATH for Jetson CUDA: ${LD_LIBRARY_PATH}"
    fi
  fi

  # ── Install Ollama if missing ───────────────────────────────────────────
  if ! check_command ollama; then
    log_info "Ollama not found — installing..."
    if [[ "$(uname)" == "Darwin" ]]; then
      # macOS: check for Ollama.app first, then try Homebrew
      if [[ -d "/Applications/Ollama.app" ]]; then
        log_info "Ollama.app found but not on PATH. Adding..."
        export PATH="/Applications/Ollama.app/Contents/Resources:${PATH}"
      elif check_command brew; then
        log_info "Installing Ollama via Homebrew..."
        (brew install ollama) >> "${CLAWSPARK_LOG}" 2>&1 &
        spinner $! "Installing Ollama..."
      else
        # NOTE(review): ollama.com/install.sh targets Linux; on macOS without
        # Homebrew this is unlikely to succeed — we warn rather than fail hard.
        log_info "Installing Ollama via install script..."
        curl -fsSL https://ollama.com/install.sh | sh >> "${CLAWSPARK_LOG}" 2>&1 || {
          log_warn "Ollama install script returned an error."
        }
      fi
    else
      # Linux: use the official install script
      log_info "Installing via official script (this needs sudo)..."
      if curl -fsSL https://ollama.com/install.sh | sh >> "${CLAWSPARK_LOG}" 2>&1; then
        log_success "Ollama installed."
      else
        log_warn "Ollama install script returned an error."
      fi
    fi
    # Refresh the shell's command cache and confirm the binary is usable.
    hash -r 2>/dev/null || true
    if ! check_command ollama; then
      log_error "Ollama installation failed. Check ${CLAWSPARK_LOG} for details."
      log_info "Install manually: https://ollama.com/download"
      return 1
    fi
  else
    log_success "Ollama is already installed."
  fi

  # ── Start Ollama service ────────────────────────────────────────────────
  if ! _ollama_is_running; then
    log_info "Starting Ollama service..."
    if [[ "$(uname)" == "Darwin" ]] && [[ -d "/Applications/Ollama.app" ]]; then
      # macOS: launch the Ollama app (it runs as a menubar service)
      open -a Ollama 2>/dev/null || true
      log_info "Launched Ollama.app."
    elif check_command systemctl && systemctl is-enabled ollama &>/dev/null; then
      sudo systemctl start ollama >> "${CLAWSPARK_LOG}" 2>&1 || true
    elif check_command snap && snap list ollama &>/dev/null; then
      # DGX Spark / some Linux: Ollama installed as snap
      log_info "Ollama is a snap package -- starting via snap."
      sudo snap start ollama 2>> "${CLAWSPARK_LOG}" || true
    else
      # Start as a background process
      nohup ollama serve >> "${CLAWSPARK_DIR}/ollama.log" 2>&1 &
      local serve_pid=$!
      echo "${serve_pid}" > "${CLAWSPARK_DIR}/ollama.pid"
      log_info "Ollama serve started (PID ${serve_pid})."
    fi

    # Wait for the service to be ready
    _wait_for_ollama 30
  else
    log_success "Ollama is already running."
  fi

  # ── Pull the selected model ─────────────────────────────────────────────
  # BUG FIX: the old check used a plain substring grep, so e.g. "llama3"
  # matched an installed "llama3.1" and the requested model was never
  # pulled (and the post-pull verification false-passed the same way).
  # Match the NAME column of `ollama list` exactly instead.
  log_info "Pulling model: ${SELECTED_MODEL_ID} (this may take a while)..."
  if _inference_model_present "${SELECTED_MODEL_ID}"; then
    log_success "Model ${SELECTED_MODEL_ID} is already available locally."
  else
    if ! ollama pull "${SELECTED_MODEL_ID}" 2>&1 | tee -a "${CLAWSPARK_LOG}"; then
      log_error "Failed to pull model ${SELECTED_MODEL_ID}."
      return 1
    fi
    log_success "Model ${SELECTED_MODEL_ID} downloaded."
  fi

  # ── Verify model is listed ──────────────────────────────────────────────
  if ! _inference_model_present "${SELECTED_MODEL_ID}"; then
    log_error "Model ${SELECTED_MODEL_ID} not found in ollama list after pull."
    return 1
  fi

  # ── Set API URL ─────────────────────────────────────────────────────────
  INFERENCE_API_URL="http://127.0.0.1:11434/v1"
  export INFERENCE_API_URL

  log_success "Inference engine ready at ${INFERENCE_API_URL}"
}

# _inference_model_present MODEL -- true if MODEL is installed locally.
# Compares MODEL against the NAME column of `ollama list`, accepting either
# an exact match or MODEL plus a ":tag" suffix (ollama appends ":latest"
# when a model is pulled without an explicit tag).
_inference_model_present() {
  local model="$1"
  ollama list 2>/dev/null | awk -v m="${model}" '
    NR > 1 && ($1 == m || index($1, m ":") == 1) { found = 1 }
    END { exit found ? 0 : 1 }
  '
}
108
+
109
# ── Internal helpers ────────────────────────────────────────────────────────

# True when the Ollama HTTP API answers on its default local port.
_ollama_is_running() {
  curl -sf http://127.0.0.1:11434/ >/dev/null 2>&1
}
114
+
115
# Poll the Ollama API once per second until it responds.
# $1 - maximum number of 1-second attempts (default: 30).
# Returns 0 as soon as the API answers, 1 if it never does.
_wait_for_ollama() {
  local limit="${1:-30}"
  local i
  for (( i = 0; i < limit; i++ )); do
    if _ollama_is_running; then
      log_success "Ollama API is responsive."
      return 0
    fi
    sleep 1
  done
  log_error "Ollama did not become ready after ${limit}s."
  return 1
}
@@ -0,0 +1,142 @@
1
#!/usr/bin/env bash
# lib/setup-mcp.sh -- Installs and configures MCP (Model Context Protocol) servers
# via mcporter. Gives the agent real capabilities: diagrams, memory, code execution.
set -o errexit -o nounset -o pipefail
6
# Install mcporter plus a default set of MCP servers and merge their launch
# configuration into ~/.mcporter/mcporter.json (existing user-defined servers
# are preserved). Best-effort: failures log a warning and return 0 so the
# overall installer continues.
setup_mcp() {
  log_info "Setting up MCP servers (diagrams, memory, code execution)..."
  hr

  # ── Install mcporter (the MCP bridge for OpenClaw) ────────────────────────
  if ! check_command mcporter; then
    log_info "Installing mcporter (MCP bridge)..."
    if npm install -g mcporter@latest >> "${CLAWSPARK_LOG}" 2>&1; then
      log_success "mcporter installed."
    else
      # Retry with sudo: the global npm prefix is often root-owned.
      sudo npm install -g mcporter@latest >> "${CLAWSPARK_LOG}" 2>&1 || {
        log_warn "mcporter installation failed. MCP servers will not be available."
        log_info "Install manually: npm install -g mcporter"
        return 0
      }
      log_success "mcporter installed (via sudo)."
    fi
  else
    log_success "mcporter already installed."
  fi

  # ── Pre-install MCP server packages globally (avoids npx download on each call) ──
  log_info "Installing MCP server packages..."
  local -a mcp_packages=(
    "mermaid-mcp-server"
    "@modelcontextprotocol/server-memory"
    "@modelcontextprotocol/server-filesystem"
    "@modelcontextprotocol/server-sequential-thinking"
  )

  local pkg
  for pkg in "${mcp_packages[@]}"; do
    local short_name="${pkg##*/}"
    printf ' %s->%s Installing %s%s%s ... ' "${CYAN}" "${RESET}" "${BOLD}" "${short_name}" "${RESET}"
    if npm install -g "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
      printf '%s✓%s\n' "${GREEN}" "${RESET}"
    elif sudo npm install -g "${pkg}" >> "${CLAWSPARK_LOG}" 2>&1; then
      printf '%s✓%s\n' "${GREEN}" "${RESET}"
    else
      # Non-fatal: the server can still be fetched by npx on first use.
      printf '%s✗%s\n' "${YELLOW}" "${RESET}"
    fi
  done

  # ── Create mcporter config ────────────────────────────────────────────────
  local mcporter_dir="${HOME}/.mcporter"
  mkdir -p "${mcporter_dir}"

  local mcporter_config="${mcporter_dir}/mcporter.json"
  local workspace="${HOME}/workspace"

  log_info "Configuring MCP servers..."

  # BUG FIX: paths are passed as argv, never interpolated into the Python
  # source (a path containing a quote previously broke the script); the
  # unused hardcoded 'home' default was removed.
  python3 -c "
import json, os, sys

config_path = sys.argv[1]
workspace = sys.argv[2]

# Load existing config or start fresh
cfg = {}
if os.path.exists(config_path):
    try:
        with open(config_path) as f:
            cfg = json.load(f)
    except (json.JSONDecodeError, IOError):
        cfg = {}

servers = cfg.get('mcpServers', {})

# 1. Mermaid -- diagrams, flowcharts, architecture, sequence diagrams
servers['mermaid'] = {
    'command': 'npx',
    'args': ['-y', 'mermaid-mcp-server'],
    'env': {}
}

# 2. Memory -- persistent knowledge graph across sessions
servers['memory'] = {
    'command': 'npx',
    'args': ['-y', '@modelcontextprotocol/server-memory'],
    'env': {}
}

# 3. Filesystem -- enhanced file operations for the workspace
servers['filesystem'] = {
    'command': 'npx',
    'args': ['-y', '@modelcontextprotocol/server-filesystem', workspace],
    'env': {}
}

# 4. Sequential Thinking -- structured reasoning for complex tasks
servers['sequentialthinking'] = {
    'command': 'npx',
    'args': ['-y', '@modelcontextprotocol/server-sequential-thinking'],
    'env': {}
}

cfg['mcpServers'] = servers

with open(config_path, 'w') as f:
    json.dump(cfg, f, indent=2)

print(f'Configured {len(servers)} MCP servers')
" "${mcporter_config}" "${workspace}" 2>> "${CLAWSPARK_LOG}" || {
    log_warn "Failed to write mcporter config."
    return 0
  }

  # ── Verify config was written ────────────────────────────────────────────
  if [[ -f "${mcporter_config}" ]]; then
    local server_count
    # Path passed as argv here too (was previously interpolated into code).
    server_count=$(python3 -c "
import json, sys
with open(sys.argv[1]) as f:
    print(len(json.load(f).get('mcpServers', {})))
" "${mcporter_config}" 2>/dev/null || echo "0")
    log_success "MCP servers configured: ${server_count} server(s) in ${mcporter_config}"
  fi

  # ── Show what's available ─────────────────────────────────────────────────
  printf '\n'
  printf ' %s%sMCP Capabilities:%s\n' "${BOLD}" "${CYAN}" "${RESET}"
  printf ' %s*%s Mermaid -- architecture diagrams, flowcharts, sequence diagrams\n' "${GREEN}" "${RESET}"
  printf ' %s*%s Memory -- persistent knowledge graph across conversations\n' "${GREEN}" "${RESET}"
  printf ' %s*%s Filesystem -- enhanced file read/write/search\n' "${GREEN}" "${RESET}"
  printf ' %s*%s Thinking -- structured reasoning for complex multi-step tasks\n' "${GREEN}" "${RESET}"
  printf '\n'
  printf ' The agent can now create diagrams, remember context, and plan complex work.\n'
  printf ' Add more servers: %sclawspark mcp add <server-name>%s\n\n' "${CYAN}" "${RESET}"

  log_success "MCP setup complete."
}