@hitechclaw/clawspark 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +35 -0
- package/LICENSE +21 -0
- package/README.md +378 -0
- package/clawspark +2715 -0
- package/configs/models.yaml +108 -0
- package/configs/skill-packs.yaml +44 -0
- package/configs/skills.yaml +37 -0
- package/install.sh +387 -0
- package/lib/common.sh +249 -0
- package/lib/detect-hardware.sh +156 -0
- package/lib/diagnose.sh +636 -0
- package/lib/render-diagram.sh +47 -0
- package/lib/sandbox-commands.sh +415 -0
- package/lib/secure.sh +244 -0
- package/lib/select-model.sh +442 -0
- package/lib/setup-browser.sh +138 -0
- package/lib/setup-dashboard.sh +228 -0
- package/lib/setup-inference.sh +128 -0
- package/lib/setup-mcp.sh +142 -0
- package/lib/setup-messaging.sh +242 -0
- package/lib/setup-models.sh +121 -0
- package/lib/setup-openclaw.sh +808 -0
- package/lib/setup-sandbox.sh +188 -0
- package/lib/setup-skills.sh +113 -0
- package/lib/setup-systemd.sh +224 -0
- package/lib/setup-tailscale.sh +188 -0
- package/lib/setup-voice.sh +101 -0
- package/lib/skill-audit.sh +449 -0
- package/lib/verify.sh +177 -0
- package/package.json +57 -0
- package/scripts/release.sh +133 -0
- package/uninstall.sh +161 -0
- package/v2/README.md +50 -0
- package/v2/configs/providers.yaml +79 -0
- package/v2/configs/skills.yaml +36 -0
- package/v2/install.sh +116 -0
- package/v2/lib/common.sh +285 -0
- package/v2/lib/detect-hardware.sh +119 -0
- package/v2/lib/select-runtime.sh +273 -0
- package/v2/lib/setup-extras.sh +95 -0
- package/v2/lib/setup-openclaw.sh +187 -0
- package/v2/lib/setup-provider.sh +131 -0
- package/v2/lib/verify.sh +133 -0
- package/web/index.html +1835 -0
- package/web/install.sh +387 -0
- package/web/logo-hero.svg +11 -0
- package/web/logo-icon.svg +12 -0
- package/web/logo.svg +17 -0
- package/web/vercel.json +8 -0
package/lib/diagnose.sh
ADDED
|
@@ -0,0 +1,636 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
set -euo pipefail
|
|
3
|
+
|
|
4
|
+
# Run the full ClawSpark diagnostic suite and persist a report.
# Globals:   GREEN RED YELLOW CYAN BOLD RESET (read, color codes)
#            _DIAG_OS (written, cached `uname -s` used by sub-diagnostics)
# Outputs:   human-readable check results on stdout
# Returns:   0 — individual check failures are counted, not fatal
diagnose_system() {
  log_info "Running full system diagnostic..."
  hr

  # Counters and accumulated report text. Declared `local` here so the
  # _diagnose_* helpers and _write_report can read/update them through
  # bash's dynamic scoping.
  local pass=0
  local fail=0
  local warn=0
  local report_lines=()

  # Record a passing check (green tick) and mirror it into the report.
  _check_pass() {
    printf ' %s✓%s %s\n' "${GREEN}" "${RESET}" "$1"
    pass=$(( pass + 1 ))
    report_lines+=("PASS: $1")
  }
  # Record a failing check (red cross) and mirror it into the report.
  _check_fail() {
    printf ' %s✗%s %s\n' "${RED}" "${RESET}" "$1"
    fail=$(( fail + 1 ))
    report_lines+=("FAIL: $1")
  }
  # Record a non-fatal warning (yellow bang) and mirror it into the report.
  _check_warn() {
    printf ' %s!%s %s\n' "${YELLOW}" "${RESET}" "$1"
    warn=$(( warn + 1 ))
    report_lines+=("WARN: $1")
  }
  # Print a section banner on screen and a plain "=== name ===" in the report.
  _section() {
    printf '\n %s%s── %s ──%s\n\n' "${BOLD}" "${CYAN}" "$1" "${RESET}"
    report_lines+=("" "=== $1 ===")
  }

  printf '\n'

  # Cache the OS name once; several sub-diagnostics branch on it.
  _DIAG_OS=$(uname -s 2>/dev/null || echo "unknown")

  _diagnose_system_requirements
  _diagnose_gpu_hardware
  _diagnose_ollama
  _diagnose_openclaw
  _diagnose_skills
  _diagnose_network
  _diagnose_security
  _diagnose_logs

  printf '\n'
  hr

  # Summary: all green / green with warnings / failures present.
  if (( fail == 0 && warn == 0 )); then
    log_success "All ${pass} checks passed. System is healthy."
  elif (( fail == 0 )); then
    log_success "${pass} passed, ${warn} warning(s). System is functional."
  else
    log_warn "${pass} passed, ${warn} warning(s), ${fail} failed. Review items marked with ✗ above."
  fi

  _write_report
}
|
|
59
|
+
|
|
60
|
+
# Check base platform requirements: OS, bash >= 3.2, node >= 22, npm,
# python3, curl, free disk space, and RAM.
# Relies on _section/_check_* and _DIAG_OS from diagnose_system's scope.
_diagnose_system_requirements() {
  _section "System Requirements"

  local kernel_ver
  kernel_ver=$(uname -r 2>/dev/null || echo "unknown")
  case "${_DIAG_OS}" in
    Linux) _check_pass "OS: Linux (${kernel_ver})" ;;
    Darwin) _check_pass "OS: macOS (${kernel_ver})" ;;
    *) _check_warn "OS: ${_DIAG_OS} (${kernel_ver}) — untested platform" ;;
  esac

  # Parse "major.minor" out of BASH_VERSION and enforce >= 3.2.
  local bash_ver="${BASH_VERSION:-unknown}"
  local bash_major="${bash_ver%%.*}"
  if [[ "${bash_major}" =~ ^[0-9]+$ ]] && (( bash_major >= 3 )); then
    local bash_minor="${bash_ver#*.}"
    bash_minor="${bash_minor%%.*}"
    if (( bash_major > 3 )) || (( bash_minor >= 2 )); then
      _check_pass "Bash: ${bash_ver} (>= 3.2)"
    else
      _check_fail "Bash: ${bash_ver} (needs >= 3.2)"
    fi
  else
    _check_fail "Bash: ${bash_ver} (needs >= 3.2)"
  fi

  # Node prints "vNN.x.y"; strip the leading "v" then take the major.
  if check_command node; then
    local node_ver
    node_ver=$(node --version 2>/dev/null || echo "v0")
    local node_major="${node_ver#v}"
    node_major="${node_major%%.*}"
    if [[ "${node_major}" =~ ^[0-9]+$ ]] && (( node_major >= 22 )); then
      _check_pass "Node.js: ${node_ver} (>= 22)"
    else
      _check_fail "Node.js: ${node_ver} (needs >= 22)"
    fi
  else
    _check_fail "Node.js: not installed"
  fi

  if check_command npm; then
    local npm_ver
    npm_ver=$(npm --version 2>/dev/null || echo "unknown")
    _check_pass "npm: ${npm_ver}"
  else
    _check_fail "npm: not installed"
  fi

  if check_command python3; then
    local py_ver
    py_ver=$(python3 --version 2>/dev/null | awk '{print $2}' || echo "unknown")
    _check_pass "Python3: ${py_ver}"
  else
    _check_warn "Python3: not installed (optional but recommended)"
  fi

  if check_command curl; then
    _check_pass "curl: available"
  else
    _check_fail "curl: not installed"
  fi

  # Free space on the filesystem holding $HOME; df -k gives KiB on both
  # GNU and BSD. Non-numeric output leaves free_gb at 0 -> "unable".
  local free_gb=0
  if check_command df; then
    local free_kb
    free_kb=$(df -k "${HOME}" 2>/dev/null | awk 'NR==2{print $4}' || echo 0)
    if [[ "${free_kb}" =~ ^[0-9]+$ ]]; then
      free_gb=$(( free_kb / 1024 / 1024 ))
    fi
  fi
  if (( free_gb >= 20 )); then
    _check_pass "Disk space: ${free_gb}GB free"
  elif (( free_gb > 0 )); then
    _check_warn "Disk space: ${free_gb}GB free (< 20GB recommended)"
  else
    _check_warn "Disk space: unable to determine"
  fi

  # Memory: /proc/meminfo on Linux, sysctl + vm_stat on macOS.
  local total_mem_mb=0 avail_mem_mb=0
  if [[ -f /proc/meminfo ]]; then
    local total_kb avail_kb
    total_kb=$(awk '/^MemTotal:/ {print $2}' /proc/meminfo 2>/dev/null || echo 0)
    avail_kb=$(awk '/^MemAvailable:/ {print $2}' /proc/meminfo 2>/dev/null || echo 0)
    total_mem_mb=$(( total_kb / 1024 ))
    avail_mem_mb=$(( avail_kb / 1024 ))
  elif check_command sysctl; then
    local mem_bytes
    mem_bytes=$(sysctl -n hw.memsize 2>/dev/null || echo 0)
    total_mem_mb=$(( mem_bytes / 1024 / 1024 ))
    if check_command vm_stat; then
      # vm_stat reports pages; "Pages free" values end with a '.' that
      # the gsub strips. NOTE(review): "free" undercounts available
      # memory on macOS (excludes inactive pages) — acceptable here.
      local vm_output page_size free_pages
      vm_output=$(vm_stat 2>/dev/null || echo "")
      page_size=$(echo "${vm_output}" | awk '/page size/ {print $8}' || echo 4096)
      page_size="${page_size:-4096}"
      free_pages=$(echo "${vm_output}" | awk '/Pages free:/ {gsub(/\./,"",$3); print $3}' || echo 0)
      avail_mem_mb=$(( free_pages * page_size / 1024 / 1024 ))
    fi
  fi
  local total_gb=$(( total_mem_mb / 1024 ))
  local avail_gb=$(( avail_mem_mb / 1024 ))
  if (( total_mem_mb > 0 )); then
    _check_pass "Memory: ${avail_gb}GB available / ${total_gb}GB total"
  else
    _check_warn "Memory: unable to determine"
  fi
}
|
|
165
|
+
|
|
166
|
+
# Detect GPU hardware: NVIDIA via nvidia-smi, Apple GPUs via
# system_profiler on Darwin; otherwise warn (CPU-only is not fatal).
# Relies on _section/_check_* and _DIAG_OS from diagnose_system's scope.
_diagnose_gpu_hardware() {
  _section "GPU & Hardware"

  if check_command nvidia-smi; then
    # `xargs` with no args trims surrounding whitespace from each field.
    local gpu_name gpu_vram driver_ver cuda_ver
    gpu_name=$(nvidia-smi --query-gpu=name --format=csv,noheader,nounits 2>/dev/null | head -n1 | xargs || echo "unknown")
    gpu_vram=$(nvidia-smi --query-gpu=memory.total --format=csv,noheader,nounits 2>/dev/null | head -n1 | xargs || echo "0")
    driver_ver=$(nvidia-smi --query-gpu=driver_version --format=csv,noheader,nounits 2>/dev/null | head -n1 | xargs || echo "unknown")
    # CUDA version only appears in the human-readable banner, not in
    # --query-gpu, hence the sed scrape.
    cuda_ver=$(nvidia-smi 2>/dev/null | sed -n 's/.*CUDA Version: \([0-9.]*\).*/\1/p' | head -n1)
    cuda_ver="${cuda_ver:-unknown}"

    _check_pass "GPU: ${gpu_name}"
    if [[ "${gpu_vram}" =~ ^[0-9]+$ ]] && (( gpu_vram > 0 )); then
      # nvidia-smi reports memory.total in MiB.
      local vram_gb=$(( gpu_vram / 1024 ))
      _check_pass "VRAM: ${vram_gb}GB (${gpu_vram}MB)"
    else
      _check_warn "VRAM: unable to determine (unified memory?)"
    fi
    _check_pass "Driver: ${driver_ver}, CUDA: ${cuda_ver}"
  elif [[ "${_DIAG_OS}" == "Darwin" ]]; then
    if check_command system_profiler; then
      local gpu_info
      gpu_info=$(system_profiler SPDisplaysDataType 2>/dev/null | grep -E "Chipset Model|VRAM|Metal" || echo "")
      if [[ -n "${gpu_info}" ]]; then
        local chip
        chip=$(echo "${gpu_info}" | grep "Chipset Model" | head -n1 | sed 's/.*: //' | xargs || echo "unknown")
        _check_pass "GPU: ${chip} (macOS)"
      else
        _check_warn "GPU: unable to query system_profiler"
      fi
    else
      _check_warn "GPU: system_profiler not available"
    fi
  else
    _check_warn "GPU: no NVIDIA GPU detected (nvidia-smi not found)"
  fi
}
|
|
203
|
+
|
|
204
|
+
# Check Ollama end to end: binary, local API, installed models, a short
# real inference round-trip, and model storage usage. Returns early (0)
# when a prerequisite is missing so later sections still run.
# Relies on _section/_check_* from diagnose_system's scope.
_diagnose_ollama() {
  _section "Ollama Health"

  if check_command ollama; then
    local ollama_ver
    ollama_ver=$(ollama --version 2>/dev/null | awk '{print $NF}' || echo "unknown")
    _check_pass "Ollama binary: ${ollama_ver}"
  else
    _check_fail "Ollama binary: not installed"
    return 0
  fi

  if curl -sf --max-time 5 http://127.0.0.1:11434/ &>/dev/null; then
    _check_pass "Ollama API: responding at 127.0.0.1:11434"
  else
    _check_fail "Ollama API: not responding at 127.0.0.1:11434"
    return 0
  fi

  local models_output first_model=""
  models_output=$(ollama list 2>/dev/null || echo "")
  if [[ -n "${models_output}" ]]; then
    local model_count
    # Count non-empty lines after the header row.
    # FIX: the old `|| echo 0` fallback corrupted the value — `grep -c`
    # prints "0" itself AND exits non-zero when nothing matches, so the
    # fallback appended a second line ("0<newline>0") and the arithmetic
    # test below then blew up under `set -e`. `|| true` keeps grep's own
    # "0" and only absorbs the non-zero status.
    model_count=$(echo "${models_output}" | tail -n +2 | grep -c '.' || true)
    _check_pass "Ollama models: ${model_count} loaded"
    if (( model_count > 0 )); then
      # First data row's first column = model name used for the test below.
      first_model=$(echo "${models_output}" | awk 'NR==2{print $1}')
      echo "${models_output}" | tail -n +2 | while IFS= read -r line; do
        local name size
        name=$(echo "${line}" | awk '{print $1}')
        size=$(echo "${line}" | awk '{print $3, $4}')
        printf ' %s%s%s %s\n' "${CYAN}" "${name}" "${RESET}" "${size}"
      done
    fi
  else
    _check_warn "Ollama models: unable to list"
  fi

  if [[ -z "${first_model}" ]]; then
    _check_warn "Inference test: no models available"
    return 0
  fi

  # Tiny generation request (5 tokens) to prove the model actually runs.
  printf ' %sRunning inference test...%s ' "${BLUE}" "${RESET}"
  local start_s end_s elapsed_ms
  start_s=$(date +%s 2>/dev/null || echo 0)

  # Escape backslashes and quotes so the model name is safe inside the
  # hand-built JSON payload.
  local safe_model
  safe_model=$(printf '%s' "${first_model}" | sed 's/\\/\\\\/g; s/"/\\"/g')

  local test_response
  test_response=$(curl -sf --max-time 30 http://127.0.0.1:11434/api/generate \
    -d "{\"model\":\"${safe_model}\",\"prompt\":\"Say hi\",\"stream\":false,\"options\":{\"num_predict\":5}}" 2>/dev/null || echo "")

  end_s=$(date +%s 2>/dev/null || echo 0)

  if [[ -n "${test_response}" ]]; then
    # Whole-second resolution only, hence the "~" in the message.
    elapsed_ms=$(( (end_s - start_s) * 1000 ))
    printf '%s✓%s ~%sms\n' "${GREEN}" "${RESET}" "${elapsed_ms}"
    _check_pass "Inference test: ~${elapsed_ms}ms response time"
  else
    printf '%s✗%s\n' "${RED}" "${RESET}"
    _check_warn "Inference test: timed out or no models available"
  fi

  # Model storage: per-user dir first, then the system service's dir.
  local ollama_dir=""
  if [[ -d "${HOME}/.ollama/models" ]]; then
    ollama_dir="${HOME}/.ollama/models"
  elif [[ -d "/usr/share/ollama/.ollama/models" ]]; then
    ollama_dir="/usr/share/ollama/.ollama/models"
  fi
  if [[ -n "${ollama_dir}" ]]; then
    local ollama_size
    ollama_size=$(du -sh "${ollama_dir}" 2>/dev/null | awk '{print $1}' || echo "unknown")
    _check_pass "Ollama storage: ${ollama_size} (${ollama_dir})"
  else
    _check_warn "Ollama storage: directory not found"
  fi
}
|
|
283
|
+
|
|
284
|
+
_diagnose_openclaw() {
|
|
285
|
+
_section "OpenClaw Health"
|
|
286
|
+
|
|
287
|
+
if check_command openclaw; then
|
|
288
|
+
local oc_ver
|
|
289
|
+
oc_ver=$(openclaw --version 2>/dev/null || echo "unknown")
|
|
290
|
+
_check_pass "OpenClaw binary: ${oc_ver}"
|
|
291
|
+
else
|
|
292
|
+
_check_fail "OpenClaw binary: not installed"
|
|
293
|
+
return 0
|
|
294
|
+
fi
|
|
295
|
+
|
|
296
|
+
local config_file="${HOME}/.openclaw/openclaw.json"
|
|
297
|
+
if [[ -f "${config_file}" ]]; then
|
|
298
|
+
if python3 -c "import json, sys; json.load(open(sys.argv[1]))" "${config_file}" 2>/dev/null; then
|
|
299
|
+
_check_pass "Config: ${config_file} (valid JSON)"
|
|
300
|
+
else
|
|
301
|
+
_check_fail "Config: ${config_file} (invalid JSON)"
|
|
302
|
+
fi
|
|
303
|
+
else
|
|
304
|
+
_check_fail "Config: ${config_file} not found"
|
|
305
|
+
fi
|
|
306
|
+
|
|
307
|
+
local gw_running=false
|
|
308
|
+
if systemctl is-active --quiet clawspark-gateway.service 2>/dev/null; then
|
|
309
|
+
_check_pass "Gateway process: running (systemd)"
|
|
310
|
+
gw_running=true
|
|
311
|
+
elif [[ -f "${CLAWSPARK_DIR}/gateway.pid" ]]; then
|
|
312
|
+
local gw_pid
|
|
313
|
+
gw_pid=$(cat "${CLAWSPARK_DIR}/gateway.pid" 2>/dev/null || echo "")
|
|
314
|
+
if [[ -n "${gw_pid}" ]] && kill -0 "${gw_pid}" 2>/dev/null; then
|
|
315
|
+
_check_pass "Gateway process: running (PID ${gw_pid})"
|
|
316
|
+
gw_running=true
|
|
317
|
+
else
|
|
318
|
+
_check_fail "Gateway process: not running (stale PID file)"
|
|
319
|
+
fi
|
|
320
|
+
else
|
|
321
|
+
_check_fail "Gateway process: not running"
|
|
322
|
+
fi
|
|
323
|
+
|
|
324
|
+
if systemctl is-active --quiet clawspark-nodehost.service 2>/dev/null; then
|
|
325
|
+
_check_pass "Node host process: running (systemd)"
|
|
326
|
+
elif [[ -f "${CLAWSPARK_DIR}/node.pid" ]]; then
|
|
327
|
+
local node_pid
|
|
328
|
+
node_pid=$(cat "${CLAWSPARK_DIR}/node.pid" 2>/dev/null || echo "")
|
|
329
|
+
if [[ -n "${node_pid}" ]] && kill -0 "${node_pid}" 2>/dev/null; then
|
|
330
|
+
_check_pass "Node host process: running (PID ${node_pid})"
|
|
331
|
+
else
|
|
332
|
+
_check_fail "Node host process: not running (stale PID file)"
|
|
333
|
+
fi
|
|
334
|
+
else
|
|
335
|
+
_check_fail "Node host process: not running"
|
|
336
|
+
fi
|
|
337
|
+
|
|
338
|
+
if [[ -f "${config_file}" ]] && check_command python3; then
|
|
339
|
+
local restrictions
|
|
340
|
+
restrictions=$(python3 -c "
|
|
341
|
+
import json, sys
|
|
342
|
+
c = json.load(open(sys.argv[1]))
|
|
343
|
+
fs_only = c.get('tools',{}).get('fs',{}).get('workspaceOnly', False)
|
|
344
|
+
deny_cmds = c.get('gateway',{}).get('nodes',{}).get('denyCommands', [])
|
|
345
|
+
print('ws=' + str(fs_only) + ' deny=' + str(len(deny_cmds)))
|
|
346
|
+
" "${config_file}" 2>/dev/null || echo "")
|
|
347
|
+
|
|
348
|
+
if [[ "${restrictions}" == *"ws=True"* ]]; then
|
|
349
|
+
_check_pass "Tool restriction: workspaceOnly enabled"
|
|
350
|
+
else
|
|
351
|
+
_check_warn "Tool restriction: workspaceOnly not set"
|
|
352
|
+
fi
|
|
353
|
+
|
|
354
|
+
if [[ "${restrictions}" =~ deny=([0-9]+) ]]; then
|
|
355
|
+
local deny_count="${BASH_REMATCH[1]}"
|
|
356
|
+
if (( deny_count > 0 )); then
|
|
357
|
+
_check_pass "Tool restriction: ${deny_count} denied command patterns"
|
|
358
|
+
else
|
|
359
|
+
_check_warn "Tool restriction: no denyCommands configured"
|
|
360
|
+
fi
|
|
361
|
+
fi
|
|
362
|
+
fi
|
|
363
|
+
|
|
364
|
+
local openclaw_dir="${HOME}/.openclaw"
|
|
365
|
+
if [[ -f "${openclaw_dir}/SOUL.md" ]]; then
|
|
366
|
+
_check_pass "SOUL.md: present"
|
|
367
|
+
else
|
|
368
|
+
_check_warn "SOUL.md: not found in ${openclaw_dir}"
|
|
369
|
+
fi
|
|
370
|
+
|
|
371
|
+
if [[ -f "${openclaw_dir}/TOOLS.md" ]]; then
|
|
372
|
+
_check_pass "TOOLS.md: present"
|
|
373
|
+
else
|
|
374
|
+
_check_warn "TOOLS.md: not found in ${openclaw_dir}"
|
|
375
|
+
fi
|
|
376
|
+
}
|
|
377
|
+
|
|
378
|
+
# Audit installed skills: count them, cross-check against the configured
# skill list, and grep each skill for suspicious content patterns.
# Relies on _section/_check_* and CLAWSPARK_DIR from the caller's scope;
# calls _parse_enabled_skills (defined elsewhere in the package).
_diagnose_skills() {
  _section "Skills Health"

  local skills_dir="${HOME}/.openclaw/skills"
  local skill_count=0

  if [[ -d "${skills_dir}" ]]; then
    # One directory per skill, directly under skills_dir.
    skill_count=$(find "${skills_dir}" -mindepth 1 -maxdepth 1 -type d 2>/dev/null | wc -l | tr -d ' ')
    _check_pass "Installed skills: ${skill_count} (${skills_dir})"

    # Every slug enabled in skills.yaml should have a matching directory.
    local missing=0
    if [[ -f "${CLAWSPARK_DIR}/skills.yaml" ]]; then
      local slug
      while IFS= read -r slug; do
        [[ -z "${slug}" ]] && continue
        if [[ ! -d "${skills_dir}/${slug}" ]]; then
          _check_warn "Skill directory missing: ${slug}"
          missing=$(( missing + 1 ))
        fi
      done < <(_parse_enabled_skills "${CLAWSPARK_DIR}/skills.yaml")
      if (( missing == 0 && skill_count > 0 )); then
        _check_pass "All configured skills have directories"
      fi
    fi
  else
    _check_warn "Skills directory not found: ${skills_dir}"
  fi

  # Lightweight static audit: recursive grep per skill for red-flag
  # patterns. Heuristic only — absence of a hit is not proof of safety.
  local suspicious=0
  if [[ -d "${skills_dir}" ]]; then
    local skill_dir
    for skill_dir in "${skills_dir}"/*/; do
      [[ ! -d "${skill_dir}" ]] && continue
      local skill_name
      skill_name=$(basename "${skill_dir}")

      # Network tools pointed at darknet-style TLDs.
      if grep -rqlE '(curl|wget|nc|ncat)\s+[^|]*\.(onion|i2p|bit)' "${skill_dir}" 2>/dev/null; then
        _check_fail "Suspicious skill '${skill_name}': darknet URL pattern"
        suspicious=$(( suspicious + 1 ))
      fi
      # eval() over base64-decoded payloads (JS obfuscation).
      if grep -rqlE 'eval\s*\(\s*(atob|Buffer\.from)' "${skill_dir}" 2>/dev/null; then
        _check_fail "Suspicious skill '${skill_name}': obfuscated eval pattern"
        suspicious=$(( suspicious + 1 ))
      fi
      # Blunt keyword scan; may false-positive on docs mentioning these.
      if grep -rqlE '(exfiltrate|steal|keylog|reverse.shell)' "${skill_dir}" 2>/dev/null; then
        _check_fail "Suspicious skill '${skill_name}': malicious keyword detected"
        suspicious=$(( suspicious + 1 ))
      fi
    done
    if (( suspicious == 0 && skill_count > 0 )); then
      _check_pass "Skill audit: no suspicious patterns found"
    fi
  fi
}
|
|
432
|
+
|
|
433
|
+
# Check listening ports for the three local services, general internet
# reachability, and the recorded air-gap state.
# Relies on _section/_check_* and CLAWSPARK_DIR from the caller's scope.
_diagnose_network() {
  _section "Network & Ports"

  # Report whether TCP $1 is listening, labeled $2. Tries lsof, then ss,
  # then netstat; a free port is a warning (service may just be stopped).
  _check_port() {
    local port="$1"
    local label="$2"
    if check_command lsof; then
      local proc
      proc=$(lsof -i :"${port}" -sTCP:LISTEN -t 2>/dev/null | head -n1 || echo "")
      if [[ -n "${proc}" ]]; then
        local pname
        pname=$(ps -p "${proc}" -o comm= 2>/dev/null || echo "unknown")
        _check_pass "Port ${port} (${label}): in use by ${pname} (PID ${proc})"
      else
        _check_warn "Port ${port} (${label}): not in use"
      fi
    elif check_command ss; then
      if ss -tlnp 2>/dev/null | grep -q ":${port} "; then
        _check_pass "Port ${port} (${label}): in use"
      else
        _check_warn "Port ${port} (${label}): not in use"
      fi
    elif check_command netstat; then
      if netstat -tlnp 2>/dev/null | grep -q ":${port} "; then
        _check_pass "Port ${port} (${label}): in use"
      else
        _check_warn "Port ${port} (${label}): not in use"
      fi
    else
      _check_warn "Port ${port} (${label}): unable to check (no lsof/ss/netstat)"
    fi
  }

  _check_port 11434 "Ollama"
  _check_port 18789 "OpenClaw node"
  _check_port 8900 "ClawMetry"

  # Two independent endpoints so one outage doesn't read as "offline".
  if check_command curl; then
    if curl -sf --max-time 5 https://registry.npmjs.org/ &>/dev/null; then
      _check_pass "Internet: reachable (npmjs.org)"
    elif curl -sf --max-time 5 https://github.com &>/dev/null; then
      _check_pass "Internet: reachable (github.com)"
    else
      _check_warn "Internet: unreachable (air-gapped or offline)"
    fi
  else
    _check_warn "Internet: cannot check (curl not available)"
  fi

  # Air-gap mode is a recorded choice, so either value is a "pass".
  if [[ -f "${CLAWSPARK_DIR}/airgap.state" ]]; then
    local airgap_state
    airgap_state=$(cat "${CLAWSPARK_DIR}/airgap.state" 2>/dev/null || echo "unknown")
    if [[ "${airgap_state}" == "true" ]]; then
      _check_pass "Air-gap: enabled"
    else
      _check_pass "Air-gap: disabled"
    fi
  else
    _check_pass "Air-gap: not configured"
  fi
}
|
|
494
|
+
|
|
495
|
+
# Check security posture: token file permissions, gateway bind address,
# UFW firewall state, and sandbox state.
# Relies on _section/_check_* and CLAWSPARK_DIR from the caller's scope.
_diagnose_security() {
  _section "Security"

  local token_file="${CLAWSPARK_DIR}/token"
  if [[ -f "${token_file}" ]]; then
    # stat -f is BSD/macOS, stat -c is GNU; try both.
    local perms
    perms=$(stat -f '%Lp' "${token_file}" 2>/dev/null || stat -c '%a' "${token_file}" 2>/dev/null || echo "unknown")
    if [[ "${perms}" == "600" ]]; then
      _check_pass "Token file: exists, permissions ${perms}"
    else
      _check_warn "Token file: exists, permissions ${perms} (should be 600)"
    fi
  else
    _check_fail "Token file: not found at ${token_file}"
  fi

  # Find the gateway's --bind flag: systemd unit first, then the live
  # process cmdline (/proc on Linux, ps elsewhere) via the PID file.
  local bind_value=""
  if check_command systemctl && systemctl cat clawspark-gateway.service &>/dev/null; then
    bind_value=$(systemctl show -p ExecStart clawspark-gateway.service 2>/dev/null \
      | grep -oE '\-\-bind[= ]*(loopback|[^ ]*)' | head -1 || echo "")
  fi
  if [[ -z "${bind_value}" ]] && [[ -f "${CLAWSPARK_DIR}/gateway.pid" ]]; then
    local gw_pid
    gw_pid=$(cat "${CLAWSPARK_DIR}/gateway.pid" 2>/dev/null || echo "")
    if [[ -n "${gw_pid}" ]] && [[ -d "/proc/${gw_pid}" ]]; then
      # /proc cmdline is NUL-delimited; flatten to spaces before grepping.
      bind_value=$(tr '\0' ' ' < "/proc/${gw_pid}/cmdline" 2>/dev/null \
        | grep -oE '\-\-bind[= ]*(loopback|[^ ]*)' | head -1 || echo "")
    elif [[ -n "${gw_pid}" ]]; then
      bind_value=$(ps -p "${gw_pid}" -o args= 2>/dev/null \
        | grep -oE '\-\-bind[= ]*(loopback|[^ ]*)' | head -1 || echo "")
    fi
  fi
  if [[ "${bind_value}" == *"loopback"* ]]; then
    _check_pass "Gateway binding: localhost only (--bind loopback)"
  elif [[ -n "${bind_value}" ]]; then
    _check_warn "Gateway binding: ${bind_value} (expected --bind loopback)"
  else
    _check_warn "Gateway binding: unable to determine (process not running or no --bind flag)"
  fi

  # `sudo -n` never prompts; without cached credentials it fails and we
  # fall through to "unable to determine".
  if check_command ufw; then
    local ufw_status
    ufw_status=$(sudo -n ufw status 2>/dev/null | head -n1 || echo "")
    # FIX: test "inactive" BEFORE "active" — the old order matched the
    # substring "active" inside "Status: inactive" and reported an
    # inactive firewall as active.
    if [[ "${ufw_status}" == *"inactive"* ]]; then
      _check_warn "UFW firewall: inactive"
    elif [[ "${ufw_status}" == *"active"* ]]; then
      _check_pass "UFW firewall: active"
    else
      _check_warn "UFW firewall: unable to determine status"
    fi
  else
    _check_warn "UFW firewall: not installed"
  fi

  # Sandbox off is a recorded choice (pass with a hint); never configured
  # at all is a warning.
  if [[ -f "${CLAWSPARK_DIR}/sandbox.state" ]]; then
    local sandbox_state
    sandbox_state=$(cat "${CLAWSPARK_DIR}/sandbox.state" 2>/dev/null || echo "unknown")
    if [[ "${sandbox_state}" == "true" ]]; then
      _check_pass "Sandbox: enabled"
    else
      _check_pass "Sandbox: disabled (enable with 'clawspark sandbox on')"
    fi
  else
    _check_warn "Sandbox: not configured"
  fi
}
|
|
561
|
+
|
|
562
|
+
# Scan gateway/install logs: show the most recent error lines and flag
# well-known failure signatures (OOM, port-in-use, model-not-found).
# Relies on _section/_check_* and CLAWSPARK_DIR from the caller's scope.
_diagnose_logs() {
  _section "Logs Analysis"

  # Report on log file $1 (labeled $2): warn and echo the last 5 lines
  # containing "error" (case-insensitive), pass when none.
  _check_log_errors() {
    local logfile="$1" label="$2"
    if [[ -f "${logfile}" ]]; then
      local errors
      errors=$(grep -i 'error' "${logfile}" 2>/dev/null | tail -n 5 || echo "")
      if [[ -n "${errors}" ]]; then
        _check_warn "${label}: recent errors found"
        printf ' %s--- Last 5 errors ---%s\n' "${YELLOW}" "${RESET}"
        echo "${errors}" | while IFS= read -r eline; do
          printf ' %s\n' "${eline}"
        done
      else
        _check_pass "${label}: no errors"
      fi
    else
      _check_warn "${label}: file not found"
    fi
  }

  _check_log_errors "${CLAWSPARK_DIR}/gateway.log" "gateway.log"
  _check_log_errors "${CLAWSPARK_DIR}/install.log" "install.log"

  # Known failure signatures across both logs. One combined grep as a
  # cheap pre-filter, then per-category greps only when it hits.
  # (A failing `grep -q && var=true` is safe under set -e: failures on
  # the left of && do not abort the script.)
  local oom_found=false port_found=false model_found=false
  local logfile
  for logfile in "${CLAWSPARK_DIR}/gateway.log" "${CLAWSPARK_DIR}/install.log"; do
    [[ ! -f "${logfile}" ]] && continue
    if grep -qiE '(out of memory|oom|killed process|cannot allocate|address already in use|port.*in use|EADDRINUSE|model.*not found|pull.*failed|no such model)' "${logfile}" 2>/dev/null; then
      grep -qiE '(out of memory|oom|killed process|cannot allocate)' "${logfile}" 2>/dev/null && oom_found=true
      grep -qiE '(address already in use|port.*in use|EADDRINUSE)' "${logfile}" 2>/dev/null && port_found=true
      grep -qiE '(model.*not found|pull.*failed|no such model)' "${logfile}" 2>/dev/null && model_found=true
    fi
  done

  if ${oom_found}; then
    _check_fail "Log pattern: OOM (out of memory) errors detected"
  fi
  if ${port_found}; then
    _check_warn "Log pattern: port-in-use errors detected"
  fi
  if ${model_found}; then
    _check_warn "Log pattern: model-not-found errors detected"
  fi
  if ! ${oom_found} && ! ${port_found} && ! ${model_found}; then
    _check_pass "Log patterns: no common error patterns detected"
  fi
}
|
|
611
|
+
|
|
612
|
+
# Persist the collected diagnostic results as a plain-text report.
# Reads pass/warn/fail and report_lines[] from diagnose_system's dynamic
# scope; writes ${CLAWSPARK_DIR}/diagnose-report.txt and tells the user
# where it is.
_write_report() {
  mkdir -p "${CLAWSPARK_DIR}"

  local out_file="${CLAWSPARK_DIR}/diagnose-report.txt"
  local stamp
  stamp=$(date '+%Y-%m-%d %H:%M:%S')

  {
    printf '%s\n' "ClawSpark Diagnostic Report"
    printf '%s\n' "Generated: ${stamp}"
    printf '%s\n' "OS: $(uname -s 2>/dev/null || echo 'unknown') $(uname -r 2>/dev/null || echo '')"
    printf '%s\n' "Bash: ${BASH_VERSION:-unknown}"
    printf '%s\n' "========================================="
    local entry
    for entry in "${report_lines[@]}"; do
      printf '%s\n' "${entry}"
    done
    printf '\n'
    printf '%s\n' "========================================="
    printf '%s\n' "Pass: ${pass} Warn: ${warn} Fail: ${fail}"
  } > "${out_file}"

  printf '\n'
  log_success "Debug report saved to: ${out_file}"
  log_info "Share this file when reporting issues."
}
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
#!/usr/bin/env bash
# render-diagram.sh -- Renders Mermaid diagram to PNG via Kroki API
# Deployed to ~/workspace/ by clawspark installer.
#
# Usage:
#   echo "graph TD; A-->B" | render-diagram.sh [output-name]
#   render-diagram.sh [output-name] < file.mmd
#
# Output: Prints the absolute path to the rendered PNG on stdout.
# The PNG is saved to /tmp/openclaw/ which is in OpenClaw's allowed
# media directory list, so the agent can send it via WhatsApp/Telegram.
set -euo pipefail

# Default output name includes a timestamp to avoid collisions.
OUTPUT_NAME="${1:-diagram-$(date +%s)}"
OUTPUT_DIR="/tmp/openclaw"
mkdir -p "${OUTPUT_DIR}"

# Read mermaid code from stdin
MERMAID_CODE=$(cat)

if [ -z "${MERMAID_CODE}" ]; then
  echo "Error: No mermaid code provided on stdin" >&2
  echo "Usage: echo 'graph TD; A-->B' | render-diagram.sh [name]" >&2
  exit 1
fi

# Write to temp file so curl can stream it with --data-binary.
MMD_FILE="${OUTPUT_DIR}/${OUTPUT_NAME}.mmd"
printf '%s\n' "${MERMAID_CODE}" > "${MMD_FILE}"

# Render via Kroki API (public, free, no auth needed).
# FIX: the fallback used to live INSIDE the command substitution
# (`$(curl … || echo "000")`), so on a curl failure the captured value
# was curl's own -w output concatenated with "000" (e.g. "000000"),
# garbling the error message. Assign the fallback after the
# substitution instead.
PNG_FILE="${OUTPUT_DIR}/${OUTPUT_NAME}.png"
HTTP_CODE=$(curl -sS -w '%{http_code}' -X POST https://kroki.io/mermaid/png \
  -H 'Content-Type: text/plain' \
  --data-binary @"${MMD_FILE}" \
  -o "${PNG_FILE}" 2>/dev/null) || HTTP_CODE="000"

if [ "${HTTP_CODE}" = "200" ] && [ -s "${PNG_FILE}" ]; then
  # Success -- print the path for the agent to use
  echo "${PNG_FILE}"
else
  # Clean up failed output so a truncated PNG is never handed out.
  rm -f "${PNG_FILE}"
  echo "Error: Kroki API returned HTTP ${HTTP_CODE}" >&2
  echo "Mermaid code might have syntax errors. Check the diagram syntax." >&2
  exit 1
fi
|