@hitechclaw/clawspark 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +35 -0
  2. package/LICENSE +21 -0
  3. package/README.md +378 -0
  4. package/clawspark +2715 -0
  5. package/configs/models.yaml +108 -0
  6. package/configs/skill-packs.yaml +44 -0
  7. package/configs/skills.yaml +37 -0
  8. package/install.sh +387 -0
  9. package/lib/common.sh +249 -0
  10. package/lib/detect-hardware.sh +156 -0
  11. package/lib/diagnose.sh +636 -0
  12. package/lib/render-diagram.sh +47 -0
  13. package/lib/sandbox-commands.sh +415 -0
  14. package/lib/secure.sh +244 -0
  15. package/lib/select-model.sh +442 -0
  16. package/lib/setup-browser.sh +138 -0
  17. package/lib/setup-dashboard.sh +228 -0
  18. package/lib/setup-inference.sh +128 -0
  19. package/lib/setup-mcp.sh +142 -0
  20. package/lib/setup-messaging.sh +242 -0
  21. package/lib/setup-models.sh +121 -0
  22. package/lib/setup-openclaw.sh +808 -0
  23. package/lib/setup-sandbox.sh +188 -0
  24. package/lib/setup-skills.sh +113 -0
  25. package/lib/setup-systemd.sh +224 -0
  26. package/lib/setup-tailscale.sh +188 -0
  27. package/lib/setup-voice.sh +101 -0
  28. package/lib/skill-audit.sh +449 -0
  29. package/lib/verify.sh +177 -0
  30. package/package.json +57 -0
  31. package/scripts/release.sh +133 -0
  32. package/uninstall.sh +161 -0
  33. package/v2/README.md +50 -0
  34. package/v2/configs/providers.yaml +79 -0
  35. package/v2/configs/skills.yaml +36 -0
  36. package/v2/install.sh +116 -0
  37. package/v2/lib/common.sh +285 -0
  38. package/v2/lib/detect-hardware.sh +119 -0
  39. package/v2/lib/select-runtime.sh +273 -0
  40. package/v2/lib/setup-extras.sh +95 -0
  41. package/v2/lib/setup-openclaw.sh +187 -0
  42. package/v2/lib/setup-provider.sh +131 -0
  43. package/v2/lib/verify.sh +133 -0
  44. package/web/index.html +1835 -0
  45. package/web/install.sh +387 -0
  46. package/web/logo-hero.svg +11 -0
  47. package/web/logo-icon.svg +12 -0
  48. package/web/logo.svg +17 -0
  49. package/web/vercel.json +8 -0
package/lib/select-model.sh
@@ -0,0 +1,442 @@
+ #!/usr/bin/env bash
+ # lib/select-model.sh — Recommends and lets the user pick an LLM based on hardware.
+ # Uses llmfit (https://github.com/AlexsJones/llmfit) for non-DGX-Spark platforms.
+ # Models are verified against the Ollama library before being shown.
+ # Exports: SELECTED_MODEL_ID, SELECTED_MODEL_NAME, SELECTED_MODEL_CTX
+ set -euo pipefail
+
+ select_model() {
+   log_info "Selecting model for ${HW_PLATFORM}..."
+
+   # ── If --model was passed on command line, use it directly ──────────────
+   if [[ -n "${FLAG_MODEL:-}" ]]; then
+     SELECTED_MODEL_ID="${FLAG_MODEL}"
+     SELECTED_MODEL_NAME="${FLAG_MODEL}"
+     SELECTED_MODEL_CTX=32768
+     export SELECTED_MODEL_ID SELECTED_MODEL_NAME SELECTED_MODEL_CTX
+     log_success "Model set via command line: ${SELECTED_MODEL_ID}"
+     return 0
+   fi
+
+   # ── Select based on platform ───────────────────────────────────────────
+   case "${HW_PLATFORM}" in
+     dgx-spark)
+       _select_model_curated_spark
+       ;;
+     mac|jetson|rtx|generic|*)
+       # Try llmfit first, fall back to curated list
+       _select_model_llmfit || _select_model_curated_fallback
+       ;;
+   esac
+
+   SELECTED_MODEL_CTX=32768
+   export SELECTED_MODEL_ID SELECTED_MODEL_NAME SELECTED_MODEL_CTX
+
+   printf '\n'
+   print_box \
+     "${BOLD}Model Selected${RESET}" \
+     "" \
+     "Name : ${CYAN}${SELECTED_MODEL_NAME}${RESET}" \
+     "ID : ${SELECTED_MODEL_ID}" \
+     "Context : ${SELECTED_MODEL_CTX} tokens"
+
+   log_success "Model selected: ${SELECTED_MODEL_NAME} (${SELECTED_MODEL_ID})"
+ }
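For orientation, the sketch below shows roughly how a caller is expected to consume what select_model exports. The sourcing order, the detect_hardware call, and the final ollama pull are illustrative assumptions (the published installer wires these steps through install.sh and the other lib/setup-*.sh scripts); only select_model, the SELECTED_MODEL_* exports, and the CLAWSPARK_* / FLAG_MODEL globals it reads are taken from the code above.

#!/usr/bin/env bash
# Hypothetical caller sketch -- not part of the package.
set -euo pipefail
source lib/common.sh            # assumed home of log_info/log_success/print_box/prompt_choice
source lib/detect-hardware.sh   # assumed to set HW_PLATFORM, HW_TOTAL_RAM_MB, HW_GPU_VRAM_MB
source lib/select-model.sh

CLAWSPARK_LOG="${CLAWSPARK_LOG:-/tmp/clawspark-install.log}"   # log path normally provided by the installer
CLAWSPARK_DEFAULTS="${CLAWSPARK_DEFAULTS:-false}"              # non-interactive flag read by the prompt step
detect_hardware                                                # assumed entry point of detect-hardware.sh
select_model                                                   # exports SELECTED_MODEL_ID / _NAME / _CTX

ollama pull "${SELECTED_MODEL_ID}"                             # make the chosen model available locally
echo "Ready: ${SELECTED_MODEL_NAME} (context: ${SELECTED_MODEL_CTX} tokens)"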
+
+ # ── DGX Spark: curated list (tested on real hardware + llmfit verified) ───
+ _select_model_curated_spark() {
+   # Top models for DGX Spark (128 GB unified memory, NVIDIA GB10).
+   # Ranked by llmfit score, cross-checked against Ollama library.
+   # tok/s estimates from llmfit; qwen3.5:35b-a3b measured at ~59 tok/s.
+   local -a model_ids=(
+     "qwen3.5:35b-a3b"
+     "qwen3.5:122b-a10b"
+     "qwen3-coder-next"
+     "qwen3-next"
+     "qwen3-coder:30b"
+   )
+   local -a model_names=(
+     "Qwen 3.5 35B-A3B"
+     "Qwen 3.5 122B-A10B"
+     "Qwen3 Coder Next 80B"
+     "Qwen3 Next 80B-A3B"
+     "Qwen3 Coder 30B-A3B"
+   )
+   local -a model_labels=(
+     "qwen3.5:35b-a3b (default) -- 18GB, ~59 tok/s, proven on Spark"
+     "qwen3.5:122b-a10b -- 33GB, ~45 tok/s, top llmfit score (95.5)"
+     "qwen3-coder-next -- 52GB, ~109 tok/s est., coding/agentic"
+     "qwen3-next -- 50GB, ~59 tok/s est., chat/instruct"
+     "qwen3-coder:30b -- 19GB, ~58 tok/s est., coding lightweight"
+     "Let me pick my own model"
+   )
+   local default_idx=0
+
+   _present_model_choices model_ids model_names model_labels "${default_idx}"
+ }
+
+ # ── llmfit-powered selection for all other platforms ──────────────────────
+ _select_model_llmfit() {
+   _ensure_llmfit || return 1
+
+   printf ' %s->%s Analyzing hardware with llmfit... ' "${CYAN}" "${RESET}" >/dev/tty
+   local json
+   json=$(llmfit recommend --json -n 20 --min-fit good 2>>"${CLAWSPARK_LOG}") || {
+     printf '%sskipped%s\n' "${YELLOW}" "${RESET}" >/dev/tty
+     log_warn "llmfit recommend failed -- falling back to curated list."
+     return 1
+   }
+   printf '%sdone%s\n' "${GREEN}" "${RESET}" >/dev/tty
+
+   # Parse llmfit JSON and map to Ollama model IDs (returns up to 10 candidates)
+   local parsed
+   parsed=$(_parse_llmfit_to_ollama "${json}" 2>>"${CLAWSPARK_LOG}") || return 1
+
+   if [[ -z "${parsed}" ]]; then
+     log_warn "No llmfit models mapped to Ollama -- falling back to curated list."
+     return 1
+   fi
+
+   # Verify each candidate exists on Ollama library, keep top 5
+   printf ' %s->%s Verifying models on Ollama... ' "${CYAN}" "${RESET}" >/dev/tty
+   local -a model_ids=()
+   local -a model_names=()
+   local -a model_labels=()
+
+   while IFS='|' read -r oid name label; do
+     if _check_ollama_model "${oid}"; then
+       model_ids+=("${oid}")
+       model_names+=("${name}")
+       model_labels+=("${label}")
+     fi
+     [[ ${#model_ids[@]} -ge 5 ]] && break
+   done <<< "${parsed}"
+   printf '%s%d verified%s\n' "${GREEN}" "${#model_ids[@]}" "${RESET}" >/dev/tty
+
+   if [[ ${#model_ids[@]} -eq 0 ]]; then
+     log_warn "No llmfit models found on Ollama -- falling back to curated list."
+     return 1
+   fi
+
+   # Add custom option
+   model_labels+=("Let me pick my own model")
+   local default_idx=0
+
+   log_info "Found ${#model_ids[@]} compatible model(s) on Ollama for your hardware."
+   _present_model_choices model_ids model_names model_labels "${default_idx}"
+ }
+
+ # ── Check if a model exists on the Ollama library ─────────────────────────
+ _check_ollama_model() {
+   local model_id="$1"
+
+   # Try local first (instant if already pulled)
+   ollama show "${model_id}" &>/dev/null 2>&1 && return 0
+
+   # Check Ollama library website (model page exists = model available)
+   local base="${model_id%%:*}"
+   curl -sf --max-time 5 "https://ollama.com/library/${base}" -o /dev/null 2>/dev/null
+ }
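Worth noting when reading the fallback above: the curl check only verifies that the library page for the base name (everything before the colon) exists, not that the specific tag does, so a typo in the tag of a real model still passes here and only fails later at pull time. A manual equivalent of the same check, assuming ollama and curl are on PATH:

model_id="qwen3-coder:30b"
if ollama show "${model_id}" &>/dev/null; then
  echo "already available locally"
elif curl -sf --max-time 5 "https://ollama.com/library/${model_id%%:*}" -o /dev/null; then
  echo "library page exists (the tag itself is not verified)"
else
  echo "not found on ollama.com"
fi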
+
+ # ── Curated fallback for non-Spark platforms ──────────────────────────────
+ _select_model_curated_fallback() {
+   local -a model_ids=()
+   local -a model_names=()
+   local -a model_labels=()
+   local default_idx=0
+
+   case "${HW_PLATFORM}" in
+     jetson)
+       model_ids=("nemotron-3-nano" "glm-4.7-flash")
+       model_names=("Nemotron 3 Nano 30B" "GLM 4.7 Flash")
+       model_labels=(
+         "nemotron-3-nano (default) -- optimized for Jetson"
+         "glm-4.7-flash -- compact & fast"
+       )
+       ;;
+     mac)
+       # Apple Silicon with unified memory -- model size depends on RAM
+       local ram_gb=$(( ${HW_TOTAL_RAM_MB:-0} / 1024 ))
+       if (( ram_gb >= 32 )); then
+         model_ids=("qwen3.5:35b-a3b" "qwen3-coder:30b" "glm-4.7-flash")
+         model_names=("Qwen 3.5 35B-A3B" "Qwen3 Coder 30B" "GLM 4.7 Flash")
+         model_labels=(
+           "qwen3.5:35b-a3b (default) -- MoE, fits 32GB+ Mac"
+           "qwen3-coder:30b -- coding-focused MoE"
+           "glm-4.7-flash -- compact & fast"
+         )
+       elif (( ram_gb >= 16 )); then
+         model_ids=("glm-4.7-flash" "qwen3:8b" "deepseek-v2")
+         model_names=("GLM 4.7 Flash" "Qwen3 8B" "DeepSeek V2")
+         model_labels=(
+           "glm-4.7-flash (default) -- good balance for 16GB"
+           "qwen3:8b -- lightweight 8B model"
+           "deepseek-v2 -- MoE, good coding"
+         )
+       else
+         model_ids=("qwen3:8b" "glm-4.7-flash")
+         model_names=("Qwen3 8B" "GLM 4.7 Flash")
+         model_labels=(
+           "qwen3:8b (default) -- fits 8GB Mac"
+           "glm-4.7-flash -- compact"
+         )
+       fi
+       ;;
+     rtx)
+       local vram_gb=$(( ${HW_GPU_VRAM_MB:-0} / 1024 ))
+       if (( vram_gb >= 24 )); then
+         # RTX 3090, 4090, A5000+
+         model_ids=("qwen3.5:35b-a3b" "qwen3-coder:30b" "glm-4.7-flash")
+         model_names=("Qwen 3.5 35B-A3B" "Qwen3 Coder 30B" "GLM 4.7 Flash")
+         model_labels=(
+           "qwen3.5:35b-a3b (default) -- MoE, fits 24GB (~18GB model)"
+           "qwen3-coder:30b -- coding-focused MoE"
+           "glm-4.7-flash -- compact & fast"
+         )
+       elif (( vram_gb >= 16 )); then
+         # RTX 3080 16GB, 4080, A4000
+         model_ids=("glm-4.7-flash" "qwen3:14b" "qwen3:8b")
+         model_names=("GLM 4.7 Flash" "Qwen3 14B" "Qwen3 8B")
+         model_labels=(
+           "glm-4.7-flash (default) -- fits 16GB"
+           "qwen3:14b -- mid-size model"
+           "qwen3:8b -- lightweight"
+         )
+       elif (( vram_gb >= 12 )); then
+         # RTX 3060 12GB
+         model_ids=("qwen3:14b" "qwen3:8b" "glm-4.7-flash")
+         model_names=("Qwen3 14B" "Qwen3 8B" "GLM 4.7 Flash")
+         model_labels=(
+           "qwen3:14b (default) -- fits 12GB"
+           "qwen3:8b -- smaller, faster"
+           "glm-4.7-flash -- compact"
+         )
+       else
+         # RTX 3070/4060/4060 Ti (8GB), RTX 2060 (6GB)
+         model_ids=("qwen3:8b" "phi4-mini")
+         model_names=("Qwen3 8B" "Phi4 Mini")
+         model_labels=(
+           "qwen3:8b (default) -- fits 8GB"
+           "phi4-mini -- very compact"
+         )
+       fi
+       ;;
+     *)
+       model_ids=("glm-4.7-flash" "qwen3:8b")
+       model_names=("GLM 4.7 Flash" "Qwen3 8B")
+       model_labels=(
+         "glm-4.7-flash (default)"
+         "qwen3:8b -- lightweight"
+       )
+       ;;
+   esac
+
+   model_labels+=("Let me pick my own model")
+   _present_model_choices model_ids model_names model_labels "${default_idx}"
+ }
+
+ # ── Shared: present choices and set SELECTED_MODEL_ID/NAME ────────────────
+ # Uses array names (Bash 3.2 compatible: no nameref)
+ _present_model_choices() {
+   local _ids_name="$1"
+   local _names_name="$2"
+   local _labels_name="$3"
+   local _default=$4
+   local _labels_len
+   eval "_labels_len=\${#${_labels_name}[@]}"
+
+   local choice
+   choice=$(prompt_choice "Which model would you like to run?" "${_labels_name}" "${_default}")
+
+   if [[ "${choice}" == "Let me pick my own model" ]]; then
+     if [[ "${CLAWSPARK_DEFAULTS}" == "true" ]]; then
+       eval "SELECTED_MODEL_ID=\${${_ids_name}[$_default]}"
+       eval "SELECTED_MODEL_NAME=\${${_names_name}[$_default]}"
+     else
+       printf '\n %sEnter the Ollama model ID (e.g. llama3.1:8b):%s ' "${BOLD}" "${RESET}" >/dev/tty
+       local custom_id
+       read -r custom_id </dev/tty || custom_id=""
+       if [[ -z "${custom_id}" ]]; then
+         log_warn "No model entered -- falling back to default."
+         eval "SELECTED_MODEL_ID=\${${_ids_name}[$_default]}"
+         eval "SELECTED_MODEL_NAME=\${${_names_name}[$_default]}"
+       else
+         SELECTED_MODEL_ID="${custom_id}"
+         SELECTED_MODEL_NAME="${custom_id}"
+       fi
+     fi
+   else
+     local i found=false
+     local labels_last=$(( _labels_len - 2 ))
+     for i in $(seq 0 "${labels_last}"); do
+       local label_val
+       eval "label_val=\${${_labels_name}[$i]}"
+       if [[ "${label_val}" == "${choice}" ]]; then
+         eval "SELECTED_MODEL_ID=\${${_ids_name}[$i]}"
+         eval "SELECTED_MODEL_NAME=\${${_names_name}[$i]}"
+         found=true
+         break
+       fi
+     done
+     if [[ "${found}" != "true" ]]; then
+       log_warn "Could not match selection -- using default model."
+       eval "SELECTED_MODEL_ID=\${${_ids_name}[$_default]}"
+       eval "SELECTED_MODEL_NAME=\${${_names_name}[$_default]}"
+     fi
+   fi
+ }
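The eval-based indirection above is the Bash 3.2-compatible substitute for local -n namerefs (macOS still ships Bash 3.2, and namerefs need 4.3+). A minimal stand-alone illustration of the same pass-the-array-name technique, independent of the installer:

#!/usr/bin/env bash
# Print element $2 of the array whose *name* is passed as $1 -- no nameref needed.
get_by_name() {
  local _arr_name="$1" _idx="$2" _val
  eval "_val=\${${_arr_name}[${_idx}]}"
  printf '%s\n' "${_val}"
}

fruits=("apple" "banana" "cherry")
get_by_name fruits 1   # -> banana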
+
+ # ── Install llmfit if not present ─────────────────────────────────────────
+ _ensure_llmfit() {
+   if command -v llmfit &>/dev/null; then
+     return 0
+   fi
+
+   log_info "Installing llmfit for hardware-aware model selection..."
+   printf ' %s->%s Installing llmfit ... ' "${CYAN}" "${RESET}" >/dev/tty
+
+   if curl -fsSL https://llmfit.axjns.dev/install.sh | sh >> "${CLAWSPARK_LOG}" 2>&1; then
+     hash -r 2>/dev/null || true
+     if command -v llmfit &>/dev/null; then
+       printf '%sOK%s\n' "${GREEN}" "${RESET}" >/dev/tty
+       return 0
+     fi
+     # Check common install locations
+     local p
+     for p in "${HOME}/.local/bin" "${HOME}/bin" "${HOME}/.cargo/bin" "/usr/local/bin"; do
+       if [[ -x "${p}/llmfit" ]]; then
+         export PATH="${p}:${PATH}"
+         printf '%sOK%s\n' "${GREEN}" "${RESET}" >/dev/tty
+         return 0
+       fi
+     done
+   fi
+
+   printf '%sskipped%s\n' "${YELLOW}" "${RESET}" >/dev/tty
+   log_warn "Could not install llmfit -- using curated model list."
+   return 1
+ }
+
+ # ── Parse llmfit JSON and map to Ollama model IDs ─────────────────────────
+ # Returns up to 10 candidates as: ollama_id|display_name|label (one per line)
+ _parse_llmfit_to_ollama() {
+   local json="$1"
+
+   python3 -c "
+ import json, sys, re
+
+ # Map llmfit HF-style model names to Ollama model IDs.
+ # Each entry: (regex_pattern, ollama_template)
+ # Templates use {size} for parameter count extracted from the name/param field.
+ PATTERNS = [
+     # Qwen3 Coder Next (must be before generic Qwen3 patterns)
+     (r'(?i)qwen3-coder-next|qwen3.*coder.*next', 'qwen3-coder-next'),
+     # Qwen3 Coder (30B-A3B is the main variant on Ollama)
+     (r'(?i)qwen3.*coder.*30b', 'qwen3-coder:30b'),
+     (r'(?i)qwen3.*coder.*480b', 'qwen3-coder:480b'),
+     (r'(?i)qwen3.*coder', 'qwen3-coder:30b'),
+     # Qwen3 Next (80B-A3B is the main variant on Ollama)
+     (r'(?i)qwen3-next|qwen3.*next.*80b', 'qwen3-next'),
+     # Qwen3.5 family (specific MoE variants first)
+     (r'(?i)qwen.*3\.5.*35b.*a3b', 'qwen3.5:35b-a3b'),
+     (r'(?i)qwen.*3\.5.*122b.*a10b', 'qwen3.5:122b-a10b'),
+     (r'(?i)qwen.*3\.5.*122b', 'qwen3.5:122b'),
+     (r'(?i)qwen.*3\.5.*(\d+)b', 'qwen3.5:{size}b'),
+     # Generic Qwen3 (non-coder, non-next)
+     (r'(?i)qwen.*?3[^.].*?(\d+)b', 'qwen3:{size}b'),
+     # Qwen 2.x
+     (r'(?i)qwen.*2\.5.*(\d+)b', 'qwen2.5:{size}b'),
+     (r'(?i)qwen.*2.*(\d+)b', 'qwen2:{size}b'),
+     # Llama family
+     (r'(?i)llama.*3\.3.*(\d+)b', 'llama3.3:{size}b'),
+     (r'(?i)llama.*3\.1.*(\d+)b', 'llama3.1:{size}b'),
+     (r'(?i)llama.*3.*(\d+)b', 'llama3:{size}b'),
+     # Microsoft Phi
+     (r'(?i)phi.*4.*mini', 'phi4-mini'),
+     (r'(?i)phi.*4.*(\d+)b', 'phi4:{size}b'),
+     (r'(?i)phi.*3.*mini', 'phi3:mini'),
+     (r'(?i)phi.*3.*(\d+)b', 'phi3:{size}b'),
+     # Google Gemma
+     (r'(?i)gemma.*3.*(\d+)b', 'gemma3:{size}b'),
+     (r'(?i)gemma.*2.*(\d+)b', 'gemma2:{size}b'),
+     # Mistral
+     (r'(?i)codestral.*(\d+)b', 'codestral:{size}b'),
+     (r'(?i)mistral.*nemo', 'mistral-nemo'),
+     (r'(?i)mistral.*large.*(\d+)b', 'mistral-large:{size}b'),
+     (r'(?i)mistral.*small.*(\d+)b', 'mistral-small:{size}b'),
+     (r'(?i)mixtral.*(\d+)x(\d+)b', 'mixtral:{size}x{size2}b'),
+     (r'(?i)mistral.*(\d+)b', 'mistral:{size}b'),
+     # DeepSeek
+     (r'(?i)deepseek.*r1.*(\d+)b', 'deepseek-r1:{size}b'),
+     (r'(?i)deepseek.*v3', 'deepseek-v3'),
+     (r'(?i)deepseek.*v2.*lite', 'deepseek-v2:lite'),
+     (r'(?i)deepseek.*v2.*16b', 'deepseek-v2:16b'),
+     (r'(?i)deepseek.*v2', 'deepseek-v2'),
+     # NVIDIA
+     (r'(?i)nemotron.*nano', 'nemotron-3-nano'),
+     (r'(?i)nemotron.*mini.*(\d+)b', 'nemotron-mini:{size}b'),
+     # GLM
+     (r'(?i)glm.*4.*flash', 'glm-4.7-flash'),
+     (r'(?i)glm.*4.*(\d+)b', 'glm4:{size}b'),
+     # Cohere
+     (r'(?i)command.*r.*plus.*(\d+)b', 'command-r-plus:{size}b'),
+     (r'(?i)command.*r.*(\d+)b', 'command-r:{size}b'),
+     # StarCoder / Code models
+     (r'(?i)starcoder.*2.*(\d+)b', 'starcoder2:{size}b'),
+     # Yi
+     (r'(?i)yi.*1\.5.*(\d+)b', 'yi:1.5-{size}b'),
+     (r'(?i)yi.*(\d+)b', 'yi:{size}b'),
+ ]
+
+ def map_to_ollama(name, param_count):
+     text = name + ' ' + param_count
+     for pattern, template in PATTERNS:
+         m = re.search(pattern, text)
+         if m:
+             groups = m.groups()
+             result = template
+             if '{size}' in result:
+                 size = groups[0] if groups else ''
+                 if not size:
+                     pm = re.search(r'(\d+)', param_count)
+                     size = pm.group(1) if pm else ''
+                 result = result.replace('{size}', size.lower())
+             if '{size2}' in result and len(groups) >= 2:
+                 result = result.replace('{size2}', groups[1].lower())
+             return result
+     return None
+
+ data = json.loads(sys.argv[1])
+ models = data.get('models', [])
+ seen = set()
+ results = []
+
+ for m in models:
+     ollama_id = map_to_ollama(m.get('name', ''), m.get('parameter_count', ''))
+     if not ollama_id or ollama_id in seen:
+         continue
+     seen.add(ollama_id)
+
+     score = m.get('score', 0)
+     tps = m.get('estimated_tps', 0)
+     fit = m.get('fit_level', 'Unknown')
+     quant = m.get('best_quant', '')
+     mem = m.get('memory_required_gb', 0)
+     use_case = m.get('use_case', '')
+
+     # Shorten use_case for label
+     cat = ''
+     uc = use_case.lower()
+     if 'cod' in uc or 'agent' in uc:
+         cat = ', coding'
+     elif 'chat' in uc or 'instruct' in uc:
+         cat = ', chat'
+
+     default_tag = ' (recommended)' if not results else ''
+     label = f'{ollama_id}{default_tag} -- {mem:.0f}GB, ~{tps:.0f} tok/s, {fit} fit{cat}'
+     print(f'{ollama_id}|{ollama_id}|{label}')
+     results.append(ollama_id)
+     if len(results) >= 10:
+         break
+ " "${json}"
+ }
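The mapper can be exercised on its own with a hand-written payload. The keys used below (models, name, parameter_count, estimated_tps, fit_level, memory_required_gb, use_case) are simply the ones the parser reads above; whether real llmfit --json output has exactly this shape is not shown by this diff, so treat it as a smoke test of the regex mapping only.

# Stand-alone smoke test for the llmfit -> Ollama mapping (payload is hand-written).
source lib/select-model.sh

sample='{"models":[{"name":"meta-llama/Llama-3.1-8B-Instruct","parameter_count":"8B",
"score":88,"estimated_tps":55,"fit_level":"Great","best_quant":"Q4_K_M",
"memory_required_gb":6,"use_case":"chat"}]}'

_parse_llmfit_to_ollama "${sample}"
# expected shape of the output line:
# llama3.1:8b|llama3.1:8b|llama3.1:8b (recommended) -- 6GB, ~55 tok/s, Great fit, chat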
package/lib/setup-browser.sh
@@ -0,0 +1,138 @@
+ #!/usr/bin/env bash
+ # lib/setup-browser.sh -- Browser automation setup for OpenClaw.
+ # Detects or installs Chromium/Chrome and configures the browser tool
+ # in managed headless mode.
+ set -euo pipefail
+
+ setup_browser() {
+   log_info "Setting up browser automation..."
+
+   local browser_bin=""
+   if check_command chromium-browser; then
+     browser_bin="chromium-browser"
+   elif check_command chromium; then
+     browser_bin="chromium"
+   elif check_command google-chrome; then
+     browser_bin="google-chrome"
+   elif check_command google-chrome-stable; then
+     browser_bin="google-chrome-stable"
+   fi
+
+   # macOS: check common app bundle paths if no CLI binary found
+   if [[ -z "${browser_bin}" && "$(uname)" == "Darwin" ]]; then
+     local -a mac_browsers=(
+       "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
+       "/Applications/Chromium.app/Contents/MacOS/Chromium"
+       "/Applications/Brave Browser.app/Contents/MacOS/Brave Browser"
+       "/Applications/Microsoft Edge.app/Contents/MacOS/Microsoft Edge"
+     )
+     for candidate in "${mac_browsers[@]}"; do
+       if [[ -x "${candidate}" ]]; then
+         browser_bin="${candidate}"
+         break
+       fi
+     done
+   fi
+
+   if [[ -n "${browser_bin}" ]]; then
+     log_success "Browser found: ${browser_bin}"
+   else
+     log_info "No browser found. Installing Chromium for browser automation..."
+     if check_command apt-get; then
+       # Ubuntu 20.04+ and Jetson: apt chromium-browser installs a snap stub
+       # that doesn't work headless or under systemd. Prefer google-chrome.
+       local _snap_chromium=false
+       if command -v snap &>/dev/null && snap list chromium &>/dev/null 2>&1; then
+         _snap_chromium=true
+       fi
+       if [[ "${_snap_chromium}" == "true" ]] || dpkg -l chromium-browser 2>/dev/null | grep -q "^ii.*snap"; then
+         log_warn "Snap Chromium detected (broken for headless/systemd). Trying Google Chrome..."
+       fi
+       # Prefer an existing Google Chrome install (works headless); otherwise install Chromium via apt
+       if ! check_command google-chrome-stable && ! check_command google-chrome; then
+         (sudo apt-get install -y chromium-browser 2>/dev/null || sudo apt-get install -y chromium) >> "${CLAWSPARK_LOG}" 2>&1 &
+         spinner $! "Installing Chromium..."
+       fi
+       if check_command google-chrome-stable; then
+         browser_bin="google-chrome-stable"
+       elif check_command google-chrome; then
+         browser_bin="google-chrome"
+       elif check_command chromium-browser; then
+         browser_bin="chromium-browser"
+       elif check_command chromium; then
+         browser_bin="chromium"
+       fi
+       if [[ -n "${browser_bin}" ]]; then
+         log_success "Browser installed: ${browser_bin}"
+       else
+         log_warn "Browser installation failed. Browser tool will not be available."
+         return 0
+       fi
+     elif check_command brew; then
+       log_info "Installing Chromium via Homebrew..."
+       (brew install --cask chromium) >> "${CLAWSPARK_LOG}" 2>&1 &
+       spinner $! "Installing Chromium..."
+       if check_command chromium || [[ -d "/Applications/Chromium.app" ]]; then
+         browser_bin=$(command -v chromium 2>/dev/null || echo "/Applications/Chromium.app/Contents/MacOS/Chromium")
+         log_success "Chromium installed: ${browser_bin}"
+       else
+         log_warn "Chromium installation failed. Browser tool will not be available."
+         return 0
+       fi
+     else
+       log_warn "No package manager found. Install Chromium manually to enable browser automation."
+       return 0
+     fi
+   fi
+
+   # Browser config: OpenClaw manages browser via agents.defaults.sandbox.browser
+   # The browser binary path is stored in clawspark's own config for the CLI
+   local cs_config="${CLAWSPARK_DIR}/config.json"
+   python3 -c "
+ import json, sys, os
+
+ path = sys.argv[1]
+ browser_bin = sys.argv[2]
+
+ cfg = {}
+ if os.path.exists(path):
+     with open(path, 'r') as f:
+         cfg = json.load(f)
+
+ cfg['browser'] = {
+     'executablePath': browser_bin,
+     'headless': True
+ }
+
+ with open(path, 'w') as f:
+     json.dump(cfg, f, indent=2)
+ print('ok')
+ " "${cs_config}" "${browser_bin}" 2>> "${CLAWSPARK_LOG}" || {
+     log_warn "Could not save browser config"
+   }
+
+   # Clean up any invalid root-level browser keys from openclaw.json
+   local oc_config="${HOME}/.openclaw/openclaw.json"
+   if [[ -f "${oc_config}" ]]; then
+     python3 -c "
+ import json, sys
+ path = sys.argv[1]
+ with open(path, 'r') as f:
+     cfg = json.load(f)
+ changed = False
+ if 'browser' in cfg:
+     if 'mode' in cfg['browser']:
+         del cfg['browser']['mode']
+         changed = True
+     if not cfg['browser']:
+         del cfg['browser']
+         changed = True
+ if changed:
+     with open(path, 'w') as f:
+         json.dump(cfg, f, indent=2)
+ print('ok')
+ " "${oc_config}" 2>> "${CLAWSPARK_LOG}" || true
+   fi
+
+   log_success "Browser configured: ${browser_bin} (headless)."
+ }
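A quick way to confirm that the recorded binary really runs headless is to read executablePath back out of the config setup_browser just wrote and load a blank page with it. The ~/.clawspark default below is a guess for when CLAWSPARK_DIR is not exported (the real value comes from the installer), and the Chromium flags are standard ones rather than anything this diff sets:

# Headless smoke test against the executablePath written by setup_browser.
cfg="${CLAWSPARK_DIR:-$HOME/.clawspark}/config.json"   # directory default is an assumption
bin=$(python3 -c "import json, sys; print(json.load(open(sys.argv[1]))['browser']['executablePath'])" "${cfg}")
"${bin}" --headless --disable-gpu --dump-dom about:blank | head -c 200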