@stevederico/dotbot 0.28.0 → 0.31.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,3 +1,35 @@
1
+ 0.31
2
+
3
+ Document mlx_local provider
4
+ Fix CLI local auth
5
+ Remove shields.io badges
6
+ Generify personal path example
7
+
8
+ 0.30
9
+
10
+ Flush buffered plain text
11
+ Add agentLoop regression tests
12
+ Rename dottie_desktop to mlx_local
13
+ Rename getDottieDesktopStatus helper
14
+ Rename DOTTIE_DESKTOP_URL env var
15
+ Parameterize notification title
16
+ Default agentName to Assistant
17
+ Scrub host-specific doc references
18
+ Skip heartbeat without tasks
19
+ Fail closed on task fetch error
20
+ Add mlx_local provider entry
21
+ Add cron_handler regression tests
22
+
23
+ 0.29
24
+
25
+ Extract shared streamEvents
26
+ Remove dead databaseManager logging
27
+ Remove dead CDP methods
28
+ Remove dead compat aliases
29
+ Remove dead observer module
30
+ Consolidate cron row mapping
31
+ Update README sandbox docs
32
+
1
33
  0.28
2
34
 
3
35
  Add --sandbox mode
package/README.md CHANGED
@@ -5,18 +5,6 @@
5
5
  The ultra-lean AI agent.<br>
6
6
  11k lines. 53 tools. 0 dependencies.
7
7
  </h3>
8
- <p align="center">
9
- <a href="https://opensource.org/licenses/mit">
10
- <img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="MIT License">
11
- </a>
12
- <a href="https://github.com/stevederico/dotbot/stargazers">
13
- <img src="https://img.shields.io/github/stars/stevederico/dotbot?style=social" alt="GitHub stars">
14
- </a>
15
- <a href="https://github.com/stevederico/dotbot">
16
- <img src="https://img.shields.io/badge/version-0.25-green" alt="version">
17
- </a>
18
- <img src="https://img.shields.io/badge/LOC-11k-orange" alt="Lines of Code">
19
- </p>
20
8
  </div>
21
9
 
22
10
  <br />
@@ -27,9 +15,10 @@
27
15
 
28
16
  | | dotbot | nanobot | OpenClaw |
29
17
  |---|:---:|:---:|:---:|
30
- | **Lines of Code** | **11k** | 22k | 1M+ |
18
+ | **Lines of Code** | **~11k** | 22k | 1M+ |
31
19
  | **Tools** | **53** | ~10 | ~50 |
32
- | **Dependencies** | Minimal | Heavy | Heavy |
20
+ | **Dependencies** | **0** | Heavy | Heavy |
21
+ | **Sandbox Mode** | **Built-in** | No | Requires NemoClaw |
33
22
 
34
23
  Everything you need for AI agents. Nothing you don't. No bloated abstractions. No dependency hell. Just a clean, focused agent that works.
35
24
 
@@ -43,7 +32,9 @@ A **streaming AI agent** with tool execution, autonomous tasks, and scheduled jo
43
32
  ```bash
44
33
  dotbot "What's the weather in San Francisco?"
45
34
  dotbot # Interactive mode
35
+ dotbot --sandbox # Sandbox mode (restricted tools)
46
36
  dotbot serve --port 3000
37
+ dotbot models # List available models
47
38
  dotbot tools # List all 53 tools
48
39
  ```
49
40
 
@@ -80,6 +71,49 @@ dotbot stats
80
71
  dotbot memory
81
72
  ```
82
73
 
74
+ ### Sandbox Mode
75
+
76
+ Run dotbot with restricted tool access — deny-by-default.
77
+
78
+ ```bash
79
+ # Full lockdown — safe tools only (memory, search, weather, tasks)
80
+ dotbot --sandbox "What is 2+2?"
81
+
82
+ # Allow specific domains for web_fetch and browser_navigate
83
+ dotbot --sandbox --allow github
84
+ dotbot --sandbox --allow github --allow slack
85
+
86
+ # Allow specific tool groups
87
+ dotbot --sandbox --allow messages
88
+ dotbot --sandbox --allow images
89
+
90
+ # Mix domains and tool groups
91
+ dotbot --sandbox --allow github --allow messages --allow npm
92
+
93
+ # Custom domain
94
+ dotbot --sandbox --allow api.mycompany.com
95
+
96
+ # Persistent config in ~/.dotbotrc
97
+ # { "sandbox": true, "sandboxAllow": ["github", "slack", "messages"] }
98
+ ```
99
+
100
+ **What's blocked by default:**
101
+
102
+ | Category | Tools | How to unlock |
103
+ |----------|-------|---------------|
104
+ | Filesystem writes | `file_write`, `file_delete`, `file_move`, `folder_create` | Cannot unlock |
105
+ | Arbitrary HTTP | `web_fetch` | `--allow <domain>` |
106
+ | Browser | `browser_navigate` | `--allow <domain>` |
107
+ | Code execution | `run_code` | Not blocked — sandboxed by the Node.js permission model |
108
+ | Messaging | `message_*` | `--allow messages` |
109
+ | Images | `image_*` | `--allow images` |
110
+ | Notifications | `notify_user` | `--allow notifications` |
111
+ | App generation | `app_generate`, `app_validate` | Cannot unlock |
112
+
113
+ **What's always allowed:** `memory_*`, `web_search`, `grokipedia_search`, `file_read`, `file_list`, `weather_get`, `event_*`, `task_*`, `trigger_*`, `schedule_job`, `list_jobs`, `toggle_job`, `cancel_job`
114
+
115
+ **Domain presets:** `github`, `slack`, `discord`, `npm`, `pypi`, `jira`, `huggingface`, `docker`, `telegram`
116
+
83
117
  ### Library Usage
84
118
 
85
119
  ```bash
@@ -139,9 +173,13 @@ for await (const event of agent.chat({
139
173
  - **Cerebras** — ultra-fast inference
140
174
  - **Ollama** — local models, no API cost
141
175
 
176
+ ### 🔒 **Sandbox Mode**
177
+ - **Deny-by-default** tool access — no files, code, browser, or messaging
178
+ - **Domain allowlists** — `--allow github`, `--allow slack`
179
+ - **Preset-based** tool unlocking — `--allow messages`, `--allow images`
180
+
142
181
  ### 💾 **Pluggable Storage**
143
182
  - **SQLite** — zero dependencies with Node.js 22.5+
144
- - **MongoDB** — scalable with full-text search
145
183
  - **Memory** — in-memory for testing
146
184
 
147
185
  ### 📊 **Full Audit Trail**
@@ -154,7 +192,7 @@ for await (const event of agent.chat({
154
192
  ## CLI Reference
155
193
 
156
194
  ```
157
- dotbot v0.25 — AI agent CLI
195
+ dotbot — AI agent CLI
158
196
 
159
197
  Usage:
160
198
  dotbot "message" One-shot query
@@ -164,6 +202,7 @@ Usage:
164
202
  echo "msg" | dotbot Pipe input from stdin
165
203
 
166
204
  Commands:
205
+ models List available models from provider
167
206
  doctor Check environment and configuration
168
207
  tools List all available tools
169
208
  stats Show database statistics
@@ -178,10 +217,12 @@ Commands:
178
217
  events [--summary] View audit log
179
218
 
180
219
  Options:
181
- --provider, -p AI provider: xai, anthropic, openai, ollama (default: xai)
220
+ --provider, -p AI provider: xai, anthropic, openai, ollama, mlx_local (default: xai)
182
221
  --model, -m Model name (default: grok-4-1-fast-reasoning)
183
222
  --system, -s Custom system prompt (prepended to default)
184
223
  --session Resume a specific session by ID
224
+ --sandbox Restrict tools to safe subset (deny-by-default)
225
+ --allow Allow domain/preset in sandbox (github, slack, messages, etc.)
185
226
  --db SQLite database path (default: ./dotbot.db)
186
227
  --port Server port for 'serve' command
187
228
  --openai Enable OpenAI-compatible API endpoints
@@ -195,9 +236,10 @@ Environment Variables:
195
236
  ANTHROPIC_API_KEY API key for Anthropic
196
237
  OPENAI_API_KEY API key for OpenAI
197
238
  OLLAMA_BASE_URL Base URL for Ollama (default: http://localhost:11434)
239
+ MLX_LOCAL_URL Base URL for a local MLX-style OpenAI-compatible server (default: http://127.0.0.1:1316/v1)
198
240
 
199
241
  Config File:
200
- ~/.dotbotrc JSON config for defaults (provider, model, db)
242
+ ~/.dotbotrc JSON config for defaults (provider, model, db, sandbox)
201
243
  ```
202
244
 
203
245
  <br />
@@ -323,9 +365,8 @@ await agent.chat({
323
365
  | Technology | Purpose |
324
366
  |------------|---------|
325
367
  | **Node.js 22.5+** | Runtime with built-in SQLite |
326
- | **Playwright** | Browser automation |
368
+ | **Chrome DevTools Protocol** | Browser automation (zero deps) |
327
369
  | **SQLite** | Default storage (zero deps) |
328
- | **MongoDB** | Scalable storage option |
329
370
 
330
371
  <br />
331
372
 
@@ -334,12 +375,13 @@ await agent.chat({
334
375
  ```
335
376
  dotbot/
336
377
  ├── bin/
337
- │ └── dotbot.js # CLI entry point
378
+ │ └── dotbot.js # CLI entry point (REPL, server, sandbox mode)
338
379
  ├── core/
339
380
  │ ├── agent.js # Streaming agent loop
340
381
  │ ├── events.js # SSE event schemas
341
382
  │ ├── compaction.js # Context window management
342
383
  │ ├── normalize.js # Message format conversion
384
+ │ ├── failover.js # Cross-provider failover
343
385
  │ ├── cron_handler.js # Scheduled job execution
344
386
  │ └── trigger_handler.js # Event-driven triggers
345
387
  ├── storage/
@@ -347,9 +389,8 @@ dotbot/
347
389
  │ ├── TaskStore.js # Task interface
348
390
  │ ├── CronStore.js # Job scheduling interface
349
391
  │ ├── TriggerStore.js # Trigger interface
350
- ├── SQLite*.js # SQLite adapters
351
- │ └── Mongo*.js # MongoDB adapters
352
- ├── tools/ # 47 built-in tools
392
+ │ └── SQLite*.js # SQLite adapters
393
+ ├── tools/ # 53 built-in tools
353
394
  │ ├── memory.js
354
395
  │ ├── web.js
355
396
  │ ├── browser.js
package/bin/dotbot.js CHANGED
@@ -115,7 +115,7 @@ Commands:
115
115
  events [--summary] View audit log
116
116
 
117
117
  Options:
118
- --provider, -p AI provider: xai, anthropic, openai, ollama (default: xai)
118
+ --provider, -p AI provider: xai, anthropic, openai, ollama, mlx_local (default: xai)
119
119
  --model, -m Model name (default: grok-4-1-fast-reasoning)
120
120
  --system, -s Custom system prompt (prepended to default)
121
121
  --session Resume a specific session by ID
@@ -134,6 +134,7 @@ Environment Variables:
134
134
  ANTHROPIC_API_KEY API key for Anthropic
135
135
  OPENAI_API_KEY API key for OpenAI
136
136
  OLLAMA_BASE_URL Base URL for Ollama (default: http://localhost:11434)
137
+ MLX_LOCAL_URL Base URL for a local MLX-style OpenAI-compatible server (default: http://127.0.0.1:1316/v1)
137
138
 
138
139
  Config File:
139
140
  ~/.dotbotrc JSON config for defaults (provider, model, db)
@@ -295,6 +296,13 @@ async function getProviderConfig(providerId) {
295
296
  return { ...base, apiUrl: `${baseUrl}/api/chat` };
296
297
  }
297
298
 
299
+ // Local OpenAI-compatible servers (mlx_local, etc.) don't use API keys —
300
+ // they're served from localhost and the apiUrl is already baked into the
301
+ // provider config (or overridden via env var inside providers.js).
302
+ if (base.local) {
303
+ return base;
304
+ }
305
+
298
306
  const envKey = base.envKey;
299
307
  let apiKey = process.env[envKey];
300
308
 
@@ -604,50 +612,21 @@ async function initStores(dbPath, verbose = false, customSystemPrompt = '') {
604
612
  }
605
613
 
606
614
  /**
607
- * Run a single chat message and stream output.
615
+ * Stream events from an agentLoop iterable to stdout.
616
+ * Handles thinking markers, text deltas, tool status, and errors.
608
617
  *
609
- * @param {string} message - User message
610
- * @param {Object} options - CLI options
618
+ * @param {AsyncIterable<Object>} events - Async iterable of agentLoop events
619
+ * @returns {Promise<string>} Accumulated assistant text content
611
620
  */
612
- async function runChat(message, options) {
613
- const storesObj = await initStores(options.db, options.verbose, options.system);
614
- const provider = await getProviderConfig(options.provider);
615
-
616
- let session;
617
- let messages;
618
-
619
- if (options.session) {
620
- session = await storesObj.sessionStore.getSession(options.session, 'cli-user');
621
- if (!session) {
622
- console.error(`Error: Session not found: ${options.session}`);
623
- process.exit(1);
624
- }
625
- messages = [...(session.messages || []), { role: 'user', content: message }];
626
- } else {
627
- session = await storesObj.sessionStore.createSession('cli-user', options.model, options.provider);
628
- messages = [{ role: 'user', content: message }];
629
- }
630
-
631
- const context = {
632
- userID: 'cli-user',
633
- sessionId: session.id,
634
- providers: { [options.provider]: { apiKey: process.env[AI_PROVIDERS[options.provider]?.envKey] } },
635
- ...storesObj,
636
- };
637
-
621
+ async function streamEvents(events) {
638
622
  let hasThinkingText = false;
639
623
  let thinkingDone = false;
624
+ let assistantContent = '';
640
625
 
641
626
  process.stdout.write('Thinking');
642
627
  startSpinner();
643
628
 
644
- for await (const event of agentLoop({
645
- model: options.model,
646
- messages,
647
- tools: getActiveTools(options.sandbox, options.sandboxAllow),
648
- provider,
649
- context,
650
- })) {
629
+ for await (const event of events) {
651
630
  switch (event.type) {
652
631
  case 'thinking':
653
632
  if (event.text) {
@@ -669,6 +648,7 @@ async function runChat(message, options) {
669
648
  thinkingDone = true;
670
649
  }
671
650
  process.stdout.write(event.text);
651
+ assistantContent += event.text;
672
652
  break;
673
653
  case 'tool_start':
674
654
  if (!thinkingDone) {
@@ -689,11 +669,55 @@ async function runChat(message, options) {
689
669
  stopSpinner('error');
690
670
  break;
691
671
  case 'error':
672
+ stopSpinner();
692
673
  console.error(`\nError: ${event.error}`);
693
674
  break;
694
675
  }
695
676
  }
696
677
 
678
+ return assistantContent;
679
+ }
680
+
681
+ /**
682
+ * Run a single chat message and stream output.
683
+ *
684
+ * @param {string} message - User message
685
+ * @param {Object} options - CLI options
686
+ */
687
+ async function runChat(message, options) {
688
+ const storesObj = await initStores(options.db, options.verbose, options.system);
689
+ const provider = await getProviderConfig(options.provider);
690
+
691
+ let session;
692
+ let messages;
693
+
694
+ if (options.session) {
695
+ session = await storesObj.sessionStore.getSession(options.session, 'cli-user');
696
+ if (!session) {
697
+ console.error(`Error: Session not found: ${options.session}`);
698
+ process.exit(1);
699
+ }
700
+ messages = [...(session.messages || []), { role: 'user', content: message }];
701
+ } else {
702
+ session = await storesObj.sessionStore.createSession('cli-user', options.model, options.provider);
703
+ messages = [{ role: 'user', content: message }];
704
+ }
705
+
706
+ const context = {
707
+ userID: 'cli-user',
708
+ sessionId: session.id,
709
+ providers: { [options.provider]: { apiKey: process.env[AI_PROVIDERS[options.provider]?.envKey] } },
710
+ ...storesObj,
711
+ };
712
+
713
+ await streamEvents(agentLoop({
714
+ model: options.model,
715
+ messages,
716
+ tools: getActiveTools(options.sandbox, options.sandboxAllow),
717
+ provider,
718
+ context,
719
+ }));
720
+
697
721
  process.stdout.write('\n\n');
698
722
  process.exit(0);
699
723
  }
@@ -864,68 +888,14 @@ async function runRepl(options) {
864
888
  const handleMessage = async (text) => {
865
889
  messages.push({ role: 'user', content: text });
866
890
 
867
- let hasThinkingText = false;
868
- let thinkingDone = false;
869
- let assistantContent = '';
870
-
871
- process.stdout.write('Thinking');
872
- startSpinner();
873
-
874
891
  try {
875
- for await (const event of agentLoop({
892
+ const assistantContent = await streamEvents(agentLoop({
876
893
  model: options.model,
877
894
  messages: [...messages],
878
895
  tools: getActiveTools(options.sandbox, options.sandboxAllow),
879
896
  provider,
880
897
  context,
881
- })) {
882
- switch (event.type) {
883
- case 'thinking':
884
- if (event.text) {
885
- if (!hasThinkingText) {
886
- stopSpinner('');
887
- process.stdout.write('\n');
888
- hasThinkingText = true;
889
- }
890
- process.stdout.write(event.text);
891
- }
892
- break;
893
- case 'text_delta':
894
- if (!thinkingDone) {
895
- if (hasThinkingText) {
896
- process.stdout.write('\n...done thinking.\n\n');
897
- } else {
898
- stopSpinner('');
899
- }
900
- thinkingDone = true;
901
- }
902
- process.stdout.write(event.text);
903
- assistantContent += event.text;
904
- break;
905
- case 'tool_start':
906
- if (!thinkingDone) {
907
- if (hasThinkingText) {
908
- process.stdout.write('\n...done thinking.\n\n');
909
- } else {
910
- stopSpinner('');
911
- }
912
- thinkingDone = true;
913
- }
914
- process.stdout.write(`[${event.name}] `);
915
- startSpinner();
916
- break;
917
- case 'tool_result':
918
- stopSpinner('done');
919
- break;
920
- case 'tool_error':
921
- stopSpinner('error');
922
- break;
923
- case 'error':
924
- stopSpinner();
925
- console.error(`\nError: ${event.error}`);
926
- break;
927
- }
928
- }
898
+ }));
929
899
 
930
900
  if (assistantContent) {
931
901
  messages.push({ role: 'assistant', content: assistantContent });
package/core/agent.js CHANGED
@@ -31,7 +31,7 @@ const OLLAMA_BASE = "http://localhost:11434";
31
31
  * @param {Array} options.tools - Tool definitions from tools.js
32
32
  * @param {AbortSignal} [options.signal] - Optional abort signal
33
33
  * @param {Object} [options.provider] - Provider config from AI_PROVIDERS. Defaults to Ollama.
34
- * @param {Object} [options.context] - Execution context passed to tool execute functions (e.g. databaseManager, dbConfig, userID).
34
+ * @param {Object} [options.context] - Execution context passed to tool execute functions (e.g. providers, userID).
35
35
  * @yields {Object} Stream events for the frontend
36
36
  */
37
37
  export async function* agentLoop({ model, messages, tools, signal, provider, context, maxTurns }) {
@@ -174,7 +174,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
174
174
  };
175
175
  };
176
176
 
177
- // Local providers (ollama, dottie_desktop): direct fetch, no failover
177
+ // Local providers (ollama, mlx_local): direct fetch, no failover
178
178
  if (provider.local) {
179
179
  const { url, headers, body } = buildAgentRequest(provider);
180
180
  response = await fetch(url, { method: "POST", headers, body, signal });
@@ -210,8 +210,9 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
210
210
  const result = yield* parseAnthropicStream(response, fullContent, toolCalls, signal, activeProvider.id);
211
211
  fullContent = result.fullContent;
212
212
  toolCalls = result.toolCalls;
213
- } else if (activeProvider.id === "dottie_desktop") {
214
- // Dottie Desktop serves local models which may use:
213
+ } else if (activeProvider.id === "mlx_local") {
214
+ // Local MLX-style OpenAI-compatible server. Models served this way
215
+ // may emit output in one of three formats:
215
216
  // 1. gpt-oss channel tokens (<|channel|>analysis/final<|message|>)
216
217
  // 2. Native reasoning (delta.reasoning from parseOpenAIStream)
217
218
  // 3. Plain text (LFM2.5, SmolLM, etc. — no special tokens)
@@ -235,6 +236,21 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
235
236
  if (done) {
236
237
  fullContent = value.fullContent;
237
238
  toolCalls = value.toolCalls;
239
+ // Flush buffered plain-text responses that never hit the
240
+ // CHANNEL_DETECT_THRESHOLD. Happens for short greetings and
241
+ // small-talk from models that don't emit gpt-oss channel tokens
242
+ // (Gemma 4 E2B, LFM2.5, SmolLM, etc.). Without this flush, the
243
+ // rawBuffer is silently discarded and the downstream consumer
244
+ // never receives any text_delta — the UI renders an empty bubble.
245
+ // Skip if the buffer contains tool call markers so the existing
246
+ // post-loop parseToolCalls() below can handle them.
247
+ if (!usesPassthrough && !usesNativeReasoning && !analysisStarted && !finalMarkerFound && rawBuffer.length > 0) {
248
+ if (!hasToolCallMarkers(rawBuffer)) {
249
+ const textEvent = { type: "text_delta", text: rawBuffer };
250
+ validateEvent(textEvent);
251
+ yield textEvent;
252
+ }
253
+ }
238
254
  break;
239
255
  }
240
256
 
@@ -270,7 +286,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
270
286
  // the model doesn't use gpt-oss format (e.g. LFM2.5, SmolLM).
271
287
  // Flush buffer and switch to passthrough for remaining tokens.
272
288
  if (!analysisStarted && !finalMarkerFound && rawBuffer.length > CHANNEL_DETECT_THRESHOLD) {
273
- console.log("[dottie_desktop] no channel tokens after", rawBuffer.length, "chars — switching to passthrough");
289
+ console.log("[mlx_local] no channel tokens after", rawBuffer.length, "chars — switching to passthrough");
274
290
  usesPassthrough = true;
275
291
  const textEvent = { type: "text_delta", text: rawBuffer };
276
292
  validateEvent(textEvent);
@@ -285,7 +301,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
285
301
  if (aIdx !== -1) {
286
302
  analysisStarted = true;
287
303
  lastThinkingYieldPos = aIdx + ANALYSIS_MARKER.length;
288
- console.log("[dottie_desktop] analysis marker found at", aIdx, "| yieldPos:", lastThinkingYieldPos);
304
+ console.log("[mlx_local] analysis marker found at", aIdx, "| yieldPos:", lastThinkingYieldPos);
289
305
  }
290
306
  }
291
307
 
@@ -295,7 +311,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
295
311
  if (endIdx !== -1) {
296
312
  const chunk = rawBuffer.slice(lastThinkingYieldPos, endIdx);
297
313
  if (chunk) {
298
- console.log("[dottie_desktop] thinking (final):", chunk.slice(0, 80));
314
+ console.log("[mlx_local] thinking (final):", chunk.slice(0, 80));
299
315
  const thinkingEvent = {
300
316
  type: "thinking",
301
317
  text: chunk,
@@ -309,7 +325,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
309
325
  } else {
310
326
  const chunk = rawBuffer.slice(lastThinkingYieldPos);
311
327
  if (chunk) {
312
- console.log("[dottie_desktop] thinking (incr):", chunk.slice(0, 80));
328
+ console.log("[mlx_local] thinking (incr):", chunk.slice(0, 80));
313
329
  const thinkingEvent = {
314
330
  type: "thinking",
315
331
  text: chunk,
@@ -325,7 +341,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
325
341
  // Check for final channel marker
326
342
  const fIdx = rawBuffer.indexOf(FINAL_MARKER);
327
343
  if (fIdx !== -1) {
328
- console.log("[dottie_desktop] final marker found at", fIdx, "| bufLen:", rawBuffer.length);
344
+ console.log("[mlx_local] final marker found at", fIdx, "| bufLen:", rawBuffer.length);
329
345
  finalMarkerFound = true;
330
346
  lastFinalYieldPos = fIdx + FINAL_MARKER.length;
331
347
  const pending = rawBuffer.slice(lastFinalYieldPos);
@@ -740,8 +756,9 @@ export async function getOllamaStatus() {
740
756
  }
741
757
 
742
758
  /**
743
- * Check if Dottie Desktop is running and list available models.
744
- * Uses the OpenAI-compatible /v1/models endpoint.
759
+ * Check if a local OpenAI-compatible model server is running and list
760
+ * available models. Defaults to the MLX LM server convention
761
+ * (http://localhost:1316/v1) and can be overridden with MLX_LOCAL_URL.
745
762
  *
746
763
  * @returns {Promise<{running: boolean, models: Array<{name: string}>}>}
747
764
  */
@@ -765,8 +782,8 @@ function stripGptOssTokens(text) {
765
782
  return text.replace(TOKEN_RE, "").trim();
766
783
  }
767
784
 
768
- export async function getDottieDesktopStatus() {
769
- const baseUrl = (process.env.DOTTIE_DESKTOP_URL || 'http://localhost:1316/v1').replace(/\/v1$/, '');
785
+ export async function getMlxLocalStatus() {
786
+ const baseUrl = (process.env.MLX_LOCAL_URL || 'http://localhost:1316/v1').replace(/\/v1$/, '');
770
787
  try {
771
788
  const res = await fetch(`${baseUrl}/v1/models`);
772
789
  if (!res.ok) return { running: false, models: [] };
package/core/cdp.js CHANGED
@@ -184,35 +184,22 @@ export class CDPClient {
184
184
  return result.result?.value;
185
185
  }
186
186
 
187
- /**
188
- * Get the page title.
189
- * @returns {Promise<string>}
190
- */
187
+ /** Get the page title. */
191
188
  async getTitle() {
192
189
  return this.evaluate('document.title');
193
190
  }
194
191
 
195
- /**
196
- * Get the current URL.
197
- * @returns {Promise<string>}
198
- */
192
+ /** Get the current URL. */
199
193
  async getUrl() {
200
194
  return this.evaluate('window.location.href');
201
195
  }
202
196
 
203
- /**
204
- * Get text content of the page body.
205
- * @returns {Promise<string>}
206
- */
197
+ /** Get text content of the page body. */
207
198
  async getBodyText() {
208
199
  return this.evaluate('document.body?.innerText || ""');
209
200
  }
210
201
 
211
- /**
212
- * Get text content of an element by CSS selector.
213
- * @param {string} selector - CSS selector
214
- * @returns {Promise<string>}
215
- */
202
+ /** Get text content of an element by CSS selector. */
216
203
  async getText(selector) {
217
204
  const escaped = selector.replace(/"/g, '\\"');
218
205
  return this.evaluate(`document.querySelector("${escaped}")?.innerText || ""`);
@@ -308,26 +295,6 @@ export class CDPClient {
308
295
  });
309
296
  }
310
297
 
311
- /**
312
- * Click an element by CSS selector.
313
- * @param {string} selector - CSS selector
314
- */
315
- async clickSelector(selector) {
316
- const el = await this.querySelector(selector);
317
- if (!el) throw new Error(`Element not found: ${selector}`);
318
- await this.click(el.x, el.y);
319
- }
320
-
321
- /**
322
- * Click an element by visible text.
323
- * @param {string} text - Text content to find
324
- */
325
- async clickText(text) {
326
- const el = await this.getByText(text);
327
- if (!el) throw new Error(`Element with text "${text}" not found`);
328
- await this.click(el.x, el.y);
329
- }
330
-
331
298
  /**
332
299
  * Type text character by character.
333
300
  * @param {string} text - Text to type
@@ -453,9 +420,7 @@ export class CDPClient {
453
420
  });
454
421
  }
455
422
 
456
- /**
457
- * Close the connection.
458
- */
423
+ /** Close the CDP connection. */
459
424
  close() {
460
425
  if (this.ws) {
461
426
  this.ws.close();
@@ -490,24 +455,6 @@ export class CDPClient {
490
455
  throw lastError;
491
456
  }
492
457
 
493
- /**
494
- * Wait for an element to appear in the DOM.
495
- * @param {string} selector - CSS selector
496
- * @param {Object} options - Wait options
497
- * @param {number} options.timeout - Timeout in ms (default: 5000)
498
- * @param {number} options.interval - Poll interval in ms (default: 100)
499
- * @returns {Promise<{x: number, y: number, nodeId: number}>} Element info
500
- */
501
- async waitForSelector(selector, { timeout = 5000, interval = 100 } = {}) {
502
- const start = Date.now();
503
- while (Date.now() - start < timeout) {
504
- const el = await this.querySelector(selector);
505
- if (el) return el;
506
- await new Promise(r => setTimeout(r, interval));
507
- }
508
- throw new Error(`Timeout waiting for selector: ${selector}`);
509
- }
510
-
511
458
  /**
512
459
  * Wait for network to be idle (no requests for a period).
513
460
  * @param {Object} options - Wait options
package/core/compaction.js CHANGED
@@ -12,7 +12,7 @@ const CONTEXT_LIMITS = {
12
12
  openai: 120000,
13
13
  xai: 120000,
14
14
  ollama: 6000,
15
- dottie_desktop: 6000,
15
+ mlx_local: 6000,
16
16
  };
17
17
 
18
18
  /** Number of recent messages to always preserve verbatim. */