@stevederico/dotbot 0.31.0 → 0.32.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,7 @@
1
+ {
2
+ "permissions": {
3
+ "allow": [
4
+ "Bash(grep -r \"grok-3\" /Users/sd/Desktop/projects/dotbot --include=\"*.js\" --include=\"*.md\" 2>/dev/null | grep -v node_modules)"
5
+ ]
6
+ }
7
+ }
package/CHANGELOG.md CHANGED
@@ -1,3 +1,9 @@
1
+ 0.32
2
+
3
+ Rename mlx_local provider to local
4
+ Rename MLX_LOCAL_URL env var to LOCAL_LLM_URL
5
+ Remove MLX references from comments and docs
6
+
1
7
  0.31
2
8
 
3
9
  Document mlx_local provider
package/README.md CHANGED
@@ -217,7 +217,7 @@ Commands:
217
217
  events [--summary] View audit log
218
218
 
219
219
  Options:
220
- --provider, -p AI provider: xai, anthropic, openai, ollama, mlx_local (default: xai)
220
+ --provider, -p AI provider: xai, anthropic, openai, ollama, local (default: xai)
221
221
  --model, -m Model name (default: grok-4-1-fast-reasoning)
222
222
  --system, -s Custom system prompt (prepended to default)
223
223
  --session Resume a specific session by ID
@@ -236,7 +236,7 @@ Environment Variables:
236
236
  ANTHROPIC_API_KEY API key for Anthropic
237
237
  OPENAI_API_KEY API key for OpenAI
238
238
  OLLAMA_BASE_URL Base URL for Ollama (default: http://localhost:11434)
239
- MLX_LOCAL_URL Base URL for a local MLX-style OpenAI-compatible server (default: http://127.0.0.1:1316/v1)
239
+ LOCAL_LLM_URL Base URL for a local OpenAI-compatible LLM server (default: http://127.0.0.1:1316/v1)
240
240
 
241
241
  Config File:
242
242
  ~/.dotbotrc JSON config for defaults (provider, model, db, sandbox)
package/bin/dotbot.js CHANGED
@@ -115,7 +115,7 @@ Commands:
115
115
  events [--summary] View audit log
116
116
 
117
117
  Options:
118
- --provider, -p AI provider: xai, anthropic, openai, ollama, mlx_local (default: xai)
118
+ --provider, -p AI provider: xai, anthropic, openai, ollama, local (default: xai)
119
119
  --model, -m Model name (default: grok-4-1-fast-reasoning)
120
120
  --system, -s Custom system prompt (prepended to default)
121
121
  --session Resume a specific session by ID
@@ -134,7 +134,7 @@ Environment Variables:
134
134
  ANTHROPIC_API_KEY API key for Anthropic
135
135
  OPENAI_API_KEY API key for OpenAI
136
136
  OLLAMA_BASE_URL Base URL for Ollama (default: http://localhost:11434)
137
- MLX_LOCAL_URL Base URL for a local MLX-style OpenAI-compatible server (default: http://127.0.0.1:1316/v1)
137
+ LOCAL_LLM_URL Base URL for a local OpenAI-compatible LLM server (default: http://127.0.0.1:1316/v1)
138
138
 
139
139
  Config File:
140
140
  ~/.dotbotrc JSON config for defaults (provider, model, db)
@@ -296,7 +296,7 @@ async function getProviderConfig(providerId) {
296
296
  return { ...base, apiUrl: `${baseUrl}/api/chat` };
297
297
  }
298
298
 
299
- // Local OpenAI-compatible servers (mlx_local, etc.) don't use API keys —
299
+ // Local OpenAI-compatible servers (local, etc.) don't use API keys —
300
300
  // they're served from localhost and the apiUrl is already baked into the
301
301
  // provider config (or overridden via env var inside providers.js).
302
302
  if (base.local) {
package/core/agent.js CHANGED
@@ -162,7 +162,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
162
162
  };
163
163
 
164
164
  // Include tool definitions for non-local providers and local providers
165
- // that support native tool calling (e.g., GLM-4.7 via mlx_lm.server v0.30.7+)
165
+ // that support native tool calling (e.g., GLM-4.7 via mlx_lm.server v0.30.7+)
166
166
  if (!targetProvider.local || targetProvider.supportsToolRole) {
167
167
  requestBody.tools = toolDefs;
168
168
  }
@@ -174,7 +174,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
174
174
  };
175
175
  };
176
176
 
177
- // Local providers (ollama, mlx_local): direct fetch, no failover
177
+ // Local providers (ollama, local): direct fetch, no failover
178
178
  if (provider.local) {
179
179
  const { url, headers, body } = buildAgentRequest(provider);
180
180
  response = await fetch(url, { method: "POST", headers, body, signal });
@@ -210,8 +210,8 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
210
210
  const result = yield* parseAnthropicStream(response, fullContent, toolCalls, signal, activeProvider.id);
211
211
  fullContent = result.fullContent;
212
212
  toolCalls = result.toolCalls;
213
- } else if (activeProvider.id === "mlx_local") {
214
- // Local MLX-style OpenAI-compatible server. Models served this way
213
+ } else if (activeProvider.id === "local") {
214
+ // Local OpenAI-compatible server. Models served this way
215
215
  // may emit output in one of three formats:
216
216
  // 1. gpt-oss channel tokens (<|channel|>analysis/final<|message|>)
217
217
  // 2. Native reasoning (delta.reasoning from parseOpenAIStream)
@@ -286,7 +286,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
286
286
  // the model doesn't use gpt-oss format (e.g. LFM2.5, SmolLM).
287
287
  // Flush buffer and switch to passthrough for remaining tokens.
288
288
  if (!analysisStarted && !finalMarkerFound && rawBuffer.length > CHANNEL_DETECT_THRESHOLD) {
289
- console.log("[mlx_local] no channel tokens after", rawBuffer.length, "chars — switching to passthrough");
289
+ console.log("[local] no channel tokens after", rawBuffer.length, "chars — switching to passthrough");
290
290
  usesPassthrough = true;
291
291
  const textEvent = { type: "text_delta", text: rawBuffer };
292
292
  validateEvent(textEvent);
@@ -301,7 +301,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
301
301
  if (aIdx !== -1) {
302
302
  analysisStarted = true;
303
303
  lastThinkingYieldPos = aIdx + ANALYSIS_MARKER.length;
304
- console.log("[mlx_local] analysis marker found at", aIdx, "| yieldPos:", lastThinkingYieldPos);
304
+ console.log("[local] analysis marker found at", aIdx, "| yieldPos:", lastThinkingYieldPos);
305
305
  }
306
306
  }
307
307
 
@@ -311,7 +311,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
311
311
  if (endIdx !== -1) {
312
312
  const chunk = rawBuffer.slice(lastThinkingYieldPos, endIdx);
313
313
  if (chunk) {
314
- console.log("[mlx_local] thinking (final):", chunk.slice(0, 80));
314
+ console.log("[local] thinking (final):", chunk.slice(0, 80));
315
315
  const thinkingEvent = {
316
316
  type: "thinking",
317
317
  text: chunk,
@@ -325,7 +325,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
325
325
  } else {
326
326
  const chunk = rawBuffer.slice(lastThinkingYieldPos);
327
327
  if (chunk) {
328
- console.log("[mlx_local] thinking (incr):", chunk.slice(0, 80));
328
+ console.log("[local] thinking (incr):", chunk.slice(0, 80));
329
329
  const thinkingEvent = {
330
330
  type: "thinking",
331
331
  text: chunk,
@@ -341,7 +341,7 @@ export async function* agentLoop({ model, messages, tools, signal, provider, con
341
341
  // Check for final channel marker
342
342
  const fIdx = rawBuffer.indexOf(FINAL_MARKER);
343
343
  if (fIdx !== -1) {
344
- console.log("[mlx_local] final marker found at", fIdx, "| bufLen:", rawBuffer.length);
344
+ console.log("[local] final marker found at", fIdx, "| bufLen:", rawBuffer.length);
345
345
  finalMarkerFound = true;
346
346
  lastFinalYieldPos = fIdx + FINAL_MARKER.length;
347
347
  const pending = rawBuffer.slice(lastFinalYieldPos);
@@ -757,8 +757,8 @@ export async function getOllamaStatus() {
757
757
 
758
758
  /**
759
759
  * Check if a local OpenAI-compatible model server is running and list
760
- * available models. Defaults to the MLX LM server convention
761
- * (http://localhost:1316/v1) and can be overridden with MLX_LOCAL_URL.
760
+ * available models. Defaults to the local LLM server convention
761
+ * (http://localhost:1316/v1) and can be overridden with LOCAL_LLM_URL.
762
762
  *
763
763
  * @returns {Promise<{running: boolean, models: Array<{name: string}>}>}
764
764
  */
@@ -783,7 +783,7 @@ function stripGptOssTokens(text) {
783
783
  }
784
784
 
785
785
  export async function getMlxLocalStatus() {
786
- const baseUrl = (process.env.MLX_LOCAL_URL || 'http://localhost:1316/v1').replace(/\/v1$/, '');
786
+ const baseUrl = (process.env.LOCAL_LLM_URL || 'http://localhost:1316/v1').replace(/\/v1$/, '');
787
787
  try {
788
788
  const res = await fetch(`${baseUrl}/v1/models`);
789
789
  if (!res.ok) return { running: false, models: [] };
@@ -12,7 +12,7 @@ const CONTEXT_LIMITS = {
12
12
  openai: 120000,
13
13
  xai: 120000,
14
14
  ollama: 6000,
15
- mlx_local: 6000,
15
+ local: 6000,
16
16
  };
17
17
 
18
18
  /** Number of recent messages to always preserve verbatim. */
@@ -12,11 +12,11 @@
12
12
  * 3. LFM2.5 native format with markers:
13
13
  * <|tool_call_start|>[tool_name(arg1="value1")]<|tool_call_end|>
14
14
  *
15
- * 4. LFM2.5 bare Pythonic format (markers stripped by mlx_lm.server):
15
+ * 4. LFM2.5 bare Pythonic format (markers stripped by the server):
16
16
  * [tool_name(arg1="value1", arg2="value2")]
17
17
  *
18
18
  * Used when the model doesn't support native OpenAI-style tool calling
19
- * (e.g., mlx_lm.server) and tool definitions are injected via system prompt.
19
+ * (e.g., local LLM server) and tool definitions are injected via system prompt.
20
20
  */
21
21
 
22
22
  const TOOL_CALL_RE = /<tool_call>([\s\S]*?)<\/tool_call>/g;
@@ -135,7 +135,7 @@ export function parseToolCalls(text) {
135
135
  }
136
136
  }
137
137
 
138
- // Format 4: [func_name(key="val")] (bare Pythonic, markers stripped by mlx_lm.server)
138
+ // Format 4: [func_name(key="val")] (bare Pythonic, markers stripped by local LLM server)
139
139
  if (calls.length === 0) {
140
140
  BARE_PYTHONIC_RE.lastIndex = 0;
141
141
  while ((match = BARE_PYTHONIC_RE.exec(text)) !== null) {
package/dotbot.db ADDED
Binary file
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@stevederico/dotbot",
3
- "version": "0.31.0",
3
+ "version": "0.32.0",
4
4
  "description": "AI agent CLI and library for Node.js — streaming, multi-provider, tool execution, autonomous tasks",
5
5
  "type": "module",
6
6
  "main": "index.js",
@@ -3,7 +3,7 @@ import assert from 'node:assert';
3
3
  import { agentLoop } from '../core/agent.js';
4
4
 
5
5
  /**
6
- * Regression tests for the mlx_local provider branch of agentLoop.
6
+ * Regression tests for the local provider branch of agentLoop.
7
7
  *
8
8
  * These cover the flush branch added in 0.30 that handles short plain-text
9
9
  * responses from local models that never emit gpt-oss channel tokens
@@ -13,13 +13,13 @@ import { agentLoop } from '../core/agent.js';
13
13
  */
14
14
 
15
15
  /**
16
- * Build a minimal mlx_local-style provider for agentLoop tests.
17
- * The `id` must be "mlx_local" to hit the buffered-parsing branch,
16
+ * Build a minimal local-style provider for agentLoop tests.
17
+ * The `id` must be "local" to hit the buffered-parsing branch,
18
18
  * and `local: true` skips the failover path for a direct fetch.
19
19
  */
20
20
  function makeLocalProvider() {
21
21
  return {
22
- id: 'mlx_local',
22
+ id: 'local',
23
23
  name: 'Test Local',
24
24
  apiUrl: 'http://127.0.0.1:1316/v1',
25
25
  endpoint: '/chat/completions',
@@ -61,7 +61,7 @@ function stubFetch(response) {
61
61
  return () => { globalThis.fetch = original; };
62
62
  }
63
63
 
64
- describe('agentLoop — mlx_local short plain-text response flush', () => {
64
+ describe('agentLoop — local short plain-text response flush', () => {
65
65
  let restoreFetch;
66
66
 
67
67
  afterEach(() => {
@@ -133,14 +133,14 @@ export const AI_PROVIDERS = {
133
133
  }),
134
134
  formatResponse: (data) => data.choices?.[0]?.message?.content
135
135
  },
136
- mlx_local: {
137
- // Local MLX-style OpenAI-compatible server (e.g. mlx_lm.server, LM Studio,
138
- // vLLM, llama.cpp server). Routes through the `mlx_local` branch in
136
+ local: {
137
+ // Local OpenAI-compatible server (e.g. LM Studio,
138
+ // vLLM, llama.cpp server). Routes through the `local` branch in
139
139
  // core/agent.js which auto-detects gpt-oss channel tokens, native
140
- // reasoning, and plain-text responses. Override the URL with MLX_LOCAL_URL.
141
- id: 'mlx_local',
142
- name: 'Local (MLX)',
143
- apiUrl: process.env.MLX_LOCAL_URL || 'http://127.0.0.1:1316/v1',
140
+ // reasoning, and plain-text responses. Override the URL with LOCAL_LLM_URL.
141
+ id: 'local',
142
+ name: 'Local',
143
+ apiUrl: process.env.LOCAL_LLM_URL || 'http://127.0.0.1:1316/v1',
144
144
  defaultModel: '',
145
145
  models: [],
146
146
  local: true,