@halfagiraf/clawx 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69) hide show
  1. package/.env.example +44 -0
  2. package/LICENSE +21 -0
  3. package/README.md +489 -0
  4. package/clawx.json.example +23 -0
  5. package/dist/cli/main.d.ts +21 -0
  6. package/dist/cli/main.d.ts.map +1 -0
  7. package/dist/cli/main.js +176 -0
  8. package/dist/cli/main.js.map +1 -0
  9. package/dist/cli/repl.d.ts +11 -0
  10. package/dist/cli/repl.d.ts.map +1 -0
  11. package/dist/cli/repl.js +119 -0
  12. package/dist/cli/repl.js.map +1 -0
  13. package/dist/cli/tui.d.ts +35 -0
  14. package/dist/cli/tui.d.ts.map +1 -0
  15. package/dist/cli/tui.js +92 -0
  16. package/dist/cli/tui.js.map +1 -0
  17. package/dist/config/index.d.ts +9 -0
  18. package/dist/config/index.d.ts.map +1 -0
  19. package/dist/config/index.js +106 -0
  20. package/dist/config/index.js.map +1 -0
  21. package/dist/core/agent.d.ts +53 -0
  22. package/dist/core/agent.d.ts.map +1 -0
  23. package/dist/core/agent.js +152 -0
  24. package/dist/core/agent.js.map +1 -0
  25. package/dist/core/provider.d.ts +30 -0
  26. package/dist/core/provider.d.ts.map +1 -0
  27. package/dist/core/provider.js +76 -0
  28. package/dist/core/provider.js.map +1 -0
  29. package/dist/core/session.d.ts +37 -0
  30. package/dist/core/session.d.ts.map +1 -0
  31. package/dist/core/session.js +87 -0
  32. package/dist/core/session.js.map +1 -0
  33. package/dist/core/streaming.d.ts +27 -0
  34. package/dist/core/streaming.d.ts.map +1 -0
  35. package/dist/core/streaming.js +137 -0
  36. package/dist/core/streaming.js.map +1 -0
  37. package/dist/index.d.ts +18 -0
  38. package/dist/index.d.ts.map +1 -0
  39. package/dist/index.js +18 -0
  40. package/dist/index.js.map +1 -0
  41. package/dist/tools/gitDiff.d.ts +13 -0
  42. package/dist/tools/gitDiff.d.ts.map +1 -0
  43. package/dist/tools/gitDiff.js +50 -0
  44. package/dist/tools/gitDiff.js.map +1 -0
  45. package/dist/tools/gitStatus.d.ts +13 -0
  46. package/dist/tools/gitStatus.d.ts.map +1 -0
  47. package/dist/tools/gitStatus.js +43 -0
  48. package/dist/tools/gitStatus.js.map +1 -0
  49. package/dist/tools/searchFiles.d.ts +19 -0
  50. package/dist/tools/searchFiles.d.ts.map +1 -0
  51. package/dist/tools/searchFiles.js +101 -0
  52. package/dist/tools/searchFiles.js.map +1 -0
  53. package/dist/tools/sshRun.d.ts +26 -0
  54. package/dist/tools/sshRun.d.ts.map +1 -0
  55. package/dist/tools/sshRun.js +170 -0
  56. package/dist/tools/sshRun.js.map +1 -0
  57. package/dist/types/index.d.ts +35 -0
  58. package/dist/types/index.d.ts.map +1 -0
  59. package/dist/types/index.js +8 -0
  60. package/dist/types/index.js.map +1 -0
  61. package/dist/utils/logger.d.ts +19 -0
  62. package/dist/utils/logger.d.ts.map +1 -0
  63. package/dist/utils/logger.js +43 -0
  64. package/dist/utils/logger.js.map +1 -0
  65. package/dist/utils/system-prompt.d.ts +9 -0
  66. package/dist/utils/system-prompt.d.ts.map +1 -0
  67. package/dist/utils/system-prompt.js +49 -0
  68. package/dist/utils/system-prompt.js.map +1 -0
  69. package/package.json +71 -0
package/.env.example ADDED
@@ -0,0 +1,44 @@
1
+ # Clawx Configuration
2
+ # Copy to .env and fill in your values
3
+
4
+ # === Model Provider ===
5
+ # For local llama.cpp / ollama / vllm endpoints:
6
+ CLAWDEX_PROVIDER=openai-completions
7
+ CLAWDEX_BASE_URL=http://localhost:8080/v1
8
+ CLAWDEX_MODEL=qwen2.5-coder-14b-instruct
9
+ CLAWDEX_API_KEY=not-needed
10
+
11
+ # For OpenAI:
12
+ # CLAWDEX_PROVIDER=openai
13
+ # CLAWDEX_MODEL=gpt-4o
14
+ # OPENAI_API_KEY=sk-...
15
+
16
+ # For Anthropic:
17
+ # CLAWDEX_PROVIDER=anthropic
18
+ # CLAWDEX_MODEL=claude-sonnet-4-20250514
19
+ # ANTHROPIC_API_KEY=sk-ant-...
20
+
21
+ # === Execution ===
22
+ # Working directory for the agent (default: current directory)
23
+ # CLAWDEX_WORK_DIR=/path/to/project
24
+
25
+ # Shell to use for exec commands (default: auto-detect)
26
+ # CLAWDEX_SHELL=/bin/bash
27
+
28
+ # Command timeout in ms (default: 120000)
29
+ # CLAWDEX_EXEC_TIMEOUT=120000
30
+
31
+ # === SSH Targets ===
32
+ # Define named SSH targets as JSON
33
+ # CLAWDEX_SSH_TARGETS={"pi":{"host":"192.168.1.100","username":"pi","privateKeyPath":"~/.ssh/id_rsa"},"server":{"host":"myserver.com","username":"deploy","privateKeyPath":"~/.ssh/deploy_key"}}
34
+
35
+ # === Session ===
36
+ # Session storage directory (default: ~/.clawx/sessions)
37
+ # CLAWDEX_SESSION_DIR=~/.clawx/sessions
38
+
39
+ # === Reasoning ===
40
+ # Thinking level: off, minimal, low, medium, high (default: medium)
41
+ # CLAWDEX_THINKING_LEVEL=medium
42
+
43
+ # Max output tokens (default: 16384)
44
+ # CLAWDEX_MAX_TOKENS=16384
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Steven McSorley
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,489 @@
1
+ # Clawx
2
+
3
+ Lean coding/execution agent extracted from [OpenClaw](https://github.com/openclaw/openclaw) core.
4
+
5
+ Clawx is a terminal-first agent that can create files, write code, run commands, execute over SSH, and iterate until the job is done. It uses the model's own judgment to decide what to build and how.
6
+
7
+ ## What it does
8
+
9
+ - **Creates files** — the model decides what files to create and writes them
10
+ - **Modifies code** — precise search-and-replace edits in existing files
11
+ - **Runs shell commands** — installs deps, builds, tests, verifies
12
+ - **Executes over SSH** — scaffolds and manages remote services
13
+ - **Iterates** — reads command output, fixes errors, tries again
14
+ - **Streams output** — shows progress as the model works
15
+
16
+ ## What it doesn't do
17
+
18
+ - No chat platform integrations (Telegram, WhatsApp, Discord, etc.)
19
+ - No personality/identity systems
20
+ - No plugin marketplace
21
+ - No approval workflows (permissive by default)
22
+ - No gateway/daemon architecture
23
+ - No memory/embedding systems
24
+
25
+ ## Quick start
26
+
27
+ ```bash
28
+ # Clone and build from source
29
+ git clone https://github.com/stevenmcsorley/clawx.git
30
+ cd clawx
31
+ npm install
32
+ npm run build
33
+
34
+ # Configure (see Model Setup below)
35
+ cp .env.example .env
36
+
37
+ # Link the local build as a global command
38
+ npm link
39
+
40
+ # Now you can run from anywhere:
41
+ clawx
42
+
43
+ # Launch TUI with an initial prompt
44
+ clawx "Create a Flask app with auth and a SQLite database"
45
+
46
+ # Single-shot run (headless, exits when done)
47
+ clawx run "Create a hello world Express server"
48
+
49
+ # Basic readline REPL (fallback if TUI has issues)
50
+ clawx --basic
51
+
52
+ # Continue last session
53
+ clawx continue
54
+ ```
55
+
56
+ > **Note:** After `npm link`, the `clawx` command points to your local install.
57
+ > If you ever run `npm install` again, re-run `npm link` to restore the global link.
58
+
59
+ ## Model setup
60
+
61
+ Clawx requires a model that supports **structured tool calling** (returning `tool_calls` in the API response, not just text). This is critical — the agent loop depends on it.
62
+
63
+ ### Model compatibility and benchmarks
64
+
65
+ Tested on Windows 11, RTX 3060 12GB, 2026-03-15.
66
+
67
+ | Model | Provider | Tool calling | VRAM | Benchmark | Status |
68
+ |-------|----------|-------------|------|-----------|--------|
69
+ | **glm-4.7-flash:latest** | Ollama | Structured `tool_calls` | ~5 GB | 12 turns, 13 tool calls — write file + run python | **Recommended** |
70
+ | Qwen2.5-Coder-14B-abliterated Q4_K_M | Ollama | Text-only `<tool_call>` tags | ~9 GB | Tool loop never starts — model returns text, not structured calls | Not compatible |
71
+ | Qwen2.5-Coder-14B-abliterated Q4_K_M | llama-server `--jinja` | Text-only `<tool_call>` tags | ~9 GB | Same as above | Not compatible |
72
+ | GPT-4o / GPT-4-turbo | OpenAI API | Structured `tool_calls` | — | N/A (cloud) | Works |
73
+ | **DeepSeek-V3 (deepseek-chat)** | DeepSeek API | Structured `tool_calls` | — | N/A (cloud) | **Works, very cheap** |
74
+ | DeepSeek-R1 (deepseek-reasoner) | DeepSeek API | Structured `tool_calls` (via chat) | — | N/A (cloud) | Works |
75
+ | Claude 3.5+ | Anthropic API | Structured `tool_calls` | — | N/A (cloud) | Works |
76
+
77
+ **glm-4.7-flash benchmark detail:**
78
+ Task: "Create a file /tmp/hello.py that prints hello world and run it with python"
79
+ - Model correctly called `write` to create the file, then `bash` to run it
80
+ - Hit a Windows `/tmp` path resolution issue (Python resolved `/tmp` differently than Git Bash)
81
+ - Iterated: investigated with `ls`, `which python`, retried with `cat >` via bash
82
+ - Completed successfully after 12 turns and 13 tool calls
83
+
84
+ > **Why Qwen doesn't work:** The abliterated GGUF outputs tool calls as `<tool_call>` text in message content instead of structured `tool_calls` objects in the API response. pi-agent-core requires structured tool calls. This is a model-level issue, not a Clawx bug.
85
+
86
+ ### Option 1: GLM-4.7-Flash via Ollama (recommended for local)
87
+
88
+ Requires: [Ollama](https://ollama.com/) installed, ~5GB VRAM.
89
+
90
+ ```bash
91
+ # 1. Start the Ollama server (if not already running as a service)
92
+ ollama serve
93
+ # Ollama listens on http://localhost:11434 by default
94
+ # On Windows it often runs as a background service automatically
95
+
96
+ # 2. Pull the model (~5GB download)
97
+ ollama pull glm-4.7-flash:latest
98
+
99
+ # 3. Verify the model is available
100
+ ollama list
101
+ # NAME SIZE
102
+ # glm-4.7-flash:latest 5.2 GB
103
+
104
+ # 4. (Optional) Test the model is responding
105
+ ollama run glm-4.7-flash:latest "hello" --verbose
106
+
107
+ # 5. Configure .env
108
+ cat > .env << 'EOF'
109
+ CLAWDEX_PROVIDER=ollama
110
+ CLAWDEX_BASE_URL=http://localhost:11434/v1
111
+ CLAWDEX_MODEL=glm-4.7-flash:latest
112
+ CLAWDEX_API_KEY=not-needed
113
+ CLAWDEX_THINKING_LEVEL=off
114
+ CLAWDEX_MAX_TOKENS=8192
115
+ EOF
116
+
117
+ # 6. Run Clawx
118
+ clawx run "Create a Python script that prints the first 20 Fibonacci numbers"
119
+ ```
120
+
121
+ ### Option 2: Qwen2.5-Coder-14B via Ollama (import GGUF)
122
+
123
+ > **Warning:** This model does NOT produce structured tool calls. It is listed here for reference only. Tool-using agent tasks will fail. You can still use it for plain chat without tools.
124
+
125
+ If you have the GGUF file locally (e.g. `D:/model/Qwen2.5-Coder-14B-Abliterated/`):
126
+
127
+ ```bash
128
+ # 1. Make sure Ollama is running
129
+ ollama serve
130
+
131
+ # 2. Create a Modelfile pointing to your GGUF
132
+ # Example Modelfile content:
133
+ # FROM D:/model/Qwen2.5-Coder-14B-Abliterated/Qwen2.5-Coder-14B-Instruct-abliterated-Q4_K_M.gguf
134
+ # TEMPLATE "..." (with Qwen chat + tool call template)
135
+ # PARAMETER stop "<|im_end|>"
136
+ # PARAMETER stop "<|im_start|>"
137
+ # PARAMETER num_ctx 16384
138
+ # PARAMETER temperature 0.7
139
+
140
+ # 3. Import the GGUF into Ollama
141
+ cd D:/model/Qwen2.5-Coder-14B-Abliterated
142
+ ollama create qwen-coder-abliterated -f Modelfile
143
+ # This copies the GGUF into Ollama's blob store (~8.9GB)
144
+
145
+ # 4. Verify
146
+ ollama list
147
+ # NAME SIZE
148
+ # qwen-coder-abliterated:latest 8.9 GB
149
+
150
+ # 5. Test it responds (plain chat works fine)
151
+ ollama run qwen-coder-abliterated:latest "Write a Python quicksort"
152
+
153
+ # 6. .env for this model (tool calling won't work)
154
+ cat > .env << 'EOF'
155
+ CLAWDEX_PROVIDER=ollama
156
+ CLAWDEX_BASE_URL=http://localhost:11434/v1
157
+ CLAWDEX_MODEL=qwen-coder-abliterated:latest
158
+ CLAWDEX_API_KEY=not-needed
159
+ CLAWDEX_THINKING_LEVEL=off
160
+ CLAWDEX_MAX_TOKENS=8192
161
+ EOF
162
+ ```
163
+
164
+ ### Option 2b: Qwen2.5-Coder-14B via llama-server (llama.cpp)
165
+
166
+ > **Warning:** Same limitation — text-only tool calls, not compatible with Clawx agent loop.
167
+
168
+ If you have llama.cpp built locally (e.g. `D:/llama-cpp/`):
169
+
170
+ ```bash
171
+ # 1. Start llama-server with the GGUF (requires --jinja for tool template)
172
+ D:/llama-cpp/build/bin/Release/llama-server.exe \
173
+ --model D:/model/Qwen2.5-Coder-14B-Abliterated/Qwen2.5-Coder-14B-Instruct-abliterated-Q4_K_M.gguf \
174
+ --host 0.0.0.0 \
175
+ --port 8080 \
176
+ --n-gpu-layers 99 \
177
+ --ctx-size 16384 \
178
+ --jinja
179
+ # Server listens on http://localhost:8080
180
+
181
+ # 2. Verify it's running
182
+ curl http://localhost:8080/v1/models
183
+
184
+ # 3. .env for llama-server
185
+ cat > .env << 'EOF'
186
+ CLAWDEX_PROVIDER=local
187
+ CLAWDEX_BASE_URL=http://localhost:8080/v1
188
+ CLAWDEX_MODEL=qwen2.5-coder-14b-instruct
189
+ CLAWDEX_API_KEY=not-needed
190
+ CLAWDEX_THINKING_LEVEL=off
191
+ CLAWDEX_MAX_TOKENS=8192
192
+ EOF
193
+ ```
194
+
195
+ ### Option 3: DeepSeek API
196
+
197
+ DeepSeek is OpenAI-compatible with full structured tool calling support, including thinking mode.
198
+ Pricing: ~$0.27/1M input, $1.10/1M output (deepseek-chat). Very cost-effective.
199
+
200
+ ```bash
201
+ # 1. Get an API key at https://platform.deepseek.com/
202
+ # 2. Configure .env
203
+ cat > .env << 'EOF'
204
+ CLAWDEX_PROVIDER=deepseek
205
+ CLAWDEX_BASE_URL=https://api.deepseek.com/v1
206
+ CLAWDEX_MODEL=deepseek-chat
207
+ CLAWDEX_API_KEY=sk-your-deepseek-key-here
208
+ CLAWDEX_THINKING_LEVEL=off
209
+ CLAWDEX_MAX_TOKENS=16384
210
+ EOF
211
+
212
+ # For DeepSeek R1 reasoning model (tool calls route through deepseek-chat):
213
+ cat > .env << 'EOF'
214
+ CLAWDEX_PROVIDER=deepseek
215
+ CLAWDEX_BASE_URL=https://api.deepseek.com/v1
216
+ CLAWDEX_MODEL=deepseek-reasoner
217
+ CLAWDEX_API_KEY=sk-your-deepseek-key-here
218
+ CLAWDEX_THINKING_LEVEL=medium
219
+ CLAWDEX_MAX_TOKENS=16384
220
+ EOF
221
+
222
+ # Run
223
+ clawx run "Create a FastAPI app with SQLite and JWT auth"
224
+ ```
225
+
226
+ ### Option 4: OpenAI API
227
+
228
+ ```bash
229
+ cat > .env << 'EOF'
230
+ CLAWDEX_PROVIDER=openai
231
+ CLAWDEX_BASE_URL=https://api.openai.com/v1
232
+ CLAWDEX_MODEL=gpt-4o
233
+ CLAWDEX_API_KEY=sk-your-key-here
234
+ CLAWDEX_THINKING_LEVEL=off
235
+ CLAWDEX_MAX_TOKENS=16384
236
+ EOF
237
+ ```
238
+
239
+ ### Option 5: Anthropic API
240
+
241
+ ```bash
242
+ cat > .env << 'EOF'
243
+ CLAWDEX_PROVIDER=anthropic
244
+ CLAWDEX_BASE_URL=https://api.anthropic.com
245
+ CLAWDEX_MODEL=claude-sonnet-4-20250514
246
+ CLAWDEX_API_KEY=sk-ant-your-key-here
247
+ CLAWDEX_THINKING_LEVEL=medium
248
+ CLAWDEX_MAX_TOKENS=16384
249
+ EOF
250
+ ```
251
+
252
+ ### GPU / VRAM notes
253
+
254
+ - **RTX 3060 12GB**: Can run glm-4.7-flash (~5GB) or Qwen-14B Q4_K_M (~9GB), but not both simultaneously
255
+ - To free VRAM when switching models: `ollama stop glm-4.7-flash:latest` or `ollama stop qwen-coder-abliterated:latest`
256
+ - Ollama auto-loads models on first request and keeps them in VRAM until timeout or manual stop
257
+
258
+ ## Configuration reference
259
+
260
+ ### Environment variables
261
+
262
+ ```bash
263
+ CLAWDEX_PROVIDER=ollama # Provider type (see table below)
264
+ CLAWDEX_BASE_URL=http://localhost:11434/v1 # Endpoint URL
265
+ CLAWDEX_MODEL=glm-4.7-flash:latest # Model name
266
+ CLAWDEX_API_KEY=not-needed # API key (if required)
267
+ CLAWDEX_WORK_DIR=/path/to/project # Working directory
268
+ CLAWDEX_THINKING_LEVEL=off # off|minimal|low|medium|high
269
+ CLAWDEX_MAX_TOKENS=8192 # Max output tokens
270
+ CLAWDEX_EXEC_TIMEOUT=120000 # Tool execution timeout (ms)
271
+ ```
272
+
273
+ ### Supported providers
274
+
275
+ | Provider | CLAWDEX_PROVIDER | Notes |
276
+ |----------|-----------------|-------|
277
+ | Ollama | `ollama` | Recommended for local models |
278
+ | llama.cpp | `openai-completions` or `local` | OpenAI-compatible endpoint |
279
+ | vLLM | `vllm` | Maps to OpenAI-compatible |
280
+ | LM Studio | `lmstudio` | Maps to OpenAI-compatible |
281
+ | DeepSeek | `deepseek` | OpenAI-compatible, cheap, tool calling + thinking |
282
+ | OpenAI | `openai` | GPT-4o, etc. |
283
+ | Anthropic | `anthropic` | Claude models |
284
+ | Google | `google` | Gemini models |
285
+ | Mistral | `mistral` | Mistral models |
286
+
287
+ ### SSH targets
288
+
289
+ Define named SSH targets via environment or `clawx.json`:
290
+
291
+ ```bash
292
+ CLAWDEX_SSH_TARGETS='{"pi":{"host":"192.168.1.100","username":"pi","privateKeyPath":"~/.ssh/id_rsa"}}'
293
+ ```
294
+
295
+ Or in `clawx.json`:
296
+
297
+ ```json
298
+ {
299
+ "sshTargets": {
300
+ "pi": {
301
+ "host": "192.168.1.100",
302
+ "username": "pi",
303
+ "privateKeyPath": "~/.ssh/id_rsa"
304
+ },
305
+ "server": {
306
+ "host": "myserver.com",
307
+ "port": 2222,
308
+ "username": "deploy",
309
+ "privateKeyPath": "~/.ssh/deploy_key"
310
+ }
311
+ }
312
+ }
313
+ ```
314
+
315
+ ### Config file
316
+
317
+ Place a `clawx.json` in your working directory:
318
+
319
+ ```json
320
+ {
321
+ "provider": "openai-completions",
322
+ "baseUrl": "http://localhost:8080/v1",
323
+ "model": "qwen2.5-coder-14b-instruct",
324
+ "maxTokens": 16384,
325
+ "thinkingLevel": "medium",
326
+ "systemPrompt": "You specialize in Python backend services."
327
+ }
328
+ ```
329
+
330
+ ## CLI commands
331
+
332
+ ```
333
+ clawx [prompt] Launch TUI (default mode, rich terminal UI)
334
+ clawx --basic Launch basic readline REPL instead of TUI
335
+ clawx run <prompt> Run a task headless and exit
336
+ clawx chat Interactive basic REPL
337
+ clawx chat -c Resume last session in basic REPL
338
+ clawx continue Resume last session
339
+ clawx sessions List recent sessions
340
+ ```
341
+
342
+ ### Global options
343
+
344
+ ```
345
+ -m, --model <model> Override model
346
+ -p, --provider <provider> Override provider type
347
+ -u, --base-url <url> Override base URL
348
+ -d, --work-dir <dir> Working directory
349
+ -v, --verbose Debug logging
350
+ ```
351
+
352
+ ### TUI features (default mode)
353
+
354
+ The TUI mode uses pi-coding-agent's InteractiveMode:
355
+
356
+ - Syntax-highlighted code in tool results
357
+ - Diff rendering for edit operations
358
+ - Spinner animations during tool execution
359
+ - Ctrl+P to cycle models
360
+ - Ctrl+C to cancel current operation, Ctrl+D to quit
361
+ - Session branching and tree navigation
362
+ - Markdown rendering in responses
363
+ - /slash commands for settings, models, sessions
364
+
365
+ ### Basic REPL commands
366
+
367
+ ```
368
+ /clear Clear session history
369
+ /save Save session
370
+ /info Show session info
371
+ /exit Save and quit
372
+ ```
373
+
374
+ ## Tools available to the model
375
+
376
+ | Tool | Source | Description |
377
+ |------|--------|-------------|
378
+ | `read` | pi-coding-agent | Read file contents |
379
+ | `write` | pi-coding-agent | Create/overwrite files |
380
+ | `edit` | pi-coding-agent | Precise search-and-replace edits |
381
+ | `bash` | pi-coding-agent | Run shell commands |
382
+ | `grep` | pi-coding-agent | Search file contents with regex |
383
+ | `find` | pi-coding-agent | Find files by pattern |
384
+ | `ls` | pi-coding-agent | List directory contents |
385
+ | `search_files` | Clawx | Unified file content search (rg/grep) |
386
+ | `git_status` | Clawx | Git repository status |
387
+ | `git_diff` | Clawx | Git file differences |
388
+ | `ssh_run` | Clawx | Execute commands on SSH targets |
389
+
390
+ ## Architecture
391
+
392
+ ```
393
+ src/
394
+ cli/ CLI entry point and REPL
395
+ config/ Configuration loading (.env, JSON)
396
+ core/
397
+ agent.ts Agent orchestrator (wires pi-agent-core loop)
398
+ provider.ts Model/provider resolution for local endpoints
399
+ session.ts JSON-file session persistence
400
+ streaming.ts Terminal output renderer
401
+ tools/
402
+ sshRun.ts SSH execution (ssh2)
403
+ gitStatus.ts Git status wrapper
404
+ gitDiff.ts Git diff wrapper
405
+ searchFiles.ts File content search (rg/grep)
406
+ types/ TypeScript type definitions
407
+ utils/ Logger, system prompt builder
408
+ ```
409
+
410
+ ### Dependencies
411
+
412
+ - **@mariozechner/pi-agent-core** — Agent loop (user→model→tool→result→loop)
413
+ - **@mariozechner/pi-ai** — Provider abstraction, OpenAI-compatible streaming
414
+ - **@mariozechner/pi-coding-agent** — Coding tools (read, write, edit, bash, grep, find, ls)
415
+ - **ssh2** — SSH client for remote execution
416
+ - **commander** — CLI framework
417
+ - **chalk** — Terminal colors
418
+ - **dotenv** — Environment variable loading
419
+ - **zod** — Schema validation (available for extensions)
420
+
421
+ ## Example workflows
422
+
423
+ ### TUI mode (recommended)
424
+
425
+ ```bash
426
+ # Launch the full TUI — type prompts interactively
427
+ clawx
428
+
429
+ # Launch with an initial task
430
+ clawx "Create a Node.js Express API with JWT auth and SQLite"
431
+
432
+ # Use a specific model for this session
433
+ clawx -m glm-4.7-flash:latest "Build a REST API"
434
+ ```
435
+
436
+ ### Headless single-shot tasks
437
+
438
+ ```bash
439
+ # Create a project and exit
440
+ clawx run "Create a Python Flask app with login, SQLite, and unit tests"
441
+
442
+ # Generate a single file
443
+ clawx run "Create a Python script that prints the first 20 Fibonacci numbers"
444
+
445
+ # Work in a specific directory
446
+ clawx run -d ./my-project "Add a health check endpoint to the Express server"
447
+ ```
448
+
449
+ The agent will create files, install dependencies, build, and verify — iterating on errors until the task is complete.
450
+
451
+ ### Remote scaffolding via SSH
452
+
453
+ ```bash
454
+ # With SSH targets configured in .env or clawx.json
455
+ clawx run "SSH into my Pi and set up a Node.js service that monitors CPU temperature and exposes it as a Prometheus metric on port 9100"
456
+ ```
457
+
458
+ ### Interactive basic REPL
459
+
460
+ ```bash
461
+ # Basic REPL (if TUI doesn't suit your terminal)
462
+ clawx --basic
463
+
464
+ # REPL in a specific project directory
465
+ clawx chat -d ./my-project
466
+ ```
467
+
468
+ ## Programmatic usage
469
+
470
+ ```typescript
471
+ import { loadConfig, runAgent, createStreamRenderer } from "clawx";
472
+
473
+ const config = loadConfig({
474
+ provider: "openai-completions",
475
+ baseUrl: "http://localhost:8080/v1",
476
+ model: "qwen2.5-coder-14b-instruct",
477
+ });
478
+
479
+ const renderer = createStreamRenderer();
480
+ const result = await runAgent(config, {
481
+ prompt: "Create a hello world Express server",
482
+ onEvent: (event) => renderer.onEvent(event),
483
+ });
484
+ renderer.finish();
485
+ ```
486
+
487
+ ## License
488
+
489
+ MIT — extracted and adapted from [OpenClaw](https://github.com/openclaw/openclaw) (MIT).
package/clawx.json.example ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "provider": "openai-completions",
3
+ "baseUrl": "http://localhost:8080/v1",
4
+ "model": "qwen2.5-coder-14b-instruct",
5
+ "apiKey": "not-needed",
6
+ "maxTokens": 16384,
7
+ "thinkingLevel": "medium",
8
+ "execTimeout": 120000,
9
+ "sshTargets": {
10
+ "pi": {
11
+ "host": "192.168.1.100",
12
+ "username": "pi",
13
+ "privateKeyPath": "~/.ssh/id_rsa"
14
+ },
15
+ "server": {
16
+ "host": "myserver.com",
17
+ "port": 2222,
18
+ "username": "deploy",
19
+ "privateKeyPath": "~/.ssh/deploy_key"
20
+ }
21
+ },
22
+ "systemPrompt": ""
23
+ }
package/dist/cli/main.d.ts ADDED
@@ -0,0 +1,21 @@
1
+ #!/usr/bin/env node
2
+ /**
3
+ * Clawx CLI — terminal-first coding/execution agent.
4
+ *
5
+ * EXTRACTION NOTE:
6
+ * OpenClaw's CLI entry (openclaw.mjs → entry.ts → cli/) handles:
7
+ * - 40+ CLI commands (agent, channel, config, daemon, gateway, install, ...)
8
+ * - Channel initialization and routing
9
+ * - Plugin discovery and loading
10
+ * - Gateway server startup
11
+ * - Daemon management
12
+ * - Update checking
13
+ *
14
+ * We DISCARD all of that. Our CLI has these modes:
15
+ * 1. `clawx` — default: TUI mode (rich terminal UI from pi-coding-agent)
16
+ * 2. `clawx run "prompt"` — single-shot: run a task and exit
17
+ * 3. `clawx chat` — basic readline REPL (fallback)
18
+ * 4. `clawx continue` — resume the last session
19
+ */
20
+ export {};
21
+ //# sourceMappingURL=main.d.ts.map
package/dist/cli/main.d.ts.map ADDED
@@ -0,0 +1 @@
1
+ {"version":3,"file":"main.d.ts","sourceRoot":"","sources":["../../src/cli/main.ts"],"names":[],"mappings":";AACA;;;;;;;;;;;;;;;;;GAiBG"}