llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,52 @@
1
+ """Whisper STT backend — POST to OpenAI-compatible /v1/audio/transcriptions."""
2
+ from __future__ import annotations
3
+
4
+ import struct
5
+
6
+ import httpx
7
+
8
+
9
class WhisperSTT:
    """Transcribe audio via an OpenAI-compatible Whisper endpoint."""

    def __init__(self, url: str = "http://localhost:8000/v1/audio/transcriptions"):
        # Endpoint of the OpenAI-compatible /v1/audio/transcriptions API.
        self._url = url

    def transcribe(self, audio_bytes: bytes, language: str) -> str:
        """Send PCM audio as WAV to the Whisper endpoint.

        Args:
            audio_bytes: Raw PCM samples to transcribe.
            language: Language hint forwarded to the endpoint.

        Returns:
            The transcribed text, or "" if the response carries none.

        Raises:
            httpx.HTTPStatusError: On a non-2xx response.
        """
        # Whisper-compatible servers expect a real audio container, so
        # wrap the raw PCM in a WAV header before uploading.
        payload = _pcm_to_wav(audio_bytes)
        resp = httpx.post(
            self._url,
            files={"file": ("audio.wav", payload, "audio/wav")},
            data={"language": language, "response_format": "json"},
            timeout=30.0,
        )
        resp.raise_for_status()
        body = resp.json()
        return body.get("text", "")
26
+
27
+
28
+ def _pcm_to_wav(
29
+ pcm: bytes,
30
+ sample_rate: int = 16000,
31
+ channels: int = 1,
32
+ sample_width: int = 2,
33
+ ) -> bytes:
34
+ """Wrap raw PCM bytes in a WAV header."""
35
+ data_size = len(pcm)
36
+ header = struct.pack(
37
+ "<4sI4s4sIHHIIHH4sI",
38
+ b"RIFF",
39
+ 36 + data_size,
40
+ b"WAVE",
41
+ b"fmt ",
42
+ 16,
43
+ 1, # PCM
44
+ channels,
45
+ sample_rate,
46
+ sample_rate * channels * sample_width,
47
+ channels * sample_width,
48
+ sample_width * 8,
49
+ b"data",
50
+ data_size,
51
+ )
52
+ return header + pcm
@@ -0,0 +1,524 @@
1
+ Metadata-Version: 2.4
2
+ Name: llmcode-cli
3
+ Version: 1.0.0
4
+ Summary: Open-source AI agent runtime for any LLM — production-grade coding agent with multi-layer memory, multi-agent orchestration, and defense-in-depth security
5
+ Project-URL: Homepage, https://github.com/DJFeu/llmcode
6
+ Project-URL: Repository, https://github.com/DJFeu/llmcode
7
+ Project-URL: Issues, https://github.com/DJFeu/llmcode/issues
8
+ Author: Adam Hong
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Environment :: Console
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.11
17
+ Classifier: Programming Language :: Python :: 3.12
18
+ Classifier: Topic :: Software Development :: Libraries
19
+ Requires-Python: >=3.11
20
+ Requires-Dist: click>=8.0
21
+ Requires-Dist: filelock>=3.12
22
+ Requires-Dist: httpx[http2]>=0.27
23
+ Requires-Dist: prompt-toolkit>=3.0
24
+ Requires-Dist: pydantic>=2.0
25
+ Requires-Dist: rich>=13.0
26
+ Requires-Dist: textual>=1.0
27
+ Provides-Extra: anthropic
28
+ Requires-Dist: anthropic>=0.40; extra == 'anthropic'
29
+ Provides-Extra: computer-use
30
+ Requires-Dist: mss>=9.0; extra == 'computer-use'
31
+ Requires-Dist: pillow>=10.0; extra == 'computer-use'
32
+ Requires-Dist: pyautogui>=0.9; extra == 'computer-use'
33
+ Provides-Extra: dev
34
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
35
+ Requires-Dist: pytest-cov>=5.0; extra == 'dev'
36
+ Requires-Dist: pytest>=8.0; extra == 'dev'
37
+ Requires-Dist: respx>=0.22; extra == 'dev'
38
+ Requires-Dist: ruff>=0.5; extra == 'dev'
39
+ Provides-Extra: docs
40
+ Requires-Dist: mkdocs-material>=9.5; extra == 'docs'
41
+ Requires-Dist: mkdocs>=1.6; extra == 'docs'
42
+ Provides-Extra: ide
43
+ Requires-Dist: psutil>=5.9; extra == 'ide'
44
+ Provides-Extra: telemetry
45
+ Requires-Dist: opentelemetry-api>=1.20; extra == 'telemetry'
46
+ Requires-Dist: opentelemetry-exporter-otlp>=1.20; extra == 'telemetry'
47
+ Requires-Dist: opentelemetry-sdk>=1.20; extra == 'telemetry'
48
+ Provides-Extra: voice
49
+ Requires-Dist: sounddevice>=0.5; extra == 'voice'
50
+ Provides-Extra: voice-google
51
+ Requires-Dist: google-cloud-speech>=2.0; extra == 'voice-google'
52
+ Provides-Extra: web
53
+ Requires-Dist: html2text>=2024.2; extra == 'web'
54
+ Requires-Dist: readability-lxml>=0.8; extra == 'web'
55
+ Provides-Extra: web-browser
56
+ Requires-Dist: html2text>=2024.2; extra == 'web-browser'
57
+ Requires-Dist: playwright>=1.40; extra == 'web-browser'
58
+ Requires-Dist: readability-lxml>=0.8; extra == 'web-browser'
59
+ Provides-Extra: websocket
60
+ Requires-Dist: websockets>=12.0; extra == 'websocket'
61
+ Description-Content-Type: text/markdown
62
+
63
+ # llm-code
64
+
65
+ <p align="center">
66
+ <strong>Open-source AI agent runtime for any LLM</strong><br>
67
+ Production-grade coding agent with Claude Code-level architecture — your model, your hardware, zero vendor lock-in
68
+ </p>
69
+
70
+ <p align="center">
71
+ <a href="#quick-start">Quick Start</a> ·
72
+ <a href="#why-llm-code">Why llm-code</a> ·
73
+ <a href="#features">Features</a> ·
74
+ <a href="#marketplace">Marketplace</a> ·
75
+ <a href="#configuration">Configuration</a> ·
76
+ <a href="#architecture">Architecture</a> ·
77
+ <a href="#contributing">Contributing</a>
78
+ </p>
79
+
80
+ <p align="center">
81
+ <img src="https://img.shields.io/badge/python-3.11+-blue" alt="Python 3.11+">
82
+ <img src="https://img.shields.io/badge/tests-2861%20passing-brightgreen" alt="Tests">
83
+ <img src="https://img.shields.io/badge/license-MIT-green" alt="MIT License">
84
+ </p>
85
+
86
+ ---
87
+
88
+ ## Why llm-code?
89
+
90
+ Most AI coding tools lock you into a single provider. **llm-code doesn't.**
91
+
92
+ Run the same agent experience with a free local model on your own GPU, or with any cloud API. Switch between them with one config change. No API key required for local models.
93
+
94
+ ```
95
+ ██╗ ██╗ ███╗ ███╗
96
+ ██║ ██║ ████╗ ████║
97
+ ██║ ██║ ██╔████╔██║
98
+ ██║ ██║ ██║╚██╔╝██║
99
+ ███████╗ ███████╗ ██║ ╚═╝ ██║
100
+ ╚══════╝ ╚══════╝ ╚═╝ ╚═╝
101
+ ██████╗ ██████╗ ██████╗ ███████╗
102
+ ██╔════╝ ██╔═══██╗ ██╔══██╗ ██╔════╝
103
+ ██║ ██║ ██║ ██║ ██║ █████╗
104
+ ██║ ██║ ██║ ██║ ██║ ██╔══╝
105
+ ╚██████╗ ╚██████╔╝ ██████╔╝ ███████╗
106
+ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝
107
+ ```
108
+
109
+ **Not just a CLI tool** — a complete AI Agent Runtime with:
110
+
111
+ - **ReAct engine** with 5-stage turn loop and streaming tool execution
112
+ - **7-layer error recovery** that self-heals instead of crashing
113
+ - **5-layer memory system** with governance, working, project, task, and summary memory
114
+ - **Multi-agent orchestration** with coordinator pattern and inter-agent messaging
115
+ - **Defense-in-depth security** with 21-point bash checks and sensitive file protection
116
+
117
+ ## Quick Start
118
+
119
+ ```bash
120
+ pip install llm-code
121
+ ```
122
+
123
+ **With a local model (zero cost):**
124
+
125
+ ```bash
126
+ mkdir -p ~/.llm-code
127
+ cat > ~/.llm-code/config.json << 'EOF'
128
+ {
129
+ "model": "qwen3.5",
130
+ "provider": {
131
+ "base_url": "http://localhost:8000/v1"
132
+ }
133
+ }
134
+ EOF
135
+
136
+ llm-code
137
+ ```
138
+
139
+ **With a cloud API:**
140
+
141
+ ```bash
142
+ cat > ~/.llm-code/config.json << 'EOF'
143
+ {
144
+ "model": "gpt-4o",
145
+ "provider": {
146
+ "base_url": "https://api.openai.com/v1",
147
+ "api_key_env": "OPENAI_API_KEY"
148
+ }
149
+ }
150
+ EOF
151
+
152
+ llm-code
153
+ ```
154
+
155
+ ### Modes
156
+
157
+ ```bash
158
+ llm-code # Default: Fullscreen TUI (Python Textual)
159
+ llm-code --provider ollama # Auto-detect Ollama + interactive model selector
160
+ llm-code --serve --port 8765 # Remote WebSocket server
161
+ llm-code --connect host:8765 # Connect to remote agent
162
+ llm-code --ssh user@host # SSH tunnel + auto-connect
163
+ llm-code --replay <file> # Replay a recorded session
164
+ llm-code --resume # Resume from checkpoint
165
+ ```
166
+
167
+ ### Optional Features
168
+
169
+ ```bash
170
+ pip install llm-code[voice] # Voice input via STT
171
+ pip install llm-code[computer-use] # GUI automation
172
+ pip install llm-code[ide] # IDE integration
173
+ pip install llm-code[telemetry] # OpenTelemetry tracing
174
+ ```
175
+
176
+ ---
177
+
178
+ ## Features
179
+
180
+ ### Model Freedom
181
+
182
+ | Provider | Examples | Cost |
183
+ |----------|----------|------|
184
+ | **Local (vLLM)** | Qwen 3.5, Llama, Mistral, DeepSeek | Free |
185
+ | **Local (Ollama)** | Any GGUF model | Free |
186
+ | **Local (LM Studio)** | Any supported model | Free |
187
+ | **OpenAI** | GPT-4o, GPT-4o-mini, o3 | Pay-per-use |
188
+ | **Anthropic** | Claude Opus, Sonnet, Haiku | Pay-per-use |
189
+ | **Google** | Gemini 2.5 Pro, Gemini 2.5 Flash | Pay-per-use |
190
+ | **xAI** | Grok | Pay-per-use |
191
+ | **DeepSeek** | DeepSeek V3, R1 | Pay-per-use |
192
+
193
+ - **Model aliases** — `qwen`, `gpt`, `opus`, `sonnet` resolve to full model paths
194
+ - **Model routing** — different models for sub-agents, compaction, and fallback
195
+ - **Local models get unlimited token output** — no artificial cap on localhost
196
+
197
+ ### Agent Runtime Engine
198
+
199
+ The core loop follows a 5-stage **ReAct** (Reason + Act) pattern:
200
+
201
+ 1. **Context preparation** — compress history, load relevant memory, apply HIDA filtering
202
+ 2. **Streaming model call** — send conversation + tools, stream response in real-time
203
+ 3. **Tool execution** — read-only tools run concurrently during streaming; writes wait
204
+ 4. **Attachment collection** — gather file changes, task state, memory updates
205
+ 5. **Continue or stop** — loop back if tools were called, stop if model is done
206
+
207
+ **Resilience features:**
208
+
209
+ - **7-layer error recovery** — API retry with exponential backoff, 529 overload handling (30/60/120s), native-to-XML tool fallback, reactive context compression, token limit auto-upgrade, context drain, model fallback after 3 consecutive failures
210
+ - **Speculative execution** — writes pre-execute in a tmpdir overlay before user confirms; confirm copies back, deny discards
211
+ - **4-level context compression** — snip (truncate tool results), microcompact (deduplicate reads), autocompact (AI summary), reactive (emergency on 413)
212
+ - **Cache-aware compression** — preferentially removes non-API-cached messages to preserve cache hits
213
+ - **3-tier prompt cache** — global/project/session scope boundaries for optimal API cache utilization
214
+ - **HIDA dynamic loading** — classifies input into 10 task types, loads only relevant tools/memory/governance rules
215
+
216
+ ### Tools
217
+
218
+ Built-in tools with smart permission classification:
219
+
220
+ | Category | Tools |
221
+ |----------|-------|
222
+ | **File I/O** | read_file, write_file, edit_file (with fuzzy quote matching + mtime conflict detection) |
223
+ | **Search** | glob_search, grep_search, tool_search (deferred tool discovery) |
224
+ | **Execution** | bash (21-point security), agent (sub-agents) |
225
+ | **Git** | git_status, git_diff, git_log, git_commit, git_push, git_stash, git_branch |
226
+ | **Notebook** | notebook_read, notebook_edit (Jupyter .ipynb) |
227
+ | **Computer Use** | screenshot, mouse_click, keyboard_type, key_press, scroll, mouse_drag |
228
+ | **Task Lifecycle** | task_plan, task_verify, task_close |
229
+ | **Scheduling** | cron_create, cron_list, cron_delete |
230
+ | **IDE** | ide_open, ide_diagnostics, ide_selection |
231
+ | **Swarm** | swarm_create, swarm_list, swarm_message, swarm_delete, coordinate |
232
+ | **Memory** | LSP, memory tools |
233
+
234
+ When tool count exceeds 20, non-core tools are deferred and discoverable via `tool_search`.
235
+
236
+ ### Multi-Agent Collaboration
237
+
238
+ ```bash
239
+ /swarm create coder "Implement the login API"
240
+ /swarm create tester "Write tests for the login API"
241
+ /swarm create reviewer "Review the login implementation"
242
+ /swarm coordinate "Build a complete user auth system"
243
+ ```
244
+
245
+ - **Coordinator** auto-decomposes complex tasks into subtasks and dispatches to workers
246
+ - **tmux backend** — each agent in its own terminal pane (subprocess fallback for non-tmux)
247
+ - **Mailbox** — file-based JSONL message passing between agents
248
+ - **Shared memory** — all agents access the same project memory with file locking
249
+ - **Built-in roles** — `coder`, `reviewer`, `researcher`, `tester`, or define custom roles
250
+
251
+ ### Security
252
+
253
+ **21-point Bash security:**
254
+
255
+ Injection detection, newline attack prevention, pipe chain limits, interpreter REPL blacklist, environment variable leak protection, network access control, file permission change detection, system package operation alerts, redirect overwrite detection, credential path protection, background execution detection, recursive operation warnings, multi-command chain limits, and Zsh dangerous builtin blocking.
256
+
257
+ **File protection:** Sensitive files (`.env`, SSH keys, `credentials.*`, `*.pem`) are blocked on write and warned on read.
258
+
259
+ **Sandbox detection:** Auto-detects Docker/container environments and restricts paths.
260
+
261
+ **Permission system:** 5 modes (read_only → auto_accept) with allow/deny lists, shadowed rule detection, and input-aware classification (`ls` auto-approved, `rm -rf` needs confirmation).
262
+
263
+ ### Memory System
264
+
265
+ | Layer | Scope | Lifetime | Purpose |
266
+ |-------|-------|----------|---------|
267
+ | **L0 Governance** | Project | Permanent | Rules from CLAUDE.md + .llm-code/rules/ — always loaded |
268
+ | **L1 Working** | Session | Ephemeral | In-memory scratch space for current task |
269
+ | **L2 Project** | Project | Long-term | DreamTask-consolidated knowledge with tag-based queries |
270
+ | **L3 Task** | Cross-session | Until done | PLAN/DO/VERIFY/CLOSE state machine persisted as JSON |
271
+ | **L4 Summary** | Per-session | Long-term | Conversation summaries for future reference |
272
+
273
+ **DreamTask:** On session exit, automatically consolidates conversation into structured long-term memory — files modified, decisions made, patterns learned.
274
+
275
+ **Checkpoint recovery:** Auto-saves every 60 seconds. Resume with `--resume` or `/checkpoint resume`.
276
+
277
+ ### Task Lifecycle
278
+
279
+ ```
280
+ PLAN --> DO --> VERIFY --> CLOSE --> DONE
281
+ |
282
+ [auto checks]
283
+ pass --> CLOSE
284
+ fail --> diagnostics
285
+ |-- continue (minor fix)
286
+ |-- replan (redo PLAN)
287
+ |-- escalate (ask user)
288
+ ```
289
+
290
+ - **VERIFY** runs automated checks: pytest, ruff, file existence — then LLM judges
291
+ - **Cross-session:** incomplete tasks persist and resume in the next session
292
+ - **CLOSE** writes summaries to L3 task memory and L2 project memory
293
+
294
+ ### Terminal UI
295
+
296
+ - **Fullscreen TUI** (default) — Python Textual, no Node.js required, Claude Code-style UI
297
+ - Welcome banner, markdown rendering, syntax-highlighted code blocks
298
+ - Slash command autocomplete dropdown with `Tab`/arrow navigation
299
+ - Inline `[image]` markers with `Cmd+V` paste support
300
+ - Interactive marketplace browser for skills, plugins, and MCP servers
301
+ - Tabbed `/help` modal (general / commands / custom-commands)
302
+ - ToolBlock diff view with colored +/- lines and line numbers
303
+ - Spinner with orange→red color transition on long operations
304
+ - Permission prompts with single-key y/n/a
305
+ - Cursor movement (←→, Home/End) in input bar
306
+ - **Vim mode** — full motions (hjkl, w/b/e, 0/$, gg/G, f/F/t/T), operators (d/c/y), text objects (iw, i", i())
307
+ - **Diff visualization** — colored inline diffs on every file change
308
+ - **Search** — `/search` or Ctrl+F with match highlighting
309
+ - **OSC8 hyperlinks** — clickable URLs in supporting terminals
310
+ - **Voice input** — hold-to-talk STT (Whisper, Google, Anthropic backends)
311
+ - **Extended thinking** — collapsible thinking panel with adaptive/enabled/disabled modes
312
+
313
+ ### Hook System
314
+
315
+ 6 event categories, 24 events, glob pattern matching:
316
+
317
+ | Category | Events |
318
+ |----------|--------|
319
+ | **tool** | pre_tool_use, post_tool_use, tool_error, tool_denied |
320
+ | **command** | pre_command, post_command, command_error |
321
+ | **prompt** | prompt_submit, prompt_compile, prompt_cache_hit, prompt_cache_miss |
322
+ | **agent** | agent_spawn, agent_complete, agent_error, agent_message |
323
+ | **session** | session_start, session_end, session_save, session_compact, session_dream |
324
+ | **http** | http_request, http_response, http_error, http_retry, http_fallback |
325
+
326
+ ```json
327
+ {
328
+ "hooks": [
329
+ {"event": "post_tool_use", "tool_pattern": "write_file|edit_file", "command": "ruff format {path}"},
330
+ {"event": "session.*", "command": "echo $HOOK_EVENT >> ~/agent.log", "on_error": "ignore"}
331
+ ]
332
+ }
333
+ ```
334
+
335
+ ### IDE Integration
336
+
337
+ llm-code runs a WebSocket JSON-RPC server that any IDE can connect to:
338
+
339
+ - **Open files** at specific lines in your editor
340
+ - **Read diagnostics** (lint errors, type errors) from the IDE
341
+ - **Get selection** — the agent can read your currently selected code
342
+ - **Auto-detection** — scans for running VSCode, JetBrains, Neovim, Sublime
343
+
344
+ ### Observability
345
+
346
+ - **OpenTelemetry** — spans for turns and tool executions with LLM semantic conventions
347
+ - **VCR recording** — structured JSONL event streams for debugging and replay
348
+ - **Cost tracking** — per-model pricing with cache-aware calculations and budget enforcement
349
+ - **Version check** — notifies on startup if a newer release is available
350
+
351
+ ---
352
+
353
+ ## Marketplace
354
+
355
+ Compatible with Claude Code's plugin ecosystem — skills, plugins, and MCP servers work out of the box.
356
+
357
+ ### Skills — `/skill`
358
+
359
+ ```
360
+ > brainstorming (installed)
361
+ test-driven-development (installed)
362
+ code-review-fix [ClawHub]
363
+ security-check [npm]
364
+ ```
365
+
366
+ Sources: **ClawHub.ai**, **npm**, **local plugins**
367
+
368
+ ### Plugins — `/plugin`
369
+
370
+ ```bash
371
+ /plugin install obra/superpowers
372
+ ```
373
+
374
+ Sources: **Official** (Claude Code), **ClawHub**, **npm**, **GitHub**
375
+
376
+ ### MCP Servers — `/mcp`
377
+
378
+ ```json
379
+ {
380
+ "mcpServers": {
381
+ "github": {
382
+ "command": "npx",
383
+ "args": ["-y", "@modelcontextprotocol/server-github"],
384
+ "env": {"GITHUB_TOKEN": "ghp_xxx"}
385
+ }
386
+ }
387
+ }
388
+ ```
389
+
390
+ Supports **stdio**, **HTTP**, **SSE**, and **WebSocket** transports with health monitoring and auto-reconnection.
391
+
392
+ ---
393
+
394
+ ## Configuration
395
+
396
+ ### Config Locations (precedence low -> high)
397
+
398
+ 1. `~/.llm-code/config.json` — User global
399
+ 2. `.llm-code/config.json` — Project
400
+ 3. `.llm-code/config.local.json` — Local (gitignored)
401
+ 4. CLI flags / env vars — Highest
402
+
403
+ ### Example Config
404
+
405
+ ```json
406
+ {
407
+ "model": "qwen3.5-122b",
408
+ "model_aliases": {
409
+ "qwen": "/models/Qwen3.5-122B-A10B-int4-AutoRound",
410
+ "fast": "qwen3.5-7b",
411
+ "gpt": "gpt-4o"
412
+ },
413
+ "provider": {
414
+ "base_url": "http://localhost:8000/v1",
415
+ "api_key_env": "LLM_API_KEY",
416
+ "timeout": 120
417
+ },
418
+ "permissions": {
419
+ "mode": "prompt",
420
+ "allow_tools": ["read_file", "glob_search", "grep_search"]
421
+ },
422
+ "model_routing": {
423
+ "sub_agent": "qwen3.5-32b",
424
+ "compaction": "qwen3.5-7b",
425
+ "fallback": "qwen3.5-7b"
426
+ },
427
+ "max_budget_usd": 5.00,
428
+ "thinking": { "mode": "adaptive", "budget_tokens": 10000 },
429
+ "dream": { "enabled": true, "min_turns": 3 },
430
+ "hida": { "enabled": true },
431
+ "hooks": [
432
+ {"event": "post_tool_use", "tool_pattern": "write_*|edit_*", "command": "ruff format {path}"}
433
+ ],
434
+ "mcpServers": {}
435
+ }
436
+ ```
437
+
438
+ ### Commands
439
+
440
+ | Command | Description |
441
+ |---------|-------------|
442
+ | `/help` | Show all commands |
443
+ | `/model <name>` | Switch model |
444
+ | `/config` | View/set runtime configuration |
445
+ | `/session` | Session management |
446
+ | `/skill` | Browse & install skills |
447
+ | `/plugin` | Browse & install plugins |
448
+ | `/mcp` | Browse & install MCP servers |
449
+ | `/memory` | View project memory |
450
+ | `/memory consolidate` | Run DreamTask now |
451
+ | `/memory history` | View consolidation history |
452
+ | `/task` | Task lifecycle (new/verify/close) |
453
+ | `/swarm` | Multi-agent (create/coordinate/stop) |
454
+ | `/search <query>` | Search conversation history |
455
+ | `/thinking` | Toggle thinking mode |
456
+ | `/vim` | Toggle vim keybindings |
457
+ | `/voice` | Toggle voice input |
458
+ | `/image` | Paste/load an image |
459
+ | `/cron` | Scheduled tasks |
460
+ | `/vcr` | Session recording |
461
+ | `/checkpoint` | Session checkpoints |
462
+ | `/ide` | IDE connection status |
463
+ | `/lsp` | Language Server Protocol status |
464
+ | `/index` | Codebase indexing |
465
+ | `/hida` | HIDA classification info |
466
+ | `/cd <path>` | Change working directory |
467
+ | `/undo` | Undo last file change |
468
+ | `/cancel` | Cancel running operation |
469
+ | `/cost` | Token usage + cost |
470
+ | `/budget <n>` | Set token budget |
471
+ | `/clear` | Clear conversation |
472
+ | `/exit`, `/quit` | Quit |
473
+
474
+ ---
475
+
476
+ ## Architecture
477
+
478
+ ```
479
+ llm_code/ 21,000 lines Python
480
+ ├── api/ Provider abstraction (OpenAI-compat + Anthropic)
481
+ ├── cli/ CLI entry point + Textual TUI launcher
482
+ ├── runtime/ ReAct engine, memory layers, compression, hooks,
483
+ │ permissions, checkpoint, dream, VCR, speculative
484
+ │ execution, telemetry, file protection, sandbox
485
+ ├── tools/ 30+ tools with deferred loading + security
486
+ ├── task/ PLAN/DO/VERIFY/CLOSE state machine
487
+ ├── hida/ Dynamic context loading (10-type classifier)
488
+ ├── mcp/ MCP client (4 transports) + OAuth + health checks
489
+ ├── marketplace/ Plugin system + ClawHub integration
490
+ ├── lsp/ Language Server Protocol client
491
+ ├── remote/ WebSocket server/client + SSH proxy
492
+ ├── vim/ Vim engine (motions, operators, text objects)
493
+ ├── voice/ STT (Whisper, Google, Anthropic backends)
494
+ ├── computer_use/ GUI automation (screenshot + input control)
495
+ ├── cron/ Task scheduler (cron parser + async poller)
496
+ ├── ide/ IDE bridge (WebSocket JSON-RPC server)
497
+ ├── swarm/ Multi-agent (coordinator, tmux/subprocess, mailbox)
498
+ ├── utils/ Notebook, diff, hyperlinks, search, text normalize
499
+ tests/ 2,861 tests across 170+ test files
500
+ ```
501
+
502
+ ---
503
+
504
+ ## Contributing
505
+
506
+ ```bash
507
+ git clone https://github.com/DJFeu/llm-code
508
+ cd llm-code
509
+ python -m venv .venv && source .venv/bin/activate
510
+ pip install -e ".[dev]"
511
+ pytest # 2,861 tests
512
+ ruff check llm_code/ # lint
513
+ ```
514
+
515
+ ### Requirements
516
+
517
+ - Python 3.11+
518
+ - An LLM server (vLLM, Ollama, LM Studio, or cloud API)
519
+
520
+ ---
521
+
522
+ ## License
523
+
524
+ MIT