llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212)
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,116 @@
1
+ """SandboxDetector — detect container/sandbox environments and restrict paths."""
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+
6
+
7
+ def _has_dockerenv() -> bool:
8
+ """Return True if the Docker sentinel file exists."""
9
+ return Path("/.dockerenv").exists()
10
+
11
+
12
+ def _cgroup_indicates_container() -> bool:
13
+ """Return True if /proc/1/cgroup suggests a container runtime."""
14
+ cgroup_path = Path("/proc/1/cgroup")
15
+ if not cgroup_path.exists():
16
+ return False
17
+ try:
18
+ content = cgroup_path.read_text(errors="replace")
19
+ container_markers = ("docker", "kubepods", "containerd", "lxc", "/ecs/")
20
+ return any(marker in content for marker in container_markers)
21
+ except OSError:
22
+ return False
23
+
24
+
25
def _detect_sandbox_type() -> str:
    """Return a string describing the detected sandbox type, or 'none'."""
    if _has_dockerenv():
        return "docker"
    if not _cgroup_indicates_container():
        return "none"

    # A container was detected — inspect /proc/1/cgroup to narrow the runtime.
    specific_markers = (
        ("kubepods", "kubernetes"),
        ("containerd", "containerd"),
        ("lxc", "lxc"),
        ("/ecs/", "ecs"),
    )
    try:
        content = Path("/proc/1/cgroup").read_text(errors="replace")
    except OSError:
        return "container"
    for marker, label in specific_markers:
        if marker in content:
            return label
    return "container"
46
+
47
+
48
def is_sandboxed() -> bool:
    """Report whether the process appears to run inside a container/sandbox."""
    sandbox_type = _detect_sandbox_type()
    return sandbox_type != "none"
51
+
52
+
53
def get_sandbox_info() -> dict:
    """Return a dict describing the current sandbox environment.

    Keys
    ----
    sandboxed : bool
        Whether a sandbox was detected.
    type : str
        One of ``"docker"``, ``"kubernetes"``, ``"containerd"``, ``"lxc"``,
        ``"ecs"``, ``"container"``, or ``"none"``.
    restrictions : list[str]
        Human-readable descriptions of active restrictions.
    """
    sandbox_type = _detect_sandbox_type()
    info: dict = {
        "sandboxed": sandbox_type != "none",
        "type": sandbox_type,
        "restrictions": [],
    }
    if info["sandboxed"]:
        info["restrictions"] = [
            "Network access may be restricted",
            "Host filesystem is not directly accessible",
            "Privileged operations are not permitted",
        ]
    return info
82
+
83
+
84
def restrict_paths(base_dir: Path) -> list[Path]:
    """Return a list of absolute paths the agent should NOT access outside *base_dir*.

    These paths represent sensitive locations on the host filesystem
    (credentials, system account files, service-account tokens).  Any
    sensitive path that resolves to a location inside *base_dir* is
    excluded from the result.
    """
    home = Path.home()
    sensitive: list[Path] = [
        home / ".ssh",
        home / ".aws",
        home / ".config" / "gcloud",
        home / ".gnupg",
        home / ".netrc",
        home / ".pgpass",
        Path("/etc/passwd"),
        Path("/etc/shadow"),
        Path("/etc/sudoers"),
        Path("/root"),
        Path("/var/run/secrets"),  # Kubernetes service-account tokens
    ]
    try:
        base_resolved = base_dir.resolve()
    except (OSError, RuntimeError):
        # Narrowed from a blanket `except Exception`: resolve() can raise
        # OSError (and RuntimeError for symlink loops on older Pythons).
        base_resolved = base_dir
    # Keep only paths that are NOT inside base_dir (is_relative_to replaces
    # the per-item try/except ValueError around relative_to).
    return [p for p in sensitive if not p.resolve().is_relative_to(base_resolved)]
@@ -0,0 +1,268 @@
1
+ """Session management: immutable Session dataclass, SessionManager for persistence."""
2
+ from __future__ import annotations
3
+
4
+ import dataclasses
5
+ import json
6
+ import uuid
7
+ from datetime import datetime, timezone
8
+ from pathlib import Path
9
+
10
+ from llm_code.api.types import (
11
+ ContentBlock,
12
+ ImageBlock,
13
+ Message,
14
+ TextBlock,
15
+ TokenUsage,
16
+ ToolResultBlock,
17
+ ToolUseBlock,
18
+ )
19
+
20
+
21
+ # ---------------------------------------------------------------------------
22
+ # Serialization helpers
23
+ # ---------------------------------------------------------------------------
24
+
25
def _block_to_dict(block: ContentBlock) -> dict:
    """Serialize one content block to a JSON-compatible dict keyed by "type"."""
    if isinstance(block, TextBlock):
        payload = {"type": "text", "text": block.text}
    elif isinstance(block, ToolUseBlock):
        payload = {"type": "tool_use", "id": block.id, "name": block.name, "input": block.input}
    elif isinstance(block, ToolResultBlock):
        payload = {
            "type": "tool_result",
            "tool_use_id": block.tool_use_id,
            "content": block.content,
            "is_error": block.is_error,
        }
    elif isinstance(block, ImageBlock):
        payload = {"type": "image", "media_type": block.media_type, "data": block.data}
    else:
        raise ValueError(f"Unknown block type: {type(block)}")
    return payload
40
+
41
+
42
+ def _dict_to_block(d: dict) -> ContentBlock:
43
+ t = d["type"]
44
+ if t == "text":
45
+ return TextBlock(text=d["text"])
46
+ if t == "tool_use":
47
+ return ToolUseBlock(id=d["id"], name=d["name"], input=d["input"])
48
+ if t == "tool_result":
49
+ return ToolResultBlock(
50
+ tool_use_id=d["tool_use_id"],
51
+ content=d["content"],
52
+ is_error=d.get("is_error", False),
53
+ )
54
+ if t == "image":
55
+ return ImageBlock(media_type=d["media_type"], data=d["data"])
56
+ raise ValueError(f"Unknown block type: {t}")
57
+
58
+
59
def _message_to_dict(msg: Message) -> dict:
    """Serialize a Message (role + content blocks) to a JSON-compatible dict."""
    serialized_blocks = [_block_to_dict(part) for part in msg.content]
    return {"role": msg.role, "content": serialized_blocks}
64
+
65
+
66
def _dict_to_message(d: dict) -> Message:
    """Rebuild a Message from its dict form; content becomes a tuple of blocks."""
    blocks = tuple(_dict_to_block(item) for item in d["content"])
    return Message(role=d["role"], content=blocks)
71
+
72
+
73
+ # ---------------------------------------------------------------------------
74
+ # Session
75
+ # ---------------------------------------------------------------------------
76
+
77
@dataclasses.dataclass(frozen=True)
class Session:
    """An immutable conversation session.

    Every mutator returns a NEW Session (the dataclass is frozen); the
    original instance is never modified.  Timestamps are ISO-8601 strings
    in UTC.
    """

    id: str  # 8-char hex identifier (see create())
    messages: tuple[Message, ...]
    created_at: str  # ISO-8601 UTC timestamp
    updated_at: str  # ISO-8601 UTC timestamp, refreshed by every mutator
    total_usage: TokenUsage  # accumulated token counts for the session
    project_path: Path
    name: str = ""
    tags: tuple[str, ...] = ()

    @staticmethod
    def _utc_now() -> str:
        """Return the current UTC time as an ISO-8601 string.

        Hoisted here because every mutator previously repeated the same
        ``datetime.now(timezone.utc).isoformat()`` expression.
        """
        return datetime.now(timezone.utc).isoformat()

    @classmethod
    def create(cls, project_path: Path) -> "Session":
        """Create a new empty session with a unique 8-char hex ID."""
        now = cls._utc_now()
        return cls(
            id=uuid.uuid4().hex[:8],
            messages=(),
            created_at=now,
            updated_at=now,
            total_usage=TokenUsage(input_tokens=0, output_tokens=0),
            project_path=project_path,
        )

    def add_message(self, msg: Message) -> "Session":
        """Return a new Session with the message appended (immutable)."""
        return dataclasses.replace(
            self,
            messages=self.messages + (msg,),
            updated_at=self._utc_now(),
        )

    def rename(self, name: str) -> "Session":
        """Return a new Session with the given name (immutable)."""
        return dataclasses.replace(self, name=name, updated_at=self._utc_now())

    def add_tags(self, *tags: str) -> "Session":
        """Return a new Session with tags merged (deduped, order-preserving, immutable)."""
        # dict.fromkeys preserves first-seen order while removing duplicates.
        merged = tuple(dict.fromkeys(self.tags + tags))
        return dataclasses.replace(self, tags=merged, updated_at=self._utc_now())

    def update_usage(self, usage: TokenUsage) -> "Session":
        """Return a new Session with accumulated token usage (immutable)."""
        new_usage = TokenUsage(
            input_tokens=self.total_usage.input_tokens + usage.input_tokens,
            output_tokens=self.total_usage.output_tokens + usage.output_tokens,
        )
        return dataclasses.replace(self, total_usage=new_usage, updated_at=self._utc_now())

    def estimated_tokens(self) -> int:
        """Rough token estimate: total character count divided by 4.

        NOTE(review): for ToolResultBlock this takes ``len(block.content)``;
        if content can be a list of blocks this counts elements rather than
        characters — confirm against the ContentBlock definitions.
        """
        char_count = 0
        for msg in self.messages:
            for block in msg.content:
                if isinstance(block, TextBlock):
                    char_count += len(block.text)
                elif isinstance(block, ToolResultBlock):
                    char_count += len(block.content)
                elif isinstance(block, ToolUseBlock):
                    char_count += len(block.name) + len(str(block.input))
        return char_count // 4

    def to_dict(self) -> dict:
        """Serialize the session (including all messages) to a JSON-compatible dict."""
        return {
            "id": self.id,
            "messages": [_message_to_dict(m) for m in self.messages],
            "created_at": self.created_at,
            "updated_at": self.updated_at,
            "total_usage": {
                "input_tokens": self.total_usage.input_tokens,
                "output_tokens": self.total_usage.output_tokens,
            },
            "project_path": str(self.project_path),
            "name": self.name,
            "tags": list(self.tags),
        }

    @classmethod
    def from_dict(cls, data: dict) -> "Session":
        """Rebuild a Session from the dict produced by :meth:`to_dict`.

        ``name`` and ``tags`` default when absent, so older session files
        without those keys still load.
        """
        return cls(
            id=data["id"],
            messages=tuple(_dict_to_message(m) for m in data["messages"]),
            created_at=data["created_at"],
            updated_at=data["updated_at"],
            total_usage=TokenUsage(
                input_tokens=data["total_usage"]["input_tokens"],
                output_tokens=data["total_usage"]["output_tokens"],
            ),
            project_path=Path(data["project_path"]),
            name=data.get("name", ""),
            tags=tuple(data.get("tags", ())),
        )
174
+
175
+
176
+ # ---------------------------------------------------------------------------
177
+ # SessionSummary
178
+ # ---------------------------------------------------------------------------
179
+
180
@dataclasses.dataclass(frozen=True)
class SessionSummary:
    """Lightweight listing view of a stored session — no messages are loaded.

    Built by SessionManager.list_sessions() straight from the JSON on disk.
    """

    id: str
    project_path: Path
    created_at: str
    message_count: int  # number of messages in the stored session file
    name: str = ""
    tags: tuple[str, ...] = ()
188
+
189
+
190
+ # ---------------------------------------------------------------------------
191
+ # SessionManager
192
+ # ---------------------------------------------------------------------------
193
+
194
class SessionManager:
    """Persists Session objects as ``<id>.json`` files inside one directory."""

    def __init__(self, session_dir: Path) -> None:
        """Remember *session_dir* and create it (with parents) if missing."""
        self._session_dir = session_dir
        session_dir.mkdir(parents=True, exist_ok=True)

    def _path_for(self, session_id: str) -> Path:
        """Return the JSON file path for *session_id* (dedupes path building)."""
        return self._session_dir / f"{session_id}.json"

    def save(self, session: Session) -> Path:
        """Persist session as JSON; returns the file path."""
        path = self._path_for(session.id)
        path.write_text(json.dumps(session.to_dict(), indent=2), encoding="utf-8")
        return path

    def load(self, session_id: str) -> Session:
        """Load session by ID; raises FileNotFoundError if missing."""
        path = self._path_for(session_id)
        # EAFP: read directly instead of exists()-then-read, which raced with
        # concurrent deletion (TOCTOU).
        try:
            raw = path.read_text(encoding="utf-8")
        except FileNotFoundError:
            raise FileNotFoundError(f"Session '{session_id}' not found at {path}") from None
        return Session.from_dict(json.loads(raw))

    def list_sessions(self) -> list[SessionSummary]:
        """Return session summaries sorted by modification time (most recent first)."""
        files = sorted(
            self._session_dir.glob("*.json"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )
        summaries: list[SessionSummary] = []
        for f in files:
            try:
                data = json.loads(f.read_text(encoding="utf-8"))
                summaries.append(
                    SessionSummary(
                        id=data["id"],
                        project_path=Path(data["project_path"]),
                        created_at=data["created_at"],
                        message_count=len(data["messages"]),
                        name=data.get("name", ""),
                        tags=tuple(data.get("tags", ())),
                    )
                )
            except (json.JSONDecodeError, KeyError, OSError):
                # OSError added: skip files deleted/unreadable mid-scan, not
                # just malformed JSON or missing keys.
                continue
        return summaries

    def rename(self, session_id: str, name: str) -> Session:
        """Rename a session and persist the change; returns updated Session."""
        session = self.load(session_id)
        renamed = session.rename(name)
        self.save(renamed)
        return renamed

    def delete(self, session_id: str) -> bool:
        """Delete a session file; returns True if deleted, False if not found."""
        # EAFP: exists()-then-unlink raced with concurrent deletion (TOCTOU).
        try:
            self._path_for(session_id).unlink()
        except FileNotFoundError:
            return False
        return True

    def search(self, query: str) -> list[SessionSummary]:
        """Return summaries whose name, project path, or tags contain query (case-insensitive)."""
        query_lower = query.lower()
        return [
            s for s in self.list_sessions()
            if query_lower in s.name.lower()
            or query_lower in str(s.project_path).lower()
            or any(query_lower in t.lower() for t in s.tags)
        ]

    def get_by_name(self, name: str) -> Session | None:
        """Return the first Session whose name matches exactly, or None."""
        for summary in self.list_sessions():
            if summary.name == name:
                return self.load(summary.id)
        return None
@@ -0,0 +1,61 @@
1
+ """Skill dependency resolver — checks and auto-installs missing skill dependencies."""
2
+ from __future__ import annotations
3
+
4
+ import logging
5
+ from packaging.version import Version
6
+
7
+ from llm_code.runtime.skills import Skill, SkillDependency
8
+
9
+ _log = logging.getLogger(__name__)
10
+
11
+
12
+ def _get_llm_code_version() -> str:
13
+ """Return the installed llm-code version."""
14
+ try:
15
+ from importlib.metadata import version as pkg_version
16
+ return pkg_version("llm-code")
17
+ except Exception:
18
+ return "0.0.0"
19
+
20
+
21
class SkillResolver:
    """Check and resolve skill dependencies."""

    def __init__(
        self,
        installed_skills: set[str],
        installer: object,
        max_depth: int = 3,
    ) -> None:
        """Remember the installed-skill names, the installer, and the depth limit."""
        self._installed = installed_skills
        self._installer = installer
        self._max_depth = max_depth

    def find_missing(self, skill: Skill) -> list[SkillDependency]:
        """Return list of dependencies not currently installed."""
        missing: list[SkillDependency] = []
        for dependency in skill.depends:
            if dependency.name not in self._installed:
                missing.append(dependency)
        return missing

    def _check_cycle(self, name: str, visited: frozenset[str]) -> None:
        """Raise ValueError if name is already in the visited set."""
        if name not in visited:
            return
        raise ValueError(f"Circular dependency detected: '{name}' already in chain {sorted(visited)}")

    def _check_depth(self, depth: int) -> None:
        """Raise ValueError if depth exceeds max_depth."""
        if depth <= self._max_depth:
            return
        raise ValueError(f"Dependency depth {depth} exceeds max depth {self._max_depth}")

    def check_min_version(self, skill: Skill) -> list[str]:
        """Check if llm-code version satisfies skill's min_version. Return warnings."""
        required = skill.min_version
        if not required:
            return []
        current = _get_llm_code_version()
        try:
            outdated = Version(current) < Version(required)
        except Exception:
            return [f"Could not compare versions: current={current}, required={required}"]
        if outdated:
            return [
                f"Skill '{skill.name}' requires llm-code >= {required}, "
                f"but current version is {current}"
            ]
        return []
@@ -0,0 +1,133 @@
1
+ """Skills system: load and classify SKILL.md files into SkillSet."""
2
+ from __future__ import annotations
3
+
4
+ import re
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ import yaml
9
+
10
# Splits a SKILL.md file into "---\n<yaml frontmatter>\n---\n<body>";
# group(1) is the raw frontmatter, group(2) the markdown body.
_FRONTMATTER_RE = re.compile(r"^---\s*\n(.*?)\n---\s*\n(.*)", re.DOTALL)
11
+
12
+
13
@dataclass(frozen=True)
class SkillDependency:
    """A dependency on another skill, as declared in SKILL.md frontmatter."""

    name: str
    registry: str = ""  # empty = search all registries
19
+
20
+
21
@dataclass(frozen=True)
class Skill:
    """One skill parsed from a SKILL.md file (frontmatter metadata + body)."""

    name: str
    description: str
    content: str
    auto: bool = False
    trigger: str = ""
    version: str = ""
    tags: tuple[str, ...] = ()
    model: str = ""
    depends: tuple[SkillDependency, ...] = ()
    min_version: str = ""

    def __post_init__(self) -> None:
        """Default the trigger to the skill's name when none was provided."""
        # The dataclass is frozen, so mutation must go through object.__setattr__.
        if self.trigger == "":
            object.__setattr__(self, "trigger", self.name)
41
+
42
+
43
@dataclass(frozen=True)
class SkillSet:
    """Container for classified skills."""

    # Skills whose frontmatter set auto (see SkillLoader.load_from_dirs).
    auto_skills: tuple[Skill, ...]
    # All remaining skills, invoked explicitly by command.
    command_skills: tuple[Skill, ...]
49
+
50
+
51
class SkillLoader:
    """Parses SKILL.md files into Skill objects and classifies them."""

    @staticmethod
    def load_skill(path: Path) -> Skill:
        """Parse one SKILL.md (YAML frontmatter + markdown body) into a Skill.

        Raises ValueError when the file does not match the frontmatter layout.
        """
        raw = path.read_text(encoding="utf-8")
        match = _FRONTMATTER_RE.match(raw)
        if match is None:
            raise ValueError(f"Invalid SKILL.md format: {path}")

        frontmatter_raw = match.group(1)
        content = match.group(2)

        try:
            meta = yaml.safe_load(frontmatter_raw) or {}
        except yaml.YAMLError:
            # Unparseable frontmatter degrades to empty metadata.
            meta = {}

        # "auto" accepts a YAML bool or the common truthy strings.
        auto_raw = meta.get("auto", False)
        is_auto = auto_raw is True or str(auto_raw).lower() in ("true", "yes", "1")

        raw_tags = meta.get("tags", [])
        tag_tuple = tuple(str(t) for t in raw_tags) if isinstance(raw_tags, list) else ()

        # Dependencies are dicts with at least a "name"; anything else is ignored.
        raw_depends = meta.get("depends", [])
        dep_list: list[SkillDependency] = []
        if isinstance(raw_depends, list):
            for entry in raw_depends:
                if isinstance(entry, dict) and "name" in entry:
                    dep_list.append(
                        SkillDependency(
                            name=str(entry["name"]),
                            registry=str(entry.get("registry", "")),
                        )
                    )

        return Skill(
            name=str(meta.get("name", "")),
            description=str(meta.get("description", "")),
            content=content,
            auto=is_auto,
            trigger=str(meta.get("trigger", "")),
            version=str(meta.get("version", "")),
            tags=tag_tuple,
            model=str(meta.get("model", "")),
            depends=tuple(dep_list),
            min_version=str(meta.get("min_version", "")),
        )

    @staticmethod
    def load_from_dirs(dirs: list[Path]) -> SkillSet:
        """Scan each directory for subdirs containing SKILL.md and classify them."""
        auto_skills: list[Skill] = []
        command_skills: list[Skill] = []

        for root in dirs:
            if not root.is_dir():
                continue
            for candidate in sorted(root.iterdir()):
                if not candidate.is_dir():
                    continue
                manifest = candidate / "SKILL.md"
                if not manifest.is_file():
                    continue
                # A ".disabled" sentinel file next to SKILL.md turns the skill off.
                if (candidate / ".disabled").exists():
                    continue
                loaded = SkillLoader.load_skill(manifest)
                (auto_skills if loaded.auto else command_skills).append(loaded)

        return SkillSet(
            auto_skills=tuple(auto_skills),
            command_skills=tuple(command_skills),
        )
@@ -0,0 +1,75 @@
1
+ """Speculative executor: pre-runs a tool in an OverlayFS before user confirmation."""
2
+ from __future__ import annotations
3
+
4
+ from pathlib import Path
5
+ from typing import TYPE_CHECKING
6
+
7
+ from llm_code.runtime.overlay import OverlayFS
8
+ from llm_code.tools.base import ToolResult
9
+
10
+ if TYPE_CHECKING:
11
+ from llm_code.tools.base import Tool
12
+
13
+
14
class SpeculativeExecutor:
    """Pre-execute a tool against a Copy-on-Write overlay.

    Usage::

        executor = SpeculativeExecutor(tool, args, base_dir=cwd, session_id="abc")
        result = executor.pre_execute()  # runs tool in overlay, real FS untouched
        # … present result + pending changes to user …
        executor.confirm()  # commit overlay → real FS
        # or
        executor.deny()  # discard overlay, nothing written

    The ``result`` returned by ``pre_execute()`` is cached; repeated calls
    return the same object without re-running the tool.
    """

    def __init__(
        self,
        tool: "Tool",
        args: dict,
        base_dir: Path,
        session_id: str,
    ) -> None:
        """Create an OverlayFS scoped to *base_dir* for this session."""
        self._tool = tool
        self._args = args
        self.overlay = OverlayFS(base_dir=base_dir, session_id=session_id)
        self._result: ToolResult | None = None
        self._executed = False

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def pre_execute(self) -> ToolResult:
        """Run the tool inside the overlay (idempotent; returns cached result).

        The cached value is returned as-is on repeated calls.  (Previously
        the cached path guarded with ``assert self._result is not None`` —
        an anti-pattern: stripped under ``python -O``, and inconsistent with
        the first call, which returned the result unchecked.)
        """
        if not self._executed:
            # The tool writes through the overlay, so the real FS stays untouched.
            self._result = self._tool.execute(self._args, overlay=self.overlay)  # type: ignore[call-arg]
            self._executed = True
        return self._result  # type: ignore[return-value]

    def confirm(self) -> None:
        """Commit the overlay to the real filesystem.

        Raises
        ------
        RuntimeError
            If ``pre_execute()`` has not been called yet.
        """
        if not self._executed:
            raise RuntimeError("call pre_execute() before confirm()")
        self.overlay.commit()

    def deny(self) -> None:
        """Discard the overlay; nothing is written to the real filesystem."""
        self.overlay.discard()

    def list_pending_changes(self) -> list[Path]:
        """Return the list of real paths that would be written on confirm()."""
        return self.overlay.list_pending()