llmcode-cli 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (212) hide show
  1. llm_code/__init__.py +2 -0
  2. llm_code/analysis/__init__.py +6 -0
  3. llm_code/analysis/cache.py +33 -0
  4. llm_code/analysis/engine.py +256 -0
  5. llm_code/analysis/go_rules.py +114 -0
  6. llm_code/analysis/js_rules.py +84 -0
  7. llm_code/analysis/python_rules.py +311 -0
  8. llm_code/analysis/rules.py +140 -0
  9. llm_code/analysis/rust_rules.py +108 -0
  10. llm_code/analysis/universal_rules.py +111 -0
  11. llm_code/api/__init__.py +0 -0
  12. llm_code/api/client.py +90 -0
  13. llm_code/api/errors.py +73 -0
  14. llm_code/api/openai_compat.py +390 -0
  15. llm_code/api/provider.py +35 -0
  16. llm_code/api/sse.py +52 -0
  17. llm_code/api/types.py +140 -0
  18. llm_code/cli/__init__.py +0 -0
  19. llm_code/cli/commands.py +70 -0
  20. llm_code/cli/image.py +122 -0
  21. llm_code/cli/render.py +214 -0
  22. llm_code/cli/status_line.py +79 -0
  23. llm_code/cli/streaming.py +92 -0
  24. llm_code/cli/tui_main.py +220 -0
  25. llm_code/computer_use/__init__.py +11 -0
  26. llm_code/computer_use/app_detect.py +49 -0
  27. llm_code/computer_use/app_tier.py +57 -0
  28. llm_code/computer_use/coordinator.py +99 -0
  29. llm_code/computer_use/input_control.py +71 -0
  30. llm_code/computer_use/screenshot.py +93 -0
  31. llm_code/cron/__init__.py +13 -0
  32. llm_code/cron/parser.py +145 -0
  33. llm_code/cron/scheduler.py +135 -0
  34. llm_code/cron/storage.py +126 -0
  35. llm_code/enterprise/__init__.py +1 -0
  36. llm_code/enterprise/audit.py +59 -0
  37. llm_code/enterprise/auth.py +26 -0
  38. llm_code/enterprise/oidc.py +95 -0
  39. llm_code/enterprise/rbac.py +65 -0
  40. llm_code/harness/__init__.py +5 -0
  41. llm_code/harness/config.py +33 -0
  42. llm_code/harness/engine.py +129 -0
  43. llm_code/harness/guides.py +41 -0
  44. llm_code/harness/sensors.py +68 -0
  45. llm_code/harness/templates.py +84 -0
  46. llm_code/hida/__init__.py +1 -0
  47. llm_code/hida/classifier.py +187 -0
  48. llm_code/hida/engine.py +49 -0
  49. llm_code/hida/profiles.py +95 -0
  50. llm_code/hida/types.py +28 -0
  51. llm_code/ide/__init__.py +1 -0
  52. llm_code/ide/bridge.py +80 -0
  53. llm_code/ide/detector.py +76 -0
  54. llm_code/ide/server.py +169 -0
  55. llm_code/logging.py +29 -0
  56. llm_code/lsp/__init__.py +0 -0
  57. llm_code/lsp/client.py +298 -0
  58. llm_code/lsp/detector.py +42 -0
  59. llm_code/lsp/manager.py +56 -0
  60. llm_code/lsp/tools.py +288 -0
  61. llm_code/marketplace/__init__.py +0 -0
  62. llm_code/marketplace/builtin_registry.py +102 -0
  63. llm_code/marketplace/installer.py +162 -0
  64. llm_code/marketplace/plugin.py +78 -0
  65. llm_code/marketplace/registry.py +360 -0
  66. llm_code/mcp/__init__.py +0 -0
  67. llm_code/mcp/bridge.py +87 -0
  68. llm_code/mcp/client.py +117 -0
  69. llm_code/mcp/health.py +120 -0
  70. llm_code/mcp/manager.py +214 -0
  71. llm_code/mcp/oauth.py +219 -0
  72. llm_code/mcp/transport.py +254 -0
  73. llm_code/mcp/types.py +53 -0
  74. llm_code/remote/__init__.py +0 -0
  75. llm_code/remote/client.py +136 -0
  76. llm_code/remote/protocol.py +22 -0
  77. llm_code/remote/server.py +275 -0
  78. llm_code/remote/ssh_proxy.py +56 -0
  79. llm_code/runtime/__init__.py +0 -0
  80. llm_code/runtime/auto_commit.py +56 -0
  81. llm_code/runtime/auto_diagnose.py +62 -0
  82. llm_code/runtime/checkpoint.py +70 -0
  83. llm_code/runtime/checkpoint_recovery.py +142 -0
  84. llm_code/runtime/compaction.py +35 -0
  85. llm_code/runtime/compressor.py +415 -0
  86. llm_code/runtime/config.py +533 -0
  87. llm_code/runtime/context.py +49 -0
  88. llm_code/runtime/conversation.py +921 -0
  89. llm_code/runtime/cost_tracker.py +126 -0
  90. llm_code/runtime/dream.py +127 -0
  91. llm_code/runtime/file_protection.py +150 -0
  92. llm_code/runtime/hardware.py +85 -0
  93. llm_code/runtime/hooks.py +223 -0
  94. llm_code/runtime/indexer.py +230 -0
  95. llm_code/runtime/knowledge_compiler.py +232 -0
  96. llm_code/runtime/memory.py +132 -0
  97. llm_code/runtime/memory_layers.py +467 -0
  98. llm_code/runtime/memory_lint.py +252 -0
  99. llm_code/runtime/model_aliases.py +37 -0
  100. llm_code/runtime/ollama.py +93 -0
  101. llm_code/runtime/overlay.py +124 -0
  102. llm_code/runtime/permissions.py +200 -0
  103. llm_code/runtime/plan.py +45 -0
  104. llm_code/runtime/prompt.py +238 -0
  105. llm_code/runtime/repo_map.py +174 -0
  106. llm_code/runtime/sandbox.py +116 -0
  107. llm_code/runtime/session.py +268 -0
  108. llm_code/runtime/skill_resolver.py +61 -0
  109. llm_code/runtime/skills.py +133 -0
  110. llm_code/runtime/speculative.py +75 -0
  111. llm_code/runtime/streaming_executor.py +216 -0
  112. llm_code/runtime/telemetry.py +196 -0
  113. llm_code/runtime/token_budget.py +26 -0
  114. llm_code/runtime/vcr.py +142 -0
  115. llm_code/runtime/vision.py +102 -0
  116. llm_code/swarm/__init__.py +1 -0
  117. llm_code/swarm/backend_subprocess.py +108 -0
  118. llm_code/swarm/backend_tmux.py +103 -0
  119. llm_code/swarm/backend_worktree.py +306 -0
  120. llm_code/swarm/checkpoint.py +74 -0
  121. llm_code/swarm/coordinator.py +236 -0
  122. llm_code/swarm/mailbox.py +88 -0
  123. llm_code/swarm/manager.py +202 -0
  124. llm_code/swarm/memory_sync.py +80 -0
  125. llm_code/swarm/recovery.py +21 -0
  126. llm_code/swarm/team.py +67 -0
  127. llm_code/swarm/types.py +31 -0
  128. llm_code/task/__init__.py +16 -0
  129. llm_code/task/diagnostics.py +93 -0
  130. llm_code/task/manager.py +162 -0
  131. llm_code/task/types.py +112 -0
  132. llm_code/task/verifier.py +104 -0
  133. llm_code/tools/__init__.py +0 -0
  134. llm_code/tools/agent.py +145 -0
  135. llm_code/tools/agent_roles.py +82 -0
  136. llm_code/tools/base.py +94 -0
  137. llm_code/tools/bash.py +565 -0
  138. llm_code/tools/computer_use_tools.py +278 -0
  139. llm_code/tools/coordinator_tool.py +75 -0
  140. llm_code/tools/cron_create.py +90 -0
  141. llm_code/tools/cron_delete.py +49 -0
  142. llm_code/tools/cron_list.py +51 -0
  143. llm_code/tools/deferred.py +92 -0
  144. llm_code/tools/dump.py +116 -0
  145. llm_code/tools/edit_file.py +282 -0
  146. llm_code/tools/git_tools.py +531 -0
  147. llm_code/tools/glob_search.py +112 -0
  148. llm_code/tools/grep_search.py +144 -0
  149. llm_code/tools/ide_diagnostics.py +59 -0
  150. llm_code/tools/ide_open.py +58 -0
  151. llm_code/tools/ide_selection.py +52 -0
  152. llm_code/tools/memory_tools.py +138 -0
  153. llm_code/tools/multi_edit.py +143 -0
  154. llm_code/tools/notebook_edit.py +107 -0
  155. llm_code/tools/notebook_read.py +81 -0
  156. llm_code/tools/parsing.py +63 -0
  157. llm_code/tools/read_file.py +154 -0
  158. llm_code/tools/registry.py +58 -0
  159. llm_code/tools/search_backends/__init__.py +56 -0
  160. llm_code/tools/search_backends/brave.py +56 -0
  161. llm_code/tools/search_backends/duckduckgo.py +129 -0
  162. llm_code/tools/search_backends/searxng.py +71 -0
  163. llm_code/tools/search_backends/tavily.py +73 -0
  164. llm_code/tools/swarm_create.py +109 -0
  165. llm_code/tools/swarm_delete.py +95 -0
  166. llm_code/tools/swarm_list.py +44 -0
  167. llm_code/tools/swarm_message.py +109 -0
  168. llm_code/tools/task_close.py +79 -0
  169. llm_code/tools/task_plan.py +79 -0
  170. llm_code/tools/task_verify.py +90 -0
  171. llm_code/tools/tool_search.py +65 -0
  172. llm_code/tools/web_common.py +258 -0
  173. llm_code/tools/web_fetch.py +223 -0
  174. llm_code/tools/web_search.py +280 -0
  175. llm_code/tools/write_file.py +118 -0
  176. llm_code/tui/__init__.py +1 -0
  177. llm_code/tui/app.py +2432 -0
  178. llm_code/tui/chat_view.py +82 -0
  179. llm_code/tui/chat_widgets.py +309 -0
  180. llm_code/tui/header_bar.py +46 -0
  181. llm_code/tui/input_bar.py +349 -0
  182. llm_code/tui/keybindings.py +142 -0
  183. llm_code/tui/marketplace.py +210 -0
  184. llm_code/tui/status_bar.py +72 -0
  185. llm_code/tui/theme.py +96 -0
  186. llm_code/utils/__init__.py +0 -0
  187. llm_code/utils/diff.py +111 -0
  188. llm_code/utils/errors.py +70 -0
  189. llm_code/utils/hyperlink.py +73 -0
  190. llm_code/utils/notebook.py +179 -0
  191. llm_code/utils/search.py +69 -0
  192. llm_code/utils/text_normalize.py +28 -0
  193. llm_code/utils/version_check.py +62 -0
  194. llm_code/vim/__init__.py +4 -0
  195. llm_code/vim/engine.py +51 -0
  196. llm_code/vim/motions.py +172 -0
  197. llm_code/vim/operators.py +183 -0
  198. llm_code/vim/text_objects.py +139 -0
  199. llm_code/vim/transitions.py +279 -0
  200. llm_code/vim/types.py +68 -0
  201. llm_code/voice/__init__.py +1 -0
  202. llm_code/voice/languages.py +43 -0
  203. llm_code/voice/recorder.py +136 -0
  204. llm_code/voice/stt.py +36 -0
  205. llm_code/voice/stt_anthropic.py +66 -0
  206. llm_code/voice/stt_google.py +32 -0
  207. llm_code/voice/stt_whisper.py +52 -0
  208. llmcode_cli-1.0.0.dist-info/METADATA +524 -0
  209. llmcode_cli-1.0.0.dist-info/RECORD +212 -0
  210. llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
  211. llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
  212. llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,132 @@
1
+ """Cross-session memory: MemoryStore for persistent key-value memory with session summaries."""
2
+ from __future__ import annotations
3
+
4
+ import hashlib
5
+ import json
6
+ from dataclasses import dataclass
7
+ from datetime import datetime, timezone
8
+ from pathlib import Path
9
+
10
+
11
@dataclass(frozen=True)
class MemoryEntry:
    """An immutable snapshot of one persisted key-value memory record."""

    # The memory key as passed to MemoryStore.store().
    key: str
    # The stored value (always a string).
    value: str
    # ISO-8601 UTC timestamp of the first insertion of this key.
    created_at: str
    # ISO-8601 UTC timestamp of the most recent update to this key.
    updated_at: str
17
+
18
+
19
class MemoryStore:
    """Persistent key-value memory store scoped to a project path.

    Entries live in ``<memory_dir>/<project_hash>/memory.json``; session
    summaries are kept beside them under ``sessions/``, and consolidated
    summaries under ``consolidated/``.
    """

    def __init__(self, memory_dir: Path, project_path: Path) -> None:
        # An 8-hex-digit SHA-256 prefix of the project path isolates
        # projects from each other under the shared memory_dir.
        digest = hashlib.sha256(str(project_path).encode()).hexdigest()
        self._dir = memory_dir / digest[:8]
        self._dir.mkdir(parents=True, exist_ok=True)
        self._memory_file = self._dir / "memory.json"
        self._sessions_dir = self._dir / "sessions"
        self._sessions_dir.mkdir(exist_ok=True)

    def store(self, key: str, value: str) -> None:
        """Store or update a key-value pair."""
        entries = self._load()
        stamp = datetime.now(timezone.utc).isoformat()
        record = entries.get(key)
        if record is None:
            entries[key] = {"value": value, "created_at": stamp, "updated_at": stamp}
        else:
            # Existing key: keep created_at, refresh updated_at.
            record["value"] = value
            record["updated_at"] = stamp
        self._save(entries)

    def recall(self, key: str) -> str | None:
        """Return the value for key, or None if not found."""
        record = self._load().get(key)
        if not record:
            return None
        return record["value"]

    def list_keys(self) -> list[str]:
        """Return all stored keys."""
        return [*self._load()]

    def delete(self, key: str) -> None:
        """Remove a key from memory (no-op if key does not exist)."""
        entries = self._load()
        if key in entries:
            del entries[key]
        self._save(entries)

    def list_entries(self) -> dict[str, str] | None:
        """Return a dict mapping key -> value for all entries, or None if empty.

        This is used by the prompt builder to inject memory into the system
        prompt. Internal keys (starting with '_') are excluded.
        """
        visible: dict[str, str] = {}
        for key, record in self._load().items():
            if key.startswith("_"):
                continue
            visible[key] = record["value"]
        return visible or None

    def get_all(self) -> dict[str, MemoryEntry]:
        """Return all entries as a mapping of key -> MemoryEntry."""
        entries: dict[str, MemoryEntry] = {}
        for key, record in self._load().items():
            entries[key] = MemoryEntry(
                key=key,
                value=record["value"],
                created_at=record["created_at"],
                updated_at=record["updated_at"],
            )
        return entries

    def save_session_summary(self, summary: str) -> None:
        """Persist a session summary as a timestamped Markdown file."""
        stamp = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H-%M-%S")
        target = self._sessions_dir / f"{stamp}.md"
        target.write_text(summary, encoding="utf-8")

    def load_recent_summaries(self, limit: int = 5) -> list[str]:
        """Return the most recent session summaries (newest first)."""
        # Filenames are ISO timestamps, so lexical sort == chronological sort.
        newest_first = sorted(self._sessions_dir.glob("*.md"), reverse=True)
        return [f.read_text(encoding="utf-8") for f in newest_first[:limit]]

    @property
    def consolidated_dir(self) -> Path:
        """Return the consolidated summaries directory, creating it if needed."""
        consolidated = self._dir / "consolidated"
        consolidated.mkdir(exist_ok=True)
        return consolidated

    def save_consolidated(self, content: str, date_str: str | None = None) -> Path:
        """Persist a consolidated summary as a dated Markdown file.

        Args:
            content: The markdown summary content.
            date_str: Optional date string (YYYY-MM-DD). Defaults to today (UTC).

        Returns:
            The path to the written file.
        """
        if date_str is None:
            date_str = datetime.now(timezone.utc).strftime("%Y-%m-%d")
        target = self.consolidated_dir / f"{date_str}.md"
        target.write_text(content, encoding="utf-8")
        return target

    def load_consolidated_summaries(self, limit: int = 10) -> list[str]:
        """Return the most recent consolidated summaries (newest first)."""
        newest_first = sorted(self.consolidated_dir.glob("*.md"), reverse=True)
        return [f.read_text(encoding="utf-8") for f in newest_first[:limit]]

    # ------------------------------------------------------------------
    # Internal helpers
    # ------------------------------------------------------------------

    def _load(self) -> dict:
        # A missing or corrupt memory file degrades to an empty store
        # rather than raising.
        if not self._memory_file.exists():
            return {}
        try:
            return json.loads(self._memory_file.read_text())
        except (json.JSONDecodeError, OSError):
            return {}

    def _save(self, data: dict) -> None:
        self._memory_file.write_text(json.dumps(data, indent=2))
@@ -0,0 +1,467 @@
1
+ """Multi-layer memory structure for llm-code."""
2
+ from __future__ import annotations
3
+
4
+ import hashlib
5
+ import json
6
+ import logging
7
+ from dataclasses import dataclass, field
8
+ from datetime import datetime, timezone
9
+ from pathlib import Path
10
+ from typing import TYPE_CHECKING
11
+
12
+ if TYPE_CHECKING:
13
+ from llm_code.runtime.memory import MemoryStore
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
@dataclass(frozen=True)
class GovernanceRule:
    """A parsed governance rule from CLAUDE.md or .llm-code/rules/*.md."""

    # Lower-cased text of the most recent markdown heading above the rule;
    # "general" when the rule appears before any heading.
    category: str
    # The rule text itself: one markdown list item with the leading '- ' stripped.
    content: str
    # Path of the markdown file the rule was parsed from (as a string).
    source: str
    # Source precedence: governance.md (10) > rules/*.md (5) > CLAUDE.md (1).
    priority: int = 0
26
+
27
+
28
@dataclass(frozen=True)
class MemoryEntry:
    """A tagged, timestamped memory entry for L2 Project Memory."""

    # Memory key as stored via ProjectMemory.store().
    key: str
    # Stored value (always a string).
    value: str
    # Tags used for query_by_tag() lookups; empty when none were given.
    tags: tuple[str, ...] = ()
    # ISO-8601 UTC timestamps; empty string when the tag sidecar has no
    # record for this key.
    created_at: str = ""
    accessed_at: str = ""
37
+
38
+
39
@dataclass(frozen=True)
class TaskRecord:
    """A tracked task for L3 Task Memory."""

    # Short unique identifier (an 8-hex-char uuid4 prefix; see TaskMemory.create).
    task_id: str
    # Human-readable description of what the task is.
    description: str
    status: str  # "incomplete" | "complete" | "blocked"
    # ISO-8601 UTC timestamps; empty string when unknown.
    created_at: str = ""
    updated_at: str = ""
    # Free-form extra data persisted alongside the task.
    # NOTE(review): a mutable dict inside a frozen dataclass — the record's
    # fields cannot be rebound, but the metadata mapping can still be
    # mutated in place.
    metadata: dict = field(default_factory=dict)
49
+
50
+
51
class GovernanceLayer:
    """L0: Scans CLAUDE.md, .llm-code/rules/*.md, .llm-code/governance.md."""

    # Single source of truth for per-source precedence:
    # project governance.md > rules/*.md > root CLAUDE.md.
    # (Previously declared but unused — the same values were duplicated as
    # magic literals inside scan().)
    _PRIORITY_MAP = {
        "governance.md": 10,
        "rules": 5,
        "CLAUDE.md": 1,
    }

    def __init__(self, project_root: Path) -> None:
        self._root = project_root

    def scan(self) -> tuple[GovernanceRule, ...]:
        """Scan all governance sources and return parsed rules.

        Missing files/directories are skipped silently. Rule priorities come
        from _PRIORITY_MAP so precedence is defined in exactly one place.
        """
        rules: list[GovernanceRule] = []

        # 1. CLAUDE.md at the project root.
        claude_md = self._root / "CLAUDE.md"
        if claude_md.is_file():
            rules.extend(
                self._parse_file(claude_md, priority=self._PRIORITY_MAP["CLAUDE.md"])
            )

        # 2. .llm-code/rules/*.md — sorted for deterministic rule order.
        rules_dir = self._root / ".llm-code" / "rules"
        if rules_dir.is_dir():
            for md_file in sorted(rules_dir.glob("*.md")):
                rules.extend(
                    self._parse_file(md_file, priority=self._PRIORITY_MAP["rules"])
                )

        # 3. .llm-code/governance.md — highest priority.
        gov_md = self._root / ".llm-code" / "governance.md"
        if gov_md.is_file():
            rules.extend(
                self._parse_file(gov_md, priority=self._PRIORITY_MAP["governance.md"])
            )

        return tuple(rules)

    def _parse_file(self, path: Path, priority: int) -> list[GovernanceRule]:
        """Parse a markdown file into GovernanceRule entries.

        Extracts list items (lines starting with '- ') as individual rules.
        Uses the most recent heading (any '#' level) as the category; rules
        appearing before the first heading get category "general".
        """
        try:
            text = path.read_text(encoding="utf-8")
        except OSError:
            # An unreadable file simply contributes no rules.
            return []

        parsed: list[GovernanceRule] = []
        category = "general"

        for line in text.splitlines():
            stripped = line.strip()
            if stripped.startswith("#"):
                category = stripped.lstrip("#").strip().lower()
            elif stripped.startswith("- "):
                content = stripped[2:].strip()
                if content:
                    parsed.append(GovernanceRule(
                        category=category,
                        content=content,
                        source=str(path),
                        priority=priority,
                    ))

        return parsed
114
+
115
+
116
class WorkingMemory:
    """L1: In-memory session-scoped key-value store. Not persisted."""

    def __init__(self) -> None:
        # Plain dict; its lifetime is the current session/process only.
        self._data: dict[str, str] = {}

    def store(self, key: str, value: str) -> None:
        """Set *key* to *value*, overwriting any previous value."""
        self._data[key] = value

    def recall(self, key: str) -> str | None:
        """Return the stored value, or None when *key* is unknown."""
        return self._data.get(key)

    def delete(self, key: str) -> None:
        """Forget *key*; silently ignores unknown keys."""
        if key in self._data:
            del self._data[key]

    def list_keys(self) -> list[str]:
        """Return all currently stored keys."""
        return [*self._data]

    def get_all(self) -> dict[str, str]:
        """Return a shallow copy of the whole store."""
        return {**self._data}

    def clear(self) -> None:
        """Drop every entry."""
        self._data.clear()
139
+
140
+
141
class ProjectMemory:
    """L2: Persistent project-scoped memory with tags. Wraps MemoryStore.

    Values are delegated to the wrapped MemoryStore; tag metadata and
    created_at/accessed_at timestamps are kept in a separate ``tags.json``
    sidecar in the same per-project directory.
    """

    def __init__(self, memory_dir: Path, project_path: Path) -> None:
        # Deferred import — presumably to avoid a circular import with
        # llm_code.runtime.memory (the module-top import is TYPE_CHECKING-only).
        from llm_code.runtime.memory import MemoryStore

        self._memory_store = MemoryStore(memory_dir, project_path)
        # NOTE(review): reaches into MemoryStore's private _dir so the tag
        # sidecar lives next to memory.json for the same project hash.
        self._tags_file = self._memory_store._dir / "tags.json"

    @property
    def memory_store(self) -> "MemoryStore":
        """Expose underlying MemoryStore for backward compatibility."""
        return self._memory_store

    def store(self, key: str, value: str, tags: tuple[str, ...] = ()) -> None:
        """Store a value with optional tags.

        The value goes into the wrapped MemoryStore; tag metadata is written
        to the sidecar. created_at is preserved across re-stores of the same
        key, accessed_at is reset to now. Note: storing with tags=() replaces
        any previously saved tags for the key.
        """
        self._memory_store.store(key, value)
        now = datetime.now(timezone.utc).isoformat()
        tags_data = self._load_tags()
        tags_data[key] = {
            "tags": list(tags),
            "created_at": tags_data.get(key, {}).get("created_at", now),
            "accessed_at": now,
        }
        self._save_tags(tags_data)

    def recall(self, key: str) -> MemoryEntry | None:
        """Return a MemoryEntry for the key, or None.

        Side effect: refreshes the entry's accessed_at timestamp in the
        sidecar (only when the key already has tag metadata).
        """
        raw_value = self._memory_store.recall(key)
        if raw_value is None:
            return None
        tags_data = self._load_tags()
        meta = tags_data.get(key, {})
        now = datetime.now(timezone.utc).isoformat()
        # Update accessed_at
        if key in tags_data:
            tags_data[key]["accessed_at"] = now
            self._save_tags(tags_data)
        return MemoryEntry(
            key=key,
            value=raw_value,
            tags=tuple(meta.get("tags", [])),
            created_at=meta.get("created_at", ""),
            accessed_at=now,
        )

    def query_by_tag(self, tag: str) -> tuple[MemoryEntry, ...]:
        """Return all entries matching the given tag.

        Keys present in the sidecar but missing from the value store are
        skipped. Unlike recall(), this does NOT refresh accessed_at.
        """
        tags_data = self._load_tags()
        results: list[MemoryEntry] = []
        for key, meta in tags_data.items():
            if tag in meta.get("tags", []):
                raw_value = self._memory_store.recall(key)
                if raw_value is not None:
                    results.append(MemoryEntry(
                        key=key,
                        value=raw_value,
                        tags=tuple(meta.get("tags", [])),
                        created_at=meta.get("created_at", ""),
                        accessed_at=meta.get("accessed_at", ""),
                    ))
        return tuple(results)

    def delete(self, key: str) -> None:
        """Remove the key from both the value store and the tag sidecar."""
        self._memory_store.delete(key)
        tags_data = self._load_tags()
        tags_data.pop(key, None)
        self._save_tags(tags_data)

    def list_keys(self) -> list[str]:
        """Return all keys known to the underlying value store."""
        return self._memory_store.list_keys()

    def get_all(self) -> dict[str, MemoryEntry]:
        """Return every stored entry keyed by its memory key.

        Timestamps fall back to the MemoryStore record (created_at/updated_at)
        when the key has no sidecar metadata.
        """
        tags_data = self._load_tags()
        result: dict[str, MemoryEntry] = {}
        for key, raw_entry in self._memory_store.get_all().items():
            meta = tags_data.get(key, {})
            result[key] = MemoryEntry(
                key=key,
                value=raw_entry.value,
                tags=tuple(meta.get("tags", [])),
                created_at=meta.get("created_at", raw_entry.created_at),
                accessed_at=meta.get("accessed_at", raw_entry.updated_at),
            )
        return result

    def _load_tags(self) -> dict:
        # A missing or corrupt sidecar degrades to "no tag metadata".
        if self._tags_file.exists():
            try:
                return json.loads(self._tags_file.read_text())
            except (json.JSONDecodeError, OSError):
                return {}
        return {}

    def _save_tags(self, data: dict) -> None:
        self._tags_file.write_text(json.dumps(data, indent=2))
237
+
238
+
239
class TaskMemory:
    """L3: Per-task JSON files with status tracking.

    Each task lives in ``<memory_dir>/<project_hash>/tasks/<task_id>.json``.
    """

    def __init__(self, memory_dir: Path, project_path: Path) -> None:
        digest = hashlib.sha256(str(project_path).encode()).hexdigest()[:8]
        self._tasks_dir = memory_dir / digest / "tasks"
        self._tasks_dir.mkdir(parents=True, exist_ok=True)

    def create(
        self, description: str, metadata: dict | None = None,
    ) -> TaskRecord:
        """Create a new incomplete task and persist it."""
        import uuid

        stamp = datetime.now(timezone.utc).isoformat()
        record = TaskRecord(
            task_id=uuid.uuid4().hex[:8],
            description=description,
            status="incomplete",
            created_at=stamp,
            updated_at=stamp,
            metadata=metadata or {},
        )
        self._save_task(record)
        return record

    def get(self, task_id: str) -> TaskRecord | None:
        """Load a task by ID, or None if not found."""
        source = self._tasks_dir / f"{task_id}.json"
        if not source.exists():
            return None
        try:
            raw = json.loads(source.read_text())
            return TaskRecord(
                task_id=raw["task_id"],
                description=raw["description"],
                status=raw["status"],
                created_at=raw.get("created_at", ""),
                updated_at=raw.get("updated_at", ""),
                metadata=raw.get("metadata", {}),
            )
        except (json.JSONDecodeError, KeyError, OSError):
            # Corrupt or partially written task file: treat as missing.
            return None

    def update_status(self, task_id: str, status: str) -> TaskRecord | None:
        """Update a task's status and return the new record."""
        current = self.get(task_id)
        if current is None:
            return None
        # TaskRecord is frozen, so build a fresh record with the new status
        # and a refreshed updated_at.
        replacement = TaskRecord(
            task_id=current.task_id,
            description=current.description,
            status=status,
            created_at=current.created_at,
            updated_at=datetime.now(timezone.utc).isoformat(),
            metadata=current.metadata,
        )
        self._save_task(replacement)
        return replacement

    def list_incomplete(self) -> tuple[TaskRecord, ...]:
        """Scan all task files and return those with status 'incomplete'."""
        loaded = (self.get(p.stem) for p in self._tasks_dir.glob("*.json"))
        return tuple(
            record
            for record in loaded
            if record is not None and record.status == "incomplete"
        )

    def delete(self, task_id: str) -> None:
        """Remove a task file."""
        target = self._tasks_dir / f"{task_id}.json"
        if target.exists():
            target.unlink()

    def _save_task(self, task: TaskRecord) -> None:
        payload = json.dumps(
            {
                "task_id": task.task_id,
                "description": task.description,
                "status": task.status,
                "created_at": task.created_at,
                "updated_at": task.updated_at,
                "metadata": task.metadata,
            },
            indent=2,
        )
        (self._tasks_dir / f"{task.task_id}.json").write_text(payload)
328
+
329
+
330
class SummaryMemory:
    """Stores conversation summaries per session.

    Storage: ``<memory_dir>/<project_hash>/summaries/<session_id>.md``
    """

    def __init__(self, memory_dir: Path, project_path: Path) -> None:
        digest = hashlib.sha256(str(project_path).encode()).hexdigest()[:8]
        self._summaries_dir = memory_dir / digest / "summaries"
        self._summaries_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _split_front_matter(text: str) -> tuple[str | None, str]:
        """Split YAML-style front-matter from *text*.

        Returns (front, body); front is None when no complete front-matter
        block ("---\\n ... \\n---\\n") is present, in which case body is the
        whole text unchanged.
        """
        if text.startswith("---\n"):
            end = text.find("\n---\n", 4)
            if end != -1:
                return text[4:end], text[end + 5:]
        return None, text

    def save_summary(self, session_id: str, summary: str, messages_count: int) -> None:
        """Persist a summary for *session_id*.

        The file header stores metadata as YAML-style front-matter so the
        summary body remains plain markdown and human-readable.
        """
        now = datetime.now(timezone.utc).isoformat()
        header = (
            f"---\n"
            f"session_id: {session_id}\n"
            f"timestamp: {now}\n"
            f"messages_count: {messages_count}\n"
            f"---\n\n"
        )
        target = self._summaries_dir / f"{session_id}.md"
        target.write_text(header + summary, encoding="utf-8")

    def load_summary(self, session_id: str) -> str | None:
        """Return the summary body for *session_id*, or None if not found."""
        source = self._summaries_dir / f"{session_id}.md"
        if not source.exists():
            return None
        _, body = self._split_front_matter(source.read_text(encoding="utf-8"))
        return body.strip()

    def list_summaries(self) -> list[dict]:
        """Return a list of summary descriptors sorted by modification time (newest first).

        Each dict has keys: ``id``, ``timestamp``, ``message_count``, ``first_line``.
        """
        newest_first = sorted(
            self._summaries_dir.glob("*.md"),
            key=lambda p: p.stat().st_mtime,
            reverse=True,
        )
        descriptors: list[dict] = []
        for path in newest_first:
            try:
                text = path.read_text(encoding="utf-8")
            except OSError:
                # Unreadable file: skip rather than fail the whole listing.
                continue

            timestamp = ""
            messages_count = 0
            front, body = self._split_front_matter(text)
            if front is not None:
                body = body.strip()
                for line in front.splitlines():
                    if line.startswith("timestamp:"):
                        timestamp = line.split(":", 1)[1].strip()
                    elif line.startswith("messages_count:"):
                        try:
                            messages_count = int(line.split(":", 1)[1].strip())
                        except ValueError:
                            # Malformed count: keep the 0 default.
                            pass

            first_line = next((ln for ln in body.splitlines() if ln.strip()), "")
            descriptors.append({
                "id": path.stem,
                "timestamp": timestamp,
                "message_count": messages_count,
                "first_line": first_line[:120],
            })

        return descriptors
416
+
417
+
418
class LayeredMemory:
    """Facade over the five memory layers.

    - L0 Governance: parsed rules from CLAUDE.md / .llm-code/rules/ / governance.md
    - L1 Working: in-memory, session-scoped, not persisted
    - L2 Project: persistent, tag-based (wraps MemoryStore for backward compat)
    - L3 Task: per-task JSON files with status tracking
    - L4 Summary: conversation summaries persisted per session
    """

    def __init__(
        self,
        project_root: Path,
        memory_dir: Path,
        project_path: Path,
    ) -> None:
        # All layers are constructed eagerly; each constructor does at most
        # a directory mkdir.
        self._l0 = GovernanceLayer(project_root)
        self._l1 = WorkingMemory()
        self._l2 = ProjectMemory(memory_dir, project_path)
        self._l3 = TaskMemory(memory_dir, project_path)
        self._l4 = SummaryMemory(memory_dir, project_path)

    @property
    def governance(self) -> GovernanceLayer:
        """L0: governance rule scanner."""
        return self._l0

    @property
    def working(self) -> WorkingMemory:
        """L1: volatile session-scoped memory."""
        return self._l1

    @property
    def project(self) -> ProjectMemory:
        """L2: persistent tagged project memory."""
        return self._l2

    @property
    def tasks(self) -> TaskMemory:
        """L3: persistent task tracking."""
        return self._l3

    @property
    def summaries(self) -> SummaryMemory:
        """L4: per-session conversation summaries."""
        return self._l4

    def get_governance_rules(self) -> tuple[GovernanceRule, ...]:
        """Return all governance rules, sorted by priority descending."""
        return tuple(sorted(self._l0.scan(), key=lambda rule: -rule.priority))

    def get_incomplete_tasks(self) -> tuple[TaskRecord, ...]:
        """Scan for incomplete tasks (useful on startup)."""
        return self._l3.list_incomplete()