llmcode-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llm_code/__init__.py +2 -0
- llm_code/analysis/__init__.py +6 -0
- llm_code/analysis/cache.py +33 -0
- llm_code/analysis/engine.py +256 -0
- llm_code/analysis/go_rules.py +114 -0
- llm_code/analysis/js_rules.py +84 -0
- llm_code/analysis/python_rules.py +311 -0
- llm_code/analysis/rules.py +140 -0
- llm_code/analysis/rust_rules.py +108 -0
- llm_code/analysis/universal_rules.py +111 -0
- llm_code/api/__init__.py +0 -0
- llm_code/api/client.py +90 -0
- llm_code/api/errors.py +73 -0
- llm_code/api/openai_compat.py +390 -0
- llm_code/api/provider.py +35 -0
- llm_code/api/sse.py +52 -0
- llm_code/api/types.py +140 -0
- llm_code/cli/__init__.py +0 -0
- llm_code/cli/commands.py +70 -0
- llm_code/cli/image.py +122 -0
- llm_code/cli/render.py +214 -0
- llm_code/cli/status_line.py +79 -0
- llm_code/cli/streaming.py +92 -0
- llm_code/cli/tui_main.py +220 -0
- llm_code/computer_use/__init__.py +11 -0
- llm_code/computer_use/app_detect.py +49 -0
- llm_code/computer_use/app_tier.py +57 -0
- llm_code/computer_use/coordinator.py +99 -0
- llm_code/computer_use/input_control.py +71 -0
- llm_code/computer_use/screenshot.py +93 -0
- llm_code/cron/__init__.py +13 -0
- llm_code/cron/parser.py +145 -0
- llm_code/cron/scheduler.py +135 -0
- llm_code/cron/storage.py +126 -0
- llm_code/enterprise/__init__.py +1 -0
- llm_code/enterprise/audit.py +59 -0
- llm_code/enterprise/auth.py +26 -0
- llm_code/enterprise/oidc.py +95 -0
- llm_code/enterprise/rbac.py +65 -0
- llm_code/harness/__init__.py +5 -0
- llm_code/harness/config.py +33 -0
- llm_code/harness/engine.py +129 -0
- llm_code/harness/guides.py +41 -0
- llm_code/harness/sensors.py +68 -0
- llm_code/harness/templates.py +84 -0
- llm_code/hida/__init__.py +1 -0
- llm_code/hida/classifier.py +187 -0
- llm_code/hida/engine.py +49 -0
- llm_code/hida/profiles.py +95 -0
- llm_code/hida/types.py +28 -0
- llm_code/ide/__init__.py +1 -0
- llm_code/ide/bridge.py +80 -0
- llm_code/ide/detector.py +76 -0
- llm_code/ide/server.py +169 -0
- llm_code/logging.py +29 -0
- llm_code/lsp/__init__.py +0 -0
- llm_code/lsp/client.py +298 -0
- llm_code/lsp/detector.py +42 -0
- llm_code/lsp/manager.py +56 -0
- llm_code/lsp/tools.py +288 -0
- llm_code/marketplace/__init__.py +0 -0
- llm_code/marketplace/builtin_registry.py +102 -0
- llm_code/marketplace/installer.py +162 -0
- llm_code/marketplace/plugin.py +78 -0
- llm_code/marketplace/registry.py +360 -0
- llm_code/mcp/__init__.py +0 -0
- llm_code/mcp/bridge.py +87 -0
- llm_code/mcp/client.py +117 -0
- llm_code/mcp/health.py +120 -0
- llm_code/mcp/manager.py +214 -0
- llm_code/mcp/oauth.py +219 -0
- llm_code/mcp/transport.py +254 -0
- llm_code/mcp/types.py +53 -0
- llm_code/remote/__init__.py +0 -0
- llm_code/remote/client.py +136 -0
- llm_code/remote/protocol.py +22 -0
- llm_code/remote/server.py +275 -0
- llm_code/remote/ssh_proxy.py +56 -0
- llm_code/runtime/__init__.py +0 -0
- llm_code/runtime/auto_commit.py +56 -0
- llm_code/runtime/auto_diagnose.py +62 -0
- llm_code/runtime/checkpoint.py +70 -0
- llm_code/runtime/checkpoint_recovery.py +142 -0
- llm_code/runtime/compaction.py +35 -0
- llm_code/runtime/compressor.py +415 -0
- llm_code/runtime/config.py +533 -0
- llm_code/runtime/context.py +49 -0
- llm_code/runtime/conversation.py +921 -0
- llm_code/runtime/cost_tracker.py +126 -0
- llm_code/runtime/dream.py +127 -0
- llm_code/runtime/file_protection.py +150 -0
- llm_code/runtime/hardware.py +85 -0
- llm_code/runtime/hooks.py +223 -0
- llm_code/runtime/indexer.py +230 -0
- llm_code/runtime/knowledge_compiler.py +232 -0
- llm_code/runtime/memory.py +132 -0
- llm_code/runtime/memory_layers.py +467 -0
- llm_code/runtime/memory_lint.py +252 -0
- llm_code/runtime/model_aliases.py +37 -0
- llm_code/runtime/ollama.py +93 -0
- llm_code/runtime/overlay.py +124 -0
- llm_code/runtime/permissions.py +200 -0
- llm_code/runtime/plan.py +45 -0
- llm_code/runtime/prompt.py +238 -0
- llm_code/runtime/repo_map.py +174 -0
- llm_code/runtime/sandbox.py +116 -0
- llm_code/runtime/session.py +268 -0
- llm_code/runtime/skill_resolver.py +61 -0
- llm_code/runtime/skills.py +133 -0
- llm_code/runtime/speculative.py +75 -0
- llm_code/runtime/streaming_executor.py +216 -0
- llm_code/runtime/telemetry.py +196 -0
- llm_code/runtime/token_budget.py +26 -0
- llm_code/runtime/vcr.py +142 -0
- llm_code/runtime/vision.py +102 -0
- llm_code/swarm/__init__.py +1 -0
- llm_code/swarm/backend_subprocess.py +108 -0
- llm_code/swarm/backend_tmux.py +103 -0
- llm_code/swarm/backend_worktree.py +306 -0
- llm_code/swarm/checkpoint.py +74 -0
- llm_code/swarm/coordinator.py +236 -0
- llm_code/swarm/mailbox.py +88 -0
- llm_code/swarm/manager.py +202 -0
- llm_code/swarm/memory_sync.py +80 -0
- llm_code/swarm/recovery.py +21 -0
- llm_code/swarm/team.py +67 -0
- llm_code/swarm/types.py +31 -0
- llm_code/task/__init__.py +16 -0
- llm_code/task/diagnostics.py +93 -0
- llm_code/task/manager.py +162 -0
- llm_code/task/types.py +112 -0
- llm_code/task/verifier.py +104 -0
- llm_code/tools/__init__.py +0 -0
- llm_code/tools/agent.py +145 -0
- llm_code/tools/agent_roles.py +82 -0
- llm_code/tools/base.py +94 -0
- llm_code/tools/bash.py +565 -0
- llm_code/tools/computer_use_tools.py +278 -0
- llm_code/tools/coordinator_tool.py +75 -0
- llm_code/tools/cron_create.py +90 -0
- llm_code/tools/cron_delete.py +49 -0
- llm_code/tools/cron_list.py +51 -0
- llm_code/tools/deferred.py +92 -0
- llm_code/tools/dump.py +116 -0
- llm_code/tools/edit_file.py +282 -0
- llm_code/tools/git_tools.py +531 -0
- llm_code/tools/glob_search.py +112 -0
- llm_code/tools/grep_search.py +144 -0
- llm_code/tools/ide_diagnostics.py +59 -0
- llm_code/tools/ide_open.py +58 -0
- llm_code/tools/ide_selection.py +52 -0
- llm_code/tools/memory_tools.py +138 -0
- llm_code/tools/multi_edit.py +143 -0
- llm_code/tools/notebook_edit.py +107 -0
- llm_code/tools/notebook_read.py +81 -0
- llm_code/tools/parsing.py +63 -0
- llm_code/tools/read_file.py +154 -0
- llm_code/tools/registry.py +58 -0
- llm_code/tools/search_backends/__init__.py +56 -0
- llm_code/tools/search_backends/brave.py +56 -0
- llm_code/tools/search_backends/duckduckgo.py +129 -0
- llm_code/tools/search_backends/searxng.py +71 -0
- llm_code/tools/search_backends/tavily.py +73 -0
- llm_code/tools/swarm_create.py +109 -0
- llm_code/tools/swarm_delete.py +95 -0
- llm_code/tools/swarm_list.py +44 -0
- llm_code/tools/swarm_message.py +109 -0
- llm_code/tools/task_close.py +79 -0
- llm_code/tools/task_plan.py +79 -0
- llm_code/tools/task_verify.py +90 -0
- llm_code/tools/tool_search.py +65 -0
- llm_code/tools/web_common.py +258 -0
- llm_code/tools/web_fetch.py +223 -0
- llm_code/tools/web_search.py +280 -0
- llm_code/tools/write_file.py +118 -0
- llm_code/tui/__init__.py +1 -0
- llm_code/tui/app.py +2432 -0
- llm_code/tui/chat_view.py +82 -0
- llm_code/tui/chat_widgets.py +309 -0
- llm_code/tui/header_bar.py +46 -0
- llm_code/tui/input_bar.py +349 -0
- llm_code/tui/keybindings.py +142 -0
- llm_code/tui/marketplace.py +210 -0
- llm_code/tui/status_bar.py +72 -0
- llm_code/tui/theme.py +96 -0
- llm_code/utils/__init__.py +0 -0
- llm_code/utils/diff.py +111 -0
- llm_code/utils/errors.py +70 -0
- llm_code/utils/hyperlink.py +73 -0
- llm_code/utils/notebook.py +179 -0
- llm_code/utils/search.py +69 -0
- llm_code/utils/text_normalize.py +28 -0
- llm_code/utils/version_check.py +62 -0
- llm_code/vim/__init__.py +4 -0
- llm_code/vim/engine.py +51 -0
- llm_code/vim/motions.py +172 -0
- llm_code/vim/operators.py +183 -0
- llm_code/vim/text_objects.py +139 -0
- llm_code/vim/transitions.py +279 -0
- llm_code/vim/types.py +68 -0
- llm_code/voice/__init__.py +1 -0
- llm_code/voice/languages.py +43 -0
- llm_code/voice/recorder.py +136 -0
- llm_code/voice/stt.py +36 -0
- llm_code/voice/stt_anthropic.py +66 -0
- llm_code/voice/stt_google.py +32 -0
- llm_code/voice/stt_whisper.py +52 -0
- llmcode_cli-1.0.0.dist-info/METADATA +524 -0
- llmcode_cli-1.0.0.dist-info/RECORD +212 -0
- llmcode_cli-1.0.0.dist-info/WHEEL +4 -0
- llmcode_cli-1.0.0.dist-info/entry_points.txt +2 -0
- llmcode_cli-1.0.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
"""Git-based checkpoint manager for undoable tool operations."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import subprocess
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
@dataclass(frozen=True)
class Checkpoint:
    """Immutable record of one git-backed checkpoint.

    Created by ``CheckpointManager.create`` right before a tool runs so the
    working tree can later be restored with ``git reset --hard``.
    """

    id: str  # incrementing "cp-001", "cp-002", …
    timestamp: str  # ISO format
    tool_name: str
    tool_args_summary: str  # short display string (first 80 chars of str(tool_args))
    git_sha: str  # SHA of the checkpoint commit (target for undo)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class CheckpointManager:
    """Maintain a stack of git-commit checkpoints for undoable tool operations.

    ``create()`` snapshots the working tree as a commit and records its SHA;
    ``undo()`` pops the most recent checkpoint and hard-resets the repository
    back to that SHA.
    """

    def __init__(self, cwd: Path) -> None:
        self._cwd = cwd
        self._stack: list[Checkpoint] = []
        self._counter = 0  # monotonically increasing; feeds checkpoint ids

    def create(self, tool_name: str, tool_args: dict) -> Checkpoint:
        """Commit the current working-tree state and push a Checkpoint onto the stack.

        Raises:
            RuntimeError: when the checkpoint SHA cannot be resolved (e.g. the
                directory is not a git repository), instead of silently
                recording an empty SHA that would corrupt a later undo().
        """
        # Stage everything so untracked files are part of the snapshot.
        subprocess.run(["git", "add", "-A"], cwd=self._cwd, capture_output=True)
        # --allow-empty: still produce a commit (and thus a usable SHA) even
        # when the tree is unchanged, so undo() always has a target.
        subprocess.run(
            ["git", "commit", "--allow-empty", "-m", f"llm-code checkpoint: before {tool_name}"],
            cwd=self._cwd,
            capture_output=True,
        )

        sha_result = subprocess.run(
            ["git", "rev-parse", "HEAD"],
            capture_output=True,
            text=True,
            cwd=self._cwd,
        )
        git_sha = sha_result.stdout.strip()
        if sha_result.returncode != 0 or not git_sha:
            # Previously an empty SHA was recorded silently, which made a
            # later undo() run ``git reset --hard ""``. Fail loudly instead.
            raise RuntimeError(
                f"could not resolve checkpoint SHA in {self._cwd}: "
                f"{sha_result.stderr.strip()}"
            )

        self._counter += 1
        cp = Checkpoint(
            id=f"cp-{self._counter:03d}",
            timestamp=datetime.now(tz=timezone.utc).isoformat(),
            tool_name=tool_name,
            tool_args_summary=str(tool_args)[:80],
            git_sha=git_sha,
        )
        self._stack.append(cp)
        return cp

    def undo(self) -> Checkpoint | None:
        """Pop the last checkpoint and hard-reset the repo to that SHA.

        Returns the popped Checkpoint, or None when the stack is empty.
        """
        if not self._stack:
            return None
        cp = self._stack.pop()
        subprocess.run(
            ["git", "reset", "--hard", cp.git_sha],
            cwd=self._cwd,
            capture_output=True,
        )
        return cp

    def list_checkpoints(self) -> list[Checkpoint]:
        """Return a shallow copy of the checkpoint stack (oldest first)."""
        return list(self._stack)

    def can_undo(self) -> bool:
        """True when at least one checkpoint is available to undo."""
        return len(self._stack) > 0
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
"""Session checkpoint recovery: save/load full session state for crash recovery."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import asyncio
|
|
5
|
+
import json
|
|
6
|
+
import logging
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from typing import TYPE_CHECKING
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from llm_code.runtime.session import Session
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
_CHECKPOINTS_DIR_NAME = "checkpoints"
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class CheckpointRecovery:
    """Persist and restore full session state for crash recovery.

    Checkpoints are stored as JSON files under
    ``~/.llm-code/checkpoints/<session_id>.json`` (or a custom *checkpoints_dir*).
    """

    def __init__(self, checkpoints_dir: Path) -> None:
        self._dir = checkpoints_dir
        self._dir.mkdir(parents=True, exist_ok=True)
        self._auto_save_task: asyncio.Task | None = None

    # ------------------------------------------------------------------
    # Core persistence
    # ------------------------------------------------------------------

    def save_checkpoint(self, session: "Session") -> Path:
        """Serialize *session* to disk and return the checkpoint file path."""
        data = session.to_dict()
        data["checkpoint_saved_at"] = datetime.now(timezone.utc).isoformat()

        path = self._dir / f"{session.id}.json"
        path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        logger.debug("Checkpoint saved: %s", path)
        return path

    def load_checkpoint(self, session_id: str) -> "Session | None":
        """Deserialize a checkpoint by *session_id*, or return None."""
        from llm_code.runtime.session import Session  # local import to avoid cycles

        path = self._dir / f"{session_id}.json"
        if not path.exists():
            return None
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
            # checkpoint_saved_at is extra metadata written by save_checkpoint;
            # strip it so Session.from_dict receives a clean payload.
            data.pop("checkpoint_saved_at", None)
            return Session.from_dict(data)
        except (OSError, json.JSONDecodeError, KeyError, TypeError) as exc:
            # OSError included: the file can vanish between exists() and read.
            logger.warning("Failed to load checkpoint %s: %s", session_id, exc)
            return None

    def list_checkpoints(self) -> list[dict]:
        """Return checkpoint descriptors sorted by modification time (newest first).

        Each dict has: ``session_id``, ``saved_at``, ``message_count``,
        ``project_path``, ``updated_at``.
        """

        def _mtime(path: Path) -> float:
            # A checkpoint may be deleted while we scan; treat it as oldest
            # instead of letting sorted() blow up on the stat() call.
            try:
                return path.stat().st_mtime
            except OSError:
                return 0.0

        results: list[dict] = []
        for path in sorted(self._dir.glob("*.json"), key=_mtime, reverse=True):
            try:
                data = json.loads(path.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError):
                continue  # skip unreadable/corrupt entries, best-effort listing
            results.append({
                "session_id": data.get("id", path.stem),
                "saved_at": data.get("checkpoint_saved_at", ""),
                "message_count": len(data.get("messages", [])),
                "project_path": data.get("project_path", ""),
                "updated_at": data.get("updated_at", ""),
            })
        return results

    def delete_checkpoint(self, session_id: str) -> bool:
        """Delete a checkpoint file; returns True if it existed."""
        path = self._dir / f"{session_id}.json"
        if path.exists():
            path.unlink()
            return True
        return False

    # ------------------------------------------------------------------
    # Auto-save background task
    # ------------------------------------------------------------------

    def start_auto_save(self, get_session_fn, interval: int = 60) -> None:
        """Start a background asyncio task that saves a checkpoint every *interval* seconds.

        *get_session_fn* is a zero-argument callable that returns the current
        :class:`~llm_code.runtime.session.Session` (or None to skip).

        Must be called from a running event loop; otherwise this is a no-op.
        """
        if self._auto_save_task is not None and not self._auto_save_task.done():
            return  # already running

        async def _loop():
            while True:
                await asyncio.sleep(interval)
                try:
                    session = get_session_fn()
                    if session is not None:
                        self.save_checkpoint(session)
                except Exception as exc:
                    # Best-effort: auto-save must never crash the app.
                    logger.debug("Auto-save checkpoint error: %s", exc)

        try:
            # get_running_loop() instead of the deprecated get_event_loop():
            # the task only makes sense when a loop is actually running, and
            # get_event_loop() could silently create a loop that never runs.
            loop = asyncio.get_running_loop()
        except RuntimeError:
            return

        self._auto_save_task = loop.create_task(_loop())
        logger.debug("Checkpoint auto-save started (interval=%ds)", interval)

    def stop_auto_save(self) -> None:
        """Cancel the auto-save background task if running."""
        if self._auto_save_task is not None and not self._auto_save_task.done():
            self._auto_save_task.cancel()
        self._auto_save_task = None

    # ------------------------------------------------------------------
    # Startup detection
    # ------------------------------------------------------------------

    def detect_last_checkpoint(self) -> "Session | None":
        """Return the most recently modified checkpoint session, or None."""
        entries = self.list_checkpoints()
        if not entries:
            return None
        return self.load_checkpoint(entries[0]["session_id"])
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Context compaction: trim old session messages when the context grows too large."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import dataclasses
|
|
5
|
+
|
|
6
|
+
from llm_code.api.types import Message, TextBlock
|
|
7
|
+
from llm_code.runtime.session import Session
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def needs_compaction(session: Session, threshold: int = 80000) -> bool:
    """Return True when the session's estimated token count exceeds *threshold*."""
    estimated = session.estimated_tokens()
    return estimated > threshold
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def compact_session(
    session: Session,
    keep_recent: int = 4,
    summary: str = "",
) -> Session:
    """Return a compacted session keeping only the most recent *keep_recent* messages.

    When the session already has at most *keep_recent* messages it is returned
    untouched. Otherwise the result is a single user message carrying
    *summary*, followed by the last *keep_recent* original messages.
    """
    messages = session.messages
    if len(messages) <= keep_recent:
        return session

    # Prepend one synthetic user message holding the summary of the dropped tail.
    header_block = TextBlock(text=f"[Previous conversation summary]\n{summary}")
    compacted = (Message(role="user", content=(header_block,)), *messages[-keep_recent:])
    return dataclasses.replace(session, messages=compacted)
|
|
@@ -0,0 +1,415 @@
|
|
|
1
|
+
"""ContextCompressor: 5-level progressive context compression."""
|
|
2
|
+
from __future__ import annotations
|
|
3
|
+
|
|
4
|
+
import dataclasses
|
|
5
|
+
import logging
|
|
6
|
+
from typing import TYPE_CHECKING
|
|
7
|
+
|
|
8
|
+
from llm_code.api.types import Message, MessageRequest, TextBlock, ToolResultBlock, ToolUseBlock
|
|
9
|
+
from llm_code.runtime.session import Session
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from llm_code.api.provider import LLMProvider
|
|
13
|
+
|
|
14
|
+
_log = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
_SUMMARIZE_SYSTEM_PROMPT = """\
|
|
17
|
+
You are a context compression agent. Given conversation messages from a coding \
|
|
18
|
+
session, produce a concise summary preserving:
|
|
19
|
+
|
|
20
|
+
1. What files were read, created, or modified (exact paths)
|
|
21
|
+
2. Key decisions made and their rationale
|
|
22
|
+
3. Current state of the task (what's done, what's pending)
|
|
23
|
+
4. Any errors encountered and how they were resolved
|
|
24
|
+
|
|
25
|
+
Be factual. Use bullet points. Do not include code blocks unless critical.
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class ContextCompressor:
|
|
30
|
+
"""Progressively compress a Session context through 5 escalating levels.
|
|
31
|
+
|
|
32
|
+
Level 1 — snip_compact: Truncate oversized ToolResultBlock content.
|
|
33
|
+
Level 2 — micro_compact: Remove stale read_file results (keep only latest per path).
|
|
34
|
+
Level 3 — context_collapse: Replace old tool_call+result pairs with one-line summaries.
|
|
35
|
+
Level 4 — auto_compact: Discard all old messages, keep a summary + recent tail.
|
|
36
|
+
Level 5 — llm_summarize: (async only) Replace Level 4 placeholder with LLM-generated summary.
|
|
37
|
+
|
|
38
|
+
Cache-aware: tracks which message indices have been sent to the API (cached).
|
|
39
|
+
Compression levels prefer removing non-cached messages first to preserve
|
|
40
|
+
API-side prompt cache hits.
|
|
41
|
+
"""
|
|
42
|
+
|
|
43
|
+
def __init__(
    self,
    max_result_chars: int = 2000,
    provider: "LLMProvider | None" = None,
    summarize_model: str = "",
    max_summary_tokens: int = 1000,
) -> None:
    """Configure compression thresholds and the optional Level-5 summarizer.

    Args:
        max_result_chars: Truncation limit applied to ToolResultBlock content
            by Level 1 (snip_compact).
        provider: Optional LLM provider; when set, ``compress_async`` runs
            Level 5 LLM summarization.
        summarize_model: Model name used for the summarization request.
        max_summary_tokens: ``max_tokens`` for the summarization request.
    """
    self._max_result_chars = max_result_chars
    # Message indices already sent to the API; compression levels prefer to
    # leave these untouched so server-side prompt caching keeps hitting.
    self._cached_indices: set[int] = set()
    self._provider = provider
    self._summarize_model = summarize_model
    self._max_summary_tokens = max_summary_tokens
|
|
55
|
+
|
|
56
|
+
# ------------------------------------------------------------------
|
|
57
|
+
# Cache tracking
|
|
58
|
+
# ------------------------------------------------------------------
|
|
59
|
+
|
|
60
|
+
def mark_as_cached(self, message_indices: set[int]) -> None:
|
|
61
|
+
"""Mark which message indices have been sent to the API (cache hits)."""
|
|
62
|
+
self._cached_indices.update(message_indices)
|
|
63
|
+
|
|
64
|
+
def _is_cached(self, index: int) -> bool:
|
|
65
|
+
"""Return True if the message at *index* has been sent to the API."""
|
|
66
|
+
return index in self._cached_indices
|
|
67
|
+
|
|
68
|
+
# ------------------------------------------------------------------
|
|
69
|
+
# Public entry point
|
|
70
|
+
# ------------------------------------------------------------------
|
|
71
|
+
|
|
72
|
+
def compress(self, session: Session, max_tokens: int) -> Session:
    """Compress *session* until ``estimated_tokens() <= max_tokens``.

    Levels are applied in escalating order and application stops as soon as
    the budget is met. If even Level 4 cannot reach the budget, its
    best-effort result is returned anyway.

    Cached-index bookkeeping is reset up front, since message positions
    shift once any level rewrites the message list.
    """
    if session.estimated_tokens() <= max_tokens:
        return session
    # Positions change during compression, so stale cache indices are useless.
    self._cached_indices.clear()

    stages = (
        self._snip_compact,
        self._micro_compact,
        lambda s: self._context_collapse(s, keep_recent=6),
        lambda s: self._auto_compact(s, keep_recent=4),
    )
    for stage in stages:
        session = stage(session)
        if session.estimated_tokens() <= max_tokens:
            break
    return session
|
|
100
|
+
|
|
101
|
+
async def compress_async(self, session: Session, max_tokens: int) -> Session:
    """Async compress; additionally runs Level 5 LLM summarization when a provider is set."""
    compressed = self.compress(session, max_tokens)
    if self._provider is None:
        return compressed
    return await self._llm_summarize(compressed)
|
|
107
|
+
|
|
108
|
+
# ------------------------------------------------------------------
|
|
109
|
+
# Level 5 (async only)
|
|
110
|
+
# ------------------------------------------------------------------
|
|
111
|
+
|
|
112
|
+
async def _llm_summarize(self, session: Session) -> Session:
    """Replace Level 4 placeholder with LLM-generated summary.

    Looks for the first message containing the Level-4 marker text
    "[Previous conversation summary]", asks the configured provider to
    summarize everything else, and swaps the placeholder message for the
    generated summary. Returns the session unchanged when there is no
    placeholder, nothing to summarize, or the provider call fails.
    """
    # Locate the Level-4 placeholder message, if any.
    placeholder_idx = None
    for i, msg in enumerate(session.messages):
        for block in msg.content:
            if isinstance(block, TextBlock) and "[Previous conversation summary]" in block.text:
                placeholder_idx = i
                break
        if placeholder_idx is not None:
            break

    if placeholder_idx is None:
        return session

    # Build context from remaining messages; each block is clipped so the
    # summarization prompt itself stays bounded.
    context_parts: list[str] = []
    for i, msg in enumerate(session.messages):
        if i == placeholder_idx:
            continue  # don't feed the placeholder back into the summary
        for block in msg.content:
            if isinstance(block, TextBlock):
                context_parts.append(f"[{msg.role}] {block.text[:500]}")
            elif isinstance(block, ToolUseBlock):
                context_parts.append(f"[tool_call] {block.name}({str(block.input)[:200]})")
            elif isinstance(block, ToolResultBlock):
                context_parts.append(f"[tool_result] {block.content[:200]}")

    if not context_parts:
        return session

    try:
        request = MessageRequest(
            model=self._summarize_model,
            system=_SUMMARIZE_SYSTEM_PROMPT,
            messages=(
                Message(
                    role="user",
                    content=(TextBlock(text="Summarize this conversation:\n\n" + "\n".join(context_parts)),),
                ),
            ),
            max_tokens=self._max_summary_tokens,
        )
        response = await self._provider.complete(request)
        # response.content may be structured rather than a plain string;
        # coerce defensively.
        summary_text = response.content if isinstance(response.content, str) else str(response.content)
    except Exception:
        # Best-effort level: keep the Level-4 placeholder on any failure.
        _log.warning("Level 5 LLM summarization failed, keeping placeholder", exc_info=True)
        return session

    summary_msg = Message(
        role="user",
        content=(TextBlock(text=f"[Conversation summary]\n{summary_text}"),),
    )
    # Swap the placeholder in place; all other messages are preserved as-is.
    messages = list(session.messages)
    messages[placeholder_idx] = summary_msg
    return dataclasses.replace(session, messages=tuple(messages))
|
|
167
|
+
|
|
168
|
+
# ------------------------------------------------------------------
|
|
169
|
+
# Level 1
|
|
170
|
+
# ------------------------------------------------------------------
|
|
171
|
+
|
|
172
|
+
def _snip_compact(self, session: Session) -> Session:
    """Level 1: truncate every oversized ToolResultBlock to *max_result_chars*.

    NOTE(review): the previous implementation documented a cache-aware
    fallback ("truncate non-cached first, cached only if needed") but its
    second pass unconditionally truncated cached results as well, making
    the first pass dead work. The net effect was always "truncate every
    oversized result", which this single pass reproduces exactly.

    Returns the original session object untouched when nothing is oversized.
    """
    new_messages: list[Message] = []
    changed = False
    for msg in session.messages:
        new_blocks: list = []
        msg_changed = False
        for block in msg.content:
            if isinstance(block, ToolResultBlock) and len(block.content) > self._max_result_chars:
                # Keep only the head of the result; the tail of a long tool
                # output is the least informative part.
                truncated = block.content[: self._max_result_chars]
                new_blocks.append(dataclasses.replace(block, content=truncated))
                msg_changed = True
            else:
                new_blocks.append(block)
        if msg_changed:
            new_messages.append(dataclasses.replace(msg, content=tuple(new_blocks)))
            changed = True
        else:
            new_messages.append(msg)

    if not changed:
        return session
    return dataclasses.replace(session, messages=tuple(new_messages))
|
|
228
|
+
|
|
229
|
+
# ------------------------------------------------------------------
|
|
230
|
+
# Level 2
|
|
231
|
+
# ------------------------------------------------------------------
|
|
232
|
+
|
|
233
|
+
def _micro_compact(self, session: Session) -> Session:
    """For the same file read multiple times, keep only the latest read_file result.

    Strategy: build a mapping from tool_use_id → file path for all read_file
    ToolUseBlocks. Then, for each file path, collect the tool_use_ids in order
    and mark all but the last one for removal. Finally rebuild messages, dropping
    ToolResultBlocks whose tool_use_id is marked.

    Cache-aware: prefer removing non-cached stale reads first. If no
    non-cached duplicates exist, fall back to removing cached ones.
    """
    # Pass 1: map tool_use_id → (path, message_index) for read_file calls
    id_to_path: dict[str, str] = {}
    id_to_msg_index: dict[str, int] = {}
    for msg_idx, msg in enumerate(session.messages):
        for block in msg.content:
            if isinstance(block, ToolUseBlock) and block.name == "read_file":
                path = block.input.get("path", "")
                if path:
                    id_to_path[block.id] = path
                    id_to_msg_index[block.id] = msg_idx

    # For each path, keep only the last tool_use_id.
    # Dicts preserve insertion order, so ids per path stay in call order.
    path_to_ids: dict[str, list[str]] = {}
    for tid, path in id_to_path.items():
        path_to_ids.setdefault(path, []).append(tid)

    stale_ids: set[str] = set()
    for path, ids in path_to_ids.items():
        if len(ids) > 1:
            candidate_stale = ids[:-1]  # all but the last
            # Prefer removing non-cached first; only include cached if necessary.
            # id_to_msg_index.get(t, -1): -1 never appears in the cached set,
            # so an unknown id is treated as non-cached.
            non_cached_stale = [t for t in candidate_stale if not self._is_cached(id_to_msg_index.get(t, -1))]
            if non_cached_stale:
                stale_ids.update(non_cached_stale)
            else:
                # Fallback: remove cached stale reads when no non-cached option exists
                stale_ids.update(candidate_stale)

    if not stale_ids:
        # Nothing duplicated — return the original object untouched.
        return session

    # Pass 2: rebuild messages, dropping stale ToolResultBlocks (and their paired ToolUseBlocks)
    new_messages: list[Message] = []
    for msg in session.messages:
        new_blocks = []
        for block in msg.content:
            if isinstance(block, ToolResultBlock) and block.tool_use_id in stale_ids:
                continue  # drop stale result
            if isinstance(block, ToolUseBlock) and block.id in stale_ids:
                continue  # drop stale use block too
            new_blocks.append(block)
        if new_blocks:
            new_messages.append(dataclasses.replace(msg, content=tuple(new_blocks)))
        # If a message becomes empty (all blocks dropped), skip it entirely

    return dataclasses.replace(session, messages=tuple(new_messages))
|
|
290
|
+
|
|
291
|
+
# ------------------------------------------------------------------
|
|
292
|
+
# Level 3
|
|
293
|
+
# ------------------------------------------------------------------
|
|
294
|
+
|
|
295
|
+
def _context_collapse(self, session: Session, keep_recent: int = 6) -> Session:
    """Replace old tool_call+result pairs with one-line summary text.

    Messages in the *keep_recent* tail are kept intact.  Earlier messages
    are converted: ToolUseBlock/ToolResultBlock -> summary TextBlock.

    Cache-aware: cached messages in the old section are passed through
    as-is so they stay eligible for API cache hits; only the non-cached
    old messages are collapsed into a single summary message.

    Note: the previous implementation carried a "fallback" branch that
    claimed to collapse cached messages.  That branch was unreachable
    except when both the cached set and the summary were empty, where it
    always reduced to dropping the old section entirely — the behavior
    preserved (and made explicit) below.
    """
    if len(session.messages) <= keep_recent:
        return session

    old_messages = session.messages[:-keep_recent]
    recent_messages = session.messages[-keep_recent:]

    def summarize(messages) -> list[str]:
        """One summary line per tool call / non-empty text block.

        ToolResultBlock payloads are intentionally omitted — dropping
        result bodies is the whole point of the collapse.
        """
        lines: list[str] = []
        for msg in messages:
            for block in msg.content:
                if isinstance(block, ToolUseBlock):
                    # Show at most the first three arguments to keep lines short.
                    args_summary = ", ".join(
                        f"{k}={v!r}" for k, v in list(block.input.items())[:3]
                    )
                    lines.append(f"Used {block.name}({args_summary})")
                elif isinstance(block, TextBlock) and block.text.strip():
                    excerpt = block.text[:80].replace("\n", " ")
                    lines.append(f"[msg] {excerpt}")
        return lines

    # Partition the old section: cached messages are preserved verbatim,
    # non-cached ones are summarized.  Indexing into old_messages matches
    # the absolute message index because old_messages starts at index 0.
    cached_old = [
        msg for idx, msg in enumerate(old_messages) if self._is_cached(idx)
    ]
    non_cached_old = [
        msg for idx, msg in enumerate(old_messages) if not self._is_cached(idx)
    ]

    summary_lines = summarize(non_cached_old)

    # Nothing worth keeping from the old section: keep only the tail.
    if not summary_lines and not cached_old:
        return dataclasses.replace(session, messages=recent_messages)

    preserved_cached = tuple(cached_old)
    if summary_lines:
        summary_text = "\n".join(summary_lines)
        summary_msg = Message(
            role="user",
            content=(TextBlock(text=f"[Context summary]\n{summary_text}"),),
        )
        new_old_section = preserved_cached + (summary_msg,)
    else:
        new_old_section = preserved_cached

    return dataclasses.replace(
        session,
        messages=new_old_section + recent_messages,
    )
|
|
356
|
+
|
|
357
|
+
# Fallback: collapse all old messages (including cached) — no non-cached existed
|
|
358
|
+
all_summary_lines: list[str] = []
|
|
359
|
+
for msg in old_messages:
|
|
360
|
+
for block in msg.content:
|
|
361
|
+
if isinstance(block, ToolUseBlock):
|
|
362
|
+
args_summary = ", ".join(
|
|
363
|
+
f"{k}={v!r}" for k, v in list(block.input.items())[:3]
|
|
364
|
+
)
|
|
365
|
+
all_summary_lines.append(f"Used {block.name}({args_summary})")
|
|
366
|
+
elif isinstance(block, ToolResultBlock):
|
|
367
|
+
pass
|
|
368
|
+
elif isinstance(block, TextBlock) and block.text.strip():
|
|
369
|
+
excerpt = block.text[:80].replace("\n", " ")
|
|
370
|
+
all_summary_lines.append(f"[msg] {excerpt}")
|
|
371
|
+
|
|
372
|
+
if not all_summary_lines:
|
|
373
|
+
return dataclasses.replace(session, messages=recent_messages)
|
|
374
|
+
|
|
375
|
+
summary_text = "\n".join(all_summary_lines)
|
|
376
|
+
summary_msg = Message(
|
|
377
|
+
role="user",
|
|
378
|
+
content=(TextBlock(text=f"[Context summary]\n{summary_text}"),),
|
|
379
|
+
)
|
|
380
|
+
return dataclasses.replace(
|
|
381
|
+
session,
|
|
382
|
+
messages=(summary_msg,) + recent_messages,
|
|
383
|
+
)
|
|
384
|
+
|
|
385
|
+
# ------------------------------------------------------------------
|
|
386
|
+
# Level 4
|
|
387
|
+
# ------------------------------------------------------------------
|
|
388
|
+
|
|
389
|
+
def _auto_compact(self, session: Session, keep_recent: int = 4) -> Session:
    """Replace all old messages with a single summary placeholder + keep tail.

    This mirrors the logic in :func:`llm_code.runtime.compaction.compact_session`.

    Cache-aware: cached messages from the old section are preserved before
    the summary placeholder so they remain available for API cache hits.
    """
    total = len(session.messages)
    if total <= keep_recent:
        return session

    split = total - keep_recent
    head = session.messages[:split]
    tail = session.messages[split:]

    # Keep any cached head messages verbatim so prompt caching still hits.
    kept = tuple(msg for idx, msg in enumerate(head) if self._is_cached(idx))

    placeholder = Message(
        role="user",
        content=(TextBlock(text="[Previous conversation summary]\n"),),
    )
    return dataclasses.replace(session, messages=kept + (placeholder,) + tail)
|