aleph-rlm 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aleph/__init__.py +49 -0
- aleph/cache/__init__.py +6 -0
- aleph/cache/base.py +20 -0
- aleph/cache/memory.py +27 -0
- aleph/cli.py +1044 -0
- aleph/config.py +154 -0
- aleph/core.py +874 -0
- aleph/mcp/__init__.py +30 -0
- aleph/mcp/local_server.py +3527 -0
- aleph/mcp/server.py +20 -0
- aleph/prompts/__init__.py +5 -0
- aleph/prompts/system.py +45 -0
- aleph/providers/__init__.py +14 -0
- aleph/providers/anthropic.py +253 -0
- aleph/providers/base.py +59 -0
- aleph/providers/openai.py +224 -0
- aleph/providers/registry.py +22 -0
- aleph/repl/__init__.py +5 -0
- aleph/repl/helpers.py +1068 -0
- aleph/repl/sandbox.py +777 -0
- aleph/sub_query/__init__.py +166 -0
- aleph/sub_query/api_backend.py +166 -0
- aleph/sub_query/cli_backend.py +327 -0
- aleph/types.py +216 -0
- aleph/utils/__init__.py +6 -0
- aleph/utils/logging.py +79 -0
- aleph/utils/tokens.py +43 -0
- aleph_rlm-0.6.0.dist-info/METADATA +358 -0
- aleph_rlm-0.6.0.dist-info/RECORD +32 -0
- aleph_rlm-0.6.0.dist-info/WHEEL +4 -0
- aleph_rlm-0.6.0.dist-info/entry_points.txt +3 -0
- aleph_rlm-0.6.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,3527 @@
|
|
|
1
|
+
"""Aleph MCP server for use with Claude Desktop, Cursor, Windsurf, etc.
|
|
2
|
+
|
|
3
|
+
This server exposes Aleph's context exploration tools and optional action tools.
|
|
4
|
+
|
|
5
|
+
Tools:
|
|
6
|
+
- load_context: Load text/data into sandboxed REPL
|
|
7
|
+
- peek_context: View character/line ranges
|
|
8
|
+
- search_context: Regex search with context
|
|
9
|
+
- semantic_search: Meaning-based search over the context
|
|
10
|
+
- exec_python: Execute Python code in sandbox
|
|
11
|
+
- get_variable: Retrieve variables from REPL
|
|
12
|
+
- sub_query: RLM-style recursive sub-agent queries (CLI or API backend)
|
|
13
|
+
- think: Structure a reasoning sub-step (returns prompt for YOU to reason about)
|
|
14
|
+
- tasks: Lightweight task tracking per context
|
|
15
|
+
- get_status: Show current session state
|
|
16
|
+
- get_evidence: Retrieve collected evidence/citations
|
|
17
|
+
- finalize: Mark task complete with answer
|
|
18
|
+
- chunk_context: Split context into chunks with metadata for navigation
|
|
19
|
+
- evaluate_progress: Self-evaluate progress with convergence tracking
|
|
20
|
+
- summarize_so_far: Compress reasoning history to manage context window
|
|
21
|
+
- rg_search: Fast repo search via ripgrep (action tool)
|
|
22
|
+
|
|
23
|
+
Usage:
|
|
24
|
+
aleph
|
|
25
|
+
"""
|
|
26
|
+
|
|
27
|
+
from __future__ import annotations
|
|
28
|
+
|
|
29
|
+
import asyncio
|
|
30
|
+
import bz2
|
|
31
|
+
from contextlib import AsyncExitStack
|
|
32
|
+
import difflib
|
|
33
|
+
import fnmatch
|
|
34
|
+
import gzip
|
|
35
|
+
import importlib
|
|
36
|
+
import inspect
|
|
37
|
+
import io
|
|
38
|
+
import json
|
|
39
|
+
import lzma
|
|
40
|
+
import os
|
|
41
|
+
import re
|
|
42
|
+
import shutil
|
|
43
|
+
import shlex
|
|
44
|
+
import subprocess
|
|
45
|
+
import sys
|
|
46
|
+
import time
|
|
47
|
+
import zipfile
|
|
48
|
+
from dataclasses import dataclass, field
|
|
49
|
+
from datetime import datetime
|
|
50
|
+
from pathlib import Path
|
|
51
|
+
from typing import Any, Iterable, Literal, cast
|
|
52
|
+
import xml.etree.ElementTree as ET
|
|
53
|
+
from html.parser import HTMLParser
|
|
54
|
+
|
|
55
|
+
from ..repl.sandbox import REPLEnvironment, SandboxConfig
|
|
56
|
+
from ..types import ContentFormat, ContextMetadata
|
|
57
|
+
from ..sub_query import SubQueryConfig, detect_backend, has_api_credentials
|
|
58
|
+
from ..sub_query.cli_backend import run_cli_sub_query, CLI_BACKENDS
|
|
59
|
+
from ..sub_query.api_backend import run_api_sub_query
|
|
60
|
+
|
|
61
|
+
__all__ = ["AlephMCPServerLocal", "main", "mcp"]
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
LineNumberBase = Literal[0, 1]
|
|
65
|
+
DEFAULT_LINE_NUMBER_BASE: LineNumberBase = 1
|
|
66
|
+
WorkspaceMode = Literal["fixed", "git", "any"]
|
|
67
|
+
DEFAULT_WORKSPACE_MODE: WorkspaceMode = "fixed"
|
|
68
|
+
ToolDocsMode = Literal["concise", "full"]
|
|
69
|
+
DEFAULT_TOOL_DOCS_MODE: ToolDocsMode = "concise"
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _get_env_float(name: str, default: float) -> float:
|
|
73
|
+
value = os.environ.get(name, "").strip()
|
|
74
|
+
if not value:
|
|
75
|
+
return default
|
|
76
|
+
try:
|
|
77
|
+
return float(value)
|
|
78
|
+
except ValueError:
|
|
79
|
+
return default
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def _get_env_int(name: str, default: int) -> int:
|
|
83
|
+
value = os.environ.get(name, "").strip()
|
|
84
|
+
if not value:
|
|
85
|
+
return default
|
|
86
|
+
try:
|
|
87
|
+
return int(value)
|
|
88
|
+
except ValueError:
|
|
89
|
+
return default
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _get_env_bool(name: str, default: bool) -> bool:
|
|
93
|
+
value = os.environ.get(name)
|
|
94
|
+
if value is None:
|
|
95
|
+
return default
|
|
96
|
+
value = value.strip().lower()
|
|
97
|
+
if value in {"1", "true", "yes", "y", "on"}:
|
|
98
|
+
return True
|
|
99
|
+
if value in {"0", "false", "no", "n", "off"}:
|
|
100
|
+
return False
|
|
101
|
+
return default
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
DEFAULT_REMOTE_TOOL_TIMEOUT_SECONDS = _get_env_float(
|
|
105
|
+
"ALEPH_REMOTE_TOOL_TIMEOUT",
|
|
106
|
+
120.0,
|
|
107
|
+
)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@dataclass
|
|
111
|
+
class _Evidence:
|
|
112
|
+
"""Provenance tracking for reasoning conclusions."""
|
|
113
|
+
source: Literal["search", "peek", "exec", "manual", "action", "sub_query"]
|
|
114
|
+
line_range: tuple[int, int] | None
|
|
115
|
+
pattern: str | None
|
|
116
|
+
snippet: str
|
|
117
|
+
note: str | None = None
|
|
118
|
+
timestamp: datetime = field(default_factory=datetime.now)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def _detect_format(text: str) -> ContentFormat:
|
|
122
|
+
"""Detect content format from text."""
|
|
123
|
+
t = text.lstrip()
|
|
124
|
+
if t.startswith("{") or t.startswith("["):
|
|
125
|
+
try:
|
|
126
|
+
json.loads(text)
|
|
127
|
+
return ContentFormat.JSON
|
|
128
|
+
except Exception:
|
|
129
|
+
return ContentFormat.TEXT
|
|
130
|
+
return ContentFormat.TEXT
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _detect_format_for_suffix(text: str, suffix: str) -> ContentFormat:
|
|
134
|
+
ext = suffix.lower()
|
|
135
|
+
if ext in {".jsonl", ".ndjson"}:
|
|
136
|
+
return ContentFormat.JSONL
|
|
137
|
+
if ext == ".csv":
|
|
138
|
+
return ContentFormat.CSV
|
|
139
|
+
if ext == ".json":
|
|
140
|
+
return ContentFormat.JSON if _detect_format(text) == ContentFormat.JSON else ContentFormat.TEXT
|
|
141
|
+
if ext in {
|
|
142
|
+
".py", ".js", ".jsx", ".ts", ".tsx", ".go", ".rs", ".java", ".rb", ".php", ".cs",
|
|
143
|
+
".c", ".h", ".cpp", ".hpp",
|
|
144
|
+
}:
|
|
145
|
+
return ContentFormat.CODE
|
|
146
|
+
return _detect_format(text)
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _coerce_context_to_text(value: Any) -> str:
|
|
150
|
+
if isinstance(value, str):
|
|
151
|
+
return value
|
|
152
|
+
if isinstance(value, bytes):
|
|
153
|
+
return value.decode("utf-8", errors="replace")
|
|
154
|
+
if isinstance(value, (dict, list, tuple)):
|
|
155
|
+
try:
|
|
156
|
+
return json.dumps(value, ensure_ascii=False, indent=2)
|
|
157
|
+
except Exception:
|
|
158
|
+
return str(value)
|
|
159
|
+
return str(value)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def _effective_suffix(path: Path) -> str:
|
|
163
|
+
suffixes = [s.lower() for s in path.suffixes]
|
|
164
|
+
if suffixes and suffixes[-1] in {".gz", ".bz2", ".xz"}:
|
|
165
|
+
return suffixes[-2] if len(suffixes) > 1 else ""
|
|
166
|
+
return path.suffix.lower()
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def _decompress_bytes(path: Path, data: bytes) -> tuple[bytes, str | None]:
|
|
170
|
+
ext = path.suffix.lower()
|
|
171
|
+
if ext == ".gz":
|
|
172
|
+
return gzip.decompress(data), "gzip"
|
|
173
|
+
if ext == ".bz2":
|
|
174
|
+
return bz2.decompress(data), "bzip2"
|
|
175
|
+
if ext == ".xz":
|
|
176
|
+
return lzma.decompress(data), "xz"
|
|
177
|
+
return data, None
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
class _HTMLTextExtractor(HTMLParser):
|
|
181
|
+
def __init__(self) -> None:
|
|
182
|
+
super().__init__()
|
|
183
|
+
self._chunks: list[str] = []
|
|
184
|
+
self._skip = False
|
|
185
|
+
|
|
186
|
+
def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
|
|
187
|
+
if tag in {"script", "style"}:
|
|
188
|
+
self._skip = True
|
|
189
|
+
|
|
190
|
+
def handle_endtag(self, tag: str) -> None:
|
|
191
|
+
if tag in {"script", "style"}:
|
|
192
|
+
self._skip = False
|
|
193
|
+
|
|
194
|
+
def handle_data(self, data: str) -> None:
|
|
195
|
+
if self._skip:
|
|
196
|
+
return
|
|
197
|
+
stripped = data.strip()
|
|
198
|
+
if stripped:
|
|
199
|
+
self._chunks.append(stripped)
|
|
200
|
+
|
|
201
|
+
def text(self) -> str:
|
|
202
|
+
return "\n".join(self._chunks)
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def _extract_text_from_html(text: str) -> str:
|
|
206
|
+
parser = _HTMLTextExtractor()
|
|
207
|
+
parser.feed(text)
|
|
208
|
+
return parser.text()
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def _extract_text_from_docx(data: bytes) -> str:
|
|
212
|
+
with zipfile.ZipFile(io.BytesIO(data)) as zf:
|
|
213
|
+
xml_bytes = zf.read("word/document.xml")
|
|
214
|
+
root = ET.fromstring(xml_bytes)
|
|
215
|
+
paragraphs: list[str] = []
|
|
216
|
+
for para in root.iter():
|
|
217
|
+
if not para.tag.endswith("}p"):
|
|
218
|
+
continue
|
|
219
|
+
parts: list[str] = []
|
|
220
|
+
for node in para.iter():
|
|
221
|
+
if node.tag.endswith("}t") and node.text:
|
|
222
|
+
parts.append(node.text)
|
|
223
|
+
if parts:
|
|
224
|
+
paragraphs.append("".join(parts))
|
|
225
|
+
return "\n".join(paragraphs)
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
def _extract_text_from_pdf(
|
|
229
|
+
data: bytes,
|
|
230
|
+
path: Path | None,
|
|
231
|
+
timeout_seconds: float,
|
|
232
|
+
) -> tuple[str, str | None]:
|
|
233
|
+
for module_name in ("pypdf", "PyPDF2"):
|
|
234
|
+
try:
|
|
235
|
+
module = importlib.import_module(module_name)
|
|
236
|
+
reader = module.PdfReader(io.BytesIO(data))
|
|
237
|
+
pages: list[str] = []
|
|
238
|
+
for page in reader.pages:
|
|
239
|
+
try:
|
|
240
|
+
page_text = page.extract_text() or ""
|
|
241
|
+
except Exception:
|
|
242
|
+
page_text = ""
|
|
243
|
+
if page_text:
|
|
244
|
+
pages.append(page_text)
|
|
245
|
+
text = "\n".join(pages).strip()
|
|
246
|
+
if text:
|
|
247
|
+
return text, None
|
|
248
|
+
except Exception:
|
|
249
|
+
continue
|
|
250
|
+
|
|
251
|
+
if path is not None:
|
|
252
|
+
pdf_tool = shutil.which("pdftotext")
|
|
253
|
+
if pdf_tool:
|
|
254
|
+
try:
|
|
255
|
+
result = subprocess.run(
|
|
256
|
+
[pdf_tool, "-layout", str(path), "-"],
|
|
257
|
+
capture_output=True,
|
|
258
|
+
text=True,
|
|
259
|
+
timeout=timeout_seconds,
|
|
260
|
+
)
|
|
261
|
+
except Exception as e:
|
|
262
|
+
return "", f"pdftotext failed: {e}"
|
|
263
|
+
if result.returncode == 0 and result.stdout.strip():
|
|
264
|
+
return result.stdout, None
|
|
265
|
+
stderr = result.stderr.strip()
|
|
266
|
+
if stderr:
|
|
267
|
+
return "", f"pdftotext error: {stderr}"
|
|
268
|
+
|
|
269
|
+
return "", "PDF extraction unavailable. Install `pypdf` or `pdftotext` for best results."
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def _load_text_from_path(
|
|
273
|
+
path: Path,
|
|
274
|
+
max_bytes: int,
|
|
275
|
+
timeout_seconds: float,
|
|
276
|
+
) -> tuple[str, ContentFormat, str | None]:
|
|
277
|
+
data = path.read_bytes()
|
|
278
|
+
if len(data) > max_bytes:
|
|
279
|
+
raise ValueError(f"File too large to read (>{max_bytes} bytes): {path}")
|
|
280
|
+
|
|
281
|
+
data, compression = _decompress_bytes(path, data)
|
|
282
|
+
if compression and len(data) > max_bytes:
|
|
283
|
+
raise ValueError(f"Decompressed file too large (>{max_bytes} bytes): {path}")
|
|
284
|
+
|
|
285
|
+
suffix = _effective_suffix(path)
|
|
286
|
+
warning: str | None = None
|
|
287
|
+
|
|
288
|
+
if suffix == ".pdf":
|
|
289
|
+
text, warning = _extract_text_from_pdf(data, path, timeout_seconds)
|
|
290
|
+
if not text.strip():
|
|
291
|
+
raise ValueError(warning or "Failed to extract PDF text")
|
|
292
|
+
elif suffix == ".docx":
|
|
293
|
+
try:
|
|
294
|
+
text = _extract_text_from_docx(data)
|
|
295
|
+
except Exception as e:
|
|
296
|
+
raise ValueError(f"Failed to extract DOCX text: {e}") from e
|
|
297
|
+
if not text.strip():
|
|
298
|
+
warning = "DOCX extraction produced empty text"
|
|
299
|
+
elif suffix in {".html", ".htm"}:
|
|
300
|
+
text = _extract_text_from_html(data.decode("utf-8", errors="replace"))
|
|
301
|
+
else:
|
|
302
|
+
text = data.decode("utf-8", errors="replace")
|
|
303
|
+
|
|
304
|
+
fmt = _detect_format_for_suffix(text, suffix)
|
|
305
|
+
return text, fmt, warning
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def _analyze_text_context(text: str, fmt: ContentFormat) -> ContextMetadata:
|
|
309
|
+
"""Analyze text and return metadata."""
|
|
310
|
+
return ContextMetadata(
|
|
311
|
+
format=fmt,
|
|
312
|
+
size_bytes=len(text.encode("utf-8", errors="ignore")),
|
|
313
|
+
size_chars=len(text),
|
|
314
|
+
size_lines=text.count("\n") + 1,
|
|
315
|
+
size_tokens_estimate=len(text) // 4,
|
|
316
|
+
structure_hint=None,
|
|
317
|
+
sample_preview=text[:500],
|
|
318
|
+
)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
@dataclass
|
|
322
|
+
class _Session:
|
|
323
|
+
"""Session state for a context."""
|
|
324
|
+
repl: REPLEnvironment
|
|
325
|
+
meta: ContextMetadata
|
|
326
|
+
line_number_base: LineNumberBase = DEFAULT_LINE_NUMBER_BASE
|
|
327
|
+
created_at: datetime = field(default_factory=datetime.now)
|
|
328
|
+
iterations: int = 0
|
|
329
|
+
think_history: list[str] = field(default_factory=list)
|
|
330
|
+
# Provenance tracking
|
|
331
|
+
evidence: list[_Evidence] = field(default_factory=list)
|
|
332
|
+
# Convergence signals
|
|
333
|
+
confidence_history: list[float] = field(default_factory=list)
|
|
334
|
+
information_gain: list[int] = field(default_factory=list) # evidence count per iteration
|
|
335
|
+
# Chunk metadata for navigation
|
|
336
|
+
chunks: list[dict] | None = None
|
|
337
|
+
# Lightweight task tracking
|
|
338
|
+
tasks: list[dict[str, Any]] = field(default_factory=list)
|
|
339
|
+
task_counter: int = 0
|
|
340
|
+
|
|
341
|
+
|
|
342
|
+
def _session_to_payload(session_id: str, session: _Session) -> dict[str, Any]:
|
|
343
|
+
ctx_val = session.repl.get_variable("ctx")
|
|
344
|
+
ctx_text = _coerce_context_to_text(ctx_val)
|
|
345
|
+
tasks_payload: list[dict[str, Any]] = []
|
|
346
|
+
for task in session.tasks:
|
|
347
|
+
if isinstance(task, dict):
|
|
348
|
+
tasks_payload.append(task)
|
|
349
|
+
|
|
350
|
+
return {
|
|
351
|
+
"schema": "aleph.session.v1",
|
|
352
|
+
"session_id": session_id,
|
|
353
|
+
"context_id": session_id,
|
|
354
|
+
"created_at": session.created_at.isoformat(),
|
|
355
|
+
"iterations": session.iterations,
|
|
356
|
+
"line_number_base": session.line_number_base,
|
|
357
|
+
"meta": {
|
|
358
|
+
"format": session.meta.format.value,
|
|
359
|
+
"size_bytes": session.meta.size_bytes,
|
|
360
|
+
"size_chars": session.meta.size_chars,
|
|
361
|
+
"size_lines": session.meta.size_lines,
|
|
362
|
+
"size_tokens_estimate": session.meta.size_tokens_estimate,
|
|
363
|
+
"structure_hint": session.meta.structure_hint,
|
|
364
|
+
"sample_preview": session.meta.sample_preview,
|
|
365
|
+
},
|
|
366
|
+
"ctx": ctx_text,
|
|
367
|
+
"think_history": list(session.think_history),
|
|
368
|
+
"confidence_history": list(session.confidence_history),
|
|
369
|
+
"information_gain": list(session.information_gain),
|
|
370
|
+
"chunks": session.chunks,
|
|
371
|
+
"tasks": tasks_payload,
|
|
372
|
+
"task_counter": session.task_counter,
|
|
373
|
+
"evidence": [
|
|
374
|
+
{
|
|
375
|
+
"source": ev.source,
|
|
376
|
+
"line_range": list(ev.line_range) if ev.line_range else None,
|
|
377
|
+
"pattern": ev.pattern,
|
|
378
|
+
"snippet": ev.snippet,
|
|
379
|
+
"note": ev.note,
|
|
380
|
+
"timestamp": ev.timestamp.isoformat(),
|
|
381
|
+
}
|
|
382
|
+
for ev in session.evidence
|
|
383
|
+
],
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
|
|
387
|
+
def _session_from_payload(
|
|
388
|
+
obj: dict[str, Any],
|
|
389
|
+
resolved_id: str,
|
|
390
|
+
sandbox_config: SandboxConfig,
|
|
391
|
+
loop: asyncio.AbstractEventLoop | None,
|
|
392
|
+
) -> _Session:
|
|
393
|
+
ctx = obj.get("ctx")
|
|
394
|
+
if not isinstance(ctx, str):
|
|
395
|
+
raise ValueError("Invalid session payload: ctx must be a string")
|
|
396
|
+
|
|
397
|
+
meta_obj = obj.get("meta")
|
|
398
|
+
if not isinstance(meta_obj, dict):
|
|
399
|
+
meta_obj = {}
|
|
400
|
+
|
|
401
|
+
try:
|
|
402
|
+
fmt = ContentFormat(str(meta_obj.get("format") or "text"))
|
|
403
|
+
except Exception:
|
|
404
|
+
fmt = ContentFormat.TEXT
|
|
405
|
+
|
|
406
|
+
meta = ContextMetadata(
|
|
407
|
+
format=fmt,
|
|
408
|
+
size_bytes=int(meta_obj.get("size_bytes") or len(ctx.encode("utf-8", errors="ignore"))),
|
|
409
|
+
size_chars=int(meta_obj.get("size_chars") or len(ctx)),
|
|
410
|
+
size_lines=int(meta_obj.get("size_lines") or (ctx.count("\n") + 1)),
|
|
411
|
+
size_tokens_estimate=int(meta_obj.get("size_tokens_estimate") or (len(ctx) // 4)),
|
|
412
|
+
structure_hint=meta_obj.get("structure_hint"),
|
|
413
|
+
sample_preview=str(meta_obj.get("sample_preview") or ctx[:500]),
|
|
414
|
+
)
|
|
415
|
+
|
|
416
|
+
repl = REPLEnvironment(
|
|
417
|
+
context=ctx,
|
|
418
|
+
context_var_name="ctx",
|
|
419
|
+
config=sandbox_config,
|
|
420
|
+
loop=loop,
|
|
421
|
+
)
|
|
422
|
+
line_number_base = obj.get("line_number_base")
|
|
423
|
+
if line_number_base is None:
|
|
424
|
+
line_number_base = 0
|
|
425
|
+
try:
|
|
426
|
+
base = _validate_line_number_base(int(line_number_base))
|
|
427
|
+
except Exception:
|
|
428
|
+
base = DEFAULT_LINE_NUMBER_BASE
|
|
429
|
+
repl.set_variable("line_number_base", base)
|
|
430
|
+
|
|
431
|
+
created_at = datetime.now()
|
|
432
|
+
created_at_str = obj.get("created_at")
|
|
433
|
+
if isinstance(created_at_str, str):
|
|
434
|
+
try:
|
|
435
|
+
created_at = datetime.fromisoformat(created_at_str)
|
|
436
|
+
except Exception:
|
|
437
|
+
created_at = datetime.now()
|
|
438
|
+
|
|
439
|
+
tasks_payload = obj.get("tasks")
|
|
440
|
+
tasks: list[dict[str, Any]] = []
|
|
441
|
+
if isinstance(tasks_payload, list):
|
|
442
|
+
for task in tasks_payload:
|
|
443
|
+
if not isinstance(task, dict):
|
|
444
|
+
continue
|
|
445
|
+
if "id" not in task or "title" not in task:
|
|
446
|
+
continue
|
|
447
|
+
tasks.append({
|
|
448
|
+
"id": int(task.get("id")),
|
|
449
|
+
"title": str(task.get("title")),
|
|
450
|
+
"status": str(task.get("status") or "todo"),
|
|
451
|
+
"note": task.get("note"),
|
|
452
|
+
"created_at": task.get("created_at"),
|
|
453
|
+
"updated_at": task.get("updated_at"),
|
|
454
|
+
})
|
|
455
|
+
|
|
456
|
+
task_counter = int(obj.get("task_counter") or (max((t["id"] for t in tasks), default=0)))
|
|
457
|
+
|
|
458
|
+
session = _Session(
|
|
459
|
+
repl=repl,
|
|
460
|
+
meta=meta,
|
|
461
|
+
line_number_base=base,
|
|
462
|
+
created_at=created_at,
|
|
463
|
+
iterations=int(obj.get("iterations") or 0),
|
|
464
|
+
think_history=list(obj.get("think_history") or []),
|
|
465
|
+
confidence_history=list(obj.get("confidence_history") or []),
|
|
466
|
+
information_gain=list(obj.get("information_gain") or []),
|
|
467
|
+
chunks=obj.get("chunks"),
|
|
468
|
+
tasks=tasks,
|
|
469
|
+
task_counter=task_counter,
|
|
470
|
+
)
|
|
471
|
+
|
|
472
|
+
ev_list = obj.get("evidence")
|
|
473
|
+
if isinstance(ev_list, list):
|
|
474
|
+
for ev in ev_list:
|
|
475
|
+
if not isinstance(ev, dict):
|
|
476
|
+
continue
|
|
477
|
+
source = ev.get("source")
|
|
478
|
+
if source not in {"search", "peek", "exec", "manual", "action", "sub_query"}:
|
|
479
|
+
continue
|
|
480
|
+
line_range = ev.get("line_range")
|
|
481
|
+
if isinstance(line_range, list) and len(line_range) == 2:
|
|
482
|
+
try:
|
|
483
|
+
line_range = (int(line_range[0]), int(line_range[1]))
|
|
484
|
+
except Exception:
|
|
485
|
+
line_range = None
|
|
486
|
+
else:
|
|
487
|
+
line_range = None
|
|
488
|
+
timestamp = datetime.now()
|
|
489
|
+
ts_str = ev.get("timestamp")
|
|
490
|
+
if isinstance(ts_str, str):
|
|
491
|
+
try:
|
|
492
|
+
timestamp = datetime.fromisoformat(ts_str)
|
|
493
|
+
except Exception:
|
|
494
|
+
timestamp = datetime.now()
|
|
495
|
+
session.evidence.append(
|
|
496
|
+
_Evidence(
|
|
497
|
+
source=source,
|
|
498
|
+
line_range=line_range,
|
|
499
|
+
pattern=ev.get("pattern"),
|
|
500
|
+
snippet=str(ev.get("snippet") or ""),
|
|
501
|
+
note=ev.get("note"),
|
|
502
|
+
timestamp=timestamp,
|
|
503
|
+
)
|
|
504
|
+
)
|
|
505
|
+
|
|
506
|
+
return session
|
|
507
|
+
|
|
508
|
+
def _resolve_env_dir(name: str, require_exists: bool = True) -> Path | None:
|
|
509
|
+
value = os.environ.get(name)
|
|
510
|
+
if value is None:
|
|
511
|
+
return None
|
|
512
|
+
value = value.strip()
|
|
513
|
+
if not value:
|
|
514
|
+
return None
|
|
515
|
+
try:
|
|
516
|
+
path = Path(value).expanduser()
|
|
517
|
+
except Exception:
|
|
518
|
+
return None
|
|
519
|
+
if require_exists and not path.exists():
|
|
520
|
+
return None
|
|
521
|
+
try:
|
|
522
|
+
path = path.resolve()
|
|
523
|
+
except Exception:
|
|
524
|
+
pass
|
|
525
|
+
if path.is_file():
|
|
526
|
+
return path.parent
|
|
527
|
+
return path
|
|
528
|
+
|
|
529
|
+
|
|
530
|
+
def _detect_workspace_root() -> Path:
|
|
531
|
+
env_root = _resolve_env_dir("ALEPH_WORKSPACE_ROOT", require_exists=False)
|
|
532
|
+
if env_root is not None:
|
|
533
|
+
return env_root
|
|
534
|
+
cwd = _resolve_env_dir("PWD") or _resolve_env_dir("INIT_CWD") or Path.cwd()
|
|
535
|
+
for parent in [cwd, *cwd.parents]:
|
|
536
|
+
if (parent / ".git").exists():
|
|
537
|
+
return parent
|
|
538
|
+
return cwd
|
|
539
|
+
|
|
540
|
+
|
|
541
|
+
def _nearest_existing_parent(path: Path) -> Path:
|
|
542
|
+
for parent in [path, *path.parents]:
|
|
543
|
+
if parent.exists():
|
|
544
|
+
return parent
|
|
545
|
+
return path
|
|
546
|
+
|
|
547
|
+
|
|
548
|
+
def _find_git_root(path: Path) -> Path | None:
|
|
549
|
+
start = _nearest_existing_parent(path)
|
|
550
|
+
if start.is_file():
|
|
551
|
+
start = start.parent
|
|
552
|
+
for parent in [start, *start.parents]:
|
|
553
|
+
if (parent / ".git").exists():
|
|
554
|
+
return parent
|
|
555
|
+
return None
|
|
556
|
+
|
|
557
|
+
|
|
558
|
+
def _scoped_path(workspace_root: Path, path: str, mode: WorkspaceMode) -> Path:
|
|
559
|
+
root = workspace_root.resolve()
|
|
560
|
+
p = Path(path)
|
|
561
|
+
if p.is_absolute():
|
|
562
|
+
resolved = p.resolve()
|
|
563
|
+
else:
|
|
564
|
+
resolved = (root / p).resolve()
|
|
565
|
+
|
|
566
|
+
if mode == "any":
|
|
567
|
+
return resolved
|
|
568
|
+
|
|
569
|
+
if mode == "git":
|
|
570
|
+
git_root = _find_git_root(resolved)
|
|
571
|
+
if git_root is None:
|
|
572
|
+
raise ValueError(f"Path '{path}' is not inside a git repository (workspace mode: git)")
|
|
573
|
+
if not resolved.is_relative_to(git_root):
|
|
574
|
+
raise ValueError(f"Path '{path}' escapes git root '{git_root}'")
|
|
575
|
+
return resolved
|
|
576
|
+
|
|
577
|
+
if not resolved.is_relative_to(root):
|
|
578
|
+
raise ValueError(f"Path '{path}' escapes workspace root '{root}'")
|
|
579
|
+
return resolved
|
|
580
|
+
|
|
581
|
+
|
|
582
|
+
def _format_payload(
|
|
583
|
+
payload: dict[str, Any],
|
|
584
|
+
output: Literal["json", "markdown", "object"],
|
|
585
|
+
) -> str | dict[str, Any]:
|
|
586
|
+
if output == "object":
|
|
587
|
+
return payload
|
|
588
|
+
if output == "json":
|
|
589
|
+
return json.dumps(payload, ensure_ascii=False, indent=2)
|
|
590
|
+
return "```json\n" + json.dumps(payload, ensure_ascii=False, indent=2) + "\n```"
|
|
591
|
+
|
|
592
|
+
|
|
593
|
+
def _format_error(
|
|
594
|
+
message: str,
|
|
595
|
+
output: Literal["json", "markdown", "object"],
|
|
596
|
+
) -> str | dict[str, Any]:
|
|
597
|
+
if output == "markdown":
|
|
598
|
+
return f"Error: {message}"
|
|
599
|
+
return _format_payload({"error": message}, output=output)
|
|
600
|
+
|
|
601
|
+
|
|
602
|
+
def _validate_line_number_base(value: int) -> LineNumberBase:
|
|
603
|
+
if value not in (0, 1):
|
|
604
|
+
raise ValueError("line_number_base must be 0 or 1")
|
|
605
|
+
return cast(LineNumberBase, value)
|
|
606
|
+
|
|
607
|
+
|
|
608
|
+
def _resolve_line_number_base(
|
|
609
|
+
session: _Session | None,
|
|
610
|
+
value: int | None,
|
|
611
|
+
) -> LineNumberBase:
|
|
612
|
+
if session is not None:
|
|
613
|
+
if value is None:
|
|
614
|
+
return session.line_number_base
|
|
615
|
+
base = _validate_line_number_base(value)
|
|
616
|
+
if base != session.line_number_base:
|
|
617
|
+
raise ValueError("line_number_base does not match existing session")
|
|
618
|
+
return base
|
|
619
|
+
if value is None:
|
|
620
|
+
return DEFAULT_LINE_NUMBER_BASE
|
|
621
|
+
return _validate_line_number_base(value)
|
|
622
|
+
|
|
623
|
+
def _to_jsonable(obj: Any) -> Any:
|
|
624
|
+
"""Best-effort conversion of MCP/Pydantic objects into JSON-serializable data."""
|
|
625
|
+
if obj is None or isinstance(obj, (str, int, float, bool)):
|
|
626
|
+
return obj
|
|
627
|
+
if isinstance(obj, dict):
|
|
628
|
+
return {str(k): _to_jsonable(v) for k, v in obj.items()}
|
|
629
|
+
if isinstance(obj, (list, tuple)):
|
|
630
|
+
return [_to_jsonable(v) for v in obj]
|
|
631
|
+
if hasattr(obj, "model_dump"):
|
|
632
|
+
try:
|
|
633
|
+
return obj.model_dump()
|
|
634
|
+
except Exception:
|
|
635
|
+
pass
|
|
636
|
+
if hasattr(obj, "__dict__"):
|
|
637
|
+
try:
|
|
638
|
+
return _to_jsonable(vars(obj))
|
|
639
|
+
except Exception:
|
|
640
|
+
pass
|
|
641
|
+
return str(obj)
|
|
642
|
+
|
|
643
|
+
|
|
644
|
+
@dataclass(slots=True)
|
|
645
|
+
class ActionConfig:
|
|
646
|
+
enabled: bool = False
|
|
647
|
+
workspace_root: Path = field(default_factory=_detect_workspace_root)
|
|
648
|
+
workspace_mode: WorkspaceMode = DEFAULT_WORKSPACE_MODE
|
|
649
|
+
require_confirmation: bool = False
|
|
650
|
+
max_cmd_seconds: float = 60.0
|
|
651
|
+
max_output_chars: int = 50_000
|
|
652
|
+
max_read_bytes: int = 1_000_000_000 # Default 1GB. Increase if you have more RAM - the LLM only sees query results, not the file.
|
|
653
|
+
max_write_bytes: int = 100_000_000 # 100 MB
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
@dataclass
|
|
657
|
+
class _RemoteServerHandle:
|
|
658
|
+
"""A managed remote MCP server connection (stdio transport)."""
|
|
659
|
+
|
|
660
|
+
command: str
|
|
661
|
+
args: list[str] = field(default_factory=list)
|
|
662
|
+
cwd: Path | None = None
|
|
663
|
+
env: dict[str, str] | None = None
|
|
664
|
+
allow_tools: list[str] | None = None
|
|
665
|
+
deny_tools: list[str] | None = None
|
|
666
|
+
|
|
667
|
+
connected_at: datetime | None = None
|
|
668
|
+
session: Any | None = None # ClientSession (kept as Any to avoid hard dependency at import time)
|
|
669
|
+
_stack: AsyncExitStack | None = None
|
|
670
|
+
|
|
671
|
+
|
|
672
|
+
class AlephMCPServerLocal:
|
|
673
|
+
"""MCP server for local AI reasoning.
|
|
674
|
+
|
|
675
|
+
This server provides context exploration tools that work with any
|
|
676
|
+
MCP-compatible AI host (Claude Desktop, Cursor, Windsurf, etc.).
|
|
677
|
+
"""
|
|
678
|
+
|
|
679
|
+
def __init__(
|
|
680
|
+
self,
|
|
681
|
+
sandbox_config: SandboxConfig | None = None,
|
|
682
|
+
action_config: ActionConfig | None = None,
|
|
683
|
+
sub_query_config: SubQueryConfig | None = None,
|
|
684
|
+
tool_docs_mode: ToolDocsMode = DEFAULT_TOOL_DOCS_MODE,
|
|
685
|
+
) -> None:
|
|
686
|
+
self.sandbox_config = sandbox_config or SandboxConfig()
|
|
687
|
+
self.action_config = action_config or ActionConfig()
|
|
688
|
+
self.sub_query_config = sub_query_config or SubQueryConfig()
|
|
689
|
+
self.tool_docs_mode = tool_docs_mode
|
|
690
|
+
self._sessions: dict[str, _Session] = {}
|
|
691
|
+
self._remote_servers: dict[str, _RemoteServerHandle] = {}
|
|
692
|
+
self._auto_pack_loaded = False
|
|
693
|
+
self._streamable_http_task: asyncio.Task | None = None
|
|
694
|
+
self._streamable_http_url: str | None = None
|
|
695
|
+
self._streamable_http_host: str | None = None
|
|
696
|
+
self._streamable_http_port: int | None = None
|
|
697
|
+
self._streamable_http_path: str | None = None
|
|
698
|
+
self._streamable_http_lock = asyncio.Lock()
|
|
699
|
+
|
|
700
|
+
# Import MCP lazily so it's an optional dependency
|
|
701
|
+
try:
|
|
702
|
+
from mcp.server.fastmcp import FastMCP
|
|
703
|
+
except Exception as e:
|
|
704
|
+
raise RuntimeError(
|
|
705
|
+
"MCP support requires the `mcp` package. Install with `pip install \"aleph-rlm[mcp]\"`."
|
|
706
|
+
) from e
|
|
707
|
+
|
|
708
|
+
self.server = FastMCP("aleph-local")
|
|
709
|
+
self._register_tools()
|
|
710
|
+
|
|
711
|
+
if self.action_config.enabled:
|
|
712
|
+
self._auto_load_memory_pack()
|
|
713
|
+
|
|
714
|
+
def _auto_load_memory_pack(self) -> None:
|
|
715
|
+
if self._auto_pack_loaded:
|
|
716
|
+
return
|
|
717
|
+
self._auto_pack_loaded = True
|
|
718
|
+
pack_path = self.action_config.workspace_root / ".aleph" / "memory_pack.json"
|
|
719
|
+
if not pack_path.exists() or not pack_path.is_file():
|
|
720
|
+
return
|
|
721
|
+
try:
|
|
722
|
+
if pack_path.stat().st_size > self.action_config.max_read_bytes:
|
|
723
|
+
return
|
|
724
|
+
except Exception:
|
|
725
|
+
return
|
|
726
|
+
try:
|
|
727
|
+
data = pack_path.read_bytes()
|
|
728
|
+
obj = json.loads(data.decode("utf-8", errors="replace"))
|
|
729
|
+
except Exception:
|
|
730
|
+
return
|
|
731
|
+
|
|
732
|
+
if not isinstance(obj, dict):
|
|
733
|
+
return
|
|
734
|
+
if obj.get("schema") != "aleph.memory_pack.v1":
|
|
735
|
+
return
|
|
736
|
+
sessions = obj.get("sessions")
|
|
737
|
+
if not isinstance(sessions, list):
|
|
738
|
+
return
|
|
739
|
+
for payload in sessions:
|
|
740
|
+
if not isinstance(payload, dict):
|
|
741
|
+
continue
|
|
742
|
+
session_id = payload.get("context_id") or payload.get("session_id")
|
|
743
|
+
resolved_id = str(session_id) if session_id else f"session_{len(self._sessions) + 1}"
|
|
744
|
+
if resolved_id in self._sessions:
|
|
745
|
+
continue
|
|
746
|
+
try:
|
|
747
|
+
session = _session_from_payload(payload, resolved_id, self.sandbox_config, loop=None)
|
|
748
|
+
except Exception:
|
|
749
|
+
continue
|
|
750
|
+
self._sessions[resolved_id] = session
|
|
751
|
+
|
|
752
|
+
def _normalize_streamable_http_path(self, path: str) -> str:
|
|
753
|
+
if not path:
|
|
754
|
+
return "/mcp"
|
|
755
|
+
return path if path.startswith("/") else f"/{path}"
|
|
756
|
+
|
|
757
|
+
def _format_streamable_http_url(self, host: str, port: int, path: str) -> str:
|
|
758
|
+
connect_host = "127.0.0.1" if host in {"0.0.0.0", "::"} else host
|
|
759
|
+
return f"http://{connect_host}:{port}{path}"
|
|
760
|
+
|
|
761
|
+
async def _wait_for_streamable_http_ready(
|
|
762
|
+
self,
|
|
763
|
+
host: str,
|
|
764
|
+
port: int,
|
|
765
|
+
timeout_seconds: float = 2.0,
|
|
766
|
+
) -> tuple[bool, str]:
|
|
767
|
+
deadline = time.monotonic() + timeout_seconds
|
|
768
|
+
connect_host = "127.0.0.1" if host in {"0.0.0.0", "::"} else host
|
|
769
|
+
|
|
770
|
+
while time.monotonic() < deadline:
|
|
771
|
+
if self._streamable_http_task and self._streamable_http_task.done():
|
|
772
|
+
exc = self._streamable_http_task.exception()
|
|
773
|
+
if exc:
|
|
774
|
+
return False, f"Streamable HTTP server failed to start: {exc}"
|
|
775
|
+
return False, "Streamable HTTP server stopped unexpectedly."
|
|
776
|
+
try:
|
|
777
|
+
reader, writer = await asyncio.wait_for(
|
|
778
|
+
asyncio.open_connection(connect_host, port),
|
|
779
|
+
timeout=0.2,
|
|
780
|
+
)
|
|
781
|
+
writer.close()
|
|
782
|
+
await writer.wait_closed()
|
|
783
|
+
return True, ""
|
|
784
|
+
except Exception:
|
|
785
|
+
await asyncio.sleep(0.05)
|
|
786
|
+
|
|
787
|
+
return False, f"Timed out waiting for streamable HTTP server on {connect_host}:{port}."
|
|
788
|
+
|
|
789
|
+
async def _run_streamable_http_server(self, host: str, port: int) -> None:
|
|
790
|
+
try:
|
|
791
|
+
import uvicorn
|
|
792
|
+
except Exception as exc:
|
|
793
|
+
raise RuntimeError(
|
|
794
|
+
"uvicorn is required for streamable HTTP transport. "
|
|
795
|
+
"Install with: pip install uvicorn"
|
|
796
|
+
) from exc
|
|
797
|
+
|
|
798
|
+
app = self.server.streamable_http_app()
|
|
799
|
+
config = uvicorn.Config(
|
|
800
|
+
app,
|
|
801
|
+
host=host,
|
|
802
|
+
port=port,
|
|
803
|
+
log_level="warning",
|
|
804
|
+
access_log=False,
|
|
805
|
+
lifespan="on",
|
|
806
|
+
)
|
|
807
|
+
server = uvicorn.Server(config)
|
|
808
|
+
await server.serve()
|
|
809
|
+
|
|
810
|
+
async def _ensure_streamable_http_server(
|
|
811
|
+
self,
|
|
812
|
+
host: str,
|
|
813
|
+
port: int,
|
|
814
|
+
path: str,
|
|
815
|
+
) -> tuple[bool, str]:
|
|
816
|
+
normalized_path = self._normalize_streamable_http_path(path)
|
|
817
|
+
async with self._streamable_http_lock:
|
|
818
|
+
if self._streamable_http_task and not self._streamable_http_task.done():
|
|
819
|
+
url = self._streamable_http_url or self._format_streamable_http_url(
|
|
820
|
+
host,
|
|
821
|
+
port,
|
|
822
|
+
normalized_path,
|
|
823
|
+
)
|
|
824
|
+
return True, url
|
|
825
|
+
if self._streamable_http_task and self._streamable_http_task.done():
|
|
826
|
+
self._streamable_http_task = None
|
|
827
|
+
self._streamable_http_url = None
|
|
828
|
+
|
|
829
|
+
self.server.settings.host = host
|
|
830
|
+
self.server.settings.port = port
|
|
831
|
+
self.server.settings.streamable_http_path = normalized_path
|
|
832
|
+
|
|
833
|
+
self._streamable_http_task = asyncio.create_task(
|
|
834
|
+
self._run_streamable_http_server(host, port)
|
|
835
|
+
)
|
|
836
|
+
self._streamable_http_host = host
|
|
837
|
+
self._streamable_http_port = port
|
|
838
|
+
self._streamable_http_path = normalized_path
|
|
839
|
+
self._streamable_http_url = self._format_streamable_http_url(
|
|
840
|
+
host,
|
|
841
|
+
port,
|
|
842
|
+
normalized_path,
|
|
843
|
+
)
|
|
844
|
+
|
|
845
|
+
ok, err = await self._wait_for_streamable_http_ready(host, port)
|
|
846
|
+
if not ok:
|
|
847
|
+
return False, err
|
|
848
|
+
return True, self._streamable_http_url or self._format_streamable_http_url(
|
|
849
|
+
host,
|
|
850
|
+
port,
|
|
851
|
+
normalized_path,
|
|
852
|
+
)
|
|
853
|
+
|
|
854
|
+
async def _ensure_remote_server(self, server_id: str) -> tuple[bool, str | _RemoteServerHandle]:
|
|
855
|
+
"""Ensure a remote MCP server is connected and initialized."""
|
|
856
|
+
if server_id not in self._remote_servers:
|
|
857
|
+
return False, f"Error: Remote server '{server_id}' not registered."
|
|
858
|
+
|
|
859
|
+
handle = self._remote_servers[server_id]
|
|
860
|
+
if handle.session is not None:
|
|
861
|
+
return True, handle
|
|
862
|
+
|
|
863
|
+
try:
|
|
864
|
+
from mcp.client.session import ClientSession
|
|
865
|
+
from mcp.client.stdio import StdioServerParameters, stdio_client
|
|
866
|
+
except Exception as e: # pragma: no cover
|
|
867
|
+
return False, f"Error: MCP client support is not available: {e}"
|
|
868
|
+
|
|
869
|
+
params = StdioServerParameters(
|
|
870
|
+
command=handle.command,
|
|
871
|
+
args=handle.args,
|
|
872
|
+
env=handle.env,
|
|
873
|
+
cwd=str(handle.cwd) if handle.cwd is not None else None,
|
|
874
|
+
)
|
|
875
|
+
|
|
876
|
+
stack = AsyncExitStack()
|
|
877
|
+
try:
|
|
878
|
+
read_stream, write_stream = await stack.enter_async_context(stdio_client(params))
|
|
879
|
+
session = await stack.enter_async_context(ClientSession(read_stream, write_stream))
|
|
880
|
+
await session.initialize()
|
|
881
|
+
except Exception as e:
|
|
882
|
+
await stack.aclose()
|
|
883
|
+
return False, f"Error: Failed to connect to remote server '{server_id}': {e}"
|
|
884
|
+
|
|
885
|
+
handle._stack = stack
|
|
886
|
+
handle.session = session
|
|
887
|
+
handle.connected_at = datetime.now()
|
|
888
|
+
return True, handle
|
|
889
|
+
|
|
890
|
+
async def _reset_remote_server_handle(self, handle: _RemoteServerHandle) -> None:
|
|
891
|
+
"""Close and clear a remote server handle without removing registration."""
|
|
892
|
+
if handle._stack is not None:
|
|
893
|
+
try:
|
|
894
|
+
await handle._stack.aclose()
|
|
895
|
+
finally:
|
|
896
|
+
handle._stack = None
|
|
897
|
+
handle.session = None
|
|
898
|
+
handle.connected_at = None
|
|
899
|
+
else:
|
|
900
|
+
handle.session = None
|
|
901
|
+
handle.connected_at = None
|
|
902
|
+
|
|
903
|
+
async def _close_remote_server(self, server_id: str) -> tuple[bool, str]:
|
|
904
|
+
"""Close a remote server connection and terminate the subprocess."""
|
|
905
|
+
if server_id not in self._remote_servers:
|
|
906
|
+
return False, f"Error: Remote server '{server_id}' not registered."
|
|
907
|
+
|
|
908
|
+
handle = self._remote_servers[server_id]
|
|
909
|
+
await self._reset_remote_server_handle(handle)
|
|
910
|
+
return True, f"Closed remote server '{server_id}'."
|
|
911
|
+
|
|
912
|
+
async def _remote_list_tools(self, server_id: str) -> tuple[bool, Any]:
|
|
913
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
914
|
+
if not ok:
|
|
915
|
+
return False, res
|
|
916
|
+
handle = res # type: ignore[assignment]
|
|
917
|
+
try:
|
|
918
|
+
result = await handle.session.list_tools() # type: ignore[union-attr]
|
|
919
|
+
return True, _to_jsonable(result)
|
|
920
|
+
except Exception as e:
|
|
921
|
+
await self._reset_remote_server_handle(handle)
|
|
922
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
923
|
+
if not ok:
|
|
924
|
+
return False, f"Error: list_tools failed and reconnect failed: {res}"
|
|
925
|
+
handle = res # type: ignore[assignment]
|
|
926
|
+
try:
|
|
927
|
+
result = await handle.session.list_tools() # type: ignore[union-attr]
|
|
928
|
+
return True, _to_jsonable(result)
|
|
929
|
+
except Exception as e2:
|
|
930
|
+
return False, f"Error: list_tools failed after reconnect: {e2}"
|
|
931
|
+
|
|
932
|
+
async def _remote_call_tool(
|
|
933
|
+
self,
|
|
934
|
+
server_id: str,
|
|
935
|
+
tool: str,
|
|
936
|
+
arguments: dict[str, Any] | None = None,
|
|
937
|
+
timeout_seconds: float | None = DEFAULT_REMOTE_TOOL_TIMEOUT_SECONDS,
|
|
938
|
+
) -> tuple[bool, Any]:
|
|
939
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
940
|
+
if not ok:
|
|
941
|
+
return False, res
|
|
942
|
+
handle = res # type: ignore[assignment]
|
|
943
|
+
|
|
944
|
+
if not self._remote_tool_allowed(handle, tool):
|
|
945
|
+
return False, f"Error: Tool '{tool}' is not allowed for remote server '{server_id}'."
|
|
946
|
+
|
|
947
|
+
from datetime import timedelta
|
|
948
|
+
|
|
949
|
+
read_timeout = timedelta(
|
|
950
|
+
seconds=float(timeout_seconds or DEFAULT_REMOTE_TOOL_TIMEOUT_SECONDS)
|
|
951
|
+
)
|
|
952
|
+
try:
|
|
953
|
+
result = await handle.session.call_tool( # type: ignore[union-attr]
|
|
954
|
+
name=tool,
|
|
955
|
+
arguments=arguments or {},
|
|
956
|
+
read_timeout_seconds=read_timeout,
|
|
957
|
+
)
|
|
958
|
+
except Exception as e:
|
|
959
|
+
await self._reset_remote_server_handle(handle)
|
|
960
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
961
|
+
if not ok:
|
|
962
|
+
return False, f"Error: call_tool failed and reconnect failed: {res}"
|
|
963
|
+
handle = res # type: ignore[assignment]
|
|
964
|
+
try:
|
|
965
|
+
result = await handle.session.call_tool( # type: ignore[union-attr]
|
|
966
|
+
name=tool,
|
|
967
|
+
arguments=arguments or {},
|
|
968
|
+
read_timeout_seconds=read_timeout,
|
|
969
|
+
)
|
|
970
|
+
except Exception as e2:
|
|
971
|
+
return False, f"Error: call_tool failed after reconnect: {e2}"
|
|
972
|
+
|
|
973
|
+
result_jsonable = _to_jsonable(result)
|
|
974
|
+
|
|
975
|
+
return True, result_jsonable
|
|
976
|
+
|
|
977
|
+
def _remote_tool_allowed(self, handle: _RemoteServerHandle, tool_name: str) -> bool:
|
|
978
|
+
if handle.allow_tools is not None:
|
|
979
|
+
return tool_name in handle.allow_tools
|
|
980
|
+
if handle.deny_tools is not None and tool_name in handle.deny_tools:
|
|
981
|
+
return False
|
|
982
|
+
return True
|
|
983
|
+
|
|
984
|
+
def _register_tools(self) -> None:
|
|
985
|
+
"""Register all MCP tools."""
|
|
986
|
+
|
|
987
|
+
def _format_context_loaded(
|
|
988
|
+
context_id: str,
|
|
989
|
+
meta: ContextMetadata,
|
|
990
|
+
line_number_base: LineNumberBase,
|
|
991
|
+
note: str | None = None,
|
|
992
|
+
) -> str:
|
|
993
|
+
line_desc = "1-based" if line_number_base == 1 else "0-based"
|
|
994
|
+
msg = (
|
|
995
|
+
f"Context loaded '{context_id}': {meta.size_chars:,} chars, "
|
|
996
|
+
f"{meta.size_lines:,} lines, ~{meta.size_tokens_estimate:,} tokens "
|
|
997
|
+
f"(line numbers {line_desc})."
|
|
998
|
+
)
|
|
999
|
+
if note:
|
|
1000
|
+
msg += f"\nNote: {note}"
|
|
1001
|
+
return msg
|
|
1002
|
+
|
|
1003
|
+
def _create_session(
|
|
1004
|
+
context: str,
|
|
1005
|
+
context_id: str,
|
|
1006
|
+
fmt: ContentFormat,
|
|
1007
|
+
line_number_base: LineNumberBase,
|
|
1008
|
+
) -> ContextMetadata:
|
|
1009
|
+
meta = _analyze_text_context(context, fmt)
|
|
1010
|
+
repl = REPLEnvironment(
|
|
1011
|
+
context=context,
|
|
1012
|
+
context_var_name="ctx",
|
|
1013
|
+
config=self.sandbox_config,
|
|
1014
|
+
loop=asyncio.get_running_loop(),
|
|
1015
|
+
)
|
|
1016
|
+
repl.set_variable("line_number_base", line_number_base)
|
|
1017
|
+
self._sessions[context_id] = _Session(
|
|
1018
|
+
repl=repl,
|
|
1019
|
+
meta=meta,
|
|
1020
|
+
line_number_base=line_number_base,
|
|
1021
|
+
)
|
|
1022
|
+
return meta
|
|
1023
|
+
|
|
1024
|
+
def _get_or_create_session(
|
|
1025
|
+
context_id: str,
|
|
1026
|
+
line_number_base: LineNumberBase | None = None,
|
|
1027
|
+
) -> _Session:
|
|
1028
|
+
session = self._sessions.get(context_id)
|
|
1029
|
+
if session is not None:
|
|
1030
|
+
return session
|
|
1031
|
+
|
|
1032
|
+
base = line_number_base if line_number_base is not None else DEFAULT_LINE_NUMBER_BASE
|
|
1033
|
+
meta = _analyze_text_context("", ContentFormat.TEXT)
|
|
1034
|
+
repl = REPLEnvironment(
|
|
1035
|
+
context="",
|
|
1036
|
+
context_var_name="ctx",
|
|
1037
|
+
config=self.sandbox_config,
|
|
1038
|
+
loop=asyncio.get_running_loop(),
|
|
1039
|
+
)
|
|
1040
|
+
repl.set_variable("line_number_base", base)
|
|
1041
|
+
session = _Session(repl=repl, meta=meta, line_number_base=base)
|
|
1042
|
+
self._sessions[context_id] = session
|
|
1043
|
+
return session
|
|
1044
|
+
|
|
1045
|
+
def _first_doc_line(fn: Any) -> str:
|
|
1046
|
+
doc = inspect.getdoc(fn) or ""
|
|
1047
|
+
for line in doc.splitlines():
|
|
1048
|
+
line = line.strip()
|
|
1049
|
+
if line:
|
|
1050
|
+
return line
|
|
1051
|
+
return ""
|
|
1052
|
+
|
|
1053
|
+
def _short_description(fn: Any, override: str | None) -> str:
|
|
1054
|
+
desc = (override or _first_doc_line(fn)).strip()
|
|
1055
|
+
if not desc:
|
|
1056
|
+
desc = fn.__name__.replace("_", " ")
|
|
1057
|
+
max_len = 120
|
|
1058
|
+
if len(desc) > max_len:
|
|
1059
|
+
desc = desc[: max_len - 3].rstrip() + "..."
|
|
1060
|
+
return desc
|
|
1061
|
+
|
|
1062
|
+
def _tool(description: str | None = None, **kwargs: Any) -> Any:
|
|
1063
|
+
def decorator(fn: Any) -> Any:
|
|
1064
|
+
doc = inspect.getdoc(fn) or ""
|
|
1065
|
+
if self.tool_docs_mode == "full" and doc:
|
|
1066
|
+
return self.server.tool(**kwargs)(fn)
|
|
1067
|
+
desc = _short_description(fn, description)
|
|
1068
|
+
return self.server.tool(description=desc, **kwargs)(fn)
|
|
1069
|
+
|
|
1070
|
+
return decorator
|
|
1071
|
+
|
|
1072
|
+
@_tool()
|
|
1073
|
+
async def load_context(
|
|
1074
|
+
content: str | None = None,
|
|
1075
|
+
context_id: str = "default",
|
|
1076
|
+
format: str = "auto",
|
|
1077
|
+
line_number_base: LineNumberBase = DEFAULT_LINE_NUMBER_BASE,
|
|
1078
|
+
context: str | None = None,
|
|
1079
|
+
) -> str:
|
|
1080
|
+
"""Load context into an in-memory REPL session.
|
|
1081
|
+
|
|
1082
|
+
The context is stored in a sandboxed Python environment as the variable `ctx`.
|
|
1083
|
+
You can then use other tools to explore and process this context.
|
|
1084
|
+
|
|
1085
|
+
Args:
|
|
1086
|
+
content: The text/data to load
|
|
1087
|
+
context_id: Identifier for this context session (default: "default")
|
|
1088
|
+
format: Content format - "auto", "text", or "json" (default: "auto")
|
|
1089
|
+
line_number_base: Line number base for this context (0 or 1)
|
|
1090
|
+
context: Deprecated alias for content
|
|
1091
|
+
|
|
1092
|
+
Returns:
|
|
1093
|
+
Confirmation with context metadata
|
|
1094
|
+
"""
|
|
1095
|
+
text = content if content is not None else context
|
|
1096
|
+
if text is None:
|
|
1097
|
+
return "Error: content is required"
|
|
1098
|
+
try:
|
|
1099
|
+
base = _validate_line_number_base(line_number_base)
|
|
1100
|
+
except ValueError as e:
|
|
1101
|
+
return f"Error: {e}"
|
|
1102
|
+
|
|
1103
|
+
fmt = _detect_format(text) if format == "auto" else ContentFormat(format)
|
|
1104
|
+
meta = _create_session(text, context_id, fmt, base)
|
|
1105
|
+
return _format_context_loaded(context_id, meta, base)
|
|
1106
|
+
|
|
1107
|
+
def _require_actions(confirm: bool) -> str | None:
|
|
1108
|
+
if not self.action_config.enabled:
|
|
1109
|
+
return "Actions are disabled. Start the server with `--enable-actions`."
|
|
1110
|
+
if self.action_config.require_confirmation and not confirm:
|
|
1111
|
+
return "Confirmation required. Re-run with confirm=true."
|
|
1112
|
+
return None
|
|
1113
|
+
|
|
1114
|
+
def _record_action(session: _Session | None, note: str, snippet: str) -> None:
|
|
1115
|
+
if session is None:
|
|
1116
|
+
return
|
|
1117
|
+
evidence_before = len(session.evidence)
|
|
1118
|
+
session.evidence.append(
|
|
1119
|
+
_Evidence(
|
|
1120
|
+
source="action",
|
|
1121
|
+
line_range=None,
|
|
1122
|
+
pattern=None,
|
|
1123
|
+
note=note,
|
|
1124
|
+
snippet=snippet[:200],
|
|
1125
|
+
)
|
|
1126
|
+
)
|
|
1127
|
+
session.information_gain.append(len(session.evidence) - evidence_before)
|
|
1128
|
+
|
|
1129
|
+
def _build_memory_pack_payload() -> tuple[dict[str, Any], list[str]]:
|
|
1130
|
+
sessions_payload: list[dict[str, Any]] = []
|
|
1131
|
+
skipped: list[str] = []
|
|
1132
|
+
for sid, sess in self._sessions.items():
|
|
1133
|
+
try:
|
|
1134
|
+
sessions_payload.append(_session_to_payload(sid, sess))
|
|
1135
|
+
except Exception:
|
|
1136
|
+
skipped.append(sid)
|
|
1137
|
+
payload = {
|
|
1138
|
+
"schema": "aleph.memory_pack.v1",
|
|
1139
|
+
"created_at": datetime.now().isoformat(),
|
|
1140
|
+
"sessions": sessions_payload,
|
|
1141
|
+
"skipped": skipped,
|
|
1142
|
+
}
|
|
1143
|
+
return payload, skipped
|
|
1144
|
+
|
|
1145
|
+
async def _run_subprocess(
|
|
1146
|
+
argv: list[str],
|
|
1147
|
+
cwd: Path,
|
|
1148
|
+
timeout_seconds: float,
|
|
1149
|
+
) -> dict[str, Any]:
|
|
1150
|
+
start = time.perf_counter()
|
|
1151
|
+
proc = await asyncio.create_subprocess_exec(
|
|
1152
|
+
*argv,
|
|
1153
|
+
cwd=str(cwd),
|
|
1154
|
+
stdout=asyncio.subprocess.PIPE,
|
|
1155
|
+
stderr=asyncio.subprocess.PIPE,
|
|
1156
|
+
)
|
|
1157
|
+
timed_out = False
|
|
1158
|
+
try:
|
|
1159
|
+
stdout_b, stderr_b = await asyncio.wait_for(proc.communicate(), timeout=timeout_seconds)
|
|
1160
|
+
except asyncio.TimeoutError:
|
|
1161
|
+
timed_out = True
|
|
1162
|
+
proc.kill()
|
|
1163
|
+
stdout_b, stderr_b = await proc.communicate()
|
|
1164
|
+
|
|
1165
|
+
duration_ms = (time.perf_counter() - start) * 1000.0
|
|
1166
|
+
stdout = stdout_b.decode("utf-8", errors="replace")
|
|
1167
|
+
stderr = stderr_b.decode("utf-8", errors="replace")
|
|
1168
|
+
if len(stdout) > self.action_config.max_output_chars:
|
|
1169
|
+
stdout = stdout[: self.action_config.max_output_chars] + "\n... (truncated)"
|
|
1170
|
+
if len(stderr) > self.action_config.max_output_chars:
|
|
1171
|
+
stderr = stderr[: self.action_config.max_output_chars] + "\n... (truncated)"
|
|
1172
|
+
|
|
1173
|
+
return {
|
|
1174
|
+
"argv": argv,
|
|
1175
|
+
"cwd": str(cwd),
|
|
1176
|
+
"exit_code": proc.returncode,
|
|
1177
|
+
"timed_out": timed_out,
|
|
1178
|
+
"duration_ms": duration_ms,
|
|
1179
|
+
"stdout": stdout,
|
|
1180
|
+
"stderr": stderr,
|
|
1181
|
+
}
|
|
1182
|
+
|
|
1183
|
+
def _parse_rg_vimgrep(output: str, max_results: int) -> tuple[list[dict[str, Any]], bool]:
|
|
1184
|
+
results: list[dict[str, Any]] = []
|
|
1185
|
+
truncated = False
|
|
1186
|
+
limit = max_results if max_results > 0 else None
|
|
1187
|
+
for line in output.splitlines():
|
|
1188
|
+
parts = line.split(":", 3)
|
|
1189
|
+
if len(parts) < 4:
|
|
1190
|
+
continue
|
|
1191
|
+
path_str, line_str, col_str, text = parts
|
|
1192
|
+
try:
|
|
1193
|
+
line_no = int(line_str)
|
|
1194
|
+
col_no = int(col_str)
|
|
1195
|
+
except ValueError:
|
|
1196
|
+
continue
|
|
1197
|
+
results.append({
|
|
1198
|
+
"path": path_str,
|
|
1199
|
+
"line": line_no,
|
|
1200
|
+
"column": col_no,
|
|
1201
|
+
"text": text,
|
|
1202
|
+
})
|
|
1203
|
+
if limit is not None and len(results) >= limit:
|
|
1204
|
+
truncated = True
|
|
1205
|
+
break
|
|
1206
|
+
return results, truncated
|
|
1207
|
+
|
|
1208
|
+
def _python_rg_search(
|
|
1209
|
+
pattern: str,
|
|
1210
|
+
roots: list[Path],
|
|
1211
|
+
glob: str | None,
|
|
1212
|
+
max_results: int,
|
|
1213
|
+
) -> tuple[list[dict[str, Any]], bool]:
|
|
1214
|
+
results: list[dict[str, Any]] = []
|
|
1215
|
+
truncated = False
|
|
1216
|
+
limit = max_results if max_results > 0 else None
|
|
1217
|
+
rx = re.compile(pattern)
|
|
1218
|
+
skip_dirs = {".git", ".venv", "node_modules", "dist", "build", "__pycache__", ".mypy_cache", ".pytest_cache"}
|
|
1219
|
+
|
|
1220
|
+
def _iter_files(root: Path) -> Iterable[Path]:
|
|
1221
|
+
if root.is_file():
|
|
1222
|
+
yield root
|
|
1223
|
+
return
|
|
1224
|
+
for path in root.rglob("*"):
|
|
1225
|
+
if path.is_dir():
|
|
1226
|
+
continue
|
|
1227
|
+
if any(part in skip_dirs for part in path.parts):
|
|
1228
|
+
continue
|
|
1229
|
+
yield path
|
|
1230
|
+
|
|
1231
|
+
for root in roots:
|
|
1232
|
+
for path in _iter_files(root):
|
|
1233
|
+
if glob and not fnmatch.fnmatch(path.name, glob):
|
|
1234
|
+
continue
|
|
1235
|
+
try:
|
|
1236
|
+
if path.stat().st_size > self.action_config.max_read_bytes:
|
|
1237
|
+
continue
|
|
1238
|
+
text = path.read_text(encoding="utf-8", errors="replace")
|
|
1239
|
+
except Exception:
|
|
1240
|
+
continue
|
|
1241
|
+
for idx, line in enumerate(text.splitlines(), start=1):
|
|
1242
|
+
match = rx.search(line)
|
|
1243
|
+
if not match:
|
|
1244
|
+
continue
|
|
1245
|
+
results.append({
|
|
1246
|
+
"path": str(path),
|
|
1247
|
+
"line": idx,
|
|
1248
|
+
"column": match.start() + 1,
|
|
1249
|
+
"text": line,
|
|
1250
|
+
})
|
|
1251
|
+
if limit is not None and len(results) >= limit:
|
|
1252
|
+
truncated = True
|
|
1253
|
+
return results, truncated
|
|
1254
|
+
return results, truncated
|
|
1255
|
+
|
|
1256
|
+
def _auto_save_memory_pack() -> None:
|
|
1257
|
+
if not self.action_config.enabled or not self._sessions:
|
|
1258
|
+
return
|
|
1259
|
+
payload, _ = _build_memory_pack_payload()
|
|
1260
|
+
out_bytes = json.dumps(payload, ensure_ascii=False, indent=2).encode("utf-8", errors="replace")
|
|
1261
|
+
if len(out_bytes) > self.action_config.max_write_bytes:
|
|
1262
|
+
return
|
|
1263
|
+
try:
|
|
1264
|
+
p = _scoped_path(
|
|
1265
|
+
self.action_config.workspace_root,
|
|
1266
|
+
".aleph/memory_pack.json",
|
|
1267
|
+
self.action_config.workspace_mode,
|
|
1268
|
+
)
|
|
1269
|
+
except Exception:
|
|
1270
|
+
return
|
|
1271
|
+
p.parent.mkdir(parents=True, exist_ok=True)
|
|
1272
|
+
try:
|
|
1273
|
+
with open(p, "wb") as f:
|
|
1274
|
+
f.write(out_bytes)
|
|
1275
|
+
except Exception:
|
|
1276
|
+
return
|
|
1277
|
+
for sess in self._sessions.values():
|
|
1278
|
+
_record_action(sess, note="auto_save_memory_pack", snippet=str(p))
|
|
1279
|
+
|
|
1280
|
+
@_tool()
|
|
1281
|
+
async def run_command(
|
|
1282
|
+
cmd: str,
|
|
1283
|
+
cwd: str | None = None,
|
|
1284
|
+
timeout_seconds: float | None = None,
|
|
1285
|
+
shell: bool = False,
|
|
1286
|
+
confirm: bool = False,
|
|
1287
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1288
|
+
context_id: str = "default",
|
|
1289
|
+
) -> str | dict[str, Any]:
|
|
1290
|
+
err = _require_actions(confirm)
|
|
1291
|
+
if err:
|
|
1292
|
+
return _format_error(err, output=output)
|
|
1293
|
+
|
|
1294
|
+
session = _get_or_create_session(context_id)
|
|
1295
|
+
session.iterations += 1
|
|
1296
|
+
|
|
1297
|
+
workspace_root = self.action_config.workspace_root
|
|
1298
|
+
cwd_path = (
|
|
1299
|
+
_scoped_path(workspace_root, cwd, self.action_config.workspace_mode)
|
|
1300
|
+
if cwd
|
|
1301
|
+
else workspace_root
|
|
1302
|
+
)
|
|
1303
|
+
timeout = timeout_seconds if timeout_seconds is not None else self.action_config.max_cmd_seconds
|
|
1304
|
+
|
|
1305
|
+
if shell:
|
|
1306
|
+
argv = ["/bin/zsh", "-lc", cmd]
|
|
1307
|
+
else:
|
|
1308
|
+
argv = shlex.split(cmd)
|
|
1309
|
+
if not argv:
|
|
1310
|
+
return _format_error("Empty command", output=output)
|
|
1311
|
+
|
|
1312
|
+
payload = await _run_subprocess(argv=argv, cwd=cwd_path, timeout_seconds=timeout)
|
|
1313
|
+
if session is not None:
|
|
1314
|
+
session.repl._namespace["last_command_result"] = payload
|
|
1315
|
+
_record_action(session, note="run_command", snippet=(payload.get("stdout") or payload.get("stderr") or "")[:200])
|
|
1316
|
+
return _format_payload(payload, output=output)
|
|
1317
|
+
|
|
1318
|
+
@_tool()
|
|
1319
|
+
async def rg_search(
|
|
1320
|
+
pattern: str,
|
|
1321
|
+
paths: list[str] | None = None,
|
|
1322
|
+
glob: str | None = None,
|
|
1323
|
+
max_results: int = 200,
|
|
1324
|
+
load_context_id: str | None = None,
|
|
1325
|
+
confirm: bool = False,
|
|
1326
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1327
|
+
context_id: str = "default",
|
|
1328
|
+
) -> str | dict[str, Any]:
|
|
1329
|
+
"""Fast codebase search using ripgrep (rg) with fallback scanning.
|
|
1330
|
+
|
|
1331
|
+
Args:
|
|
1332
|
+
pattern: Regex pattern to search for
|
|
1333
|
+
paths: Optional list of files/dirs (defaults to workspace root)
|
|
1334
|
+
glob: Optional glob filter (e.g. "*.py")
|
|
1335
|
+
max_results: Max matches to return (default: 200)
|
|
1336
|
+
load_context_id: If set, load matches into this context
|
|
1337
|
+
confirm: Required if actions are enabled
|
|
1338
|
+
output: "json", "markdown", or "object"
|
|
1339
|
+
context_id: Session to record evidence in
|
|
1340
|
+
"""
|
|
1341
|
+
err = _require_actions(confirm)
|
|
1342
|
+
if err:
|
|
1343
|
+
return _format_error(err, output=output)
|
|
1344
|
+
if not pattern:
|
|
1345
|
+
return _format_error("pattern is required", output=output)
|
|
1346
|
+
|
|
1347
|
+
session = _get_or_create_session(context_id)
|
|
1348
|
+
session.iterations += 1
|
|
1349
|
+
|
|
1350
|
+
workspace_root = self.action_config.workspace_root
|
|
1351
|
+
resolved_paths: list[Path] = []
|
|
1352
|
+
for p in paths or [str(workspace_root)]:
|
|
1353
|
+
try:
|
|
1354
|
+
resolved_paths.append(
|
|
1355
|
+
_scoped_path(
|
|
1356
|
+
workspace_root,
|
|
1357
|
+
p,
|
|
1358
|
+
self.action_config.workspace_mode,
|
|
1359
|
+
)
|
|
1360
|
+
)
|
|
1361
|
+
except Exception as e:
|
|
1362
|
+
return _format_error(str(e), output=output)
|
|
1363
|
+
|
|
1364
|
+
matches: list[dict[str, Any]] = []
|
|
1365
|
+
truncated = False
|
|
1366
|
+
used_rg = False
|
|
1367
|
+
payload: dict[str, Any] | None = None
|
|
1368
|
+
|
|
1369
|
+
rg_bin = shutil.which("rg")
|
|
1370
|
+
if rg_bin:
|
|
1371
|
+
used_rg = True
|
|
1372
|
+
argv = [rg_bin, "--vimgrep", pattern]
|
|
1373
|
+
if glob:
|
|
1374
|
+
argv.extend(["-g", glob])
|
|
1375
|
+
if max_results > 0:
|
|
1376
|
+
argv.extend(["-m", str(max_results)])
|
|
1377
|
+
argv.extend(str(p) for p in resolved_paths)
|
|
1378
|
+
payload = await _run_subprocess(argv=argv, cwd=workspace_root, timeout_seconds=self.action_config.max_cmd_seconds)
|
|
1379
|
+
matches, truncated = _parse_rg_vimgrep(payload.get("stdout") or "", max_results)
|
|
1380
|
+
else:
|
|
1381
|
+
matches, truncated = _python_rg_search(pattern, resolved_paths, glob, max_results)
|
|
1382
|
+
|
|
1383
|
+
hits_text = "\n".join(
|
|
1384
|
+
f"{m['path']}:{m['line']}:{m['column']}:{m['text']}" for m in matches
|
|
1385
|
+
)
|
|
1386
|
+
if load_context_id:
|
|
1387
|
+
meta = _create_session(hits_text, load_context_id, ContentFormat.TEXT, DEFAULT_LINE_NUMBER_BASE)
|
|
1388
|
+
session.repl._namespace["last_rg_loaded_context"] = load_context_id
|
|
1389
|
+
load_note = f"Loaded {len(matches)} match(es) into '{load_context_id}'."
|
|
1390
|
+
else:
|
|
1391
|
+
meta = None
|
|
1392
|
+
load_note = None
|
|
1393
|
+
|
|
1394
|
+
result_payload = {
|
|
1395
|
+
"pattern": pattern,
|
|
1396
|
+
"paths": [str(p) for p in resolved_paths],
|
|
1397
|
+
"used_rg": used_rg,
|
|
1398
|
+
"match_count": len(matches),
|
|
1399
|
+
"truncated": truncated,
|
|
1400
|
+
"matches": matches,
|
|
1401
|
+
}
|
|
1402
|
+
if payload:
|
|
1403
|
+
result_payload["command"] = payload.get("argv")
|
|
1404
|
+
result_payload["timed_out"] = payload.get("timed_out", False)
|
|
1405
|
+
result_payload["stderr"] = payload.get("stderr", "")
|
|
1406
|
+
if load_context_id:
|
|
1407
|
+
result_payload["loaded_context_id"] = load_context_id
|
|
1408
|
+
result_payload["loaded_meta"] = {
|
|
1409
|
+
"size_chars": meta.size_chars if meta else 0,
|
|
1410
|
+
"size_lines": meta.size_lines if meta else 0,
|
|
1411
|
+
}
|
|
1412
|
+
if load_note:
|
|
1413
|
+
result_payload["note"] = load_note
|
|
1414
|
+
|
|
1415
|
+
session.repl._namespace["last_rg_result"] = result_payload
|
|
1416
|
+
_record_action(session, note="rg_search", snippet=f"{pattern} ({len(matches)} matches)")
|
|
1417
|
+
|
|
1418
|
+
if output == "object":
|
|
1419
|
+
return result_payload
|
|
1420
|
+
if output == "json":
|
|
1421
|
+
return json.dumps(result_payload, ensure_ascii=False, indent=2)
|
|
1422
|
+
|
|
1423
|
+
parts = [
|
|
1424
|
+
"## rg_search Results",
|
|
1425
|
+
f"Pattern: `{pattern}`",
|
|
1426
|
+
f"Matches: {len(matches)}" + (" (truncated)" if truncated else ""),
|
|
1427
|
+
]
|
|
1428
|
+
if load_note:
|
|
1429
|
+
parts.append(load_note)
|
|
1430
|
+
if matches:
|
|
1431
|
+
parts.append("")
|
|
1432
|
+
parts.extend([f"- {m['path']}:{m['line']}:{m['column']}: {m['text']}" for m in matches[:20]])
|
|
1433
|
+
if len(matches) > 20:
|
|
1434
|
+
parts.append(f"... {len(matches) - 20} more")
|
|
1435
|
+
return "\n".join(parts)
|
|
1436
|
+
|
|
1437
|
+
@_tool()
|
|
1438
|
+
async def read_file(
|
|
1439
|
+
path: str,
|
|
1440
|
+
start_line: int = 1,
|
|
1441
|
+
limit: int = 200,
|
|
1442
|
+
include_raw: bool = False,
|
|
1443
|
+
line_number_base: int | None = None,
|
|
1444
|
+
confirm: bool = False,
|
|
1445
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1446
|
+
context_id: str = "default",
|
|
1447
|
+
) -> str | dict[str, Any]:
|
|
1448
|
+
err = _require_actions(confirm)
|
|
1449
|
+
if err:
|
|
1450
|
+
return _format_error(err, output=output)
|
|
1451
|
+
|
|
1452
|
+
base_override: LineNumberBase | None = None
|
|
1453
|
+
if line_number_base is not None:
|
|
1454
|
+
try:
|
|
1455
|
+
base_override = _validate_line_number_base(line_number_base)
|
|
1456
|
+
except ValueError as e:
|
|
1457
|
+
return _format_error(str(e), output=output)
|
|
1458
|
+
|
|
1459
|
+
session = _get_or_create_session(context_id, line_number_base=base_override)
|
|
1460
|
+
session.iterations += 1
|
|
1461
|
+
try:
|
|
1462
|
+
base = _resolve_line_number_base(session, line_number_base)
|
|
1463
|
+
except ValueError as e:
|
|
1464
|
+
return _format_error(str(e), output=output)
|
|
1465
|
+
|
|
1466
|
+
if base == 1 and start_line == 0:
|
|
1467
|
+
start_line = 1
|
|
1468
|
+
if start_line < base:
|
|
1469
|
+
return _format_error(f"start_line must be >= {base}", output=output)
|
|
1470
|
+
|
|
1471
|
+
try:
|
|
1472
|
+
p = _scoped_path(
|
|
1473
|
+
self.action_config.workspace_root,
|
|
1474
|
+
path,
|
|
1475
|
+
self.action_config.workspace_mode,
|
|
1476
|
+
)
|
|
1477
|
+
except Exception as e:
|
|
1478
|
+
return _format_error(str(e), output=output)
|
|
1479
|
+
|
|
1480
|
+
if not p.exists() or not p.is_file():
|
|
1481
|
+
return _format_error(f"File not found: {path}", output=output)
|
|
1482
|
+
|
|
1483
|
+
data = p.read_bytes()
|
|
1484
|
+
if len(data) > self.action_config.max_read_bytes:
|
|
1485
|
+
return _format_error(
|
|
1486
|
+
f"File too large to read (>{self.action_config.max_read_bytes} bytes): {path}",
|
|
1487
|
+
output=output,
|
|
1488
|
+
)
|
|
1489
|
+
|
|
1490
|
+
text = data.decode("utf-8", errors="replace")
|
|
1491
|
+
lines = text.splitlines()
|
|
1492
|
+
start_idx = max(0, start_line - base)
|
|
1493
|
+
end_idx = min(len(lines), start_idx + max(0, limit))
|
|
1494
|
+
slice_lines = lines[start_idx:end_idx]
|
|
1495
|
+
numbered = "\n".join(
|
|
1496
|
+
f"{i + start_idx + base:>6}\t{line}" for i, line in enumerate(slice_lines)
|
|
1497
|
+
)
|
|
1498
|
+
end_line = (start_idx + len(slice_lines) - 1 + base) if slice_lines else start_line
|
|
1499
|
+
|
|
1500
|
+
payload: dict[str, Any] = {
|
|
1501
|
+
"path": str(p),
|
|
1502
|
+
"start_line": start_line,
|
|
1503
|
+
"end_line": end_line,
|
|
1504
|
+
"limit": limit,
|
|
1505
|
+
"total_lines": len(lines),
|
|
1506
|
+
"line_number_base": base,
|
|
1507
|
+
"content": numbered,
|
|
1508
|
+
}
|
|
1509
|
+
if include_raw:
|
|
1510
|
+
payload["content_raw"] = "\n".join(slice_lines)
|
|
1511
|
+
if session is not None:
|
|
1512
|
+
session.repl._namespace["last_read_file_result"] = payload
|
|
1513
|
+
_record_action(session, note="read_file", snippet=f"{path} ({start_line}-{end_line})")
|
|
1514
|
+
return _format_payload(payload, output=output)
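
read_file slices a file by line numbers that honor the session's 0- or 1-based numbering and prefixes each returned line with its display number. The index arithmetic is easy to get backwards, so here is a small self-contained sketch of the same conversion (the helper name is illustrative):

```python
from pathlib import Path


def numbered_slice_sketch(path: str, start_line: int, limit: int, base: int = 1) -> str:
    """Return `limit` lines starting at `start_line`, numbered in the given base (illustrative)."""
    lines = Path(path).read_text(encoding="utf-8", errors="replace").splitlines()
    start_idx = max(0, start_line - base)          # convert display number to list index
    end_idx = min(len(lines), start_idx + max(0, limit))
    return "\n".join(
        f"{i + start_idx + base:>6}\t{line}" for i, line in enumerate(lines[start_idx:end_idx])
    )


# With base=1, numbered_slice_sketch("example.txt", start_line=10, limit=3)
# numbers the returned lines 10, 11, 12.
```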
|
|
1515
|
+
|
|
1516
|
+
@_tool()
|
|
1517
|
+
async def load_file(
|
|
1518
|
+
path: str,
|
|
1519
|
+
context_id: str = "default",
|
|
1520
|
+
format: str = "auto",
|
|
1521
|
+
line_number_base: LineNumberBase = DEFAULT_LINE_NUMBER_BASE,
|
|
1522
|
+
confirm: bool = False,
|
|
1523
|
+
) -> str:
|
|
1524
|
+
"""Load a workspace file into a context session.
|
|
1525
|
+
|
|
1526
|
+
Args:
|
|
1527
|
+
path: File path to read (relative to workspace root)
|
|
1528
|
+
context_id: Identifier for this context session (default: "default")
|
|
1529
|
+
format: Content format - "auto", "text", or "json" (default: "auto")
|
|
1530
|
+
line_number_base: Line number base for this context (0 or 1)
|
|
1531
|
+
confirm: Required if actions are enabled
|
|
1532
|
+
|
|
1533
|
+
Returns:
|
|
1534
|
+
Confirmation with context metadata
|
|
1535
|
+
"""
|
|
1536
|
+
err = _require_actions(confirm)
|
|
1537
|
+
if err:
|
|
1538
|
+
return f"Error: {err}"
|
|
1539
|
+
|
|
1540
|
+
try:
|
|
1541
|
+
base = _validate_line_number_base(line_number_base)
|
|
1542
|
+
except ValueError as e:
|
|
1543
|
+
return f"Error: {e}"
|
|
1544
|
+
|
|
1545
|
+
try:
|
|
1546
|
+
p = _scoped_path(
|
|
1547
|
+
self.action_config.workspace_root,
|
|
1548
|
+
path,
|
|
1549
|
+
self.action_config.workspace_mode,
|
|
1550
|
+
)
|
|
1551
|
+
except Exception as e:
|
|
1552
|
+
return f"Error: {e}"
|
|
1553
|
+
|
|
1554
|
+
if not p.exists() or not p.is_file():
|
|
1555
|
+
return f"Error: File not found: {path}"
|
|
1556
|
+
|
|
1557
|
+
try:
|
|
1558
|
+
text, detected_fmt, warning = _load_text_from_path(
|
|
1559
|
+
p,
|
|
1560
|
+
max_bytes=self.action_config.max_read_bytes,
|
|
1561
|
+
timeout_seconds=self.action_config.max_cmd_seconds,
|
|
1562
|
+
)
|
|
1563
|
+
except ValueError as e:
|
|
1564
|
+
return f"Error: {e}"
|
|
1565
|
+
try:
|
|
1566
|
+
fmt = detected_fmt if format == "auto" else ContentFormat(format)
|
|
1567
|
+
except Exception as e:
|
|
1568
|
+
return f"Error: {e}"
|
|
1569
|
+
meta = _create_session(text, context_id, fmt, base)
|
|
1570
|
+
session = self._sessions[context_id]
|
|
1571
|
+
_record_action(session, note="load_file", snippet=str(p))
|
|
1572
|
+
return _format_context_loaded(context_id, meta, base, note=warning)
|
|
1573
|
+
|
|
1574
|
+
@_tool()
|
|
1575
|
+
async def write_file(
|
|
1576
|
+
path: str,
|
|
1577
|
+
content: str,
|
|
1578
|
+
mode: Literal["overwrite", "append"] = "overwrite",
|
|
1579
|
+
confirm: bool = False,
|
|
1580
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1581
|
+
context_id: str = "default",
|
|
1582
|
+
) -> str | dict[str, Any]:
|
|
1583
|
+
err = _require_actions(confirm)
|
|
1584
|
+
if err:
|
|
1585
|
+
return _format_error(err, output=output)
|
|
1586
|
+
|
|
1587
|
+
session = _get_or_create_session(context_id)
|
|
1588
|
+
session.iterations += 1
|
|
1589
|
+
|
|
1590
|
+
try:
|
|
1591
|
+
p = _scoped_path(
|
|
1592
|
+
self.action_config.workspace_root,
|
|
1593
|
+
path,
|
|
1594
|
+
self.action_config.workspace_mode,
|
|
1595
|
+
)
|
|
1596
|
+
except Exception as e:
|
|
1597
|
+
return _format_error(str(e), output=output)
|
|
1598
|
+
|
|
1599
|
+
payload_bytes = content.encode("utf-8", errors="replace")
|
|
1600
|
+
if len(payload_bytes) > self.action_config.max_write_bytes:
|
|
1601
|
+
return _format_error(
|
|
1602
|
+
f"Content too large to write (>{self.action_config.max_write_bytes} bytes)",
|
|
1603
|
+
output=output,
|
|
1604
|
+
)
|
|
1605
|
+
|
|
1606
|
+
p.parent.mkdir(parents=True, exist_ok=True)
|
|
1607
|
+
file_mode = "ab" if mode == "append" else "wb"
|
|
1608
|
+
with open(p, file_mode) as f:
|
|
1609
|
+
f.write(payload_bytes)
|
|
1610
|
+
|
|
1611
|
+
payload: dict[str, Any] = {
|
|
1612
|
+
"path": str(p),
|
|
1613
|
+
"bytes_written": len(payload_bytes),
|
|
1614
|
+
"mode": mode,
|
|
1615
|
+
}
|
|
1616
|
+
if session is not None:
|
|
1617
|
+
session.repl._namespace["last_write_file_result"] = payload
|
|
1618
|
+
_record_action(session, note="write_file", snippet=f"{path} ({len(payload_bytes)} bytes)")
|
|
1619
|
+
return _format_payload(payload, output=output)
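
write_file enforces a byte ceiling, creates parent directories, and writes bytes so an `append` mode maps directly onto `"ab"`. A compact sketch of that guard-then-write flow (the default size limit here is an assumption for illustration):

```python
from pathlib import Path


def guarded_write_sketch(path: str, content: str, mode: str = "overwrite",
                         max_write_bytes: int = 1_000_000) -> dict:
    """Write UTF-8 content with a size cap and overwrite/append modes (illustrative)."""
    payload = content.encode("utf-8", errors="replace")
    if len(payload) > max_write_bytes:
        raise ValueError(f"Content too large to write (>{max_write_bytes} bytes)")

    p = Path(path)
    p.parent.mkdir(parents=True, exist_ok=True)
    with open(p, "ab" if mode == "append" else "wb") as f:
        f.write(payload)
    return {"path": str(p), "bytes_written": len(payload), "mode": mode}
```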
|
|
1620
|
+
|
|
1621
|
+
@_tool()
|
|
1622
|
+
async def run_tests(
|
|
1623
|
+
runner: Literal["auto", "pytest"] = "auto",
|
|
1624
|
+
args: list[str] | None = None,
|
|
1625
|
+
cwd: str | None = None,
|
|
1626
|
+
confirm: bool = False,
|
|
1627
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1628
|
+
context_id: str = "default",
|
|
1629
|
+
) -> str | dict[str, Any]:
|
|
1630
|
+
err = _require_actions(confirm)
|
|
1631
|
+
if err:
|
|
1632
|
+
return _format_error(err, output=output)
|
|
1633
|
+
|
|
1634
|
+
session = _get_or_create_session(context_id)
|
|
1635
|
+
session.iterations += 1
|
|
1636
|
+
|
|
1637
|
+
runner_resolved = "pytest" if runner == "auto" else runner
|
|
1638
|
+
if runner_resolved != "pytest":
|
|
1639
|
+
return _format_error(f"Unsupported test runner: {runner_resolved}", output=output)
|
|
1640
|
+
|
|
1641
|
+
argv = [sys.executable, "-m", "pytest", "-vv", "--tb=short", "--maxfail=20"]
|
|
1642
|
+
if args:
|
|
1643
|
+
argv.extend(args)
|
|
1644
|
+
|
|
1645
|
+
cwd_path = self.action_config.workspace_root
|
|
1646
|
+
if cwd:
|
|
1647
|
+
try:
|
|
1648
|
+
cwd_path = _scoped_path(
|
|
1649
|
+
self.action_config.workspace_root,
|
|
1650
|
+
cwd,
|
|
1651
|
+
self.action_config.workspace_mode,
|
|
1652
|
+
)
|
|
1653
|
+
except Exception as e:
|
|
1654
|
+
return _format_error(str(e), output=output)
|
|
1655
|
+
|
|
1656
|
+
proc_payload = await _run_subprocess(
|
|
1657
|
+
argv=argv,
|
|
1658
|
+
cwd=cwd_path,
|
|
1659
|
+
timeout_seconds=self.action_config.max_cmd_seconds,
|
|
1660
|
+
)
|
|
1661
|
+
raw_output = (proc_payload.get("stdout") or "") + ("\n" + proc_payload.get("stderr") if proc_payload.get("stderr") else "")
|
|
1662
|
+
|
|
1663
|
+
passed = 0
|
|
1664
|
+
failed = 0
|
|
1665
|
+
errors = 0
|
|
1666
|
+
duration_ms = float(proc_payload.get("duration_ms") or 0.0)
|
|
1667
|
+
exit_code = int(proc_payload.get("exit_code") or 0)
|
|
1668
|
+
|
|
1669
|
+
m_passed = re.search(r"(\d+)\s+passed", raw_output)
|
|
1670
|
+
if m_passed:
|
|
1671
|
+
passed = int(m_passed.group(1))
|
|
1672
|
+
m_failed = re.search(r"(\d+)\s+failed", raw_output)
|
|
1673
|
+
if m_failed:
|
|
1674
|
+
failed = int(m_failed.group(1))
|
|
1675
|
+
m_errors = re.search(r"(\d+)\s+errors?", raw_output)
|
|
1676
|
+
if m_errors:
|
|
1677
|
+
errors = int(m_errors.group(1))
|
|
1678
|
+
|
|
1679
|
+
failures: list[dict[str, Any]] = []
|
|
1680
|
+
section_re = re.compile(r"^_{3,}\s+(?P<name>.+?)\s+_{3,}\s*$", re.MULTILINE)
|
|
1681
|
+
matches = list(section_re.finditer(raw_output))
|
|
1682
|
+
for i, sm in enumerate(matches):
|
|
1683
|
+
start = sm.end()
|
|
1684
|
+
end = matches[i + 1].start() if i + 1 < len(matches) else len(raw_output)
|
|
1685
|
+
block = raw_output[start:end].strip()
|
|
1686
|
+
file = ""
|
|
1687
|
+
line = 0
|
|
1688
|
+
file_line = re.search(r"^(?P<file>.+?\.py):(?P<line>\d+):", block, re.MULTILINE)
|
|
1689
|
+
if file_line:
|
|
1690
|
+
file = file_line.group("file")
|
|
1691
|
+
try:
|
|
1692
|
+
line = int(file_line.group("line"))
|
|
1693
|
+
except Exception:
|
|
1694
|
+
line = 0
|
|
1695
|
+
msg = ""
|
|
1696
|
+
err_line = re.search(r"^E\s+(.+)$", block, re.MULTILINE)
|
|
1697
|
+
if err_line:
|
|
1698
|
+
msg = err_line.group(1).strip()
|
|
1699
|
+
|
|
1700
|
+
failures.append(
|
|
1701
|
+
{
|
|
1702
|
+
"file": file,
|
|
1703
|
+
"line": line,
|
|
1704
|
+
"test_name": sm.group("name").strip(),
|
|
1705
|
+
"message": msg,
|
|
1706
|
+
"traceback": block,
|
|
1707
|
+
}
|
|
1708
|
+
)
|
|
1709
|
+
|
|
1710
|
+
if exit_code != 0 and failed == 0 and errors == 0:
|
|
1711
|
+
errors = 1
|
|
1712
|
+
|
|
1713
|
+
status = "passed"
|
|
1714
|
+
if exit_code != 0:
|
|
1715
|
+
status = "failed" if failed > 0 else "error"
|
|
1716
|
+
|
|
1717
|
+
result: dict[str, Any] = {
|
|
1718
|
+
"passed": passed,
|
|
1719
|
+
"failed": failed,
|
|
1720
|
+
"errors": errors,
|
|
1721
|
+
"failures": failures,
|
|
1722
|
+
"status": status,
|
|
1723
|
+
"duration_ms": duration_ms,
|
|
1724
|
+
"exit_code": exit_code,
|
|
1725
|
+
"raw_output": raw_output,
|
|
1726
|
+
"command": proc_payload,
|
|
1727
|
+
}
|
|
1728
|
+
|
|
1729
|
+
if session is not None:
|
|
1730
|
+
session.repl._namespace["last_test_result"] = result
|
|
1731
|
+
|
|
1732
|
+
summary_snippet = (
|
|
1733
|
+
f"status={status} passed={passed} failed={failed} errors={errors} "
|
|
1734
|
+
f"failures={len(failures)} exit_code={exit_code}"
|
|
1735
|
+
)
|
|
1736
|
+
_record_action(session, note="run_tests", snippet=summary_snippet)
|
|
1737
|
+
for f in failures[:10]:
|
|
1738
|
+
_record_action(session, note="test_failure", snippet=(f.get("message") or f.get("test_name") or "")[:200])
|
|
1739
|
+
|
|
1740
|
+
return _format_payload(result, output=output)
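
run_tests shells out to pytest and then recovers the pass/fail counts and per-test failure sections from the textual report with regular expressions. A minimal sketch of that parsing step, using the same patterns the tool relies on (the function name is illustrative):

```python
import re


def parse_pytest_output_sketch(raw_output: str) -> dict:
    """Pull summary counts and failure sections out of a pytest report (illustrative)."""
    def count(pattern: str) -> int:
        m = re.search(pattern, raw_output)
        return int(m.group(1)) if m else 0

    passed = count(r"(\d+)\s+passed")
    failed = count(r"(\d+)\s+failed")
    errors = count(r"(\d+)\s+errors?")

    # Failure sections look like "____ test_name ____" followed by the traceback.
    section_re = re.compile(r"^_{3,}\s+(?P<name>.+?)\s+_{3,}\s*$", re.MULTILINE)
    sections = list(section_re.finditer(raw_output))
    failures = []
    for i, sm in enumerate(sections):
        end = sections[i + 1].start() if i + 1 < len(sections) else len(raw_output)
        failures.append({"test_name": sm.group("name").strip(),
                         "traceback": raw_output[sm.end():end].strip()})

    return {"passed": passed, "failed": failed, "errors": errors, "failures": failures}
```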
|
|
1741
|
+
|
|
1742
|
+
@_tool()
|
|
1743
|
+
async def list_contexts(
|
|
1744
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1745
|
+
) -> str | dict[str, Any]:
|
|
1746
|
+
items: list[dict[str, Any]] = []
|
|
1747
|
+
for cid, session in self._sessions.items():
|
|
1748
|
+
items.append(
|
|
1749
|
+
{
|
|
1750
|
+
"context_id": cid,
|
|
1751
|
+
"created_at": session.created_at.isoformat(),
|
|
1752
|
+
"iterations": session.iterations,
|
|
1753
|
+
"format": session.meta.format.value,
|
|
1754
|
+
"size_chars": session.meta.size_chars,
|
|
1755
|
+
"size_lines": session.meta.size_lines,
|
|
1756
|
+
"estimated_tokens": session.meta.size_tokens_estimate,
|
|
1757
|
+
"line_number_base": session.line_number_base,
|
|
1758
|
+
"evidence_count": len(session.evidence),
|
|
1759
|
+
}
|
|
1760
|
+
)
|
|
1761
|
+
|
|
1762
|
+
payload: dict[str, Any] = {
|
|
1763
|
+
"count": len(items),
|
|
1764
|
+
"items": sorted(items, key=lambda x: x["context_id"]),
|
|
1765
|
+
}
|
|
1766
|
+
return _format_payload(payload, output=output)
|
|
1767
|
+
|
|
1768
|
+
@_tool()
|
|
1769
|
+
async def diff_contexts(
|
|
1770
|
+
a: str,
|
|
1771
|
+
b: str,
|
|
1772
|
+
context_lines: int = 3,
|
|
1773
|
+
max_lines: int = 400,
|
|
1774
|
+
output: Literal["markdown", "text"] = "markdown",
|
|
1775
|
+
) -> str:
|
|
1776
|
+
if a not in self._sessions:
|
|
1777
|
+
return f"Error: No context loaded with ID '{a}'. Use load_context first."
|
|
1778
|
+
if b not in self._sessions:
|
|
1779
|
+
return f"Error: No context loaded with ID '{b}'. Use load_context first."
|
|
1780
|
+
|
|
1781
|
+
sa = self._sessions[a]
|
|
1782
|
+
sb = self._sessions[b]
|
|
1783
|
+
sa.iterations += 1
|
|
1784
|
+
sb.iterations += 1
|
|
1785
|
+
|
|
1786
|
+
a_ctx = sa.repl.get_variable("ctx")
|
|
1787
|
+
b_ctx = sb.repl.get_variable("ctx")
|
|
1788
|
+
if not isinstance(a_ctx, str) or not isinstance(b_ctx, str):
|
|
1789
|
+
return "Error: diff_contexts currently supports only text contexts"
|
|
1790
|
+
|
|
1791
|
+
a_lines = a_ctx.splitlines(keepends=True)
|
|
1792
|
+
b_lines = b_ctx.splitlines(keepends=True)
|
|
1793
|
+
diff_iter = difflib.unified_diff(
|
|
1794
|
+
a_lines,
|
|
1795
|
+
b_lines,
|
|
1796
|
+
fromfile=a,
|
|
1797
|
+
tofile=b,
|
|
1798
|
+
n=max(0, context_lines),
|
|
1799
|
+
)
|
|
1800
|
+
diff_lines = list(diff_iter)
|
|
1801
|
+
truncated = False
|
|
1802
|
+
if len(diff_lines) > max(0, max_lines):
|
|
1803
|
+
diff_lines = diff_lines[: max(0, max_lines)]
|
|
1804
|
+
truncated = True
|
|
1805
|
+
|
|
1806
|
+
diff_text = "".join(diff_lines)
|
|
1807
|
+
if truncated:
|
|
1808
|
+
diff_text += "\n... (truncated)"
|
|
1809
|
+
|
|
1810
|
+
_record_action(sa, note="diff_contexts", snippet=f"{a} vs {b}")
|
|
1811
|
+
_record_action(sb, note="diff_contexts", snippet=f"{a} vs {b}")
|
|
1812
|
+
|
|
1813
|
+
if output == "text":
|
|
1814
|
+
return diff_text
|
|
1815
|
+
return f"```diff\n{diff_text}\n```"
|
|
1816
|
+
|
|
1817
|
+
@_tool()
|
|
1818
|
+
async def save_session(
|
|
1819
|
+
session_id: str = "default",
|
|
1820
|
+
context_id: str | None = None,
|
|
1821
|
+
path: str = "aleph_session.json",
|
|
1822
|
+
confirm: bool = False,
|
|
1823
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1824
|
+
) -> str | dict[str, Any]:
|
|
1825
|
+
"""Save a session to disk.
|
|
1826
|
+
|
|
1827
|
+
Use context_id="*" or session_id="*" to save all sessions as a memory pack.
|
|
1828
|
+
"""
|
|
1829
|
+
err = _require_actions(confirm)
|
|
1830
|
+
if err:
|
|
1831
|
+
return _format_error(err, output=output)
|
|
1832
|
+
|
|
1833
|
+
target_id = context_id or session_id
|
|
1834
|
+
if target_id in {"*", "all"}:
|
|
1835
|
+
payload, skipped = _build_memory_pack_payload()
|
|
1836
|
+
pack_path = path if path != "aleph_session.json" else ".aleph/memory_pack.json"
|
|
1837
|
+
out_bytes = json.dumps(payload, ensure_ascii=False, indent=2).encode("utf-8", errors="replace")
|
|
1838
|
+
if len(out_bytes) > self.action_config.max_write_bytes:
|
|
1839
|
+
return _format_error(
|
|
1840
|
+
f"Session file too large to write (>{self.action_config.max_write_bytes} bytes)",
|
|
1841
|
+
output=output,
|
|
1842
|
+
)
|
|
1843
|
+
try:
|
|
1844
|
+
p = _scoped_path(
|
|
1845
|
+
self.action_config.workspace_root,
|
|
1846
|
+
pack_path,
|
|
1847
|
+
self.action_config.workspace_mode,
|
|
1848
|
+
)
|
|
1849
|
+
except Exception as e:
|
|
1850
|
+
return _format_error(str(e), output=output)
|
|
1851
|
+
|
|
1852
|
+
p.parent.mkdir(parents=True, exist_ok=True)
|
|
1853
|
+
with open(p, "wb") as f:
|
|
1854
|
+
f.write(out_bytes)
|
|
1855
|
+
|
|
1856
|
+
for sess in self._sessions.values():
|
|
1857
|
+
_record_action(sess, note="save_memory_pack", snippet=str(p))
|
|
1858
|
+
|
|
1859
|
+
payload_out = {"path": str(p), "bytes_written": len(out_bytes), "sessions": len(payload["sessions"])}
|
|
1860
|
+
if skipped:
|
|
1861
|
+
payload_out["skipped"] = skipped
|
|
1862
|
+
return _format_payload(payload_out, output=output)
|
|
1863
|
+
|
|
1864
|
+
if target_id not in self._sessions:
|
|
1865
|
+
return _format_error(f"No context loaded with ID '{target_id}'. Use load_context first.", output=output)
|
|
1866
|
+
|
|
1867
|
+
session = self._sessions[target_id]
|
|
1868
|
+
session.iterations += 1
|
|
1869
|
+
|
|
1870
|
+
payload = _session_to_payload(target_id, session)
|
|
1871
|
+
out_bytes = json.dumps(payload, ensure_ascii=False, indent=2).encode("utf-8", errors="replace")
|
|
1872
|
+
if len(out_bytes) > self.action_config.max_write_bytes:
|
|
1873
|
+
return _format_error(
|
|
1874
|
+
f"Session file too large to write (>{self.action_config.max_write_bytes} bytes)",
|
|
1875
|
+
output=output,
|
|
1876
|
+
)
|
|
1877
|
+
|
|
1878
|
+
try:
|
|
1879
|
+
p = _scoped_path(
|
|
1880
|
+
self.action_config.workspace_root,
|
|
1881
|
+
path,
|
|
1882
|
+
self.action_config.workspace_mode,
|
|
1883
|
+
)
|
|
1884
|
+
except Exception as e:
|
|
1885
|
+
return _format_error(str(e), output=output)
|
|
1886
|
+
|
|
1887
|
+
p.parent.mkdir(parents=True, exist_ok=True)
|
|
1888
|
+
with open(p, "wb") as f:
|
|
1889
|
+
f.write(out_bytes)
|
|
1890
|
+
|
|
1891
|
+
_record_action(session, note="save_session", snippet=str(p))
|
|
1892
|
+
return _format_payload({"path": str(p), "bytes_written": len(out_bytes)}, output=output)
|
|
1893
|
+
|
|
1894
|
+
@_tool()
|
|
1895
|
+
async def load_session(
|
|
1896
|
+
path: str,
|
|
1897
|
+
session_id: str | None = None,
|
|
1898
|
+
context_id: str | None = None,
|
|
1899
|
+
confirm: bool = False,
|
|
1900
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
1901
|
+
) -> str | dict[str, Any]:
|
|
1902
|
+
"""Load a session from disk (supports memory packs)."""
|
|
1903
|
+
err = _require_actions(confirm)
|
|
1904
|
+
if err:
|
|
1905
|
+
return _format_error(err, output=output)
|
|
1906
|
+
|
|
1907
|
+
try:
|
|
1908
|
+
p = _scoped_path(
|
|
1909
|
+
self.action_config.workspace_root,
|
|
1910
|
+
path,
|
|
1911
|
+
self.action_config.workspace_mode,
|
|
1912
|
+
)
|
|
1913
|
+
except Exception as e:
|
|
1914
|
+
return _format_error(str(e), output=output)
|
|
1915
|
+
|
|
1916
|
+
if not p.exists() or not p.is_file():
|
|
1917
|
+
return _format_error(f"File not found: {path}", output=output)
|
|
1918
|
+
|
|
1919
|
+
data = p.read_bytes()
|
|
1920
|
+
if len(data) > self.action_config.max_read_bytes:
|
|
1921
|
+
return _format_error(
|
|
1922
|
+
f"Session file too large to read (>{self.action_config.max_read_bytes} bytes): {path}",
|
|
1923
|
+
output=output,
|
|
1924
|
+
)
|
|
1925
|
+
|
|
1926
|
+
try:
|
|
1927
|
+
obj = json.loads(data.decode("utf-8", errors="replace"))
|
|
1928
|
+
except Exception as e:
|
|
1929
|
+
return _format_error(f"Failed to parse JSON: {e}", output=output)
|
|
1930
|
+
|
|
1931
|
+
if not isinstance(obj, dict):
|
|
1932
|
+
return _format_error("Invalid session file format", output=output)
|
|
1933
|
+
|
|
1934
|
+
schema = obj.get("schema")
|
|
1935
|
+
if schema == "aleph.memory_pack.v1":
|
|
1936
|
+
sessions = obj.get("sessions")
|
|
1937
|
+
if not isinstance(sessions, list):
|
|
1938
|
+
return _format_error("Invalid memory pack format", output=output)
|
|
1939
|
+
loaded: list[str] = []
|
|
1940
|
+
skipped_existing: list[str] = []
|
|
1941
|
+
skipped_invalid = 0
|
|
1942
|
+
for payload in sessions:
|
|
1943
|
+
if not isinstance(payload, dict):
|
|
1944
|
+
skipped_invalid += 1
|
|
1945
|
+
continue
|
|
1946
|
+
file_session_id = payload.get("context_id") or payload.get("session_id")
|
|
1947
|
+
resolved_id = str(file_session_id) if file_session_id else f"session_{len(self._sessions) + 1}"
|
|
1948
|
+
if resolved_id in self._sessions:
|
|
1949
|
+
skipped_existing.append(resolved_id)
|
|
1950
|
+
continue
|
|
1951
|
+
try:
|
|
1952
|
+
session = _session_from_payload(
|
|
1953
|
+
payload,
|
|
1954
|
+
resolved_id,
|
|
1955
|
+
self.sandbox_config,
|
|
1956
|
+
loop=asyncio.get_running_loop(),
|
|
1957
|
+
)
|
|
1958
|
+
except Exception:
|
|
1959
|
+
skipped_invalid += 1
|
|
1960
|
+
continue
|
|
1961
|
+
self._sessions[resolved_id] = session
|
|
1962
|
+
_record_action(session, note="load_memory_pack", snippet=str(p))
|
|
1963
|
+
loaded.append(resolved_id)
|
|
1964
|
+
return _format_payload(
|
|
1965
|
+
{
|
|
1966
|
+
"loaded": loaded,
|
|
1967
|
+
"skipped_existing": skipped_existing,
|
|
1968
|
+
"skipped_invalid": skipped_invalid,
|
|
1969
|
+
"loaded_from": str(p),
|
|
1970
|
+
},
|
|
1971
|
+
output=output,
|
|
1972
|
+
)
|
|
1973
|
+
|
|
1974
|
+
file_session_id = obj.get("context_id") or obj.get("session_id")
|
|
1975
|
+
resolved_id = context_id or session_id or (str(file_session_id) if file_session_id else "default")
|
|
1976
|
+
try:
|
|
1977
|
+
session = _session_from_payload(
|
|
1978
|
+
obj,
|
|
1979
|
+
resolved_id,
|
|
1980
|
+
self.sandbox_config,
|
|
1981
|
+
loop=asyncio.get_running_loop(),
|
|
1982
|
+
)
|
|
1983
|
+
except ValueError as e:
|
|
1984
|
+
return _format_error(str(e), output=output)
|
|
1985
|
+
|
|
1986
|
+
self._sessions[resolved_id] = session
|
|
1987
|
+
_record_action(session, note="load_session", snippet=str(p))
|
|
1988
|
+
return _format_payload(
|
|
1989
|
+
{
|
|
1990
|
+
"context_id": resolved_id,
|
|
1991
|
+
"session_id": resolved_id,
|
|
1992
|
+
"line_number_base": session.line_number_base,
|
|
1993
|
+
"loaded_from": str(p),
|
|
1994
|
+
},
|
|
1995
|
+
output=output,
|
|
1996
|
+
)
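
load_session accepts both a single-session file and a multi-session memory pack, and it tells them apart with the `schema` field (`"aleph.memory_pack.v1"`). A stripped-down sketch of that dispatch, leaving the actual session restoration out (the function name is illustrative):

```python
import json
from pathlib import Path


def load_session_payloads_sketch(path: str) -> list[dict]:
    """Return the per-session payload dicts from a session file or memory pack (illustrative)."""
    obj = json.loads(Path(path).read_text(encoding="utf-8", errors="replace"))
    if not isinstance(obj, dict):
        raise ValueError("Invalid session file format")

    if obj.get("schema") == "aleph.memory_pack.v1":
        sessions = obj.get("sessions")
        if not isinstance(sessions, list):
            raise ValueError("Invalid memory pack format")
        # Skip malformed entries rather than failing the whole load.
        return [p for p in sessions if isinstance(p, dict)]

    # Anything else is treated as a single-session payload.
    return [obj]
```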
|
|
1997
|
+
|
|
1998
|
+
@_tool()
|
|
1999
|
+
async def peek_context(
|
|
2000
|
+
start: int = 0,
|
|
2001
|
+
end: int | None = None,
|
|
2002
|
+
context_id: str = "default",
|
|
2003
|
+
unit: Literal["chars", "lines"] = "chars",
|
|
2004
|
+
record_evidence: bool = True,
|
|
2005
|
+
) -> str:
|
|
2006
|
+
"""View a portion of the loaded context.
|
|
2007
|
+
|
|
2008
|
+
Args:
|
|
2009
|
+
start: Starting position (chars are 0-indexed; lines use the session line number base)
|
|
2010
|
+
end: Ending position (chars: exclusive; lines: inclusive, None = to the end)
|
|
2011
|
+
context_id: Context identifier
|
|
2012
|
+
unit: "chars" for character slicing, "lines" for line slicing
|
|
2013
|
+
record_evidence: Store evidence entry for this peek
|
|
2014
|
+
|
|
2015
|
+
Returns:
|
|
2016
|
+
The requested portion of the context
|
|
2017
|
+
"""
|
|
2018
|
+
if context_id not in self._sessions:
|
|
2019
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2020
|
+
|
|
2021
|
+
session = self._sessions[context_id]
|
|
2022
|
+
repl = session.repl
|
|
2023
|
+
session.iterations += 1
|
|
2024
|
+
|
|
2025
|
+
if unit == "chars":
|
|
2026
|
+
fn = repl.get_variable("peek")
|
|
2027
|
+
if not callable(fn):
|
|
2028
|
+
return "Error: peek() helper is not available"
|
|
2029
|
+
result = fn(start, end)
|
|
2030
|
+
else:
|
|
2031
|
+
fn = repl.get_variable("lines")
|
|
2032
|
+
if not callable(fn):
|
|
2033
|
+
return "Error: lines() helper is not available"
|
|
2034
|
+
base = session.line_number_base
|
|
2035
|
+
if base == 1 and start == 0:
|
|
2036
|
+
start = 1
|
|
2037
|
+
if end == 0 and base == 1:
|
|
2038
|
+
end = 1
|
|
2039
|
+
if start < base:
|
|
2040
|
+
return f"Error: start must be >= {base} for line-based peeks"
|
|
2041
|
+
if end is not None and end < start:
|
|
2042
|
+
return "Error: end must be >= start"
|
|
2043
|
+
start_idx = start - base
|
|
2044
|
+
end_idx = None if end is None else end - base + 1
|
|
2045
|
+
result = fn(start_idx, end_idx)
|
|
2046
|
+
|
|
2047
|
+
# Track evidence for provenance
|
|
2048
|
+
evidence_before = len(session.evidence)
|
|
2049
|
+
if record_evidence and result:
|
|
2050
|
+
if unit == "lines":
|
|
2051
|
+
lines_count = result.count("\n") + 1 if result else 0
|
|
2052
|
+
end_line = start + max(0, lines_count - 1)
|
|
2053
|
+
session.evidence.append(
|
|
2054
|
+
_Evidence(
|
|
2055
|
+
source="peek",
|
|
2056
|
+
line_range=(start, end_line),
|
|
2057
|
+
pattern=None,
|
|
2058
|
+
note=None,
|
|
2059
|
+
snippet=result[:200],
|
|
2060
|
+
)
|
|
2061
|
+
)
|
|
2062
|
+
else:
|
|
2063
|
+
session.evidence.append(
|
|
2064
|
+
_Evidence(
|
|
2065
|
+
source="peek",
|
|
2066
|
+
line_range=None, # Character ranges don't map to lines easily
|
|
2067
|
+
pattern=None,
|
|
2068
|
+
note=None,
|
|
2069
|
+
snippet=result[:200],
|
|
2070
|
+
)
|
|
2071
|
+
)
|
|
2072
|
+
session.information_gain.append(len(session.evidence) - evidence_before)
|
|
2073
|
+
|
|
2074
|
+
return f"```\n{result}\n```"
|
|
2075
|
+
|
|
2076
|
+
@_tool()
|
|
2077
|
+
async def search_context(
|
|
2078
|
+
pattern: str,
|
|
2079
|
+
context_id: str = "default",
|
|
2080
|
+
max_results: int = 10,
|
|
2081
|
+
context_lines: int = 2,
|
|
2082
|
+
record_evidence: bool = True,
|
|
2083
|
+
evidence_mode: Literal["summary", "all"] = "summary",
|
|
2084
|
+
) -> str:
|
|
2085
|
+
"""Search the context using regex patterns.
|
|
2086
|
+
|
|
2087
|
+
Args:
|
|
2088
|
+
pattern: Regular expression pattern to search for
|
|
2089
|
+
context_id: Context identifier
|
|
2090
|
+
max_results: Maximum number of matches to return
|
|
2091
|
+
context_lines: Number of surrounding lines to include
|
|
2092
|
+
record_evidence: Store evidence entries for this search
|
|
2093
|
+
evidence_mode: "summary" records one entry, "all" records every match
|
|
2094
|
+
|
|
2095
|
+
Returns:
|
|
2096
|
+
Matching lines with surrounding context
|
|
2097
|
+
"""
|
|
2098
|
+
if context_id not in self._sessions:
|
|
2099
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2100
|
+
|
|
2101
|
+
session = self._sessions[context_id]
|
|
2102
|
+
repl = session.repl
|
|
2103
|
+
session.iterations += 1
|
|
2104
|
+
|
|
2105
|
+
fn = repl.get_variable("search")
|
|
2106
|
+
if not callable(fn):
|
|
2107
|
+
return "Error: search() helper is not available"
|
|
2108
|
+
|
|
2109
|
+
try:
|
|
2110
|
+
results = fn(pattern, context_lines=context_lines, max_results=max_results)
|
|
2111
|
+
except re.error as e:
|
|
2112
|
+
return f"Error: Invalid regex pattern `{pattern}`: {e}"
|
|
2113
|
+
|
|
2114
|
+
if not results:
|
|
2115
|
+
return f"No matches found for pattern: `{pattern}`"
|
|
2116
|
+
|
|
2117
|
+
base = session.line_number_base
|
|
2118
|
+
total_lines = session.meta.size_lines
|
|
2119
|
+
max_line = total_lines if base == 1 else max(0, total_lines - 1)
|
|
2120
|
+
|
|
2121
|
+
def _line_range_for(match_line: int) -> tuple[int, int]:
|
|
2122
|
+
if base == 1:
|
|
2123
|
+
start = max(1, match_line - context_lines)
|
|
2124
|
+
end = min(max_line, match_line + context_lines)
|
|
2125
|
+
else:
|
|
2126
|
+
start = max(0, match_line - context_lines)
|
|
2127
|
+
end = min(max_line, match_line + context_lines)
|
|
2128
|
+
return start, end
|
|
2129
|
+
|
|
2130
|
+
# Track evidence for provenance
|
|
2131
|
+
evidence_before = len(session.evidence)
|
|
2132
|
+
out: list[str] = []
|
|
2133
|
+
ranges: list[tuple[int, int]] = []
|
|
2134
|
+
for r in results:
|
|
2135
|
+
try:
|
|
2136
|
+
display_line = r["line_num"]
|
|
2137
|
+
line_range = _line_range_for(display_line)
|
|
2138
|
+
ranges.append(line_range)
|
|
2139
|
+
out.append(f"**Line {display_line}:**\n```\n{r['context']}\n```")
|
|
2140
|
+
except Exception:
|
|
2141
|
+
out.append(str(r))
|
|
2142
|
+
|
|
2143
|
+
if record_evidence:
|
|
2144
|
+
if evidence_mode == "all":
|
|
2145
|
+
for r, line_range in zip(results, ranges):
|
|
2146
|
+
session.evidence.append(
|
|
2147
|
+
_Evidence(
|
|
2148
|
+
source="search",
|
|
2149
|
+
line_range=line_range,
|
|
2150
|
+
pattern=pattern,
|
|
2151
|
+
note=None,
|
|
2152
|
+
snippet=r.get("match", "")[:200],
|
|
2153
|
+
)
|
|
2154
|
+
)
|
|
2155
|
+
else:
|
|
2156
|
+
start = min(r[0] for r in ranges)
|
|
2157
|
+
end = max(r[1] for r in ranges)
|
|
2158
|
+
session.evidence.append(
|
|
2159
|
+
_Evidence(
|
|
2160
|
+
source="search",
|
|
2161
|
+
line_range=(start, end),
|
|
2162
|
+
pattern=pattern,
|
|
2163
|
+
note=f"{len(results)} match(es) (summary)",
|
|
2164
|
+
snippet=results[0].get("match", "")[:200],
|
|
2165
|
+
)
|
|
2166
|
+
)
|
|
2167
|
+
|
|
2168
|
+
# Track information gain
|
|
2169
|
+
session.information_gain.append(len(session.evidence) - evidence_before)
|
|
2170
|
+
|
|
2171
|
+
line_desc = "1-based" if base == 1 else "0-based"
|
|
2172
|
+
return (
|
|
2173
|
+
f"## Search Results for `{pattern}`\n\n"
|
|
2174
|
+
f"Found {len(results)} match(es) (line numbers are {line_desc}):\n\n"
|
|
2175
|
+
+ "\n\n---\n\n".join(out)
|
|
2176
|
+
)
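
search_context leans on the sandbox `search()` helper, which returns each regex hit with its line number and a few surrounding lines of context. A self-contained approximation of that helper, assuming the same result keys (`line_num`, `match`, `context`):

```python
import re


def search_with_context_sketch(text: str, pattern: str, context_lines: int = 2,
                               max_results: int = 10, base: int = 1) -> list[dict]:
    """Regex-search text and return matches with surrounding lines (illustrative)."""
    lines = text.splitlines()
    compiled = re.compile(pattern)
    results = []
    for idx, line in enumerate(lines):
        m = compiled.search(line)
        if not m:
            continue
        lo = max(0, idx - context_lines)
        hi = min(len(lines), idx + context_lines + 1)
        results.append({
            "line_num": idx + base,                  # report in the session's numbering
            "match": m.group(0),
            "context": "\n".join(lines[lo:hi]),
        })
        if len(results) >= max_results:
            break
    return results
```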
|
|
2177
|
+
|
|
2178
|
+
@_tool()
|
|
2179
|
+
async def semantic_search(
|
|
2180
|
+
query: str,
|
|
2181
|
+
context_id: str = "default",
|
|
2182
|
+
chunk_size: int = 1000,
|
|
2183
|
+
overlap: int = 100,
|
|
2184
|
+
top_k: int = 5,
|
|
2185
|
+
embed_dim: int = 256,
|
|
2186
|
+
record_evidence: bool = True,
|
|
2187
|
+
output: Literal["markdown", "json", "object"] = "markdown",
|
|
2188
|
+
) -> str | dict[str, Any]:
|
|
2189
|
+
"""Semantic search over the context using lightweight embeddings.
|
|
2190
|
+
|
|
2191
|
+
Args:
|
|
2192
|
+
query: Semantic query
|
|
2193
|
+
context_id: Context identifier
|
|
2194
|
+
chunk_size: Characters per chunk (default: 1000)
|
|
2195
|
+
overlap: Overlap between chunks (default: 100)
|
|
2196
|
+
top_k: Number of results to return (default: 5)
|
|
2197
|
+
embed_dim: Embedding dimensions (default: 256)
|
|
2198
|
+
record_evidence: Store evidence entry for this search
|
|
2199
|
+
output: "markdown", "json", or "object"
|
|
2200
|
+
"""
|
|
2201
|
+
if context_id not in self._sessions:
|
|
2202
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2203
|
+
|
|
2204
|
+
session = self._sessions[context_id]
|
|
2205
|
+
repl = session.repl
|
|
2206
|
+
session.iterations += 1
|
|
2207
|
+
|
|
2208
|
+
fn = repl.get_variable("semantic_search")
|
|
2209
|
+
if not callable(fn):
|
|
2210
|
+
return "Error: semantic_search() helper is not available"
|
|
2211
|
+
|
|
2212
|
+
try:
|
|
2213
|
+
results = fn(
|
|
2214
|
+
query,
|
|
2215
|
+
chunk_size=chunk_size,
|
|
2216
|
+
overlap=overlap,
|
|
2217
|
+
top_k=top_k,
|
|
2218
|
+
embed_dim=embed_dim,
|
|
2219
|
+
)
|
|
2220
|
+
except Exception as e:
|
|
2221
|
+
return f"Error: {e}"
|
|
2222
|
+
|
|
2223
|
+
evidence_before = len(session.evidence)
|
|
2224
|
+
if record_evidence and results:
|
|
2225
|
+
session.evidence.append(
|
|
2226
|
+
_Evidence(
|
|
2227
|
+
source="search",
|
|
2228
|
+
line_range=None,
|
|
2229
|
+
pattern=query,
|
|
2230
|
+
note="semantic_search",
|
|
2231
|
+
snippet=str(results[0].get("preview") or "")[:200],
|
|
2232
|
+
)
|
|
2233
|
+
)
|
|
2234
|
+
session.information_gain.append(len(session.evidence) - evidence_before)
|
|
2235
|
+
|
|
2236
|
+
payload = {
|
|
2237
|
+
"context_id": context_id,
|
|
2238
|
+
"query": query,
|
|
2239
|
+
"count": len(results),
|
|
2240
|
+
"results": results,
|
|
2241
|
+
}
|
|
2242
|
+
|
|
2243
|
+
session.repl._namespace["last_semantic_search"] = payload
|
|
2244
|
+
|
|
2245
|
+
if output == "object":
|
|
2246
|
+
return payload
|
|
2247
|
+
if output == "json":
|
|
2248
|
+
return json.dumps(payload, ensure_ascii=False, indent=2)
|
|
2249
|
+
|
|
2250
|
+
parts = [
|
|
2251
|
+
"## Semantic Search Results",
|
|
2252
|
+
f"Query: `{query}`",
|
|
2253
|
+
f"Matches: {len(results)}",
|
|
2254
|
+
]
|
|
2255
|
+
if results:
|
|
2256
|
+
parts.append("")
|
|
2257
|
+
for r in results:
|
|
2258
|
+
parts.append(
|
|
2259
|
+
f"- Chunk {r['index']} ({r['start_char']}-{r['end_char']}), score {r['score']:.3f}: {r['preview']}"
|
|
2260
|
+
)
|
|
2261
|
+
parts.append("")
|
|
2262
|
+
parts.append("*Use `peek_context(start, end, unit='chars')` for full chunks.*")
|
|
2263
|
+
return "\n".join(parts)
|
|
2264
|
+
|
|
2265
|
+
@_tool()
|
|
2266
|
+
async def exec_python(
|
|
2267
|
+
code: str,
|
|
2268
|
+
context_id: str = "default",
|
|
2269
|
+
) -> str:
|
|
2270
|
+
"""Execute Python code in the sandboxed REPL.
|
|
2271
|
+
|
|
2272
|
+
The loaded context is available as the variable `ctx`.
|
|
2273
|
+
|
|
2274
|
+
Available helpers:
|
|
2275
|
+
- peek(start, end): View characters
|
|
2276
|
+
- lines(start, end): View lines
|
|
2277
|
+
- search(pattern, context_lines=2, max_results=20): Regex search
|
|
2278
|
+
- chunk(chunk_size, overlap=0): Split context into chunks
|
|
2279
|
+
- semantic_search(query, chunk_size=1000, overlap=100, top_k=5): Meaning-based search
|
|
2280
|
+
- embed_text(text, dim=256): Lightweight embedding vector
|
|
2281
|
+
- cite(snippet, line_range=None, note=None): Tag evidence for provenance
|
|
2282
|
+
- allowed_imports(): List allowed imports in the sandbox
|
|
2283
|
+
- is_import_allowed(name): Check if an import is allowed
|
|
2284
|
+
- blocked_names(): List forbidden builtin names
|
|
2285
|
+
|
|
2286
|
+
Available imports: re, json, csv, math, statistics, collections,
|
|
2287
|
+
itertools, functools, datetime, textwrap, difflib
|
|
2288
|
+
|
|
2289
|
+
Args:
|
|
2290
|
+
code: Python code to execute
|
|
2291
|
+
context_id: Context identifier
|
|
2292
|
+
|
|
2293
|
+
Returns:
|
|
2294
|
+
Execution results (stdout, return value, errors)
|
|
2295
|
+
"""
|
|
2296
|
+
if context_id not in self._sessions:
|
|
2297
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2298
|
+
|
|
2299
|
+
session = self._sessions[context_id]
|
|
2300
|
+
repl = session.repl
|
|
2301
|
+
session.iterations += 1
|
|
2302
|
+
|
|
2303
|
+
# Track evidence count before execution
|
|
2304
|
+
evidence_before = len(session.evidence)
|
|
2305
|
+
|
|
2306
|
+
result = await repl.execute_async(code)
|
|
2307
|
+
|
|
2308
|
+
# Collect citations from REPL and convert to evidence
|
|
2309
|
+
if repl._citations:
|
|
2310
|
+
for citation in repl._citations:
|
|
2311
|
+
session.evidence.append(_Evidence(
|
|
2312
|
+
source="manual",
|
|
2313
|
+
line_range=citation["line_range"],
|
|
2314
|
+
pattern=None,
|
|
2315
|
+
note=citation["note"],
|
|
2316
|
+
snippet=citation["snippet"][:200],
|
|
2317
|
+
))
|
|
2318
|
+
repl._citations.clear() # Clear after collecting
|
|
2319
|
+
|
|
2320
|
+
# Track information gain
|
|
2321
|
+
session.information_gain.append(len(session.evidence) - evidence_before)
|
|
2322
|
+
|
|
2323
|
+
parts: list[str] = []
|
|
2324
|
+
|
|
2325
|
+
if result.stdout:
|
|
2326
|
+
parts.append(f"**Output:**\n```\n{result.stdout}\n```")
|
|
2327
|
+
|
|
2328
|
+
if result.return_value is not None:
|
|
2329
|
+
parts.append(f"**Return Value:** `{result.return_value}`")
|
|
2330
|
+
|
|
2331
|
+
if result.variables_updated:
|
|
2332
|
+
parts.append(f"**Variables Updated:** {', '.join(f'`{v}`' for v in result.variables_updated)}")
|
|
2333
|
+
|
|
2334
|
+
if result.stderr:
|
|
2335
|
+
parts.append(f"**Stderr:**\n```\n{result.stderr}\n```")
|
|
2336
|
+
|
|
2337
|
+
if result.error:
|
|
2338
|
+
parts.append(f"**Error:** {result.error}")
|
|
2339
|
+
|
|
2340
|
+
if result.truncated:
|
|
2341
|
+
parts.append("*Note: Output was truncated*")
|
|
2342
|
+
|
|
2343
|
+
if not parts:
|
|
2344
|
+
parts.append("*(No output)*")
|
|
2345
|
+
|
|
2346
|
+
return "## Execution Result\n\n" + "\n\n".join(parts)
|
|
2347
|
+
|
|
2348
|
+
@_tool()
|
|
2349
|
+
async def get_variable(
|
|
2350
|
+
name: str,
|
|
2351
|
+
context_id: str = "default",
|
|
2352
|
+
) -> str:
|
|
2353
|
+
"""Retrieve a variable from the REPL namespace.
|
|
2354
|
+
|
|
2355
|
+
Args:
|
|
2356
|
+
name: Variable name to retrieve
|
|
2357
|
+
context_id: Context identifier
|
|
2358
|
+
|
|
2359
|
+
Returns:
|
|
2360
|
+
String representation of the variable's value
|
|
2361
|
+
"""
|
|
2362
|
+
if context_id not in self._sessions:
|
|
2363
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2364
|
+
|
|
2365
|
+
repl = self._sessions[context_id].repl
|
|
2366
|
+
# Check if variable exists in namespace (not just if it's None)
|
|
2367
|
+
if name not in repl._namespace:
|
|
2368
|
+
return f"Variable `{name}` not found in namespace."
|
|
2369
|
+
value = repl._namespace[name]
|
|
2370
|
+
|
|
2371
|
+
# Format nicely for complex types
|
|
2372
|
+
if isinstance(value, (dict, list)):
|
|
2373
|
+
try:
|
|
2374
|
+
formatted = json.dumps(value, indent=2, ensure_ascii=False)
|
|
2375
|
+
return f"**`{name}`:**\n```json\n{formatted}\n```"
|
|
2376
|
+
except Exception:
|
|
2377
|
+
return f"**`{name}`:** `{value}`"
|
|
2378
|
+
|
|
2379
|
+
return f"**`{name}`:** `{value}`"
|
|
2380
|
+
|
|
2381
|
+
@_tool()
|
|
2382
|
+
async def think(
|
|
2383
|
+
question: str,
|
|
2384
|
+
context_slice: str | None = None,
|
|
2385
|
+
context_id: str = "default",
|
|
2386
|
+
) -> str:
|
|
2387
|
+
"""Structure a reasoning sub-step.
|
|
2388
|
+
|
|
2389
|
+
Use this when you need to break down a complex problem into
|
|
2390
|
+
smaller questions. This tool helps you organize your thinking -
|
|
2391
|
+
YOU provide the reasoning, not an external API.
|
|
2392
|
+
|
|
2393
|
+
Args:
|
|
2394
|
+
question: The sub-question to reason about
|
|
2395
|
+
context_slice: Optional relevant context excerpt
|
|
2396
|
+
context_id: Context identifier
|
|
2397
|
+
|
|
2398
|
+
Returns:
|
|
2399
|
+
A structured prompt for you to reason through
|
|
2400
|
+
"""
|
|
2401
|
+
if context_id in self._sessions:
|
|
2402
|
+
self._sessions[context_id].iterations += 1
|
|
2403
|
+
self._sessions[context_id].think_history.append(question)
|
|
2404
|
+
|
|
2405
|
+
parts = [
|
|
2406
|
+
"## Reasoning Step",
|
|
2407
|
+
"",
|
|
2408
|
+
f"**Question:** {question}",
|
|
2409
|
+
]
|
|
2410
|
+
|
|
2411
|
+
if context_slice:
|
|
2412
|
+
parts.extend([
|
|
2413
|
+
"",
|
|
2414
|
+
"**Relevant Context:**",
|
|
2415
|
+
"```",
|
|
2416
|
+
context_slice[:2000], # Limit context slice
|
|
2417
|
+
"```",
|
|
2418
|
+
])
|
|
2419
|
+
|
|
2420
|
+
parts.extend([
|
|
2421
|
+
"",
|
|
2422
|
+
"---",
|
|
2423
|
+
"",
|
|
2424
|
+
"**Your task:** Reason through this step-by-step. Consider:",
|
|
2425
|
+
"1. What information do you have?",
|
|
2426
|
+
"2. What can you infer?",
|
|
2427
|
+
"3. What's the answer to this sub-question?",
|
|
2428
|
+
"",
|
|
2429
|
+
"*After reasoning, use `exec_python` to verify or `finalize` if done.*",
|
|
2430
|
+
])
|
|
2431
|
+
|
|
2432
|
+
return "\n".join(parts)
|
|
2433
|
+
|
|
2434
|
+
@_tool()
|
|
2435
|
+
async def tasks(
|
|
2436
|
+
action: Literal["add", "list", "update", "done", "remove"] = "list",
|
|
2437
|
+
title: str | None = None,
|
|
2438
|
+
task_id: int | None = None,
|
|
2439
|
+
status: Literal["todo", "doing", "done"] | None = None,
|
|
2440
|
+
note: str | None = None,
|
|
2441
|
+
context_id: str = "default",
|
|
2442
|
+
output: Literal["markdown", "json", "object"] = "markdown",
|
|
2443
|
+
) -> str | dict[str, Any]:
|
|
2444
|
+
"""Track tasks tied to a context session."""
|
|
2445
|
+
session = _get_or_create_session(context_id)
|
|
2446
|
+
session.iterations += 1
|
|
2447
|
+
|
|
2448
|
+
valid_statuses = {"todo", "doing", "done"}
|
|
2449
|
+
now = datetime.now().isoformat()
|
|
2450
|
+
|
|
2451
|
+
if action == "add":
|
|
2452
|
+
if not title:
|
|
2453
|
+
return _format_error("title is required for add", output=output)
|
|
2454
|
+
session.task_counter += 1
|
|
2455
|
+
task = {
|
|
2456
|
+
"id": session.task_counter,
|
|
2457
|
+
"title": title,
|
|
2458
|
+
"status": status if status in valid_statuses else "todo",
|
|
2459
|
+
"note": note,
|
|
2460
|
+
"created_at": now,
|
|
2461
|
+
"updated_at": now,
|
|
2462
|
+
}
|
|
2463
|
+
session.tasks.append(task)
|
|
2464
|
+
elif action in {"update", "done"}:
|
|
2465
|
+
if task_id is None:
|
|
2466
|
+
return _format_error("task_id is required for update/done", output=output)
|
|
2467
|
+
task = next((t for t in session.tasks if t.get("id") == task_id), None)
|
|
2468
|
+
if task is None:
|
|
2469
|
+
return _format_error(f"Task {task_id} not found", output=output)
|
|
2470
|
+
if title is not None:
|
|
2471
|
+
task["title"] = title
|
|
2472
|
+
if action == "done":
|
|
2473
|
+
task["status"] = "done"
|
|
2474
|
+
elif status in valid_statuses:
|
|
2475
|
+
task["status"] = status
|
|
2476
|
+
if note is not None:
|
|
2477
|
+
task["note"] = note
|
|
2478
|
+
task["updated_at"] = now
|
|
2479
|
+
elif action == "remove":
|
|
2480
|
+
if task_id is None:
|
|
2481
|
+
return _format_error("task_id is required for remove", output=output)
|
|
2482
|
+
before = len(session.tasks)
|
|
2483
|
+
session.tasks = [t for t in session.tasks if t.get("id") != task_id]
|
|
2484
|
+
if len(session.tasks) == before:
|
|
2485
|
+
return _format_error(f"Task {task_id} not found", output=output)
|
|
2486
|
+
|
|
2487
|
+
counts = {
|
|
2488
|
+
"todo": sum(1 for t in session.tasks if t.get("status") == "todo"),
|
|
2489
|
+
"doing": sum(1 for t in session.tasks if t.get("status") == "doing"),
|
|
2490
|
+
"done": sum(1 for t in session.tasks if t.get("status") == "done"),
|
|
2491
|
+
}
|
|
2492
|
+
payload = {
|
|
2493
|
+
"context_id": context_id,
|
|
2494
|
+
"total": len(session.tasks),
|
|
2495
|
+
"counts": counts,
|
|
2496
|
+
"items": sorted(session.tasks, key=lambda t: int(t.get("id", 0))),
|
|
2497
|
+
}
|
|
2498
|
+
|
|
2499
|
+
if output == "object":
|
|
2500
|
+
return payload
|
|
2501
|
+
if output == "json":
|
|
2502
|
+
return json.dumps(payload, ensure_ascii=False, indent=2)
|
|
2503
|
+
|
|
2504
|
+
parts = [
|
|
2505
|
+
"## Tasks",
|
|
2506
|
+
f"Total: {payload['total']} (todo: {counts['todo']}, doing: {counts['doing']}, done: {counts['done']})",
|
|
2507
|
+
]
|
|
2508
|
+
if payload["items"]:
|
|
2509
|
+
parts.append("")
|
|
2510
|
+
for task in payload["items"]:
|
|
2511
|
+
note_text = f" — {task['note']}" if task.get("note") else ""
|
|
2512
|
+
parts.append(f"- [{task.get('status', 'todo')}] #{task.get('id')}: {task.get('title')}{note_text}")
|
|
2513
|
+
return "\n".join(parts)
|
|
2514
|
+
|
|
2515
|
+
@_tool()
|
|
2516
|
+
async def get_status(
|
|
2517
|
+
context_id: str = "default",
|
|
2518
|
+
) -> str:
|
|
2519
|
+
"""Get current session status.
|
|
2520
|
+
|
|
2521
|
+
Shows loaded context info, iteration count, variables, and history.
|
|
2522
|
+
|
|
2523
|
+
Args:
|
|
2524
|
+
context_id: Context identifier
|
|
2525
|
+
|
|
2526
|
+
Returns:
|
|
2527
|
+
Formatted status report
|
|
2528
|
+
"""
|
|
2529
|
+
if context_id not in self._sessions:
|
|
2530
|
+
return f"No context loaded with ID '{context_id}'. Use load_context to start."
|
|
2531
|
+
|
|
2532
|
+
session = self._sessions[context_id]
|
|
2533
|
+
meta = session.meta
|
|
2534
|
+
repl = session.repl
|
|
2535
|
+
|
|
2536
|
+
# Get all user-defined variables (excluding builtins and helpers)
|
|
2537
|
+
excluded = {
|
|
2538
|
+
"ctx",
|
|
2539
|
+
"peek",
|
|
2540
|
+
"lines",
|
|
2541
|
+
"search",
|
|
2542
|
+
"chunk",
|
|
2543
|
+
"cite",
|
|
2544
|
+
"line_number_base",
|
|
2545
|
+
"allowed_imports",
|
|
2546
|
+
"is_import_allowed",
|
|
2547
|
+
"blocked_names",
|
|
2548
|
+
"__builtins__",
|
|
2549
|
+
}
|
|
2550
|
+
variables = {
|
|
2551
|
+
k: type(v).__name__
|
|
2552
|
+
for k, v in repl._namespace.items()
|
|
2553
|
+
if k not in excluded and not k.startswith("_")
|
|
2554
|
+
}
|
|
2555
|
+
|
|
2556
|
+
parts = [
|
|
2557
|
+
"## Context Status",
|
|
2558
|
+
"",
|
|
2559
|
+
f"**Context ID:** `{context_id}`",
|
|
2560
|
+
f"**Created:** {session.created_at.strftime('%Y-%m-%d %H:%M:%S')}",
|
|
2561
|
+
f"**Iterations:** {session.iterations}",
|
|
2562
|
+
"",
|
|
2563
|
+
"### Context Info",
|
|
2564
|
+
f"- Format: {meta.format.value}",
|
|
2565
|
+
f"- Size: {meta.size_chars:,} characters",
|
|
2566
|
+
f"- Lines: {meta.size_lines:,}",
|
|
2567
|
+
f"- Est. tokens: ~{meta.size_tokens_estimate:,}",
|
|
2568
|
+
f"- Line numbers: {'1-based' if session.line_number_base == 1 else '0-based'}",
|
|
2569
|
+
]
|
|
2570
|
+
|
|
2571
|
+
if variables:
|
|
2572
|
+
parts.extend([
|
|
2573
|
+
"",
|
|
2574
|
+
"### User Variables",
|
|
2575
|
+
])
|
|
2576
|
+
for name, vtype in variables.items():
|
|
2577
|
+
parts.append(f"- `{name}`: {vtype}")
|
|
2578
|
+
|
|
2579
|
+
if session.think_history:
|
|
2580
|
+
parts.extend([
|
|
2581
|
+
"",
|
|
2582
|
+
"### Reasoning History",
|
|
2583
|
+
])
|
|
2584
|
+
for i, q in enumerate(session.think_history[-5:], 1):
|
|
2585
|
+
parts.append(f"{i}. {q[:100]}{'...' if len(q) > 100 else ''}")
|
|
2586
|
+
|
|
2587
|
+
if session.tasks:
|
|
2588
|
+
counts = {
|
|
2589
|
+
"todo": sum(1 for t in session.tasks if t.get("status") == "todo"),
|
|
2590
|
+
"doing": sum(1 for t in session.tasks if t.get("status") == "doing"),
|
|
2591
|
+
"done": sum(1 for t in session.tasks if t.get("status") == "done"),
|
|
2592
|
+
}
|
|
2593
|
+
parts.extend([
|
|
2594
|
+
"",
|
|
2595
|
+
"### Tasks",
|
|
2596
|
+
f"- Total: {len(session.tasks)} (todo: {counts['todo']}, doing: {counts['doing']}, done: {counts['done']})",
|
|
2597
|
+
])
|
|
2598
|
+
open_tasks = [t for t in session.tasks if t.get("status") in {"todo", "doing"}][:5]
|
|
2599
|
+
for t in open_tasks:
|
|
2600
|
+
parts.append(f"- #{t.get('id')}: {t.get('title')} ({t.get('status')})")
|
|
2601
|
+
|
|
2602
|
+
# Convergence metrics
|
|
2603
|
+
parts.extend([
|
|
2604
|
+
"",
|
|
2605
|
+
"### Convergence Metrics",
|
|
2606
|
+
f"- Evidence collected: {len(session.evidence)}",
|
|
2607
|
+
])
|
|
2608
|
+
|
|
2609
|
+
if session.confidence_history:
|
|
2610
|
+
latest_conf = session.confidence_history[-1]
|
|
2611
|
+
parts.append(f"- Latest confidence: {latest_conf:.1%}")
|
|
2612
|
+
if len(session.confidence_history) >= 2:
|
|
2613
|
+
trend = session.confidence_history[-1] - session.confidence_history[-2]
|
|
2614
|
+
trend_str = "↑" if trend > 0 else "↓" if trend < 0 else "→"
|
|
2615
|
+
parts.append(f"- Confidence trend: {trend_str} ({trend:+.1%})")
|
|
2616
|
+
parts.append(f"- Confidence history: {[f'{c:.0%}' for c in session.confidence_history[-5:]]}")
|
|
2617
|
+
|
|
2618
|
+
if session.information_gain:
|
|
2619
|
+
total_gain = sum(session.information_gain)
|
|
2620
|
+
recent_gain = sum(session.information_gain[-3:]) if len(session.information_gain) >= 3 else total_gain
|
|
2621
|
+
parts.append(f"- Total information gain: {total_gain} evidence pieces")
|
|
2622
|
+
parts.append(f"- Recent gain (last 3): {recent_gain}")
|
|
2623
|
+
|
|
2624
|
+
if session.chunks:
|
|
2625
|
+
parts.append(f"- Chunks mapped: {len(session.chunks)}")
|
|
2626
|
+
|
|
2627
|
+
if session.evidence:
|
|
2628
|
+
parts.extend([
|
|
2629
|
+
"",
|
|
2630
|
+
"*Use `get_evidence()` to view citations.*",
|
|
2631
|
+
])
|
|
2632
|
+
|
|
2633
|
+
return "\n".join(parts)
|
|
2634
|
+
|
|
2635
|
+
@_tool()
|
|
2636
|
+
async def get_evidence(
|
|
2637
|
+
context_id: str = "default",
|
|
2638
|
+
limit: int = 20,
|
|
2639
|
+
offset: int = 0,
|
|
2640
|
+
source: Literal["any", "search", "peek", "exec", "manual", "action"] = "any",
|
|
2641
|
+
output: Literal["markdown", "json", "object"] = "markdown",
|
|
2642
|
+
) -> str | dict[str, Any]:
|
|
2643
|
+
"""Retrieve collected evidence/citations for a session.
|
|
2644
|
+
|
|
2645
|
+
Args:
|
|
2646
|
+
context_id: Context identifier
|
|
2647
|
+
limit: Max number of evidence items to return (default: 20)
|
|
2648
|
+
offset: Starting index (default: 0)
|
|
2649
|
+
source: Optional source filter (default: "any")
|
|
2650
|
+
output: "markdown" or "json" (default: "markdown")
|
|
2651
|
+
|
|
2652
|
+
Returns:
|
|
2653
|
+
Evidence list, formatted for inspection or programmatic parsing.
|
|
2654
|
+
"""
|
|
2655
|
+
if context_id not in self._sessions:
|
|
2656
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
2657
|
+
|
|
2658
|
+
session = self._sessions[context_id]
|
|
2659
|
+
evidence = session.evidence
|
|
2660
|
+
if source != "any":
|
|
2661
|
+
evidence = [e for e in evidence if e.source == source]
|
|
2662
|
+
|
|
2663
|
+
total = len(evidence)
|
|
2664
|
+
offset = max(0, offset)
|
|
2665
|
+
limit = 20 if limit <= 0 else limit
|
|
2666
|
+
|
|
2667
|
+
page = evidence[offset : offset + limit]
|
|
2668
|
+
|
|
2669
|
+
if output in {"json", "object"}:
|
|
2670
|
+
payload_items = [
|
|
2671
|
+
{
|
|
2672
|
+
"index": offset + i,
|
|
2673
|
+
"source": ev.source,
|
|
2674
|
+
"line_range": ev.line_range,
|
|
2675
|
+
"pattern": ev.pattern,
|
|
2676
|
+
"note": ev.note,
|
|
2677
|
+
"snippet": ev.snippet,
|
|
2678
|
+
"timestamp": ev.timestamp.isoformat(),
|
|
2679
|
+
}
|
|
2680
|
+
for i, ev in enumerate(page, 1)
|
|
2681
|
+
]
|
|
2682
|
+
payload = {
|
|
2683
|
+
"context_id": context_id,
|
|
2684
|
+
"total": total,
|
|
2685
|
+
"line_number_base": session.line_number_base,
|
|
2686
|
+
"items": payload_items,
|
|
2687
|
+
}
|
|
2688
|
+
if output == "object":
|
|
2689
|
+
return payload
|
|
2690
|
+
return json.dumps(payload, ensure_ascii=False, indent=2)
|
|
2691
|
+
|
|
2692
|
+
parts = [
|
|
2693
|
+
"## Evidence",
|
|
2694
|
+
"",
|
|
2695
|
+
f"**Context ID:** `{context_id}`",
|
|
2696
|
+
f"**Total items:** {total}",
|
|
2697
|
+
f"**Showing:** {len(page)} (offset={offset}, limit={limit})",
|
|
2698
|
+
f"**Line numbers:** {'1-based' if session.line_number_base == 1 else '0-based'}",
|
|
2699
|
+
]
|
|
2700
|
+
if source != "any":
|
|
2701
|
+
parts.append(f"**Source filter:** `{source}`")
|
|
2702
|
+
parts.append("")
|
|
2703
|
+
|
|
2704
|
+
if not page:
|
|
2705
|
+
parts.append("*(No evidence collected yet)*")
|
|
2706
|
+
return "\n".join(parts)
|
|
2707
|
+
|
|
2708
|
+
for i, ev in enumerate(page, offset + 1):
|
|
2709
|
+
source_info = f"[{ev.source}]"
|
|
2710
|
+
if ev.line_range:
|
|
2711
|
+
source_info += f" lines {ev.line_range[0]}-{ev.line_range[1]}"
|
|
2712
|
+
if ev.pattern:
|
|
2713
|
+
source_info += f" pattern: `{ev.pattern}`"
|
|
2714
|
+
if ev.note:
|
|
2715
|
+
source_info += f" note: {ev.note}"
|
|
2716
|
+
snippet = ev.snippet.strip()
|
|
2717
|
+
parts.append(f"{i}. {source_info}: \"{snippet}\"")
|
|
2718
|
+
|
|
2719
|
+
return "\n".join(parts)
|
|
2720
|
+
|
|
2721
|
+
@_tool()
|
|
2722
|
+
async def finalize(
|
|
2723
|
+
answer: str,
|
|
2724
|
+
confidence: Literal["high", "medium", "low"] = "medium",
|
|
2725
|
+
reasoning_summary: str | None = None,
|
|
2726
|
+
context_id: str = "default",
|
|
2727
|
+
) -> str:
|
|
2728
|
+
"""Mark the task complete with your final answer.
|
|
2729
|
+
|
|
2730
|
+
Use this when you have arrived at your final answer after
|
|
2731
|
+
exploring the context and reasoning through the problem.
|
|
2732
|
+
|
|
2733
|
+
Args:
|
|
2734
|
+
answer: Your final answer
|
|
2735
|
+
confidence: How confident you are (high/medium/low)
|
|
2736
|
+
reasoning_summary: Optional brief summary of your reasoning
|
|
2737
|
+
context_id: Context identifier
|
|
2738
|
+
|
|
2739
|
+
Returns:
|
|
2740
|
+
Formatted final answer
|
|
2741
|
+
"""
|
|
2742
|
+
parts = [
|
|
2743
|
+
"## Final Answer",
|
|
2744
|
+
"",
|
|
2745
|
+
answer,
|
|
2746
|
+
]
|
|
2747
|
+
|
|
2748
|
+
if reasoning_summary:
|
|
2749
|
+
parts.extend([
|
|
2750
|
+
"",
|
|
2751
|
+
"---",
|
|
2752
|
+
"",
|
|
2753
|
+
f"**Reasoning:** {reasoning_summary}",
|
|
2754
|
+
])
|
|
2755
|
+
|
|
2756
|
+
if context_id in self._sessions:
|
|
2757
|
+
session = self._sessions[context_id]
|
|
2758
|
+
parts.extend([
|
|
2759
|
+
"",
|
|
2760
|
+
f"*Completed after {session.iterations} iterations.*",
|
|
2761
|
+
])
|
|
2762
|
+
|
|
2763
|
+
parts.append(f"\n**Confidence:** {confidence}")
|
|
2764
|
+
|
|
2765
|
+
# Add evidence citations if available
|
|
2766
|
+
if context_id in self._sessions:
|
|
2767
|
+
session = self._sessions[context_id]
|
|
2768
|
+
if session.evidence:
|
|
2769
|
+
parts.extend([
|
|
2770
|
+
"",
|
|
2771
|
+
"---",
|
|
2772
|
+
"",
|
|
2773
|
+
"### Evidence Citations",
|
|
2774
|
+
f"*Line numbers are {'1-based' if session.line_number_base == 1 else '0-based'}.*",
|
|
2775
|
+
])
|
|
2776
|
+
for i, ev in enumerate(session.evidence[-10:], 1): # Last 10 pieces of evidence
|
|
2777
|
+
source_info = f"[{ev.source}]"
|
|
2778
|
+
if ev.line_range:
|
|
2779
|
+
source_info += f" lines {ev.line_range[0]}-{ev.line_range[1]}"
|
|
2780
|
+
if ev.pattern:
|
|
2781
|
+
source_info += f" pattern: `{ev.pattern}`"
|
|
2782
|
+
if ev.note:
|
|
2783
|
+
source_info += f" note: {ev.note}"
|
|
2784
|
+
parts.append(f"{i}. {source_info}: \"{ev.snippet[:80]}...\"" if len(ev.snippet) > 80 else f"{i}. {source_info}: \"{ev.snippet}\"")
|
|
2785
|
+
|
|
2786
|
+
_auto_save_memory_pack()
|
|
2787
|
+
return "\n".join(parts)
|
|
2788
|
+
|
|
2789
|
+
# =====================================================================
|
|
2790
|
+
# Sub-query tool (RLM-style recursive reasoning)
|
|
2791
|
+
# =====================================================================
|
|
2792
|
+
|
|
2793
|
+
@_tool()
|
|
2794
|
+
async def sub_query(
|
|
2795
|
+
prompt: str,
|
|
2796
|
+
context_slice: str | None = None,
|
|
2797
|
+
context_id: str = "default",
|
|
2798
|
+
backend: str = "auto",
|
|
2799
|
+
) -> str:
|
|
2800
|
+
"""Run a sub-query using a spawned sub-agent (RLM-style recursive reasoning).
|
|
2801
|
+
|
|
2802
|
+
This enables you to break large problems into chunks and query a sub-agent
|
|
2803
|
+
for each chunk, then aggregate results. The sub-agent runs independently
|
|
2804
|
+
and returns its response.
|
|
2805
|
+
|
|
2806
|
+
Backend priority (when backend="auto"):
|
|
2807
|
+
1. API - if ALEPH_SUB_QUERY_API_KEY or OPENAI_API_KEY is set (most reliable)
|
|
2808
|
+
2. codex CLI - if installed
|
|
2809
|
+
3. gemini CLI - if installed
|
|
2810
|
+
4. claude CLI - if installed (deprioritized: hangs in MCP/sandbox contexts)
|
|
2811
|
+
|
|
2812
|
+
Configure via environment:
|
|
2813
|
+
- ALEPH_SUB_QUERY_BACKEND: Force specific backend ("api", "claude", "codex", "gemini")
|
|
2814
|
+
- ALEPH_SUB_QUERY_API_KEY or OPENAI_API_KEY: API credentials
|
|
2815
|
+
- ALEPH_SUB_QUERY_URL or OPENAI_BASE_URL: Custom endpoint for OpenAI-compatible APIs
|
|
2816
|
+
- ALEPH_SUB_QUERY_MODEL: Model name (required)
|
|
2817
|
+
- ALEPH_SUB_QUERY_SHARE_SESSION: "true"/"false" to share the live MCP session with CLI sub-agents
|
|
2818
|
+
- ALEPH_SUB_QUERY_HTTP_HOST: Host for the streamable HTTP server (default: 127.0.0.1)
|
|
2819
|
+
- ALEPH_SUB_QUERY_HTTP_PORT: Port for the streamable HTTP server (default: 8765)
|
|
2820
|
+
- ALEPH_SUB_QUERY_HTTP_PATH: Path for the streamable HTTP server (default: /mcp)
|
|
2821
|
+
- ALEPH_SUB_QUERY_MCP_SERVER_NAME: MCP server name exposed to sub-agents (default: aleph_shared)
|
|
2822
|
+
|
|
2823
|
+
Args:
|
|
2824
|
+
prompt: The question/task for the sub-agent
|
|
2825
|
+
context_slice: Optional context to include (e.g., a chunk from ctx).
|
|
2826
|
+
If not provided, the context loaded for the context_id session is used automatically.
|
|
2827
|
+
context_id: Session to use. If context_slice is not provided, the session's
|
|
2828
|
+
loaded context is automatically passed to the sub-agent.
|
|
2829
|
+
backend: "auto", "claude", "codex", "gemini", or "api"
|
|
2830
|
+
|
|
2831
|
+
Returns:
|
|
2832
|
+
The sub-agent's response
|
|
2833
|
+
|
|
2834
|
+
Example usage in exec_python:
|
|
2835
|
+
chunks = chunk(100000) # 100k char chunks
|
|
2836
|
+
summaries = []
|
|
2837
|
+
for c in chunks:
|
|
2838
|
+
result = sub_query("Summarize this section:", context_slice=c)
|
|
2839
|
+
summaries.append(result)
|
|
2840
|
+
final = sub_query(f"Combine these summaries: {summaries}")
|
|
2841
|
+
"""
|
|
2842
|
+
session = self._sessions.get(context_id)
|
|
2843
|
+
if session:
|
|
2844
|
+
session.iterations += 1
|
|
2845
|
+
|
|
2846
|
+
# Auto-inject context from session if context_slice not provided
|
|
2847
|
+
# This matches the RLM pattern: if you specify a context_id, the sub-agent
|
|
2848
|
+
# should have access to that context without needing to pass it explicitly.
|
|
2849
|
+
if not context_slice and session:
|
|
2850
|
+
ctx_val = session.repl.get_variable("ctx")
|
|
2851
|
+
if ctx_val is not None:
|
|
2852
|
+
context_slice = _coerce_context_to_text(ctx_val)
|
|
2853
|
+
|
|
2854
|
+
# Truncate context if needed
|
|
2855
|
+
truncated = False
|
|
2856
|
+
if context_slice and len(context_slice) > self.sub_query_config.max_context_chars:
|
|
2857
|
+
context_slice = context_slice[:self.sub_query_config.max_context_chars]
|
|
2858
|
+
truncated = True
|
|
2859
|
+
|
|
2860
|
+
# Resolve backend
|
|
2861
|
+
resolved_backend = backend
|
|
2862
|
+
if backend == "auto":
|
|
2863
|
+
resolved_backend = detect_backend(self.sub_query_config)
|
|
2864
|
+
|
|
2865
|
+
allowed_backends = {"auto", "api", *CLI_BACKENDS}
|
|
2866
|
+
if resolved_backend not in allowed_backends:
|
|
2867
|
+
return f"Error: Unsupported backend '{resolved_backend}'."
|
|
2868
|
+
|
|
2869
|
+
try:
|
|
2870
|
+
# Try CLI first, fall back to API
|
|
2871
|
+
if resolved_backend in CLI_BACKENDS:
|
|
2872
|
+
mcp_server_url = None
|
|
2873
|
+
share_session = _get_env_bool("ALEPH_SUB_QUERY_SHARE_SESSION", False)
|
|
2874
|
+
if share_session and resolved_backend in {"claude", "codex", "gemini"}:
|
|
2875
|
+
host = os.environ.get("ALEPH_SUB_QUERY_HTTP_HOST", "127.0.0.1")
|
|
2876
|
+
port = _get_env_int("ALEPH_SUB_QUERY_HTTP_PORT", 8765)
|
|
2877
|
+
path = os.environ.get("ALEPH_SUB_QUERY_HTTP_PATH", "/mcp")
|
|
2878
|
+
server_name = os.environ.get(
|
|
2879
|
+
"ALEPH_SUB_QUERY_MCP_SERVER_NAME",
|
|
2880
|
+
"aleph_shared",
|
|
2881
|
+
).strip() or "aleph_shared"
|
|
2882
|
+
ok, url_or_err = await self._ensure_streamable_http_server(host, port, path)
|
|
2883
|
+
if not ok:
|
|
2884
|
+
return (
|
|
2885
|
+
"## Sub-Query Error\n\n"
|
|
2886
|
+
f"**Backend:** `{resolved_backend}`\n\n"
|
|
2887
|
+
f"Failed to start streamable HTTP server: {url_or_err}"
|
|
2888
|
+
)
|
|
2889
|
+
mcp_server_url = url_or_err
|
|
2890
|
+
prompt = (
|
|
2891
|
+
f"{prompt}\n\n"
|
|
2892
|
+
f"[MCP tools are available via the live Aleph server. "
|
|
2893
|
+
f"Use context_id={context_id!r} when calling tools. "
|
|
2894
|
+
f"Tools are prefixed with `mcp__{server_name}__`.]"
|
|
2895
|
+
)
|
|
2896
|
+
success, output = await run_cli_sub_query(
|
|
2897
|
+
prompt=prompt,
|
|
2898
|
+
context_slice=context_slice,
|
|
2899
|
+
backend=resolved_backend, # type: ignore
|
|
2900
|
+
timeout=self.sub_query_config.cli_timeout_seconds,
|
|
2901
|
+
cwd=self.action_config.workspace_root if self.action_config.enabled else None,
|
|
2902
|
+
max_output_chars=self.sub_query_config.cli_max_output_chars,
|
|
2903
|
+
mcp_server_url=mcp_server_url,
|
|
2904
|
+
mcp_server_name=server_name if mcp_server_url else "aleph_shared",
|
|
2905
|
+
trust_mcp_server=True,
|
|
2906
|
+
)
|
|
2907
|
+
else:
|
|
2908
|
+
success, output = await run_api_sub_query(
|
|
2909
|
+
prompt=prompt,
|
|
2910
|
+
context_slice=context_slice,
|
|
2911
|
+
model=self.sub_query_config.api_model,
|
|
2912
|
+
api_key_env=self.sub_query_config.api_key_env,
|
|
2913
|
+
api_base_url_env=self.sub_query_config.api_base_url_env,
|
|
2914
|
+
api_model_env=self.sub_query_config.api_model_env,
|
|
2915
|
+
timeout=self.sub_query_config.api_timeout_seconds,
|
|
2916
|
+
system_prompt=self.sub_query_config.system_prompt if self.sub_query_config.include_system_prompt else None,
|
|
2917
|
+
)
|
|
2918
|
+
except Exception as e:
|
|
2919
|
+
success = False
|
|
2920
|
+
output = f"{type(e).__name__}: {e}"
|
|
2921
|
+
|
|
2922
|
+
# Record evidence
|
|
2923
|
+
if session:
|
|
2924
|
+
session.evidence.append(_Evidence(
|
|
2925
|
+
source="sub_query",
|
|
2926
|
+
line_range=None,
|
|
2927
|
+
pattern=None,
|
|
2928
|
+
snippet=output[:200] if success else f"[ERROR] {output[:150]}",
|
|
2929
|
+
note=f"backend={resolved_backend}" + (" [truncated context]" if truncated else ""),
|
|
2930
|
+
))
|
|
2931
|
+
session.information_gain.append(1 if success else 0)
|
|
2932
|
+
|
|
2933
|
+
if not success:
|
|
2934
|
+
return f"## Sub-Query Error\n\n**Backend:** `{resolved_backend}`\n\n{output}"
|
|
2935
|
+
|
|
2936
|
+
parts = [
|
|
2937
|
+
"## Sub-Query Result",
|
|
2938
|
+
"",
|
|
2939
|
+
f"**Backend:** `{resolved_backend}`",
|
|
2940
|
+
]
|
|
2941
|
+
if truncated:
|
|
2942
|
+
parts.append(f"*Note: Context was truncated to {self.sub_query_config.max_context_chars:,} chars*")
|
|
2943
|
+
parts.extend(["", "---", "", output])
|
|
2944
|
+
|
|
2945
|
+
return "\n".join(parts)
|
|
2946
|
+
|
|
2947
|
+
# =====================================================================
|
|
2948
|
+
# Remote MCP orchestration (v0.5 last mile)
|
|
2949
|
+
# =====================================================================
|
|
2950
|
+
|
|
2951
|
+
@_tool()
|
|
2952
|
+
async def add_remote_server(
|
|
2953
|
+
server_id: str,
|
|
2954
|
+
command: str,
|
|
2955
|
+
args: list[str] | None = None,
|
|
2956
|
+
cwd: str | None = None,
|
|
2957
|
+
env: dict[str, str] | None = None,
|
|
2958
|
+
allow_tools: list[str] | None = None,
|
|
2959
|
+
deny_tools: list[str] | None = None,
|
|
2960
|
+
connect: bool = True,
|
|
2961
|
+
confirm: bool = False,
|
|
2962
|
+
output: Literal["json", "markdown", "object"] = "markdown",
|
|
2963
|
+
) -> str | dict[str, Any]:
|
|
2964
|
+
"""Register a remote MCP server (stdio transport) for orchestration.
|
|
2965
|
+
|
|
2966
|
+
This spawns a subprocess and speaks MCP over stdin/stdout.
|
|
2967
|
+
|
|
2968
|
+
Args:
|
|
2969
|
+
server_id: Local identifier for the remote server
|
|
2970
|
+
command: Executable to run (e.g. 'python3')
|
|
2971
|
+
args: Command arguments (e.g. ['-m','some.mcp.server'])
|
|
2972
|
+
cwd: Working directory for the subprocess
|
|
2973
|
+
env: Extra environment variables for the subprocess
|
|
2974
|
+
allow_tools: Optional allowlist of tool names
|
|
2975
|
+
deny_tools: Optional denylist of tool names
|
|
2976
|
+
connect: If true, connect immediately and cache tool list
|
|
2977
|
+
confirm: Required if actions are enabled
|
|
2978
|
+
output: Output format
|
|
2979
|
+
"""
|
|
2980
|
+
err = _require_actions(confirm)
|
|
2981
|
+
if err:
|
|
2982
|
+
return _format_error(err, output=output)
|
|
2983
|
+
|
|
2984
|
+
if server_id in self._remote_servers:
|
|
2985
|
+
return _format_error(f"Remote server '{server_id}' already exists.", output=output)
|
|
2986
|
+
|
|
2987
|
+
handle = _RemoteServerHandle(
|
|
2988
|
+
command=command,
|
|
2989
|
+
args=args or [],
|
|
2990
|
+
cwd=Path(cwd) if cwd else None,
|
|
2991
|
+
env=env,
|
|
2992
|
+
allow_tools=allow_tools,
|
|
2993
|
+
deny_tools=deny_tools,
|
|
2994
|
+
)
|
|
2995
|
+
self._remote_servers[server_id] = handle
|
|
2996
|
+
|
|
2997
|
+
tools: list[dict[str, Any]] | None = None
|
|
2998
|
+
if connect:
|
|
2999
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
3000
|
+
if not ok:
|
|
3001
|
+
return _format_error(str(res), output=output)
|
|
3002
|
+
handle = res # type: ignore[assignment]
|
|
3003
|
+
try:
|
|
3004
|
+
r = await handle.session.list_tools() # type: ignore[union-attr]
|
|
3005
|
+
tools = _to_jsonable(r)
|
|
3006
|
+
except Exception:
|
|
3007
|
+
tools = None
|
|
3008
|
+
|
|
3009
|
+
payload: dict[str, Any] = {
|
|
3010
|
+
"server_id": server_id,
|
|
3011
|
+
"command": command,
|
|
3012
|
+
"args": args or [],
|
|
3013
|
+
"cwd": str(handle.cwd) if handle.cwd else None,
|
|
3014
|
+
"allow_tools": allow_tools,
|
|
3015
|
+
"deny_tools": deny_tools,
|
|
3016
|
+
"connected": handle.session is not None,
|
|
3017
|
+
"tools": tools,
|
|
3018
|
+
}
|
|
3019
|
+
return _format_payload(payload, output=output)
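A sketch of the registration call from the client side. The argument names mirror the signature above; the server id, command, module path, and tool names are hypothetical, and `session` is an initialized `mcp.ClientSession` as in the earlier finalize sketch (so this fragment runs inside an async function).

```python
# Registering a stdio-based remote MCP server (names and module path are hypothetical).
result = await session.call_tool(
    "add_remote_server",
    {
        "server_id": "docs",
        "command": "python3",
        "args": ["-m", "some_docs_mcp_server"],  # hypothetical MCP server module
        "allow_tools": ["search_docs"],          # optional allowlist
        "connect": True,                         # connect now and cache the tool list
        "confirm": True,                         # needed when actions require confirmation
    },
)
print(result.content[0].text)
```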
|
|
3020
|
+
|
|
3021
|
+
@_tool()
|
|
3022
|
+
async def list_remote_servers(
|
|
3023
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
3024
|
+
) -> str | dict[str, Any]:
|
|
3025
|
+
"""List all registered remote MCP servers."""
|
|
3026
|
+
items = []
|
|
3027
|
+
for sid, h in self._remote_servers.items():
|
|
3028
|
+
items.append(
|
|
3029
|
+
{
|
|
3030
|
+
"server_id": sid,
|
|
3031
|
+
"command": h.command,
|
|
3032
|
+
"args": h.args,
|
|
3033
|
+
"cwd": str(h.cwd) if h.cwd else None,
|
|
3034
|
+
"connected": h.session is not None,
|
|
3035
|
+
"connected_at": h.connected_at.isoformat() if h.connected_at else None,
|
|
3036
|
+
"allow_tools": h.allow_tools,
|
|
3037
|
+
"deny_tools": h.deny_tools,
|
|
3038
|
+
}
|
|
3039
|
+
)
|
|
3040
|
+
return _format_payload({"count": len(items), "items": items}, output=output)
|
|
3041
|
+
|
|
3042
|
+
@_tool()
|
|
3043
|
+
async def list_remote_tools(
|
|
3044
|
+
server_id: str,
|
|
3045
|
+
confirm: bool = False,
|
|
3046
|
+
output: Literal["json", "markdown", "object"] = "json",
|
|
3047
|
+
) -> str | dict[str, Any]:
|
|
3048
|
+
"""List tools available on a remote MCP server."""
|
|
3049
|
+
err = _require_actions(confirm)
|
|
3050
|
+
if err:
|
|
3051
|
+
return _format_error(err, output=output)
|
|
3052
|
+
|
|
3053
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
3054
|
+
if not ok:
|
|
3055
|
+
return _format_error(str(res), output=output)
|
|
3056
|
+
ok2, tools = await self._remote_list_tools(server_id)
|
|
3057
|
+
if not ok2:
|
|
3058
|
+
return _format_error(str(tools), output=output)
|
|
3059
|
+
return _format_payload(tools, output=output)
|
|
3060
|
+
|
|
3061
|
+
@_tool()
|
|
3062
|
+
async def call_remote_tool(
|
|
3063
|
+
server_id: str,
|
|
3064
|
+
tool: str,
|
|
3065
|
+
arguments: dict[str, Any] | None = None,
|
|
3066
|
+
timeout_seconds: float | None = DEFAULT_REMOTE_TOOL_TIMEOUT_SECONDS,
|
|
3067
|
+
confirm: bool = False,
|
|
3068
|
+
output: Literal["json", "markdown", "object"] = "markdown",
|
|
3069
|
+
) -> str | dict[str, Any]:
|
|
3070
|
+
"""Call a tool on a remote MCP server.
|
|
3071
|
+
|
|
3072
|
+
Args:
|
|
3073
|
+
server_id: Registered remote server ID
|
|
3074
|
+
tool: Tool name
|
|
3075
|
+
arguments: Tool arguments object
|
|
3076
|
+
timeout_seconds: Tool call timeout (best-effort). Defaults to ALEPH_REMOTE_TOOL_TIMEOUT or 120s.
|
|
3077
|
+
confirm: Required if actions are enabled
|
|
3078
|
+
output: Output format
|
|
3079
|
+
"""
|
|
3080
|
+
err = _require_actions(confirm)
|
|
3081
|
+
if err:
|
|
3082
|
+
return _format_error(err, output=output)
|
|
3083
|
+
|
|
3084
|
+
ok, res = await self._ensure_remote_server(server_id)
|
|
3085
|
+
if not ok:
|
|
3086
|
+
return _format_error(str(res), output=output)
|
|
3087
|
+
ok2, result_jsonable = await self._remote_call_tool(
|
|
3088
|
+
server_id=server_id,
|
|
3089
|
+
tool=tool,
|
|
3090
|
+
arguments=arguments,
|
|
3091
|
+
timeout_seconds=timeout_seconds,
|
|
3092
|
+
)
|
|
3093
|
+
if not ok2:
|
|
3094
|
+
return _format_error(str(result_jsonable), output=output)
|
|
3095
|
+
|
|
3096
|
+
if output == "object":
|
|
3097
|
+
return result_jsonable
|
|
3098
|
+
if output == "json":
|
|
3099
|
+
return json.dumps(result_jsonable, ensure_ascii=False, indent=2)
|
|
3100
|
+
|
|
3101
|
+
parts = [
|
|
3102
|
+
"## Remote Tool Result",
|
|
3103
|
+
"",
|
|
3104
|
+
f"**Server:** `{server_id}`",
|
|
3105
|
+
f"**Tool:** `{tool}`",
|
|
3106
|
+
"",
|
|
3107
|
+
"```json",
|
|
3108
|
+
json.dumps(result_jsonable, ensure_ascii=False, indent=2)[:10_000],
|
|
3109
|
+
"```",
|
|
3110
|
+
]
|
|
3111
|
+
return "\n".join(parts)
|
|
3112
|
+
|
|
3113
|
+
@_tool()
|
|
3114
|
+
async def close_remote_server(
|
|
3115
|
+
server_id: str,
|
|
3116
|
+
confirm: bool = False,
|
|
3117
|
+
output: Literal["json", "markdown", "object"] = "markdown",
|
|
3118
|
+
) -> str | dict[str, Any]:
|
|
3119
|
+
"""Close a remote MCP server connection (terminates subprocess)."""
|
|
3120
|
+
err = _require_actions(confirm)
|
|
3121
|
+
if err:
|
|
3122
|
+
return _format_error(err, output=output)
|
|
3123
|
+
|
|
3124
|
+
ok, msg = await self._close_remote_server(server_id)
|
|
3125
|
+
if output == "object":
|
|
3126
|
+
return {"ok": ok, "message": msg}
|
|
3127
|
+
if output == "json":
|
|
3128
|
+
return json.dumps({"ok": ok, "message": msg}, indent=2)
|
|
3129
|
+
return msg
|
|
3130
|
+
|
|
3131
|
+
@_tool()
|
|
3132
|
+
async def chunk_context(
|
|
3133
|
+
chunk_size: int = 2000,
|
|
3134
|
+
overlap: int = 200,
|
|
3135
|
+
context_id: str = "default",
|
|
3136
|
+
) -> str:
|
|
3137
|
+
"""Split context into chunks and return metadata for navigation.
|
|
3138
|
+
|
|
3139
|
+
Use this to understand how to navigate large documents systematically.
|
|
3140
|
+
Returns chunk boundaries so you can peek specific chunks.
|
|
3141
|
+
|
|
3142
|
+
Args:
|
|
3143
|
+
chunk_size: Characters per chunk (default: 2000)
|
|
3144
|
+
overlap: Overlap between chunks (default: 200)
|
|
3145
|
+
context_id: Context identifier
|
|
3146
|
+
|
|
3147
|
+
Returns:
|
|
3148
|
+
JSON with chunk metadata (index, start_char, end_char, preview)
|
|
3149
|
+
"""
|
|
3150
|
+
if context_id not in self._sessions:
|
|
3151
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
3152
|
+
|
|
3153
|
+
session = self._sessions[context_id]
|
|
3154
|
+
repl = session.repl
|
|
3155
|
+
session.iterations += 1
|
|
3156
|
+
|
|
3157
|
+
fn = repl.get_variable("chunk")
|
|
3158
|
+
if not callable(fn):
|
|
3159
|
+
return "Error: chunk() helper is not available"
|
|
3160
|
+
|
|
3161
|
+
try:
|
|
3162
|
+
chunks = fn(chunk_size, overlap)
|
|
3163
|
+
except ValueError as e:
|
|
3164
|
+
return f"Error: {e}"
|
|
3165
|
+
|
|
3166
|
+
# Build chunk metadata
|
|
3167
|
+
chunk_meta = []
|
|
3168
|
+
pos = 0
|
|
3169
|
+
for i, chunk_text in enumerate(chunks):
|
|
3170
|
+
chunk_meta.append({
|
|
3171
|
+
"index": i,
|
|
3172
|
+
"start_char": pos,
|
|
3173
|
+
"end_char": pos + len(chunk_text),
|
|
3174
|
+
"size": len(chunk_text),
|
|
3175
|
+
"preview": chunk_text[:100] + "..." if len(chunk_text) > 100 else chunk_text,
|
|
3176
|
+
})
|
|
3177
|
+
pos += len(chunk_text) - overlap if i < len(chunks) - 1 else len(chunk_text)
|
|
3178
|
+
|
|
3179
|
+
# Store in session for reference
|
|
3180
|
+
session.chunks = chunk_meta
|
|
3181
|
+
|
|
3182
|
+
parts = [
|
|
3183
|
+
"## Context Chunks",
|
|
3184
|
+
"",
|
|
3185
|
+
f"**Total chunks:** {len(chunks)}",
|
|
3186
|
+
f"**Chunk size:** {chunk_size} chars",
|
|
3187
|
+
f"**Overlap:** {overlap} chars",
|
|
3188
|
+
"",
|
|
3189
|
+
"### Chunk Map",
|
|
3190
|
+
"",
|
|
3191
|
+
]
|
|
3192
|
+
|
|
3193
|
+
for cm in chunk_meta:
|
|
3194
|
+
parts.append(f"- **Chunk {cm['index']}** ({cm['start_char']}-{cm['end_char']}): {cm['preview'][:60]}...")
|
|
3195
|
+
|
|
3196
|
+
parts.extend([
|
|
3197
|
+
"",
|
|
3198
|
+
"*Use `peek_context(start, end, unit='chars')` to view specific chunks.*",
|
|
3199
|
+
])
|
|
3200
|
+
|
|
3201
|
+
return "\n".join(parts)
|
|
3202
|
+
|
|
3203
|
+
@_tool()
|
|
3204
|
+
async def evaluate_progress(
|
|
3205
|
+
current_understanding: str,
|
|
3206
|
+
remaining_questions: list[str] | str | None = None,
|
|
3207
|
+
confidence_score: float = 0.5,
|
|
3208
|
+
context_id: str = "default",
|
|
3209
|
+
) -> str:
|
|
3210
|
+
"""Self-evaluate your progress to decide whether to continue or finalize.
|
|
3211
|
+
|
|
3212
|
+
Use this periodically to assess whether you have enough information
|
|
3213
|
+
to answer the question, or if more exploration is needed.
|
|
3214
|
+
|
|
3215
|
+
Args:
|
|
3216
|
+
current_understanding: Summary of what you've learned so far
|
|
3217
|
+
remaining_questions: List of unanswered questions (if any)
|
|
3218
|
+
confidence_score: Your confidence 0.0-1.0 in current understanding
|
|
3219
|
+
context_id: Context identifier
|
|
3220
|
+
|
|
3221
|
+
Returns:
|
|
3222
|
+
Structured evaluation with recommendation (continue/finalize)
|
|
3223
|
+
"""
|
|
3224
|
+
if isinstance(remaining_questions, str):
|
|
3225
|
+
remaining_questions = [remaining_questions]
|
|
3226
|
+
if context_id in self._sessions:
|
|
3227
|
+
session = self._sessions[context_id]
|
|
3228
|
+
session.iterations += 1
|
|
3229
|
+
session.confidence_history.append(confidence_score)
|
|
3230
|
+
|
|
3231
|
+
parts = [
|
|
3232
|
+
"## Progress Evaluation",
|
|
3233
|
+
"",
|
|
3234
|
+
f"**Current Understanding:**",
|
|
3235
|
+
current_understanding,
|
|
3236
|
+
"",
|
|
3237
|
+
]
|
|
3238
|
+
|
|
3239
|
+
if remaining_questions:
|
|
3240
|
+
parts.extend([
|
|
3241
|
+
"**Remaining Questions:**",
|
|
3242
|
+
])
|
|
3243
|
+
for q in remaining_questions:
|
|
3244
|
+
parts.append(f"- {q}")
|
|
3245
|
+
parts.append("")
|
|
3246
|
+
|
|
3247
|
+
parts.append(f"**Confidence Score:** {confidence_score:.1%}")
|
|
3248
|
+
|
|
3249
|
+
# Analyze convergence
|
|
3250
|
+
if context_id in self._sessions:
|
|
3251
|
+
session = self._sessions[context_id]
|
|
3252
|
+
parts.extend([
|
|
3253
|
+
"",
|
|
3254
|
+
"### Convergence Analysis",
|
|
3255
|
+
f"- Iterations: {session.iterations}",
|
|
3256
|
+
f"- Evidence collected: {len(session.evidence)}",
|
|
3257
|
+
])
|
|
3258
|
+
|
|
3259
|
+
if len(session.confidence_history) >= 2:
|
|
3260
|
+
trend = session.confidence_history[-1] - session.confidence_history[-2]
|
|
3261
|
+
trend_str = "increasing" if trend > 0 else "decreasing" if trend < 0 else "stable"
|
|
3262
|
+
parts.append(f"- Confidence trend: {trend_str} ({trend:+.1%})")
|
|
3263
|
+
|
|
3264
|
+
if session.information_gain:
|
|
3265
|
+
recent_gain = sum(session.information_gain[-3:]) if len(session.information_gain) >= 3 else sum(session.information_gain)
|
|
3266
|
+
parts.append(f"- Recent information gain: {recent_gain} evidence pieces (last 3 ops)")
|
|
3267
|
+
|
|
3268
|
+
# Recommendation
|
|
3269
|
+
parts.extend([
|
|
3270
|
+
"",
|
|
3271
|
+
"---",
|
|
3272
|
+
"",
|
|
3273
|
+
"### Recommendation",
|
|
3274
|
+
])
|
|
3275
|
+
|
|
3276
|
+
if confidence_score >= 0.8:
|
|
3277
|
+
parts.append("**READY TO FINALIZE** - High confidence achieved. Use `finalize()` to provide your answer.")
|
|
3278
|
+
elif confidence_score >= 0.5 and not remaining_questions:
|
|
3279
|
+
parts.append("**CONSIDER FINALIZING** - Moderate confidence with no remaining questions. You may finalize or continue exploring.")
|
|
3280
|
+
else:
|
|
3281
|
+
parts.append("**CONTINUE EXPLORING** - More investigation needed. Use `search_context`, `peek_context`, or `think` to gather more evidence.")
|
|
3282
|
+
|
|
3283
|
+
return "\n".join(parts)
|
|
3284
|
+
|
|
3285
|
+
@_tool()
|
|
3286
|
+
async def summarize_so_far(
|
|
3287
|
+
include_evidence: bool = True,
|
|
3288
|
+
include_variables: bool = True,
|
|
3289
|
+
clear_history: bool = False,
|
|
3290
|
+
context_id: str = "default",
|
|
3291
|
+
) -> str:
|
|
3292
|
+
"""Compress reasoning history to manage context window.
|
|
3293
|
+
|
|
3294
|
+
Use this when your conversation is getting long to create a
|
|
3295
|
+
condensed summary of your progress that can replace earlier context.
|
|
3296
|
+
|
|
3297
|
+
Args:
|
|
3298
|
+
include_evidence: Include evidence citations in summary
|
|
3299
|
+
include_variables: Include computed variables
|
|
3300
|
+
clear_history: Clear think_history after summarizing (to save memory)
|
|
3301
|
+
context_id: Context identifier
|
|
3302
|
+
|
|
3303
|
+
Returns:
|
|
3304
|
+
Compressed reasoning trace
|
|
3305
|
+
"""
|
|
3306
|
+
if context_id not in self._sessions:
|
|
3307
|
+
return f"Error: No context loaded with ID '{context_id}'. Use load_context first."
|
|
3308
|
+
|
|
3309
|
+
session = self._sessions[context_id]
|
|
3310
|
+
|
|
3311
|
+
parts = [
|
|
3312
|
+
"## Context Summary",
|
|
3313
|
+
"",
|
|
3314
|
+
f"**Context ID:** `{context_id}`",
|
|
3315
|
+
f"**Duration:** {datetime.now() - session.created_at}",
|
|
3316
|
+
f"**Iterations:** {session.iterations}",
|
|
3317
|
+
"",
|
|
3318
|
+
]
|
|
3319
|
+
|
|
3320
|
+
# Reasoning history
|
|
3321
|
+
if session.think_history:
|
|
3322
|
+
parts.extend([
|
|
3323
|
+
"### Reasoning Steps",
|
|
3324
|
+
])
|
|
3325
|
+
for i, q in enumerate(session.think_history[-5:], 1):
|
|
3326
|
+
parts.append(f"{i}. {q[:150]}{'...' if len(q) > 150 else ''}")
|
|
3327
|
+
parts.append("")
|
|
3328
|
+
|
|
3329
|
+
if session.tasks:
|
|
3330
|
+
counts = {
|
|
3331
|
+
"todo": sum(1 for t in session.tasks if t.get("status") == "todo"),
|
|
3332
|
+
"doing": sum(1 for t in session.tasks if t.get("status") == "doing"),
|
|
3333
|
+
"done": sum(1 for t in session.tasks if t.get("status") == "done"),
|
|
3334
|
+
}
|
|
3335
|
+
parts.extend([
|
|
3336
|
+
"### Tasks",
|
|
3337
|
+
f"Total: {len(session.tasks)} (todo: {counts['todo']}, doing: {counts['doing']}, done: {counts['done']})",
|
|
3338
|
+
])
|
|
3339
|
+
for t in session.tasks[:5]:
|
|
3340
|
+
parts.append(f"- #{t.get('id')}: {t.get('title')} ({t.get('status')})")
|
|
3341
|
+
parts.append("")
|
|
3342
|
+
|
|
3343
|
+
# Evidence summary
|
|
3344
|
+
if include_evidence and session.evidence:
|
|
3345
|
+
parts.extend([
|
|
3346
|
+
"### Evidence Collected",
|
|
3347
|
+
f"Total: {len(session.evidence)} pieces",
|
|
3348
|
+
"",
|
|
3349
|
+
])
|
|
3350
|
+
# Group by source
|
|
3351
|
+
by_source: dict[str, int] = {}
|
|
3352
|
+
for ev in session.evidence:
|
|
3353
|
+
by_source[ev.source] = by_source.get(ev.source, 0) + 1
|
|
3354
|
+
for source, count in by_source.items():
|
|
3355
|
+
parts.append(f"- {source}: {count}")
|
|
3356
|
+
parts.append("")
|
|
3357
|
+
|
|
3358
|
+
# Show key evidence
|
|
3359
|
+
parts.append("**Key Evidence:**")
|
|
3360
|
+
for ev in session.evidence[-5:]: # Last 5
|
|
3361
|
+
snippet = ev.snippet[:100] + ("..." if len(ev.snippet) > 100 else "")
|
|
3362
|
+
note = f" (note: {ev.note})" if ev.note else ""
|
|
3363
|
+
parts.append(f"- [{ev.source}] {snippet}{note}")
|
|
3364
|
+
parts.append("")
|
|
3365
|
+
|
|
3366
|
+
# Variables
|
|
3367
|
+
if include_variables:
|
|
3368
|
+
repl = session.repl
|
|
3369
|
+
excluded = {
|
|
3370
|
+
"ctx",
|
|
3371
|
+
"peek",
|
|
3372
|
+
"lines",
|
|
3373
|
+
"search",
|
|
3374
|
+
"chunk",
|
|
3375
|
+
"cite",
|
|
3376
|
+
"line_number_base",
|
|
3377
|
+
"allowed_imports",
|
|
3378
|
+
"is_import_allowed",
|
|
3379
|
+
"blocked_names",
|
|
3380
|
+
"__builtins__",
|
|
3381
|
+
}
|
|
3382
|
+
variables = {
|
|
3383
|
+
k: v for k, v in repl._namespace.items()
|
|
3384
|
+
if k not in excluded and not k.startswith("_")
|
|
3385
|
+
}
|
|
3386
|
+
if variables:
|
|
3387
|
+
parts.extend([
|
|
3388
|
+
"### Computed Variables",
|
|
3389
|
+
])
|
|
3390
|
+
for name, val in variables.items():
|
|
3391
|
+
val_str = str(val)[:100]
|
|
3392
|
+
parts.append(f"- `{name}` = {val_str}{'...' if len(str(val)) > 100 else ''}")
|
|
3393
|
+
parts.append("")
|
|
3394
|
+
|
|
3395
|
+
# Convergence
|
|
3396
|
+
if session.confidence_history:
|
|
3397
|
+
latest = session.confidence_history[-1]
|
|
3398
|
+
parts.extend([
|
|
3399
|
+
"### Convergence Status",
|
|
3400
|
+
f"- Latest confidence: {latest:.1%}",
|
|
3401
|
+
f"- Confidence history: {[f'{c:.0%}' for c in session.confidence_history[-5:]]}",
|
|
3402
|
+
])
|
|
3403
|
+
|
|
3404
|
+
# Clear history if requested
|
|
3405
|
+
if clear_history:
|
|
3406
|
+
session.think_history = []
|
|
3407
|
+
parts.extend([
|
|
3408
|
+
"",
|
|
3409
|
+
"*Reasoning history cleared to save memory.*",
|
|
3410
|
+
])
|
|
3411
|
+
|
|
3412
|
+
return "\n".join(parts)
|
|
3413
|
+
|
|
3414
|
+
async def run(self, transport: str = "stdio") -> None:
|
|
3415
|
+
"""Run the MCP server."""
|
|
3416
|
+
if transport != "stdio":
|
|
3417
|
+
raise ValueError("Only stdio transport is supported")
|
|
3418
|
+
|
|
3419
|
+
await self.server.run_stdio_async()
|
|
3420
|
+
|
|
3421
|
+
|
|
3422
|
+
_mcp_instance: Any | None = None
|
|
3423
|
+
|
|
3424
|
+
|
|
3425
|
+
def _get_mcp_instance() -> Any:
|
|
3426
|
+
global _mcp_instance
|
|
3427
|
+
if _mcp_instance is None:
|
|
3428
|
+
_mcp_instance = AlephMCPServerLocal().server
|
|
3429
|
+
return _mcp_instance
|
|
3430
|
+
|
|
3431
|
+
|
|
3432
|
+
def __getattr__(name: str) -> Any:
|
|
3433
|
+
if name == "mcp":
|
|
3434
|
+
return _get_mcp_instance()
|
|
3435
|
+
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
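The module-level `__getattr__` (PEP 562) makes `mcp` a lazily created attribute, so importing the module stays cheap until the server object is first needed:

```python
# Lazy access to the shared server instance (created on first attribute access).
from aleph.mcp import local_server

mcp = local_server.mcp        # triggers _get_mcp_instance(), building AlephMCPServerLocal().server
mcp_again = local_server.mcp  # returns the same cached instance
assert mcp is mcp_again
```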
|
|
3436
|
+
|
|
3437
|
+
|
|
3438
|
+
def main() -> None:
|
|
3439
|
+
"""CLI entry point: `aleph` or `python -m aleph.mcp.local_server`"""
|
|
3440
|
+
import argparse
|
|
3441
|
+
|
|
3442
|
+
parser = argparse.ArgumentParser(
|
|
3443
|
+
description="Run Aleph as an MCP server for local AI reasoning"
|
|
3444
|
+
)
|
|
3445
|
+
parser.add_argument(
|
|
3446
|
+
"--timeout",
|
|
3447
|
+
type=float,
|
|
3448
|
+
default=60.0,
|
|
3449
|
+
help="Code execution timeout in seconds (default: 60)",
|
|
3450
|
+
)
|
|
3451
|
+
parser.add_argument(
|
|
3452
|
+
"--max-output",
|
|
3453
|
+
type=int,
|
|
3454
|
+
default=50000,
|
|
3455
|
+
help="Maximum output characters (default: 50000)",
|
|
3456
|
+
)
|
|
3457
|
+
parser.add_argument(
|
|
3458
|
+
"--enable-actions",
|
|
3459
|
+
action="store_true",
|
|
3460
|
+
help="Enable action tools (run_command/read_file/write_file/run_tests)",
|
|
3461
|
+
)
|
|
3462
|
+
parser.add_argument(
|
|
3463
|
+
"--workspace-root",
|
|
3464
|
+
type=str,
|
|
3465
|
+
default=None,
|
|
3466
|
+
help="Workspace root for action tools (default: ALEPH_WORKSPACE_ROOT or auto-detect git root from invocation cwd)",
|
|
3467
|
+
)
|
|
3468
|
+
parser.add_argument(
|
|
3469
|
+
"--workspace-mode",
|
|
3470
|
+
type=str,
|
|
3471
|
+
choices=["fixed", "git", "any"],
|
|
3472
|
+
default=DEFAULT_WORKSPACE_MODE,
|
|
3473
|
+
help="Path scope for action tools: fixed (workspace root only), git (any git repo), any (no path restriction)",
|
|
3474
|
+
)
|
|
3475
|
+
parser.add_argument(
|
|
3476
|
+
"--require-confirmation",
|
|
3477
|
+
action="store_true",
|
|
3478
|
+
help="Require confirm=true for action tools",
|
|
3479
|
+
)
|
|
3480
|
+
parser.add_argument(
|
|
3481
|
+
"--max-file-size",
|
|
3482
|
+
type=int,
|
|
3483
|
+
default=1_000_000_000,
|
|
3484
|
+
help="Max file size in bytes for load_file/read_file (default: 1GB). Increase based on your RAM—the LLM only sees query results.",
|
|
3485
|
+
)
|
|
3486
|
+
parser.add_argument(
|
|
3487
|
+
"--max-write-bytes",
|
|
3488
|
+
type=int,
|
|
3489
|
+
default=100_000_000,
|
|
3490
|
+
help="Max file size in bytes for write_file/save_session (default: 100MB).",
|
|
3491
|
+
)
|
|
3492
|
+
env_tool_docs = os.environ.get("ALEPH_TOOL_DOCS")
|
|
3493
|
+
default_tool_docs = env_tool_docs if env_tool_docs in {"concise", "full"} else DEFAULT_TOOL_DOCS_MODE
|
|
3494
|
+
parser.add_argument(
|
|
3495
|
+
"--tool-docs",
|
|
3496
|
+
type=str,
|
|
3497
|
+
choices=["concise", "full"],
|
|
3498
|
+
default=default_tool_docs,
|
|
3499
|
+
help="Tool description verbosity for MCP clients: concise (default) or full",
|
|
3500
|
+
)
|
|
3501
|
+
|
|
3502
|
+
args = parser.parse_args()
|
|
3503
|
+
|
|
3504
|
+
config = SandboxConfig(
|
|
3505
|
+
timeout_seconds=args.timeout,
|
|
3506
|
+
max_output_chars=args.max_output,
|
|
3507
|
+
)
|
|
3508
|
+
|
|
3509
|
+
action_cfg = ActionConfig(
|
|
3510
|
+
enabled=bool(args.enable_actions),
|
|
3511
|
+
workspace_root=Path(args.workspace_root).resolve() if args.workspace_root else _detect_workspace_root(),
|
|
3512
|
+
workspace_mode=cast(WorkspaceMode, args.workspace_mode),
|
|
3513
|
+
require_confirmation=bool(args.require_confirmation),
|
|
3514
|
+
max_read_bytes=args.max_file_size,
|
|
3515
|
+
max_write_bytes=args.max_write_bytes,
|
|
3516
|
+
)
|
|
3517
|
+
|
|
3518
|
+
server = AlephMCPServerLocal(
|
|
3519
|
+
sandbox_config=config,
|
|
3520
|
+
action_config=action_cfg,
|
|
3521
|
+
tool_docs_mode=cast(ToolDocsMode, args.tool_docs),
|
|
3522
|
+
)
|
|
3523
|
+
asyncio.run(server.run())
|
|
3524
|
+
|
|
3525
|
+
|
|
3526
|
+
if __name__ == "__main__":
|
|
3527
|
+
main()
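For completeness, a programmatic equivalent of the CLI entry point. It reuses the classes that `main()` constructs above; the specific values are illustrative, and any fields not passed are assumed to have sensible defaults.

```python
# Sketch: run the server without argparse, mirroring main() above.
# SandboxConfig, ActionConfig and AlephMCPServerLocal are the same names main() uses;
# remaining config fields are left at their defaults (an assumption).
import asyncio
from pathlib import Path

config = SandboxConfig(timeout_seconds=60.0, max_output_chars=50_000)
action_cfg = ActionConfig(
    enabled=True,                  # expose run_command/read_file/write_file/run_tests
    workspace_root=Path.cwd(),     # main() defaults to _detect_workspace_root()
    require_confirmation=True,     # force confirm=True on action tools
)
server = AlephMCPServerLocal(sandbox_config=config, action_config=action_cfg)
asyncio.run(server.run())          # stdio transport only
```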
|