cortex-llm 1.0.8__tar.gz → 1.0.10__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/PKG-INFO +5 -1
  2. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/README.md +4 -0
  3. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/__init__.py +1 -1
  4. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/inference_engine.py +48 -8
  5. cortex_llm-1.0.10/cortex/tools/__init__.py +5 -0
  6. cortex_llm-1.0.10/cortex/tools/errors.py +9 -0
  7. cortex_llm-1.0.10/cortex/tools/fs_ops.py +135 -0
  8. cortex_llm-1.0.10/cortex/tools/protocol.py +76 -0
  9. cortex_llm-1.0.10/cortex/tools/search.py +70 -0
  10. cortex_llm-1.0.10/cortex/tools/tool_runner.py +144 -0
  11. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/ui/cli.py +231 -124
  12. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/ui/markdown_render.py +9 -0
  13. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/PKG-INFO +5 -1
  14. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/SOURCES.txt +6 -0
  15. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/pyproject.toml +2 -2
  16. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/setup.py +1 -1
  17. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/LICENSE +0 -0
  18. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/__main__.py +0 -0
  19. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/config.py +0 -0
  20. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/conversation_manager.py +0 -0
  21. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/fine_tuning/__init__.py +0 -0
  22. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/fine_tuning/dataset.py +0 -0
  23. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/fine_tuning/mlx_lora_trainer.py +0 -0
  24. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/fine_tuning/trainer.py +0 -0
  25. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/fine_tuning/wizard.py +0 -0
  26. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/gpu_validator.py +0 -0
  27. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/__init__.py +0 -0
  28. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/gpu_validator.py +0 -0
  29. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/memory_pool.py +0 -0
  30. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/mlx_accelerator.py +0 -0
  31. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/mlx_compat.py +0 -0
  32. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/mlx_converter.py +0 -0
  33. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/mps_optimizer.py +0 -0
  34. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/optimizer.py +0 -0
  35. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/metal/performance_profiler.py +0 -0
  36. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/model_downloader.py +0 -0
  37. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/model_manager.py +0 -0
  38. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/quantization/__init__.py +0 -0
  39. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/quantization/dynamic_quantizer.py +0 -0
  40. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/__init__.py +0 -0
  41. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/auto_detector.py +0 -0
  42. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/config_manager.py +0 -0
  43. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/interactive.py +0 -0
  44. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/registry.py +0 -0
  45. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/__init__.py +0 -0
  46. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/base.py +0 -0
  47. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/complex/__init__.py +0 -0
  48. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/complex/reasoning.py +0 -0
  49. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/__init__.py +0 -0
  50. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/alpaca.py +0 -0
  51. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/chatml.py +0 -0
  52. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/gemma.py +0 -0
  53. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/llama.py +0 -0
  54. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/template_registry/template_profiles/standard/simple.py +0 -0
  55. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/ui/__init__.py +0 -0
  56. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex/ui/terminal_app.py +0 -0
  57. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/dependency_links.txt +0 -0
  58. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/entry_points.txt +0 -0
  59. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/not-zip-safe +0 -0
  60. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/requires.txt +0 -0
  61. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/cortex_llm.egg-info/top_level.txt +0 -0
  62. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/setup.cfg +0 -0
  63. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/tests/test_apple_silicon.py +0 -0
  64. {cortex_llm-1.0.8 → cortex_llm-1.0.10}/tests/test_metal_optimization.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: cortex-llm
- Version: 1.0.8
+ Version: 1.0.10
  Summary: GPU-Accelerated LLM Terminal for Apple Silicon
  Home-page: https://github.com/faisalmumtaz/Cortex
  Author: Cortex Development Team
@@ -131,6 +131,10 @@ Cortex supports:
  - `docs/template-registry.md`
  - **Inference engine details** and backend behavior
  - `docs/inference-engine.md`
+ - **Tooling (experimental, WIP)** for repo-scoped read/search and optional file edits with explicit confirmation
+ - `docs/cli.md`
+
+ **Important (Work in Progress):** Tooling is actively evolving and should be considered experimental. Behavior, output format, and available actions may change; tool calls can fail; and UI presentation may be adjusted. Use tooling on non-critical work first, and always review any proposed file changes before approving them.

  ## Configuration

@@ -73,6 +73,10 @@ Cortex supports:
  - `docs/template-registry.md`
  - **Inference engine details** and backend behavior
  - `docs/inference-engine.md`
+ - **Tooling (experimental, WIP)** for repo-scoped read/search and optional file edits with explicit confirmation
+ - `docs/cli.md`
+
+ **Important (Work in Progress):** Tooling is actively evolving and should be considered experimental. Behavior, output format, and available actions may change; tool calls can fail; and UI presentation may be adjusted. Use tooling on non-critical work first, and always review any proposed file changes before approving them.

  ## Configuration

@@ -5,7 +5,7 @@ A high-performance terminal interface for running Hugging Face LLMs locally
  with exclusive GPU acceleration via Metal Performance Shaders (MPS) and MLX.
  """

- __version__ = "1.0.8"
+ __version__ = "1.0.10"
  __author__ = "Cortex Development Team"
  __license__ = "MIT"

@@ -243,6 +243,33 @@ class InferenceEngine:
          tokens_generated = 0
          first_token_time = None
          last_metrics_update = time.time()
+         stream_total_text = ""
+         stream_cumulative = False
+
+         def normalize_stream_chunk(chunk: Any) -> str:
+             """Normalize streaming output to delta chunks when backend yields cumulative text."""
+             nonlocal stream_total_text, stream_cumulative
+             if chunk is None:
+                 return ""
+             if not isinstance(chunk, str):
+                 chunk = str(chunk)
+
+             if stream_cumulative:
+                 if chunk.startswith(stream_total_text):
+                     delta = chunk[len(stream_total_text):]
+                     stream_total_text = chunk
+                     return delta
+                 stream_total_text += chunk
+                 return chunk
+
+             if stream_total_text and len(chunk) > len(stream_total_text) and chunk.startswith(stream_total_text):
+                 stream_cumulative = True
+                 delta = chunk[len(stream_total_text):]
+                 stream_total_text = chunk
+                 return delta
+
+             stream_total_text += chunk
+             return chunk

          try:
              # Use MLX accelerator's optimized generation if available
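The normalize_stream_chunk helper added above converts backends that re-send the full accumulated text on every step into per-chunk deltas, while passing ordinary delta streams through unchanged. A minimal standalone sketch of the same logic, using hypothetical chunk sequences (the in-tree version is a closure over one generation request's state):

def make_normalizer():
    """Return a normalizer mirroring the normalize_stream_chunk logic above."""
    state = {"total": "", "cumulative": False}

    def normalize(chunk):
        if chunk is None:
            return ""
        chunk = str(chunk)
        if state["cumulative"]:
            if chunk.startswith(state["total"]):
                delta = chunk[len(state["total"]):]
                state["total"] = chunk
                return delta
            state["total"] += chunk
            return chunk
        # Detect a backend that re-sends the whole text each time.
        if state["total"] and len(chunk) > len(state["total"]) and chunk.startswith(state["total"]):
            state["cumulative"] = True
            delta = chunk[len(state["total"]):]
            state["total"] = chunk
            return delta
        state["total"] += chunk
        return chunk

    return normalize

# Delta-style backend passes through unchanged; cumulative backend becomes deltas.
norm = make_normalizer()
assert [norm(c) for c in ["Hel", "lo", " world"]] == ["Hel", "lo", " world"]
norm = make_normalizer()
assert [norm(c) for c in ["Hel", "Hello", "Hello world"]] == ["Hel", "lo", " world"]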
@@ -262,10 +289,14 @@
                      if self._cancel_event.is_set():
                          self.status = InferenceStatus.CANCELLED
                          break
-
+
+                     delta = normalize_stream_chunk(token) if request.stream else str(token)
+                     if not delta:
+                         continue
+
                      if first_token_time is None:
                          first_token_time = time.time() - start_time
-
+
                      tokens_generated += 1

                      # Update metrics less frequently
@@ -284,13 +315,18 @@
                          last_metrics_update = current_time

                      # Token is already a string from generate_optimized
-                     yield token
+                     yield delta

                      if any(stop in token for stop in request.stop_sequences):
                          break
              elif mlx_generate:
                  # Fallback to standard MLX generation
-                 logger.info("Using standard MLX generation")
+                 if request.stream and mlx_stream_generate:
+                     logger.info("Using MLX streaming generation")
+                     generate_fn = mlx_stream_generate
+                 else:
+                     logger.info("Using standard MLX generation")
+                     generate_fn = mlx_generate

                  # Import sample_utils for creating sampler
                  try:
@@ -314,7 +350,7 @@
                  if request.seed is not None and request.seed >= 0:
                      mx.random.seed(request.seed)

-                 for response in mlx_generate(
+                 for response in generate_fn(
                      model,
                      tokenizer,
                      **generation_kwargs
@@ -328,10 +364,14 @@
                          token = response.text
                      else:
                          token = str(response)
-
+
+                     delta = normalize_stream_chunk(token) if request.stream else token
+                     if request.stream and not delta:
+                         continue
+
                      if first_token_time is None:
                          first_token_time = time.time() - start_time
-
+
                      tokens_generated += 1

                      # Update metrics less frequently to reduce overhead
@@ -352,7 +392,7 @@
                          )
                          last_metrics_update = current_time

-                     yield token
+                     yield delta

                      if any(stop in token for stop in request.stop_sequences):
                          break
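Distilled from the hunks above: the MLX streaming path is taken only when the request asks for streaming and mlx_stream_generate imported successfully, and everything downstream then iterates generate_fn(model, tokenizer, **generation_kwargs) uniformly. A minimal sketch of that selection, reusing the names from the diff:

def pick_generate_fn(stream_requested, mlx_generate, mlx_stream_generate):
    """Prefer MLX streaming generation only when requested and available."""
    if stream_requested and mlx_stream_generate:
        return mlx_stream_generate
    return mlx_generate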
@@ -0,0 +1,5 @@
+ """Tooling support for Cortex CLI."""
+
+ from cortex.tools.tool_runner import ToolRunner
+
+ __all__ = ["ToolRunner"]
@@ -0,0 +1,9 @@
+ """Tooling error types."""
+
+
+ class ToolError(Exception):
+     """Base error for tool execution failures."""
+
+
+ class ValidationError(ToolError):
+     """Raised when tool arguments or inputs are invalid."""
@@ -0,0 +1,135 @@
+ """Filesystem operations scoped to a repo root."""
+
+ from __future__ import annotations
+
+ import hashlib
+ import os
+ import subprocess
+ from pathlib import Path
+ from typing import Dict, List, Optional, Tuple
+
+ from cortex.tools.errors import ToolError, ValidationError
+
+
+ class RepoFS:
+     """Filesystem helper constrained to a single repo root."""
+
+     def __init__(self, root: Path) -> None:
+         self.root = Path(root).expanduser().resolve()
+
+     def resolve_path(self, path: str) -> Path:
+         if not path or not isinstance(path, str):
+             raise ValidationError("path must be a non-empty string")
+         raw = Path(path).expanduser()
+         resolved = raw.resolve() if raw.is_absolute() else (self.root / raw).resolve()
+         if not resolved.is_relative_to(self.root):
+             raise ValidationError(f"path escapes repo root ({self.root}); use a relative path like '.'")
+         return resolved
+
+     def list_dir(self, path: str = ".", recursive: bool = False, max_depth: int = 2, max_entries: int = 200) -> Dict[str, List[str]]:
+         target = self.resolve_path(path)
+         if not target.is_dir():
+             raise ValidationError("path is not a directory")
+         entries: List[str] = []
+         if not recursive:
+             for item in sorted(target.iterdir()):
+                 rel = item.relative_to(self.root)
+                 suffix = "/" if item.is_dir() else ""
+                 entries.append(f"{rel}{suffix}")
+                 if len(entries) >= max_entries:
+                     break
+             return {"entries": entries}
+
+         base_depth = len(target.relative_to(self.root).parts)
+         for dirpath, dirnames, filenames in os.walk(target):
+             depth = len(Path(dirpath).relative_to(self.root).parts) - base_depth
+             if depth > max_depth:
+                 dirnames[:] = []
+                 continue
+             for name in sorted(dirnames):
+                 rel = (Path(dirpath) / name).relative_to(self.root)
+                 entries.append(f"{rel}/")
+                 if len(entries) >= max_entries:
+                     return {"entries": entries}
+             for name in sorted(filenames):
+                 rel = (Path(dirpath) / name).relative_to(self.root)
+                 entries.append(str(rel))
+                 if len(entries) >= max_entries:
+                     return {"entries": entries}
+         return {"entries": entries}
+
+     def read_text(self, path: str, start_line: int = 1, end_line: Optional[int] = None, max_bytes: int = 2_000_000) -> Dict[str, object]:
+         target = self.resolve_path(path)
+         if not target.is_file():
+             raise ValidationError("path is not a file")
+         size = target.stat().st_size
+         if size > max_bytes and start_line == 1 and end_line is None:
+             raise ToolError("file too large; specify a line range")
+         if start_line < 1:
+             raise ValidationError("start_line must be >= 1")
+         if end_line is not None and end_line < start_line:
+             raise ValidationError("end_line must be >= start_line")
+
+         lines: List[str] = []
+         with target.open("r", encoding="utf-8") as handle:
+             for idx, line in enumerate(handle, start=1):
+                 if idx < start_line:
+                     continue
+                 if end_line is not None and idx > end_line:
+                     break
+                 lines.append(line.rstrip("\n"))
+         content = "\n".join(lines)
+         return {"path": str(target.relative_to(self.root)), "content": content, "start_line": start_line, "end_line": end_line}
+
+     def read_full_text(self, path: str) -> str:
+         target = self.resolve_path(path)
+         if not target.is_file():
+             raise ValidationError("path is not a file")
+         try:
+             return target.read_text(encoding="utf-8")
+         except UnicodeDecodeError as e:
+             raise ToolError(f"file is not valid utf-8: {e}") from e
+
+     def write_text(self, path: str, content: str, expected_sha256: Optional[str] = None) -> Dict[str, object]:
+         target = self.resolve_path(path)
+         if not target.exists() or not target.is_file():
+             raise ValidationError("path does not exist or is not a file")
+         if expected_sha256:
+             current = self.read_full_text(path)
+             if self.sha256_text(current) != expected_sha256:
+                 raise ToolError("file changed; expected hash does not match")
+         target.write_text(content, encoding="utf-8")
+         return {"path": str(target.relative_to(self.root)), "sha256": self.sha256_text(content)}
+
+     def create_text(self, path: str, content: str, overwrite: bool = False) -> Dict[str, object]:
+         target = self.resolve_path(path)
+         if target.exists() and not overwrite:
+             raise ValidationError("path already exists")
+         target.parent.mkdir(parents=True, exist_ok=True)
+         target.write_text(content, encoding="utf-8")
+         return {"path": str(target.relative_to(self.root)), "sha256": self.sha256_text(content)}
+
+     def delete_file(self, path: str) -> Dict[str, object]:
+         target = self.resolve_path(path)
+         if not target.exists() or not target.is_file():
+             raise ValidationError("path does not exist or is not a file")
+         if not self._is_git_tracked(target):
+             raise ToolError("delete blocked: file is not tracked by git")
+         target.unlink()
+         return {"path": str(target.relative_to(self.root)), "deleted": True}
+
+     def sha256_text(self, content: str) -> str:
+         return hashlib.sha256(content.encode("utf-8")).hexdigest()
+
+     def _is_git_tracked(self, target: Path) -> bool:
+         git_dir = self.root / ".git"
+         if not git_dir.exists():
+             return False
+         rel = str(target.relative_to(self.root))
+         result = subprocess.run(
+             ["git", "ls-files", "--error-unmatch", rel],
+             cwd=self.root,
+             capture_output=True,
+             text=True,
+         )
+         return result.returncode == 0
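A short usage sketch of the new RepoFS helper; the temporary directory and file names below are illustrative, not part of the package:

from pathlib import Path
from tempfile import TemporaryDirectory

from cortex.tools.errors import ValidationError
from cortex.tools.fs_ops import RepoFS

with TemporaryDirectory() as tmp:
    root = Path(tmp)
    (root / "docs").mkdir()
    (root / "docs" / "cli.md").write_text("# CLI\n", encoding="utf-8")

    fs = RepoFS(root)
    print(fs.list_dir("."))             # {'entries': ['docs/']}
    print(fs.read_text("docs/cli.md"))  # {'path': 'docs/cli.md', 'content': '# CLI', ...}

    try:
        fs.resolve_path("../outside.txt")  # escapes the repo root
    except ValidationError as e:
        print("blocked:", e)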
@@ -0,0 +1,76 @@
+ """Protocol helpers for tool calling."""
+
+ from __future__ import annotations
+
+ import json
+ from typing import Any, Dict, List, Optional, Tuple
+
+ TOOL_CALLS_START = "<tool_calls>"
+ TOOL_CALLS_END = "</tool_calls>"
+ TOOL_RESULTS_START = "<tool_results>"
+ TOOL_RESULTS_END = "</tool_results>"
+
+
+ def find_tool_calls_block(text: str) -> Tuple[Optional[int], Optional[int], Optional[str]]:
+     """Return (start, end, block) for tool_calls JSON, if present."""
+     start = text.find(TOOL_CALLS_START)
+     if start == -1:
+         return None, None, None
+     end = text.find(TOOL_CALLS_END, start + len(TOOL_CALLS_START))
+     if end == -1:
+         return start, None, None
+     block = text[start + len(TOOL_CALLS_START) : end].strip()
+     return start, end + len(TOOL_CALLS_END), block
+
+
+ def strip_tool_blocks(text: str) -> str:
+     """Remove tool_calls block from text (including incomplete block)."""
+     start, end, _ = find_tool_calls_block(text)
+     if start is None:
+         return text
+     if end is None:
+         return text[:start]
+     return text[:start] + text[end:]
+
+
+ def parse_tool_calls(text: str) -> Tuple[List[Dict[str, Any]], Optional[str]]:
+     """Parse tool calls from text. Returns (calls, error)."""
+     start, end, block = find_tool_calls_block(text)
+     if start is None:
+         return [], None
+     if end is None or block is None:
+         return [], "tool_calls block is incomplete"
+     try:
+         payload = json.loads(block)
+     except json.JSONDecodeError as e:
+         return [], f"invalid tool_calls JSON: {e}"
+
+     if not isinstance(payload, dict):
+         return [], "tool_calls payload must be a JSON object"
+     calls = payload.get("calls")
+     if not isinstance(calls, list):
+         return [], "tool_calls payload missing 'calls' list"
+
+     normalized: List[Dict[str, Any]] = []
+     for idx, call in enumerate(calls):
+         if not isinstance(call, dict):
+             return [], f"tool call at index {idx} must be an object"
+         name = call.get("name")
+         arguments = call.get("arguments")
+         call_id = call.get("id") or f"call_{idx + 1}"
+         if not isinstance(name, str) or not name.strip():
+             return [], f"tool call at index {idx} missing valid name"
+         if arguments is None:
+             arguments = {}
+         if not isinstance(arguments, dict):
+             return [], f"tool call '{name}' arguments must be an object"
+         normalized.append({"id": str(call_id), "name": name, "arguments": arguments})
+
+     return normalized, None
+
+
+ def format_tool_results(results: List[Dict[str, Any]]) -> str:
+     """Format tool results for model consumption."""
+     payload = {"results": results}
+     body = json.dumps(payload, ensure_ascii=True)
+     return f"{TOOL_RESULTS_START}\n{body}\n{TOOL_RESULTS_END}"
@@ -0,0 +1,70 @@
+ """Search utilities for repo tools."""
+
+ from __future__ import annotations
+
+ import os
+ import re
+ import shutil
+ import subprocess
+ from pathlib import Path
+ from typing import Dict, List
+
+ from cortex.tools.errors import ToolError, ValidationError
+ from cortex.tools.fs_ops import RepoFS
+
+
+ class RepoSearch:
+     """Search helper constrained to a repo root."""
+
+     def __init__(self, repo_fs: RepoFS) -> None:
+         self.repo_fs = repo_fs
+
+     def search(self, query: str, path: str = ".", use_regex: bool = True, max_results: int = 100) -> Dict[str, List[Dict[str, object]]]:
+         if not isinstance(query, str) or not query:
+             raise ValidationError("query must be a non-empty string")
+         if max_results < 1:
+             raise ValidationError("max_results must be >= 1")
+         root = self.repo_fs.root
+         target = self.repo_fs.resolve_path(path)
+
+         if shutil.which("rg"):
+             return {"results": self._rg_search(query, target, use_regex, max_results)}
+         return {"results": self._python_search(query, target, use_regex, max_results)}
+
+     def _rg_search(self, query: str, target: Path, use_regex: bool, max_results: int) -> List[Dict[str, object]]:
+         args = ["rg", "--line-number", "--with-filename", "--no-heading"]
+         if not use_regex:
+             args.append("-F")
+         args.extend(["-e", query, str(target)])
+         result = subprocess.run(args, cwd=self.repo_fs.root, capture_output=True, text=True)
+         if result.returncode not in (0, 1):
+             raise ToolError(f"rg failed: {result.stderr.strip()}")
+         matches: List[Dict[str, object]] = []
+         for line in result.stdout.splitlines():
+             try:
+                 file_path, line_no, text = line.split(":", 2)
+             except ValueError:
+                 continue
+             matches.append({"path": file_path, "line": int(line_no), "text": text})
+             if len(matches) >= max_results:
+                 break
+         return matches
+
+     def _python_search(self, query: str, target: Path, use_regex: bool, max_results: int) -> List[Dict[str, object]]:
+         pattern = re.compile(query) if use_regex else None
+         results: List[Dict[str, object]] = []
+         for dirpath, dirnames, filenames in os.walk(target):
+             dirnames[:] = [d for d in dirnames if d != ".git"]
+             for name in filenames:
+                 path = Path(dirpath) / name
+                 try:
+                     text = path.read_text(encoding="utf-8")
+                 except Exception:
+                     continue
+                 for idx, line in enumerate(text.splitlines(), start=1):
+                     found = bool(pattern.search(line)) if pattern else (query in line)
+                     if found:
+                         results.append({"path": str(path.relative_to(self.repo_fs.root)), "line": idx, "text": line})
+                         if len(results) >= max_results:
+                             return results
+         return results
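How the new RepoSearch helper might be driven; ripgrep is used when available, with the pure-Python scan as fallback. The query and path below are illustrative:

from pathlib import Path

from cortex.tools.fs_ops import RepoFS
from cortex.tools.search import RepoSearch

search = RepoSearch(RepoFS(Path(".")))

# Literal search: use_regex=False adds rg's -F flag, or a substring match in the fallback.
hits = search.search("normalize_stream_chunk", path="cortex", use_regex=False, max_results=20)
for hit in hits["results"]:
    print(f'{hit["path"]}:{hit["line"]}: {hit["text"]}')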
@@ -0,0 +1,144 @@
+ """Tool runner and specifications for Cortex."""
+
+ from __future__ import annotations
+
+ import difflib
+ import json
+ from pathlib import Path
+ from typing import Any, Callable, Dict, List, Optional
+
+ from cortex.tools.errors import ToolError, ValidationError
+ from cortex.tools.fs_ops import RepoFS
+ from cortex.tools.search import RepoSearch
+
+
+ ConfirmCallback = Callable[[str], bool]
+
+
+ class ToolRunner:
+     """Execute tool calls with safety checks."""
+
+     def __init__(self, root: Path, confirm_callback: Optional[ConfirmCallback] = None) -> None:
+         self.fs = RepoFS(root)
+         self.search = RepoSearch(self.fs)
+         self.confirm_callback = confirm_callback
+
+     def set_confirm_callback(self, callback: ConfirmCallback) -> None:
+         self.confirm_callback = callback
+
+     def tool_spec(self) -> Dict[str, Any]:
+         return {
+             "list_dir": {"args": {"path": "string", "recursive": "bool", "max_depth": "int"}},
+             "read_file": {"args": {"path": "string", "start_line": "int", "end_line": "int", "max_bytes": "int"}},
+             "search": {"args": {"query": "string", "path": "string", "use_regex": "bool", "max_results": "int"}},
+             "write_file": {"args": {"path": "string", "content": "string", "expected_sha256": "string"}},
+             "create_file": {"args": {"path": "string", "content": "string", "overwrite": "bool"}},
+             "delete_file": {"args": {"path": "string"}},
+             "replace_in_file": {"args": {"path": "string", "old": "string", "new": "string", "expected_replacements": "int"}},
+             "insert_after": {"args": {"path": "string", "anchor": "string", "content": "string", "expected_matches": "int"}},
+             "insert_before": {"args": {"path": "string", "anchor": "string", "content": "string", "expected_matches": "int"}},
+         }
+
+     def tool_instructions(self) -> str:
+         spec = json.dumps(self.tool_spec(), ensure_ascii=True, indent=2)
+         repo_root = str(self.fs.root)
+         return (
+             "[CORTEX_TOOL_INSTRUCTIONS v2]\n"
+             "You have access to file tools. If a tool is required, respond ONLY with a <tool_calls> JSON block.\n"
+             "Do not include any other text when calling tools.\n"
+             f"Repo root: {repo_root}\n"
+             "All paths must be relative to the repo root (use '.' for root). Do not use absolute paths or ~.\n"
+             "If you are unsure about paths, call list_dir with path '.' first.\n"
+             "Format:\n"
+             "<tool_calls>{\"calls\":[{\"id\":\"call_1\",\"name\":\"tool_name\",\"arguments\":{...}}]}</tool_calls>\n"
+             "Available tools:\n"
+             f"{spec}"
+         )
+
+     def run_calls(self, calls: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+         results: List[Dict[str, Any]] = []
+         for call in calls:
+             call_id = call.get("id", "unknown")
+             name = call.get("name")
+             args = call.get("arguments") or {}
+             try:
+                 if name == "list_dir":
+                     result = self.fs.list_dir(**args)
+                 elif name == "read_file":
+                     result = self.fs.read_text(**args)
+                 elif name == "search":
+                     result = self.search.search(**args)
+                 elif name == "write_file":
+                     result = self._write_file(**args)
+                 elif name == "create_file":
+                     result = self._create_file(**args)
+                 elif name == "delete_file":
+                     result = self._delete_file(**args)
+                 elif name == "replace_in_file":
+                     result = self._replace_in_file(**args)
+                 elif name == "insert_after":
+                     result = self._insert_relative(after=True, **args)
+                 elif name == "insert_before":
+                     result = self._insert_relative(after=False, **args)
+                 else:
+                     raise ValidationError(f"unknown tool: {name}")
+                 results.append({"id": call_id, "name": name, "ok": True, "result": result, "error": None})
+             except Exception as e:
+                 results.append({"id": call_id, "name": name, "ok": False, "result": None, "error": str(e)})
+         return results
+
+     def _write_file(self, path: str, content: str, expected_sha256: Optional[str] = None) -> Dict[str, Any]:
+         before = self.fs.read_full_text(path)
+         self._confirm_change(path, before, content, "write")
+         return self.fs.write_text(path, content, expected_sha256=expected_sha256)
+
+     def _create_file(self, path: str, content: str, overwrite: bool = False) -> Dict[str, Any]:
+         before = ""
+         self._confirm_change(path, before, content, "create")
+         return self.fs.create_text(path, content, overwrite=overwrite)
+
+     def _delete_file(self, path: str) -> Dict[str, Any]:
+         before = self.fs.read_full_text(path)
+         self._confirm_change(path, before, "", "delete")
+         return self.fs.delete_file(path)
+
+     def _replace_in_file(self, path: str, old: str, new: str, expected_replacements: int = 1) -> Dict[str, Any]:
+         if not old:
+             raise ValidationError("old must be a non-empty string")
+         content = self.fs.read_full_text(path)
+         count = content.count(old)
+         if count != expected_replacements:
+             raise ToolError(f"expected {expected_replacements} replacements, found {count}")
+         updated = content.replace(old, new)
+         self._confirm_change(path, content, updated, "replace")
+         return self.fs.write_text(path, updated)
+
+     def _insert_relative(self, path: str, anchor: str, content: str, expected_matches: int = 1, after: bool = True) -> Dict[str, Any]:
+         if not anchor:
+             raise ValidationError("anchor must be a non-empty string")
+         original = self.fs.read_full_text(path)
+         count = original.count(anchor)
+         if count != expected_matches:
+             raise ToolError(f"expected {expected_matches} matches, found {count}")
+         insert_text = anchor + content if after else content + anchor
+         updated = original.replace(anchor, insert_text, count if expected_matches > 1 else 1)
+         self._confirm_change(path, original, updated, "insert")
+         return self.fs.write_text(path, updated)
+
+     def _confirm_change(self, path: str, before: str, after: str, action: str) -> None:
+         if self.confirm_callback is None:
+             raise ToolError("confirmation required but no callback configured")
+         if before == after:
+             raise ToolError("no changes to apply")
+         diff = "\n".join(
+             difflib.unified_diff(
+                 before.splitlines(),
+                 after.splitlines(),
+                 fromfile=f"{path} (before)",
+                 tofile=f"{path} (after)",
+                 lineterm="",
+             )
+         )
+         prompt = f"Apply {action} to {path}?\n{diff}\n"
+         if not self.confirm_callback(prompt):
+             raise ToolError("change declined by user")