tarang 4.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tarang/__init__.py +23 -0
- tarang/cli.py +1168 -0
- tarang/client/__init__.py +19 -0
- tarang/client/api_client.py +701 -0
- tarang/client/auth.py +178 -0
- tarang/context/__init__.py +41 -0
- tarang/context/bm25.py +218 -0
- tarang/context/chunker.py +984 -0
- tarang/context/graph.py +464 -0
- tarang/context/indexer.py +514 -0
- tarang/context/retriever.py +270 -0
- tarang/context/skeleton.py +282 -0
- tarang/context_collector.py +449 -0
- tarang/executor/__init__.py +6 -0
- tarang/executor/diff_apply.py +246 -0
- tarang/executor/linter.py +184 -0
- tarang/stream.py +1346 -0
- tarang/ui/__init__.py +7 -0
- tarang/ui/console.py +407 -0
- tarang/ui/diff_viewer.py +146 -0
- tarang/ui/formatter.py +1151 -0
- tarang/ui/keyboard.py +197 -0
- tarang/ws/__init__.py +14 -0
- tarang/ws/client.py +464 -0
- tarang/ws/executor.py +638 -0
- tarang/ws/handlers.py +590 -0
- tarang-4.4.0.dist-info/METADATA +102 -0
- tarang-4.4.0.dist-info/RECORD +31 -0
- tarang-4.4.0.dist-info/WHEEL +5 -0
- tarang-4.4.0.dist-info/entry_points.txt +2 -0
- tarang-4.4.0.dist-info/top_level.txt +1 -0
tarang/stream.py
ADDED
|
@@ -0,0 +1,1346 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SSE Stream Client with REST Callbacks - Industry-standard pattern.
|
|
3
|
+
|
|
4
|
+
This implements the SSE + REST callback pattern used by OpenAI, Anthropic, Cursor:
|
|
5
|
+
1. CLI sends POST /api/execute with instruction + initial context
|
|
6
|
+
2. Backend streams SSE events (status, tool_request, plan, change, etc.)
|
|
7
|
+
3. When backend needs a tool result, it sends tool_request and WAITS
|
|
8
|
+
4. CLI executes the tool locally
|
|
9
|
+
5. CLI sends POST /api/callback with the result
|
|
10
|
+
6. Backend continues the stream
|
|
11
|
+
|
|
12
|
+
Benefits:
|
|
13
|
+
- Serverless-friendly (Vercel, Cloudflare Workers)
|
|
14
|
+
- Simpler than WebSocket (unidirectional stream)
|
|
15
|
+
- Auto-reconnection via Last-Event-ID
|
|
16
|
+
- Easier debugging (curl-friendly)
|
|
17
|
+
"""
|
|
18
|
+
from __future__ import annotations
|
|
19
|
+
|
|
20
|
+
import fnmatch
|
|
21
|
+
import json
|
|
22
|
+
import logging
|
|
23
|
+
import os
|
|
24
|
+
import re
|
|
25
|
+
import subprocess
|
|
26
|
+
import time
|
|
27
|
+
from dataclasses import dataclass
|
|
28
|
+
from enum import Enum
|
|
29
|
+
from pathlib import Path
|
|
30
|
+
from typing import Any, AsyncGenerator, Callable, Dict, Optional
|
|
31
|
+
|
|
32
|
+
import httpx
|
|
33
|
+
from rich.console import Console
|
|
34
|
+
|
|
35
|
+
from tarang.context_collector import ProjectContext
|
|
36
|
+
from tarang.context.retriever import create_retriever
|
|
37
|
+
from tarang.ui.formatter import OutputFormatter
|
|
38
|
+
|
|
39
|
+
logger = logging.getLogger(__name__)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class EventType(str, Enum):
    """SSE event types from backend.

    Subclasses ``str`` so members compare equal to (and serialize as) their
    plain string values, which lets ``EventType(event)`` parse raw SSE names.
    """
    STATUS = "status"
    TOOL_REQUEST = "tool_request"  # Legacy name
    TOOL_CALL = "tool_call"  # New name (SSE Split Architecture)
    TOOL_DONE = "tool_done"
    THINKING = "thinking"  # Agent thinking/reasoning
    PLAN = "plan"  # Strategic plan from orchestrator (emitted ONCE)
    PHASE_UPDATE = "phase_update"  # Phase status change (no re-render)
    PHASE_SUMMARY = "phase_summary"  # Individual phase summary (display immediately)
    WORKER_UPDATE = "worker_update"  # Worker status change (no re-render)
    PHASE_START = "phase_start"  # Phase beginning (legacy)
    WORKER_START = "worker_start"  # Worker beginning (legacy)
    WORKER_DONE = "worker_done"  # Worker completed (legacy)
    DELEGATION = "delegation"  # Agent delegation
    CHANGE = "change"
    CONTENT = "content"
    ERROR = "error"
    COMPLETE = "complete"
    CANCELLED = "cancelled"
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
@dataclass
class StreamEvent:
    """A single parsed event from the backend SSE stream."""
    type: EventType
    data: Dict[str, Any]

    @classmethod
    def from_sse(cls, event: str, data: str) -> "StreamEvent":
        """Build a StreamEvent from raw SSE ``event``/``data`` strings.

        Unknown event names fall back to ``EventType.ERROR``; a payload that
        is not valid JSON is wrapped as ``{"message": <raw text>}``.
        """
        try:
            kind = EventType(event)
        except ValueError:
            kind = EventType.ERROR

        try:
            payload = json.loads(data)
        except json.JSONDecodeError:
            payload = {"message": data}

        return cls(type=kind, data=payload)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
class FileChange:
    """A single file modification streamed from the backend."""
    type: str  # "create" or "edit"
    path: str
    content: Optional[str] = None
    search: Optional[str] = None
    replace: Optional[str] = None
    description: str = ""

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "FileChange":
        """Build a FileChange from a raw event payload, tolerating missing keys."""
        get = data.get
        return cls(
            type=get("type", ""),
            path=get("path", ""),
            content=get("content"),
            search=get("search"),
            replace=get("replace"),
            description=get("description", ""),
        )
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class LocalToolExecutor:
|
|
109
|
+
"""
|
|
110
|
+
Executes tools locally on the CLI side.
|
|
111
|
+
|
|
112
|
+
Tools are aligned with backend's tool_provider.py TOOL_DEFINITIONS:
|
|
113
|
+
- read_file, list_files, search_files, get_file_info (read-only)
|
|
114
|
+
- write_file, edit_file, shell, delete_file (require approval)
|
|
115
|
+
"""
|
|
116
|
+
|
|
117
|
+
    # Files/directories to ignore when listing, searching, or walking the
    # project tree. Entries are fnmatch patterns matched against a single
    # path component (see _should_ignore), so both literal names and globs
    # like "*.pyc" work.
    IGNORE_PATTERNS = {
        ".git", ".svn", ".hg",
        "node_modules", "venv", ".venv", "env", ".env",
        "__pycache__", ".pytest_cache", ".mypy_cache",
        "vendor", "packages",
        "dist", "build", ".next", ".nuxt", "out",
        "target", "bin", "obj",
        ".idea", ".vscode", ".vs",
        ".tarang", ".tarang_backups",
        "*.pyc", "*.pyo", "*.so", "*.dylib",
        "*.egg-info", "*.egg",
        ".DS_Store", "Thumbs.db",
    }

    def __init__(
        self,
        project_root: str,
        is_cancelled: Optional[Callable[[], bool]] = None,
        set_process: Optional[Callable[[subprocess.Popen], None]] = None,
    ):
        """Initialize the executor.

        Args:
            project_root: Directory that all relative tool paths resolve
                against; resolved to an absolute path immediately.
            is_cancelled: Optional callback polled during long-running shell
                commands; returning True aborts the running process.
            set_process: Optional callback that receives the active
                ``subprocess.Popen`` handle (or ``None`` when it finishes)
                so the caller can interrupt it externally.
        """
        self.project_root = Path(project_root).resolve()
        # Optional callbacks for shell interruption; default to no-ops so the
        # rest of the class can call them unconditionally.
        self._is_cancelled = is_cancelled or (lambda: False)
        self._set_process = set_process or (lambda p: None)
|
|
142
|
+
|
|
143
|
+
def execute(self, tool: str, args: dict) -> dict:
|
|
144
|
+
"""Execute a tool and return the result."""
|
|
145
|
+
try:
|
|
146
|
+
# Read-only tools
|
|
147
|
+
if tool == "list_files":
|
|
148
|
+
return self._list_files(args)
|
|
149
|
+
elif tool == "read_file":
|
|
150
|
+
return self._read_file(args)
|
|
151
|
+
elif tool == "read_files":
|
|
152
|
+
return self._read_files(args) # Batch read - more efficient
|
|
153
|
+
elif tool == "search_files":
|
|
154
|
+
return self._search_files(args)
|
|
155
|
+
elif tool == "search_code":
|
|
156
|
+
return self._search_code(args)
|
|
157
|
+
elif tool == "get_file_info":
|
|
158
|
+
return self._get_file_info(args)
|
|
159
|
+
# Write tools (require approval - handled by caller)
|
|
160
|
+
elif tool == "write_file":
|
|
161
|
+
return self._write_file(args)
|
|
162
|
+
elif tool == "edit_file":
|
|
163
|
+
return self._edit_file(args)
|
|
164
|
+
elif tool == "delete_file":
|
|
165
|
+
return self._delete_file(args)
|
|
166
|
+
elif tool == "shell":
|
|
167
|
+
return self._shell(args)
|
|
168
|
+
# Validation tools
|
|
169
|
+
elif tool == "validate_file":
|
|
170
|
+
return self._validate_file(args)
|
|
171
|
+
elif tool == "validate_build":
|
|
172
|
+
return self._validate_build(args)
|
|
173
|
+
elif tool == "validate_structure":
|
|
174
|
+
return self._validate_structure(args)
|
|
175
|
+
elif tool == "lint_check":
|
|
176
|
+
return self._lint_check(args)
|
|
177
|
+
else:
|
|
178
|
+
return {"error": f"Unknown tool: {tool}"}
|
|
179
|
+
except Exception as e:
|
|
180
|
+
logger.exception(f"Tool execution error: {tool}")
|
|
181
|
+
return {"error": str(e)}
|
|
182
|
+
|
|
183
|
+
    def _list_files(self, args: dict) -> dict:
        """List files under a directory, honoring ignore rules and a size cap.

        Args (all optional):
            path: Directory to list; absolute paths are resolved as-is,
                relative paths are resolved against the project root.
            pattern: fnmatch glob applied to file *names* (not full paths).
            recursive: Walk subdirectories when True (default True).
            max_files: Hard cap on the number of entries returned (default 500).

        Returns:
            ``{"files": <sorted relative paths>, "count": n}`` or ``{"error": ...}``.
        """
        path = args.get("path", ".")
        pattern = args.get("pattern")  # Glob pattern to filter files
        recursive = args.get("recursive", True)
        max_files = args.get("max_files", 500)

        # Handle absolute paths - resolve them directly
        path_obj = Path(path)
        if path_obj.is_absolute():
            target = path_obj.resolve()
        else:
            target = (self.project_root / path).resolve()

        if not target.exists():
            return {"error": f"Path not found: {path}"}

        files = []
        if recursive:
            for root, dirs, filenames in os.walk(target):
                # Prune ignored directories in place so os.walk skips them
                dirs[:] = [d for d in dirs if not self._should_ignore(d)]

                for filename in filenames:
                    if self._should_ignore(filename):
                        continue
                    # Apply pattern filter if provided
                    if pattern and not fnmatch.fnmatch(filename, pattern):
                        continue

                    full_path = Path(root) / filename
                    # Try relative to project_root first, then to target directory
                    try:
                        rel_path = str(full_path.relative_to(self.project_root))
                    except ValueError:
                        # Target is outside project_root, use relative to target
                        try:
                            rel_path = str(full_path.relative_to(target))
                        except ValueError:
                            continue
                    files.append(rel_path)

                    # Inner break only exits this directory's file loop...
                    if len(files) >= max_files:
                        break

                # ...so re-check here to stop the whole walk at the cap.
                if len(files) >= max_files:
                    break
        else:
            for item in target.iterdir():
                if item.is_file() and not self._should_ignore(item.name):
                    # Apply pattern filter if provided
                    if pattern and not fnmatch.fnmatch(item.name, pattern):
                        continue
                    # Try relative to project_root first, then to target directory
                    try:
                        rel_path = str(item.relative_to(self.project_root))
                    except ValueError:
                        try:
                            rel_path = str(item.relative_to(target))
                        except ValueError:
                            continue
                    files.append(rel_path)

                    if len(files) >= max_files:
                        break

        return {"files": sorted(files), "count": len(files)}
|
|
250
|
+
|
|
251
|
+
def _read_file(self, args: dict) -> dict:
|
|
252
|
+
"""Read file content."""
|
|
253
|
+
file_path = args.get("file_path", "")
|
|
254
|
+
max_lines = args.get("max_lines", 500)
|
|
255
|
+
start_line = args.get("start_line")
|
|
256
|
+
end_line = args.get("end_line")
|
|
257
|
+
|
|
258
|
+
if not file_path:
|
|
259
|
+
return {"error": "file_path required"}
|
|
260
|
+
|
|
261
|
+
target = self.project_root / file_path
|
|
262
|
+
if not target.exists():
|
|
263
|
+
return {"error": f"File not found: {file_path}"}
|
|
264
|
+
|
|
265
|
+
if not target.is_file():
|
|
266
|
+
return {"error": f"Not a file: {file_path}"}
|
|
267
|
+
|
|
268
|
+
# Check file size (max 100KB)
|
|
269
|
+
try:
|
|
270
|
+
size = target.stat().st_size
|
|
271
|
+
if size > 100 * 1024:
|
|
272
|
+
return {"error": f"File too large: {size} bytes"}
|
|
273
|
+
except OSError as e:
|
|
274
|
+
return {"error": str(e)}
|
|
275
|
+
|
|
276
|
+
try:
|
|
277
|
+
content = target.read_text(encoding="utf-8", errors="replace")
|
|
278
|
+
lines = content.splitlines()
|
|
279
|
+
total_lines = len(lines)
|
|
280
|
+
|
|
281
|
+
# Apply line range if specified
|
|
282
|
+
if start_line is not None or end_line is not None:
|
|
283
|
+
start = (start_line or 1) - 1 # Convert to 0-based
|
|
284
|
+
end = end_line or total_lines
|
|
285
|
+
lines = lines[start:end]
|
|
286
|
+
|
|
287
|
+
# Apply max lines limit
|
|
288
|
+
truncated = len(lines) > max_lines
|
|
289
|
+
if truncated:
|
|
290
|
+
lines = lines[:max_lines]
|
|
291
|
+
|
|
292
|
+
content = "\n".join(lines)
|
|
293
|
+
if truncated:
|
|
294
|
+
content += "\n... (truncated)"
|
|
295
|
+
|
|
296
|
+
return {
|
|
297
|
+
"content": content,
|
|
298
|
+
"lines": len(lines),
|
|
299
|
+
"total_lines": total_lines,
|
|
300
|
+
"path": file_path,
|
|
301
|
+
"truncated": truncated,
|
|
302
|
+
}
|
|
303
|
+
except Exception as e:
|
|
304
|
+
return {"error": str(e)}
|
|
305
|
+
|
|
306
|
+
def _read_files(self, args: dict) -> dict:
|
|
307
|
+
"""
|
|
308
|
+
Read multiple files in a single batch operation.
|
|
309
|
+
|
|
310
|
+
This is more efficient than calling read_file multiple times:
|
|
311
|
+
- Single tool call instead of N calls
|
|
312
|
+
- Reduces AI token overhead
|
|
313
|
+
"""
|
|
314
|
+
file_paths = args.get("file_paths", [])
|
|
315
|
+
|
|
316
|
+
if not file_paths:
|
|
317
|
+
return {"error": "file_paths required"}
|
|
318
|
+
|
|
319
|
+
if len(file_paths) > 10:
|
|
320
|
+
return {"error": "Maximum 10 files per batch", "requested": len(file_paths)}
|
|
321
|
+
|
|
322
|
+
results = []
|
|
323
|
+
for file_path in file_paths:
|
|
324
|
+
result = self._read_file({"file_path": file_path})
|
|
325
|
+
results.append({
|
|
326
|
+
"path": file_path,
|
|
327
|
+
"content": result.get("content", ""),
|
|
328
|
+
"lines": result.get("lines", 0),
|
|
329
|
+
"error": result.get("error"),
|
|
330
|
+
})
|
|
331
|
+
|
|
332
|
+
# Summary stats
|
|
333
|
+
successful = sum(1 for r in results if not r.get("error"))
|
|
334
|
+
total_lines = sum(r.get("lines", 0) for r in results)
|
|
335
|
+
|
|
336
|
+
return {
|
|
337
|
+
"files": results,
|
|
338
|
+
"count": len(results),
|
|
339
|
+
"successful": successful,
|
|
340
|
+
"total_lines": total_lines,
|
|
341
|
+
}
|
|
342
|
+
|
|
343
|
+
    def _search_files(self, args: dict) -> dict:
        """Grep-style regex search across known-text files in the project.

        Args:
            pattern: Regex, matched case-insensitively (required). An invalid
                regex is silently searched as a literal string instead.
            path: Subdirectory to search; falls back to the project root when
                it does not exist.
            file_pattern: Optional fnmatch glob applied to file names.
            max_results: Stop after this many matches (default 100).

        Returns:
            ``{"matches": [{"file", "line", "content"}, ...], "count": n}``.
        """
        pattern = args.get("pattern", "")
        max_results = args.get("max_results", 100)
        search_path = args.get("path", ".")
        file_pattern = args.get("file_pattern")

        if not pattern:
            return {"error": "pattern required"}

        matches = []

        try:
            regex = re.compile(pattern, re.IGNORECASE)
        except re.error:
            # Treat as literal string
            regex = re.compile(re.escape(pattern), re.IGNORECASE)

        # Resolve search directory
        search_root = self.project_root / search_path
        if not search_root.exists():
            search_root = self.project_root

        for root, dirs, filenames in os.walk(search_root):
            # Prune ignored directories in place so os.walk skips them
            dirs[:] = [d for d in dirs if not self._should_ignore(d)]

            for filename in filenames:
                if self._should_ignore(filename):
                    continue

                # Apply file pattern filter if specified
                if file_pattern and not fnmatch.fnmatch(filename, file_pattern):
                    continue

                full_path = Path(root) / filename

                # Only search text files (known source/config extensions)
                ext = full_path.suffix.lower()
                if ext not in {".py", ".js", ".ts", ".jsx", ".tsx", ".json", ".yaml",
                               ".yml", ".md", ".txt", ".html", ".css", ".scss",
                               ".java", ".kt", ".go", ".rs", ".c", ".cpp", ".h",
                               ".rb", ".php", ".swift", ".sql", ".sh", ".toml"}:
                    continue

                try:
                    content = full_path.read_text(encoding="utf-8", errors="replace")
                    for i, line in enumerate(content.splitlines(), 1):
                        if regex.search(line):
                            # Only report paths inside the project root;
                            # anything else is skipped.
                            try:
                                rel_path = str(full_path.relative_to(self.project_root))
                            except ValueError:
                                continue

                            matches.append({
                                "file": rel_path,
                                "line": i,
                                "content": line.strip()[:200],  # cap snippet length
                            })

                            # Early return from deep inside the walk once the
                            # result cap is reached.
                            if len(matches) >= max_results:
                                return {"matches": matches, "count": len(matches)}
                except Exception:
                    # Unreadable file: skip silently (best-effort search)
                    continue

        return {"matches": matches, "count": len(matches)}
|
|
408
|
+
|
|
409
|
+
    # Track background indexing state.
    # Class-level (shared across all executor instances): whether a
    # background index build is currently running, and the most recent
    # build's result (set by _handle_missing_index's worker thread).
    _indexing_in_progress = False
    _index_result = None
|
|
412
|
+
|
|
413
|
+
    def _search_code(self, args: dict) -> dict:
        """Search codebase using BM25 + Knowledge Graph retriever.

        Args:
            query: Search query (required).
            hops: Graph-expansion depth passed to the retriever (default 1).
            max_chunks: Maximum chunks to return (default 10).

        Returns:
            On success: ``{"success", "chunks", "signatures", "graph",
            "indexed"}``. When no index exists yet, delegates to
            ``_handle_missing_index`` which starts a background build.
        """
        query = args.get("query", "")
        hops = args.get("hops", 1)
        max_chunks = args.get("max_chunks", 10)

        if not query:
            return {"error": "query required"}

        try:
            # Construct the correct index path (.tarang/index/)
            index_path = Path(self.project_root) / ".tarang" / "index"
            retriever = create_retriever(index_path)
            if retriever is None:
                # Index not found - start background indexing
                return self._handle_missing_index(query)

            result = retriever.retrieve(query=query, hops=hops, max_chunks=max_chunks)

            # Format chunks for response (result is a RetrievalResult dataclass)
            chunks = []
            for chunk in result.chunks:
                chunks.append({
                    "id": chunk.id,
                    "file": chunk.file,
                    "name": chunk.name,
                    "type": chunk.type,
                    "content": chunk.content[:2000] if chunk.content else "",  # Limit content size
                    "line_start": chunk.line_start,
                    "signature": chunk.signature or "",
                })

            return {
                "success": True,
                "chunks": chunks,
                "signatures": result.signatures,
                "graph": result.graph_context,
                "indexed": True,
            }
        except Exception as e:
            logger.exception("search_code error")
            # "indexed": True here means an index existed but the query
            # itself failed.
            return {"error": f"Search failed: {e}", "indexed": True}
|
|
455
|
+
|
|
456
|
+
    def _handle_missing_index(self, query: str) -> dict:
        """Handle missing index by building in background.

        Returns immediately with an advisory error payload telling the agent
        to use other tools and retry later; the actual build runs in a
        daemon thread and records its outcome in class-level state.
        """
        import threading
        from tarang.context import ProjectIndexer

        # Check if indexing already in progress.
        # NOTE(review): the flag lives on the CLASS (shared by all executor
        # instances) but is read through `self` - confirm sharing is intended.
        if self._indexing_in_progress:
            return {
                "error": "Index is being built in background. Please use search_files or read_file for now.",
                "indexed": False,
                "indexing": True,
            }

        # Start background indexing
        def build_index():
            # Runs in a daemon thread; flips the class-level flag around the
            # build so concurrent callers see "indexing in progress".
            try:
                LocalToolExecutor._indexing_in_progress = True
                indexer = ProjectIndexer(self.project_root)
                result = indexer.build(force=False)
                LocalToolExecutor._index_result = result
                logger.info(f"Background indexing complete: {result.files_indexed} files, {result.chunks_created} chunks")
            except Exception as e:
                logger.error(f"Background indexing failed: {e}")
                LocalToolExecutor._index_result = {"error": str(e)}
            finally:
                LocalToolExecutor._indexing_in_progress = False

        thread = threading.Thread(target=build_index, daemon=True)
        thread.start()

        return {
            "error": "Index not found. Building index in background... Use search_files or read_file for now, then retry search_code.",
            "indexed": False,
            "indexing": True,
            "hint": f"Alternative: use search_files with pattern matching for '{query[:30]}'",
        }
|
|
492
|
+
|
|
493
|
+
def _get_file_info(self, args: dict) -> dict:
|
|
494
|
+
"""Get metadata about a file."""
|
|
495
|
+
file_path = args.get("file_path", "")
|
|
496
|
+
|
|
497
|
+
if not file_path:
|
|
498
|
+
return {"error": "file_path required"}
|
|
499
|
+
|
|
500
|
+
target = self.project_root / file_path
|
|
501
|
+
|
|
502
|
+
if not target.exists():
|
|
503
|
+
return {"exists": False, "file_path": file_path}
|
|
504
|
+
|
|
505
|
+
try:
|
|
506
|
+
stat = target.stat()
|
|
507
|
+
return {
|
|
508
|
+
"exists": True,
|
|
509
|
+
"file_path": file_path,
|
|
510
|
+
"size": stat.st_size,
|
|
511
|
+
"modified": stat.st_mtime,
|
|
512
|
+
"is_directory": target.is_dir(),
|
|
513
|
+
"is_file": target.is_file(),
|
|
514
|
+
}
|
|
515
|
+
except Exception as e:
|
|
516
|
+
return {"error": str(e)}
|
|
517
|
+
|
|
518
|
+
def _write_file(self, args: dict) -> dict:
|
|
519
|
+
"""Write content to a file."""
|
|
520
|
+
file_path = args.get("file_path", "")
|
|
521
|
+
content = args.get("content", "")
|
|
522
|
+
|
|
523
|
+
if not file_path:
|
|
524
|
+
return {"error": "file_path required"}
|
|
525
|
+
|
|
526
|
+
target = self.project_root / file_path
|
|
527
|
+
|
|
528
|
+
try:
|
|
529
|
+
# Create parent directories
|
|
530
|
+
target.parent.mkdir(parents=True, exist_ok=True)
|
|
531
|
+
|
|
532
|
+
# Check if creating or updating
|
|
533
|
+
created = not target.exists()
|
|
534
|
+
|
|
535
|
+
# Write content
|
|
536
|
+
target.write_text(content, encoding="utf-8")
|
|
537
|
+
|
|
538
|
+
lines_written = content.count("\n") + (1 if content and not content.endswith("\n") else 0)
|
|
539
|
+
|
|
540
|
+
return {
|
|
541
|
+
"success": True,
|
|
542
|
+
"file_path": file_path,
|
|
543
|
+
"lines_written": lines_written,
|
|
544
|
+
"created": created,
|
|
545
|
+
}
|
|
546
|
+
except Exception as e:
|
|
547
|
+
return {"error": str(e), "success": False}
|
|
548
|
+
|
|
549
|
+
def _edit_file(self, args: dict) -> dict:
|
|
550
|
+
"""Edit a file by replacing text."""
|
|
551
|
+
file_path = args.get("file_path", "")
|
|
552
|
+
search = args.get("search", "")
|
|
553
|
+
replace = args.get("replace", "")
|
|
554
|
+
|
|
555
|
+
if not file_path:
|
|
556
|
+
return {"error": "file_path required"}
|
|
557
|
+
if not search:
|
|
558
|
+
return {"error": "search text required"}
|
|
559
|
+
|
|
560
|
+
# Pre-flight validation: Reject no-op edits (search === replace)
|
|
561
|
+
if search.strip() == replace.strip():
|
|
562
|
+
return {
|
|
563
|
+
"error": "STAGNATION ERROR: You attempted to replace text with identical text. "
|
|
564
|
+
"The file has NOT changed. This indicates a logic loop. "
|
|
565
|
+
"Please re-read the file to see its CURRENT state, "
|
|
566
|
+
"or provide your final_answer if the task is complete.",
|
|
567
|
+
"success": False,
|
|
568
|
+
"stagnation": True,
|
|
569
|
+
}
|
|
570
|
+
|
|
571
|
+
target = self.project_root / file_path
|
|
572
|
+
|
|
573
|
+
if not target.exists():
|
|
574
|
+
return {"error": f"File not found: {file_path}", "success": False}
|
|
575
|
+
|
|
576
|
+
try:
|
|
577
|
+
content = target.read_text(encoding="utf-8")
|
|
578
|
+
|
|
579
|
+
if search not in content:
|
|
580
|
+
return {
|
|
581
|
+
"error": f"Search text not found in {file_path}. "
|
|
582
|
+
"The file may have already been modified. "
|
|
583
|
+
"Use read_file to see the current content.",
|
|
584
|
+
"success": False,
|
|
585
|
+
"hint": "Make sure search text matches exactly including whitespace",
|
|
586
|
+
}
|
|
587
|
+
|
|
588
|
+
# Count occurrences and replace
|
|
589
|
+
count = content.count(search)
|
|
590
|
+
new_content = content.replace(search, replace)
|
|
591
|
+
|
|
592
|
+
target.write_text(new_content, encoding="utf-8")
|
|
593
|
+
|
|
594
|
+
return {
|
|
595
|
+
"success": True,
|
|
596
|
+
"file_path": file_path,
|
|
597
|
+
"replacements": count,
|
|
598
|
+
}
|
|
599
|
+
except Exception as e:
|
|
600
|
+
return {"error": str(e), "success": False}
|
|
601
|
+
|
|
602
|
+
def _delete_file(self, args: dict) -> dict:
|
|
603
|
+
"""Delete a file."""
|
|
604
|
+
file_path = args.get("file_path", "")
|
|
605
|
+
|
|
606
|
+
if not file_path:
|
|
607
|
+
return {"error": "file_path required"}
|
|
608
|
+
|
|
609
|
+
target = self.project_root / file_path
|
|
610
|
+
|
|
611
|
+
if not target.exists():
|
|
612
|
+
return {"error": f"File not found: {file_path}", "success": False}
|
|
613
|
+
|
|
614
|
+
try:
|
|
615
|
+
if target.is_dir():
|
|
616
|
+
import shutil
|
|
617
|
+
shutil.rmtree(target)
|
|
618
|
+
else:
|
|
619
|
+
target.unlink()
|
|
620
|
+
|
|
621
|
+
return {"success": True, "file_path": file_path}
|
|
622
|
+
except Exception as e:
|
|
623
|
+
return {"error": str(e), "success": False}
|
|
624
|
+
|
|
625
|
+
def _shell(self, args: dict) -> dict:
|
|
626
|
+
"""Execute a shell command with interruptibility support."""
|
|
627
|
+
command = args.get("command", "")
|
|
628
|
+
cwd = args.get("cwd") or "."
|
|
629
|
+
timeout = args.get("timeout", 60)
|
|
630
|
+
|
|
631
|
+
if not command:
|
|
632
|
+
return {"error": "command required"}
|
|
633
|
+
|
|
634
|
+
working_dir = self.project_root / cwd
|
|
635
|
+
|
|
636
|
+
try:
|
|
637
|
+
# Use Popen for interruptibility
|
|
638
|
+
process = subprocess.Popen(
|
|
639
|
+
command,
|
|
640
|
+
shell=True,
|
|
641
|
+
cwd=working_dir,
|
|
642
|
+
stdout=subprocess.PIPE,
|
|
643
|
+
stderr=subprocess.PIPE,
|
|
644
|
+
text=True,
|
|
645
|
+
)
|
|
646
|
+
|
|
647
|
+
# Register process for potential cancellation
|
|
648
|
+
self._set_process(process)
|
|
649
|
+
|
|
650
|
+
# Poll with timeout, checking cancellation
|
|
651
|
+
stdout_parts = []
|
|
652
|
+
stderr_parts = []
|
|
653
|
+
start_time = time.time()
|
|
654
|
+
|
|
655
|
+
while True:
|
|
656
|
+
# Check if cancelled
|
|
657
|
+
if self._is_cancelled():
|
|
658
|
+
process.terminate()
|
|
659
|
+
try:
|
|
660
|
+
process.wait(timeout=2)
|
|
661
|
+
except subprocess.TimeoutExpired:
|
|
662
|
+
process.kill()
|
|
663
|
+
return {"error": "Cancelled by user", "exit_code": -1, "cancelled": True}
|
|
664
|
+
|
|
665
|
+
# Check if process finished
|
|
666
|
+
retcode = process.poll()
|
|
667
|
+
if retcode is not None:
|
|
668
|
+
# Process finished - read remaining output
|
|
669
|
+
stdout, stderr = process.communicate()
|
|
670
|
+
stdout_parts.append(stdout)
|
|
671
|
+
stderr_parts.append(stderr)
|
|
672
|
+
break
|
|
673
|
+
|
|
674
|
+
# Check timeout
|
|
675
|
+
elapsed = time.time() - start_time
|
|
676
|
+
if elapsed > timeout:
|
|
677
|
+
process.terminate()
|
|
678
|
+
try:
|
|
679
|
+
process.wait(timeout=2)
|
|
680
|
+
except subprocess.TimeoutExpired:
|
|
681
|
+
process.kill()
|
|
682
|
+
return {"error": f"Command timed out after {timeout}s", "exit_code": -1}
|
|
683
|
+
|
|
684
|
+
# Wait a bit before next poll
|
|
685
|
+
time.sleep(0.1)
|
|
686
|
+
|
|
687
|
+
# Clear process reference
|
|
688
|
+
self._set_process(None)
|
|
689
|
+
|
|
690
|
+
stdout_full = "".join(stdout_parts)
|
|
691
|
+
stderr_full = "".join(stderr_parts)
|
|
692
|
+
|
|
693
|
+
return {
|
|
694
|
+
"exit_code": retcode,
|
|
695
|
+
"stdout": stdout_full[:5000] if stdout_full else "",
|
|
696
|
+
"stderr": stderr_full[:2000] if stderr_full else "",
|
|
697
|
+
}
|
|
698
|
+
|
|
699
|
+
except Exception as e:
|
|
700
|
+
return {"error": str(e), "exit_code": -1}
|
|
701
|
+
|
|
702
|
+
def _should_ignore(self, name: str) -> bool:
|
|
703
|
+
"""Check if file/directory should be ignored."""
|
|
704
|
+
for pattern in self.IGNORE_PATTERNS:
|
|
705
|
+
if fnmatch.fnmatch(name, pattern):
|
|
706
|
+
return True
|
|
707
|
+
return False
|
|
708
|
+
|
|
709
|
+
# ========================================================================
|
|
710
|
+
# Validation Tools
|
|
711
|
+
# ========================================================================
|
|
712
|
+
|
|
713
|
+
def _validate_file(self, args: dict) -> dict:
|
|
714
|
+
"""
|
|
715
|
+
Validate that a file exists and contains expected patterns.
|
|
716
|
+
|
|
717
|
+
Args:
|
|
718
|
+
path: Path to file to validate
|
|
719
|
+
patterns: List of patterns that should exist in the file
|
|
720
|
+
"""
|
|
721
|
+
path = args.get("path", "")
|
|
722
|
+
patterns = args.get("patterns", [])
|
|
723
|
+
|
|
724
|
+
if not path:
|
|
725
|
+
return {"error": "path required", "valid": False}
|
|
726
|
+
|
|
727
|
+
target = self.project_root / path
|
|
728
|
+
|
|
729
|
+
# Check file exists
|
|
730
|
+
if not target.exists():
|
|
731
|
+
return {
|
|
732
|
+
"valid": False,
|
|
733
|
+
"exists": False,
|
|
734
|
+
"path": path,
|
|
735
|
+
"message": f"File not found: {path}",
|
|
736
|
+
}
|
|
737
|
+
|
|
738
|
+
if not target.is_file():
|
|
739
|
+
return {
|
|
740
|
+
"valid": False,
|
|
741
|
+
"exists": True,
|
|
742
|
+
"is_file": False,
|
|
743
|
+
"path": path,
|
|
744
|
+
"message": f"Path is not a file: {path}",
|
|
745
|
+
}
|
|
746
|
+
|
|
747
|
+
# If no patterns, just confirm existence
|
|
748
|
+
if not patterns:
|
|
749
|
+
return {
|
|
750
|
+
"valid": True,
|
|
751
|
+
"exists": True,
|
|
752
|
+
"path": path,
|
|
753
|
+
"message": f"File exists: {path}",
|
|
754
|
+
}
|
|
755
|
+
|
|
756
|
+
# Check for patterns in content
|
|
757
|
+
try:
|
|
758
|
+
content = target.read_text(encoding="utf-8", errors="replace")
|
|
759
|
+
found_patterns = []
|
|
760
|
+
missing_patterns = []
|
|
761
|
+
|
|
762
|
+
for pattern in patterns:
|
|
763
|
+
if pattern in content:
|
|
764
|
+
found_patterns.append(pattern)
|
|
765
|
+
else:
|
|
766
|
+
missing_patterns.append(pattern)
|
|
767
|
+
|
|
768
|
+
valid = len(missing_patterns) == 0
|
|
769
|
+
|
|
770
|
+
return {
|
|
771
|
+
"valid": valid,
|
|
772
|
+
"exists": True,
|
|
773
|
+
"path": path,
|
|
774
|
+
"found_patterns": found_patterns,
|
|
775
|
+
"missing_patterns": missing_patterns,
|
|
776
|
+
"message": "All patterns found" if valid else f"Missing patterns: {missing_patterns}",
|
|
777
|
+
}
|
|
778
|
+
except Exception as e:
|
|
779
|
+
return {
|
|
780
|
+
"valid": False,
|
|
781
|
+
"exists": True,
|
|
782
|
+
"path": path,
|
|
783
|
+
"error": str(e),
|
|
784
|
+
}
|
|
785
|
+
|
|
786
|
+
def _validate_build(self, args: dict) -> dict:
|
|
787
|
+
"""
|
|
788
|
+
Run a build/compile command and check for success.
|
|
789
|
+
|
|
790
|
+
Args:
|
|
791
|
+
command: Build command to run (e.g., "npm run build", "cargo build")
|
|
792
|
+
timeout: Command timeout in seconds (default 120)
|
|
793
|
+
"""
|
|
794
|
+
command = args.get("command", "")
|
|
795
|
+
timeout = args.get("timeout", 120)
|
|
796
|
+
|
|
797
|
+
if not command:
|
|
798
|
+
return {"error": "command required", "valid": False}
|
|
799
|
+
|
|
800
|
+
try:
|
|
801
|
+
result = subprocess.run(
|
|
802
|
+
command,
|
|
803
|
+
shell=True,
|
|
804
|
+
cwd=self.project_root,
|
|
805
|
+
capture_output=True,
|
|
806
|
+
text=True,
|
|
807
|
+
timeout=timeout,
|
|
808
|
+
)
|
|
809
|
+
|
|
810
|
+
success = result.returncode == 0
|
|
811
|
+
|
|
812
|
+
return {
|
|
813
|
+
"valid": success,
|
|
814
|
+
"exit_code": result.returncode,
|
|
815
|
+
"command": command,
|
|
816
|
+
"stdout": result.stdout[:3000] if result.stdout else "",
|
|
817
|
+
"stderr": result.stderr[:2000] if result.stderr else "",
|
|
818
|
+
"message": "Build passed" if success else f"Build failed with exit code {result.returncode}",
|
|
819
|
+
}
|
|
820
|
+
except subprocess.TimeoutExpired:
|
|
821
|
+
return {
|
|
822
|
+
"valid": False,
|
|
823
|
+
"exit_code": -1,
|
|
824
|
+
"command": command,
|
|
825
|
+
"message": f"Build timed out after {timeout}s",
|
|
826
|
+
}
|
|
827
|
+
except Exception as e:
|
|
828
|
+
return {
|
|
829
|
+
"valid": False,
|
|
830
|
+
"exit_code": -1,
|
|
831
|
+
"command": command,
|
|
832
|
+
"error": str(e),
|
|
833
|
+
}
|
|
834
|
+
|
|
835
|
+
def _validate_structure(self, args: dict) -> dict:
|
|
836
|
+
"""
|
|
837
|
+
Validate that expected files exist in the project.
|
|
838
|
+
|
|
839
|
+
Args:
|
|
840
|
+
expected_files: List of file paths that should exist
|
|
841
|
+
base_path: Base directory to check from (default ".")
|
|
842
|
+
"""
|
|
843
|
+
expected_files = args.get("expected_files", [])
|
|
844
|
+
base_path = args.get("base_path", ".")
|
|
845
|
+
|
|
846
|
+
if not expected_files:
|
|
847
|
+
return {"error": "expected_files required", "valid": False}
|
|
848
|
+
|
|
849
|
+
base = self.project_root / base_path
|
|
850
|
+
|
|
851
|
+
found_files = []
|
|
852
|
+
missing_files = []
|
|
853
|
+
|
|
854
|
+
for file_path in expected_files:
|
|
855
|
+
target = base / file_path
|
|
856
|
+
if target.exists():
|
|
857
|
+
found_files.append(file_path)
|
|
858
|
+
else:
|
|
859
|
+
missing_files.append(file_path)
|
|
860
|
+
|
|
861
|
+
valid = len(missing_files) == 0
|
|
862
|
+
|
|
863
|
+
return {
|
|
864
|
+
"valid": valid,
|
|
865
|
+
"found_files": found_files,
|
|
866
|
+
"missing_files": missing_files,
|
|
867
|
+
"total_expected": len(expected_files),
|
|
868
|
+
"total_found": len(found_files),
|
|
869
|
+
"message": "All expected files found" if valid else f"Missing files: {missing_files}",
|
|
870
|
+
}
|
|
871
|
+
|
|
872
|
+
def _lint_check(self, args: dict) -> dict:
|
|
873
|
+
"""
|
|
874
|
+
Run a linter to check code quality.
|
|
875
|
+
|
|
876
|
+
Args:
|
|
877
|
+
command: Lint command (auto-detected if empty)
|
|
878
|
+
file_path: Specific file to lint (optional)
|
|
879
|
+
"""
|
|
880
|
+
command = args.get("command", "")
|
|
881
|
+
file_path = args.get("file_path", "")
|
|
882
|
+
|
|
883
|
+
# Auto-detect lint command based on project type
|
|
884
|
+
if not command:
|
|
885
|
+
command = self._detect_lint_command()
|
|
886
|
+
if not command:
|
|
887
|
+
return {
|
|
888
|
+
"valid": True,
|
|
889
|
+
"skipped": True,
|
|
890
|
+
"message": "No linter detected for this project type",
|
|
891
|
+
}
|
|
892
|
+
|
|
893
|
+
# Add specific file to command if provided
|
|
894
|
+
if file_path:
|
|
895
|
+
command = f"{command} {file_path}"
|
|
896
|
+
|
|
897
|
+
try:
|
|
898
|
+
result = subprocess.run(
|
|
899
|
+
command,
|
|
900
|
+
shell=True,
|
|
901
|
+
cwd=self.project_root,
|
|
902
|
+
capture_output=True,
|
|
903
|
+
text=True,
|
|
904
|
+
timeout=60,
|
|
905
|
+
)
|
|
906
|
+
|
|
907
|
+
# Most linters return 0 for clean code
|
|
908
|
+
success = result.returncode == 0
|
|
909
|
+
|
|
910
|
+
return {
|
|
911
|
+
"valid": success,
|
|
912
|
+
"exit_code": result.returncode,
|
|
913
|
+
"command": command,
|
|
914
|
+
"stdout": result.stdout[:3000] if result.stdout else "",
|
|
915
|
+
"stderr": result.stderr[:2000] if result.stderr else "",
|
|
916
|
+
"message": "Lint passed" if success else "Lint errors found",
|
|
917
|
+
}
|
|
918
|
+
except subprocess.TimeoutExpired:
|
|
919
|
+
return {
|
|
920
|
+
"valid": False,
|
|
921
|
+
"exit_code": -1,
|
|
922
|
+
"command": command,
|
|
923
|
+
"message": "Lint command timed out",
|
|
924
|
+
}
|
|
925
|
+
except Exception as e:
|
|
926
|
+
return {
|
|
927
|
+
"valid": False,
|
|
928
|
+
"exit_code": -1,
|
|
929
|
+
"command": command,
|
|
930
|
+
"error": str(e),
|
|
931
|
+
}
|
|
932
|
+
|
|
933
|
+
def _detect_lint_command(self) -> str:
|
|
934
|
+
"""Auto-detect the appropriate lint command for the project."""
|
|
935
|
+
# Check for Node.js project
|
|
936
|
+
package_json = self.project_root / "package.json"
|
|
937
|
+
if package_json.exists():
|
|
938
|
+
try:
|
|
939
|
+
import json
|
|
940
|
+
with open(package_json) as f:
|
|
941
|
+
pkg = json.load(f)
|
|
942
|
+
scripts = pkg.get("scripts", {})
|
|
943
|
+
if "lint" in scripts:
|
|
944
|
+
return "npm run lint"
|
|
945
|
+
if "eslint" in scripts:
|
|
946
|
+
return "npm run eslint"
|
|
947
|
+
except Exception:
|
|
948
|
+
pass
|
|
949
|
+
# Check for eslint config
|
|
950
|
+
eslint_files = ["eslint.config.js", ".eslintrc", ".eslintrc.js", ".eslintrc.json"]
|
|
951
|
+
for f in eslint_files:
|
|
952
|
+
if (self.project_root / f).exists():
|
|
953
|
+
return "npx eslint ."
|
|
954
|
+
|
|
955
|
+
# Check for Python project
|
|
956
|
+
pyproject = self.project_root / "pyproject.toml"
|
|
957
|
+
if pyproject.exists():
|
|
958
|
+
# Check for ruff or flake8 in pyproject.toml
|
|
959
|
+
try:
|
|
960
|
+
content = pyproject.read_text()
|
|
961
|
+
if "ruff" in content:
|
|
962
|
+
return "ruff check ."
|
|
963
|
+
if "flake8" in content:
|
|
964
|
+
return "flake8 ."
|
|
965
|
+
except Exception:
|
|
966
|
+
pass
|
|
967
|
+
|
|
968
|
+
# Check for Rust project
|
|
969
|
+
if (self.project_root / "Cargo.toml").exists():
|
|
970
|
+
return "cargo clippy"
|
|
971
|
+
|
|
972
|
+
# Check for Go project
|
|
973
|
+
if (self.project_root / "go.mod").exists():
|
|
974
|
+
return "go vet ./..."
|
|
975
|
+
|
|
976
|
+
return ""
|
|
977
|
+
|
|
978
|
+
|
|
979
|
+
class TarangStreamClient:
    """
    SSE + REST callback client for Tarang backend.

    Usage:
        client = TarangStreamClient(
            base_url="https://backend.example.com",
            token="...",
            openrouter_key="...",
            project_root="/path/to/project",
        )

        async for event in client.execute(instruction, context):
            if event.type == EventType.CHANGE:
                change = FileChange.from_dict(event.data)
                # Apply change locally
    """

    # Fallback backend endpoint used when no base_url is supplied.
    DEFAULT_BASE_URL = "https://tarang-backend-intl-web-app-production.up.railway.app"

    def __init__(
        self,
        base_url: Optional[str] = None,
        token: Optional[str] = None,
        openrouter_key: Optional[str] = None,
        project_root: Optional[str] = None,
        timeout: float = 300.0,  # 5 minutes for long operations
        on_tool_execute: Optional[Callable[[str, dict], dict]] = None,
        verbose: bool = False,
        on_input_start: Optional[Callable[[], None]] = None,
        on_input_end: Optional[Callable[[], None]] = None,
    ):
        """
        Args:
            base_url: Backend base URL; trailing slashes are stripped. Falls
                back to DEFAULT_BASE_URL.
            token: Bearer token sent in the Authorization header.
            openrouter_key: Key forwarded via the X-OpenRouter-Key header.
            project_root: Directory local tools execute in; defaults to CWD.
            timeout: httpx client timeout (seconds) for the SSE request.
            on_tool_execute: Optional override for local tool execution,
                called as fn(tool_name, args) -> result dict. When omitted,
                a LocalToolExecutor bound to project_root is used.
            verbose: Passed through to the output formatter.
            on_input_start: Hook invoked before interactive approval prompts
                (used to pause a keyboard monitor).
            on_input_end: Hook invoked after interactive approval prompts.
        """
        self.base_url = (base_url or self.DEFAULT_BASE_URL).rstrip("/")
        self.token = token
        self.openrouter_key = openrouter_key
        self.project_root = project_root or os.getcwd()
        self.timeout = timeout
        self.verbose = verbose
        # Task ID assigned by the backend (from the X-Task-ID response
        # header); used for callbacks and cancellation.
        self.current_task_id: Optional[str] = None

        # Callbacks for pausing keyboard monitor during prompts
        self._on_input_start = on_input_start or (lambda: None)
        self._on_input_end = on_input_end or (lambda: None)

        # Cancellation flag - checked by execute loop
        self._cancelled = False
        # Current shell process - can be interrupted
        self._shell_process: Optional[subprocess.Popen] = None

        # Rich output formatter for consistent display
        self.console = Console()
        self.formatter = OutputFormatter(self.console, verbose=verbose)

        # Session-level approval settings
        self._approve_all = False  # Approve all operations for this session
        self._approved_tools: set = set()  # Approved tool types (e.g., "write_file", "edit_file")

        # Tool executor - can be overridden
        if on_tool_execute:
            self._execute_tool = on_tool_execute
        else:
            self._tool_executor = LocalToolExecutor(
                self.project_root,
                is_cancelled=lambda: self._cancelled,
                set_process=self._set_shell_process,
            )
            self._execute_tool = self._tool_executor.execute

    def _set_shell_process(self, process: Optional[subprocess.Popen]):
        """Track current shell process for potential cancellation."""
        self._shell_process = process

    async def execute(
        self,
        instruction: str,
        context: ProjectContext,
        model: Optional[str] = None,
    ) -> AsyncGenerator[StreamEvent, None]:
        """
        Execute instruction with SSE streaming and REST callbacks.

        Tool-request events are intercepted and handled internally (executed
        locally, result POSTed back to the backend); all other events are
        yielded to the caller. Missing credentials and HTTP/stream failures
        are surfaced as ERROR events rather than raised.

        Args:
            instruction: User instruction
            context: Project context collected locally
            model: Optional model override

        Yields:
            StreamEvent objects
        """
        # Reset cancellation flag for new execution
        self._cancelled = False

        if not self.token:
            yield StreamEvent(
                type=EventType.ERROR,
                data={"message": "Not authenticated. Run 'tarang login' first."},
            )
            return

        if not self.openrouter_key:
            yield StreamEvent(
                type=EventType.ERROR,
                data={"message": "OpenRouter key not set. Run 'tarang config --openrouter-key KEY'"},
            )
            return

        url = f"{self.base_url}/api/execute"

        headers = {
            "Authorization": f"Bearer {self.token}",
            "X-OpenRouter-Key": self.openrouter_key,
            "Accept": "text/event-stream",
            "Content-Type": "application/json",
        }

        body = {
            "instruction": instruction,
            "context": context.to_dict(),
        }
        if model:
            body["model"] = model

        async with httpx.AsyncClient(timeout=self.timeout) as client:
            try:
                async with client.stream(
                    "POST",
                    url,
                    headers=headers,
                    json=body,
                ) as response:
                    if response.status_code == 401:
                        yield StreamEvent(
                            type=EventType.ERROR,
                            data={"message": "Authentication failed. Run 'tarang login' again."},
                        )
                        return

                    if response.status_code != 200:
                        # Drain the (non-streamed) error body for the message.
                        text = await response.aread()
                        yield StreamEvent(
                            type=EventType.ERROR,
                            data={"message": f"Request failed: {response.status_code} - {text.decode()}"},
                        )
                        return

                    # Get task ID from header
                    self.current_task_id = response.headers.get("X-Task-ID")

                    # Parse SSE stream: accumulate "event:"/"data:" lines
                    # until a blank line terminates the event.
                    current_event = None
                    current_data = []

                    async for line in response.aiter_lines():
                        # Check cancellation flag
                        if self._cancelled:
                            yield StreamEvent(
                                type=EventType.STATUS,
                                data={"message": "Cancelled", "cancelled": True},
                            )
                            return

                        line = line.strip()

                        if not line:
                            # Empty line = end of event
                            if current_event and current_data:
                                # Multi-line data fields are rejoined with
                                # newlines per the SSE convention.
                                data = "\n".join(current_data)
                                event = StreamEvent.from_sse(current_event, data)

                                # Handle tool requests (both legacy and new event names)
                                if event.type in (EventType.TOOL_REQUEST, EventType.TOOL_CALL):
                                    await self._handle_tool_request(client, event.data)
                                else:
                                    yield event

                            current_event = None
                            current_data = []
                            continue

                        if line.startswith("event:"):
                            current_event = line[6:].strip()
                        elif line.startswith("data:"):
                            # NOTE(review): .strip() trims all surrounding
                            # whitespace from the data payload, not just the
                            # single leading space the SSE spec allows —
                            # assumed safe for JSON payloads; confirm the
                            # backend never sends whitespace-significant data.
                            current_data.append(line[5:].strip())

                    # Handle final event if no trailing newline
                    if current_event and current_data:
                        data = "\n".join(current_data)
                        event = StreamEvent.from_sse(current_event, data)
                        if event.type in (EventType.TOOL_REQUEST, EventType.TOOL_CALL):
                            await self._handle_tool_request(client, event.data)
                        else:
                            yield event

            except httpx.TimeoutException:
                yield StreamEvent(
                    type=EventType.ERROR,
                    data={"message": "Request timed out. Try a simpler instruction."},
                )
            except httpx.ConnectError as e:
                yield StreamEvent(
                    type=EventType.ERROR,
                    data={"message": f"Connection failed: {e}"},
                )
            except Exception as e:
                logger.exception("Stream error")
                yield StreamEvent(
                    type=EventType.ERROR,
                    data={"message": f"Stream error: {e}"},
                )

    async def _handle_tool_request(self, client: httpx.AsyncClient, data: dict) -> None:
        """Execute tool locally and send result via callback.

        Flow: optionally prompt the user for approval (honoring session-wide
        and per-tool auto-approvals), run the tool, then POST the result to
        /api/callback so the backend can resume. Rejection ('n') short-circuits
        with a skipped result; EOF/Ctrl-C during the prompt sends nothing.
        """
        # Support both old (request_id) and new (call_id) formats
        call_id = data.get("call_id") or data.get("request_id", "")
        tool = data.get("tool", "")
        args = data.get("args", {})
        require_approval = data.get("require_approval", False)
        description = data.get("description", "")

        logger.info(f"[LOCAL] Executing tool: {tool} with args: {args} in {self.project_root}")

        # Show progress indicator for read-only tools in compact mode
        if not require_approval:
            self.formatter.show_tool_progress(tool, args)

        # Show tool request with Rich formatting (full preview for write operations)
        self.formatter.show_tool_request(tool, args, require_approval, description)

        if require_approval:
            # Check if already approved for session or tool type
            if self._approve_all or tool in self._approved_tools:
                self.formatter.show_approval_status("auto_approved")
            else:
                # Pause keyboard monitor for clean input
                self._on_input_start()
                try:
                    response = self.formatter.show_approval_prompt(tool, args)

                    if response == 'v':
                        # Show full content/command
                        self.formatter.show_view_content(tool, args)
                        # Re-prompt after viewing; 'v' is not offered again.
                        response = self.formatter.show_approval_prompt(tool, args, "Y/n/a(ll)/t(ool)")

                    if response == 'a':
                        # Approve all for this session
                        self._approve_all = True
                        self.formatter.show_approval_status("approved_all")
                    elif response == 't':
                        # Approve all of this tool type
                        self._approved_tools.add(tool)
                        self.formatter.show_approval_status("approved_tool", tool)
                    elif response == 'n':
                        result = {"skipped": True, "message": "User rejected operation"}
                        self.formatter.show_approval_status("skipped")
                        # Send skipped result
                        callback_url = f"{self.base_url}/api/callback"
                        callback_body = {
                            "task_id": self.current_task_id,
                            "call_id": call_id,
                            "result": result,
                        }
                        # Best-effort: a failed callback here is ignored.
                        try:
                            await client.post(callback_url, json=callback_body, headers={"Authorization": f"Bearer {self.token}"})
                        except Exception:
                            pass
                        return
                except (EOFError, KeyboardInterrupt):
                    # Prompt aborted: no callback is sent at all.
                    self.formatter.show_approval_status("cancelled")
                    return
                finally:
                    # Resume keyboard monitor
                    self._on_input_end()

        # Track timing (after approval, measures execution + network round-trip)
        start_time = time.time()

        # Execute tool locally. Called without await — a synchronous call
        # that blocks the event loop for the duration of the tool run.
        result = self._execute_tool(tool, args)

        # Send result via callback
        callback_url = f"{self.base_url}/api/callback"
        callback_body = {
            "task_id": self.current_task_id,
            "call_id": call_id,
            "result": result,
        }

        logger.info(f"[LOCAL] Sending callback to {callback_url} for task {self.current_task_id}")

        callback_ok = False
        try:
            resp = await client.post(
                callback_url,
                json=callback_body,
                headers={"Authorization": f"Bearer {self.token}"},
            )
            if resp.status_code != 200:
                logger.error(f"Callback failed: {resp.status_code} - {resp.text}")
            else:
                logger.info(f"[LOCAL] Callback sent successfully")
                callback_ok = True
        except Exception as e:
            logger.error(f"Callback error: {e}")

        # Calculate duration (from tool_call received to callback complete)
        duration_s = round(time.time() - start_time, 1)

        # Show result with Rich formatting (include full round-trip timing)
        self.formatter.show_tool_result(tool, args, result, duration_s)
        logger.info(f"[LOCAL] Tool result: {result.get('success', 'completed')} in {duration_s}s")

        if not callback_ok:
            self.formatter.show_callback_status(False, "callback failed")

    async def cancel(self) -> bool:
        """Cancel the current task immediately.

        Sets the local cancellation flag (which stops the execute loop),
        terminates any running shell process, then notifies the backend.
        Returns True unless the backend explicitly rejects the cancel.
        """
        # Set cancellation flag first - this breaks the execute loop
        self._cancelled = True

        # Kill any running shell process
        if self._shell_process and self._shell_process.poll() is None:
            try:
                # Graceful terminate first, with a short grace period.
                self._shell_process.terminate()
                self._shell_process.wait(timeout=2)
            except Exception:
                # Escalate to a hard kill if terminate/wait failed.
                try:
                    self._shell_process.kill()
                except Exception:
                    pass
            self._shell_process = None

        # Notify backend
        if not self.current_task_id:
            # Nothing to tell the backend; local cancellation is enough.
            return True

        url = f"{self.base_url}/api/cancel/{self.current_task_id}"

        async with httpx.AsyncClient(timeout=10.0) as client:
            try:
                resp = await client.post(
                    url,
                    headers={"Authorization": f"Bearer {self.token}"},
                )
                return resp.status_code == 200
            except Exception as e:
                logger.error(f"Cancel error: {e}")
                return True  # Still return True since we set the flag
|
|
1326
|
+
|
|
1327
|
+
|
|
1328
|
+
# Backward compatibility alias
|
|
1329
|
+
async def stream_execute(
    instruction: str,
    context: ProjectContext,
    token: str,
    openrouter_key: str,
    base_url: Optional[str] = None,
    model: Optional[str] = None,
    project_root: Optional[str] = None,
) -> AsyncGenerator[StreamEvent, None]:
    """Convenience wrapper: build a one-shot TarangStreamClient and relay its events."""
    stream_client = TarangStreamClient(
        base_url=base_url,
        token=token,
        openrouter_key=openrouter_key,
        project_root=project_root,
    )
    async for evt in stream_client.execute(instruction, context, model):
        yield evt
|