luckyd_code-1.2.2-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in those registries.
- luckyd_code/__init__.py +54 -0
- luckyd_code/__main__.py +5 -0
- luckyd_code/_agent_loop.py +551 -0
- luckyd_code/_data_dir.py +73 -0
- luckyd_code/agent.py +38 -0
- luckyd_code/analytics/__init__.py +18 -0
- luckyd_code/analytics/reporter.py +195 -0
- luckyd_code/analytics/scanner.py +443 -0
- luckyd_code/analytics/smells.py +316 -0
- luckyd_code/analytics/trends.py +303 -0
- luckyd_code/api.py +473 -0
- luckyd_code/audit_daemon.py +845 -0
- luckyd_code/autonomous_fixer.py +473 -0
- luckyd_code/background.py +159 -0
- luckyd_code/backup.py +237 -0
- luckyd_code/brain/__init__.py +84 -0
- luckyd_code/brain/assembler.py +100 -0
- luckyd_code/brain/chunker.py +345 -0
- luckyd_code/brain/constants.py +73 -0
- luckyd_code/brain/embedder.py +163 -0
- luckyd_code/brain/graph.py +311 -0
- luckyd_code/brain/indexer.py +316 -0
- luckyd_code/brain/parser.py +140 -0
- luckyd_code/brain/retriever.py +234 -0
- luckyd_code/cli.py +894 -0
- luckyd_code/cli_commands/__init__.py +1 -0
- luckyd_code/cli_commands/audit.py +120 -0
- luckyd_code/cli_commands/background.py +83 -0
- luckyd_code/cli_commands/brain.py +87 -0
- luckyd_code/cli_commands/config.py +75 -0
- luckyd_code/cli_commands/dispatcher.py +695 -0
- luckyd_code/cli_commands/sessions.py +41 -0
- luckyd_code/cli_entry.py +147 -0
- luckyd_code/cli_utils.py +112 -0
- luckyd_code/config.py +205 -0
- luckyd_code/context.py +214 -0
- luckyd_code/cost_tracker.py +209 -0
- luckyd_code/error_reporter.py +508 -0
- luckyd_code/exceptions.py +39 -0
- luckyd_code/export.py +126 -0
- luckyd_code/feedback_analyzer.py +290 -0
- luckyd_code/file_watcher.py +258 -0
- luckyd_code/git/__init__.py +11 -0
- luckyd_code/git/auto_commit.py +157 -0
- luckyd_code/git/tools.py +85 -0
- luckyd_code/hooks.py +236 -0
- luckyd_code/indexer.py +280 -0
- luckyd_code/init.py +39 -0
- luckyd_code/keybindings.py +77 -0
- luckyd_code/log.py +55 -0
- luckyd_code/mcp/__init__.py +6 -0
- luckyd_code/mcp/client.py +184 -0
- luckyd_code/memory/__init__.py +19 -0
- luckyd_code/memory/manager.py +339 -0
- luckyd_code/metrics/__init__.py +5 -0
- luckyd_code/model_registry.py +131 -0
- luckyd_code/orchestrator.py +204 -0
- luckyd_code/permissions/__init__.py +1 -0
- luckyd_code/permissions/manager.py +103 -0
- luckyd_code/planner.py +361 -0
- luckyd_code/plugins.py +91 -0
- luckyd_code/py.typed +0 -0
- luckyd_code/retry.py +57 -0
- luckyd_code/router.py +417 -0
- luckyd_code/sandbox.py +156 -0
- luckyd_code/self_critique.py +2 -0
- luckyd_code/self_improve.py +274 -0
- luckyd_code/sessions.py +114 -0
- luckyd_code/settings.py +72 -0
- luckyd_code/skills/__init__.py +8 -0
- luckyd_code/skills/review.py +22 -0
- luckyd_code/skills/security.py +17 -0
- luckyd_code/tasks/__init__.py +1 -0
- luckyd_code/tasks/manager.py +102 -0
- luckyd_code/templates/icon-192.png +0 -0
- luckyd_code/templates/icon-512.png +0 -0
- luckyd_code/templates/index.html +1965 -0
- luckyd_code/templates/manifest.json +14 -0
- luckyd_code/templates/src/app.js +694 -0
- luckyd_code/templates/src/body.html +767 -0
- luckyd_code/templates/src/cdn.txt +2 -0
- luckyd_code/templates/src/style.css +474 -0
- luckyd_code/templates/sw.js +31 -0
- luckyd_code/templates/test.html +6 -0
- luckyd_code/themes.py +48 -0
- luckyd_code/tools/__init__.py +97 -0
- luckyd_code/tools/agent_tools.py +65 -0
- luckyd_code/tools/bash.py +360 -0
- luckyd_code/tools/brain_tools.py +137 -0
- luckyd_code/tools/browser.py +369 -0
- luckyd_code/tools/datetime_tool.py +34 -0
- luckyd_code/tools/dockerfile_gen.py +212 -0
- luckyd_code/tools/file_ops.py +381 -0
- luckyd_code/tools/game_gen.py +360 -0
- luckyd_code/tools/git_tools.py +130 -0
- luckyd_code/tools/git_worktree.py +63 -0
- luckyd_code/tools/path_validate.py +64 -0
- luckyd_code/tools/project_gen.py +187 -0
- luckyd_code/tools/readme_gen.py +227 -0
- luckyd_code/tools/registry.py +157 -0
- luckyd_code/tools/shell_detect.py +109 -0
- luckyd_code/tools/web.py +89 -0
- luckyd_code/tools/youtube.py +187 -0
- luckyd_code/tools_bridge.py +144 -0
- luckyd_code/undo.py +126 -0
- luckyd_code/update.py +60 -0
- luckyd_code/verify.py +360 -0
- luckyd_code/web_app.py +176 -0
- luckyd_code/web_routes/__init__.py +23 -0
- luckyd_code/web_routes/background.py +73 -0
- luckyd_code/web_routes/brain.py +109 -0
- luckyd_code/web_routes/cost.py +12 -0
- luckyd_code/web_routes/files.py +133 -0
- luckyd_code/web_routes/memories.py +94 -0
- luckyd_code/web_routes/misc.py +67 -0
- luckyd_code/web_routes/project.py +48 -0
- luckyd_code/web_routes/review.py +20 -0
- luckyd_code/web_routes/sessions.py +44 -0
- luckyd_code/web_routes/settings.py +43 -0
- luckyd_code/web_routes/static.py +70 -0
- luckyd_code/web_routes/update.py +19 -0
- luckyd_code/web_routes/ws.py +237 -0
- luckyd_code-1.2.2.dist-info/METADATA +297 -0
- luckyd_code-1.2.2.dist-info/RECORD +127 -0
- luckyd_code-1.2.2.dist-info/WHEEL +4 -0
- luckyd_code-1.2.2.dist-info/entry_points.txt +3 -0
- luckyd_code-1.2.2.dist-info/licenses/LICENSE +21 -0
luckyd_code/brain/parser.py
@@ -0,0 +1,140 @@
+"""AST-based code parser — extracts functions, classes, imports, and relationships."""
+
+import ast
+import os
+from pathlib import Path
+from typing import Any, Optional
+
+from .constants import should_skip
+
+ParsedFile = dict[str, Any]
+ParseResult = tuple[list[ParsedFile], dict[str, tuple[float, int]]]
+
+
+def _extract_calls(node: ast.FunctionDef) -> list[str]:
+    calls: list[str] = []
+    for child in ast.walk(node):
+        if isinstance(child, ast.Call) and isinstance(child.func, ast.Name):
+            calls.append(child.func.id)
+        elif isinstance(child, ast.Call) and isinstance(child.func, ast.Attribute):
+            calls.append(child.func.attr)
+    return list(set(calls))
+
+
+def parse_file(filepath: Path) -> ParsedFile:
+    result: ParsedFile = {
+        "module": str(filepath),
+        "classes": [],
+        "functions": [],
+        "imports": [],
+        "errors": [],
+        "size": 0,
+    }
+
+    try:
+        source = filepath.read_text(encoding="utf-8", errors="replace")
+    except OSError as e:
+        result["errors"].append(str(e))
+        return result
+
+    result["size"] = len(source)
+
+    try:
+        tree = ast.parse(source, filename=str(filepath))
+    except SyntaxError as e:
+        result["errors"].append(f"SyntaxError: {e}")
+        return result
+
+    for node in ast.walk(tree):
+        if isinstance(node, ast.Import):
+            for alias in node.names:
+                result["imports"].append({
+                    "module": alias.name,
+                    "name": alias.asname or alias.name,
+                    "alias": alias.asname,
+                })
+        elif isinstance(node, ast.ImportFrom):
+            module = node.module or ""
+            for alias in node.names:
+                result["imports"].append({
+                    "module": module,
+                    "name": alias.name,
+                    "alias": alias.asname,
+                })
+
+        elif isinstance(node, ast.ClassDef):
+            cls_info: ParsedFile = {
+                "name": node.name,
+                "bases": [ast.dump(b) if isinstance(b, ast.Name) else "" for b in node.bases],
+                "base_names": [
+                    b.id if isinstance(b, ast.Name) else
+                    f"{b.value.id}.{b.attr}" if isinstance(b, ast.Attribute) and hasattr(b, 'value') and isinstance(b.value, ast.Name) else
+                    str(ast.dump(b))
+                    for b in node.bases
+                ],
+                "methods": [],
+                "decorators": [ast.dump(d) for d in node.decorator_list],
+                "docstring": ast.get_docstring(node) or "",
+                "line": node.lineno,
+                "end_line": node.end_lineno or node.lineno,
+            }
+
+            for child in ast.iter_child_nodes(node):
+                if isinstance(child, ast.FunctionDef):
+                    method_info: ParsedFile = {
+                        "name": child.name,
+                        "decorators": [ast.dump(d) for d in child.decorator_list],
+                        "docstring": ast.get_docstring(child) or "",
+                        "line": child.lineno,
+                        "end_line": child.end_lineno or child.lineno,
+                        "calls": _extract_calls(child),
+                    }
+                    cls_info["methods"].append(method_info)
+
+            result["classes"].append(cls_info)
+
+        elif isinstance(node, ast.FunctionDef):
+            func_info: ParsedFile = {
+                "name": node.name,
+                "decorators": [ast.dump(d) for d in node.decorator_list],
+                "docstring": ast.get_docstring(node) or "",
+                "line": node.lineno,
+                "end_line": node.end_lineno or node.lineno,
+                "calls": _extract_calls(node),
+            }
+            result["functions"].append(func_info)
+
+    return result
+
+
+def parse_project(project_root: str, file_mtimes: Optional[dict[str, tuple[float, int]]] = None) -> ParseResult:
+    root = Path(project_root).resolve()
+    results: list[ParsedFile] = []
+    new_mtimes: dict[str, tuple[float, int]] = {}
+
+    for dirpath, dirnames, filenames in os.walk(root):
+        dirnames[:] = [d for d in dirnames if not should_skip(d)]
+
+        for fname in filenames:
+            if not fname.endswith(".py"):
+                continue
+
+            fpath = Path(dirpath) / fname
+            try:
+                st = fpath.stat()
+                mtime = st.st_mtime
+                size = st.st_size
+            except OSError:
+                continue
+
+            new_mtimes[str(fpath)] = (mtime, size)
+
+            if file_mtimes and str(fpath) in file_mtimes:
+                old_mtime, old_size = file_mtimes[str(fpath)]
+                if old_mtime == mtime and old_size == size:
+                    continue
+
+            parsed = parse_file(fpath)
+            results.append(parsed)
+
+    return results, new_mtimes
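A note on the incremental contract in parse_project above: it returns both the parsed files and a fresh {path: (mtime, size)} snapshot, and feeding that snapshot back into the next call skips every file whose mtime and size are unchanged. A minimal usage sketch (the paths and printed output here are illustrative, not part of the package):

    from pathlib import Path
    from luckyd_code.brain.parser import parse_file, parse_project

    # One-off parse of a single module; keys mirror the ParsedFile dict above.
    info = parse_file(Path("luckyd_code/brain/parser.py"))
    print(info["functions"])  # e.g. [{"name": "_extract_calls", "line": 14, ...}]

    # First pass parses everything and returns an mtime/size snapshot.
    parsed, mtimes = parse_project(".")

    # Second pass with the snapshot fed back in re-parses only files whose
    # (mtime, size) pair changed, so with no edits in between it returns [].
    parsed_again, mtimes = parse_project(".", file_mtimes=mtimes)
    assert parsed_again == []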
luckyd_code/brain/retriever.py
@@ -0,0 +1,234 @@
+"""Retriever — semantic search over code chunks with fallback to substring search."""
+
+import os
+from typing import Any, Optional
+
+from ..log import get_logger
+
+_RRF_K = 60  # standard RRF constant — higher = smoother rank blending
+
+
+class Retriever:
+    """Searches indexed code chunks semantically, with fallback to substring search.
+
+    Search strategy (in order of quality):
+    1. RRF merge of vector + BM25 when both are available (best quality)
+    2. Vector-only when BM25 unavailable
+    3. BM25-only when vector unavailable
+    4. Graph keyword fallback with token-overlap scoring
+    """
+
+    def __init__(self):
+        self._indexer = None
+        self._graph = None
+        self._bm25 = None
+        self._bm25_tokenized = None
+        self._bm25_chunk_count = 0
+
+    def _get_indexer(self):
+        if self._indexer is None:
+            from .indexer import VectorIndexer
+
+            idx = VectorIndexer()
+            idx.load()
+            self._indexer = idx
+        return self._indexer
+
+    def _get_graph(self):
+        if self._graph is None:
+            from .graph import KnowledgeGraph
+
+            g = KnowledgeGraph()
+            g.load()
+            self._graph = g
+        return self._graph
+
+    def search(
+        self,
+        query: str,
+        k: int = 10,
+        file_filter: Optional[str] = None,
+        min_score: float = 0.0,
+    ) -> list[dict[str, Any]]:
+        indexer = self._get_indexer()
+        vec_results: list[dict[str, Any]] = []
+        bm25_results: list[dict[str, Any]] = []
+
+        if indexer.is_available:
+            vec_results = indexer.search(query, k=k)
+            if file_filter:
+                vec_results = [
+                    r for r in vec_results
+                    if file_filter in r.get("file_path", "")
+                ]
+
+        bm25_results = self._bm25_search(query, k, file_filter)
+
+        # Best path: RRF merge when both sources have results
+        if vec_results and bm25_results:
+            merged = self._rrf_merge(vec_results, bm25_results, k=k)
+            if min_score > 0:
+                merged = [r for r in merged if r.get("score", 0) >= min_score]
+            if merged:
+                return merged
+
+        # Single-source fallback
+        for results in (vec_results, bm25_results):
+            if results:
+                if min_score > 0:
+                    results = [r for r in results if r.get("score", 0) >= min_score]
+                if results:
+                    return results
+
+        return self._fallback_search(query, k, file_filter)
+
+    def _rrf_merge(
+        self,
+        vec_results: list[dict[str, Any]],
+        bm25_results: list[dict[str, Any]],
+        k: int = 10,
+    ) -> list[dict[str, Any]]:
+        """Reciprocal Rank Fusion — combines two ranked lists without score normalisation.
+
+        Each chunk gets score = sum(1 / (_RRF_K + rank)) across lists it appears in.
+        Chunks that rank highly in BOTH lists bubble to the top.
+        """
+        rrf_scores: dict[str, float] = {}
+        chunk_by_id: dict[str, dict[str, Any]] = {}
+
+        for rank, chunk in enumerate(vec_results):
+            cid = chunk.get("chunk_id", chunk.get("file_path", str(rank)))
+            rrf_scores[cid] = rrf_scores.get(cid, 0.0) + 1.0 / (_RRF_K + rank + 1)
+            chunk_by_id[cid] = chunk
+
+        for rank, chunk in enumerate(bm25_results):
+            cid = chunk.get("chunk_id", chunk.get("file_path", str(rank)))
+            rrf_scores[cid] = rrf_scores.get(cid, 0.0) + 1.0 / (_RRF_K + rank + 1)
+            chunk_by_id.setdefault(cid, chunk)
+
+        sorted_ids = sorted(rrf_scores, key=lambda c: -rrf_scores[c])
+        results = []
+        for cid in sorted_ids[:k]:
+            chunk = dict(chunk_by_id[cid])
+            chunk["score"] = round(rrf_scores[cid], 6)
+            results.append(chunk)
+        return results
+
+    def _bm25_search(
+        self,
+        query: str,
+        k: int,
+        file_filter: Optional[str],
+    ) -> list[dict[str, Any]]:
+        try:
+            import rank_bm25
+        except ImportError:
+            return []
+
+        indexer = self._get_indexer()
+        if not indexer.chunks:
+            return []
+
+        try:
+            if self._bm25 is None or len(indexer.chunks) != self._bm25_chunk_count:
+                self._bm25_tokenized = [
+                    c.get("content", "").lower().split()
+                    for c in indexer.chunks
+                ]
+                self._bm25 = rank_bm25.BM25Okapi(self._bm25_tokenized)
+                self._bm25_chunk_count = len(indexer.chunks)
+
+            tokenized_query = query.lower().split()
+            bm25_scores = self._bm25.get_scores(tokenized_query)
+
+            scored: list[tuple[int, float]] = []
+            for i, (chunk, bm25_score) in enumerate(zip(indexer.chunks, bm25_scores)):
+                combined = float(bm25_score)
+                if combined > 0:
+                    scored.append((i, combined))
+
+            scored.sort(key=lambda x: -x[1])
+
+            results = []
+            for i, score in scored[:k]:
+                chunk = dict(indexer.chunks[i])
+                chunk["score"] = score
+                if file_filter and file_filter not in chunk.get("file_path", ""):
+                    continue
+                results.append(chunk)
+
+            return results
+        except Exception as exc:
+            get_logger().warning("BM25 search failed: %s", exc)
+            return []
+
+    def _fallback_search(
+        self,
+        query: str,
+        k: int,
+        file_filter: Optional[str],
+    ) -> list[dict[str, Any]]:
+        graph = self._get_graph()
+        if not graph.nodes:
+            return []
+
+        nodes = graph.search(query, max_results=k)
+        query_lower = query.lower()
+        query_tokens = set(query_lower.split())
+        results = []
+        for node in nodes:
+            if file_filter and file_filter not in node.get("file", ""):
+                continue
+            name = node.get("name", "").lower()
+            # Score by name overlap: exact > partial token > type-only
+            if name == query_lower:
+                score = 1.0
+            elif any(t in name or name in t for t in query_tokens if len(t) > 2):
+                score = 0.7
+            elif any(t in (node.get("type", "") or "").lower() for t in query_tokens):
+                score = 0.4
+            else:
+                score = 0.2
+            results.append({
+                "file_path": node.get("file", ""),
+                "chunk_id": f"{node.get('type', 'node')}:{node.get('name', '')}",
+                "start_line": node.get("line", 0),
+                "end_line": node.get("end_line", 0),
+                "type": node.get("type", "symbol"),
+                "name": node.get("name", ""),
+                "language": "python",
+                "content": f"{node.get('type', 'symbol')} {node.get('name', '')}",
+                "score": score,
+            })
+
+        results.sort(key=lambda r: -r["score"])
+        return results
+
+    def stats(self) -> dict[str, Any]:
+        indexer = self._get_indexer()
+        graph = self._get_graph()
+
+        info: dict[str, Any] = {
+            "vector": {
+                "available": indexer.is_available,
+                "chunks": indexer.stats.get("chunks", 0),
+                "files": indexer.stats.get("files", 0),
+                "languages": indexer.stats.get("languages", {}),
+                "last_indexed": indexer.stats.get("last_indexed", 0),
+            },
+            "graph": {
+                "nodes": graph.stats.get("node_count", 0),
+                "edges": graph.stats.get("edge_count", 0),
+                "files_parsed": graph.stats.get("files_parsed", 0),
+            },
+        }

+        if indexer.stats.get("last_indexed"):
+            try:
+                changed = len(indexer.get_changed_files(os.getcwd()))
+                if changed:
+                    info["stale_files"] = changed
+            except Exception:
+                get_logger().warning("Failed to check for changed files", exc_info=True)
+
+        return info
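To make the RRF arithmetic in _rrf_merge concrete: with _RRF_K = 60, a chunk ranked 1st by the vector index and 3rd by BM25 scores 1/(60+1) + 1/(60+3) ≈ 0.0323, while a chunk ranked 1st in only one list scores 1/61 ≈ 0.0164, so agreement between the two rankings roughly doubles a chunk's score. A self-contained sketch of the same fusion over toy chunk ids (it mirrors the method above rather than calling it):

    K = 60  # same constant as _RRF_K

    vec_ranked = ["chunk_a", "chunk_b", "chunk_c"]    # vector ranking, best first
    bm25_ranked = ["chunk_d", "chunk_e", "chunk_a"]   # BM25 ranking, best first

    scores: dict[str, float] = {}
    for ranking in (vec_ranked, bm25_ranked):
        for rank, cid in enumerate(ranking):
            # rank + 1 gives the 1-based rank, as in _rrf_merge
            scores[cid] = scores.get(cid, 0.0) + 1.0 / (K + rank + 1)

    for cid in sorted(scores, key=lambda c: -scores[c]):
        print(cid, round(scores[cid], 4))
    # chunk_a 0.0323  <- present in both lists, beats both single-list winners
    # chunk_d 0.0164
    # chunk_b 0.0161
    # chunk_e 0.0161
    # chunk_c 0.0159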