aja-codeintel 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aja_codeintel-0.1.0.dist-info/METADATA +436 -0
- aja_codeintel-0.1.0.dist-info/RECORD +68 -0
- aja_codeintel-0.1.0.dist-info/WHEEL +5 -0
- aja_codeintel-0.1.0.dist-info/entry_points.txt +3 -0
- aja_codeintel-0.1.0.dist-info/licenses/LICENSE +21 -0
- aja_codeintel-0.1.0.dist-info/top_level.txt +1 -0
- codeintel_cli/__init__.py +1 -0
- codeintel_cli/__main__.py +4 -0
- codeintel_cli/cli.py +41 -0
- codeintel_cli/commands/__init__.py +1 -0
- codeintel_cli/commands/graph/__init__.py +18 -0
- codeintel_cli/commands/graph/deps_cmd.py +35 -0
- codeintel_cli/commands/graph/related_cmd.py +121 -0
- codeintel_cli/commands/graph/relsymbols_cmd.py +347 -0
- codeintel_cli/commands/graph/reverse_related_cmd.py +54 -0
- codeintel_cli/commands/nav/__init__.py +12 -0
- codeintel_cli/commands/nav/copy_cmd.py +101 -0
- codeintel_cli/commands/nav/open_cmd.py +18 -0
- codeintel_cli/commands/nav/where_cmd.py +21 -0
- codeintel_cli/commands/project/__init__.py +26 -0
- codeintel_cli/commands/project/context_cmd.py +326 -0
- codeintel_cli/commands/project/folder_cmd.py +51 -0
- codeintel_cli/commands/project/imports_cmd.py +90 -0
- codeintel_cli/commands/project/models_cmd.py +98 -0
- codeintel_cli/commands/project/modeltree_cmd.py +476 -0
- codeintel_cli/commands/project/new.py +0 -0
- codeintel_cli/commands/project/resolve_cmd.py +29 -0
- codeintel_cli/commands/project/scan_cmd.py +51 -0
- codeintel_cli/commands/project/servicemap_cmd.py +180 -0
- codeintel_cli/commands/project/tree_cmd.py +203 -0
- codeintel_cli/commands/project/version_cmd.py +14 -0
- codeintel_cli/context/java_context.py +180 -0
- codeintel_cli/context/java_rel.py +299 -0
- codeintel_cli/context/java_service.py +291 -0
- codeintel_cli/context/python_context.py +91 -0
- codeintel_cli/context/python_rel.py +251 -0
- codeintel_cli/context/python_service.py +205 -0
- codeintel_cli/core/fuzzy.py +72 -0
- codeintel_cli/core/opener.py +37 -0
- codeintel_cli/core/project.py +34 -0
- codeintel_cli/core/resolve_folder.py +68 -0
- codeintel_cli/core/resolve_model_target.py +92 -0
- codeintel_cli/core/resolve_target.py +53 -0
- codeintel_cli/core/timing.py +13 -0
- codeintel_cli/core/where.py +77 -0
- codeintel_cli/db/__init__.py +7 -0
- codeintel_cli/db/cache.py +224 -0
- codeintel_cli/db/operations.py +333 -0
- codeintel_cli/db/schema.py +102 -0
- codeintel_cli/errors.py +78 -0
- codeintel_cli/graph/__init__.py +1 -0
- codeintel_cli/graph/builder.py +149 -0
- codeintel_cli/graph/query.py +30 -0
- codeintel_cli/graph/traverse.py +49 -0
- codeintel_cli/lang/__init__.py +0 -0
- codeintel_cli/lang/java/__init__.py +0 -0
- codeintel_cli/lang/java/engine.py +18 -0
- codeintel_cli/lang/java/models.py +105 -0
- codeintel_cli/lang/java/resolve.py +49 -0
- codeintel_cli/lang/python/__init__.py +0 -0
- codeintel_cli/lang/python/engine.py +8 -0
- codeintel_cli/lang/python/models.py +86 -0
- codeintel_cli/lang/router.py +24 -0
- codeintel_cli/parser/imports.py +26 -0
- codeintel_cli/parser/resolve.py +49 -0
- codeintel_cli/parser/symbols.py +92 -0
- codeintel_cli/scanner/__init__.py +0 -0
- codeintel_cli/scanner/scanner.py +41 -0
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from dataclasses import dataclass
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import ast
|
|
5
|
+
import re
|
|
6
|
+
|
|
7
|
+
from ..core.project import find_project_root
|
|
8
|
+
from ..scanner.scanner import find_all_supported_files
|
|
9
|
+
from ..lang.router import extract_models_for_file
|
|
10
|
+
|
|
11
|
+
@dataclass(frozen=True)
class PyRel:
    """A single inferred relationship edge between two Python model classes."""

    kind: str  # "OneToOne" | "OneToMany" | "ManyToOne" | "ManyToMany"
    target: str  # class name of the related model
    field: str  # name of the field on the source model that carries the relation
    via: str = ""  # how it was inferred: "type" (annotation) or "fk" (field-name suffix)
|
|
17
|
+
|
|
18
|
+
# Supported collection generics, capturing the element-type expression.
_COLLECTION_RE = re.compile(r"^(list|List|set|Set|tuple|Tuple|Sequence)\[(.+)\]$")
# Optional[X] / typing.Optional[X] wrappers, capturing X.
_OPTIONAL_RE = re.compile(r"^(Optional|typing\.Optional)\[(.*)\]$")
# Union[...] / typing.Union[...] wrappers, capturing the member list.
_UNION_RE = re.compile(r"^(Union|typing\.Union)\[(.*)\]$")
# PEP 604 optional forms: "X | None" or "None | X".
_PEP604_RE = re.compile(r"^(.+)\s*\|\s*None$|^None\s*\|\s*(.+)$")
# A quoted atom such as "User" or 'User' (forward references), capturing the text.
_QUOTED_ATOM_RE = re.compile(r'(["\'])([^"\']+)\1')
# Field-name suffixes that suggest a foreign-key column.
_FK_SUFFIXES = ("_id", "Id")
# Field names that usually denote an embedded/owned object -> treated as OneToOne.
_EMBED_HINTS = {"address", "profile", "detail", "details", "meta", "metadata", "settings", "config", "configuration"}
# Names that are never treated as model references, even when they pass the
# "known class name" check.
_BUILTIN_TYPES = {
    "str", "int", "float", "bool", "dict", "list", "tuple", "set", "Any", "None", "object", "bytes", "bytearray",
    "Decimal", "UUID", "datetime", "date", "time", "timedelta", "Path", "Optional", "Union", "Literal", "TypeVar",
    "frozenset", "complex", "type", "range", "slice", "memoryview", "property", "classmethod", "staticmethod"
}
|
|
30
|
+
|
|
31
|
+
def _ann_to_str(n: ast.AST | None) -> str:
|
|
32
|
+
if n is None:
|
|
33
|
+
return ""
|
|
34
|
+
try:
|
|
35
|
+
return ast.unparse(n)
|
|
36
|
+
except Exception:
|
|
37
|
+
if isinstance(n, ast.Name):
|
|
38
|
+
return n.id
|
|
39
|
+
if isinstance(n, ast.Constant) and isinstance(n.value, str):
|
|
40
|
+
return n.value
|
|
41
|
+
return ""
|
|
42
|
+
|
|
43
|
+
def _strip_typing(s: str) -> str:
|
|
44
|
+
t = (s or "").strip()
|
|
45
|
+
t = t.replace("collections.abc.", "")
|
|
46
|
+
return t.strip()
|
|
47
|
+
|
|
48
|
+
def _unwrap_optional(s: str) -> str:
    """Strip Optional/Union-with-None wrappers (and quotes) from a type string."""
    t = _QUOTED_ATOM_RE.sub(r"\2", _strip_typing(s))

    opt = _OPTIONAL_RE.match(t)
    if opt is not None:
        return opt.group(2).strip()

    union = _UNION_RE.match(t)
    if union is not None:
        members = [part.strip() for part in union.group(2).split(",")]
        members = [part for part in members if part and part != "None"]
        # Only unwrap when exactly one non-None member remains; otherwise fall through.
        if len(members) == 1:
            return members[0]

    pep604 = _PEP604_RE.match(t)
    if pep604 is not None:
        # Exactly one of the two alternation groups is populated ("X | None" vs "None | X").
        left = (pep604.group(1) or "").strip()
        right = (pep604.group(2) or "").strip()
        return (left or right).strip()

    return t
|
|
66
|
+
|
|
67
|
+
def _collection_inner(s: str) -> tuple[bool, str]:
    """Return (True, element-type) when *s* is a recognized collection generic, else (False, "")."""
    match = _COLLECTION_RE.match(_unwrap_optional(s))
    if match is None:
        return False, ""
    # The element type may itself be Optional-wrapped; unwrap it too.
    return True, _unwrap_optional(match.group(2).strip())
|
|
74
|
+
|
|
75
|
+
def _normalize_ref_name(type_str: str) -> str:
    """Reduce a type expression to a bare class name.

    Strips Optional wrappers, surrounding quotes, ForwardRef(...) wrappers,
    and dotted module paths (keeping the final attribute).
    """
    t = _unwrap_optional(type_str).strip()
    if not t:
        return ""
    for quote in ('"', "'"):
        if t.startswith(quote) and t.endswith(quote):
            t = t[1:-1].strip()
            break
    if t.startswith("ForwardRef("):
        t = t.removeprefix("ForwardRef(").removesuffix(")").strip()
        t = t.strip('"').strip("'").strip()
    if "." in t:
        t = t.rsplit(".", 1)[-1].strip()
    return t
|
|
87
|
+
|
|
88
|
+
def _fk_guess_target(field_name: str, model_names: set[str]) -> str | None:
|
|
89
|
+
n = field_name.strip()
|
|
90
|
+
if not n:
|
|
91
|
+
return None
|
|
92
|
+
base = n
|
|
93
|
+
if base.endswith("_id"):
|
|
94
|
+
base = base[:-3]
|
|
95
|
+
elif base.endswith("Id"):
|
|
96
|
+
base = base[:-2]
|
|
97
|
+
base = base.strip("_")
|
|
98
|
+
if not base:
|
|
99
|
+
return None
|
|
100
|
+
candidates = {base, base.capitalize(), base[:1].upper() + base[1:], base.replace("_", "").capitalize()}
|
|
101
|
+
for c in candidates:
|
|
102
|
+
if c in model_names:
|
|
103
|
+
return c
|
|
104
|
+
return None
|
|
105
|
+
|
|
106
|
+
@dataclass(frozen=True)
class _ModelInfo:
    """A model class discovered in the project."""

    name: str  # class name
    file: Path  # file the class was parsed from
    fields: list[tuple[str, str, bool]]  # (field name, type string, is primary key)
|
|
111
|
+
|
|
112
|
+
def _extract_classes_fields_from_ast(py_file: Path) -> list[_ModelInfo]:
    """Parse *py_file* and return one _ModelInfo per top-level class with fields.

    Fields come from class-body annotated assignments (``name: Type``) and
    plain single-target assignments (typed as "Any"); names "id"/"pk" are
    flagged as primary keys. Unreadable or unparsable files yield [].
    """
    try:
        source = py_file.read_text(encoding="utf-8", errors="ignore")
        module = ast.parse(source)
    except Exception:
        # Best-effort parser: skip anything we cannot read or parse.
        return []

    infos: list[_ModelInfo] = []
    for cls in module.body:
        if not isinstance(cls, ast.ClassDef):
            continue
        collected: list[tuple[str, str, bool]] = []
        for stmt in cls.body:
            if isinstance(stmt, ast.AnnAssign) and isinstance(stmt.target, ast.Name):
                field_name = stmt.target.id
                field_type = _ann_to_str(stmt.annotation) or "Any"
            elif (
                isinstance(stmt, ast.Assign)
                and len(stmt.targets) == 1
                and isinstance(stmt.targets[0], ast.Name)
            ):
                field_name = stmt.targets[0].id
                field_type = "Any"
            else:
                continue
            collected.append((field_name, field_type, field_name.lower() in {"id", "pk"}))
        if collected:
            infos.append(_ModelInfo(name=cls.name, file=py_file, fields=collected))
    return infos
|
|
140
|
+
|
|
141
|
+
def _build_model_index(project_root: Path) -> dict[str, _ModelInfo]:
    """Index every model class in the project by class name.

    Pass 1 asks the language router which .py files define models; if that
    yields nothing (or raises), pass 2 falls back to a directory-name
    heuristic (models/, entity/, domain/, ...). Both passes are best-effort.
    Duplicate class names keep the first definition seen; files are visited
    in sorted order, so the winner is deterministic.
    """
    model_files: set[Path] = set()
    try:
        for f in find_all_supported_files(project_root):
            if f.suffix.lower() != ".py":
                continue
            defs = extract_models_for_file(f, project_root) or []
            if defs:
                model_files.add(f.resolve())
    except Exception:
        # Best-effort: scanner/router failures just mean we use the fallback.
        pass
    if not model_files:
        # Fallback: treat any .py file under a model-ish directory as a model file.
        try:
            for f in find_all_supported_files(project_root):
                if f.suffix.lower() != ".py":
                    continue
                parts = {p.lower() for p in f.parts}
                if parts & {"models", "model", "entity", "entities", "domain"}:
                    model_files.add(f.resolve())
        except Exception:
            pass
    idx: dict[str, _ModelInfo] = {}
    for mf in sorted(model_files):
        for mi in _extract_classes_fields_from_ast(mf):
            # setdefault: keep the first class seen for a given name.
            idx.setdefault(mi.name, mi)
    return idx
|
|
167
|
+
|
|
168
|
+
def _infer_forward_rels(model: _ModelInfo, model_names: set[str]) -> list[PyRel]:
    """Infer outgoing relationship edges for *model* from its field list.

    Heuristics, in order per field:
    1. Collection of a known model class  -> OneToMany ("type"-derived).
    2. Scalar known model class           -> OneToOne when the field name is an
       embed hint (address, profile, ...), else ManyToOne ("type"-derived).
    3. *_id / *Id field name whose base resolves to a known model -> ManyToOne
       ("fk"-derived).
    Self-references and builtin-looking type names are skipped.
    """
    rels: list[PyRel] = []
    for fname, ftype, _pk in model.fields:
        if not fname or not ftype:
            continue
        is_coll, inner = _collection_inner(ftype)
        if is_coll:
            tgt = _normalize_ref_name(inner)
            if tgt and tgt in model_names and tgt != model.name and tgt not in _BUILTIN_TYPES:
                rels.append(PyRel(kind="OneToMany", target=tgt, field=fname, via="type"))
            # A collection field never falls through to the scalar/FK checks.
            continue
        base = _normalize_ref_name(ftype)
        if base and base in model_names and base != model.name and base not in _BUILTIN_TYPES:
            kind = "OneToOne" if fname.lower() in _EMBED_HINTS else "ManyToOne"
            rels.append(PyRel(kind=kind, target=base, field=fname, via="type"))
            continue
        # The second clause additionally catches case variants such as "_ID".
        if fname.endswith(_FK_SUFFIXES) or fname.lower().endswith("_id"):
            tgt = _fk_guess_target(fname, model_names)
            if tgt and tgt != model.name:
                rels.append(PyRel(kind="ManyToOne", target=tgt, field=fname, via="fk"))
    return rels
|
|
189
|
+
|
|
190
|
+
def _has_collection(rels_by_model: dict[str, list[PyRel]], src: str, dst: str) -> bool:
|
|
191
|
+
return any(r.target == dst and r.kind in {"OneToMany", "ManyToMany"} for r in rels_by_model.get(src, []))
|
|
192
|
+
|
|
193
|
+
def _has_scalar(rels_by_model: dict[str, list[PyRel]], src: str, dst: str) -> bool:
|
|
194
|
+
return any(r.target == dst and r.kind in {"ManyToOne", "OneToOne"} for r in rels_by_model.get(src, []))
|
|
195
|
+
|
|
196
|
+
def _upgrade_one_to_one(rels_by_model: dict[str, list[PyRel]]) -> None:
    """Promote mutually-scalar relation pairs to OneToOne, in place.

    A scalar relation src -> dst becomes OneToOne when dst also holds a
    scalar relation back to src and neither side owns a collection of the
    other. (The ManyToOne -> OneToOne rewrite does not change membership in
    the scalar/collection sets, so processing order does not matter.)
    """
    for src, rels in list(rels_by_model.items()):
        replacement: list[PyRel] = []
        for rel in rels:
            eligible = (
                rel.kind in {"ManyToOne", "OneToOne"}
                and _has_scalar(rels_by_model, rel.target, src)
                and not _has_collection(rels_by_model, src, rel.target)
                and not _has_collection(rels_by_model, rel.target, src)
            )
            if eligible:
                replacement.append(PyRel(kind="OneToOne", target=rel.target, field=rel.field, via=rel.via))
            else:
                replacement.append(rel)
        rels_by_model[src] = replacement
|
|
209
|
+
|
|
210
|
+
def _upgrade_many_to_many(rels_by_model: dict[str, list[PyRel]]) -> None:
    """Promote reciprocal OneToMany pairs to ManyToMany, in place.

    The reciprocity check runs against a snapshot of the edges taken before
    any mutation: the previous implementation consulted the live dict, so
    once side A of a reciprocal pair was rewritten to ManyToMany, side B's
    lookup for a OneToMany back-edge failed and only one side got upgraded.
    """
    # Snapshot every (source, target) OneToMany edge up front.
    one_to_many_edges = {
        (src, rel.target)
        for src, rels in rels_by_model.items()
        for rel in rels
        if rel.kind == "OneToMany"
    }
    for src, rels in list(rels_by_model.items()):
        newrels: list[PyRel] = []
        for r in rels:
            if r.kind == "OneToMany" and (r.target, src) in one_to_many_edges:
                newrels.append(PyRel(kind="ManyToMany", target=r.target, field=r.field, via=r.via))
            else:
                newrels.append(r)
        rels_by_model[src] = newrels
|
|
223
|
+
|
|
224
|
+
def python_collect_relationship_map(target: Path, project_root: Path):
    """Build the relationship picture for the model defined in *target*.

    Returns a 5-tuple:
      (model_name, main_fields, forward_rels, fields_by_entity, reverse_rels)
    where forward_rels are this model's outgoing PyRel edges, reverse_rels is
    a sorted list of (source model name, PyRel pointing at this model), and
    fields_by_entity maps every related model name to its field list. If the
    file defines no indexed model, returns (target stem, [], [], {}, []).
    """
    idx = _build_model_index(project_root)
    model_names = set(idx.keys())
    candidates = [m for m in idx.values() if m.file.resolve() == target.resolve()]
    if not candidates:
        return target.stem, [], [], {}, []
    stem = target.stem
    # Prefer the class whose name matches the file stem; else the first class.
    main = next((m for m in candidates if m.name.lower() == stem.lower()), candidates[0])
    model_name = main.name
    # Infer forward relations for every model, then refine kinds globally.
    rels_by_model: dict[str, list[PyRel]] = {name: _infer_forward_rels(mi, model_names) for name, mi in idx.items()}
    _upgrade_one_to_one(rels_by_model)
    _upgrade_many_to_many(rels_by_model)
    forward = rels_by_model.get(model_name, [])
    fields_by_entity: dict[str, list[tuple[str, str, bool]]] = {}
    for r in forward:
        if r and r.target and r.target in idx:
            fields_by_entity[r.target] = idx[r.target].fields
    reverse: list[tuple[str, PyRel]] = []
    for src, rels in rels_by_model.items():
        if src == model_name:
            continue
        for r in rels:
            if r and r.target == model_name:
                reverse.append((src, PyRel(kind=r.kind, target=model_name, field=r.field, via=r.via)))
                if src in idx:
                    fields_by_entity.setdefault(src, idx[src].fields)
    # Deterministic presentation order: source name, then kind, then field.
    reverse.sort(key=lambda x: (x[0].lower(), x[1].kind, x[1].field.lower()))
    return model_name, main.fields, forward, fields_by_entity, reverse
|
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
import ast
|
|
4
|
+
import re
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from ..scanner.scanner import find_all_supported_files
|
|
8
|
+
from ..context.python_rel import python_collect_relationship_map
|
|
9
|
+
|
|
10
|
+
# Decorator names (bare, or the final attribute of a dotted decorator) that
# mark a function as an HTTP route handler (Flask's app.route, FastAPI's
# router.get/post, etc.).
_ROUTE_DECORATORS = {"route", "get", "post", "put", "delete", "patch", "api_route", "app.route", "router.get", "router.post"}
|
|
11
|
+
|
|
12
|
+
def _extract_routes(tree: ast.AST) -> list[dict]:
    """Collect HTTP route definitions from decorated functions in *tree*.

    Both sync and async handlers are inspected — FastAPI handlers are
    typically ``async def``, which the previous FunctionDef-only check
    silently skipped. The first decorator that parses as a route wins for
    a given function; the handler name is attached to the route dict.
    """
    routes: list[dict] = []

    for node in ast.walk(tree):
        if not isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
            continue

        for dec in node.decorator_list:
            route_info = _parse_route_decorator(dec)
            if route_info:
                route_info["handler"] = node.name
                routes.append(route_info)
                break

    return routes
|
|
27
|
+
|
|
28
|
+
def _parse_route_decorator(dec: ast.AST) -> dict | None:
    """Interpret *dec* as a route decorator.

    Returns {"method": ..., "path": ...} when the decorator looks like a
    route registration, else None. Method defaults to GET; the path is the
    first string literal argument (default "/").
    """
    method = "GET"
    path = "/"

    if isinstance(dec, ast.Call):
        name = ""
        if isinstance(dec.func, ast.Attribute):
            name = dec.func.attr.lower()
        elif isinstance(dec.func, ast.Name):
            name = dec.func.id.lower()
        if name in {"get", "post", "put", "delete", "patch"}:
            method = name.upper()

        if dec.args:
            first_arg = dec.args[0]
            if isinstance(first_arg, ast.Constant) and isinstance(first_arg.value, str):
                path = first_arg.value

        # A call whose name is a known route decorator is a route even when
        # the path is "/" — previously @app.get("/") was silently dropped
        # because only a non-default path triggered the fallback below.
        if name in _ROUTE_DECORATORS:
            return {"method": method, "path": path}

    elif isinstance(dec, ast.Attribute):
        if dec.attr.lower() in _ROUTE_DECORATORS:
            return {"method": method, "path": path}

    elif isinstance(dec, ast.Name):
        if dec.id.lower() in _ROUTE_DECORATORS:
            return {"method": method, "path": path}

    # Fallback kept from the original heuristic: any decorator call carrying
    # a non-default string path is treated as a route.
    if path != "/":
        return {"method": method, "path": path}

    return None
|
|
60
|
+
|
|
61
|
+
def _extract_functions(tree: ast.AST) -> list[dict]:
|
|
62
|
+
functions = []
|
|
63
|
+
|
|
64
|
+
for node in ast.walk(tree):
|
|
65
|
+
if isinstance(node, ast.FunctionDef):
|
|
66
|
+
if node.name.startswith("_"):
|
|
67
|
+
continue
|
|
68
|
+
|
|
69
|
+
params = []
|
|
70
|
+
for arg in node.args.args:
|
|
71
|
+
if arg.arg == "self":
|
|
72
|
+
continue
|
|
73
|
+
param_type = "Any"
|
|
74
|
+
if arg.annotation:
|
|
75
|
+
try:
|
|
76
|
+
param_type = ast.unparse(arg.annotation)
|
|
77
|
+
except Exception:
|
|
78
|
+
pass
|
|
79
|
+
params.append(f"{arg.arg}: {param_type}")
|
|
80
|
+
|
|
81
|
+
ret_type = ""
|
|
82
|
+
if node.returns:
|
|
83
|
+
try:
|
|
84
|
+
ret_type = ast.unparse(node.returns)
|
|
85
|
+
except Exception:
|
|
86
|
+
pass
|
|
87
|
+
|
|
88
|
+
functions.append({
|
|
89
|
+
"name": node.name,
|
|
90
|
+
"params": ", ".join(params),
|
|
91
|
+
"return": ret_type,
|
|
92
|
+
})
|
|
93
|
+
|
|
94
|
+
return functions
|
|
95
|
+
|
|
96
|
+
def _find_models_used(tree: ast.AST, project_root: Path) -> dict[str, Any]:
    """Map capitalized identifiers referenced in *tree* to model summaries.

    An identifier is a model candidate when it starts upper-case and has more
    than 2 characters; it is confirmed by a same-stem .py file under a
    model-ish directory whose relationship map yields fields. Lookups are
    best-effort: failures for one candidate never abort the others.
    """
    model_names: set[str] = set()

    for node in ast.walk(tree):
        if isinstance(node, ast.Name):
            name = node.id
            if name and name[0].isupper() and len(name) > 2:
                model_names.add(name)

    # Materialize and pre-filter the project scan ONCE. The previous code
    # re-iterated find_all_supported_files' result for every model name,
    # which repeats the whole walk — and finds nothing after the first pass
    # if that function returns a generator.
    model_dirs = {"model", "models", "entity", "entities", "domain"}
    candidate_files: list[Path] = []
    for f in find_all_supported_files(project_root):
        if f.suffix.lower() != ".py":
            continue
        if {p.lower() for p in f.parts} & model_dirs:
            candidate_files.append(f)

    models: dict[str, Any] = {}
    # sorted(): deterministic insertion order (set iteration order is not).
    for model_name in sorted(model_names):
        for f in candidate_files:
            if f.stem.lower() != model_name.lower():
                continue
            try:
                _, fields, rels, _, _ = python_collect_relationship_map(f, project_root)
            except Exception:
                continue
            if fields:
                models[model_name] = {
                    "fields": fields,
                    "relationships": [
                        {"kind": r.kind, "target": r.target, "field": r.field}
                        for r in rels
                    ],
                }

    return models
|
|
132
|
+
|
|
133
|
+
def _find_repositories(tree: ast.AST) -> list[dict]:
|
|
134
|
+
repos = []
|
|
135
|
+
|
|
136
|
+
for node in ast.walk(tree):
|
|
137
|
+
if isinstance(node, (ast.Assign, ast.AnnAssign)):
|
|
138
|
+
target_name = ""
|
|
139
|
+
if isinstance(node, ast.Assign) and node.targets:
|
|
140
|
+
target = node.targets[0]
|
|
141
|
+
if isinstance(target, ast.Name):
|
|
142
|
+
target_name = target.id
|
|
143
|
+
elif isinstance(node, ast.AnnAssign):
|
|
144
|
+
if isinstance(node.target, ast.Name):
|
|
145
|
+
target_name = node.target.id
|
|
146
|
+
|
|
147
|
+
if "repo" in target_name.lower() or "dal" in target_name.lower():
|
|
148
|
+
methods = [
|
|
149
|
+
"get(id)",
|
|
150
|
+
"save(entity)",
|
|
151
|
+
"delete(id)",
|
|
152
|
+
"find_all()",
|
|
153
|
+
"find_by(**kwargs)",
|
|
154
|
+
]
|
|
155
|
+
repos.append({"name": target_name, "methods": methods})
|
|
156
|
+
|
|
157
|
+
return repos
|
|
158
|
+
|
|
159
|
+
def analyze_python_service(service_file: Path, project_root: Path) -> dict:
    """Produce a structured summary of a Python service module.

    Returns a dict with the service name, its HTTP endpoints, public
    functions, referenced models, repository-like objects, a human-readable
    request-flow sketch, and per-category counts. Unreadable/unparsable
    files are analyzed as an empty module rather than raising.
    """
    try:
        code = service_file.read_text(encoding="utf-8", errors="ignore")
        tree = ast.parse(code)
    except Exception:
        # Degrade to an empty module so every extractor below still runs.
        tree = ast.Module(body=[], type_ignores=[])

    service_name = service_file.stem

    routes = _extract_routes(tree)
    functions = _extract_functions(tree)
    models = _find_models_used(tree, project_root)
    repos = _find_repositories(tree)

    # Illustrative flow: first route -> handler -> first model -> repository.
    flow = []
    if routes:
        rt = routes[0]
        flow.append(f"{rt['method']} {rt['path']}")
        flow.append(" ↓")
        flow.append(f"{rt['handler']}()")
        flow.append(" ↓")
    if models:
        model_name = list(models.keys())[0]
        flow.append(f"Uses {model_name} model")
        if models[model_name].get("relationships"):
            rel = models[model_name]["relationships"][0]
            flow.append(f" (has {rel['kind']} → {rel['target']})")
        flow.append(" ↓")
    if repos:
        flow.append(f"{repos[0]['name']}.save()")

    summary = {
        "Routes": len(routes),
        "Functions": len(functions),
        "Models used": len(models),
        "Data access": len(repos),
    }

    return {
        "service_name": service_name,
        "endpoints": routes,
        "methods": functions,
        "models": models,
        "repositories": repos,
        "flow": flow,
        "summary": summary,
    }
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
import difflib
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def rank_paths(query: str, files: list[Path], root: Path, top: int = 3) -> list[tuple[Path, float]]:
    """Score *files* against *query* and return the best *top* (path, score) pairs.

    Paths and query are compared case-insensitively with separators
    normalized to backslashes. Exact/prefix/substring matches score on a
    fixed ladder; otherwise a difflib ratio >= 0.5 contributes a 0.50-0.70
    score. __init__.py files are never candidates; empty queries yield [].
    """
    cleaned = query.strip()
    needle = cleaned.replace("/", "\\").lower()
    if not needle:
        return []
    needle_stem = Path(cleaned).stem.lower()

    def score_one(path: Path) -> float:
        try:
            rel = str(path.relative_to(root)).replace("/", "\\").lower()
        except Exception:
            # Outside the project root: compare against the full path instead.
            rel = str(path).replace("/", "\\").lower()
        stem = path.stem.lower()
        name = path.name.lower()
        ladder = (
            (stem == needle_stem, 0.98),
            (name == needle, 0.97),
            (rel == needle, 0.96),
            (rel.endswith(needle), 0.92),
            (stem.startswith(needle_stem), 0.88),
            (name.startswith(needle), 0.86),
            (needle_stem in stem, 0.82),
            (needle in rel, 0.78),
            (needle in name, 0.74),
        )
        for hit, value in ladder:
            if hit:
                return value
        ratio = max(
            difflib.SequenceMatcher(a=needle_stem, b=stem).ratio(),
            difflib.SequenceMatcher(a=needle, b=rel).ratio(),
        )
        return 0.50 + (ratio - 0.5) * 0.4 if ratio >= 0.5 else 0.0

    ranked = [
        (path, score_one(path))
        for path in files
        if path.name != "__init__.py"
    ]
    ranked = [(path, s) for path, s in ranked if s > 0]
    # Highest score first; ties broken by lowercased path for stability.
    ranked.sort(key=lambda item: (-item[1], str(item[0]).lower()))
    return ranked[: min(top, len(ranked))]
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def fuzzy_top_matches(query: str, files: list[Path], root: Path, top: int = 3) -> list[tuple[Path, float]]:
    """Public alias for rank_paths(); kept as a stable import name for callers."""
    return rank_paths(query, files, root, top=top)
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def fuzzy_is_confident(ranked: list[tuple[Path, float]], min_score: float = 0.90, gap: float = 0.05) -> bool:
    """Decide whether the top fuzzy match can be auto-selected without prompting.

    Confident iff there is a top match scoring at least *min_score* and it
    leads the runner-up (when one exists) by at least *gap*.
    """
    if not ranked:
        return False
    best = ranked[0][1]
    if best < min_score:
        return False
    runner_up = ranked[1][1] if len(ranked) > 1 else None
    return runner_up is None or (best - runner_up) >= gap
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import os
|
|
5
|
+
import subprocess
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def find_files(query: str, root: Path, files: list[Path]) -> list[Path]:
    """Return files whose path relative to *root* contains *query* (case-insensitive).

    Both sides are normalized to backslash separators so "app/models" and
    "app\\models" style queries behave the same on every platform (the
    previous code normalized only the query, so multi-segment queries never
    matched on POSIX). Files outside *root* are matched against their full
    path instead of raising ValueError.
    """
    q = query.replace("/", "\\").lower()
    matches: list[Path] = []

    for f in files:
        try:
            rel = str(f.relative_to(root))
        except ValueError:
            # f lives outside root; fall back to its full path.
            rel = str(f)
        rel = rel.replace("/", "\\").lower()
        if q in rel:
            matches.append(f)

    return sorted(matches)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def open_file(path: Path) -> None:
    """Open *path* in VS Code when available, otherwise with the OS default app.

    Best-effort: a failure to launch VS Code falls through to the platform
    opener rather than raising. The previous implementation shelled out to
    the Windows-only `where` command and unconditionally used os.startfile,
    which raised AttributeError on POSIX; this version uses shutil.which and
    picks the right fallback per platform.
    """
    import shutil
    import sys

    # Prefer VS Code if the 'code' launcher is on PATH (cross-platform check).
    try:
        if shutil.which("code"):
            subprocess.run(["code", str(path)], check=False)
            return
    except Exception:
        pass

    # Fall back to the platform's default application.
    if hasattr(os, "startfile"):  # Windows
        os.startfile(path)  # type: ignore[attr-defined]
    elif sys.platform == "darwin":
        subprocess.run(["open", str(path)], check=False)
    else:
        subprocess.run(["xdg-open", str(path)], check=False)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
# Directory names that are never part of project sources; any path containing
# one of these components is excluded from scanning and folder resolution.
SKIP_DIRS: set[str] = {
    "__pycache__",
    "venv",
    ".venv",
    ".git",
    "site-packages",
    "codeintel.egg-info",
    "build",
    "dist",
    ".mypy_cache",
    ".pytest_cache",
}
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def find_project_root(start: Path) -> Path:
    """Walk upward from *start* to the nearest directory that looks like a
    project root.

    A root is the first directory containing pyproject.toml, setup.py, a
    .git entry, or any *.egg-info entry. Falls back to the resolved *start*
    when nothing matches.
    """
    start = start.resolve()
    markers = ("pyproject.toml", "setup.py", ".git")
    for candidate in (start, *start.parents):
        if any((candidate / marker).exists() for marker in markers):
            return candidate
        if any(candidate.glob("*.egg-info")):
            return candidate
    return start
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def is_skipped_path(path: Path) -> bool:
    """True when any component of *path* is a directory we never scan."""
    for part in path.parts:
        if part in SKIP_DIRS:
            return True
    return False
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import typer
|
|
5
|
+
|
|
6
|
+
from .project import find_project_root, is_skipped_path
|
|
7
|
+
from .fuzzy import fuzzy_top_matches
|
|
8
|
+
from ..errors import InvalidPathError
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def resolve_target_folder(
    query: str,
    *,
    top: int = 3,
    auto_threshold: float = 0.90,
    min_threshold: float = 0.60,
) -> tuple[Path, Path]:
    """Resolve *query* to an existing project folder; return (folder, project root).

    Resolution order: literal path -> path relative to the project root ->
    unique exact directory-name match -> fuzzy match. Multiple exact matches
    or an ambiguous fuzzy match prompt the user interactively (typer); a
    fuzzy best score below *min_threshold* (or no match at all) raises
    InvalidPathError. A best score >= *auto_threshold* with a >= 0.05 lead
    over the runner-up is auto-selected without prompting.
    """
    root = find_project_root(Path.cwd())

    # 1) Treat the query as a literal path (absolute or cwd-relative).
    p = Path(query)
    if p.exists() and p.is_dir():
        return p.resolve(), root

    # 2) Treat the query as a path relative to the project root.
    p2 = root / query
    if p2.exists() and p2.is_dir():
        return p2.resolve(), root

    folders = [d.resolve() for d in root.rglob("*") if d.is_dir() and not is_skipped_path(d)]

    # 3) Exact directory-name match (last path segment of the query).
    qname = Path(query).name
    exact = [d for d in folders if d.name == qname]
    if len(exact) == 1:
        return exact[0], root

    if len(exact) > 1:
        # Ambiguous exact matches: let the user choose (0 cancels).
        typer.echo("Multiple exact matches:")
        for i, m in enumerate(exact[:top], 1):
            typer.echo(f"{i}. {m}")
        choice = typer.prompt("Select number (0=cancel)", type=int, default=0)
        if choice == 0:
            raise typer.Exit()
        if choice < 1 or choice > min(len(exact), top):
            raise typer.BadParameter("Invalid selection.")
        return exact[choice - 1], root

    # 4) Fuzzy matching as the last resort.
    matches = fuzzy_top_matches(query, folders, root, top=top)
    if not matches:
        raise InvalidPathError(message="Folder not found", path=Path(query))

    best_path, best_score = matches[0]
    if best_score < min_threshold:
        raise InvalidPathError(message="Folder not found (low-confidence)", path=Path(query))

    # Auto-select only when clearly ahead of the runner-up.
    second = matches[1][1] if len(matches) > 1 else 0.0
    if best_score >= auto_threshold and (best_score - second) >= 0.05:
        return best_path.resolve(), root

    typer.echo("Did you mean:")
    for i, (p, score) in enumerate(matches, 1):
        typer.echo(f"{i}. {p} score={score:.2f}")

    choice = typer.prompt("Select number (0=cancel)", type=int, default=0)
    if choice == 0:
        raise typer.Exit()
    if choice < 1 or choice > len(matches):
        raise typer.BadParameter("Invalid selection.")

    return matches[choice - 1][0].resolve(), root
|