aja-codeintel 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68) hide show
  1. aja_codeintel-0.1.0.dist-info/METADATA +436 -0
  2. aja_codeintel-0.1.0.dist-info/RECORD +68 -0
  3. aja_codeintel-0.1.0.dist-info/WHEEL +5 -0
  4. aja_codeintel-0.1.0.dist-info/entry_points.txt +3 -0
  5. aja_codeintel-0.1.0.dist-info/licenses/LICENSE +21 -0
  6. aja_codeintel-0.1.0.dist-info/top_level.txt +1 -0
  7. codeintel_cli/__init__.py +1 -0
  8. codeintel_cli/__main__.py +4 -0
  9. codeintel_cli/cli.py +41 -0
  10. codeintel_cli/commands/__init__.py +1 -0
  11. codeintel_cli/commands/graph/__init__.py +18 -0
  12. codeintel_cli/commands/graph/deps_cmd.py +35 -0
  13. codeintel_cli/commands/graph/related_cmd.py +121 -0
  14. codeintel_cli/commands/graph/relsymbols_cmd.py +347 -0
  15. codeintel_cli/commands/graph/reverse_related_cmd.py +54 -0
  16. codeintel_cli/commands/nav/__init__.py +12 -0
  17. codeintel_cli/commands/nav/copy_cmd.py +101 -0
  18. codeintel_cli/commands/nav/open_cmd.py +18 -0
  19. codeintel_cli/commands/nav/where_cmd.py +21 -0
  20. codeintel_cli/commands/project/__init__.py +26 -0
  21. codeintel_cli/commands/project/context_cmd.py +326 -0
  22. codeintel_cli/commands/project/folder_cmd.py +51 -0
  23. codeintel_cli/commands/project/imports_cmd.py +90 -0
  24. codeintel_cli/commands/project/models_cmd.py +98 -0
  25. codeintel_cli/commands/project/modeltree_cmd.py +476 -0
  26. codeintel_cli/commands/project/new.py +0 -0
  27. codeintel_cli/commands/project/resolve_cmd.py +29 -0
  28. codeintel_cli/commands/project/scan_cmd.py +51 -0
  29. codeintel_cli/commands/project/servicemap_cmd.py +180 -0
  30. codeintel_cli/commands/project/tree_cmd.py +203 -0
  31. codeintel_cli/commands/project/version_cmd.py +14 -0
  32. codeintel_cli/context/java_context.py +180 -0
  33. codeintel_cli/context/java_rel.py +299 -0
  34. codeintel_cli/context/java_service.py +291 -0
  35. codeintel_cli/context/python_context.py +91 -0
  36. codeintel_cli/context/python_rel.py +251 -0
  37. codeintel_cli/context/python_service.py +205 -0
  38. codeintel_cli/core/fuzzy.py +72 -0
  39. codeintel_cli/core/opener.py +37 -0
  40. codeintel_cli/core/project.py +34 -0
  41. codeintel_cli/core/resolve_folder.py +68 -0
  42. codeintel_cli/core/resolve_model_target.py +92 -0
  43. codeintel_cli/core/resolve_target.py +53 -0
  44. codeintel_cli/core/timing.py +13 -0
  45. codeintel_cli/core/where.py +77 -0
  46. codeintel_cli/db/__init__.py +7 -0
  47. codeintel_cli/db/cache.py +224 -0
  48. codeintel_cli/db/operations.py +333 -0
  49. codeintel_cli/db/schema.py +102 -0
  50. codeintel_cli/errors.py +78 -0
  51. codeintel_cli/graph/__init__.py +1 -0
  52. codeintel_cli/graph/builder.py +149 -0
  53. codeintel_cli/graph/query.py +30 -0
  54. codeintel_cli/graph/traverse.py +49 -0
  55. codeintel_cli/lang/__init__.py +0 -0
  56. codeintel_cli/lang/java/__init__.py +0 -0
  57. codeintel_cli/lang/java/engine.py +18 -0
  58. codeintel_cli/lang/java/models.py +105 -0
  59. codeintel_cli/lang/java/resolve.py +49 -0
  60. codeintel_cli/lang/python/__init__.py +0 -0
  61. codeintel_cli/lang/python/engine.py +8 -0
  62. codeintel_cli/lang/python/models.py +86 -0
  63. codeintel_cli/lang/router.py +24 -0
  64. codeintel_cli/parser/imports.py +26 -0
  65. codeintel_cli/parser/resolve.py +49 -0
  66. codeintel_cli/parser/symbols.py +92 -0
  67. codeintel_cli/scanner/__init__.py +0 -0
  68. codeintel_cli/scanner/scanner.py +41 -0
@@ -0,0 +1,299 @@
1
+ from __future__ import annotations
2
+
3
+ from dataclasses import dataclass
4
+ from pathlib import Path
5
+ import re
6
+
7
+
8
@dataclass(frozen=True)
class Rel:
    """One JPA relationship extracted from a Java entity source file.

    Immutable value object produced by ``java_fields_and_rels``.
    """

    kind: str  # annotation name: OneToOne / OneToMany / ManyToOne / ManyToMany
    target: str  # simple class name of the related entity
    field: str  # name of the declaring field on the source entity
    cascade: str | None = None  # raw cascade= argument, quotes stripped
    fetch: str | None = None  # raw fetch= argument, quotes stripped
    mapped_by: str | None = None  # mappedBy= argument (inverse side), if any
    join_table: str | None = None  # @JoinTable name= value, if present
    join_columns: list[str] | None = None  # @JoinColumn name= values, if any
18
+
19
+
20
# First top-level type declaration: captures (keyword, type name).
_TYPE_RE = re.compile(r"\b(class|interface|enum|record)\s+([A-Za-z_][A-Za-z0-9_]*)\b")
# Java record header: captures the record name and its parameter list.
_RECORD_RE = re.compile(r"\brecord\s+([A-Za-z_][A-Za-z0-9_]*)\s*\(([^)]*)\)")
# One-line field declaration: optional modifiers, type, name, optional initializer.
_FIELD_RE = re.compile(
    r"^\s*(?:public|protected|private)?\s*(?:static\s+)?(?:final\s+)?([A-Za-z0-9_<>\[\].,?]+)\s+([A-Za-z_][A-Za-z0-9_]*)\s*(?:=.*)?;"
)
# JPA relationship annotations that become Rel records.
_REL_ANNOS = {"OneToOne", "OneToMany", "ManyToMany", "ManyToOne"}
# Line starting with an annotation: captures the name and the remainder.
_ANN_START_RE = re.compile(r"^\s*@([A-Za-z_][A-Za-z0-9_]*)\b(.*)$")
# name="..." argument inside @JoinColumn / @JoinTable annotation text.
_JOINCOL_NAME_RE = re.compile(r'name\s*=\s*"([^"]+)"')
# A value wrapped in quotes (note: opening/closing quote kinds may differ).
_QUOTED_RE = re.compile(r'^["\'](.*)["\']$')

# Keywords the loose field regex can false-positive on; filtered downstream.
_DENY_FIELD_NAMES = {"return", "this", "true", "false", "null", "new", "super", "throw"}
_DENY_FIELD_TYPES = {"return", "throw"}
32
+
33
+
34
def clean_java(text: str) -> str:
    """Strip block (``/* */``) and line (``//``) comments from Java source.

    Empty or falsy input yields ``""``.  Note: ``//`` sequences inside
    string literals are also removed — this is a best-effort cleaner.
    """
    if not text:
        return ""
    without_blocks = re.sub(r"/\*.*?\*/", "", text, flags=re.S)
    return re.sub(r"//.*?$", "", without_blocks, flags=re.M)
40
+
41
+
42
def top_type_name(text: str, fallback: str) -> str:
    """Return the name of the first type declared in *text*, else *fallback*."""
    if not text:
        return fallback
    found = _TYPE_RE.search(text)
    if found is None:
        return fallback
    return found.group(2)
47
+
48
+
49
+ def _collapse_annotations(text: str) -> str:
50
+ lines = text.splitlines()
51
+ result: list[str] = []
52
+ i = 0
53
+ while i < len(lines):
54
+ line = lines[i]
55
+ stripped = line.strip()
56
+ if stripped.startswith("@"):
57
+ depth = stripped.count("(") - stripped.count(")")
58
+ combined = stripped
59
+ j = i + 1
60
+ while depth > 0 and j < len(lines):
61
+ nxt = lines[j].strip()
62
+ combined = combined + " " + nxt
63
+ depth += nxt.count("(") - nxt.count(")")
64
+ j += 1
65
+ result.append(combined)
66
+ i = j
67
+ else:
68
+ result.append(line)
69
+ i += 1
70
+ return "\n".join(result)
71
+
72
+
73
+ def _split_record_params(sig: str) -> list[tuple[str, str]]:
74
+ s = (sig or "").strip()
75
+ if not s:
76
+ return []
77
+ parts: list[str] = []
78
+ cur: list[str] = []
79
+ depth = 0
80
+ for ch in s:
81
+ if ch == "<":
82
+ depth += 1
83
+ elif ch == ">":
84
+ depth = max(0, depth - 1)
85
+ elif ch == "," and depth == 0:
86
+ parts.append("".join(cur).strip())
87
+ cur = []
88
+ continue
89
+ cur.append(ch)
90
+ if cur:
91
+ parts.append("".join(cur).strip())
92
+
93
+ out: list[tuple[str, str]] = []
94
+ for p in parts:
95
+ toks = p.split()
96
+ if len(toks) >= 2:
97
+ t = " ".join(toks[:-1]).strip()
98
+ n = toks[-1].strip()
99
+ if t and n:
100
+ out.append((n, t))
101
+ return out
102
+
103
+
104
def _strip_quotes(v: str | None) -> str | None:
    """Trim whitespace and one surrounding quote pair; empty results → None."""
    if not v:
        return None
    value = v.strip()
    quoted = _QUOTED_RE.match(value)
    if quoted is not None:
        value = quoted.group(1).strip()
    return value or None
112
+
113
+
114
+ def _extract_arg_map(argstr: str) -> dict[str, str]:
115
+ s = (argstr or "").strip()
116
+ if not s:
117
+ return {}
118
+ out: dict[str, str] = {}
119
+ cur: list[str] = []
120
+ parts: list[str] = []
121
+ depth = 0
122
+ for ch in s:
123
+ if ch == "(":
124
+ depth += 1
125
+ elif ch == ")":
126
+ depth = max(0, depth - 1)
127
+ if ch == "," and depth == 0:
128
+ parts.append("".join(cur).strip())
129
+ cur = []
130
+ continue
131
+ cur.append(ch)
132
+ if cur:
133
+ parts.append("".join(cur).strip())
134
+
135
+ for p in parts:
136
+ if not p:
137
+ continue
138
+ if "=" in p:
139
+ k, v = p.split("=", 1)
140
+ out[k.strip()] = v.strip()
141
+ else:
142
+ out["value"] = p.strip()
143
+ return out
144
+
145
+
146
+ def _unwrap_java_type(t: str) -> str:
147
+ s = (t or "").strip()
148
+ if not s:
149
+ return ""
150
+ s = s.replace("java.util.", "")
151
+ s = s.replace("javax.persistence.", "")
152
+ s = s.replace("jakarta.persistence.", "")
153
+
154
+ m_opt = re.match(r"^Optional\s*<\s*([^>]+)\s*>$", s)
155
+ if m_opt:
156
+ s = m_opt.group(1).strip()
157
+
158
+ m_col = re.match(r"^(List|Set|Collection|Iterable)\s*<\s*([^>]+)\s*>$", s)
159
+ if m_col:
160
+ s = m_col.group(2).strip()
161
+
162
+ if "." in s:
163
+ s = s.split(".")[-1].strip()
164
+
165
+ return s
166
+
167
+
168
def java_fields_and_rels(path: Path) -> tuple[list[tuple[str, str, bool]], list[Rel]]:
    """Parse a Java entity file into fields and JPA relationships.

    Returns ``(fields, rels)`` where each field is ``(name, type, is_pk)``.
    Best-effort and regex based: unreadable or empty files yield ``([], [])``.
    """
    try:
        raw = path.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        return [], []

    text = clean_java(raw)
    if not text:
        return [], []

    # Records declare all fields in the header; no annotation scan needed.
    mrec = _RECORD_RE.search(text)
    if mrec:
        params = mrec.group(2)
        flds = [(n, t, n.lower() == "id") for (n, t) in _split_record_params(params)]
        return flds, []

    text = _collapse_annotations(text)

    lines = text.splitlines()
    fields: list[tuple[str, str, bool]] = []
    rels: list[Rel] = []
    # Annotations seen since the last field line: (name, parsed args, raw line).
    pending: list[tuple[str, dict[str, str], str]] = []
    pending_is_id = False

    for line in lines:
        s = line.strip()
        if not s:
            continue

        m = _ANN_START_RE.match(s)
        if m:
            name = m.group(1)
            rest = m.group(2).strip()
            full = rest
            # Unwrap "( ... )" so _extract_arg_map sees only the arguments.
            if full.startswith("(") and full.endswith(")"):
                full = full[1:-1].strip()
            args = _extract_arg_map(full)
            if name == "Id":
                pending_is_id = True
            if name in _REL_ANNOS or name in {"JoinTable", "JoinColumn", "JoinColumns"}:
                pending.append((name, args, s))
            continue

        fm = _FIELD_RE.match(line)
        if not fm:
            # Any non-field line breaks the annotation → field association.
            pending.clear()
            pending_is_id = False
            continue

        ftype = fm.group(1).strip()
        fname = fm.group(2).strip()

        # The loose field regex can match statements; filter keyword hits.
        if not fname or fname.lower() in _DENY_FIELD_NAMES:
            pending.clear()
            pending_is_id = False
            continue
        if ftype.lower() in _DENY_FIELD_TYPES:
            pending.clear()
            pending_is_id = False
            continue

        # Primary key: preceding @Id, or a field literally named "id".
        is_pk = pending_is_id or fname.lower() == "id"
        fields.append((fname, ftype, is_pk))

        rel_kind: str | None = None
        rel_args: dict[str, str] = {}
        join_table: str | None = None
        join_cols: list[str] = []

        for aname, aargs, raw_line in pending:
            if aname in _REL_ANNOS:
                rel_kind = aname
                rel_args = aargs
            elif aname == "JoinTable":
                join_table = _strip_quotes(aargs.get("name") or aargs.get("value"))
                if not join_table:
                    # Fallback: scan the raw annotation line for name="...".
                    mm = re.search(r'name\s*=\s*"([^"]+)"', raw_line)
                    if mm:
                        join_table = mm.group(1).strip()
            elif aname in {"JoinColumn", "JoinColumns"}:
                for mm in _JOINCOL_NAME_RE.finditer(raw_line):
                    join_cols.append(mm.group(1).strip())

        if rel_kind:
            target = _unwrap_java_type(ftype)
            if target:
                rels.append(
                    Rel(
                        kind=rel_kind,
                        target=target,
                        field=fname,
                        cascade=_strip_quotes(rel_args.get("cascade")),
                        fetch=_strip_quotes(rel_args.get("fetch")),
                        mapped_by=_strip_quotes(rel_args.get("mappedBy")),
                        join_table=join_table,
                        join_columns=join_cols or None,
                    )
                )

        pending.clear()
        pending_is_id = False

    return fields, rels
271
+
272
+
273
def model_fields_from_extractor(path: Path, project_root: Path) -> list[tuple[str, str, bool]]:
    """Get model fields via the language-router extractor.

    Returns ``(name, type, is_pk)`` tuples.  Any failure — import error,
    extractor error, or no definitions in the file — yields an empty list.
    """
    try:
        # Imported lazily; presumably to avoid a circular import — TODO confirm.
        from ..lang.router import extract_models_for_file

        defs = extract_models_for_file(path, project_root)
        if not defs:
            return []

        # Only the first definition in the file is considered.
        d0 = defs[0]
        out: list[tuple[str, str, bool]] = []

        for f in getattr(d0, "fields", []) or []:
            name = (getattr(f, "name", "") or "").strip()
            typ = (getattr(f, "type", "") or "").strip() or "Object"

            if not name:
                continue
            if name.lower() in _DENY_FIELD_NAMES:
                continue
            if typ.lower() in _DENY_FIELD_TYPES:
                continue

            # A field literally named "id" is treated as the primary key.
            out.append((name, typ, name.lower() == "id"))

        return out
    except Exception:
        return []
@@ -0,0 +1,291 @@
1
+ from __future__ import annotations
2
+ from pathlib import Path
3
+ import re
4
+ from typing import Any
5
+
6
+ from ..scanner.scanner import find_all_supported_files
7
+ from ..context.java_rel import clean_java, top_type_name, java_fields_and_rels
8
+
9
# Method declaration: optional visibility, optional static, return type,
# name, parameter list.
_METHOD_RE = re.compile(
    r"^\s*(?:public|protected|private)?\s+(?:static\s+)?"
    r"([A-Za-z0-9_<>\[\].,?\s]+?)\s+([A-Za-z_][A-Za-z0-9_]*)\s*\(([^)]*)\)",
    re.M,
)
# Injected field declaration: private/protected, optional final, type, name.
_FIELD_RE = re.compile(
    r"^\s*(?:private|protected)\s+(?:final\s+)?"
    r"([A-Za-z0-9_<>,\s]+?)\s+([A-Za-z_][A-Za-z0-9_]*)\s*;",
    re.M,
)
# Spring MVC mapping annotations in their four syntactic shapes.
_MAPPING_RE = re.compile(
    r"@(Get|Post|Put|Delete|Patch|Request)Mapping"
    r"(?:"
    r"\(\s*(?:value|path)\s*=\s*[\"']([^\"']*)[\"'][^)]*\)" # named: value="/path"
    r"|\(\s*[\"']([^\"']*)[\"']\s*\)" # positional: "/path"
    r"|\(\s*\)" # empty parens: ()
    r"|(?=\s|$|@|\n)" # bare: no parens at all
    r")",
    re.M,
)
# Class-level @RequestMapping base path.
_CLASS_MAPPING_RE = re.compile(
    r'@RequestMapping\s*\(\s*(?:value\s*=\s*|path\s*=\s*)?["\']([^"\']*)["\']'
)
# First class declaration in a file.
_CLASS_NAME_RE = re.compile(r"\bclass\s+([A-Za-z_][A-Za-z0-9_]*)\b")

# Tokens the loose method regex can mis-capture as a "return type".
_SKIP_TYPES = {"void", "public", "private", "protected", "static", "final"}
# Object-plumbing methods excluded from the service method listing.
_SKIP_METHODS = {"equals", "hashCode", "toString", "clone"}
# Common JDK/Spring types that are never project models.
_PRIMITIVES = {"String", "Long", "Integer", "Boolean", "List", "Set", "Optional", "Page", "Object"}

# Signatures inherited from Spring Data repositories; listed for every repo.
_DEFAULT_REPO_METHODS = [
    "findById(ID) → Optional<T>",
    "save(T) → T",
    "findAll() → List<T>",
    "deleteById(ID)",
    "existsById(ID) → boolean",
]
45
+
46
+
47
+ def _extract_type_name(full_type: str) -> str:
48
+ s = (full_type or "").strip()
49
+ s = s.replace("java.util.", "").replace("org.springframework.", "")
50
+ if "." in s:
51
+ s = re.sub(r"\b\w+\.", "", s)
52
+ return s
53
+
54
+
55
def _find_service_impl(service_file: Path, project_root: Path) -> Path | None:
    """Locate ``<ServiceName>Impl.java`` anywhere under the project root."""
    wanted_stem = f"{service_file.stem}Impl"
    candidates = (
        f
        for f in find_all_supported_files(project_root)
        if f.stem == wanted_stem and f.suffix.lower() == ".java"
    )
    # First hit in scan order, or None when no implementation exists.
    return next(candidates, None)
61
+
62
+
63
def _find_controllers(project_root: Path, service_name: str) -> list[dict]:
    """Scan controller files referencing *service_name* for REST endpoints.

    Returns dicts with ``method``, ``path`` and ``handler`` keys; duplicate
    (verb, path) pairs are reported only once.
    """
    endpoints: list[dict] = []
    seen: set[tuple[str, str]] = set()

    for f in find_all_supported_files(project_root):
        if f.suffix.lower() != ".java":
            continue
        # Only files living under a directory named "controller".
        if "controller" not in {p.lower() for p in f.parts}:
            continue

        try:
            text = clean_java(f.read_text(encoding="utf-8", errors="ignore"))
        except OSError:
            continue

        # Cheap relevance filter: controller must mention the service name.
        if service_name not in text:
            continue

        m = _CLASS_NAME_RE.search(text)
        controller_class = m.group(1) if m else ""
        camel_class = controller_class[0].lower() + controller_class[1:] if controller_class else ""

        # Class-level base path, trailing slash normalized away.
        class_mapping = ""
        cm = _CLASS_MAPPING_RE.search(text)
        if cm:
            class_mapping = cm.group(1).rstrip("/")

        for mm in _MAPPING_RE.finditer(text):
            verb = mm.group(1)
            if not verb:
                continue

            verb = verb.upper()
            if verb == "REQUEST":
                continue  # @RequestMapping on class level — skip, already handled via class_mapping

            # Group 2 = named value=/path= form, group 3 = positional form.
            raw_path = mm.group(2) or mm.group(3) or ""
            if class_mapping:
                full_path = class_mapping if not raw_path else class_mapping + "/" + raw_path.lstrip("/")
            else:
                full_path = raw_path or "/"
            full_path = re.sub(r"/+", "/", full_path)

            # Handler = first method declared shortly after the annotation.
            next_chunk = text[mm.end(): mm.end() + 400]
            handler_match = re.search(
                r"(?:public|protected|private)[^(]*\s+([a-z][A-Za-z0-9_]*)\s*\(",
                next_chunk,
            ) or re.search(r"\s+([a-z][A-Za-z0-9_]*)\s*\(", next_chunk)

            if not handler_match:
                continue

            handler_name = handler_match.group(1)
            # Guard against matching the constructor / a field of class type.
            if handler_name in {controller_class, camel_class}:
                continue

            key = (verb, full_path)
            if key in seen:
                continue
            seen.add(key)

            endpoints.append({
                "method": verb,
                "path": full_path,
                "handler": f"{f.stem}.{handler_name}()",
            })

    return endpoints
131
+
132
+
133
def _find_models_used(text: str, project_root: Path) -> dict[str, Any]:
    """Find entity/model classes referenced by *text* and describe them.

    Candidate names come from entity/model/domain imports plus capitalized
    local declarations; each name is resolved to a ``.java`` file under a
    model/entity/domain directory and parsed with ``java_fields_and_rels``.

    Returns a mapping of model name → {"fields": ..., "relationships": ...}.
    """
    model_names: set[str] = set()

    # Explicit imports from entity/model/domain packages.
    for match in re.finditer(r"import\s+[\w.]+\.(?:entity|model|domain)\.([A-Z]\w+);", text):
        model_names.add(match.group(1))

    # Capitalized types in declarations/assignments, minus JDK/Spring types.
    for match in re.finditer(r"\b([A-Z][A-Za-z0-9_]+)\s+\w+\s*[;=]", text):
        name = match.group(1)
        if len(name) > 2 and not name.isupper() and name not in _PRIMITIVES:
            model_names.add(name)

    models: dict[str, Any] = {}
    entity_dirs = {"model", "models", "entity", "entities", "domain"}

    # Walk the project tree ONCE and index candidate entity files by stem.
    # (The original re-scanned the whole tree for every model name.)
    model_files: dict[str, list[Path]] = {}
    for f in find_all_supported_files(project_root):
        if f.suffix.lower() != ".java":
            continue
        if not ({p.lower() for p in f.parts} & entity_dirs):
            continue
        model_files.setdefault(f.stem, []).append(f)

    for model_name in model_names:
        for f in model_files.get(model_name, []):
            try:
                fields, rels = java_fields_and_rels(f)
                if fields:
                    models[model_name] = {
                        "fields": fields,
                        "relationships": [
                            {"kind": r.kind, "target": r.target, "field": r.field}
                            for r in rels
                        ],
                    }
            except Exception:
                # Best-effort: skip files the entity parser chokes on.
                pass

    return models
167
+
168
+
169
def _find_repositories(text: str) -> list[dict]:
    """Collect repository dependencies from injected fields and imports."""
    repos: list[dict] = []
    known: set[str] = set()

    def _add(repo_name: str) -> None:
        # De-duplicate while preserving discovery order.
        if repo_name not in known:
            known.add(repo_name)
            repos.append({"name": repo_name, "methods": _DEFAULT_REPO_METHODS})

    # Injected fields whose type or name mentions "repository".
    for match in _FIELD_RE.finditer(text):
        ftype, fname = match.group(1), match.group(2)
        if "Repository" in ftype or "repository" in fname.lower():
            _add(_extract_type_name(ftype))

    # Imports from a *.repository package.
    for match in re.finditer(r"import\s+[\w.]+\.repository\.([A-Z]\w+Repository);", text):
        _add(match.group(1))

    return repos
185
+
186
+
187
def _extract_service_methods(text: str, class_name: str = "") -> list[dict]:
    """Extract business-method signatures from a service class body.

    Constructors, setters, ``get*Service`` accessors, Object plumbing and
    duplicate names are filtered out.  Returns dicts with ``name``,
    ``params`` and ``return`` keys.
    """
    methods: list[dict] = []
    seen_names: set[str] = set()

    for match in _METHOD_RE.finditer(text):
        raw_ret = match.group(1).strip()
        method_name = match.group(2)
        params = match.group(3).strip()

        # First occurrence wins; overloads collapse to one entry.
        if method_name in seen_names:
            continue
        # A "method" named like the class is actually the constructor.
        if class_name and method_name == class_name:
            continue
        if method_name in _SKIP_METHODS:
            continue
        # Setters with arguments are boilerplate.
        if method_name.startswith("set") and params:
            continue
        if method_name.startswith("get") and method_name.endswith("Service"):
            continue

        ret_type = _extract_type_name(raw_ret)
        # Keyword captured as return type ⇒ the regex matched a non-method.
        if ret_type in _SKIP_TYPES:
            continue

        # Normalize each parameter to "Type name".
        param_list: list[str] = []
        for p in params.split(","):
            p = p.strip()
            if not p:
                continue
            tokens = p.split()
            if len(tokens) >= 2:
                ptype = _extract_type_name(" ".join(tokens[:-1]))
                param_list.append(f"{ptype} {tokens[-1]}")

        seen_names.add(method_name)
        methods.append({
            "name": method_name,
            "params": ", ".join(param_list),
            "return": ret_type if ret_type not in _SKIP_TYPES else "",
        })

    return methods
229
+
230
+
231
def analyze_java_service(service_file: Path, project_root: Path) -> dict:
    """Build a structured report for one Java service file.

    Gathers REST endpoints, service methods, referenced models, injected
    repositories, an illustrative request flow, and summary counts.
    Unreadable files degrade to empty sections rather than raising.
    """
    try:
        text = clean_java(service_file.read_text(encoding="utf-8", errors="ignore"))
    except OSError:
        text = ""

    service_name = top_type_name(text, service_file.stem)

    # If the file is an interface, models/repos live in the *Impl class.
    impl_text = text
    if "interface " in text:
        impl_file = _find_service_impl(service_file, project_root)
        if impl_file:
            try:
                impl_text = clean_java(impl_file.read_text(encoding="utf-8", errors="ignore"))
            except OSError:
                pass

    endpoints = _find_controllers(project_root, service_name)
    methods = _extract_service_methods(text, service_name)
    models = _find_models_used(impl_text, project_root)
    repos = _find_repositories(impl_text)

    # Illustrative flow built from the first item of each section.
    flow: list[str] = []
    if endpoints:
        ep = endpoints[0]
        flow.append(f"{ep['method']} {ep['path']}")
        flow.append(" ↓")
        flow.append(f"{ep['handler']}")
        flow.append(" ↓")
    if methods:
        m0 = methods[0]
        # Show only the first argument name (elided) to keep the line short.
        arg = m0["params"].split(",")[0].split()[-1] if m0["params"] else ""
        flow.append(f"{service_name}.{m0['name']}({arg + '...' if arg else ''})")
        flow.append(" ↓")
    if models:
        model_name = next(iter(models))
        flow.append(f"Uses {model_name} model")
        if models[model_name].get("relationships"):
            rel = models[model_name]["relationships"][0]
            flow.append(f" (has {rel['kind']} → {rel['target']})")
        flow.append(" ↓")
    if repos:
        flow.append(f"{repos[0]['name']}.save()")
        flow.append(" ↓")
    if methods and methods[0].get("return"):
        flow.append(f"Returns: {methods[0]['return']}")

    return {
        "service_name": service_name,
        "endpoints": endpoints,
        "methods": methods,
        "models": models,
        "repositories": repos,
        "flow": flow,
        "summary": {
            "Endpoints": len(endpoints),
            "Service methods": len(methods),
            "Models used": len(models),
            "Repositories": len(repos),
        },
    }
@@ -0,0 +1,91 @@
1
+ from __future__ import annotations
2
+
3
+ from pathlib import Path
4
+ import typer
5
+
6
+ from ..graph.builder import build_graph_with_counts, get_hub_files_by_ratio
7
+ from ..graph.query import get_related
8
+ from ..parser.symbols import extract_classes_from_file, extract_funcs_from_file
9
+
10
+
11
+ def _path_to_module(path: Path, root: Path) -> str:
12
+ rel = path.relative_to(root)
13
+ parts = list(rel.parts)
14
+ if parts and parts[-1] == "__init__.py":
15
+ parts = parts[:-1]
16
+ elif parts:
17
+ parts[-1] = parts[-1][:-3]
18
+ return ".".join(parts)
19
+
20
+
21
def _sym_lines_py(f: Path, root: Path, show_line: bool) -> tuple[list[str], set[str]]:
    """List public symbols in *f* plus ready-to-paste import statements.

    Private names (leading underscore) and ``register_*`` functions are
    skipped.  When *show_line* is true each entry carries its line number.
    Returns ``(sorted symbol lines, import statements)``.
    """
    out: list[str] = []
    imports: set[str] = set()
    mod = _path_to_module(f, root)

    for c in extract_classes_from_file(f):
        if c.name.startswith("_"):
            continue
        out.append(c.name if not show_line else f"{c.name} (line {c.lineno})")
        imports.add(f"from {mod} import {c.name}")

    for fn in extract_funcs_from_file(f):
        if fn.name.startswith("_") or fn.name.startswith("register_"):
            continue
        out.append(fn.name if not show_line else f"{fn.name} (line {fn.lineno})")
        imports.add(f"from {mod} import {fn.name}")

    return sorted(out), imports
39
+
40
+
41
def run_python_context(
    *,
    target: Path,
    root: Path,
    all_files: list[Path],
    depth: int,
    forward_only: bool,
    include_hubs: bool,
    line: bool,
) -> None:
    """Print a context report (related symbols + imports) for *target*.

    Builds the project import graph, expands *target* to related files up
    to *depth* hops, and echoes their public symbols plus ready-to-paste
    import lines.  Hub files are excluded unless *include_hubs* is set.
    """
    graph, dependents_count = build_graph_with_counts(all_files, root)

    hubs: set[Path] = set()
    if not include_hubs:
        # 0.5 ratio: a file imported by half the project counts as a hub.
        hubs = get_hub_files_by_ratio(dependents_count, len(all_files), 0.5)

    scope = get_related(graph, target, depth, include_reverse=not forward_only, hubs=hubs)
    scope_files = sorted((scope - {target}))

    typer.echo("")
    typer.echo("CONTEXT")
    typer.echo(f"Target: {target.relative_to(root)}")
    typer.echo(f"Depth: {depth} Mode: {'forward-only' if forward_only else 'forward+reverse'}")
    typer.echo("")

    imports_by_file: dict[Path, set[str]] = {}

    typer.echo("RELATED SYMBOLS")
    if not scope_files:
        # Nothing related: the IMPORTS section is skipped entirely.
        typer.echo(" (none)")
        typer.echo("")
        return

    for f in scope_files:
        lines, imps = _sym_lines_py(f, root, line)
        if lines:
            typer.echo(f"• {f.relative_to(root)}")
            for x in lines:
                typer.echo(f" {x}")
            typer.echo("")
        if imps:
            imports_by_file[f] = imps

    typer.echo("IMPORTS")
    # De-duplicate across files; output in sorted order.
    all_imports = sorted({imp for s in imports_by_file.values() for imp in s})
    if all_imports:
        for imp in all_imports:
            typer.echo(f" {imp}")
    else:
        typer.echo(" (none)")
    typer.echo("")