@musashishao/agent-kit 1.2.2 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.agent/mcp-gateway/README.md +121 -0
- package/.agent/mcp-gateway/dist/index.d.ts +11 -0
- package/.agent/mcp-gateway/dist/index.js +504 -0
- package/.agent/mcp-gateway/dist/sync/debouncer.d.ts +56 -0
- package/.agent/mcp-gateway/dist/sync/debouncer.js +112 -0
- package/.agent/mcp-gateway/dist/sync/incremental_syncer.d.ts +58 -0
- package/.agent/mcp-gateway/dist/sync/incremental_syncer.js +172 -0
- package/.agent/mcp-gateway/dist/sync/index.d.ts +6 -0
- package/.agent/mcp-gateway/dist/sync/index.js +6 -0
- package/.agent/mcp-gateway/dist/sync/timestamp_checker.d.ts +69 -0
- package/.agent/mcp-gateway/dist/sync/timestamp_checker.js +169 -0
- package/.agent/mcp-gateway/package.json +28 -0
- package/.agent/mcp-gateway/src/index.ts +608 -0
- package/.agent/mcp-gateway/src/sync/debouncer.ts +129 -0
- package/.agent/mcp-gateway/src/sync/incremental_syncer.ts +237 -0
- package/.agent/mcp-gateway/src/sync/index.ts +7 -0
- package/.agent/mcp-gateway/src/sync/timestamp_checker.ts +194 -0
- package/.agent/scripts/ak_cli.py +533 -0
- package/.agent/scripts/setup_host.py +557 -0
- package/.agent/scripts/verify_install.py +174 -0
- package/.agent/skills/app-builder/SKILL.md +51 -1
- package/.agent/skills/app-builder/scripts/generate_ai_infra.py +510 -0
- package/.agent/skills/documentation-templates/SKILL.md +9 -1
- package/.agent/skills/documentation-templates/agents-template.md +202 -0
- package/.agent/skills/graph-mapper/SKILL.md +211 -0
- package/.agent/skills/graph-mapper/scripts/generate_graph.py +503 -0
- package/.agent/skills/rag-engineering/SKILL.md +342 -0
- package/.agent/skills/rag-engineering/chunking-strategies.md +229 -0
- package/.agent/skills/rag-engineering/contextual-retrieval.md +261 -0
- package/.agent/skills/rag-engineering/hybrid-search.md +356 -0
- package/.agent/skills/rag-engineering/scripts/chunk_code.py +606 -0
- package/.agent/templates/mcp_configs/claude_desktop.json +14 -0
- package/.agent/templates/mcp_configs/cursor.json +13 -0
- package/.agent/templates/mcp_configs/vscode.json +13 -0
- package/.agent/workflows/create.md +70 -2
- package/bin/cli.js +91 -0
- package/docs/AI_DATA_INFRASTRUCTURE.md +288 -0
- package/docs/CHANGELOG_AI_INFRA.md +111 -0
- package/package.json +7 -2
|
@@ -0,0 +1,503 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Graph Mapper - Lightweight Dependency Graph Generator
|
|
4
|
+
|
|
5
|
+
Analyzes source code to build a dependency graph without external databases.
|
|
6
|
+
Supports TypeScript/JavaScript and Python projects.
|
|
7
|
+
|
|
8
|
+
Usage:
|
|
9
|
+
python generate_graph.py --src ./src --output .agent/graph.json --lang auto
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import os
|
|
13
|
+
import re
|
|
14
|
+
import json
|
|
15
|
+
import argparse
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Dict, List, Set, Optional
|
|
18
|
+
from dataclasses import dataclass, field, asdict
|
|
19
|
+
from datetime import datetime
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass
class Node:
    """Represents a file node in the dependency graph.

    One Node is produced per analyzed source file; ``id`` is the stable
    key referenced by ``Edge.source`` / ``Edge.target``.
    """
    # Project-relative file path; used as the unique node key.
    id: str
    type: str  # component, hook, util, service, page, etc.
    # Same value as ``id``; kept as an explicit field for serialization.
    path: str
    # Project-relative paths of files this file imports (resolved, deduped).
    imports: List[str] = field(default_factory=list)
    # Names this file exports (functions/classes/consts, deduped).
    exports: List[str] = field(default_factory=list)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class Edge:
    """Represents a dependency edge: ``source`` depends on ``target``."""
    source: str  # from: Node.id of the importing file
    target: str  # to: Node.id of the imported file
    type: str = "imports"  # imports, extends, implements
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class Graph:
    """The complete dependency graph.

    Serialized to JSON via ``dataclasses.asdict`` and rendered to
    markdown by ``generate_markdown``.
    """
    # All analyzed files.
    nodes: List[Node] = field(default_factory=list)
    # Resolved import relationships between nodes.
    edges: List[Edge] = field(default_factory=list)
    # Run info: generated_at, source_path, language, totals.
    metadata: Dict = field(default_factory=dict)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class TypeScriptAnalyzer:
    """Analyzes TypeScript/JavaScript files for imports and exports.

    Uses regexes (no TS compiler required) to collect import specifiers
    and exported names, then resolves relative imports to
    project-relative paths so graph edges can be built.
    """

    EXTENSIONS = {'.ts', '.tsx', '.js', '.jsx', '.mjs', '.cjs'}

    # Regex patterns for import detection
    IMPORT_PATTERNS = [
        # import X from 'module'
        r"import\s+(?:[\w\s{},*]+)\s+from\s+['\"]([^'\"]+)['\"]",
        # import 'module'
        r"import\s+['\"]([^'\"]+)['\"]",
        # require('module')
        r"require\s*\(\s*['\"]([^'\"]+)['\"]\s*\)",
        # dynamic import('module')
        r"import\s*\(\s*['\"]([^'\"]+)['\"]\s*\)",
    ]

    EXPORT_PATTERNS = [
        # export default
        r"export\s+default\s+(?:class|function|const|let|var)?\s*(\w+)?",
        # export { name }
        r"export\s*\{([^}]+)\}",
        # export const/let/var/function/class
        r"export\s+(?:const|let|var|function|class|interface|type|enum)\s+(\w+)",
    ]

    def __init__(self, base_path: Path, exclude_patterns: List[str]):
        """
        Args:
            base_path: Project root; node ids are paths relative to this.
            exclude_patterns: Substrings; any path containing one is skipped.
        """
        self.base_path = base_path
        self.exclude_patterns = exclude_patterns

    def should_exclude(self, path: Path) -> bool:
        """Return True if *path* contains any exclude-pattern substring."""
        path_str = str(path)
        return any(pattern in path_str for pattern in self.exclude_patterns)

    def analyze_file(self, file_path: Path) -> Optional[Node]:
        """Analyze one file; return None if excluded or unreadable."""
        if self.should_exclude(file_path):
            return None

        try:
            content = file_path.read_text(encoding='utf-8')
        except (UnicodeDecodeError, PermissionError):
            # Binary or unreadable file -- skip silently.
            return None

        relative_path = str(file_path.relative_to(self.base_path))

        # Determine node type based on path/filename conventions.
        node_type = self._detect_type(relative_path)

        # Collect raw import specifiers, then resolve relative ones to
        # project paths (external packages resolve to None and drop out).
        raw_imports = []
        for pattern in self.IMPORT_PATTERNS:
            raw_imports.extend(re.findall(pattern, content))
        imports = [self._resolve_import(imp, file_path) for imp in raw_imports]
        imports = [imp for imp in imports if imp]

        # Exported names; a brace group ("export { a, b }") is split apart.
        exports = []
        for pattern in self.EXPORT_PATTERNS:
            for match in re.findall(pattern, content):
                if match:
                    if ',' in match:
                        exports.extend(e.strip() for e in match.split(','))
                    else:
                        exports.append(match.strip())

        return Node(
            id=relative_path,
            type=node_type,
            path=relative_path,
            imports=list(set(imports)),
            exports=list(set(exports))
        )

    def _detect_type(self, path: str) -> str:
        """Classify a file by conventional directory names (first match wins)."""
        path_lower = path.lower()

        if '/components/' in path_lower or path_lower.startswith('components/'):
            return 'component'
        elif '/hooks/' in path_lower or path_lower.startswith('hooks/'):
            return 'hook'
        elif '/utils/' in path_lower or '/lib/' in path_lower:
            return 'utility'
        elif '/services/' in path_lower or '/api/' in path_lower:
            return 'service'
        elif '/pages/' in path_lower or '/app/' in path_lower:
            return 'page'
        elif '/types/' in path_lower:
            return 'type'
        elif '/store/' in path_lower or '/redux/' in path_lower:
            return 'store'
        elif '.test.' in path_lower or '.spec.' in path_lower:
            return 'test'
        else:
            return 'module'

    def _resolve_import(self, import_path: str, from_file: Path) -> Optional[str]:
        """Resolve a relative import to a project-relative path.

        Returns None for external packages and for paths that resolve
        outside ``base_path``.
        """
        # Skip external packages: only './' and '../' imports are local.
        if not import_path.startswith('.'):
            return None

        resolved = (from_file.parent / import_path).resolve()

        if not resolved.suffix:
            # Bare specifier: first try each known extension on the path
            # itself ('./util' -> 'util.ts', 'util.js', ...).
            for ext in self.EXTENSIONS:
                candidate = resolved.with_suffix(ext)
                if candidate.exists():
                    resolved = candidate
                    break
            else:
                # Directory import: look for an index file. BUG FIX: the
                # original only probed 'index.ts', which missed
                # index.tsx/index.js/index.jsx/... in JS/JSX projects.
                # NOTE: EXTENSIONS is a set, so if several index files
                # coexist the pick is arbitrary — rare in practice.
                for ext in self.EXTENSIONS:
                    index_candidate = resolved / f'index{ext}'
                    if index_candidate.exists():
                        resolved = index_candidate
                        break

        try:
            return str(resolved.relative_to(self.base_path))
        except ValueError:
            # Import escaped the project root.
            return None

    def analyze_directory(self, directory: Path) -> List[Node]:
        """Analyze all JS/TS files under *directory* (recursive)."""
        nodes = []

        for file_path in directory.rglob('*'):
            if file_path.is_file() and file_path.suffix in self.EXTENSIONS:
                node = self.analyze_file(file_path)
                if node:
                    nodes.append(node)

        return nodes
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
class PythonAnalyzer:
    """Analyzes Python files for imports and exports.

    Uses regexes to collect dotted module names and top-level
    definitions, then maps imports onto real files under ``base_path``.
    """

    EXTENSIONS = {'.py'}

    IMPORT_PATTERNS = [
        # from module import name
        r"from\s+([\w.]+)\s+import",
        # import module
        r"^import\s+([\w.]+)",
    ]

    def __init__(self, base_path: Path, exclude_patterns: List[str]):
        """
        Args:
            base_path: Project root; node ids are paths relative to this.
            exclude_patterns: Substrings; any path containing one is skipped.
        """
        self.base_path = base_path
        self.exclude_patterns = exclude_patterns

    def should_exclude(self, path: Path) -> bool:
        """Return True if *path* contains any exclude-pattern substring."""
        path_str = str(path)
        return any(pattern in path_str for pattern in self.exclude_patterns)

    def analyze_file(self, file_path: Path) -> Optional[Node]:
        """Analyze one Python file; return None if excluded or unreadable."""
        if self.should_exclude(file_path):
            return None

        try:
            content = file_path.read_text(encoding='utf-8')
        except (UnicodeDecodeError, PermissionError):
            # Binary or unreadable file -- skip silently.
            return None

        relative_path = str(file_path.relative_to(self.base_path))
        node_type = self._detect_type(relative_path)

        # Gather dotted module names, then map each onto a project file;
        # unresolvable (external/stdlib/relative) imports drop out.
        imports = []
        for pattern in self.IMPORT_PATTERNS:
            imports.extend(re.findall(pattern, content, re.MULTILINE))
        imports = [self._resolve_import(imp) for imp in imports]
        imports = [imp for imp in imports if imp]

        # Exports: top-level classes plus top-level functions whose names
        # do not start with an underscore.
        exports = []
        exports.extend(re.findall(r"^class\s+(\w+)", content, re.MULTILINE))
        exports.extend(re.findall(r"^def\s+([a-zA-Z]\w*)", content, re.MULTILINE))

        return Node(
            id=relative_path,
            type=node_type,
            path=relative_path,
            imports=list(set(imports)),
            exports=list(set(exports))
        )

    def _detect_type(self, path: str) -> str:
        """Classify a Python file by conventional directory names."""
        path_lower = path.lower()

        if '/models/' in path_lower:
            return 'model'
        elif '/views/' in path_lower or '/routes/' in path_lower:
            return 'view'
        elif '/services/' in path_lower:
            return 'service'
        elif '/utils/' in path_lower or '/helpers/' in path_lower:
            return 'utility'
        elif '/tests/' in path_lower or 'test_' in path_lower:
            return 'test'
        elif '/api/' in path_lower:
            return 'api'
        else:
            return 'module'

    def _resolve_import(self, import_path: str) -> Optional[str]:
        """Map a dotted module path to a project file, if one exists.

        Returns None for modules that cannot be located under
        ``base_path`` (external packages, stdlib, relative imports).
        """
        # BUG FIX: relative imports ('from . import x' captures '.',
        # 'from .mod import y' captures '.mod') produced empty path
        # segments; joining those made an *absolute* path, and
        # Path('/').with_suffix('.py') raises ValueError ("empty name").
        # They cannot be resolved without package context, so skip them.
        if import_path.startswith('.'):
            return None

        # Convert dot notation to a path under the project root.
        path_parts = import_path.split('.')
        possible_path = self.base_path / '/'.join(path_parts)

        # Prefer a module file, then a package __init__.
        for candidate in (possible_path.with_suffix('.py'),
                          possible_path / '__init__.py'):
            if candidate.exists():
                try:
                    return str(candidate.relative_to(self.base_path))
                except ValueError:
                    return None

        return None

    def analyze_directory(self, directory: Path) -> List[Node]:
        """Analyze all Python files under *directory* (recursive)."""
        nodes = []

        for file_path in directory.rglob('*.py'):
            if file_path.is_file():
                node = self.analyze_file(file_path)
                if node:
                    nodes.append(node)

        return nodes
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
def build_edges(nodes: List[Node]) -> List[Edge]:
    """Derive dependency edges from each node's resolved imports.

    An edge is emitted only when the import target is itself one of the
    analyzed nodes, so external modules never appear in the graph.
    """
    known_ids = {n.id for n in nodes}
    return [
        Edge(source=n.id, target=dep, type="imports")
        for n in nodes
        for dep in n.imports
        if dep in known_ids
    ]
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def calculate_impact(graph: Graph, file_id: str, depth: int = 3) -> Dict:
    """Compute which files are affected by a change to *file_id*.

    Walks reverse dependency edges breadth-first for up to *depth*
    levels and reports direct plus transitive dependents.
    """
    # Reverse adjacency: target -> set of files that import it.
    importers: Dict[str, Set[str]] = {}
    for edge in graph.edges:
        importers.setdefault(edge.target, set()).add(edge.source)

    # BFS outward from the changed file, one ring per depth step.
    seen = set()
    frontier = {file_id}
    reached = []  # may list a file more than once; deduped for the score

    for _ in range(depth):
        upcoming = set()
        for current in frontier:
            if current in seen:
                continue
            seen.add(current)
            for parent in importers.get(current, ()):
                if parent not in seen:
                    upcoming.add(parent)
                    reached.append(parent)
        frontier = upcoming
        if not frontier:
            break

    return {
        "file": file_id,
        "direct_dependents": list(importers.get(file_id, [])),
        "transitive_dependents": reached,
        "impact_score": len(set(reached))
    }
|
|
367
|
+
|
|
368
|
+
|
|
369
|
+
def generate_markdown(graph: Graph, output_path: Path):
    """Render the graph as a human-readable markdown report at *output_path*."""
    # Reverse lookup: file -> files that import it.
    imported_by: Dict[str, List[str]] = {}
    for edge in graph.edges:
        imported_by.setdefault(edge.target, []).append(edge.source)

    lines = [
        "# Dependency Graph",
        "",
        f"> Generated: {datetime.now().isoformat()}",
        f"> Total Files: {len(graph.nodes)}",
        f"> Total Dependencies: {len(graph.edges)}",
        "",
        "---",
        ""
    ]

    # Bucket nodes by detected type so the report groups related files.
    by_type: Dict[str, List[Node]] = {}
    for node in graph.nodes:
        by_type.setdefault(node.type, []).append(node)

    for type_name, members in sorted(by_type.items()):
        lines += [f"## {type_name.title()}s ({len(members)})", ""]

        for node in sorted(members, key=lambda n: n.id):
            lines += [f"### `{node.id}`", ""]

            if node.imports:
                lines.append("**Imports:**")
                lines += [f"- `{imp}`" for imp in sorted(node.imports)]
                lines.append("")

            importers = imported_by.get(node.id, [])
            if importers:
                lines.append("**Imported by:**")
                lines += [f"- `{dep}`" for dep in sorted(importers)]
                lines.append("")

            # A file's impact score is simply its direct-dependent count.
            lines += [f"**Impact Score:** {len(importers)}", "", "---", ""]

    output_path.write_text('\n'.join(lines), encoding='utf-8')
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def main():
    """CLI entry point: parse arguments, analyze, and emit graph artifacts.

    Returns a process exit code (0 on success, 1 on bad input).
    """
    parser = argparse.ArgumentParser(description='Generate dependency graph')
    parser.add_argument('--src', default='./src', help='Source directory')
    parser.add_argument('--output', default='.agent/graph.json', help='Output file')
    parser.add_argument('--format', choices=['json', 'markdown', 'both'], default='both')
    parser.add_argument('--lang', choices=['typescript', 'python', 'auto'], default='auto')
    parser.add_argument('--depth', type=int, default=3, help='Max depth for impact analysis')
    parser.add_argument('--exclude', default='node_modules,__pycache__,.git,dist,build',
                        help='Comma-separated patterns to exclude')
    args = parser.parse_args()

    source_root = Path(args.src).resolve()
    target = Path(args.output)
    excludes = args.exclude.split(',')

    if not source_root.exists():
        print(f"Error: Source directory '{source_root}' does not exist")
        return 1

    # Pick a language: the explicit flag wins; otherwise count file types
    # and prefer TypeScript on a tie.
    language = args.lang
    if language == 'auto':
        ts_count = len(list(source_root.rglob('*.ts'))) + len(list(source_root.rglob('*.tsx')))
        py_count = len(list(source_root.rglob('*.py')))
        language = 'typescript' if ts_count >= py_count else 'python'
        print(f"Auto-detected language: {language}")

    analyzer_cls = TypeScriptAnalyzer if language == 'typescript' else PythonAnalyzer
    analyzer = analyzer_cls(source_root, excludes)

    print(f"Analyzing {source_root}...")
    nodes = analyzer.analyze_directory(source_root)
    print(f"Found {len(nodes)} files")

    edges = build_edges(nodes)
    print(f"Found {len(edges)} dependencies")

    graph = Graph(
        nodes=nodes,
        edges=edges,
        metadata={
            "generated_at": datetime.now().isoformat(),
            "source_path": str(source_root),
            "language": language,
            "total_files": len(nodes),
            "total_edges": len(edges)
        }
    )

    # Ensure the output directory exists before any write.
    target.parent.mkdir(parents=True, exist_ok=True)

    if args.format in ['json', 'both']:
        payload = {
            "metadata": graph.metadata,
            "nodes": [asdict(n) for n in graph.nodes],
            "edges": [asdict(e) for e in graph.edges]
        }
        target.write_text(json.dumps(payload, indent=2), encoding='utf-8')
        print(f"Generated: {target}")

    if args.format in ['markdown', 'both']:
        md_target = target.with_suffix('.md')
        generate_markdown(graph, md_target)
        print(f"Generated: {md_target}")

    print("Done!")
    return 0
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
if __name__ == '__main__':
    # Raise SystemExit directly: the exit() builtin is injected by the
    # `site` module for interactive use and is not guaranteed to exist
    # (e.g. under `python -S`). Behavior is identical — the process
    # terminates with main()'s return value as the exit code.
    raise SystemExit(main())
|