archrisk-engine 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/aiDiagnosis.d.ts +24 -0
- package/dist/aiDiagnosis.js +125 -0
- package/dist/analyzer.d.ts +18 -0
- package/dist/analyzer.js +136 -0
- package/dist/archScanner.d.ts +46 -0
- package/dist/archScanner.js +235 -0
- package/dist/deepAnalysis.d.ts +10 -0
- package/dist/deepAnalysis.js +101 -0
- package/dist/index.d.ts +5 -0
- package/dist/index.js +21 -0
- package/dist/repoAnalyzer.d.ts +43 -0
- package/dist/repoAnalyzer.js +299 -0
- package/package.json +21 -0
- package/src/aiDiagnosis.ts +159 -0
- package/src/analyzer.ts +124 -0
- package/src/archScanner.ts +227 -0
- package/src/deepAnalysis.ts +85 -0
- package/src/index.ts +5 -0
- package/src/repoAnalyzer.ts +298 -0
- package/tsconfig.json +22 -0
package/src/analyzer.ts
ADDED
|
@@ -0,0 +1,124 @@
|
|
|
1
|
+
import { spawn } from 'child_process';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
import * as fs from 'fs';
|
|
4
|
+
import { tmpdir } from 'os';
|
|
5
|
+
import { randomBytes } from 'crypto';
|
|
6
|
+
|
|
7
|
+
/**
|
|
8
|
+
* [The Eye] Error Capture Engine (TypeScript Version)
|
|
9
|
+
*
|
|
10
|
+
* Intercepts Python syntax/runtime errors.
|
|
11
|
+
*/
|
|
12
|
+
|
|
13
|
+
/**
 * Result of a single-file analysis pass.
 * `hasError` is the discriminator: when false, the other fields are omitted.
 */
export interface AnalysisResult {
  // True when a syntax error or a risk pattern was found.
  hasError: boolean;
  // Human-readable description of the problem (risk rules emit Korean text).
  error?: string;
  // 1-based line number where the problem was detected.
  line?: number;
  // Category, e.g. 'SyntaxError', 'SecurityRisk', or 'ProductionRisk'.
  type?: string;
  // Original filename supplied by the caller, for reporting context.
  file?: string;
}
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Analyze Python code string for syntax errors and security risks
|
|
23
|
+
* @param code - Python source code
|
|
24
|
+
* @param fileName - Original filename for context
|
|
25
|
+
*/
|
|
26
|
+
export async function analyzePythonCode(code: string, fileName: string): Promise<AnalysisResult> {
|
|
27
|
+
// 1. Architecture Health Scan (God Module / Large File)
|
|
28
|
+
const lines = code.split('\n');
|
|
29
|
+
if (lines.length > 800) {
|
|
30
|
+
return {
|
|
31
|
+
hasError: true,
|
|
32
|
+
error: `Large File Risk: ${lines.length} lines. 이 파일의 수정은 시스템 전반에 예측 불가능한 영향을 미칠 수 있습니다.`,
|
|
33
|
+
line: 1,
|
|
34
|
+
type: 'ProductionRisk',
|
|
35
|
+
file: fileName
|
|
36
|
+
};
|
|
37
|
+
}
|
|
38
|
+
|
|
39
|
+
// 2. Risk Scan (Operational & Production Readiness)
|
|
40
|
+
const riskResult = scanForRisks(code, fileName);
|
|
41
|
+
if (riskResult.hasError) {
|
|
42
|
+
return riskResult;
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
// 3. Syntax Check
|
|
46
|
+
const flatFileName = fileName.replace(/[\/\\]/g, '_');
|
|
47
|
+
const tempFilePath = path.join(tmpdir(), `arch_risk_${randomBytes(4).toString('hex')}_${flatFileName}`);
|
|
48
|
+
|
|
49
|
+
try {
|
|
50
|
+
fs.writeFileSync(tempFilePath, code);
|
|
51
|
+
|
|
52
|
+
return new Promise((resolve) => {
|
|
53
|
+
const proc = spawn('python3', ['-m', 'py_compile', tempFilePath]);
|
|
54
|
+
|
|
55
|
+
let stderr = '';
|
|
56
|
+
proc.stderr.on('data', (data) => {
|
|
57
|
+
stderr += data.toString();
|
|
58
|
+
});
|
|
59
|
+
|
|
60
|
+
proc.on('close', (code) => {
|
|
61
|
+
if (code !== 0) {
|
|
62
|
+
const errorInfo = parseErrorMessage(stderr, fileName);
|
|
63
|
+
resolve({
|
|
64
|
+
hasError: true,
|
|
65
|
+
...errorInfo
|
|
66
|
+
});
|
|
67
|
+
} else {
|
|
68
|
+
resolve({ hasError: false });
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
// Cleanup temp file
|
|
72
|
+
if (fs.existsSync(tempFilePath)) fs.unlinkSync(tempFilePath);
|
|
73
|
+
});
|
|
74
|
+
|
|
75
|
+
setTimeout(() => {
|
|
76
|
+
proc.kill();
|
|
77
|
+
resolve({ hasError: false, error: 'Analysis timeout' });
|
|
78
|
+
if (fs.existsSync(tempFilePath)) fs.unlinkSync(tempFilePath);
|
|
79
|
+
}, 5000);
|
|
80
|
+
});
|
|
81
|
+
} catch (error: any) {
|
|
82
|
+
return { hasError: true, error: error.message };
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
function scanForRisks(code: string, fileName: string): AnalysisResult {
|
|
87
|
+
const risks = [
|
|
88
|
+
{ pattern: /os\.system\(/, type: 'SecurityRisk', message: '[보안] os.system() 사용이 감지되었습니다. 외부 공격에 노출될 위험이 있습니다.' },
|
|
89
|
+
{ pattern: /subprocess\.(popen|run|call|check_output)\(.*shell\s*=\s*True/, type: 'SecurityRisk', message: '[보안] shell=True 옵션은 명령어 주입 공격의 통로가 될 수 있습니다.' },
|
|
90
|
+
{ pattern: /eval\(|exec\(/, type: 'SecurityRisk', message: '[보안] 동적 코드 실행 함수 사용은 잠재적인 보안 홀을 형성합니다.' },
|
|
91
|
+
{ pattern: /requests\.(get|post|put|delete|patch)\((?!.*timeout=)/, type: 'ProductionRisk', message: '[운영] 외부 API 호출 시 timeout 설정이 없습니다. 장애 발생 시 서비스가 무한 대기에 빠질 수 있습니다.' },
|
|
92
|
+
{ pattern: /aiohttp\.ClientSession\(\).*(get|post|put|delete|patch)\((?!.*timeout=)/, type: 'ProductionRisk', message: '[운영] 비동기 호출 시 timeout 설정이 없습니다. 시스템 리소스 고갈의 원인이 됩니다.' },
|
|
93
|
+
{ pattern: /(?:api_key|password|secret|token)\s*=\s*['"][a-zA-Z0-9_-]{10,}['"]/, type: 'ProductionRisk', message: '[운영] 민감 정보(API Key/Password)가 코드 내에 하드코딩 되어 있습니다. 보안 사고의 직접적인 원인입니다.' },
|
|
94
|
+
];
|
|
95
|
+
|
|
96
|
+
const lines = code.split('\n');
|
|
97
|
+
for (let i = 0; i < lines.length; i++) {
|
|
98
|
+
for (const risk of risks) {
|
|
99
|
+
if (risk.pattern.test(lines[i])) {
|
|
100
|
+
return {
|
|
101
|
+
hasError: true,
|
|
102
|
+
error: risk.message,
|
|
103
|
+
line: i + 1,
|
|
104
|
+
type: risk.type,
|
|
105
|
+
file: fileName
|
|
106
|
+
};
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
return { hasError: false };
|
|
112
|
+
}
|
|
113
|
+
|
|
114
|
+
function parseErrorMessage(stderr: string, fileName: string) {
|
|
115
|
+
const lineMatch = stderr.match(/line (\d+)/);
|
|
116
|
+
const typeMatch = stderr.match(/(\w+Error):/);
|
|
117
|
+
|
|
118
|
+
return {
|
|
119
|
+
error: stderr.trim(),
|
|
120
|
+
line: lineMatch ? parseInt(lineMatch[1]) : undefined,
|
|
121
|
+
type: typeMatch ? typeMatch[1] : 'SyntaxError',
|
|
122
|
+
file: fileName
|
|
123
|
+
};
|
|
124
|
+
}
|
|
@@ -0,0 +1,227 @@
|
|
|
1
|
+
import * as fs from 'fs';
|
|
2
|
+
import * as path from 'path';
|
|
3
|
+
|
|
4
|
+
/**
|
|
5
|
+
* [Phase 8] Architecture Scanner - Ported from Code Observatory
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
/** One architecture finding (rule violation) produced by a scan. */
export interface ArchIssue {
  // Unique id (see safeId); referenced from ArchNode.issues.
  id: string;
  // fastScan only emits 'WARN' today; 'ERROR' is reserved for later phases.
  severity: 'WARN' | 'ERROR';
  // Stable rule identifier, e.g. 'root-files', 'deep-nesting', 'hotspots'.
  ruleId: string;
  // Short human-readable headline.
  title: string;
  // Longer explanation, usually including the concrete numbers.
  details: string;
  // Paths involved, relative to the scanned root (fastScan caps these at 10).
  relatedPaths?: string[];
  // Free-form numeric metrics backing the finding.
  metrics?: any;
}

/** A node in the architecture graph. */
export interface ArchNode {
  id: string;
  kind: 'project' | 'folder' | 'file' | 'hotspot' | 'cycle';
  // Display name (project nodes use the root directory's basename).
  label: string;
  health: 'OK' | 'WARN' | 'ERROR';
  // Absolute path this node represents.
  path: string;
  // Free-form metrics (file/dir counts, depth, scan duration ms, ...).
  metrics: any;
  // Ids of ArchIssues attached to this node.
  issues?: string[];
}

/** A directed edge between graph nodes. */
export interface ArchEdge {
  id: string;
  source: string;
  target: string;
  // 'contains' for hierarchy, 'imports' for dependency edges.
  type: 'contains' | 'imports';
}

/** Result envelope for one scan run. */
export interface ArchScanResult {
  scanId: string;
  // Root directory that was scanned.
  rootPath: string;
  // Epoch millis when the result was assembled.
  createdAt: number;
  // Aggregated health across all findings.
  health: 'OK' | 'WARN' | 'ERROR';
  nodes: ArchNode[];
  edges: ArchEdge[];
  issues: ArchIssue[];
  // Pipeline stage this result represents (fastScan emits 'FAST').
  phase: 'FAST' | 'DEPS' | 'DONE';
}
|
|
45
|
+
|
|
46
|
+
function safeId(prefix = 'id') {
|
|
47
|
+
return `${prefix}_${Date.now()}_${Math.random().toString(16).slice(2)}`;
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
function isTextFile(p: string) {
|
|
51
|
+
const ext = path.extname(p).toLowerCase();
|
|
52
|
+
return ['.js', '.jsx', '.ts', '.tsx', '.mjs', '.cjs', '.json', '.py'].includes(ext);
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
function isSkippableDir(name: string) {
|
|
56
|
+
return ['node_modules', '.git', '.next', 'dist', 'build', 'out', '.turbo', '__pycache__', 'venv', '.venv'].includes(name);
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
function suspiciousFolderName(name: string) {
|
|
60
|
+
const n = name.toLowerCase();
|
|
61
|
+
return ['temp', 'tmp', 'backup', 'bak', 'old', 'new', 'new2', 'final', 'final_final', 'copy'].some(k => n.includes(k));
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
function countLOC(text: string) {
|
|
65
|
+
return text.split(/\r?\n/).length;
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
export function fastScan(rootPath: string): ArchScanResult {
|
|
69
|
+
const start = Date.now();
|
|
70
|
+
let fileCount = 0;
|
|
71
|
+
let dirCount = 0;
|
|
72
|
+
let rootFiles = 0;
|
|
73
|
+
let maxDepth = 0;
|
|
74
|
+
let suspiciousDirs: string[] = [];
|
|
75
|
+
const hotspots: { filePath: string; loc: number }[] = [];
|
|
76
|
+
|
|
77
|
+
function walk(dir: string, depth: number) {
|
|
78
|
+
maxDepth = Math.max(maxDepth, depth);
|
|
79
|
+
let entries;
|
|
80
|
+
try {
|
|
81
|
+
entries = fs.readdirSync(dir, { withFileTypes: true });
|
|
82
|
+
} catch { return; }
|
|
83
|
+
|
|
84
|
+
for (const ent of entries) {
|
|
85
|
+
const full = path.join(dir, ent.name);
|
|
86
|
+
if (ent.isDirectory()) {
|
|
87
|
+
if (isSkippableDir(ent.name)) continue;
|
|
88
|
+
dirCount++;
|
|
89
|
+
if (suspiciousFolderName(ent.name)) suspiciousDirs.push(full);
|
|
90
|
+
walk(full, depth + 1);
|
|
91
|
+
} else if (ent.isFile()) {
|
|
92
|
+
fileCount++;
|
|
93
|
+
if (dir === rootPath) rootFiles++;
|
|
94
|
+
if (isTextFile(full)) {
|
|
95
|
+
try {
|
|
96
|
+
const txt = fs.readFileSync(full, 'utf8');
|
|
97
|
+
const loc = countLOC(txt);
|
|
98
|
+
if (loc >= 800) hotspots.push({ filePath: full, loc });
|
|
99
|
+
} catch { }
|
|
100
|
+
}
|
|
101
|
+
}
|
|
102
|
+
}
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
walk(rootPath, 0);
|
|
106
|
+
|
|
107
|
+
const scanId = safeId('scan');
|
|
108
|
+
const issues: ArchIssue[] = [];
|
|
109
|
+
const nodes: ArchNode[] = [];
|
|
110
|
+
const edges: ArchEdge[] = [];
|
|
111
|
+
let overall: 'OK' | 'WARN' | 'ERROR' = 'OK';
|
|
112
|
+
|
|
113
|
+
if (rootFiles > 60) {
|
|
114
|
+
overall = 'WARN';
|
|
115
|
+
issues.push({ id: safeId('issue'), severity: 'WARN', ruleId: 'root-files', title: 'Root is cluttered', details: `Root directory has ${rootFiles} files. Consider moving code under src/.`, metrics: { rootFiles } });
|
|
116
|
+
}
|
|
117
|
+
if (maxDepth > 10) {
|
|
118
|
+
if (overall === 'OK') overall = 'WARN';
|
|
119
|
+
issues.push({ id: safeId('issue'), severity: 'WARN', ruleId: 'deep-nesting', title: 'Deep directory nesting', details: `Max directory depth is ${maxDepth}.`, metrics: { maxDepth } });
|
|
120
|
+
}
|
|
121
|
+
if (suspiciousDirs.length >= 2) {
|
|
122
|
+
if (overall === 'OK') overall = 'WARN';
|
|
123
|
+
issues.push({ id: safeId('issue'), severity: 'WARN', ruleId: 'suspicious-folders', title: 'Suspicious folders found', details: `Found folders like temp/backup/etc.`, relatedPaths: suspiciousDirs.slice(0, 10).map(p => path.relative(rootPath, p)), metrics: { count: suspiciousDirs.length } });
|
|
124
|
+
}
|
|
125
|
+
if (hotspots.length >= 3) {
|
|
126
|
+
if (overall === 'OK') overall = 'WARN';
|
|
127
|
+
issues.push({ id: safeId('issue'), severity: 'WARN', ruleId: 'hotspots', title: 'Large files (hotspots)', details: `Found ${hotspots.length} files with LOC >= 800.`, relatedPaths: hotspots.slice(0, 10).map(h => path.relative(rootPath, h.filePath)), metrics: { hotspots: hotspots.length } });
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
nodes.push({ id: 'ARCH_PROJECT', kind: 'project', label: path.basename(rootPath) || 'PROJECT', health: overall, path: rootPath, metrics: { fileCount, dirCount, rootFiles, maxDepth, ms: Date.now() - start }, issues: issues.map(i => i.id) });
|
|
131
|
+
|
|
132
|
+
return { scanId, rootPath, createdAt: Date.now(), health: overall, nodes, edges, issues, phase: 'FAST' };
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
// --- DEPS SCAN ---
|
|
136
|
+
function parseRelativeImports(fileText: string) {
|
|
137
|
+
const imports: string[] = [];
|
|
138
|
+
const re1 = /import\s+[^'"]*['"]([^'"]+)['"]/g;
|
|
139
|
+
const re2 = /require\(\s*['"]([^'"]+)['"]\s*\)/g;
|
|
140
|
+
const re3 = /from\s+['"]([^'"]+)['"]\s+import/g; // Python style
|
|
141
|
+
let m;
|
|
142
|
+
while ((m = re1.exec(fileText))) imports.push(m[1]);
|
|
143
|
+
while ((m = re2.exec(fileText))) imports.push(m[1]);
|
|
144
|
+
while ((m = re3.exec(fileText))) imports.push(m[1]);
|
|
145
|
+
return imports.filter(s => s.startsWith('./') || s.startsWith('../') || s.includes('.'));
|
|
146
|
+
}
|
|
147
|
+
|
|
148
|
+
function resolveImport(fromFile: string, spec: string) {
|
|
149
|
+
const base = path.resolve(path.dirname(fromFile), spec);
|
|
150
|
+
const candidates = [
|
|
151
|
+
base,
|
|
152
|
+
base + '.py', base + '.ts', base + '.tsx', base + '.js', base + '.jsx',
|
|
153
|
+
path.join(base, '__init__.py'), path.join(base, 'index.ts'), path.join(base, 'index.js')
|
|
154
|
+
];
|
|
155
|
+
for (const c of candidates) {
|
|
156
|
+
if (fs.existsSync(c) && fs.statSync(c).isFile()) return c;
|
|
157
|
+
}
|
|
158
|
+
return null;
|
|
159
|
+
}
|
|
160
|
+
|
|
161
|
+
/**
 * Phase-2 "DEPS" scan: build a file-level import graph for the tree under
 * `rootPath` and detect import cycles.
 *
 * @param rootPath - Absolute path of the project root.
 * @returns `adj`: absolute file path -> set of resolved import targets
 *          (only files that actually import something appear as keys);
 *          `cycles`: each entry is a path list starting and ending at the
 *          same file. NOTE(review): the same cycle may be reported more than
 *          once if reached via different back edges — confirm before relying
 *          on uniqueness.
 */
export function depsScan(rootPath: string): { adj: Map<string, Set<string>>, cycles: string[][] } {
  const files: string[] = [];
  // Collect every analyzable text file; unreadable dirs are skipped silently.
  function walk(dir: string) {
    let entries;
    try { entries = fs.readdirSync(dir, { withFileTypes: true }); } catch { return; }
    for (const ent of entries) {
      const full = path.join(dir, ent.name);
      if (ent.isDirectory()) { if (isSkippableDir(ent.name)) continue; walk(full); }
      else if (ent.isFile() && isTextFile(full)) files.push(full);
    }
  }
  walk(rootPath);

  // Build the adjacency map: only specs that resolve to real files count.
  const adj = new Map<string, Set<string>>();
  for (const f of files) {
    let txt;
    try { txt = fs.readFileSync(f, 'utf8'); } catch { continue; }
    const specs = parseRelativeImports(txt);
    for (const spec of specs) {
      const resolved = resolveImport(f, spec);
      if (!resolved) continue;
      if (!adj.has(f)) adj.set(f, new Set());
      adj.get(f)!.add(resolved);
    }
  }

  // Cycle detection: classic DFS with an on-stack set. `parent` records the
  // DFS tree edge used to reach each node, so a back edge u -> v can be
  // unwound into the concrete cycle path.
  const visited = new Set<string>();
  const stack = new Set<string>();
  const parent = new Map<string, string>();
  const cycles: string[][] = [];

  function dfs(u: string) {
    visited.add(u);
    stack.add(u);
    const nbrs = adj.get(u);
    if (nbrs) {
      for (const v of nbrs) {
        if (!visited.has(v)) {
          parent.set(v, u);
          dfs(v);
        } else if (stack.has(v)) {
          // Back edge found: walk parent pointers from u back to v to
          // reconstruct the cycle (length-capped at 50 as a safety valve).
          const cycle = [v];
          let cur: string | undefined = u;
          while (cur && cur !== v && cycle.length < 50) {
            cycle.push(cur);
            cur = parent.get(cur);
          }
          cycle.push(v);
          cycle.reverse();
          cycles.push(cycle);
        }
      }
    }
    stack.delete(u);
  }

  // Only files with outgoing imports are DFS roots; pure leaves cannot
  // start a cycle on their own.
  for (const f of adj.keys()) {
    if (!visited.has(f)) dfs(f);
  }

  return { adj, cycles };
}
|
|
223
|
+
|
|
224
|
+
/**
|
|
225
|
+
* Note: Full DEPS scan requires complex dependency resolution logic.
|
|
226
|
+
* For now, we provide the FAST scan as the baseline for the GitHub App.
|
|
227
|
+
*/
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
import { GoogleGenerativeAI } from '@google/generative-ai';
|
|
2
|
+
import * as fs from 'fs-extra';
|
|
3
|
+
import * as path from 'path';
|
|
4
|
+
|
|
5
|
+
/** Structured output of the AI deep-analysis pass. */
export interface DeepAnalysisResult {
  // High-level overview of the project's architecture health.
  summary: string;
  // Per-file refactoring recommendations extracted from the model reply.
  refactoringGuides: {
    // File path as reported by the model — presumably repo-relative; verify.
    file: string;
    // Why the file needs refactoring.
    description: string;
    // How to refactor it.
    suggestion: string;
  }[];
  // Technical-debt estimate on a 0-100 scale (per the prompt contract).
  techDebtScore: number;
}
|
|
14
|
+
|
|
15
|
+
export async function runDeepAnalysis(
|
|
16
|
+
repoDir: string,
|
|
17
|
+
provider: string,
|
|
18
|
+
apiKey: string
|
|
19
|
+
): Promise<DeepAnalysisResult> {
|
|
20
|
+
if (provider !== 'GEMINI') {
|
|
21
|
+
throw new Error(`Provider ${provider} is not yet supported in Deep Analysis.`);
|
|
22
|
+
}
|
|
23
|
+
|
|
24
|
+
const genAI = new GoogleGenerativeAI(apiKey);
|
|
25
|
+
const model = genAI.getGenerativeModel({ model: 'gemini-1.5-flash' });
|
|
26
|
+
|
|
27
|
+
// Gather some context (simplified for MVP)
|
|
28
|
+
const files = await getRelevantFiles(repoDir);
|
|
29
|
+
const codeContext = await Promise.all(files.map(async f => {
|
|
30
|
+
const content = await fs.readFile(f, 'utf8');
|
|
31
|
+
return `File: ${path.relative(repoDir, f)}\nContent:\n${content.substring(0, 1000)}...`;
|
|
32
|
+
}));
|
|
33
|
+
|
|
34
|
+
const prompt = `
|
|
35
|
+
You are an expert Software Architect. Analyze the following project for:
|
|
36
|
+
1. Code smells and anti-patterns.
|
|
37
|
+
2. Specific refactoring suggestions.
|
|
38
|
+
3. Technical debt estimation.
|
|
39
|
+
|
|
40
|
+
Project Context:
|
|
41
|
+
${codeContext.join('\n\n')}
|
|
42
|
+
|
|
43
|
+
Return JSON format:
|
|
44
|
+
{
|
|
45
|
+
"summary": "High level overview",
|
|
46
|
+
"refactoringGuides": [
|
|
47
|
+
{"file": "filename", "description": "why it needs refactoring", "suggestion": "how to refactor"}
|
|
48
|
+
],
|
|
49
|
+
"techDebtScore": 0-100
|
|
50
|
+
}
|
|
51
|
+
`;
|
|
52
|
+
|
|
53
|
+
const result = await model.generateContent(prompt);
|
|
54
|
+
const response = await result.response;
|
|
55
|
+
const text = response.text();
|
|
56
|
+
|
|
57
|
+
try {
|
|
58
|
+
const jsonStr = text.match(/\{[\s\S]*\}/)?.[0] || '{}';
|
|
59
|
+
return JSON.parse(jsonStr);
|
|
60
|
+
} catch (e) {
|
|
61
|
+
return {
|
|
62
|
+
summary: "AI analysis completed but failed to parse structured data.",
|
|
63
|
+
refactoringGuides: [],
|
|
64
|
+
techDebtScore: 50
|
|
65
|
+
};
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
async function getRelevantFiles(dir: string): Promise<string[]> {
|
|
70
|
+
const allFiles: string[] = [];
|
|
71
|
+
const items = await fs.readdir(dir);
|
|
72
|
+
|
|
73
|
+
for (const item of items) {
|
|
74
|
+
if (item === 'node_modules' || item.startsWith('.')) continue;
|
|
75
|
+
const fullPath = path.join(dir, item);
|
|
76
|
+
const stat = await fs.stat(fullPath);
|
|
77
|
+
if (stat.isDirectory()) {
|
|
78
|
+
allFiles.push(...await getRelevantFiles(fullPath));
|
|
79
|
+
} else if (item.endsWith('.py') || item.endsWith('.ts') || item.endsWith('.js')) {
|
|
80
|
+
allFiles.push(fullPath);
|
|
81
|
+
}
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
return allFiles.slice(0, 5); // MVP: Limit to 5 files for context
|
|
85
|
+
}
|