@utopia-ai/cli 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/settings.json +1 -0
- package/.claude/settings.local.json +38 -0
- package/bin/utopia.js +20 -0
- package/package.json +46 -0
- package/python/README.md +34 -0
- package/python/instrumenter/instrument.py +1148 -0
- package/python/pyproject.toml +32 -0
- package/python/setup.py +27 -0
- package/python/utopia_runtime/__init__.py +30 -0
- package/python/utopia_runtime/__pycache__/__init__.cpython-313.pyc +0 -0
- package/python/utopia_runtime/__pycache__/client.cpython-313.pyc +0 -0
- package/python/utopia_runtime/__pycache__/probe.cpython-313.pyc +0 -0
- package/python/utopia_runtime/client.py +31 -0
- package/python/utopia_runtime/probe.py +446 -0
- package/python/utopia_runtime.egg-info/PKG-INFO +59 -0
- package/python/utopia_runtime.egg-info/SOURCES.txt +10 -0
- package/python/utopia_runtime.egg-info/dependency_links.txt +1 -0
- package/python/utopia_runtime.egg-info/top_level.txt +1 -0
- package/scripts/publish-npm.sh +14 -0
- package/scripts/publish-pypi.sh +17 -0
- package/src/cli/commands/codex.ts +193 -0
- package/src/cli/commands/context.ts +188 -0
- package/src/cli/commands/destruct.ts +237 -0
- package/src/cli/commands/easter-eggs.ts +203 -0
- package/src/cli/commands/init.ts +505 -0
- package/src/cli/commands/instrument.ts +962 -0
- package/src/cli/commands/mcp.ts +16 -0
- package/src/cli/commands/serve.ts +194 -0
- package/src/cli/commands/status.ts +304 -0
- package/src/cli/commands/validate.ts +328 -0
- package/src/cli/index.ts +37 -0
- package/src/cli/utils/config.ts +54 -0
- package/src/graph/index.ts +687 -0
- package/src/instrumenter/javascript.ts +1798 -0
- package/src/mcp/index.ts +886 -0
- package/src/runtime/js/index.ts +518 -0
- package/src/runtime/js/package-lock.json +30 -0
- package/src/runtime/js/package.json +30 -0
- package/src/runtime/js/tsconfig.json +16 -0
- package/src/server/db/index.ts +26 -0
- package/src/server/db/schema.ts +45 -0
- package/src/server/index.ts +79 -0
- package/src/server/middleware/auth.ts +74 -0
- package/src/server/routes/admin.ts +36 -0
- package/src/server/routes/graph.ts +358 -0
- package/src/server/routes/probes.ts +286 -0
- package/src/types.ts +147 -0
- package/src/utopia-mode/index.ts +206 -0
- package/tsconfig.json +19 -0
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
import { Router, Request, Response } from 'express';
|
|
2
|
+
import { getDb } from '../db/index.js';
|
|
3
|
+
|
|
4
|
+
const router: Router = Router();
|
|
5
|
+
|
|
6
|
+
/** Shape of a raw row in the `probes` SQLite table (snake_case column names). */
interface ProbeRow {
  id: string;
  project_id: string;
  probe_type: string; // one of VALID_PROBE_TYPES ('error' | 'database' | 'api' | 'infra' | 'function')
  timestamp: string; // ISO-8601 string; used for range filters and ORDER BY
  file: string;
  line: number;
  function_name: string;
  data: string; // JSON-encoded payload; parsed in rowToProbe
  metadata: string; // JSON-encoded metadata; parsed in rowToProbe
}
|
|
17
|
+
|
|
18
|
+
/** API-facing probe shape: camelCase keys, JSON columns decoded into objects. */
interface ProbeResponse {
  id: string;
  projectId: string;
  probeType: string;
  timestamp: string; // ISO-8601 string, copied verbatim from the row
  file: string;
  line: number;
  functionName: string;
  data: Record<string, unknown>; // decoded from ProbeRow.data
  metadata: Record<string, unknown>; // decoded from ProbeRow.metadata
}
|
|
29
|
+
|
|
30
|
+
function rowToProbe(row: ProbeRow): ProbeResponse {
|
|
31
|
+
return {
|
|
32
|
+
id: row.id,
|
|
33
|
+
projectId: row.project_id,
|
|
34
|
+
probeType: row.probe_type,
|
|
35
|
+
timestamp: row.timestamp,
|
|
36
|
+
file: row.file,
|
|
37
|
+
line: row.line,
|
|
38
|
+
functionName: row.function_name,
|
|
39
|
+
data: JSON.parse(row.data),
|
|
40
|
+
metadata: JSON.parse(row.metadata),
|
|
41
|
+
};
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
const VALID_PROBE_TYPES = new Set(['error', 'database', 'api', 'infra', 'function']);
|
|
45
|
+
|
|
46
|
+
function validateProbe(probe: Record<string, unknown>): string | null {
|
|
47
|
+
if (!probe.id || typeof probe.id !== 'string') return 'Missing or invalid "id"';
|
|
48
|
+
if (!probe.project_id && !probe.projectId) return 'Missing "project_id"';
|
|
49
|
+
if (!probe.probe_type && !probe.probeType) return 'Missing "probe_type"';
|
|
50
|
+
const probeType = (probe.probe_type || probe.probeType) as string;
|
|
51
|
+
if (!VALID_PROBE_TYPES.has(probeType)) {
|
|
52
|
+
return `Invalid probe_type "${probeType}". Must be one of: error, database, api, infra, function`;
|
|
53
|
+
}
|
|
54
|
+
if (!probe.file || typeof probe.file !== 'string') return 'Missing or invalid "file"';
|
|
55
|
+
if (probe.line === undefined || probe.line === null || typeof probe.line !== 'number') {
|
|
56
|
+
return 'Missing or invalid "line"';
|
|
57
|
+
}
|
|
58
|
+
return null;
|
|
59
|
+
}
|
|
60
|
+
|
|
61
|
+
// POST / - Ingest probe data (single or array)
|
|
62
|
+
router.post('/', (req: Request, res: Response) => {
|
|
63
|
+
const db = getDb();
|
|
64
|
+
const body = req.body;
|
|
65
|
+
|
|
66
|
+
const probes: Record<string, unknown>[] = Array.isArray(body) ? body : [body];
|
|
67
|
+
|
|
68
|
+
if (probes.length === 0) {
|
|
69
|
+
res.status(400).json({ error: 'Request body must contain probe data' });
|
|
70
|
+
return;
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
const errors: Array<{ index: number; error: string }> = [];
|
|
74
|
+
|
|
75
|
+
const insertStmt = db.prepare(`
|
|
76
|
+
INSERT OR REPLACE INTO probes (id, project_id, probe_type, timestamp, file, line, function_name, data, metadata)
|
|
77
|
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
|
|
78
|
+
`);
|
|
79
|
+
|
|
80
|
+
const insertMany = db.transaction((probeList: Record<string, unknown>[]) => {
|
|
81
|
+
let inserted = 0;
|
|
82
|
+
for (let i = 0; i < probeList.length; i++) {
|
|
83
|
+
const probe = probeList[i];
|
|
84
|
+
const validationError = validateProbe(probe);
|
|
85
|
+
if (validationError) {
|
|
86
|
+
errors.push({ index: i, error: validationError });
|
|
87
|
+
continue;
|
|
88
|
+
}
|
|
89
|
+
|
|
90
|
+
const projectId = (probe.project_id || probe.projectId) as string;
|
|
91
|
+
const probeType = (probe.probe_type || probe.probeType) as string;
|
|
92
|
+
const timestamp = (probe.timestamp as string) || new Date().toISOString();
|
|
93
|
+
const functionName = (probe.function_name || probe.functionName || '') as string;
|
|
94
|
+
|
|
95
|
+
insertStmt.run(
|
|
96
|
+
probe.id,
|
|
97
|
+
projectId,
|
|
98
|
+
probeType,
|
|
99
|
+
timestamp,
|
|
100
|
+
probe.file,
|
|
101
|
+
probe.line,
|
|
102
|
+
functionName,
|
|
103
|
+
JSON.stringify(probe.data ?? {}),
|
|
104
|
+
JSON.stringify(probe.metadata ?? {}),
|
|
105
|
+
);
|
|
106
|
+
inserted++;
|
|
107
|
+
}
|
|
108
|
+
return inserted;
|
|
109
|
+
});
|
|
110
|
+
|
|
111
|
+
const inserted = insertMany(probes);
|
|
112
|
+
|
|
113
|
+
if (errors.length > 0 && inserted === 0) {
|
|
114
|
+
res.status(400).json({ error: 'All probes failed validation', details: errors });
|
|
115
|
+
return;
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
res.status(201).json({
|
|
119
|
+
inserted,
|
|
120
|
+
errors: errors.length > 0 ? errors : undefined,
|
|
121
|
+
});
|
|
122
|
+
});
|
|
123
|
+
|
|
124
|
+
// GET /errors/recent - Get recent errors
|
|
125
|
+
// Must be defined BEFORE /:id to avoid matching "errors" as an id
|
|
126
|
+
router.get('/errors/recent', (req: Request, res: Response) => {
|
|
127
|
+
const db = getDb();
|
|
128
|
+
const hours = parseInt(req.query.hours as string, 10) || 24;
|
|
129
|
+
const limit = Math.min(parseInt(req.query.limit as string, 10) || 100, 1000);
|
|
130
|
+
|
|
131
|
+
const since = new Date(Date.now() - hours * 60 * 60 * 1000).toISOString();
|
|
132
|
+
|
|
133
|
+
const rows = db.prepare(`
|
|
134
|
+
SELECT * FROM probes
|
|
135
|
+
WHERE probe_type = 'error' AND timestamp >= ?
|
|
136
|
+
ORDER BY timestamp DESC
|
|
137
|
+
LIMIT ?
|
|
138
|
+
`).all(since, limit) as ProbeRow[];
|
|
139
|
+
|
|
140
|
+
res.json({
|
|
141
|
+
count: rows.length,
|
|
142
|
+
probes: rows.map(rowToProbe),
|
|
143
|
+
});
|
|
144
|
+
});
|
|
145
|
+
|
|
146
|
+
// GET /context - Smart context retrieval
|
|
147
|
+
// Must be defined BEFORE /:id to avoid matching "context" as an id
|
|
148
|
+
router.get('/context', (req: Request, res: Response) => {
|
|
149
|
+
const db = getDb();
|
|
150
|
+
const prompt = req.query.prompt as string;
|
|
151
|
+
|
|
152
|
+
if (!prompt) {
|
|
153
|
+
res.status(400).json({ error: 'Missing "prompt" query parameter' });
|
|
154
|
+
return;
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
const limit = Math.min(parseInt(req.query.limit as string, 10) || 20, 100);
|
|
158
|
+
|
|
159
|
+
// Extract keywords: split on whitespace/non-alphanumeric, filter stopwords and short tokens
|
|
160
|
+
const stopWords = new Set([
|
|
161
|
+
'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for',
|
|
162
|
+
'of', 'with', 'by', 'is', 'it', 'this', 'that', 'are', 'was', 'were',
|
|
163
|
+
'be', 'been', 'has', 'have', 'had', 'do', 'does', 'did', 'will', 'would',
|
|
164
|
+
'could', 'should', 'may', 'might', 'can', 'not', 'no', 'what', 'how',
|
|
165
|
+
'why', 'when', 'where', 'which', 'who', 'whom', 'from', 'into', 'than',
|
|
166
|
+
'then', 'there', 'here', 'just', 'also', 'very', 'too', 'some', 'any',
|
|
167
|
+
'all', 'each', 'every', 'both', 'few', 'more', 'most', 'other', 'about',
|
|
168
|
+
]);
|
|
169
|
+
|
|
170
|
+
const keywords = prompt
|
|
171
|
+
.toLowerCase()
|
|
172
|
+
.split(/[^a-z0-9_.-]+/)
|
|
173
|
+
.filter(w => w.length >= 2 && !stopWords.has(w));
|
|
174
|
+
|
|
175
|
+
if (keywords.length === 0) {
|
|
176
|
+
res.json({ count: 0, probes: [], keywords: [] });
|
|
177
|
+
return;
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
// Build WHERE clause: match any keyword against file, function_name, or data
|
|
181
|
+
const conditions = keywords.map(() =>
|
|
182
|
+
'(LOWER(file) LIKE ? OR LOWER(function_name) LIKE ? OR LOWER(data) LIKE ?)'
|
|
183
|
+
);
|
|
184
|
+
|
|
185
|
+
const whereParams: string[] = [];
|
|
186
|
+
for (const kw of keywords) {
|
|
187
|
+
const pattern = `%${kw}%`;
|
|
188
|
+
whereParams.push(pattern, pattern, pattern);
|
|
189
|
+
}
|
|
190
|
+
|
|
191
|
+
// Build relevance scoring: count how many keyword/column matches
|
|
192
|
+
const relevanceExpr = keywords.map(() =>
|
|
193
|
+
'(CASE WHEN LOWER(file) LIKE ? THEN 1 ELSE 0 END + ' +
|
|
194
|
+
'CASE WHEN LOWER(function_name) LIKE ? THEN 1 ELSE 0 END + ' +
|
|
195
|
+
'CASE WHEN LOWER(data) LIKE ? THEN 1 ELSE 0 END)'
|
|
196
|
+
).join(' + ');
|
|
197
|
+
|
|
198
|
+
const relevanceParams: string[] = [];
|
|
199
|
+
for (const kw of keywords) {
|
|
200
|
+
const pattern = `%${kw}%`;
|
|
201
|
+
relevanceParams.push(pattern, pattern, pattern);
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
const sql = `
|
|
205
|
+
SELECT *, (${relevanceExpr}) as relevance
|
|
206
|
+
FROM probes
|
|
207
|
+
WHERE ${conditions.join(' OR ')}
|
|
208
|
+
ORDER BY relevance DESC, timestamp DESC
|
|
209
|
+
LIMIT ?
|
|
210
|
+
`;
|
|
211
|
+
|
|
212
|
+
const allParams = [...relevanceParams, ...whereParams, limit];
|
|
213
|
+
const rows = db.prepare(sql).all(...allParams) as (ProbeRow & { relevance: number })[];
|
|
214
|
+
|
|
215
|
+
res.json({
|
|
216
|
+
count: rows.length,
|
|
217
|
+
keywords,
|
|
218
|
+
probes: rows.map(rowToProbe),
|
|
219
|
+
});
|
|
220
|
+
});
|
|
221
|
+
|
|
222
|
+
// GET /:id - Get single probe by ID
|
|
223
|
+
router.get('/:id', (req: Request, res: Response) => {
|
|
224
|
+
const db = getDb();
|
|
225
|
+
const row = db.prepare('SELECT * FROM probes WHERE id = ?').get(req.params.id) as ProbeRow | undefined;
|
|
226
|
+
|
|
227
|
+
if (!row) {
|
|
228
|
+
res.status(404).json({ error: 'Probe not found' });
|
|
229
|
+
return;
|
|
230
|
+
}
|
|
231
|
+
|
|
232
|
+
res.json(rowToProbe(row));
|
|
233
|
+
});
|
|
234
|
+
|
|
235
|
+
// GET / - Query probes with filters
|
|
236
|
+
router.get('/', (req: Request, res: Response) => {
|
|
237
|
+
const db = getDb();
|
|
238
|
+
|
|
239
|
+
const conditions: string[] = [];
|
|
240
|
+
const params: (string | number)[] = [];
|
|
241
|
+
|
|
242
|
+
if (req.query.probe_type) {
|
|
243
|
+
conditions.push('probe_type = ?');
|
|
244
|
+
params.push(req.query.probe_type as string);
|
|
245
|
+
}
|
|
246
|
+
|
|
247
|
+
if (req.query.file) {
|
|
248
|
+
conditions.push('file = ?');
|
|
249
|
+
params.push(req.query.file as string);
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
if (req.query.function_name) {
|
|
253
|
+
conditions.push('function_name = ?');
|
|
254
|
+
params.push(req.query.function_name as string);
|
|
255
|
+
}
|
|
256
|
+
|
|
257
|
+
if (req.query.project_id) {
|
|
258
|
+
conditions.push('project_id = ?');
|
|
259
|
+
params.push(req.query.project_id as string);
|
|
260
|
+
}
|
|
261
|
+
|
|
262
|
+
if (req.query.since) {
|
|
263
|
+
conditions.push('timestamp >= ?');
|
|
264
|
+
params.push(req.query.since as string);
|
|
265
|
+
}
|
|
266
|
+
|
|
267
|
+
if (req.query.until) {
|
|
268
|
+
conditions.push('timestamp <= ?');
|
|
269
|
+
params.push(req.query.until as string);
|
|
270
|
+
}
|
|
271
|
+
|
|
272
|
+
const limit = Math.min(parseInt(req.query.limit as string, 10) || 100, 1000);
|
|
273
|
+
|
|
274
|
+
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
|
275
|
+
const sql = `SELECT * FROM probes ${whereClause} ORDER BY timestamp DESC LIMIT ?`;
|
|
276
|
+
params.push(limit);
|
|
277
|
+
|
|
278
|
+
const rows = db.prepare(sql).all(...params) as ProbeRow[];
|
|
279
|
+
|
|
280
|
+
res.json({
|
|
281
|
+
count: rows.length,
|
|
282
|
+
probes: rows.map(rowToProbe),
|
|
283
|
+
});
|
|
284
|
+
});
|
|
285
|
+
|
|
286
|
+
export default router;
|
package/src/types.ts
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
// Utopia shared types

/** Cloud platform the instrumented service runs on. */
export type CloudProvider = 'aws' | 'gcp' | 'vercel' | 'azure' | 'other';
/** How the service is deployed. */
export type DeploymentMethod = 'manual' | 'github-actions' | 'vercel-trigger' | 'other';
/** Source languages Utopia can instrument. */
export type Language = 'javascript' | 'typescript' | 'python';
/** Application framework of the instrumented project. */
export type Framework = 'react' | 'nextjs' | 'express' | 'fastapi' | 'flask' | 'django' | 'other';

/** Project-level Utopia configuration (persisted by the CLI). */
export interface UtopiaConfig {
  version: string;
  projectId: string;
  cloudProvider: CloudProvider;
  service: string;
  deploymentMethod: DeploymentMethod;
  isStandalone: boolean;
  dataEndpoint: string; // URL probes send their data to
  language: Language[]; // a project may mix languages
  framework: Framework;
}

/** Discriminant for the ProbeData union below. */
export type ProbeType = 'error' | 'database' | 'api' | 'infra' | 'function';

/** Base shape of every probe event; `data` is refined by the subtypes. */
export interface ProbeData {
  id: string;
  projectId: string;
  probeType: ProbeType;
  timestamp: string; // ISO-8601 string — TODO confirm; server-side queries compare lexically
  file: string;
  line: number;
  functionName: string;
  data: Record<string, unknown>;
  metadata: ProbeMetadata;
}

/** Runtime environment info attached to each probe. */
export interface ProbeMetadata {
  runtime: 'node' | 'python';
  environment?: string;
  hostname?: string;
  pid?: number;
  version?: string;
}

/** Probe emitted when an exception is captured. */
export interface ErrorProbeData extends ProbeData {
  probeType: 'error';
  data: {
    errorType: string;
    message: string;
    stack: string;
    inputData: Record<string, unknown>;
    codeLine: string; // source line where the error occurred
  };
}

/** Probe emitted around a database operation. */
export interface DatabaseProbeData extends ProbeData {
  probeType: 'database';
  data: {
    operation: string;
    query?: string;
    table?: string;
    duration: number; // presumably milliseconds — confirm against runtime emitters
    rowCount?: number;
    connectionInfo: {
      type: string;
      host?: string;
      database?: string;
    };
    params?: unknown[];
  };
}

/** Probe emitted around an outbound/inbound HTTP call. */
export interface ApiProbeData extends ProbeData {
  probeType: 'api';
  data: {
    method: string;
    url: string;
    statusCode?: number;
    duration: number;
    requestHeaders?: Record<string, string>;
    responseHeaders?: Record<string, string>;
    requestBody?: unknown;
    responseBody?: unknown;
    error?: string;
  };
}

/** Probe describing the infrastructure the process runs on. */
export interface InfraProbeData extends ProbeData {
  probeType: 'infra';
  data: {
    provider: CloudProvider;
    region?: string;
    serviceType?: string;
    instanceId?: string;
    containerInfo?: {
      containerId?: string;
      image?: string;
    };
    envVars: Record<string, string>;
    memoryUsage: number;
    cpuUsage?: number;
  };
}

/** Probe emitted around an instrumented function invocation. */
export interface FunctionProbeData extends ProbeData {
  probeType: 'function';
  data: {
    args: unknown[];
    returnValue?: unknown;
    duration: number;
    llmContext?: string; // Utopia mode: LLM-generated context
    callStack: string[];
  };
}

// Impact Graph types

/** A node in the impact graph (function, service, database, api, or file). */
export interface GraphNode {
  id: string;
  type: 'function' | 'service' | 'database' | 'api' | 'file';
  name: string;
  file?: string;
  metadata: Record<string, unknown>;
}

/** A directed, weighted edge between two graph nodes. */
export interface GraphEdge {
  source: string; // GraphNode.id
  target: string; // GraphNode.id
  type: 'calls' | 'queries' | 'serves' | 'depends_on';
  weight: number;
  lastSeen: string;
}

/** Complete impact graph: nodes plus directed edges. */
export interface ImpactGraph {
  nodes: GraphNode[];
  edges: GraphEdge[];
}

// MCP tool types

/** Query issued by an AI agent asking for runtime context. */
export interface ContextQuery {
  prompt: string;
  file?: string;
  probeTypes?: ProbeType[];
  limit?: number;
}

/** Result returned for a ContextQuery. */
export interface ContextResult {
  relevantProbes: ProbeData[];
  impactedNodes: GraphNode[];
  summary: string;
}
|
|
@@ -0,0 +1,206 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Utopia Mode — LLM-Enhanced Probes
|
|
3
|
+
*
|
|
4
|
+
* At runtime, probes capture function data and send it (async, non-blocking)
|
|
5
|
+
* to an LLM that generates rich semantic context about what the code is doing.
|
|
6
|
+
* This context is then stored in the data service for AI coding agents to query.
|
|
7
|
+
*
|
|
8
|
+
* The LLM call happens server-side (in the data service) to avoid adding
|
|
9
|
+
* the Anthropic SDK as a dependency of the probe runtime.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
import Anthropic from '@anthropic-ai/sdk';
|
|
13
|
+
|
|
14
|
+
/** Runtime data captured for one function invocation, queued for LLM analysis. */
interface LlmContextRequest {
  file: string;
  line: number;
  functionName: string;
  args: unknown[];
  returnValue?: unknown;
  duration: number; // rendered as "<n>ms" in the prompt (see buildPrompt)
  probeType: string;
  additionalContext?: Record<string, unknown>;
}

/** Structured semantic context parsed from the model's JSON reply (schema is
 *  dictated by the system prompt in generateContext). */
interface LlmContextResult {
  summary: string; // one-line description of what the call did
  behavior: string; // runtime behavior inferred from inputs/outputs
  dataFlow: string; // inputs → transformations → outputs
  sideEffects: string[]; // DB writes, API calls, file I/O, etc.
  dependencies: string[]; // external dependencies the function relies on
  risks: string[]; // e.g. N+1 queries, slow calls, error-prone patterns
}
|
|
33
|
+
|
|
34
|
+
// Lazily-constructed Anthropic client (see getClient); stays null when no
// API key is configured.
let client: Anthropic | null = null;
// Requests waiting for LLM context generation; drained in batches.
let processingQueue: LlmContextRequest[] = [];
// Re-entrancy guard: true while processQueue is working on a batch.
let isProcessing = false;
// Batch size per processQueue call; reaching it also triggers an eager flush.
const BATCH_SIZE = 5;
const FLUSH_INTERVAL = 10_000; // 10 seconds
// Periodic flush timer; unref'd so it never keeps the process alive.
let flushTimer: ReturnType<typeof setInterval> | null = null;
|
|
40
|
+
|
|
41
|
+
function getClient(): Anthropic | null {
|
|
42
|
+
if (client) return client;
|
|
43
|
+
const apiKey = process.env.ANTHROPIC_API_KEY || process.env.UTOPIA_LLM_API_KEY;
|
|
44
|
+
if (!apiKey) return null;
|
|
45
|
+
client = new Anthropic({ apiKey });
|
|
46
|
+
return client;
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
function startFlushTimer(): void {
|
|
50
|
+
if (flushTimer) return;
|
|
51
|
+
flushTimer = setInterval(() => {
|
|
52
|
+
processQueue().catch(() => {});
|
|
53
|
+
}, FLUSH_INTERVAL);
|
|
54
|
+
if (flushTimer.unref) flushTimer.unref();
|
|
55
|
+
}
|
|
56
|
+
|
|
57
|
+
/**
|
|
58
|
+
* Queue a function's runtime data for LLM context generation.
|
|
59
|
+
* Called by the data service when it receives a function probe or llm_context probe.
|
|
60
|
+
*/
|
|
61
|
+
export function queueForLlmContext(request: LlmContextRequest): void {
|
|
62
|
+
processingQueue.push(request);
|
|
63
|
+
startFlushTimer();
|
|
64
|
+
|
|
65
|
+
if (processingQueue.length >= BATCH_SIZE) {
|
|
66
|
+
processQueue().catch(() => {});
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
|
|
70
|
+
/**
 * Process queued requests — generate LLM context for each and return results.
 *
 * Concurrency: the isProcessing flag prevents overlapping batches (eager
 * flush from queueForLlmContext vs. the interval timer); it is always reset
 * in the finally block. At most BATCH_SIZE items are taken per call; the
 * remainder stays queued for the next flush.
 */
async function processQueue(): Promise<LlmContextResult[]> {
  if (isProcessing || processingQueue.length === 0) return [];
  isProcessing = true;

  // Remove the batch from the queue up front; note that if no client is
  // configured below, these items are dropped rather than re-queued.
  const batch = processingQueue.splice(0, BATCH_SIZE);
  const results: LlmContextResult[] = [];

  try {
    const anthropic = getClient();
    if (!anthropic) {
      // No API key → nothing to do (the finally block would also reset the
      // flag; this early assignment is kept for clarity).
      isProcessing = false;
      return [];
    }

    // Process each request concurrently but with a concurrency limit
    const promises = batch.map(req => generateContext(anthropic, req));
    const settled = await Promise.allSettled(promises);

    // Keep only successful, non-null results; rejections are dropped.
    for (const result of settled) {
      if (result.status === 'fulfilled' && result.value) {
        results.push(result.value);
      }
    }
  } catch {
    // Silently fail — never impact the host application
  } finally {
    isProcessing = false;
  }

  return results;
}
|
|
104
|
+
|
|
105
|
+
/**
 * Generate semantic context for a single function invocation using Claude.
 *
 * Any failure — network error, rate limit, non-text response, or malformed
 * JSON from the model — returns null; Utopia mode must never break probe
 * processing.
 */
async function generateContext(
  anthropic: Anthropic,
  request: LlmContextRequest
): Promise<LlmContextResult | null> {
  try {
    const prompt = buildPrompt(request);

    const message = await anthropic.messages.create({
      model: 'claude-haiku-4-5-20251001',
      max_tokens: 512,
      messages: [{ role: 'user', content: prompt }],
      // The system prompt pins the exact JSON schema that LlmContextResult
      // mirrors — keep the two in sync.
      system: `You are analyzing runtime data from a production function invocation. Respond ONLY with valid JSON matching this schema:
{
  "summary": "one-line description of what this function call did",
  "behavior": "description of the function's runtime behavior based on inputs/outputs",
  "dataFlow": "how data flows through this function (inputs → transformations → outputs)",
  "sideEffects": ["list of side effects: DB writes, API calls, file I/O, etc."],
  "dependencies": ["list of external dependencies this function relies on"],
  "risks": ["potential issues: N+1 queries, slow calls, error-prone patterns"]
}`,
    });

    // Only the first content block is considered; tool-use or other
    // non-text blocks yield null.
    const content = message.content[0];
    if (content.type !== 'text') return null;

    // NOTE(review): assumes the model returns bare JSON; a ```json-fenced
    // reply would fail JSON.parse and be silently swallowed by the catch —
    // consider stripping fences before parsing. The parsed object is also
    // not validated against the schema (the `as` cast is unchecked).
    const parsed = JSON.parse(content.text) as LlmContextResult;
    return parsed;
  } catch {
    return null;
  }
}
|
|
139
|
+
|
|
140
|
+
function buildPrompt(request: LlmContextRequest): string {
|
|
141
|
+
const argsSummary = truncate(JSON.stringify(request.args), 500);
|
|
142
|
+
const returnSummary = truncate(JSON.stringify(request.returnValue), 500);
|
|
143
|
+
|
|
144
|
+
return `Analyze this production function invocation:
|
|
145
|
+
|
|
146
|
+
File: ${request.file}
|
|
147
|
+
Function: ${request.functionName}
|
|
148
|
+
Line: ${request.line}
|
|
149
|
+
Duration: ${request.duration}ms
|
|
150
|
+
Arguments: ${argsSummary}
|
|
151
|
+
Return Value: ${returnSummary}
|
|
152
|
+
${request.additionalContext ? `Additional Context: ${JSON.stringify(request.additionalContext)}` : ''}
|
|
153
|
+
|
|
154
|
+
Based on the function name, file path, arguments, return value, and duration, generate a semantic context summary.`;
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
function truncate(str: string, maxLen: number): string {
|
|
158
|
+
if (str.length <= maxLen) return str;
|
|
159
|
+
return str.substring(0, maxLen) + '...';
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
/**
|
|
163
|
+
* Process a probe from the data service and generate LLM context.
|
|
164
|
+
* Returns the context to be stored alongside the probe.
|
|
165
|
+
*/
|
|
166
|
+
export async function processProbeForLlmContext(probe: {
|
|
167
|
+
file: string;
|
|
168
|
+
line: number;
|
|
169
|
+
function_name: string;
|
|
170
|
+
probe_type: string;
|
|
171
|
+
data: string;
|
|
172
|
+
}): Promise<LlmContextResult | null> {
|
|
173
|
+
const anthropic = getClient();
|
|
174
|
+
if (!anthropic) return null;
|
|
175
|
+
|
|
176
|
+
let parsedData: Record<string, unknown> = {};
|
|
177
|
+
try {
|
|
178
|
+
parsedData = JSON.parse(probe.data);
|
|
179
|
+
} catch {
|
|
180
|
+
return null;
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
return generateContext(anthropic, {
|
|
184
|
+
file: probe.file,
|
|
185
|
+
line: probe.line,
|
|
186
|
+
functionName: probe.function_name,
|
|
187
|
+
args: (parsedData.args as unknown[]) || [],
|
|
188
|
+
returnValue: parsedData.returnValue,
|
|
189
|
+
duration: (parsedData.duration as number) || 0,
|
|
190
|
+
probeType: probe.probe_type,
|
|
191
|
+
additionalContext: parsedData,
|
|
192
|
+
});
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
/**
|
|
196
|
+
* Shutdown: flush remaining queue.
|
|
197
|
+
*/
|
|
198
|
+
export async function shutdown(): Promise<void> {
|
|
199
|
+
if (flushTimer) {
|
|
200
|
+
clearInterval(flushTimer);
|
|
201
|
+
flushTimer = null;
|
|
202
|
+
}
|
|
203
|
+
await processQueue();
|
|
204
|
+
}
|
|
205
|
+
|
|
206
|
+
export type { LlmContextRequest, LlmContextResult };
|
package/tsconfig.json
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
{
|
|
2
|
+
"compilerOptions": {
|
|
3
|
+
"target": "ES2022",
|
|
4
|
+
"module": "ESNext",
|
|
5
|
+
"moduleResolution": "bundler",
|
|
6
|
+
"outDir": "./dist",
|
|
7
|
+
"rootDir": "./src",
|
|
8
|
+
"strict": true,
|
|
9
|
+
"esModuleInterop": true,
|
|
10
|
+
"skipLibCheck": true,
|
|
11
|
+
"forceConsistentCasingInFileNames": true,
|
|
12
|
+
"resolveJsonModule": true,
|
|
13
|
+
"declaration": true,
|
|
14
|
+
"declarationMap": true,
|
|
15
|
+
"sourceMap": true
|
|
16
|
+
},
|
|
17
|
+
"include": ["src/**/*"],
|
|
18
|
+
"exclude": ["node_modules", "dist"]
|
|
19
|
+
}
|