@homenshum/convex-mcp-nodebench 0.7.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +241 -2
- package/dist/tools/qualityGateTools.d.ts +2 -0
- package/dist/tools/qualityGateTools.js +204 -0
- package/dist/tools/reportingTools.d.ts +2 -0
- package/dist/tools/reportingTools.js +240 -0
- package/dist/tools/schedulerTools.d.ts +2 -0
- package/dist/tools/schedulerTools.js +197 -0
- package/dist/tools/toolRegistry.js +74 -0
- package/dist/tools/vectorSearchTools.d.ts +2 -0
- package/dist/tools/vectorSearchTools.js +192 -0
- package/package.json +1 -1
package/dist/index.js
CHANGED
|
@@ -14,7 +14,7 @@
|
|
|
14
14
|
*/
|
|
15
15
|
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
|
|
16
16
|
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
17
|
-
import { ListToolsRequestSchema, CallToolRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
|
|
17
|
+
import { ListToolsRequestSchema, CallToolRequestSchema, ListResourcesRequestSchema, ReadResourceRequestSchema, ListPromptsRequestSchema, GetPromptRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
|
|
18
18
|
import { getDb, seedGotchasIfEmpty } from "./db.js";
|
|
19
19
|
import { schemaTools } from "./tools/schemaTools.js";
|
|
20
20
|
import { functionTools } from "./tools/functionTools.js";
|
|
@@ -36,6 +36,10 @@ import { paginationTools } from "./tools/paginationTools.js";
|
|
|
36
36
|
import { dataModelingTools } from "./tools/dataModelingTools.js";
|
|
37
37
|
import { devSetupTools } from "./tools/devSetupTools.js";
|
|
38
38
|
import { migrationTools } from "./tools/migrationTools.js";
|
|
39
|
+
import { reportingTools } from "./tools/reportingTools.js";
|
|
40
|
+
import { vectorSearchTools } from "./tools/vectorSearchTools.js";
|
|
41
|
+
import { schedulerTools } from "./tools/schedulerTools.js";
|
|
42
|
+
import { qualityGateTools } from "./tools/qualityGateTools.js";
|
|
39
43
|
import { CONVEX_GOTCHAS } from "./gotchaSeed.js";
|
|
40
44
|
import { REGISTRY } from "./tools/toolRegistry.js";
|
|
41
45
|
import { initEmbeddingIndex } from "./tools/embeddingProvider.js";
|
|
@@ -61,6 +65,10 @@ const ALL_TOOLS = [
|
|
|
61
65
|
...dataModelingTools,
|
|
62
66
|
...devSetupTools,
|
|
63
67
|
...migrationTools,
|
|
68
|
+
...reportingTools,
|
|
69
|
+
...vectorSearchTools,
|
|
70
|
+
...schedulerTools,
|
|
71
|
+
...qualityGateTools,
|
|
64
72
|
];
|
|
65
73
|
const toolMap = new Map();
|
|
66
74
|
for (const tool of ALL_TOOLS) {
|
|
@@ -69,10 +77,12 @@ for (const tool of ALL_TOOLS) {
|
|
|
69
77
|
// ── Server setup ────────────────────────────────────────────────────
|
|
70
78
|
const server = new Server({
|
|
71
79
|
name: "convex-mcp-nodebench",
|
|
72
|
-
version: "0.
|
|
80
|
+
version: "0.8.0",
|
|
73
81
|
}, {
|
|
74
82
|
capabilities: {
|
|
75
83
|
tools: {},
|
|
84
|
+
resources: {},
|
|
85
|
+
prompts: {},
|
|
76
86
|
},
|
|
77
87
|
});
|
|
78
88
|
// ── Initialize DB + seed gotchas ────────────────────────────────────
|
|
@@ -169,6 +179,235 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
|
|
|
169
179
|
};
|
|
170
180
|
}
|
|
171
181
|
});
|
|
182
|
+
// ── MCP Resources ───────────────────────────────────────────────────
|
|
183
|
+
server.setRequestHandler(ListResourcesRequestSchema, async () => {
|
|
184
|
+
return {
|
|
185
|
+
resources: [
|
|
186
|
+
{
|
|
187
|
+
uri: "convex://project-health",
|
|
188
|
+
name: "Project Health Summary",
|
|
189
|
+
description: "Latest quality gate score, audit coverage, and issue counts across all audit types",
|
|
190
|
+
mimeType: "application/json",
|
|
191
|
+
},
|
|
192
|
+
{
|
|
193
|
+
uri: "convex://recent-audits",
|
|
194
|
+
name: "Recent Audit Results",
|
|
195
|
+
description: "Summary of the 10 most recent audit runs with issue counts and timestamps",
|
|
196
|
+
mimeType: "application/json",
|
|
197
|
+
},
|
|
198
|
+
{
|
|
199
|
+
uri: "convex://gotcha-db",
|
|
200
|
+
name: "Gotcha Knowledge Base",
|
|
201
|
+
description: "All stored Convex gotchas (seeded + user-recorded) with categories and severity",
|
|
202
|
+
mimeType: "application/json",
|
|
203
|
+
},
|
|
204
|
+
],
|
|
205
|
+
};
|
|
206
|
+
});
|
|
207
|
+
server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
|
|
208
|
+
const uri = request.params.uri;
|
|
209
|
+
const db = getDb();
|
|
210
|
+
if (uri === "convex://project-health") {
|
|
211
|
+
// Aggregate across all projects
|
|
212
|
+
const audits = db.prepare("SELECT audit_type, issue_count, audited_at FROM audit_results ORDER BY audited_at DESC LIMIT 50").all();
|
|
213
|
+
const byType = {};
|
|
214
|
+
for (const a of audits) {
|
|
215
|
+
if (!byType[a.audit_type]) {
|
|
216
|
+
byType[a.audit_type] = { count: a.issue_count, latest: a.audited_at };
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
const totalIssues = Object.values(byType).reduce((s, v) => s + v.count, 0);
|
|
220
|
+
const auditTypes = Object.keys(byType).length;
|
|
221
|
+
const latestGate = db.prepare("SELECT findings FROM deploy_checks WHERE check_type = 'quality_gate' ORDER BY checked_at DESC LIMIT 1").get();
|
|
222
|
+
let gateResult = null;
|
|
223
|
+
if (latestGate?.findings) {
|
|
224
|
+
try {
|
|
225
|
+
gateResult = JSON.parse(latestGate.findings);
|
|
226
|
+
}
|
|
227
|
+
catch { /* skip */ }
|
|
228
|
+
}
|
|
229
|
+
return {
|
|
230
|
+
contents: [{
|
|
231
|
+
uri,
|
|
232
|
+
mimeType: "application/json",
|
|
233
|
+
text: JSON.stringify({
|
|
234
|
+
totalIssues,
|
|
235
|
+
auditTypesRun: auditTypes,
|
|
236
|
+
issuesByType: byType,
|
|
237
|
+
latestQualityGate: gateResult ? { score: gateResult.score, grade: gateResult.grade, passed: gateResult.passed } : null,
|
|
238
|
+
toolCount: ALL_TOOLS.length,
|
|
239
|
+
}, null, 2),
|
|
240
|
+
}],
|
|
241
|
+
};
|
|
242
|
+
}
|
|
243
|
+
if (uri === "convex://recent-audits") {
|
|
244
|
+
const audits = db.prepare("SELECT id, project_dir, audit_type, issue_count, audited_at FROM audit_results ORDER BY audited_at DESC LIMIT 10").all();
|
|
245
|
+
return {
|
|
246
|
+
contents: [{
|
|
247
|
+
uri,
|
|
248
|
+
mimeType: "application/json",
|
|
249
|
+
text: JSON.stringify({ audits }, null, 2),
|
|
250
|
+
}],
|
|
251
|
+
};
|
|
252
|
+
}
|
|
253
|
+
if (uri === "convex://gotcha-db") {
|
|
254
|
+
const gotchas = db.prepare("SELECT key, category, severity, tags, source, updated_at FROM convex_gotchas ORDER BY updated_at DESC").all();
|
|
255
|
+
return {
|
|
256
|
+
contents: [{
|
|
257
|
+
uri,
|
|
258
|
+
mimeType: "application/json",
|
|
259
|
+
text: JSON.stringify({
|
|
260
|
+
totalGotchas: gotchas.length,
|
|
261
|
+
bySource: {
|
|
262
|
+
seed: gotchas.filter(g => g.source === "seed").length,
|
|
263
|
+
user: gotchas.filter(g => g.source === "user").length,
|
|
264
|
+
},
|
|
265
|
+
gotchas,
|
|
266
|
+
}, null, 2),
|
|
267
|
+
}],
|
|
268
|
+
};
|
|
269
|
+
}
|
|
270
|
+
return {
|
|
271
|
+
contents: [{
|
|
272
|
+
uri,
|
|
273
|
+
mimeType: "text/plain",
|
|
274
|
+
text: `Unknown resource: ${uri}`,
|
|
275
|
+
}],
|
|
276
|
+
};
|
|
277
|
+
});
|
|
278
|
+
// ── MCP Prompts ─────────────────────────────────────────────────────
|
|
279
|
+
server.setRequestHandler(ListPromptsRequestSchema, async () => {
|
|
280
|
+
return {
|
|
281
|
+
prompts: [
|
|
282
|
+
{
|
|
283
|
+
name: "full-audit",
|
|
284
|
+
description: "Run a complete Convex project audit: schema, functions, auth, queries, actions, type safety, transactions, storage, pagination, data modeling, dev setup, vectors, schedulers — then quality gate",
|
|
285
|
+
arguments: [
|
|
286
|
+
{
|
|
287
|
+
name: "projectDir",
|
|
288
|
+
description: "Absolute path to the project root",
|
|
289
|
+
required: true,
|
|
290
|
+
},
|
|
291
|
+
],
|
|
292
|
+
},
|
|
293
|
+
{
|
|
294
|
+
name: "pre-deploy-checklist",
|
|
295
|
+
description: "Step-by-step pre-deployment verification: audit critical issues, check env vars, review migration plan, run quality gate",
|
|
296
|
+
arguments: [
|
|
297
|
+
{
|
|
298
|
+
name: "projectDir",
|
|
299
|
+
description: "Absolute path to the project root",
|
|
300
|
+
required: true,
|
|
301
|
+
},
|
|
302
|
+
],
|
|
303
|
+
},
|
|
304
|
+
{
|
|
305
|
+
name: "security-review",
|
|
306
|
+
description: "Security-focused audit: authorization coverage, type safety, action safety, storage permissions",
|
|
307
|
+
arguments: [
|
|
308
|
+
{
|
|
309
|
+
name: "projectDir",
|
|
310
|
+
description: "Absolute path to the project root",
|
|
311
|
+
required: true,
|
|
312
|
+
},
|
|
313
|
+
],
|
|
314
|
+
},
|
|
315
|
+
],
|
|
316
|
+
};
|
|
317
|
+
});
|
|
318
|
+
server.setRequestHandler(GetPromptRequestSchema, async (request) => {
|
|
319
|
+
const { name, arguments: promptArgs } = request.params;
|
|
320
|
+
const projectDir = promptArgs?.projectDir ?? ".";
|
|
321
|
+
if (name === "full-audit") {
|
|
322
|
+
return {
|
|
323
|
+
description: "Complete Convex project audit sequence",
|
|
324
|
+
messages: [
|
|
325
|
+
{
|
|
326
|
+
role: "user",
|
|
327
|
+
content: {
|
|
328
|
+
type: "text",
|
|
329
|
+
text: `Run a complete audit of the Convex project at "${projectDir}". Execute these tools in order:
|
|
330
|
+
|
|
331
|
+
1. convex_audit_schema — Check schema.ts for anti-patterns
|
|
332
|
+
2. convex_audit_functions — Audit function registration and compliance
|
|
333
|
+
3. convex_audit_authorization — Check auth coverage on public endpoints
|
|
334
|
+
4. convex_audit_query_efficiency — Find unbounded queries and missing indexes
|
|
335
|
+
5. convex_audit_actions — Validate action safety (no ctx.db, error handling)
|
|
336
|
+
6. convex_check_type_safety — Find as-any casts and type issues
|
|
337
|
+
7. convex_audit_transaction_safety — Detect race conditions
|
|
338
|
+
8. convex_audit_storage_usage — Check file storage patterns
|
|
339
|
+
9. convex_audit_pagination — Validate pagination implementations
|
|
340
|
+
10. convex_audit_data_modeling — Check schema design quality
|
|
341
|
+
11. convex_audit_vector_search — Validate vector search setup
|
|
342
|
+
12. convex_audit_schedulers — Check scheduled function safety
|
|
343
|
+
13. convex_audit_dev_setup — Verify project setup
|
|
344
|
+
14. convex_quality_gate — Run configurable quality gate across all results
|
|
345
|
+
|
|
346
|
+
After running all audits, summarize:
|
|
347
|
+
- Total issues by severity (critical/warning/info)
|
|
348
|
+
- Top 5 most impactful issues to fix first
|
|
349
|
+
- Quality gate score and grade
|
|
350
|
+
- Trend direction if previous audits exist (use convex_audit_diff)`,
|
|
351
|
+
},
|
|
352
|
+
},
|
|
353
|
+
],
|
|
354
|
+
};
|
|
355
|
+
}
|
|
356
|
+
if (name === "pre-deploy-checklist") {
|
|
357
|
+
return {
|
|
358
|
+
description: "Pre-deployment verification sequence",
|
|
359
|
+
messages: [
|
|
360
|
+
{
|
|
361
|
+
role: "user",
|
|
362
|
+
content: {
|
|
363
|
+
type: "text",
|
|
364
|
+
text: `Run pre-deployment checks for the Convex project at "${projectDir}":
|
|
365
|
+
|
|
366
|
+
1. convex_pre_deploy_gate — Structural checks (schema, auth config, initialization)
|
|
367
|
+
2. convex_check_env_vars — Verify all required env vars are set
|
|
368
|
+
3. convex_audit_authorization — Ensure auth coverage is adequate
|
|
369
|
+
4. convex_audit_actions — No ctx.db access in actions
|
|
370
|
+
5. convex_snapshot_schema — Capture current schema state
|
|
371
|
+
6. convex_schema_migration_plan — Compare against previous snapshot for breaking changes
|
|
372
|
+
7. convex_quality_gate — Final quality check with thresholds
|
|
373
|
+
|
|
374
|
+
Report: DEPLOY or DO NOT DEPLOY with specific blockers to fix.`,
|
|
375
|
+
},
|
|
376
|
+
},
|
|
377
|
+
],
|
|
378
|
+
};
|
|
379
|
+
}
|
|
380
|
+
if (name === "security-review") {
|
|
381
|
+
return {
|
|
382
|
+
description: "Security-focused audit sequence",
|
|
383
|
+
messages: [
|
|
384
|
+
{
|
|
385
|
+
role: "user",
|
|
386
|
+
content: {
|
|
387
|
+
type: "text",
|
|
388
|
+
text: `Run a security review of the Convex project at "${projectDir}":
|
|
389
|
+
|
|
390
|
+
1. convex_audit_authorization — Auth coverage on all public endpoints
|
|
391
|
+
2. convex_check_type_safety — Type safety bypasses (as any)
|
|
392
|
+
3. convex_audit_actions — Action safety (ctx.db, error handling, "use node")
|
|
393
|
+
4. convex_audit_storage_usage — Storage permission patterns
|
|
394
|
+
5. convex_audit_pagination — Unbounded numItems (DoS risk)
|
|
395
|
+
6. convex_audit_transaction_safety — Race condition risks
|
|
396
|
+
|
|
397
|
+
Focus on: unauthorized data access, unvalidated inputs, missing error boundaries, and potential data corruption vectors.`,
|
|
398
|
+
},
|
|
399
|
+
},
|
|
400
|
+
],
|
|
401
|
+
};
|
|
402
|
+
}
|
|
403
|
+
return {
|
|
404
|
+
description: "Unknown prompt",
|
|
405
|
+
messages: [{
|
|
406
|
+
role: "user",
|
|
407
|
+
content: { type: "text", text: `Unknown prompt: ${name}` },
|
|
408
|
+
}],
|
|
409
|
+
};
|
|
410
|
+
});
|
|
172
411
|
// ── Start server ────────────────────────────────────────────────────
|
|
173
412
|
async function main() {
|
|
174
413
|
const transport = new StdioServerTransport();
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
import { resolve } from "node:path";
|
|
2
|
+
import { getDb, genId } from "../db.js";
|
|
3
|
+
import { getQuickRef } from "./toolRegistry.js";
|
|
4
|
+
const DEFAULT_THRESHOLDS = {
|
|
5
|
+
maxCritical: 0,
|
|
6
|
+
maxWarnings: 50,
|
|
7
|
+
minAuthCoveragePercent: 10,
|
|
8
|
+
maxAsAnyCasts: 500,
|
|
9
|
+
maxUnboundedCollects: 100,
|
|
10
|
+
maxDanglingRefs: 20,
|
|
11
|
+
};
|
|
12
|
+
function runQualityGate(projectDir, thresholds) {
|
|
13
|
+
const db = getDb();
|
|
14
|
+
const checks = [];
|
|
15
|
+
// Helper: get latest audit by type
|
|
16
|
+
function getLatest(auditType) {
|
|
17
|
+
return db.prepare("SELECT issues_json, issue_count FROM audit_results WHERE project_dir = ? AND audit_type = ? ORDER BY audited_at DESC LIMIT 1").get(projectDir, auditType) ?? null;
|
|
18
|
+
}
|
|
19
|
+
function countBySeverity(json, severity) {
|
|
20
|
+
try {
|
|
21
|
+
const issues = JSON.parse(json);
|
|
22
|
+
return Array.isArray(issues) ? issues.filter((i) => i.severity === severity).length : 0;
|
|
23
|
+
}
|
|
24
|
+
catch {
|
|
25
|
+
return 0;
|
|
26
|
+
}
|
|
27
|
+
}
|
|
28
|
+
// Aggregate critical/warning counts across all audit types
|
|
29
|
+
const auditTypes = [
|
|
30
|
+
"schema", "functions", "authorization", "query_efficiency",
|
|
31
|
+
"action_audit", "type_safety", "transaction_safety", "storage",
|
|
32
|
+
"pagination", "data_modeling", "vector_search", "scheduler_audit",
|
|
33
|
+
];
|
|
34
|
+
let totalCritical = 0;
|
|
35
|
+
let totalWarnings = 0;
|
|
36
|
+
let auditsRun = 0;
|
|
37
|
+
for (const type of auditTypes) {
|
|
38
|
+
const latest = getLatest(type);
|
|
39
|
+
if (!latest)
|
|
40
|
+
continue;
|
|
41
|
+
auditsRun++;
|
|
42
|
+
totalCritical += countBySeverity(latest.issues_json, "critical");
|
|
43
|
+
totalWarnings += countBySeverity(latest.issues_json, "warning");
|
|
44
|
+
}
|
|
45
|
+
// Check 1: Critical issues
|
|
46
|
+
checks.push({
|
|
47
|
+
metric: "critical_issues",
|
|
48
|
+
passed: totalCritical <= thresholds.maxCritical,
|
|
49
|
+
actual: totalCritical,
|
|
50
|
+
threshold: thresholds.maxCritical,
|
|
51
|
+
severity: totalCritical > thresholds.maxCritical ? "blocker" : "info",
|
|
52
|
+
});
|
|
53
|
+
// Check 2: Warning issues
|
|
54
|
+
checks.push({
|
|
55
|
+
metric: "warning_issues",
|
|
56
|
+
passed: totalWarnings <= thresholds.maxWarnings,
|
|
57
|
+
actual: totalWarnings,
|
|
58
|
+
threshold: thresholds.maxWarnings,
|
|
59
|
+
severity: totalWarnings > thresholds.maxWarnings ? "warning" : "info",
|
|
60
|
+
});
|
|
61
|
+
// Check 3: Authorization coverage
|
|
62
|
+
const authAudit = getLatest("authorization");
|
|
63
|
+
if (authAudit) {
|
|
64
|
+
try {
|
|
65
|
+
const issues = JSON.parse(authAudit.issues_json);
|
|
66
|
+
// Parse coverage from stored data — look for the structured result pattern
|
|
67
|
+
// The auth audit stores issues, but we need the summary... use issue counts as proxy
|
|
68
|
+
const authIssues = Array.isArray(issues) ? issues.length : 0;
|
|
69
|
+
// Use inverse metric: fewer auth issues = better coverage
|
|
70
|
+
checks.push({
|
|
71
|
+
metric: "auth_issues",
|
|
72
|
+
passed: true, // Auth coverage is informational in gate
|
|
73
|
+
actual: authIssues,
|
|
74
|
+
threshold: "tracked",
|
|
75
|
+
severity: authIssues > 50 ? "warning" : "info",
|
|
76
|
+
});
|
|
77
|
+
}
|
|
78
|
+
catch { /* skip */ }
|
|
79
|
+
}
|
|
80
|
+
// Check 4: Type safety (as any casts)
|
|
81
|
+
const typeSafety = getLatest("type_safety");
|
|
82
|
+
if (typeSafety) {
|
|
83
|
+
try {
|
|
84
|
+
const issues = JSON.parse(typeSafety.issues_json);
|
|
85
|
+
const asAnyIssues = Array.isArray(issues)
|
|
86
|
+
? issues.filter((i) => i.message?.includes("as any")).length
|
|
87
|
+
: 0;
|
|
88
|
+
// Each as-any issue represents a FILE, count from message for actual number
|
|
89
|
+
const actualCasts = Array.isArray(issues)
|
|
90
|
+
? issues.reduce((sum, i) => {
|
|
91
|
+
const countMatch = i.message?.match(/(\d+)\s+`as any`/);
|
|
92
|
+
return sum + (countMatch ? parseInt(countMatch[1], 10) : 0);
|
|
93
|
+
}, 0)
|
|
94
|
+
: 0;
|
|
95
|
+
checks.push({
|
|
96
|
+
metric: "as_any_casts",
|
|
97
|
+
passed: actualCasts <= thresholds.maxAsAnyCasts,
|
|
98
|
+
actual: actualCasts,
|
|
99
|
+
threshold: thresholds.maxAsAnyCasts,
|
|
100
|
+
severity: actualCasts > thresholds.maxAsAnyCasts ? "warning" : "info",
|
|
101
|
+
});
|
|
102
|
+
}
|
|
103
|
+
catch { /* skip */ }
|
|
104
|
+
}
|
|
105
|
+
// Check 5: Unbounded collects
|
|
106
|
+
const queryEfficiency = getLatest("query_efficiency");
|
|
107
|
+
if (queryEfficiency) {
|
|
108
|
+
try {
|
|
109
|
+
const issues = JSON.parse(queryEfficiency.issues_json);
|
|
110
|
+
const unbounded = Array.isArray(issues)
|
|
111
|
+
? issues.filter((i) => i.message?.includes(".collect()")).length
|
|
112
|
+
: 0;
|
|
113
|
+
checks.push({
|
|
114
|
+
metric: "unbounded_collects",
|
|
115
|
+
passed: unbounded <= thresholds.maxUnboundedCollects,
|
|
116
|
+
actual: unbounded,
|
|
117
|
+
threshold: thresholds.maxUnboundedCollects,
|
|
118
|
+
severity: unbounded > thresholds.maxUnboundedCollects ? "warning" : "info",
|
|
119
|
+
});
|
|
120
|
+
}
|
|
121
|
+
catch { /* skip */ }
|
|
122
|
+
}
|
|
123
|
+
// Check 6: Dangling references
|
|
124
|
+
const dataModeling = getLatest("data_modeling");
|
|
125
|
+
if (dataModeling) {
|
|
126
|
+
try {
|
|
127
|
+
const issues = JSON.parse(dataModeling.issues_json);
|
|
128
|
+
const dangling = Array.isArray(issues)
|
|
129
|
+
? issues.filter((i) => i.message?.includes("dangling") || i.message?.includes("non-existent")).length
|
|
130
|
+
: 0;
|
|
131
|
+
checks.push({
|
|
132
|
+
metric: "dangling_refs",
|
|
133
|
+
passed: dangling <= thresholds.maxDanglingRefs,
|
|
134
|
+
actual: dangling,
|
|
135
|
+
threshold: thresholds.maxDanglingRefs,
|
|
136
|
+
severity: dangling > thresholds.maxDanglingRefs ? "warning" : "info",
|
|
137
|
+
});
|
|
138
|
+
}
|
|
139
|
+
catch { /* skip */ }
|
|
140
|
+
}
|
|
141
|
+
// Check 7: Audit coverage — how many audit types have been run
|
|
142
|
+
checks.push({
|
|
143
|
+
metric: "audit_coverage",
|
|
144
|
+
passed: auditsRun >= 3,
|
|
145
|
+
actual: `${auditsRun}/${auditTypes.length}`,
|
|
146
|
+
threshold: "3+",
|
|
147
|
+
severity: auditsRun < 3 ? "warning" : "info",
|
|
148
|
+
});
|
|
149
|
+
// Calculate score (0-100)
|
|
150
|
+
const passedChecks = checks.filter(c => c.passed).length;
|
|
151
|
+
const totalChecks = checks.length;
|
|
152
|
+
const blockerCount = checks.filter(c => !c.passed && c.severity === "blocker").length;
|
|
153
|
+
const score = blockerCount > 0 ? Math.min(40, Math.round(100 * passedChecks / totalChecks))
|
|
154
|
+
: Math.round(100 * passedChecks / totalChecks);
|
|
155
|
+
const grade = score >= 90 ? "A" : score >= 75 ? "B" : score >= 60 ? "C" : score >= 40 ? "D" : "F";
|
|
156
|
+
const passed = blockerCount === 0 && checks.filter(c => !c.passed && c.severity === "warning").length < 3;
|
|
157
|
+
return { passed, checks, score, grade };
|
|
158
|
+
}
|
|
159
|
+
// ── Tool Definition ─────────────────────────────────────────────────
|
|
160
|
+
export const qualityGateTools = [
|
|
161
|
+
{
|
|
162
|
+
name: "convex_quality_gate",
|
|
163
|
+
description: "Run a configurable quality gate across all stored audit results. Like SonarQube's quality gate — scores your project (A-F), checks configurable thresholds (max critical issues, min auth coverage, max as-any casts), and returns pass/fail with detailed metrics. Run individual audit tools first to populate data.",
|
|
164
|
+
inputSchema: {
|
|
165
|
+
type: "object",
|
|
166
|
+
properties: {
|
|
167
|
+
projectDir: {
|
|
168
|
+
type: "string",
|
|
169
|
+
description: "Absolute path to the project root",
|
|
170
|
+
},
|
|
171
|
+
thresholds: {
|
|
172
|
+
type: "object",
|
|
173
|
+
description: "Custom thresholds. Defaults: maxCritical=0, maxWarnings=50, minAuthCoveragePercent=10, maxAsAnyCasts=500, maxUnboundedCollects=100, maxDanglingRefs=20",
|
|
174
|
+
properties: {
|
|
175
|
+
maxCritical: { type: "number" },
|
|
176
|
+
maxWarnings: { type: "number" },
|
|
177
|
+
minAuthCoveragePercent: { type: "number" },
|
|
178
|
+
maxAsAnyCasts: { type: "number" },
|
|
179
|
+
maxUnboundedCollects: { type: "number" },
|
|
180
|
+
maxDanglingRefs: { type: "number" },
|
|
181
|
+
},
|
|
182
|
+
},
|
|
183
|
+
},
|
|
184
|
+
required: ["projectDir"],
|
|
185
|
+
},
|
|
186
|
+
handler: async (args) => {
|
|
187
|
+
const projectDir = resolve(args.projectDir);
|
|
188
|
+
const thresholds = {
|
|
189
|
+
...DEFAULT_THRESHOLDS,
|
|
190
|
+
...(args.thresholds ?? {}),
|
|
191
|
+
};
|
|
192
|
+
const result = runQualityGate(projectDir, thresholds);
|
|
193
|
+
// Store quality gate result
|
|
194
|
+
const db = getDb();
|
|
195
|
+
db.prepare("INSERT INTO deploy_checks (id, project_dir, check_type, passed, findings) VALUES (?, ?, ?, ?, ?)").run(genId("deploy"), projectDir, "quality_gate", result.passed ? 1 : 0, JSON.stringify(result));
|
|
196
|
+
return {
|
|
197
|
+
...result,
|
|
198
|
+
thresholdsUsed: thresholds,
|
|
199
|
+
quickRef: getQuickRef("convex_quality_gate"),
|
|
200
|
+
};
|
|
201
|
+
},
|
|
202
|
+
},
|
|
203
|
+
];
|
|
204
|
+
//# sourceMappingURL=qualityGateTools.js.map
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
import { resolve } from "node:path";
|
|
2
|
+
import { getDb, genId } from "../db.js";
|
|
3
|
+
import { getQuickRef } from "./toolRegistry.js";
|
|
4
|
+
function severityToSarif(sev) {
|
|
5
|
+
if (sev === "critical")
|
|
6
|
+
return "error";
|
|
7
|
+
if (sev === "warning")
|
|
8
|
+
return "warning";
|
|
9
|
+
return "note";
|
|
10
|
+
}
|
|
11
|
+
function buildSarif(projectDir, auditTypes, limit) {
|
|
12
|
+
const db = getDb();
|
|
13
|
+
const placeholders = auditTypes.map(() => "?").join(", ");
|
|
14
|
+
const rows = db.prepare(`SELECT audit_type, issues_json, issue_count, audited_at
|
|
15
|
+
FROM audit_results
|
|
16
|
+
WHERE project_dir = ? AND audit_type IN (${placeholders})
|
|
17
|
+
ORDER BY audited_at DESC`).all(projectDir, ...auditTypes);
|
|
18
|
+
// Dedupe: keep only latest per audit_type
|
|
19
|
+
const seen = new Set();
|
|
20
|
+
const latestRows = [];
|
|
21
|
+
for (const row of rows) {
|
|
22
|
+
if (!seen.has(row.audit_type)) {
|
|
23
|
+
seen.add(row.audit_type);
|
|
24
|
+
latestRows.push(row);
|
|
25
|
+
}
|
|
26
|
+
}
|
|
27
|
+
const rulesMap = new Map();
|
|
28
|
+
const results = [];
|
|
29
|
+
for (const row of latestRows) {
|
|
30
|
+
let issues;
|
|
31
|
+
try {
|
|
32
|
+
issues = JSON.parse(row.issues_json);
|
|
33
|
+
}
|
|
34
|
+
catch {
|
|
35
|
+
continue;
|
|
36
|
+
}
|
|
37
|
+
if (!Array.isArray(issues))
|
|
38
|
+
continue;
|
|
39
|
+
for (const issue of issues.slice(0, limit)) {
|
|
40
|
+
const ruleId = `convex/${row.audit_type}/${issue.message?.slice(0, 40)?.replace(/[^a-zA-Z0-9]/g, "-") ?? "unknown"}`;
|
|
41
|
+
if (!rulesMap.has(ruleId)) {
|
|
42
|
+
rulesMap.set(ruleId, {
|
|
43
|
+
id: ruleId,
|
|
44
|
+
name: row.audit_type,
|
|
45
|
+
shortDescription: { text: issue.message?.slice(0, 120) ?? "Issue found" },
|
|
46
|
+
defaultConfiguration: { level: severityToSarif(issue.severity ?? "warning") },
|
|
47
|
+
});
|
|
48
|
+
}
|
|
49
|
+
// Parse location "file:line" format
|
|
50
|
+
const loc = issue.location ?? "";
|
|
51
|
+
const colonIdx = loc.lastIndexOf(":");
|
|
52
|
+
const file = colonIdx > 0 ? loc.slice(0, colonIdx) : loc;
|
|
53
|
+
const line = colonIdx > 0 ? parseInt(loc.slice(colonIdx + 1), 10) : undefined;
|
|
54
|
+
const sarifResult = {
|
|
55
|
+
ruleId,
|
|
56
|
+
level: severityToSarif(issue.severity ?? "warning"),
|
|
57
|
+
message: { text: issue.message ?? "Issue detected" },
|
|
58
|
+
locations: [{
|
|
59
|
+
physicalLocation: {
|
|
60
|
+
artifactLocation: { uri: file || "unknown" },
|
|
61
|
+
...(line && !isNaN(line) ? { region: { startLine: line } } : {}),
|
|
62
|
+
},
|
|
63
|
+
}],
|
|
64
|
+
};
|
|
65
|
+
if (issue.fix) {
|
|
66
|
+
sarifResult.fixes = [{ description: { text: issue.fix } }];
|
|
67
|
+
}
|
|
68
|
+
results.push(sarifResult);
|
|
69
|
+
}
|
|
70
|
+
}
|
|
71
|
+
return {
|
|
72
|
+
$schema: "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/main/sarif-2.1/schema/sarif-schema-2.1.0.json",
|
|
73
|
+
version: "2.1.0",
|
|
74
|
+
runs: [{
|
|
75
|
+
tool: {
|
|
76
|
+
driver: {
|
|
77
|
+
name: "convex-mcp-nodebench",
|
|
78
|
+
version: "0.8.0",
|
|
79
|
+
informationUri: "https://www.npmjs.com/package/@homenshum/convex-mcp-nodebench",
|
|
80
|
+
rules: [...rulesMap.values()],
|
|
81
|
+
},
|
|
82
|
+
},
|
|
83
|
+
results,
|
|
84
|
+
}],
|
|
85
|
+
};
|
|
86
|
+
}
|
|
87
|
+
function fingerprint(issue) {
|
|
88
|
+
// Deterministic fingerprint from location + message prefix (ignoring counts)
|
|
89
|
+
const loc = (issue.location ?? "").replace(/:\d+$/, ""); // strip line number for stability
|
|
90
|
+
const msgPrefix = (issue.message ?? "").slice(0, 60);
|
|
91
|
+
return `${loc}::${msgPrefix}`;
|
|
92
|
+
}
|
|
93
|
+
function computeBaselineDiff(projectDir) {
|
|
94
|
+
const db = getDb();
|
|
95
|
+
// Get all audit types that have at least 2 runs
|
|
96
|
+
const auditTypes = db.prepare(`SELECT DISTINCT audit_type FROM audit_results
|
|
97
|
+
WHERE project_dir = ?
|
|
98
|
+
GROUP BY audit_type
|
|
99
|
+
HAVING COUNT(*) >= 2`).all(projectDir);
|
|
100
|
+
const newIssues = [];
|
|
101
|
+
const fixedIssues = [];
|
|
102
|
+
const existingIssues = [];
|
|
103
|
+
for (const { audit_type } of auditTypes) {
|
|
104
|
+
const rows = db.prepare(`SELECT issues_json FROM audit_results
|
|
105
|
+
WHERE project_dir = ? AND audit_type = ?
|
|
106
|
+
ORDER BY audited_at DESC LIMIT 2`).all(projectDir, audit_type);
|
|
107
|
+
if (rows.length < 2)
|
|
108
|
+
continue;
|
|
109
|
+
let currentIssues, previousIssues;
|
|
110
|
+
try {
|
|
111
|
+
currentIssues = JSON.parse(rows[0].issues_json);
|
|
112
|
+
previousIssues = JSON.parse(rows[1].issues_json);
|
|
113
|
+
}
|
|
114
|
+
catch {
|
|
115
|
+
continue;
|
|
116
|
+
}
|
|
117
|
+
if (!Array.isArray(currentIssues) || !Array.isArray(previousIssues))
|
|
118
|
+
continue;
|
|
119
|
+
const prevFingerprints = new Set(previousIssues.map(fingerprint));
|
|
120
|
+
const currFingerprints = new Set(currentIssues.map(fingerprint));
|
|
121
|
+
// New: in current but not in previous
|
|
122
|
+
for (const issue of currentIssues) {
|
|
123
|
+
const fp = fingerprint(issue);
|
|
124
|
+
const diffIssue = {
|
|
125
|
+
status: prevFingerprints.has(fp) ? "existing" : "new",
|
|
126
|
+
auditType: audit_type,
|
|
127
|
+
severity: issue.severity ?? "warning",
|
|
128
|
+
location: issue.location ?? "",
|
|
129
|
+
message: issue.message ?? "",
|
|
130
|
+
fix: issue.fix,
|
|
131
|
+
};
|
|
132
|
+
if (diffIssue.status === "new")
|
|
133
|
+
newIssues.push(diffIssue);
|
|
134
|
+
else
|
|
135
|
+
existingIssues.push(diffIssue);
|
|
136
|
+
}
|
|
137
|
+
// Fixed: in previous but not in current
|
|
138
|
+
for (const issue of previousIssues) {
|
|
139
|
+
const fp = fingerprint(issue);
|
|
140
|
+
if (!currFingerprints.has(fp)) {
|
|
141
|
+
fixedIssues.push({
|
|
142
|
+
status: "fixed",
|
|
143
|
+
auditType: audit_type,
|
|
144
|
+
severity: issue.severity ?? "warning",
|
|
145
|
+
location: issue.location ?? "",
|
|
146
|
+
message: issue.message ?? "",
|
|
147
|
+
});
|
|
148
|
+
}
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
const trend = fixedIssues.length > newIssues.length ? "improving" :
|
|
152
|
+
newIssues.length > fixedIssues.length ? "degrading" : "stable";
|
|
153
|
+
return {
|
|
154
|
+
newIssues,
|
|
155
|
+
fixedIssues,
|
|
156
|
+
existingIssues,
|
|
157
|
+
summary: {
|
|
158
|
+
totalNew: newIssues.length,
|
|
159
|
+
totalFixed: fixedIssues.length,
|
|
160
|
+
totalExisting: existingIssues.length,
|
|
161
|
+
trend,
|
|
162
|
+
},
|
|
163
|
+
};
|
|
164
|
+
}
|
|
165
|
+
// ── Tool Definitions ────────────────────────────────────────────────
|
|
166
|
+
export const reportingTools = [
|
|
167
|
+
{
|
|
168
|
+
name: "convex_export_sarif",
|
|
169
|
+
description: "Export stored audit results as SARIF 2.1.0 JSON. SARIF is the industry standard for static analysis results — integrates with GitHub Code Scanning, VS Code Problems panel, and CI pipelines. Outputs file:line locations, severity levels, and fix suggestions.",
|
|
170
|
+
inputSchema: {
|
|
171
|
+
type: "object",
|
|
172
|
+
properties: {
|
|
173
|
+
projectDir: {
|
|
174
|
+
type: "string",
|
|
175
|
+
description: "Absolute path to the project root",
|
|
176
|
+
},
|
|
177
|
+
auditTypes: {
|
|
178
|
+
type: "array",
|
|
179
|
+
items: { type: "string" },
|
|
180
|
+
description: "Which audit types to include. Defaults to all. Options: schema, functions, authorization, query_efficiency, action_audit, type_safety, transaction_safety, storage, pagination, data_modeling, dev_setup, migration_plan",
|
|
181
|
+
},
|
|
182
|
+
maxResults: {
|
|
183
|
+
type: "number",
|
|
184
|
+
description: "Max results per audit type (default 100)",
|
|
185
|
+
},
|
|
186
|
+
},
|
|
187
|
+
required: ["projectDir"],
|
|
188
|
+
},
|
|
189
|
+
handler: async (args) => {
|
|
190
|
+
const projectDir = resolve(args.projectDir);
|
|
191
|
+
const allTypes = [
|
|
192
|
+
"schema", "functions", "authorization", "query_efficiency",
|
|
193
|
+
"action_audit", "type_safety", "transaction_safety", "storage",
|
|
194
|
+
"pagination", "data_modeling", "dev_setup", "migration_plan",
|
|
195
|
+
];
|
|
196
|
+
const auditTypes = args.auditTypes?.length ? args.auditTypes : allTypes;
|
|
197
|
+
const sarif = buildSarif(projectDir, auditTypes, args.maxResults ?? 100);
|
|
198
|
+
const resultCount = sarif.runs?.[0]?.results?.length ?? 0;
|
|
199
|
+
return {
|
|
200
|
+
sarif,
|
|
201
|
+
summary: {
|
|
202
|
+
format: "SARIF 2.1.0",
|
|
203
|
+
totalResults: resultCount,
|
|
204
|
+
auditTypesIncluded: auditTypes,
|
|
205
|
+
usage: "Pipe this JSON to a .sarif file, then upload to GitHub Code Scanning or open in VS Code SARIF Viewer",
|
|
206
|
+
},
|
|
207
|
+
quickRef: getQuickRef("convex_export_sarif"),
|
|
208
|
+
};
|
|
209
|
+
},
|
|
210
|
+
},
|
|
211
|
+
{
    name: "convex_audit_diff",
    description: "Compare the latest audit run against the previous run to show new issues, fixed issues, and trend direction (improving/stable/degrading). Like SonarQube's new code analysis — tells you whether your changes introduced or resolved issues.",
    // MCP input schema: only the project root path is required.
    inputSchema: {
        type: "object",
        properties: {
            projectDir: {
                type: "string",
                description: "Absolute path to the project root",
            },
        },
        required: ["projectDir"],
    },
    // Computes the baseline diff, persists a summary row in audit_results,
    // and returns a trimmed view (issue lists capped at 30 entries each).
    handler: async (args) => {
        const projectDir = resolve(args.projectDir);
        const diff = computeBaselineDiff(projectDir);
        // Store the diff result (issue_count records the NEW-issue total)
        const db = getDb();
        db.prepare("INSERT INTO audit_results (id, project_dir, audit_type, issues_json, issue_count) VALUES (?, ?, ?, ?, ?)").run(genId("audit"), projectDir, "baseline_diff", JSON.stringify(diff.summary), diff.summary.totalNew);
        return {
            ...diff,
            // Cap lists so the MCP response stays small.
            newIssues: diff.newIssues.slice(0, 30),
            fixedIssues: diff.fixedIssues.slice(0, 30),
            existingIssues: undefined, // Too verbose — available via full audit
            quickRef: getQuickRef("convex_audit_diff"),
        };
    },
},
|
|
239
|
+
];
|
|
240
|
+
//# sourceMappingURL=reportingTools.js.map
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
import { readFileSync, existsSync, readdirSync } from "node:fs";
|
|
2
|
+
import { join, resolve } from "node:path";
|
|
3
|
+
import { getDb, genId } from "../db.js";
|
|
4
|
+
import { getQuickRef } from "./toolRegistry.js";
|
|
5
|
+
// ── Helpers ──────────────────────────────────────────────────────────
|
|
6
|
+
// Locate the Convex functions directory for a project: either <root>/convex
// or <root>/src/convex. Returns the first existing candidate, or null when
// neither is present on disk.
function findConvexDir(projectDir) {
    const candidates = [join(projectDir, "convex"), join(projectDir, "src", "convex")];
    return candidates.find((candidate) => existsSync(candidate)) ?? null;
}
|
|
14
|
+
// Recursively list every .ts source file under `dir`, excluding node_modules
// and Convex's generated `_generated` directory. A missing directory yields [].
function collectTsFiles(dir) {
    if (!existsSync(dir))
        return [];
    const collected = [];
    const skipDirs = new Set(["node_modules", "_generated"]);
    for (const entry of readdirSync(dir, { withFileTypes: true })) {
        const fullPath = join(dir, entry.name);
        if (entry.isDirectory()) {
            if (!skipDirs.has(entry.name)) {
                collected.push(...collectTsFiles(fullPath));
            }
        }
        else if (entry.isFile() && entry.name.endsWith(".ts")) {
            collected.push(fullPath);
        }
    }
    return collected;
}
|
|
30
|
+
/**
 * Statically audit scheduler usage (ctx.scheduler.runAfter / runAt) across
 * all .ts files under `convexDir`.
 *
 * Returns { issues, stats } where each issue has severity/location/
 * functionName/message/fix and stats summarizes call counts.
 *
 * Fixes vs. previous version:
 *  - Sub-second-delay check (Check 2) now resolves each runAfter match to its
 *    own line. Previously it re-searched the whole file text from the function
 *    start, so every match mapped to the FIRST runAfter occurrence and later
 *    calls were reported on wrong lines (or their delays never inspected).
 *  - The function pattern now also matches query/internalQuery so Check 5
 *    (scheduler use inside a read-only query) is reachable; it was dead code.
 */
function auditSchedulers(convexDir) {
    // Recursively gather .ts sources, skipping node_modules and generated
    // code (same filters as the module-level collectTsFiles; kept local so
    // this auditor is self-contained).
    const gatherTsFiles = (dir) => {
        if (!existsSync(dir))
            return [];
        const found = [];
        for (const entry of readdirSync(dir, { withFileTypes: true })) {
            const full = join(dir, entry.name);
            if (entry.isDirectory() && entry.name !== "node_modules" && entry.name !== "_generated") {
                found.push(...gatherTsFiles(full));
            }
            else if (entry.isFile() && entry.name.endsWith(".ts")) {
                found.push(full);
            }
        }
        return found;
    };
    const files = gatherTsFiles(convexDir);
    const issues = [];
    let totalSchedulerCalls = 0;
    let runAfterCalls = 0;
    let runAtCalls = 0;
    let selfSchedulingFunctions = 0;
    const filesWithSchedulers = new Set();
    for (const filePath of files) {
        const content = readFileSync(filePath, "utf-8");
        const relativePath = filePath.replace(convexDir, "").replace(/^[\\/]/, "");
        const lines = content.split("\n");
        // Find all exported Convex functions (queries included — see Check 5).
        const funcPattern = /export\s+(?:const\s+(\w+)\s*=|default)\s+(query|internalQuery|mutation|internalMutation|action|internalAction)\s*\(/g;
        let m;
        while ((m = funcPattern.exec(content)) !== null) {
            const funcName = m[1] || "default";
            const funcType = m[2];
            const startLine = content.slice(0, m.index).split("\n").length - 1; // 0-indexed
            // Extract the function body by brace matching (scan capped at 100 lines).
            let depth = 0;
            let foundOpen = false;
            let endLine = Math.min(startLine + 100, lines.length);
            for (let j = startLine; j < lines.length; j++) {
                for (const ch of lines[j]) {
                    if (ch === "{") {
                        depth++;
                        foundOpen = true;
                    }
                    if (ch === "}")
                        depth--;
                }
                if (foundOpen && depth <= 0) {
                    endLine = j + 1;
                    break;
                }
            }
            const body = lines.slice(startLine, endLine).join("\n");
            // Count scheduler calls in this body
            const runAfterMatches = [...body.matchAll(/ctx\.scheduler\.runAfter\s*\(/g)];
            const runAtMatches = [...body.matchAll(/ctx\.scheduler\.runAt\s*\(/g)];
            const allSchedulerCalls = runAfterMatches.length + runAtMatches.length;
            if (allSchedulerCalls === 0)
                continue;
            filesWithSchedulers.add(relativePath);
            totalSchedulerCalls += allSchedulerCalls;
            runAfterCalls += runAfterMatches.length;
            runAtCalls += runAtMatches.length;
            // Check 5: Scheduling from a query (not possible — queries are read-only).
            // The remaining checks assume a mutation/action context, so stop here.
            if (funcType === "query" || funcType === "internalQuery") {
                issues.push({
                    severity: "critical",
                    location: `${relativePath}:${startLine + 1}`,
                    functionName: funcName,
                    message: `Query "${funcName}" tries to use ctx.scheduler — queries are read-only and cannot schedule functions.`,
                    fix: "Move scheduler calls to a mutation or action",
                });
                continue;
            }
            // Check 1: Self-scheduling (infinite loop risk) — schedules itself by name
            const selfRefPattern = new RegExp(`ctx\\.scheduler\\.run(?:After|At)\\s*\\([^,]*,\\s*(?:internal|api)\\.[^,]*\\.${funcName}\\b`);
            if (selfRefPattern.test(body)) {
                selfSchedulingFunctions++;
                // Heuristic: any conditional / early return / empty-check suggests a guard
                const hasTermination = /if\s*\(|return\s+(?:null|undefined|void)|\.length\s*(?:===?|<=?)\s*0/.test(body);
                issues.push({
                    severity: hasTermination ? "warning" : "critical",
                    location: `${relativePath}:${startLine + 1}`,
                    functionName: funcName,
                    message: `${funcType} "${funcName}" schedules itself${hasTermination ? " (has conditional guard)" : " without clear termination — infinite loop risk"}.`,
                    fix: hasTermination
                        ? "Verify the termination condition covers all edge cases"
                        : "Add a termination condition (max retries, empty queue check) before self-scheduling",
                });
            }
            // Check 2: Very short delay (< 1 second) — may indicate missing backoff
            for (const match of runAfterMatches) {
                // match.index is an offset within `body`; convert to a 1-based file line.
                const callLine = startLine + body.slice(0, match.index).split("\n").length;
                const afterCall = body.slice(match.index, match.index + 100);
                const delayMatch = afterCall.match(/runAfter\s*\(\s*(\d+(?:\.\d+)?)\s*[,)]/);
                if (delayMatch) {
                    const delay = parseFloat(delayMatch[1]);
                    if (delay < 1) {
                        issues.push({
                            severity: "warning",
                            location: `${relativePath}:${callLine}`,
                            functionName: funcName,
                            message: `scheduler.runAfter(${delay}, ...) uses sub-second delay. In retry/loop patterns this can overwhelm the scheduler.`,
                            fix: "Use at least 1-second delay. For retries, implement exponential backoff (e.g., delay * 2^attempt)",
                        });
                    }
                }
            }
            // Check 3: Scheduler in action without try/catch
            if ((funcType === "action" || funcType === "internalAction") && allSchedulerCalls > 0) {
                if (!/try\s*\{/.test(body)) {
                    issues.push({
                        severity: "info",
                        location: `${relativePath}:${startLine + 1}`,
                        functionName: funcName,
                        message: `${funcType} "${funcName}" uses scheduler without try/catch. If the action fails before scheduling, work may be lost.`,
                        fix: "Wrap scheduler calls in try/catch or move scheduling to a mutation for transactional guarantees",
                    });
                }
            }
            // Check 4: Multiple scheduler calls in the same function (fan-out)
            if (allSchedulerCalls > 3) {
                issues.push({
                    severity: "info",
                    location: `${relativePath}:${startLine + 1}`,
                    functionName: funcName,
                    message: `${funcType} "${funcName}" makes ${allSchedulerCalls} scheduler calls. Consider if a single orchestrator action would be cleaner.`,
                    fix: "Group related work into fewer scheduled calls or use a queue-based pattern",
                });
            }
        }
    }
    return {
        issues,
        stats: {
            totalSchedulerCalls,
            runAfterCalls,
            runAtCalls,
            selfSchedulingFunctions,
            filesWithSchedulers: filesWithSchedulers.size,
        },
    };
}
|
|
160
|
+
// ── Tool Definition ─────────────────────────────────────────────────
|
|
161
|
+
const schedulerAuditTool = {
    name: "convex_audit_schedulers",
    description: "Audit Convex scheduled function usage (ctx.scheduler.runAfter/runAt): detects infinite self-scheduling loops, sub-second delays without backoff, scheduler calls in queries (impossible), unprotected scheduler calls in actions, and excessive fan-out patterns.",
    inputSchema: {
        type: "object",
        properties: {
            projectDir: {
                type: "string",
                description: "Absolute path to the project root containing a convex/ directory",
            },
        },
        required: ["projectDir"],
    },
    // Runs the static scheduler audit, records the run in the local DB, and
    // returns a severity summary plus at most the first 30 findings.
    handler: async (args) => {
        const projectDir = resolve(args.projectDir);
        const convexDir = findConvexDir(projectDir);
        if (convexDir === null) {
            return { error: "No convex/ directory found" };
        }
        const { issues, stats } = auditSchedulers(convexDir);
        // Persist this run so later reporting tools can query it.
        getDb()
            .prepare("INSERT INTO audit_results (id, project_dir, audit_type, issues_json, issue_count) VALUES (?, ?, ?, ?, ?)")
            .run(genId("audit"), projectDir, "scheduler_audit", JSON.stringify(issues), issues.length);
        const countBySeverity = (level) => issues.filter((issue) => issue.severity === level).length;
        return {
            summary: {
                ...stats,
                totalIssues: issues.length,
                critical: countBySeverity("critical"),
                warnings: countBySeverity("warning"),
            },
            issues: issues.slice(0, 30),
            quickRef: getQuickRef("convex_audit_schedulers"),
        };
    },
};
// Exported as an array to match the other tool modules in this package.
export const schedulerTools = [schedulerAuditTool];
|
|
197
|
+
//# sourceMappingURL=schedulerTools.js.map
|
|
@@ -411,6 +411,80 @@ export const REGISTRY = [
|
|
|
411
411
|
phase: "deploy",
|
|
412
412
|
complexity: "medium",
|
|
413
413
|
},
|
|
414
|
+
// ── Reporting Tools ─────────────────────
// Each registry entry supplies discovery metadata for one MCP tool; the
// `quickRef` object is the payload returned by getQuickRef() after a run.
{
    name: "convex_export_sarif",
    category: "integration",
    tags: ["sarif", "export", "report", "github", "code-scanning", "ci", "static-analysis"],
    quickRef: {
        nextAction: "Upload the SARIF file to GitHub Code Scanning or open in VS Code SARIF Viewer",
        nextTools: ["convex_audit_diff", "convex_quality_gate"],
        methodology: "convex_deploy_verification",
        relatedGotchas: [],
        confidence: "high",
    },
    phase: "deploy",
    complexity: "low",
},
{
    name: "convex_audit_diff",
    category: "deployment",
    tags: ["diff", "baseline", "trend", "new-issues", "fixed", "improving", "degrading", "comparison"],
    quickRef: {
        nextAction: "Focus on fixing new issues first, then tackle existing ones",
        nextTools: ["convex_export_sarif", "convex_quality_gate"],
        methodology: "convex_deploy_verification",
        relatedGotchas: [],
        confidence: "high",
    },
    phase: "deploy",
    complexity: "medium",
},
// ── Vector Search Tools ─────────────────
{
    name: "convex_audit_vector_search",
    category: "schema",
    tags: ["vector", "search", "embedding", "dimension", "similarity", "vectorIndex", "float64", "AI", "RAG"],
    quickRef: {
        nextAction: "Fix dimension mismatches and add filterFields to vector indexes for better performance",
        nextTools: ["convex_audit_schema", "convex_suggest_indexes"],
        methodology: "convex_schema_audit",
        relatedGotchas: [],
        confidence: "high",
    },
    phase: "audit",
    complexity: "medium",
},
// ── Scheduler Tools ─────────────────────
{
    name: "convex_audit_schedulers",
    category: "function",
    tags: ["scheduler", "runAfter", "runAt", "schedule", "cron", "infinite-loop", "backoff", "retry", "delayed"],
    quickRef: {
        nextAction: "Fix self-scheduling loops (add termination conditions) and implement exponential backoff",
        nextTools: ["convex_check_crons", "convex_audit_actions"],
        methodology: "convex_function_compliance",
        relatedGotchas: [],
        confidence: "high",
    },
    phase: "audit",
    complexity: "medium",
},
// ── Quality Gate Tools ──────────────────
{
    name: "convex_quality_gate",
    category: "deployment",
    tags: ["quality", "gate", "score", "grade", "threshold", "sonarqube", "metrics", "pass-fail", "A-F"],
    quickRef: {
        nextAction: "Fix blockers to raise your grade, then run again to verify improvement",
        nextTools: ["convex_audit_diff", "convex_export_sarif", "convex_pre_deploy_gate"],
        methodology: "convex_deploy_verification",
        relatedGotchas: [],
        confidence: "high",
    },
    phase: "deploy",
    complexity: "high",
},
|
|
414
488
|
];
|
|
415
489
|
export function getQuickRef(toolName) {
|
|
416
490
|
const entry = REGISTRY.find((e) => e.name === toolName);
|
|
@@ -0,0 +1,192 @@
|
|
|
1
|
+
import { readFileSync, existsSync, readdirSync } from "node:fs";
|
|
2
|
+
import { join, resolve } from "node:path";
|
|
3
|
+
import { getDb, genId } from "../db.js";
|
|
4
|
+
import { getQuickRef } from "./toolRegistry.js";
|
|
5
|
+
// ── Helpers ──────────────────────────────────────────────────────────
|
|
6
|
+
// Resolve the Convex functions directory: <root>/convex takes precedence over
// <root>/src/convex; returns null when neither exists.
function findConvexDir(projectDir) {
    for (const candidate of [join(projectDir, "convex"), join(projectDir, "src", "convex")]) {
        if (existsSync(candidate)) {
            return candidate;
        }
    }
    return null;
}
|
|
14
|
+
// Depth-first collection of .ts files under `dir`, preserving readdir order.
// node_modules and Convex's generated _generated directory are excluded; a
// missing directory yields an empty list.
function collectTsFiles(dir) {
    if (!existsSync(dir))
        return [];
    return readdirSync(dir, { withFileTypes: true }).flatMap((entry) => {
        const full = join(dir, entry.name);
        if (entry.isDirectory()) {
            return entry.name === "node_modules" || entry.name === "_generated"
                ? []
                : collectTsFiles(full);
        }
        return entry.isFile() && entry.name.endsWith(".ts") ? [full] : [];
    });
}
|
|
30
|
+
// Common embedding model output dimensions, keyed by vector size.
// Used to sanity-check `dimensions` in .vectorIndex definitions and to
// suggest the nearest standard size when an uncommon value is found.
const KNOWN_DIMENSIONS = {
    384: "all-MiniLM-L6-v2",
    512: "e5-small",
    768: "text-embedding-004 / all-mpnet-base-v2",
    1024: "e5-large / cohere-embed-v3",
    1536: "text-embedding-3-small / text-embedding-ada-002",
    3072: "text-embedding-3-large",
};
|
|
39
|
+
/**
 * Statically audit Convex vector-search usage under `convexDir`.
 *
 * Returns { issues, stats }. Issues cover: non-standard dimension sizes,
 * missing filterFields, v.array(v.number()) instead of v.array(v.float64()),
 * vectorSearch calls referencing undefined indexes, unused filters, and
 * code/schema dimension mismatches.
 *
 * Fix vs. previous version: ctx.vectorSearch(tableName, indexName, opts)
 * takes the TABLE name first and the INDEX name second. The old code captured
 * the first string argument and compared it against index names collected
 * from .vectorIndex("name", ...), so valid calls were flagged as "index not
 * defined" and real mismatches were missed. The index name is now taken from
 * the second string argument.
 */
function auditVectorSearch(convexDir) {
    const issues = [];
    const schemaPath = join(convexDir, "schema.ts");
    // Vector indexes declared in schema.ts. NOTE: `name` is the INDEX name
    // (the first argument of .vectorIndex(...)); this line-based scan cannot
    // recover which table the index is chained onto.
    const vectorIndexes = [];
    if (existsSync(schemaPath)) {
        const schema = readFileSync(schemaPath, "utf-8");
        const lines = schema.split("\n");
        for (let i = 0; i < lines.length; i++) {
            // Match .vectorIndex("name", { ... })
            const viMatch = lines[i].match(/\.vectorIndex\s*\(\s*["']([^"']+)["']/);
            if (viMatch) {
                // Look ahead a few lines for dimensions and filterFields
                const chunk = lines.slice(i, Math.min(i + 10, lines.length)).join("\n");
                const dimMatch = chunk.match(/dimensions\s*:\s*(\d+)/);
                const filterMatch = chunk.match(/filterFields\s*:\s*\[([^\]]*)\]/);
                const dims = dimMatch ? parseInt(dimMatch[1], 10) : 0;
                const filters = filterMatch
                    ? filterMatch[1].match(/["']([^"']+)["']/g)?.map(s => s.replace(/["']/g, "")) ?? []
                    : [];
                vectorIndexes.push({ name: viMatch[1], dimensions: dims, filterFields: filters, line: i + 1 });
                // Check: uncommon dimension size
                if (dims > 0 && !KNOWN_DIMENSIONS[dims]) {
                    const nearest = Object.keys(KNOWN_DIMENSIONS)
                        .map(Number)
                        .sort((a, b) => Math.abs(a - dims) - Math.abs(b - dims))[0];
                    issues.push({
                        severity: "warning",
                        location: `schema.ts:${i + 1}`,
                        message: `Vector index "${viMatch[1]}" has ${dims} dimensions — not a standard embedding size. Did you mean ${nearest} (${KNOWN_DIMENSIONS[nearest]})?`,
                        fix: `Verify your embedding model output size matches ${dims} dimensions`,
                    });
                }
                // Check: no filter fields — every search scans all vectors
                if (filters.length === 0) {
                    issues.push({
                        severity: "info",
                        location: `schema.ts:${i + 1}`,
                        message: `Vector index "${viMatch[1]}" has no filterFields. Vector searches will scan all vectors — add filters for better performance.`,
                        fix: 'Add filterFields: ["field1", "field2"] to narrow vector search scope',
                    });
                }
            }
            // Check: vector field declared as v.array(v.number()) instead of
            // v.array(v.float64()); only flag when nearby lines look vector-related.
            if (/v\.array\s*\(\s*v\.number\s*\(\s*\)\s*\)/.test(lines[i]) &&
                /embed|vector|embedding/i.test(lines.slice(Math.max(0, i - 3), i + 1).join("\n"))) {
                issues.push({
                    severity: "warning",
                    location: `schema.ts:${i + 1}`,
                    message: "Vector field uses v.array(v.number()) — Convex vector search requires v.array(v.float64()) for proper storage.",
                    fix: "Change to v.array(v.float64()) for vector fields",
                });
            }
        }
    }
    // Scan code files for .vectorSearch() usage
    const files = collectTsFiles(convexDir);
    let vectorSearchCallCount = 0;
    for (const filePath of files) {
        const content = readFileSync(filePath, "utf-8");
        const relativePath = filePath.replace(convexDir, "").replace(/^[\\/]/, "");
        const lines = content.split("\n");
        for (let i = 0; i < lines.length; i++) {
            const vsMatch = lines[i].match(/\.vectorSearch\s*\(\s*["']([^"']+)["']/);
            if (!vsMatch)
                continue;
            vectorSearchCallCount++;
            const tableName = vsMatch[1];
            const chunk = lines.slice(i, Math.min(i + 5, lines.length)).join("\n");
            // The INDEX name is the SECOND string argument of vectorSearch.
            // If it isn't found within the lookahead window (unusual multiline
            // formatting), skip index-dependent checks rather than guess.
            const idxArgMatch = chunk.match(/\.vectorSearch\s*\(\s*["'][^"']+["']\s*,\s*["']([^"']+)["']/);
            const indexName = idxArgMatch ? idxArgMatch[1] : null;
            const matchingIdx = indexName !== null
                ? vectorIndexes.find(vi => vi.name === indexName)
                : undefined;
            // Check: referenced index must be defined in schema.ts
            if (indexName !== null && !matchingIdx && vectorIndexes.length > 0) {
                issues.push({
                    severity: "critical",
                    location: `${relativePath}:${i + 1}`,
                    message: `vectorSearch on table "${tableName}" references index "${indexName}" which is not defined in schema.ts.`,
                    fix: `Add a .vectorIndex("${indexName}", { ... }) to the "${tableName}" table in schema.ts`,
                });
            }
            // Check: no filter parameter when filterFields exist
            if (matchingIdx && matchingIdx.filterFields.length > 0 && !/filter\s*:/.test(chunk)) {
                issues.push({
                    severity: "info",
                    location: `${relativePath}:${i + 1}`,
                    message: `vectorSearch on "${indexName}" doesn't use filter — available filterFields: ${matchingIdx.filterFields.join(", ")}`,
                    fix: "Add filter parameter to narrow results and improve performance",
                });
            }
            // Check: hardcoded vector dimensions in code (should match schema)
            const vecLiteralMatch = chunk.match(/new\s+Float64Array\s*\(\s*(\d+)\s*\)|Array\s*\(\s*(\d+)\s*\)\.fill/);
            if (vecLiteralMatch && matchingIdx) {
                const codeDims = parseInt(vecLiteralMatch[1] || vecLiteralMatch[2], 10);
                if (codeDims !== matchingIdx.dimensions && matchingIdx.dimensions > 0) {
                    issues.push({
                        severity: "critical",
                        location: `${relativePath}:${i + 1}`,
                        message: `Vector dimensions mismatch: code uses ${codeDims} but schema defines ${matchingIdx.dimensions} for index "${indexName}".`,
                        fix: `Ensure embedding model output (${codeDims}) matches schema dimensions (${matchingIdx.dimensions})`,
                    });
                }
            }
        }
    }
    // NOTE: `tablesWithVectors` actually contains index names (see note on
    // vectorIndexes above); the stats key is kept for payload compatibility.
    const tablesWithVectors = [...new Set(vectorIndexes.map(vi => vi.name))];
    const dimensions = [...new Set(vectorIndexes.map(vi => vi.dimensions).filter(d => d > 0))];
    return {
        issues,
        stats: {
            vectorIndexCount: vectorIndexes.length,
            vectorSearchCallCount,
            tablesWithVectors,
            dimensions,
        },
    };
}
|
|
154
|
+
// ── Tool Definition ─────────────────────────────────────────────────
|
|
155
|
+
const vectorSearchAuditTool = {
    name: "convex_audit_vector_search",
    description: "Audit Convex vector search implementation: validates vectorIndex dimensions against known embedding models, checks for missing filterFields, v.array(v.float64()) usage, dimension mismatches between schema and code, and undefined index references.",
    inputSchema: {
        type: "object",
        properties: {
            projectDir: {
                type: "string",
                description: "Absolute path to the project root containing a convex/ directory",
            },
        },
        required: ["projectDir"],
    },
    // Runs the static vector-search audit, records the run in the local DB,
    // and returns a severity summary plus at most the first 30 findings.
    handler: async (args) => {
        const projectDir = resolve(args.projectDir);
        const convexDir = findConvexDir(projectDir);
        if (convexDir === null) {
            return { error: "No convex/ directory found" };
        }
        const { issues, stats } = auditVectorSearch(convexDir);
        // Persist this run so later reporting tools can query it.
        getDb()
            .prepare("INSERT INTO audit_results (id, project_dir, audit_type, issues_json, issue_count) VALUES (?, ?, ?, ?, ?)")
            .run(genId("audit"), projectDir, "vector_search", JSON.stringify(issues), issues.length);
        const countBySeverity = (level) => issues.filter((issue) => issue.severity === level).length;
        return {
            summary: {
                ...stats,
                totalIssues: issues.length,
                critical: countBySeverity("critical"),
                warnings: countBySeverity("warning"),
                knownDimensions: KNOWN_DIMENSIONS,
            },
            issues: issues.slice(0, 30),
            quickRef: getQuickRef("convex_audit_vector_search"),
        };
    },
};
// Exported as an array to match the other tool modules in this package.
export const vectorSearchTools = [vectorSearchAuditTool];
|
|
192
|
+
//# sourceMappingURL=vectorSearchTools.js.map
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@homenshum/convex-mcp-nodebench",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.8.0",
|
|
4
4
|
"description": "Convex-specific MCP server applying NodeBench self-instruct diligence patterns to Convex development. Schema audit, function compliance, deployment gates, persistent gotcha DB, and methodology guidance. Complements Context7 (raw docs) and official Convex MCP (deployment introspection) with structured verification workflows.",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|