@homenshum/convex-mcp-nodebench 0.7.0 → 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,204 @@
+ import { resolve } from "node:path";
+ import { getDb, genId } from "../db.js";
+ import { getQuickRef } from "./toolRegistry.js";
+ const DEFAULT_THRESHOLDS = {
+     maxCritical: 0,
+     maxWarnings: 50,
+     minAuthCoveragePercent: 10,
+     maxAsAnyCasts: 500,
+     maxUnboundedCollects: 100,
+     maxDanglingRefs: 20,
+ };
+ function runQualityGate(projectDir, thresholds) {
+     const db = getDb();
+     const checks = [];
+     // Helper: get latest audit by type
+     function getLatest(auditType) {
+         return db.prepare("SELECT issues_json, issue_count FROM audit_results WHERE project_dir = ? AND audit_type = ? ORDER BY audited_at DESC LIMIT 1").get(projectDir, auditType) ?? null;
+     }
+     function countBySeverity(json, severity) {
+         try {
+             const issues = JSON.parse(json);
+             return Array.isArray(issues) ? issues.filter((i) => i.severity === severity).length : 0;
+         }
+         catch {
+             return 0;
+         }
+     }
+     // Aggregate critical/warning counts across all audit types
+     const auditTypes = [
+         "schema", "functions", "authorization", "query_efficiency",
+         "action_audit", "type_safety", "transaction_safety", "storage",
+         "pagination", "data_modeling", "vector_search", "scheduler_audit",
+     ];
+     let totalCritical = 0;
+     let totalWarnings = 0;
+     let auditsRun = 0;
+     for (const type of auditTypes) {
+         const latest = getLatest(type);
+         if (!latest)
+             continue;
+         auditsRun++;
+         totalCritical += countBySeverity(latest.issues_json, "critical");
+         totalWarnings += countBySeverity(latest.issues_json, "warning");
+     }
+     // Check 1: Critical issues
+     checks.push({
+         metric: "critical_issues",
+         passed: totalCritical <= thresholds.maxCritical,
+         actual: totalCritical,
+         threshold: thresholds.maxCritical,
+         severity: totalCritical > thresholds.maxCritical ? "blocker" : "info",
+     });
+     // Check 2: Warning issues
+     checks.push({
+         metric: "warning_issues",
+         passed: totalWarnings <= thresholds.maxWarnings,
+         actual: totalWarnings,
+         threshold: thresholds.maxWarnings,
+         severity: totalWarnings > thresholds.maxWarnings ? "warning" : "info",
+     });
+     // Check 3: Authorization coverage
+     const authAudit = getLatest("authorization");
+     if (authAudit) {
+         try {
+             const issues = JSON.parse(authAudit.issues_json);
+             // The auth audit stores per-issue records rather than a coverage
+             // summary, so use the issue count as a proxy for coverage.
+             const authIssues = Array.isArray(issues) ? issues.length : 0;
+             // Inverse metric: fewer auth issues indicates better coverage.
+             checks.push({
+                 metric: "auth_issues",
+                 passed: true, // Auth coverage is informational in gate
+                 actual: authIssues,
+                 threshold: "tracked",
+                 severity: authIssues > 50 ? "warning" : "info",
+             });
+         }
+         catch { /* skip */ }
+     }
+     // Check 4: Type safety (as any casts)
+     const typeSafety = getLatest("type_safety");
+     if (typeSafety) {
+         try {
+             const issues = JSON.parse(typeSafety.issues_json);
+             const asAnyIssues = Array.isArray(issues)
+                 ? issues.filter((i) => i.message?.includes("as any")).length
+                 : 0;
+             // Each issue represents one file; sum the per-file cast counts parsed from the messages
+             const actualCasts = Array.isArray(issues)
+                 ? issues.reduce((sum, i) => {
+                     const countMatch = i.message?.match(/(\d+)\s+`as any`/);
+                     return sum + (countMatch ? parseInt(countMatch[1], 10) : 0);
+                 }, 0)
+                 : 0;
+             checks.push({
+                 metric: "as_any_casts",
+                 passed: actualCasts <= thresholds.maxAsAnyCasts,
+                 actual: actualCasts,
+                 threshold: thresholds.maxAsAnyCasts,
+                 severity: actualCasts > thresholds.maxAsAnyCasts ? "warning" : "info",
+             });
+         }
+         catch { /* skip */ }
+     }
+     // Check 5: Unbounded collects
+     const queryEfficiency = getLatest("query_efficiency");
+     if (queryEfficiency) {
+         try {
+             const issues = JSON.parse(queryEfficiency.issues_json);
+             const unbounded = Array.isArray(issues)
+                 ? issues.filter((i) => i.message?.includes(".collect()")).length
+                 : 0;
+             checks.push({
+                 metric: "unbounded_collects",
+                 passed: unbounded <= thresholds.maxUnboundedCollects,
+                 actual: unbounded,
+                 threshold: thresholds.maxUnboundedCollects,
+                 severity: unbounded > thresholds.maxUnboundedCollects ? "warning" : "info",
+             });
+         }
+         catch { /* skip */ }
+     }
+     // Check 6: Dangling references
+     const dataModeling = getLatest("data_modeling");
+     if (dataModeling) {
+         try {
+             const issues = JSON.parse(dataModeling.issues_json);
+             const dangling = Array.isArray(issues)
+                 ? issues.filter((i) => i.message?.includes("dangling") || i.message?.includes("non-existent")).length
+                 : 0;
+             checks.push({
+                 metric: "dangling_refs",
+                 passed: dangling <= thresholds.maxDanglingRefs,
+                 actual: dangling,
+                 threshold: thresholds.maxDanglingRefs,
+                 severity: dangling > thresholds.maxDanglingRefs ? "warning" : "info",
+             });
+         }
+         catch { /* skip */ }
+     }
+     // Check 7: Audit coverage — how many audit types have been run
+     checks.push({
+         metric: "audit_coverage",
+         passed: auditsRun >= 3,
+         actual: `${auditsRun}/${auditTypes.length}`,
+         threshold: "3+",
+         severity: auditsRun < 3 ? "warning" : "info",
+     });
+     // Calculate score (0-100)
+     const passedChecks = checks.filter(c => c.passed).length;
+     const totalChecks = checks.length;
+     const blockerCount = checks.filter(c => !c.passed && c.severity === "blocker").length;
+     const score = blockerCount > 0 ? Math.min(40, Math.round(100 * passedChecks / totalChecks))
+         : Math.round(100 * passedChecks / totalChecks);
+     const grade = score >= 90 ? "A" : score >= 75 ? "B" : score >= 60 ? "C" : score >= 40 ? "D" : "F";
+     const passed = blockerCount === 0 && checks.filter(c => !c.passed && c.severity === "warning").length < 3;
+     return { passed, checks, score, grade };
+ }
+ // ── Tool Definition ─────────────────────────────────────────────────
+ export const qualityGateTools = [
+     {
+         name: "convex_quality_gate",
+         description: "Run a configurable quality gate across all stored audit results. Like SonarQube's quality gate — scores your project (A-F), checks configurable thresholds (max critical issues, min auth coverage, max as-any casts), and returns pass/fail with detailed metrics. Run individual audit tools first to populate data.",
+         inputSchema: {
+             type: "object",
+             properties: {
+                 projectDir: {
+                     type: "string",
+                     description: "Absolute path to the project root",
+                 },
+                 thresholds: {
+                     type: "object",
+                     description: "Custom thresholds. Defaults: maxCritical=0, maxWarnings=50, minAuthCoveragePercent=10, maxAsAnyCasts=500, maxUnboundedCollects=100, maxDanglingRefs=20",
+                     properties: {
+                         maxCritical: { type: "number" },
+                         maxWarnings: { type: "number" },
+                         minAuthCoveragePercent: { type: "number" },
+                         maxAsAnyCasts: { type: "number" },
+                         maxUnboundedCollects: { type: "number" },
+                         maxDanglingRefs: { type: "number" },
+                     },
+                 },
+             },
+             required: ["projectDir"],
+         },
+         handler: async (args) => {
+             const projectDir = resolve(args.projectDir);
+             const thresholds = {
+                 ...DEFAULT_THRESHOLDS,
+                 ...(args.thresholds ?? {}),
+             };
+             const result = runQualityGate(projectDir, thresholds);
+             // Store quality gate result
+             const db = getDb();
+             db.prepare("INSERT INTO deploy_checks (id, project_dir, check_type, passed, findings) VALUES (?, ?, ?, ?, ?)").run(genId("deploy"), projectDir, "quality_gate", result.passed ? 1 : 0, JSON.stringify(result));
+             return {
+                 ...result,
+                 thresholdsUsed: thresholds,
+                 quickRef: getQuickRef("convex_quality_gate"),
+             };
+         },
+     },
+ ];
+ //# sourceMappingURL=qualityGateTools.js.map
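
The scoring rule above is worth seeing in isolation: a failed blocker check caps the score at 40 (at best a "D") no matter how many other checks pass. A minimal standalone sketch, using a hypothetical scoreGate helper that mirrors the arithmetic in runQualityGate:

// Hypothetical helper mirroring the scoring arithmetic in runQualityGate above.
function scoreGate(checks) {
    const passedCount = checks.filter((c) => c.passed).length;
    const raw = Math.round((100 * passedCount) / checks.length);
    const hasBlocker = checks.some((c) => !c.passed && c.severity === "blocker");
    return hasBlocker ? Math.min(40, raw) : raw;
}
// 6 of 7 checks pass, the failure is only a warning: score 86, grade "B".
// 6 of 7 checks pass, the failure is a blocker: min(40, 86) = 40, grade "D".
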
@@ -0,0 +1,2 @@
+ import type { McpTool } from "../types.js";
+ export declare const reportingTools: McpTool[];
@@ -0,0 +1,240 @@
+ import { resolve } from "node:path";
+ import { getDb, genId } from "../db.js";
+ import { getQuickRef } from "./toolRegistry.js";
+ function severityToSarif(sev) {
+     if (sev === "critical")
+         return "error";
+     if (sev === "warning")
+         return "warning";
+     return "note";
+ }
+ function buildSarif(projectDir, auditTypes, limit) {
+     const db = getDb();
+     const placeholders = auditTypes.map(() => "?").join(", ");
+     const rows = db.prepare(`SELECT audit_type, issues_json, issue_count, audited_at
+         FROM audit_results
+         WHERE project_dir = ? AND audit_type IN (${placeholders})
+         ORDER BY audited_at DESC`).all(projectDir, ...auditTypes);
+     // Dedupe: keep only latest per audit_type
+     const seen = new Set();
+     const latestRows = [];
+     for (const row of rows) {
+         if (!seen.has(row.audit_type)) {
+             seen.add(row.audit_type);
+             latestRows.push(row);
+         }
+     }
+     const rulesMap = new Map();
+     const results = [];
+     for (const row of latestRows) {
+         let issues;
+         try {
+             issues = JSON.parse(row.issues_json);
+         }
+         catch {
+             continue;
+         }
+         if (!Array.isArray(issues))
+             continue;
+         for (const issue of issues.slice(0, limit)) {
+             const ruleId = `convex/${row.audit_type}/${issue.message?.slice(0, 40)?.replace(/[^a-zA-Z0-9]/g, "-") ?? "unknown"}`;
+             if (!rulesMap.has(ruleId)) {
+                 rulesMap.set(ruleId, {
+                     id: ruleId,
+                     name: row.audit_type,
+                     shortDescription: { text: issue.message?.slice(0, 120) ?? "Issue found" },
+                     defaultConfiguration: { level: severityToSarif(issue.severity ?? "warning") },
+                 });
+             }
+             // Parse location "file:line" format
+             const loc = issue.location ?? "";
+             const colonIdx = loc.lastIndexOf(":");
+             const file = colonIdx > 0 ? loc.slice(0, colonIdx) : loc;
+             const line = colonIdx > 0 ? parseInt(loc.slice(colonIdx + 1), 10) : undefined;
+             const sarifResult = {
+                 ruleId,
+                 level: severityToSarif(issue.severity ?? "warning"),
+                 message: { text: issue.message ?? "Issue detected" },
+                 locations: [{
+                         physicalLocation: {
+                             artifactLocation: { uri: file || "unknown" },
+                             ...(line && !isNaN(line) ? { region: { startLine: line } } : {}),
+                         },
+                     }],
+             };
+             if (issue.fix) {
+                 sarifResult.fixes = [{ description: { text: issue.fix } }];
+             }
+             results.push(sarifResult);
+         }
+     }
+     return {
+         $schema: "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/main/sarif-2.1/schema/sarif-schema-2.1.0.json",
+         version: "2.1.0",
+         runs: [{
+                 tool: {
+                     driver: {
+                         name: "convex-mcp-nodebench",
+                         version: "0.9.0",
+                         informationUri: "https://www.npmjs.com/package/@homenshum/convex-mcp-nodebench",
+                         rules: [...rulesMap.values()],
+                     },
+                 },
+                 results,
+             }],
+     };
+ }
+ function fingerprint(issue) {
+     // Deterministic fingerprint from location + message prefix (ignoring counts)
+     const loc = (issue.location ?? "").replace(/:\d+$/, ""); // strip line number for stability
+     const msgPrefix = (issue.message ?? "").slice(0, 60);
+     return `${loc}::${msgPrefix}`;
+ }
+ function computeBaselineDiff(projectDir) {
+     const db = getDb();
+     // Get all audit types that have at least 2 runs
+     const auditTypes = db.prepare(`SELECT DISTINCT audit_type FROM audit_results
+         WHERE project_dir = ?
+         GROUP BY audit_type
+         HAVING COUNT(*) >= 2`).all(projectDir);
+     const newIssues = [];
+     const fixedIssues = [];
+     const existingIssues = [];
+     for (const { audit_type } of auditTypes) {
+         const rows = db.prepare(`SELECT issues_json FROM audit_results
+             WHERE project_dir = ? AND audit_type = ?
+             ORDER BY audited_at DESC LIMIT 2`).all(projectDir, audit_type);
+         if (rows.length < 2)
+             continue;
+         let currentIssues, previousIssues;
+         try {
+             currentIssues = JSON.parse(rows[0].issues_json);
+             previousIssues = JSON.parse(rows[1].issues_json);
+         }
+         catch {
+             continue;
+         }
+         if (!Array.isArray(currentIssues) || !Array.isArray(previousIssues))
+             continue;
+         const prevFingerprints = new Set(previousIssues.map(fingerprint));
+         const currFingerprints = new Set(currentIssues.map(fingerprint));
+         // New: in current but not in previous
+         for (const issue of currentIssues) {
+             const fp = fingerprint(issue);
+             const diffIssue = {
+                 status: prevFingerprints.has(fp) ? "existing" : "new",
+                 auditType: audit_type,
+                 severity: issue.severity ?? "warning",
+                 location: issue.location ?? "",
+                 message: issue.message ?? "",
+                 fix: issue.fix,
+             };
+             if (diffIssue.status === "new")
+                 newIssues.push(diffIssue);
+             else
+                 existingIssues.push(diffIssue);
+         }
+         // Fixed: in previous but not in current
+         for (const issue of previousIssues) {
+             const fp = fingerprint(issue);
+             if (!currFingerprints.has(fp)) {
+                 fixedIssues.push({
+                     status: "fixed",
+                     auditType: audit_type,
+                     severity: issue.severity ?? "warning",
+                     location: issue.location ?? "",
+                     message: issue.message ?? "",
+                 });
+             }
+         }
+     }
+     const trend = fixedIssues.length > newIssues.length ? "improving" :
+         newIssues.length > fixedIssues.length ? "degrading" : "stable";
+     return {
+         newIssues,
+         fixedIssues,
+         existingIssues,
+         summary: {
+             totalNew: newIssues.length,
+             totalFixed: fixedIssues.length,
+             totalExisting: existingIssues.length,
+             trend,
+         },
+     };
+ }
+ // ── Tool Definitions ────────────────────────────────────────────────
+ export const reportingTools = [
+     {
+         name: "convex_export_sarif",
+         description: "Export stored audit results as SARIF 2.1.0 JSON. SARIF is the industry standard for static analysis results — integrates with GitHub Code Scanning, VS Code Problems panel, and CI pipelines. Outputs file:line locations, severity levels, and fix suggestions.",
+         inputSchema: {
+             type: "object",
+             properties: {
+                 projectDir: {
+                     type: "string",
+                     description: "Absolute path to the project root",
+                 },
+                 auditTypes: {
+                     type: "array",
+                     items: { type: "string" },
+                     description: "Which audit types to include. Defaults to all. Options: schema, functions, authorization, query_efficiency, action_audit, type_safety, transaction_safety, storage, pagination, data_modeling, dev_setup, migration_plan",
+                 },
+                 maxResults: {
+                     type: "number",
+                     description: "Max results per audit type (default 100)",
+                 },
+             },
+             required: ["projectDir"],
+         },
+         handler: async (args) => {
+             const projectDir = resolve(args.projectDir);
+             const allTypes = [
+                 "schema", "functions", "authorization", "query_efficiency",
+                 "action_audit", "type_safety", "transaction_safety", "storage",
+                 "pagination", "data_modeling", "dev_setup", "migration_plan",
+             ];
+             const auditTypes = args.auditTypes?.length ? args.auditTypes : allTypes;
+             const sarif = buildSarif(projectDir, auditTypes, args.maxResults ?? 100);
+             const resultCount = sarif.runs?.[0]?.results?.length ?? 0;
+             return {
+                 sarif,
+                 summary: {
+                     format: "SARIF 2.1.0",
+                     totalResults: resultCount,
+                     auditTypesIncluded: auditTypes,
+                     usage: "Pipe this JSON to a .sarif file, then upload to GitHub Code Scanning or open in VS Code SARIF Viewer",
+                 },
+                 quickRef: getQuickRef("convex_export_sarif"),
+             };
+         },
+     },
+     {
+         name: "convex_audit_diff",
+         description: "Compare the latest audit run against the previous run to show new issues, fixed issues, and trend direction (improving/stable/degrading). Like SonarQube's new code analysis — tells you whether your changes introduced or resolved issues.",
+         inputSchema: {
+             type: "object",
+             properties: {
+                 projectDir: {
+                     type: "string",
+                     description: "Absolute path to the project root",
+                 },
+             },
+             required: ["projectDir"],
+         },
+         handler: async (args) => {
+             const projectDir = resolve(args.projectDir);
+             const diff = computeBaselineDiff(projectDir);
+             // Store the diff result
+             const db = getDb();
+             db.prepare("INSERT INTO audit_results (id, project_dir, audit_type, issues_json, issue_count) VALUES (?, ?, ?, ?, ?)").run(genId("audit"), projectDir, "baseline_diff", JSON.stringify(diff.summary), diff.summary.totalNew);
+             return {
+                 ...diff,
+                 newIssues: diff.newIssues.slice(0, 30),
+                 fixedIssues: diff.fixedIssues.slice(0, 30),
+                 existingIssues: undefined, // Too verbose — available via full audit
+                 quickRef: getQuickRef("convex_audit_diff"),
+             };
+         },
+     },
+ ];
+ //# sourceMappingURL=reportingTools.js.map
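
The audit diff hinges on fingerprint(): because the trailing line number is stripped from the location and only the first 60 characters of the message are kept, an issue that merely moves within a file is matched to its previous occurrence instead of being double-counted. A small sketch with hypothetical issue records:

// Hypothetical issue records from two consecutive runs of the same audit.
const previous = { location: "messages.ts:42", message: "Unbounded .collect() on messages", severity: "warning" };
const current = { location: "messages.ts:57", message: "Unbounded .collect() on messages", severity: "warning" };
// fingerprint(previous) === fingerprint(current) === "messages.ts::Unbounded .collect() on messages",
// so the issue is classified as "existing" rather than as one "fixed" plus one "new".
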
@@ -0,0 +1,2 @@
+ import type { McpTool } from "../types.js";
+ export declare const schedulerTools: McpTool[];
@@ -0,0 +1,197 @@
+ import { readFileSync, existsSync, readdirSync } from "node:fs";
+ import { join, resolve } from "node:path";
+ import { getDb, genId } from "../db.js";
+ import { getQuickRef } from "./toolRegistry.js";
+ // ── Helpers ──────────────────────────────────────────────────────────
+ function findConvexDir(projectDir) {
+     const candidates = [join(projectDir, "convex"), join(projectDir, "src", "convex")];
+     for (const c of candidates) {
+         if (existsSync(c))
+             return c;
+     }
+     return null;
+ }
+ function collectTsFiles(dir) {
+     const results = [];
+     if (!existsSync(dir))
+         return results;
+     const entries = readdirSync(dir, { withFileTypes: true });
+     for (const entry of entries) {
+         const full = join(dir, entry.name);
+         if (entry.isDirectory() && entry.name !== "node_modules" && entry.name !== "_generated") {
+             results.push(...collectTsFiles(full));
+         }
+         else if (entry.isFile() && entry.name.endsWith(".ts")) {
+             results.push(full);
+         }
+     }
+     return results;
+ }
+ function auditSchedulers(convexDir) {
+     const files = collectTsFiles(convexDir);
+     const issues = [];
+     let totalSchedulerCalls = 0;
+     let runAfterCalls = 0;
+     let runAtCalls = 0;
+     let selfSchedulingFunctions = 0;
+     const filesWithSchedulers = new Set();
+     for (const filePath of files) {
+         const content = readFileSync(filePath, "utf-8");
+         const relativePath = filePath.replace(convexDir, "").replace(/^[\\/]/, "");
+         const lines = content.split("\n");
+         // Find all exported Convex functions (query/mutation/action) and their bodies;
+         const funcPattern = /export\s+(?:const\s+(\w+)\s*=|default)\s+(query|internalQuery|mutation|internalMutation|action|internalAction)\s*\(/g;
+         let m;
+         while ((m = funcPattern.exec(content)) !== null) {
+             const funcName = m[1] || "default";
+             const funcType = m[2];
+             const startLine = content.slice(0, m.index).split("\n").length - 1;
+             // Extract the function body by tracking brace depth
+             let depth = 0;
+             let foundOpen = false;
+             let endLine = Math.min(startLine + 100, lines.length);
+             for (let j = startLine; j < lines.length; j++) {
+                 for (const ch of lines[j]) {
+                     if (ch === "{") {
+                         depth++;
+                         foundOpen = true;
+                     }
+                     if (ch === "}")
+                         depth--;
+                 }
+                 if (foundOpen && depth <= 0) {
+                     endLine = j + 1;
+                     break;
+                 }
+             }
+             const body = lines.slice(startLine, endLine).join("\n");
+             // Check scheduler calls in body
+             const runAfterMatches = [...body.matchAll(/ctx\.scheduler\.runAfter\s*\(/g)];
+             const runAtMatches = [...body.matchAll(/ctx\.scheduler\.runAt\s*\(/g)];
+             const allSchedulerCalls = runAfterMatches.length + runAtMatches.length;
+             if (allSchedulerCalls === 0)
+                 continue;
+             filesWithSchedulers.add(relativePath);
+             totalSchedulerCalls += allSchedulerCalls;
+             runAfterCalls += runAfterMatches.length;
+             runAtCalls += runAtMatches.length;
+             // Check 1: Self-scheduling (infinite loop risk)
+             // Detect: function schedules itself by name
+             const selfRefPattern = new RegExp(`ctx\\.scheduler\\.run(?:After|At)\\s*\\([^,]*,\\s*(?:internal|api)\\.[^,]*\\.${funcName}\\b`);
+             if (selfRefPattern.test(body)) {
+                 selfSchedulingFunctions++;
+                 // Check if there's a termination condition
+                 const hasTermination = /if\s*\(|return\s+(?:null|undefined|void)|\.length\s*(?:===?|<=?)\s*0/.test(body);
+                 issues.push({
+                     severity: hasTermination ? "warning" : "critical",
+                     location: `${relativePath}:${startLine + 1}`,
+                     functionName: funcName,
+                     message: `${funcType} "${funcName}" schedules itself${hasTermination ? " (has conditional guard)" : " without clear termination — infinite loop risk"}.`,
+                     fix: hasTermination
+                         ? "Verify the termination condition covers all edge cases"
+                         : "Add a termination condition (max retries, empty queue check) before self-scheduling",
+                 });
+             }
+             // Check 2: Very short delay (runAfter takes milliseconds; < 1000 ms) — may indicate missing backoff
+             for (const match of runAfterMatches) {
+                 // match.index is relative to body; derive the 1-based line number in the file
+                 const callLine = startLine + body.slice(0, match.index).split("\n").length;
+                 const afterCall = body.slice(match.index, match.index + 100);
+                 const delayMatch = afterCall.match(/runAfter\s*\(\s*(\d+(?:\.\d+)?)\s*[,)]/);
+                 if (delayMatch) {
+                     const delay = parseFloat(delayMatch[1]);
+                     if (delay < 1000) {
+                         issues.push({
+                             severity: "warning",
+                             location: `${relativePath}:${callLine}`,
+                             functionName: funcName,
+                             message: `scheduler.runAfter(${delay}, ...) uses a sub-second delay (runAfter takes milliseconds). In retry/loop patterns this can overwhelm the scheduler.`,
+                             fix: "Use a delay of at least 1000 ms. For retries, implement exponential backoff (e.g., delay * 2 ** attempt)",
+                         });
+                     }
+                 }
+             }
+             // Check 3: Scheduler in action without try/catch
+             if ((funcType === "action" || funcType === "internalAction") && allSchedulerCalls > 0) {
+                 // Check if the scheduler call is wrapped in try/catch
+                 if (!/try\s*\{/.test(body)) {
+                     issues.push({
+                         severity: "info",
+                         location: `${relativePath}:${startLine + 1}`,
+                         functionName: funcName,
+                         message: `${funcType} "${funcName}" uses scheduler without try/catch. If the action fails before scheduling, work may be lost.`,
+                         fix: "Wrap scheduler calls in try/catch or move scheduling to a mutation for transactional guarantees",
+                     });
+                 }
+             }
+             // Check 4: Multiple scheduler calls in same function (fan-out)
+             if (allSchedulerCalls > 3) {
+                 issues.push({
+                     severity: "info",
+                     location: `${relativePath}:${startLine + 1}`,
+                     functionName: funcName,
+                     message: `${funcType} "${funcName}" makes ${allSchedulerCalls} scheduler calls. Consider if a single orchestrator action would be cleaner.`,
+                     fix: "Group related work into fewer scheduled calls or use a queue-based pattern",
+                 });
+             }
+             // Check 5: Scheduling from a query (not possible — queries are read-only)
+             if (funcType.includes("Query") || funcType === "query" || funcType === "internalQuery") {
+                 issues.push({
+                     severity: "critical",
+                     location: `${relativePath}:${startLine + 1}`,
+                     functionName: funcName,
+                     message: `Query "${funcName}" tries to use ctx.scheduler — queries are read-only and cannot schedule functions.`,
+                     fix: "Move scheduler calls to a mutation or action",
+                 });
+             }
+         }
+     }
+     return {
+         issues,
+         stats: {
+             totalSchedulerCalls,
+             runAfterCalls,
+             runAtCalls,
+             selfSchedulingFunctions,
+             filesWithSchedulers: filesWithSchedulers.size,
+         },
+     };
+ }
+ // ── Tool Definition ─────────────────────────────────────────────────
+ export const schedulerTools = [
+     {
+         name: "convex_audit_schedulers",
+         description: "Audit Convex scheduled function usage (ctx.scheduler.runAfter/runAt): detects infinite self-scheduling loops, sub-second delays without backoff, scheduler calls in queries (impossible), unprotected scheduler calls in actions, and excessive fan-out patterns.",
+         inputSchema: {
+             type: "object",
+             properties: {
+                 projectDir: {
+                     type: "string",
+                     description: "Absolute path to the project root containing a convex/ directory",
+                 },
+             },
+             required: ["projectDir"],
+         },
+         handler: async (args) => {
+             const projectDir = resolve(args.projectDir);
+             const convexDir = findConvexDir(projectDir);
+             if (!convexDir) {
+                 return { error: "No convex/ directory found" };
+             }
+             const { issues, stats } = auditSchedulers(convexDir);
+             const db = getDb();
+             db.prepare("INSERT INTO audit_results (id, project_dir, audit_type, issues_json, issue_count) VALUES (?, ?, ?, ?, ?)").run(genId("audit"), projectDir, "scheduler_audit", JSON.stringify(issues), issues.length);
+             return {
+                 summary: {
+                     ...stats,
+                     totalIssues: issues.length,
+                     critical: issues.filter(i => i.severity === "critical").length,
+                     warnings: issues.filter(i => i.severity === "warning").length,
+                 },
+                 issues: issues.slice(0, 30),
+                 quickRef: getQuickRef("convex_audit_schedulers"),
+             };
+         },
+     },
+ ];
+ //# sourceMappingURL=schedulerTools.js.map
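
For reference, a minimal sketch of the pattern Check 1 flags, using a hypothetical convex/cleanup.ts file with hypothetical jobs table and drainQueue names: the function schedules itself and its body contains none of the guard patterns the hasTermination heuristic looks for, so it would be reported as critical.

// Hypothetical convex/cleanup.ts: flagged critical by convex_audit_schedulers
import { internalMutation } from "./_generated/server";
import { internal } from "./_generated/api";

export const drainQueue = internalMutation({
    handler: async (ctx) => {
        const batch = await ctx.db.query("jobs").take(10);
        for (const job of batch) await ctx.db.delete(job._id);
        // Reschedules itself with no guard: matched by selfRefPattern, no hasTermination hit
        await ctx.scheduler.runAfter(0, internal.cleanup.drainQueue, {});
    },
});

Adding a guard such as `if (batch.length === 0) return;` before the runAfter call would satisfy the hasTermination heuristic and downgrade the finding to a warning; a delay of at least 1000 ms would also avoid the sub-second-delay warning from Check 2.
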