@eduardbar/drift 1.4.0 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/.github/actions/drift-review/README.md +4 -2
  2. package/.github/actions/drift-review/action.yml +22 -5
  3. package/.github/actions/drift-scan/README.md +3 -3
  4. package/.github/actions/drift-scan/action.yml +1 -1
  5. package/.github/workflows/publish-vscode.yml +1 -3
  6. package/.github/workflows/publish.yml +8 -0
  7. package/.github/workflows/quality.yml +15 -0
  8. package/.github/workflows/reusable-quality-checks.yml +95 -0
  9. package/.github/workflows/review-pr.yml +0 -1
  10. package/AGENTS.md +2 -2
  11. package/CHANGELOG.md +14 -1
  12. package/README.md +30 -3
  13. package/benchmarks/fixtures/critical/drift.config.ts +21 -0
  14. package/benchmarks/fixtures/critical/src/app/user-service.ts +30 -0
  15. package/benchmarks/fixtures/critical/src/domain/entities.ts +19 -0
  16. package/benchmarks/fixtures/critical/src/domain/policies.ts +22 -0
  17. package/benchmarks/fixtures/critical/src/index.ts +10 -0
  18. package/benchmarks/fixtures/critical/src/infra/memory-user-repo.ts +14 -0
  19. package/benchmarks/perf-budget.v1.json +27 -0
  20. package/dist/benchmark.js +12 -0
  21. package/dist/cli.js +2 -2
  22. package/dist/doctor.d.ts +21 -0
  23. package/dist/doctor.js +10 -3
  24. package/dist/guard-baseline.d.ts +12 -0
  25. package/dist/guard-baseline.js +57 -0
  26. package/dist/guard-metrics.d.ts +6 -0
  27. package/dist/guard-metrics.js +39 -0
  28. package/dist/guard-types.d.ts +2 -1
  29. package/dist/guard.d.ts +3 -1
  30. package/dist/guard.js +9 -70
  31. package/dist/index.d.ts +1 -1
  32. package/dist/index.js +1 -1
  33. package/dist/init.js +1 -1
  34. package/dist/output-metadata.d.ts +2 -0
  35. package/dist/output-metadata.js +2 -0
  36. package/dist/trust.d.ts +2 -1
  37. package/dist/trust.js +1 -1
  38. package/dist/types.d.ts +1 -1
  39. package/docs/AGENTS.md +1 -1
  40. package/package.json +10 -4
  41. package/schemas/drift-doctor.v1.json +57 -0
  42. package/schemas/drift-guard.v1.json +298 -0
  43. package/scripts/check-docs-drift.mjs +154 -0
  44. package/scripts/check-performance-budget.mjs +360 -0
  45. package/scripts/check-runtime-policy.mjs +66 -0
  46. package/src/benchmark.ts +17 -0
  47. package/src/cli.ts +2 -2
  48. package/src/doctor.ts +15 -3
  49. package/src/guard-baseline.ts +74 -0
  50. package/src/guard-metrics.ts +52 -0
  51. package/src/guard-types.ts +3 -1
  52. package/src/guard.ts +14 -90
  53. package/src/index.ts +1 -0
  54. package/src/init.ts +1 -1
  55. package/src/output-metadata.ts +2 -0
  56. package/src/trust.ts +1 -1
  57. package/src/types.ts +1 -0
  58. package/tests/ci-quality-matrix.test.ts +37 -0
  59. package/tests/ci-smoke-gate.test.ts +26 -0
  60. package/tests/ci-version-alignment.test.ts +93 -0
  61. package/tests/docs-drift-check.test.ts +115 -0
  62. package/tests/new-features.test.ts +2 -2
  63. package/tests/perf-budget-check.test.ts +146 -0
  64. package/tests/phase1-init-doctor-guard.test.ts +104 -2
  65. package/tests/runtime-policy-alignment.test.ts +46 -0
  66. package/vitest.config.ts +2 -0
@@ -0,0 +1,360 @@
1
+ import { cpSync, existsSync, mkdirSync, mkdtempSync, readFileSync, rmSync, writeFileSync } from 'node:fs'
2
+ import { dirname, join, resolve } from 'node:path'
3
+ import { spawnSync } from 'node:child_process'
4
+ import { tmpdir } from 'node:os'
5
+ import { pathToFileURL } from 'node:url'
6
+
7
+ const BUDGET_SCHEMA_VERSION = 'drift-perf-budget/v1'
8
+ const BENCHMARK_RESULT_SCHEMA = 'drift-perf-check-result/v1'
9
+ const DEFAULT_BUDGET_PATH = 'benchmarks/perf-budget.v1.json'
10
+ const DEFAULT_RESULT_PATH = '.drift-perf/benchmark-latest.json'
11
+ const TASK_IDS = ['scan', 'review', 'trust']
12
+
13
+ function runGit(cwd, args) {
14
+ return spawnSync('git', args, {
15
+ cwd,
16
+ encoding: 'utf8',
17
+ })
18
+ }
19
+
20
+ function parseArgs(argv) {
21
+ const parsed = {
22
+ budgetPath: DEFAULT_BUDGET_PATH,
23
+ resultPath: DEFAULT_RESULT_PATH,
24
+ benchmarkResultPath: undefined,
25
+ }
26
+
27
+ let index = 0
28
+ while (index < argv.length) {
29
+ const token = argv[index]
30
+ if (token === '--budget') {
31
+ const next = argv[index + 1]
32
+ if (!next) throw new Error('--budget requires a value')
33
+ parsed.budgetPath = next
34
+ index += 2
35
+ continue
36
+ }
37
+
38
+ if (token === '--out') {
39
+ const next = argv[index + 1]
40
+ if (!next) throw new Error('--out requires a value')
41
+ parsed.resultPath = next
42
+ index += 2
43
+ continue
44
+ }
45
+
46
+ if (token === '--result') {
47
+ const next = argv[index + 1]
48
+ if (!next) throw new Error('--result requires a value')
49
+ parsed.benchmarkResultPath = next
50
+ index += 2
51
+ continue
52
+ }
53
+
54
+ throw new Error(`Unknown argument: ${token}`)
55
+ }
56
+
57
+ return parsed
58
+ }
59
+
60
+ function readJson(filePath) {
61
+ return JSON.parse(readFileSync(filePath, 'utf8'))
62
+ }
63
+
64
+ function asNonNegativeNumber(value, label) {
65
+ if (!Number.isFinite(value) || value < 0) {
66
+ throw new Error(`${label} must be a non-negative number`)
67
+ }
68
+ return value
69
+ }
70
+
71
+ function validateBudgetSchema(budget) {
72
+ if (budget?.schemaVersion !== BUDGET_SCHEMA_VERSION) {
73
+ throw new Error(`Unsupported budget schemaVersion. Expected '${BUDGET_SCHEMA_VERSION}'`)
74
+ }
75
+
76
+ for (const taskId of TASK_IDS) {
77
+ const taskBudget = budget?.tasks?.[taskId]
78
+ if (!taskBudget) {
79
+ throw new Error(`Missing budget entry for task '${taskId}'`)
80
+ }
81
+ asNonNegativeNumber(taskBudget.maxMedianMs, `tasks.${taskId}.maxMedianMs`)
82
+ asNonNegativeNumber(taskBudget.maxRssMb, `tasks.${taskId}.maxRssMb`)
83
+ }
84
+
85
+ asNonNegativeNumber(budget?.tolerance?.runtimePct ?? 0, 'tolerance.runtimePct')
86
+ asNonNegativeNumber(budget?.tolerance?.memoryPct ?? 0, 'tolerance.memoryPct')
87
+
88
+ asNonNegativeNumber(budget?.benchmark?.warmupRuns, 'benchmark.warmupRuns')
89
+ const measuredRuns = asNonNegativeNumber(budget?.benchmark?.measuredRuns, 'benchmark.measuredRuns')
90
+ if (measuredRuns < 1) {
91
+ throw new Error('benchmark.measuredRuns must be at least 1')
92
+ }
93
+ }
94
+
95
+ function benchmarkTaskMap(benchmarkResult) {
96
+ const map = new Map()
97
+ for (const taskResult of benchmarkResult?.results ?? []) {
98
+ map.set(taskResult.name, taskResult)
99
+ }
100
+ return map
101
+ }
102
+
103
+ export function evaluatePerformanceBudget(budget, benchmarkResult) {
104
+ validateBudgetSchema(budget)
105
+
106
+ const runtimeToleranceFactor = 1 + ((budget?.tolerance?.runtimePct ?? 0) / 100)
107
+ const memoryToleranceFactor = 1 + ((budget?.tolerance?.memoryPct ?? 0) / 100)
108
+ const byTask = benchmarkTaskMap(benchmarkResult)
109
+ const failures = []
110
+ const checks = []
111
+
112
+ for (const taskId of TASK_IDS) {
113
+ const taskBudget = budget.tasks[taskId]
114
+ const measured = byTask.get(taskId)
115
+ if (!measured) {
116
+ failures.push(`Benchmark output is missing task '${taskId}'`)
117
+ continue
118
+ }
119
+
120
+ const medianMs = asNonNegativeNumber(measured.medianMs, `results.${taskId}.medianMs`)
121
+ const maxRssMb = asNonNegativeNumber(measured.maxRssMb, `results.${taskId}.maxRssMb`)
122
+ const allowedMedianMs = taskBudget.maxMedianMs * runtimeToleranceFactor
123
+ const allowedMaxRssMb = taskBudget.maxRssMb * memoryToleranceFactor
124
+
125
+ const runtimePassed = medianMs <= allowedMedianMs
126
+ const memoryPassed = maxRssMb <= allowedMaxRssMb
127
+
128
+ checks.push({
129
+ task: taskId,
130
+ measured: { medianMs, maxRssMb },
131
+ budget: {
132
+ maxMedianMs: taskBudget.maxMedianMs,
133
+ maxRssMb: taskBudget.maxRssMb,
134
+ allowedMedianMs,
135
+ allowedMaxRssMb,
136
+ },
137
+ passed: runtimePassed && memoryPassed,
138
+ runtimePassed,
139
+ memoryPassed,
140
+ })
141
+
142
+ if (!runtimePassed) {
143
+ failures.push(
144
+ `${taskId}: median runtime ${medianMs.toFixed(2)}ms exceeds allowed ${allowedMedianMs.toFixed(2)}ms (budget ${taskBudget.maxMedianMs}ms + tolerance ${budget.tolerance.runtimePct}%)`,
145
+ )
146
+ }
147
+
148
+ if (!memoryPassed) {
149
+ failures.push(
150
+ `${taskId}: max RSS ${maxRssMb.toFixed(2)}MB exceeds allowed ${allowedMaxRssMb.toFixed(2)}MB (budget ${taskBudget.maxRssMb}MB + tolerance ${budget.tolerance.memoryPct}%)`,
151
+ )
152
+ }
153
+ }
154
+
155
+ return {
156
+ ok: failures.length === 0,
157
+ failures,
158
+ checks,
159
+ }
160
+ }
161
+
162
+ function createBenchmarkArgs(rootDir, budgetPath, budget, outputPath) {
163
+ const tsxLoaderPath = resolve(rootDir, 'node_modules', 'tsx', 'dist', 'loader.mjs')
164
+ if (!existsSync(tsxLoaderPath)) {
165
+ throw new Error(`Missing tsx loader at ${tsxLoaderPath}. Run npm ci first.`)
166
+ }
167
+
168
+ const benchmarkEntry = resolve(rootDir, 'src', 'benchmark.ts')
169
+ const tsxLoaderSpecifier = pathToFileURL(tsxLoaderPath).href
170
+
171
+ const benchmark = resolveBenchmarkContext(rootDir, budget)
172
+ const args = [
173
+ '--import',
174
+ tsxLoaderSpecifier,
175
+ benchmarkEntry,
176
+ '--scan-path',
177
+ benchmark.scanPath,
178
+ '--review-path',
179
+ benchmark.reviewPath,
180
+ '--trust-path',
181
+ benchmark.trustPath,
182
+ '--base',
183
+ String(benchmark.baseRef),
184
+ '--warmup',
185
+ String(benchmark.warmupRuns),
186
+ '--runs',
187
+ String(benchmark.measuredRuns),
188
+ '--json-out',
189
+ outputPath,
190
+ ]
191
+
192
+ return {
193
+ budgetPath,
194
+ benchmarkEntry,
195
+ args,
196
+ cleanup: benchmark.cleanup,
197
+ }
198
+ }
199
+
200
+ function createCommittedFixtureRepo(rootDir, fixturePath) {
201
+ const resolvedFixturePath = resolve(rootDir, fixturePath)
202
+ if (!existsSync(resolvedFixturePath)) {
203
+ throw new Error(`Benchmark fixture path does not exist: ${resolvedFixturePath}`)
204
+ }
205
+
206
+ const tempRepo = mkdtempSync(join(tmpdir(), 'drift-perf-fixture-'))
207
+ cpSync(resolvedFixturePath, tempRepo, { recursive: true })
208
+
209
+ const init = runGit(tempRepo, ['init'])
210
+ if (init.status !== 0) {
211
+ throw new Error(`Failed to initialize git fixture repository: ${init.stderr ?? ''}`)
212
+ }
213
+
214
+ const add = runGit(tempRepo, ['add', '.'])
215
+ if (add.status !== 0) {
216
+ throw new Error(`Failed to stage git fixture files: ${add.stderr ?? ''}`)
217
+ }
218
+
219
+ const commit = runGit(tempRepo, [
220
+ '-c',
221
+ 'user.name=drift-ci',
222
+ '-c',
223
+ 'user.email=drift-ci@example.com',
224
+ 'commit',
225
+ '-m',
226
+ 'fixture baseline',
227
+ ])
228
+
229
+ if (commit.status !== 0) {
230
+ throw new Error(`Failed to commit git fixture baseline: ${commit.stderr ?? ''}`)
231
+ }
232
+
233
+ return {
234
+ repoPath: tempRepo,
235
+ cleanup: () => {
236
+ rmSync(tempRepo, { recursive: true, force: true })
237
+ },
238
+ }
239
+ }
240
+
241
+ function resolveBenchmarkContext(rootDir, budget) {
242
+ const benchmark = budget?.benchmark ?? {}
243
+ const warmupRuns = benchmark.warmupRuns
244
+ const measuredRuns = benchmark.measuredRuns
245
+
246
+ if (typeof benchmark.fixturePath === 'string' && benchmark.fixturePath.trim().length > 0) {
247
+ const fixtureRepo = createCommittedFixtureRepo(rootDir, benchmark.fixturePath)
248
+ return {
249
+ scanPath: fixtureRepo.repoPath,
250
+ reviewPath: fixtureRepo.repoPath,
251
+ trustPath: fixtureRepo.repoPath,
252
+ baseRef: 'HEAD',
253
+ warmupRuns,
254
+ measuredRuns,
255
+ cleanup: fixtureRepo.cleanup,
256
+ }
257
+ }
258
+
259
+ if (!benchmark.scanPath || !benchmark.reviewPath || !benchmark.trustPath || !benchmark.baseRef) {
260
+ throw new Error('benchmark must provide fixturePath or scanPath/reviewPath/trustPath/baseRef')
261
+ }
262
+
263
+ return {
264
+ scanPath: resolve(rootDir, benchmark.scanPath),
265
+ reviewPath: resolve(rootDir, benchmark.reviewPath),
266
+ trustPath: resolve(rootDir, benchmark.trustPath),
267
+ baseRef: benchmark.baseRef,
268
+ warmupRuns,
269
+ measuredRuns,
270
+ cleanup: undefined,
271
+ }
272
+ }
273
+
274
+ function runBenchmark(rootDir, budgetPath, budget, resultPath) {
275
+ mkdirSync(dirname(resultPath), { recursive: true })
276
+
277
+ const benchmark = createBenchmarkArgs(rootDir, budgetPath, budget, resultPath)
278
+ const execution = spawnSync(process.execPath, benchmark.args, {
279
+ cwd: rootDir,
280
+ encoding: 'utf8',
281
+ })
282
+
283
+ if (benchmark.cleanup) {
284
+ benchmark.cleanup()
285
+ }
286
+
287
+ if (execution.status !== 0) {
288
+ const errorOutput = `${execution.stdout ?? ''}${execution.stderr ?? ''}`.trim()
289
+ throw new Error(`Benchmark command failed (${benchmark.benchmarkEntry}):\n${errorOutput}`)
290
+ }
291
+
292
+ if (!existsSync(resultPath)) {
293
+ throw new Error(`Benchmark did not produce expected JSON output at ${resultPath}`)
294
+ }
295
+
296
+ return readJson(resultPath)
297
+ }
298
+
299
+ export function runPerformanceBudgetCheck(rootDir = process.cwd(), argv = process.argv.slice(2)) {
300
+ const parsed = parseArgs(argv)
301
+ const budgetPath = resolve(rootDir, parsed.budgetPath)
302
+ const resultPath = resolve(rootDir, parsed.resultPath)
303
+ const budget = readJson(budgetPath)
304
+
305
+ const benchmarkResult = parsed.benchmarkResultPath
306
+ ? readJson(resolve(rootDir, parsed.benchmarkResultPath))
307
+ : runBenchmark(rootDir, budgetPath, budget, resultPath)
308
+
309
+ const gateResultPath = resolve(dirname(resultPath), 'perf-gate-result.json')
310
+
311
+ const evaluation = evaluatePerformanceBudget(budget, benchmarkResult)
312
+ const gateResult = {
313
+ schemaVersion: BENCHMARK_RESULT_SCHEMA,
314
+ generatedAt: new Date().toISOString(),
315
+ budgetFile: budgetPath,
316
+ budgetVersion: budget.budgetVersion,
317
+ benchmarkFile: parsed.benchmarkResultPath ? resolve(rootDir, parsed.benchmarkResultPath) : resultPath,
318
+ ok: evaluation.ok,
319
+ checks: evaluation.checks,
320
+ failures: evaluation.failures,
321
+ }
322
+
323
+ mkdirSync(dirname(resultPath), { recursive: true })
324
+ const gateResultSerialized = `${JSON.stringify(gateResult, null, 2)}\n`
325
+ writeFileSync(gateResultPath, gateResultSerialized, 'utf8')
326
+
327
+ if (!parsed.benchmarkResultPath) {
328
+ process.stdout.write(`Performance benchmark generated: ${resultPath}\n`)
329
+ }
330
+ process.stdout.write(`Performance gate report: ${gateResultPath}\n`)
331
+
332
+ process.stdout.write(`Performance budget version: ${budget.budgetVersion}\n`)
333
+ for (const check of evaluation.checks) {
334
+ process.stdout.write(
335
+ `- ${check.task}: median ${check.measured.medianMs.toFixed(2)}ms (<= ${check.budget.allowedMedianMs.toFixed(2)}ms), max RSS ${check.measured.maxRssMb.toFixed(2)}MB (<= ${check.budget.allowedMaxRssMb.toFixed(2)}MB)\n`,
336
+ )
337
+ }
338
+
339
+ if (!evaluation.ok) {
340
+ process.stderr.write('Performance budget check failed:\n')
341
+ for (const failure of evaluation.failures) {
342
+ process.stderr.write(`- ${failure}\n`)
343
+ }
344
+ process.exitCode = 1
345
+ return gateResult
346
+ }
347
+
348
+ process.stdout.write('Performance budget check passed.\n')
349
+ return gateResult
350
+ }
351
+
352
+ if (import.meta.url === pathToFileURL(process.argv[1]).href) {
353
+ try {
354
+ runPerformanceBudgetCheck()
355
+ } catch (error) {
356
+ const message = error instanceof Error ? error.message : String(error)
357
+ process.stderr.write(`Performance budget check failed: ${message}\n`)
358
+ process.exit(1)
359
+ }
360
+ }
@@ -0,0 +1,66 @@
1
+ import { readFileSync } from 'node:fs'
2
+ import { join } from 'node:path'
3
+
4
+ const EXPECTED_ENGINE_RANGE = '^20.0.0 || ^22.0.0'
5
+ const EXPECTED_NODE_MATRIX = '["20", "22"]'
6
+ const EXPECTED_README_RUNTIME = '**Runtime:** Node.js 20.x and 22.x (LTS)'
7
+ const EXPECTED_INIT_TEMPLATE_NODE_VERSION = 'node-version: 20'
8
+
9
+ function readRepoFile(relativePath) {
10
+ return readFileSync(join(process.cwd(), relativePath), 'utf8')
11
+ }
12
+
13
+ function assertIncludes(content, expected, errorMessage) {
14
+ if (!content.includes(expected)) {
15
+ throw new Error(errorMessage)
16
+ }
17
+ }
18
+
19
+ function main() {
20
+ const packageJson = JSON.parse(readRepoFile('package.json'))
21
+ const engineRange = packageJson?.engines?.node
22
+
23
+ if (engineRange !== EXPECTED_ENGINE_RANGE) {
24
+ throw new Error(
25
+ `Invalid package.json engines.node: expected "${EXPECTED_ENGINE_RANGE}", got "${String(engineRange)}"`,
26
+ )
27
+ }
28
+
29
+ const qualityWorkflow = readRepoFile('.github/workflows/quality.yml')
30
+ assertIncludes(
31
+ qualityWorkflow,
32
+ `node_versions: '${EXPECTED_NODE_MATRIX}'`,
33
+ `quality.yml must declare node_versions: '${EXPECTED_NODE_MATRIX}'`,
34
+ )
35
+
36
+ const reusableWorkflow = readRepoFile('.github/workflows/reusable-quality-checks.yml')
37
+ assertIncludes(
38
+ reusableWorkflow,
39
+ `default: '${EXPECTED_NODE_MATRIX}'`,
40
+ `reusable-quality-checks.yml must declare default: '${EXPECTED_NODE_MATRIX}'`,
41
+ )
42
+
43
+ const initTemplate = readRepoFile('src/init.ts')
44
+ assertIncludes(
45
+ initTemplate,
46
+ EXPECTED_INIT_TEMPLATE_NODE_VERSION,
47
+ `src/init.ts workflow template must include: ${EXPECTED_INIT_TEMPLATE_NODE_VERSION}`,
48
+ )
49
+
50
+ const readme = readRepoFile('README.md')
51
+ assertIncludes(
52
+ readme,
53
+ EXPECTED_README_RUNTIME,
54
+ `README runtime line must include: ${EXPECTED_README_RUNTIME}`,
55
+ )
56
+
57
+ const lockfile = readRepoFile('package-lock.json')
58
+ assertIncludes(lockfile, '"node_modules/commander"', 'package-lock must include commander entry')
59
+ assertIncludes(lockfile, '"node": ">=20"', 'commander dependency requires Node >=20; runtime policy cannot be lower')
60
+
61
+ process.stdout.write(
62
+ `Runtime policy check passed: engines.node=${EXPECTED_ENGINE_RANGE}, matrix=${EXPECTED_NODE_MATRIX}, docs aligned.\n`,
63
+ )
64
+ }
65
+
66
+ main()
package/src/benchmark.ts CHANGED
@@ -24,10 +24,14 @@ interface TaskResult {
24
24
  warmupRuns: number
25
25
  measuredRuns: number
26
26
  samplesMs: number[]
27
+ samplesRssMb: number[]
27
28
  medianMs: number
28
29
  meanMs: number
29
30
  minMs: number
30
31
  maxMs: number
32
+ medianRssMb: number
33
+ meanRssMb: number
34
+ maxRssMb: number
31
35
  }
32
36
 
33
37
  interface BenchmarkOutput {
@@ -129,6 +133,10 @@ function formatMs(ms: number): string {
129
133
  return ms.toFixed(2)
130
134
  }
131
135
 
136
+ function bytesToMb(bytes: number): number {
137
+ return bytes / (1024 * 1024)
138
+ }
139
+
132
140
  async function runTask(
133
141
  name: TaskResult['name'],
134
142
  warmupRuns: number,
@@ -140,22 +148,31 @@ async function runTask(
140
148
  }
141
149
 
142
150
  const samplesMs: number[] = []
151
+ const samplesRssMb: number[] = []
143
152
  for (let i = 0; i < measuredRuns; i += 1) {
153
+ const rssBefore = process.memoryUsage().rss
144
154
  const started = performance.now()
145
155
  await task()
146
156
  samplesMs.push(performance.now() - started)
157
+ const rssAfter = process.memoryUsage().rss
158
+ samplesRssMb.push(bytesToMb(Math.max(rssBefore, rssAfter)))
147
159
  }
148
160
 
149
161
  const total = samplesMs.reduce((sum, sample) => sum + sample, 0)
162
+ const totalRss = samplesRssMb.reduce((sum, sample) => sum + sample, 0)
150
163
  return {
151
164
  name,
152
165
  warmupRuns,
153
166
  measuredRuns,
154
167
  samplesMs,
168
+ samplesRssMb,
155
169
  medianMs: median(samplesMs),
156
170
  meanMs: total / samplesMs.length,
157
171
  minMs: Math.min(...samplesMs),
158
172
  maxMs: Math.max(...samplesMs),
173
+ medianRssMb: median(samplesRssMb),
174
+ meanRssMb: totalRss / samplesRssMb.length,
175
+ maxRssMb: Math.max(...samplesRssMb),
159
176
  }
160
177
  }
161
178
 
package/src/cli.ts CHANGED
@@ -14,7 +14,7 @@ import { printConsole, printDiff } from './printer.js'
14
14
  import { loadConfig } from './config.js'
15
15
  import { extractFilesAtRef, cleanupTempDir } from './git.js'
16
16
  import { computeDiff } from './diff.js'
17
- import { runGuard } from './guard.js'
17
+ import { formatGuardJson, runGuard } from './guard.js'
18
18
  import { generateHtmlReport } from './report.js'
19
19
  import { generateBadge } from './badge.js'
20
20
  import { emitCIAnnotations, printCISummary } from './ci.js'
@@ -412,7 +412,7 @@ addResourceOptions(
412
412
  })
413
413
 
414
414
  if (options.json) {
415
- process.stdout.write(JSON.stringify(result, null, 2) + '\n')
415
+ process.stdout.write(`${formatGuardJson(result)}\n`)
416
416
  } else {
417
417
  printGuardSummary(result)
418
418
  }
package/src/doctor.ts CHANGED
@@ -1,6 +1,8 @@
1
1
  import { existsSync, readdirSync, readFileSync } from 'node:fs'
2
2
  import { join } from 'node:path'
3
3
  import kleur from 'kleur'
4
+ import type { DriftOutputMetadata } from './types.js'
5
+ import { OUTPUT_SCHEMA, withOutputMetadata } from './output-metadata.js'
4
6
 
5
7
  export interface DoctorOptions {
6
8
  json?: boolean
@@ -23,10 +25,12 @@ interface DoctorReport {
23
25
  }
24
26
  }
25
27
 
28
+ export type DoctorReportJson = DoctorReport & DriftOutputMetadata
29
+
26
30
  const SOURCE_EXTENSIONS = new Set(['.ts', '.tsx', '.js', '.jsx'])
27
31
  const IGNORED_DIRECTORIES = new Set(['node_modules', '.git', 'dist', '.next', 'coverage'])
28
32
  const DECIMAL_RADIX = 10
29
- const MIN_SUPPORTED_NODE_MAJOR = 18
33
+ const MIN_SUPPORTED_NODE_MAJOR = 20
30
34
  const LOW_MEMORY_SOURCE_FILE_THRESHOLD = 500
31
35
  const DRIFT_CONFIG_CANDIDATES = [
32
36
  'drift.config.ts',
@@ -128,7 +132,7 @@ function printConsoleReport(report: DoctorReport): void {
128
132
 
129
133
  const nodeStatus = report.node.supported
130
134
  ? `${icons.check} ${kleur.green('Node runtime supported')}`
131
- : `${icons.warn} ${kleur.yellow('Node runtime below recommended minimum (>=18)')}`
135
+ : `${icons.warn} ${kleur.yellow('Node runtime below supported minimum (>=20)')}`
132
136
  process.stdout.write(`${nodeStatus} ${kleur.gray(`(${report.node.version})`)}\n`)
133
137
 
134
138
  if (report.project.packageJsonFound) {
@@ -160,11 +164,19 @@ function printConsoleReport(report: DoctorReport): void {
160
164
  process.stdout.write('\n')
161
165
  }
162
166
 
167
+ export function formatDoctorJsonObject(report: DoctorReport): DoctorReportJson {
168
+ return withOutputMetadata(report, OUTPUT_SCHEMA.doctor)
169
+ }
170
+
171
+ export function formatDoctorJson(report: DoctorReport): string {
172
+ return JSON.stringify(formatDoctorJsonObject(report), null, 2)
173
+ }
174
+
163
175
  export async function runDoctor(projectPath: string, options?: DoctorOptions): Promise<number> {
164
176
  const report = buildDoctorReport(projectPath)
165
177
 
166
178
  if (options?.json) {
167
- process.stdout.write(`${JSON.stringify(report, null, 2)}\n`)
179
+ process.stdout.write(`${formatDoctorJson(report)}\n`)
168
180
  } else {
169
181
  printConsoleReport(report)
170
182
  }
@@ -0,0 +1,74 @@
1
+ import { existsSync, readFileSync } from 'node:fs'
2
+ import { resolve } from 'node:path'
3
+ import type { GuardBaseline, IssueSeverity } from './guard-types.js'
4
+
5
+ export interface NormalizedBaseline {
6
+ score?: number
7
+ totalIssues?: number
8
+ bySeverity: Partial<Record<IssueSeverity, number>>
9
+ }
10
+
11
+ function parseNumber(value: unknown): number | undefined {
12
+ return typeof value === 'number' && !Number.isNaN(value) ? value : undefined
13
+ }
14
+
15
+ function firstDefinedNumber(values: unknown[]): number | undefined {
16
+ for (const value of values) {
17
+ const parsed = parseNumber(value)
18
+ if (parsed !== undefined) {
19
+ return parsed
20
+ }
21
+ }
22
+
23
+ return undefined
24
+ }
25
+
26
+ function normalizeSeverity(baseline: GuardBaseline, severity: IssueSeverity): number | undefined {
27
+ const summaryBySeverity = baseline.summary?.[`${severity}s` as 'errors' | 'warnings' | 'infos']
28
+
29
+ return firstDefinedNumber([
30
+ baseline.bySeverity?.[severity],
31
+ severity === 'error' ? baseline.errors : undefined,
32
+ severity === 'warning' ? baseline.warnings : undefined,
33
+ severity === 'info' ? baseline.infos : undefined,
34
+ summaryBySeverity,
35
+ ])
36
+ }
37
+
38
+ function hasAnchor(baseline: NormalizedBaseline): boolean {
39
+ if (baseline.score !== undefined || baseline.totalIssues !== undefined) {
40
+ return true
41
+ }
42
+
43
+ const severities: IssueSeverity[] = ['error', 'warning', 'info']
44
+ return severities.some((severity) => baseline.bySeverity[severity] !== undefined)
45
+ }
46
+
47
+ export function normalizeBaseline(baseline: GuardBaseline): NormalizedBaseline {
48
+ const normalized: NormalizedBaseline = {
49
+ score: parseNumber(baseline.score),
50
+ totalIssues: parseNumber(baseline.totalIssues),
51
+ bySeverity: {
52
+ error: normalizeSeverity(baseline, 'error'),
53
+ warning: normalizeSeverity(baseline, 'warning'),
54
+ info: normalizeSeverity(baseline, 'info'),
55
+ },
56
+ }
57
+
58
+ if (!hasAnchor(normalized)) {
59
+ throw new Error('Invalid guard baseline: expected score, totalIssues, or severity counters (error/warning/info).')
60
+ }
61
+
62
+ return normalized
63
+ }
64
+
65
+ export function readBaselineFromFile(projectPath: string, baselinePath?: string): { baseline: NormalizedBaseline; path: string } | undefined {
66
+ const resolvedBaselinePath = resolve(projectPath, baselinePath ?? 'drift-baseline.json')
67
+ if (!existsSync(resolvedBaselinePath)) return undefined
68
+
69
+ const raw = JSON.parse(readFileSync(resolvedBaselinePath, 'utf8')) as GuardBaseline
70
+ return {
71
+ baseline: normalizeBaseline(raw),
72
+ path: resolvedBaselinePath,
73
+ }
74
+ }
@@ -0,0 +1,52 @@
1
+ import type { DriftDiff, DriftIssue, DriftReport } from './types.js'
2
+ import type { GuardMetrics, IssueSeverity } from './guard-types.js'
3
+ import type { NormalizedBaseline } from './guard-baseline.js'
4
+
5
+ function createSeverityDelta(): Record<IssueSeverity, number> {
6
+ return {
7
+ error: 0,
8
+ warning: 0,
9
+ info: 0,
10
+ }
11
+ }
12
+
13
+ function applySeverityDelta(
14
+ delta: Record<IssueSeverity, number>,
15
+ issues: DriftIssue[],
16
+ direction: 1 | -1,
17
+ ): void {
18
+ for (const issue of issues) {
19
+ delta[issue.severity] += direction
20
+ }
21
+ }
22
+
23
+ function countSeverityDeltaFromDiff(diff: DriftDiff): Record<IssueSeverity, number> {
24
+ const severityDelta = createSeverityDelta()
25
+
26
+ for (const file of diff.files) {
27
+ applySeverityDelta(severityDelta, file.newIssues, 1)
28
+ applySeverityDelta(severityDelta, file.resolvedIssues, -1)
29
+ }
30
+
31
+ return severityDelta
32
+ }
33
+
34
+ export function buildMetricsFromDiff(diff: DriftDiff): GuardMetrics {
35
+ return {
36
+ scoreDelta: diff.totalDelta,
37
+ totalIssuesDelta: diff.newIssuesCount - diff.resolvedIssuesCount,
38
+ severityDelta: countSeverityDeltaFromDiff(diff),
39
+ }
40
+ }
41
+
42
+ export function buildMetricsFromBaseline(current: DriftReport, baseline: NormalizedBaseline): GuardMetrics {
43
+ return {
44
+ scoreDelta: current.totalScore - (baseline.score ?? current.totalScore),
45
+ totalIssuesDelta: current.totalIssues - (baseline.totalIssues ?? current.totalIssues),
46
+ severityDelta: {
47
+ error: current.summary.errors - (baseline.bySeverity.error ?? current.summary.errors),
48
+ warning: current.summary.warnings - (baseline.bySeverity.warning ?? current.summary.warnings),
49
+ info: current.summary.infos - (baseline.bySeverity.info ?? current.summary.infos),
50
+ },
51
+ }
52
+ }
@@ -1,4 +1,4 @@
1
- import type { DriftAnalysisOptions, DriftDiff, DriftIssue, DriftReport } from './types.js'
1
+ import type { DriftAnalysisOptions, DriftDiff, DriftIssue, DriftOutputMetadata, DriftReport } from './types.js'
2
2
 
3
3
  export type IssueSeverity = DriftIssue['severity']
4
4
 
@@ -62,3 +62,5 @@ export interface GuardResult {
62
62
  current: DriftReport
63
63
  diff?: DriftDiff
64
64
  }
65
+
66
+ export type GuardResultJson = GuardResult & DriftOutputMetadata