@soulbatical/tetra-dev-toolkit 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39) hide show
  1. package/README.md +312 -0
  2. package/bin/vca-audit.js +90 -0
  3. package/bin/vca-dev-token.js +39 -0
  4. package/bin/vca-setup.js +227 -0
  5. package/lib/checks/codeQuality/api-response-format.js +268 -0
  6. package/lib/checks/health/claude-md.js +114 -0
  7. package/lib/checks/health/doppler-compliance.js +174 -0
  8. package/lib/checks/health/git.js +61 -0
  9. package/lib/checks/health/gitignore.js +83 -0
  10. package/lib/checks/health/index.js +26 -0
  11. package/lib/checks/health/infrastructure-yml.js +87 -0
  12. package/lib/checks/health/mcps.js +57 -0
  13. package/lib/checks/health/naming-conventions.js +302 -0
  14. package/lib/checks/health/plugins.js +38 -0
  15. package/lib/checks/health/quality-toolkit.js +97 -0
  16. package/lib/checks/health/repo-visibility.js +70 -0
  17. package/lib/checks/health/rls-audit.js +130 -0
  18. package/lib/checks/health/scanner.js +68 -0
  19. package/lib/checks/health/secrets.js +80 -0
  20. package/lib/checks/health/stella-integration.js +124 -0
  21. package/lib/checks/health/tests.js +140 -0
  22. package/lib/checks/health/types.js +77 -0
  23. package/lib/checks/health/vincifox-widget.js +47 -0
  24. package/lib/checks/index.js +17 -0
  25. package/lib/checks/security/deprecated-supabase-admin.js +96 -0
  26. package/lib/checks/security/gitignore-validation.js +211 -0
  27. package/lib/checks/security/hardcoded-secrets.js +95 -0
  28. package/lib/checks/security/service-key-exposure.js +107 -0
  29. package/lib/checks/security/systemdb-whitelist.js +138 -0
  30. package/lib/checks/stability/ci-pipeline.js +143 -0
  31. package/lib/checks/stability/husky-hooks.js +117 -0
  32. package/lib/checks/stability/npm-audit.js +140 -0
  33. package/lib/checks/supabase/rls-policy-audit.js +261 -0
  34. package/lib/commands/dev-token.js +342 -0
  35. package/lib/config.js +213 -0
  36. package/lib/index.js +17 -0
  37. package/lib/reporters/terminal.js +134 -0
  38. package/lib/runner.js +179 -0
  39. package/package.json +72 -0
@@ -0,0 +1,268 @@
1
+ /**
2
+ * Check API response format consistency
3
+ *
4
+ * Validates that all res.json() calls follow the standard format:
5
+ * - Success: { success: true, data: ... }
6
+ * - Error: { success: false, error: "..." }
7
+ * - List: { success: true, data: [...], meta: { total, limit, offset, hasMore } }
8
+ *
9
+ * Based on SparkBuddy-Live response format standard.
10
+ * Works with both feature-based (controllers) and route-based (routes) architectures.
11
+ */
12
+
13
+ import { glob } from 'glob'
14
+ import { readFileSync } from 'fs'
15
+
16
// Check metadata consumed by the toolkit runner/reporters: unique id,
// human-readable name, category bucket, default severity for findings,
// and a one-line description shown in reports.
export const meta = {
  id: 'api-response-format',
  name: 'API Response Format Consistency',
  category: 'codeQuality',
  severity: 'high',
  description: 'Validates all API responses follow { success, data/error } standard format'
}
23
+
24
/**
 * Scan route/controller files for res.json() calls and validate that each
 * response follows the { success, data/error } wrapper convention.
 *
 * @param {object} config - toolkit config; config.ignore extends the glob ignore list
 * @param {string} projectRoot - absolute path used as the glob cwd
 * @returns {Promise<object>} results: passed flag, findings, severity summary,
 *   and info counters (totalEndpoints / compliant / violations); or
 *   { skipped, skipReason } when nothing matched
 */
export async function run(config, projectRoot) {
  const results = {
    passed: true,
    findings: [],
    summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 },
    info: {
      totalEndpoints: 0,
      compliant: 0,
      violations: 0
    }
  }

  // Both route-based and feature/controller-based layouts are supported.
  const patterns = [
    '**/routes/**/*.ts',
    '**/routes/**/*.js',
    '**/controllers/**/*.ts',
    '**/controllers/**/*.js',
    '**/features/**/controllers/**/*.ts',
    '**/features/**/*.controller.ts'
  ]

  const ignore = [
    ...(config.ignore || []),
    'node_modules/**',
    '**/node_modules/**',
    'dist/**',
    '**/dist/**',
    'build/**',
    '**/build/**',
    '**/*.test.*',
    '**/*.spec.*',
    '**/*.d.ts',
    '**/*.js.map'
  ]

  // Run all glob patterns in parallel, then flatten and dedupe —
  // the patterns overlap (e.g. controllers nested under features).
  const matches = await Promise.all(
    patterns.map(pattern => glob(pattern, { cwd: projectRoot, ignore }))
  )
  const files = [...new Set(matches.flat())]

  if (files.length === 0) {
    results.skipped = true
    results.skipReason = 'No route or controller files found'
    return results
  }

  for (const file of files) {
    let content
    try {
      content = readFileSync(`${projectRoot}/${file}`, 'utf-8')
    } catch {
      // Unreadable file — skip it rather than abort the whole check.
      continue
    }
    analyzeFile(file, content.split('\n'), results)
  }

  // Only critical/high findings fail the check; medium/low stay advisory.
  const blocking = results.findings.filter(f => f.severity === 'critical' || f.severity === 'high')
  results.passed = blocking.length === 0
  results.info.violations = results.findings.length
  results.info.compliant = results.info.totalEndpoints - results.info.violations

  return results
}
95
+
96
/**
 * Scan one file's lines for res.json() responses and validate each one.
 * Increments results.info.totalEndpoints per response found and delegates
 * validation (and finding creation) to validateResponse().
 *
 * @param {string} file - project-relative path, used in finding messages
 * @param {string[]} lines - file content split on '\n'
 * @param {object} results - shared results accumulator from run()
 */
function analyzeFile(file, lines, results) {
  lines.forEach((line, i) => {
    const trimmed = line.trim()

    // Ignore comments, imports and pure type declarations.
    const isNonCode =
      trimmed.startsWith('//') || trimmed.startsWith('*') || trimmed.startsWith('/*') ||
      trimmed.startsWith('import') || trimmed.startsWith('export type') ||
      trimmed.startsWith('export interface')
    if (isNonCode) return

    // Streaming/SSE responses don't use the JSON wrapper convention.
    if (trimmed.includes('res.write(') || trimmed.includes('res.end(')) return

    // Only res.json(...) or res.status(...).json(...) calls are endpoints.
    if (!/res\.(json|status\s*\([^)]+\)\s*\.\s*json)\s*\(/.test(line)) return

    // 204 No Content legitimately has no body.
    if (/status\s*\(\s*204\s*\)/.test(line)) return

    results.info.totalEndpoints++

    // Capture the full (possibly multi-line) response expression.
    const responseCode = extractResponseObject(lines, i)
    if (!responseCode) return

    const functionName = extractFunctionContext(lines, i)
    validateResponse(file, i + 1, responseCode, functionName, results)
  })
}
136
+
137
/**
 * Collect the full text of a response expression starting at startLine,
 * using brace counting to find where a multi-line object literal closes.
 * Scans at most 30 lines ahead to bound the work on malformed input.
 *
 * Fix: the original ended with `if (!foundOpenBrace) return content` followed
 * by an unconditional `return content` — a redundant, unreachable-in-effect
 * branch. Both paths returned the same value, so they are collapsed into one.
 *
 * @param {string[]} lines - file content split on '\n'
 * @param {number} startLine - index of the line containing res.json(
 * @returns {string} the collected source text, '\n'-terminated per line
 */
function extractResponseObject(lines, startLine) {
  let braceCount = 0
  let content = ''
  let foundOpenBrace = false

  for (let j = startLine; j < Math.min(startLine + 30, lines.length); j++) {
    const currentLine = lines[j]
    content += currentLine + '\n'

    for (const char of currentLine) {
      if (char === '{') {
        braceCount++
        foundOpenBrace = true
      }
      if (char === '}') braceCount--
    }

    // Braces balanced: the response object literal is complete.
    if (foundOpenBrace && braceCount <= 0) {
      return content
    }
  }

  // Single-line responses without an object literal (e.g. res.json(data))
  // or braces that never balanced within the window: return what we have.
  return content
}
167
+
168
/**
 * Walk backwards (up to 30 lines) from a res.json() call to find the nearest
 * route registration or handler definition, so findings can be labelled.
 *
 * @param {string[]} lines - file content split on '\n'
 * @param {number} lineNum - index of the line containing the response call
 * @returns {string} 'VERB /path', a handler name, or 'unknown'
 */
function extractFunctionContext(lines, lineNum) {
  // Probes are tried in priority order on each line: explicit route
  // registrations first, then named async handlers.
  const probes = [
    [/router\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/, m => `${m[1].toUpperCase()} ${m[2]}`],
    [/app\.(get|post|put|patch|delete)\s*\(\s*['"`]([^'"`]+)['"`]/, m => `${m[1].toUpperCase()} ${m[2]}`],
    [/async\s+(\w+)\s*\(/, m => m[1]],
    [/const\s+(\w+)\s*=\s*async/, m => m[1]]
  ]

  for (let i = lineNum - 1; i >= Math.max(0, lineNum - 30); i--) {
    for (const [pattern, label] of probes) {
      const match = lines[i].match(pattern)
      if (match) return label(match)
    }
  }

  return 'unknown'
}
191
+
192
/**
 * Validate one response expression against the wrapper standard and record
 * findings via addFinding().
 *
 * Rules:
 *  - raw responses (res.json(variable) / res.json([...])) → critical
 *  - error responses (success:false or 4xx/5xx status) need success + error
 *  - success responses need success:true and a data field (or results for
 *    batch-style handlers)
 *
 * Fix: the raw-response pattern only matched identifiers after the opening
 * paren, so raw array literals like res.json([...]) slipped through even
 * though the original comment documented them as a target. The character
 * class now also accepts '['.
 *
 * @param {string} file - project-relative path for the finding
 * @param {number} lineNum - 1-based line of the response call
 * @param {string} responseCode - full response expression text
 * @param {string} functionName - handler label from extractFunctionContext()
 * @param {object} results - shared results accumulator
 */
function validateResponse(file, lineNum, responseCode, functionName, results) {
  // Error responses: explicit success:false or a 4xx/5xx status code.
  const isErrorResponse = responseCode.includes('success: false') ||
    responseCode.includes('success:false') ||
    /res\.status\s*\(\s*(4\d{2}|5\d{2})\s*\)/.test(responseCode)

  const hasSuccess = /success\s*:/.test(responseCode)
  const hasData = /\bdata\s*[:}]|\bdata\s*,/.test(responseCode) || /,\s*data\s*[,}]|{\s*data\s*[,}]/.test(responseCode)
  const hasError = /\berror\s*:/.test(responseCode)
  const hasResults = /\bresults\s*:/.test(responseCode)

  // Raw response: res.json(someVariable) or res.json([...]) with no object
  // wrapper anywhere in the expression.
  const isRawResponse = /res\.(json|status\s*\([^)]+\)\s*\.\s*json)\s*\(\s*[\[a-zA-Z_]/.test(responseCode) &&
    !responseCode.includes('{')

  if (isRawResponse) {
    addFinding(results, {
      file,
      line: lineNum,
      type: 'RAW_RESPONSE',
      severity: 'critical',
      message: `Raw response without wrapper in ${functionName}`,
      fix: 'Wrap in { success: true, data: <value> }'
    })
    return
  }

  if (isErrorResponse) {
    // Error responses must carry both success:false and an error message.
    if (!hasSuccess || !hasError) {
      const missing = []
      if (!hasSuccess) missing.push('success: false')
      if (!hasError) missing.push('error')

      addFinding(results, {
        file,
        line: lineNum,
        type: 'INVALID_ERROR_RESPONSE',
        severity: 'high',
        message: `Error response missing ${missing.join(', ')} in ${functionName}`,
        fix: 'Use { success: false, error: "message" }'
      })
    }
  } else {
    // Batch/refresh handlers may legitimately return `results` instead of `data`.
    const isBatch = /refresh|batch|bulk/i.test(functionName)
    const hasValidData = hasData || (isBatch && hasResults)

    if (!hasSuccess) {
      addFinding(results, {
        file,
        line: lineNum,
        type: 'MISSING_SUCCESS_WRAPPER',
        severity: 'critical',
        message: `Success response missing 'success: true' in ${functionName}`,
        snippet: responseCode.substring(0, 120).replace(/\s+/g, ' ').trim(),
        fix: 'Add success: true to response object'
      })
    } else if (!hasValidData && !hasError) {
      addFinding(results, {
        file,
        line: lineNum,
        type: 'SUCCESS_WITHOUT_DATA',
        severity: 'high',
        message: `Response has success but no data field in ${functionName}`,
        fix: 'Add data field: { success: true, data: ... }'
      })
    }
  }
}
261
+
262
/**
 * Record a finding and keep the per-severity tallies in sync.
 * Findings without an explicit severity count as 'medium'.
 *
 * @param {object} results - shared results accumulator
 * @param {object} finding - { file, line, type, severity?, message, ... }
 */
function addFinding(results, finding) {
  const { findings, summary } = results
  findings.push(finding)

  const severity = finding.severity || 'medium'
  summary.total += 1
  summary[severity] = (summary[severity] || 0) + 1
}
@@ -0,0 +1,114 @@
1
+ /**
2
+ * Health Check: CLAUDE.md Protocol Sections
3
+ *
4
+ * Verifies CLAUDE.md has the full Stella protocol (v2):
5
+ * 1. Session check-in (akkoord + identity)
6
+ * 2. 6-step howto process (search, make, load, plan, execute, evaluate)
7
+ * 3. Howto-maken (step 1b — never freestyle)
8
+ * 4. Ralph workflow
9
+ * 5. MCP fallback
10
+ *
11
+ * Score: 0-5 (1 per section present)
12
+ */
13
+
14
+ import { existsSync, readFileSync } from 'fs'
15
+ import { join } from 'path'
16
+ import { createCheck } from './types.js'
17
+
18
/**
 * Verify CLAUDE.md contains the five Stella protocol (v2) sections.
 * Awards one point per section found; anything absent is listed in
 * result.details.missing and reflected in the status/message.
 *
 * @param {string} projectPath - project root containing CLAUDE.md
 * @returns {Promise<object>} createCheck result (score 0-5)
 */
export async function check(projectPath) {
  const result = createCheck('claude-md', 5, {
    exists: false,
    hasSessionCheckin: false,
    hasHowtoProtocol: false,
    hasHowtoMaken: false,
    hasRalphWorkflow: false,
    hasMcpFallback: false,
    missing: []
  })

  const claudeMdPath = join(projectPath, 'CLAUDE.md')
  if (!existsSync(claudeMdPath)) {
    result.status = 'error'
    result.details.message = 'CLAUDE.md not found'
    result.details.missing.push('CLAUDE.md file')
    return result
  }

  result.details.exists = true

  let content
  try {
    content = readFileSync(claudeMdPath, 'utf-8')
  } catch {
    result.status = 'error'
    result.details.message = 'Could not read CLAUDE.md'
    return result
  }

  const has = (needle) => content.includes(needle)

  // 1. Session check-in: both the akkoord and identity tool calls must appear.
  const checkinGaps = []
  if (!has('howto_akkoord')) checkinGaps.push('stella_howto_akkoord')
  if (!has('confirm_identity')) checkinGaps.push('stella_confirm_identity')
  if (checkinGaps.length === 0) {
    result.details.hasSessionCheckin = true
    result.score += 1
  } else {
    result.details.missing.push(`Sessie inchecken: ${checkinGaps.join(', ')}`)
  }

  // 2. 6-step howto protocol: search, rate, plan and summarize must all appear.
  const protocolGaps = ['howto_search', 'howto_rate', 'plan_create', 'summarize']
    .filter(tool => !has(tool))
  if (protocolGaps.length === 0) {
    result.details.hasHowtoProtocol = true
    result.score += 1
  } else {
    result.details.missing.push(`Howto protocol stappen: ${protocolGaps.join(', ')}`)
  }

  // 3. Howto-maken (step 1b): upsert tool plus either the section heading or
  //    the never-freestyle instruction.
  const hasHowtoUpsert = has('howto_upsert')
  const mentionsMaken = has('HOW-TO MAKEN') || has('howto maken')
  const mentionsNeverFreestyle = has('altijd eerst howto maken') || has('NOOIT')
  if (hasHowtoUpsert && (mentionsMaken || mentionsNeverFreestyle)) {
    result.details.hasHowtoMaken = true
    result.score += 1
  } else {
    const gaps = []
    if (!hasHowtoUpsert) gaps.push('howto_upsert')
    if (!mentionsMaken && !mentionsNeverFreestyle) gaps.push('HOW-TO MAKEN stap / "altijd eerst howto maken"')
    result.details.missing.push(`Howto-maken (stap 1b): ${gaps.join(', ')}`)
  }

  // 4. Ralph workflow: named section, or both the fix-plan tag and specs dir.
  const hasRalph = /Ralph workflow/i.test(content) ||
    (/@fix_plan/.test(content) && /\.ralph\/specs/.test(content))
  if (hasRalph) {
    result.details.hasRalphWorkflow = true
    result.score += 1
  } else {
    result.details.missing.push('Ralph workflow sectie')
  }

  // 5. MCP fallback instruction.
  if (/MCP niet bereikbaar|\/mcp/.test(content)) {
    result.details.hasMcpFallback = true
    result.score += 1
  } else {
    result.details.missing.push('MCP niet bereikbaar instructie')
  }

  if (result.score === 0) {
    result.status = 'error'
    result.details.message = 'CLAUDE.md exists but missing all required sections'
  } else if (result.score < result.maxScore) {
    result.status = 'warning'
    result.details.message = `Missing: ${result.details.missing.join(', ')}`
  }

  return result
}
@@ -0,0 +1,174 @@
1
+ /**
2
+ * Health Check: Doppler Secret Management Compliance
3
+ *
4
+ * Checks: doppler.yaml exists, npm scripts use `doppler run`, no .env files with secrets.
5
+ * Recursively scans for ANY file containing secrets (not just .env in root dirs).
6
+ * Score: 0-3 (1 per aspect)
7
+ */
8
+
9
+ import { existsSync, readFileSync, readdirSync, statSync } from 'fs'
10
+ import { join, relative } from 'path'
11
+ import { createCheck } from './types.js'
12
+
13
/**
 * Check Doppler secret-management compliance for a project.
 *
 * Three scored aspects (score capped at maxScore 3):
 *  1. doppler.yaml exists with at least one complete setup entry
 *     (project + config + path)  → +1
 *  2. dev-like npm scripts wrap their command in `doppler run`
 *     → +1, or +0.5 partial credit when only some scripts comply
 *  3. no .env-style files containing secret-looking values AND no `dotenv`
 *     dependency → +1, or +0.5 when files are clean but dotenv remains
 *
 * @param {string} projectPath - absolute path to the project root
 * @returns {Promise<object>} createCheck result with score, status and details
 */
export async function check(projectPath) {
  const result = createCheck('doppler-compliance', 3, {
    hasDopplerYaml: false,
    dopplerConfigs: [],
    scriptsWithDoppler: [],
    scriptsWithoutDoppler: [],
    envFilesFound: [],
    hasDotenvDep: false
  })

  // --- Check 1: doppler.yaml ---
  // Minimal hand-rolled YAML walk: collects `- project:` list entries and
  // their `config:` / `path:` keys. Avoids a YAML dependency.
  const dopplerYamlPath = join(projectPath, 'doppler.yaml')
  if (existsSync(dopplerYamlPath)) {
    try {
      const content = readFileSync(dopplerYamlPath, 'utf-8')
      const configs = []
      let currentEntry = {}

      for (const line of content.split('\n')) {
        const trimmed = line.trim()
        if (trimmed.startsWith('- project:')) {
          // New list entry: flush the previous one if it was started.
          if (currentEntry.project) configs.push(currentEntry)
          currentEntry = { project: trimmed.replace('- project:', '').trim() }
        } else if (trimmed.startsWith('config:') && currentEntry.project) {
          currentEntry.config = trimmed.replace('config:', '').trim()
        } else if (trimmed.startsWith('path:') && currentEntry.project) {
          currentEntry.path = trimmed.replace('path:', '').trim()
        }
      }
      // Flush the trailing entry.
      if (currentEntry.project) configs.push(currentEntry)

      result.details.dopplerConfigs = configs
      // One fully-specified entry is enough for credit.
      if (configs.some(c => c.project && c.config && c.path)) {
        result.details.hasDopplerYaml = true
        result.score += 1
      } else {
        result.details.dopplerYamlIssue = 'doppler.yaml exists but has no valid setup entries'
      }
    } catch {
      result.details.dopplerYamlIssue = 'Could not parse doppler.yaml'
    }
  }

  // --- Check 2: npm scripts use `doppler run` ---
  // Only the root/backend/bot package.json files are inspected.
  const packageJsonPaths = [
    { path: join(projectPath, 'package.json'), label: 'root' },
    { path: join(projectPath, 'backend', 'package.json'), label: 'backend' },
    { path: join(projectPath, 'bot', 'package.json'), label: 'bot' }
  ]

  // Only dev-style scripts are expected to need secrets at runtime.
  const secretScriptPatterns = /^(dev|dev:backend|dev:bot|dev:all|dev:full)$/

  for (const { path: pkgPath, label } of packageJsonPaths) {
    if (!existsSync(pkgPath)) continue
    try {
      const scripts = JSON.parse(readFileSync(pkgPath, 'utf-8')).scripts || {}
      for (const [name, cmd] of Object.entries(scripts)) {
        if (!secretScriptPatterns.test(name)) continue
        if (cmd.includes('doppler run')) result.details.scriptsWithDoppler.push(`${label}:${name}`)
        // Pure delegation scripts (workspace / npm-run-all forwarding) are
        // exempt: the delegated-to script is the one that needs doppler.
        else if (!cmd.includes('npm run dev --workspace') && !cmd.includes('npm:dev:')) {
          result.details.scriptsWithoutDoppler.push(`${label}:${name}`)
        }
      }
    } catch { /* ignore */ }
  }

  // Full credit only when every relevant script complies; half credit when
  // at least one does.
  if (result.details.scriptsWithDoppler.length > 0 && result.details.scriptsWithoutDoppler.length === 0) {
    result.score += 1
  } else if (result.details.scriptsWithDoppler.length > 0) {
    result.score += 0.5
  }

  // --- Check 3: No files with secrets on disk ---
  // Recursively scan the project for any file that looks like it contains secrets.
  // Matches: .env, .env.local, .env.production, episode-config.env, etc.
  const SKIP_DIRS = new Set(['node_modules', '.git', 'dist', 'build', '.next', '.turbo', '.cache', 'coverage', '__pycache__'])
  // NOTE(review): ALLOWED_FILES entries are compared against relative() output,
  // which uses backslashes on Windows — these posix-style keys may not match there; confirm.
  const ALLOWED_FILES = new Set(['.env.example', 'frontend/.env', 'bot/.env.example'])
  const SECRET_PATTERNS = [
    /^(sk-|sk_live_|sk_test_)/, // OpenAI / Stripe keys
    /^(sb_secret_|sbp_)/, // Supabase service role
    /^eyJ[A-Za-z0-9_-]{20,}/, // JWT tokens
    /^gh[pousr]_[A-Za-z0-9]{20,}/, // GitHub tokens
    /^AKIA[A-Z0-9]{12,}/, // AWS access keys
    /^xox[bpsa]-[A-Za-z0-9-]{20,}/, // Slack tokens
    /^[A-Za-z0-9+/]{40,}={0,2}$/, // Base64 encoded secrets (40+ chars)
  ]
  const PLACEHOLDER_VALUES = new Set(['your_value_here', 'xxx', 'TODO', 'CHANGE_ME', 'replace_me', ''])

  // Heuristic: does an env-file value look like a real secret?
  // NOTE(review): the generic 20+-char rule below can false-positive on long
  // non-secret identifiers — deliberate trade-off toward caution, it seems.
  function isSecretValue(value) {
    const v = value.trim().replace(/^["']|["']$/g, '')
    if (!v || PLACEHOLDER_VALUES.has(v)) return false
    if (v.startsWith('http://localhost') || v.startsWith('https://localhost')) return false
    if (SECRET_PATTERNS.some(p => p.test(v))) return true
    // Generic: long alphanumeric values (20+ chars, not a URL)
    if (v.length >= 20 && /^[A-Za-z0-9_+/=-]+$/.test(v) && !v.startsWith('http')) return true
    return false
  }

  // Depth-limited recursive walk; records any non-example .env file whose
  // KEY=value lines contain at least one secret-looking value.
  function scanDir(dir, depth = 0) {
    if (depth > 5) return // prevent runaway recursion
    try {
      for (const entry of readdirSync(dir)) {
        if (SKIP_DIRS.has(entry)) continue
        const fullPath = join(dir, entry)
        try {
          const stat = statSync(fullPath)
          if (stat.isDirectory()) {
            scanDir(fullPath, depth + 1)
          } else if (stat.isFile() && entry.includes('.env') && !entry.endsWith('.example')) {
            const relPath = relative(projectPath, fullPath)
            if (ALLOWED_FILES.has(relPath)) continue
            try {
              const content = readFileSync(fullPath, 'utf-8')
              const hasSecrets = content.split('\n').some(line => {
                const trimmed = line.trim()
                if (!trimmed || trimmed.startsWith('#')) return false
                const m = trimmed.match(/^([A-Z_][A-Z_0-9]*)\s*=\s*(.+)/)
                if (!m) return false
                return isSecretValue(m[2])
              })
              if (hasSecrets) result.details.envFilesFound.push(relPath)
            } catch { /* ignore unreadable files */ }
          }
        } catch { /* ignore stat errors */ }
      }
    } catch { /* ignore unreadable dirs */ }
  }

  scanDir(projectPath)

  // Check for dotenv dependency — its presence suggests .env files are still
  // part of the runtime configuration path.
  for (const pkgPath of [join(projectPath, 'package.json'), join(projectPath, 'backend', 'package.json')]) {
    if (!existsSync(pkgPath)) continue
    try {
      const pkg = JSON.parse(readFileSync(pkgPath, 'utf-8'))
      if ({ ...pkg.dependencies, ...pkg.devDependencies }['dotenv']) {
        result.details.hasDotenvDep = true
        break
      }
    } catch { /* ignore */ }
  }

  // Clean disk + no dotenv: full point. Clean disk but dotenv present: half.
  if (result.details.envFilesFound.length === 0 && !result.details.hasDotenvDep) result.score += 1
  else if (result.details.envFilesFound.length === 0) result.score += 0.5

  result.score = Math.min(result.score, result.maxScore)

  if (result.score === 0) {
    result.status = 'warning'
    result.details.message = 'Not using Doppler — secrets may be in .env files'
  } else if (result.score < result.maxScore) {
    result.status = 'warning'
    const issues = []
    if (!result.details.hasDopplerYaml) issues.push('no doppler.yaml')
    if (result.details.scriptsWithoutDoppler.length > 0) issues.push(`${result.details.scriptsWithoutDoppler.length} scripts without doppler run`)
    if (result.details.envFilesFound.length > 0) issues.push(`${result.details.envFilesFound.length} .env files with secrets`)
    if (result.details.hasDotenvDep) issues.push('dotenv dependency still present')
    result.details.message = issues.join(', ')
  }

  return result
}
@@ -0,0 +1,61 @@
1
+ /**
2
+ * Health Check: Git Status
3
+ *
4
+ * Checks branch, uncommitted changes, and unpushed commits.
5
+ * Score: 3 (full) - deductions for uncommitted/unpushed changes
6
+ */
7
+
8
+ import { existsSync } from 'fs'
9
+ import { execSync } from 'child_process'
10
+ import { join } from 'path'
11
+ import { createCheck } from './types.js'
12
+
13
/**
 * Git status health check: current branch, uncommitted changes, unpushed
 * commits. Starts from a full score of 3 and deducts for a dirty or
 * un-synced working tree.
 *
 * @param {string} projectPath - project root (must contain .git for a full check)
 * @returns {Promise<object>} createCheck result (score 0-3)
 */
export async function check(projectPath) {
  const result = createCheck('git', 3)

  if (!existsSync(join(projectPath, '.git'))) {
    result.status = 'warning'
    result.details.message = 'Not a git repository'
    return result
  }

  try {
    const runGit = (cmd) => execSync(cmd, { cwd: projectPath, encoding: 'utf-8' }).trim()

    const branch = runGit('git rev-parse --abbrev-ref HEAD')
    result.details.branch = branch

    const dirtyLines = runGit('git status --porcelain').split('\n').filter(l => l.trim())
    result.details.uncommittedChanges = dirtyLines.length

    // Missing remote branch falls back to an empty result via the shell `||`,
    // and any other failure is treated as zero unpushed commits.
    try {
      const ahead = runGit(`git log origin/${branch}..HEAD --oneline 2>/dev/null || echo ""`)
      result.details.unpushedCommits = ahead.split('\n').filter(l => l.trim()).length
    } catch {
      result.details.unpushedCommits = 0
    }

    // Scoring: -2 for a very dirty tree (>10 changes), -1 for any changes,
    // -1 more for more than five unpushed commits.
    result.score = 3
    if (result.details.uncommittedChanges > 10) {
      result.score -= 2
      result.status = 'warning'
    } else if (result.details.uncommittedChanges > 0) {
      result.score -= 1
    }
    if (result.details.unpushedCommits > 5) {
      result.score -= 1
      result.status = 'warning'
    }
    result.score = Math.max(0, result.score)
  } catch {
    result.status = 'error'
    result.details.error = 'Failed to check git status'
  }

  return result
}