docguard-cli 0.5.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57) hide show
  1. package/LICENSE +21 -0
  2. package/PHILOSOPHY.md +150 -0
  3. package/README.md +309 -0
  4. package/STANDARD.md +751 -0
  5. package/cli/commands/agents.mjs +221 -0
  6. package/cli/commands/audit.mjs +92 -0
  7. package/cli/commands/badge.mjs +72 -0
  8. package/cli/commands/ci.mjs +80 -0
  9. package/cli/commands/diagnose.mjs +273 -0
  10. package/cli/commands/diff.mjs +360 -0
  11. package/cli/commands/fix.mjs +610 -0
  12. package/cli/commands/generate.mjs +842 -0
  13. package/cli/commands/guard.mjs +158 -0
  14. package/cli/commands/hooks.mjs +227 -0
  15. package/cli/commands/init.mjs +249 -0
  16. package/cli/commands/score.mjs +396 -0
  17. package/cli/commands/watch.mjs +143 -0
  18. package/cli/docguard.mjs +458 -0
  19. package/cli/validators/architecture.mjs +380 -0
  20. package/cli/validators/changelog.mjs +39 -0
  21. package/cli/validators/docs-sync.mjs +110 -0
  22. package/cli/validators/drift.mjs +101 -0
  23. package/cli/validators/environment.mjs +70 -0
  24. package/cli/validators/freshness.mjs +224 -0
  25. package/cli/validators/security.mjs +101 -0
  26. package/cli/validators/structure.mjs +88 -0
  27. package/cli/validators/test-spec.mjs +115 -0
  28. package/docs/ai-integration.md +179 -0
  29. package/docs/commands.md +239 -0
  30. package/docs/configuration.md +96 -0
  31. package/docs/faq.md +155 -0
  32. package/docs/installation.md +81 -0
  33. package/docs/profiles.md +103 -0
  34. package/docs/quickstart.md +79 -0
  35. package/package.json +57 -0
  36. package/templates/ADR.md.template +64 -0
  37. package/templates/AGENTS.md.template +88 -0
  38. package/templates/ARCHITECTURE.md.template +78 -0
  39. package/templates/CHANGELOG.md.template +16 -0
  40. package/templates/CURRENT-STATE.md.template +64 -0
  41. package/templates/DATA-MODEL.md.template +66 -0
  42. package/templates/DEPLOYMENT.md.template +66 -0
  43. package/templates/DRIFT-LOG.md.template +18 -0
  44. package/templates/ENVIRONMENT.md.template +43 -0
  45. package/templates/KNOWN-GOTCHAS.md.template +69 -0
  46. package/templates/ROADMAP.md.template +82 -0
  47. package/templates/RUNBOOKS.md.template +115 -0
  48. package/templates/SECURITY.md.template +42 -0
  49. package/templates/TEST-SPEC.md.template +55 -0
  50. package/templates/TROUBLESHOOTING.md.template +96 -0
  51. package/templates/VENDOR-BUGS.md.template +74 -0
  52. package/templates/ci/github-actions.yml +39 -0
  53. package/templates/commands/docguard.fix.md +65 -0
  54. package/templates/commands/docguard.guard.md +40 -0
  55. package/templates/commands/docguard.init.md +62 -0
  56. package/templates/commands/docguard.review.md +44 -0
  57. package/templates/commands/docguard.update.md +44 -0
@@ -0,0 +1,224 @@
1
+ /**
2
+ * Freshness Validator — Check if documentation is stale relative to code changes.
3
+ * Uses git history to compare when docs were last modified vs when code was last changed.
4
+ *
5
+ * This catches the exact issue the user identified: docs say "[ ] planned"
6
+ * but the code has already been implemented and committed.
7
+ */
8
+
9
import { existsSync, readdirSync, statSync } from 'node:fs';
import { resolve, join, extname } from 'node:path';
import { execFileSync, execSync } from 'node:child_process';
12
+
13
// Directory names excluded from project scans.
// NOTE(review): this Set is not referenced anywhere in this validator — the
// freshness checks below only shell out to git and never walk the filesystem.
// Looks like a leftover copied from a sibling validator; candidate for
// removal (verify nothing imports it before deleting).
const IGNORE_DIRS = new Set([
  'node_modules', '.git', '.next', 'dist', 'build',
  'coverage', '.cache', '__pycache__', '.venv', 'vendor',
  'templates', 'configs', 'Research',
]);
18
+
19
+ /**
20
+ * Get the last git commit date for a file.
21
+ * Returns null if the file isn't tracked or git isn't available.
22
+ */
23
/**
 * Get the last git commit date for a file.
 *
 * @param {string} filePath - File path relative to `dir`, passed to git as a pathspec.
 * @param {string} dir - Directory to run git in.
 * @returns {Date|null} Author date of the latest commit touching the file,
 *   or null if the file is untracked or git is unavailable.
 */
function getLastGitDate(filePath, dir) {
  try {
    // execFileSync passes filePath as a literal argv entry, so paths that
    // contain quotes, spaces, or shell metacharacters can neither break the
    // command nor inject into a shell (the original interpolated the path
    // into an execSync command string).
    const result = execFileSync(
      'git',
      ['log', '-1', '--format=%aI', '--', filePath],
      { cwd: dir, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
    ).trim();
    return result ? new Date(result) : null;
  } catch {
    // Not a repo, git missing, or file unknown to git — treat all as "no date".
    return null;
  }
}
34
+
35
+ /**
36
+ * Get the count of commits that touched code files since a given date.
37
+ */
38
/**
 * Get the count of commits that modified code files since a given date.
 *
 * @param {Date} date - Lower bound (exclusive) for the commit search.
 * @param {string} dir - Directory to run git in.
 * @returns {number} Number of matching commits; 0 on any git failure.
 */
function getCodeCommitsSince(date, dir) {
  try {
    const isoDate = date.toISOString();
    // Count lines in JS rather than piping through `wc -l`: the pipe required
    // a shell and `wc` does not exist on stock Windows, making the original
    // silently return 0 there.
    const result = execFileSync(
      'git',
      [
        'log', `--since=${isoDate}`, '--oneline', '--diff-filter=M', '--',
        '*.js', '*.mjs', '*.ts', '*.tsx', '*.py', '*.java', '*.go',
      ],
      { cwd: dir, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
    ).trim();
    // One commit per non-empty line of `--oneline` output.
    return result ? result.split('\n').length : 0;
  } catch {
    return 0;
  }
}
50
+
51
+ /**
52
+ * Check if git is available in this project.
53
+ */
54
/**
 * Check whether `dir` is inside a git work tree.
 *
 * @param {string} dir - Directory to probe.
 * @returns {boolean} true if `git rev-parse --is-inside-work-tree` succeeds.
 */
function isGitRepo(dir) {
  const gitOptions = {
    cwd: dir,
    encoding: 'utf-8',
    stdio: ['pipe', 'pipe', 'pipe'],
  };
  try {
    execSync('git rev-parse --is-inside-work-tree', gitOptions);
  } catch {
    // git missing, dir missing, or not a work tree — all mean "not a repo".
    return false;
  }
  return true;
}
64
+
65
+ /**
66
+ * Get total number of commits in the repo.
67
+ */
68
/**
 * Get the total number of commits reachable from HEAD.
 *
 * @param {string} dir - Directory to run git in.
 * @returns {number} Commit count; 0 on any git failure (no repo, no HEAD).
 */
function getTotalCommits(dir) {
  try {
    const output = execSync('git rev-list --count HEAD', {
      cwd: dir, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'],
    }).trim();
    // Explicit radix: bare parseInt() is the classic footgun for strings
    // with leading zeros or prefixes.
    return Number.parseInt(output, 10) || 0;
  } catch {
    return 0;
  }
}
77
+
78
+ /**
79
+ * Get the last N commits touching code files (not docs).
80
+ */
81
/**
 * Get the last N commits that touched code files (not docs).
 *
 * @param {string} dir - Directory to run git in.
 * @param {number} [count=5] - Maximum number of commits to return.
 * @returns {string[]} Lines of "<short-hash> <ISO-date> <subject>"; empty on failure.
 */
function getRecentCodeCommits(dir, count = 5) {
  const codePathspecs = '"*.js" "*.mjs" "*.ts" "*.tsx" "*.py" "*.java"';
  try {
    const output = execSync(
      `git log -${count} --format="%h %aI %s" -- ${codePathspecs}`,
      { cwd: dir, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
    ).trim();
    if (!output) return [];
    return output.split('\n');
  } catch {
    return [];
  }
}
92
+
93
/**
 * Freshness validator entry point — flags docs that look stale relative to
 * git activity on code files.
 *
 * Performs three groups of checks:
 *   1. Each canonical doc vs. code-commit activity since its last commit.
 *   2. CHANGELOG.md vs. the date of the latest code commit.
 *   3. DRIFT-LOG.md vs. "DRIFT:" comments appearing in recent code patches.
 *
 * @param {string} dir - Project root (expected to be a git work tree).
 * @param {object} config - DocGuard config; reads config.requiredFiles?.changelog
 *   and config.requiredFiles?.driftLog, both optional with defaults.
 * @returns {Array<{status: 'skip'|'warn'|'pass', message: string}>} One entry per check.
 */
export function validateFreshness(dir, config) {
  const results = [];

  // Without git history there is nothing to compare against.
  if (!isGitRepo(dir)) {
    results.push({
      status: 'skip',
      message: 'Not a git repository — freshness check skipped',
    });
    return results;
  }

  // Brand-new repos produce meaningless staleness signals; bail out early.
  const totalCommits = getTotalCommits(dir);
  if (totalCommits < 3) {
    results.push({
      status: 'skip',
      message: `Only ${totalCommits} commits — freshness check needs ≥3 commits`,
    });
    return results;
  }

  // ── 1. Check each canonical doc's last update vs latest code commit ──
  const docFiles = [
    'docs-canonical/ARCHITECTURE.md',
    'docs-canonical/DATA-MODEL.md',
    'docs-canonical/SECURITY.md',
    'docs-canonical/TEST-SPEC.md',
    'docs-canonical/ENVIRONMENT.md',
    'ROADMAP.md',
    'AGENTS.md',
  ];

  // Get the most recent code commit date.
  // Format is "%h %aI %s", so after split(' ') the ISO author date is parts[1].
  const recentCodeCommits = getRecentCodeCommits(dir, 1);
  let latestCodeDate = null;
  if (recentCodeCommits.length > 0) {
    const parts = recentCodeCommits[0].split(' ');
    if (parts.length >= 2) {
      latestCodeDate = new Date(parts[1]);
    }
  }

  const STALE_THRESHOLD_DAYS = 30; // Docs older than 30 days vs latest code = stale
  const WARNING_THRESHOLD_COMMITS = 10; // More than 10 code commits since last doc update = stale

  for (const docFile of docFiles) {
    const docPath = resolve(dir, docFile);
    // Missing docs are the structure validator's concern, not freshness.
    if (!existsSync(docPath)) continue;

    const docDate = getLastGitDate(docFile, dir);
    if (!docDate) {
      // File exists but isn't tracked in git yet
      results.push({
        status: 'warn',
        message: `${docFile} exists but is not yet committed to git`,
      });
      continue;
    }

    // Check how many code commits happened since this doc was last updated
    const codeCommitsSince = getCodeCommitsSince(docDate, dir);

    if (codeCommitsSince >= WARNING_THRESHOLD_COMMITS) {
      results.push({
        status: 'warn',
        message: `${docFile} — ${codeCommitsSince} code commits since last doc update (${docDate.toISOString().split('T')[0]})`,
      });
      continue;
    }

    // Check age vs latest code commit.
    // Date subtraction yields milliseconds; divide down to whole days.
    if (latestCodeDate) {
      const daysDiff = Math.floor((latestCodeDate - docDate) / (1000 * 60 * 60 * 24));
      if (daysDiff > STALE_THRESHOLD_DAYS) {
        results.push({
          status: 'warn',
          message: `${docFile} — last updated ${daysDiff} days before latest code change`,
        });
        continue;
      }
    }

    // Neither threshold tripped — this doc is considered fresh.
    results.push({
      status: 'pass',
      message: `${docFile} is fresh`,
    });
  }

  // ── 2. Check CHANGELOG.md was updated in the last 5 code commits ──
  const changelogPath = resolve(dir, config.requiredFiles?.changelog || 'CHANGELOG.md');
  if (existsSync(changelogPath)) {
    const changelogDate = getLastGitDate(config.requiredFiles?.changelog || 'CHANGELOG.md', dir);
    // If either date is unavailable, no result is pushed for this check at all.
    if (changelogDate && latestCodeDate) {
      const daysDiff = Math.floor((latestCodeDate - changelogDate) / (1000 * 60 * 60 * 24));
      if (daysDiff > 7) {
        results.push({
          status: 'warn',
          message: `CHANGELOG.md not updated in ${daysDiff} days despite code changes`,
        });
      } else {
        results.push({
          status: 'pass',
          message: 'CHANGELOG.md is up to date',
        });
      }
    }
  }

  // ── 3. Check DRIFT-LOG.md was updated if there are DRIFT comments ──
  const driftPath = resolve(dir, config.requiredFiles?.driftLog || 'DRIFT-LOG.md');
  if (existsSync(driftPath)) {
    const driftDate = getLastGitDate(config.requiredFiles?.driftLog || 'DRIFT-LOG.md', dir);
    // Check for recent DRIFT comments added to code.
    // NOTE(review): the `| grep -c ... || true` pipeline is POSIX-only; on
    // shells without grep the catch below silently skips this check.
    try {
      const recentDrifts = execSync(
        `git log -5 --all -p -- "*.js" "*.mjs" "*.ts" "*.tsx" "*.py" | grep -c "DRIFT:" || true`,
        { cwd: dir, encoding: 'utf-8', stdio: ['pipe', 'pipe', 'pipe'] }
      ).trim();
      // parseInt without radix — grep -c only emits decimal digits, so this is safe here.
      const driftCount = parseInt(recentDrifts) || 0;
      if (driftCount > 0 && driftDate) {
        const codeCommitsSince = getCodeCommitsSince(driftDate, dir);
        if (codeCommitsSince > 3) {
          results.push({
            status: 'warn',
            message: `DRIFT-LOG.md may be stale — ${driftCount} DRIFT comments found in recent commits`,
          });
        }
      }
    } catch { /* skip */ }
  }

  return results;
}
@@ -0,0 +1,101 @@
1
+ /**
2
+ * Security Validator — Basic checks for secrets in code
3
+ */
4
+
5
+ import { existsSync, readFileSync, readdirSync, statSync } from 'node:fs';
6
+ import { resolve, join, extname } from 'node:path';
7
+
8
// File extensions treated as scannable source code.
// NOTE(review): extname() returns '' for a bare ".env" dotfile, so the
// '.env' entry only matches names like "prod.env" — and validateSecurity
// below skips paths ending in ".env" anyway; this entry appears inert.
const CODE_EXTENSIONS = new Set([
  '.js', '.mjs', '.cjs', '.ts', '.tsx', '.jsx',
  '.py', '.java', '.go', '.rs', '.swift', '.kt',
  '.rb', '.php', '.cs', '.env',
]);

// Directories never descended into while walking the project tree.
const IGNORE_DIRS = new Set([
  'node_modules', '.git', '.next', 'dist', 'build',
  'coverage', '.cache', '__pycache__', '.venv', 'vendor',
]);

// Patterns that might indicate hardcoded secrets.
// All use the /g flag, which makes them stateful (lastIndex advances on
// exec) — callers must reset lastIndex before scanning each new file.
const SECRET_PATTERNS = [
  { pattern: /(?:password|passwd|pwd)\s*[:=]\s*['"][^'"]{8,}['"]/gi, label: 'hardcoded password' },
  { pattern: /(?:api[_-]?key|apikey)\s*[:=]\s*['"][^'"]{16,}['"]/gi, label: 'hardcoded API key' },
  { pattern: /(?:secret[_-]?key|secretkey)\s*[:=]\s*['"][^'"]{16,}['"]/gi, label: 'hardcoded secret key' },
  { pattern: /(?:access[_-]?token|accesstoken)\s*[:=]\s*['"][^'"]{16,}['"]/gi, label: 'hardcoded access token' },
  { pattern: /AKIA[0-9A-Z]{16}/g, label: 'AWS Access Key ID' },
  { pattern: /(?:sk-|sk_live_|sk_test_)[a-zA-Z0-9]{20,}/g, label: 'API secret key (Stripe/OpenAI pattern)' },
];
28
+
29
/**
 * Security validator — scans source files for likely hardcoded secrets and
 * checks that .gitignore keeps .env files out of version control.
 *
 * @param {string} projectDir - Project root to scan.
 * @param {object} config - DocGuard config (not read by this validator;
 *   kept for signature parity with the other validators).
 * @returns {{name: string, errors: string[], warnings: string[], passed: number, total: number}}
 */
export function validateSecurity(projectDir, config) {
  const results = { name: 'security', errors: [], warnings: [], passed: 0, total: 0 };
  const findings = [];

  walkDir(projectDir, (filePath) => {
    if (!CODE_EXTENSIONS.has(extname(filePath))) return;

    // .env / .env.local legitimately hold secrets; .env.example holds placeholders.
    if (
      filePath.endsWith('.env') ||
      filePath.endsWith('.env.local') ||
      filePath.endsWith('.env.example')
    ) {
      return;
    }

    const content = readFileSync(filePath, 'utf-8');
    const relPath = filePath.replace(projectDir + '/', '');

    for (const { pattern, label } of SECRET_PATTERNS) {
      pattern.lastIndex = 0; // Shared /g regexes are stateful — reset per file.
      const match = pattern.exec(content);
      if (match) {
        // Record only a truncated prefix of the match so the secret itself
        // never appears in full in validator output.
        findings.push({ file: relPath, label, match: match[0].substring(0, 30) + '...' });
      }
    }
  });

  // Check 1: no suspected secrets anywhere in the scanned tree.
  results.total = 1;
  if (findings.length > 0) {
    for (const finding of findings) {
      results.errors.push(`${finding.file}: possible ${finding.label} found`);
    }
  } else {
    results.passed = 1;
  }

  // Check 2: .gitignore exists and excludes .env files.
  results.total++;
  const gitignorePath = resolve(projectDir, '.gitignore');
  if (!existsSync(gitignorePath)) {
    results.warnings.push('No .gitignore found — secrets may be committed');
  } else {
    const gitignore = readFileSync(gitignorePath, 'utf-8');
    if (gitignore.includes('.env') || gitignore.includes('.env.local')) {
      results.passed++;
    } else {
      results.warnings.push('.gitignore does not include .env — secrets may be committed');
    }
  }

  return results;
}
80
+
81
/**
 * Recursively walk a directory tree, invoking `callback(fullPath)` for every
 * regular file found. Ignored directories and hidden entries (except '.env')
 * are pruned; unreadable entries are silently skipped.
 *
 * @param {string} dir - Directory to walk; a missing path is a no-op.
 * @param {(filePath: string) => void} callback - Invoked once per file.
 */
function walkDir(dir, callback) {
  if (!existsSync(dir)) return;

  for (const entry of readdirSync(dir)) {
    if (IGNORE_DIRS.has(entry)) continue;
    // Hidden entries are skipped, but '.env' is deliberately let through.
    if (entry.startsWith('.') && entry !== '.env') continue;

    const fullPath = join(dir, entry);
    try {
      const info = statSync(fullPath);
      if (info.isDirectory()) {
        walkDir(fullPath, callback);
      } else if (info.isFile()) {
        callback(fullPath);
      }
    } catch {
      // Unreadable entry (permissions, race) — skip and keep walking.
      // Note: callback errors are swallowed here too, by design.
    }
  }
}
@@ -0,0 +1,88 @@
1
+ /**
2
+ * Structure Validator — Checks that all required CDD files exist
3
+ */
4
+
5
+ import { existsSync, readFileSync } from 'node:fs';
6
+ import { resolve } from 'node:path';
7
+
8
/**
 * Structure validator — verifies that every required CDD file exists.
 *
 * @param {string} projectDir - Project root to check.
 * @param {object} config - DocGuard config; reads config.requiredFiles:
 *   { canonical: string[], agentFile: string[], changelog: string, driftLog: string }.
 * @returns {{name: string, errors: string[], warnings: string[], passed: number, total: number}}
 */
export function validateStructure(projectDir, config) {
  const results = { name: 'structure', errors: [], warnings: [], passed: 0, total: 0 };

  // One pass/fail check for a single required file path (shared by the
  // canonical, changelog, and drift-log checks, which were three copies
  // of the same pattern).
  const checkRequiredFile = (file) => {
    results.total++;
    if (existsSync(resolve(projectDir, file))) {
      results.passed++;
    } else {
      results.errors.push(`Missing required file: ${file}`);
    }
  };

  // Canonical docs — each one is individually required.
  for (const file of config.requiredFiles.canonical) {
    checkRequiredFile(file);
  }

  // Agent file — any one of the configured candidates is fine.
  results.total++;
  const agentFileFound = config.requiredFiles.agentFile.some((f) =>
    existsSync(resolve(projectDir, f))
  );
  if (agentFileFound) {
    results.passed++;
  } else {
    results.errors.push(`Missing agent file: ${config.requiredFiles.agentFile.join(' or ')}`);
  }

  // Changelog and drift log are single required files.
  checkRequiredFile(config.requiredFiles.changelog);
  checkRequiredFile(config.requiredFiles.driftLog);

  return results;
}
51
+
52
+ /**
53
+ * Check that canonical doc files contain required sections
54
+ */
55
/**
 * Check that canonical doc files contain their required markdown sections.
 * Files that do not exist are skipped (the structure validator reports those).
 *
 * @param {string} projectDir - Project root containing docs-canonical/.
 * @param {object} config - DocGuard config; reads config.projectTypeConfig
 *   flags needsDatabase and needsEnvVars (both default to "required").
 * @returns {{name: string, errors: string[], warnings: string[], passed: number, total: number}}
 */
export function validateDocSections(projectDir, config) {
  const results = { name: 'doc-sections', errors: [], warnings: [], passed: 0, total: 0 };
  const ptc = config.projectTypeConfig || {};

  // CLI/library projects (needsDatabase === false) don't need entity docs.
  const dataModelSections = ptc.needsDatabase !== false ? ['## Entities'] : [];
  // Setup steps are always required; env-var docs only when the project uses them.
  const environmentSections = ptc.needsEnvVars !== false
    ? ['## Environment Variables', '## Setup Steps']
    : ['## Setup Steps'];

  const requiredSections = {
    'docs-canonical/ARCHITECTURE.md': ['## System Overview', '## Component Map', '## Tech Stack'],
    'docs-canonical/DATA-MODEL.md': dataModelSections,
    'docs-canonical/SECURITY.md': ['## Authentication', '## Secrets Management'],
    'docs-canonical/TEST-SPEC.md': ['## Test Categories', '## Coverage Rules'],
    'docs-canonical/ENVIRONMENT.md': environmentSections,
  };

  for (const [file, sections] of Object.entries(requiredSections)) {
    const fullPath = resolve(projectDir, file);
    if (!existsSync(fullPath)) continue;

    const content = readFileSync(fullPath, 'utf-8');
    for (const heading of sections) {
      results.total++;
      if (content.includes(heading)) {
        results.passed++;
      } else {
        results.warnings.push(`${file}: missing section "${heading}"`);
      }
    }
  }

  return results;
}
@@ -0,0 +1,115 @@
1
+ /**
2
+ * Test Spec Validator — Checks that tests exist per TEST-SPEC.md coverage rules
3
+ * Now respects projectTypeConfig (e.g., skip E2E for CLI tools)
4
+ */
5
+
6
+ import { existsSync, readFileSync } from 'node:fs';
7
+ import { resolve } from 'node:path';
8
+
9
/**
 * Validate the coverage claims declared in docs-canonical/TEST-SPEC.md.
 * Parses two markdown tables — the Source-to-Test Map and the Critical
 * Journeys/Flows table — and turns ❌/⚠️/✅ status cells into warnings or
 * passes. Respects projectTypeConfig (e.g., E2E checks skipped for CLI tools).
 *
 * @param {string} projectDir - Project root containing docs-canonical/.
 * @param {object} config - DocGuard config; reads config.projectTypeConfig.needsE2E.
 * @returns {{name: string, errors: string[], warnings: string[], passed: number, total: number}}
 */
export function validateTestSpec(projectDir, config) {
  const results = { name: 'test-spec', errors: [], warnings: [], passed: 0, total: 0 };

  const testSpecPath = resolve(projectDir, 'docs-canonical/TEST-SPEC.md');
  if (!existsSync(testSpecPath)) {
    return results; // Structure validator catches this
  }

  const content = readFileSync(testSpecPath, 'utf-8');
  const ptc = config.projectTypeConfig || {};

  // Parse the Source-to-Test Map table (new header) or Service-to-Test Map (old header)
  const serviceMapMatch = content.match(
    /## (?:Service-to-Test Map|Source-to-Test Map)[\s\S]*?\n\|.*\|.*\|.*\|([\s\S]*?)(?=\n##|\n$|$)/
  );

  if (serviceMapMatch) {
    const tableContent = serviceMapMatch[1];
    const rows = tableContent
      .split('\n')
      .filter(line => line.startsWith('|') && !line.includes('---'));

    for (const row of rows) {
      const cells = row
        .split('|')
        .map(s => s.trim())
        .filter(s => s.length > 0);

      if (cells.length < 3) continue;

      const sourceFile = cells[0];
      const status = cells[cells.length - 1]; // Last column is always status

      // Skip template/example rows and italic placeholder rows
      if (sourceFile.startsWith('<!--') || sourceFile === 'Source File' || sourceFile.startsWith('*')) continue;

      if (status && status.includes('❌')) {
        results.total++;
        results.warnings.push(
          `TEST-SPEC declares ${sourceFile} as ❌ — missing tests`
        );
      } else if (status && status.includes('⚠️')) {
        results.total++;
        results.warnings.push(
          `TEST-SPEC declares ${sourceFile} as ⚠️ — partial coverage`
        );
      } else if (status && status.includes('✅')) {
        results.total++;
        results.passed++;
      }
    }
  }

  // Parse Critical User Journeys OR Critical CLI Flows
  // Only check E2E journeys if the project type needs E2E
  if (ptc.needsE2E !== false) {
    const journeyMatch = content.match(
      /## Critical (?:User Journeys|CLI Flows)[\s\S]*?\n\|.*\|.*\|.*\|.*\|([\s\S]*?)(?=\n##|\n---|\n$|$)/
    );

    if (journeyMatch) {
      const tableContent = journeyMatch[1];
      const rows = tableContent
        .split('\n')
        .filter(line => line.startsWith('|') && !line.includes('---'));

      for (const row of rows) {
        const cells = row
          .split('|')
          .map(s => s.trim())
          .filter(s => s.length > 0);

        if (cells.length < 4) continue;

        // FIX: read status from the LAST column, consistent with the
        // Source-to-Test Map above; the previous destructuring took
        // cells[3], which mis-reads tables with more than four columns.
        const [num, journey, testFile] = cells;
        const status = cells[cells.length - 1];

        // Skip template rows (comments), headers
        if (num.startsWith('<!--') || num === '#' || journey.startsWith('<!--')) continue;

        if (status && status.includes('❌')) {
          results.total++;
          results.warnings.push(
            `E2E Journey #${num} (${journey}) — missing test: ${testFile}`
          );
        } else if (status && status.includes('✅')) {
          results.total++;
          results.passed++;
        }
      }
    }
  }

  // If no test spec entries parsed, fall back to checking that a
  // conventional test directory exists at all.
  if (results.total === 0) {
    results.total = 1;
    const commonTestDirs = ['tests', 'test', '__tests__', 'spec'];
    const hasTestDir = commonTestDirs.some(d =>
      existsSync(resolve(projectDir, d))
    );
    if (hasTestDir) {
      results.passed = 1;
    } else {
      results.warnings.push('No test directory found (expected: tests/, test/, __tests__/)');
    }
  }

  return results;
}
@@ -0,0 +1,179 @@
1
+ # AI Integration Guide
2
+
3
+ DocGuard is **AI-native**. It generates prompts that AI agents execute — the human reviews, not writes.
4
+
5
+ ## How It Works
6
+
7
+ ```
8
+ docguard diagnose → AI reads output → AI writes docs → docguard guard → ✅
9
+ ```
10
+
11
+ DocGuard is designed to be used **by** AI agents, not just **for** humans.
12
+
13
+ ## Supported AI Agents
14
+
15
+ DocGuard works with any AI coding agent that can read CLI output:
16
+
17
+ | Agent | Integration |
18
+ |-------|------------|
19
+ | **Claude Code** | Reads `diagnose` output, writes docs, runs `guard` |
20
+ | **GitHub Copilot** | Slash commands in `.github/commands/` |
21
+ | **Cursor** | Slash commands in `.cursor/rules/` |
22
+ | **Google Antigravity** | Workflows in `.agents/workflows/` |
23
+ | **Google Gemini** | Commands in `.gemini/commands/` |
24
+ | **Any CLI-capable LLM** | Reads JSON output from `--format json` |
25
+
26
+ ## Slash Commands
27
+
28
+ `docguard init` auto-installs slash commands for detected AI agents:
29
+
30
+ ```
31
+ .github/commands/diagnose.md # GitHub Copilot
32
+ .cursor/rules/diagnose.md # Cursor
33
+ .gemini/commands/diagnose.md # Google Gemini
34
+ .agents/workflows/diagnose.md # Antigravity
35
+ ```
36
+
37
+ ## The AI Workflow
38
+
39
+ ### Step 1: Diagnose
40
+
41
+ ```bash
42
+ npx docguard diagnose
43
+ ```
44
+
45
+ Output:
46
+ ```
47
+ 🔍 DocGuard Diagnose — my-project
48
+ Profile: standard | Score: 75/100 (B)
49
+ Guard: 35/41 passed | Status: WARN
50
+
51
+ Warnings (3):
52
+ ⚠ [Freshness] docs-canonical/ARCHITECTURE.md — 15 commits since last update
53
+ Fix: docguard fix --doc architecture
54
+
55
+ 📋 Remediation Plan:
56
+ 1. docguard fix --doc architecture
57
+ 2. docguard guard ← verify fixes
58
+
59
+ 🤖 AI-Ready Prompt:
60
+ TASK: Fix 3 documentation issue(s) in project "my-project"
61
+ ...
62
+ ```
63
+
64
+ ### Step 2: AI Fixes
65
+
66
+ The AI reads the remediation plan and executes `docguard fix --doc <name>` for each issue. Each fix command outputs research instructions:
67
+
68
+ ```bash
69
+ npx docguard fix --doc architecture
70
+ ```
71
+
72
+ Output:
73
+ ```
74
+ TASK: Write ARCHITECTURE.md for "my-project"
75
+
76
+ RESEARCH STEPS:
77
+ 1. Read package.json for dependencies and project structure
78
+ 2. List top-level directories (src/, lib/, cli/)
79
+ 3. Read 2-3 representative files per directory
80
+ 4. Map the import graph
81
+ 5. Identify external dependencies
82
+
83
+ WRITE THE DOCUMENT:
84
+ - System Overview (2-3 sentences)
85
+ - Component Map (table of modules)
86
+ - Layer Boundaries (import rules)
87
+ - Data Flow (request lifecycle)
88
+ ```
89
+
90
+ ### Step 3: Verify
91
+
92
+ ```bash
93
+ npx docguard guard
94
+ ```
95
+
96
+ If all checks pass → done. If issues remain → repeat from Step 1.
97
+
98
+ ## JSON Output for Automation
99
+
100
+ For programmatic integration:
101
+
102
+ ```bash
103
+ npx docguard diagnose --format json
104
+ ```
105
+
106
+ ```json
107
+ {
108
+ "project": "my-project",
109
+ "profile": "standard",
110
+ "status": "WARN",
111
+ "score": 75,
112
+ "grade": "B",
113
+ "issues": [
114
+ {
115
+ "severity": "warning",
116
+ "validator": "Freshness",
117
+ "message": "ARCHITECTURE.md — 15 commits since last update",
118
+ "command": "docguard fix --doc architecture",
119
+ "docTarget": "architecture"
120
+ }
121
+ ],
122
+ "fixCommands": ["docguard fix --doc architecture"]
123
+ }
124
+ ```
125
+
126
+ ```bash
127
+ npx docguard guard --format json
128
+ ```
129
+
130
+ ```json
131
+ {
132
+ "project": "my-project",
133
+ "profile": "standard",
134
+ "status": "PASS",
135
+ "passed": 41,
136
+ "total": 41,
137
+ "validators": [
138
+ { "name": "Structure", "status": "pass", "passed": 8, "total": 8 }
139
+ ]
140
+ }
141
+ ```
142
+
143
+ ## CI/CD Integration
144
+
145
+ ### GitHub Actions
146
+
147
+ DocGuard ships a ready-to-use workflow:
148
+
149
+ ```yaml
150
+ # .github/workflows/docguard.yml
151
+ name: DocGuard CDD Check
152
+ on: [pull_request]
153
+ jobs:
154
+ docguard:
155
+ runs-on: ubuntu-latest
156
+ steps:
157
+ - uses: actions/checkout@v4
158
+ - uses: actions/setup-node@v4
159
+ with: { node-version: '20' }
160
+ - run: npx docguard ci --format json --threshold 70
161
+ ```
162
+
163
+ Or copy `templates/ci/github-actions.yml` from this repo.
164
+
165
+ ### Pre-commit Hook
166
+
167
+ ```bash
168
+ npx docguard hooks
169
+ ```
170
+
171
+ Automatically runs `guard` before every commit.
172
+
173
+ ## Best Practices for AI Agents
174
+
175
+ 1. **Always run `diagnose` first** — it's the one command that identifies everything
176
+ 2. **Use `--format json`** for structured, parseable output
177
+ 3. **Run `guard` after fixes** to verify — loop until all checks pass
178
+ 4. **Use `fix --doc <name>`** for targeted prompts when you know which doc needs work
179
+ 5. **Check `score --tax`** periodically to ensure documentation isn't becoming a burden