jumpstart-mode 1.1.11 → 1.1.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/agents/jumpstart-adversary.agent.md +2 -1
- package/.github/agents/jumpstart-architect.agent.md +6 -7
- package/.github/agents/jumpstart-challenger.agent.md +2 -1
- package/.github/agents/jumpstart-developer.agent.md +1 -1
- package/.github/agents/jumpstart-devops.agent.md +2 -2
- package/.github/agents/jumpstart-diagram-verifier.agent.md +2 -1
- package/.github/agents/jumpstart-maintenance.agent.md +1 -0
- package/.github/agents/jumpstart-performance.agent.md +1 -0
- package/.github/agents/jumpstart-pm.agent.md +1 -1
- package/.github/agents/jumpstart-refactor.agent.md +1 -0
- package/.github/agents/jumpstart-requirements-extractor.agent.md +1 -0
- package/.github/agents/jumpstart-researcher.agent.md +1 -0
- package/.github/agents/jumpstart-retrospective.agent.md +1 -0
- package/.github/agents/jumpstart-reviewer.agent.md +2 -0
- package/.github/agents/jumpstart-scout.agent.md +1 -1
- package/.github/agents/jumpstart-scrum-master.agent.md +1 -0
- package/.github/agents/jumpstart-security.agent.md +2 -1
- package/.github/agents/jumpstart-tech-writer.agent.md +1 -0
- package/.github/agents/jumpstart-uiux-designer.agent.md +66 -0
- package/.github/workflows/quality.yml +19 -2
- package/.jumpstart/agents/analyst.md +38 -0
- package/.jumpstart/agents/architect.md +39 -1
- package/.jumpstart/agents/challenger.md +38 -0
- package/.jumpstart/agents/developer.md +41 -0
- package/.jumpstart/agents/pm.md +38 -0
- package/.jumpstart/agents/scout.md +33 -0
- package/.jumpstart/agents/ux-designer.md +29 -9
- package/.jumpstart/commands/commands.md +6 -5
- package/.jumpstart/config.yaml +25 -1
- package/.jumpstart/roadmap.md +1 -1
- package/.jumpstart/schemas/timeline.schema.json +1 -0
- package/.jumpstart/skills/README.md +1 -0
- package/.jumpstart/skills/quality-gates/SKILL.md +126 -0
- package/.jumpstart/skills/skill-creator/SKILL.md +485 -357
- package/.jumpstart/skills/skill-creator/agents/analyzer.md +274 -0
- package/.jumpstart/skills/skill-creator/agents/comparator.md +202 -0
- package/.jumpstart/skills/skill-creator/agents/grader.md +223 -0
- package/.jumpstart/skills/skill-creator/assets/eval_review.html +146 -0
- package/.jumpstart/skills/skill-creator/eval-viewer/generate_review.py +471 -0
- package/.jumpstart/skills/skill-creator/eval-viewer/viewer.html +1325 -0
- package/.jumpstart/skills/skill-creator/references/schemas.md +430 -0
- package/.jumpstart/skills/skill-creator/scripts/__init__.py +0 -0
- package/.jumpstart/skills/skill-creator/scripts/aggregate_benchmark.py +401 -0
- package/.jumpstart/skills/skill-creator/scripts/generate_report.py +326 -0
- package/.jumpstart/skills/skill-creator/scripts/improve_description.py +247 -0
- package/.jumpstart/skills/skill-creator/scripts/package_skill.py +136 -110
- package/.jumpstart/skills/skill-creator/scripts/run_eval.py +310 -0
- package/.jumpstart/skills/skill-creator/scripts/run_loop.py +328 -0
- package/.jumpstart/skills/skill-creator/scripts/utils.py +47 -0
- package/.jumpstart/skills/ui-ux-pro-max/SKILL.md +266 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/charts.csv +26 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/colors.csv +97 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/icons.csv +101 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/landing.csv +31 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/products.csv +97 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/react-performance.csv +45 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/astro.csv +54 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/flutter.csv +53 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/html-tailwind.csv +56 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/jetpack-compose.csv +53 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/nextjs.csv +53 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/nuxt-ui.csv +51 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/nuxtjs.csv +59 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/react-native.csv +52 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/react.csv +54 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/shadcn.csv +61 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/svelte.csv +54 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/swiftui.csv +51 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/stacks/vue.csv +50 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/styles.csv +68 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/typography.csv +58 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/ui-reasoning.csv +101 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/ux-guidelines.csv +100 -0
- package/.jumpstart/skills/ui-ux-pro-max/data/web-interface.csv +31 -0
- package/.jumpstart/skills/ui-ux-pro-max/scripts/core.py +253 -0
- package/.jumpstart/skills/ui-ux-pro-max/scripts/design_system.py +1067 -0
- package/.jumpstart/skills/ui-ux-pro-max/scripts/search.py +114 -0
- package/.jumpstart/state/timeline.json +659 -0
- package/.jumpstart/templates/model-map.md +1 -1
- package/.jumpstart/templates/ux-design.md +3 -3
- package/.jumpstart/usage-log.json +74 -3
- package/AGENTS.md +1 -1
- package/README.md +64 -3
- package/bin/cli.js +3217 -1
- package/bin/headless-runner.js +62 -2
- package/bin/lib/agent-checkpoint.js +168 -0
- package/bin/lib/ai-evaluation.js +104 -0
- package/bin/lib/ai-intake.js +152 -0
- package/bin/lib/ambiguity-heatmap.js +152 -0
- package/bin/lib/artifact-comparison.js +104 -0
- package/bin/lib/ast-edit-engine.js +157 -0
- package/bin/lib/backlog-sync.js +338 -0
- package/bin/lib/bcdr-planning.js +158 -0
- package/bin/lib/bidirectional-trace.js +199 -0
- package/bin/lib/branch-workflow.js +266 -0
- package/bin/lib/cab-output.js +119 -0
- package/bin/lib/chat-integration.js +122 -0
- package/bin/lib/ci-cd-integration.js +208 -0
- package/bin/lib/codebase-retrieval.js +125 -0
- package/bin/lib/collaboration.js +168 -0
- package/bin/lib/compliance-packs.js +213 -0
- package/bin/lib/context-chunker.js +128 -0
- package/bin/lib/context-onboarding.js +122 -0
- package/bin/lib/contract-first.js +124 -0
- package/bin/lib/cost-router.js +148 -0
- package/bin/lib/credential-boundary.js +155 -0
- package/bin/lib/data-classification.js +180 -0
- package/bin/lib/data-contracts.js +129 -0
- package/bin/lib/db-evolution.js +158 -0
- package/bin/lib/decision-conflicts.js +299 -0
- package/bin/lib/delivery-confidence.js +361 -0
- package/bin/lib/dependency-upgrade.js +153 -0
- package/bin/lib/design-system.js +133 -0
- package/bin/lib/deterministic-artifacts.js +151 -0
- package/bin/lib/diagram-studio.js +115 -0
- package/bin/lib/domain-ontology.js +140 -0
- package/bin/lib/ea-review-packet.js +151 -0
- package/bin/lib/enterprise-search.js +123 -0
- package/bin/lib/enterprise-templates.js +140 -0
- package/bin/lib/environment-promotion.js +220 -0
- package/bin/lib/estimation-studio.js +130 -0
- package/bin/lib/event-modeling.js +133 -0
- package/bin/lib/evidence-collector.js +179 -0
- package/bin/lib/finops-planner.js +182 -0
- package/bin/lib/fitness-functions.js +279 -0
- package/bin/lib/focus.js +448 -0
- package/bin/lib/governance-dashboard.js +165 -0
- package/bin/lib/guided-handoff.js +120 -0
- package/bin/lib/impact-analysis.js +190 -0
- package/bin/lib/incident-feedback.js +157 -0
- package/bin/lib/integrate.js +1 -1
- package/bin/lib/knowledge-graph.js +122 -0
- package/bin/lib/legacy-modernizer.js +160 -0
- package/bin/lib/migration-planner.js +144 -0
- package/bin/lib/model-governance.js +185 -0
- package/bin/lib/model-router.js +144 -0
- package/bin/lib/multi-repo.js +272 -0
- package/bin/lib/next-phase.js +53 -8
- package/bin/lib/ops-ownership.js +152 -0
- package/bin/lib/parallel-agents.js +257 -0
- package/bin/lib/pattern-library.js +115 -0
- package/bin/lib/persona-packs.js +99 -0
- package/bin/lib/plan-executor.js +366 -0
- package/bin/lib/platform-engineering.js +119 -0
- package/bin/lib/playback-summaries.js +126 -0
- package/bin/lib/policy-engine.js +240 -0
- package/bin/lib/portfolio-reporting.js +357 -0
- package/bin/lib/pr-package.js +197 -0
- package/bin/lib/project-memory.js +235 -0
- package/bin/lib/prompt-governance.js +130 -0
- package/bin/lib/promptless-mode.js +128 -0
- package/bin/lib/quality-graph.js +193 -0
- package/bin/lib/raci-matrix.js +188 -0
- package/bin/lib/refactor-planner.js +167 -0
- package/bin/lib/reference-architectures.js +304 -0
- package/bin/lib/release-readiness.js +171 -0
- package/bin/lib/repo-graph.js +262 -0
- package/bin/lib/requirements-baseline.js +358 -0
- package/bin/lib/risk-register.js +211 -0
- package/bin/lib/role-approval.js +249 -0
- package/bin/lib/role-views.js +142 -0
- package/bin/lib/root-cause-analysis.js +132 -0
- package/bin/lib/runtime-debugger.js +154 -0
- package/bin/lib/safe-rename.js +135 -0
- package/bin/lib/secret-scanner.js +313 -0
- package/bin/lib/semantic-diff.js +335 -0
- package/bin/lib/sla-slo.js +210 -0
- package/bin/lib/smoke-tester.js +344 -0
- package/bin/lib/spec-comments.js +147 -0
- package/bin/lib/spec-maturity.js +287 -0
- package/bin/lib/sre-integration.js +154 -0
- package/bin/lib/structured-elicitation.js +174 -0
- package/bin/lib/telemetry-feedback.js +118 -0
- package/bin/lib/test-generator.js +146 -0
- package/bin/lib/timeline.js +2 -1
- package/bin/lib/tool-bridge.js +159 -0
- package/bin/lib/tool-guardrails.js +139 -0
- package/bin/lib/tool-schemas.js +281 -3
- package/bin/lib/transcript-ingestion.js +150 -0
- package/bin/lib/type-checker.js +261 -0
- package/bin/lib/uat-coverage.js +411 -0
- package/bin/lib/vendor-risk.js +173 -0
- package/bin/lib/waiver-workflow.js +174 -0
- package/bin/lib/web-dashboard.js +126 -0
- package/bin/lib/workshop-mode.js +165 -0
- package/bin/lib/workstream-ownership.js +104 -0
- package/package.json +1 -1
- package/.github/agents/jumpstart-ux-designer.agent.md +0 -45
|
@@ -0,0 +1,313 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* secret-scanner.js — Secret Scanning for Agents
|
|
3
|
+
*
|
|
4
|
+
* Scans files for accidentally committed secrets (API keys, tokens,
|
|
5
|
+
* passwords, private keys). Prevents sensitive data from reaching
|
|
6
|
+
* source control.
|
|
7
|
+
*
|
|
8
|
+
* Usage:
|
|
9
|
+
* echo '{"files":["src/config.js"],"root":"."}' | node bin/lib/secret-scanner.js
|
|
10
|
+
*
|
|
11
|
+
* Input (stdin JSON):
|
|
12
|
+
* {
|
|
13
|
+
* "files": ["src/config.js", "src/utils.ts"],
|
|
14
|
+
* "root": ".",
|
|
15
|
+
* "config": {
|
|
16
|
+
* "custom_patterns": [{ "name": "Custom Token", "pattern": "CUSTOM_[A-Z0-9]{32}" }],
|
|
17
|
+
* "allowlist": [".env.example"]
|
|
18
|
+
* }
|
|
19
|
+
* }
|
|
20
|
+
*
|
|
21
|
+
* Output (stdout JSON):
|
|
22
|
+
* {
|
|
23
|
+
* "files_scanned": 2,
|
|
24
|
+
* "secrets_found": 1,
|
|
25
|
+
* "findings": [...],
|
|
26
|
+
* "pass": false
|
|
27
|
+
* }
|
|
28
|
+
*/
|
|
29
|
+
|
|
30
|
+
import { createRequire } from 'module';
|
|
31
|
+
const require = createRequire(import.meta.url);
|
|
32
|
+
const fs = require('fs');
|
|
33
|
+
const path = require('path');
|
|
34
|
+
|
|
35
|
+
/**
 * Built-in secret patterns to detect.
 * Each has a name, a regex pattern, and a severity level.
 * An entry may also carry `requires_context`: a secondary regex that must
 * also match the same line before the primary pattern is reported — used to
 * suppress false positives on generic-looking values (e.g. any 40-char
 * base64 run would otherwise flag as an AWS secret key).
 */
const DEFAULT_PATTERNS = [
  {
    name: 'AWS Access Key',
    // "AKIA" prefix + 16 uppercase alphanumerics; the lookbehind/lookahead
    // reject matches embedded inside longer base64-like runs.
    pattern: /(?<![A-Za-z0-9/+=])(AKIA[0-9A-Z]{16})(?![A-Za-z0-9/+=])/,
    severity: 'critical'
  },
  {
    name: 'AWS Secret Key',
    // Any standalone 40-char base64-ish token — too generic on its own, so
    // it only fires when the line also names an AWS-secret-style variable.
    pattern: /(?<![A-Za-z0-9/+=])([A-Za-z0-9/+=]{40})(?![A-Za-z0-9/+=])/,
    severity: 'critical',
    requires_context: /aws_secret|secret_access_key|AWS_SECRET/i
  },
  {
    name: 'GitHub Token',
    // Classic PATs (ghp_) and server tokens (ghs_).
    pattern: /(?<![A-Za-z0-9_])(gh[ps]_[A-Za-z0-9_]{36,})(?![A-Za-z0-9_])/,
    severity: 'critical'
  },
  {
    name: 'GitHub Fine-grained PAT',
    pattern: /(?<![A-Za-z0-9_])(github_pat_[A-Za-z0-9_]{22,})(?![A-Za-z0-9_])/,
    severity: 'critical'
  },
  {
    name: 'Generic API Key Assignment',
    // Only quoted assignments count, to avoid flagging variable names alone.
    pattern: /(?:api[_-]?key|apikey)\s*[:=]\s*["']([A-Za-z0-9_\-]{20,})["']/i,
    severity: 'high'
  },
  {
    name: 'Generic Secret Assignment',
    pattern: /(?:secret|password|passwd|pwd)\s*[:=]\s*["']([^\s"']{8,})["']/i,
    severity: 'high'
  },
  {
    name: 'Generic Token Assignment',
    pattern: /(?:token|auth_token|access_token)\s*[:=]\s*["']([A-Za-z0-9_\-.]{20,})["']/i,
    severity: 'high'
  },
  {
    name: 'Private Key Header',
    // PEM header is enough — the key body need not be matched.
    pattern: /-----BEGIN (?:RSA |EC |DSA |OPENSSH )?PRIVATE KEY-----/,
    severity: 'critical'
  },
  {
    name: 'Slack Webhook',
    pattern: /https:\/\/hooks\.slack\.com\/services\/T[A-Z0-9]+\/B[A-Z0-9]+\/[A-Za-z0-9]+/,
    severity: 'high'
  },
  {
    name: 'Slack Bot Token',
    pattern: /(?<![A-Za-z0-9_-])(xoxb-[0-9]{10,}-[0-9]{10,}-[A-Za-z0-9]{24,})(?![A-Za-z0-9_-])/,
    severity: 'critical'
  },
  {
    name: 'Database Connection String',
    // URLs with embedded credentials (mongodb://user:pass@host/...).
    pattern: /(?:mongodb(?:\+srv)?|postgres(?:ql)?|mysql|redis):\/\/[^\s"'<>]{10,}/i,
    severity: 'high'
  },
  {
    name: 'Bearer Token',
    pattern: /(?:Authorization|Bearer)\s*[:=]\s*["']?Bearer\s+[A-Za-z0-9_\-.]{20,}/i,
    severity: 'high'
  }
];
|
|
102
|
+
|
|
103
|
+
/**
 * Files/directories to skip by default.
 * Entries are matched against whole path segments (see shouldSkip), so
 * "build" matches the directory but not "rebuild" or "buildUtils.js";
 * the lockfile names match when they appear as a path's basename segment.
 */
const DEFAULT_SKIP = [
  'node_modules', '.git', 'dist', 'build', 'coverage',
  'package-lock.json', 'yarn.lock', 'pnpm-lock.yaml'
];
|
|
110
|
+
|
|
111
|
+
/**
 * File extensions typically safe to skip (binary/media).
 * Callers lowercase the extension before lookup, so comparison is
 * effectively case-insensitive.
 * NOTE(review): .svg is XML text and could in principle carry embedded
 * secrets — confirm skipping it is intentional.
 */
const BINARY_EXTENSIONS = new Set([
  '.png', '.jpg', '.jpeg', '.gif', '.ico', '.svg', '.webp',
  '.woff', '.woff2', '.ttf', '.eot', '.otf',
  '.zip', '.tar', '.gz', '.bz2',
  '.pdf', '.doc', '.docx', '.xls', '.xlsx',
  '.mp3', '.mp4', '.avi', '.mov', '.wav'
]);
|
|
121
|
+
|
|
122
|
+
/**
 * Compile custom pattern strings into RegExp objects.
 *
 * Entries that are malformed are silently dropped so one bad config entry
 * cannot abort the whole scan: non-object entries (the original code threw
 * a TypeError on `null` here), patterns that are neither a string nor a
 * RegExp (a bare number would previously be coerced by `new RegExp` and
 * bypass the length safeguard), over-long patterns, and invalid regex
 * syntax are all skipped.
 *
 * @param {Array<{ name?: string, pattern: string|RegExp, severity?: string }>} customPatterns
 * @returns {Array<{ name: string, pattern: RegExp, severity: string }>}
 */
function compileCustomPatterns(customPatterns) {
  return (customPatterns || []).map(p => {
    // Guard against malformed entries (null, primitives, missing pattern).
    if (!p || typeof p !== 'object') {
      return null;
    }
    if (typeof p.pattern !== 'string' && !(p.pattern instanceof RegExp)) {
      return null;
    }
    // Basic safeguard: reject patterns exceeding a reasonable length
    // (very long user-supplied regexes are a ReDoS / memory risk).
    if (typeof p.pattern === 'string' && p.pattern.length > 500) {
      return null;
    }
    let regex;
    try {
      regex = new RegExp(p.pattern);
    } catch {
      // Invalid regex syntax — skip rather than abort the scan.
      return null;
    }
    return {
      name: p.name || 'Custom Pattern',
      pattern: regex,
      severity: p.severity || 'high'
    };
  }).filter(Boolean);
}
|
|
147
|
+
|
|
148
|
+
/**
 * Decide whether a file path should be excluded from scanning.
 *
 * Skips binary/media extensions, anything under a default-skip directory
 * (matched per whole path segment, so "build" never matches "rebuild"),
 * and any path covered by the caller-supplied allowlist.
 *
 * @param {string} filePath - File path to check.
 * @param {string[]} allowlist - Paths to allowlist.
 * @returns {boolean} True when the file should not be scanned.
 */
function shouldSkip(filePath, allowlist = []) {
  if (BINARY_EXTENSIONS.has(path.extname(filePath).toLowerCase())) {
    return true;
  }

  // Split on both the OS separator and forward slashes so skip names only
  // ever match complete directory/file segments.
  const parts = filePath.split(path.sep).flatMap(part => part.split('/'));
  if (DEFAULT_SKIP.some(entry => parts.includes(entry))) {
    return true;
  }

  const base = path.basename(filePath);
  return allowlist.some(entry =>
    filePath === entry || filePath.endsWith(entry) || base === entry
  );
}
|
|
176
|
+
|
|
177
|
+
/**
 * Scan a single file for secrets, line by line.
 *
 * @param {string} filePath - Absolute path to file.
 * @param {Array} patterns - Secret patterns to check against.
 * @returns {Array<{ file: string, line: number, pattern_name: string, severity: string, match: string }>}
 */
function scanFile(filePath, patterns) {
  let text;
  try {
    text = fs.readFileSync(filePath, 'utf8');
  } catch {
    // Unreadable file (permissions, deleted mid-scan): report nothing.
    return [];
  }

  const findings = [];
  const lines = text.split('\n');

  lines.forEach((line, index) => {
    // Skip comment lines that are example/template patterns.
    if (/^\s*(#|\/\/)\s*(example|TODO|FIXME|NOTE)/i.test(line)) {
      return;
    }

    for (const def of patterns) {
      // Some patterns (e.g. AWS secret key) are too generic alone and only
      // fire when their context regex also matches the line.
      if (def.requires_context && !def.requires_context.test(line)) {
        continue;
      }

      const hit = line.match(def.pattern);
      if (!hit) {
        continue;
      }

      const secret = hit[1] || hit[0];
      // Redact the middle of the match so findings are safe to report.
      const redacted = secret.length > 8
        ? `${secret.substring(0, 4)}****${secret.substring(secret.length - 4)}`
        : '****';

      findings.push({
        file: filePath,
        line: index + 1,
        pattern_name: def.name,
        severity: def.severity,
        match: redacted
      });
    }
  });

  return findings;
}
|
|
228
|
+
|
|
229
|
+
/**
 * Run secret scanning on specified files.
 *
 * @param {object} input - Scan options.
 * @param {string[]} input.files - Files to scan.
 * @param {string} [input.root] - Project root.
 * @param {object} [input.config] - Override config.
 * @param {Array} [input.config.custom_patterns] - Additional patterns.
 * @param {string[]} [input.config.allowlist] - Files to skip.
 * @returns {object} Scan results with per-severity counts and pass flag.
 */
function runSecretScan(input) {
  const { files = [], root = '.', config = {} } = input;
  const baseDir = path.resolve(root);
  const allowlist = config.allowlist || [];

  // Built-in patterns always apply; user config may only add to them.
  const patterns = DEFAULT_PATTERNS.concat(compileCustomPatterns(config.custom_patterns));

  let filesScanned = 0;
  const findings = [];

  for (const file of files) {
    const absolute = path.isAbsolute(file) ? file : path.join(baseDir, file);
    if (!fs.existsSync(absolute)) {
      continue;
    }
    // Skip rules are evaluated against the path as given by the caller,
    // not the resolved absolute path.
    if (shouldSkip(file, allowlist)) {
      continue;
    }

    filesScanned += 1;
    findings.push(...scanFile(absolute, patterns));
  }

  const critical = findings.filter(f => f.severity === 'critical').length;
  const high = findings.filter(f => f.severity === 'high').length;

  return {
    files_scanned: filesScanned,
    secrets_found: findings.length,
    critical,
    high,
    findings,
    pass: findings.length === 0
  };
}
|
|
278
|
+
|
|
279
|
+
// ─── CLI Entry Point ──────────────────────────────────────────────────────────

// Only act as a CLI when this file is the entry script (not when imported).
if (process.argv[1] && (
  process.argv[1].endsWith('secret-scanner.js') ||
  process.argv[1].endsWith('secret-scanner')
)) {
  // Accumulate the full stdin payload before parsing it as JSON.
  let input = '';
  process.stdin.setEncoding('utf8');
  process.stdin.on('data', chunk => { input += chunk; });
  process.stdin.on('end', () => {
    try {
      // Empty stdin is treated as an empty scan request.
      const parsed = input.trim() ? JSON.parse(input) : {};
      const result = runSecretScan(parsed);
      process.stdout.write(JSON.stringify(result, null, 2) + '\n');
      // Exit 0 when clean, 1 when secrets were found (CI-friendly).
      process.exit(result.pass ? 0 : 1);
    } catch (err) {
      // Malformed JSON (or unexpected scan failure) exits with code 2.
      process.stdout.write(JSON.stringify({ error: err.message }) + '\n');
      process.exit(2);
    }
  });

  // Interactive terminal: no piped input will arrive, so run an empty scan
  // immediately instead of hanging while waiting for stdin to end.
  if (process.stdin.isTTY) {
    const result = runSecretScan({});
    process.stdout.write(JSON.stringify(result, null, 2) + '\n');
    process.exit(result.pass ? 0 : 1);
  }
}
|
|
306
|
+
|
|
307
|
+
export {
|
|
308
|
+
runSecretScan,
|
|
309
|
+
scanFile,
|
|
310
|
+
shouldSkip,
|
|
311
|
+
compileCustomPatterns,
|
|
312
|
+
DEFAULT_PATTERNS
|
|
313
|
+
};
|
|
@@ -0,0 +1,335 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* semantic-diff.js — Cross-artifact Semantic Diffing
|
|
3
|
+
*
|
|
4
|
+
* Detects meaning changes, not just text changes, across PRD,
|
|
5
|
+
* architecture, APIs, and tests.
|
|
6
|
+
*
|
|
7
|
+
* Usage:
|
|
8
|
+
* node bin/lib/semantic-diff.js compare <path1> <path2> [options]
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
'use strict';
|
|
12
|
+
|
|
13
|
+
const fs = require('fs');
|
|
14
|
+
const path = require('path');
|
|
15
|
+
const crypto = require('crypto');
|
|
16
|
+
|
|
17
|
+
// Markdown ATX heading: captures the hash run (level) and the heading text.
// NOTE(review): currently unused — extractSections builds its own per-line
// regex instead; confirm whether this constant can be retired.
const SECTION_HEADING = /^(#{1,6})\s+(.+)$/gm;
// Requirement-style identifiers: REQ-1, E2-S3, NFR-4, UC-5, FR-6, AC-7, M8-T9.
const REQUIREMENT_PATTERN = /\b(REQ-\d+|E\d+-S\d+|NFR-\d+|UC-\d+|FR-\d+|AC-\d+|M\d+-T\d+)\b/g;
// HTTP method followed by a path token, e.g. "GET /users/:id".
const API_ENDPOINT = /\b(GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS)\s+(\S+)/g;
// A markdown table row (| a | b |). Callers re-create regexes from .source
// before exec() loops, so the /g lastIndex state here is never shared.
const TABLE_ROW = /^\|(.+)\|$/gm;
|
|
21
|
+
|
|
22
|
+
/**
 * Extract structural sections from markdown.
 *
 * Splits content on ATX headings (# through ######). Text appearing before
 * the first heading is returned as a synthetic "(preamble)" section, but an
 * empty preamble is dropped.
 *
 * @param {string} content
 * @returns {object[]} Sections of { heading, level, content, startLine }.
 */
function extractSections(content) {
  const sections = [];
  let current = { heading: '(preamble)', level: 0, content: [], startLine: 0 };

  // Finalize the in-progress section: join its body lines into one string.
  const flush = () => {
    current.content = current.content.join('\n').trim();
    sections.push(current);
  };

  content.split('\n').forEach((line, index) => {
    const heading = line.match(/^(#{1,6})\s+(.+)$/);
    if (!heading) {
      current.content.push(line);
      return;
    }
    // Only an entirely empty preamble is discarded.
    if (current.content.length > 0 || current.heading !== '(preamble)') {
      flush();
    }
    current = {
      heading: heading[2].trim(),
      level: heading[1].length,
      content: [],
      startLine: index + 1
    };
  });

  flush();
  return sections;
}
|
|
54
|
+
|
|
55
|
+
/**
 * Extract requirement identifiers (REQ-n, NFR-n, UC-n, FR-n, AC-n,
 * En-Sn, Mn-Tn) from content, deduplicated and lexicographically sorted.
 *
 * @param {string} content
 * @returns {string[]}
 */
function extractRequirements(content) {
  // Same pattern as the module-level REQUIREMENT_PATTERN, inlined so the
  // function is self-contained.
  const idPattern = /\b(REQ-\d+|E\d+-S\d+|NFR-\d+|UC-\d+|FR-\d+|AC-\d+|M\d+-T\d+)\b/g;
  const unique = new Set(content.match(idPattern) || []);
  return Array.from(unique).sort();
}
|
|
64
|
+
|
|
65
|
+
/**
 * Extract "METHOD /path" style API endpoint mentions from content.
 *
 * @param {string} content
 * @returns {Array<{ method: string, path: string }>}
 */
function extractApiEndpoints(content) {
  // Build a fresh /g regex per call so lastIndex state is never shared
  // between invocations (same source as the module-level API_ENDPOINT).
  const endpointPattern = new RegExp(API_ENDPOINT.source, 'g');
  const found = [];
  for (let m = endpointPattern.exec(content); m !== null; m = endpointPattern.exec(content)) {
    found.push({ method: m[1], path: m[2] });
  }
  return found;
}
|
|
79
|
+
|
|
80
|
+
/**
 * Extract cell data from markdown table rows.
 *
 * Alignment/separator rows (|---|:--:|) are excluded; cells are trimmed
 * and empty cells dropped.
 *
 * @param {string} content
 * @returns {string[][]} One array of cell strings per data row.
 */
function extractTableData(content) {
  // Fresh regex per call avoids shared /g lastIndex state.
  const rowPattern = new RegExp(TABLE_ROW.source, 'gm');
  const rows = [];
  for (let m = rowPattern.exec(content); m !== null; m = rowPattern.exec(content)) {
    const cells = m[1]
      .split('|')
      .map(cell => cell.trim())
      .filter(cell => cell.length > 0);
    // skip separator rows
    if (cells.some(cell => /^[-:]+$/.test(cell))) {
      continue;
    }
    rows.push(cells);
  }
  return rows;
}
|
|
96
|
+
|
|
97
|
+
/**
 * Normalize text for comparison: lowercase, replace punctuation with
 * spaces, collapse whitespace runs, and trim.
 *
 * @param {string} text
 * @returns {string}
 */
function normalizeText(text) {
  const lowered = text.toLowerCase();
  const depunctuated = lowered.replace(/[^\w\s]/g, ' ');
  const collapsed = depunctuated.replace(/\s+/g, ' ');
  return collapsed.trim();
}
|
|
108
|
+
|
|
109
|
+
/**
 * Compute simple similarity ratio between two strings (0-1) using
 * Jaccard-style word-set overlap. Only words longer than two characters
 * count; text is normalized (lowercased, punctuation stripped) first.
 *
 * @param {string} a
 * @param {string} b
 * @returns {number}
 */
function textSimilarity(a, b) {
  const toWordSet = (text) =>
    new Set(normalizeText(text).split(' ').filter(word => word.length > 2));

  const wordsA = toWordSet(a);
  const wordsB = toWordSet(b);

  // Two effectively-empty strings are identical; one empty side means
  // there is nothing in common.
  if (wordsA.size === 0 && wordsB.size === 0) return 1;
  if (wordsA.size === 0 || wordsB.size === 0) return 0;

  const shared = [...wordsA].filter(word => wordsB.has(word)).length;
  const union = new Set([...wordsA, ...wordsB]).size;
  return union === 0 ? 1 : shared / union;
}
|
|
129
|
+
|
|
130
|
+
/**
 * Compare two artifacts and find semantic differences.
 *
 * Reports sections added/removed/modified (matched by heading text),
 * requirement IDs and API endpoints added/removed, table row-count drift,
 * an overall word-overlap similarity score, and whether the change looks
 * "breaking" (anything removed, or a section rewritten beyond recognition).
 *
 * @param {string} contentA - Original content.
 * @param {string} contentB - Modified content.
 * @param {object} [options] - Currently unused; reserved for future tuning.
 * @returns {object} Structured diff report (see the return literal below).
 */
function compareArtifacts(contentA, contentB, options = {}) {
  const sectionsA = extractSections(contentA);
  const sectionsB = extractSections(contentB);

  const sectionChanges = [];
  const headingsA = sectionsA.map(s => s.heading);
  const headingsB = sectionsB.map(s => s.heading);

  // Detect added sections (heading present in B but not in A).
  for (const sec of sectionsB) {
    if (!headingsA.includes(sec.heading)) {
      sectionChanges.push({
        type: 'section_added',
        heading: sec.heading,
        severity: 'info'
      });
    }
  }

  // Detect removed sections — losing content is graded more severely
  // ('warning') than adding it ('info').
  for (const sec of sectionsA) {
    if (!headingsB.includes(sec.heading)) {
      sectionChanges.push({
        type: 'section_removed',
        heading: sec.heading,
        severity: 'warning'
      });
    }
  }

  // Detect modified sections: same heading on both sides, body drifted.
  // NOTE(review): with duplicate headings only the first match in B is
  // compared — confirm headings are unique in practice.
  for (const secA of sectionsA) {
    const secB = sectionsB.find(s => s.heading === secA.heading);
    if (secB) {
      const similarity = textSimilarity(secA.content, secB.content);
      // Below 95% word overlap counts as modified; severity scales with
      // how much of the wording survived.
      if (similarity < 0.95) {
        sectionChanges.push({
          type: 'section_modified',
          heading: secA.heading,
          similarity: Math.round(similarity * 100),
          severity: similarity < 0.5 ? 'critical' : similarity < 0.8 ? 'warning' : 'info'
        });
      }
    }
  }

  // Requirement changes (set difference of extracted IDs).
  const reqsA = extractRequirements(contentA);
  const reqsB = extractRequirements(contentB);
  const addedReqs = reqsB.filter(r => !reqsA.includes(r));
  const removedReqs = reqsA.filter(r => !reqsB.includes(r));

  // API endpoint changes, keyed as "METHOD path".
  const apisA = extractApiEndpoints(contentA);
  const apisB = extractApiEndpoints(contentB);
  const apiKeysA = apisA.map(a => `${a.method} ${a.path}`);
  const apiKeysB = apisB.map(a => `${a.method} ${a.path}`);
  const addedApis = apiKeysB.filter(k => !apiKeysA.includes(k));
  const removedApis = apiKeysA.filter(k => !apiKeysB.includes(k));

  // Table changes — only row counts are compared, not cell contents.
  const tablesA = extractTableData(contentA);
  const tablesB = extractTableData(contentB);

  const overallSimilarity = textSimilarity(contentA, contentB);
  // "Breaking" = any requirement or API removed, or any section rewritten
  // so heavily it was graded critical above.
  const hasBreakingChanges = removedReqs.length > 0 || removedApis.length > 0
    || sectionChanges.some(c => c.severity === 'critical');

  return {
    success: true,
    overall_similarity: Math.round(overallSimilarity * 100),
    has_breaking_changes: hasBreakingChanges,
    section_changes: sectionChanges,
    requirement_changes: {
      added: addedReqs,
      removed: removedReqs,
      total_before: reqsA.length,
      total_after: reqsB.length
    },
    api_changes: {
      added: addedApis,
      removed: removedApis,
      total_before: apisA.length,
      total_after: apisB.length
    },
    table_changes: {
      rows_before: tablesA.length,
      rows_after: tablesB.length
    },
    summary: {
      sections_added: sectionChanges.filter(c => c.type === 'section_added').length,
      sections_removed: sectionChanges.filter(c => c.type === 'section_removed').length,
      sections_modified: sectionChanges.filter(c => c.type === 'section_modified').length,
      requirements_added: addedReqs.length,
      requirements_removed: removedReqs.length,
      apis_added: addedApis.length,
      apis_removed: removedApis.length
    }
  };
}
|
|
238
|
+
|
|
239
|
+
/**
 * Compare two artifact files on disk.
 *
 * @param {string} pathA - Path to original artifact.
 * @param {string} pathB - Path to modified artifact.
 * @param {object} [options] - Forwarded to compareArtifacts().
 * @returns {object} Diff report, or { success: false, error } if a path is missing.
 */
function compareFiles(pathA, pathB, options = {}) {
  // Validate both inputs up front; report the first missing path.
  for (const candidate of [pathA, pathB]) {
    if (!fs.existsSync(candidate)) {
      return { success: false, error: `File not found: ${candidate}` };
    }
  }

  const result = compareArtifacts(
    fs.readFileSync(pathA, 'utf8'),
    fs.readFileSync(pathB, 'utf8'),
    options
  );
  result.file_a = pathA;
  result.file_b = pathB;
  return result;
}
|
|
263
|
+
|
|
264
|
+
/**
 * Detect cross-artifact consistency issues across multiple spec files.
 *
 * Loads the standard artifacts under <root>/specs in pipeline order and
 * flags requirement IDs that appear in one artifact but vanish from the
 * next existing artifact downstream.
 *
 * @param {string} root - Project root.
 * @param {object} [options] - Currently unused.
 * @returns {object}
 */
function crossArtifactDiff(root, options = {}) {
  const specsDir = path.join(root, 'specs');
  if (!fs.existsSync(specsDir)) {
    return { success: false, error: 'specs/ directory not found' };
  }

  // Pipeline order: each file is considered downstream of the previous one.
  const pipeline = ['challenger-brief.md', 'product-brief.md', 'prd.md', 'architecture.md', 'implementation-plan.md'];
  const artifacts = {};

  for (const name of pipeline) {
    const filePath = path.join(specsDir, name);
    if (!fs.existsSync(filePath)) {
      continue;
    }
    const content = fs.readFileSync(filePath, 'utf8');
    artifacts[name] = {
      content,
      requirements: extractRequirements(content),
      apis: extractApiEndpoints(content),
      sections: extractSections(content)
    };
  }

  const inconsistencies = [];
  // Insertion order matches pipeline order since only existing files were added.
  const orderedKeys = Object.keys(artifacts);

  // Walk adjacent pairs: requirements mentioned upstream should survive
  // into the next existing downstream artifact.
  for (let i = 0; i < orderedKeys.length - 1; i++) {
    const upstream = artifacts[orderedKeys[i]];
    const downstream = artifacts[orderedKeys[i + 1]];
    if (!upstream || !downstream) {
      continue;
    }

    const missing = upstream.requirements.filter(r => !downstream.requirements.includes(r));
    if (missing.length > 0) {
      inconsistencies.push({
        type: 'requirement_gap',
        upstream: orderedKeys[i],
        downstream: orderedKeys[i + 1],
        missing_requirements: missing,
        severity: 'warning'
      });
    }
  }

  return {
    success: true,
    artifacts_analyzed: orderedKeys.length,
    inconsistencies,
    summary: {
      total_inconsistencies: inconsistencies.length,
      requirement_gaps: inconsistencies.filter(i => i.type === 'requirement_gap').length
    }
  };
}
|
|
324
|
+
|
|
325
|
+
module.exports = {
|
|
326
|
+
extractSections,
|
|
327
|
+
extractRequirements,
|
|
328
|
+
extractApiEndpoints,
|
|
329
|
+
extractTableData,
|
|
330
|
+
normalizeText,
|
|
331
|
+
textSimilarity,
|
|
332
|
+
compareArtifacts,
|
|
333
|
+
compareFiles,
|
|
334
|
+
crossArtifactDiff
|
|
335
|
+
};
|