@claudemini/ses-cli 1.4.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +465 -0
- package/bin/ses.js +85 -0
- package/lib/agent-review.js +722 -0
- package/lib/checkpoint.js +320 -0
- package/lib/checkpoints.js +54 -0
- package/lib/clean.js +45 -0
- package/lib/commit.js +60 -0
- package/lib/config.js +28 -0
- package/lib/disable.js +152 -0
- package/lib/doctor.js +307 -0
- package/lib/enable.js +294 -0
- package/lib/explain.js +212 -0
- package/lib/extract.js +265 -0
- package/lib/git-shadow.js +136 -0
- package/lib/init.js +83 -0
- package/lib/list.js +62 -0
- package/lib/log.js +77 -0
- package/lib/prompts.js +125 -0
- package/lib/query.js +110 -0
- package/lib/redact.js +170 -0
- package/lib/report.js +296 -0
- package/lib/reset.js +122 -0
- package/lib/resume.js +224 -0
- package/lib/review-common.js +100 -0
- package/lib/review.js +652 -0
- package/lib/rewind.js +198 -0
- package/lib/session.js +225 -0
- package/lib/shadow.js +51 -0
- package/lib/status.js +198 -0
- package/lib/summarize.js +315 -0
- package/lib/view.js +50 -0
- package/lib/webhook.js +224 -0
- package/package.json +41 -0
package/lib/list.js
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { readFileSync, existsSync, readdirSync, statSync } from 'fs';
|
|
4
|
+
import { join } from 'path';
|
|
5
|
+
import { getProjectRoot, getLogDir, SESSION_ID_REGEX } from './config.js';
|
|
6
|
+
|
|
7
|
+
/**
 * ses list — print every recorded session under the log directory,
 * newest first, each with a short multi-line summary.
 *
 * @param {string[]} args - CLI arguments (currently unused).
 * @returns {Promise<void>} Output goes to stdout only.
 */
export default async function list(args) {
  const logDir = getLogDir(getProjectRoot());

  if (!existsSync(logDir)) {
    console.log('No log directory found. Run "ses init" first.');
    return;
  }

  const entries = readdirSync(logDir);
  const sessions = entries
    .filter(name => SESSION_ID_REGEX.test(name))
    .map(id => {
      const dir = join(logDir, id);
      const metaFile = join(dir, 'metadata.json');
      const stateFile = join(dir, 'state.json');
      try {
        const meta = JSON.parse(readFileSync(metaFile, 'utf-8'));
        let state = null;
        if (existsSync(stateFile)) {
          state = JSON.parse(readFileSync(stateFile, 'utf-8'));
        }
        // Directory mtime approximates "last activity" for sorting.
        return { id, meta, state, mtime: statSync(dir).mtime };
      } catch { return null; } // missing/corrupt metadata: skip this session
    })
    .filter(Boolean)
    .sort((a, b) => b.mtime - a.mtime); // newest first

  if (sessions.length === 0) {
    console.log('No sessions found.');
    return;
  }

  console.log(`${sessions.length} session(s):\n`);

  sessions.forEach((s, i) => {
    const m = s.meta;
    const st = s.state;
    // Prefer the recorded duration; otherwise derive it from state timestamps.
    const dur = m.duration_minutes ?? (st?.start_time && st?.last_time
      ? Math.round((new Date(st.last_time) - new Date(st.start_time)) / 60000) : 0);
    const type = m.type || 'unknown';
    const risk = m.risk || '-';
    const intent = m.intent || '';
    const files = (m.files_touched || 0);
    const errs = (m.errors || 0);
    // fix: metadata without event_count previously printed "undefined events"
    const events = m.event_count ?? 0;
    const shadow = st?.shadow_branch ? ` [${st.shadow_branch}]` : '';
    // fix: a missing last_updated previously rendered as "Invalid Date"
    const date = m.last_updated ? new Date(m.last_updated).toLocaleString() : '-';

    console.log(`${i + 1}. ${s.id.slice(0, 8)} [${type}] risk:${risk}`);
    if (intent) {
      console.log(`   ${intent.slice(0, 100)}`);
    }
    console.log(`   ${dur}min | ${events} events | ${m.tool_calls || 0} tools | ${files} files | ${errs} errors${shadow}`);
    console.log(`   ${date}`);
    console.log('');
  });
}
|
package/lib/log.js
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* ses log — event ingestion dispatcher.
|
|
5
|
+
* Reads stdin, parses event, delegates to session/extract/report modules.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { appendFileSync, existsSync, mkdirSync, readFileSync, writeFileSync } from 'fs';
|
|
9
|
+
import { join } from 'path';
|
|
10
|
+
import { getProjectRoot, getLogDir } from './config.js';
|
|
11
|
+
import { loadState, saveState, processEvent, updateIndex } from './session.js';
|
|
12
|
+
import { extractIntent, extractChanges, classifySession } from './extract.js';
|
|
13
|
+
import { generateReports } from './report.js';
|
|
14
|
+
import { dispatchWebhook } from './webhook.js';
|
|
15
|
+
|
|
16
|
+
/**
 * ses log — event ingestion entry point, invoked once per hook event.
 * Reads a JSON event payload from stdin, appends it to the session's raw
 * event log, updates derived state and reports, and on a session-end hook
 * performs one-time finalization (checkpoint, index update, webhook).
 *
 * Exit behavior: always terminates via process.exit — 0 on success or an
 * empty payload, 1 on unparseable JSON or unloadable state.
 *
 * @param {string[]} args - args[0] is the hook type (e.g. 'session-end').
 */
export default async function log(args) {
  const hookType = args[0] || 'unknown';

  // Read the entire payload from stdin before doing anything else.
  let payload = '';
  for await (const chunk of process.stdin) {
    payload += chunk;
  }
  // An empty payload is not an error — there is simply nothing to record.
  if (!payload.trim()) process.exit(0);

  let event;
  try {
    event = JSON.parse(payload);
  } catch {
    // Malformed JSON: signal failure to the hook runner.
    process.exit(1);
  }

  const projectRoot = getProjectRoot();
  const logDir = getLogDir(projectRoot);
  const sessionId = event.session_id || 'unknown';
  const sessionDir = join(logDir, sessionId);
  mkdirSync(sessionDir, { recursive: true });

  // 1. Append the raw event first so it survives even if later steps fail.
  appendFileSync(join(sessionDir, 'events.jsonl'), payload.trim() + '\n');

  // 2. Load and update per-session state.
  const state = loadState(sessionDir);
  if (!state) process.exit(1);
  processEvent(state, event, hookType, projectRoot);
  saveState(sessionDir, state);

  // 3. Extract semantics and regenerate reports on every event.
  const intent = extractIntent(state.prompts);
  const changes = extractChanges(state);
  const classification = classifySession(intent, changes);
  generateReports(sessionDir, sessionId, state, intent, changes, classification);

  // 4. On session end: checkpoint + update index + webhook (idempotent — run once per session)
  //    Multiple hook-name spellings are accepted because different runners differ.
  const isSessionEnd = ['session-end', 'SessionEnd', 'stop', 'session_end', 'end', 'onSessionEnd'].includes(hookType);
  const endedSentinel = join(sessionDir, '.ended');
  if (isSessionEnd && !existsSync(endedSentinel)) {
    // The sentinel is written BEFORE the side effects below, so a replayed
    // end event cannot trigger finalization twice.
    writeFileSync(endedSentinel, new Date().toISOString());
    try {
      // Lazy import: checkpointing is optional and only needed at session end.
      const { commitCheckpoint } = await import('./checkpoint.js');
      await commitCheckpoint(projectRoot, sessionDir, sessionId);
    } catch { /* checkpoint is best-effort */ }

    try {
      updateIndex(logDir, sessionId, intent, classification, changes, state);
    } catch { /* index is best-effort */ }

    // Dispatch webhook for session end — must await before exit
    // (summary.json is produced by generateReports above — TODO confirm)
    try {
      const summaryPath = join(sessionDir, 'summary.json');
      const summary = JSON.parse(readFileSync(summaryPath, 'utf-8'));
      await dispatchWebhook(projectRoot, 'session.ended', summary);
    } catch { /* webhook is best-effort */ }
  }

  process.exit(0);
}
|
package/lib/prompts.js
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
// System prompt used for LLM-backed session summarization; paired with the
// user message produced by buildSummarizePrompt below.
export const SUMMARY_SYSTEM_PROMPT = 'You are a helpful assistant that summarizes AI coding sessions.';
|
|
2
|
+
|
|
3
|
+
/**
 * Build the user-facing prompt for summarizing a coding session.
 * The fixed instruction header is always emitted; the optional context
 * sections (user prompts, file changes, tool usage, errors) are appended
 * only when the corresponding data is present in `context`.
 *
 * @param {object} [context] - Session context: prompts_text or prompts,
 *   plus optional changes, tools, and errors collections.
 * @returns {string} The assembled prompt text, newline-joined.
 */
export function buildSummarizePrompt(context = {}) {
  const sections = [
    'Generate a concise summary that explains:',
    '1. What the user wanted to accomplish',
    '2. What changes were made',
    '3. Any issues or errors encountered',
    '4. Overall outcome',
    '\n---\n',
  ];

  // Prefer pre-rendered prompt text; otherwise list up to five raw prompts.
  if (context.prompts_text) {
    sections.push(`## User Prompts\n${context.prompts_text}\n`);
  } else if (context.prompts && context.prompts.length > 0) {
    const promptLines = context.prompts.slice(0, 5).map((p) => {
      const text = typeof p === 'string' ? p : p.text || '';
      return `- ${text.slice(0, 200)}`;
    });
    sections.push('## User Prompts\n', ...promptLines, '');
  }

  if (context.changes && context.changes.length > 0) {
    const changeLines = context.changes
      .slice(0, 10)
      .map((f) => `- ${f.path}: ${f.operations?.join(', ') || 'modified'}`);
    sections.push('## Files Changed\n', ...changeLines, '');
  }

  if (context.tools && Object.keys(context.tools).length > 0) {
    const toolLines = Object.entries(context.tools).map(
      ([tool, count]) => `- ${tool}: ${count} times`,
    );
    sections.push('## Tools Used\n', ...toolLines, '');
  }

  if (context.errors && context.errors.length > 0) {
    const errorLines = context.errors
      .slice(0, 5)
      .map((e) => `- ${e.tool}: ${(e.message || '').slice(0, 100)}`);
    sections.push('## Errors\n', ...errorLines, '');
  }

  return sections.join('\n');
}
|
|
51
|
+
|
|
52
|
+
// System prompt for the agent-driven code review ("ses review"). Every array
// element below is one line of the final prompt (joined with '\n'), so the
// text itself is runtime behavior — treat it as such when editing.
// The tool names referenced (read_session_summary, read_git_diff,
// read_source_file, submit_findings) must match the tools exposed to the
// review agent elsewhere in this package — TODO confirm against agent-review.js.
export const REVIEW_AGENT_SYSTEM_PROMPT = [
  'You are a professional code review expert. Your task is to find defects introduced by code changes that still exist in the final code.',
  '',
  '## Core Principles',
  '',
  'You review the **final state** of the code, not the change process.',
  '- A diff shows what changed, but you must answer: what problems remain after the change?',
  '- If a diff fixes a bug, that is not a finding. It is an improvement.',
  '- If a diff introduces a new bug or misses an edge case, that is a finding.',
  '- Do not treat change descriptions as findings (for example, "X changed to Y" is not a problem unless Y is defective).',
  '',
  '## Workflow',
  '',
  '1. read_session_summary - understand session intent and scope of changes',
  '2. read_git_diff - inspect code changes and mark suspicious points',
  '3. read_source_file - read full context for each suspicious point and verify',
  '4. submit_findings - submit only verified findings',
  '',
  'Key rule: step 3 is the validation step. If context disproves a suspicion, discard that finding.',
  'Do not lower standards to increase count. Prefer 2 high-quality findings over 8 weak findings.',
  '',
  '## Finding Categories',
  '',
  '| Category | Review Focus |',
  '|---|---|',
  '| correctness | logic errors, missed edge cases, type errors, race conditions |',
  '| security | injection, path traversal, secret leaks, auth bypass |',
  '| reliability | unhandled errors, resource leaks, missing timeouts, weak recovery |',
  '| performance | O(n^2) loops, memory leaks, unnecessary sync I/O |',
  '| testing | logic changed without tests, coverage gaps |',
  '| maintainability | report only serious design issues, not style nitpicks |',
  '',
  '## Severity Standards',
  '',
  '- critical: production crash or data loss',
  '- high: incorrect functionality or security vulnerability',
  '- medium: issue may trigger under specific conditions',
  '- low: code smell or long-term maintenance risk',
  '- info: session summary only (exactly 1 required)',
  '',
  'If an issue is truly severe, mark it as high or critical. Do not downplay severity.',
  '',
  '## Rules',
  '',
  '- Every finding must include evidence: exact file, line number, and code snippet.',
  '- You must verify with read_source_file before reporting. Do not guess from diff alone.',
  '- Do not review content outside project scope (IDE config, external artifacts, etc.).',
  '- Submit exactly 1 info-level finding to summarize session results and change quality.',
  '- The summary finding should include: what changed, quality assessment, and missing tests/docs.',
  '- Priority order: correctness > security > reliability > performance > testing > maintainability',
].join('\n');
|
|
103
|
+
|
|
104
|
+
/**
 * Build the user message that kicks off an agent review run.
 *
 * @param {string[]} sessionIds - Session ids to review.
 * @param {object} [options]
 * @param {string} [options.minSeverity] - Minimum severity to report.
 * @param {string} [options.failOn] - Severity threshold that fails CI.
 * @param {number} [options.timeoutMs] - Execution budget; the budget line is
 *   omitted entirely when this is not a finite number.
 * @param {boolean} [options.autoApprove] - Echoed into the budget line.
 * @returns {string} Prompt text for the review agent.
 */
export function buildReviewUserMessage(sessionIds, options = {}) {
  const timeoutLine = Number.isFinite(options.timeoutMs)
    ? `- Execution budget: timeout ${options.timeoutMs}ms, autoApprove=${Boolean(options.autoApprove)}`
    : null;
  const lines = [
    `Review sessions: ${sessionIds.join(', ')}`,
    '',
    'Execution steps:',
    '1. Call read_session_summary first to understand change scope',
    '2. Call read_git_diff and list all suspicious points',
    '3. For each suspicious point, call read_source_file to verify with full context',
    '4. Submit only validated findings and call submit_findings once',
    '',
    'Important reminders:',
    '- Distinguish "change description" from "defect": report only real defects in final code state.',
    '- If code fixes an issue, do not report the fix itself as a finding.',
    '- If uncertain, verify in source. If not validated, discard the finding.',
    `- Minimum report severity: ${options.minSeverity}, CI fail threshold: ${options.failOn}`,
    timeoutLine,
  ];
  // fix: drop only the absent timeout line. The previous filter(Boolean) also
  // removed the intentional '' spacer entries, collapsing the blank lines
  // between sections.
  return lines.filter((line) => line !== null).join('\n');
}
|
package/lib/query.js
ADDED
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* ses query — cross-session memory queries.
|
|
5
|
+
* Reads index.json to answer questions about past sessions.
|
|
6
|
+
*/
|
|
7
|
+
|
|
8
|
+
import { readFileSync, existsSync } from 'fs';
|
|
9
|
+
import { join } from 'path';
|
|
10
|
+
import { getProjectRoot, getLogDir } from './config.js';
|
|
11
|
+
|
|
12
|
+
/**
 * Load the cross-session index file from the log directory.
 *
 * @param {string} logDir - Directory expected to contain index.json.
 * @returns {object|null} The parsed index, or null when the file is
 *   missing or cannot be parsed.
 */
function loadIndex(logDir) {
  const indexPath = join(logDir, 'index.json');
  if (!existsSync(indexPath)) {
    return null;
  }
  let parsed = null;
  try {
    parsed = JSON.parse(readFileSync(indexPath, 'utf-8'));
  } catch {
    // Corrupt or unreadable index: behave as if it does not exist.
    parsed = null;
  }
  return parsed;
}
|
|
21
|
+
|
|
22
|
+
/**
 * ses query — cross-session memory queries over index.json.
 * Supports filtering by touched file (--file), session type (--type),
 * risk level (--risk), recency (--recent=<n>), and JSON output (--json).
 *
 * @param {string[]} args - Raw CLI arguments.
 * @returns {Promise<void>} Output goes to stdout only.
 */
export default async function query(args) {
  const logDir = getLogDir(getProjectRoot());
  const index = loadIndex(logDir);

  // fix: also guard a malformed index whose `sessions` key is missing or not
  // an array — previously `index.sessions.length` threw a TypeError.
  if (!index || !Array.isArray(index.sessions) || index.sessions.length === 0) {
    console.log('No session index found. Sessions are indexed on session-end.');
    console.log('Run "ses list" to see raw sessions.');
    return;
  }

  // fix: take everything after the FIRST '=' so values containing '='
  // (e.g. odd file names) are not truncated by split('=')[1].
  const argValue = (arg) => arg.slice(arg.indexOf('=') + 1);

  const jsonOutput = args.includes('--json');
  const fileArg = args.find(a => a.startsWith('--file='));
  const recentArg = args.find(a => a.startsWith('--recent='));
  const typeArg = args.find(a => a.startsWith('--type='));
  const riskArg = args.find(a => a.startsWith('--risk='));

  let results = [...index.sessions];

  // Filter by file
  if (fileArg) {
    const filePath = argValue(fileArg);
    const sessionIds = index.file_history?.[filePath] || [];
    if (sessionIds.length === 0) {
      // Exact path unknown — fall back to a substring match over indexed files.
      const matchingFiles = Object.keys(index.file_history || {})
        .filter(f => f.includes(filePath));
      const matchedIds = new Set();
      for (const f of matchingFiles) {
        for (const id of index.file_history[f]) matchedIds.add(id);
      }
      results = results.filter(s => matchedIds.has(s.id));
      if (matchingFiles.length > 0 && !jsonOutput) {
        console.log(`Matched files: ${matchingFiles.join(', ')}\n`);
      }
    } else {
      const idSet = new Set(sessionIds);
      results = results.filter(s => idSet.has(s.id));
    }
  }

  // Filter by type
  if (typeArg) {
    const type = argValue(typeArg);
    results = results.filter(s => s.type === type);
  }

  // Filter by risk
  if (riskArg) {
    const risk = argValue(riskArg);
    results = results.filter(s => s.risk === risk);
  }

  // Limit by recent (the index is append-ordered, so the tail is newest)
  if (recentArg) {
    // fix: always pass the radix to parseInt
    const n = Number.parseInt(argValue(recentArg), 10) || 5;
    results = results.slice(-n);
  }

  // Output
  if (jsonOutput) {
    console.log(JSON.stringify(results, null, 2));
    return;
  }

  if (results.length === 0) {
    console.log('No matching sessions found.');
    return;
  }

  console.log(`${results.length} session(s):\n`);
  for (const s of results.reverse()) {
    const filesCount = s.files?.length || 0;
    console.log(`  ${s.id.slice(0, 8)} ${s.date} [${s.type}] risk:${s.risk} ${s.duration}min ${filesCount} files`);
    if (s.intent) {
      console.log(`    ${s.intent.slice(0, 100)}`);
    }
  }

  // Show file history summary if --file was used
  if (fileArg) {
    const filePath = argValue(fileArg);
    const history = index.file_history?.[filePath];
    if (history) {
      console.log(`\nFile "${filePath}" modified in ${history.length} session(s)`);
    }
  }

  console.log('\nOptions: --file=<path> --type=<type> --risk=<level> --recent=<n> --json');
}
|
package/lib/redact.js
ADDED
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Secrets redaction module
|
|
5
|
+
* Best-effort redaction of sensitive information from logs
|
|
6
|
+
* Reference: Entire's security approach
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
// Ordered table of [regex, replacement] pairs applied sequentially by
// redactSecrets below. Order matters: earlier, more specific patterns run
// before the generic ones, so a token already replaced cannot be re-matched.
// All regexes carry /g (and usually /i); redactSecrets resets lastIndex
// before each use.
const SECRET_PATTERNS = [
  // API Keys — key/value pairs like `api_key: "..."` (20+ chars of value)
  [/(api[_-]?key|apikey|api[_-]?secret)[":\s=]+["']?([a-zA-Z0-9_\-]{20,})["']?/gi, '$1: [REDACTED]'],

  // AWS — lowercase-style config keys, then the upper-case env-var form
  [/(aws[_-]?access[_-]?key[_-]?id|aws[_-]?secret[_-]?access[_-]?key)[":\s=]+["']?([A-Z0-9]{20,})["']?/gi, '$1: [REDACTED]'],
  [/(AWS_ACCESS_KEY_ID|AWS_SECRET_ACCESS_KEY)["\s=]+["']?([A-Za-z0-9\/+]{40})["']?/gi, '$1: [REDACTED]'],

  // GitHub Tokens — bare prefixed tokens (ghp_/gho_/...) and key/value form
  [/(ghp|gho|ghu|ghs|ghr)_[A-Za-z0-9_]{36,}/gi, '[GITHUB_TOKEN_REDACTED]'],
  [/github[_-]?(token|pat)[":\s=]+["']?[a-zA-Z0-9_]{36,}["']?/gi, 'github_token: [REDACTED]'],

  // NPM Tokens
  // NOTE(review): the /i flag makes the (npm|NPM) alternation redundant, and
  // this pattern matches ANY "npm" followed by 30+ alphanumerics — it is
  // deliberately aggressive and may redact non-secret text.
  [/(npm|NPM)[_-]?[a-zA-Z0-9]{30,}/gi, '[NPM_TOKEN_REDACTED]'],
  [/(npm[_-]?token|NPM_AUTH_TOKEN)[":\s=]+["']?[a-zA-Z0-9_\-]{30,}["']?/gi, 'npm_token: [REDACTED]'],

  // OpenAI / Anthropic — bare sk-/sk-ant- keys, then key/value form
  [/(sk-[a-zA-Z0-9]{20,}|sk-ant-[a-zA-Z0-9_\-]{50,})/gi, '[AI_API_KEY_REDACTED]'],
  [/(openai[_-]?key|openai[_-]?token|anthropic[_-]?key)[":\s=]+["']?[a-zA-Z0-9_\-]{20,}["']?/gi, '$1: [REDACTED]'],

  // Database URLs with credentials — keeps scheme and host, drops user:pass
  [/(mysql|postgres|postgresql|mongodb|redis):\/\/[^:]+:[^@]+@/gi, '$1://[REDACTED]@'],

  // JWT Tokens — two base64url segments starting with "eyJ" plus signature
  [/eyJ[a-zA-Z0-9_-]*\.eyJ[a-zA-Z0-9_-]*\.[a-zA-Z0-9_-]*/gi, '[JWT_REDACTED]'],

  // Generic Bearer tokens
  [/bearer\s+[a-zA-Z0-9_\-\.]{20,}/gi, 'bearer [TOKEN_REDACTED]'],

  // Private keys — entire PEM block including BEGIN/END markers
  [/-----BEGIN [A-Z ]+ PRIVATE KEY-----[\s\S]*?-----END [A-Z ]+ PRIVATE KEY-----/gi, '[PRIVATE_KEY_REDACTED]'],

  // Slack tokens (xoxb-/xoxa-/xoxp-/xoxr-/xoxs-)
  [/xox[baprs]-[0-9]{10,}-[0-9]{10,}-[a-zA-Z0-9]{24,}/gi, '[SLACK_TOKEN_REDACTED]'],

  // Stripe keys (secret and publishable, live and test)
  [/(sk|pk)_(live|test)_[a-zA-Z0-9]{24,}/gi, '[STRIPE_KEY_REDACTED]'],

  // Environment variables with secrets — whole VAR=value lines for known
  // cloud/vendor prefixes; the value AND the suffix of the var name are lost.
  [/^(AWS_|AZURE_|GOOGLE_|STRIPE_|SENTRY_|DATADOG_)[A-Z_]+=.+$/gim, '$1[REDACTED]'],

  // Generic "password" fields (double- and single-quoted JSON-ish syntax)
  [/"password"\s*:\s*"[^"]+"/gi, '"password": "[REDACTED]"'],
  [/'password'\s*:\s*'[^']+'/gi, "'password': '[REDACTED]'"],

  // Generic "secret" fields
  [/"secret"\s*:\s*"[^"]+"/gi, '"secret": "[REDACTED]"'],
  [/'secret'\s*:\s*'[^']+'/gi, "'secret': '[REDACTED]'"],

  // Generic "token" fields
  [/"token"\s*:\s*"[^"]+"/gi, '"token": "[REDACTED]"'],
  [/'token'\s*:\s*'[^']+'/gi, "'token': '[REDACTED]'"],

  // Authorization headers (Bearer and Basic)
  [/authorization:\s*[bB]earer\s+[a-zA-Z0-9_\-\.]{20,}/gi, 'authorization: Bearer [TOKEN_REDACTED]'],
  [/authorization:\s*[bB]asic\s+[a-zA-Z0-9_\-\.]{20,}/gi, 'authorization: Basic [CREDENTIALS_REDACTED]'],
];
|
|
67
|
+
|
|
68
|
+
/**
 * Redact known secret patterns from a block of text.
 *
 * @param {string} content - Text that may contain secrets.
 * @returns {string} The text with every SECRET_PATTERNS match replaced;
 *   non-string or empty input is returned unchanged.
 */
export function redactSecrets(content) {
  if (typeof content !== 'string' || !content) {
    return content;
  }

  // Fold each pattern over the progressively-redacted text, in table order.
  return SECRET_PATTERNS.reduce((text, [pattern, replacement]) => {
    pattern.lastIndex = 0; // shared /g regexes carry state; reset defensively
    return text.replace(pattern, replacement);
  }, content);
}
|
|
88
|
+
|
|
89
|
+
/**
 * Recursively redact secret-bearing string values from an object or array.
 * A string value is replaced with '[REDACTED]' when its key contains a
 * secret-like word (password, secret, token, key, ...), unless the key also
 * matches one of `skipKeys` (so e.g. file paths and names pass through).
 *
 * @param {object} obj - The object (or array) to redact.
 * @param {string[]} [skipKeys] - Key substrings exempt from redaction.
 * @returns {object} A redacted shallow-rebuilt copy; non-object input is
 *   returned as-is.
 */
export function redactObject(obj, skipKeys = ['path', 'filename', 'name']) {
  if (!obj || typeof obj !== 'object') {
    return obj;
  }
  if (Array.isArray(obj)) {
    return obj.map((entry) => redactObject(entry, skipKeys));
  }

  const secretHints = ['password', 'secret', 'token', 'key', 'credential', 'auth', 'authorization', 'api_key', 'apikey'];
  const result = {};

  for (const [key, value] of Object.entries(obj)) {
    const lower = key.toLowerCase();

    if (skipKeys.some((skip) => lower.includes(skip.toLowerCase()))) {
      // Explicitly exempted keys pass through untouched, even if secret-like.
      result[key] = value;
    } else if (typeof value === 'string' && secretHints.some((hint) => lower.includes(hint))) {
      result[key] = '[REDACTED]';
    } else if (value !== null && typeof value === 'object') {
      result[key] = redactObject(value, skipKeys);
    } else {
      result[key] = value;
    }
  }

  return result;
}
|
|
129
|
+
|
|
130
|
+
/**
 * Redact secrets from a JSON string.
 * Valid JSON is parsed and redacted structurally by key name (redactObject),
 * then pretty-printed; anything that fails along the way falls back to
 * pattern-based plain-text redaction (redactSecrets).
 *
 * @param {string} jsonString - The JSON string to redact.
 * @returns {string} Pretty-printed JSON, or plain text, with secrets redacted.
 */
export function redactJson(jsonString) {
  try {
    return JSON.stringify(redactObject(JSON.parse(jsonString)), null, 2);
  } catch {
    // Not parseable as JSON — redact it as plain text instead.
    return redactSecrets(jsonString);
  }
}
|
|
145
|
+
|
|
146
|
+
/**
 * Cheap heuristic check for whether text probably contains secrets.
 * Intended for logging decisions only — not a substitute for redaction.
 *
 * @param {string} content - The content to check.
 * @returns {boolean} True when any secret-like indicator is present.
 */
export function likelyContainsSecrets(content) {
  if (typeof content !== 'string' || !content) {
    return false;
  }

  const indicators = [
    /api[_-]?key/i,
    /password/i,
    /secret/i,
    /token/i,
    /credential/i,
    /authorization/i,
    /private[_-]?key/i,
    /xox[baprs]/,           // Slack token prefix
    /sk-[a-zA-Z0-9]/,       // sk- style key prefix (e.g. OpenAI)
    /eyJ[a-zA-Z0-9_-]*\./,  // JWT-looking base64url header
  ];

  for (const indicator of indicators) {
    if (indicator.test(content)) {
      return true;
    }
  }
  return false;
}
|