50c 3.9.4 → 3.9.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of 50c might be problematic. Click here for more details.

@@ -1,812 +1,1361 @@
1
- /**
2
- * 50c Pre-Publish Verification Tool
3
- *
4
- * Thorough 100-point checklist before publishing to:
5
- * - npm (packages)
6
- * - GitHub (releases)
7
- * - arXiv (papers)
8
- * - Medical journals
9
- * - Scientific publications
10
- *
11
- * "Ask once, verify everything"
12
- */
13
-
14
// Verification profiles for different publish targets.
//
// Each profile is { name, checks[] }, where a check has:
//   id        - unique key; matched against EMPIRICAL_CHECKS implementations
//   cat       - category used to group results in the receipt
//   desc      - human-readable description of what is verified
//   empirical - true when the check can run automatically, false = manual review
//
// Frozen so the shared checklist data cannot be mutated at runtime.
const PROFILES = Object.freeze({
  npm: {
    name: 'NPM Package',
    checks: [
      // Package basics
      { id: 'pkg_name', cat: 'metadata', desc: 'Package name valid and available', empirical: true },
      { id: 'pkg_version', cat: 'metadata', desc: 'Version bumped from npm registry', empirical: true },
      { id: 'pkg_desc', cat: 'metadata', desc: 'Description clear, no marketing fluff', empirical: false },
      { id: 'pkg_keywords', cat: 'metadata', desc: 'Keywords relevant and searchable', empirical: false },
      { id: 'pkg_license', cat: 'metadata', desc: 'LICENSE file exists and valid', empirical: true },
      { id: 'pkg_repo', cat: 'metadata', desc: 'Repository URL valid and accessible', empirical: true },
      { id: 'pkg_homepage', cat: 'metadata', desc: 'Homepage URL live and correct', empirical: true },

      // Code quality
      { id: 'syntax_valid', cat: 'code', desc: 'All JS/TS files pass syntax check', empirical: true },
      { id: 'no_console_log', cat: 'code', desc: 'No debug console.log statements', empirical: true },
      { id: 'no_todo_fixme', cat: 'code', desc: 'No unresolved TODO/FIXME comments', empirical: true },
      { id: 'no_hardcoded_secrets', cat: 'security', desc: 'No hardcoded API keys/passwords/tokens', empirical: true },
      { id: 'no_localhost', cat: 'security', desc: 'No localhost/127.0.0.1 in production code', empirical: true },
      { id: 'deps_secure', cat: 'security', desc: 'No known vulnerabilities in dependencies', empirical: true },

      // Documentation
      { id: 'readme_exists', cat: 'docs', desc: 'README.md exists', empirical: true },
      { id: 'readme_install', cat: 'docs', desc: 'Installation instructions present', empirical: true },
      { id: 'readme_usage', cat: 'docs', desc: 'Usage examples present', empirical: true },
      { id: 'readme_links_live', cat: 'docs', desc: 'All README links are live', empirical: true },
      { id: 'changelog_updated', cat: 'docs', desc: 'CHANGELOG updated for this version', empirical: true },

      // Build/Test
      { id: 'tests_pass', cat: 'test', desc: 'All tests pass', empirical: true },
      { id: 'build_clean', cat: 'test', desc: 'Build completes without warnings', empirical: true },
      { id: 'entry_point_valid', cat: 'test', desc: 'Main/bin entry points resolve', empirical: true },

      // Files
      { id: 'files_included', cat: 'files', desc: 'Only intended files in package', empirical: true },
      { id: 'no_env_files', cat: 'files', desc: 'No .env files included', empirical: true },
      { id: 'no_test_files', cat: 'files', desc: 'Test files excluded from package', empirical: true },
      { id: 'size_reasonable', cat: 'files', desc: 'Package size under 10MB', empirical: true },
    ],
  },

  github: {
    name: 'GitHub Release',
    checks: [
      { id: 'tag_format', cat: 'release', desc: 'Tag follows semver (vX.Y.Z)', empirical: true },
      { id: 'tag_unique', cat: 'release', desc: 'Tag does not exist yet', empirical: true },
      { id: 'branch_clean', cat: 'release', desc: 'No uncommitted changes', empirical: true },
      { id: 'branch_pushed', cat: 'release', desc: 'All commits pushed to remote', empirical: true },
      { id: 'ci_passing', cat: 'release', desc: 'CI/CD pipeline green', empirical: true },
      { id: 'release_notes', cat: 'docs', desc: 'Release notes written', empirical: false },
      { id: 'breaking_changes', cat: 'docs', desc: 'Breaking changes documented', empirical: false },
      { id: 'migration_guide', cat: 'docs', desc: 'Migration guide if needed', empirical: false },
      { id: 'no_secrets_in_history', cat: 'security', desc: 'No secrets in git history', empirical: true },
      { id: 'license_file', cat: 'legal', desc: 'LICENSE file present', empirical: true },
    ],
  },

  arxiv: {
    name: 'arXiv Paper',
    checks: [
      // Compilation
      { id: 'latex_compiles', cat: 'build', desc: 'LaTeX compiles without errors', empirical: true },
      { id: 'no_overfull_hbox', cat: 'build', desc: 'No overfull hbox warnings', empirical: true },
      { id: 'figures_render', cat: 'build', desc: 'All figures render correctly', empirical: true },
      { id: 'refs_resolve', cat: 'build', desc: 'All references resolve', empirical: true },
      { id: 'citations_complete', cat: 'build', desc: 'No [?] citation markers', empirical: true },

      // Content integrity
      { id: 'abstract_standalone', cat: 'content', desc: 'Abstract is self-contained', empirical: false },
      { id: 'claims_supported', cat: 'content', desc: 'All claims have citations or proofs', empirical: false },
      { id: 'math_verified', cat: 'content', desc: 'Mathematical derivations verified', empirical: false },
      { id: 'no_placeholder', cat: 'content', desc: 'No TODO/TBD/XXX placeholders', empirical: true },
      { id: 'consistent_notation', cat: 'content', desc: 'Notation consistent throughout', empirical: false },

      // AI-POWERED VERIFICATION (50c tools)
      { id: 'math_verified_bcalc', cat: 'ai-verify', desc: '[bCalc] Math expressions validated', empirical: true },
      { id: 'proofs_verified_genius', cat: 'ai-verify', desc: '[genius+] Proofs checked for gaps', empirical: true },
      { id: 'claims_validated_search', cat: 'ai-verify', desc: '[web_search] Novelty claims validated', empirical: true },

      // Reproducibility
      { id: 'code_available', cat: 'repro', desc: 'Code repository linked', empirical: true },
      { id: 'data_available', cat: 'repro', desc: 'Dataset accessible or described', empirical: false },
      { id: 'hyperparams_listed', cat: 'repro', desc: 'All hyperparameters specified', empirical: false },
      { id: 'compute_resources', cat: 'repro', desc: 'Compute requirements stated', empirical: false },

      // Ethics
      { id: 'ethics_statement', cat: 'ethics', desc: 'Ethics statement if applicable', empirical: false },
      { id: 'limitations_discussed', cat: 'ethics', desc: 'Limitations acknowledged', empirical: false },
      { id: 'societal_impact', cat: 'ethics', desc: 'Broader impact discussed', empirical: false },

      // Formatting
      { id: 'arxiv_format', cat: 'format', desc: 'Follows arXiv formatting guidelines', empirical: true },
      { id: 'page_limit', cat: 'format', desc: 'Within page limit for category', empirical: true },
      { id: 'anon_submission', cat: 'format', desc: 'Anonymized if required', empirical: true },
    ],
  },

  medical: {
    name: 'Medical/Clinical Publication',
    checks: [
      // Regulatory
      { id: 'irb_approval', cat: 'regulatory', desc: 'IRB/Ethics approval documented', empirical: true },
      { id: 'trial_registration', cat: 'regulatory', desc: 'Trial registered (if applicable)', empirical: true },
      { id: 'consort_checklist', cat: 'regulatory', desc: 'CONSORT/STROBE checklist completed', empirical: true },
      { id: 'data_privacy', cat: 'regulatory', desc: 'Patient data de-identified', empirical: true },

      // Statistical rigor
      { id: 'sample_size_justified', cat: 'stats', desc: 'Sample size calculation provided', empirical: false },
      { id: 'stats_methods_appropriate', cat: 'stats', desc: 'Statistical methods appropriate', empirical: false },
      { id: 'confidence_intervals', cat: 'stats', desc: 'Confidence intervals reported', empirical: true },
      { id: 'effect_sizes', cat: 'stats', desc: 'Effect sizes reported', empirical: true },
      { id: 'multiple_comparisons', cat: 'stats', desc: 'Multiple comparisons addressed', empirical: false },

      // Clinical validity
      { id: 'endpoints_defined', cat: 'clinical', desc: 'Primary/secondary endpoints defined', empirical: true },
      { id: 'adverse_events', cat: 'clinical', desc: 'Adverse events reported', empirical: true },
      { id: 'limitations_stated', cat: 'clinical', desc: 'Study limitations stated', empirical: true },
      { id: 'generalizability', cat: 'clinical', desc: 'Generalizability discussed', empirical: false },

      // Conflicts
      { id: 'coi_disclosed', cat: 'disclosure', desc: 'Conflicts of interest disclosed', empirical: true },
      { id: 'funding_disclosed', cat: 'disclosure', desc: 'Funding sources disclosed', empirical: true },
      { id: 'author_contributions', cat: 'disclosure', desc: 'Author contributions listed', empirical: true },
    ],
  },

  science: {
    name: 'Scientific Publication',
    checks: [
      // Methodology
      { id: 'methods_reproducible', cat: 'methods', desc: 'Methods described in reproducible detail', empirical: false },
      { id: 'controls_appropriate', cat: 'methods', desc: 'Control experiments appropriate', empirical: false },
      { id: 'sample_size_adequate', cat: 'methods', desc: 'Sample size adequate for claims', empirical: false },
      { id: 'blinding_described', cat: 'methods', desc: 'Blinding procedures described', empirical: false },

      // AI-POWERED VERIFICATION (50c tools)
      { id: 'methodology_hints', cat: 'ai-verify', desc: '[hints+] Methodology weaknesses', empirical: true },
      { id: 'claims_validated_search', cat: 'ai-verify', desc: '[web_search] Prior work check', empirical: true },

      // Data integrity
      { id: 'raw_data_available', cat: 'data', desc: 'Raw data available/accessible', empirical: true },
      { id: 'data_processing_documented', cat: 'data', desc: 'Data processing steps documented', empirical: false },
      { id: 'outliers_handled', cat: 'data', desc: 'Outlier handling described', empirical: false },
      { id: 'error_bars_explained', cat: 'data', desc: 'Error bars/uncertainty quantified', empirical: true },

      // Claims
      { id: 'claims_match_evidence', cat: 'claims', desc: 'Claims proportional to evidence', empirical: false },
      { id: 'alternative_explanations', cat: 'claims', desc: 'Alternative explanations addressed', empirical: false },
      { id: 'no_overclaiming', cat: 'claims', desc: 'No overclaiming in abstract/title', empirical: false },
      { id: 'novelty_justified', cat: 'claims', desc: 'Novelty claims justified vs prior work', empirical: false },

      // Figures
      { id: 'figures_clear', cat: 'figures', desc: 'Figures clear at print resolution', empirical: true },
      { id: 'axes_labeled', cat: 'figures', desc: 'All axes labeled with units', empirical: true },
      { id: 'legends_complete', cat: 'figures', desc: 'Figure legends self-contained', empirical: false },
      { id: 'color_accessible', cat: 'figures', desc: 'Color-blind accessible', empirical: true },
    ],
  },

  math: {
    name: 'Mathematics Paper',
    checks: [
      // Proofs
      { id: 'proofs_complete', cat: 'proofs', desc: 'All proofs complete (no gaps)', empirical: false },
      { id: 'proofs_verified', cat: 'proofs', desc: 'Proofs independently verified', empirical: false },
      { id: 'edge_cases_handled', cat: 'proofs', desc: 'Edge cases and degeneracies handled', empirical: false },
      { id: 'assumptions_explicit', cat: 'proofs', desc: 'All assumptions explicitly stated', empirical: false },

      // AI-POWERED VERIFICATION (50c tools)
      { id: 'math_verified_bcalc', cat: 'ai-verify', desc: '[bCalc] Mathematical verification', empirical: true },
      { id: 'proofs_verified_genius', cat: 'ai-verify', desc: '[genius+] Proof gap detection', empirical: true },
      { id: 'claims_validated_search', cat: 'ai-verify', desc: '[web_search] Novelty validation', empirical: true },

      // Definitions
      { id: 'terms_defined', cat: 'defs', desc: 'All terms defined before use', empirical: true },
      { id: 'notation_consistent', cat: 'defs', desc: 'Notation consistent with literature', empirical: false },
      { id: 'numbering_correct', cat: 'defs', desc: 'Theorem/lemma numbering correct', empirical: true },

      // Examples
      { id: 'examples_verify_claims', cat: 'examples', desc: 'Examples verify main theorems', empirical: false },
      { id: 'counterexamples_checked', cat: 'examples', desc: 'Potential counterexamples addressed', empirical: false },
      { id: 'computational_verified', cat: 'examples', desc: 'Computational results verified', empirical: true },

      // Literature
      { id: 'prior_work_cited', cat: 'refs', desc: 'Prior work appropriately cited', empirical: false },
      { id: 'no_overclaiming_novelty', cat: 'refs', desc: 'Novelty claims accurate', empirical: false },
      { id: 'comparison_to_existing', cat: 'refs', desc: 'Results compared to existing bounds', empirical: false },
    ],
  },
});
205
-
206
// 50c API integration for AI-powered verification.
/**
 * Call a 50c MCP tool over HTTPS (JSON-RPC `tools/call`).
 *
 * The API key is resolved from the FIFTYC_API_KEY environment variable,
 * falling back to the '50c' / '50c-ai' server entry in ~/.verdent/mcp.json.
 * Failures never throw: an `{ error }` object is returned instead so
 * callers can degrade to a SKIP result.
 *
 * @param {string} tool - MCP tool name (e.g. 'bcalc', 'genius_plus', 'web_search').
 * @param {object} params - Tool arguments, forwarded as-is.
 * @param {number} [timeout=60000] - Abort the HTTP request after this many ms.
 * @returns {Promise<*>} The tool's text result (or raw result object), or { error: string }.
 */
async function call50cTool(tool, params, timeout = 60000) {
  const fs = require('fs');
  const path = require('path');

  // Prefer the environment; fall back to the MCP config file.
  let apiKey = process.env.FIFTYC_API_KEY;
  if (!apiKey) {
    try {
      const homeDir = process.env.USERPROFILE || process.env.HOME;
      const mcpPath = path.join(homeDir, '.verdent', 'mcp.json');
      if (fs.existsSync(mcpPath)) {
        const mcp = JSON.parse(fs.readFileSync(mcpPath, 'utf8'));
        const server = mcp.mcpServers?.['50c'] || mcp.mcpServers?.['50c-ai'];
        apiKey = server?.env?.FIFTYC_API_KEY;
      }
    } catch (e) {
      // Best-effort config lookup; fall through to the "no key" error below.
    }
  }

  if (!apiKey) {
    return { error: 'No 50c API key configured' };
  }

  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeout);
  try {
    const res = await fetch('https://api.50c.ai/mcp', {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`
      },
      body: JSON.stringify({
        jsonrpc: '2.0',
        method: 'tools/call',
        params: { name: tool, arguments: params },
        id: Date.now()
      }),
      signal: controller.signal
    });
    const data = await res.json();
    // MCP tool results carry text in result.content[0].text; fall back to raw.
    return data.result?.content?.[0]?.text || data.result || data;
  } catch (e) {
    return { error: e.message };
  } finally {
    // BUGFIX: the timer was previously cleared only on the success path,
    // leaving a live timer (and a pending abort) after fetch/JSON failures.
    clearTimeout(timeoutId);
  }
}
255
-
256
// Empirical check implementations, keyed by check id from PROFILES.
// Each implementation receives a context { cwd, profile, files, packageJson }
// and resolves to { pass: true|false|null, msg } — pass:null means SKIP
// (the check could not be performed, which is not a failure).
const EMPIRICAL_CHECKS = {
  // --- NPM checks ---

  // Verify the local package version is strictly greater than the latest
  // version published on the npm registry (404 means a brand-new package).
  async pkg_version(ctx) {
    const pkg = ctx.packageJson;
    if (!pkg) return { pass: false, msg: 'No package.json found' };

    try {
      const res = await fetch(`https://registry.npmjs.org/${pkg.name}/latest`);
      if (res.status === 404) return { pass: true, msg: 'New package (not on npm yet)' };
      const data = await res.json();
      const npmVersion = data.version;
      const localVersion = pkg.version;

      if (localVersion === npmVersion) {
        return { pass: false, msg: `Version ${localVersion} already published. Bump required.` };
      }

      // Naive x.y.z comparison; pre-release tags are not handled.
      const [lMaj, lMin, lPatch] = localVersion.split('.').map(Number);
      const [nMaj, nMin, nPatch] = npmVersion.split('.').map(Number);

      if (lMaj > nMaj || (lMaj === nMaj && lMin > nMin) || (lMaj === nMaj && lMin === nMin && lPatch > nPatch)) {
        return { pass: true, msg: `${localVersion} > ${npmVersion} (npm)` };
      }

      return { pass: false, msg: `Local ${localVersion} <= npm ${npmVersion}` };
    } catch (e) {
      return { pass: false, msg: `npm check failed: ${e.message}` };
    }
  },

  // HEAD-request the declared homepage and require a 2xx response.
  async pkg_homepage(ctx) {
    const url = ctx.packageJson?.homepage;
    if (!url) return { pass: false, msg: 'No homepage in package.json' };

    try {
      const res = await fetch(url, { method: 'HEAD' });
      return res.ok
        ? { pass: true, msg: `${url} is live` }
        : { pass: false, msg: `${url} returned ${res.status}` };
    } catch (e) {
      return { pass: false, msg: `${url} unreachable: ${e.message}` };
    }
  },

  // Check the repository URL resolves over HTTPS (git+/.git noise stripped).
  async pkg_repo(ctx) {
    const repo = ctx.packageJson?.repository?.url || ctx.packageJson?.repository;
    if (!repo) return { pass: null, msg: 'No repository URL (optional)' };

    // Convert git URL to HTTPS for checking.
    let checkUrl = String(repo).replace(/^git\+/, '').replace(/\.git$/, '');
    if (checkUrl.startsWith('git://')) {
      checkUrl = checkUrl.replace('git://', 'https://');
    }

    try {
      const res = await fetch(checkUrl, { method: 'HEAD' });
      return res.ok
        ? { pass: true, msg: `${checkUrl} accessible` }
        : { pass: false, msg: `${checkUrl} returned ${res.status}` };
    } catch (e) {
      return { pass: false, msg: `${checkUrl} unreachable` };
    }
  },

  // Run `node -c` (syntax-only check) on up to 20 JS files.
  async syntax_valid(ctx) {
    const { execSync } = require('child_process');
    const jsFiles = ctx.files.filter(f => f.endsWith('.js') || f.endsWith('.mjs'));
    const errors = [];

    for (const file of jsFiles.slice(0, 20)) { // Limit to 20 files
      try {
        execSync(`node -c "${file}"`, { stdio: 'pipe', cwd: ctx.cwd });
      } catch (e) {
        errors.push(file);
      }
    }

    return errors.length === 0
      ? { pass: true, msg: `${jsFiles.length} JS files valid` }
      : { pass: false, msg: `Syntax errors in: ${errors.join(', ')}` };
  },

  // Scan source files for credential-looking patterns.
  // BUGFIX: previously shelled out to Windows-only `findstr`, which threw on
  // every other platform and was swallowed, so the check silently passed.
  // Now scans with fs directly, which is portable and has no quoting issues.
  async no_hardcoded_secrets(ctx) {
    const fs = require('fs');
    const patterns = [
      /api[_-]?key\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
      /password\s*[=:]\s*["'][^"']{8,}/i,
      /secret\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
      /token\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
      /AWS[_-]?ACCESS[_-]?KEY/i,
      /PRIVATE[_-]?KEY/i,
    ];

    const findings = [];
    const candidates = ctx.files.filter(f => /\.(js|ts|json)$/.test(f)).slice(0, 50);
    for (const file of candidates) {
      let content;
      try {
        content = fs.readFileSync(file, 'utf8');
      } catch (e) {
        continue; // unreadable file — skip it
      }
      for (const pattern of patterns) {
        if (pattern.test(content)) {
          findings.push(pattern.source.substring(0, 20) + '...');
          break; // one finding per file is enough
        }
      }
    }

    return findings.length === 0
      ? { pass: true, msg: 'No hardcoded secrets detected' }
      : { pass: false, msg: `Potential secrets: ${findings.join(', ')}` };
  },

  // Extract URLs from the README and HEAD-check the first 10.
  async readme_links_live(ctx) {
    const fs = require('fs');
    const readmePath = ctx.files.find(f => f.toLowerCase().endsWith('readme.md'));
    if (!readmePath) return { pass: false, msg: 'No README.md' };

    const content = fs.readFileSync(readmePath, 'utf8');
    const urlPattern = /https?:\/\/[^\s\)\"\']+/g;
    const urls = [...new Set(content.match(urlPattern) || [])];

    const deadLinks = [];
    for (const url of urls.slice(0, 10)) { // Check first 10
      try {
        // BUGFIX: `timeout` is not a fetch option and was silently ignored;
        // use AbortSignal.timeout so slow links actually time out at 5s.
        const res = await fetch(url, { method: 'HEAD', signal: AbortSignal.timeout(5000) });
        if (!res.ok) deadLinks.push(url);
      } catch (e) {
        deadLinks.push(url);
      }
    }

    return deadLinks.length === 0
      ? { pass: true, msg: `${urls.length} links verified` }
      : { pass: false, msg: `Dead links: ${deadLinks.join(', ')}` };
  },

  // Heuristic scan for localhost addresses left in shippable code.
  async no_localhost(ctx) {
    const fs = require('fs');
    const path = require('path');
    const findings = [];

    for (const file of ctx.files.filter(f => /\.(js|ts|json)$/.test(f)).slice(0, 30)) {
      // Skip this file itself (it contains localhost patterns for checking)
      if (path.basename(file) === 'pre-publish.js') continue;

      try {
        const content = fs.readFileSync(file, 'utf8');
        if (/localhost|127\.0\.0\.1|0\.0\.0\.0/.test(content)) {
          // Heuristic: ignore comments and lines that look like fallbacks
          // or regex/pattern definitions rather than real endpoints.
          const lines = content.split('\n');
          for (let i = 0; i < lines.length; i++) {
            const line = lines[i];
            if (/localhost|127\.0\.0\.1/.test(line) && !line.trim().startsWith('//') && !line.includes('||') && !line.includes('regex') && !line.includes('pattern')) {
              findings.push(`${path.basename(file)}:${i + 1}`);
              break;
            }
          }
        }
      } catch (e) {}
    }

    return findings.length === 0
      ? { pass: true, msg: 'No localhost references' }
      : { pass: false, msg: `localhost found: ${findings.slice(0, 3).join(', ')}` };
  },

  // Parse `npm pack --dry-run` output and enforce a 10MB ceiling.
  async size_reasonable(ctx) {
    const { execSync } = require('child_process');
    try {
      const result = execSync('npm pack --dry-run 2>&1', { cwd: ctx.cwd, encoding: 'utf8' });
      const sizeMatch = result.match(/total files.*?(\d+(?:\.\d+)?)\s*(kB|MB|B)/i);
      if (sizeMatch) {
        let sizeKB = parseFloat(sizeMatch[1]);
        if (sizeMatch[2].toLowerCase() === 'mb') sizeKB *= 1024;
        if (sizeMatch[2].toLowerCase() === 'b') sizeKB /= 1024;

        return sizeKB < 10240 // 10MB limit
          ? { pass: true, msg: `Package size: ${sizeKB.toFixed(1)} KB` }
          : { pass: false, msg: `Package too large: ${sizeKB.toFixed(1)} KB (limit 10MB)` };
      }
    } catch (e) {}
    return { pass: null, msg: 'Could not determine package size' };
  },

  // --- arXiv checks ---

  // Compile the main .tex file with pdflatex; any nonzero exit fails.
  async latex_compiles(ctx) {
    const { execSync } = require('child_process');
    const texFile = ctx.files.find(f => f.endsWith('.tex') && !f.includes('preamble'));
    if (!texFile) return { pass: false, msg: 'No .tex file found' };

    try {
      execSync(`pdflatex -interaction=nonstopmode "${texFile}"`, { cwd: ctx.cwd, stdio: 'pipe' });
      return { pass: true, msg: 'LaTeX compiles' };
    } catch (e) {
      return { pass: false, msg: 'LaTeX compilation failed' };
    }
  },

  // Look for TODO/FIXME/XXX/TBD/PLACEHOLDER markers in the first 50 files.
  async no_placeholder(ctx) {
    const fs = require('fs');
    const findings = [];
    const patterns = /\b(TODO|FIXME|XXX|TBD|PLACEHOLDER)\b/gi;

    for (const file of ctx.files.slice(0, 50)) {
      try {
        const content = fs.readFileSync(file, 'utf8');
        const matches = content.match(patterns);
        if (matches) {
          findings.push(`${file}: ${matches.length} placeholders`);
        }
      } catch (e) {}
    }

    return findings.length === 0
      ? { pass: true, msg: 'No placeholders found' }
      : { pass: false, msg: findings.slice(0, 3).join('; ') };
  },

  // AI-powered math verification using bCalc: sample the first theorem or
  // display equation and ask bCalc to verify it.
  async math_verified_bcalc(ctx) {
    const fs = require('fs');
    const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
    if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };

    // Extract key mathematical claims/equations.
    const content = fs.readFileSync(texFiles[0], 'utf8');
    const equations = content.match(/\$\$[^$]+\$\$/g) || [];
    const claims = content.match(/\\begin\{theorem\}[\s\S]*?\\end\{theorem\}/g) || [];

    if (equations.length === 0 && claims.length === 0) {
      return { pass: null, msg: 'No equations/theorems found to verify' };
    }

    // Sample first theorem or key equation for bCalc verification.
    const sample = claims[0] || equations[0];
    const cleanSample = sample.replace(/\\[a-z]+\{[^}]*\}/g, '').substring(0, 500);

    try {
      const result = await call50cTool('bcalc', {
        expression: cleanSample,
        mode: 'verify'
      }, 30000);

      if (result.error) {
        return { pass: null, msg: `bCalc unavailable: ${result.error}` };
      }

      // Check if bCalc found issues.
      const resultStr = typeof result === 'string' ? result : JSON.stringify(result);
      const hasIssues = /error|invalid|incorrect|false/i.test(resultStr);

      return hasIssues
        ? { pass: false, msg: `bCalc found issues: ${resultStr.substring(0, 100)}` }
        : { pass: true, msg: 'bCalc verified mathematical expressions' };
    } catch (e) {
      return { pass: null, msg: `bCalc check failed: ${e.message}` };
    }
  },

  // AI-powered proof verification using genius_plus: feed the abstract plus
  // the first proof and scan the response for gap/flaw red flags.
  async proofs_verified_genius(ctx) {
    const fs = require('fs');
    const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
    if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };

    const content = fs.readFileSync(texFiles[0], 'utf8');

    // Extract proofs.
    const proofs = content.match(/\\begin\{proof\}[\s\S]*?\\end\{proof\}/g) || [];
    if (proofs.length === 0) {
      return { pass: null, msg: 'No formal proofs found' };
    }

    // Take abstract + first proof for genius+ analysis.
    const abstractMatch = content.match(/\\begin\{abstract\}([\s\S]*?)\\end\{abstract\}/);
    const abstract = abstractMatch ? abstractMatch[1] : '';
    const firstProof = proofs[0].substring(0, 1000);

    try {
      const result = await call50cTool('genius_plus', {
        problem: `Verify this mathematical proof for logical gaps, unstated assumptions, or errors:\n\nContext: ${abstract.substring(0, 300)}\n\nProof:\n${firstProof}`
      }, 90000);

      if (result.error) {
        return { pass: null, msg: `genius+ unavailable: ${result.error}` };
      }

      const resultStr = typeof result === 'string' ? result : JSON.stringify(result);

      // Check for red flags.
      const redFlags = /gap|flaw|error|incorrect|missing|invalid|contradiction/i.test(resultStr);

      return redFlags
        ? { pass: false, msg: `genius+ found issues: ${resultStr.substring(0, 150)}...` }
        : { pass: true, msg: 'genius+ verified proof structure' };
    } catch (e) {
      return { pass: null, msg: `genius+ check failed: ${e.message}` };
    }
  },

  // AI-powered claim validation using web_search: search the paper title and
  // look for signals that similar work is already published.
  async claims_validated_search(ctx) {
    const fs = require('fs');
    const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
    if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };

    const content = fs.readFileSync(texFiles[0], 'utf8');

    // Extract title and key claims.
    const titleMatch = content.match(/\\title\{([^}]+)\}/);
    const title = titleMatch ? titleMatch[1] : '';

    // Find novelty claims. BUGFIX: dropped the /g flag — a global regex keeps
    // state in lastIndex across .test() calls, which makes reuse unreliable.
    const noveltyPatterns = /first|novel|new|unprecedented|breakthrough|improve|better than/i;
    const hasNoveltyClaims = noveltyPatterns.test(content);

    if (!hasNoveltyClaims || !title) {
      return { pass: null, msg: 'No novelty claims to validate' };
    }

    try {
      const result = await call50cTool('web_search', {
        query: title.replace(/[\\{}]/g, ' ').trim(),
        max_results: 5
      }, 30000);

      if (result.error) {
        return { pass: null, msg: `web_search unavailable: ${result.error}` };
      }

      const resultStr = typeof result === 'string' ? result : JSON.stringify(result);

      // Check if similar work already exists.
      const duplicateSignals = /already|published|existing|prior art|known result/i.test(resultStr);

      return duplicateSignals
        ? { pass: false, msg: `Potential prior work found - verify novelty: ${resultStr.substring(0, 100)}` }
        : { pass: true, msg: 'No obvious conflicting prior work found' };
    } catch (e) {
      return { pass: null, msg: `Search check failed: ${e.message}` };
    }
  },

  // Science-specific: surface methodology weaknesses via hints_plus.
  // Always resolves to pass:null — the hints are advisory, not pass/fail.
  async methodology_hints(ctx) {
    const fs = require('fs');
    const files = ctx.files.filter(f => /\.(tex|md|txt)$/.test(f));
    if (files.length === 0) return { pass: null, msg: 'No text files' };

    // Read first file (first 2000 chars only).
    const content = fs.readFileSync(files[0], 'utf8').substring(0, 2000);

    // Look for methods section.
    const methodsMatch = content.match(/method|experiment|procedure|approach/i);
    if (!methodsMatch) {
      return { pass: null, msg: 'No methods section detected' };
    }

    try {
      const result = await call50cTool('hints_plus', {
        query: `Evaluate scientific methodology for weaknesses: ${content.substring(0, 1000)}`
      }, 30000);

      if (result.error) {
        return { pass: null, msg: `hints+ unavailable: ${result.error}` };
      }

      // hints_plus returns brutal hints - just report them.
      return {
        pass: null,
        msg: `hints+: ${typeof result === 'string' ? result.substring(0, 200) : JSON.stringify(result).substring(0, 200)}`
      };
    } catch (e) {
      return { pass: null, msg: `hints+ check failed: ${e.message}` };
    }
  },
};
636
-
637
/**
 * Main verification function: run the checklist for one publish profile.
 *
 * Walks the target directory (max depth 3, skipping dotfiles/node_modules),
 * loads package.json if present, runs every check in the profile, and
 * aggregates PASS/FAIL/SKIP/MANUAL counts into a scored report.
 *
 * @param {string} profile - Key into PROFILES ('npm', 'github', 'arxiv', ...).
 * @param {object} [options] - { cwd } overrides the working directory.
 * @returns {Promise<object>} Results object, or { error } for unknown profiles.
 */
async function verify(profile, options = {}) {
  const config = PROFILES[profile];
  if (!config) {
    return { error: `Unknown profile: ${profile}. Available: ${Object.keys(PROFILES).join(', ')}` };
  }

  const fs = require('fs');
  const path = require('path');
  const cwd = options.cwd || process.cwd();

  // Shared context handed to every empirical check.
  const ctx = {
    cwd,
    profile,
    files: [],
    packageJson: null,
  };

  // Gather files. BUGFIX: statSync used to run unprotected, so one dangling
  // symlink or permission error silently aborted the entire walk (the outer
  // catch swallowed it), leaving a partial file list.
  try {
    const walk = (dir, depth = 0) => {
      if (depth > 3) return; // Max depth
      const items = fs.readdirSync(dir);
      for (const item of items) {
        if (item.startsWith('.') || item === 'node_modules') continue;
        const full = path.join(dir, item);
        let stat;
        try {
          stat = fs.statSync(full);
        } catch (e) {
          continue; // unstat-able entry — skip it, keep walking
        }
        if (stat.isDirectory()) {
          walk(full, depth + 1);
        } else {
          ctx.files.push(full);
        }
      }
    };
    walk(cwd);
  } catch (e) {}

  // Load package.json if it exists (malformed JSON is treated as absent).
  try {
    const pkgPath = path.join(cwd, 'package.json');
    if (fs.existsSync(pkgPath)) {
      ctx.packageJson = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
    }
  } catch (e) {}

  // Run checks sequentially and tally outcomes.
  const results = {
    profile: config.name,
    timestamp: new Date().toISOString(),
    summary: { pass: 0, fail: 0, skip: 0, manual: 0 },
    checks: [],
    ready: false,
  };

  for (const check of config.checks) {
    const result = { ...check, status: null, msg: '' };

    if (check.empirical && EMPIRICAL_CHECKS[check.id]) {
      try {
        const r = await EMPIRICAL_CHECKS[check.id](ctx);
        // pass:true → PASS, pass:false → FAIL, pass:null → SKIP.
        result.status = r.pass ? 'PASS' : (r.pass === false ? 'FAIL' : 'SKIP');
        result.msg = r.msg;
      } catch (e) {
        result.status = 'SKIP';
        result.msg = `Error: ${e.message}`;
      }
    } else if (check.empirical) {
      result.status = 'SKIP';
      result.msg = 'Check not implemented';
    } else {
      result.status = 'MANUAL';
      result.msg = 'Requires manual review';
    }

    results.checks.push(result);

    if (result.status === 'PASS') results.summary.pass++;
    else if (result.status === 'FAIL') results.summary.fail++;
    else if (result.status === 'SKIP') results.summary.skip++;
    else results.summary.manual++;
  }

  results.ready = results.summary.fail === 0;
  // BUGFIX: guard the denominator — if every check was skipped the original
  // computed 0/0 and reported score: NaN. Score 0 when nothing was scored.
  const scored = results.summary.pass + results.summary.fail + results.summary.manual;
  results.score = scored === 0 ? 0 : Math.round((results.summary.pass / scored) * 100);

  return results;
}
727
-
728
- /**
729
- * Generate verification receipt as Markdown
730
- */
731
- function generateReceipt(results, options = {}) {
732
- const lines = [
733
- '# 50c Pre-Publish Verification Receipt',
734
- '',
735
- `**Generated:** ${results.timestamp}`,
736
- `**Profile:** ${results.profile}`,
737
- `**Directory:** ${options.cwd || 'N/A'}`,
738
- `**Verdict:** ${results.ready ? '✅ READY TO PUBLISH' : '❌ NOT READY - FIX ISSUES'}`,
739
- '',
740
- '---',
741
- '',
742
- '## Summary',
743
- '',
744
- '| Metric | Count |',
745
- '|--------|-------|',
746
- `| Pass | ${results.summary.pass} |`,
747
- `| Fail | ${results.summary.fail} |`,
748
- `| Skip | ${results.summary.skip} |`,
749
- `| Manual | ${results.summary.manual} |`,
750
- `| **Score** | **${results.score}/100** |`,
751
- '',
752
- '---',
753
- '',
754
- '## Detailed Results',
755
- '',
756
- ];
757
-
758
- // Group by category
759
- const byCategory = {};
760
- for (const check of results.checks) {
761
- if (!byCategory[check.cat]) byCategory[check.cat] = [];
762
- byCategory[check.cat].push(check);
763
- }
764
-
765
- for (const [cat, checks] of Object.entries(byCategory)) {
766
- lines.push(`### ${cat.toUpperCase()}`);
767
- lines.push('');
768
- lines.push('| Status | Check | Message |');
769
- lines.push('|--------|-------|---------|');
770
- for (const c of checks) {
771
- const icon = c.status === 'PASS' ? '✅' : c.status === 'FAIL' ? '❌' : c.status === 'SKIP' ? '⏭️' : '👁️';
772
- lines.push(`| ${icon} ${c.status} | ${c.desc} | ${(c.msg || '').substring(0, 50)} |`);
773
- }
774
- lines.push('');
775
- }
776
-
777
- // Cost estimate
778
- const aiChecks = results.checks.filter(c => c.cat === 'ai-verify' && c.status !== 'SKIP');
779
- if (aiChecks.length > 0) {
780
- lines.push('---');
781
- lines.push('');
782
- lines.push('## AI Verification Cost');
783
- lines.push('');
784
- lines.push('| Tool | Est. Cost |');
785
- lines.push('|------|-----------|');
786
- for (const c of aiChecks) {
787
- const cost = c.id.includes('bcalc') ? '$0.15' :
788
- c.id.includes('genius') ? '$0.65' :
789
- c.id.includes('hints') ? '$0.10' : 'FREE';
790
- lines.push(`| ${c.id} | ${cost} |`);
791
- }
792
- lines.push('');
793
- }
794
-
795
- lines.push('---');
796
- lines.push('');
797
- lines.push('*Generated by 50c Pre-Publish Verification Tool*');
798
- lines.push('*AI verification does not replace peer review*');
799
-
800
- return lines.join('\n');
801
- }
802
-
803
- /**
804
- * Full verification with receipt generation
805
- */
806
- async function verifyWithReceipt(profile, options = {}) {
807
- const results = await verify(profile, options);
808
- const receipt = generateReceipt(results, options);
809
- return { ...results, receipt };
810
- }
811
-
812
- module.exports = { verify, verifyWithReceipt, generateReceipt, PROFILES, EMPIRICAL_CHECKS, call50cTool };
1
+ /**
2
+ * 50c Pre-Publish Verification Tool
3
+ *
4
+ * Thorough checklist before publishing to:
5
+ * - npm (packages) — with supply chain attack detection
6
+ * - GitHub (releases)
7
+ * - arXiv (papers)
8
+ * - Medical journals
9
+ * - Scientific publications
10
+ *
11
+ * "Ask once, verify everything"
12
+ * "Never let a Verdant happen again"
13
+ */
14
+
15
+ const { isPrivateIP, isValidIPv4, extractPublicIPs } = require('./ip-utils');
16
+
17
+ // Severity levels for supply chain checks
18
+ const CHECK_SEVERITY = {
19
+ no_hardcoded_ips: 'CRITICAL',
20
+ no_binary_files: 'CRITICAL',
21
+ no_env_leaks: 'CRITICAL',
22
+ no_suspicious_urls: 'HIGH',
23
+ no_obfuscated_code: 'HIGH',
24
+ no_dangerous_scripts: 'HIGH',
25
+ npm_diff_check: 'HIGH',
26
+ no_hardcoded_secrets: 'HIGH',
27
+ no_network_calls: 'MEDIUM',
28
+ no_minified_without_source: 'MEDIUM',
29
+ no_dynamic_requires: 'MEDIUM',
30
+ };
31
+
32
+ const SEVERITY_WEIGHTS = {
33
+ CRITICAL: 100,
34
+ HIGH: 50,
35
+ MEDIUM: 20,
36
+ };
37
+
38
+ // Verification profiles for different publish targets
39
+ const PROFILES = {
40
+ npm: {
41
+ name: 'NPM Package',
42
+ checks: [
43
+ // SUPPLY CHAIN SECURITY (checked FIRST — stop-ship on failure)
44
+ { id: 'no_hardcoded_ips', cat: 'supply-chain', desc: 'No hardcoded public IPs in source code', empirical: true },
45
+ { id: 'no_suspicious_urls', cat: 'supply-chain', desc: 'No hardcoded URLs to non-standard domains', empirical: true },
46
+ { id: 'no_obfuscated_code', cat: 'supply-chain', desc: 'No eval/Function/Buffer.from obfuscation patterns', empirical: true },
47
+ { id: 'no_network_calls', cat: 'supply-chain', desc: 'No raw network calls to non-registry endpoints', empirical: true },
48
+ { id: 'no_dangerous_scripts', cat: 'supply-chain', desc: 'No dangerous preinstall/postinstall scripts', empirical: true },
49
+ { id: 'no_binary_files', cat: 'supply-chain', desc: 'No executable binary files in package', empirical: true },
50
+ { id: 'no_minified_without_source', cat: 'supply-chain', desc: 'No minified JS without source maps', empirical: true },
51
+ { id: 'no_env_leaks', cat: 'supply-chain', desc: 'No .env/credentials/key files in package', empirical: true },
52
+ { id: 'npm_diff_check', cat: 'supply-chain', desc: 'Diff against last published version — no injected IPs/URLs/eval', empirical: true },
53
+ { id: 'no_dynamic_requires', cat: 'supply-chain', desc: 'No dynamic require()/import() with variables', empirical: true },
54
+
55
+ // Existing security
56
+ { id: 'no_hardcoded_secrets', cat: 'security', desc: 'No hardcoded API keys/passwords/tokens', empirical: true },
57
+ { id: 'no_localhost', cat: 'security', desc: 'No localhost/127.0.0.1 in production code', empirical: true },
58
+ { id: 'deps_secure', cat: 'security', desc: 'No known vulnerabilities in dependencies', empirical: true },
59
+
60
+ // Package basics
61
+ { id: 'pkg_name', cat: 'metadata', desc: 'Package name valid and available', empirical: true },
62
+ { id: 'pkg_version', cat: 'metadata', desc: 'Version bumped from npm registry', empirical: true },
63
+ { id: 'pkg_desc', cat: 'metadata', desc: 'Description clear, no marketing fluff', empirical: false },
64
+ { id: 'pkg_keywords', cat: 'metadata', desc: 'Keywords relevant and searchable', empirical: false },
65
+ { id: 'pkg_license', cat: 'metadata', desc: 'LICENSE file exists and valid', empirical: true },
66
+ { id: 'pkg_repo', cat: 'metadata', desc: 'Repository URL valid and accessible', empirical: true },
67
+ { id: 'pkg_homepage', cat: 'metadata', desc: 'Homepage URL live and correct', empirical: true },
68
+
69
+ // Code quality
70
+ { id: 'syntax_valid', cat: 'code', desc: 'All JS/TS files pass syntax check', empirical: true },
71
+ { id: 'no_console_log', cat: 'code', desc: 'No debug console.log statements', empirical: true },
72
+ { id: 'no_todo_fixme', cat: 'code', desc: 'No unresolved TODO/FIXME comments', empirical: true },
73
+
74
+ // Documentation
75
+ { id: 'readme_exists', cat: 'docs', desc: 'README.md exists', empirical: true },
76
+ { id: 'readme_install', cat: 'docs', desc: 'Installation instructions present', empirical: true },
77
+ { id: 'readme_usage', cat: 'docs', desc: 'Usage examples present', empirical: true },
78
+ { id: 'readme_links_live', cat: 'docs', desc: 'All README links are live', empirical: true },
79
+ { id: 'changelog_updated', cat: 'docs', desc: 'CHANGELOG updated for this version', empirical: true },
80
+
81
+ // Build/Test
82
+ { id: 'tests_pass', cat: 'test', desc: 'All tests pass', empirical: true },
83
+ { id: 'build_clean', cat: 'test', desc: 'Build completes without warnings', empirical: true },
84
+ { id: 'entry_point_valid', cat: 'test', desc: 'Main/bin entry points resolve', empirical: true },
85
+
86
+ // Files
87
+ { id: 'files_included', cat: 'files', desc: 'Only intended files in package', empirical: true },
88
+ { id: 'no_env_files', cat: 'files', desc: 'No .env files included', empirical: true },
89
+ { id: 'no_test_files', cat: 'files', desc: 'Test files excluded from package', empirical: true },
90
+ { id: 'size_reasonable', cat: 'files', desc: 'Package size under 10MB', empirical: true },
91
+ ]
92
+ },
93
+
94
+ github: {
95
+ name: 'GitHub Release',
96
+ checks: [
97
+ { id: 'tag_format', cat: 'release', desc: 'Tag follows semver (vX.Y.Z)', empirical: true },
98
+ { id: 'tag_unique', cat: 'release', desc: 'Tag does not exist yet', empirical: true },
99
+ { id: 'branch_clean', cat: 'release', desc: 'No uncommitted changes', empirical: true },
100
+ { id: 'branch_pushed', cat: 'release', desc: 'All commits pushed to remote', empirical: true },
101
+ { id: 'ci_passing', cat: 'release', desc: 'CI/CD pipeline green', empirical: true },
102
+ { id: 'release_notes', cat: 'docs', desc: 'Release notes written', empirical: false },
103
+ { id: 'breaking_changes', cat: 'docs', desc: 'Breaking changes documented', empirical: false },
104
+ { id: 'migration_guide', cat: 'docs', desc: 'Migration guide if needed', empirical: false },
105
+ { id: 'no_secrets_in_history', cat: 'security', desc: 'No secrets in git history', empirical: true },
106
+ { id: 'license_file', cat: 'legal', desc: 'LICENSE file present', empirical: true },
107
+ ]
108
+ },
109
+
110
+ arxiv: {
111
+ name: 'arXiv Paper',
112
+ checks: [
113
+ { id: 'latex_compiles', cat: 'build', desc: 'LaTeX compiles without errors', empirical: true },
114
+ { id: 'no_overfull_hbox', cat: 'build', desc: 'No overfull hbox warnings', empirical: true },
115
+ { id: 'figures_render', cat: 'build', desc: 'All figures render correctly', empirical: true },
116
+ { id: 'refs_resolve', cat: 'build', desc: 'All references resolve', empirical: true },
117
+ { id: 'citations_complete', cat: 'build', desc: 'No [?] citation markers', empirical: true },
118
+ { id: 'abstract_standalone', cat: 'content', desc: 'Abstract is self-contained', empirical: false },
119
+ { id: 'claims_supported', cat: 'content', desc: 'All claims supported by evidence', empirical: false },
120
+ { id: 'math_verified_bcalc', cat: 'ai-verify', desc: 'Mathematical expressions verified (bCalc)', empirical: true },
121
+ { id: 'proofs_verified_genius', cat: 'ai-verify', desc: 'Proofs verified for gaps (genius+)', empirical: true },
122
+ { id: 'claims_validated_search', cat: 'ai-verify', desc: 'Novelty claims validated (web search)', empirical: true },
123
+ { id: 'no_placeholder', cat: 'content', desc: 'No TODO/FIXME placeholders', empirical: true },
124
+ { id: 'notation_consistent', cat: 'content', desc: 'Notation consistent throughout', empirical: false },
125
+ { id: 'code_available', cat: 'reproducibility', desc: 'Code/data availability stated', empirical: false },
126
+ { id: 'hyperparams_listed', cat: 'reproducibility', desc: 'Hyperparameters documented', empirical: false },
127
+ { id: 'compute_resources', cat: 'reproducibility', desc: 'Compute resources stated', empirical: false },
128
+ { id: 'ethics_statement', cat: 'ethics', desc: 'Ethics statement included', empirical: false },
129
+ { id: 'limitations', cat: 'ethics', desc: 'Limitations discussed', empirical: false },
130
+ { id: 'societal_impact', cat: 'ethics', desc: 'Societal impact considered', empirical: false },
131
+ { id: 'arxiv_format', cat: 'format', desc: 'Follows arXiv guidelines', empirical: false },
132
+ { id: 'page_limits', cat: 'format', desc: 'Within page limits', empirical: false },
133
+ ]
134
+ },
135
+
136
+ medical: {
137
+ name: 'Medical Journal',
138
+ checks: [
139
+ { id: 'irb_approval', cat: 'regulatory', desc: 'IRB/ethics approval documented', empirical: false },
140
+ { id: 'trial_registration', cat: 'regulatory', desc: 'Clinical trial registered', empirical: false },
141
+ { id: 'consort_strobe', cat: 'regulatory', desc: 'CONSORT/STROBE checklist followed', empirical: false },
142
+ { id: 'patient_deidentified', cat: 'regulatory', desc: 'Patient data de-identified', empirical: false },
143
+ { id: 'sample_size', cat: 'statistics', desc: 'Sample size justified', empirical: false },
144
+ { id: 'appropriate_methods', cat: 'statistics', desc: 'Statistical methods appropriate', empirical: false },
145
+ { id: 'confidence_intervals', cat: 'statistics', desc: 'Confidence intervals reported', empirical: false },
146
+ { id: 'effect_sizes', cat: 'statistics', desc: 'Effect sizes reported', empirical: false },
147
+ { id: 'multiple_comparisons', cat: 'statistics', desc: 'Multiple comparisons corrected', empirical: false },
148
+ { id: 'endpoints_defined', cat: 'clinical', desc: 'Primary/secondary endpoints defined', empirical: false },
149
+ { id: 'adverse_events', cat: 'clinical', desc: 'Adverse events reported', empirical: false },
150
+ { id: 'clinical_limitations', cat: 'clinical', desc: 'Limitations discussed', empirical: false },
151
+ { id: 'generalizability', cat: 'clinical', desc: 'Generalizability discussed', empirical: false },
152
+ { id: 'coi_disclosure', cat: 'conflicts', desc: 'Conflicts of interest disclosed', empirical: false },
153
+ { id: 'funding_source', cat: 'conflicts', desc: 'Funding source declared', empirical: false },
154
+ { id: 'author_contributions', cat: 'conflicts', desc: 'Author contributions listed', empirical: false },
155
+ ]
156
+ },
157
+
158
+ science: {
159
+ name: 'Scientific Publication',
160
+ checks: [
161
+ { id: 'reproducibility', cat: 'methodology', desc: 'Methods reproducible', empirical: false },
162
+ { id: 'controls', cat: 'methodology', desc: 'Appropriate controls', empirical: false },
163
+ { id: 'sample_size_sci', cat: 'methodology', desc: 'Sample size adequate', empirical: false },
164
+ { id: 'blinding', cat: 'methodology', desc: 'Blinding where appropriate', empirical: false },
165
+ { id: 'methodology_hints', cat: 'ai-verify', desc: 'Methodology review (hints+)', empirical: true },
166
+ { id: 'claims_validated_search', cat: 'ai-verify', desc: 'Novelty validated (web search)', empirical: true },
167
+ { id: 'raw_data', cat: 'data', desc: 'Raw data available', empirical: false },
168
+ { id: 'processing_documented', cat: 'data', desc: 'Data processing documented', empirical: false },
169
+ { id: 'outlier_handling', cat: 'data', desc: 'Outlier handling described', empirical: false },
170
+ { id: 'error_bars', cat: 'data', desc: 'Error bars included', empirical: false },
171
+ { id: 'claims_match', cat: 'claims', desc: 'Claims match evidence', empirical: false },
172
+ { id: 'alternative_explanations', cat: 'claims', desc: 'Alternative explanations considered', empirical: false },
173
+ { id: 'no_overclaiming', cat: 'claims', desc: 'No overclaiming', empirical: false },
174
+ { id: 'novelty_justified', cat: 'claims', desc: 'Novelty justified', empirical: false },
175
+ { id: 'figure_clarity', cat: 'figures', desc: 'Figures clear', empirical: false },
176
+ { id: 'axis_labels', cat: 'figures', desc: 'Axis labels present', empirical: false },
177
+ { id: 'legends', cat: 'figures', desc: 'Legends included', empirical: false },
178
+ { id: 'color_blind', cat: 'figures', desc: 'Color-blind accessible', empirical: false },
179
+ { id: 'no_placeholder', cat: 'content', desc: 'No placeholders', empirical: true },
180
+ { id: 'prior_work', cat: 'literature', desc: 'Prior work cited', empirical: false },
181
+ { id: 'novelty_claims', cat: 'literature', desc: 'Novelty claims accurate', empirical: false },
182
+ ]
183
+ },
184
+
185
+ math: {
186
+ name: 'Mathematics Paper',
187
+ checks: [
188
+ { id: 'proofs_complete', cat: 'proofs', desc: 'All proofs complete', empirical: false },
189
+ { id: 'math_verified_bcalc', cat: 'ai-verify', desc: 'Math verified (bCalc)', empirical: true },
190
+ { id: 'proofs_verified_genius', cat: 'ai-verify', desc: 'Proofs verified (genius+)', empirical: true },
191
+ { id: 'claims_validated_search', cat: 'ai-verify', desc: 'Novelty validated (web search)', empirical: true },
192
+ { id: 'edge_cases', cat: 'proofs', desc: 'Edge cases handled', empirical: false },
193
+ { id: 'assumptions_explicit', cat: 'proofs', desc: 'Assumptions explicit', empirical: false },
194
+ { id: 'terms_defined', cat: 'definitions', desc: 'All terms defined', empirical: false },
195
+ { id: 'notation_consistent', cat: 'definitions', desc: 'Notation consistent', empirical: false },
196
+ { id: 'numbering_correct', cat: 'definitions', desc: 'Theorem numbering correct', empirical: false },
197
+ { id: 'examples_verify', cat: 'examples', desc: 'Examples verify theorems', empirical: false },
198
+ { id: 'counterexamples', cat: 'examples', desc: 'Counterexamples considered', empirical: false },
199
+ { id: 'computational_verify', cat: 'examples', desc: 'Computational verification', empirical: false },
200
+ { id: 'prior_work_cited', cat: 'literature', desc: 'Prior work cited', empirical: false },
201
+ { id: 'novelty_claims_math', cat: 'literature', desc: 'Novelty claims accurate', empirical: false },
202
+ { id: 'bounds_compared', cat: 'literature', desc: 'Bounds compared to existing', empirical: false },
203
+ { id: 'no_placeholder', cat: 'content', desc: 'No placeholders', empirical: true },
204
+ ]
205
+ },
206
+ };
207
+
208
+ /**
209
+ * Load .50c-security.json allowlist from project root
210
+ */
211
+ function loadAllowlist(cwd) {
212
+ const fs = require('fs');
213
+ const path = require('path');
214
+ const defaultAllowlist = {
215
+ allowedIPs: [],
216
+ allowedDomains: ['registry.npmjs.org', 'npmjs.com', 'npmjs.org', 'github.com', 'api.github.com', 'raw.githubusercontent.com', 'googleapis.com', 'api.50c.ai', 'beacon.50c.ai', '50c.ai', 'w3.org', 'www.w3.org', 'nodejs.org', 'localhost', 'ip-api.com'],
217
+ allowedScripts: [],
218
+ ignorePaths: ['node_modules/', '.git/'],
219
+ };
220
+
221
+ try {
222
+ const configPath = path.join(cwd, '.50c-security.json');
223
+ if (fs.existsSync(configPath)) {
224
+ const userConfig = JSON.parse(fs.readFileSync(configPath, 'utf8'));
225
+ return {
226
+ allowedIPs: [...defaultAllowlist.allowedIPs, ...(userConfig.allowedIPs || [])],
227
+ allowedDomains: [...defaultAllowlist.allowedDomains, ...(userConfig.allowedDomains || [])],
228
+ allowedScripts: [...defaultAllowlist.allowedScripts, ...(userConfig.allowedScripts || [])],
229
+ ignorePaths: [...defaultAllowlist.ignorePaths, ...(userConfig.ignorePaths || [])],
230
+ };
231
+ }
232
+ } catch (e) {}
233
+ return defaultAllowlist;
234
+ }
235
+
236
+ /**
237
+ * Call a remote 50c tool via API
238
+ */
239
+ async function call50cTool(tool, params, timeout = 60000) {
240
+ const fs = require('fs');
241
+ const path = require('path');
242
+
243
+ // Find API key
244
+ let apiKey = process.env.FIFTYC_API_KEY;
245
+ if (!apiKey) {
246
+ try {
247
+ const configPaths = [
248
+ path.join(process.env.HOME || process.env.USERPROFILE || '', '.verdent', 'mcp.json'),
249
+ path.join(process.env.HOME || process.env.USERPROFILE || '', '.50c', 'config.json'),
250
+ ];
251
+ for (const p of configPaths) {
252
+ if (fs.existsSync(p)) {
253
+ const cfg = JSON.parse(fs.readFileSync(p, 'utf8'));
254
+ apiKey = cfg.mcpServers?.['50c']?.env?.FIFTYC_API_KEY || cfg.apiKey;
255
+ if (apiKey) break;
256
+ }
257
+ }
258
+ } catch (e) {}
259
+ }
260
+
261
+ if (!apiKey) {
262
+ return { error: 'No 50c API key configured' };
263
+ }
264
+
265
+ try {
266
+ const res = await fetch('https://api.50c.ai/mcp', {
267
+ method: 'POST',
268
+ headers: {
269
+ 'Content-Type': 'application/json',
270
+ 'Authorization': `Bearer ${apiKey}`,
271
+ },
272
+ body: JSON.stringify({
273
+ jsonrpc: '2.0',
274
+ id: Date.now(),
275
+ method: 'tools/call',
276
+ params: { name: tool, arguments: params },
277
+ }),
278
+ signal: AbortSignal.timeout(timeout),
279
+ });
280
+
281
+ const data = await res.json();
282
+ return data.result?.content?.[0]?.text || data.result || data;
283
+ } catch (e) {
284
+ return { error: e.message };
285
+ }
286
+ }
287
+
288
+ // ==========================================
289
+ // EMPIRICAL CHECK IMPLEMENTATIONS
290
+ // ==========================================
291
+ const EMPIRICAL_CHECKS = {
292
+
293
+ // ====== SUPPLY CHAIN CHECKS (NEW) ======
294
+
295
+ /**
296
+ * #1 CRITICAL: Detect hardcoded public IPs the exact Verdant attack vector
297
+ * Scans all .js/.ts/.json files for routable public IP addresses
298
+ */
299
+ async no_hardcoded_ips(ctx) {
300
+ const fs = require('fs');
301
+ const path = require('path');
302
+ const allowlist = ctx.allowlist || loadAllowlist(ctx.cwd);
303
+ const findings = [];
304
+
305
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs|json)$/.test(f));
306
+
307
+ for (const file of sourceFiles.slice(0, 50)) {
308
+ const basename = path.basename(file);
309
+ // Skip self, lock files, and test fixtures
310
+ if (basename === 'pre-publish.js' || basename === 'ip-utils.js' ||
311
+ basename === 'backdoor-checker.js' || basename === 'package-lock.json' ||
312
+ file.includes('__test') || file.includes('.test.') || file.includes('.spec.')) continue;
313
+
314
+ try {
315
+ const content = fs.readFileSync(file, 'utf8');
316
+ const lines = content.split('\n');
317
+
318
+ for (let i = 0; i < lines.length; i++) {
319
+ const line = lines[i];
320
+ // Skip comments
321
+ if (line.trim().startsWith('//') || line.trim().startsWith('*') || line.trim().startsWith('/*')) continue;
322
+
323
+ const publicIPs = extractPublicIPs(line);
324
+ for (const ip of publicIPs) {
325
+ if (!allowlist.allowedIPs.includes(ip)) {
326
+ findings.push(`${path.basename(file)}:${i + 1} → ${ip}`);
327
+ }
328
+ }
329
+ }
330
+ } catch (e) {}
331
+ }
332
+
333
+ return findings.length === 0
334
+ ? { pass: true, msg: 'No hardcoded public IPs found' }
335
+ : { pass: false, msg: `[CRITICAL] Public IPs found: ${findings.slice(0, 5).join('; ')}` };
336
+ },
337
+
338
+ /**
339
+ * #2 HIGH: Detect hardcoded URLs to non-standard domains
340
+ */
341
+ async no_suspicious_urls(ctx) {
342
+ const fs = require('fs');
343
+ const path = require('path');
344
+ const allowlist = ctx.allowlist || loadAllowlist(ctx.cwd);
345
+ const findings = [];
346
+
347
+ const urlPattern = /https?:\/\/([a-zA-Z0-9.-]+)/g;
348
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs)$/.test(f));
349
+
350
+ for (const file of sourceFiles.slice(0, 50)) {
351
+ const basename = path.basename(file);
352
+ if (basename === 'pre-publish.js' || basename === 'backdoor-checker.js' ||
353
+ file.includes('__test') || file.includes('.test.') || file.includes('.spec.') ||
354
+ file.includes('README')) continue;
355
+
356
+ try {
357
+ const content = fs.readFileSync(file, 'utf8');
358
+ const lines = content.split('\n');
359
+
360
+ for (let i = 0; i < lines.length; i++) {
361
+ const line = lines[i];
362
+ if (line.trim().startsWith('//') || line.trim().startsWith('*')) continue;
363
+
364
+ let match;
365
+ urlPattern.lastIndex = 0;
366
+ while ((match = urlPattern.exec(line)) !== null) {
367
+ const domain = match[1].toLowerCase();
368
+ const isDomainAllowed = allowlist.allowedDomains.some(d =>
369
+ domain === d || domain.endsWith('.' + d)
370
+ );
371
+ if (!isDomainAllowed) {
372
+ // Check if it's an IP-based URL (already caught by no_hardcoded_ips)
373
+ if (!isValidIPv4(domain)) {
374
+ findings.push(`${path.basename(file)}:${i + 1} ${domain}`);
375
+ }
376
+ }
377
+ }
378
+ }
379
+ } catch (e) {}
380
+ }
381
+
382
+ // Deduplicate by domain
383
+ const uniqueDomains = [...new Set(findings.map(f => f.split(' ')[1]))];
384
+
385
+ return uniqueDomains.length === 0
386
+ ? { pass: true, msg: 'No suspicious URLs found' }
387
+ : { pass: false, msg: `Non-standard domains: ${uniqueDomains.slice(0, 5).join(', ')}` };
388
+ },
389
+
390
+ /**
391
+ * #3 HIGH: Detect obfuscated code patterns
392
+ */
393
+ async no_obfuscated_code(ctx) {
394
+ const fs = require('fs');
395
+ const path = require('path');
396
+ const findings = [];
397
+
398
+ const patterns = [
399
+ { re: /\beval\s*\(/, name: 'eval()' },
400
+ { re: /\bnew\s+Function\s*\(/, name: 'new Function()' },
401
+ { re: /Buffer\.from\s*\([^)]*,\s*['"]base64['"]/, name: 'Buffer.from(base64)' },
402
+ { re: /\batob\s*\(/, name: 'atob()' },
403
+ { re: /\\x[0-9a-f]{2}\\x[0-9a-f]{2}\\x[0-9a-f]{2}\\x[0-9a-f]{2}/i, name: 'hex escape sequences' },
404
+ { re: /String\.fromCharCode\s*\(\s*\d+\s*(,\s*\d+\s*){5,}/, name: 'String.fromCharCode (many chars)' },
405
+ { re: /\['\\x[0-9a-f]+'\]/, name: 'hex property access' },
406
+ ];
407
+
408
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs)$/.test(f));
409
+
410
+ for (const file of sourceFiles.slice(0, 50)) {
411
+ const basename = path.basename(file);
412
+ if (file.includes('__test') || file.includes('.test.') || file.includes('.spec.') || basename === 'pre-publish.js') continue;
413
+
414
+ // Higher threshold for minified files
415
+ const isMinified = basename.includes('.min.');
416
+ const hasSourceMap = ctx.files.some(f => f === file + '.map' || f === file.replace(/\.js$/, '.js.map'));
417
+ if (isMinified && hasSourceMap) continue; // minified with source map = OK
418
+
419
+ try {
420
+ const content = fs.readFileSync(file, 'utf8');
421
+ let score = 0;
422
+ const fileFindings = [];
423
+
424
+ for (const { re, name } of patterns) {
425
+ if (re.test(content)) {
426
+ score += 20;
427
+ fileFindings.push(name);
428
+ }
429
+ }
430
+
431
+ const threshold = isMinified ? 60 : 40;
432
+ if (score >= threshold) {
433
+ findings.push(`${basename}: ${fileFindings.join(', ')}`);
434
+ }
435
+ } catch (e) {}
436
+ }
437
+
438
+ return findings.length === 0
439
+ ? { pass: true, msg: 'No obfuscated code patterns detected' }
440
+ : { pass: false, msg: `Obfuscation detected: ${findings.slice(0, 3).join('; ')}` };
441
+ },
442
+
443
+ /**
444
+ * #4 MEDIUM: Detect raw network calls to non-registry endpoints
445
+ */
446
+ async no_network_calls(ctx) {
447
+ const fs = require('fs');
448
+ const path = require('path');
449
+ const allowlist = ctx.allowlist || loadAllowlist(ctx.cwd);
450
+ const findings = [];
451
+
452
+ // Patterns for direct network access
453
+ const netPatterns = [
454
+ /require\s*\(\s*['"]https?['"]\s*\)/,
455
+ /require\s*\(\s*['"]net['"]\s*\)/,
456
+ /require\s*\(\s*['"]dgram['"]\s*\)/,
457
+ /\.request\s*\(\s*['"]https?:\/\//,
458
+ /\.get\s*\(\s*['"]https?:\/\//,
459
+ /fetch\s*\(\s*['"]https?:\/\//,
460
+ /axios\s*[.(]/,
461
+ /\.createConnection\s*\(/,
462
+ /\.createServer\s*\(/,
463
+ /new\s+WebSocket\s*\(/,
464
+ ];
465
+
466
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs)$/.test(f));
467
+
468
+ for (const file of sourceFiles.slice(0, 50)) {
469
+ const basename = path.basename(file);
470
+ if (basename === 'pre-publish.js' || basename === 'backdoor-checker.js' ||
471
+ basename === 'subagent.js' || basename === '50c.js' ||
472
+ basename === 'invent-ui.js' || basename === 'mcp-tv.js' ||
473
+ basename === 'team.js' || basename === 'tools-registry.js' ||
474
+ file.includes('__test') || file.includes('.test.')) continue;
475
+
476
+ try {
477
+ const content = fs.readFileSync(file, 'utf8');
478
+
479
+ for (const pattern of netPatterns) {
480
+ if (pattern.test(content)) {
481
+ // Check if the URL targets an allowed domain
482
+ const urlMatch = content.match(/https?:\/\/([a-zA-Z0-9.-]+)/);
483
+ if (urlMatch) {
484
+ const domain = urlMatch[1].toLowerCase();
485
+ const isDomainAllowed = allowlist.allowedDomains.some(d =>
486
+ domain === d || domain.endsWith('.' + d)
487
+ );
488
+ if (isDomainAllowed) continue;
489
+ }
490
+ findings.push(`${basename}: network access pattern`);
491
+ break;
492
+ }
493
+ }
494
+ } catch (e) {}
495
+ }
496
+
497
+ return findings.length === 0
498
+ ? { pass: true, msg: 'No suspicious network calls' }
499
+ : { pass: false, msg: `Network calls found: ${findings.slice(0, 5).join('; ')}` };
500
+ },
501
+
502
+ /**
503
+ * #5 HIGH: Detect dangerous install scripts in package.json
504
+ */
505
+ async no_dangerous_scripts(ctx) {
506
+ const pkg = ctx.packageJson;
507
+ if (!pkg) return { pass: null, msg: 'No package.json' };
508
+
509
+ const allowlist = ctx.allowlist || loadAllowlist(ctx.cwd);
510
+ const dangerousHooks = ['preinstall', 'postinstall', 'preuninstall', 'postuninstall'];
511
+ const dangerousPatterns = [
512
+ /curl\s/i, /wget\s/i, /powershell/i, /cmd\s*\/c/i,
513
+ /\|\s*sh\b/i, /\|\s*bash\b/i, /exec\s*\(/i,
514
+ /rm\s+-rf/i, /del\s+\/f/i, /base64/i,
515
+ /eval/i, />\s*\/dev\/null/i,
516
+ ];
517
+
518
+ const findings = [];
519
+ const scripts = pkg.scripts || {};
520
+
521
+ for (const hook of dangerousHooks) {
522
+ const cmd = scripts[hook];
523
+ if (!cmd) continue;
524
+
525
+ // Check against allowlist
526
+ if (allowlist.allowedScripts.includes(`${hook}: ${cmd}`)) continue;
527
+
528
+ // Check for dangerous patterns
529
+ const isDangerous = dangerousPatterns.some(p => p.test(cmd));
530
+ if (isDangerous) {
531
+ findings.push(`${hook}: "${cmd.substring(0, 60)}"`);
532
+ } else if (cmd.includes('node ') || cmd.includes('npx ')) {
533
+ // node/npx scripts are usually OK but flag if they download
534
+ if (/http|fetch|request|download/i.test(cmd)) {
535
+ findings.push(`${hook}: "${cmd.substring(0, 60)}" (downloads)`)
536
+ }
537
+ }
538
+ }
539
+
540
+ return findings.length === 0
541
+ ? { pass: true, msg: 'No dangerous install scripts' }
542
+ : { pass: false, msg: `Dangerous scripts: ${findings.join('; ')}` };
543
+ },
544
+
545
+ /**
546
+ * #6 CRITICAL: Detect binary/executable files in package
547
+ */
548
+ async no_binary_files(ctx) {
549
+ const path = require('path');
550
+ const binaryExts = ['.exe', '.dll', '.so', '.dylib', '.bin', '.com', '.bat', '.cmd', '.ps1', '.vbs', '.scr', '.msi', '.dmg', '.app'];
551
+ const findings = [];
552
+
553
+ for (const file of ctx.files) {
554
+ const ext = path.extname(file).toLowerCase();
555
+ if (binaryExts.includes(ext)) {
556
+ findings.push(path.basename(file));
557
+ }
558
+ }
559
+
560
+ return findings.length === 0
561
+ ? { pass: true, msg: 'No binary/executable files' }
562
+ : { pass: false, msg: `[CRITICAL] Binaries found: ${findings.join(', ')}` };
563
+ },
564
+
565
+ /**
566
+ * #7 MEDIUM: Detect minified JS without source maps
567
+ */
568
+ async no_minified_without_source(ctx) {
569
+ const path = require('path');
570
+ const findings = [];
571
+
572
+ const minFiles = ctx.files.filter(f => f.includes('.min.js') || f.includes('.min.mjs'));
573
+
574
+ for (const file of minFiles) {
575
+ const mapFile = file + '.map';
576
+ const altMapFile = file.replace(/\.min\.(m?js)$/, '.$1.map');
577
+ const hasMap = ctx.files.includes(mapFile) || ctx.files.includes(altMapFile);
578
+ const hasSource = ctx.files.includes(file.replace('.min.', '.'));
579
+
580
+ if (!hasMap && !hasSource) {
581
+ findings.push(path.basename(file));
582
+ }
583
+ }
584
+
585
+ return findings.length === 0
586
+ ? { pass: true, msg: minFiles.length > 0 ? `${minFiles.length} minified files have sources` : 'No minified files' }
587
+ : { pass: false, msg: `Minified without source: ${findings.join(', ')}` };
588
+ },
589
+
590
+ /**
591
+ * #8 CRITICAL: Detect leaked credential/env files
592
+ */
593
+ async no_env_leaks(ctx) {
594
+ const path = require('path');
595
+ const dangerousFiles = [
596
+ '.env', '.env.local', '.env.production', '.env.staging', '.env.development',
597
+ 'credentials.json', 'service-account.json', 'gcloud-key.json',
598
+ 'id_rsa', 'id_ed25519', 'id_dsa', '.pem', '.key', '.pfx', '.p12',
599
+ '.htpasswd', '.netrc', '.npmrc', '.pypirc',
600
+ ];
601
+ const findings = [];
602
+
603
+ for (const file of ctx.files) {
604
+ const basename = path.basename(file).toLowerCase();
605
+ const ext = path.extname(file).toLowerCase();
606
+
607
+ if (dangerousFiles.includes(basename) || dangerousFiles.includes(ext)) {
608
+ findings.push(path.basename(file));
609
+ }
610
+ }
611
+
612
+ return findings.length === 0
613
+ ? { pass: true, msg: 'No credential/env file leaks' }
614
+ : { pass: false, msg: `[CRITICAL] Sensitive files: ${findings.join(', ')}` };
615
+ },
616
+
617
/**
 * #9 HIGH: Diff against last published npm version — detect injected code
 *
 * Downloads the latest published tarball from the npm registry, extracts it
 * into a temp directory, and line-diffs up to 30 local JS/TS sources against
 * the published copies, flagging newly added public IPs, eval() calls, and
 * base64 decodes. Returns { pass, msg }; pass is null whenever the diff
 * cannot be performed (no package.json, network/extract failure, etc.).
 */
async npm_diff_check(ctx) {
  const { execSync } = require('child_process');
  const fs = require('fs');
  const path = require('path');
  const os = require('os');

  const pkg = ctx.packageJson;
  if (!pkg || !pkg.name) return { pass: null, msg: 'No package.json or name' };

  try {
    // Check if package exists on npm
    const res = await fetch(`https://registry.npmjs.org/${pkg.name}/latest`);
    if (res.status === 404) return { pass: true, msg: 'New package no previous version to diff' };

    const npmData = await res.json();
    const tarballUrl = npmData.dist?.tarball;
    if (!tarballUrl) return { pass: null, msg: 'Could not find tarball URL' };

    // Download and extract to temp dir
    const tmpDir = path.join(os.tmpdir(), `50c-diff-${Date.now()}`);
    fs.mkdirSync(tmpDir, { recursive: true });

    try {
      // Download tarball
      // NOTE(review): the tarball URL and temp path are interpolated into a
      // shell command string — assumes the registry URL contains no shell
      // metacharacters. TODO confirm / consider fetch() instead of execSync.
      const tarball = path.join(tmpDir, 'pkg.tgz');
      execSync(`node -e "const https=require('https');const fs=require('fs');const f=fs.createWriteStream('${tarball.replace(/\\/g, '\\\\')}');https.get('${tarballUrl}',(r)=>{r.pipe(f);f.on('finish',()=>{f.close();process.exit(0)})})"`,
        { cwd: tmpDir, timeout: 30000, stdio: 'pipe' });

      // Extract (falls back to a nested node call where the shell `||` form
      // is unavailable, e.g. some Windows shells)
      execSync(`tar -xzf pkg.tgz 2>/dev/null || node -e "require('child_process').execSync('tar -xzf pkg.tgz',{cwd:'${tmpDir.replace(/\\/g, '\\\\')}'})"`,
        { cwd: tmpDir, timeout: 15000, stdio: 'pipe' });

      // Compare JS files — look for newly added IPs, URLs, eval, obfuscation
      const pubDir = path.join(tmpDir, 'package');
      if (!fs.existsSync(pubDir)) return { pass: null, msg: 'Could not extract published package' };

      const suspiciousAdditions = [];
      const localFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs)$/.test(f));

      // Cap at 30 files to bound runtime on large packages.
      for (const localFile of localFiles.slice(0, 30)) {
        const relativePath = path.relative(ctx.cwd, localFile);
        const pubFile = path.join(pubDir, relativePath);

        if (!fs.existsSync(pubFile)) continue; // New file, can't diff
        if (path.basename(localFile) === 'pre-publish.js' || path.basename(localFile) === 'backdoor-checker.js') continue; // Skip security tool diffs

        try {
          const localContent = fs.readFileSync(localFile, 'utf8');
          const pubContent = fs.readFileSync(pubFile, 'utf8');

          if (localContent === pubContent) continue; // No changes

          // Find added lines (set difference — order-insensitive, so lines
          // that merely moved are not reported)
          const localLines = new Set(localContent.split('\n'));
          const pubLines = new Set(pubContent.split('\n'));
          const addedLines = [...localLines].filter(l => !pubLines.has(l));

          // Scan added lines for suspicious patterns
          for (const line of addedLines) {
            // Skip regex definitions, pattern strings, and comments
            const trimmed = line.trim();
            if (trimmed.startsWith('//') || trimmed.startsWith('*') ||
                trimmed.startsWith('{ re:') || /^\s*\/.*\/[gimsuy]*,?\s*$/.test(trimmed) ||
                /pattern|regex|re:\s*\//.test(trimmed)) continue;

            // extractPublicIPs is defined elsewhere in this file.
            const ips = extractPublicIPs(line);
            if (ips.length > 0) {
              suspiciousAdditions.push(`${relativePath}: added IP ${ips[0]}`);
            }
            if (/\beval\s*\(/.test(line)) {
              suspiciousAdditions.push(`${relativePath}: added eval()`);
            }
            if (/Buffer\.from\s*\([^)]*base64/.test(line)) {
              suspiciousAdditions.push(`${relativePath}: added base64 decode`);
            }
          }
        } catch (e) {}
      }

      // Cleanup
      try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch (e) {}

      return suspiciousAdditions.length === 0
        ? { pass: true, msg: `Diffed against ${npmData.version} no suspicious additions` }
        : { pass: false, msg: `Suspicious additions vs ${npmData.version}: ${suspiciousAdditions.slice(0, 3).join('; ')}` };

    } catch (innerErr) {
      // Best-effort cleanup on failure, then degrade to SKIP (pass: null).
      try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch (e) {}
      return { pass: null, msg: `Diff failed: ${innerErr.message.substring(0, 60)}` };
    }
  } catch (e) {
    return { pass: null, msg: `npm diff check failed: ${e.message.substring(0, 60)}` };
  }
},
714
+
715
+ /**
716
+ * #10 MEDIUM: Detect dynamic require/import with variables
717
+ */
718
+ async no_dynamic_requires(ctx) {
719
+ const fs = require('fs');
720
+ const path = require('path');
721
+ const findings = [];
722
+
723
+ const patterns = [
724
+ { re: /require\s*\(\s*[^'")\s]/, name: 'dynamic require()' }, // require(variable)
725
+ { re: /import\s*\(\s*[^'")\s]/, name: 'dynamic import()' }, // import(variable)
726
+ { re: /require\s*\(\s*__dirname\s*\+/, name: 'require(__dirname + ...)' },
727
+ { re: /require\s*\(\s*process\./, name: 'require(process.*)' },
728
+ ];
729
+
730
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs)$/.test(f));
731
+
732
+ for (const file of sourceFiles.slice(0, 50)) {
733
+ const basename = path.basename(file);
734
+ if (file.includes('__test') || file.includes('.test.') || file.includes('.spec.') || basename === 'pre-publish.js') continue;
735
+ // These files legitimately use dynamic requires
736
+ if (basename === 'pre-publish.js' || basename === '50c.js') continue;
737
+
738
+ try {
739
+ const content = fs.readFileSync(file, 'utf8');
740
+ const lines = content.split('\n');
741
+
742
+ for (let i = 0; i < lines.length; i++) {
743
+ const line = lines[i];
744
+ if (line.trim().startsWith('//') || line.trim().startsWith('*')) continue;
745
+
746
+ for (const { re, name } of patterns) {
747
+ if (re.test(line)) {
748
+ // Allow require('./relative-path') only flag truly dynamic
749
+ const requireMatch = line.match(/require\s*\(\s*([^)]+)\)/);
750
+ if (requireMatch) {
751
+ const arg = requireMatch[1].trim();
752
+ // Static string literal = OK
753
+ if (/^['"]/.test(arg) && /['"]$/.test(arg)) continue;
754
+ // Template literal with only __dirname = OK
755
+ if (arg.startsWith('`') && arg.includes('__dirname') && !arg.includes('${')) continue;
756
+ }
757
+ findings.push(`${basename}:${i + 1} → ${name}`);
758
+ break;
759
+ }
760
+ }
761
+ }
762
+ } catch (e) {}
763
+ }
764
+
765
+ return findings.length === 0
766
+ ? { pass: true, msg: 'No dynamic requires/imports' }
767
+ : { pass: false, msg: `Dynamic requires: ${findings.slice(0, 3).join('; ')}` };
768
+ },
769
+
770
+ // ====== EXISTING CHECKS (FIXED) ======
771
+
772
+ async pkg_version(ctx) {
773
+ const pkg = ctx.packageJson;
774
+ if (!pkg) return { pass: false, msg: 'No package.json found' };
775
+
776
+ try {
777
+ const res = await fetch(`https://registry.npmjs.org/${pkg.name}/latest`);
778
+ if (res.status === 404) return { pass: true, msg: 'New package (not on npm yet)' };
779
+ const data = await res.json();
780
+ const npmVersion = data.version;
781
+ const localVersion = pkg.version;
782
+
783
+ if (localVersion === npmVersion) {
784
+ return { pass: false, msg: `Version ${localVersion} already published. Bump required.` };
785
+ }
786
+
787
+ const [lMaj, lMin, lPatch] = localVersion.split('.').map(Number);
788
+ const [nMaj, nMin, nPatch] = npmVersion.split('.').map(Number);
789
+
790
+ if (lMaj > nMaj || (lMaj === nMaj && lMin > nMin) || (lMaj === nMaj && lMin === nMin && lPatch > nPatch)) {
791
+ return { pass: true, msg: `${localVersion} > ${npmVersion} (npm)` };
792
+ }
793
+
794
+ return { pass: false, msg: `Local ${localVersion} <= npm ${npmVersion}` };
795
+ } catch (e) {
796
+ return { pass: false, msg: `npm check failed: ${e.message}` };
797
+ }
798
+ },
799
+
800
+ async pkg_homepage(ctx) {
801
+ const url = ctx.packageJson?.homepage;
802
+ if (!url) return { pass: false, msg: 'No homepage in package.json' };
803
+
804
+ try {
805
+ const res = await fetch(url, { method: 'HEAD' });
806
+ return res.ok
807
+ ? { pass: true, msg: `${url} is live` }
808
+ : { pass: false, msg: `${url} returned ${res.status}` };
809
+ } catch (e) {
810
+ return { pass: false, msg: `${url} unreachable: ${e.message}` };
811
+ }
812
+ },
813
+
814
+ async pkg_repo(ctx) {
815
+ const repo = ctx.packageJson?.repository?.url || ctx.packageJson?.repository;
816
+ if (!repo) return { pass: null, msg: 'No repository URL (optional)' };
817
+
818
+ let checkUrl = String(repo).replace(/^git\+/, '').replace(/\.git$/, '');
819
+ if (checkUrl.startsWith('git://')) {
820
+ checkUrl = checkUrl.replace('git://', 'https://');
821
+ }
822
+
823
+ try {
824
+ const res = await fetch(checkUrl, { method: 'HEAD' });
825
+ return res.ok
826
+ ? { pass: true, msg: `${checkUrl} accessible` }
827
+ : { pass: false, msg: `${checkUrl} returned ${res.status}` };
828
+ } catch (e) {
829
+ return { pass: false, msg: `${checkUrl} unreachable` };
830
+ }
831
+ },
832
+
833
+ async syntax_valid(ctx) {
834
+ const { execSync } = require('child_process');
835
+ const jsFiles = ctx.files.filter(f => f.endsWith('.js') || f.endsWith('.mjs'));
836
+ const errors = [];
837
+
838
+ for (const file of jsFiles.slice(0, 20)) {
839
+ try {
840
+ execSync(`node -c "${file}"`, { stdio: 'pipe', cwd: ctx.cwd });
841
+ } catch (e) {
842
+ errors.push(file);
843
+ }
844
+ }
845
+
846
+ return errors.length === 0
847
+ ? { pass: true, msg: `${jsFiles.length} JS files valid` }
848
+ : { pass: false, msg: `Syntax errors in: ${errors.join(', ')}` };
849
+ },
850
+
851
+ /**
852
+ * FIXED: Cross-platform — uses Node.js fs instead of Windows-only findstr
853
+ */
854
+ async no_hardcoded_secrets(ctx) {
855
+ const fs = require('fs');
856
+ const path = require('path');
857
+
858
+ const patterns = [
859
+ /api[_-]?key\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
860
+ /password\s*[=:]\s*["'][^"']{8,}/i,
861
+ /secret\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
862
+ /\btoken\s*[=:]\s*["'][a-zA-Z0-9]{20,}/i,
863
+ /AWS[_-]?ACCESS[_-]?KEY/,
864
+ /PRIVATE[_-]?KEY/,
865
+ /-----BEGIN\s+(RSA\s+)?PRIVATE\s+KEY/,
866
+ /ghp_[a-zA-Z0-9]{36}/, // GitHub personal access token
867
+ /sk-[a-zA-Z0-9]{20,}/, // OpenAI/Stripe secret key
868
+ ];
869
+
870
+ const findings = [];
871
+ const sourceFiles = ctx.files.filter(f => /\.(js|ts|mjs|cjs|json)$/.test(f));
872
+
873
+ for (const file of sourceFiles.slice(0, 50)) {
874
+ const basename = path.basename(file);
875
+ if (basename === 'package-lock.json' || basename === 'pre-publish.js') continue;
876
+
877
+ try {
878
+ const content = fs.readFileSync(file, 'utf8');
879
+ const lines = content.split('\n');
880
+
881
+ for (let i = 0; i < lines.length; i++) {
882
+ const line = lines[i];
883
+ if (line.trim().startsWith('//') || line.trim().startsWith('*')) continue;
884
+
885
+ for (const pattern of patterns) {
886
+ if (pattern.test(line)) {
887
+ // Don't flag environment variable reads or pattern definitions
888
+ if (/process\.env|regex|pattern|example|test|mock/i.test(line)) continue;
889
+ findings.push(`${basename}:${i + 1}`);
890
+ break;
891
+ }
892
+ }
893
+ }
894
+ } catch (e) {}
895
+ }
896
+
897
+ return findings.length === 0
898
+ ? { pass: true, msg: 'No hardcoded secrets detected' }
899
+ : { pass: false, msg: `Potential secrets: ${findings.slice(0, 5).join(', ')}` };
900
+ },
901
+
902
+ async readme_links_live(ctx) {
903
+ const fs = require('fs');
904
+ const readmePath = ctx.files.find(f => f.toLowerCase().endsWith('readme.md'));
905
+ if (!readmePath) return { pass: false, msg: 'No README.md' };
906
+
907
+ const content = fs.readFileSync(readmePath, 'utf8');
908
+ const urlPattern = /https?:\/\/[^\s\)\"\']+/g;
909
+ const urls = [...new Set(content.match(urlPattern) || [])];
910
+
911
+ const deadLinks = [];
912
+ for (const url of urls.slice(0, 10)) {
913
+ try {
914
+ const res = await fetch(url, { method: 'HEAD', signal: AbortSignal.timeout(5000) });
915
+ if (!res.ok) deadLinks.push(url);
916
+ } catch (e) {
917
+ deadLinks.push(url);
918
+ }
919
+ }
920
+
921
+ return deadLinks.length === 0
922
+ ? { pass: true, msg: `${urls.length} links verified` }
923
+ : { pass: false, msg: `Dead links: ${deadLinks.join(', ')}` };
924
+ },
925
+
926
+ async no_localhost(ctx) {
927
+ const fs = require('fs');
928
+ const path = require('path');
929
+ const findings = [];
930
+
931
+ for (const file of ctx.files.filter(f => /\.(js|ts|json)$/.test(f)).slice(0, 30)) {
932
+ if (path.basename(file) === 'pre-publish.js') continue;
933
+ if (path.basename(file) === 'backdoor-checker.js') continue;
934
+
935
+ try {
936
+ const content = fs.readFileSync(file, 'utf8');
937
+ if (/localhost|127\.0\.0\.1|0\.0\.0\.0/.test(content)) {
938
+ const lines = content.split('\n');
939
+ for (let i = 0; i < lines.length; i++) {
940
+ const line = lines[i];
941
+ if (/localhost|127\.0\.0\.1/.test(line) && !line.trim().startsWith('//') && !line.includes('||') && !line.includes('regex') && !line.includes('pattern')) {
942
+ findings.push(`${path.basename(file)}:${i+1}`);
943
+ break;
944
+ }
945
+ }
946
+ }
947
+ } catch (e) {}
948
+ }
949
+
950
+ return findings.length === 0
951
+ ? { pass: true, msg: 'No localhost references' }
952
+ : { pass: false, msg: `localhost found: ${findings.slice(0,3).join(', ')}` };
953
+ },
954
+
955
+ async size_reasonable(ctx) {
956
+ const { execSync } = require('child_process');
957
+ try {
958
+ const result = execSync('npm pack --dry-run 2>&1', { cwd: ctx.cwd, encoding: 'utf8' });
959
+ const sizeMatch = result.match(/total files.*?(\d+(?:\.\d+)?)\s*(kB|MB|B)/i);
960
+ if (sizeMatch) {
961
+ let sizeKB = parseFloat(sizeMatch[1]);
962
+ if (sizeMatch[2].toLowerCase() === 'mb') sizeKB *= 1024;
963
+ if (sizeMatch[2].toLowerCase() === 'b') sizeKB /= 1024;
964
+
965
+ return sizeKB < 10240
966
+ ? { pass: true, msg: `Package size: ${sizeKB.toFixed(1)} KB` }
967
+ : { pass: false, msg: `Package too large: ${sizeKB.toFixed(1)} KB (limit 10MB)` };
968
+ }
969
+ } catch (e) {}
970
+ return { pass: null, msg: 'Could not determine package size' };
971
+ },
972
+
973
+ // ====== ACADEMIC/AI CHECKS (unchanged) ======
974
+
975
+ async latex_compiles(ctx) {
976
+ const { execSync } = require('child_process');
977
+ const texFile = ctx.files.find(f => f.endsWith('.tex') && !f.includes('preamble'));
978
+ if (!texFile) return { pass: false, msg: 'No .tex file found' };
979
+
980
+ try {
981
+ execSync(`pdflatex -interaction=nonstopmode "${texFile}"`, { cwd: ctx.cwd, stdio: 'pipe' });
982
+ return { pass: true, msg: 'LaTeX compiles' };
983
+ } catch (e) {
984
+ return { pass: false, msg: 'LaTeX compilation failed' };
985
+ }
986
+ },
987
+
988
+ async no_placeholder(ctx) {
989
+ const fs = require('fs');
990
+ const findings = [];
991
+ const patterns = /\b(TODO|FIXME|XXX|TBD|PLACEHOLDER)\b/gi;
992
+
993
+ for (const file of ctx.files.slice(0, 50)) {
994
+ try {
995
+ const content = fs.readFileSync(file, 'utf8');
996
+ const matches = content.match(patterns);
997
+ if (matches) {
998
+ findings.push(`${file}: ${matches.length} placeholders`);
999
+ }
1000
+ } catch (e) {}
1001
+ }
1002
+
1003
+ return findings.length === 0
1004
+ ? { pass: true, msg: 'No placeholders found' }
1005
+ : { pass: false, msg: findings.slice(0, 3).join('; ') };
1006
+ },
1007
+
1008
+ async math_verified_bcalc(ctx) {
1009
+ const fs = require('fs');
1010
+ const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
1011
+ if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };
1012
+
1013
+ const content = fs.readFileSync(texFiles[0], 'utf8');
1014
+ const equations = content.match(/\$\$[^$]+\$\$/g) || [];
1015
+ const claims = content.match(/\\begin\{theorem\}[\s\S]*?\\end\{theorem\}/g) || [];
1016
+
1017
+ if (equations.length === 0 && claims.length === 0) {
1018
+ return { pass: null, msg: 'No equations/theorems found to verify' };
1019
+ }
1020
+
1021
+ const sample = claims[0] || equations[0];
1022
+ const cleanSample = sample.replace(/\\[a-z]+\{[^}]*\}/g, '').substring(0, 500);
1023
+
1024
+ try {
1025
+ const result = await call50cTool('bcalc', { expression: cleanSample, mode: 'verify' }, 30000);
1026
+
1027
+ if (result.error) return { pass: null, msg: `bCalc unavailable: ${result.error}` };
1028
+
1029
+ const resultStr = typeof result === 'string' ? result : JSON.stringify(result);
1030
+ const hasIssues = /error|invalid|incorrect|false/i.test(resultStr);
1031
+
1032
+ return hasIssues
1033
+ ? { pass: false, msg: `bCalc found issues: ${resultStr.substring(0, 100)}` }
1034
+ : { pass: true, msg: 'bCalc verified mathematical expressions' };
1035
+ } catch (e) {
1036
+ return { pass: null, msg: `bCalc check failed: ${e.message}` };
1037
+ }
1038
+ },
1039
+
1040
+ async proofs_verified_genius(ctx) {
1041
+ const fs = require('fs');
1042
+ const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
1043
+ if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };
1044
+
1045
+ const content = fs.readFileSync(texFiles[0], 'utf8');
1046
+ const proofs = content.match(/\\begin\{proof\}[\s\S]*?\\end\{proof\}/g) || [];
1047
+ if (proofs.length === 0) return { pass: null, msg: 'No formal proofs found' };
1048
+
1049
+ const abstractMatch = content.match(/\\begin\{abstract\}([\s\S]*?)\\end\{abstract\}/);
1050
+ const abstract = abstractMatch ? abstractMatch[1] : '';
1051
+ const firstProof = proofs[0].substring(0, 1000);
1052
+
1053
+ try {
1054
+ const result = await call50cTool('genius_plus', {
1055
+ problem: `Verify this mathematical proof for logical gaps, unstated assumptions, or errors:\n\nContext: ${abstract.substring(0, 300)}\n\nProof:\n${firstProof}`
1056
+ }, 90000);
1057
+
1058
+ if (result.error) return { pass: null, msg: `genius+ unavailable: ${result.error}` };
1059
+
1060
+ const resultStr = typeof result === 'string' ? result : JSON.stringify(result);
1061
+ const redFlags = /gap|flaw|error|incorrect|missing|invalid|contradiction/i.test(resultStr);
1062
+
1063
+ return redFlags
1064
+ ? { pass: false, msg: `genius+ found issues: ${resultStr.substring(0, 150)}...` }
1065
+ : { pass: true, msg: 'genius+ verified proof structure' };
1066
+ } catch (e) {
1067
+ return { pass: null, msg: `genius+ check failed: ${e.message}` };
1068
+ }
1069
+ },
1070
+
1071
+ async claims_validated_search(ctx) {
1072
+ const fs = require('fs');
1073
+ const texFiles = ctx.files.filter(f => f.endsWith('.tex'));
1074
+ if (texFiles.length === 0) return { pass: null, msg: 'No .tex files' };
1075
+
1076
+ const content = fs.readFileSync(texFiles[0], 'utf8');
1077
+ const titleMatch = content.match(/\\title\{([^}]+)\}/);
1078
+ const title = titleMatch ? titleMatch[1] : '';
1079
+
1080
+ const noveltyPatterns = /first|novel|new|unprecedented|breakthrough|improve|better than/gi;
1081
+ const hasNoveltyClaims = noveltyPatterns.test(content);
1082
+
1083
+ if (!hasNoveltyClaims || !title) return { pass: null, msg: 'No novelty claims to validate' };
1084
+
1085
+ try {
1086
+ const result = await call50cTool('web_search', {
1087
+ query: title.replace(/[\\{}]/g, ' ').trim(),
1088
+ max_results: 5
1089
+ }, 30000);
1090
+
1091
+ if (result.error) return { pass: null, msg: `web_search unavailable: ${result.error}` };
1092
+
1093
+ const resultStr = typeof result === 'string' ? result : JSON.stringify(result);
1094
+ const duplicateSignals = /already|published|existing|prior art|known result/i.test(resultStr);
1095
+
1096
+ return duplicateSignals
1097
+ ? { pass: false, msg: `Potential prior work found - verify novelty: ${resultStr.substring(0, 100)}` }
1098
+ : { pass: true, msg: 'No obvious conflicting prior work found' };
1099
+ } catch (e) {
1100
+ return { pass: null, msg: `Search check failed: ${e.message}` };
1101
+ }
1102
+ },
1103
+
1104
+ async methodology_hints(ctx) {
1105
+ const fs = require('fs');
1106
+ const files = ctx.files.filter(f => /\.(tex|md|txt)$/.test(f));
1107
+ if (files.length === 0) return { pass: null, msg: 'No text files' };
1108
+
1109
+ const content = fs.readFileSync(files[0], 'utf8').substring(0, 2000);
1110
+ const methodsMatch = content.match(/method|experiment|procedure|approach/i);
1111
+ if (!methodsMatch) return { pass: null, msg: 'No methods section detected' };
1112
+
1113
+ try {
1114
+ const result = await call50cTool('hints_plus', {
1115
+ query: `Evaluate scientific methodology for weaknesses: ${content.substring(0, 1000)}`
1116
+ }, 30000);
1117
+
1118
+ if (result.error) return { pass: null, msg: `hints+ unavailable: ${result.error}` };
1119
+
1120
+ return {
1121
+ pass: null,
1122
+ msg: `hints+: ${typeof result === 'string' ? result.substring(0, 200) : JSON.stringify(result).substring(0, 200)}`
1123
+ };
1124
+ } catch (e) {
1125
+ return { pass: null, msg: `hints+ check failed: ${e.message}` };
1126
+ }
1127
+ },
1128
+ };
1129
+
1130
/**
 * Main verification function
 *
 * Runs every check in the selected PROFILE against the working directory and
 * returns a results object containing per-check status, severity counts, a
 * 0-100 score, and a publish verdict.
 *
 * @param {string} profile - key into PROFILES (e.g. 'npm')
 * @param {object} options - { cwd } overrides the working directory
 * @returns {Promise<object>} results, or { error } for an unknown profile
 */
async function verify(profile, options = {}) {
  const config = PROFILES[profile];
  if (!config) {
    return { error: `Unknown profile: ${profile}. Available: ${Object.keys(PROFILES).join(', ')}` };
  }

  const fs = require('fs');
  const path = require('path');
  const cwd = options.cwd || process.cwd();

  // Context shared by all empirical checks.
  const ctx = {
    cwd,
    profile,
    files: [],
    packageJson: null,
    allowlist: loadAllowlist(cwd),
  };

  // Gather files (max depth 3).
  try {
    const walk = (dir, depth = 0) => {
      if (depth > 3) return;
      const items = fs.readdirSync(dir);
      for (const item of items) {
        if (item === 'node_modules') continue;
        const full = path.join(dir, item);
        let stat;
        try {
          // FIX: a single unreadable entry (e.g. a broken symlink) used to
          // throw here and silently abort the entire walk; skip it instead.
          stat = fs.statSync(full);
        } catch (e) {
          continue;
        }
        if (stat.isDirectory()) {
          // FIX: only hidden *directories* (.git, .cache, ...) are skipped.
          // Hidden files must remain visible — the CRITICAL no_env_leaks
          // check looks for .env/.netrc etc., which the old walker dropped
          // (it skipped every dot-entry), so that check could never fire.
          if (item.startsWith('.')) continue;
          walk(full, depth + 1);
        } else {
          ctx.files.push(full);
        }
      }
    };
    walk(cwd);
  } catch (e) {}

  // Load package.json if exists.
  try {
    const pkgPath = path.join(cwd, 'package.json');
    if (fs.existsSync(pkgPath)) {
      ctx.packageJson = JSON.parse(fs.readFileSync(pkgPath, 'utf8'));
    }
  } catch (e) {}

  // Run checks.
  const results = {
    profile: config.name,
    timestamp: new Date().toISOString(),
    summary: { pass: 0, fail: 0, skip: 0, manual: 0, critical: 0, high: 0, medium: 0 },
    checks: [],
    ready: false,
    blocked: false,
  };

  for (const check of config.checks) {
    const result = { ...check, status: null, msg: '', severity: CHECK_SEVERITY[check.id] || null };

    if (check.empirical && EMPIRICAL_CHECKS[check.id]) {
      try {
        const r = await EMPIRICAL_CHECKS[check.id](ctx);
        // pass: true => PASS, false => FAIL, null/undefined => SKIP.
        result.status = r.pass ? 'PASS' : (r.pass === false ? 'FAIL' : 'SKIP');
        result.msg = r.msg;
      } catch (e) {
        result.status = 'SKIP';
        result.msg = `Error: ${e.message}`;
      }
    } else if (check.empirical) {
      result.status = 'SKIP';
      result.msg = 'Check not implemented';
    } else {
      result.status = 'MANUAL';
      result.msg = 'Requires manual review';
    }

    results.checks.push(result);

    if (result.status === 'PASS') results.summary.pass++;
    else if (result.status === 'FAIL') {
      results.summary.fail++;
      // Track severity of failures
      if (result.severity === 'CRITICAL') results.summary.critical++;
      else if (result.severity === 'HIGH') results.summary.high++;
      else if (result.severity === 'MEDIUM') results.summary.medium++;
    }
    else if (result.status === 'SKIP') results.summary.skip++;
    else results.summary.manual++;
  }

  // Severity-weighted scoring
  const totalCheckable = results.summary.pass + results.summary.fail + results.summary.manual;
  const penaltyScore = (results.summary.critical * SEVERITY_WEIGHTS.CRITICAL) +
                       (results.summary.high * SEVERITY_WEIGHTS.HIGH) +
                       (results.summary.medium * SEVERITY_WEIGHTS.MEDIUM);

  results.blocked = results.summary.critical > 0;
  results.ready = results.summary.fail === 0;
  results.score = Math.max(0, Math.round((results.summary.pass / Math.max(totalCheckable, 1)) * 100));
  results.severityScore = penaltyScore;

  // Verdict
  if (results.blocked) {
    results.verdict = 'BLOCKED — CRITICAL supply chain failure. DO NOT PUBLISH.';
  } else if (results.summary.high > 0) {
    results.verdict = 'NOT READY — HIGH severity issues must be resolved.';
  } else if (results.summary.fail > 0) {
    results.verdict = 'NOT READY — Fix failing checks before publishing.';
  } else {
    results.verdict = 'READY TO PUBLISH';
  }

  return results;
}
1247
+
1248
/**
 * Generate verification receipt as Markdown
 *
 * Renders the results of verify() as a Markdown document: verdict banner,
 * summary table, per-category detail tables (supply-chain first), and an
 * AI-verification cost table when applicable.
 *
 * @param {object} results - results object produced by verify()
 * @param {object} options - { cwd } shown in the header
 * @returns {string} the receipt as a Markdown string
 */
function generateReceipt(results, options = {}) {
  const out = [];
  out.push('# 50c Pre-Publish Verification Receipt');
  out.push('');
  out.push(`**Generated:** ${results.timestamp}`);
  out.push(`**Profile:** ${results.profile}`);
  out.push(`**Directory:** ${options.cwd || 'N/A'}`);
  out.push('');

  // Verdict banner.
  if (results.blocked) {
    out.push('## BLOCKED — CRITICAL SUPPLY CHAIN FAILURE');
    out.push('');
    out.push('> This package has CRITICAL security issues that MUST be resolved before publishing.');
    out.push('> Publishing with these issues could compromise every user who installs this package.');
  } else if (results.ready) {
    out.push('## READY TO PUBLISH');
  } else {
    out.push('## NOT READY — FIX ISSUES');
  }

  // Summary table; severity breakdown rows appear only when non-zero.
  out.push('', '---', '', '## Summary', '');
  out.push('| Metric | Count |');
  out.push('|--------|-------|');
  out.push(`| Pass | ${results.summary.pass} |`);
  out.push(`| Fail | ${results.summary.fail} |`);
  if (results.summary.critical > 0) out.push(`| -- CRITICAL | ${results.summary.critical} |`);
  if (results.summary.high > 0) out.push(`| -- HIGH | ${results.summary.high} |`);
  if (results.summary.medium > 0) out.push(`| -- MEDIUM | ${results.summary.medium} |`);
  out.push(`| Skip | ${results.summary.skip} |`);
  out.push(`| Manual | ${results.summary.manual} |`);
  out.push(`| **Score** | **${results.score}/100** |`);
  if (results.severityScore > 0) out.push(`| **Severity Penalty** | **${results.severityScore}** |`);
  out.push('', '---', '', '## Detailed Results', '');

  // Group by category — supply-chain FIRST, then security, then the rest;
  // unknown categories sort to the end.
  const categoryOrder = ['supply-chain', 'security', 'metadata', 'code', 'docs', 'test', 'files', 'ai-verify', 'build', 'content', 'reproducibility', 'ethics', 'format', 'release', 'regulatory', 'statistics', 'clinical', 'conflicts', 'methodology', 'data', 'claims', 'figures', 'literature', 'proofs', 'definitions', 'examples', 'legal'];

  const byCategory = {};
  for (const check of results.checks) {
    (byCategory[check.cat] = byCategory[check.cat] || []).push(check);
  }

  const rank = (cat) => {
    const idx = categoryOrder.indexOf(cat);
    return idx === -1 ? 999 : idx;
  };
  const sortedCats = Object.keys(byCategory).sort((a, b) => rank(a) - rank(b));

  for (const cat of sortedCats) {
    const catLabel = cat === 'supply-chain' ? 'SUPPLY CHAIN SECURITY' : cat.toUpperCase();
    out.push(`### ${catLabel}`);
    out.push('');
    out.push('| Status | Severity | Check | Message |');
    out.push('|--------|----------|-------|---------|');
    for (const c of byCategory[cat]) {
      // Any status other than PASS/FAIL/SKIP (i.e. MANUAL) gets the eye icon.
      const icon = { PASS: '✅', FAIL: '❌', SKIP: '⏭️' }[c.status] || '👁️';
      out.push(`| ${icon} ${c.status} | ${c.severity || '-'} | ${c.desc} | ${(c.msg || '').substring(0, 60)} |`);
    }
    out.push('');
  }

  // Cost estimate for AI checks that actually ran.
  const aiChecks = results.checks.filter(c => c.cat === 'ai-verify' && c.status !== 'SKIP');
  if (aiChecks.length > 0) {
    out.push('---', '', '## AI Verification Cost', '');
    out.push('| Tool | Est. Cost |');
    out.push('|------|-----------|');
    for (const c of aiChecks) {
      let cost = 'FREE';
      if (c.id.includes('bcalc')) cost = '$0.15';
      else if (c.id.includes('genius')) cost = '$0.65';
      else if (c.id.includes('hints')) cost = '$0.10';
      out.push(`| ${c.id} | ${cost} |`);
    }
    out.push('');
  }

  out.push('---', '');
  out.push('*Generated by 50c Security Suite — "Never let a Verdant happen again"*');
  out.push('*Supply chain checks are FREE. AI verification costs noted above.*');

  return out.join('\n');
}
1351
+
1352
/**
 * Full verification with receipt generation
 *
 * Convenience wrapper: runs verify() for the profile, then attaches the
 * rendered Markdown receipt to the returned results object.
 */
async function verifyWithReceipt(profile, options = {}) {
  const results = await verify(profile, options);
  return { ...results, receipt: generateReceipt(results, options) };
}
1360
+
1361
+ module.exports = { verify, verifyWithReceipt, generateReceipt, PROFILES, EMPIRICAL_CHECKS, call50cTool, loadAllowlist };