pumuki 6.3.39 → 6.3.40

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/README.md +21 -12
  2. package/VERSION +1 -1
  3. package/core/gate/evaluateRules.test.ts +40 -0
  4. package/core/gate/evaluateRules.ts +7 -1
  5. package/core/rules/Consequence.ts +1 -0
  6. package/docs/CONFIGURATION.md +50 -0
  7. package/docs/INSTALLATION.md +38 -11
  8. package/docs/MCP_SERVERS.md +1 -1
  9. package/docs/README.md +1 -0
  10. package/docs/RELEASE_NOTES.md +44 -0
  11. package/docs/USAGE.md +191 -9
  12. package/docs/registro-maestro-de-seguimiento.md +2 -2
  13. package/docs/seguimiento-activo-pumuki-saas-supermercados.md +1592 -1
  14. package/docs/validation/README.md +2 -1
  15. package/docs/validation/ast-intelligence-roadmap.md +96 -0
  16. package/integrations/config/skillsCustomRules.ts +14 -0
  17. package/integrations/config/skillsDetectorRegistry.ts +11 -1
  18. package/integrations/config/skillsLock.ts +30 -0
  19. package/integrations/config/skillsMarkdownRules.ts +14 -3
  20. package/integrations/config/skillsRuleSet.ts +25 -3
  21. package/integrations/evidence/readEvidence.test.ts +3 -2
  22. package/integrations/evidence/readEvidence.ts +14 -4
  23. package/integrations/evidence/repoState.ts +10 -2
  24. package/integrations/evidence/schema.test.ts +3 -2
  25. package/integrations/evidence/schema.ts +3 -0
  26. package/integrations/evidence/writeEvidence.test.ts +3 -2
  27. package/integrations/gate/evaluateAiGate.ts +511 -2
  28. package/integrations/git/GitService.ts +5 -1
  29. package/integrations/git/astIntelligenceDualValidation.ts +275 -0
  30. package/integrations/git/gitAtomicity.ts +42 -9
  31. package/integrations/git/resolveGitRefs.ts +37 -0
  32. package/integrations/git/runPlatformGate.ts +228 -1
  33. package/integrations/git/runPlatformGateEvaluation.ts +4 -0
  34. package/integrations/git/stageRunners.ts +116 -2
  35. package/integrations/lifecycle/cli.ts +759 -22
  36. package/integrations/lifecycle/doctor.ts +62 -0
  37. package/integrations/lifecycle/index.ts +1 -0
  38. package/integrations/lifecycle/packageInfo.ts +25 -3
  39. package/integrations/lifecycle/policyReconcile.ts +304 -0
  40. package/integrations/lifecycle/preWriteAutomation.ts +42 -2
  41. package/integrations/lifecycle/watch.ts +365 -0
  42. package/integrations/mcp/aiGateCheck.ts +59 -2
  43. package/integrations/mcp/autoExecuteAiStart.ts +25 -1
  44. package/integrations/mcp/preFlightCheck.ts +13 -0
  45. package/integrations/sdd/evidenceScaffold.ts +223 -0
  46. package/integrations/sdd/index.ts +2 -0
  47. package/integrations/sdd/stateSync.ts +400 -0
  48. package/integrations/sdd/syncDocs.ts +97 -2
  49. package/package.json +4 -1
  50. package/scripts/backlog-action-reasons-lib.ts +38 -0
  51. package/scripts/backlog-id-issue-map-lib.ts +69 -0
  52. package/scripts/backlog-json-contract-lib.ts +3 -0
  53. package/scripts/framework-menu-consumer-preflight-lib.ts +6 -0
  54. package/scripts/package-install-smoke-command-resolution-lib.ts +64 -0
  55. package/scripts/package-install-smoke-consumer-npm-lib.ts +43 -0
  56. package/scripts/package-install-smoke-consumer-repo-setup-lib.ts +2 -0
  57. package/scripts/package-install-smoke-execution-steps-lib.ts +27 -9
  58. package/scripts/package-install-smoke-lifecycle-lib.ts +15 -4
  59. package/scripts/package-install-smoke-workspace-factory-lib.ts +4 -1
  60. package/scripts/reconcile-consumer-backlog-issues-lib.ts +651 -0
  61. package/scripts/reconcile-consumer-backlog-issues.ts +348 -0
  62. package/scripts/watch-consumer-backlog-lib.ts +465 -0
  63. package/scripts/watch-consumer-backlog.ts +326 -0
@@ -1,6 +1,6 @@
1
1
  import { createHash } from 'node:crypto';
2
2
  import { existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs';
3
- import { dirname, resolve } from 'node:path';
3
+ import { dirname, isAbsolute, relative, resolve } from 'node:path';
4
4
  import { readEvidenceResult, type EvidenceReadResult } from '../evidence/readEvidence';
5
5
  import { readSddStatus } from './policy';
6
6
  import type { SddStage } from './types';
@@ -48,6 +48,7 @@ export type SddSyncDocsResult = {
48
48
  change: string | null;
49
49
  stage: SddStage | null;
50
50
  task: string | null;
51
+ fromEvidencePath: string | null;
51
52
  };
52
53
  updated: boolean;
53
54
  files: ReadonlyArray<SddSyncDocsFileResult>;
@@ -65,6 +66,14 @@ export type SddSyncDocsResult = {
65
66
  successful_patterns: string[];
66
67
  rule_updates: string[];
67
68
  gate_anomalies: string[];
69
+ scoring: {
70
+ profile: 'heuristic-v1';
71
+ score: number;
72
+ successful_count: number;
73
+ failed_count: number;
74
+ anomaly_count: number;
75
+ rule_update_count: number;
76
+ };
68
77
  sync_docs: {
69
78
  updated: boolean;
70
79
  file_paths: string[];
@@ -81,6 +90,7 @@ export type SddLearnResult = {
81
90
  change: string;
82
91
  stage: SddStage | null;
83
92
  task: string | null;
93
+ fromEvidencePath: string | null;
84
94
  };
85
95
  learning: NonNullable<SddSyncDocsResult['learning']>;
86
96
  };
@@ -93,6 +103,7 @@ export type SddAutoSyncResult = {
93
103
  change: string;
94
104
  stage: SddStage | null;
95
105
  task: string | null;
106
+ fromEvidencePath: string | null;
96
107
  };
97
108
  syncDocs: {
98
109
  updated: boolean;
@@ -106,6 +117,28 @@ const normalizeSectionBody = (value: string): string => value.trim().replace(/\r
106
117
  const computeDigest = (value: string): string =>
107
118
  createHash('sha256').update(value, 'utf8').digest('hex');
108
119
 
120
+ const resolveRepoBoundPath = (params: {
121
+ repoRoot: string;
122
+ candidatePath: string;
123
+ flagName: '--from-evidence';
124
+ }): string => {
125
+ const repoRootAbsolute = resolve(params.repoRoot);
126
+ const resolved = isAbsolute(params.candidatePath)
127
+ ? resolve(params.candidatePath)
128
+ : resolve(repoRootAbsolute, params.candidatePath);
129
+ const rel = relative(repoRootAbsolute, resolved);
130
+ if (
131
+ rel === '..' ||
132
+ rel.startsWith(`..${process.platform === 'win32' ? '\\' : '/'}`) ||
133
+ isAbsolute(rel)
134
+ ) {
135
+ throw new Error(
136
+ `[pumuki][sdd] ${params.flagName} must resolve inside repository root: ${params.candidatePath}`
137
+ );
138
+ }
139
+ return resolved;
140
+ };
141
+
109
142
  const prefixLines = (value: string, marker: '-' | '+'): string =>
110
143
  value
111
144
  .split('\n')
@@ -281,12 +314,42 @@ const collectLearningSignals = (params: {
281
314
  };
282
315
  };
283
316
 
317
+ const toLearningScore = (params: {
318
+ successfulPatterns: string[];
319
+ failedPatterns: string[];
320
+ gateAnomalies: string[];
321
+ ruleUpdates: string[];
322
+ }): {
323
+ profile: 'heuristic-v1';
324
+ score: number;
325
+ successful_count: number;
326
+ failed_count: number;
327
+ anomaly_count: number;
328
+ rule_update_count: number;
329
+ } => {
330
+ const successfulCount = params.successfulPatterns.length;
331
+ const failedCount = params.failedPatterns.length;
332
+ const anomalyCount = params.gateAnomalies.length;
333
+ const ruleUpdateCount = params.ruleUpdates.length;
334
+ const rawScore = 100 + successfulCount * 4 - failedCount * 25 - anomalyCount * 10 - ruleUpdateCount * 5;
335
+ const score = Math.max(0, Math.min(100, rawScore));
336
+ return {
337
+ profile: 'heuristic-v1',
338
+ score,
339
+ successful_count: successfulCount,
340
+ failed_count: failedCount,
341
+ anomaly_count: anomalyCount,
342
+ rule_update_count: ruleUpdateCount,
343
+ };
344
+ };
345
+
284
346
  export const runSddSyncDocs = (params?: {
285
347
  repoRoot?: string;
286
348
  dryRun?: boolean;
287
349
  change?: string;
288
350
  stage?: SddStage;
289
351
  task?: string;
352
+ fromEvidencePath?: string;
290
353
  targets?: ReadonlyArray<SddSyncDocsTarget>;
291
354
  now?: () => Date;
292
355
  evidenceReader?: (repoRoot: string) => EvidenceReadResult;
@@ -296,9 +359,27 @@ export const runSddSyncDocs = (params?: {
296
359
  const change = params?.change?.trim() ? params.change.trim() : null;
297
360
  const stage = params?.stage ?? null;
298
361
  const task = params?.task?.trim() ? params.task.trim() : null;
362
+ const fromEvidencePath = params?.fromEvidencePath?.trim()
363
+ ? params.fromEvidencePath.trim()
364
+ : null;
365
+ const fromEvidenceAbsolutePath = fromEvidencePath
366
+ ? resolveRepoBoundPath({
367
+ repoRoot,
368
+ candidatePath: fromEvidencePath,
369
+ flagName: '--from-evidence',
370
+ })
371
+ : null;
299
372
  const targets = params?.targets ?? DEFAULT_SYNC_DOCS_TARGETS;
300
373
  const now = params?.now ?? (() => new Date());
301
- const evidenceReader = params?.evidenceReader ?? readEvidenceResult;
374
+ const evidenceReader =
375
+ params?.evidenceReader ??
376
+ ((candidateRepoRoot: string) =>
377
+ readEvidenceResult(
378
+ candidateRepoRoot,
379
+ fromEvidenceAbsolutePath
380
+ ? { evidencePath: fromEvidenceAbsolutePath }
381
+ : undefined
382
+ ));
302
383
 
303
384
  const updates = targets.map((target) => {
304
385
  const absolutePath = resolve(repoRoot, target.path);
@@ -362,6 +443,12 @@ export const runSddSyncDocs = (params?: {
362
443
  updated,
363
444
  evidenceResult: evidenceReader(repoRoot),
364
445
  });
446
+ const scoring = toLearningScore({
447
+ successfulPatterns: signals.successfulPatterns,
448
+ failedPatterns: signals.failedPatterns,
449
+ gateAnomalies: signals.gateAnomalies,
450
+ ruleUpdates: signals.ruleUpdates,
451
+ });
365
452
  const artifact = {
366
453
  version: '1.0' as const,
367
454
  change_id: change,
@@ -372,6 +459,7 @@ export const runSddSyncDocs = (params?: {
372
459
  successful_patterns: signals.successfulPatterns,
373
460
  rule_updates: signals.ruleUpdates,
374
461
  gate_anomalies: signals.gateAnomalies,
462
+ scoring,
375
463
  sync_docs: {
376
464
  updated,
377
465
  file_paths: files.map((file) => file.path),
@@ -401,6 +489,7 @@ export const runSddSyncDocs = (params?: {
401
489
  change,
402
490
  stage,
403
491
  task,
492
+ fromEvidencePath,
404
493
  },
405
494
  updated,
406
495
  files,
@@ -414,6 +503,7 @@ export const runSddLearn = (params?: {
414
503
  change?: string;
415
504
  stage?: SddStage;
416
505
  task?: string;
506
+ fromEvidencePath?: string;
417
507
  now?: () => Date;
418
508
  evidenceReader?: (repoRoot: string) => EvidenceReadResult;
419
509
  }): SddLearnResult => {
@@ -428,6 +518,7 @@ export const runSddLearn = (params?: {
428
518
  change,
429
519
  stage: params?.stage,
430
520
  task: params?.task,
521
+ fromEvidencePath: params?.fromEvidencePath,
431
522
  now: params?.now,
432
523
  evidenceReader: params?.evidenceReader,
433
524
  targets: [],
@@ -445,6 +536,7 @@ export const runSddLearn = (params?: {
445
536
  change,
446
537
  stage: result.context.stage,
447
538
  task: result.context.task,
539
+ fromEvidencePath: result.context.fromEvidencePath,
448
540
  },
449
541
  learning: result.learning,
450
542
  };
@@ -456,6 +548,7 @@ export const runSddAutoSync = (params?: {
456
548
  change?: string;
457
549
  stage?: SddStage;
458
550
  task?: string;
551
+ fromEvidencePath?: string;
459
552
  now?: () => Date;
460
553
  evidenceReader?: (repoRoot: string) => EvidenceReadResult;
461
554
  targets?: ReadonlyArray<SddSyncDocsTarget>;
@@ -471,6 +564,7 @@ export const runSddAutoSync = (params?: {
471
564
  change,
472
565
  stage: params?.stage,
473
566
  task: params?.task,
567
+ fromEvidencePath: params?.fromEvidencePath,
474
568
  now: params?.now,
475
569
  evidenceReader: params?.evidenceReader,
476
570
  targets: params?.targets,
@@ -488,6 +582,7 @@ export const runSddAutoSync = (params?: {
488
582
  change,
489
583
  stage: syncResult.context.stage,
490
584
  task: syncResult.context.task,
585
+ fromEvidencePath: syncResult.context.fromEvidencePath,
491
586
  },
492
587
  syncDocs: {
493
588
  updated: syncResult.updated,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "pumuki",
3
- "version": "6.3.39",
3
+ "version": "6.3.40",
4
4
  "description": "Enterprise-grade AST Intelligence System with multi-platform support (iOS, Android, Backend, Frontend) and Feature-First + DDD + Clean Architecture enforcement. Includes dynamic violations API for intelligent querying.",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -49,6 +49,7 @@
49
49
  "test:heuristics": "npx --yes tsx@4.21.0 --test core/facts/__tests__/extractHeuristicFacts.test.ts",
50
50
  "test:evidence": "npx --yes tsx@4.21.0 --test integrations/evidence/__tests__/buildEvidence.test.ts integrations/evidence/__tests__/humanIntent.test.ts",
51
51
  "test:mcp": "npx --yes tsx@4.21.0 --test integrations/mcp/__tests__/*.test.ts",
52
+ "test:backlog-tooling": "npx --yes tsx@4.21.0 --test scripts/__tests__/backlog-action-reasons-lib.test.ts scripts/__tests__/backlog-json-contract-lib.test.ts scripts/__tests__/backlog-cli-help-exit-code.test.ts scripts/__tests__/backlog-id-issue-map-lib.test.ts scripts/__tests__/reconcile-consumer-backlog-issues.test.ts scripts/__tests__/watch-consumer-backlog.test.ts",
52
53
  "test:saas-ingestion": "npx --yes tsx@4.21.0 --test integrations/lifecycle/__tests__/saasIngestionContract.test.ts integrations/lifecycle/__tests__/saasIngestionBuilder.test.ts integrations/lifecycle/__tests__/saasIngestionTransport.test.ts integrations/lifecycle/__tests__/saasIngestionIdempotency.test.ts integrations/lifecycle/__tests__/saasIngestionAuth.test.ts integrations/lifecycle/__tests__/saasIngestionAudit.test.ts integrations/lifecycle/__tests__/saasIngestionMetrics.test.ts integrations/lifecycle/__tests__/saasIngestionGovernance.test.ts integrations/lifecycle/__tests__/saasFederation.test.ts integrations/lifecycle/__tests__/saasEnterpriseAnalytics.test.ts integrations/lifecycle/__tests__/cli.test.ts",
53
54
  "test:operational-memory": "npx --yes tsx@4.21.0 --test integrations/lifecycle/__tests__/operationalMemoryContract.test.ts integrations/lifecycle/__tests__/operationalMemorySignals.test.ts integrations/lifecycle/__tests__/operationalMemorySnapshot.test.ts integrations/git/__tests__/runPlatformGate.test.ts integrations/git/__tests__/runPlatformGateEvidence.test.ts integrations/evidence/__tests__/buildEvidence.test.ts integrations/evidence/writeEvidence.test.ts integrations/evidence/generateEvidence.test.ts",
54
55
  "test:stage-gates": "npx --yes tsx@4.21.0 --test integrations/config/__tests__/*.test.ts integrations/gate/__tests__/*.test.ts integrations/git/__tests__/*.test.ts integrations/lifecycle/__tests__/*.test.ts integrations/sdd/__tests__/*.test.ts scripts/__tests__/*.test.ts",
@@ -98,6 +99,8 @@
98
99
  "validation:phase8:close-ready": "bash scripts/run-phase8-close-ready.sh",
99
100
  "validation:progress-single-active": "bash scripts/check-refactor-progress-single-active.sh",
100
101
  "validation:tracking-single-active": "bash scripts/check-tracking-single-active.sh",
102
+ "validation:backlog-reconcile": "node --import tsx scripts/reconcile-consumer-backlog-issues.ts",
103
+ "validation:backlog-watch": "node --import tsx scripts/watch-consumer-backlog.ts",
101
104
  "validation:phase5-escalation:ready-to-submit": "bash scripts/check-phase5-escalation-ready-to-submit.sh",
102
105
  "validation:phase5-escalation:prepare": "bash scripts/prepare-phase5-escalation-submission.sh",
103
106
  "validation:phase5-escalation:close-submission": "bash scripts/close-phase5-escalation-submission.sh",
@@ -0,0 +1,38 @@
1
+ export type WatchActionReason =
2
+ | 'needs_issue'
3
+ | 'drift_closed_issue'
4
+ | 'heading_drift';
5
+
6
+ export type ReconcileActionReason =
7
+ | 'reference_changes'
8
+ | 'issue_changes'
9
+ | 'heading_changes'
10
+ | 'summary_updated'
11
+ | 'next_step_updated';
12
+
13
+ export const buildWatchActionRequiredReasons = (input: {
14
+ needsIssueCount: number;
15
+ driftClosedIssueCount: number;
16
+ headingDriftCount: number;
17
+ }): ReadonlyArray<WatchActionReason> => [
18
+ ...(input.needsIssueCount > 0 ? (['needs_issue'] as const) : []),
19
+ ...(input.driftClosedIssueCount > 0 ? (['drift_closed_issue'] as const) : []),
20
+ ...(input.headingDriftCount > 0 ? (['heading_drift'] as const) : []),
21
+ ];
22
+
23
+ export const buildReconcileActionRequiredReasons = (input: {
24
+ referenceChangesCount: number;
25
+ issueChangesCount: number;
26
+ headingChangesCount: number;
27
+ summaryUpdated: boolean;
28
+ nextStepUpdated: boolean;
29
+ }): ReadonlyArray<ReconcileActionReason> => [
30
+ ...(input.referenceChangesCount > 0 ? (['reference_changes'] as const) : []),
31
+ ...(input.issueChangesCount > 0 ? (['issue_changes'] as const) : []),
32
+ ...(input.headingChangesCount > 0 ? (['heading_changes'] as const) : []),
33
+ ...(input.summaryUpdated ? (['summary_updated'] as const) : []),
34
+ ...(input.nextStepUpdated ? (['next_step_updated'] as const) : []),
35
+ ];
36
+
37
+ export const formatActionReasonsForHuman = (reasons: ReadonlyArray<string>): string =>
38
+ reasons.length > 0 ? reasons.join(',') : 'none';
@@ -0,0 +1,69 @@
1
+ import { readFileSync } from 'node:fs';
2
+ import { resolve } from 'node:path';
3
+
4
+ export const BACKLOG_ID_PATTERN = /^(PUMUKI-(?:M)?\d+|PUMUKI-INC-\d+|FP-\d+|AST-GAP-\d+)$/;
5
+
6
+ export type BacklogIdIssueMapRecord = Readonly<Record<string, number>>;
7
+
8
+ const parsePositiveIssueNumber = (value: unknown): number | null => {
9
+ if (typeof value === 'number' && Number.isFinite(value) && value > 0) {
10
+ return Math.trunc(value);
11
+ }
12
+ if (typeof value === 'string') {
13
+ const parsed = Number.parseInt(value, 10);
14
+ if (Number.isFinite(parsed) && parsed > 0) {
15
+ return parsed;
16
+ }
17
+ }
18
+ return null;
19
+ };
20
+
21
+ export const parseIdIssueMapRecord = (raw: string): BacklogIdIssueMapRecord => {
22
+ const parsed = JSON.parse(raw) as Record<string, unknown>;
23
+ const normalized: Record<string, number> = {};
24
+ for (const [id, value] of Object.entries(parsed)) {
25
+ if (!BACKLOG_ID_PATTERN.test(id)) {
26
+ throw new Error(`Invalid id in --id-issue-map: "${id}"`);
27
+ }
28
+ const issueNumber = parsePositiveIssueNumber(value);
29
+ if (issueNumber === null) {
30
+ throw new Error(`Invalid issue number for "${id}" in --id-issue-map`);
31
+ }
32
+ normalized[id] = issueNumber;
33
+ }
34
+ return normalized;
35
+ };
36
+
37
+ export const parseIdIssueMapRecordFile = (
38
+ filePath: string,
39
+ readFile: (path: string) => string = (path) => readFileSync(path, 'utf8')
40
+ ): BacklogIdIssueMapRecord => {
41
+ const resolvedPath = resolve(filePath);
42
+ return parseIdIssueMapRecord(readFile(resolvedPath));
43
+ };
44
+
45
+ export const mergeIdIssueMapRecords = (
46
+ base?: BacklogIdIssueMapRecord,
47
+ override?: BacklogIdIssueMapRecord
48
+ ): BacklogIdIssueMapRecord | undefined => {
49
+ if (!base && !override) {
50
+ return undefined;
51
+ }
52
+ return {
53
+ ...(base ?? {}),
54
+ ...(override ?? {}),
55
+ };
56
+ };
57
+
58
+ export const recordToIdIssueMap = (
59
+ record?: BacklogIdIssueMapRecord
60
+ ): ReadonlyMap<string, number> | undefined => {
61
+ if (!record) {
62
+ return undefined;
63
+ }
64
+ const map = new Map<string, number>();
65
+ for (const [id, issue] of Object.entries(record)) {
66
+ map.set(id, Math.trunc(issue));
67
+ }
68
+ return map;
69
+ };
@@ -0,0 +1,3 @@
1
+ export const BACKLOG_JSON_SCHEMA_VERSION = '1.0.0';
2
+ export const BACKLOG_JSON_COMPAT_MIN_READER_VERSION = '1.0.0';
3
+ export const BACKLOG_JSON_COMPAT_CONTRACT_ID = 'backlog-tooling-json-v1';
@@ -60,6 +60,12 @@ const ACTIONABLE_HINTS_BY_CODE: Readonly<Record<string, string>> = {
60
60
  EVIDENCE_RULES_COVERAGE_STAGE_MISMATCH: 'reanuda auditoría en el stage correcto.',
61
61
  EVIDENCE_RULES_COVERAGE_INCOMPLETE:
62
62
  'asegura unevaluated=0 y coverage_ratio=1 antes de continuar.',
63
+ EVIDENCE_SKILLS_CONTRACT_INCOMPLETE:
64
+ 'completa contrato de skills/policy para el stage actual y vuelve a validar.',
65
+ EVIDENCE_PREWRITE_WORKTREE_OVER_LIMIT:
66
+ 'reduce el worktree pendiente en slices atómicos y vuelve a ejecutar PRE_WRITE.',
67
+ EVIDENCE_PREWRITE_WORKTREE_WARN:
68
+ 'conviene particionar cambios ahora para evitar bloqueo tardío en commit/push.',
63
69
  EVIDENCE_UNSUPPORTED_AUTO_RULES:
64
70
  'mapea todas las reglas AUTO a detectores AST antes de continuar.',
65
71
  EVIDENCE_TIMESTAMP_FUTURE: 'corrige la hora del sistema y regenera evidencia.',
@@ -0,0 +1,64 @@
1
+ import { existsSync } from 'node:fs';
2
+ import { join } from 'node:path';
3
+
4
+ export type SmokeCommandResolution =
5
+ | 'local-bin'
6
+ | 'local-node-entry'
7
+ | 'npx-package';
8
+
9
+ export type ResolvedSmokeCommand = {
10
+ executable: string;
11
+ args: string[];
12
+ resolution: SmokeCommandResolution;
13
+ };
14
+
15
+ const resolveLocalBinPath = (consumerRepo: string, binary: string): string | null => {
16
+ const candidates =
17
+ process.platform === 'win32'
18
+ ? [join(consumerRepo, 'node_modules', '.bin', `${binary}.cmd`), join(consumerRepo, 'node_modules', '.bin', binary)]
19
+ : [join(consumerRepo, 'node_modules', '.bin', binary)];
20
+
21
+ for (const candidate of candidates) {
22
+ if (existsSync(candidate)) {
23
+ return candidate;
24
+ }
25
+ }
26
+
27
+ return null;
28
+ };
29
+
30
+ const resolveLocalNodeEntrypoint = (consumerRepo: string, binary: string): string | null => {
31
+ const entrypoint = join(consumerRepo, 'node_modules', 'pumuki', 'bin', `${binary}.js`);
32
+ return existsSync(entrypoint) ? entrypoint : null;
33
+ };
34
+
35
+ export const resolveConsumerPumukiCommand = (params: {
36
+ consumerRepo: string;
37
+ binary: string;
38
+ args?: ReadonlyArray<string>;
39
+ }): ResolvedSmokeCommand => {
40
+ const args = [...(params.args ?? [])];
41
+ const localBinPath = resolveLocalBinPath(params.consumerRepo, params.binary);
42
+ if (localBinPath) {
43
+ return {
44
+ executable: localBinPath,
45
+ args,
46
+ resolution: 'local-bin',
47
+ };
48
+ }
49
+
50
+ const localEntrypoint = resolveLocalNodeEntrypoint(params.consumerRepo, params.binary);
51
+ if (localEntrypoint) {
52
+ return {
53
+ executable: 'node',
54
+ args: [localEntrypoint, ...args],
55
+ resolution: 'local-node-entry',
56
+ };
57
+ }
58
+
59
+ return {
60
+ executable: 'npx',
61
+ args: ['--yes', '--package', 'pumuki@latest', params.binary, ...args],
62
+ resolution: 'npx-package',
63
+ };
64
+ };
@@ -1,6 +1,7 @@
1
1
  import { writeFileSync } from 'node:fs';
2
2
  import { join } from 'node:path';
3
3
  import {
4
+ assertNoFatalOutput,
4
5
  assertSuccess,
5
6
  runCommand,
6
7
  } from './package-install-smoke-runner-common';
@@ -8,6 +9,7 @@ import {
8
9
  pushCommandLog,
9
10
  type SmokeWorkspace,
10
11
  } from './package-install-smoke-workspace-lib';
12
+ import { resolveConsumerPumukiCommand } from './package-install-smoke-command-resolution-lib';
11
13
  import packageJson from '../package.json';
12
14
 
13
15
  const runNpmStep = (
@@ -45,3 +47,44 @@ export const verifyInstalledPackageCanBeRequired = (
45
47
  pushCommandLog(workspace.commandLog, installCheck);
46
48
  assertSuccess(installCheck, 'package require smoke');
47
49
  };
50
+
51
+ export const verifyInstalledPumukiBinaryVersion = (
52
+ workspace: SmokeWorkspace
53
+ ): void => {
54
+ const noInstallVersionCheck = runCommand({
55
+ cwd: workspace.consumerRepo,
56
+ executable: 'npx',
57
+ args: ['--no-install', 'pumuki', '--version'],
58
+ });
59
+ pushCommandLog(workspace.commandLog, noInstallVersionCheck);
60
+
61
+ const noInstallPassed =
62
+ noInstallVersionCheck.exitCode === 0
63
+ && !/Cannot find module|ERR_MODULE_NOT_FOUND|failed to resolve tsx runtime/.test(
64
+ noInstallVersionCheck.combined
65
+ );
66
+ if (noInstallPassed) {
67
+ assertNoFatalOutput(noInstallVersionCheck, 'pumuki --version smoke');
68
+ return;
69
+ }
70
+
71
+ const fallback = resolveConsumerPumukiCommand({
72
+ consumerRepo: workspace.consumerRepo,
73
+ binary: 'pumuki',
74
+ args: ['--version'],
75
+ });
76
+ const fallbackCheck = runCommand({
77
+ cwd: workspace.consumerRepo,
78
+ executable: fallback.executable,
79
+ args: fallback.args,
80
+ });
81
+ pushCommandLog(workspace.commandLog, fallbackCheck);
82
+ assertSuccess(
83
+ fallbackCheck,
84
+ `pumuki --version smoke fallback (${fallback.resolution})`
85
+ );
86
+ assertNoFatalOutput(
87
+ fallbackCheck,
88
+ `pumuki --version smoke fallback (${fallback.resolution})`
89
+ );
90
+ };
@@ -11,6 +11,7 @@ import {
11
11
  import {
12
12
  installTarballIntoConsumerRepo,
13
13
  verifyInstalledPackageCanBeRequired,
14
+ verifyInstalledPumukiBinaryVersion,
14
15
  } from './package-install-smoke-consumer-npm-lib';
15
16
  import type { SmokeWorkspace } from './package-install-smoke-workspace-lib';
16
17
 
@@ -23,6 +24,7 @@ export const setupConsumerRepository = (
23
24
  initializeConsumerGitRepository(workspace);
24
25
  installTarballIntoConsumerRepo(workspace);
25
26
  verifyInstalledPackageCanBeRequired(workspace);
27
+ verifyInstalledPumukiBinaryVersion(workspace);
26
28
  commitBaseline(workspace);
27
29
  configureRemoteAndFeatureBranch(workspace);
28
30
  writeAndCommitRangePayloadForBlockMode(workspace, mode);
@@ -1,5 +1,6 @@
1
1
  import type { SmokeExpectation } from './package-install-smoke-contract';
2
2
  import { runGateStep, type SmokeGateStep } from './package-install-smoke-gate-lib';
3
+ import { resolveConsumerPumukiCommand } from './package-install-smoke-command-resolution-lib';
3
4
  import type { SmokeWorkspace } from './package-install-smoke-workspace-contract';
4
5
 
5
6
  export type SmokeStepResult = {
@@ -8,25 +9,30 @@ export type SmokeStepResult = {
8
9
  exitCode: number;
9
10
  };
10
11
 
11
- export const DEFAULT_SMOKE_GATE_STEPS: ReadonlyArray<SmokeGateStep> = [
12
+ type SmokeGateStepDescriptor = {
13
+ label: SmokeGateStep['label'];
14
+ binary: string;
15
+ args?: ReadonlyArray<string>;
16
+ evidenceFile: SmokeGateStep['evidenceFile'];
17
+ stage: SmokeGateStep['stage'];
18
+ };
19
+
20
+ export const DEFAULT_SMOKE_GATE_STEPS: ReadonlyArray<SmokeGateStepDescriptor> = [
12
21
  {
13
22
  label: 'pre-commit',
14
- command: 'npx',
15
- args: ['--yes', 'pumuki-pre-commit'],
23
+ binary: 'pumuki-pre-commit',
16
24
  evidenceFile: 'pre-commit.ai_evidence.json',
17
25
  stage: 'PRE_COMMIT',
18
26
  },
19
27
  {
20
28
  label: 'pre-push',
21
- command: 'npx',
22
- args: ['--yes', 'pumuki-pre-push'],
29
+ binary: 'pumuki-pre-push',
23
30
  evidenceFile: 'pre-push.ai_evidence.json',
24
31
  stage: 'PRE_PUSH',
25
32
  },
26
33
  {
27
34
  label: 'ci',
28
- command: 'npx',
29
- args: ['--yes', 'pumuki-ci'],
35
+ binary: 'pumuki-ci',
30
36
  evidenceFile: 'ci.ai_evidence.json',
31
37
  stage: 'CI',
32
38
  },
@@ -36,10 +42,22 @@ export const runDefaultSmokeGateSteps = (params: {
36
42
  workspace: SmokeWorkspace;
37
43
  expectation: SmokeExpectation;
38
44
  }): ReadonlyArray<SmokeStepResult> =>
39
- DEFAULT_SMOKE_GATE_STEPS.map((step) => {
45
+ DEFAULT_SMOKE_GATE_STEPS.map((stepDescriptor) => {
46
+ const resolvedCommand = resolveConsumerPumukiCommand({
47
+ consumerRepo: params.workspace.consumerRepo,
48
+ binary: stepDescriptor.binary,
49
+ args: stepDescriptor.args,
50
+ });
51
+ const step: SmokeGateStep = {
52
+ label: stepDescriptor.label,
53
+ command: resolvedCommand.executable,
54
+ args: resolvedCommand.args,
55
+ evidenceFile: stepDescriptor.evidenceFile,
56
+ stage: stepDescriptor.stage,
57
+ };
40
58
  const result = runGateStep(params.workspace, step, params.expectation);
41
59
  return {
42
- label: step.label,
60
+ label: stepDescriptor.label,
43
61
  outcome: result.outcome,
44
62
  exitCode: result.exitCode,
45
63
  };
@@ -3,6 +3,7 @@ import {
3
3
  assertSuccess,
4
4
  runCommand,
5
5
  } from './package-install-smoke-runner-common';
6
+ import { resolveConsumerPumukiCommand } from './package-install-smoke-command-resolution-lib';
6
7
  import { pushCommandLog, type SmokeWorkspace } from './package-install-smoke-workspace-lib';
7
8
 
8
9
  const normalizeStatus = (value: string): string =>
@@ -27,10 +28,15 @@ export const captureLifecycleStatusSnapshot = (workspace: SmokeWorkspace): strin
27
28
  getShortStatus(workspace);
28
29
 
29
30
  export const runLifecycleInstallStep = (workspace: SmokeWorkspace): void => {
31
+ const command = resolveConsumerPumukiCommand({
32
+ consumerRepo: workspace.consumerRepo,
33
+ binary: 'pumuki',
34
+ args: ['install'],
35
+ });
30
36
  const result = runCommand({
31
37
  cwd: workspace.consumerRepo,
32
- executable: 'npx',
33
- args: ['--yes', 'pumuki', 'install'],
38
+ executable: command.executable,
39
+ args: command.args,
34
40
  env: {
35
41
  PUMUKI_SKIP_OPENSPEC_BOOTSTRAP: '1',
36
42
  },
@@ -41,10 +47,15 @@ export const runLifecycleInstallStep = (workspace: SmokeWorkspace): void => {
41
47
  };
42
48
 
43
49
  export const runLifecycleUninstallStep = (workspace: SmokeWorkspace): void => {
50
+ const command = resolveConsumerPumukiCommand({
51
+ consumerRepo: workspace.consumerRepo,
52
+ binary: 'pumuki',
53
+ args: ['uninstall', '--purge-artifacts'],
54
+ });
44
55
  const result = runCommand({
45
56
  cwd: workspace.consumerRepo,
46
- executable: 'npx',
47
- args: ['--yes', 'pumuki', 'uninstall', '--purge-artifacts'],
57
+ executable: command.executable,
58
+ args: command.args,
48
59
  });
49
60
  pushCommandLog(workspace.commandLog, result);
50
61
  assertNoFatalOutput(result, 'pumuki lifecycle uninstall');
@@ -15,7 +15,10 @@ export const createSmokeWorkspace = (mode: SmokeMode): SmokeWorkspace => {
15
15
  ensureDirectory(reportRoot);
16
16
 
17
17
  const tmpRoot = mkdtempSync(join(tmpdir(), 'pumuki-package-smoke-'));
18
- const consumerRepo = join(tmpRoot, 'consumer');
18
+ const consumerRepo = join(
19
+ tmpRoot,
20
+ process.platform === 'win32' ? 'consumer' : 'consumer:repo'
21
+ );
19
22
  const bareRemote = join(tmpRoot, 'origin.git');
20
23
 
21
24
  return {