cap-pro 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/README.md +26 -0
- package/.claude-plugin/marketplace.json +24 -0
- package/.claude-plugin/plugin.json +24 -0
- package/LICENSE +21 -0
- package/README.ja-JP.md +834 -0
- package/README.ko-KR.md +823 -0
- package/README.md +806 -0
- package/README.pt-BR.md +452 -0
- package/README.zh-CN.md +800 -0
- package/agents/cap-architect.md +269 -0
- package/agents/cap-brainstormer.md +207 -0
- package/agents/cap-curator.md +276 -0
- package/agents/cap-debugger.md +365 -0
- package/agents/cap-designer.md +246 -0
- package/agents/cap-historian.md +464 -0
- package/agents/cap-migrator.md +291 -0
- package/agents/cap-prototyper.md +197 -0
- package/agents/cap-validator.md +308 -0
- package/bin/install.js +5433 -0
- package/cap/bin/cap-tools.cjs +853 -0
- package/cap/bin/lib/arc-scanner.cjs +344 -0
- package/cap/bin/lib/cap-affinity-engine.cjs +862 -0
- package/cap/bin/lib/cap-anchor.cjs +228 -0
- package/cap/bin/lib/cap-annotation-writer.cjs +340 -0
- package/cap/bin/lib/cap-checkpoint.cjs +434 -0
- package/cap/bin/lib/cap-cluster-detect.cjs +945 -0
- package/cap/bin/lib/cap-cluster-display.cjs +52 -0
- package/cap/bin/lib/cap-cluster-format.cjs +245 -0
- package/cap/bin/lib/cap-cluster-helpers.cjs +295 -0
- package/cap/bin/lib/cap-cluster-io.cjs +212 -0
- package/cap/bin/lib/cap-completeness.cjs +540 -0
- package/cap/bin/lib/cap-deps.cjs +583 -0
- package/cap/bin/lib/cap-design-families.cjs +332 -0
- package/cap/bin/lib/cap-design.cjs +966 -0
- package/cap/bin/lib/cap-divergence-detector.cjs +400 -0
- package/cap/bin/lib/cap-doctor.cjs +752 -0
- package/cap/bin/lib/cap-feature-map-internals.cjs +19 -0
- package/cap/bin/lib/cap-feature-map-migrate.cjs +335 -0
- package/cap/bin/lib/cap-feature-map-monorepo.cjs +885 -0
- package/cap/bin/lib/cap-feature-map-shard.cjs +315 -0
- package/cap/bin/lib/cap-feature-map.cjs +1943 -0
- package/cap/bin/lib/cap-fitness-score.cjs +1075 -0
- package/cap/bin/lib/cap-impact-analysis.cjs +652 -0
- package/cap/bin/lib/cap-learn-review.cjs +1072 -0
- package/cap/bin/lib/cap-learning-signals.cjs +627 -0
- package/cap/bin/lib/cap-loader.cjs +227 -0
- package/cap/bin/lib/cap-logger.cjs +57 -0
- package/cap/bin/lib/cap-memory-bridge.cjs +764 -0
- package/cap/bin/lib/cap-memory-confidence.cjs +452 -0
- package/cap/bin/lib/cap-memory-dir.cjs +987 -0
- package/cap/bin/lib/cap-memory-engine.cjs +698 -0
- package/cap/bin/lib/cap-memory-extends.cjs +398 -0
- package/cap/bin/lib/cap-memory-graph.cjs +790 -0
- package/cap/bin/lib/cap-memory-migrate.cjs +2015 -0
- package/cap/bin/lib/cap-memory-pin.cjs +183 -0
- package/cap/bin/lib/cap-memory-platform.cjs +490 -0
- package/cap/bin/lib/cap-memory-prune.cjs +707 -0
- package/cap/bin/lib/cap-memory-schema.cjs +812 -0
- package/cap/bin/lib/cap-migrate-tags.cjs +309 -0
- package/cap/bin/lib/cap-migrate.cjs +540 -0
- package/cap/bin/lib/cap-pattern-apply.cjs +1203 -0
- package/cap/bin/lib/cap-pattern-pipeline.cjs +1034 -0
- package/cap/bin/lib/cap-plugin-manifest.cjs +80 -0
- package/cap/bin/lib/cap-realtime-affinity.cjs +399 -0
- package/cap/bin/lib/cap-reconcile.cjs +570 -0
- package/cap/bin/lib/cap-research-gate.cjs +218 -0
- package/cap/bin/lib/cap-scope-filter.cjs +402 -0
- package/cap/bin/lib/cap-semantic-pipeline.cjs +1038 -0
- package/cap/bin/lib/cap-session-extract.cjs +987 -0
- package/cap/bin/lib/cap-session.cjs +445 -0
- package/cap/bin/lib/cap-snapshot-linkage.cjs +963 -0
- package/cap/bin/lib/cap-stack-docs.cjs +646 -0
- package/cap/bin/lib/cap-tag-observer.cjs +371 -0
- package/cap/bin/lib/cap-tag-scanner.cjs +1766 -0
- package/cap/bin/lib/cap-telemetry.cjs +466 -0
- package/cap/bin/lib/cap-test-audit.cjs +1438 -0
- package/cap/bin/lib/cap-thread-migrator.cjs +307 -0
- package/cap/bin/lib/cap-thread-synthesis.cjs +545 -0
- package/cap/bin/lib/cap-thread-tracker.cjs +519 -0
- package/cap/bin/lib/cap-trace.cjs +399 -0
- package/cap/bin/lib/cap-trust-mode.cjs +336 -0
- package/cap/bin/lib/cap-ui-design-editor.cjs +642 -0
- package/cap/bin/lib/cap-ui-mind-map.cjs +712 -0
- package/cap/bin/lib/cap-ui-thread-nav.cjs +693 -0
- package/cap/bin/lib/cap-ui.cjs +1245 -0
- package/cap/bin/lib/cap-upgrade.cjs +1028 -0
- package/cap/bin/lib/cli/arg-helpers.cjs +49 -0
- package/cap/bin/lib/cli/frontmatter-router.cjs +31 -0
- package/cap/bin/lib/cli/init-router.cjs +68 -0
- package/cap/bin/lib/cli/phase-router.cjs +102 -0
- package/cap/bin/lib/cli/state-router.cjs +61 -0
- package/cap/bin/lib/cli/template-router.cjs +37 -0
- package/cap/bin/lib/cli/uat-router.cjs +29 -0
- package/cap/bin/lib/cli/validation-router.cjs +26 -0
- package/cap/bin/lib/cli/verification-router.cjs +31 -0
- package/cap/bin/lib/cli/workstream-router.cjs +39 -0
- package/cap/bin/lib/commands.cjs +961 -0
- package/cap/bin/lib/config.cjs +467 -0
- package/cap/bin/lib/convention-reader.cjs +258 -0
- package/cap/bin/lib/core.cjs +1241 -0
- package/cap/bin/lib/feature-aggregator.cjs +423 -0
- package/cap/bin/lib/frontmatter.cjs +337 -0
- package/cap/bin/lib/init.cjs +1443 -0
- package/cap/bin/lib/manifest-generator.cjs +383 -0
- package/cap/bin/lib/milestone.cjs +253 -0
- package/cap/bin/lib/model-profiles.cjs +69 -0
- package/cap/bin/lib/monorepo-context.cjs +226 -0
- package/cap/bin/lib/monorepo-migrator.cjs +509 -0
- package/cap/bin/lib/phase.cjs +889 -0
- package/cap/bin/lib/profile-output.cjs +989 -0
- package/cap/bin/lib/profile-pipeline.cjs +540 -0
- package/cap/bin/lib/roadmap.cjs +330 -0
- package/cap/bin/lib/security.cjs +394 -0
- package/cap/bin/lib/session-manager.cjs +292 -0
- package/cap/bin/lib/skeleton-generator.cjs +179 -0
- package/cap/bin/lib/state.cjs +1032 -0
- package/cap/bin/lib/template.cjs +231 -0
- package/cap/bin/lib/test-detector.cjs +62 -0
- package/cap/bin/lib/uat.cjs +283 -0
- package/cap/bin/lib/verify.cjs +889 -0
- package/cap/bin/lib/workspace-detector.cjs +371 -0
- package/cap/bin/lib/workstream.cjs +492 -0
- package/cap/commands/gsd/workstreams.md +63 -0
- package/cap/references/arc-standard.md +315 -0
- package/cap/references/cap-agent-architecture.md +101 -0
- package/cap/references/cap-gitignore-template +9 -0
- package/cap/references/cap-zero-deps.md +158 -0
- package/cap/references/checkpoints.md +778 -0
- package/cap/references/continuation-format.md +249 -0
- package/cap/references/contract-test-templates.md +312 -0
- package/cap/references/feature-map-template.md +25 -0
- package/cap/references/git-integration.md +295 -0
- package/cap/references/git-planning-commit.md +38 -0
- package/cap/references/model-profiles.md +174 -0
- package/cap/references/phase-numbering.md +126 -0
- package/cap/references/planning-config.md +202 -0
- package/cap/references/property-test-templates.md +316 -0
- package/cap/references/security-test-templates.md +347 -0
- package/cap/references/session-template.json +8 -0
- package/cap/references/tdd.md +263 -0
- package/cap/references/user-profiling.md +681 -0
- package/cap/references/verification-patterns.md +612 -0
- package/cap/templates/UAT.md +265 -0
- package/cap/templates/claude-md.md +175 -0
- package/cap/templates/codebase/architecture.md +255 -0
- package/cap/templates/codebase/concerns.md +310 -0
- package/cap/templates/codebase/conventions.md +307 -0
- package/cap/templates/codebase/integrations.md +280 -0
- package/cap/templates/codebase/stack.md +186 -0
- package/cap/templates/codebase/structure.md +285 -0
- package/cap/templates/codebase/testing.md +480 -0
- package/cap/templates/config.json +44 -0
- package/cap/templates/context.md +352 -0
- package/cap/templates/continue-here.md +78 -0
- package/cap/templates/copilot-instructions.md +7 -0
- package/cap/templates/debug-subagent-prompt.md +91 -0
- package/cap/templates/discussion-log.md +63 -0
- package/cap/templates/milestone-archive.md +123 -0
- package/cap/templates/milestone.md +115 -0
- package/cap/templates/phase-prompt.md +610 -0
- package/cap/templates/planner-subagent-prompt.md +117 -0
- package/cap/templates/project.md +186 -0
- package/cap/templates/requirements.md +231 -0
- package/cap/templates/research-project/ARCHITECTURE.md +204 -0
- package/cap/templates/research-project/FEATURES.md +147 -0
- package/cap/templates/research-project/PITFALLS.md +200 -0
- package/cap/templates/research-project/STACK.md +120 -0
- package/cap/templates/research-project/SUMMARY.md +170 -0
- package/cap/templates/research.md +552 -0
- package/cap/templates/roadmap.md +202 -0
- package/cap/templates/state.md +176 -0
- package/cap/templates/summary.md +364 -0
- package/cap/templates/user-preferences.md +498 -0
- package/cap/templates/verification-report.md +322 -0
- package/cap/workflows/add-phase.md +112 -0
- package/cap/workflows/add-tests.md +351 -0
- package/cap/workflows/add-todo.md +158 -0
- package/cap/workflows/audit-milestone.md +340 -0
- package/cap/workflows/audit-uat.md +109 -0
- package/cap/workflows/autonomous.md +891 -0
- package/cap/workflows/check-todos.md +177 -0
- package/cap/workflows/cleanup.md +152 -0
- package/cap/workflows/complete-milestone.md +767 -0
- package/cap/workflows/diagnose-issues.md +231 -0
- package/cap/workflows/discovery-phase.md +289 -0
- package/cap/workflows/discuss-phase-assumptions.md +653 -0
- package/cap/workflows/discuss-phase.md +1049 -0
- package/cap/workflows/do.md +104 -0
- package/cap/workflows/execute-phase.md +846 -0
- package/cap/workflows/execute-plan.md +514 -0
- package/cap/workflows/fast.md +105 -0
- package/cap/workflows/forensics.md +265 -0
- package/cap/workflows/health.md +181 -0
- package/cap/workflows/help.md +660 -0
- package/cap/workflows/insert-phase.md +130 -0
- package/cap/workflows/list-phase-assumptions.md +178 -0
- package/cap/workflows/list-workspaces.md +56 -0
- package/cap/workflows/manager.md +362 -0
- package/cap/workflows/map-codebase.md +377 -0
- package/cap/workflows/milestone-summary.md +223 -0
- package/cap/workflows/new-milestone.md +486 -0
- package/cap/workflows/new-project.md +1250 -0
- package/cap/workflows/new-workspace.md +237 -0
- package/cap/workflows/next.md +97 -0
- package/cap/workflows/node-repair.md +92 -0
- package/cap/workflows/note.md +156 -0
- package/cap/workflows/pause-work.md +176 -0
- package/cap/workflows/plan-milestone-gaps.md +273 -0
- package/cap/workflows/plan-phase.md +857 -0
- package/cap/workflows/plant-seed.md +169 -0
- package/cap/workflows/pr-branch.md +129 -0
- package/cap/workflows/profile-user.md +449 -0
- package/cap/workflows/progress.md +507 -0
- package/cap/workflows/quick.md +757 -0
- package/cap/workflows/remove-phase.md +155 -0
- package/cap/workflows/remove-workspace.md +90 -0
- package/cap/workflows/research-phase.md +82 -0
- package/cap/workflows/resume-project.md +326 -0
- package/cap/workflows/review.md +228 -0
- package/cap/workflows/session-report.md +146 -0
- package/cap/workflows/settings.md +283 -0
- package/cap/workflows/ship.md +228 -0
- package/cap/workflows/stats.md +60 -0
- package/cap/workflows/transition.md +671 -0
- package/cap/workflows/ui-phase.md +298 -0
- package/cap/workflows/ui-review.md +161 -0
- package/cap/workflows/update.md +323 -0
- package/cap/workflows/validate-phase.md +170 -0
- package/cap/workflows/verify-phase.md +254 -0
- package/cap/workflows/verify-work.md +637 -0
- package/commands/cap/annotate.md +165 -0
- package/commands/cap/brainstorm.md +393 -0
- package/commands/cap/checkpoint.md +106 -0
- package/commands/cap/completeness.md +94 -0
- package/commands/cap/continue.md +72 -0
- package/commands/cap/debug.md +588 -0
- package/commands/cap/deps.md +169 -0
- package/commands/cap/design.md +479 -0
- package/commands/cap/init.md +354 -0
- package/commands/cap/iterate.md +249 -0
- package/commands/cap/learn.md +459 -0
- package/commands/cap/memory.md +275 -0
- package/commands/cap/migrate-feature-map.md +91 -0
- package/commands/cap/migrate-memory.md +108 -0
- package/commands/cap/migrate-tags.md +91 -0
- package/commands/cap/migrate.md +131 -0
- package/commands/cap/prototype.md +510 -0
- package/commands/cap/reconcile.md +121 -0
- package/commands/cap/review.md +360 -0
- package/commands/cap/save.md +72 -0
- package/commands/cap/scan.md +404 -0
- package/commands/cap/start.md +356 -0
- package/commands/cap/status.md +118 -0
- package/commands/cap/test-audit.md +262 -0
- package/commands/cap/test.md +394 -0
- package/commands/cap/trace.md +133 -0
- package/commands/cap/ui.md +167 -0
- package/hooks/dist/cap-check-update.js +115 -0
- package/hooks/dist/cap-context-monitor.js +185 -0
- package/hooks/dist/cap-learn-review-hook.js +114 -0
- package/hooks/dist/cap-learning-hook.js +192 -0
- package/hooks/dist/cap-memory.js +299 -0
- package/hooks/dist/cap-prompt-guard.js +97 -0
- package/hooks/dist/cap-statusline.js +157 -0
- package/hooks/dist/cap-tag-observer.js +115 -0
- package/hooks/dist/cap-version-check.js +112 -0
- package/hooks/dist/cap-workflow-guard.js +175 -0
- package/hooks/hooks.json +55 -0
- package/package.json +58 -0
- package/scripts/base64-scan.sh +262 -0
- package/scripts/build-hooks.js +93 -0
- package/scripts/cap-removal-checklist.md +202 -0
- package/scripts/prompt-injection-scan.sh +199 -0
- package/scripts/run-tests.cjs +181 -0
- package/scripts/secret-scan.sh +227 -0
|
@@ -0,0 +1,2015 @@
|
|
|
1
|
+
// @cap-context CAP V6 Memory Migration Tool — one-shot conversion of V5 monolith memory files
// @cap-history(sessions:2, edits:14, since:2026-05-06, learned:2026-05-08) Frequently modified — 2 sessions, 14 edits
// (decisions.md, pitfalls.md, patterns.md, hotspots.md, graph.json) to the V6 per-feature layout
// defined by F-076 (cap-memory-schema.cjs). Designed to handle production scale: 1219+ entries,
// 38+ orphan snapshots without breakage. Hard-cutover: no V5/V6 coexistence at runtime; the user
// commits the new layout to git after this tool runs successfully.

'use strict';

// @cap-feature(feature:F-077, primary:true) V6 Memory Migration Tool — one-shot migration from V5 monolith to per-feature files (F-076 schema)
// @cap-feature(feature:F-082) Sub-app prefix boost in classifyEntry — leverages F-082's runtime metadata.subApp to lift path-heuristic confidence on monorepo file paths.

const fs = require('node:fs');
const path = require('node:path');
const readline = require('node:readline');

const schema = require('./cap-memory-schema.cjs');
const { readFeatureMap } = require('./cap-feature-map.cjs');

// -------- Constants --------

// @cap-decision(F-077/D1) Confidence threshold for auto vs ask is 0.7. Picked because:
// (1) tag-metadata + path-heuristic combined yield ≥0.7 on >90% of GoetzeInvest entries (manual
// audit on 50-sample), (2) leaves headroom under 1.0 so future signals (e.g., ANN over content
// embeddings) can lift confidence without breaking calibration, (3) symmetric with F-072 fitness
// score thresholds where 0.7 is the "trust" cutoff. Re-runs with different signal sets need to
// recalibrate but the constant is exposed so a future flag can override.
const CONFIDENCE_AUTO_THRESHOLD = 0.7;

// @cap-decision(F-077/D2) Backup file naming uses date-only (YYYY-MM-DD) suffix. Idempotent on
// same-day re-run — overwriting a same-day backup is safe because the only path to that state is
// replaying the same migration. A timestamp-with-seconds suffix would yield non-idempotent backups
// and pollute .cap/memory/.archive/ with one file per re-run. Cross-day re-run produces a NEW
// backup file (audit trail preserved across days).
const BACKUP_DIR = '.cap/memory/.archive';

// V5 source files we know how to parse.
const V5_SOURCES = ['decisions.md', 'pitfalls.md', 'patterns.md', 'hotspots.md'];
const V5_BINARY_SOURCES = ['graph.json'];

// @cap-decision(F-077/D3) Migration report lives in .cap/memory/.archive/migration-report-<date>.md.
// Same-day re-run REPLACES the report (simpler than an append-mode report; the previous run's
// report is still recoverable from git history if needed). The .archive/ folder is the single
// destination for all V5-derived artifacts (backups + report) so a user inspecting "what
// happened during V6 migration" only has to look in one place.
const MIGRATION_REPORT_PREFIX = 'migration-report';

// Platform topic for entries with no signal — deliberate "unassigned" bucket so no entry is lost.
const UNASSIGNED_PLATFORM_TOPIC = 'unassigned';
const UNASSIGNED_SNAPSHOTS_TOPIC = 'snapshots-unassigned';

// Snapshot date-proximity window for the heuristic (hours).
const SNAPSHOT_DATE_WINDOW_HOURS = 24;

// @cap-decision(F-077/D7) Title-prefix heuristic threshold — minimum occurrences across the V5
// corpus before a prefix counts as signal. 5 is the floor where we stop seeing
// sentence-starts ("Select:", "Update:", "Migration 067:") and start seeing
// actual app-name buckets ("GoetzeBooking:" appears 30+ times, "EasyMail:" 50+,
// etc.). Exposed for future tuning; tests pin the constant explicitly.
const TITLE_PREFIX_MIN_OCCURRENCES = 5;

// -------- Typedefs --------

/**
 * @typedef {Object} V5Entry
 * @property {'decision'|'pitfall'|'pattern'|'hotspot'} kind
 * @property {string} anchorId - the `<a id="..."></a>` anchor (or '' if none)
 * @property {string} title - the H3 heading text
 * @property {string} content - body text (multi-line, preserved)
 * @property {string} sourceFile - "decisions.md" etc.
 * @property {number} sourceLine - 1-indexed line number where the entry's H3 starts
 * @property {string|null} dateLabel - the "Date:" field text (e.g., "code", "code (F-050)")
 * @property {string[]} relatedFiles - paths from the "Files:" field
 * @property {number|null} confidence - the "Confidence:" field as 0..1, null if missing
 * @property {string|null} lastSeen - the "Last Seen:" ISO string, null if missing
 * @property {string|null} taggedFeatureId - F-NNN extracted from anchor's tag-metadata if any
 * @property {string|null} taggedPlatformTopic - platform topic if `@cap-decision platform:<topic>` was tagged
 */

/**
 * @typedef {Object} V5Snapshot
 * @property {string} fileName - relative to `.cap/snapshots/`
 * @property {string} sourcePath - absolute path
 * @property {string|null} feature - frontmatter `feature:` field, null if absent
 * @property {string|null} date - frontmatter `date:` ISO timestamp
 * @property {string} title - the H1 heading text or fileName fallback
 * @property {string} bodyHash - first 8 chars of content sha — cheap dedup key
 */

/**
 * @typedef {Object} ClassificationDecision
 * @property {'feature'|'platform'|'unassigned'} destination
 * @property {string=} featureId - F-NNN if destination === 'feature'
 * @property {string=} topic - kebab-case topic for the destination file
 * @property {number} confidence - 0..1
 * @property {string[]} reasons - human-readable signal trace
 * @property {Array<{featureId?: string, topic?: string, confidence: number, reason: string}>=} candidates - top-3 alternatives for ambiguity prompt
 */

/**
 * @typedef {Object} ClassifierContext
 * @property {Array<{id: string, title: string, files: string[], subApp?: string|null}>} features - from FEATURE-MAP.md
 * @property {Map<string, string>} fileToFeatureId - reverse-index: repo-relative path → F-NNN (FEATURE-MAP key_files)
 * @property {Map<string, string>=} sourceFileToFeatureId - F-077/AC-8: reverse-index from source-code @cap-feature tags → F-NNN. Built by scanning the project for @cap-feature(feature:F-XXX) tags. Falls back when FEATURE-MAP key_files lists are sparse or missing.
 * @property {Map<string, {state: string, transitionAt: string|null}>} featureState - F-NNN → last-known state-transition info (for snapshot date heuristic)
 * @property {Map<string, string>=} featureToSubApp - F-082: F-NNN → sub-app slug (e.g. "web", "api", "shared"). Empty when project is not a monorepo.
 * @property {Map<string, number>=} titlePrefixCounts - F-077/D7: title-prefix slug → occurrence count
 */

/**
 * @typedef {Object} PlannedWrite
 * @property {string} destinationPath - absolute path
 * @property {string} destinationKind - 'feature' | 'platform'
 * @property {string=} featureId
 * @property {string=} topic
 * @property {V5Entry[]} decisions
 * @property {V5Entry[]} pitfalls
 * @property {V5Snapshot[]} snapshots
 */

/**
 * @typedef {Object} MigrationPlan
 * @property {Object<string, number>} sourceCounts - { 'decisions.md': N, ... }
 * @property {Object<string, number>} sourceSizes - { 'decisions.md': bytes, ... }
 * @property {PlannedWrite[]} writes - one entry per output file
 * @property {Array<{entry: V5Entry|V5Snapshot, decision: ClassificationDecision, kind: string}>} ambiguous - confidence < threshold
 * @property {Array<{entry: V5Entry|V5Snapshot, kind: string}>} unassigned - confidence 0
 * @property {Array<{from: string, to: string, exists: boolean}>} backups
 * @property {string[]} parseErrors
 */

/**
 * @typedef {Object} MigrationOptions
 * @property {boolean=} dryRun - default true
 * @property {boolean=} apply - default false
 * @property {boolean=} interactive - default true
 * @property {number=} now - epoch millis for date snapshots; default Date.now()
 * @property {(prompt: string) => Promise<string>=} promptFn - test injection point
 * @property {(prompt: string) => boolean=} confirmFn - test injection point for the apply confirm
 * @property {(line: string) => void=} log - stderr writer; default console.error
 * @property {Array<{choice: string}>=} _testPromptResponses - canned responses for tests, consumed in order
 */

/**
 * @typedef {Object} MigrationResult
 * @property {boolean} dryRun
 * @property {MigrationPlan} plan
 * @property {Object|null} report - rendered report metadata after --apply, else null
 * @property {string[]} errors
 * @property {string[]} wroteFiles
 * @property {string[]} backups
 * @property {number} exitCode - 0 success, 1 error, 2 user-quit
 */
|
|
154
|
+
|
|
155
|
+
// -------- Public API --------
|
|
156
|
+
|
|
157
|
+
// @cap-todo(ac:F-077/AC-4) Default options: dryRun=true, apply=false, interactive=true.
// --apply switches dryRun to false; the actual writes are gated on apply.
/**
 * One-shot migration entry point. Plans first (pure reads), then — only with
 * --apply — confirms, resolves ambiguities, backs up the V5 sources, writes the
 * V6 files atomically, and drops a migration report into the archive folder.
 * @param {string} projectRoot
 * @param {MigrationOptions=} options
 * @returns {Promise<MigrationResult>}
 */
async function migrateMemory(projectRoot, options) {
  const cfg = _normalizeOptions(options);
  // Single place for caught-error formatting: prefer .message, fall back to String().
  const describeError = (err) => (err && err.message ? err.message : String(err));

  /** @type {MigrationResult} */
  const outcome = {
    dryRun: !cfg.apply,
    plan: /** @type {MigrationPlan} */ ({ sourceCounts: {}, sourceSizes: {}, writes: [], ambiguous: [], unassigned: [], backups: [], parseErrors: [] }),
    report: null,
    errors: [],
    wroteFiles: [],
    backups: [],
    exitCode: 0,
  };

  if (typeof projectRoot !== 'string' || projectRoot.length === 0) {
    outcome.errors.push('projectRoot must be a non-empty string');
    outcome.exitCode = 1;
    return outcome;
  }

  // Step 1 — classifier context from the FEATURE-MAP key_files index. A missing
  // FEATURE-MAP is not fatal here: record the problem and continue with an empty
  // context so the remaining heuristics still run.
  let classifierContext;
  try {
    classifierContext = buildClassifierContext(projectRoot);
  } catch (err) {
    outcome.errors.push(`feature-map context unavailable: ${describeError(err)}`);
    classifierContext = { features: [], fileToFeatureId: new Map(), featureState: new Map() };
  }

  // Step 2 — pure planning pass (fs reads only, no writes).
  const plan = buildMigrationPlan(projectRoot, classifierContext, cfg);
  outcome.plan = plan;

  // Step 3 — without --apply this is a dry run: emit the plan and stop.
  if (!cfg.apply) {
    _emitDryRunReport(plan, cfg.log);
    return outcome;
  }

  // Step 4 — interactive apply requires an explicit confirmation first.
  if (cfg.interactive && !(await _confirmApply(plan, cfg))) {
    outcome.errors.push('user declined apply');
    outcome.exitCode = 2;
    return outcome;
  }

  // Step 5 — route low-confidence entries, prompting when interactive
  // (auto-routing otherwise). USER_QUIT is a clean abort, not a failure.
  let resolved;
  try {
    resolved = await resolveAmbiguities(plan, cfg);
  } catch (err) {
    if (err && err.code === 'USER_QUIT') {
      outcome.errors.push('user quit migration');
      outcome.exitCode = 2;
    } else {
      outcome.errors.push(`ambiguity resolution failed: ${describeError(err)}`);
      outcome.exitCode = 1;
    }
    return outcome;
  }

  // Step 6 — back up the V5 sources (same-day re-run overwrites, see F-077/D2).
  const backupDate = _isoDate(cfg.now);
  for (const { from, to } of resolved.backups) {
    try {
      if (_writeBackup(from, to)) outcome.backups.push(to);
    } catch (err) {
      outcome.errors.push(`backup failed for ${from}: ${describeError(err)}`);
    }
  }

  // Step 7 — atomic writes of the planned feature and platform files.
  // @cap-decision(F-077/D6) The `updated:` stamp comes from plan.sourceMaxMtime (not cfg.now)
  // so re-running against unchanged V5 sources produces byte-identical output; cfg.now is
  // only the fallback when no source file existed at all (rare — unit tests).
  const updatedTimestamp = plan.sourceMaxMtime || cfg.now;
  for (const write of resolved.writes) {
    try {
      if (_writePlannedFile(write, updatedTimestamp)) {
        outcome.wroteFiles.push(write.destinationPath);
      } else {
        outcome.errors.push(`atomic write failed for ${write.destinationPath}`);
      }
    } catch (err) {
      outcome.errors.push(`write error for ${write.destinationPath}: ${describeError(err)}`);
    }
  }

  // Step 8 — migration report, archived next to the backups (see F-077/D3).
  const reportPath = path.join(projectRoot, BACKUP_DIR, `${MIGRATION_REPORT_PREFIX}-${backupDate}.md`);
  const reportData = _buildReportData(projectRoot, resolved, outcome, cfg);
  try {
    _atomicWriteFile(reportPath, _renderReport(reportData));
    outcome.report = reportData;
  } catch (err) {
    outcome.errors.push(`report write failed: ${describeError(err)}`);
  }

  // Any accumulated error (including the degraded-context note from step 1) marks the run failed.
  if (outcome.errors.length > 0) outcome.exitCode = 1;
  return outcome;
}
|
|
267
|
+
|
|
268
|
+
// @cap-todo(ac:F-077/AC-1) buildMigrationPlan parses all V5 sources and routes each entry/snapshot
|
|
269
|
+
// via classifyEntry. Pure planning step — no fs writes.
|
|
270
|
+
/**
|
|
271
|
+
* Build a migration plan from disk state. Pure computation — only fs reads.
|
|
272
|
+
* @param {string} projectRoot
|
|
273
|
+
* @param {ClassifierContext} context
|
|
274
|
+
* @param {MigrationOptions=} options
|
|
275
|
+
* @returns {MigrationPlan}
|
|
276
|
+
*/
|
|
277
|
+
function buildMigrationPlan(projectRoot, context, options) {
|
|
278
|
+
const opts = _normalizeOptions(options);
|
|
279
|
+
const memoryDir = path.join(projectRoot, '.cap', 'memory');
|
|
280
|
+
const featuresDir = path.join(memoryDir, 'features');
|
|
281
|
+
const platformDir = path.join(memoryDir, 'platform');
|
|
282
|
+
const snapshotsDir = path.join(projectRoot, '.cap', 'snapshots');
|
|
283
|
+
|
|
284
|
+
/** @type {MigrationPlan} */
|
|
285
|
+
const plan = {
|
|
286
|
+
sourceCounts: {},
|
|
287
|
+
sourceSizes: {},
|
|
288
|
+
sourceMaxMtime: 0, // F-077/D6: max(mtimeMs) across V5 sources, used as `updated:` for V6 files
|
|
289
|
+
writes: [],
|
|
290
|
+
ambiguous: [],
|
|
291
|
+
unassigned: [],
|
|
292
|
+
backups: [],
|
|
293
|
+
parseErrors: [],
|
|
294
|
+
};
|
|
295
|
+
|
|
296
|
+
// 1. Parse the four V5 markdown sources.
|
|
297
|
+
/** @type {V5Entry[]} */
|
|
298
|
+
const allEntries = [];
|
|
299
|
+
for (const sourceName of V5_SOURCES) {
|
|
300
|
+
const fp = path.join(memoryDir, sourceName);
|
|
301
|
+
if (!fs.existsSync(fp)) {
|
|
302
|
+
plan.sourceCounts[sourceName] = 0;
|
|
303
|
+
plan.sourceSizes[sourceName] = 0;
|
|
304
|
+
continue;
|
|
305
|
+
}
|
|
306
|
+
let raw;
|
|
307
|
+
try {
|
|
308
|
+
raw = fs.readFileSync(fp, 'utf8');
|
|
309
|
+
} catch (e) {
|
|
310
|
+
plan.parseErrors.push(`read failed for ${sourceName}: ${e && e.message ? e.message : String(e)}`);
|
|
311
|
+
plan.sourceCounts[sourceName] = 0;
|
|
312
|
+
plan.sourceSizes[sourceName] = 0;
|
|
313
|
+
continue;
|
|
314
|
+
}
|
|
315
|
+
const stat = fs.statSync(fp);
|
|
316
|
+
plan.sourceSizes[sourceName] = stat.size;
|
|
317
|
+
// @cap-decision(F-077/D6) Track max source mtime to derive a stable `updated:` field for V6
|
|
318
|
+
// files. AC-2 says "wiederholtes Ausführen ohne neue Inputs darf keine
|
|
319
|
+
// Diff-Änderungen produzieren" — using Date.now() at write-time would put a
|
|
320
|
+
// fresh ISO timestamp into every regenerated V6 file on every run, even when
|
|
321
|
+
// the V5 sources hadn't changed. Source-mtime makes the timestamp truly a
|
|
322
|
+
// function of the input, not the run.
|
|
323
|
+
if (!plan.sourceMaxMtime || stat.mtimeMs > plan.sourceMaxMtime) {
|
|
324
|
+
plan.sourceMaxMtime = stat.mtimeMs;
|
|
325
|
+
}
|
|
326
|
+
const entries = parseV5MarkdownFile(raw, sourceName);
|
|
327
|
+
plan.sourceCounts[sourceName] = entries.length;
|
|
328
|
+
allEntries.push(...entries);
|
|
329
|
+
}
|
|
330
|
+
|
|
331
|
+
// 2. Parse graph.json for hotspot-style structured nodes when hotspots.md was empty/parsed-empty.
|
|
332
|
+
const graphPath = path.join(memoryDir, 'graph.json');
|
|
333
|
+
if (fs.existsSync(graphPath) && (!plan.sourceCounts['hotspots.md'] || plan.sourceCounts['hotspots.md'] === 0)) {
|
|
334
|
+
try {
|
|
335
|
+
const raw = fs.readFileSync(graphPath, 'utf8');
|
|
336
|
+
const graphEntries = parseGraphJson(raw);
|
|
337
|
+
// graph.json is the canonical source for tagged feature-id metadata when present.
|
|
338
|
+
// Cross-link by anchor id where possible to enrich the markdown-parsed entries.
|
|
339
|
+
_enrichEntriesFromGraph(allEntries, graphEntries);
|
|
340
|
+
// Hotspot-only nodes in graph that have no markdown counterpart get added directly.
|
|
341
|
+
for (const ge of graphEntries.hotspotsWithoutMarkdown) {
|
|
342
|
+
allEntries.push(ge);
|
|
343
|
+
}
|
|
344
|
+
} catch (e) {
|
|
345
|
+
plan.parseErrors.push(`graph.json parse: ${e && e.message ? e.message : String(e)}`);
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
// 3. Parse snapshots.
|
|
350
|
+
/** @type {V5Snapshot[]} */
|
|
351
|
+
const snapshots = [];
|
|
352
|
+
if (fs.existsSync(snapshotsDir)) {
|
|
353
|
+
let names = [];
|
|
354
|
+
try {
|
|
355
|
+
names = fs.readdirSync(snapshotsDir);
|
|
356
|
+
} catch (_e) {
|
|
357
|
+
// ignore
|
|
358
|
+
}
|
|
359
|
+
for (const name of names) {
|
|
360
|
+
if (!name.endsWith('.md')) continue;
|
|
361
|
+
const fp = path.join(snapshotsDir, name);
|
|
362
|
+
let raw;
|
|
363
|
+
try { raw = fs.readFileSync(fp, 'utf8'); } catch (_e) { continue; }
|
|
364
|
+
try {
|
|
365
|
+
snapshots.push(parseSnapshot(name, fp, raw));
|
|
366
|
+
} catch (e) {
|
|
367
|
+
plan.parseErrors.push(`snapshot parse ${name}: ${e && e.message ? e.message : String(e)}`);
|
|
368
|
+
}
|
|
369
|
+
}
|
|
370
|
+
}
|
|
371
|
+
|
|
372
|
+
// 4. Classify every entry + snapshot.
|
|
373
|
+
// @cap-decision(F-077/D7) Title-prefix heuristic uses a two-pass: count prefix occurrences first,
|
|
374
|
+
// then promote only prefixes with ≥ TITLE_PREFIX_MIN_OCCURRENCES entries. Single-
|
|
375
|
+
// occurrence prefixes are noise (sentences starting with "Select:", "Update:",
|
|
376
|
+
// "Migration 067:" etc.) and would otherwise produce a swarm of 1-2-entry
|
|
377
|
+
// platform files. Real-world: GoetzeInvest pre-threshold produced 130 platform
|
|
378
|
+
// files with mostly junk; threshold-5 yields ~5-8 meaningful app-buckets.
|
|
379
|
+
context.titlePrefixCounts = _countTitlePrefixes(allEntries);
|
|
380
|
+
|
|
381
|
+
/** @type {Map<string, PlannedWrite>} */
|
|
382
|
+
const writeIndex = new Map();
|
|
383
|
+
const ensureWrite = (key, build) => {
|
|
384
|
+
if (writeIndex.has(key)) return /** @type {PlannedWrite} */ (writeIndex.get(key));
|
|
385
|
+
const w = build();
|
|
386
|
+
writeIndex.set(key, w);
|
|
387
|
+
plan.writes.push(w);
|
|
388
|
+
return w;
|
|
389
|
+
};
|
|
390
|
+
|
|
391
|
+
for (const entry of allEntries) {
|
|
392
|
+
const decision = classifyEntry(entry, context);
|
|
393
|
+
if (decision.confidence < CONFIDENCE_AUTO_THRESHOLD && decision.destination !== 'unassigned') {
|
|
394
|
+
// Hold for ambiguity resolution. The "primary" decision is captured but the actual
|
|
395
|
+
// destination is deferred until resolveAmbiguities pickes a candidate.
|
|
396
|
+
plan.ambiguous.push({ entry, decision, kind: entry.kind });
|
|
397
|
+
continue;
|
|
398
|
+
}
|
|
399
|
+
_routeEntryToWrite(entry, decision, projectRoot, featuresDir, platformDir, ensureWrite, plan);
|
|
400
|
+
}
|
|
401
|
+
|
|
402
|
+
// Snapshots — separate classifier (frontmatter feature wins → date heuristic → keyword).
|
|
403
|
+
for (const snap of snapshots) {
|
|
404
|
+
const decision = classifySnapshot(snap, context);
|
|
405
|
+
if (decision.confidence < CONFIDENCE_AUTO_THRESHOLD && decision.destination !== 'unassigned') {
|
|
406
|
+
plan.ambiguous.push({ entry: snap, decision, kind: 'snapshot' });
|
|
407
|
+
continue;
|
|
408
|
+
}
|
|
409
|
+
_routeSnapshotToWrite(snap, decision, projectRoot, featuresDir, platformDir, ensureWrite, plan);
|
|
410
|
+
}
|
|
411
|
+
|
|
412
|
+
// 5. Backups — only files that exist get backed up.
|
|
413
|
+
const backupDate = _isoDate(opts.now);
|
|
414
|
+
for (const sourceName of [...V5_SOURCES, ...V5_BINARY_SOURCES]) {
|
|
415
|
+
const from = path.join(memoryDir, sourceName);
|
|
416
|
+
if (!fs.existsSync(from)) continue;
|
|
417
|
+
const ext = path.extname(sourceName);
|
|
418
|
+
const stem = sourceName.slice(0, sourceName.length - ext.length);
|
|
419
|
+
const backupName = `${stem}-pre-v6-${backupDate}${ext}`;
|
|
420
|
+
const to = path.join(projectRoot, BACKUP_DIR, backupName);
|
|
421
|
+
plan.backups.push({ from, to, exists: fs.existsSync(to) });
|
|
422
|
+
}
|
|
423
|
+
|
|
424
|
+
// 6. Sort writes for deterministic output (helps tests, idempotency).
|
|
425
|
+
plan.writes.sort((a, b) => a.destinationPath.localeCompare(b.destinationPath));
|
|
426
|
+
for (const w of plan.writes) {
|
|
427
|
+
w.decisions.sort(_compareEntriesByText);
|
|
428
|
+
w.pitfalls.sort(_compareEntriesByText);
|
|
429
|
+
w.snapshots.sort((a, b) => a.fileName.localeCompare(b.fileName));
|
|
430
|
+
}
|
|
431
|
+
|
|
432
|
+
return plan;
|
|
433
|
+
}
|
|
434
|
+
|
|
435
|
+
// @cap-todo(ac:F-077/AC-5) classifyEntry — priority order:
// 1. Tag metadata (feature:F-NNN) — confidence 1.0
// 2. Tagged platform topic (platform:<topic>) — confidence 1.0
// 3. Path heuristic against FEATURE-MAP key_files — confidence 0.7
// 4. F-NNN mention in body text (exactly once) — confidence 0.5
// 5. No signal — confidence 0.0, route to unassigned.
/**
 * Classify a single V5 entry into a V6 destination (feature file, platform file,
 * or the unassigned bucket). Signals are tried strictly in the priority order
 * listed above; the first one that fires returns immediately, so later (weaker)
 * heuristics never dilute a stronger match. `reasons` accumulates a human-readable
 * audit trail of every signal consulted before the return.
 *
 * @param {V5Entry} entry - parsed markdown entry (title/content/relatedFiles/tags)
 * @param {ClassifierContext} context - lookup maps built from FEATURE-MAP, code tags,
 *   sub-app metadata and the title-prefix pre-count
 * @returns {ClassificationDecision} destination + confidence + reasons; decisions
 *   below the auto-threshold carry a `candidates` array for the interactive prompt
 */
function classifyEntry(entry, context) {
  const reasons = [];

  // 1. Tagged feature id (highest signal).
  if (entry.taggedFeatureId) {
    reasons.push(`tag-metadata:${entry.taggedFeatureId}`);
    return {
      destination: 'feature',
      featureId: entry.taggedFeatureId,
      topic: _topicForFeature(entry.taggedFeatureId, context),
      confidence: 1.0,
      reasons,
    };
  }

  // 2. Tagged platform topic.
  if (entry.taggedPlatformTopic) {
    reasons.push(`platform-tag:${entry.taggedPlatformTopic}`);
    return {
      destination: 'platform',
      topic: entry.taggedPlatformTopic,
      confidence: 1.0,
      reasons,
    };
  }

  // 3. Path heuristic — match relatedFiles against FEATURE-MAP key_files.
  const matches = new Map(); // featureId -> hit count
  // @cap-todo(ac:F-082/AC-4) Track sub-app boosts separately so a strong file-list hit
  // (3+ files for one feature, no sub-app match) still wins over a sub-app-only nudge.
  const subAppBoosts = new Map(); // featureId -> boost score (0..1)
  /** @type {Set<string>} sub-apps inferred from this entry's files */
  const entrySubApps = new Set();
  for (const f of entry.relatedFiles || []) {
    const normalized = _normalizeRepoPath(f);
    const fid = context.fileToFeatureId.get(normalized);
    if (fid) {
      matches.set(fid, (matches.get(fid) || 0) + 1);
    }
    // @cap-decision(F-082/AC-4) Match `apps/<sub>/...` AND `packages/<sub>/...` prefixes.
    // Single-segment top-level paths (e.g. `web/foo.ts`) deliberately NOT matched — those
    // are too ambiguous to attribute without explicit user opt-in.
    const segMatch = normalized.match(/^(?:apps|packages)\/([^/]+)\//);
    if (segMatch) entrySubApps.add(segMatch[1]);
  }
  // @cap-todo(ac:F-082/AC-4) Sub-app prefix boost. For each feature with `metadata.subApp`
  // matching one of the inferred sub-apps from this entry's files, add a soft boost.
  // The boost is `+0.5` per matching feature/sub-app pair. The single-match calibration
  // below caps the resulting confidence at 0.55 when there is NO file-list hit — so a
  // subApp-only nudge stays below the auto-threshold and prompts the user. With at least
  // one file-list hit, the boost feeds into `0.7 + boost * 0.2` (capped at 0.95).
  // @cap-decision(F-082/iter1 warn:4) Doc/code drift fix. Pre-iter1 comment claimed the boost
  // "caps at +0.3" but the code uses +0.5. Updated text to match actual behavior — the
  // confidence cap (0.55 for subApp-only) is what bounds the user-visible signal, not the
  // raw additive cap.
  // @cap-decision(F-082/AC-4) Boost is additive to the file-list hit count (each matching
  // feature/sub-app pair adds 0.5 to the combined score), NOT a replacement. Real file-list
  // hits still dominate. A feature with 3 file-list hits scores 3 + boost; a feature with
  // 0 file-list hits but a matching sub-app scores boost only and is calibrated to 0.55.
  // @cap-risk(F-082/backward-compat) When `featureToSubApp` is empty (non-monorepo project),
  // this loop is a no-op. Existing behavior is unchanged. Test pinned in
  // cap-memory-migrate-monorepo.test.cjs ("no-boost when subApp metadata absent").
  if (context.featureToSubApp && context.featureToSubApp.size > 0 && entrySubApps.size > 0) {
    for (const [fid, sub] of context.featureToSubApp.entries()) {
      if (entrySubApps.has(sub)) {
        // Soft boost: half a file-list hit per matching feature/sub-app pair.
        subAppBoosts.set(fid, (subAppBoosts.get(fid) || 0) + 0.5);
        // Make the feature appear in `matches` even if no file-list hit, so the tie-breaker
        // below considers it. Use a tiny seed (0) so a real hit still beats subApp-only.
        if (!matches.has(fid)) matches.set(fid, 0);
      }
    }
  }
  // Build a combined-score map for selection: file-hits + subApp-boost.
  const combined = new Map();
  for (const [fid, hits] of matches.entries()) {
    combined.set(fid, hits + (subAppBoosts.get(fid) || 0));
  }
  // Exactly one candidate feature — no ranking needed, only confidence calibration.
  if (combined.size === 1) {
    const [fid] = combined.keys();
    const fileHits = matches.get(fid) || 0;
    const boost = subAppBoosts.get(fid) || 0;
    // @cap-decision(F-082/AC-4) Confidence calibration:
    // - real file-list hit (fileHits >= 1) keeps the legacy 0.7 baseline + small boost.
    // - subApp-only match (fileHits === 0, boost > 0) yields 0.55 — below auto threshold
    //   so the user is prompted, but well above the F-NNN-mention 0.5 fallback.
    let confidence = fileHits >= 1 ? Math.min(0.95, 0.7 + boost * 0.2) : 0.55;
    if (boost > 0 && fileHits >= 1) reasons.push(`path-match:${fid}+subapp-boost`);
    else if (boost > 0) reasons.push(`subapp-boost:${fid}`);
    else reasons.push(`path-match:${fid}`);
    return {
      destination: 'feature',
      featureId: fid,
      topic: _topicForFeature(fid, context),
      confidence,
      reasons,
    };
  }
  if (combined.size > 1) {
    // Multiple feature matches — emit as ambiguous with top-3 candidates ranked by combined score.
    const sorted = [...combined.entries()].sort((a, b) => b[1] - a[1]).slice(0, 3);
    reasons.push(`path-match-multi:${sorted.map((s) => s[0]).join(',')}`);
    const topFid = sorted[0][0];
    const topFileHits = matches.get(topFid) || 0;
    const topBoost = subAppBoosts.get(topFid) || 0;
    const candidates = sorted.map(([fid, score]) => {
      const fHits = matches.get(fid) || 0;
      const fBoost = subAppBoosts.get(fid) || 0;
      const reasonParts = [];
      if (fHits > 0) reasonParts.push(`path-match (${fHits} hit${fHits === 1 ? '' : 's'})`);
      if (fBoost > 0) reasonParts.push(`subapp-boost`);
      return {
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: 0.6 + 0.05 * Math.min(score, 4),
        reason: reasonParts.join(' + ') || 'path',
      };
    });
    // @cap-decision(F-082/AC-4) When the runner-up has zero file-hits but the leader has a
    // strong file-hit lead AND matches a sub-app, promote to auto-confidence. This is the
    // key V6.1 lever: GoetzeInvest's `apps/web/src/auth/login.tsx` has both sub-app match
    // AND a file-list hit on F-WEB-AUTH; without the lift it would stay below threshold and
    // demand 1000+ user prompts.
    let confidence = 0.6;
    if (topFileHits >= 1 && topBoost > 0) {
      const runnerUpScore = sorted[1] ? sorted[1][1] : 0;
      const leadScore = sorted[0][1];
      // A 0.5 lead equals "one sub-app boost" — the smallest scoring increment — so any
      // smaller gap is treated as a tie and stays at the ambiguous 0.6 baseline.
      if (leadScore - runnerUpScore >= 0.5) {
        confidence = Math.min(0.85, 0.7 + topBoost * 0.2);
      }
    }
    return {
      destination: 'feature',
      featureId: candidates[0].featureId,
      topic: candidates[0].topic,
      confidence,
      reasons,
      candidates,
    };
  }

  // @cap-feature(feature:F-077, primary:true) AC-8 — Code-Tag Reverse-Index fallback.
  // When FEATURE-MAP key_files match misses (e.g. hub has 0/183 features with **Files:**
  // sections), check whether any of the entry's relatedFiles has an @cap-feature(feature:F-XXX)
  // tag in source code. Confidence calibration:
  // - Single-feature single-source-file → 0.75 (above auto-threshold 0.7, comparable to a
  //   curated key_files match — a file with exactly one @cap-feature tag is as trustworthy
  //   as one explicitly listed in FEATURE-MAP)
  // - Multi-source-files agreeing on one feature → 0.85 (stronger signal)
  // - Multi-feature ambiguity → 0.6, surface candidates for prompt
  if (context.sourceFileToFeatureId && context.sourceFileToFeatureId.size > 0) {
    const codeMatches = new Map(); // featureId -> hit count
    for (const f of entry.relatedFiles || []) {
      const normalized = _normalizeRepoPath(f);
      const fid = context.sourceFileToFeatureId.get(normalized);
      if (fid) codeMatches.set(fid, (codeMatches.get(fid) || 0) + 1);
    }
    if (codeMatches.size === 1) {
      const [fid] = codeMatches.keys();
      const hits = codeMatches.get(fid);
      reasons.push(`code-tag-match:${fid}${hits > 1 ? `(${hits})` : ''}`);
      return {
        destination: 'feature',
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: hits >= 2 ? 0.85 : 0.75,
        reasons,
      };
    }
    if (codeMatches.size > 1) {
      const sorted = [...codeMatches.entries()].sort((a, b) => b[1] - a[1]).slice(0, 3);
      reasons.push(`code-tag-multi:${sorted.map((s) => s[0]).join(',')}`);
      const candidates = sorted.map(([fid, hits]) => ({
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: 0.55 + 0.05 * Math.min(hits, 3),
        reason: `code-tag-match (${hits} hit${hits === 1 ? '' : 's'})`,
      }));
      return {
        destination: 'feature',
        featureId: candidates[0].featureId,
        topic: candidates[0].topic,
        confidence: 0.6,
        reasons,
        candidates,
      };
    }
  }

  // 4. F-NNN mention in body text — exactly one unique id.
  const haystack = `${entry.title}\n${entry.content}\n${entry.dateLabel || ''}`;
  const ids = new Set(_extractFeatureIdsFromText(haystack));
  if (ids.size === 1) {
    const [fid] = ids;
    reasons.push(`text-mention:${fid}`);
    return {
      destination: 'feature',
      featureId: fid,
      topic: _topicForFeature(fid, context),
      confidence: 0.5,
      reasons,
      candidates: [{
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: 0.5,
        reason: 'F-NNN mentioned in body text',
      }],
    };
  }
  if (ids.size > 1) {
    // Multiple distinct ids in the text — weakest feature signal (0.4); surface the
    // first three as candidates and let the ambiguity resolver decide.
    const list = [...ids].slice(0, 3);
    reasons.push(`text-mention-multi:${list.join(',')}`);
    return {
      destination: 'feature',
      featureId: list[0],
      topic: _topicForFeature(list[0], context),
      confidence: 0.4,
      reasons,
      candidates: list.map((fid) => ({
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: 0.4,
        reason: 'F-NNN mentioned in body text (multi)',
      })),
    };
  }

  // 5. Title-prefix heuristic — last-chance signal before falling back to unassigned.
  // @cap-decision(F-077/D7) Many projects encode the app/sub-feature in a title-prefix convention
  // ("GoetzeBooking: ...", "EasyMail: ...", "Hub: ..."). When tag-metadata,
  // path-match, and F-NNN-mention all miss, a recognizable prefix is still useful
  // signal: route to `platform/prefix-<slug>.md`. Real-world: GoetzeInvest dry-run
  // pre-D7 produced 0 feature files / 1347 unassigned over 1287 V5 entries because
  // the project's FEATURE-MAP uses long-form IDs (F-DEPLOY, F-HUB-AUTH) that
  // cap-feature-map.cjs doesn't parse. Issue #39 tracks the proper multi-format
  // + monorepo support; D7 is the bridge that makes V6.0 useful for that project
  // in the meantime. Threshold-gated: prefix must appear in
  // TITLE_PREFIX_MIN_OCCURRENCES entries (default 5) before it counts as signal
  // — avoids the 130-tiny-files swarm from sentences that incidentally start with
  // a capitalised word + colon (Select:, Update:, Migration 067:, etc.).
  const prefixSlug = _extractTitlePrefixSlug(entry.title);
  if (prefixSlug) {
    const count = context.titlePrefixCounts ? context.titlePrefixCounts.get(prefixSlug) || 0 : 0;
    if (count >= TITLE_PREFIX_MIN_OCCURRENCES) {
      reasons.push(`title-prefix:${prefixSlug}(${count})`);
      return {
        destination: 'platform',
        topic: `prefix-${prefixSlug}`,
        confidence: 0.7,
        reasons,
      };
    }
  }

  // 6. No signal.
  reasons.push('no-signal');
  return {
    destination: 'unassigned',
    topic: UNASSIGNED_PLATFORM_TOPIC,
    confidence: 0,
    reasons,
  };
}
|
|
709
|
+
|
|
710
|
+
/**
 * Derive a kebab-case slug from the `<Prefix>:` naming convention used by V5
 * decision/pitfall titles (e.g. "GoetzeBooking: cache the session" → "goetzebooking").
 *
 * Returns null for non-string input, for titles that don't follow the convention,
 * for known noise words (todo, fix, ...), and for slugs outside the 3–40 char range.
 *
 * @cap-decision(F-077/D7) Prefix must start with a letter, contain only
 * alphanumerics + space + dash, and be followed by exactly one ":" plus
 * whitespace. This excludes URLs ("http://"), code patterns ("foo::bar"),
 * and misc colons in normal prose. Slug is kebab-case lowercase, max 40 chars.
 *
 * @param {string} title
 * @returns {string|null}
 */
function _extractTitlePrefixSlug(title) {
  if (typeof title !== 'string') return null;

  const PREFIX_RE = /^([A-Za-z][A-Za-z0-9 \-]{1,40}):\s/;
  const headerMatch = PREFIX_RE.exec(title);
  if (headerMatch === null) return null;

  const rawPrefix = headerMatch[1].trim();
  // Obvious non-app-names: too short, or a generic marker word.
  if (rawPrefix.length < 3) return null;
  const noiseWords = new Set(['todo', 'note', 'fix', 'bug', 'wip', 'tbd', 'fixme', 'xxx']);
  if (noiseWords.has(rawPrefix.toLowerCase())) return null;

  // Kebab-case: collapse each run of non-alphanumerics into one dash, then
  // strip any dashes left dangling at either end.
  const slug = rawPrefix
    .toLowerCase()
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '');
  return slug.length >= 3 && slug.length <= 40 ? slug : null;
}
|
|
737
|
+
|
|
738
|
+
/**
 * Tally how often each title-prefix slug occurs across all entries. The D7
 * title-prefix classifier step gates on `count >= TITLE_PREFIX_MIN_OCCURRENCES`,
 * so a one-off prefix never earns its own platform file.
 *
 * @param {V5Entry[]} entries
 * @returns {Map<string, number>} slug → occurrence count
 */
function _countTitlePrefixes(entries) {
  /** @type {Map<string, number>} */
  const tally = new Map();
  for (const entry of entries) {
    const slug = _extractTitlePrefixSlug(entry.title || '');
    if (slug) {
      tally.set(slug, (tally.get(slug) || 0) + 1);
    }
  }
  return tally;
}
|
|
755
|
+
|
|
756
|
+
// @cap-todo(ac:F-077/AC-5) classifySnapshot — priority order:
// 1. Frontmatter `feature:` field — confidence 1.0
// 2. Date proximity to FEATURE-MAP state-transition (within 24h) — confidence 0.6
// 3. Title keyword: F-NNN in title — confidence 0.4
// 4. No signal — confidence 0.0
/**
 * Classify a V5 snapshot file into a V6 destination. Snapshots use a separate,
 * shorter signal chain than entries (classifyEntry) because they carry
 * frontmatter and a date instead of a Files list.
 *
 * @param {V5Snapshot} snap - parsed snapshot (frontmatter feature/date + title)
 * @param {ClassifierContext} context - includes `featureState` with per-feature
 *   state-transition timestamps for the date-proximity heuristic
 * @returns {ClassificationDecision}
 */
function classifySnapshot(snap, context) {
  const reasons = [];

  // 1. Frontmatter `feature:` field — authoritative when it is a numeric F-NNN id.
  // Note: long-form ids (e.g. F-DEPLOY) deliberately fail this regex and fall
  // through to the heuristics below (multi-format id support is tracked in #39).
  if (snap.feature && /^F-\d{3,}$/.test(snap.feature)) {
    reasons.push(`frontmatter:${snap.feature}`);
    return {
      destination: 'feature',
      featureId: snap.feature,
      topic: _topicForFeature(snap.feature, context),
      confidence: 1.0,
      reasons,
    };
  }

  // Date-proximity heuristic.
  if (snap.date) {
    const snapTime = new Date(snap.date).getTime();
    if (!Number.isNaN(snapTime)) {
      // Collect every feature whose recorded state-transition falls within the
      // window; `dh` is the distance in hours (used for ranking below).
      const candidates = [];
      for (const [fid, info] of context.featureState.entries()) {
        if (!info.transitionAt) continue;
        const t = new Date(info.transitionAt).getTime();
        if (Number.isNaN(t)) continue;
        const dh = Math.abs(snapTime - t) / (1000 * 60 * 60);
        if (dh <= SNAPSHOT_DATE_WINDOW_HOURS) {
          candidates.push({ featureId: fid, dh });
        }
      }
      if (candidates.length === 1) {
        reasons.push(`date-proximity:${candidates[0].featureId}`);
        return {
          destination: 'feature',
          featureId: candidates[0].featureId,
          topic: _topicForFeature(candidates[0].featureId, context),
          confidence: 0.6,
          reasons,
          candidates: candidates.map((c) => ({
            featureId: c.featureId,
            topic: _topicForFeature(c.featureId, context),
            confidence: 0.6,
            reason: `state-transition within ${SNAPSHOT_DATE_WINDOW_HOURS}h`,
          })),
        };
      }
      if (candidates.length > 1) {
        // @cap-decision(F-079/followup) F-079-FIX-D: explicit secondary sort for tie-break determinism.
        // Previously the tie-break for equally-close transitions relied implicitly on Map iteration
        // order + V8's stable sort. The contract worked today but wasn't pinned. If a future refactor
        // switches to a Set or a non-stable sort, the determinism would break silently. The explicit
        // `localeCompare` on featureId locks the contract: equal `dh` → lexicographic featureId order.
        candidates.sort((a, b) => a.dh - b.dh || a.featureId.localeCompare(b.featureId));
        const top = candidates.slice(0, 3);
        reasons.push(`date-proximity-multi:${top.map((c) => c.featureId).join(',')}`);
        return {
          destination: 'feature',
          featureId: top[0].featureId,
          topic: _topicForFeature(top[0].featureId, context),
          confidence: 0.5,
          reasons,
          candidates: top.map((c) => ({
            featureId: c.featureId,
            topic: _topicForFeature(c.featureId, context),
            confidence: 0.5,
            reason: `state-transition within ${SNAPSHOT_DATE_WINDOW_HOURS}h (multi)`,
          })),
        };
      }
    }
  }

  // Title keyword: F-NNN in title.
  const ids = new Set(_extractFeatureIdsFromText(snap.title));
  if (ids.size === 1) {
    const [fid] = ids;
    reasons.push(`title:${fid}`);
    return {
      destination: 'feature',
      featureId: fid,
      topic: _topicForFeature(fid, context),
      confidence: 0.4,
      reasons,
      candidates: [{
        featureId: fid,
        topic: _topicForFeature(fid, context),
        confidence: 0.4,
        reason: 'F-NNN in snapshot title',
      }],
    };
  }

  // No signal — unassigned snapshots get their own bucket (distinct from
  // unassigned entries, see UNASSIGNED_PLATFORM_TOPIC in classifyEntry).
  reasons.push('no-signal');
  return {
    destination: 'unassigned',
    topic: UNASSIGNED_SNAPSHOTS_TOPIC,
    confidence: 0,
    reasons,
  };
}
|
|
864
|
+
|
|
865
|
+
// @cap-todo(ac:F-077/AC-6) resolveAmbiguities — interactive runner. In dry-run we don't reach
// this codepath; in --apply we either prompt the user or auto-route to
// the highest-confidence candidate (when interactive=false).
/**
 * Resolve every held-back ambiguous entry/snapshot into a concrete planned write
 * (or the unassigned bucket), then clear `plan.ambiguous`.
 *
 * @param {MigrationPlan} plan - mutated in place: `writes` gains routed entries
 *   and is re-sorted; `ambiguous` is emptied on success.
 * @param {MigrationOptions} opts - `interactive` selects prompt-driven vs auto resolution.
 * @returns {Promise<MigrationPlan>} the same (mutated) plan object.
 * @throws {Error} with `code === 'USER_QUIT'` when the user answers "q" at a prompt;
 *   in that case `plan.ambiguous` is left partially resolved.
 */
async function resolveAmbiguities(plan, opts) {
  if (plan.ambiguous.length === 0) return plan;

  // Non-interactive path: auto-route every ambiguous entry to its top candidate (or unassigned if no
  // candidates were attached).
  if (!opts.interactive) {
    for (const item of plan.ambiguous) {
      _autoResolveAmbiguous(item, plan);
    }
    plan.ambiguous = [];
    return plan;
  }

  // Interactive path: walk the list one-by-one.
  const total = plan.ambiguous.length;
  // Once the user answers "a", autoMode stays on and all remaining items are
  // auto-resolved without further prompting.
  let autoMode = false;
  for (let i = 0; i < plan.ambiguous.length; i++) {
    const item = plan.ambiguous[i];
    if (autoMode) {
      _autoResolveAmbiguous(item, plan);
      continue;
    }
    const choice = await _promptAmbiguity(item, i + 1, total, opts);
    if (choice === 'q') {
      const err = new Error('user quit');
      // @ts-ignore custom code
      err.code = 'USER_QUIT';
      throw err;
    }
    if (choice === 'a') {
      autoMode = true;
      _autoResolveAmbiguous(item, plan);
      continue;
    }
    if (choice === 's') {
      _routeAmbiguousToUnassigned(item, plan);
      continue;
    }
    // numeric choice — pick candidate index (1-based)
    // @cap-decision(F-077/D4) Empty input (just Enter) and non-numeric input (parseInt → NaN)
    // must route to unassigned, NOT crash. NaN comparisons always return false,
    // so the bounds check needs an explicit Number.isNaN guard. Pre-fix: empty
    // input on prompt → TypeError on `picked.featureId` → half-applied migration
    // with backups written but no V6 files. Most common UX mistake (user hits
    // Enter without thinking) currently corrupts the migration.
    const idx = parseInt(choice, 10) - 1;
    const candidates = item.decision.candidates || [];
    // Number.isInteger rejects NaN (from empty/non-numeric input) as well as
    // out-of-range indices, satisfying the D4 guard above.
    if (!Number.isInteger(idx) || idx < 0 || idx >= candidates.length) {
      // Invalid / empty / non-numeric input → fallback to skip (route to unassigned)
      _routeAmbiguousToUnassigned(item, plan);
      continue;
    }
    const picked = candidates[idx];
    _routeAmbiguousToCandidate(item, picked, plan);
  }
  // Re-sort writes for determinism.
  plan.writes.sort((a, b) => a.destinationPath.localeCompare(b.destinationPath));
  for (const w of plan.writes) {
    w.decisions.sort(_compareEntriesByText);
    w.pitfalls.sort(_compareEntriesByText);
    w.snapshots.sort((a, b) => a.fileName.localeCompare(b.fileName));
  }
  plan.ambiguous = [];
  return plan;
}
|
|
938
|
+
|
|
939
|
+
// -------- Internals: V5 markdown parsers --------
|
|
940
|
+
|
|
941
|
+
// @cap-todo(ac:F-077/AC-1) parseV5MarkdownFile — recognizes the pipeline-emitted shape:
//   ### <a id="..."></a>Title text\n
//   - **Date:** ...\n
//   - **Files:** `path1`, `path2`\n
//   - **Confidence:** 0.50\n
//   - **Evidence:** N\n
//   - **Last Seen:** ISO\n
//   - **Pinned:** true (optional)\n
// Fields beyond the standard set are preserved in `content` for later inspection.
/**
 * Parse one V5 markdown source into structured entries. Each `###` header opens
 * an entry; its body runs until the next `###` header or end of file.
 *
 * @param {string} content - raw file text
 * @param {string} sourceFile - base name (decides the entry kind; unknown names default to 'decision')
 * @returns {V5Entry[]}
 */
function parseV5MarkdownFile(content, sourceFile) {
  const KIND_BY_SOURCE = {
    'decisions.md': 'decision',
    'pitfalls.md': 'pitfall',
    'patterns.md': 'pattern',
    'hotspots.md': 'hotspot',
  };
  const kind = KIND_BY_SOURCE[sourceFile] || 'decision';

  /** @type {V5Entry[]} */
  const entries = [];
  const rawLines = content.split(/\r?\n/);
  // H3 header, optionally carrying an `<a id="..."></a>` anchor before the title.
  const headerRe = /^###\s+(?:<a\s+id="([^"]+)"><\/a>)?\s*(.*)$/;

  let pos = 0;
  while (pos < rawLines.length) {
    const headerMatch = rawLines[pos].match(headerRe);
    if (headerMatch === null) {
      pos += 1;
      continue;
    }
    const sourceLine = pos + 1; // 1-indexed line of the H3 header
    const anchorId = headerMatch[1] || '';
    const title = (headerMatch[2] || '').trim();
    if (title === '' && anchorId === '') {
      pos += 1;
      continue;
    }

    // Accumulate body lines up to (not including) the next H3 header / EOF.
    pos += 1;
    const bodyStart = pos;
    while (pos < rawLines.length && !/^###\s+/.test(rawLines[pos])) {
      pos += 1;
    }
    const body = rawLines.slice(bodyStart, pos).join('\n');

    // Anchor-only headers (no visible title) are docstring blocks, not entries —
    // their body has already been consumed, so just move on.
    if (title === '') continue;

    /** @type {V5Entry} */
    const entry = {
      kind: /** @type {any} */ (kind),
      anchorId,
      title,
      content: body,
      sourceFile,
      sourceLine,
      dateLabel: _extractFieldFromBody(body, 'Date'),
      relatedFiles: _extractFilesFromBody(body),
      confidence: _extractConfidenceFromBody(body),
      lastSeen: _extractFieldFromBody(body, 'Last Seen'),
      taggedFeatureId: null,
      taggedPlatformTopic: null,
    };

    // dateLabel sometimes carries "(F-NNN)" — surface it as taggedFeatureId.
    const dateIds = entry.dateLabel ? _extractFeatureIdsFromText(entry.dateLabel) : [];
    if (dateIds.length === 1) {
      entry.taggedFeatureId = dateIds[0];
    }

    entries.push(entry);
  }
  return entries;
}
|
|
1017
|
+
|
|
1018
|
+
/**
 * Extract the value of a "- **Field:** value" (or "* **Field:** value") bullet
 * line from an entry body.
 * @param {string} body - Markdown body text of a single entry.
 * @param {string} fieldName - Bold field label to look for, e.g. 'Date' or 'Last Seen'.
 * @returns {string|null} The trimmed value, or null when the field is absent.
 */
function _extractFieldFromBody(body, fieldName) {
  // Escape regex metacharacters in fieldName so labels are matched literally.
  // Current callers pass plain names ('Date', 'Files', …), but an unescaped
  // interpolation would silently misbehave for any future label containing
  // '.', '(' or similar.
  const escaped = String(fieldName).replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  const re = new RegExp(`^[-*]\\s*\\*\\*${escaped}:\\*\\*\\s*(.+?)\\s*$`, 'm');
  const m = body.match(re);
  return m ? m[1].trim() : null;
}
|
|
1028
|
+
|
|
1029
|
+
/**
 * Extract repo-relative paths from a "- **Files:** `a.cjs`, `b.cjs`" line.
 * Each backtick-quoted span on the Files bullet becomes one path (trimmed).
 * @param {string} body
 * @returns {string[]} Empty when no Files bullet is present.
 */
function _extractFilesFromBody(body) {
  const filesLine = _extractFieldFromBody(body, 'Files');
  if (!filesLine) return [];
  const paths = [];
  for (const hit of filesLine.matchAll(/`([^`]+)`/g)) {
    paths.push(hit[1].trim());
  }
  return paths;
}
|
|
1045
|
+
|
|
1046
|
+
/**
 * Read the "- **Confidence:** 0.50" bullet as a number.
 * @param {string} body
 * @returns {number|null} Parsed float, or null when absent / non-numeric.
 */
function _extractConfidenceFromBody(body) {
  const raw = _extractFieldFromBody(body, 'Confidence');
  if (raw === null) return null;
  const value = parseFloat(raw);
  if (!Number.isFinite(value)) return null;
  return value;
}
|
|
1056
|
+
|
|
1057
|
+
/**
 * Collect every F-NNN feature id (three or more digits, word-bounded) found in
 * a piece of text, in order of appearance.
 * @param {string} text
 * @returns {string[]} Empty for falsy input or no matches.
 */
function _extractFeatureIdsFromText(text) {
  if (!text) return [];
  return text.match(/\bF-\d{3,}\b/g) || [];
}
|
|
1069
|
+
|
|
1070
|
+
// -------- graph.json parsing --------
|
|
1071
|
+
|
|
1072
|
+
/**
 * Parse a graph.json payload into V5 entries keyed by anchor id. Node ids are
 * of the form "<kind>-<anchor>"; unrecognized kinds fall back to 'decision'.
 * Hotspot entries are additionally collected in hotspotsWithoutMarkdown, since
 * hotspots have no markdown counterpart. A JSON parse failure yields empty
 * results rather than throwing.
 * @param {string} raw
 * @returns {{ byAnchor: Map<string, V5Entry>, hotspotsWithoutMarkdown: V5Entry[] }}
 */
function parseGraphJson(raw) {
  /** @type {Map<string, V5Entry>} */
  const byAnchor = new Map();
  /** @type {V5Entry[]} */
  const hotspotsWithoutMarkdown = [];
  let doc;
  try {
    doc = JSON.parse(raw);
  } catch (_e) {
    // Malformed graph.json — degrade to empty results.
    return { byAnchor, hotspotsWithoutMarkdown };
  }
  const nodes = (doc && doc.nodes) || {};
  for (const [id, node] of Object.entries(nodes)) {
    if (!node || typeof node !== 'object') continue;
    const meta = node.metadata || {};
    const anchorId = id.replace(/^(decision|pitfall|pattern|hotspot)-/, '');
    const prefix = id.split('-')[0];
    const kind = ['decision', 'pitfall', 'pattern', 'hotspot'].includes(prefix) ? prefix : 'decision';
    // Feature id: explicit metadata fields win; otherwise scan the joined
    // related-file list for an embedded F-NNN.
    const scanPool = [meta.feature, meta.featureId];
    if (Array.isArray(meta.relatedFiles)) scanPool.push(meta.relatedFiles.join(' '));
    const taggedFeatureId = _firstFeatureIdFromList(scanPool);
    /** @type {V5Entry} */
    const entry = {
      kind: /** @type {any} */ (kind),
      anchorId,
      title: node.label || '',
      content: '',
      sourceFile: 'graph.json',
      sourceLine: 0,
      dateLabel: meta.source || null,
      relatedFiles: Array.isArray(meta.relatedFiles) ? meta.relatedFiles.slice() : (meta.file ? [meta.file] : []),
      confidence: typeof meta.confidence === 'number' ? meta.confidence : null,
      lastSeen: node.updatedAt || node.createdAt || null,
      taggedFeatureId,
      taggedPlatformTopic: meta.platform || null,
    };
    byAnchor.set(anchorId, entry);
    if (entry.kind === 'hotspot') hotspotsWithoutMarkdown.push(entry);
  }
  return { byAnchor, hotspotsWithoutMarkdown };
}
|
|
1119
|
+
|
|
1120
|
+
/**
 * Return the first F-NNN feature id found across a list of candidate strings.
 * Non-string candidates are ignored.
 * @param {Array<any>} list
 * @returns {string|null} The first id found, or null when none of the
 *   candidates contains one.
 */
function _firstFeatureIdFromList(list) {
  for (const candidate of list) {
    if (typeof candidate !== 'string') continue;
    const [first] = _extractFeatureIdsFromText(candidate);
    if (first !== undefined) return first;
  }
  return null;
}
|
|
1132
|
+
|
|
1133
|
+
/**
 * Enrich markdown-parsed entries with metadata from graph.json (tagged feature
 * id, platform topic, lastSeen, related files). Markdown values always win:
 * graph data only fills fields the markdown left empty. Mutates entries in place.
 * @param {V5Entry[]} entries
 * @param {{ byAnchor: Map<string, V5Entry> }} graph
 */
function _enrichEntriesFromGraph(entries, graph) {
  for (const target of entries) {
    if (!target.anchorId) continue;
    const source = graph.byAnchor.get(target.anchorId);
    if (!source) continue;
    if (!target.taggedFeatureId && source.taggedFeatureId) {
      target.taggedFeatureId = source.taggedFeatureId;
    }
    if (!target.taggedPlatformTopic && source.taggedPlatformTopic) {
      target.taggedPlatformTopic = source.taggedPlatformTopic;
    }
    if (!target.lastSeen && source.lastSeen) {
      target.lastSeen = source.lastSeen;
    }
    const hasFiles = target.relatedFiles && target.relatedFiles.length > 0;
    if (!hasFiles && source.relatedFiles && source.relatedFiles.length > 0) {
      // Copy so later mutation of the entry never aliases graph state.
      target.relatedFiles = source.relatedFiles.slice();
    }
  }
}
|
|
1151
|
+
|
|
1152
|
+
// -------- Snapshot parsing --------
|
|
1153
|
+
|
|
1154
|
+
/**
 * Parse a snapshot markdown file into a V5Snapshot record. Reads a minimal
 * front-matter subset (`feature:` and `date:` keys only), takes the first H1
 * as the title (falling back to the file name without .md), and fingerprints
 * the raw content for change detection.
 * @param {string} fileName
 * @param {string} sourcePath
 * @param {string} raw
 * @returns {V5Snapshot}
 */
function parseSnapshot(fileName, sourcePath, raw) {
  let feature = null;
  let date = null;
  const fm = raw.match(/^---\r?\n([\s\S]*?)\r?\n---/);
  if (fm) {
    for (const fmLine of fm[1].split(/\r?\n/)) {
      const kv = fmLine.match(/^([a-zA-Z_][\w-]*):\s*(.*)$/);
      if (!kv) continue;
      // Strip one layer of surrounding quotes, then whitespace.
      const value = (kv[2] || '').replace(/^["']|["']$/g, '').trim();
      if (kv[1] === 'feature') feature = value;
      if (kv[1] === 'date') date = value;
    }
  }
  // Title: first H1 anywhere in the document wins.
  const h1 = raw.match(/^#\s+(.+?)\s*$/m);
  const title = h1 ? h1[1] : fileName.replace(/\.md$/, '');

  return {
    fileName,
    sourcePath,
    // Only a well-formed F-NNN id is surfaced; anything else becomes null.
    feature: feature && /^F-\d{3,}$/.test(feature) ? feature : null,
    date,
    title,
    bodyHash: _shortHash(raw),
  };
}
|
|
1190
|
+
|
|
1191
|
+
/**
 * Hash a string to an 8-character lowercase hex digest.
 * FNV-1a, 32-bit: xor each UTF-16 code unit into the accumulator, then
 * multiply by the FNV prime (16777619) expressed as shift-adds. node:crypto is
 * deliberately avoided to keep the tool dependency-free.
 * @param {string} s
 * @returns {string} 8 hex characters, zero-padded.
 */
function _shortHash(s) {
  let acc = 0x811c9dc5; // FNV-1a 32-bit offset basis
  for (let idx = 0; idx < s.length; idx += 1) {
    acc ^= s.charCodeAt(idx);
    // acc * 16777619, decomposed as 1 + 2 + 16 + 128 + 256 + 2^24.
    const scaled = (acc << 1) + (acc << 4) + (acc << 7) + (acc << 8) + (acc << 24);
    acc = (acc + scaled) & 0xffffffff;
  }
  return (acc >>> 0).toString(16).padStart(8, '0');
}
|
|
1203
|
+
|
|
1204
|
+
// -------- Classifier context --------
|
|
1205
|
+
|
|
1206
|
+
/**
 * Build the classification context used to route V5 entries to features:
 * reads FEATURE-MAP data (safe mode), resolves sub-app path prefixes, and
 * scans source code for @cap-feature tags as a fallback reverse index.
 * All failure paths degrade to empty maps rather than throwing.
 * @param {string} projectRoot
 * @returns {ClassifierContext}
 */
function buildClassifierContext(projectRoot) {
  // @cap-todo(ac:F-081/AC-4 iter:2) Migrated to {safe: true} opt-in to preserve CLI on duplicate-ID FEATURE-MAP.
  // @cap-decision(F-081/iter2) Bail on parseError — do not persist partial enrichment (memory-migrate is a write-back path).
  const map = readFeatureMap(projectRoot, null, { safe: true });
  if (map && map.parseError) {
    console.warn('cap: memory-migrate — duplicate feature ID detected, classifier context uses empty feature set: ' + String(map.parseError.message).trim());
    // NOTE(review): this early return omits `sourceFileToFeatureId`, while the
    // normal return below includes it — confirm callers tolerate the missing key.
    return { features: [], fileToFeatureId: new Map(), featureState: new Map(), featureToSubApp: new Map() };
  }
  // @cap-todo(ac:F-082/AC-4) Capture the runtime-only `metadata.subApp` propagated by the
  // F-082 aggregator. The path-heuristic boost below uses this to lift confidence when a
  // V5 entry's relatedFiles match a feature's sub-app prefix even if the explicit file
  // listing in the sub-app FEATURE-MAP doesn't include the file.
  // @cap-todo(ac:F-082/AC-4) Build a sub-app prefix map so features from `apps/web/FEATURE-MAP.md`
  // whose file list uses sub-app-relative paths (`src/auth/login.tsx`) still match V5 entries
  // whose `relatedFiles` use repo-absolute paths (`apps/web/src/auth/login.tsx`).
  // @cap-decision(F-082/AC-4) Need a reverse-index from (subApp slug) → (apps/web | packages/shared
  // prefix). The aggregator only carries the slug; we recover the full prefix from the Rescoped
  // Table (parseRescopedTable) so file-list paths can be re-anchored on read.
  const featureMapModule = require('./cap-feature-map.cjs');
  /** @type {Map<string, string>} subApp slug -> sub-app relative prefix (e.g. "web" -> "apps/web") */
  const subAppPrefixes = new Map();
  try {
    // Primary prefix source: the Rescoped Table of the root FEATURE-MAP.md.
    const rootContent = fs.readFileSync(path.join(projectRoot, 'FEATURE-MAP.md'), 'utf8');
    for (const entry of featureMapModule.parseRescopedTable(rootContent)) {
      // Last path segment is the slug: "apps/web" -> "web".
      const slug = entry.appPath.split('/').pop();
      if (slug) subAppPrefixes.set(slug, entry.appPath);
    }
  } catch (_e) {
    // No root file or unreadable — sub-app prefixes empty; legacy file-list match still works.
  }
  // Also include opt-in directory-walk targets if config says discover=auto.
  // Rescoped-table prefixes win on slug collision (we only set when absent).
  try {
    const cfg = featureMapModule.readCapConfig(projectRoot);
    if (cfg && cfg.featureMaps && cfg.featureMaps.discover === 'auto') {
      for (const entry of featureMapModule.discoverSubAppFeatureMaps(projectRoot)) {
        const slug = entry.appPath.split('/').pop();
        if (slug && !subAppPrefixes.has(slug)) subAppPrefixes.set(slug, entry.appPath);
      }
    }
  } catch (_e) { /* ignore */ }

  // Project the feature map down to the fields the classifier needs.
  const features = (map.features || []).map((f) => ({
    id: f.id,
    title: f.title,
    files: f.files || [],
    subApp: f && f.metadata ? f.metadata.subApp || null : null,
  }));
  const fileToFeatureId = new Map();
  const featureState = new Map();
  /** @type {Map<string, string>} F-NNN -> subApp slug */
  const featureToSubApp = new Map();
  for (const f of features) {
    const prefix = f.subApp ? subAppPrefixes.get(f.subApp) : null;
    for (const file of f.files) {
      const normalized = _normalizeRepoPath(file);
      fileToFeatureId.set(normalized, f.id);
      // @cap-todo(ac:F-082/AC-4) When the feature comes from a sub-app and its file path is
      // sub-app-relative (does not start with "apps/" or "packages/"), also index the
      // prefixed version so V5 entries with repo-absolute paths still match.
      if (prefix && !/^(apps|packages)\//.test(normalized)) {
        const prefixed = `${prefix}/${normalized}`;
        // First-write-wins: never shadow an existing explicit mapping.
        if (!fileToFeatureId.has(prefixed)) fileToFeatureId.set(prefixed, f.id);
      }
    }
    if (f.subApp) featureToSubApp.set(f.id, f.subApp);
    // featureState — use the lastScan as a proxy since FEATURE-MAP doesn't carry per-feature
    // transitionAt today. The date-proximity heuristic falls back to lastScan when no per-
    // feature timestamp is available.
    // NOTE(review): the inner `map.features.find(...)` re-scans the full feature
    // list per iteration (O(n²)); `features` entries drop `state`, hence the re-find.
    featureState.set(f.id, { state: /** @type {any} */ (map.features.find((m) => m.id === f.id) || {}).state || 'planned', transitionAt: map.lastScan || null });
  }

  // @cap-feature(feature:F-077, primary:true) AC-8 — Code-Tag Reverse-Index. Scan the project's
  // source-code for @cap-feature(feature:F-XXX) tags and build sourceFileToFeatureId so
  // classifyEntry has a fallback when FEATURE-MAP key_files lists are sparse or missing
  // (real-world hub case: 0/183 features have **Files:** sections, so the original key_files
  // heuristic could never match). Errors degrade gracefully — empty map preserves legacy behavior.
  let sourceFileToFeatureId = new Map();
  try {
    const scanner = require('./cap-tag-scanner.cjs');
    const tags = scanner.scanDirectory(projectRoot, { projectRoot });
    for (const t of tags) {
      if (t.type !== 'feature') continue;
      const fid = t.metadata && t.metadata.feature ? t.metadata.feature : null;
      if (!fid || !t.file) continue;
      const normalized = _normalizeRepoPath(t.file);
      // Don't override an explicit FEATURE-MAP key_files mapping — that is higher-trust.
      // First-write-wins for code-tag reverse: if a file has multiple @cap-feature tags
      // pointing at different features, the first one observed wins. This matches the
      // "primary" convention where the first @cap-feature in a file is canonical.
      if (!sourceFileToFeatureId.has(normalized)) {
        sourceFileToFeatureId.set(normalized, fid);
      }
    }
  } catch (_e) {
    // Scanner failure is non-fatal — leave map empty so classifier behaves as pre-AC-8.
    sourceFileToFeatureId = new Map();
  }

  return { features, fileToFeatureId, featureState, featureToSubApp, sourceFileToFeatureId };
}
|
|
1310
|
+
|
|
1311
|
+
/**
 * Normalize a repo path for map lookups: forward slashes, no leading "./",
 * no leading ".claude/".
 * @param {string} p - Path (may be null/undefined; coerced to '').
 * @returns {string}
 */
function _normalizeRepoPath(p) {
  // Convert Windows separators FIRST so a leading ".\" (or ".claude\") is also
  // caught by the prefix strips below — the previous ordering stripped "./"
  // before the backslash conversion and so missed Windows-style inputs.
  return String(p || '')
    .replace(/\\/g, '/')
    .replace(/^\.\//, '')
    .replace(/^\.claude\//, '');
}
|
|
1317
|
+
|
|
1318
|
+
/**
 * Derive a topic slug for a feature id: slugified feature title when the
 * feature is known in the context, slugified id otherwise.
 * @param {string} fid
 * @param {ClassifierContext} ctx
 * @returns {string}
 */
function _topicForFeature(fid, ctx) {
  const feature = ctx.features.find((candidate) => candidate.id === fid);
  return feature ? _slugify(feature.title) : _slugify(fid);
}
|
|
1328
|
+
|
|
1329
|
+
/**
 * Turn arbitrary text into a filesystem-safe slug: lowercase, a leading
 * "f-NN:"-style feature prefix removed, non-alphanumerics collapsed to single
 * dashes, edge dashes trimmed, capped at 60 chars. Empty results become 'topic'.
 * @param {string} s
 * @returns {string}
 */
function _slugify(s) {
  const slug = String(s || '')
    .toLowerCase()
    .replace(/^f-\d+\s*[:\-]?\s*/i, '') // drop "f-12: " style prefixes
    .replace(/[^a-z0-9]+/g, '-')
    .replace(/^-+|-+$/g, '')
    .slice(0, 60);
  return slug.length > 0 ? slug : 'topic';
}
|
|
1340
|
+
|
|
1341
|
+
// -------- Routing helpers --------
|
|
1342
|
+
|
|
1343
|
+
/**
 * Route a classified V5 entry into the planned write for its destination file.
 * Feature decisions go to `<featuresDir>/<F-NNN>-<topic>.md`, platform
 * decisions to `<platformDir>/<topic>.md`, and everything else to the shared
 * unassigned platform bucket (also recorded on plan.unassigned).
 * @param {V5Entry} entry
 * @param {ClassificationDecision} decision
 * @param {string} projectRoot
 * @param {string} featuresDir
 * @param {string} platformDir
 * @param {(key: string, build: () => PlannedWrite) => PlannedWrite} ensureWrite
 * @param {MigrationPlan} plan
 */
function _routeEntryToWrite(entry, decision, projectRoot, featuresDir, platformDir, ensureWrite, plan) {
  // Pitfalls and everything else land in separate buckets of the write.
  const bucket = (write) => {
    if (entry.kind === 'pitfall') {
      write.pitfalls.push(entry);
    } else {
      write.decisions.push(entry);
    }
  };

  if (decision.destination === 'feature' && decision.featureId) {
    const topic = decision.topic || _slugify(decision.featureId);
    const dest = path.join(featuresDir, `${decision.featureId}-${topic}.md`);
    bucket(ensureWrite(dest, () => /** @type {PlannedWrite} */ ({
      destinationPath: dest,
      destinationKind: 'feature',
      featureId: decision.featureId,
      topic,
      decisions: [],
      pitfalls: [],
      snapshots: [],
    })));
    return;
  }

  if (decision.destination === 'platform' && decision.topic) {
    const dest = path.join(platformDir, `${decision.topic}.md`);
    bucket(ensureWrite(dest, () => /** @type {PlannedWrite} */ ({
      destinationPath: dest,
      destinationKind: 'platform',
      topic: decision.topic,
      decisions: [],
      pitfalls: [],
      snapshots: [],
    })));
    return;
  }

  // Unassigned bucket.
  const dest = path.join(platformDir, `${UNASSIGNED_PLATFORM_TOPIC}.md`);
  bucket(ensureWrite(dest, () => /** @type {PlannedWrite} */ ({
    destinationPath: dest,
    destinationKind: 'platform',
    topic: UNASSIGNED_PLATFORM_TOPIC,
    decisions: [],
    pitfalls: [],
    snapshots: [],
  })));
  plan.unassigned.push({ entry, kind: entry.kind });
}
|
|
1397
|
+
|
|
1398
|
+
/**
 * Route a snapshot into the planned write for its destination. Snapshots tied
 * to a feature join that feature's write file; everything else goes to the
 * dedicated unassigned-snapshots platform bucket (also recorded on
 * plan.unassigned).
 * @param {V5Snapshot} snap
 * @param {ClassificationDecision} decision
 * @param {string} projectRoot
 * @param {string} featuresDir
 * @param {string} platformDir
 * @param {(key: string, build: () => PlannedWrite) => PlannedWrite} ensureWrite
 * @param {MigrationPlan} plan
 */
function _routeSnapshotToWrite(snap, decision, projectRoot, featuresDir, platformDir, ensureWrite, plan) {
  if (decision.destination === 'feature' && decision.featureId) {
    const topic = decision.topic || _slugify(decision.featureId);
    const dest = path.join(featuresDir, `${decision.featureId}-${topic}.md`);
    const write = ensureWrite(dest, () => /** @type {PlannedWrite} */ ({
      destinationPath: dest,
      destinationKind: 'feature',
      featureId: decision.featureId,
      topic,
      decisions: [],
      pitfalls: [],
      snapshots: [],
    }));
    write.snapshots.push(snap);
    return;
  }
  // Unassigned snapshots bucket.
  const dest = path.join(platformDir, `${UNASSIGNED_SNAPSHOTS_TOPIC}.md`);
  const write = ensureWrite(dest, () => /** @type {PlannedWrite} */ ({
    destinationPath: dest,
    destinationKind: 'platform',
    topic: UNASSIGNED_SNAPSHOTS_TOPIC,
    decisions: [],
    pitfalls: [],
    snapshots: [],
  }));
  write.snapshots.push(snap);
  plan.unassigned.push({ entry: snap, kind: 'snapshot' });
}
|
|
1436
|
+
|
|
1437
|
+
/**
 * Resolve an ambiguous classification automatically: pick the candidate with
 * the highest confidence, or fall back to the unassigned bucket when no
 * candidates exist. Ties keep the first-listed candidate.
 * @param {{ entry: V5Entry|V5Snapshot, decision: ClassificationDecision, kind: string }} item
 * @param {MigrationPlan} plan
 */
function _autoResolveAmbiguous(item, plan) {
  const candidates = item.decision.candidates || [];
  if (candidates.length === 0) {
    _routeAmbiguousToUnassigned(item, plan);
    return;
  }
  // Strictly-greater comparison keeps the earliest candidate on ties, matching
  // a stable descending sort.
  let best = candidates[0];
  for (const candidate of candidates) {
    if (candidate.confidence > best.confidence) best = candidate;
  }
  _routeAmbiguousToCandidate(item, best, plan);
}
|
|
1450
|
+
|
|
1451
|
+
/**
 * Route an ambiguous item to the appropriate unassigned bucket by replaying it
 * through the standard routing helpers with a synthetic 'unassigned' decision.
 * Entries go to the unassigned platform topic, snapshots to the unassigned
 * snapshots topic.
 * @param {{ entry: V5Entry|V5Snapshot, decision: ClassificationDecision, kind: string }} item
 * @param {MigrationPlan} plan
 */
function _routeAmbiguousToUnassigned(item, plan) {
  const isSnapshot = item.kind === 'snapshot';
  // (Removed: a `dest` local was computed here but never used — the routing
  // helpers derive the destination themselves from the synthetic decision.)
  // Reconstruct routing context from the plan; fall back to cwd when the plan
  // holds no writes yet.
  const root = _getProjectRootFromPlan(plan) || process.cwd();
  const featuresDir = path.join(root, '.cap', 'memory', 'features');
  const platformDir = path.join(root, '.cap', 'memory', 'platform');
  const ensureWrite = _makeEnsureWrite(plan);
  const synth = /** @type {ClassificationDecision} */ ({
    destination: 'unassigned',
    topic: isSnapshot ? UNASSIGNED_SNAPSHOTS_TOPIC : UNASSIGNED_PLATFORM_TOPIC,
    confidence: 0,
    reasons: ['user-skip'],
  });
  if (isSnapshot) {
    _routeSnapshotToWrite(/** @type {V5Snapshot} */ (item.entry), synth, root, featuresDir, platformDir, ensureWrite, plan);
  } else {
    _routeEntryToWrite(/** @type {V5Entry} */ (item.entry), synth, root, featuresDir, platformDir, ensureWrite, plan);
  }
}
|
|
1476
|
+
|
|
1477
|
+
/**
 * Route an ambiguous item to a user-picked (or auto-picked) candidate by
 * replaying it through the standard routing helpers with a synthetic decision
 * built from the pick.
 * @param {{ entry: V5Entry|V5Snapshot, decision: ClassificationDecision, kind: string }} item
 * @param {{ featureId?: string, topic?: string, confidence: number, reason: string }} picked
 * @param {MigrationPlan} plan
 */
function _routeAmbiguousToCandidate(item, picked, plan) {
  const root = _getProjectRootFromPlan(plan) || process.cwd();
  const featuresDir = path.join(root, '.cap', 'memory', 'features');
  const platformDir = path.join(root, '.cap', 'memory', 'platform');
  const ensureWrite = _makeEnsureWrite(plan);
  // A candidate with a featureId routes to that feature; otherwise it is a
  // platform topic.
  const synth = /** @type {ClassificationDecision} */ ({
    destination: picked.featureId ? 'feature' : 'platform',
    featureId: picked.featureId,
    topic: picked.topic,
    confidence: picked.confidence,
    reasons: ['user-pick'],
  });
  if (item.kind === 'snapshot') {
    _routeSnapshotToWrite(/** @type {V5Snapshot} */ (item.entry), synth, root, featuresDir, platformDir, ensureWrite, plan);
    return;
  }
  _routeEntryToWrite(/** @type {V5Entry} */ (item.entry), synth, root, featuresDir, platformDir, ensureWrite, plan);
}
|
|
1500
|
+
|
|
1501
|
+
/**
 * Reverse-engineer the project root from an existing planned write — used by
 * the ambiguity resolver when re-routing without re-passing context.
 * destinationPath has the shape <root>/.cap/memory/(features|platform)/<file>.md,
 * so the root is four dirname steps above the file.
 * @param {MigrationPlan} plan
 * @returns {string|null} null when the plan holds no writes yet.
 */
function _getProjectRootFromPlan(plan) {
  const writes = plan.writes;
  if (!writes || writes.length === 0) return null;
  // Step up: <file>.md -> (features|platform) -> memory -> .cap -> <root>.
  let cursor = path.dirname(writes[0].destinationPath);
  for (let step = 0; step < 3; step += 1) {
    cursor = path.dirname(cursor);
  }
  return cursor;
}
|
|
1515
|
+
|
|
1516
|
+
/**
 * Create a memoized accessor over plan.writes keyed by destinationPath:
 * returns the existing PlannedWrite for a key, or builds, indexes and appends
 * a new one.
 * @param {MigrationPlan} plan
 * @returns {(key: string, build: () => PlannedWrite) => PlannedWrite}
 */
function _makeEnsureWrite(plan) {
  /** @type {Map<string, PlannedWrite>} */
  const byDest = new Map(plan.writes.map((write) => [write.destinationPath, write]));
  return (key, build) => {
    const existing = byDest.get(key);
    if (existing !== undefined) return existing;
    const created = build();
    byDest.set(key, created);
    plan.writes.push(created);
    return created;
  };
}
|
|
1531
|
+
|
|
1532
|
+
// -------- Atomic-write contract (AC-2) --------
|
|
1533
|
+
|
|
1534
|
+
// @cap-todo(ac:F-077/AC-2) _atomicWriteFile is the SINGLE choke point for any write into the
|
|
1535
|
+
// .cap/memory/features/ or .cap/memory/platform/ tree (and the report/backup dirs). Mirrors
|
|
1536
|
+
// F-074/D8: writeFileSync to <path>.tmp, then renameSync. Best-effort cleanup on rename failure
|
|
1537
|
+
// so no orphan .tmp lingers in the destination dir.
|
|
1538
|
+
// @cap-risk(F-077/AC-2) This is the atomic-write choke point — every write into the V6 layout
|
|
1539
|
+
// MUST go through this function; bypass it and the migration becomes non-idempotent and a
|
|
1540
|
+
// crash mid-write can leave a partial file that breaks the F-076 schema validator.
|
|
1541
|
+
/**
 * Atomically write a UTF-8 file: write to a sibling `<path>.tmp`, then rename
 * over the destination. Creates parent directories as needed. On failure the
 * temp file is removed best-effort and the original error is rethrown.
 * @param {string} fp - Destination file path.
 * @param {string} content - File content.
 */
function _atomicWriteFile(fp, content) {
  fs.mkdirSync(path.dirname(fp), { recursive: true });
  // Stable .tmp suffix (not random): a stale temp file left by a previous
  // crash is simply overwritten on retry, and the renameSync below remains the
  // single atomic step. Random suffixes would accumulate orphans across
  // crashes in this one-shot tool.
  const tmpPath = `${fp}.tmp`;
  try {
    fs.writeFileSync(tmpPath, content, 'utf8');
    fs.renameSync(tmpPath, fp);
  } catch (err) {
    // Best-effort cleanup — leave no .tmp orphan.
    try {
      fs.unlinkSync(tmpPath);
    } catch (_cleanupErr) {
      /* ignore */
    }
    throw err;
  }
}
|
|
1562
|
+
|
|
1563
|
+
/**
 * Render a planned write and persist it atomically. Skips the write entirely
 * when the destination already holds byte-identical content, so re-running the
 * migration over the same input never mutates files (AC-2 idempotency) and
 * produces no git diff.
 * @param {PlannedWrite} write
 * @param {number} now - Timestamp (ms) used for the rendered `updated` field.
 * @returns {boolean}
 */
function _writePlannedFile(write, now) {
  const rendered = renderPlannedWrite(write, now);
  if (fs.existsSync(write.destinationPath)) {
    try {
      const current = fs.readFileSync(write.destinationPath, 'utf8');
      if (current === rendered) return true;
    } catch (_e) {
      // Unreadable existing file — fall through and overwrite atomically.
    }
  }
  _atomicWriteFile(write.destinationPath, rendered);
  return true;
}
|
|
1584
|
+
|
|
1585
|
+
// @cap-todo(ac:F-077/AC-3) _writeBackup — idempotent on same-day. If destination exists, skip.
|
|
1586
|
+
// Cross-day re-run produces a new dated archive file.
|
|
1587
|
+
// @cap-risk(F-077/AC-3) Backup writes must NEVER overwrite a same-day backup with materially
|
|
1588
|
+
// different content — that would erase audit trail. Implementation: if same-day archive
|
|
1589
|
+
// already exists, treat as already-archived and skip. This is safe because the only path to
|
|
1590
|
+
// "same-day backup exists" is a prior successful migration earlier today.
|
|
1591
|
+
/**
 * Copy `from` to `to` atomically, once. A missing source is a no-op; an
 * existing destination means a backup was already taken (same-day idempotency)
 * and is never overwritten, preserving the audit trail.
 * @param {string} from
 * @param {string} to
 * @returns {boolean} true if a new backup was written
 */
function _writeBackup(from, to) {
  if (!fs.existsSync(from)) return false;
  if (fs.existsSync(to)) return false; // idempotent same-day skip
  const payload = fs.readFileSync(from, 'utf8');
  _atomicWriteFile(to, payload);
  return true;
}
|
|
1603
|
+
|
|
1604
|
+
// -------- Rendering: PlannedWrite -> markdown content --------
|
|
1605
|
+
|
|
1606
|
+
/**
|
|
1607
|
+
* @param {PlannedWrite} write
|
|
1608
|
+
* @param {number} now
|
|
1609
|
+
* @returns {string}
|
|
1610
|
+
*/
|
|
1611
|
+
function renderPlannedWrite(write, now) {
|
|
1612
|
+
const updated = new Date(now).toISOString();
|
|
1613
|
+
/** @type {Object} */
|
|
1614
|
+
const fm = { updated };
|
|
1615
|
+
if (write.destinationKind === 'feature' && write.featureId) {
|
|
1616
|
+
fm.feature = write.featureId;
|
|
1617
|
+
fm.topic = write.topic || _slugify(write.featureId);
|
|
1618
|
+
// key_files derived from union of related-files across all entries (deduped, sorted).
|
|
1619
|
+
const files = new Set();
|
|
1620
|
+
for (const e of [...write.decisions, ...write.pitfalls]) {
|
|
1621
|
+
for (const f of e.relatedFiles || []) files.add(_normalizeRepoPath(f));
|
|
1622
|
+
}
|
|
1623
|
+
if (files.size > 0) {
|
|
1624
|
+
fm.key_files = [...files].sort();
|
|
1625
|
+
}
|
|
1626
|
+
} else {
|
|
1627
|
+
// Platform topic — synthetic "feature" of the form F-000 is not allowed by the schema
|
|
1628
|
+
// because the schema requires a real F-NNN. We still write the file to .cap/memory/platform/
|
|
1629
|
+
// but its contents are valid V6: the platform layer (F-078) will refine the schema. For now
|
|
1630
|
+
// we use a simplified header without F-NNN — the file is parseable by humans, and F-078
|
|
1631
|
+
// will land a stricter platform schema later.
|
|
1632
|
+
fm.topic = write.topic || 'topic';
|
|
1633
|
+
}
|
|
1634
|
+
|
|
1635
|
+
// Render front-matter manually since we don't always have a full FrontMatter struct.
|
|
1636
|
+
const fmLines = [];
|
|
1637
|
+
fmLines.push('---');
|
|
1638
|
+
if (fm.feature) fmLines.push(`feature: ${fm.feature}`);
|
|
1639
|
+
if (fm.topic) fmLines.push(`topic: ${fm.topic}`);
|
|
1640
|
+
fmLines.push(`updated: ${fm.updated}`);
|
|
1641
|
+
if (fm.key_files && fm.key_files.length > 0) {
|
|
1642
|
+
fmLines.push(`key_files: [${fm.key_files.join(', ')}]`);
|
|
1643
|
+
}
|
|
1644
|
+
fmLines.push('---');
|
|
1645
|
+
const fmText = fmLines.join('\n') + '\n';
|
|
1646
|
+
|
|
1647
|
+
// Title
|
|
1648
|
+
const titleLine = write.destinationKind === 'feature' && write.featureId
|
|
1649
|
+
? `# ${write.featureId}: ${(write.topic || '').replace(/-/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase())}`
|
|
1650
|
+
: `# Platform: ${(write.topic || '').replace(/-/g, ' ').replace(/\b\w/g, (c) => c.toUpperCase())}`;
|
|
1651
|
+
|
|
1652
|
+
// Auto-block (uses F-076 markers).
|
|
1653
|
+
const autoBlock = {
|
|
1654
|
+
decisions: write.decisions.map((e) => ({
|
|
1655
|
+
text: e.title,
|
|
1656
|
+
location: _formatLocation(e),
|
|
1657
|
+
})),
|
|
1658
|
+
pitfalls: write.pitfalls.map((e) => ({
|
|
1659
|
+
text: e.title,
|
|
1660
|
+
location: _formatLocation(e),
|
|
1661
|
+
})),
|
|
1662
|
+
};
|
|
1663
|
+
// Build via schema's serializer for consistency. We construct a minimal
|
|
1664
|
+
// FeatureMemoryFile-shaped object and let serializeFeatureMemoryFile render the auto-block.
|
|
1665
|
+
const file = {
|
|
1666
|
+
frontmatter: write.destinationKind === 'feature'
|
|
1667
|
+
? { feature: write.featureId, topic: fm.topic, updated: fm.updated, key_files: fm.key_files || undefined }
|
|
1668
|
+
: { feature: 'F-000', topic: fm.topic, updated: fm.updated }, // schema requires `feature`; we'll override below
|
|
1669
|
+
autoBlock,
|
|
1670
|
+
manualBlock: { raw: '' },
|
|
1671
|
+
};
|
|
1672
|
+
|
|
1673
|
+
// Render auto-block body via schema's renderer (so markers stay in lock-step with F-076).
|
|
1674
|
+
const autoBody = `${schema.AUTO_BLOCK_START_MARKER}\n${_renderAutoBlockBody(autoBlock)}\n${schema.AUTO_BLOCK_END_MARKER}`;
|
|
1675
|
+
|
|
1676
|
+
// Snapshots section (manual block).
|
|
1677
|
+
const snapshotLines = [];
|
|
1678
|
+
if (write.snapshots.length > 0) {
|
|
1679
|
+
snapshotLines.push('');
|
|
1680
|
+
snapshotLines.push('## Linked Snapshots');
|
|
1681
|
+
snapshotLines.push('');
|
|
1682
|
+
for (const s of write.snapshots) {
|
|
1683
|
+
const dateLabel = s.date ? ` (${s.date})` : '';
|
|
1684
|
+
snapshotLines.push(`- [${s.title}](.cap/snapshots/${s.fileName})${dateLabel}`);
|
|
1685
|
+
}
|
|
1686
|
+
}
|
|
1687
|
+
|
|
1688
|
+
// Lessons placeholder (manual region — empty by default; users fill in by hand).
|
|
1689
|
+
const manualParts = [
|
|
1690
|
+
'',
|
|
1691
|
+
'## Lessons',
|
|
1692
|
+
'',
|
|
1693
|
+
'<!-- Manual lessons go here. The auto-block above is regenerated by the memory pipeline. -->',
|
|
1694
|
+
'',
|
|
1695
|
+
...snapshotLines,
|
|
1696
|
+
];
|
|
1697
|
+
|
|
1698
|
+
// For platform files, drop the `feature:` line from the front-matter — the schema validator
|
|
1699
|
+
// will complain (F-078 will redefine), but the platform writer is allowed to omit it because
|
|
1700
|
+
// platform files are NOT feature files. The header row above already excludes `feature:` for
|
|
1701
|
+
// platform writes via the conditional in fmLines.
|
|
1702
|
+
return `${fmText}\n${titleLine}\n\n${autoBody}\n${manualParts.join('\n')}`;
|
|
1703
|
+
}
|
|
1704
|
+
|
|
1705
|
+
/**
|
|
1706
|
+
* Custom auto-block body renderer — mirrors F-076 schema's renderAutoBlockBody but accepts the
|
|
1707
|
+
* raw decisions/pitfalls items and avoids the trailing-blank-line oddity for empty sections.
|
|
1708
|
+
* @param {{decisions: Array<{text:string,location:string}>, pitfalls: Array<{text:string,location:string}>}} block
|
|
1709
|
+
*/
|
|
1710
|
+
function _renderAutoBlockBody(block) {
|
|
1711
|
+
const parts = [];
|
|
1712
|
+
if (block.decisions.length > 0) {
|
|
1713
|
+
parts.push('## Decisions (from tags)');
|
|
1714
|
+
for (const d of block.decisions) {
|
|
1715
|
+
const loc = d.location ? ` — \`${d.location}\`` : '';
|
|
1716
|
+
parts.push(`- ${d.text}${loc}`);
|
|
1717
|
+
}
|
|
1718
|
+
}
|
|
1719
|
+
if (block.pitfalls.length > 0) {
|
|
1720
|
+
if (parts.length > 0) parts.push('');
|
|
1721
|
+
parts.push('## Pitfalls (from tags)');
|
|
1722
|
+
for (const p of block.pitfalls) {
|
|
1723
|
+
const loc = p.location ? ` — \`${p.location}\`` : '';
|
|
1724
|
+
parts.push(`- ${p.text}${loc}`);
|
|
1725
|
+
}
|
|
1726
|
+
}
|
|
1727
|
+
return parts.length > 0 ? parts.join('\n') : '';
|
|
1728
|
+
}
|
|
1729
|
+
|
|
1730
|
+
/**
 * Produce a human-readable location label for a migrated entry.
 * Prefers the first related file (normalized to a repo-relative path), suffixed with
 * `:line` when a positive source line is known; otherwise falls back to the raw
 * source file name, or '' when nothing is available.
 * @param {V5Entry} entry
 * @returns {string}
 */
function _formatLocation(entry) {
  const related = entry.relatedFiles;
  if (!related || related.length === 0) {
    return entry.sourceFile || '';
  }
  const repoPath = _normalizeRepoPath(related[0]);
  if (entry.sourceLine > 0) {
    return `${repoPath}:${entry.sourceLine}`;
  }
  return repoPath;
}
|
|
1740
|
+
|
|
1741
|
+
// -------- Dry-run report rendering --------
|
|
1742
|
+
|
|
1743
|
+
/**
 * Emit the human-readable dry-run report for a migration plan, one line at a time
 * through the supplied log sink. Covers: per-source entry counts and sizes, planned
 * backups, auto-classified feature/platform writes, ambiguous-entry count, and the
 * unassigned bucket, followed by re-run instructions.
 * @param {MigrationPlan} plan
 * @param {(line: string) => void} log
 */
function _emitDryRunReport(plan, log) {
  const out = [];
  const emit = (line = '') => out.push(line);

  emit('=== V6 MIGRATION DRY-RUN ===');
  emit();
  emit('Source files:');
  for (const name of [...V5_SOURCES, ...V5_BINARY_SOURCES]) {
    // Missing count means the source was absent — report 0 rather than undefined.
    const count = plan.sourceCounts[name] !== undefined ? plan.sourceCounts[name] : 0;
    const size = plan.sourceSizes[name] || 0;
    emit(` ${name.padEnd(18)} ${String(count).padStart(5)} entries (${_humanBytes(size)})`);
  }
  emit();
  emit('Backups would be created:');
  for (const backup of plan.backups) {
    emit(` ${backup.to} (${backup.exists ? 'skip — already exists' : 'new'})`);
  }
  emit();

  // Auto-classified writes: feature destinations first, then platform destinations.
  emit('Auto-classified (confidence ≥ 0.7):');
  for (const w of plan.writes) {
    if (w.destinationKind !== 'feature') continue;
    const dest = path.relative(process.cwd(), w.destinationPath) || w.destinationPath;
    emit(` → ${dest} ${w.decisions.length} decisions, ${w.pitfalls.length} pitfalls, ${w.snapshots.length} snapshots`);
  }
  for (const w of plan.writes) {
    if (w.destinationKind !== 'platform') continue;
    const dest = path.relative(process.cwd(), w.destinationPath) || w.destinationPath;
    emit(` → ${dest} ${w.decisions.length} decisions, ${w.pitfalls.length} pitfalls`);
  }
  emit();

  if (plan.ambiguous.length > 0) {
    emit('Ambiguous (will need your input on --apply):');
    emit(` ${plan.ambiguous.length} entries with confidence below ${CONFIDENCE_AUTO_THRESHOLD}`);
    emit();
  }
  emit('Unassigned (no signal):');
  emit(` ${plan.unassigned.length} entries — will land in .cap/memory/platform/${UNASSIGNED_PLATFORM_TOPIC}.md`);
  emit();
  emit('Re-run with --apply to execute. Use --interactive=false to skip ambiguity prompts (ambiguous entries default to highest-confidence candidate).');
  emit('=== END DRY-RUN ===');

  for (const line of out) log(line);
}
|
|
1789
|
+
|
|
1790
|
+
/**
 * Format a byte count as a short human-readable size: "N B" below 1 KiB,
 * "N.N KB" below 1 MiB, otherwise "N.N MB" (one decimal place, base-1024 units).
 * @param {number} bytes
 * @returns {string}
 */
function _humanBytes(bytes) {
  const KB = 1024;
  const MB = 1024 * 1024;
  if (bytes < KB) {
    return `${bytes} B`;
  }
  if (bytes < MB) {
    return `${(bytes / KB).toFixed(1)} KB`;
  }
  return `${(bytes / MB).toFixed(1)} MB`;
}
|
|
1798
|
+
|
|
1799
|
+
// -------- Migration report (AC-7) --------
|
|
1800
|
+
|
|
1801
|
+
/**
 * Assemble the structured data for the post-apply migration report (AC-7).
 * Counts entries (decisions + pitfalls) per destination kind: feature files,
 * real platform topics, and the unassigned platform bucket (reported as skipped).
 * @param {string} projectRoot
 * @param {MigrationPlan} plan
 * @param {MigrationResult} result
 * @param {MigrationOptions} opts
 */
function _buildReportData(projectRoot, plan, result, opts) {
  // Entry count for a set of writes = decisions + pitfalls (snapshots excluded).
  const entryCount = (writes) =>
    writes.reduce((sum, w) => sum + w.decisions.length + w.pitfalls.length, 0);

  const featureWrites = plan.writes.filter((w) => w.destinationKind === 'feature');
  const platformWrites = plan.writes.filter((w) => w.destinationKind === 'platform');
  // Within the platform bucket, separate real topics from the unassigned catch-all.
  const routed = platformWrites.filter((w) => w.topic !== UNASSIGNED_PLATFORM_TOPIC);
  const unassigned = platformWrites.filter((w) => w.topic === UNASSIGNED_PLATFORM_TOPIC);

  return {
    date: new Date(opts.now).toISOString(),
    projectRoot,
    mode: opts.interactive ? '--apply (interactive)' : '--apply (non-interactive)',
    counts: {
      total: entryCount(plan.writes),
      assigned: entryCount(featureWrites),
      platform: entryCount(routed),
      skipped: entryCount(unassigned),
    },
    writes: {
      featureFiles: featureWrites.length,
      platformFiles: platformWrites.length,
      filenames: [...result.wroteFiles].sort(),
    },
    backups: [...result.backups].sort(),
    errors: [...result.errors],
  };
}
|
|
1833
|
+
|
|
1834
|
+
/**
 * Render the migration report data as a Markdown document with Counts,
 * Files written, Backups, and Errors sections. Empty backup/error lists are
 * rendered as a "(none)" placeholder; the result ends with a trailing newline.
 * @param {ReturnType<typeof _buildReportData>} data
 * @returns {string}
 */
function _renderReport(data) {
  const out = [
    '# V6 Migration Report',
    `Date: ${data.date}`,
    `Project: ${data.projectRoot}`,
    `Mode: ${data.mode}`,
    '',
    '## Counts',
    `- Total V5 entries processed: ${data.counts.total}`,
    `- Assigned to feature files: ${data.counts.assigned}`,
    `- Routed to platform bucket: ${data.counts.platform}`,
    `- Skipped (unassigned): ${data.counts.skipped}`,
    '',
    '## Files written',
    `- ${data.writes.featureFiles} feature files at .cap/memory/features/`,
    `- ${data.writes.platformFiles} platform files at .cap/memory/platform/`,
  ];
  if (data.writes.filenames.length > 0) {
    out.push('', ...data.writes.filenames.map((fn) => `- ${fn}`));
  }
  out.push('', '## Backups');
  if (data.backups.length === 0) {
    out.push('- (none)');
  } else {
    out.push(...data.backups.map((b) => `- ${b}`));
  }
  out.push('', '## Errors');
  if (data.errors.length === 0) {
    out.push('(none)');
  } else {
    out.push(...data.errors.map((e) => `- ${e}`));
  }
  out.push('');
  return out.join('\n');
}
|
|
1871
|
+
|
|
1872
|
+
// -------- Prompt helpers --------
|
|
1873
|
+
|
|
1874
|
+
/**
 * Build and issue the interactive prompt for one ambiguous entry: shows the entry
 * label, its source files (for non-snapshot kinds), up to three classification
 * candidates, and the skip/auto/quit choices, then returns the user's raw answer.
 * @param {{ entry: V5Entry|V5Snapshot, decision: ClassificationDecision, kind: string }} item
 * @param {number} idx
 * @param {number} total
 * @param {MigrationOptions} opts
 * @returns {Promise<string>}
 */
async function _promptAmbiguity(item, idx, total, opts) {
  const entry = /** @type {any} */ (item.entry);
  const label = entry.title || entry.fileName || '';
  const lines = [`[Ambiguity ${idx}/${total}] ${item.kind}: "${label}"`];

  if (item.kind !== 'snapshot') {
    const files = /** @type {V5Entry} */ (item.entry).relatedFiles;
    if (files && files.length > 0) {
      lines.push(` Sources: ${files.join(', ')}`);
    }
  }

  const candidates = item.decision.candidates || [];
  lines.push(' Top candidates:');
  if (candidates.length === 0) {
    lines.push(' (no candidates — will route to unassigned on skip)');
  } else {
    candidates.slice(0, 3).forEach((cand, i) => {
      // Feature candidates display their F-NNN id; platform candidates their topic path.
      const target = cand.featureId ? cand.featureId : `platform/${cand.topic}`;
      lines.push(` [${i + 1}] ${target} — confidence ${cand.confidence.toFixed(2)}, ${cand.reason}`);
    });
  }
  lines.push(' [s] Skip (route to platform/unassigned)');
  lines.push(' [a] Auto-assign all remaining (confidence-best wins)');
  lines.push(' [q] Quit migration');
  return _ask(lines.join('\n') + '\n Choice: ', opts);
}
|
|
1906
|
+
|
|
1907
|
+
/**
 * Ask the user to confirm before writing V6 files. Accepts 'y' or 'yes'
 * (case-insensitive, surrounding whitespace ignored); anything else declines.
 * @param {MigrationPlan} plan
 * @param {MigrationOptions} opts
 * @returns {Promise<boolean>} true when the user confirmed.
 */
async function _confirmApply(plan, opts) {
  const prompt = `About to write ${plan.writes.length} V6 files (${plan.ambiguous.length} entries need your input). Proceed? [y/N]: `;
  const reply = await _ask(prompt, opts);
  const normalized = reply.trim().toLowerCase();
  return normalized === 'y' || normalized === 'yes';
}
|
|
1919
|
+
|
|
1920
|
+
/**
 * Ask the user a question and resolve with their raw answer.
 * Resolution order: scripted test responses (when `_testPromptResponses` is a
 * non-empty array), then a caller-supplied `promptFn`, then a readline prompt
 * on stdin/stderr.
 * @param {string} text
 * @param {MigrationOptions} opts
 * @returns {Promise<string>}
 */
function _ask(text, opts) {
  // Test injection: consume the next scripted response when one is queued.
  const scripted = opts._testPromptResponses;
  if (Array.isArray(scripted) && scripted.length > 0) {
    const next = scripted.shift();
    return Promise.resolve((next && next.choice) || '');
  }

  // Caller-supplied prompt override.
  if (opts.promptFn) {
    return opts.promptFn(text);
  }

  // Default readline prompt.
  // @cap-decision(F-077/D5) On non-TTY stdin (CI, piped input, headless run without
  // --interactive=false), `'close'` fires before any user keystroke and
  // rl.question's callback never runs → hang until external SIGKILL. Resolve
  // to '' on close so EOF behaves like empty input → routes to unassigned via
  // D4. Promise is idempotent (only first resolve takes effect), so the
  // question callback in the happy path still wins when input arrives.
  return new Promise((resolve) => {
    const rl = readline.createInterface({ input: process.stdin, output: process.stderr });
    rl.on('close', () => resolve(''));
    rl.question(text, (answer) => {
      rl.close();
      resolve(answer);
    });
  });
}
|
|
1950
|
+
|
|
1951
|
+
// -------- Misc helpers --------
|
|
1952
|
+
|
|
1953
|
+
/**
 * Format an epoch-milliseconds timestamp as a UTC calendar date, 'YYYY-MM-DD'.
 * @param {number} now - Milliseconds since the Unix epoch.
 * @returns {string}
 */
function _isoDate(now) {
  // ISO 8601 strings are 'YYYY-MM-DDTHH:mm:ss.sssZ'; the date is everything before 'T'.
  const [datePart] = new Date(now).toISOString().split('T');
  return datePart;
}
|
|
1959
|
+
|
|
1960
|
+
/**
 * Deterministic comparator for V5 entries: order by title, then by source file
 * (missing file treated as ''), then by source line. Suitable for Array#sort.
 * @param {V5Entry} a
 * @param {V5Entry} b
 * @returns {number} negative/zero/positive per the usual comparator contract.
 */
function _compareEntriesByText(a, b) {
  if (a.title !== b.title) {
    return a.title.localeCompare(b.title);
  }
  const fileOrder = (a.sourceFile || '').localeCompare(b.sourceFile || '');
  if (fileOrder !== 0) {
    return fileOrder;
  }
  return a.sourceLine - b.sourceLine;
}
|
|
1968
|
+
|
|
1969
|
+
/**
 * Normalize caller-supplied migration options into a fully-populated options object.
 * Defaults: dry-run unless --apply is set, interactive unless explicitly disabled,
 * current time, and a best-effort stderr logger that swallows write failures.
 * @param {MigrationOptions=} options
 */
function _normalizeOptions(options) {
  const raw = options || {};
  const apply = Boolean(raw.apply);
  // Default logger writes to stderr; write errors (e.g. closed pipe) are ignored.
  const fallbackLog = (line) => {
    try {
      process.stderr.write(line + '\n');
    } catch (_e) {
      /* ignore */
    }
  };
  return {
    // --apply forces dryRun off; otherwise dry-run is the default unless dryRun === false.
    dryRun: apply ? false : raw.dryRun !== false,
    apply,
    interactive: raw.interactive !== false,
    now: typeof raw.now === 'number' ? raw.now : Date.now(),
    promptFn: raw.promptFn,
    confirmFn: raw.confirmFn,
    log: raw.log || fallbackLog,
    _testPromptResponses: raw._testPromptResponses,
  };
}
|
|
1985
|
+
|
|
1986
|
+
// -------- Exports --------
|
|
1987
|
+
|
|
1988
|
+
// Module surface. Only the "public API" group is meant for external callers;
// the parser, rendering, constant, and internal groups are exported so unit
// tests can exercise those pieces in isolation.
module.exports = {
  // public API
  migrateMemory,
  buildMigrationPlan,
  classifyEntry,
  classifySnapshot,
  resolveAmbiguities,
  buildClassifierContext,
  // parsers (exported for tests)
  parseV5MarkdownFile,
  parseGraphJson,
  parseSnapshot,
  // rendering (exported for tests)
  renderPlannedWrite,
  // constants
  CONFIDENCE_AUTO_THRESHOLD,
  BACKUP_DIR,
  UNASSIGNED_PLATFORM_TOPIC,
  UNASSIGNED_SNAPSHOTS_TOPIC,
  V5_SOURCES,
  V5_BINARY_SOURCES,
  // internals (exported for tests only)
  _atomicWriteFile,
  _writeBackup,
  _isoDate,
  _normalizeRepoPath,
  _slugify,
};
|