@leeovery/claude-technical-workflows 2.1.34 → 2.1.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48) hide show
  1. package/README.md +1 -2
  2. package/agents/planning-task-author.md +46 -16
  3. package/agents/review-findings-synthesizer.md +14 -18
  4. package/agents/review-task-verifier.md +3 -4
  5. package/package.json +1 -1
  6. package/skills/begin-implementation/SKILL.md +5 -1
  7. package/skills/begin-planning/SKILL.md +5 -1
  8. package/skills/migrate/SKILL.md +2 -1
  9. package/skills/migrate/scripts/migrate.sh +31 -28
  10. package/skills/migrate/scripts/migrations/001-discussion-frontmatter.sh +1 -15
  11. package/skills/migrate/scripts/migrations/002-specification-frontmatter.sh +0 -14
  12. package/skills/migrate/scripts/migrations/003-planning-frontmatter.sh +0 -14
  13. package/skills/migrate/scripts/migrations/004-sources-object-format.sh +0 -12
  14. package/skills/migrate/scripts/migrations/005-plan-external-deps-frontmatter.sh +0 -12
  15. package/skills/migrate/scripts/migrations/006-directory-restructure.sh +2 -22
  16. package/skills/migrate/scripts/migrations/007-tasks-subdirectory.sh +1 -17
  17. package/skills/migrate/scripts/migrations/008-review-directory-structure.sh +0 -10
  18. package/skills/migrate/scripts/migrations/009-review-per-plan-storage.sh +117 -0
  19. package/skills/migrate/scripts/migrations/010-gitignore-sessions.sh +67 -0
  20. package/skills/start-discussion/SKILL.md +1 -1
  21. package/skills/start-discussion/references/handle-selection.md +1 -1
  22. package/skills/start-discussion/references/research-analysis.md +3 -3
  23. package/skills/start-discussion/scripts/discovery.sh +1 -1
  24. package/skills/start-review/references/display-plans.md +2 -2
  25. package/skills/start-review/references/invoke-skill.md +20 -26
  26. package/skills/start-review/references/select-plans.md +1 -1
  27. package/skills/start-review/scripts/discovery.sh +15 -64
  28. package/skills/start-specification/SKILL.md +1 -1
  29. package/skills/start-specification/references/analysis-flow.md +2 -2
  30. package/skills/start-specification/references/display-analyze.md +1 -1
  31. package/skills/start-specification/references/display-groupings.md +3 -3
  32. package/skills/start-specification/references/display-specs-menu.md +1 -1
  33. package/skills/start-specification/scripts/discovery.sh +1 -1
  34. package/skills/technical-planning/SKILL.md +4 -3
  35. package/skills/technical-planning/references/author-tasks.md +119 -35
  36. package/skills/technical-planning/references/output-formats/tick/about.md +3 -2
  37. package/skills/technical-planning/references/output-formats/tick/graph.md +2 -0
  38. package/skills/technical-planning/references/output-formats/tick/reading.md +2 -0
  39. package/skills/technical-planning/references/plan-construction.md +11 -15
  40. package/skills/technical-planning/references/review-integrity.md +1 -1
  41. package/skills/technical-review/SKILL.md +3 -13
  42. package/skills/technical-review/references/invoke-review-synthesizer.md +3 -3
  43. package/skills/technical-review/references/invoke-task-verifiers.md +5 -12
  44. package/skills/technical-review/references/produce-review.md +2 -4
  45. package/skills/technical-review/references/review-actions-loop.md +14 -22
  46. package/skills/technical-review/references/template.md +1 -21
  47. package/agents/review-product-assessor.md +0 -112
  48. package/skills/technical-review/references/invoke-product-assessor.md +0 -57
package/README.md CHANGED
@@ -235,8 +235,7 @@ docs/workflow/
235
235
  └── {topic}/
236
236
  └── r1/
237
237
  ├── review.md # Review summary and verdict
238
- ├── qa-task-1.md # Per-task QA verification
239
- └── product-assessment.md # Holistic product assessment
238
+ └── qa-task-1.md # Per-task QA verification
240
239
  ```
241
240
 
242
241
  Research starts with `exploration.md` and splits into topic files as themes emerge. From specification onwards, each topic gets its own directory. Planning task storage varies by [output format](#output-formats) — the tree above shows local-markdown; Tick and Linear store tasks externally.
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  name: planning-task-author
3
- description: Writes full detail for a single plan task. Invoked by technical-planning skill during plan construction.
4
- tools: Read, Glob, Grep
3
+ description: Writes full detail for all plan tasks in a phase. Invoked by technical-planning skill during plan construction.
4
+ tools: Read, Glob, Grep, Write
5
5
  model: opus
6
6
  ---
7
7
 
@@ -18,13 +18,12 @@ You receive file paths via the orchestrator's prompt:
18
18
  3. **Cross-cutting spec paths** (if any) — Architectural decisions that influence planning
19
19
  4. **task-design.md** — Task design principles and template
20
20
  5. **All approved phases** — The complete phase structure (from the Plan Index File)
21
- 6. **Task list for current phase** — The approved task table
22
- 7. **Target task** — Which task to author (name, edge cases from the table)
23
- 8. **Output format adapter path** — The output format reference defining the exact file structure
21
+ 6. **Task list for current phase** — The approved task table (ALL tasks in the phase)
22
+ 7. **Scratch file path** — Where to write authored tasks
24
23
 
25
24
  On **amendment**, you also receive:
26
- - **Previous output** — Your prior task detail
27
- **User feedback** — What to change
25
+ - **Scratch file path** — Contains previously authored tasks with status markers
26
+ - The scratch file contains `rejected` tasks with feedback blockquotes — rewrite only those
28
27
 
29
28
  ## Your Process
30
29
 
@@ -33,10 +32,40 @@ On **amendment**, you also receive:
33
32
  3. Read any cross-cutting specifications
34
33
  4. Read `task-design.md` — absorb the task template and quality standards
35
34
  5. Read the approved phases and task list — understand context and scope
36
- 6. Read the output format adapter — understand the exact format for task files
37
- 7. Author the target task in the output format's structure
35
+ 6. Author all tasks in the phase, writing each to the scratch file incrementally — each task written to disk before starting the next
38
36
 
39
- If this is an **amendment**: read your previous output and the user's feedback, then revise accordingly.
37
+ If this is an **amendment**: read the scratch file, find tasks marked `rejected` (they have a feedback blockquote below the status line). Rewrite the entire scratch file — copy `approved` tasks verbatim, rewrite `rejected` tasks addressing the feedback. Reset rewritten tasks to `pending` status.
38
+
39
+ ## Scratch File Format
40
+
41
+ Write the scratch file with this structure:
42
+
43
+ ```markdown
44
+ ---
45
+ phase: {N}
46
+ phase_name: {Phase Name}
47
+ total: {count}
48
+ ---
49
+
50
+ ## {task-id} | pending
51
+
52
+ ### Task {seq}: {Task Name}
53
+
54
+ **Problem**: ...
55
+ **Solution**: ...
56
+ **Outcome**: ...
57
+ **Do**: ...
58
+ **Acceptance Criteria**: ...
59
+ **Tests**: ...
60
+ **Edge Cases**: ...
61
+ **Context**: ...
62
+ **Spec Reference**: ...
63
+
64
+ ## {task-id} | pending
65
+
66
+ ### Task {seq}: {Task Name}
67
+ ...
68
+ ```
40
69
 
41
70
  ## Task Template
42
71
 
@@ -48,20 +77,21 @@ Every task must include these fields (from task-design.md):
48
77
  - **Do**: Specific implementation steps (file locations, method names where helpful)
49
78
  - **Acceptance Criteria**: Pass/fail verifiable criteria
50
79
  - **Tests**: Named test cases including edge cases
80
+ - **Edge Cases**: Edge case handling (reference from the task table)
51
81
  - **Context**: (when relevant) Specification decisions and constraints that inform implementation
82
+ - **Spec Reference**: Which specification section(s) this task traces to
52
83
 
53
84
  ## Your Output
54
85
 
55
- Return the complete task detail in the exact format specified by the output format adapter. What you produce is what the orchestrator will write verbatim — the user sees your output before approving, and approved output is logged without modification.
56
-
57
- The output format adapter determines the file structure (frontmatter, sections, naming). Follow it precisely.
86
+ Write all tasks to the scratch file path provided. Use the canonical task template format above. Each task is written to disk before starting the next — incremental writes, not a single batch at the end.
58
87
 
59
88
  ## Rules
60
89
 
61
- 1. **Self-contained** — anyone (Claude or human) could pick up this task and execute it without opening another document
90
+ 1. **Self-contained** — anyone (Claude or human) could pick up any task and execute it without opening another document
62
91
  2. **Specification is source of truth** — pull rationale, decisions, and constraints from the spec
63
92
  3. **Cross-cutting specs inform** — apply their architectural decisions where relevant (e.g., caching, rate limiting)
64
93
  4. **Every field required** — Problem, Solution, Outcome, Do, Acceptance Criteria, Tests are all mandatory
65
94
  5. **Tests include edge cases** — not just happy path; reference the edge cases from the task table
66
- 6. **Match the output format exactly** — follow the adapter's template structure
67
- 7. **No modifications after approval** — what the user sees is what gets logged
95
+ 6. **Write tasks to the scratch file incrementally** — each task written to disk before starting the next
96
+ 7. **Spec interpretation errors propagate across tasks in a batch** — ground every decision in the specification. When the spec is ambiguous, note the ambiguity in the task's Context section rather than inventing a plausible default.
97
+ 8. **No modifications after approval** — what the user sees is what gets logged
@@ -1,6 +1,6 @@
1
1
  ---
2
2
  name: review-findings-synthesizer
3
- description: Synthesizes review findings into normalized tasks. Reads review files (QA verifications and product assessment), deduplicates, groups, normalizes using task template, and writes a staging file for orchestrator approval. Invoked by technical-review skill after review actions are initiated.
3
+ description: Synthesizes review findings into normalized tasks. Reads QA verification files, deduplicates, groups, normalizes using task template, and writes a staging file for orchestrator approval. Invoked by technical-review skill after review actions are initiated.
4
4
  tools: Read, Write, Glob, Grep
5
5
  model: opus
6
6
  ---
@@ -13,23 +13,21 @@ You locate the review findings files using the provided paths, then read them, d
13
13
 
14
14
  You receive via the orchestrator's prompt:
15
15
 
16
- 1. **Review scope** — single or multi, with plan list
17
- 2. **Review paths** — paths to `r{N}/` directories containing review summary, QA files, and product assessment
18
- 3. **Specification path(s)** — the validated specification(s) for context
16
+ 1. **Plan topic** — the plan being synthesized
17
+ 2. **Review path** — path to `r{N}/` directory containing review summary and QA files
18
+ 3. **Specification path** — the validated specification for context
19
19
  4. **Cycle number** — which review remediation cycle this is
20
20
 
21
21
  ## Your Process
22
22
 
23
- 1. **Read review summary(ies)** — extract verdict, required changes, recommendations from each `review.md`
24
- 2. **Read all QA files** — read every `qa-task-*.md` across all review paths. Extract BLOCKING ISSUES and significant NON-BLOCKING NOTES with their file:line references
25
- 3. **Read product assessment(s)** — extract ROBUSTNESS, GAPS, and STRENGTHENING findings from `product-assessment.md`
26
- 4. **Tag each finding with source plan** — use the directory structure of QA files to identify which plan each finding belongs to. For multi-plan reviews, QA files are stored in per-plan subdirectories within the review. Product assessment findings: tag by plan where identifiable; mark as `cross-cutting` otherwise
27
- 5. **Deduplicate** — same issue found in QA + product assessment → one finding, note all sources
28
- 6. **Group related findings** — multiple findings about the same concern become one task (e.g., 3 QA findings about missing error handling in the same module = 1 "add error handling" task)
29
- 7. **Filter** — discard low-severity non-blocking findings unless they cluster into a pattern. Never discard high-severity or blocking findings.
30
- 8. **Normalize** — convert each group into a task using the canonical task template (Problem / Solution / Outcome / Do / Acceptance Criteria / Tests)
31
- 9. **Write report** — output to `docs/workflow/implementation/{primary-topic}/review-report-c{cycle}.md`
32
- 10. **Write staging file** — if actionable tasks exist, write to `docs/workflow/implementation/{primary-topic}/review-tasks-c{cycle}.md` with `status: pending` for each task
23
+ 1. **Read review summary** — extract verdict, required changes, recommendations from `review.md`
24
+ 2. **Read all QA files** — read every `qa-task-*.md` in the review path. Extract BLOCKING ISSUES and significant NON-BLOCKING NOTES with their file:line references
25
+ 3. **Deduplicate** — same issue found across multiple QA files → one finding, note all sources
26
+ 4. **Group related findings** — multiple findings about the same concern become one task (e.g., 3 QA findings about missing error handling in the same module = 1 "add error handling" task)
27
+ 5. **Filter** — discard low-severity non-blocking findings unless they cluster into a pattern. Never discard high-severity or blocking findings.
28
+ 6. **Normalize** — convert each group into a task using the canonical task template (Problem / Solution / Outcome / Do / Acceptance Criteria / Tests)
29
+ 7. **Write report** — output to `docs/workflow/implementation/{topic}/review-report-c{cycle}.md`
30
+ 8. **Write staging file** — if actionable tasks exist, write to `docs/workflow/implementation/{topic}/review-tasks-c{cycle}.md` with `status: pending` for each task
33
31
 
34
32
  ## Report Format
35
33
 
@@ -70,8 +68,7 @@ gate_mode: gated
70
68
  ## Task 1: {title}
71
69
  status: pending
72
70
  severity: high
73
- plan: {plan-topic}
74
- sources: qa-task-3, product-assessment
71
+ sources: qa-task-3, qa-task-7
75
72
 
76
73
  **Problem**: {what the review found}
77
74
  **Solution**: {what to fix}
@@ -94,9 +91,8 @@ status: pending
94
91
  1. **No new features** — only address issues found in the review. Every proposed task must trace back to a specific review finding.
95
92
  2. **Never discard blocking** — blocking issues from QA always become proposed tasks.
96
93
  3. **Self-contained tasks** — every proposed task must be independently executable. No task should depend on another proposed task.
97
- 4. **Faithful synthesis** — do not invent findings. Every proposed task must trace back to at least one QA finding or product assessment observation.
94
+ 4. **Faithful synthesis** — do not invent findings. Every proposed task must trace back to at least one QA finding.
98
95
  5. **No git writes** — do not commit or stage. Writing the report and staging files are your only file writes.
99
- 6. **Plan tagging** — every task must have a `plan:` field identifying which plan it belongs to. This is critical for multi-plan reviews where tasks are created in different plans.
100
96
 
101
97
  ## Your Output
102
98
 
@@ -17,10 +17,9 @@ You receive:
17
17
  3. **Plan path**: The full plan for additional context
18
18
  4. **Project skill paths**: Relevant `.claude/skills/` paths for framework conventions
19
19
  5. **Review checklist path**: Path to the review checklist (`skills/technical-review/references/review-checklist.md`) — read this for detailed verification criteria
20
- 6. **Review scope**: Scope directory name for output path
20
+ 6. **Topic**: The plan topic name (used for output directory)
21
21
  7. **Review number**: Version number (e.g., 1 for `r1/`)
22
- 8. **Plan topic**: For multi-plan reviews, the plan-specific subdirectory name
23
- 9. **Task index**: Sequential number for this task (used for output file naming)
22
+ 8. **Task index**: Sequential number for this task (used for output file naming)
24
23
 
25
24
  ## Your Task
26
25
 
@@ -88,7 +87,7 @@ Review the implementation as a senior architect would:
88
87
 
89
88
  ## Output File Format
90
89
 
91
- Write to `docs/workflow/review/{scope}/r{N}/qa-task-{index}.md`:
90
+ Write to `docs/workflow/review/{topic}/r{N}/qa-task-{index}.md`:
92
91
 
93
92
  ```
94
93
  TASK: [Task name/description]
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@leeovery/claude-technical-workflows",
3
- "version": "2.1.34",
3
+ "version": "2.1.36",
4
4
  "description": "Technical workflow skills & commands for Claude Code",
5
5
  "license": "MIT",
6
6
  "author": "Lee Overy <me@leeovery.com>",
@@ -25,12 +25,16 @@ This skill is a **bridge** — it runs pre-flight checks for implementation and
25
25
 
26
26
  ## Step 1: Run Discovery
27
27
 
28
- Execute the start-implementation discovery script to gather current state:
28
+ !`.claude/skills/start-implementation/scripts/discovery.sh`
29
+
30
+ If the above shows a script invocation rather than YAML output, the dynamic content preprocessor did not run. Execute the script before continuing:
29
31
 
30
32
  ```bash
31
33
  .claude/skills/start-implementation/scripts/discovery.sh
32
34
  ```
33
35
 
36
+ If YAML content is already displayed, it has been run on your behalf.
37
+
34
38
  Parse the output to find the plan matching the provided topic. Extract:
35
39
 
36
40
  - **Plan details**: status, format, plan_id, specification, specification_exists
@@ -25,12 +25,16 @@ This skill is a **bridge** — it runs pre-flight checks for planning and hands
25
25
 
26
26
  ## Step 1: Run Discovery
27
27
 
28
- Execute the start-planning discovery script to gather current state:
28
+ !`.claude/skills/start-planning/scripts/discovery.sh`
29
+
30
+ If the above shows a script invocation rather than YAML output, the dynamic content preprocessor did not run. Execute the script before continuing:
29
31
 
30
32
  ```bash
31
33
  .claude/skills/start-planning/scripts/discovery.sh
32
34
  ```
33
35
 
36
+ If YAML content is already displayed, it has been run on your behalf.
37
+
34
38
  Parse the output to extract:
35
39
 
36
40
  - **Cross-cutting specifications** from `specifications.crosscutting` (name, status)
@@ -39,6 +39,7 @@ Return control silently - no user interaction needed.
39
39
  ## Notes
40
40
 
41
41
  - This skill is run automatically at the start of every workflow skill
42
- - Migrations are tracked in `docs/workflow/.cache/migrations.log`
42
+ - Migrations are tracked in `docs/workflow/.state/migrations` (one migration ID per line)
43
+ - The orchestrator skips entire migrations once recorded — individual scripts don't track
43
44
  - To force re-running all migrations, delete the tracking file
44
45
  - Each migration is idempotent - safe to run multiple times
@@ -9,54 +9,43 @@
9
9
  # ./scripts/migrate.sh
10
10
  #
11
11
  # Tracking:
12
- # Migrations are tracked in docs/workflow/.cache/migrations.log
13
- # Format: "filepath: migration_id" (one per line, append-only)
12
+ # Migrations are tracked in docs/workflow/.state/migrations
13
+ # Format: "migration_id" per line (e.g., "001", "002")
14
+ # The orchestrator checks/records migration IDs — individual scripts don't track.
14
15
  # Delete the log file to force re-running all migrations.
15
16
  #
16
17
  # Adding new migrations:
17
18
  # 1. Create scripts/migrations/NNN-description.sh (e.g., 002-spec-frontmatter.sh)
18
19
  # 2. The script will be run automatically in numeric order
19
- # 3. Each migration script receives helper functions via source
20
+ # 3. Each migration script receives helper functions via source: report_update, report_skip
20
21
  #
21
22
 
22
23
  set -eo pipefail
23
24
 
24
25
  SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
25
26
  MIGRATIONS_DIR="$SCRIPT_DIR/migrations"
26
- TRACKING_FILE="docs/workflow/.cache/migrations.log"
27
+ TRACKING_FILE="docs/workflow/.state/migrations"
27
28
 
28
29
  # Track counts for final report
29
30
  FILES_UPDATED=0
30
31
  FILES_SKIPPED=0
31
32
  MIGRATIONS_RUN=0
32
33
 
33
- # Ensure cache directory exists
34
+ # Ensure state directory exists
34
35
  mkdir -p "$(dirname "$TRACKING_FILE")"
35
36
 
37
+ # Self-healing: merge entries from old locations into .state/migrations
38
+ OLD_CACHE_LOG="docs/workflow/.cache/migrations.log"
39
+ OLD_CACHE_FILE="docs/workflow/.cache/migrations"
40
+ if [ -f "$OLD_CACHE_LOG" ] || [ -f "$OLD_CACHE_FILE" ]; then
41
+ { cat "$OLD_CACHE_LOG" 2>/dev/null || true; cat "$OLD_CACHE_FILE" 2>/dev/null || true; cat "$TRACKING_FILE" 2>/dev/null || true; } | sort -u > "${TRACKING_FILE}.tmp"
42
+ mv "${TRACKING_FILE}.tmp" "$TRACKING_FILE"
43
+ rm -f "$OLD_CACHE_LOG" "$OLD_CACHE_FILE"
44
+ fi
45
+
36
46
  # Touch tracking file if it doesn't exist
37
47
  touch "$TRACKING_FILE"
38
48
 
39
- #
40
- # Helper function: Check if a migration has been applied to a file
41
- # Usage: is_migrated "filepath" "migration_id"
42
- # Returns: 0 if migrated, 1 if not
43
- #
44
- is_migrated() {
45
- local filepath="$1"
46
- local migration_id="$2"
47
- grep -q "^${filepath}: ${migration_id}$" "$TRACKING_FILE" 2>/dev/null
48
- }
49
-
50
- #
51
- # Helper function: Record that a migration was applied to a file
52
- # Usage: record_migration "filepath" "migration_id"
53
- #
54
- record_migration() {
55
- local filepath="$1"
56
- local migration_id="$2"
57
- echo "${filepath}: ${migration_id}" >> "$TRACKING_FILE"
58
- }
59
-
60
49
  #
61
50
  # Helper function: Report a file update (for migration scripts to call)
62
51
  # Usage: report_update "filepath" "description"
@@ -78,7 +67,7 @@ report_skip() {
78
67
  }
79
68
 
80
69
  # Export functions and variables for migration scripts
81
- export -f is_migrated record_migration report_update report_skip
70
+ export -f report_update report_skip
82
71
  export TRACKING_FILE FILES_UPDATED FILES_SKIPPED
83
72
 
84
73
  #
@@ -99,6 +88,14 @@ if [ ${#MIGRATION_SCRIPTS[@]} -eq 0 ]; then
99
88
  exit 0
100
89
  fi
101
90
 
91
+ # One-time: convert old per-file format to per-migration format
92
+ # Old: "docs/workflow/discussion/auth.md: 001" → extracts "001"
93
+ # New: "001" → already correct
94
+ if grep -q ': [0-9]' "$TRACKING_FILE" 2>/dev/null; then
95
+ grep -oE '[0-9]+$' "$TRACKING_FILE" | sort -u > "${TRACKING_FILE}.tmp"
96
+ mv "${TRACKING_FILE}.tmp" "$TRACKING_FILE"
97
+ fi
98
+
102
99
  for script in "${MIGRATION_SCRIPTS[@]}"; do
103
100
  # Extract migration ID from filename (e.g., "001" from "001-discussion-frontmatter.sh")
104
101
  migration_id=$(basename "$script" .sh | grep -oE '^[0-9]+')
@@ -108,11 +105,17 @@ for script in "${MIGRATION_SCRIPTS[@]}"; do
108
105
  continue
109
106
  fi
110
107
 
108
+ # Global check — skip entire migration if already recorded
109
+ if grep -q "^${migration_id}$" "$TRACKING_FILE" 2>/dev/null; then
110
+ continue
111
+ fi
112
+
111
113
  # Source and run the migration script
112
- # The script has access to: is_migrated, record_migration, report_update, report_skip
114
+ # The script has access to: report_update, report_skip
113
115
  # shellcheck source=/dev/null
114
116
  source "$script"
115
117
 
118
+ echo "$migration_id" >> "$TRACKING_FILE"
116
119
  MIGRATIONS_RUN=$((MIGRATIONS_RUN + 1))
117
120
  done
118
121
 
@@ -24,9 +24,7 @@
24
24
  # Exploring, Deciding → in-progress
25
25
  # Concluded, Complete, ✅ Complete → concluded
26
26
  #
27
- # This script is sourced by migrate-documents.sh and has access to:
28
- # - is_migrated "filepath" "migration_id"
29
- # - record_migration "filepath" "migration_id"
27
+ # This script is sourced by migrate.sh and has access to:
30
28
  # - report_update "filepath" "description"
31
29
  # - report_skip "filepath"
32
30
  #
@@ -43,24 +41,14 @@ fi
43
41
  for file in "$DISCUSSION_DIR"/*.md; do
44
42
  [ -f "$file" ] || continue
45
43
 
46
- # Check if already migrated via tracking
47
- if is_migrated "$file" "$MIGRATION_ID"; then
48
- report_skip "$file"
49
- continue
50
- fi
51
-
52
44
  # Check if file already has YAML frontmatter
53
45
  if head -1 "$file" 2>/dev/null | grep -q "^---$"; then
54
- # Already has frontmatter - just record and skip
55
- record_migration "$file" "$MIGRATION_ID"
56
46
  report_skip "$file"
57
47
  continue
58
48
  fi
59
49
 
60
50
  # Check if file has legacy format (look for **Status**: or **Status:** or **Date**: or **Started:**)
61
51
  if ! grep -q '^\*\*Status\*\*:\|^\*\*Status:\*\*\|^\*\*Date\*\*:\|^\*\*Started:\*\*' "$file" 2>/dev/null; then
62
- # No legacy format found - might be malformed, skip
63
- record_migration "$file" "$MIGRATION_ID"
64
52
  report_skip "$file"
65
53
  continue
66
54
  fi
@@ -134,7 +122,5 @@ date: $date_value
134
122
  echo "$content"
135
123
  } > "$file"
136
124
 
137
- # Record and report
138
- record_migration "$file" "$MIGRATION_ID"
139
125
  report_update "$file" "added frontmatter"
140
126
  done
@@ -34,8 +34,6 @@
34
34
  # (not found or unrecognized) → empty (requires manual review)
35
35
  #
36
36
  # This script is sourced by migrate.sh and has access to:
37
- # - is_migrated "filepath" "migration_id"
38
- # - record_migration "filepath" "migration_id"
39
37
  # - report_update "filepath" "description"
40
38
  # - report_skip "filepath"
41
39
  #
@@ -57,24 +55,14 @@ for file in "$SPEC_DIR"/*.md; do
57
55
  *-review-*|*-tracking*) continue ;;
58
56
  esac
59
57
 
60
- # Check if already migrated via tracking
61
- if is_migrated "$file" "$MIGRATION_ID"; then
62
- report_skip "$file"
63
- continue
64
- fi
65
-
66
58
  # Check if file already has YAML frontmatter
67
59
  if head -1 "$file" 2>/dev/null | grep -q "^---$"; then
68
- # Already has frontmatter - just record and skip
69
- record_migration "$file" "$MIGRATION_ID"
70
60
  report_skip "$file"
71
61
  continue
72
62
  fi
73
63
 
74
64
  # Check if file has legacy format (look for **Status**: or **Status:** or **Type**: or **Last Updated**:)
75
65
  if ! grep -q '^\*\*Status\*\*:\|^\*\*Status:\*\*\|^\*\*Type\*\*:\|^\*\*Last Updated\*\*:' "$file" 2>/dev/null; then
76
- # No legacy format found - might be malformed, skip
77
- record_migration "$file" "$MIGRATION_ID"
78
66
  report_skip "$file"
79
67
  continue
80
68
  fi
@@ -206,7 +194,5 @@ date: $date_value
206
194
  echo "$content"
207
195
  } > "$file"
208
196
 
209
- # Record and report
210
- record_migration "$file" "$MIGRATION_ID"
211
197
  report_update "$file" "added frontmatter (status: $status_new, type: $type_new)"
212
198
  done
@@ -32,8 +32,6 @@
32
32
  # Completed, Done → concluded
33
33
  #
34
34
  # This script is sourced by migrate.sh and has access to:
35
- # - is_migrated "filepath" "migration_id"
36
- # - record_migration "filepath" "migration_id"
37
35
  # - report_update "filepath" "description"
38
36
  # - report_skip "filepath"
39
37
  #
@@ -55,16 +53,8 @@ for file in "$PLAN_DIR"/*.md; do
55
53
  *-review-*|*-tracking*) continue ;;
56
54
  esac
57
55
 
58
- # Check if already migrated via tracking
59
- if is_migrated "$file" "$MIGRATION_ID"; then
60
- report_skip "$file"
61
- continue
62
- fi
63
-
64
56
  # Check if file already has full frontmatter (topic field present)
65
57
  if head -10 "$file" 2>/dev/null | grep -q "^topic:"; then
66
- # Already has full frontmatter - just record and skip
67
- record_migration "$file" "$MIGRATION_ID"
68
58
  report_skip "$file"
69
59
  continue
70
60
  fi
@@ -75,8 +65,6 @@ for file in "$PLAN_DIR"/*.md; do
75
65
  has_inline_metadata=$(grep -c '^\*\*Date\*\*:\|^\*\*Status\*\*:\|^\*\*Specification\*\*:' "$file" 2>/dev/null || true)
76
66
 
77
67
  if [ "${has_partial_frontmatter:-0}" = "0" ] && [ "${has_inline_metadata:-0}" = "0" ]; then
78
- # No legacy format found - might be malformed, skip
79
- record_migration "$file" "$MIGRATION_ID"
80
68
  report_skip "$file"
81
69
  continue
82
70
  fi
@@ -192,7 +180,5 @@ specification: $spec_value
192
180
  echo "$content"
193
181
  } > "$file"
194
182
 
195
- # Record and report
196
- record_migration "$file" "$MIGRATION_ID"
197
183
  report_update "$file" "added full frontmatter (status: $status_new, format: $format_value)"
198
184
  done
@@ -29,8 +29,6 @@
29
29
  # - If no matching discussion, add empty sources: [] and report for user review
30
30
  #
31
31
  # This script is sourced by migrate.sh and has access to:
32
- # - is_migrated "filepath" "migration_id"
33
- # - record_migration "filepath" "migration_id"
34
32
  # - report_update "filepath" "description"
35
33
  # - report_skip "filepath"
36
34
  #
@@ -92,15 +90,8 @@ for file in "$SPEC_DIR"/*.md; do
92
90
  *-review-*|*-tracking*) continue ;;
93
91
  esac
94
92
 
95
- # Check if already migrated via tracking
96
- if is_migrated "$file" "$MIGRATION_ID"; then
97
- report_skip "$file"
98
- continue
99
- fi
100
-
101
93
  # Check if file has YAML frontmatter
102
94
  if ! head -1 "$file" 2>/dev/null | grep -q "^---$"; then
103
- record_migration "$file" "$MIGRATION_ID"
104
95
  report_skip "$file"
105
96
  continue
106
97
  fi
@@ -113,7 +104,6 @@ for file in "$SPEC_DIR"/*.md; do
113
104
 
114
105
  # If sources field exists, check if already in object format
115
106
  if $has_sources_field && sources_already_object_format "$file"; then
116
- record_migration "$file" "$MIGRATION_ID"
117
107
  report_skip "$file"
118
108
  continue
119
109
  fi
@@ -196,8 +186,6 @@ ${new_sources_block}"
196
186
  echo "$content"
197
187
  } > "$file"
198
188
 
199
- record_migration "$file" "$MIGRATION_ID"
200
-
201
189
  # Report appropriate message based on what was done
202
190
  if $has_sources_field; then
203
191
  report_update "$file" "converted sources to object format"
@@ -30,8 +30,6 @@
30
30
  # - "- ~~{topic}: {description}~~ → satisfied externally" → state: satisfied_externally
31
31
  #
32
32
  # This script is sourced by migrate.sh and has access to:
33
- # - is_migrated "filepath" "migration_id"
34
- # - record_migration "filepath" "migration_id"
35
33
  # - report_update "filepath" "description"
36
34
  # - report_skip "filepath"
37
35
  #
@@ -122,15 +120,8 @@ for file in "$PLAN_DIR"/*.md; do
122
120
  *-review-*|*-tracking*) continue ;;
123
121
  esac
124
122
 
125
- # Check if already migrated via tracking
126
- if is_migrated "$file" "$MIGRATION_ID"; then
127
- report_skip "$file"
128
- continue
129
- fi
130
-
131
123
  # Check if file has YAML frontmatter
132
124
  if ! head -1 "$file" 2>/dev/null | grep -q "^---$"; then
133
- record_migration "$file" "$MIGRATION_ID"
134
125
  report_skip "$file"
135
126
  continue
136
127
  fi
@@ -138,7 +129,6 @@ for file in "$PLAN_DIR"/*.md; do
138
129
  # Check if external_dependencies already exists in frontmatter
139
130
  frontmatter=$(extract_frontmatter_005 "$file")
140
131
  if echo "$frontmatter" | grep -q "^external_dependencies:"; then
141
- record_migration "$file" "$MIGRATION_ID"
142
132
  report_skip "$file"
143
133
  continue
144
134
  fi
@@ -226,8 +216,6 @@ ${new_deps_block}"
226
216
  echo "$new_body"
227
217
  } > "$file"
228
218
 
229
- record_migration "$file" "$MIGRATION_ID"
230
-
231
219
  if $has_deps; then
232
220
  report_update "$file" "migrated external dependencies to frontmatter"
233
221
  else
@@ -12,8 +12,6 @@
12
12
  # Updates plan frontmatter `specification` field to use new directory paths.
13
13
  #
14
14
  # This script is sourced by migrate.sh and has access to:
15
- # - is_migrated "filepath" "migration_id"
16
- # - record_migration "filepath" "migration_id"
17
15
  # - report_update "filepath" "description"
18
16
  # - report_skip "filepath"
19
17
  #
@@ -33,19 +31,11 @@ if [ -d "$SPEC_DIR" ]; then
33
31
 
34
32
  # Skip if already a directory (already migrated)
35
33
  if [ -d "$SPEC_DIR/$name" ] && [ -f "$SPEC_DIR/$name/specification.md" ]; then
36
- new_path="$SPEC_DIR/$name/specification.md"
37
- if ! is_migrated "$new_path" "$MIGRATION_ID"; then
38
- record_migration "$new_path" "$MIGRATION_ID"
39
- fi
40
- report_skip "$new_path"
34
+ report_skip "$SPEC_DIR/$name/specification.md"
41
35
  continue
42
36
  fi
43
37
 
44
38
  new_path="$SPEC_DIR/$name/specification.md"
45
- if is_migrated "$new_path" "$MIGRATION_ID"; then
46
- report_skip "$new_path"
47
- continue
48
- fi
49
39
 
50
40
  # Create topic directory
51
41
  mkdir -p "$SPEC_DIR/$name"
@@ -63,7 +53,6 @@ if [ -d "$SPEC_DIR" ]; then
63
53
  report_update "$SPEC_DIR/$name/$new_tracking_name" "moved tracking file into topic directory"
64
54
  done
65
55
 
66
- record_migration "$new_path" "$MIGRATION_ID"
67
56
  report_update "$new_path" "restructured to topic directory"
68
57
  done
69
58
  fi
@@ -79,19 +68,11 @@ if [ -d "$PLAN_DIR" ]; then
79
68
 
80
69
  # Skip if already a directory with plan.md
81
70
  if [ -d "$PLAN_DIR/$name" ] && [ -f "$PLAN_DIR/$name/plan.md" ]; then
82
- new_path="$PLAN_DIR/$name/plan.md"
83
- if ! is_migrated "$new_path" "$MIGRATION_ID"; then
84
- record_migration "$new_path" "$MIGRATION_ID"
85
- fi
86
- report_skip "$new_path"
71
+ report_skip "$PLAN_DIR/$name/plan.md"
87
72
  continue
88
73
  fi
89
74
 
90
75
  new_path="$PLAN_DIR/$name/plan.md"
91
- if is_migrated "$new_path" "$MIGRATION_ID"; then
92
- report_skip "$new_path"
93
- continue
94
- fi
95
76
 
96
77
  # Create topic directory (may already exist for local-markdown tasks)
97
78
  mkdir -p "$PLAN_DIR/$name"
@@ -108,7 +89,6 @@ if [ -d "$PLAN_DIR" ]; then
108
89
  report_update "$PLAN_DIR/$name/$new_tracking_name" "moved tracking file into topic directory"
109
90
  done
110
91
 
111
- record_migration "$new_path" "$MIGRATION_ID"
112
92
  report_update "$new_path" "restructured to topic directory"
113
93
  done
114
94
  fi