gaia-framework 1.66.0 → 1.87.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/.claude/commands/gaia-create-stakeholder.md +20 -0
  2. package/.claude/commands/gaia-test-gap-analysis.md +17 -0
  3. package/CLAUDE.md +87 -1
  4. package/README.md +2 -2
  5. package/_gaia/_config/global.yaml +5 -1
  6. package/_gaia/_config/lifecycle-sequence.yaml +20 -0
  7. package/_gaia/_config/skill-manifest.csv +2 -0
  8. package/_gaia/_config/workflow-manifest.csv +3 -1
  9. package/_gaia/core/engine/workflow.xml +5 -1
  10. package/_gaia/core/workflows/party-mode/steps/step-01-agent-loading.md +60 -9
  11. package/_gaia/creative/workflows/problem-solving/checklist.md +64 -14
  12. package/_gaia/creative/workflows/problem-solving/instructions.xml +367 -22
  13. package/_gaia/creative/workflows/problem-solving/workflow.yaml +31 -1
  14. package/_gaia/dev/agents/_base-dev.md +13 -2
  15. package/_gaia/dev/skills/_skill-index.yaml +15 -0
  16. package/_gaia/dev/skills/figma-integration.md +498 -0
  17. package/_gaia/lifecycle/templates/brownfield-scan-security-prompt.md +228 -0
  18. package/_gaia/lifecycle/templates/gap-entry-schema.md +39 -4
  19. package/_gaia/lifecycle/templates/story-template.md +29 -1
  20. package/_gaia/lifecycle/workflows/2-planning/create-ux-design/instructions.xml +96 -3
  21. package/_gaia/lifecycle/workflows/4-implementation/code-review/instructions.xml +10 -0
  22. package/_gaia/lifecycle/workflows/4-implementation/create-stakeholder/checklist.md +25 -0
  23. package/_gaia/lifecycle/workflows/4-implementation/create-stakeholder/instructions.xml +79 -0
  24. package/_gaia/lifecycle/workflows/4-implementation/create-stakeholder/workflow.yaml +22 -0
  25. package/_gaia/lifecycle/workflows/4-implementation/create-story/instructions.xml +10 -0
  26. package/_gaia/lifecycle/workflows/4-implementation/dev-story/instructions.xml +10 -0
  27. package/_gaia/lifecycle/workflows/4-implementation/retrospective/instructions.xml +3 -3
  28. package/_gaia/lifecycle/workflows/4-implementation/validate-story/instructions.xml +11 -0
  29. package/_gaia/lifecycle/workflows/anytime/brownfield-onboarding/instructions.xml +11 -7
  30. package/_gaia/testing/workflows/test-gap-analysis/checklist.md +8 -0
  31. package/_gaia/testing/workflows/test-gap-analysis/instructions.xml +53 -0
  32. package/_gaia/testing/workflows/test-gap-analysis/workflow.yaml +38 -0
  33. package/bin/gaia-framework.js +36 -2
  34. package/bin/helpers/derive-bump-label.js +41 -0
  35. package/bin/helpers/validate-bump-labels.js +38 -0
  36. package/gaia-install.sh +71 -4
  37. package/package.json +1 -1
  38. package/_gaia/_memory/tier2-results/.gitkeep +0 -0
  39. package/_gaia/_memory/tier2-results/checkpoint-resume-2026-03-24.yaml +0 -6
  40. package/_gaia/_memory/tier2-results/engine-scenarios-2026-03-22.yaml +0 -14
@@ -0,0 +1,22 @@
1
+ name: create-stakeholder
2
+ description: 'Scaffold a new stakeholder file for Party Mode discussions'
3
+ module: lifecycle
4
+ agent: orchestrator
5
+ config_resolved: "{installed_path}/.resolved/create-stakeholder.yaml"
6
+ config_source: "{project-root}/_gaia/lifecycle/config.yaml"
7
+ installed_path: "{project-root}/_gaia/lifecycle/workflows/4-implementation/create-stakeholder"
8
+ instructions: "{installed_path}/instructions.xml"
9
+ validation: "{installed_path}/checklist.md"
10
+ quality_gates:
11
+ pre_start: []
12
+ post_complete:
13
+ - check: "stakeholder_file_line_count_valid == true"
14
+ on_fail: "HALT: Stakeholder file exceeds 100-line limit (FR-164). Edit {project-root}/custom/stakeholders/{slug}.md to reduce content or re-run /gaia-create-stakeholder."
15
+ - check: "stakeholder_count_in_directory_valid == true"
16
+ on_fail: "HALT: custom/stakeholders/ has more than 50 files (FR-164). Remove unused stakeholders from {project-root}/custom/stakeholders/ first."
17
+ on_error:
18
+ missing_file: "ask_user"
19
+ unresolved_variable: "halt"
20
+
21
+ output:
22
+ primary: "{project-root}/custom/stakeholders/{slug}.md"
@@ -94,6 +94,14 @@
94
94
  <step n="6" title="Generate Output">
95
95
  <action>Read the story template from the engine-resolved template path. The engine resolves this in Step 1 (Load and Resolve Config): if {project-root}/custom/templates/story-template.md exists and is non-empty, the custom template is used; otherwise it falls back to {project-root}/_gaia/lifecycle/templates/story-template.md. Use whichever path the engine resolved.</action>
96
96
  <action>Read sizing_map from {project-root}/_gaia/_config/global.yaml to resolve T-shirt size to story points (S→2, M→5, L→8, XL→13).</action>
97
+ <action>Detect invocation context to determine origin fields:
98
+ If invoked from problem-solving routing (E16-S3): set origin="problem-solving" and origin_ref to the path of the Problem Brief or problem-solving checkpoint artifact (e.g., docs/creative-artifacts/problem-solving-YYYY-MM-DD.md).
99
+ If invoked from triage routing: set origin="triage" and origin_ref to the triage artifact path.
100
+ If invoked from add-feature routing: set origin="add-feature" and origin_ref to the source artifact path.
101
+ If invoked from sprint-planning: set origin="sprint-planning" and origin_ref to the sprint plan artifact path.
102
+ If invoked with explicit origin parameters from caller: use the provided origin and origin_ref values.
103
+ If invoked normally (no routing context): set origin=null and origin_ref=null (planned work default).
104
+ </action>
97
105
  <action>Populate ALL YAML frontmatter fields from epics-and-stories.md data:
98
106
  - key: story key from epics (e.g., E1-S1)
99
107
  - title: story title
@@ -110,6 +118,8 @@
110
118
  - date: current date
111
119
  - author: agent name (e.g., "Nate (Scrum Master)")
112
120
  - priority_flag: null (default — set to "next-sprint" by add-feature for high-urgency stories)
121
+ - origin: workflow origin (null for planned work, "problem-solving" from problem-solving routing, "triage" from triage, "add-feature" from add-feature routing, "sprint-planning" from sprint planning, "manual" for explicit manual creation)
122
+ - origin_ref: path to source artifact that triggered story creation (null when origin is null)
113
123
  </action>
114
124
  <template-output file="{implementation_artifacts}/{story_key}-{story_title_slug}.md">
115
125
  Generate the story file following the story-template.md structure. The filename must use the story key and slugified title (e.g., E1-S1-user-login.md). Include complete YAML frontmatter with ALL 15 fields populated. Fill all template sections: User Story, Acceptance Criteria, Tasks/Subtasks (linked to AC numbers), Dev Notes, Technical Notes, Dependencies, Test Scenarios, Project Structure Notes, References, Dev Agent Record, and Estimate. IMPORTANT: The body "**Status:**" line MUST match the frontmatter status field exactly. Both must say the same status value.
@@ -48,6 +48,16 @@
48
48
  </action>
49
49
  <action if="mode == REWORK or mode == RESUME">Skip status change — story is already in-progress.</action>
50
50
  </step>
51
+ <step n="4b" title="Figma Design Consumption">
52
+ <action>Check for figma: metadata block in story file YAML frontmatter. Expected fields: file_key (string — Figma file identifier), pages (list of page names to extract from), node_ids (list of specific node IDs), design_version (string — Figma lastModified hash for traceability). Also check ux-design.md for a figma: metadata block.</action>
53
+ <action if="no figma: metadata block found in story file or ux-design.md">No Figma metadata present — skip all Figma-related actions. Use ux-design.md text as-is with zero behavioral change. Proceed to Step 5.</action>
54
+ <action if="figma: metadata block is present">JIT-load figma-integration.md tokens and components sections for MCP extraction.</action>
55
+ <action if="figma: metadata block is present">Check for cached responses in {project-path}/.figma-cache/ using composite cache key {file_key}:{page_id}:{design_version_hash}. If cache hit with valid version hash, use cached data. If cache miss or design version changed since last implementation, proceed with MCP extraction.</action>
56
+ <action if="figma: metadata block is present and MCP available">Extract design tokens via MCP and write design-tokens.json to {planning_artifacts}/design-system/ in W3C DTCG format. Extract component specs via MCP and write component-specs.yaml to the same directory.</action>
57
+ <action if="figma: metadata block is present and MCP unreachable">If cached data exists in {project-path}/.figma-cache/ (even with expired TTL), continue with last-known-good files and display [OFFLINE] warning: "Design data loaded from cache — MCP unreachable. Token data may be stale." Do not halt — proceed with stale data.</action>
58
+ <action if="figma: metadata block is present">Compare design_version in story figma: metadata against the current Figma file version. If the design version has changed since the last implementation run, offer an incremental update: "Design has changed since last implementation (stored: {old_version}, current: {new_version}). Run incremental token update? [y/n]". Record the design_version consumed in the story figma: metadata for traceability.</action>
59
+ <action if="figma: metadata block is present">JIT-load figma-integration.md export section. Generate stack-specific scaffolded code from intermediate files (design-tokens.json and component-specs.yaml) using the active dev agent's resolution table. Output token files and component scaffolds to {project-path} design system directory.</action>
60
+ </step>
51
61
  <step n="5" title="Plan Implementation">
52
62
  <action if="mode == REWORK">REWORK MODE — Focus plan on fixing review feedback:
53
63
  1. Read the story's Review Gate table — identify which reviews FAILED
@@ -108,14 +108,14 @@
108
108
 
109
109
  3. Apply the improvement: append to or modify the relevant section in custom/skills/{skill-name}.md with comment "<!-- Added from retro-{sprint_id}: {reason} -->".
110
110
 
111
- 4. Register in .customize.yaml: after writing the custom skill file, register it in {project-root}/_gaia/_config/agents/all-dev.customize.yaml so the engine loads from the custom path on subsequent runs.
112
- - If all-dev.customize.yaml does not exist: create it with proper YAML structure:
111
+ 4. Register in .customize.yaml: after writing the custom skill file, register it in {project-root}/custom/skills/all-dev.customize.yaml so the engine loads from the custom path on subsequent runs (ADR-020 — customization registries live alongside custom skills in custom/skills/).
112
+ - If custom/skills/all-dev.customize.yaml does not exist: create it with proper YAML structure:
113
113
  ```yaml
114
114
  skill_overrides:
115
115
  {skill-name}:
116
116
  source: "custom/skills/{skill-name}.md"
117
117
  ```
118
- - If all-dev.customize.yaml already exists: read current content, check if a skill_overrides entry for this skill already exists. If it does not exist, append the new entry under skill_overrides. Preserve all existing entries — only add the new one. If it already exists (duplicate), skip registration to prevent duplicate entries.
118
+ - If custom/skills/all-dev.customize.yaml already exists: read current content, check if a skill_overrides entry for this skill already exists. If it does not exist, append the new entry under skill_overrides. Preserve all existing entries — only add the new one. If it already exists (duplicate), skip registration to prevent duplicate entries.
119
119
  </action>
120
120
  <action>If no skill improvements identified, state: "No skill improvements identified this sprint."</action>
121
121
  </step>
@@ -45,6 +45,17 @@
45
45
  (file paths, component references, API endpoints, dependency versions).
46
46
  Verify against filesystem and ground truth (if available).
47
47
  Classify findings as CRITICAL (broken reference), WARNING (outdated), INFO (style).
48
+
49
+ (g) Origin Field Validation (optional fields — backward compatible):
50
+ The origin and origin_ref fields are OPTIONAL — stories without these fields
51
+ are valid (backward compatibility). Missing origin/origin_ref fields do NOT
52
+ cause errors and are accepted without warnings.
53
+ If the origin field IS present, validate:
54
+ - origin must be one of: "manual", "problem-solving", "triage", "add-feature",
55
+ "sprint-planning", or null. An invalid origin enum value is a CRITICAL finding.
56
+ - If origin is non-null, origin_ref must be non-empty (not null, not empty string).
57
+ A non-null origin with empty or null origin_ref is a WARNING finding.
58
+ If origin is null or absent, origin_ref is not validated (orphaned refs are acceptable).
48
59
  </action>
49
60
  </step>
50
61
  <step n="3" title="Validation Fix Loop">
@@ -124,23 +124,26 @@
124
124
  **Step 1 — Load all scan outputs:**
125
125
  Load gap entries from ALL of the following sources. For each file, if it exists, parse YAML gap entries matching the schema from gap-entry-schema.md. If a file is empty or missing, log a warning noting which scanner produced no results and continue processing the remaining files without error.
126
126
 
127
- Deep analysis scan outputs (7 files):
127
+ Deep analysis scan outputs (7 files — Step 2.5):
128
128
  - {planning_artifacts}/brownfield-scan-config-contradiction.md
129
129
  - {planning_artifacts}/brownfield-scan-dead-code.md
130
- - {planning_artifacts}/brownfield-scan-hard-coded-logic.md
131
- - {planning_artifacts}/brownfield-scan-security-endpoint.md
130
+ - {planning_artifacts}/brownfield-scan-hardcoded.md
131
+ - {planning_artifacts}/brownfield-scan-security.md
132
132
  - {planning_artifacts}/brownfield-scan-runtime-behavior.md
133
- - {planning_artifacts}/brownfield-scan-doc-code-drift.md
133
+ - {planning_artifacts}/brownfield-scan-doc-code.md
134
134
  - {planning_artifacts}/brownfield-scan-integration-seam.md
135
135
 
136
- Step 2 documentation subagent outputs:
136
+ Test execution scan output (1 file — Step 2.75):
137
+ - {planning_artifacts}/brownfield-scan-test-execution.md (failing tests as gap entries)
138
+
139
+ Step 2 documentation subagent outputs (4 files):
137
140
  - {planning_artifacts}/api-documentation.md (API gaps)
138
141
  - {planning_artifacts}/event-catalog.md (event/messaging gaps)
139
142
  - {planning_artifacts}/ux-design.md (frontend/UX gaps)
140
143
  - {planning_artifacts}/dependency-map.md (dependency gaps)
141
144
 
142
- Step 3 test execution results:
143
- - {test_artifacts}/nfr-assessment.md (test execution gap findings)
145
+ Step 3 NFR assessment:
146
+ - {test_artifacts}/nfr-assessment.md (NFR gap findings)
144
147
 
145
148
  **Step 2 — Validate entries against schema:**
146
149
  For each parsed gap entry, validate that all required fields are present: id, category, severity, title, description (or evidence), evidence_file, evidence_line, recommendation. Entries missing any required field are logged as warnings (noting the source file and which field is missing) and skipped from consolidation rather than causing a failure.
@@ -197,6 +200,7 @@
197
200
  — If infrastructure: use IR-### for infrastructure requirements, OR-### for operational requirements, and SR-### for security requirements exclusively. Do NOT use FR/NFR prefixes.
198
201
  — If platform: use BOTH ID scheme families — FR-###/NFR-### for application-layer requirements and IR-###/OR-###/SR-### for infrastructure-layer requirements. All requirement IDs are globally unique within the project — the prefix disambiguates (e.g., FR-001 and IR-001 are distinct, no collision).</action>
199
202
  <action>Read upstream artifacts to inform gap analysis:</action>
203
+ <action>— project-documentation.md → project context: tech stack, architecture patterns, conventions, detected capability flags, CI/CD summary. Use for PRD Overview section (existing project summary) and to ground gap requirements in the actual project structure.</action>
200
204
  <action>— consolidated-gaps.md → primary input: deduplicated, ranked, and code-verified gap list from Steps 3.5 and 5.5. If a '## Verification Corrections for PRD' section exists, use it to correct factual errors from contradicted claims.</action>
201
205
  <action>— nfr-assessment.md → NFR section gets real "Current Baseline" and "Target" columns</action>
202
206
  <action>— api-documentation.md (if exists) → extract API gaps (undocumented endpoints, missing validation)</action>
@@ -0,0 +1,8 @@
1
+ # Test Gap Analysis Checklist
2
+ - [ ] Execution mode determined (coverage or verification)
3
+ - [ ] Test plan scanned for test case IDs and story links
4
+ - [ ] Story files scanned for acceptance criteria
5
+ - [ ] Cross-reference completed — gaps identified
6
+ - [ ] Output follows FR-223 schema (summary count, per-story table, coverage %)
7
+ - [ ] Zero-gap case handled with "No coverage gaps detected" message
8
+ - [ ] Workflow completed within NFR-040 performance constraint (< 60 seconds)
@@ -0,0 +1,53 @@
1
+ <workflow name="test-gap-analysis">
2
+ <critical>
3
+ <mandate>Scan test-plan.md and story files to identify acceptance criteria gaps — NFR-040 requires completion in under 60 seconds</mandate>
4
+ <mandate>Output must follow the FR-223 schema: summary count, per-story gap table, coverage percentage</mandate>
5
+ <mandate>When no gaps are detected, output must state "No coverage gaps detected" with a summary count of zero</mandate>
6
+ </critical>
7
+
8
+ <step n="1" title="Determine Mode">
9
+ <action>Check the --mode argument: coverage or verification</action>
10
+ <action>If no mode specified, default to coverage mode</action>
11
+ <action>If mode is 'verification', skip to verification-mode steps (E19-S2 scope — not implemented yet)</action>
12
+ </step>
13
+
14
+ <step n="2" title="Scan Test Plan">
15
+ <action>Read {test_artifacts}/test-plan.md</action>
16
+ <action>Extract all test case IDs and their linked story keys from the test plan</action>
17
+ <action>Build a map of test_case_id -> [story_keys] for cross-referencing</action>
18
+ <action>If test-plan.md is missing, log warning: "test-plan.md not found — partial coverage analysis only" and continue with empty test case map</action>
19
+ </step>
20
+
21
+ <step n="3" title="Scan Story Files">
22
+ <action>Scan all story files in docs/implementation-artifacts/ matching pattern {story_key}-*.md</action>
23
+ <action>For each story file, extract all acceptance criteria (AC items) from the "Acceptance Criteria" section</action>
24
+ <action>Build a map of story_key -> [AC items] with their identifiers (AC1, AC2, etc.)</action>
25
+ <action>Track untested acceptance criteria — ACs that have no matching test case in the test plan map</action>
26
+ </step>
27
+
28
+ <step n="4" title="Cross-Reference and Identify Gaps">
29
+ <action>For each story's acceptance criteria, check if a corresponding test case exists in the test plan</action>
30
+ <action>Flag each AC as covered (has test case) or uncovered-ac (no test case found)</action>
31
+ <action>Calculate coverage rate per story: covered_ACs / total_ACs</action>
32
+ <action>Calculate overall coverage percentage across all stories</action>
33
+ </step>
34
+
35
+ <step n="5" title="Generate Output (FR-223 Schema)">
36
+ <action>Generate the output artifact at {test_artifacts}/test-gap-analysis-{date}.md</action>
37
+ <action>Output must include the following FR-223 schema sections:
38
+ - Summary section with total count of stories analyzed, ACs scanned, gaps found, and overall coverage percentage
39
+ - Per-story gap table listing each story, its ACs, and their coverage status
40
+ - Coverage rate breakdown showing covered vs uncovered acceptance criteria
41
+ </action>
42
+ <action>If zero gaps are detected (all ACs have corresponding test cases):
43
+ Output "No coverage gaps detected" in the summary section with a gap count of 0 and coverage rate of 100%</action>
44
+ <template-output file="{test_artifacts}/test-gap-analysis-{date}.md">
45
+ Gap analysis report following FR-223 schema with summary count, per-story gap table, and coverage percentage.
46
+ </template-output>
47
+ </step>
48
+
49
+ <step n="6" title="Performance Validation">
50
+ <action>Verify workflow completed within the NFR-040 constraint of under 60 seconds</action>
51
+ <action>Log total execution time in the output footer</action>
52
+ </step>
53
+ </workflow>
@@ -0,0 +1,38 @@
1
+ name: test-gap-analysis
2
+ description: 'Scan test suite against requirements to identify untested or under-tested areas'
3
+ module: testing
4
+ agent: test-architect
5
+ execution_mode: planning
6
+ modes:
7
+ - coverage
8
+ - verification
9
+ default_mode: coverage
10
+ config_resolved: "{installed_path}/.resolved/test-gap-analysis.yaml"
11
+ config_source: "{project-root}/_gaia/testing/config.yaml"
12
+ installed_path: "{project-root}/_gaia/testing/workflows/test-gap-analysis"
13
+ instructions: "{installed_path}/instructions.xml"
14
+ validation: "{installed_path}/checklist.md"
15
+ traces_to:
16
+ - FR-221
17
+ - FR-223
18
+ - NFR-040
19
+ performance_constraints:
20
+ max_duration_seconds: 60 # NFR-040 — workflow must complete in under 60 seconds
21
+ input_file_patterns:
22
+ test_plan:
23
+ whole: "{test_artifacts}/test-plan.md"
24
+ load_strategy: "FULL_LOAD"
25
+ story_files:
26
+ whole: "{implementation_artifacts}/*.md"
27
+ load_strategy: "INDEX_GUIDED"
28
+ architecture:
29
+ whole: "{planning_artifacts}/architecture.md"
30
+ load_strategy: "INDEX_GUIDED"
31
+ sprint_status:
32
+ whole: "{implementation_artifacts}/sprint-status.yaml"
33
+ load_strategy: "FULL_LOAD"
34
+ output:
35
+ primary: "{test_artifacts}/test-gap-analysis-{date}.md"
36
+ on_error:
37
+ missing_file: "warn_and_continue"
38
+ unresolved_variable: "halt"
@@ -158,6 +158,8 @@ Commands:
158
158
  status Show installation info
159
159
 
160
160
  Options:
161
+ --branch <name> Clone from a specific branch
162
+ --staging Shorthand for --branch staging
161
163
  --yes Skip confirmation prompts
162
164
  --dry-run Show what would be done without making changes
163
165
  --verbose Show detailed progress
@@ -207,6 +209,32 @@ function main(deps) {
207
209
  fail(`Unknown command: ${command}\n Run 'npx gaia-framework --help' for usage.`);
208
210
  }
209
211
 
212
+ // ─── Branch flag parsing (E14-S1) ─────────────────────────────────────────
213
+ // Extract --branch / --staging before building the passthrough array.
214
+ // These flags control which git branch is cloned — they are consumed by the
215
+ // JS CLI and forwarded as --branch <name> to gaia-install.sh.
216
+ let branchValue = null;
217
+ const remaining = args.slice(0);
218
+
219
+ const branchIdx = remaining.indexOf("--branch");
220
+ const hasStaging = remaining.includes("--staging");
221
+
222
+ if (branchIdx >= 0 && hasStaging) {
223
+ fail("Cannot use --branch and --staging together. Use --branch staging instead.");
224
+ }
225
+
226
+ if (branchIdx >= 0) {
227
+ const valueIdx = branchIdx + 1;
228
+ if (valueIdx >= remaining.length || remaining[valueIdx].startsWith("--")) {
229
+ fail("Missing value for --branch flag. Usage: --branch <name>");
230
+ }
231
+ branchValue = remaining[valueIdx];
232
+ remaining.splice(branchIdx, 2);
233
+ } else if (hasStaging) {
234
+ branchValue = "staging";
235
+ remaining.splice(remaining.indexOf("--staging"), 1);
236
+ }
237
+
210
238
  // Ensure git is available
211
239
  ensureGit();
212
240
 
@@ -237,8 +265,9 @@ function main(deps) {
237
265
 
238
266
  info("Cloning GAIA framework from GitHub...");
239
267
 
268
+ const branchCloneFlag = branchValue ? ` --branch ${branchValue}` : "";
240
269
  try {
241
- _exec(`git clone --depth 1 ${REPO_URL} "${tempDir}"`, {
270
+ _exec(`git clone --depth 1${branchCloneFlag} ${REPO_URL} "${tempDir}"`, {
242
271
  stdio: ["ignore", "ignore", "pipe"],
243
272
  });
244
273
  } catch (err) {
@@ -256,10 +285,15 @@ function main(deps) {
256
285
 
257
286
  // Build the shell command: inject --source pointing to the temp clone
258
287
  // so the shell script doesn't need to clone again
259
- const passthrough = args.slice(0);
288
+ const passthrough = remaining.slice(0);
260
289
  // Insert --source right after the command (convert to POSIX for bash on Windows)
261
290
  passthrough.splice(1, 0, "--source", toPosixPath(tempDir));
262
291
 
292
+ // Inject --branch flag for installer passthrough (E14-S1)
293
+ if (branchValue) {
294
+ passthrough.push("--branch", branchValue);
295
+ }
296
+
263
297
  // Locate bash (critical for Windows support)
264
298
  const bashPath = _findBash();
265
299
  if (!bashPath) {
@@ -0,0 +1,41 @@
1
+ /**
2
+ * Derive bump label from PR title conventional commit prefix.
3
+ * Used by .github/workflows/pr-title-label.yml (E14-S11, ADR-025).
4
+ *
5
+ * @param {string} title - PR title
6
+ * @param {string} body - PR body
7
+ * @returns {{ label: string, type: string, breaking: boolean } | null}
8
+ * Returns the derived bump label info, or null if title doesn't match.
9
+ */
10
+
11
+ const TITLE_REGEX = /^(feat|fix|refactor|perf|test|docs|chore|ci|style)(\(.+\))?(!)?: .+$/;
12
+
13
+ const TYPE_TO_LABEL = Object.freeze({
14
+ feat: "bump:minor",
15
+ fix: "bump:patch",
16
+ perf: "bump:patch",
17
+ refactor: "bump:none",
18
+ test: "bump:none",
19
+ docs: "bump:none",
20
+ chore: "bump:none",
21
+ ci: "bump:none",
22
+ style: "bump:none",
23
+ });
24
+
25
+ function deriveBumpLabel(title, body) {
26
+ const match = title.match(TITLE_REGEX);
27
+ if (!match) return null;
28
+
29
+ const type = match[1];
30
+ const bang = match[3] === "!";
31
+ const bodyBreaking = typeof body === "string" && body.includes("BREAKING CHANGE");
32
+ const breaking = bang || bodyBreaking;
33
+
34
+ const label = breaking ? "bump:major" : TYPE_TO_LABEL[type];
35
+
36
+ return { label, type, breaking };
37
+ }
38
+
39
+ module.exports = deriveBumpLabel;
40
+ module.exports.TITLE_REGEX = TITLE_REGEX;
41
+ module.exports.TYPE_TO_LABEL = TYPE_TO_LABEL;
@@ -0,0 +1,38 @@
1
+ /**
2
+ * PR bump label validation for staging merge enforcement.
3
+ * Used by .github/workflows/label-check.yml (E14-S6, ADR-025).
4
+ */
5
+
6
+ const VALID_BUMP_LABELS = Object.freeze(["bump:major", "bump:minor", "bump:patch", "bump:none"]);
7
+
8
+ /**
9
+ * Validate that exactly one bump:* label is present on a PR.
10
+ *
11
+ * @param {string[]} labels - Array of label names from the PR
12
+ * @returns {{ pass: boolean, message: string }} Validation result
13
+ */
14
+ function validateBumpLabels(labels) {
15
+ const bumpLabels = labels.filter((label) => VALID_BUMP_LABELS.includes(label));
16
+
17
+ if (bumpLabels.length === 0) {
18
+ return {
19
+ pass: false,
20
+ message: `No bump label found. Add one of: ${VALID_BUMP_LABELS.join(", ")}`,
21
+ };
22
+ }
23
+
24
+ if (bumpLabels.length > 1) {
25
+ return {
26
+ pass: false,
27
+ message: `Multiple bump labels found: ${bumpLabels.join(", ")}. Exactly one required.`,
28
+ };
29
+ }
30
+
31
+ return {
32
+ pass: true,
33
+ message: `Valid bump label: ${bumpLabels[0]}`,
34
+ };
35
+ }
36
+
37
+ module.exports = validateBumpLabels;
38
+ module.exports.VALID_BUMP_LABELS = VALID_BUMP_LABELS;
package/gaia-install.sh CHANGED
@@ -76,6 +76,7 @@ TARGET=""
76
76
  OPT_YES=false
77
77
  OPT_DRY_RUN=false
78
78
  OPT_VERBOSE=false
79
+ OPT_BRANCH=""
79
80
 
80
81
  # ─── Utility Functions ──────────────────────────────────────────────────────
81
82
 
@@ -95,7 +96,11 @@ clone_from_github() {
95
96
  fi
96
97
  TEMP_CLONE_DIR="$(mktemp -d "${TMPDIR:-/tmp}/gaia-framework-XXXXXX")"
97
98
  info "Cloning GAIA from GitHub..." >&2
98
- if git clone --depth 1 "$GITHUB_REPO" "$TEMP_CLONE_DIR" 2>/dev/null; then
99
+ local branch_args=()
100
+ if [[ -n "$OPT_BRANCH" ]]; then
101
+ branch_args=(--branch "$OPT_BRANCH")
102
+ fi
103
+ if git clone --depth 1 "${branch_args[@]}" "$GITHUB_REPO" "$TEMP_CLONE_DIR" 2>/dev/null; then
99
104
  success "Cloned to temporary directory" >&2
100
105
  else
101
106
  error "Failed to clone from $GITHUB_REPO"
@@ -495,7 +500,7 @@ cmd_init() {
495
500
 
496
501
  # Step 8: Create custom directories (ADR-020: user-owned write targets)
497
502
  step "Creating custom directories..."
498
- for cdir in skills templates; do
503
+ for cdir in skills templates stakeholders; do
499
504
  if [[ "$OPT_DRY_RUN" == true ]]; then
500
505
  detail "[dry-run] Would create: custom/$cdir/"
501
506
  else
@@ -628,6 +633,35 @@ cmd_update() {
628
633
  "_config/manifest.yaml"
629
634
  )
630
635
 
636
+ # ─── Migrate .customize.yaml files before _gaia/ overwrite (E10-S19, FR-153) ──
637
+ # Copy user customize.yaml files from _gaia/_config/agents/ to custom/skills/
638
+ # before the framework overwrite replaces _gaia/ contents with defaults.
639
+ # Copy-only semantics: originals are left in place as fallback (AC2).
640
+ step "Migrating customize.yaml files to custom/skills/..."
641
+ if [[ "$OPT_DRY_RUN" != true ]]; then
642
+ mkdir -p "$TARGET/custom/skills"
643
+ fi
644
+ local migrated=0
645
+ for cust_file in "$TARGET/_gaia/_config/agents/"*.customize.yaml; do
646
+ [[ -f "$cust_file" ]] || continue
647
+ local cust_basename
648
+ cust_basename="$(basename "$cust_file")"
649
+ if [[ -f "$TARGET/custom/skills/$cust_basename" ]]; then
650
+ [[ "$OPT_VERBOSE" == true ]] && detail "Skipped (already exists): custom/skills/$cust_basename"
651
+ continue
652
+ fi
653
+ if [[ "$OPT_DRY_RUN" == true ]]; then
654
+ detail "[dry-run] Would migrate: _gaia/_config/agents/$cust_basename → custom/skills/$cust_basename"
655
+ else
656
+ cp "$cust_file" "$TARGET/custom/skills/$cust_basename"
657
+ info "[migrate] _gaia/_config/agents/$cust_basename → custom/skills/$cust_basename"
658
+ migrated=$((migrated + 1))
659
+ fi
660
+ done
661
+ if [[ "$migrated" -gt 0 ]]; then
662
+ detail "Migrated $migrated customize.yaml file(s) to custom/skills/"
663
+ fi
664
+
631
665
  step "Updating framework files..."
632
666
  local updated=0 skipped=0 changed=0
633
667
 
@@ -690,7 +724,7 @@ cmd_update() {
690
724
  done
691
725
 
692
726
  # Ensure custom directories exist (user-owned, never overwritten — ADR-020)
693
- for cdir in skills templates; do
727
+ for cdir in skills templates stakeholders; do
694
728
  if [[ "$OPT_DRY_RUN" == true ]]; then
695
729
  [[ ! -d "$TARGET/custom/$cdir" ]] && detail "[dry-run] Would create: custom/$cdir/"
696
730
  else
@@ -740,6 +774,29 @@ cmd_update() {
740
774
  fi
741
775
  fi
742
776
 
777
+ # ─── Post-install verification: check skill references in customize.yaml (E10-S19, AC4) ──
778
+ step "Verifying skill references in custom/skills/*.customize.yaml..."
779
+ for cust_file in "$TARGET/custom/skills/"*.customize.yaml; do
780
+ [[ -f "$cust_file" ]] || continue
781
+ local cust_name
782
+ cust_name="$(basename "$cust_file")"
783
+ # Extract source: values from customize.yaml (simple grep — no full YAML parser needed)
784
+ while IFS= read -r source_line; do
785
+ # Strip key prefix, leading whitespace, and surrounding quotes via parameter expansion
786
+ local ref_path="${source_line#*source:}"
787
+ ref_path="${ref_path#"${ref_path%%[![:space:]]*}"}" # trim leading whitespace
788
+ ref_path="${ref_path#[\"\']}" # trim leading quote
789
+ ref_path="${ref_path%[\"\']}" # trim trailing quote
790
+ [[ -z "$ref_path" ]] && continue
791
+ # Resolve relative paths against TARGET
792
+ local full_path="$TARGET/$ref_path"
793
+ [[ "$ref_path" == /* ]] && full_path="$ref_path"
794
+ if [[ ! -f "$full_path" ]]; then
795
+ warn "[warn] Broken skill reference in $cust_name: $ref_path not found"
796
+ fi
797
+ done < <(grep 'source:' "$cust_file" || true)
798
+ done
799
+
743
800
  # Summary
744
801
  echo ""
745
802
  if [[ -d "$backup_dir" ]]; then
@@ -832,9 +889,10 @@ cmd_validate() {
832
889
  [[ -d "$TARGET/docs/$dir" ]]; check "Docs: $dir" $?
833
890
  done
834
891
 
835
- # Custom directories (ADR-020: user-owned write targets)
892
+ # Custom directories (ADR-020: user-owned write targets, ADR-026: stakeholder agents)
836
893
  [[ -d "$TARGET/custom/skills" ]]; check "custom/skills/ exists" $?
837
894
  [[ -d "$TARGET/custom/templates" ]]; check "custom/templates/ exists" $?
895
+ [[ -d "$TARGET/custom/stakeholders" ]]; check "custom/stakeholders/ exists" $?
838
896
 
839
897
  # Version
840
898
  local version
@@ -938,6 +996,7 @@ ${BOLD}Commands:${RESET}
938
996
 
939
997
  ${BOLD}Options:${RESET}
940
998
  --source <path> Local GAIA source (or clones from GitHub if omitted)
999
+ --branch <name> Clone from a specific branch
941
1000
  --yes Skip confirmation prompts
942
1001
  --dry-run Show what would be done without making changes
943
1002
  --verbose Show detailed progress
@@ -1011,6 +1070,14 @@ parse_args() {
1011
1070
  OPT_VERBOSE=true
1012
1071
  shift
1013
1072
  ;;
1073
+ --branch)
1074
+ if [[ -z "${2:-}" ]]; then
1075
+ error "--branch requires a branch name argument"
1076
+ exit 1
1077
+ fi
1078
+ OPT_BRANCH="$2"
1079
+ shift 2
1080
+ ;;
1014
1081
  --help|-h)
1015
1082
  usage
1016
1083
  exit 0
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "gaia-framework",
3
- "version": "1.66.0",
3
+ "version": "1.87.0",
4
4
  "description": "GAIA — Generative Agile Intelligence Architecture installer",
5
5
  "bin": {
6
6
  "gaia-framework": "./bin/gaia-framework.js"
File without changes
@@ -1,6 +0,0 @@
1
- test_name: "checkpoint-resume-reliability"
2
- date: "2026-03-24"
3
- result: "pass"
4
- observations: "All 10 test scenarios passed. Checkpoint validation (schema, checksums, legacy format), resume state reconstruction (happy path, error cases), and file modification detection (modified, deleted) all verified. 45 total tests: 31 unit + 14 Tier 2."
5
- runner: "Cleo (typescript-dev)"
6
- framework_version: "1.48.3"
@@ -1,14 +0,0 @@
1
- test_name: engine-scenarios
2
- date: "2026-03-22T00:00:00Z"
3
- result: pass
4
- observations: |
5
- All 6 ACs validated via 18 structural tests:
6
- - AC1: Step ordering verified in workflow.xml and dev-story instructions.xml (4 tests)
7
- - AC2: Checkpoint YAML schema validated with all required fields (3 tests)
8
- - AC3: Quality gate structure validated across all workflow configs (3 tests)
9
- - AC4: Variable resolution verified — all vars in known set, engine requires global.yaml (3 tests)
10
- - AC5: Execution mode switching definitions validated in workflow.xml (3 tests)
11
- - AC6: tier2-results directory exists, result YAML schema validated (2 tests)
12
- Zero regressions in existing Tier 1 tests (3486 passing).
13
- runner: vitest
14
- framework_version: "1.45.0"