galangal_orchestrate-0.2.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of galangal-orchestrate might be problematic.

Files changed (49)
  1. galangal/__init__.py +8 -0
  2. galangal/__main__.py +6 -0
  3. galangal/ai/__init__.py +6 -0
  4. galangal/ai/base.py +55 -0
  5. galangal/ai/claude.py +278 -0
  6. galangal/ai/gemini.py +38 -0
  7. galangal/cli.py +296 -0
  8. galangal/commands/__init__.py +42 -0
  9. galangal/commands/approve.py +187 -0
  10. galangal/commands/complete.py +268 -0
  11. galangal/commands/init.py +173 -0
  12. galangal/commands/list.py +20 -0
  13. galangal/commands/pause.py +40 -0
  14. galangal/commands/prompts.py +98 -0
  15. galangal/commands/reset.py +43 -0
  16. galangal/commands/resume.py +29 -0
  17. galangal/commands/skip.py +216 -0
  18. galangal/commands/start.py +144 -0
  19. galangal/commands/status.py +62 -0
  20. galangal/commands/switch.py +28 -0
  21. galangal/config/__init__.py +13 -0
  22. galangal/config/defaults.py +133 -0
  23. galangal/config/loader.py +113 -0
  24. galangal/config/schema.py +155 -0
  25. galangal/core/__init__.py +18 -0
  26. galangal/core/artifacts.py +66 -0
  27. galangal/core/state.py +248 -0
  28. galangal/core/tasks.py +170 -0
  29. galangal/core/workflow.py +835 -0
  30. galangal/prompts/__init__.py +5 -0
  31. galangal/prompts/builder.py +166 -0
  32. galangal/prompts/defaults/design.md +54 -0
  33. galangal/prompts/defaults/dev.md +39 -0
  34. galangal/prompts/defaults/docs.md +46 -0
  35. galangal/prompts/defaults/pm.md +75 -0
  36. galangal/prompts/defaults/qa.md +49 -0
  37. galangal/prompts/defaults/review.md +65 -0
  38. galangal/prompts/defaults/security.md +68 -0
  39. galangal/prompts/defaults/test.md +59 -0
  40. galangal/ui/__init__.py +5 -0
  41. galangal/ui/console.py +123 -0
  42. galangal/ui/tui.py +1065 -0
  43. galangal/validation/__init__.py +5 -0
  44. galangal/validation/runner.py +395 -0
  45. galangal_orchestrate-0.2.11.dist-info/METADATA +278 -0
  46. galangal_orchestrate-0.2.11.dist-info/RECORD +49 -0
  47. galangal_orchestrate-0.2.11.dist-info/WHEEL +4 -0
  48. galangal_orchestrate-0.2.11.dist-info/entry_points.txt +2 -0
  49. galangal_orchestrate-0.2.11.dist-info/licenses/LICENSE +674 -0
galangal/prompts/__init__.py
@@ -0,0 +1,5 @@
+ """Prompt building and management."""
+
+ from galangal.prompts.builder import PromptBuilder
+
+ __all__ = ["PromptBuilder"]

galangal/prompts/builder.py
@@ -0,0 +1,166 @@
+ """
+ Prompt building with project override support.
+ """
+
+ from pathlib import Path
+ from typing import Optional
+
+ from galangal.config.loader import get_project_root, get_prompts_dir, get_config
+ from galangal.core.state import Stage, WorkflowState
+ from galangal.core.artifacts import artifact_exists, read_artifact
+
+
+ class PromptBuilder:
+     """Build prompts for stages with project overrides."""
+
+     def __init__(self):
+         self.config = get_config()
+         self.project_root = get_project_root()
+         self.override_dir = get_prompts_dir()
+         self.defaults_dir = Path(__file__).parent / "defaults"
+
+     def get_stage_prompt(self, stage: Stage) -> str:
+         """Get the prompt for a stage, with project override/supplement support.
+
+         Project prompts in .galangal/prompts/ can either:
+         - Supplement the base: Include '# BASE' marker where base prompt should be inserted
+         - Override entirely: No marker = full replacement of base prompt
+         """
+         stage_lower = stage.value.lower()
+
+         # Get base prompt
+         default_path = self.defaults_dir / f"{stage_lower}.md"
+         base_prompt = ""
+         if default_path.exists():
+             base_prompt = default_path.read_text()
+
+         # Check for project prompt
+         project_path = self.override_dir / f"{stage_lower}.md"
+         if not project_path.exists():
+             return base_prompt or f"Execute the {stage.value} stage for the task."
+
+         project_prompt = project_path.read_text()
+
+         # Check for # BASE marker (supplement mode)
+         if "# BASE" in project_prompt:
+             # Split at marker and insert base prompt
+             parts = project_prompt.split("# BASE", 1)
+             header = parts[0].rstrip()
+             footer = parts[1].lstrip() if len(parts) > 1 else ""
+
+             result_parts = []
+             if header:
+                 result_parts.append(header)
+             if base_prompt:
+                 result_parts.append(base_prompt)
+             if footer:
+                 result_parts.append(footer)
+
+             return "\n\n".join(result_parts)
+
+         # No marker = full override
+         return project_prompt
+
+     def build_full_prompt(self, stage: Stage, state: WorkflowState) -> str:
+         """Build the complete prompt for a stage execution."""
+         base_prompt = self.get_stage_prompt(stage)
+         task_name = state.task_name
+
+         # Build context
+         context_parts = [
+             f"# Task: {task_name}",
+             f"# Task Type: {state.task_type.display_name()}",
+             f"# Description\n{state.task_description}",
+             f"\n# Current Stage: {stage.value}",
+             f"\n# Attempt: {state.attempt}",
+             f"\n# Artifacts Directory: {self.config.tasks_dir}/{task_name}/",
+         ]
+
+         # Add failure context
+         if state.last_failure:
+             context_parts.append(f"\n# Previous Failure\n{state.last_failure}")
+
+         # Add relevant artifacts based on stage
+         context_parts.extend(self._get_artifact_context(stage, task_name))
+
+         # Add global prompt context from config
+         if self.config.prompt_context:
+             context_parts.append(f"\n# Project Context\n{self.config.prompt_context}")
+
+         # Add stage-specific context from config
+         stage_context = self.config.stage_context.get(stage.value, "")
+         if stage_context:
+             context_parts.append(f"\n# Stage Context\n{stage_context}")
+
+         context = "\n".join(context_parts)
+         return f"{context}\n\n---\n\n{base_prompt}"
+
+     def _get_artifact_context(self, stage: Stage, task_name: str) -> list[str]:
+         """Get relevant artifact content for the stage."""
+         parts = []
+
+         # SPEC and PLAN for all stages after PM
+         if stage != Stage.PM:
+             if artifact_exists("SPEC.md", task_name):
+                 parts.append(f"\n# SPEC.md\n{read_artifact('SPEC.md', task_name)}")
+             if artifact_exists("PLAN.md", task_name):
+                 parts.append(f"\n# PLAN.md\n{read_artifact('PLAN.md', task_name)}")
+
+         # DESIGN for stages after DESIGN
+         if stage not in [Stage.PM, Stage.DESIGN]:
+             if artifact_exists("DESIGN.md", task_name):
+                 parts.append(f"\n# DESIGN.md\n{read_artifact('DESIGN.md', task_name)}")
+             elif artifact_exists("DESIGN_SKIP.md", task_name):
+                 parts.append(
+                     f"\n# Note: Design stage was skipped\n{read_artifact('DESIGN_SKIP.md', task_name)}"
+                 )
+
+         # ROLLBACK for DEV and TEST (issues to fix)
+         if stage in [Stage.DEV, Stage.TEST]:
+             if artifact_exists("ROLLBACK.md", task_name):
+                 parts.append(
+                     f"\n# ROLLBACK.md (PRIORITY - Fix these issues first!)\n{read_artifact('ROLLBACK.md', task_name)}"
+                 )
+             if artifact_exists("QA_REPORT.md", task_name):
+                 parts.append(
+                     f"\n# QA_REPORT.md (Previous run)\n{read_artifact('QA_REPORT.md', task_name)}"
+                 )
+             if artifact_exists("SECURITY_CHECKLIST.md", task_name):
+                 parts.append(
+                     f"\n# SECURITY_CHECKLIST.md (Previous run)\n{read_artifact('SECURITY_CHECKLIST.md', task_name)}"
+                 )
+             if artifact_exists("REVIEW_NOTES.md", task_name):
+                 parts.append(
+                     f"\n# REVIEW_NOTES.md (Previous run)\n{read_artifact('REVIEW_NOTES.md', task_name)}"
+                 )
+
+         # TEST_PLAN for TEST and CONTRACT stages
+         if stage in [Stage.TEST, Stage.CONTRACT]:
+             if artifact_exists("TEST_PLAN.md", task_name):
+                 parts.append(
+                     f"\n# TEST_PLAN.md\n{read_artifact('TEST_PLAN.md', task_name)}"
+                 )
+
+         # Reports for later stages
+         if stage in [Stage.QA, Stage.BENCHMARK, Stage.SECURITY, Stage.REVIEW]:
+             if artifact_exists("MIGRATION_REPORT.md", task_name):
+                 parts.append(
+                     f"\n# MIGRATION_REPORT.md\n{read_artifact('MIGRATION_REPORT.md', task_name)}"
+                 )
+             if artifact_exists("CONTRACT_REPORT.md", task_name):
+                 parts.append(
+                     f"\n# CONTRACT_REPORT.md\n{read_artifact('CONTRACT_REPORT.md', task_name)}"
+                 )
+
+         # For REVIEW, include QA and Security reports
+         if stage == Stage.REVIEW:
+             if artifact_exists("QA_REPORT.md", task_name):
+                 parts.append(
+                     f"\n# QA_REPORT.md\n{read_artifact('QA_REPORT.md', task_name)}"
+                 )
+             if artifact_exists("SECURITY_CHECKLIST.md", task_name):
+                 parts.append(
+                     f"\n# SECURITY_CHECKLIST.md\n{read_artifact('SECURITY_CHECKLIST.md', task_name)}"
+                 )
+
+         return parts
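
For reference, a standalone sketch of the supplement-vs-override behavior documented in `get_stage_prompt` above. The prompt texts are invented for illustration; only the merge rule mirrors builder.py:

```python
# Standalone illustration of the '# BASE' supplement behavior described above.
# The prompt strings are made up; only the merge logic mirrors PromptBuilder.
def merge_project_prompt(base_prompt: str, project_prompt: str) -> str:
    if "# BASE" not in project_prompt:
        return project_prompt  # no marker: full override of the default prompt
    header, _, footer = project_prompt.partition("# BASE")
    pieces = [p for p in (header.rstrip(), base_prompt, footer.lstrip()) if p]
    return "\n\n".join(pieces)


base = "# DEV Stage - Implementation\nFollow SPEC.md and PLAN.md."
supplement = (
    "Project-specific rules:\n- Use ruff for linting\n\n"
    "# BASE\n\n"
    "Also update CHANGELOG.md."
)

# Prints the project header, then the default prompt, then the project footer,
# joined with blank lines - the same shape get_stage_prompt returns.
print(merge_project_prompt(base, supplement))
```

A project file without the marker replaces the default wholesale, which is what the docstring calls a full override.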

galangal/prompts/defaults/design.md
@@ -0,0 +1,54 @@
+ # DESIGN Stage - Architecture Design
+
+ You are a Software Architect designing the implementation for a feature. Create a detailed technical design document.
+
+ ## Your Output
+
+ Create DESIGN.md in the task's artifacts directory with these sections:
+
+ ```markdown
+ # Technical Design: [Task Title]
+
+ ## Architecture Overview
+ [High-level description of the approach]
+ [How this fits into the existing system]
+
+ ## Data Model
+ [Any new or modified data structures]
+ [Database schema changes if applicable]
+
+ ## API Impact
+ [New or modified endpoints]
+ [Request/response formats]
+
+ ## Sequence Diagram
+ [Use mermaid or text-based diagram showing the flow]
+
+ ## Edge Cases
+ - [Edge case 1 and how it's handled]
+ - [Edge case 2 and how it's handled]
+
+ ## Migration Plan
+ [How to deploy this without breaking existing functionality]
+ [Rollback strategy if needed]
+ ```
+
+ ## Process
+
+ 1. Read SPEC.md and PLAN.md from context
+ 2. Analyze the codebase to understand:
+    - Current architecture
+    - Integration points
+    - Patterns to follow
+ 3. Design the solution considering:
+    - Scalability
+    - Maintainability
+    - Backward compatibility
+ 4. Write DESIGN.md
+
+ ## Important Rules
+
+ - Consider all edge cases
+ - Plan for failure scenarios
+ - Keep the design focused on the task scope
+ - Do NOT implement - only design

galangal/prompts/defaults/dev.md
@@ -0,0 +1,39 @@
+ # DEV Stage - Implementation
+
+ You are a Developer implementing a feature. Follow the SPEC.md and PLAN.md exactly.
+
+ ## Your Task
+
+ Implement all changes described in PLAN.md while satisfying the acceptance criteria in SPEC.md.
+
+ **IMPORTANT: Check for ROLLBACK.md first!** If ROLLBACK.md exists in context, this is a rollback from a later stage (QA, Security, or Review). Fix the issues documented there BEFORE continuing.
+
+ ## Process
+
+ ### If ROLLBACK.md exists (Rollback Run):
+ 1. Read ROLLBACK.md - contains issues that MUST be fixed
+ 2. Read the relevant report (QA_REPORT.md, SECURITY_CHECKLIST.md, or REVIEW_NOTES.md)
+ 3. Fix ALL issues documented in ROLLBACK.md
+ 4. Done - workflow continues to re-run validation
+
+ ### If ROLLBACK.md does NOT exist (Fresh Run):
+ 1. Read SPEC.md and PLAN.md from context
+ 2. If DESIGN.md exists, follow its architecture
+ 3. Implement each task in order
+ 4. Done - QA stage will run full verification
+
+ ## Important Rules
+
+ - ONLY implement what's in PLAN.md - nothing more
+ - Do NOT fix pre-existing issues unrelated to your task
+ - Follow existing patterns in the codebase
+ - Keep changes minimal and focused
+ - Do NOT write tests - the TEST stage handles that
+
+ ## If You Get Stuck
+
+ If you encounter ambiguity that blocks implementation:
+ 1. Write your questions to QUESTIONS.md in the task's artifacts directory
+ 2. Stop and wait for answers
+
+ Only do this for blocking ambiguity, not minor decisions.

galangal/prompts/defaults/docs.md
@@ -0,0 +1,46 @@
+ # DOCS Stage - Documentation
+
+ You are a Technical Writer updating documentation for the implemented feature.
+
+ ## Your Task
+
+ Update project documentation to reflect the changes made.
+
+ ## Your Output
+
+ Create DOCS_REPORT.md in the task's artifacts directory:
+
+ ```markdown
+ # Documentation Report: [Task Title]
+
+ ## Documentation Updates
+
+ ### Files Updated
+ | File | Change |
+ |------|--------|
+ | path/to/doc.md | Description of update |
+
+ ### Changelog Entry
+ [Changelog entry content]
+
+ ## Summary
+ [Brief description of documentation changes made]
+ ```
+
+ ## Process
+
+ 1. Review SPEC.md and the implementation
+ 2. Identify documentation that needs updating:
+    - README updates
+    - API documentation
+    - Changelog entries
+    - User guides (if applicable)
+ 3. Make the documentation updates
+ 4. Document what was changed in DOCS_REPORT.md
+
+ ## Important Rules
+
+ - Keep documentation clear and concise
+ - Update changelog with user-facing description
+ - Don't over-document - focus on what users/developers need
+ - Follow existing documentation patterns

galangal/prompts/defaults/pm.md
@@ -0,0 +1,75 @@
+ # PM Stage - Requirements Definition
+
+ You are a Product Manager analyzing a development task. Create clear specifications and an implementation plan.
+
+ ## Your Outputs
+
+ Create two files in the task's artifacts directory (see "Artifacts Directory" in context above):
+
+ ### 1. SPEC.md
+
+ ```markdown
+ # Specification: [Task Title]
+
+ ## Goal
+ [What this task accomplishes - 1-2 sentences]
+
+ ## User Impact
+ [Who benefits and how]
+
+ ## Acceptance Criteria
+ - [ ] [Specific, testable criterion]
+ - [ ] [Specific, testable criterion]
+ - [ ] [Specific, testable criterion]
+
+ ## Non-Goals
+ - [What this task explicitly does NOT do]
+
+ ## Risks
+ - [Potential issues or concerns]
+ ```
+
+ ### 2. PLAN.md
+
+ ```markdown
+ # Implementation Plan
+
+ ## Summary
+ [Brief overview of the approach]
+
+ ## Tasks
+
+ ### Changes Required
+ - [ ] [Specific task with file paths]
+ - [ ] [Specific task with file paths]
+
+ ### Testing Requirements
+ - [ ] [Describe what needs test coverage]
+
+ ## Files to Modify
+ | File | Change |
+ |------|--------|
+ | path/to/file | Description |
+
+ ## Dependencies
+ [What must be done before other things]
+ ```
+
+ ## Process
+
+ 1. Read the task description provided in context
+ 2. Search the codebase to understand:
+    - Related existing code
+    - Patterns to follow
+    - Scope of changes needed
+ 3. Write SPEC.md to the task's artifacts directory
+ 4. Write PLAN.md to the task's artifacts directory
+
+ ## Important Rules
+
+ - Be specific - include file paths, function names
+ - Keep scope focused - don't expand beyond the task
+ - Make acceptance criteria testable
+ - Follow existing codebase patterns
+ - Do NOT start implementing - only plan
+ - Do NOT create test files - testing is handled by the TEST stage

galangal/prompts/defaults/qa.md
@@ -0,0 +1,49 @@
+ # QA Stage - Quality Assurance
+
+ You are a QA Engineer verifying the implementation meets quality standards.
+
+ ## Your Task
+
+ Run comprehensive quality checks and document results.
+
+ ## Your Output
+
+ Create QA_REPORT.md in the task's artifacts directory:
+
+ ```markdown
+ # QA Report: [Task Title]
+
+ ## Summary
+ **Status:** PASS / FAIL
+
+ ## Test Results
+ [Output from test suite]
+
+ ## Code Quality
+ ### Linting
+ [Linting results]
+
+ ### Type Checking
+ [Type check results]
+
+ ## Acceptance Criteria Verification
+ - [ ] Criterion 1: PASS/FAIL
+ - [ ] Criterion 2: PASS/FAIL
+
+ ## Issues Found
+ [List any issues that need to be addressed]
+ ```
+
+ ## Process
+
+ 1. Run the project's test suite
+ 2. Run linting and type checking
+ 3. Verify each acceptance criterion from SPEC.md
+ 4. Document all results in QA_REPORT.md
+
+ ## Important Rules
+
+ - Run ALL tests, not just new ones
+ - Be thorough in checking acceptance criteria
+ - Document any issues clearly for the DEV stage to fix
+ - If tests fail, status should be FAIL

galangal/prompts/defaults/review.md
@@ -0,0 +1,65 @@
+ # REVIEW Stage - Code Review
+
+ You are a Senior Developer performing a code review.
+
+ ## Your Task
+
+ Review the implementation for code quality, maintainability, and adherence to best practices.
+
+ ## Your Output
+
+ Create REVIEW_NOTES.md in the task's artifacts directory:
+
+ ```markdown
+ # Code Review: [Task Title]
+
+ ## Summary
+ [Brief overview of the changes]
+
+ ## Review Checklist
+
+ ### Code Quality
+ - [ ] Code is readable and well-organized
+ - [ ] Functions are focused and not too long
+ - [ ] Naming is clear and consistent
+ - [ ] No unnecessary complexity
+
+ ### Best Practices
+ - [ ] Follows project coding standards
+ - [ ] Error handling is appropriate
+ - [ ] No code duplication
+ - [ ] Changes are well-scoped
+
+ ### Documentation
+ - [ ] Complex logic is commented
+ - [ ] Public APIs are documented
+
+ ## Feedback
+
+ ### Critical (Must Fix)
+ [List any critical issues, or "None"]
+
+ ### Suggestions (Nice to Have)
+ [List any suggestions]
+
+ ## Decision
+ **Result:** APPROVE / REQUEST_CHANGES
+
+ [If REQUEST_CHANGES, summarize what must be fixed]
+ ```
+
+ ## Process
+
+ 1. Review all changed files
+ 2. Check against project coding standards
+ 3. Look for potential bugs or issues
+ 4. Verify the changes match SPEC.md requirements
+ 5. Document your findings
+
+ ## Important Rules
+
+ - Be constructive in feedback
+ - Distinguish between blockers and suggestions
+ - Focus on maintainability and readability
+ - APPROVE if changes are acceptable
+ - REQUEST_CHANGES only for significant issues

galangal/prompts/defaults/security.md
@@ -0,0 +1,68 @@
+ # SECURITY Stage - Security Review
+
+ You are a Security Engineer reviewing the implementation for vulnerabilities.
+
+ ## Your Task
+
+ Perform security analysis and run automated security scans.
+
+ ## Your Output
+
+ Create SECURITY_CHECKLIST.md in the task's artifacts directory:
+
+ ```markdown
+ # Security Review: [Task Title]
+
+ ## Data Classification
+ [What sensitive data does this feature handle?]
+
+ ## Automated Scan Results
+
+ ### Dependency Audit
+ [Results from dependency vulnerability scans]
+
+ ### Secret Detection
+ [Results from secret scanning]
+
+ ### Static Analysis
+ [Results from security-focused static analysis]
+
+ ## Manual Review
+
+ ### Authentication & Authorization
+ - [ ] Proper authentication required
+ - [ ] Authorization checks in place
+
+ ### Input Validation
+ - [ ] User input sanitized
+ - [ ] No SQL injection vulnerabilities
+ - [ ] No XSS vulnerabilities
+
+ ### Data Protection
+ - [ ] Sensitive data encrypted
+ - [ ] Proper access controls
+
+ ## Findings
+ | Severity | Issue | Recommendation |
+ |----------|-------|----------------|
+ | HIGH/MEDIUM/LOW | Description | Fix |
+
+ ## Status
+ **Result:** PASS / FAIL
+
+ [If FAIL, list what must be fixed]
+ ```
+
+ ## Process
+
+ 1. Review the code changes for security issues
+ 2. Run automated security scans
+ 3. Check for common vulnerabilities (OWASP Top 10)
+ 4. Document all findings
+
+ ## Important Rules
+
+ - Check for secrets in code (API keys, passwords)
+ - Verify all user input is validated
+ - Check for injection vulnerabilities
+ - Review authentication and authorization

galangal/prompts/defaults/test.md
@@ -0,0 +1,59 @@
+ # TEST Stage - Test Implementation
+
+ You are a Test Engineer writing tests for the implemented feature.
+
+ ## Your Task
+
+ Create comprehensive tests that verify the implementation meets the acceptance criteria in SPEC.md.
+
+ ## Your Output
+
+ Create TEST_PLAN.md in the task's artifacts directory:
+
+ ```markdown
+ # Test Plan: [Task Title]
+
+ ## Test Coverage
+
+ ### Unit Tests
+ | Test | Description | File |
+ |------|-------------|------|
+ | test_xxx | Tests that... | path/to/test.py |
+
+ ### Integration Tests
+ | Test | Description | File |
+ |------|-------------|------|
+ | test_xxx | Tests that... | path/to/test.py |
+
+ ## Test Results
+
+ **Status:** PASS / FAIL
+
+ ### Summary
+ - Total tests: X
+ - Passed: X
+ - Failed: X
+
+ ### Details
+ [Output from test run]
+ ```
+
+ ## Process
+
+ 1. Read SPEC.md for acceptance criteria
+ 2. Read PLAN.md for what was implemented
+ 3. Analyze the implementation to understand what needs testing
+ 4. Write tests that verify:
+    - Core functionality works
+    - Edge cases are handled
+    - Error conditions are handled properly
+ 5. Run the tests
+ 6. Document results in TEST_PLAN.md
+
+ ## Important Rules
+
+ - Test the behavior, not the implementation details
+ - Include both happy path and error cases
+ - Follow existing test patterns in the codebase
+ - Tests should be deterministic (no flaky tests)
+ - If tests fail due to implementation bugs, report with ##BLOCKED## marker

galangal/ui/__init__.py
@@ -0,0 +1,5 @@
+ """UI components."""
+
+ from galangal.ui.console import console, print_success, print_error, print_warning, print_info
+
+ __all__ = ["console", "print_success", "print_error", "print_warning", "print_info"]