vox-core 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88)
  1. vox_core-0.2.0/.ai/COMMANDS/_shared/README.md +13 -0
  2. vox_core-0.2.0/.ai/COMMANDS/_shared/review-checklist.md +23 -0
  3. vox_core-0.2.0/.ai/COMMANDS/_shared/story-writing.md +20 -0
  4. vox_core-0.2.0/.ai/COMMANDS/check_tests.md +190 -0
  5. vox_core-0.2.0/.ai/COMMANDS/commit.md +113 -0
  6. vox_core-0.2.0/.ai/COMMANDS/create-prd.md +158 -0
  7. vox_core-0.2.0/.ai/COMMANDS/create-rules.md +155 -0
  8. vox_core-0.2.0/.ai/COMMANDS/execute.md +139 -0
  9. vox_core-0.2.0/.ai/COMMANDS/handoff.md +69 -0
  10. vox_core-0.2.0/.ai/COMMANDS/library-skill.md +77 -0
  11. vox_core-0.2.0/.ai/COMMANDS/phase-intent-check.md +60 -0
  12. vox_core-0.2.0/.ai/COMMANDS/plan.md +510 -0
  13. vox_core-0.2.0/.ai/COMMANDS/pr.md +98 -0
  14. vox_core-0.2.0/.ai/COMMANDS/prime.md +83 -0
  15. vox_core-0.2.0/.ai/COMMANDS/push.md +54 -0
  16. vox_core-0.2.0/.ai/COMMANDS/release-notes.md +83 -0
  17. vox_core-0.2.0/.ai/COMMANDS/retro.md +116 -0
  18. vox_core-0.2.0/.ai/COMMANDS/review.md +53 -0
  19. vox_core-0.2.0/.ai/COMMANDS/status-sync.md +74 -0
  20. vox_core-0.2.0/.ai/COMMANDS/tech-debt.md +73 -0
  21. vox_core-0.2.0/.ai/COMMANDS/validate.md +121 -0
  22. vox_core-0.2.0/.ai/HUMAN_RUNBOOK.md +77 -0
  23. vox_core-0.2.0/.ai/PLANS/001-push-to-talk.md +559 -0
  24. vox_core-0.2.0/.ai/PLANS/002-tray-packaging-ux.md +478 -0
  25. vox_core-0.2.0/.ai/PLANS/README.md +7 -0
  26. vox_core-0.2.0/.ai/REF/README.md +18 -0
  27. vox_core-0.2.0/.ai/REF/just-targets.md +33 -0
  28. vox_core-0.2.0/.ai/REF/plan-authoring.md +39 -0
  29. vox_core-0.2.0/.ai/REF/project-types/cli-tool.md +31 -0
  30. vox_core-0.2.0/.ai/REF/status-surfaces.md +42 -0
  31. vox_core-0.2.0/.ai/REF/testing-and-gates.md +35 -0
  32. vox_core-0.2.0/.ai/RULES.md +123 -0
  33. vox_core-0.2.0/.ai/SPECS/001-push-to-talk/PRD.md +420 -0
  34. vox_core-0.2.0/.github/dependabot.yml +18 -0
  35. vox_core-0.2.0/.github/pull_request_template.md +232 -0
  36. vox_core-0.2.0/.github/workflows/build-release-assets.yml +102 -0
  37. vox_core-0.2.0/.github/workflows/ci.yml +53 -0
  38. vox_core-0.2.0/.github/workflows/pr_template_check.yml +104 -0
  39. vox_core-0.2.0/.github/workflows/publish-pypi.yml +54 -0
  40. vox_core-0.2.0/.github/workflows/release-please.yml +25 -0
  41. vox_core-0.2.0/.gitignore +210 -0
  42. vox_core-0.2.0/.python-version +1 -0
  43. vox_core-0.2.0/.release-please-manifest.json +3 -0
  44. vox_core-0.2.0/AGENTS.md +7 -0
  45. vox_core-0.2.0/CHANGELOG.md +14 -0
  46. vox_core-0.2.0/LICENSE +21 -0
  47. vox_core-0.2.0/PKG-INFO +116 -0
  48. vox_core-0.2.0/README.md +85 -0
  49. vox_core-0.2.0/docs/dev/status.md +27 -0
  50. vox_core-0.2.0/docs/ideas/2026-03-17_13-49-14__Branch-Push-to-talk-transcription-in-Python__chat.json +28 -0
  51. vox_core-0.2.0/justfile +113 -0
  52. vox_core-0.2.0/media/vox_icon.png +0 -0
  53. vox_core-0.2.0/pyproject.toml +189 -0
  54. vox_core-0.2.0/release-please-config.json +7 -0
  55. vox_core-0.2.0/scripts/.gitkeep +0 -0
  56. vox_core-0.2.0/src/vox/__init__.py +3 -0
  57. vox_core-0.2.0/src/vox/capture/__init__.py +10 -0
  58. vox_core-0.2.0/src/vox/capture/stream.py +141 -0
  59. vox_core-0.2.0/src/vox/cli.py +134 -0
  60. vox_core-0.2.0/src/vox/commands.py +150 -0
  61. vox_core-0.2.0/src/vox/config.py +454 -0
  62. vox_core-0.2.0/src/vox/gui/__init__.py +11 -0
  63. vox_core-0.2.0/src/vox/gui/stop_window.py +120 -0
  64. vox_core-0.2.0/src/vox/gui/tray.py +90 -0
  65. vox_core-0.2.0/src/vox/gui/vox_icon.png +0 -0
  66. vox_core-0.2.0/src/vox/hotkey/__init__.py +5 -0
  67. vox_core-0.2.0/src/vox/hotkey/register.py +288 -0
  68. vox_core-0.2.0/src/vox/inject/__init__.py +6 -0
  69. vox_core-0.2.0/src/vox/inject/clipboard.py +34 -0
  70. vox_core-0.2.0/src/vox/inject/keystroke.py +50 -0
  71. vox_core-0.2.0/src/vox/transcribe/__init__.py +6 -0
  72. vox_core-0.2.0/src/vox/transcribe/exceptions.py +7 -0
  73. vox_core-0.2.0/src/vox/transcribe/faster_whisper_backend.py +101 -0
  74. vox_core-0.2.0/tests/__init__.py +1 -0
  75. vox_core-0.2.0/tests/conftest.py +34 -0
  76. vox_core-0.2.0/tests/integration/__init__.py +1 -0
  77. vox_core-0.2.0/tests/integration/test_capture_transcribe.py +28 -0
  78. vox_core-0.2.0/tests/integration/test_capture_transcribe_inject.py +22 -0
  79. vox_core-0.2.0/tests/unit/__init__.py +1 -0
  80. vox_core-0.2.0/tests/unit/test_capture.py +188 -0
  81. vox_core-0.2.0/tests/unit/test_cli.py +219 -0
  82. vox_core-0.2.0/tests/unit/test_commands.py +367 -0
  83. vox_core-0.2.0/tests/unit/test_config.py +355 -0
  84. vox_core-0.2.0/tests/unit/test_hotkey.py +151 -0
  85. vox_core-0.2.0/tests/unit/test_inject.py +127 -0
  86. vox_core-0.2.0/tests/unit/test_transcribe.py +143 -0
  87. vox_core-0.2.0/uv.lock +2173 -0
  88. vox_core-0.2.0/vox.toml.example +29 -0
@@ -0,0 +1,13 @@
1
+ # Shared Command Blocks
2
+
3
+ This folder contains reusable guidance blocks referenced by runnable commands in `.ai/COMMANDS/`.
4
+
5
+ These files are not primary entry commands. Use them as referenced from:
6
+ - `commit.md`
7
+ - `push.md`
8
+ - `review.md`
9
+ - `pr.md`
10
+
11
+ Purpose:
12
+ - avoid duplicating checklists and quality bars across commands
13
+ - keep commit/PR/review expectations consistent
@@ -0,0 +1,23 @@
1
+ # Self-Review Checklist (Reusable)
2
+
3
+ Use this checklist before commit and PR.
4
+
5
+ ## Bugs / Correctness
6
+ - Does behavior match plan/spec exactly?
7
+ - Any obvious runtime errors, edge-case failures, or invalid assumptions?
8
+
9
+ ## Regressions
10
+ - Could this break existing workflows, APIs, CLI contracts, or data contracts?
11
+ - Were impacted areas re-tested?
12
+
13
+ ## Tests
14
+ - Are new/changed behaviors covered by tests?
15
+ - Are missing tests explicitly documented as risk?
16
+
17
+ ## Risks
18
+ - Security, data loss, migration, compatibility, performance, operational risk noted?
19
+ - Are rollback/remediation paths clear?
20
+
21
+ ## Evidence
22
+ - Validation commands and outcomes are captured.
23
+ - User-visible outputs are proven with direct verification commands.
@@ -0,0 +1,20 @@
1
+ # Story Writing For Commits and PRs
2
+
3
+ High-signal history explains both implementation and intent.
4
+
5
+ Use this structure:
6
+
7
+ ## Context
8
+ - What problem or gap existed?
9
+
10
+ ## Changes
11
+ - What was implemented? (major files/components only)
12
+
13
+ ## Why
14
+ - Why this approach or tradeoff?
15
+
16
+ ## Validation
17
+ - Which commands prove it works? Include outcomes.
18
+
19
+ ## Impact
20
+ - What can users/developers do now that they could not do before?
@@ -0,0 +1,190 @@
1
+ ---
2
+ description: "Audit repository tests and produce evidence-backed quality + fix plan"
3
+ ---
4
+
5
+ # Check Tests: Rigorous Test Audit Workflow
6
+
7
+ You are a `test-auditor`. Your job is to systematically inspect the repository's tests and produce a rigorous, evidence-backed audit report plus a prioritized fix plan.
8
+
9
+ ## Non-Negotiable Rules
10
+
11
+ - Do not skip steps. If you cannot perform a step, write `NOT CHECKED` and explain exactly why.
12
+ - Every finding must cite evidence: file path + line range, or exact command output snippet.
13
+ - Prefer truth over politeness. If tests are weak, say so plainly.
14
+ - No vague claims like "looks fine" or "seems ok" without evidence.
15
+
16
+ ## Goals
17
+
18
+ 1. Evaluate test quality, maintainability, and bug-catching power.
19
+ 2. Detect common problems:
20
+ - brittleness
21
+ - over-mocking
22
+ - unclear intent
23
+ - poor coverage of edge cases
24
+ - integration tests disguised as unit tests
25
+ - dependence on real network/time/fs
26
+ - flaky behavior
27
+ - slow tests
28
+ 3. Produce a ranked list of concrete improvements with suggested code-level changes.
29
+
30
+ ## Workflow
31
+
32
+ ### Step 0 - Repo Recon
33
+
34
+ - Identify language(s), test framework(s), and test layout conventions.
35
+ - Enumerate all test files (glob patterns, directories).
36
+
37
+ Required output:
38
+ - `Test Inventory` table with:
39
+ - file
40
+ - framework
41
+ - approx # tests
42
+ - notes (mocks/fixtures/heavy setup)
43
+
44
+ ### Step 1 - Run Baseline
45
+
46
+ - Run the full test suite.
47
+ - Capture:
48
+ - pass/fail
49
+ - runtime
50
+ - top 5 slowest tests (if possible)
51
+ - failure summaries
52
+ - Run a quick flake check:
53
+ - rerun tests 3x (or targeted suspicious ones) to detect flaky failures.
54
+
55
+ Required output:
56
+ - Commands executed + outputs summary.
57
+
58
+ ### Step 2 - Quality Rubric (Score Every Test File)
59
+
60
+ For each test file, score `0-2` on each dimension (total `0-20`):
61
+
62
+ - `A) Clarity/Intent`: test names + assertions make behavior obvious
63
+ - `B) AAA Structure`: Arrange/Act/Assert separation is readable
64
+ - `C) Independence`: no order dependence; minimal shared mutable state
65
+ - `D) Determinism`: doesn't rely on wall clock, randomness (without seeding), network, external services
66
+ - `E) Isolation`: unit tests don't touch DB/fs/network unless explicitly integration tests
67
+ - `F) Assertions`: meaningful asserts; avoids "no assert" tests; checks outcomes not internals
68
+ - `G) Setup Hygiene`: avoids huge fixtures; avoids brittle global fixtures; uses factories/builders
69
+ - `H) Mocking Discipline`: mocks at boundaries; avoids mocking the system under test; avoids over-mocking
70
+ - `I) Coverage of Edge Cases`: error paths, boundary values, weird inputs
71
+ - `J) Maintainability`: low duplication; helper utilities used appropriately; failures are diagnosable
72
+
73
+ For each file, include:
74
+
75
+ - a score table row
76
+ - `2-5` bullet findings with evidence (path + line range)
77
+ - one `best next improvement` suggestion
78
+
79
+ ### Step 3 - Repo-Level Patterns
80
+
81
+ Aggregate across all tests:
82
+
83
+ - top 10 recurring issues
84
+ - `Flake risks` list
85
+ - `Slow risks` list
86
+ - missing test layers: unit vs integration vs e2e; recommend a pyramid distribution appropriate to repo
87
+
88
+ ### Step 4 - Prioritized Fix Plan
89
+
90
+ Create a plan ranked by ROI:
91
+
92
+ - `P0`: fixes that reduce flakiness and increase determinism
93
+ - `P1`: fixes that increase bug-catching power (assertions, edge cases)
94
+ - `P2`: refactors that improve maintainability (fixtures, factories, helpers)
95
+
96
+ Each item must include:
97
+
98
+ - impact
99
+ - effort (`S/M/L`)
100
+ - files to touch
101
+ - example change (pseudo-code or snippet)
102
+
103
+ ### Step 5 - Optional Patch Set
104
+
105
+ Only if the human explicitly requests implementation after reviewing the audit, implement `1-3` `P0/P1` improvements as small, clean patches.
106
+
107
+ Default behavior:
108
+
109
+ - stop after reporting findings, scores, and prioritized recommendations
110
+ - do not modify code/tests without explicit follow-up approval
111
+
112
+ If implementation is explicitly requested:
113
+
114
+ - keep each patch PR-sized
115
+ - add/adjust tests without changing production behavior unless necessary
116
+ - show diffs clearly
117
+
118
+ ## Calibration Examples
119
+
120
+ Use these patterns when judging tests.
121
+
122
+ ### Example 1 - Clear Behavior vs Vague
123
+
124
+ Bad:
125
+ - `test_process(): assert process(x) != None`
126
+
127
+ Good:
128
+ - `test_process_returns_normalized_email_lowercases_and_strips_whitespace()`
129
+
130
+ ### Example 2 - Testing Implementation Details vs Outcomes
131
+
132
+ Bad:
133
+ - `assert service._cache["k"] == "v"` (pokes internals)
134
+
135
+ Good:
136
+ - `assert service.get("k") == "v"` (validates public behavior)
137
+
138
+ ### Example 3 - Brittle Time/Network vs Deterministic Seams
139
+
140
+ Bad:
141
+ - calls real `time.sleep()`, `datetime.now()`, real HTTP
142
+
143
+ Good:
144
+ - inject clock/http client; freeze time; use stub server or mock at boundary
145
+
146
+ ### Example 4 - Over-Mocking vs Boundary Mocking
147
+
148
+ Bad:
149
+ - mocks every collaborator; asserts call order and exact args everywhere
150
+
151
+ Good:
152
+ - mocks only external boundary (db/http/fs); asserts meaningful outcome + key interaction(s) only
153
+
154
+ ### Example 5 - AAA Structure
155
+
156
+ Bad:
157
+ - setup/assert interleaved; hard to see what changed
158
+
159
+ Good:
160
+ - Arrange: build inputs
161
+ - Act: call function
162
+ - Assert: check outputs/errors
163
+
164
+ ## Reference Context
165
+
166
+ Follow principles from Google's "Software Engineering at Google" testing chapters:
167
+
168
+ - prefer fast, reliable unit tests
169
+ - value maintainability and clarity
170
+
171
+ Constraint:
172
+ - do not quote directly; translate principles into the rubric and findings
173
+
174
+ ## Final Output Format
175
+
176
+ 1. Test Inventory (table)
177
+ 2. Execution Summary (commands + results)
178
+ 3. File-by-file Rubric Scores (table + per-file notes)
179
+ 4. Repo Patterns
180
+ 5. Prioritized Fix Plan
181
+ 6. Optional Patches (diffs or described edits)
182
+
183
+ Before finishing, output a `Checklist` with `✅/❌`:
184
+
185
+ - [ ] Enumerated all test files
186
+ - [ ] Ran full test suite
187
+ - [ ] Ran flake check (rerun)
188
+ - [ ] Scored every test file with rubric
189
+ - [ ] Evidence provided for every claim
190
+ - [ ] Prioritized fix plan with ROI ranking
@@ -0,0 +1,113 @@
1
+ ---
2
+ description: "Create a commit with Commitizen/Conventional Commit compliance and high-signal context"
3
+ ---
4
+
5
+ # Commit: High-Signal, Commitizen-Compliant
6
+
7
+ ## Objective
8
+
9
+ Create a single atomic commit for the intended changes with a Commitizen-compliant message that explains both **what changed** and **why it changed**.
10
+
11
+ Reusable guidance:
12
+ - `.ai/COMMANDS/_shared/story-writing.md`
13
+ - `.ai/COMMANDS/_shared/review-checklist.md`
14
+
15
+ ## Process
16
+
17
+ ### 1. Review change scope
18
+
19
+ Run `.ai/COMMANDS/review.md` first.
20
+
21
+ Branch safety check (required):
22
+ - `git branch --show-current`
23
+ - If branch is `main`, stop and create/switch to a feature branch before committing.
24
+
25
+ - Run:
26
+ - `git status`
27
+ - `git diff HEAD`
28
+ - `git status --porcelain`
29
+ - Confirm the commit scope is coherent and atomic.
30
+ - If unrelated changes are present, do not include them.
31
+
32
+ ### 2. Stage only intended files
33
+
34
+ - Add modified and untracked files that belong to this commit.
35
+ - Re-check staged scope:
36
+ - `git diff --cached --stat`
37
+ - `git diff --cached`
38
+
39
+ ### 3. Write Commitizen-compliant message
40
+
41
+ Message must follow Conventional Commits / Commitizen:
42
+
43
+ `<type>(<optional-scope>): <short imperative summary>`
44
+
45
+ Allowed types:
46
+ - `feat`, `fix`, `docs`, `style`, `refactor`, `perf`, `test`, `build`, `ci`, `chore`, `revert`
47
+
48
+ Optional breaking-change marker:
49
+ - `type(scope)!: summary`
50
+ - or `BREAKING CHANGE:` footer in body
51
+
52
+ ### 4. Tell the story in the body
53
+
54
+ Commit body is required for non-trivial changes. Include:
55
+ - Context/problem: what was missing or broken.
56
+ - Key changes: what was implemented (major files/components).
57
+ - Reasoning: why this approach was chosen.
58
+ - Outcome: what is now possible/verified.
59
+
60
+ Recommended body structure:
61
+
62
+ ```text
63
+ Context:
64
+ - ...
65
+
66
+ Changes:
67
+ - ...
68
+
69
+ Why:
70
+ - ...
71
+
72
+ Validation:
73
+ - <command/result>
74
+ - <command/result>
75
+ ```
76
+
77
+ ### 5. Add footers when relevant
78
+
79
+ - Reference plans/issues:
80
+ - `Refs: .ai/PLANS/<NNN>-<feature>.md`
81
+ - Breaking changes:
82
+ - `BREAKING CHANGE: ...`
83
+
84
+ ## Quality Bar
85
+
86
+ - Subject line is <= 72 chars, imperative, specific.
87
+ - Type/scope accurately reflect the primary intent.
88
+ - Body explains both implementation and motivation.
89
+ - Future readers (human or AI) can understand the change without re-reading the full diff.
90
+ - Message matches staged content exactly.
91
+
92
+ ## Example
93
+
94
+ ```text
95
+ docs(reboot): align rules and command references with reboot baseline
96
+
97
+ Context:
98
+ - command guidance referenced removed targets and legacy architecture paths.
99
+
100
+ Changes:
101
+ - updated `.ai/RULES.md` architecture/boundary guidance for current repository layout
102
+ - updated command/reference docs to use active `just` targets
103
+ - removed stale examples that referenced legacy render subsystem paths
104
+
105
+ Why:
106
+ - keep workflow docs actionable so validation and execution commands match real repo state
107
+
108
+ Validation:
109
+ - just quality-check (pass)
110
+ - just status (pass)
111
+
112
+ Refs: .ai/PLANS/016-example-reboot-docs-alignment.md
113
+ ```
@@ -0,0 +1,158 @@
1
+ ---
2
+ description: Create a Product Requirements Document from conversation
3
+ argument-hint: [plan-slug-or-output-path]
4
+ ---
5
+
6
+ # Create PRD: Generate Product Requirements Document
7
+
8
+ ## Overview
9
+
10
+ Generate a comprehensive Product Requirements Document (PRD) based on the current conversation context and requirements discussed. Use the structure and sections defined below to create a thorough, professional PRD.
11
+
12
+ ## Output File
13
+
14
+ Write the PRD to: `$ARGUMENTS` (default: `.ai/SPECS/<NNN>-<feature>/PRD.md`)
15
+
16
+ Default behavior:
17
+ - If an active plan exists (`.ai/PLANS/<NNN>-<feature>.md`), write to `.ai/SPECS/<NNN>-<feature>/PRD.md`.
18
+ - If `$ARGUMENTS` is a plan slug like `<NNN>-<feature>`, write to `.ai/SPECS/<NNN>-<feature>/PRD.md`.
19
+ - If `$ARGUMENTS` is a path, write exactly there.
20
+ - Never default to repo root.
21
+
22
+ ## PRD Structure
23
+
24
+ Create a well-structured PRD with the following sections. Adapt depth and detail based on available information:
25
+
26
+ ### Required Sections
27
+
28
+ **1. Executive Summary**
29
+ - Concise product overview (2-3 paragraphs)
30
+ - Core value proposition
31
+ - MVP goal statement
32
+
33
+ **2. Mission**
34
+ - Product mission statement
35
+ - Core principles (3-5 key principles)
36
+
37
+ **3. Target Users**
38
+ - Primary user personas
39
+ - Technical comfort level
40
+ - Key user needs and pain points
41
+
42
+ **4. MVP Scope**
43
+ - **In Scope:** Core functionality for MVP (use ✅ checkboxes)
44
+ - **Out of Scope:** Features deferred to future phases (use ❌ checkboxes)
45
+ - Group by categories (Core Functionality, Technical, Integration, Deployment)
46
+
47
+ **5. User Stories**
48
+ - Primary user stories (5-8 stories) in format: "As a [user], I want to [action], so that [benefit]"
49
+ - Include concrete examples for each story
50
+ - Add technical user stories if relevant
51
+
52
+ **6. Core Architecture & Patterns**
53
+ - High-level architecture approach
54
+ - Directory structure (if applicable)
55
+ - Key design patterns and principles
56
+ - Technology-specific patterns
57
+
58
+ **7. Tools/Features**
59
+ - Detailed feature specifications
60
+ - If building an agent: Tool designs with purpose, operations, and key features
61
+ - If building an app: Core feature breakdown
62
+
63
+ **8. Technology Stack**
64
+ - Backend/Frontend technologies with versions
65
+ - Dependencies and libraries
66
+ - Optional dependencies
67
+ - Third-party integrations
68
+
69
+ **9. Security & Configuration**
70
+ - Authentication/authorization approach
71
+ - Configuration management (environment variables, settings)
72
+ - Security scope (in-scope and out-of-scope)
73
+ - Deployment considerations
74
+
75
+ **10. API Specification** (if applicable)
76
+ - Endpoint definitions
77
+ - Request/response formats
78
+ - Authentication requirements
79
+ - Example payloads
80
+
81
+ **11. Success Criteria**
82
+ - MVP success definition
83
+ - Functional requirements (use ✅ checkboxes)
84
+ - Quality indicators
85
+ - User experience goals
86
+
87
+ **12. Implementation Phases**
88
+ - Break down into 3-4 phases
89
+ - Each phase includes: Goal, Deliverables (✅ checkboxes), Validation criteria
90
+ - Realistic timeline estimates
91
+
92
+ **13. Future Considerations**
93
+ - Post-MVP enhancements
94
+ - Integration opportunities
95
+ - Advanced features for later phases
96
+
97
+ **14. Risks & Mitigations**
98
+ - 3-5 key risks with specific mitigation strategies
99
+
100
+ **15. Appendix** (if applicable)
101
+ - Related documents
102
+ - Key dependencies with links
103
+ - Repository/project structure
104
+
105
+ ## Instructions
106
+
107
+ ### 1. Extract Requirements
108
+ - Review the entire conversation history
109
+ - Identify explicit requirements and implicit needs
110
+ - Note technical constraints and preferences
111
+ - Capture user goals and success criteria
112
+
113
+ ### 2. Synthesize Information
114
+ - Organize requirements into appropriate sections
115
+ - Fill in reasonable assumptions where details are missing
116
+ - Maintain consistency across sections
117
+ - Ensure technical feasibility
118
+
119
+ ### 3. Write the PRD
120
+ - Use clear, professional language
121
+ - Include concrete examples and specifics
122
+ - Use markdown formatting (headings, lists, code blocks, checkboxes)
123
+ - Add code snippets for technical sections where helpful
124
+ - Keep Executive Summary concise but comprehensive
125
+
126
+ ### 4. Quality Checks
127
+ - ✅ All required sections present
128
+ - ✅ User stories have clear benefits
129
+ - ✅ MVP scope is realistic and well-defined
130
+ - ✅ Technology choices are justified
131
+ - ✅ Implementation phases are actionable
132
+ - ✅ Success criteria are measurable
133
+ - ✅ Consistent terminology throughout
134
+
135
+ ## Style Guidelines
136
+
137
+ - **Tone:** Professional, clear, action-oriented
138
+ - **Format:** Use markdown extensively (headings, lists, code blocks, tables)
139
+ - **Checkboxes:** Use ✅ for in-scope items, ❌ for out-of-scope
140
+ - **Specificity:** Prefer concrete examples over abstract descriptions
141
+ - **Length:** Comprehensive but scannable (typically 30-60 sections worth of content)
142
+
143
+ ## Output Confirmation
144
+
145
+ After creating the PRD:
146
+ 1. Confirm the file path where it was written
147
+ 2. Provide a brief summary of the PRD contents
148
+ 3. Highlight any assumptions made due to missing information
149
+ 4. Suggest next steps (e.g., review, refinement, planning)
150
+
151
+ ## Notes
152
+
153
+ - If critical information is missing, ask clarifying questions before generating
154
+ - If neither an active plan slug nor `$ARGUMENTS` is available, ask for a slug/path before writing
155
+ - Adapt section depth based on available details
156
+ - For highly technical products, emphasize architecture and technical stack
157
+ - For user-facing products, emphasize user stories and experience
158
+ - This command contains the complete PRD template structure - no external references needed
@@ -0,0 +1,155 @@
1
+ ---
+ description: Create global rules (.ai/RULES.md) from codebase analysis
2
+ ---
3
+
4
+ # Create Global Rules
5
+ Generate or refresh `.ai/RULES.md` for Codex/Cursor by analyzing the codebase and extracting enforceable patterns.
6
+
7
+ ---
8
+
9
+ ## Objective
10
+
11
+ Create project-specific global rules that give coding agents (Codex/Cursor) context about:
12
+ - What this project is
13
+ - Technologies used
14
+ - How the code is organized
15
+ - Patterns and conventions to follow
16
+ - How to build, test, and validate
17
+
18
+ ---
19
+
20
+ ## Phase 1: DISCOVER
21
+
22
+ ### Identify Project Type
23
+
24
+ First, determine what kind of project this is:
25
+
26
+ | Type | Indicators |
27
+ |------|------------|
28
+ | Web App (Full-stack) | Separate client/server dirs, API routes |
29
+ | Web App (Frontend) | React/Vue/Svelte, no server code |
30
+ | API/Backend | Express/Fastify/etc, no frontend |
31
+ | Library/Package | `main`/`exports` in package.json, publishable |
32
+ | CLI Tool | `bin` in package.json, command-line interface |
33
+ | Monorepo | Multiple packages, workspaces config |
34
+ | Script/Automation | Standalone scripts, task-focused |
35
+
36
+ ### Analyze Configuration
37
+
38
+ Look at root configuration files:
39
+
40
+ ```
41
+ package.json → dependencies, scripts, type
42
+ tsconfig.json → TypeScript settings
43
+ vite.config.* → Build tool
44
+ *.config.js/ts → Various tool configs
45
+ ```
46
+
47
+ ### Map Directory Structure
48
+
49
+ Explore the codebase to understand organization:
50
+ - Where does source code live?
51
+ - Where are tests?
52
+ - Any shared code?
53
+ - Configuration locations?
54
+
55
+ ---
56
+
57
+ ## Phase 2: ANALYZE
58
+
59
+ ### Extract Tech Stack
60
+
61
+ From package.json and config files, identify:
62
+ - Runtime/Language (Node, Bun, Deno, browser)
63
+ - Framework(s)
64
+ - Database (if any)
65
+ - Testing tools
66
+ - Build tools
67
+ - Linting/formatting
68
+
69
+ ### Identify Patterns
70
+
71
+ Study existing code for:
72
+ - **Naming**: How are files, functions, classes named?
73
+ - **Structure**: How is code organized within files?
74
+ - **Errors**: How are errors created and handled?
75
+ - **Types**: How are types/interfaces defined?
76
+ - **Tests**: How are tests structured?
77
+
78
+ ### Find Key Files
79
+
80
+ Identify files that are important to understand:
81
+ - Entry points
82
+ - Configuration
83
+ - Core business logic
84
+ - Shared utilities
85
+ - Type definitions
86
+
87
+ ---
88
+
89
+ ## Phase 3: GENERATE
90
+
91
+ ### Create/Refresh `.ai/RULES.md`
92
+
93
+ Use the current `.ai/RULES.md` and `.ai/REF/README.md` as the starting baseline.
94
+
95
+ **Output path**: `.ai/RULES.md`
96
+
97
+ **Adapt to the project:**
98
+ - Remove sections that don't apply
99
+ - Add sections specific to this project type
100
+ - Keep it concise - focus on what's useful and enforceable for coding agents
101
+
102
+ **Key sections to include:**
103
+
104
+ 1. **Project Overview** - What is this and what does it do?
105
+ 2. **Tech Stack** - What technologies are used?
106
+ 3. **Commands** - How to dev, build, test, lint?
107
+ 4. **Structure** - How is the code organized?
108
+ 5. **Patterns** - What conventions should be followed?
109
+ 6. **Key Files** - What files are important to know?
110
+
111
+ **Optional sections (add if relevant):**
112
+ - Architecture (for complex apps)
113
+ - API endpoints (for backends)
114
+ - Component patterns (for frontends)
115
+ - Database patterns (if using a DB)
116
+ - On-demand context references
117
+ - Project-type overlays (link to `.ai/REF/project-types/*` and instruct when to read each)
118
+
119
+ ---
120
+
121
+ ## Phase 4: OUTPUT
122
+
123
+ ```markdown
124
+ ## Global Rules Updated
125
+
126
+ **File**: `.ai/RULES.md`
127
+
128
+ ### Project Type
129
+
130
+ {Detected project type}
131
+
132
+ ### Tech Stack Summary
133
+
134
+ {Key technologies detected}
135
+
136
+ ### Structure
137
+
138
+ {Brief structure overview}
139
+
140
+ ### Next Steps
141
+
142
+ 1. Review the updated `.ai/RULES.md`
143
+ 2. Add any project-specific notes
144
+ 3. Ensure `.ai/REF/*` contains package-specific guidance linked from `.ai/RULES.md`
145
+ 4. Run PRIME/PLAN/EXECUTE workflows against the updated rules
146
+ ```
147
+
148
+ ---
149
+
150
+ ## Tips
151
+
152
+ - Keep `.ai/RULES.md` focused and scannable
153
+ - Don't duplicate information that's in other docs (link instead)
154
+ - Focus on patterns and conventions, not exhaustive documentation
155
+ - Update it as the project evolves