@samahlstrom/forge-cli 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/README.md +175 -0
  2. package/bin/forge.js +2 -0
  3. package/dist/addons/index.d.ts +25 -0
  4. package/dist/addons/index.js +139 -0
  5. package/dist/addons/index.js.map +1 -0
  6. package/dist/commands/add.d.ts +1 -0
  7. package/dist/commands/add.js +61 -0
  8. package/dist/commands/add.js.map +1 -0
  9. package/dist/commands/doctor.d.ts +1 -0
  10. package/dist/commands/doctor.js +177 -0
  11. package/dist/commands/doctor.js.map +1 -0
  12. package/dist/commands/ingest.d.ts +24 -0
  13. package/dist/commands/ingest.js +316 -0
  14. package/dist/commands/ingest.js.map +1 -0
  15. package/dist/commands/init.d.ts +8 -0
  16. package/dist/commands/init.js +557 -0
  17. package/dist/commands/init.js.map +1 -0
  18. package/dist/commands/remove.d.ts +1 -0
  19. package/dist/commands/remove.js +42 -0
  20. package/dist/commands/remove.js.map +1 -0
  21. package/dist/commands/status.d.ts +1 -0
  22. package/dist/commands/status.js +48 -0
  23. package/dist/commands/status.js.map +1 -0
  24. package/dist/commands/upgrade.d.ts +5 -0
  25. package/dist/commands/upgrade.js +190 -0
  26. package/dist/commands/upgrade.js.map +1 -0
  27. package/dist/detect/features.d.ts +10 -0
  28. package/dist/detect/features.js +33 -0
  29. package/dist/detect/features.js.map +1 -0
  30. package/dist/detect/go.d.ts +3 -0
  31. package/dist/detect/go.js +38 -0
  32. package/dist/detect/go.js.map +1 -0
  33. package/dist/detect/index.d.ts +25 -0
  34. package/dist/detect/index.js +32 -0
  35. package/dist/detect/index.js.map +1 -0
  36. package/dist/detect/node.d.ts +3 -0
  37. package/dist/detect/node.js +99 -0
  38. package/dist/detect/node.js.map +1 -0
  39. package/dist/detect/python.d.ts +3 -0
  40. package/dist/detect/python.js +86 -0
  41. package/dist/detect/python.js.map +1 -0
  42. package/dist/index.d.ts +1 -0
  43. package/dist/index.js +51 -0
  44. package/dist/index.js.map +1 -0
  45. package/dist/render/engine.d.ts +8 -0
  46. package/dist/render/engine.js +71 -0
  47. package/dist/render/engine.js.map +1 -0
  48. package/dist/render/merge.d.ts +5 -0
  49. package/dist/render/merge.js +33 -0
  50. package/dist/render/merge.js.map +1 -0
  51. package/dist/utils/fs.d.ts +8 -0
  52. package/dist/utils/fs.js +42 -0
  53. package/dist/utils/fs.js.map +1 -0
  54. package/dist/utils/git.d.ts +3 -0
  55. package/dist/utils/git.js +31 -0
  56. package/dist/utils/git.js.map +1 -0
  57. package/dist/utils/hash.d.ts +8 -0
  58. package/dist/utils/hash.js +22 -0
  59. package/dist/utils/hash.js.map +1 -0
  60. package/dist/utils/yaml.d.ts +3 -0
  61. package/dist/utils/yaml.js +12 -0
  62. package/dist/utils/yaml.js.map +1 -0
  63. package/package.json +53 -0
  64. package/templates/addons/beads-dolt-backend/files/dolt-setup.sh +267 -0
  65. package/templates/addons/beads-dolt-backend/manifest.yaml +13 -0
  66. package/templates/addons/browser-testing/files/browser-smoke.sh +196 -0
  67. package/templates/addons/browser-testing/files/visual-qa.md +103 -0
  68. package/templates/addons/browser-testing/manifest.yaml +20 -0
  69. package/templates/addons/compliance-hipaa/files/hipaa-checks.sh +184 -0
  70. package/templates/addons/compliance-hipaa/files/hipaa-context.md +91 -0
  71. package/templates/addons/compliance-hipaa/manifest.yaml +15 -0
  72. package/templates/addons/compliance-soc2/files/soc2-checks.sh +232 -0
  73. package/templates/addons/compliance-soc2/files/soc2-context.md +147 -0
  74. package/templates/addons/compliance-soc2/manifest.yaml +15 -0
  75. package/templates/core/CLAUDE.md.hbs +70 -0
  76. package/templates/core/agents/architect.md.hbs +68 -0
  77. package/templates/core/agents/backend.md.hbs +27 -0
  78. package/templates/core/agents/frontend.md.hbs +25 -0
  79. package/templates/core/agents/quality.md.hbs +40 -0
  80. package/templates/core/agents/security.md.hbs +53 -0
  81. package/templates/core/context/project.md.hbs +60 -0
  82. package/templates/core/forge.yaml.hbs +69 -0
  83. package/templates/core/hooks/post-edit.sh.hbs +8 -0
  84. package/templates/core/hooks/pre-edit.sh.hbs +41 -0
  85. package/templates/core/hooks/session-start.sh.hbs +34 -0
  86. package/templates/core/pipeline/classify.sh.hbs +159 -0
  87. package/templates/core/pipeline/decompose.md.hbs +100 -0
  88. package/templates/core/pipeline/deliver.sh.hbs +171 -0
  89. package/templates/core/pipeline/execute.md.hbs +138 -0
  90. package/templates/core/pipeline/intake.sh.hbs +152 -0
  91. package/templates/core/pipeline/orchestrator.sh.hbs +361 -0
  92. package/templates/core/pipeline/verify.sh.hbs +160 -0
  93. package/templates/core/settings.json.hbs +55 -0
  94. package/templates/core/skill-creator.md.hbs +151 -0
  95. package/templates/core/skill-deliver.md.hbs +46 -0
  96. package/templates/core/skill-ingest.md.hbs +245 -0
  97. package/templates/presets/go/stack.md.hbs +133 -0
  98. package/templates/presets/python-fastapi/stack.md.hbs +101 -0
  99. package/templates/presets/react-next-ts/stack.md.hbs +77 -0
  100. package/templates/presets/sveltekit-ts/stack.md.hbs +116 -0
@@ -0,0 +1,151 @@
1
+ ---
2
+ name: skill-creator
3
+ description: Create new skills, modify and improve existing skills, and measure skill performance. Use when users want to create a skill from scratch, edit, or optimize an existing skill, run evals to test a skill, benchmark skill performance with variance analysis, or optimize a skill's description for better triggering accuracy.
4
+ ---
5
+
6
+ # Skill Creator
7
+
8
+ A skill for creating new skills and iteratively improving them.
9
+
10
+ At a high level, the process of creating a skill goes like this:
11
+
12
+ - Decide what you want the skill to do and roughly how it should do it
13
+ - Write a draft of the skill
14
+ - Create a few test prompts and run claude-with-access-to-the-skill on them
15
+ - Help the user evaluate the results both qualitatively and quantitatively
16
+ - Rewrite the skill based on feedback from the user's evaluation of the results
17
+ - Repeat until you're satisfied
18
+
19
+ Your job when using this skill is to figure out where the user is in this process and then jump in and help them progress through these stages.
20
+
21
+ ## Creating a skill
22
+
23
+ ### Capture Intent
24
+
25
+ Start by understanding the user's intent. The current conversation might already contain a workflow the user wants to capture (e.g., they say "turn this into a skill"). If so, extract answers from the conversation history first.
26
+
27
+ 1. What should this skill enable Claude to do?
28
+ 2. When should this skill trigger? (what user phrases/contexts)
29
+ 3. What's the expected output format?
30
+ 4. Should we set up test cases to verify the skill works?
31
+
32
+ ### Interview and Research
33
+
34
+ Proactively ask questions about edge cases, input/output formats, example files, success criteria, and dependencies. Wait to write test prompts until you've got this part ironed out.
35
+
36
+ ### Write the SKILL.md
37
+
38
+ Based on the user interview, fill in these components:
39
+
40
+ - **name**: Skill identifier (kebab-case, max 64 chars)
41
+ - **description**: When to trigger, what it does. This is the primary triggering mechanism - include both what the skill does AND specific contexts for when to use it. Make descriptions slightly "pushy" to combat undertriggering.
42
+ - **the rest of the skill**
43
+
44
+ ### Skill Writing Guide
45
+
46
+ #### Anatomy of a Skill
47
+
48
+ ```
49
+ skill-name/
50
+ ├── SKILL.md (required)
51
+ │ ├── YAML frontmatter (name, description required)
52
+ │ └── Markdown instructions
53
+ └── Bundled Resources (optional)
54
+ ├── scripts/ - Executable code for deterministic/repetitive tasks
55
+ ├── references/ - Docs loaded into context as needed
56
+ └── assets/ - Files used in output (templates, icons, fonts)
57
+ ```
58
+
59
+ #### Progressive Disclosure
60
+
61
+ Skills use a three-level loading system:
62
+ 1. **Metadata** (name + description) - Always in context (~100 words)
63
+ 2. **SKILL.md body** - In context whenever skill triggers (<500 lines ideal)
64
+ 3. **Bundled resources** - As needed (unlimited, scripts can execute without loading)
65
+
66
+ **Key patterns:**
67
+ - Keep SKILL.md under 500 lines; if approaching this limit, add hierarchy with clear pointers
68
+ - Reference files clearly from SKILL.md with guidance on when to read them
69
+ - For large reference files (>300 lines), include a table of contents
70
+
71
+ #### Writing Patterns
72
+
73
+ Prefer using the imperative form in instructions.
74
+
75
+ **Defining output formats:**
76
+ ```markdown
77
+ ## Report structure
78
+ ALWAYS use this exact template:
79
+ # [Title]
80
+ ## Executive summary
81
+ ## Key findings
82
+ ## Recommendations
83
+ ```
84
+
85
+ **Examples pattern:**
86
+ ```markdown
87
+ ## Commit message format
88
+ **Example 1:**
89
+ Input: Added user authentication with JWT tokens
90
+ Output: feat(auth): implement JWT-based authentication
91
+ ```
92
+
93
+ ### Writing Style
94
+
95
+ Try to explain to the model why things are important in lieu of heavy-handed MUSTs. Use theory of mind and try to make the skill general and not super-narrow to specific examples.
96
+
97
+ ### Test Cases
98
+
99
+ After writing the skill draft, come up with 2-3 realistic test prompts. Share them with the user for review. Save test cases to `evals/evals.json`:
100
+
101
+ ```json
102
+ {
103
+ "skill_name": "example-skill",
104
+ "evals": [
105
+ {
106
+ "id": 1,
107
+ "prompt": "User's task prompt",
108
+ "expected_output": "Description of expected result",
109
+ "files": []
110
+ }
111
+ ]
112
+ }
113
+ ```
114
+
115
+ ## Running and Evaluating Test Cases
116
+
117
+ For each test case, spawn two subagents — one with the skill, one without. Launch everything at once.
118
+
119
+ **With-skill run:**
120
+ ```
121
+ Execute this task:
122
+ - Skill path: <path-to-skill>
123
+ - Task: <eval prompt>
124
+ - Save outputs to: <workspace>/iteration-<N>/eval-<ID>/with_skill/outputs/
125
+ ```
126
+
127
+ **Baseline run** (same prompt, no skill, save to `without_skill/outputs/`).
128
+
129
+ While runs are in progress, draft quantitative assertions for each test case. Good assertions are objectively verifiable and have descriptive names.
130
+
131
+ ## Improving the Skill
132
+
133
+ 1. **Generalize from the feedback.** Don't overfit to specific examples.
134
+ 2. **Keep the prompt lean.** Remove things that aren't pulling their weight.
135
+ 3. **Explain the why.** Explain the reasoning behind instructions so the model understands importance.
136
+ 4. **Look for repeated work across test cases.** If all test cases independently wrote similar scripts, bundle that script in `scripts/`.
137
+
138
+ ### The Iteration Loop
139
+
140
+ 1. Apply improvements to the skill
141
+ 2. Rerun all test cases into a new `iteration-<N+1>/` directory
142
+ 3. Wait for the user to review
143
+ 4. Read feedback, improve again, repeat
144
+
145
+ Keep going until the user is satisfied or feedback is all empty.
146
+
147
+ ## Skill Location
148
+
149
+ New skills should be created at: `.claude/skills/<skill-name>/SKILL.md`
150
+
151
+ This project uses forge for orchestration. When creating skills for this project, place them in the standard Claude Code skills directory so they're automatically available via `/skill-name`.
@@ -0,0 +1,46 @@
1
+ # deliver
2
+
3
+ > Intake, classify, and execute tracked work through the forge pipeline.
4
+
5
+ ## Trigger
6
+
7
+ User runs `/deliver <description>` or `/deliver --flag`
8
+
9
+ ## Process
10
+
11
+ 1. Run: `bash .forge/pipeline/orchestrator.sh <user-input>`
12
+ 2. Read the JSON output line. Branch on `status`:
13
+
14
+ ### PAUSE
15
+ The pipeline needs LLM work.
16
+ - Read the file at `prompt_file` for instructions
17
+ - Read each context file listed in `context[]` from `.forge/context/`
18
+ - Execute the work described in the prompt
19
+ - Write your output to `output_file`
20
+ - Run the `resume` command to continue the pipeline
21
+
22
+ ### HUMAN_INPUT
23
+ The pipeline needs a user decision.
24
+ - Present `question` and `options` to the user via AskUserQuestion
25
+ - Get their answer (option index)
26
+ - Run the `resume` command with their answer
27
+
28
+ ### DONE
29
+ Pipeline complete.
30
+ - Report: PR URL from `pr_url`, branch from `branch`, summary from `summary`
31
+
32
+ ### ERROR
33
+ Pipeline failed.
34
+ - Report: which `stage` failed, the `error` message
35
+ - Point user to the debug file at `debug_file`
36
+ - Suggest running the `action` command to retry
37
+
38
+ 3. After handling the JSON output, if another orchestrator call is needed (PAUSE resume), repeat from step 1 with the resume command.
39
+ 4. Continue until you receive DONE or ERROR.
40
+
41
+ ## Flags
42
+
43
+ - `--quick` — Lightweight mode: skip decomposition, minimal verification
44
+ - `--hotfix` — Fast-path: skip decomposition, auto T1, minimal checks
45
+ - `--issue <N>` — Fetch GitHub issue as input
46
+ - `--resume <id>` — Resume a halted or interrupted pipeline run
@@ -0,0 +1,245 @@
1
+ ---
2
+ name: ingest
3
+ description: Parse a spec document into a structured project plan with epics, features, and atomic tasks. Use when a user has a PRD, technical spec, or requirements document and wants to decompose it into executable work before writing code. Triggers on /ingest, or when user mentions ingesting a spec, parsing requirements, or planning from a document.
4
+ ---
5
+
6
+ # ingest
7
+
8
+ > Decompose a spec document into a structured project plan, generate custom skills, and execute phase-by-phase through the /deliver pipeline.
9
+
10
+ ## Trigger
11
+
12
+ User runs `/ingest <spec-id>` where spec-id corresponds to a directory in `.forge/specs/`.
13
+
14
+ ## Process
15
+
16
+ ### Step 1: Load the spec
17
+
18
+ 1. Read `.forge/specs/<spec-id>/meta.json` to get source file path, format, page count, and chunk size
19
+ 2. If `.forge/specs/<spec-id>/analysis.json` exists, read it — this contains project metadata extracted during `forge init --spec`
20
+ 3. Read the source document at `.forge/specs/<spec-id>/source.*`
21
+ - For PDFs: read in chunks using the `pages` parameter (e.g., pages 1-20, then 21-40)
22
+ - For text/markdown: read the full file (or chunk by heading if very large)
23
+
24
+ ### Step 2: Multi-pass analysis
25
+
26
+ Run four analysis passes. Each pass builds on the previous ones. Write outputs to `.forge/specs/<spec-id>/`.
27
+
28
+ #### Pass 1: Structure Extraction
29
+
30
+ Read the spec (chunk by chunk for large documents) and extract:
31
+
32
+ - **Sections**: headings, hierarchy, page ranges
33
+ - **Requirements**: SHALL/MUST/SHOULD statements with IDs
34
+ - **User stories**: As a... I want... So that...
35
+ - **Constraints**: performance, compliance, compatibility, security
36
+ - **Data entities**: models, schemas, relationships
37
+ - **Glossary**: domain-specific terms
38
+
39
+ Write to: `.forge/specs/<spec-id>/pass-1-structure.json`
40
+
41
+ For chunked documents: run pass 1 per chunk, then consolidate into a single pass-1-structure.json before proceeding.
42
+
43
+ #### Pass 2: Domain Mapping
44
+
45
+ Read pass-1 output. Identify:
46
+
47
+ - **Domains/modules**: logical groupings (e.g., auth, billing, scheduling)
48
+ - **Cross-references**: which sections feed which domains
49
+ - **Dependency graph**: which domains depend on others
50
+ - **Shared concerns**: cross-cutting things like logging, auth, multi-tenancy
51
+
52
+ Write to: `.forge/specs/<spec-id>/pass-2-domains.json`
53
+
54
+ #### Pass 3: Epic/Feature/Task Breakdown
55
+
56
+ Read pass-1 + pass-2 outputs. Decompose into:
57
+
58
+ - **Epics**: one per domain or major capability
59
+ - **Features**: logical groupings within an epic (max 5 per epic)
60
+ - **Tasks**: atomic units of work, each completable in a single `/deliver` call (max 8 per feature)
61
+
62
+ Each task gets:
63
+ - `id`: unique identifier (e.g., task-1-1-1)
64
+ - `title`: what to do
65
+ - `description`: detailed description with acceptance criteria
66
+ - `risk_tier`: T1 (low), T2 (moderate), T3 (critical) — using the same classification as the pipeline
67
+ - `dependencies`: list of task IDs that must complete first
68
+ - `files_likely`: predicted files to create or modify
69
+ - `agent`: which agent handles this (architect, backend, frontend, quality, security)
70
+
71
+ Write to: `.forge/specs/<spec-id>/pass-3-breakdown.json`
72
+
73
+ #### Pass 4: Skill Identification
74
+
75
+ Read pass-3 output. Look for:
76
+
77
+ - **Repeated patterns**: if "create CRUD for entity X" appears 4+ times, that's a skill
78
+ - **Domain workflows**: complex multi-step processes that appear more than once
79
+ - **Boilerplate patterns**: things like "add API endpoint with validation + tests + docs"
80
+
81
+ For each identified skill:
82
+ - `name`: kebab-case identifier
83
+ - `description`: what it does and when to trigger
84
+ - `pattern`: the repeated work it captures
85
+ - `estimated_savings`: how many tasks it simplifies
86
+
87
+ Write to: `.forge/specs/<spec-id>/pass-4-skills.json`
88
+
89
+ ### Step 3: Synthesize
90
+
91
+ Combine all pass outputs into a single `spec.yaml`:
92
+
93
+ ```yaml
94
+ version: 1
95
+ spec_id: "<id>"
96
+ status: "draft"
97
+ summary: "<1-2 sentence project summary>"
98
+
99
+ domains:
100
+ - id: "dom-<name>"
101
+ name: "<Domain Name>"
102
+ dependencies: ["dom-<other>"]
103
+
104
+ epics:
105
+ - id: "epic-<N>"
106
+ domain: "dom-<name>"
107
+ title: "<Epic title>"
108
+ features:
109
+ - id: "feat-<N>-<M>"
110
+ title: "<Feature title>"
111
+ tasks:
112
+ - id: "task-<N>-<M>-<K>"
113
+ title: "<Task title>"
114
+ description: "<Detailed description>"
115
+ risk_tier: "T1|T2|T3"
116
+ dependencies: []
117
+ files_likely: []
118
+ agent: "architect|backend|frontend|quality|security"
119
+
120
+ execution_plan:
121
+ phases:
122
+ - id: "phase-<N>"
123
+ name: "<Phase name>"
124
+ epics: ["epic-<N>"]
125
+ rationale: "<Why this phase comes here>"
126
+ parallelizable: true|false
127
+
128
+ total_tasks: <count>
129
+ critical_path: ["task-...", "task-..."]
130
+
131
+ generated_skills: ["skill-name-1", "skill-name-2"]
132
+
133
+ constraints:
134
+ - "<Hard constraint from the spec>"
135
+ ```
136
+
137
+ Write to: `.forge/specs/<spec-id>/spec.yaml`
138
+
139
+ ### Step 4: Review Gate
140
+
141
+ Present the plan to the user. Show:
142
+
143
+ ```
144
+ Spec Analysis Complete
145
+ ═══════════════════════
146
+
147
+ Domains: 6
148
+ Epics: 8
149
+ Features: 31
150
+ Tasks: 52
151
+ Phases: 4
152
+ Custom skills: 4
153
+ Risk profile: 23 T3, 18 T2, 11 T1
154
+
155
+ Phase 1 — Foundation
156
+ <epics and task count>
157
+
158
+ Phase 2 — Core Features
159
+ <epics and task count>
160
+
161
+ ...
162
+
163
+ Constraints:
164
+ • <constraint 1>
165
+ • <constraint 2>
166
+
167
+ Custom skills to generate:
168
+ • <skill-1>: <what it does>
169
+ • <skill-2>: <what it does>
170
+ ```
171
+
172
+ Ask: **Approve, refine, or cancel?**
173
+
174
+ ### Step 5: Refinement (if needed)
175
+
176
+ If the user wants changes:
177
+ 1. Take their feedback as natural language
178
+ 2. Re-run the affected passes with the feedback as additional context
179
+ 3. Update spec.yaml
180
+ 4. Return to the review gate
181
+
182
+ Common refinement requests:
183
+ - "Split epic X into two epics"
184
+ - "Move module Y to an earlier phase"
185
+ - "Add a constraint about Z"
186
+ - "This task is too big, break it down further"
187
+ - "Remove the reporting module, that's out of scope"
188
+
189
+ Loop until the user approves.
190
+
191
+ ### Step 6: Generate Skills
192
+
193
+ After approval, for each skill identified in pass-4:
194
+
195
+ 1. Create `.claude/skills/<skill-name>/SKILL.md` with proper frontmatter
196
+ 2. Write the skill body with:
197
+ - Step-by-step instructions specific to this project's stack
198
+ - References to project context files
199
+ - Output format expectations
200
+ - Examples from the spec
201
+
202
+ Use the `/skill-creator` skill's patterns for writing good skill files.
203
+
204
+ ### Step 7: Execution Handoff
205
+
206
+ After skills are generated, present the execution plan:
207
+
208
+ ```
209
+ Ready to execute. How would you like to proceed?
210
+
211
+ 1. Phase-by-phase (recommended) — execute one phase at a time with review gates
212
+ 2. Full auto — execute all phases sequentially
213
+ 3. Manual — I'll run /deliver for individual tasks myself
214
+ ```
215
+
216
+ **Phase-by-phase mode** (recommended):
217
+ - For each phase, iterate through its epics and tasks in dependency order
218
+ - For each task, construct a `/deliver` call with the task's description from spec.yaml
219
+ - After each phase completes, update spec.yaml status and ask user to review before next phase
220
+ - Parallelizable phases: tasks without inter-dependencies can be delivered concurrently via subagents
221
+
222
+ **Constructing deliver calls from spec.yaml tasks**:
223
+ ```
224
+ /deliver "<task.title> — <task.description>. Target files: <task.files_likely>. Risk: <task.risk_tier>. Spec ref: <task.id>"
225
+ ```
226
+
227
+ ### Resuming
228
+
229
+ If the session is interrupted, the user can run `/ingest <spec-id>` again. Check `spec.yaml` status:
230
+ - `pending-analysis`: start from pass 1
231
+ - `draft`: show the review gate
232
+ - `approved`: resume skill generation or execution
233
+ - `in-progress`: find the last completed phase and resume from the next one
234
+
235
+ ## Output Protocol
236
+
237
+ All intermediate outputs go to `.forge/specs/<spec-id>/`:
238
+ - `meta.json` — source file metadata
239
+ - `analysis.json` — project metadata from init
240
+ - `pass-1-structure.json` — extracted structure
241
+ - `pass-2-domains.json` — domain mapping
242
+ - `pass-3-breakdown.json` — epic/feature/task breakdown
243
+ - `pass-4-skills.json` — identified skills
244
+ - `spec.yaml` — the final synthesized plan
245
+ - `refinement-log.json` — history of user refinements
@@ -0,0 +1,133 @@
1
+ # Stack Context: Go
2
+
3
+ ## Tech Stack
4
+
5
+ - **Language**: Go 1.22+
6
+ - **Modules**: Go modules
7
+ - **Testing**: `go test` (standard library)
8
+ - **Linting**: golangci-lint
9
+ - **Formatting**: gofmt (enforced)
10
+
11
+ ## Project Structure
12
+
13
+ ```
14
+ cmd/
15
+ server/
16
+ main.go # Entry point
17
+ internal/
18
+ handler/ # HTTP handlers
19
+ user.go
20
+ user_test.go
21
+ service/ # Business logic
22
+ user.go
23
+ user_test.go
24
+ repository/ # Data access
25
+ user.go
26
+ user_test.go
27
+ model/ # Domain types
28
+ user.go
29
+ middleware/ # HTTP middleware
30
+ auth.go
31
+ logging.go
32
+ config/ # Configuration
33
+ config.go
34
+ pkg/ # Public packages (if any)
35
+ ```
36
+
37
+ ## Key Patterns
38
+
39
+ ### Handler Structure
40
+ ```go
41
+ type UserHandler struct {
42
+ service *service.UserService
43
+ }
44
+
45
+ func NewUserHandler(svc *service.UserService) *UserHandler {
46
+ return &UserHandler{service: svc}
47
+ }
48
+
49
+ func (h *UserHandler) Create(w http.ResponseWriter, r *http.Request) {
50
+ var input model.CreateUserInput
51
+ if err := json.NewDecoder(r.Body).Decode(&input); err != nil {
52
+ http.Error(w, "invalid request body", http.StatusBadRequest)
53
+ return
54
+ }
55
+
56
+ user, err := h.service.Create(r.Context(), input)
57
+ if err != nil {
58
+ // Handle specific error types
59
+ http.Error(w, err.Error(), http.StatusInternalServerError)
60
+ return
61
+ }
62
+
63
+ w.Header().Set("Content-Type", "application/json")
64
+ w.WriteHeader(http.StatusCreated)
65
+ json.NewEncoder(w).Encode(user)
66
+ }
67
+ ```
68
+
69
+ ### Error Handling
70
+ - Return errors, don't panic
71
+ - Wrap errors with context: `fmt.Errorf("create user: %w", err)`
72
+ - Use sentinel errors for known conditions
73
+ - Check errors immediately after function calls
74
+
75
+ ### Interfaces
76
+ - Define interfaces where they are USED, not where they are implemented
77
+ - Keep interfaces small (1-3 methods)
78
+ - Accept interfaces, return structs
79
+
80
+ ### Testing
81
+ ```go
82
+ func TestUserService_Create(t *testing.T) {
83
+ tests := []struct {
84
+ name string
85
+ input model.CreateUserInput
86
+ want *model.User
87
+ wantErr bool
88
+ }{
89
+ {
90
+ name: "valid input",
91
+ input: model.CreateUserInput{Name: "Alice", Email: "alice@example.com"},
92
+ want: &model.User{Name: "Alice", Email: "alice@example.com"},
93
+ },
94
+ {
95
+ name: "empty name",
96
+ input: model.CreateUserInput{Name: "", Email: "alice@example.com"},
97
+ wantErr: true,
98
+ },
99
+ }
100
+ for _, tt := range tests {
101
+ t.Run(tt.name, func(t *testing.T) {
102
+ // test implementation
103
+ })
104
+ }
105
+ }
106
+ ```
107
+
108
+ ### Context Propagation
109
+ - Pass `context.Context` as first parameter to all functions that do I/O
110
+ - Use context for cancellation, timeouts, and request-scoped values
111
+ - Never store context in structs
112
+
113
+ ### Dependency Injection
114
+ - Use constructor injection (NewXxx functions)
115
+ - Wire dependencies in `main.go`
116
+ - No DI frameworks — keep it simple
117
+
118
+ ## Anti-Patterns
119
+
120
+ - Never use `interface{}` or `any` without a strong reason
121
+ - Never ignore errors (`_ = someFunc()`)
122
+ - Never use `init()` for complex logic
123
+ - Never use package-level mutable state
124
+ - Never use `panic` for expected errors
125
+ - Never use `*` imports (dot imports)
126
+ - Never hardcode secrets — use environment variables
127
+
128
+ ## Quality Gates
129
+
130
+ - `go vet ./...` — Static analysis
131
+ - `golangci-lint run` — Comprehensive linting
132
+ - `go test ./...` — Tests
133
+ - `gofmt -w .` — Formatting (enforced)
@@ -0,0 +1,101 @@
1
+ # Stack Context: FastAPI + Python
2
+
3
+ ## Tech Stack
4
+
5
+ - **Framework**: FastAPI
6
+ - **Language**: Python 3.11+
7
+ - **Validation**: Pydantic v2
8
+ - **ORM**: SQLAlchemy 2.0 (async)
9
+ - **Testing**: pytest + pytest-asyncio
10
+ - **Linting**: ruff
11
+ - **Type checking**: mypy or pyright
12
+
13
+ ## Project Structure
14
+
15
+ ```
16
+ src/
17
+ main.py # FastAPI app creation + router mounting
18
+ config.py # Settings via pydantic-settings
19
+ routers/ # API route modules
20
+ users.py
21
+ items.py
22
+ models/ # SQLAlchemy models
23
+ user.py
24
+ item.py
25
+ schemas/ # Pydantic request/response models
26
+ user.py
27
+ item.py
28
+ services/ # Business logic
29
+ user_service.py
30
+ dependencies/ # FastAPI dependencies (DI)
31
+ auth.py
32
+ database.py
33
+ middleware/ # Custom middleware
34
+ tests/
35
+ conftest.py # Fixtures
36
+ test_users.py
37
+ test_items.py
38
+ ```
39
+
40
+ ## Key Patterns
41
+
42
+ ### Route Structure
43
+ ```python
44
+ from fastapi import APIRouter, Depends, HTTPException, status
45
+ from ..schemas.user import UserCreate, UserResponse
46
+ from ..services.user_service import UserService
47
+ from ..dependencies.auth import get_current_user
48
+
49
+ router = APIRouter(prefix="/users", tags=["users"])
50
+
51
+ @router.post("/", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
52
+ async def create_user(
53
+ data: UserCreate,
54
+ service: UserService = Depends(),
55
+ current_user = Depends(get_current_user),
56
+ ):
57
+ return await service.create(data)
58
+ ```
59
+
60
+ ### Pydantic Models (v2)
61
+ ```python
62
+ from pydantic import BaseModel, Field, ConfigDict
63
+
64
+ class UserCreate(BaseModel):
65
+ model_config = ConfigDict(strict=True)
66
+ name: str = Field(min_length=1, max_length=100)
67
+ email: str = Field(pattern=r'^[\w.-]+@[\w.-]+\.\w+$')
68
+ ```
69
+
70
+ ### Dependency Injection
71
+ - Use `Depends()` for all shared logic (auth, DB sessions, services)
72
+ - Never create DB sessions manually — inject via dependency
73
+ - Keep dependencies small and composable
74
+
75
+ ### Database Sessions
76
+ ```python
77
+ async def get_db() -> AsyncGenerator[AsyncSession, None]:
78
+ async with async_session() as session:
79
+ yield session
80
+ ```
81
+
82
+ ### Error Handling
83
+ - Use `HTTPException` with proper status codes
84
+ - Never expose internal error details to clients
85
+ - Log errors with structured logging
86
+
87
+ ## Anti-Patterns
88
+
89
+ - Never use `Any` type — always type hints
90
+ - Never use raw SQL — use SQLAlchemy ORM or `text()` with parameters
91
+ - Never hardcode secrets — use `pydantic-settings` with env vars
92
+ - Never use `*` imports
93
+ - Never use mutable default arguments
94
+ - Never use `global` variables for state
95
+
96
+ ## Quality Gates
97
+
98
+ - `mypy .` — Type checking
99
+ - `ruff check .` — Linting
100
+ - `pytest` — Tests
101
+ - `ruff format .` — Formatting