@booklib/skills 1.0.0 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CONTRIBUTING.md +122 -0
- package/README.md +20 -1
- package/ROADMAP.md +36 -0
- package/animation-at-work/evals/evals.json +44 -0
- package/animation-at-work/examples/after.md +64 -0
- package/animation-at-work/examples/before.md +35 -0
- package/animation-at-work/scripts/audit_animations.py +295 -0
- package/bin/skills.js +552 -42
- package/clean-code-reviewer/SKILL.md +109 -1
- package/clean-code-reviewer/evals/evals.json +121 -3
- package/clean-code-reviewer/examples/after.md +48 -0
- package/clean-code-reviewer/examples/before.md +33 -0
- package/clean-code-reviewer/references/api_reference.md +158 -0
- package/clean-code-reviewer/references/practices-catalog.md +282 -0
- package/clean-code-reviewer/references/review-checklist.md +254 -0
- package/clean-code-reviewer/scripts/pre-review.py +206 -0
- package/data-intensive-patterns/evals/evals.json +43 -0
- package/data-intensive-patterns/examples/after.md +61 -0
- package/data-intensive-patterns/examples/before.md +38 -0
- package/data-intensive-patterns/scripts/adr.py +213 -0
- package/data-pipelines/evals/evals.json +45 -0
- package/data-pipelines/examples/after.md +97 -0
- package/data-pipelines/examples/before.md +37 -0
- package/data-pipelines/scripts/new_pipeline.py +444 -0
- package/design-patterns/evals/evals.json +46 -0
- package/design-patterns/examples/after.md +52 -0
- package/design-patterns/examples/before.md +29 -0
- package/design-patterns/scripts/scaffold.py +807 -0
- package/domain-driven-design/SKILL.md +120 -0
- package/domain-driven-design/evals/evals.json +48 -0
- package/domain-driven-design/examples/after.md +80 -0
- package/domain-driven-design/examples/before.md +43 -0
- package/domain-driven-design/scripts/scaffold.py +421 -0
- package/effective-java/evals/evals.json +46 -0
- package/effective-java/examples/after.md +83 -0
- package/effective-java/examples/before.md +37 -0
- package/effective-java/scripts/checkstyle_setup.py +211 -0
- package/effective-kotlin/evals/evals.json +45 -0
- package/effective-kotlin/examples/after.md +36 -0
- package/effective-kotlin/examples/before.md +38 -0
- package/effective-python/SKILL.md +199 -0
- package/effective-python/evals/evals.json +44 -0
- package/effective-python/examples/after.md +56 -0
- package/effective-python/examples/before.md +40 -0
- package/effective-python/ref-01-pythonic-thinking.md +202 -0
- package/effective-python/ref-02-lists-and-dicts.md +146 -0
- package/effective-python/ref-03-functions.md +186 -0
- package/effective-python/ref-04-comprehensions-generators.md +211 -0
- package/effective-python/ref-05-classes-interfaces.md +188 -0
- package/effective-python/ref-06-metaclasses-attributes.md +209 -0
- package/effective-python/ref-07-concurrency.md +213 -0
- package/effective-python/ref-08-robustness-performance.md +248 -0
- package/effective-python/ref-09-testing-debugging.md +253 -0
- package/effective-python/ref-10-collaboration.md +175 -0
- package/effective-python/references/api_reference.md +218 -0
- package/effective-python/references/practices-catalog.md +483 -0
- package/effective-python/references/review-checklist.md +190 -0
- package/effective-python/scripts/lint.py +173 -0
- package/kotlin-in-action/evals/evals.json +43 -0
- package/kotlin-in-action/examples/after.md +53 -0
- package/kotlin-in-action/examples/before.md +39 -0
- package/kotlin-in-action/scripts/setup_detekt.py +224 -0
- package/lean-startup/evals/evals.json +43 -0
- package/lean-startup/examples/after.md +80 -0
- package/lean-startup/examples/before.md +34 -0
- package/lean-startup/scripts/new_experiment.py +286 -0
- package/microservices-patterns/SKILL.md +140 -0
- package/microservices-patterns/evals/evals.json +45 -0
- package/microservices-patterns/examples/after.md +69 -0
- package/microservices-patterns/examples/before.md +40 -0
- package/microservices-patterns/scripts/new_service.py +583 -0
- package/package.json +1 -1
- package/refactoring-ui/evals/evals.json +45 -0
- package/refactoring-ui/examples/after.md +85 -0
- package/refactoring-ui/examples/before.md +58 -0
- package/refactoring-ui/scripts/audit_css.py +250 -0
- package/skill-router/SKILL.md +142 -0
- package/skill-router/evals/evals.json +38 -0
- package/skill-router/examples/after.md +63 -0
- package/skill-router/examples/before.md +39 -0
- package/skill-router/references/api_reference.md +24 -0
- package/skill-router/references/routing-heuristics.md +89 -0
- package/skill-router/references/skill-catalog.md +156 -0
- package/skill-router/scripts/route.py +266 -0
- package/storytelling-with-data/evals/evals.json +47 -0
- package/storytelling-with-data/examples/after.md +50 -0
- package/storytelling-with-data/examples/before.md +33 -0
- package/storytelling-with-data/scripts/chart_review.py +301 -0
- package/system-design-interview/evals/evals.json +45 -0
- package/system-design-interview/examples/after.md +94 -0
- package/system-design-interview/examples/before.md +27 -0
- package/system-design-interview/scripts/new_design.py +421 -0
- package/using-asyncio-python/evals/evals.json +43 -0
- package/using-asyncio-python/examples/after.md +68 -0
- package/using-asyncio-python/examples/before.md +39 -0
- package/using-asyncio-python/scripts/check_blocking.py +270 -0
- package/web-scraping-python/evals/evals.json +46 -0
- package/web-scraping-python/examples/after.md +109 -0
- package/web-scraping-python/examples/before.md +40 -0
- package/web-scraping-python/scripts/new_scraper.py +231 -0
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# After
|
|
2
|
+
|
|
3
|
+
An MVP experiment document that identifies the riskiest assumption, defines the smallest test, specifies actionable metrics, and sets explicit pivot/persevere criteria before any code is written.
|
|
4
|
+
|
|
5
|
+
```markdown
|
|
6
|
+
## MVP Experiment: AI Expense Management — Concierge Test
|
|
7
|
+
|
|
8
|
+
### The Big Idea
|
|
9
|
+
SMB finance managers spend 3+ hours per week manually reconciling receipts.
|
|
10
|
+
We believe they will pay for a tool that eliminates that work — but we have
|
|
11
|
+
not yet validated that they will change their workflow to use it.
|
|
12
|
+
|
|
13
|
+
### Leap-of-Faith Assumptions to Test
|
|
14
|
+
|
|
15
|
+
**Value hypothesis (riskiest):**
|
|
16
|
+
> "Finance managers at 10–50 person companies will submit receipts through
|
|
17
|
+
> a new tool at least 3× per week if the categorisation is accurate."
|
|
18
|
+
|
|
19
|
+
**Growth hypothesis (secondary):**
|
|
20
|
+
> "Satisfied users will refer at least one colleague within 30 days."
|
|
21
|
+
|
|
22
|
+
### Why NOT build the full product yet
|
|
23
|
+
Building OCR + AI categorisation + ERP integrations takes 6 months and $400K.
|
|
24
|
+
If users don't form the habit of submitting receipts, none of those features
|
|
25
|
+
matter. We must validate the value hypothesis first.
|
|
26
|
+
|
|
27
|
+
### MVP Design: Concierge (Manual Behind the Scenes)
|
|
28
|
+
|
|
29
|
+
**What users experience:**
|
|
30
|
+
- Sign up form → they receive a dedicated WhatsApp number
|
|
31
|
+
- They photograph receipts and send to WhatsApp
|
|
32
|
+
- We manually categorise and respond with a structured summary within 2 hours
|
|
33
|
+
- Weekly: we email a formatted expense report (manually compiled in Google Sheets)
|
|
34
|
+
|
|
35
|
+
**What we actually build:**
|
|
36
|
+
- Landing page with sign-up form (1 day)
|
|
37
|
+
- WhatsApp Business number (1 hour)
|
|
38
|
+
- Google Sheets report template (2 hours)
|
|
39
|
+
- Total build time: ~2 days
|
|
40
|
+
|
|
41
|
+
**Participants:** 15 finance managers recruited from our network
|
|
42
|
+
|
|
43
|
+
**Duration:** 6 weeks
|
|
44
|
+
|
|
45
|
+
### Innovation Accounting — Metrics
|
|
46
|
+
|
|
47
|
+
| Metric | Baseline | Target (persevere) | Source |
|
|
48
|
+
|--------|----------|--------------------|--------|
|
|
49
|
+
| Weekly receipt submission rate | 0 | ≥ 3 receipts/user/week by week 4 | WhatsApp logs |
|
|
50
|
+
| Categorisation acceptance rate | — | ≥ 85% accepted without correction | User replies |
|
|
51
|
+
| User retention (still active week 6) | — | ≥ 70% of participants | WhatsApp logs |
|
|
52
|
+
| Referral rate | 0 | ≥ 1 referral per 5 users | Sign-up source field |
|
|
53
|
+
| Willingness to pay | 0 | ≥ 60% say "yes" at $29/mo | End-of-pilot survey |
|
|
54
|
+
|
|
55
|
+
**Vanity metrics we will NOT track:** total sign-ups, page views, social shares.
|
|
56
|
+
|
|
57
|
+
### Pivot/Persevere Decision — Week 6 Review
|
|
58
|
+
|
|
59
|
+
**Persevere** (move to automated MVP) if ALL of:
|
|
60
|
+
- Submission rate ≥ 3/week by week 4
|
|
61
|
+
- Retention ≥ 70% at week 6
|
|
62
|
+
- Willingness to pay ≥ 60%
|
|
63
|
+
|
|
64
|
+
**Pivot** (reconsider problem or segment) if ANY of:
|
|
65
|
+
- Submission rate < 1/week by week 3 (habit not forming)
|
|
66
|
+
- Retention < 40% at week 4 (users not finding value)
|
|
67
|
+
|
|
68
|
+
**Pivot options to consider:**
|
|
69
|
+
- Zoom-in: focus only on one vertical (e.g., restaurant industry) where receipt volume is higher
|
|
70
|
+
- Customer segment: target bookkeepers who file for multiple clients, not the SMB themselves
|
|
71
|
+
- Problem: pivot to approval workflows if users engage more with that step than receipt capture
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
Key improvements:
|
|
75
|
+
- Leap-of-faith assumptions are named explicitly before any engineering begins — value hypothesis and growth hypothesis are the riskiest bets, not product features (Ch 5: Leap-of-faith assumptions)
|
|
76
|
+
- Concierge MVP — two days of manual work — tests the core habit loop without six months of engineering (Ch 6: MVP is for learning, not launching)
|
|
77
|
+
- Innovation accounting table defines baseline, target, and data source for each metric before the experiment runs (Ch 7: Innovation accounting)
|
|
78
|
+
- All metrics are cohort-based and behavioural ("submission rate", "retention") — vanity metrics like total sign-ups are explicitly excluded (Ch 7: Actionable vs. vanity metrics)
|
|
79
|
+
- Explicit pivot/persevere decision criteria with a named decision date eliminate gut-feel delays (Ch 8: Pivot or persevere)
|
|
80
|
+
- Pivot catalog is pre-populated — the team has thought through options in advance rather than scrambling at the decision point (Ch 8: Pivot types)
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
# Before
|
|
2
|
+
|
|
3
|
+
A product specification for a 6-month build with no validated learning step, no hypothesis to test, and no criteria for deciding whether to continue.
|
|
4
|
+
|
|
5
|
+
```markdown
|
|
6
|
+
## Product Spec: AI-Powered Expense Management Platform
|
|
7
|
+
|
|
8
|
+
### Overview
|
|
9
|
+
We will build a comprehensive AI-powered expense management platform for SMBs.
|
|
10
|
+
The platform will replace manual receipt tracking with automatic categorization,
|
|
11
|
+
OCR scanning, policy enforcement, and CFO-level reporting.
|
|
12
|
+
|
|
13
|
+
### Features (all required for launch)
|
|
14
|
+
- Mobile app (iOS + Android) with camera OCR for receipt scanning
|
|
15
|
+
- AI categorization engine trained on 50k expense categories
|
|
16
|
+
- Multi-currency support with live FX rates
|
|
17
|
+
- Approval workflow engine with configurable multi-level sign-off
|
|
18
|
+
- Integration with QuickBooks, Xero, NetSuite, SAP
|
|
19
|
+
- Admin dashboard with 25 pre-built CFO reports
|
|
20
|
+
- SSO/SAML integration
|
|
21
|
+
- Audit log and compliance export (SOC 2 ready)
|
|
22
|
+
|
|
23
|
+
### Timeline
|
|
24
|
+
- Month 1-2: Backend API + database schema
|
|
25
|
+
- Month 3-4: Mobile app development
|
|
26
|
+
- Month 5: Integrations + admin dashboard
|
|
27
|
+
- Month 6: QA, security review, launch
|
|
28
|
+
|
|
29
|
+
### Success metric
|
|
30
|
+
5,000 paying customers within 3 months of launch.
|
|
31
|
+
|
|
32
|
+
### Next step
|
|
33
|
+
Kick off engineering sprint planning on Monday.
|
|
34
|
+
```
|
|
@@ -0,0 +1,286 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
Lean Startup — MVP Experiment Document Generator.

Usage (interactive): python new_experiment.py
Usage (one-shot): python new_experiment.py --name "X" --hypothesis "Y" \
    --mvp-type landing-page --metric "signups" \
    --threshold "100 signups" --duration "2 weeks" \
    --output experiment.md
"""

import argparse
import sys
from datetime import date, timedelta
from pathlib import Path

# The four supported experiment styles; doubles as the valid --mvp-type choices.
MVP_TYPES = ["concierge", "wizard-of-oz", "landing-page", "smoke-test"]

# One-paragraph explanation of each MVP type, inserted verbatim into the
# "MVP Design" section of the rendered document.
MVP_DESCRIPTIONS = {
    "concierge": (
        "Manually deliver the service to early customers without automation. "
        "You act as the 'product' to learn what customers actually want before building."
    ),
    "wizard-of-oz": (
        "Present a working product interface to customers, but fulfil requests manually "
        "behind the scenes. Validates demand without engineering the full solution."
    ),
    "landing-page": (
        "Publish a description and sign-up form for a product that does not yet exist. "
        "Measures intent-to-use at near-zero cost."
    ),
    "smoke-test": (
        "Run a small paid-acquisition experiment (e.g., Google Ads) to a landing page. "
        "Measures real purchase intent before building anything."
    ),
}

# Per-type rationale for deferring the full build, inserted verbatim into the
# "Why NOT Build the Full Product Yet" section of the rendered document.
WHY_NOT_BUILD = {
    "concierge": (
        "Building the full automated product before understanding the exact workflow "
        "customers need risks expensive re-work. Manual delivery surfaces the real "
        "job-to-be-done faster and cheaper."
    ),
    "wizard-of-oz": (
        "Engineering the back-end automation is costly and time-consuming. "
        "Validating that customers use and value the front-end experience first "
        "eliminates the biggest unknown before we invest in automation."
    ),
    "landing-page": (
        "Writing code for a product nobody wants is pure waste. "
        "A landing page lets us measure whether the value proposition resonates "
        "with the target segment in days, not months."
    ),
    "smoke-test": (
        "Acquiring real paying customers validates both the value hypothesis and "
        "the growth hypothesis simultaneously. Building first would delay this "
        "signal by weeks and obscure whether demand is organic or forced."
    ),
}

# Standard pivot catalog pre-populated into every generated document, so the
# team has options listed before the pivot/persevere decision point.
PIVOT_OPTIONS = [
    "Zoom-in pivot: narrow scope to the single highest-value feature",
    "Customer-segment pivot: target a different customer segment with the same product",
    "Value-capture pivot: change the monetisation model (subscription vs. one-time)",
    "Channel pivot: switch acquisition channel (e.g., outbound sales → content marketing)",
    "Technology pivot: solve the same problem with a different underlying technology",
    "Platform pivot: move from application to platform or vice versa",
]
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def prompt(label: str, default: str = "") -> str:
    """Ask for one line of input; fall back to *default* when one is set.

    Re-prompts until a non-empty value is entered when no default exists.
    """
    # Show the "[default]" hint only when a default is available.
    hint = f" [{default}]" if default else ""
    question = f"{label}{hint}: "
    while True:
        answer = input(question).strip()
        if answer or default:
            return answer or default
        print(" (required — please enter a value)")
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def prompt_choice(label: str, choices: list[str]) -> str:
    """Display a numbered menu for *choices* and return the selected item."""
    print(f"\n{label}")
    for index, option in enumerate(choices, 1):
        print(f"  {index}. {option}")
    valid = range(1, len(choices) + 1)
    while True:
        raw = input("Enter number: ").strip()
        if raw.isdigit() and int(raw) in valid:
            return choices[int(raw) - 1]
        print(f"  Please enter a number between 1 and {len(choices)}.")
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def gather_interactive() -> dict:
    """Collect all experiment fields from the user via interactive prompts.

    Returns a dict with the keys `render_document` expects. Prompt order is
    preserved by the dict literal (insertion order == evaluation order).
    """
    print("\n=== Lean Startup — New MVP Experiment ===\n")
    print("Answer each prompt. Press Enter to accept the default.\n")
    return {
        "name": prompt("Product / feature name"),
        "value_hypothesis": prompt(
            "Value hypothesis (customers will value X because Y)"
        ),
        "growth_hypothesis": prompt(
            "Growth hypothesis (new customers will find us via ...)"
        ),
        "mvp_type": prompt_choice("MVP type", MVP_TYPES),
        "metric": prompt("Primary metric to track (e.g., 'weekly active users')"),
        "baseline": prompt("Current baseline for this metric (e.g., '0', 'unknown')", "0"),
        "threshold": prompt("Success threshold (e.g., '50 sign-ups in 2 weeks')"),
        "duration": prompt("Experiment duration (e.g., '2 weeks')", "2 weeks"),
        "team": prompt("Team / owner", "Product team"),
    }
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
def parse_duration_weeks(duration_str: str) -> int:
    """Best-effort extraction of a week count from text like '2 weeks' or '10 days'.

    Checks for weeks first, then days (rounded down to whole weeks, minimum 1),
    and falls back to 2 weeks when neither unit is found.
    """
    import re

    for pattern, to_weeks in (
        (r"(\d+)\s*week", lambda n: n),
        (r"(\d+)\s*day", lambda n: max(1, n // 7)),
    ):
        match = re.search(pattern, duration_str, re.IGNORECASE)
        if match:
            return to_weeks(int(match.group(1)))
    return 2  # default experiment length
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
def render_document(data: dict) -> str:
    """Render the full MVP experiment document as a Markdown string.

    `data` must contain: name, value_hypothesis, growth_hypothesis, mvp_type
    (one of MVP_TYPES — used to index MVP_DESCRIPTIONS / WHY_NOT_BUILD),
    metric, baseline, threshold, duration, team.
    """
    today = date.today()
    # Decision date = today + experiment duration (parsed heuristically).
    weeks = parse_duration_weeks(data["duration"])
    decision_date = today + timedelta(weeks=weeks)
    mvp_type = data["mvp_type"]

    lines = [
        f"# MVP Experiment: {data['name']}",
        "",
        # Trailing space on metadata lines is intentional (Markdown line break).
        f"**Date created:** {today} ",
        f"**Owner:** {data['team']} ",
        f"**Pivot/Persevere decision by:** {decision_date} ",
        f"**Duration:** {data['duration']} ",
        "",
        "---",
        "",
        "## 1. Leap-of-Faith Assumptions",
        "",
        "### Value Hypothesis",
        f"> {data['value_hypothesis']}",
        "",
        "This is the core belief we are testing. If customers do not behave as",
        "predicted, we learn this assumption is false and must pivot.",
        "",
        "### Growth Hypothesis",
        f"> {data['growth_hypothesis']}",
        "",
        "---",
        "",
        "## 2. Why NOT Build the Full Product Yet",
        "",
        # Per-MVP-type rationale from the module-level catalog.
        WHY_NOT_BUILD[mvp_type],
        "",
        "Building the full solution before validating these assumptions would be",
        "**premature scaling** — one of the most common causes of startup failure",
        "according to the Lean Startup framework.",
        "",
        "---",
        "",
        # e.g. "wizard-of-oz" -> "Wizard Of Oz" for the heading.
        f"## 3. MVP Design — {mvp_type.replace('-', ' ').title()}",
        "",
        f"**Type:** {mvp_type}",
        "",
        MVP_DESCRIPTIONS[mvp_type],
        "",
        "### What we will build / do",
        "",
        "- [ ] Define the exact customer action we want to observe",
        "- [ ] Set up measurement (analytics, tracking, manual log)",
        "- [ ] Recruit initial participants / drive initial traffic",
        "- [ ] Execute the experiment and collect data",
        "- [ ] Analyse results against the success threshold below",
        "",
        "### What we will NOT build",
        "",
        "- Full back-end automation",
        "- Production-quality UI beyond what is needed to trigger the measured action",
        "- Scalability infrastructure",
        "",
        "---",
        "",
        "## 4. Innovation Accounting",
        "",
        "| Metric | Baseline | Target | How We Measure |",
        "|--------|----------|--------|----------------|",
        f"| {data['metric']} | {data['baseline']} | {data['threshold']} | [instrument] |",
        "| Retention (week 2) | — | >30% | Cohort analysis |",
        "| NPS / qualitative | — | Positive themes | Customer interviews |",
        "",
        "Measurements should be **actionable, accessible, auditable** (Three A's).",
        "Avoid vanity metrics (total page views, total registered users).",
        "",
        "---",
        "",
        "## 5. Pivot / Persevere Criteria",
        "",
        f"**Decision date:** {decision_date} ",
        f"**Decision owner:** {data['team']}",
        "",
        "### Persevere if",
        f"- The primary metric reaches or exceeds **{data['threshold']}** by {decision_date}.",
        "- Customer interviews reveal consistent, strong pull — not polite feedback.",
        "- At least one customer takes an unexpected high-engagement action.",
        "",
        "### Pivot if",
        f"- The primary metric is below **{data['threshold']}** with no strong upward trend.",
        "- Qualitative feedback reveals the problem is not painful enough to act on.",
        "- A significantly different customer segment shows stronger signal.",
        "",
        "### Pre-populated Pivot Options",
        "",
    ]
    # Append the standard pivot catalog as a bullet list.
    for opt in PIVOT_OPTIONS:
        lines.append(f"- {opt}")

    lines += [
        "",
        "---",
        "",
        "## 6. References",
        "",
        "- Ries, E. (2011). *The Lean Startup*. Crown Business.",
        "- Build-Measure-Learn loop: build the smallest thing that generates the",
        "  most learning, not the smallest shippable product.",
        "",
        "---",
        "",
        "*Generated by `new_experiment.py` — Lean Startup skill.*",
    ]
    return "\n".join(lines) + "\n"
|
|
235
|
+
|
|
236
|
+
|
|
237
|
+
def main() -> None:
    """CLI entry point: parse flags, gather experiment data, emit the document.

    One-shot mode is used when every required flag is present; otherwise the
    script falls back to interactive prompting (with a warning if only some
    flags were given).
    """
    parser = argparse.ArgumentParser(description="Generate a Lean Startup MVP experiment doc.")
    parser.add_argument("--name", help="Product or feature name")
    parser.add_argument("--hypothesis", help="Value hypothesis")
    parser.add_argument("--mvp-type", choices=MVP_TYPES, dest="mvp_type")
    parser.add_argument("--metric", help="Primary metric")
    parser.add_argument("--threshold", help="Success threshold")
    parser.add_argument("--duration", default="2 weeks", help="Experiment duration")
    parser.add_argument("--output", type=Path, help="Output file (default: stdout)")
    args = parser.parse_args()

    # Flags that must all be present for non-interactive (one-shot) mode.
    # (attribute names already use underscores, so no dash translation needed)
    required = ["name", "hypothesis", "mvp_type", "metric", "threshold"]
    provided = [getattr(args, field, None) for field in required]
    if all(provided):
        data = {
            "name": args.name,
            "value_hypothesis": args.hypothesis,
            "growth_hypothesis": "To be determined after initial validation.",
            "mvp_type": args.mvp_type,
            "metric": args.metric,
            "baseline": "0",
            "threshold": args.threshold,
            "duration": args.duration,
            "team": "Product team",
        }
    else:
        if any(provided):
            print(
                "WARNING: Some flags provided but not all required flags are set. "
                "Falling back to interactive mode.",
                file=sys.stderr,
            )
        try:
            data = gather_interactive()
        except (KeyboardInterrupt, EOFError):
            print("\nAborted.", file=sys.stderr)
            sys.exit(1)

    document = render_document(data)

    if args.output:
        # Fix: force UTF-8 — write_text() otherwise uses the platform default
        # encoding, and the document contains non-ASCII characters (em dashes),
        # which can raise UnicodeEncodeError on e.g. Windows cp1252.
        args.output.write_text(document, encoding="utf-8")
        print(f"\nExperiment document written to: {args.output}")
        print("Next: schedule the pivot/persevere meeting for the decision date shown in the doc.")
    else:
        sys.stdout.write(document)


if __name__ == "__main__":
    main()
|
|
@@ -177,3 +177,143 @@ Priority-ordered list of improvements, from most critical to nice-to-have.
|
|
|
177
177
|
(independent deployment, team autonomy, technology diversity) outweigh the costs.
|
|
178
178
|
- For deeper pattern details, read `references/patterns-catalog.md` before generating code.
|
|
179
179
|
- For review checklists, read `references/review-checklist.md` before reviewing code.
|
|
180
|
+
|
|
181
|
+
---
|
|
182
|
+
|
|
183
|
+
## Mode 3: Service Migration Planning
|
|
184
|
+
|
|
185
|
+
**Trigger phrases:** "decompose my monolith", "migrate to microservices", "strangle the monolith", "extract a service from"
|
|
186
|
+
|
|
187
|
+
You are helping a developer plan an incremental migration from a monolith (or distributed monolith) to a microservices architecture. The goal is a **phased migration** using the Strangler Fig pattern — the monolith keeps running while services are extracted one at a time.
|
|
188
|
+
|
|
189
|
+
### Step 1 — Assess Current State
|
|
190
|
+
|
|
191
|
+
Classify the system as one of:
|
|
192
|
+
- **Monolith** — Single deployable unit, single database. Starting point for decomposition.
|
|
193
|
+
- **Distributed Monolith** — Multiple services but tightly coupled (shared database, synchronous chains, must deploy together). Often worse than a monolith.
|
|
194
|
+
- **Partly Decomposed** — Some services extracted but shared databases or tight coupling remain.
|
|
195
|
+
|
|
196
|
+
Flag the critical problems:
|
|
197
|
+
- Shared databases (which tables are shared by which modules?)
|
|
198
|
+
- Synchronous call chains (A → B → C → D, fragile under failure)
|
|
199
|
+
- Missing circuit breakers
|
|
200
|
+
- No compensating transactions for cross-boundary operations
|
|
201
|
+
|
|
202
|
+
### Step 2 — Phase 1: Identify Boundaries (No Code Change)
|
|
203
|
+
|
|
204
|
+
**Goal:** Map business capabilities and propose service boundaries before touching code.
|
|
205
|
+
**Risk:** Zero — analysis only.
|
|
206
|
+
|
|
207
|
+
Actions:
|
|
208
|
+
- Map business capabilities (Order Management, Inventory, Billing, Notifications, etc.)
|
|
209
|
+
- Identify which capabilities are most independent (least shared database tables)
|
|
210
|
+
- Propose decomposition using **Decompose by Business Capability**
|
|
211
|
+
- Draw a capability map: which capabilities share data? Which are truly isolated?
|
|
212
|
+
|
|
213
|
+
Output: A capability map table showing each candidate service, its data ownership, and coupling level.
|
|
214
|
+
|
|
215
|
+
**Definition of Done:** Agreement on which service to extract first (least-coupled capability).
|
|
216
|
+
|
|
217
|
+
### Step 3 — Phase 2: Strangle the Monolith (Low-Risk)
|
|
218
|
+
|
|
219
|
+
**Goal:** Extract one service at a time using the Strangler Fig pattern.
|
|
220
|
+
**Risk:** Low if done incrementally — monolith keeps running.
|
|
221
|
+
|
|
222
|
+
Strategy:
|
|
223
|
+
- Start with the **least-coupled** capability (fewest shared tables, fewest synchronous callers)
|
|
224
|
+
- Build the new service alongside the monolith
|
|
225
|
+
- Route new traffic to the new service; keep the monolith handling old traffic
|
|
226
|
+
- Once the new service is stable, cut over the monolith's callers
|
|
227
|
+
|
|
228
|
+
Order of extraction (typical):
|
|
229
|
+
1. Leaf services (no downstream dependencies) — e.g., Notifications
|
|
230
|
+
2. Read-heavy services (can duplicate read models first)
|
|
231
|
+
3. Write-heavy services (require database decoupling first)
|
|
232
|
+
|
|
233
|
+
**Definition of Done:** First service deployed independently. Monolith no longer owns that capability.
|
|
234
|
+
|
|
235
|
+
### Step 4 — Phase 3: Database Decoupling (Medium-Risk)
|
|
236
|
+
|
|
237
|
+
**Goal:** Give each service its own private database.
|
|
238
|
+
**Risk:** Medium — requires data migration and API contracts between services.
|
|
239
|
+
|
|
240
|
+
Actions:
|
|
241
|
+
- Identify shared tables; assign ownership to one service
|
|
242
|
+
- Replace shared table reads with API calls or event subscriptions
|
|
243
|
+
- Use the **Database per Service** pattern: each service's schema is off-limits to other services
|
|
244
|
+
- For data that must stay consistent, plan eventual consistency via domain events
|
|
245
|
+
|
|
246
|
+
Patterns to apply:
|
|
247
|
+
- **Shared Database → separate schemas**: One service owns the table; others read via API
|
|
248
|
+
- **API Composition** for cross-service queries (replaces direct joins)
|
|
249
|
+
- **Domain events** to propagate state changes asynchronously
|
|
250
|
+
|
|
251
|
+
**Definition of Done:** No service reads from another service's database directly. All cross-service data flows through APIs or events.
|
|
252
|
+
|
|
253
|
+
### Step 5 — Phase 4: Async Communication (Medium-Risk)
|
|
254
|
+
|
|
255
|
+
**Goal:** Replace synchronous call chains with messaging; add resilience.
|
|
256
|
+
**Risk:** Medium — changes communication model across services.
|
|
257
|
+
|
|
258
|
+
Actions:
|
|
259
|
+
- Replace synchronous A → B → C chains with publish/subscribe messaging
|
|
260
|
+
- Add **Circuit Breakers** for remaining synchronous calls (fail fast, fallback)
|
|
261
|
+
- Make message handlers **idempotent** (handle duplicate messages safely)
|
|
262
|
+
- Use **Transactional Outbox** to ensure events are published atomically with database writes
|
|
263
|
+
|
|
264
|
+
**Definition of Done:** No synchronous chains longer than 2 hops. All event handlers are idempotent.
|
|
265
|
+
|
|
266
|
+
### Step 6 — Phase 5: Distributed Transactions (High-Risk, As Needed)
|
|
267
|
+
|
|
268
|
+
**Goal:** Handle multi-service operations that require consistency.
|
|
269
|
+
**Risk:** High — Saga implementation requires careful design of compensating transactions.
|
|
270
|
+
|
|
271
|
+
Apply when: a single user action must atomically update data owned by 2+ services (e.g., creating an order must both charge payment and reserve inventory).
|
|
272
|
+
|
|
273
|
+
Actions:
|
|
274
|
+
- Identify cross-service operations requiring consistency
|
|
275
|
+
- Design **Sagas** (choreography or orchestration) with compensating transactions for each step
|
|
276
|
+
- For orchestration: implement a Saga orchestrator state machine
|
|
277
|
+
- For choreography: design event sequences and compensation events
|
|
278
|
+
- Test failure scenarios explicitly
|
|
279
|
+
|
|
280
|
+
**Definition of Done:** Every multi-service operation has a defined happy path and compensation path. No distributed transactions use 2PC.
|
|
281
|
+
|
|
282
|
+
### Migration Output Format
|
|
283
|
+
|
|
284
|
+
```
|
|
285
|
+
## Service Migration Plan: [System Name]
|
|
286
|
+
|
|
287
|
+
### Current State Assessment
|
|
288
|
+
**Classification:** Monolith
|
|
289
|
+
**Shared databases:** Orders table shared by OrderModule and BillingModule
|
|
290
|
+
**Synchronous chains:** API Gateway → OrderService → InventoryService → NotificationService (3-hop chain)
|
|
291
|
+
|
|
292
|
+
### Capability Map
|
|
293
|
+
| Capability | Candidate Service | Shared Tables | Coupling Level |
|
|
294
|
+
|------------|------------------|---------------|----------------|
|
|
295
|
+
| Notifications | NotificationService | None | Low — extract first |
|
|
296
|
+
| Inventory | InventoryService | inventory, products | Medium |
|
|
297
|
+
| Orders | OrderService | orders, line_items, payments | High — extract last |
|
|
298
|
+
|
|
299
|
+
### Phase 1 — Boundaries (start now, no code change)
|
|
300
|
+
- [ ] Agree on service boundaries based on capability map above
|
|
301
|
+
- [ ] Identify NotificationService as first extraction target
|
|
302
|
+
|
|
303
|
+
### Phase 2 — Strangle the Monolith (next quarter)
|
|
304
|
+
- [ ] Build NotificationService alongside monolith
|
|
305
|
+
- [ ] Route notification calls to new service via API Gateway
|
|
306
|
+
- [ ] Decommission notification code from monolith
|
|
307
|
+
|
|
308
|
+
### Phase 3 — Database Decoupling (following quarter)
|
|
309
|
+
- [ ] Assign `notifications` table to NotificationService exclusively
|
|
310
|
+
- [ ] Replace OrderModule's direct DB read of customer email with API call to CustomerService
|
|
311
|
+
|
|
312
|
+
### Phase 4 — Async Communication (6 months)
|
|
313
|
+
- [ ] Replace OrderService → NotificationService sync call with OrderCreated domain event
|
|
314
|
+
- [ ] Add Circuit Breaker to InventoryService call from OrderService
|
|
315
|
+
|
|
316
|
+
### Phase 5 — Distributed Transactions (as needed)
|
|
317
|
+
- [ ] Design CreateOrderSaga: reserve inventory → charge payment → confirm order
|
|
318
|
+
- [ ] Define compensating transactions: release inventory, void charge
|
|
319
|
+
```
|
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
{
|
|
2
|
+
"evals": [
|
|
3
|
+
{
|
|
4
|
+
"id": "eval-01-distributed-transaction-no-saga",
|
|
5
|
+
"prompt": "Review this microservices code for an e-commerce checkout flow:\n\n```java\n// OrderService — orchestrates the entire checkout in one HTTP transaction\n@RestController\npublic class CheckoutController {\n private final OrderRepository orderRepository;\n private final InventoryServiceClient inventoryClient;\n private final PaymentServiceClient paymentClient;\n private final ShippingServiceClient shippingClient;\n private final NotificationServiceClient notificationClient;\n \n @PostMapping(\"/checkout\")\n @Transactional // <-- spans the entire method\n public OrderConfirmation checkout(@RequestBody CheckoutRequest request) {\n // Step 1: Reserve inventory\n inventoryClient.reserveItems(request.getItems());\n \n // Step 2: Create order in our DB\n Order order = orderRepository.save(new Order(request));\n \n // Step 3: Charge payment\n PaymentResult payment = paymentClient.charge(request.getPaymentInfo(), order.getTotal());\n \n // Step 4: Create shipping label\n ShippingLabel label = shippingClient.createLabel(order.getId(), request.getAddress());\n \n // Step 5: Send confirmation email\n notificationClient.sendConfirmation(request.getEmail(), order.getId());\n \n order.setShippingLabel(label.getTrackingNumber());\n order.setStatus(\"CONFIRMED\");\n return new OrderConfirmation(order.getId(), label.getTrackingNumber());\n }\n}\n```",
|
|
6
|
+
"expectations": [
|
|
7
|
+
"Identifies the core anti-pattern: a distributed transaction where @Transactional spans HTTP calls to 3 external services — this does not work as intended",
|
|
8
|
+
"Explains why @Transactional cannot span HTTP calls: the ACID transaction boundary is the local database only; remote service calls are not rolled back if a later step fails",
|
|
9
|
+
"Identifies the failure scenario: if shippingClient.createLabel() throws after paymentClient.charge() succeeds, the customer is charged but never gets a shipping label — and inventory is reserved but no rollback occurs",
|
|
10
|
+
"Names the pattern that is needed: the Saga pattern (orchestration-based) to handle this cross-service workflow",
|
|
11
|
+
"Explains compensating transactions: each step needs a corresponding undo — if shipping fails, a ReversePaymentSaga step should issue a refund and a ReleaseInventory step should free reserved stock",
|
|
12
|
+
"Notes the synchronous chain anti-pattern: OrderService → Inventory → Payment → Shipping → Notification is a fragile chain — if Notification is slow or down, the entire checkout hangs",
|
|
13
|
+
"Recommends making notifications asynchronous via a message queue (fire-and-forget)",
|
|
14
|
+
"Suggests using Saga orchestration with explicit saga states (PENDING_INVENTORY, PENDING_PAYMENT, PENDING_SHIPPING, CONFIRMED, FAILED) stored durably"
|
|
15
|
+
]
|
|
16
|
+
},
|
|
17
|
+
{
|
|
18
|
+
"id": "eval-02-shared-database",
|
|
19
|
+
"prompt": "Review this microservices architecture:\n\n```java\n// ProductService — manages product catalog\n@Service\npublic class ProductService {\n @Autowired\n private JdbcTemplate jdbc; // Connects to: jdbc:postgresql://shared-db:5432/platform\n \n public Product getProduct(Long id) {\n return jdbc.queryForObject(\n \"SELECT * FROM products WHERE id = ?\",\n new ProductRowMapper(), id);\n }\n \n public void updatePrice(Long productId, BigDecimal newPrice) {\n jdbc.update(\"UPDATE products SET price = ?, updated_at = NOW() WHERE id = ?\",\n newPrice, productId);\n }\n}\n\n// OrderService — in a SEPARATE deployable service/process\n@Service\npublic class OrderService {\n @Autowired\n private JdbcTemplate jdbc; // ALSO connects to: jdbc:postgresql://shared-db:5432/platform\n \n public Order createOrder(CreateOrderRequest req) {\n // Directly reads from products table owned by ProductService\n BigDecimal price = jdbc.queryForObject(\n \"SELECT price FROM products WHERE id = ?\",\n BigDecimal.class, req.getProductId());\n \n // Directly joins across service boundaries\n List<OrderLine> lines = jdbc.query(\n \"SELECT o.*, p.name, p.sku FROM orders o JOIN products p ON o.product_id = p.id WHERE o.customer_id = ?\",\n new OrderLineRowMapper(), req.getCustomerId());\n \n jdbc.update(\"INSERT INTO orders (product_id, customer_id, price) VALUES (?, ?, ?)\",\n req.getProductId(), req.getCustomerId(), price);\n return buildOrder(lines);\n }\n}\n```",
|
|
20
|
+
"expectations": [
|
|
21
|
+
"Identifies the Shared Database anti-pattern: both ProductService and OrderService connect to the same database and directly access each other's tables",
|
|
22
|
+
"Explains why this is problematic: ProductService cannot change its products table schema without coordinating with OrderService — the services are coupled at the data layer despite being separate deployables",
|
|
23
|
+
"Flags that OrderService reads from products — meaning ProductService's internal data model is now OrderService's public API; any rename or restructure breaks OrderService",
|
|
24
|
+
"Flags the cross-service JOIN in SQL: joins across service boundaries are a strong indicator of incorrect service decomposition or a shared database violation",
|
|
25
|
+
"Explains the correct pattern: each service owns its own database/schema; OrderService should get product information by calling ProductService's API (synchronous) or by maintaining a local read model via events (CQRS)",
|
|
26
|
+
"Recommends that OrderService should store the price at order-creation time (denormalized) rather than joining live to the products table — price at time of purchase is correct business behavior",
|
|
27
|
+
"Notes that this architecture makes independent deployment impossible: upgrading ProductService's database schema requires a coordinated deployment with OrderService"
|
|
28
|
+
]
|
|
29
|
+
},
|
|
30
|
+
{
|
|
31
|
+
"id": "eval-03-event-driven-saga",
|
|
32
|
+
"prompt": "Review this saga implementation for a ride-sharing trip booking:\n\n```java\n// TripSaga — orchestrator (Spring Saga / Axon-style pseudocode)\n@Saga\npublic class BookTripSaga {\n private String tripId;\n private String driverId;\n private String paymentAuthId;\n \n @StartSaga\n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(TripRequestedEvent event) {\n this.tripId = event.getTripId();\n commandGateway.send(new FindAvailableDriverCommand(event.getTripId(), event.getLocation()));\n }\n \n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(DriverAssignedEvent event) {\n this.driverId = event.getDriverId();\n commandGateway.send(new AuthorizePaymentCommand(event.getTripId(), event.getEstimatedFare()));\n }\n \n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(PaymentAuthorizedEvent event) {\n this.paymentAuthId = event.getAuthorizationId();\n commandGateway.send(new ConfirmTripCommand(tripId, driverId));\n }\n \n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(NoDriverAvailableEvent event) {\n commandGateway.send(new CancelTripCommand(tripId, \"No drivers available\"));\n SagaLifecycle.end();\n }\n \n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(PaymentDeclinedEvent event) {\n // Compensate: release the reserved driver\n commandGateway.send(new ReleaseDriverCommand(driverId, tripId));\n commandGateway.send(new CancelTripCommand(tripId, \"Payment declined\"));\n SagaLifecycle.end();\n }\n \n @EndSaga\n @SagaEventHandler(associationProperty = \"tripId\")\n public void on(TripConfirmedEvent event) {\n // Saga complete — trip is live\n }\n}\n```",
|
|
33
|
+
"expectations": [
|
|
34
|
+
"Recognizes this as a well-designed orchestration-based saga and says so explicitly",
|
|
35
|
+
"Praises the compensating transactions: PaymentDeclinedEvent triggers both ReleaseDriverCommand and CancelTripCommand — each forward step has a corresponding undo",
|
|
36
|
+
"Praises that the saga stores intermediate state (driverId, paymentAuthId) to enable compensation — the saga has the information it needs to undo each step",
|
|
37
|
+
"Praises that the saga is event-driven: each step reacts to an event rather than making synchronous calls — this decouples the booking flow from service availability",
|
|
38
|
+
"Praises explicit saga lifecycle management: SagaLifecycle.end() on failure paths and @EndSaga on success ensure the saga doesn't leak memory",
|
|
39
|
+
"Praises modeling failure paths as first-class events (NoDriverAvailableEvent, PaymentDeclinedEvent) — not exceptions, but domain events the saga handles explicitly",
|
|
40
|
+
"Does NOT manufacture fake issues just to have something to say",
|
|
41
|
+
"May offer optional suggestions (timeout handling if DriverAssignedEvent never arrives, idempotent command handlers) but frames them as additional robustness, not defects in the current design"
|
|
42
|
+
]
|
|
43
|
+
}
|
|
44
|
+
]
|
|
45
|
+
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
# After
|
|
2
|
+
|
|
3
|
+
`InventoryService` owns its own read model, populated by consuming `OrderLineItemAdded` domain events published by the Order Service — no shared database access.
|
|
4
|
+
|
|
5
|
+
```java
|
|
6
|
+
// --- Order Service publishes domain events (its own codebase) ---
|
|
7
|
+
|
|
8
|
+
/**
 * Domain event: a line item was added to an order.
 *
 * Published by the Order Service after the order aggregate is saved; other
 * services (e.g. the Inventory Service's Kafka consumer) use it to maintain
 * their own read models without touching the Order Service's database.
 */
@DomainEvent
public record OrderLineItemAdded(
    String orderId,    // id of the order the line item was added to
    String productId,  // product that was ordered
    int quantity,      // number of units added in this line item
    Instant occurredAt // publisher-side timestamp, set at publish time
) {}
|
|
15
|
+
|
|
16
|
+
// Order Service publishes this event after saving the order aggregate
|
|
17
|
+
orderEventPublisher.publish(new OrderLineItemAdded(
|
|
18
|
+
order.getId(), line.getProductId(), line.getQuantity(), Instant.now()
|
|
19
|
+
));
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
// --- Inventory Service: owns its own read model, no shared DB ---
|
|
23
|
+
|
|
24
|
+
// Private denormalized table — owned exclusively by Inventory Service
// (a lightweight CQRS read model populated from OrderLineItemAdded events).
@Entity @Table(name = "product_weekly_sales")
public class ProductWeeklySales {
    // Product identifier — same id carried in OrderLineItemAdded.productId.
    @Id private String productId;
    // Incremented by the event consumer for each in-window order line.
    // NOTE(review): nothing visible here ever decrements this counter as
    // events age past 7 days — presumably a scheduled job prunes/recomputes
    // it; TODO confirm, otherwise "last 7 days" drifts upward over time.
    private int unitsSoldLast7Days;
    // When this row was last touched by an applied event.
    private Instant lastUpdated;
}
|
|
31
|
+
|
|
32
|
+
@Component
|
|
33
|
+
public class OrderLineItemAddedConsumer {
|
|
34
|
+
|
|
35
|
+
@KafkaListener(topics = "order.events", groupId = "inventory-service")
|
|
36
|
+
public void handle(OrderLineItemAdded event) {
|
|
37
|
+
// Idempotent: uses event's occurredAt to filter stale events
|
|
38
|
+
if (event.occurredAt().isBefore(Instant.now().minus(7, DAYS))) return;
|
|
39
|
+
|
|
40
|
+
weeklySalesRepository.incrementUnitsSold(event.productId(), event.quantity());
|
|
41
|
+
}
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
@RestController @RequestMapping("/inventory")
|
|
45
|
+
public class InventoryController {
|
|
46
|
+
|
|
47
|
+
@GetMapping("/reorder-candidates")
|
|
48
|
+
public List<ReorderItem> getReorderCandidates() {
|
|
49
|
+
// Queries Inventory Service's OWN database — no cross-service DB access
|
|
50
|
+
return weeklySalesRepository.findAll().stream()
|
|
51
|
+
.filter(sales -> {
|
|
52
|
+
int stockLevel = stockRepository.getLevel(sales.getProductId());
|
|
53
|
+
return stockLevel < sales.getUnitsSoldLast7Days() * 2;
|
|
54
|
+
})
|
|
55
|
+
.map(sales -> new ReorderItem(
|
|
56
|
+
sales.getProductId(),
|
|
57
|
+
sales.getUnitsSoldLast7Days() * 3 - stockRepository.getLevel(sales.getProductId())
|
|
58
|
+
))
|
|
59
|
+
.toList();
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
Key improvements:
|
|
65
|
+
- Each service owns its database — `InventoryService` never touches the `orders` schema (Database per Service pattern)
|
|
66
|
+
- `OrderLineItemAdded` domain event decouples the services; Order Service does not know Inventory Service exists
|
|
67
|
+
- `ProductWeeklySales` is a denormalized read model maintained by consuming events — a lightweight CQRS view
|
|
68
|
+
- The Kafka consumer skips events older than 7 days, bounding staleness — note, however, that this alone does not make re-delivery safe: a redelivered in-window event is counted twice, so true idempotency additionally requires deduplication by a unique event id
|
|
69
|
+
- Deleting the coupling to `sharedDataSource` eliminates the risk that a schema change in Order Service breaks Inventory Service at runtime
|