@champpaba/claude-agent-kit 3.0.2 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/.claude/CHANGELOG.md +707 -0
  2. package/.claude/CLAUDE.md +128 -613
  3. package/.claude/agents/_shared/pre-work-checklist.md +108 -7
  4. package/.claude/commands/cdev.md +36 -0
  5. package/.claude/commands/csetup.md +292 -1791
  6. package/.claude/commands/cview.md +364 -364
  7. package/.claude/contexts/design/accessibility.md +611 -611
  8. package/.claude/contexts/design/layout.md +400 -400
  9. package/.claude/contexts/design/responsive.md +551 -551
  10. package/.claude/contexts/design/shadows.md +522 -522
  11. package/.claude/contexts/design/typography.md +465 -465
  12. package/.claude/contexts/domain/README.md +164 -164
  13. package/.claude/contexts/patterns/agent-coordination.md +388 -388
  14. package/.claude/contexts/patterns/development-principles.md +513 -513
  15. package/.claude/contexts/patterns/error-handling.md +478 -478
  16. package/.claude/contexts/patterns/logging.md +424 -424
  17. package/.claude/contexts/patterns/tdd-classification.md +516 -516
  18. package/.claude/contexts/patterns/testing.md +413 -413
  19. package/.claude/lib/README.md +3 -3
  20. package/.claude/lib/detailed-guides/taskmaster-analysis.md +1 -1
  21. package/.claude/lib/task-analyzer.md +144 -0
  22. package/.claude/lib/tdd-workflow.md +2 -1
  23. package/.claude/lib/validation-gates.md +484 -484
  24. package/.claude/settings.local.json +42 -42
  25. package/.claude/templates/PROJECT_STATUS.template.yml +16 -41
  26. package/.claude/templates/context-template.md +45 -45
  27. package/.claude/templates/flags-template.json +42 -42
  28. package/.claude/templates/phases-sections/accessibility-test.md +17 -17
  29. package/.claude/templates/phases-sections/api-design.md +37 -37
  30. package/.claude/templates/phases-sections/backend-tests.md +16 -16
  31. package/.claude/templates/phases-sections/backend.md +37 -37
  32. package/.claude/templates/phases-sections/business-logic-validation.md +16 -16
  33. package/.claude/templates/phases-sections/component-tests.md +17 -17
  34. package/.claude/templates/phases-sections/contract-backend.md +16 -16
  35. package/.claude/templates/phases-sections/contract-frontend.md +16 -16
  36. package/.claude/templates/phases-sections/database.md +35 -35
  37. package/.claude/templates/phases-sections/e2e-tests.md +16 -16
  38. package/.claude/templates/phases-sections/fix-implementation.md +17 -17
  39. package/.claude/templates/phases-sections/frontend-integration.md +18 -18
  40. package/.claude/templates/phases-sections/manual-flow-test.md +15 -15
  41. package/.claude/templates/phases-sections/manual-ux-test.md +16 -16
  42. package/.claude/templates/phases-sections/refactor-implementation.md +17 -17
  43. package/.claude/templates/phases-sections/refactor.md +16 -16
  44. package/.claude/templates/phases-sections/regression-tests.md +15 -15
  45. package/.claude/templates/phases-sections/responsive-test.md +16 -16
  46. package/.claude/templates/phases-sections/script-implementation.md +43 -43
  47. package/.claude/templates/phases-sections/test-coverage.md +16 -16
  48. package/.claude/templates/phases-sections/user-approval.md +14 -14
  49. package/LICENSE +21 -21
  50. package/package.json +1 -1
  51. package/.claude/lib/tdd-classifier.md +0 -345
@@ -158,6 +158,71 @@ if (fileExists(projectStatusPath)) {
  }
  ```
 
+ ### Step 1.6: Memory Context Query (v2.2.0 - claude-mem Integration)
+
+ **WHY:** Query past work to leverage decisions, avoid repeating mistakes, and maintain consistency.
+
+ ```typescript
+ // Extract keywords from change-id and proposal title
+ const changeKeywords = changeId.split('-').join(' ')
+ const proposalPath = `openspec/changes/${changeId}/proposal.md`
+ const proposalContent = fileExists(proposalPath) ? Read(proposalPath) : ''
+ const proposalTitle = proposalContent.match(/^#\s+(.+)/m)?.[1] || changeId
+
+ output(`\n🧠 Querying claude-mem for related past work...`)
+
+ // Use mem-search skill to find related observations
+ // The skill auto-invokes when asking about past work
+ const queries = [
+   `decisions about ${changeKeywords}`,
+   `bugs related to ${changeKeywords}`,
+   `implementations of ${changeKeywords}`
+ ]
+
+ // Claude will auto-invoke mem-search for these queries
+ // Results are stored for inclusion in research-checklist.md
+
+ let pastLearnings = []
+
+ // Note: In practice, Main Claude asks these questions naturally
+ // and mem-search skill returns relevant observations
+
+ output(` Searched for: ${changeKeywords}`)
+ output(` (Results will be included in research-checklist.md if relevant)`)
+ output(``)
+
+ // Store for later use in research-checklist.md generation
+ // pastLearnings will be populated by mem-search results
+ ```
+
+ **Integration with research-checklist.md:**
+
+ When generating `research-checklist.md` (Step 2.6), include a "Past Learnings" section:
+
+ ```markdown
+ ## Past Learnings (from claude-mem)
+
+ > Related observations from previous sessions:
+
+ | ID | Type | Summary | Relevance |
+ |----|------|---------|-----------|
+ | #12345 | decision | Chose Drizzle over Prisma | HIGH |
+ | #12340 | bugfix | Fixed N+1 query in user list | MEDIUM |
+
+ ### Key Takeaways:
+ - Use Drizzle patterns established in #12345
+ - Watch for N+1 queries (see #12340 for solution)
+ ```
+
+ If no relevant observations found:
+ ```markdown
+ ## Past Learnings (from claude-mem)
+
+ No related past work found. Proceeding fresh.
+ ```
+
+ ---
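
For Step 1.6 above, the "Past Learnings" table can be produced mechanically once observations come back. A minimal sketch, assuming a hypothetical observation shape (`id`, `type`, `summary`, `relevance`) rather than any documented claude-mem API:

```typescript
// Hypothetical shape of a mem-search observation (assumed for illustration,
// not part of claude-mem's documented API)
interface PastObservation {
  id: number
  type: 'decision' | 'bugfix' | 'implementation'
  summary: string
  relevance: 'HIGH' | 'MEDIUM' | 'LOW'
}

// Build the "Past Learnings" markdown section from whatever observations were collected
function renderPastLearnings(observations: PastObservation[]): string {
  if (observations.length === 0) {
    return '## Past Learnings (from claude-mem)\n\nNo related past work found. Proceeding fresh.\n'
  }
  const rows = observations
    .map(o => `| #${o.id} | ${o.type} | ${o.summary} | ${o.relevance} |`)
    .join('\n')
  return [
    '## Past Learnings (from claude-mem)',
    '',
    '> Related observations from previous sessions:',
    '',
    '| ID | Type | Summary | Relevance |',
    '|----|------|---------|-----------|',
    rows,
    ''
  ].join('\n')
}
```
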
  ### Step 2: Read OpenSpec Files
162
227
 
163
228
  Read in order:
@@ -253,1531 +318,310 @@ Continue anyway? (yes/no)
253
318
 
254
319
  ---
255
320
 
256
- ### Step 2.6: Adaptive Depth Research (v2.4.0)
257
-
258
- > **NEW:** Dynamic research layers based on change complexity - replaces hardcoded feature detection
259
- > **WHY:** Different changes need different research depth. A typo fix needs 0 layers, a healthcare portal needs 10+.
260
- > **Output:** `openspec/changes/{changeId}/research-checklist.md`
321
+ ### Step 2.6: Generate Pre-Work Context (v3.2.0 - Consolidated)
261
322
 
262
- **Key Principles:**
263
- - Layer 1 is ALWAYS "Best Practice" (How do others do it?)
264
- - Layer 2+ determined dynamically based on change context
265
- - No fixed minimum or maximum - truly adaptive (0 to 10+ layers)
266
- - Visual design (from /designsetup) is STATIC - this only handles Strategy (WHAT/WHERE)
267
- - Warns if industry practice conflicts with user's design choices
323
+ > **EXECUTE THESE STEPS** - Not pseudocode, actual instructions for Main Claude
324
+ > **Output:** `openspec/changes/{changeId}/pre-work-context.md`
325
+ > **Purpose:** Single file containing ALL context agents need before implementation
268
326
 
269
- ```typescript
270
- output(`\nšŸ”¬ Adaptive Depth Research Analysis...`)
271
-
272
- // 1. Gather change context from all spec files
273
- const proposalPath = `openspec/changes/${changeId}/proposal.md`
274
- const tasksPath = `openspec/changes/${changeId}/tasks.md`
275
- const designPath = `openspec/changes/${changeId}/design.md`
276
-
277
- const proposal = fileExists(proposalPath) ? Read(proposalPath) : ''
278
- const tasks = fileExists(tasksPath) ? Read(tasksPath) : ''
279
- const design = fileExists(designPath) ? Read(designPath) : ''
280
- const combined = (proposal + '\n' + tasks + '\n' + design).toLowerCase()
281
-
282
- // 2. Analyze change characteristics using semantic understanding
283
- const changeAnalysis = analyzeChangeCharacteristics(combined, proposal, tasks)
284
-
285
- output(`\nšŸ“Š Change Analysis:`)
286
- output(` Type: ${changeAnalysis.primaryType}`)
287
- output(` Complexity: ${changeAnalysis.complexity}/10`)
288
- output(` Risk Level: ${changeAnalysis.riskLevel}`)
289
- output(` Target Audience: ${changeAnalysis.audience || 'Internal'}`)
290
-
291
- // 3. Determine required research layers based on change characteristics
292
- const requiredLayers = determineResearchLayers(changeAnalysis)
293
-
294
- output(`\nšŸ“š Research Layers Required: ${requiredLayers.length}`)
295
-
296
- if (requiredLayers.length === 0) {
297
- output(` āœ… No research needed - trivial change`)
298
- output(` (Typo fix, debug log, simple badge, etc.)`)
299
- } else {
300
- requiredLayers.forEach((layer, idx) => {
301
- output(` L${idx + 1}: ${layer.name}`)
302
- output(` Focus: ${layer.focus}`)
303
- output(` Questions: ${layer.questions.slice(0, 2).join(', ')}...`)
304
- })
305
- }
327
+ **This step consolidates:**
328
+ - Adaptive Depth Research (domain knowledge)
329
+ - Library Best Practices (from Context7)
330
+ - Integration Warnings (cross-library concerns)
331
+ - Critical Checklists (security/compliance requirements)
306
332
 
307
- // 4. Execute research for each layer using Context7 + semantic analysis
308
- const researchResults = []
309
-
310
- for (const layer of requiredLayers) {
311
- output(`\nšŸ” Researching L${layer.order}: ${layer.name}...`)
333
+ ---
312
334
 
313
- const layerResult = await executeLayerResearch(layer, changeAnalysis)
314
- researchResults.push(layerResult)
335
+ #### Step 2.6.1: Analyze Change Characteristics
315
336
 
316
- output(` āœ… Found ${layerResult.findings.length} key findings`)
317
- if (layerResult.warnings.length > 0) {
318
- layerResult.warnings.forEach(w => output(` āš ļø ${w}`))
319
- }
320
- }
337
+ **Read and analyze these files:**
338
+ - `openspec/changes/{changeId}/proposal.md`
339
+ - `openspec/changes/{changeId}/tasks.md`
340
+ - `openspec/changes/{changeId}/design.md` (if exists)
321
341
 
322
- // 5. Check for conflicts with design system (if exists)
323
- const tokensPath = 'design-system/data.yaml'
324
- if (fileExists(tokensPath) && researchResults.length > 0) {
325
- const tokens = parseYaml(Read(tokensPath))
326
- const conflicts = checkDesignConflicts(tokens, researchResults, changeAnalysis)
327
-
328
- if (conflicts.length > 0) {
329
- output(`\nāš ļø Design vs Industry Fit Conflicts:`)
330
- conflicts.forEach(c => {
331
- output(` - ${c.aspect}: Your design uses "${c.current}", but ${c.industry}`)
332
- output(` Recommendation: ${c.recommendation}`)
333
- })
334
- output(`\n Note: User design choices take precedence. These are informational warnings.`)
335
- }
336
- }
342
+ **Determine:**
343
+ 1. **Primary Type:** marketing | dashboard | api | auth | database | general
344
+ 2. **Complexity (1-10):** Based on features, integrations, external APIs
345
+ 3. **Risk Level:** LOW | MEDIUM | HIGH
346
+ 4. **Domains:** healthcare, fintech, ecommerce, saas (if applicable)
347
+ 5. **Features:** payment, auth, multi-tenancy, realtime (if detected)
337
348
 
338
- // 6. Generate research-checklist.md
339
- const checklistPath = `openspec/changes/${changeId}/research-checklist.md`
340
- const checklistContent = generateResearchChecklist(changeAnalysis, requiredLayers, researchResults)
341
-
342
- Write(checklistPath, checklistContent)
343
- output(`\nāœ… Generated: ${checklistPath}`)
344
-
345
- // Store for use by agents
346
- const adaptiveResearch = {
347
- layerCount: requiredLayers.length,
348
- layers: requiredLayers.map(l => l.name),
349
- complexity: changeAnalysis.complexity,
350
- checklistPath: checklistPath
351
- }
349
+ **Output:**
+ ```
+ šŸ“Š Change Analysis:
+ Type: auth
+ Complexity: 6/10
+ Risk Level: HIGH
+ Domains: fintech
+ Features: payment, auth
  ```
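
Main Claude performs this classification semantically; as a reference point, a compact heuristic in the spirit of the v2.x `analyzeChangeCharacteristics()` helper this step replaces might look like the sketch below (keyword lists are illustrative, not exhaustive):

```typescript
// Illustrative heuristic modeled on the removed v2.x helper; the regexes only show the intent.
interface ChangeAnalysis {
  primaryType: 'marketing' | 'dashboard' | 'api' | 'auth' | 'database' | 'general'
  complexity: number            // 1-10
  riskLevel: 'LOW' | 'MEDIUM' | 'HIGH'
  domains: string[]             // e.g. ['fintech']
  features: string[]            // e.g. ['payment', 'auth']
}

function analyzeChange(specText: string): ChangeAnalysis {
  const t = specText.toLowerCase()
  const primaryType =
    /marketing|landing|hero|cta/.test(t) ? 'marketing' :
    /dashboard|admin|analytics/.test(t)  ? 'dashboard' :
    /auth|login|register/.test(t)        ? 'auth' :
    /api|endpoint|graphql/.test(t)       ? 'api' :
    /schema|migration|model/.test(t)     ? 'database' : 'general'

  const features: string[] = []
  if (/payment|stripe|billing|checkout/.test(t)) features.push('payment')
  if (/auth|login|session/.test(t)) features.push('auth')
  if (/multi-tenant|tenant/.test(t)) features.push('multi-tenancy')
  if (/realtime|websocket/.test(t)) features.push('realtime')

  const domains: string[] = []
  if (/health|patient|hipaa|phi/.test(t)) domains.push('healthcare')
  if (/fintech|banking|pci/.test(t)) domains.push('fintech')

  // Complexity grows with features, domains, and regulated/payment work; capped at 10
  const complexity = Math.min(
    1 + features.length + domains.length +
    (features.includes('payment') ? 2 : 0) + (domains.length > 0 ? 2 : 0), 10)
  const riskLevel =
    complexity >= 7 || domains.length > 0 || features.includes('payment') ? 'HIGH' :
    complexity >= 4 || features.includes('auth') ? 'MEDIUM' : 'LOW'

  return { primaryType, complexity, riskLevel, domains, features }
}
```
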
353
358
 
354
- #### Helper Functions
355
-
356
- ```typescript
357
- // Analyze change characteristics using semantic understanding
358
- function analyzeChangeCharacteristics(combined, proposal, tasks) {
359
- const analysis = {
360
- primaryType: 'general',
361
- complexity: 1,
362
- riskLevel: 'LOW',
363
- audience: 'internal',
364
- domains: [],
365
- features: [],
366
- hasUI: false,
367
- hasAPI: false,
368
- hasDatabase: false,
369
- hasPayment: false,
370
- hasAuth: false,
371
- hasCompliance: false,
372
- hasSensitiveData: false,
373
- isExternalFacing: false,
374
- industryContext: null
375
- }
376
-
377
- // Detect primary type
378
- if (/marketing|landing|hero|cta|conversion|sales/i.test(combined)) {
379
- analysis.primaryType = 'marketing'
380
- analysis.isExternalFacing = true
381
- } else if (/dashboard|admin|management|analytics/i.test(combined)) {
382
- analysis.primaryType = 'dashboard'
383
- } else if (/api|endpoint|rest|graphql/i.test(combined)) {
384
- analysis.primaryType = 'api'
385
- } else if (/auth|login|register|password/i.test(combined)) {
386
- analysis.primaryType = 'auth'
387
- analysis.hasAuth = true
388
- } else if (/database|schema|migration|model/i.test(combined)) {
389
- analysis.primaryType = 'database'
390
- analysis.hasDatabase = true
391
- }
392
-
393
- // Detect features and domains
394
- if (/payment|stripe|billing|checkout|subscription/i.test(combined)) {
395
- analysis.hasPayment = true
396
- analysis.features.push('payment')
397
- analysis.riskLevel = 'HIGH'
398
- }
399
- if (/health|medical|patient|hipaa|phi/i.test(combined)) {
400
- analysis.hasCompliance = true
401
- analysis.hasSensitiveData = true
402
- analysis.domains.push('healthcare')
403
- analysis.industryContext = 'healthcare'
404
- analysis.riskLevel = 'HIGH'
405
- }
406
- if (/fintech|banking|finance|pci|financial/i.test(combined)) {
407
- analysis.hasCompliance = true
408
- analysis.domains.push('fintech')
409
- analysis.industryContext = 'fintech'
410
- analysis.riskLevel = 'HIGH'
411
- }
412
- if (/saas|multi-tenant|tenant/i.test(combined)) {
413
- analysis.domains.push('saas')
414
- analysis.features.push('multi-tenancy')
415
- }
416
- if (/ecommerce|e-commerce|cart|product|shop/i.test(combined)) {
417
- analysis.domains.push('ecommerce')
418
- analysis.isExternalFacing = true
419
- }
420
- if (/realtime|real-time|websocket|collaboration/i.test(combined)) {
421
- analysis.features.push('realtime')
422
- }
423
-
424
- // Detect UI/API/Database
425
- analysis.hasUI = /ui|page|component|form|button|modal/i.test(combined)
426
- analysis.hasAPI = /api|endpoint|route|controller/i.test(combined)
427
- analysis.hasDatabase = analysis.hasDatabase || /table|column|relation|index/i.test(combined)
428
-
429
- // Detect audience
430
- if (/b2c|consumer|user|customer/i.test(combined)) {
431
- analysis.audience = 'consumer'
432
- analysis.isExternalFacing = true
433
- } else if (/b2b|enterprise|business/i.test(combined)) {
434
- analysis.audience = 'business'
435
- analysis.isExternalFacing = true
436
- }
437
-
438
- // Calculate complexity (1-10)
439
- let complexity = 1
440
- if (analysis.features.length > 0) complexity += analysis.features.length
441
- if (analysis.domains.length > 0) complexity += analysis.domains.length
442
- if (analysis.hasCompliance) complexity += 2
443
- if (analysis.hasPayment) complexity += 2
444
- if (analysis.hasAuth) complexity += 1
445
- if (analysis.isExternalFacing) complexity += 1
446
- if (/integration|external api|third-party/i.test(combined)) complexity += 2
447
-
448
- analysis.complexity = Math.min(complexity, 10)
449
-
450
- // Adjust risk level
451
- if (analysis.complexity >= 7 || analysis.hasCompliance || analysis.hasPayment) {
452
- analysis.riskLevel = 'HIGH'
453
- } else if (analysis.complexity >= 4 || analysis.hasAuth) {
454
- analysis.riskLevel = 'MEDIUM'
455
- }
456
-
457
- return analysis
458
- }
459
-
460
- // Determine research layers dynamically based on change characteristics
461
- function determineResearchLayers(analysis) {
462
- const layers = []
463
- let order = 1
464
-
465
- // Check for trivial changes (0 layers)
466
- if (analysis.complexity <= 1 &&
467
- !analysis.hasUI && !analysis.hasAPI && !analysis.hasDatabase &&
468
- analysis.riskLevel === 'LOW') {
469
- return [] // No research needed
470
- }
471
-
472
- // L1: Best Practice (ALWAYS for non-trivial changes)
473
- layers.push({
474
- order: order++,
475
- name: 'Best Practice / Industry Standard',
476
- focus: `How do others implement ${analysis.primaryType}?`,
477
- questions: [
478
- `What is the industry standard for ${analysis.primaryType}?`,
479
- 'What are common patterns and anti-patterns?',
480
- 'What are the key success factors?',
481
- 'What are common failure modes?'
482
- ],
483
- searchTopics: [`${analysis.primaryType} best practices`, `${analysis.primaryType} patterns`]
484
- })
485
-
486
- // L2+: Dynamic layers based on context
487
-
488
- // Security layer (for auth, payment, sensitive data)
489
- if (analysis.hasAuth || analysis.hasPayment || analysis.hasSensitiveData) {
490
- layers.push({
491
- order: order++,
492
- name: 'Security Requirements',
493
- focus: 'What security measures are required?',
494
- questions: [
495
- 'What authentication/authorization is needed?',
496
- 'What data protection is required?',
497
- 'What are common security vulnerabilities?',
498
- 'What compliance requirements apply?'
499
- ],
500
- searchTopics: ['security best practices', `${analysis.primaryType} security`]
501
- })
502
- }
503
-
504
- // Compliance layer (for regulated industries)
505
- if (analysis.hasCompliance || analysis.industryContext) {
506
- layers.push({
507
- order: order++,
508
- name: `${analysis.industryContext || 'Industry'} Compliance`,
509
- focus: `What ${analysis.industryContext || 'industry'} regulations apply?`,
510
- questions: [
511
- 'What regulatory requirements must be met?',
512
- 'What audit trails are needed?',
513
- 'What data handling rules apply?',
514
- 'What documentation is required?'
515
- ],
516
- searchTopics: [`${analysis.industryContext} compliance`, `${analysis.industryContext} regulations`]
517
- })
518
- }
519
-
520
- // UX layer (for external-facing UI)
521
- if (analysis.isExternalFacing && analysis.hasUI) {
522
- layers.push({
523
- order: order++,
524
- name: 'User Experience Patterns',
525
- focus: 'What UX patterns work for this audience?',
526
- questions: [
527
- 'What user journey is expected?',
528
- 'What conversion patterns work?',
529
- 'What accessibility requirements apply?',
530
- 'What are user expectations?'
531
- ],
532
- searchTopics: [`${analysis.primaryType} UX`, `${analysis.audience} UX patterns`]
533
- })
534
- }
535
-
536
- // Psychology layer (for marketing/sales)
537
- if (analysis.primaryType === 'marketing' || /conversion|sales|cta/i.test(analysis.primaryType)) {
538
- layers.push({
539
- order: order++,
540
- name: 'Conversion Psychology',
541
- focus: 'What psychological triggers work?',
542
- questions: [
543
- 'What is the buyer awareness level?',
544
- 'What pain points to address?',
545
- 'What objections to overcome?',
546
- 'What social proof is needed?'
547
- ],
548
- searchTopics: ['conversion psychology', 'landing page psychology']
549
- })
550
- }
551
-
552
- // Content Strategy layer (for content-heavy pages)
553
- if (analysis.primaryType === 'marketing' || /content|blog|documentation/i.test(analysis.primaryType)) {
554
- layers.push({
555
- order: order++,
556
- name: 'Content Strategy',
557
- focus: 'What content structure works?',
558
- questions: [
559
- 'What content hierarchy is effective?',
560
- 'What tone and voice to use?',
561
- 'What call-to-actions work?',
562
- 'What content gaps exist?'
563
- ],
564
- searchTopics: ['content strategy', 'copywriting best practices']
565
- })
566
- }
567
-
568
- // Data Architecture layer (for database/data-intensive)
569
- if (analysis.hasDatabase || /data|analytics|reporting/i.test(analysis.primaryType)) {
570
- layers.push({
571
- order: order++,
572
- name: 'Data Architecture',
573
- focus: 'What data patterns are appropriate?',
574
- questions: [
575
- 'What normalization level is appropriate?',
576
- 'What indexing strategy is needed?',
577
- 'What scaling considerations apply?',
578
- 'What data integrity rules?'
579
- ],
580
- searchTopics: ['database design patterns', 'data architecture']
581
- })
582
- }
583
-
584
- // API Design layer (for API-focused changes)
585
- if (analysis.hasAPI || analysis.primaryType === 'api') {
586
- layers.push({
587
- order: order++,
588
- name: 'API Design',
589
- focus: 'What API patterns are appropriate?',
590
- questions: [
591
- 'What API style is appropriate (REST/GraphQL)?',
592
- 'What versioning strategy?',
593
- 'What error handling patterns?',
594
- 'What rate limiting/throttling?'
595
- ],
596
- searchTopics: ['API design best practices', 'REST API patterns']
597
- })
598
- }
599
-
600
- // Multi-tenancy layer (for SaaS)
601
- if (analysis.features.includes('multi-tenancy')) {
602
- layers.push({
603
- order: order++,
604
- name: 'Multi-tenancy Patterns',
605
- focus: 'What isolation and scaling patterns?',
606
- questions: [
607
- 'What data isolation model?',
608
- 'What authentication per tenant?',
609
- 'What resource limits?',
610
- 'What billing model integration?'
611
- ],
612
- searchTopics: ['multi-tenant architecture', 'SaaS patterns']
613
- })
614
- }
615
-
616
- // Real-time layer (for collaboration/live features)
617
- if (analysis.features.includes('realtime')) {
618
- layers.push({
619
- order: order++,
620
- name: 'Real-time Architecture',
621
- focus: 'What real-time patterns are needed?',
622
- questions: [
623
- 'WebSocket vs SSE vs polling?',
624
- 'What conflict resolution?',
625
- 'What offline support?',
626
- 'What scaling for connections?'
627
- ],
628
- searchTopics: ['real-time architecture', 'WebSocket patterns']
629
- })
630
- }
631
-
632
- // Performance layer (for high-traffic or data-intensive)
633
- if (analysis.isExternalFacing || analysis.complexity >= 6 ||
634
- /performance|speed|optimization|cache/i.test(analysis.primaryType)) {
635
- layers.push({
636
- order: order++,
637
- name: 'Performance Optimization',
638
- focus: 'What performance patterns are needed?',
639
- questions: [
640
- 'What caching strategy?',
641
- 'What lazy loading patterns?',
642
- 'What CDN/edge considerations?',
643
- 'What database optimization?'
644
- ],
645
- searchTopics: ['performance optimization', 'caching strategies']
646
- })
647
- }
648
-
649
- // Integration layer (for external APIs/services)
650
- if (/integration|external api|third-party|webhook/i.test(analysis.primaryType) ||
651
- analysis.features.some(f => /payment|email|sms|notification/i.test(f))) {
652
- layers.push({
653
- order: order++,
654
- name: 'Integration Patterns',
655
- focus: 'What integration patterns are robust?',
656
- questions: [
657
- 'What retry/circuit breaker patterns?',
658
- 'What error handling for external failures?',
659
- 'What monitoring/alerting?',
660
- 'What idempotency requirements?'
661
- ],
662
- searchTopics: ['integration patterns', 'API integration best practices']
663
- })
664
- }
665
-
666
- // Testing Strategy layer (for complex/high-risk)
667
- if (analysis.riskLevel === 'HIGH' || analysis.complexity >= 7) {
668
- layers.push({
669
- order: order++,
670
- name: 'Testing Strategy',
671
- focus: 'What testing coverage is needed?',
672
- questions: [
673
- 'What unit vs integration vs e2e balance?',
674
- 'What edge cases to cover?',
675
- 'What load/stress testing?',
676
- 'What security testing?'
677
- ],
678
- searchTopics: ['testing strategy', `${analysis.primaryType} testing`]
679
- })
680
- }
681
-
682
- return layers
683
- }
684
-
685
- // Execute research for a single layer using Claude's knowledge
686
- // WHY: Domain knowledge (UX, DB design, security patterns) comes from Claude's training
687
- // Stack knowledge (Prisma, React) comes from Context7 in Step 2.7
688
- function executeLayerResearch(layer, changeAnalysis) {
689
- const result = {
690
- layer: layer.name,
691
- findings: [],
692
- recommendations: [],
693
- warnings: [],
694
- requiredItems: [], // Critical checklist items that MUST be addressed
695
- source: 'claude-knowledge'
696
- }
697
-
698
- // Claude generates best practices based on:
699
- // - Layer context (what domain?)
700
- // - Change analysis (what's being built?)
701
- // - Questions to answer (what to research?)
702
- //
703
- // Claude's training includes:
704
- // - UX: Nielsen Norman, Baymard Institute, Laws of UX
705
- // - Database: Codd's normalization, indexing patterns
706
- // - Security: OWASP, auth patterns, encryption
707
- // - API: REST dissertation, versioning patterns
708
- // - Architecture: distributed systems, caching, scaling
709
-
710
- result.findings = generateDomainKnowledge(layer, changeAnalysis)
711
- result.recommendations = generateRecommendations(layer, changeAnalysis)
712
- result.warnings = checkForWarnings(layer, changeAnalysis)
713
-
714
- // Inject critical required items based on layer and context
715
- // WHY: These are non-negotiable security/compliance requirements
716
- result.requiredItems = injectCriticalRequiredItems(layer, changeAnalysis)
717
-
718
- return result
719
- }
720
-
721
- // ============================================================
722
- // CRITICAL FLOW REQUIREMENTS (v2.8.0)
723
- // ============================================================
724
- // These are non-negotiable items that MUST be in the checklist
725
- // WHY: Security/compliance failures have legal/financial consequences
726
-
727
- /**
728
- * Inject critical required items based on layer type and change context
729
- * Returns checklist items that agents MUST verify are implemented
730
- */
731
- function injectCriticalRequiredItems(layer, changeAnalysis) {
732
- const items = []
733
-
734
- // Security Requirements Layer
735
- if (layer.name === 'Security Requirements') {
736
- // Auth-related critical items
737
- if (changeAnalysis.hasAuth) {
738
- items.push(...CRITICAL_FLOWS.auth.security)
739
- }
740
- // Payment-related critical items
741
- if (changeAnalysis.hasPayment) {
742
- items.push(...CRITICAL_FLOWS.payment.security)
743
- }
744
- // Sensitive data handling
745
- if (changeAnalysis.hasSensitiveData) {
746
- items.push(...CRITICAL_FLOWS.sensitiveData.security)
747
- }
748
- }
749
-
750
- // Compliance Layer
751
- if (layer.name.includes('Compliance')) {
752
- if (changeAnalysis.industryContext === 'healthcare') {
753
- items.push(...CRITICAL_FLOWS.healthcare.compliance)
754
- }
755
- if (changeAnalysis.industryContext === 'fintech') {
756
- items.push(...CRITICAL_FLOWS.fintech.compliance)
757
- }
758
- }
759
-
760
- // Data Architecture Layer
761
- if (layer.name === 'Data Architecture') {
762
- if (changeAnalysis.hasSensitiveData) {
763
- items.push(...CRITICAL_FLOWS.sensitiveData.dataArchitecture)
764
- }
765
- }
766
-
767
- return items
768
- }
769
-
770
- /**
771
- * Critical Flow Definitions
772
- * Format: { category: { layer: [...items] } }
773
- * Each item has: id, check, why, severity
774
- */
775
- const CRITICAL_FLOWS = {
776
- // ============================================================
777
- // AUTH CRITICAL FLOWS
778
- // ============================================================
779
- auth: {
780
- security: [
781
- {
782
- id: 'auth-password-hash',
783
- check: '☐ Password hashing with bcrypt/argon2 (cost factor ≄ 10)',
784
- why: 'Plain text or weak hashing = immediate breach if DB leaked',
785
- severity: 'critical'
786
- },
787
- {
788
- id: 'auth-rate-limit',
789
- check: '☐ Rate limiting on login (max 5 attempts per 15 min)',
790
- why: 'Prevents brute force attacks',
791
- severity: 'critical'
792
- },
793
- {
794
- id: 'auth-session-timeout',
795
- check: '☐ Session timeout configured (≤ 24h, ≤ 15min for sensitive)',
796
- why: 'Abandoned sessions are attack vectors',
797
- severity: 'high'
798
- },
799
- {
800
- id: 'auth-csrf',
801
- check: '☐ CSRF protection on all state-changing endpoints',
802
- why: 'OWASP Top 10 vulnerability',
803
- severity: 'critical'
804
- },
805
- {
806
- id: 'auth-secure-cookies',
807
- check: '☐ Cookies: httpOnly, secure, sameSite=strict',
808
- why: 'Prevents XSS token theft and CSRF',
809
- severity: 'critical'
810
- },
811
- {
812
- id: 'auth-password-policy',
813
- check: '☐ Password policy enforced (min 8 chars, complexity optional)',
814
- why: 'Weak passwords are #1 breach cause',
815
- severity: 'high'
816
- },
817
- {
818
- id: 'auth-account-lockout',
819
- check: '☐ Account lockout after repeated failures (with unlock mechanism)',
820
- why: 'Prevents brute force, but needs recovery path',
821
- severity: 'medium'
822
- }
823
- ],
824
- flow: [
825
- {
826
- id: 'auth-flow-login',
827
- check: '☐ Login flow: input → validate → session → redirect',
828
- why: 'Standard secure login pattern',
829
- severity: 'high'
830
- },
831
- {
832
- id: 'auth-flow-logout',
833
- check: '☐ Logout: invalidate session server-side (not just cookie)',
834
- why: 'Client-side only logout leaves session valid',
835
- severity: 'high'
836
- },
837
- {
838
- id: 'auth-flow-forgot',
839
- check: '☐ Forgot password: email → time-limited token → reset',
840
- why: 'Token must expire (≤ 1 hour)',
841
- severity: 'high'
842
- }
843
- ]
844
- },
845
-
846
- // ============================================================
847
- // PAYMENT CRITICAL FLOWS
848
- // ============================================================
849
- payment: {
850
- security: [
851
- {
852
- id: 'payment-no-card-storage',
853
- check: '☐ NO raw card numbers stored (use Stripe/payment provider tokens)',
854
- why: 'PCI-DSS requirement, storing cards = massive liability',
855
- severity: 'critical'
856
- },
857
- {
858
- id: 'payment-https',
859
- check: '☐ HTTPS enforced on all payment pages',
860
- why: 'Payment data in transit must be encrypted',
861
- severity: 'critical'
862
- },
863
- {
864
- id: 'payment-webhook-verify',
865
- check: '☐ Webhook signature verification (never trust unverified webhooks)',
866
- why: 'Attackers can fake payment success webhooks',
867
- severity: 'critical'
868
- },
869
- {
870
- id: 'payment-idempotency',
871
- check: '☐ Idempotency keys for payment creation',
872
- why: 'Prevents double charges on retry',
873
- severity: 'high'
874
- },
875
- {
876
- id: 'payment-amount-verify',
877
- check: '☐ Server-side price verification (never trust client price)',
878
- why: 'Attackers modify client-side prices',
879
- severity: 'critical'
880
- }
881
- ],
882
- flow: [
883
- {
884
- id: 'payment-flow-checkout',
885
- check: '☐ Checkout flow: cart → address → payment → confirm → receipt',
886
- why: 'Standard e-commerce pattern users expect',
887
- severity: 'medium'
888
- },
889
- {
890
- id: 'payment-flow-error',
891
- check: '☐ Payment error handling with clear user message',
892
- why: 'Failed payments need recovery path',
893
- severity: 'high'
894
- },
895
- {
896
- id: 'payment-flow-refund',
897
- check: '☐ Refund flow documented (even if manual)',
898
- why: 'Legal requirement in most jurisdictions',
899
- severity: 'high'
900
- }
901
- ]
902
- },
903
-
904
- // ============================================================
905
- // SENSITIVE DATA CRITICAL FLOWS
906
- // ============================================================
907
- sensitiveData: {
908
- security: [
909
- {
910
- id: 'data-encryption-rest',
911
- check: '☐ Encryption at rest for PII/PHI (AES-256 or DB-level)',
912
- why: 'Breached DB without encryption = full exposure',
913
- severity: 'critical'
914
- },
915
- {
916
- id: 'data-encryption-transit',
917
- check: '☐ Encryption in transit (TLS 1.2+)',
918
- why: 'Data interception prevention',
919
- severity: 'critical'
920
- },
921
- {
922
- id: 'data-access-logging',
923
- check: '☐ Audit logging for sensitive data access',
924
- why: 'Required for breach investigation and compliance',
925
- severity: 'high'
926
- },
927
- {
928
- id: 'data-minimization',
929
- check: '☐ Data minimization (only collect what is needed)',
930
- why: 'GDPR principle, reduces breach impact',
931
- severity: 'medium'
932
- }
933
- ],
934
- dataArchitecture: [
935
- {
936
- id: 'data-arch-backup',
937
- check: '☐ Backup strategy with encryption',
938
- why: 'Backups are often unencrypted breach vector',
939
- severity: 'high'
940
- },
941
- {
942
- id: 'data-arch-retention',
943
- check: '☐ Data retention policy defined',
944
- why: 'Legal requirement (GDPR right to deletion)',
945
- severity: 'medium'
946
- }
947
- ]
948
- },
949
-
950
- // ============================================================
951
- // HEALTHCARE COMPLIANCE (HIPAA)
952
- // ============================================================
953
- healthcare: {
954
- compliance: [
955
- {
956
- id: 'hipaa-phi-encrypt',
957
- check: '☐ All PHI encrypted at rest and in transit',
958
- why: 'HIPAA Security Rule requirement',
959
- severity: 'critical'
960
- },
961
- {
962
- id: 'hipaa-access-control',
963
- check: '☐ Role-based access control for PHI',
964
- why: 'Minimum necessary standard',
965
- severity: 'critical'
966
- },
967
- {
968
- id: 'hipaa-audit-trail',
969
- check: '☐ Audit trail for all PHI access (who, what, when)',
970
- why: 'HIPAA requires 6-year audit log retention',
971
- severity: 'critical'
972
- },
973
- {
974
- id: 'hipaa-baa',
975
- check: '☐ BAA signed with all vendors handling PHI',
976
- why: 'Business Associate Agreement legally required',
977
- severity: 'critical'
978
- },
979
- {
980
- id: 'hipaa-breach-plan',
981
- check: '☐ Breach notification plan documented',
982
- why: '60-day notification requirement',
983
- severity: 'high'
984
- }
985
- ]
986
- },
987
-
988
- // ============================================================
989
- // FINTECH COMPLIANCE (PCI-DSS)
990
- // ============================================================
991
- fintech: {
992
- compliance: [
993
- {
994
- id: 'pci-no-pan',
995
- check: '☐ No PAN (card numbers) stored unless PCI certified',
996
- why: 'PCI-DSS Level 1 requirement',
997
- severity: 'critical'
998
- },
999
- {
1000
- id: 'pci-tokenization',
1001
- check: '☐ Tokenization for card data (Stripe, Braintree)',
1002
- why: 'Removes PCI scope from your systems',
1003
- severity: 'critical'
1004
- },
1005
- {
1006
- id: 'pci-network-segment',
1007
- check: '☐ Network segmentation for payment systems',
1008
- why: 'Limits breach blast radius',
1009
- severity: 'high'
1010
- },
1011
- {
1012
- id: 'fintech-kyc',
1013
- check: '☐ KYC verification flow for financial accounts',
1014
- why: 'AML/KYC regulations',
1015
- severity: 'high'
1016
- },
1017
- {
1018
- id: 'fintech-transaction-limits',
1019
- check: '☐ Transaction limits and velocity checks',
1020
- why: 'Fraud prevention, regulatory requirement',
1021
- severity: 'high'
1022
- },
1023
- {
1024
- id: 'fintech-audit',
1025
- check: '☐ Transaction audit trail (immutable)',
1026
- why: 'Regulatory reporting requirement',
1027
- severity: 'critical'
1028
- }
1029
- ]
1030
- }
1031
- }
1032
-
1033
- // Generate domain knowledge using Claude's reasoning
1034
- // This is where Claude applies its training to the specific change
1035
- function generateDomainKnowledge(layer, changeAnalysis) {
1036
- const findings = []
1037
-
1038
- // Based on layer type, Claude will reason about best practices
1039
- // The actual content comes from Claude's response when /csetup runs
1040
-
1041
- findings.push({
1042
- question: layer.questions[0],
1043
- // Claude fills this based on its knowledge when executing
1044
- analysis: `[Claude analyzes: ${layer.focus}]`,
1045
- bestPractices: [], // Claude lists industry standards
1046
- antiPatterns: [], // Claude lists what to avoid
1047
- tradeoffs: [] // Claude explains trade-offs
1048
- })
359
+ ---
1049
360
 
1050
- return findings
1051
- }
361
+ #### Step 2.6.2: Detect Libraries
1052
362
 
1053
- // Check for conflicts between design system and industry practices
1054
- function checkDesignConflicts(tokens, researchResults, changeAnalysis) {
1055
- const conflicts = []
1056
-
1057
- // Only check for marketing/external-facing changes
1058
- if (!changeAnalysis.isExternalFacing) return conflicts
1059
-
1060
- // Check color appropriateness for industry
1061
- if (tokens.colors && changeAnalysis.industryContext) {
1062
- const primaryColor = tokens.colors.primary
1063
-
1064
- if (changeAnalysis.industryContext === 'healthcare') {
1065
- // Healthcare typically uses blue/green (trust, calm)
1066
- if (/red|orange|yellow/i.test(primaryColor)) {
1067
- conflicts.push({
1068
- aspect: 'Primary Color',
1069
- current: primaryColor,
1070
- industry: 'healthcare typically uses blue/green for trust and calm',
1071
- recommendation: 'Consider if bright colors are appropriate for healthcare context'
1072
- })
1073
- }
1074
- }
363
+ **Read these files and identify library/framework names:**
364
+ - `package.json` (dependencies, devDependencies)
365
+ - `requirements.txt` or `pyproject.toml` (Python)
366
+ - `openspec/changes/{changeId}/proposal.md`
367
+ - `openspec/changes/{changeId}/design.md`
1075
368
 
1076
- if (changeAnalysis.industryContext === 'fintech') {
1077
- // Fintech typically uses blue/green (trust, money)
1078
- if (/pink|purple|orange/i.test(primaryColor)) {
1079
- conflicts.push({
1080
- aspect: 'Primary Color',
1081
- current: primaryColor,
1082
- industry: 'fintech typically uses blue/green for trust and stability',
1083
- recommendation: 'Consider if playful colors fit financial services context'
1084
- })
1085
- }
1086
- }
1087
- }
369
+ **Look for:** React, Next.js, Vue, Angular, FastAPI, Express, Django, Prisma, Drizzle, SQLAlchemy, Vitest, Jest, Playwright, Stripe, better-auth, etc.
1088
370
 
1089
- // Check animation appropriateness
1090
- if (tokens.animations && changeAnalysis.hasCompliance) {
1091
- if (tokens.animations.enableScrollAnimations) {
1092
- conflicts.push({
1093
- aspect: 'Scroll Animations',
1094
- current: 'enabled',
1095
- industry: 'compliance-heavy sites often minimize animations for accessibility',
1096
- recommendation: 'Ensure animations have reduced-motion alternatives'
1097
- })
1098
- }
1099
- }
371
+ **Output:**
+ ```
+ šŸ” Libraries Detected:
+ - Next.js (from package.json)
+ - Prisma (from design.md)
+ - better-auth (from tasks.md)
+ ```
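
A minimal sketch of the package.json half of this detection (Node runtime assumed; Python manifests and names that only appear in proposal.md or design.md are collected the same way):

```typescript
import { readFileSync, existsSync } from 'node:fs'

// Collect dependency names from package.json; every name is later passed to Context7 for resolution
function detectJsLibraries(projectRoot = '.'): string[] {
  const path = `${projectRoot}/package.json`
  if (!existsSync(path)) return []
  const pkg = JSON.parse(readFileSync(path, 'utf8'))
  return Object.keys({ ...(pkg.dependencies ?? {}), ...(pkg.devDependencies ?? {}) })
}
```
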
1100
378
 
1101
- return conflicts
1102
- }
379
+ ---
1103
380
 
1104
- // Generate research checklist markdown
1105
- function generateResearchChecklist(changeAnalysis, layers, results) {
1106
- let content = `# Research Checklist: ${changeAnalysis.primaryType}\n\n`
1107
- content += `> Generated by Adaptive Depth Research (v2.4.0)\n`
1108
- content += `> Complexity: ${changeAnalysis.complexity}/10 | Risk: ${changeAnalysis.riskLevel}\n\n`
1109
-
1110
- if (layers.length === 0) {
1111
- content += `## āœ… No Research Required\n\n`
1112
- content += `This is a trivial change (complexity ${changeAnalysis.complexity}/10).\n`
1113
- content += `Proceed directly with implementation.\n`
1114
- return content
1115
- }
381
+ #### Step 2.6.3: Fetch Best Practices via Context7
1116
382
 
1117
- content += `## Summary\n\n`
1118
- content += `| Layer | Focus | Status |\n`
1119
- content += `|-------|-------|--------|\n`
1120
- layers.forEach((layer, idx) => {
1121
- content += `| L${idx + 1}: ${layer.name} | ${layer.focus} | ā³ Pending |\n`
1122
- })
383
+ **For EACH detected library, call these MCP tools:**
1123
384
 
1124
- content += `\n---\n\n`
385
+ ```
386
+ 1. mcp__context7__resolve-library-id
387
+ Input: { libraryName: "Next.js" }
388
+ Output: Get the context7CompatibleLibraryID (e.g., "/vercel/next.js")
389
+
390
+ 2. mcp__context7__get-library-docs
391
+ Input: {
392
+ context7CompatibleLibraryID: "/vercel/next.js",
393
+ topic: "best practices, patterns, common mistakes, [other-lib-names]",
394
+ mode: "code"
395
+ }
396
+ Output: Documentation with best practices
397
+ ```
1125
398
 
1126
- // Detail each layer
1127
- results.forEach((result, idx) => {
1128
- const layer = layers[idx]
1129
- content += `## L${idx + 1}: ${layer.name}\n\n`
1130
- content += `**Focus:** ${layer.focus}\n\n`
399
+ **Smart Topic Query:** Include OTHER detected library names in the topic for cross-library integration docs.
400
+ Example: When fetching Prisma docs, include "Next.js, React" in topic.
1131
401
 
1132
- content += `### Key Questions\n\n`
1133
- layer.questions.forEach(q => {
1134
- content += `- [ ] ${q}\n`
1135
- })
402
+ **Skip if:**
403
+ - Library not found in Context7 (note in warnings section instead)
404
+ - Library already has cached best practices from previous run
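
Written in the same pseudocode convention the rest of this file uses (MCP tools treated as plain async functions), the per-library loop could look like the sketch below; the shape of the resolve result is an assumption:

```typescript
// Ambient declarations so the sketch type-checks; the real signatures are owned by the MCP server.
declare function mcp__context7__resolve_library_id(args: { libraryName: string }): Promise<any>
declare function mcp__context7__get_library_docs(args: {
  context7CompatibleLibraryID: string
  topic: string
  mode?: string
}): Promise<string>

async function fetchBestPractices(libraries: string[]): Promise<Record<string, string>> {
  const docsByLibrary: Record<string, string> = {}
  for (const name of libraries) {
    const resolved = await mcp__context7__resolve_library_id({ libraryName: name })
    const libraryId = resolved?.context7CompatibleLibraryID  // assumed field name
    if (!libraryId) continue                                 // not found -> record under warnings instead
    // Smart Topic Query: name the OTHER detected libraries so integration docs surface
    const others = libraries.filter(l => l !== name).slice(0, 5).join(', ')
    docsByLibrary[name] = await mcp__context7__get_library_docs({
      context7CompatibleLibraryID: libraryId,
      topic: `best practices, patterns, common mistakes, ${others}`,
      mode: 'code',
    })
  }
  return docsByLibrary
}
```
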
1136
405
 
1137
- if (result.findings.length > 0) {
1138
- content += `\n### Findings\n\n`
1139
- result.findings.forEach(f => {
1140
- content += `#### ${f.topic}\n\n`
1141
- if (Array.isArray(f.content)) {
1142
- f.content.forEach(point => content += `- ${point}\n`)
1143
- } else {
1144
- content += `${f.content}\n`
1145
- }
1146
- content += `\n*Source: ${f.source}*\n\n`
1147
- })
1148
- }
406
+ ---
1149
407
 
1150
- if (result.recommendations.length > 0) {
1151
- content += `### Recommendations\n\n`
1152
- result.recommendations.forEach(r => {
1153
- content += `- ${r}\n`
1154
- })
1155
- }
408
+ #### Step 2.6.4: Determine Research Layers
1156
409
 
1157
- if (result.warnings.length > 0) {
1158
- content += `\n### āš ļø Warnings\n\n`
1159
- result.warnings.forEach(w => {
1160
- content += `- ${w}\n`
1161
- })
1162
- }
410
+ **Based on change analysis, select relevant research layers:**
1163
411
 
1164
- content += `\n---\n\n`
1165
- })
412
+ | Trigger | Layer | Focus |
413
+ |---------|-------|-------|
414
+ | Always (complexity > 1) | Best Practices | How do others do this? |
415
+ | hasAuth OR hasPayment | Security | Authentication, data protection |
416
+ | healthcare OR fintech | Compliance | HIPAA, PCI-DSS, regulations |
417
+ | isExternalFacing + hasUI | UX Patterns | User journey, accessibility |
418
+ | marketing type | Conversion Psychology | Triggers, objections, social proof |
419
+ | hasDatabase | Data Architecture | Normalization, indexing, scaling |
420
+ | hasAPI | API Design | REST/GraphQL, versioning, errors |
421
+ | payment feature | Payment Flows | PCI compliance, webhooks, idempotency |
1166
422
 
1167
- // Add Content Guidelines section for marketing pages
1168
- if (changeAnalysis.primaryType === 'marketing' || changeAnalysis.isExternalFacing) {
1169
- content += `## šŸ“ Content Guidelines\n\n`
1170
- content += `> Claude generates these guidelines based on the change context.\n`
1171
- content += `> Use these as a starting point for content creation.\n\n`
1172
-
1173
- content += `### Hero Section\n\n`
1174
- content += `**Headline Strategy:**\n`
1175
- content += `- Lead with primary pain point or aspiration\n`
1176
- content += `- 8-12 words, emotional trigger\n`
1177
- content += `- [Claude: Generate specific headline angle based on proposal]\n\n`
1178
-
1179
- content += `**Subheadline:**\n`
1180
- content += `- Concrete benefit + emotional payoff\n`
1181
- content += `- 15-25 words\n`
1182
- content += `- [Claude: Generate based on value proposition]\n\n`
1183
-
1184
- content += `**CTA:**\n`
1185
- content += `- Action verb + outcome\n`
1186
- content += `- [Claude: Generate based on user journey stage]\n\n`
1187
-
1188
- content += `### Value Proposition\n\n`
1189
- content += `For each feature, translate to:\n`
1190
- content += `| Feature | Benefit | Emotional Payoff |\n`
1191
- content += `|---------|---------|------------------|\n`
1192
- content += `| [Technical] | [What user gets] | [How it makes them feel] |\n\n`
1193
-
1194
- content += `### Social Proof\n\n`
1195
- content += `- Use specific results (numbers, timeframes)\n`
1196
- content += `- Match testimonials to target audience\n`
1197
- content += `- Include trust signals (logos, certifications)\n\n`
1198
-
1199
- content += `---\n\n`
1200
- }
423
+ **For each selected layer, generate:**
424
+ - 3-5 key questions to consider
425
+ - 2-3 recommendations from domain knowledge
426
+ - Warnings if applicable
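
The trigger table translates almost directly into code. An illustrative sketch (Main Claude applies the same mapping when selecting layers):

```typescript
// Direct transcription of the trigger table above; flag names mirror the change analysis output.
interface AnalysisFlags {
  complexity: number
  primaryType: string
  hasAuth: boolean
  hasPayment: boolean
  hasUI: boolean
  hasDatabase: boolean
  hasAPI: boolean
  isExternalFacing: boolean
  domains: string[]
}

function selectResearchLayers(a: AnalysisFlags): string[] {
  const layers: string[] = []
  if (a.complexity > 1) layers.push('Best Practices')
  if (a.hasAuth || a.hasPayment) layers.push('Security')
  if (a.domains.includes('healthcare') || a.domains.includes('fintech')) layers.push('Compliance')
  if (a.isExternalFacing && a.hasUI) layers.push('UX Patterns')
  if (a.primaryType === 'marketing') layers.push('Conversion Psychology')
  if (a.hasDatabase) layers.push('Data Architecture')
  if (a.hasAPI) layers.push('API Design')
  if (a.hasPayment) layers.push('Payment Flows')
  return layers
}
```
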
1201
427
 
1202
- content += `## Agent Instructions\n\n`
1203
- content += `When implementing this change, agents should:\n\n`
1204
- content += `1. Review each layer's findings before starting\n`
1205
- content += `2. Check off questions as they are addressed\n`
1206
- content += `3. Follow recommendations where applicable\n`
1207
- content += `4. Address warnings or document exceptions\n`
428
+ ---
1208
429
 
1209
- if (changeAnalysis.primaryType === 'marketing' || changeAnalysis.isExternalFacing) {
1210
- content += `5. Use Content Guidelines section for copy direction\n`
1211
- }
430
+ #### Step 2.6.5: Detect Integration Warnings
1212
431
 
1213
- return content
1214
- }
432
+ **Cross-reference library combinations for known issues:**
1215
433
 
1216
- // Helper: Generate recommendations based on layer and context
1217
- // Claude fills in actual recommendations when executing /csetup
1218
- function generateRecommendations(layer, changeAnalysis) {
1219
- // These are prompts for Claude to expand with actual knowledge
1220
- // When /csetup runs, Claude will provide specific recommendations
434
+ | Combination | Warning |
435
+ |-------------|---------|
436
+ | better-auth + custom JWT | better-auth handles JWT internally - don't duplicate |
437
+ | Prisma + serverless | Cold starts can timeout - use connection pooling |
438
+ | Next.js 14+ + pages router | App router is default - check which is intended |
439
+ | React 19 + old state libs | Check compatibility with new React features |
1221
440
 
1222
- return [
1223
- `[Claude: Based on ${layer.name} best practices for ${changeAnalysis.primaryType}]`,
1224
- `[Claude: Specific to this change's context and requirements]`
1225
- ]
1226
- }
441
+ **Check Context7 docs for integration warnings:**
442
+ - Look for "migration", "breaking changes", "compatibility" in docs
443
+ - Note version-specific warnings
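
One way to keep these combination checks reviewable is to hold them as data. A sketch with the rules from the table above (single-library triggers for brevity; not exhaustive):

```typescript
// Known-issue rules expressed as data; an illustrative starting point, not a complete list.
const KNOWN_COMBINATION_WARNINGS: Array<{ when: (libs: string[]) => boolean; warning: string }> = [
  { when: libs => libs.includes('better-auth'),
    warning: 'better-auth handles JWT internally - do not add a custom JWT layer on top' },
  { when: libs => libs.includes('prisma'),
    warning: 'Prisma on serverless: cold starts can time out - use connection pooling' },
  { when: libs => libs.includes('next'),
    warning: 'Next.js 14+: app router is the default - confirm whether pages router is intended' },
]

function integrationWarnings(libraries: string[]): string[] {
  const libs = libraries.map(l => l.toLowerCase())
  return KNOWN_COMBINATION_WARNINGS.filter(rule => rule.when(libs)).map(rule => rule.warning)
}
```
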
1227
444
 
1228
- // Helper: Check for potential warnings based on layer and context
1229
- function checkForWarnings(layer, changeAnalysis) {
1230
- const warnings = []
445
+ ---
1231
446
 
1232
- // Risk-based warnings
1233
- if (changeAnalysis.riskLevel === 'HIGH') {
1234
- warnings.push(`HIGH risk change - review ${layer.name} carefully before deployment`)
1235
- }
447
+ #### Step 2.6.6: Generate Critical Checklist Items
1236
448
 
1237
- // Compliance warnings
1238
- if (layer.name.includes('Compliance') && changeAnalysis.hasCompliance) {
1239
- warnings.push(`Regulatory compliance required - ensure all ${changeAnalysis.industryContext} requirements are met`)
1240
- }
449
+ **Based on change characteristics, inject required items:**
1241
450
 
1242
- // Payment warnings
1243
- if (changeAnalysis.hasPayment) {
1244
- warnings.push('Payment integration - ensure PCI compliance requirements are met')
1245
- }
451
+ **If hasAuth:**
+ ```
+ ☐ Password hashing with bcrypt/argon2 (cost ≄ 10)
+ ☐ Rate limiting on login (max 5 per 15 min)
+ ☐ Session timeout configured
+ ☐ CSRF protection on state-changing endpoints
+ ☐ Cookies: httpOnly, secure, sameSite=strict
+ ```
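
As a concrete reference for the hashing and cookie items, a minimal sketch assuming the `bcrypt` npm package and an Express-style response object (neither is mandated by this kit):

```typescript
import bcrypt from 'bcrypt'
import type { Response } from 'express'

const BCRYPT_COST = 12                       // checklist: cost factor >= 10

async function hashPassword(plain: string): Promise<string> {
  return bcrypt.hash(plain, BCRYPT_COST)
}

function setSessionCookie(res: Response, token: string): void {
  res.cookie('session', token, {
    httpOnly: true,                          // not readable from JS (limits XSS token theft)
    secure: true,                            // HTTPS only
    sameSite: 'strict',                      // CSRF mitigation
    maxAge: 24 * 60 * 60 * 1000,             // checklist: session timeout <= 24h
  })
}
```
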
1246
459
 
1247
- // Security warnings
1248
- if (layer.name.includes('Security') && changeAnalysis.hasSensitiveData) {
1249
- warnings.push('Sensitive data handling - ensure proper encryption and access controls')
1250
- }
460
+ **If hasPayment:**
+ ```
+ ☐ NO raw card storage (use Stripe tokens)
+ ☐ HTTPS on all payment pages
+ ☐ Webhook signature verification
+ ☐ Idempotency keys for payments
+ ☐ Server-side price verification
+ ```
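
For the webhook, idempotency, and price-verification items, a minimal sketch using the official `stripe` SDK (assumed here as the provider; the checklist itself is provider-agnostic):

```typescript
import Stripe from 'stripe'

const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!)

// Never trust an unverified webhook: reconstruct the event from the raw body + signature header
function verifyWebhook(rawBody: Buffer, signature: string): Stripe.Event {
  return stripe.webhooks.constructEvent(rawBody, signature, process.env.STRIPE_WEBHOOK_SECRET!)
}

// Price comes from the database, never from the client; the idempotency key makes retries safe
async function createPayment(orderId: string, amountFromDb: number) {
  return stripe.paymentIntents.create(
    { amount: amountFromDb, currency: 'usd', metadata: { orderId } },
    { idempotencyKey: `order-${orderId}` },
  )
}
```
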
1251
468
 
1252
- return warnings
1253
- }
469
+ **If healthcare/fintech:**
+ ```
+ ☐ Encryption at rest for PII/PHI
+ ☐ Audit logging for sensitive data access
+ ☐ Data minimization applied
+ ☐ Compliance documentation prepared
  ```
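
For the audit-logging item, a minimal sketch of an append-only access record (storage backend and field names are illustrative):

```typescript
// Record who accessed what, and when, every time a PHI/PII record is touched.
interface AuditEntry {
  actorId: string      // who
  recordId: string     // what
  action: 'read' | 'update' | 'delete'
  at: string           // when (ISO timestamp)
}

async function logSensitiveAccess(
  entry: Omit<AuditEntry, 'at'>,
  append: (e: AuditEntry) => Promise<void>,   // append-only sink: DB table, log stream, ...
): Promise<void> {
  await append({ ...entry, at: new Date().toISOString() })
}
```
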
1255
476
 
1256
477
  ---
1257
478
 
1258
- ### Step 2.7: Auto-Setup Best Practices (v2.3.0 - Dynamic Detection)
1259
-
1260
- > **Zero-Maintenance Design:** Automatically detects any library/framework from spec text and resolves via Context7.
1261
- > **WHY:** Hardcoded mappings require constant maintenance and miss new libraries. Dynamic resolution works with any language (Python, Rust, Go, etc.) without code changes.
1262
-
1263
- ```typescript
1264
- // ============================================================
1265
- // STEP 2.7: Dynamic Tech Stack Detection & Best Practices
1266
- // ============================================================
1267
-
1268
- output(`\nšŸ” Detecting Tech Stack (Dynamic Resolution)...`)
1269
-
1270
- // 1. Gather text from ALL relevant sources
1271
- const textSources = {
1272
- proposal: Read(`openspec/changes/${changeId}/proposal.md`) || '',
1273
- tasks: Read(`openspec/changes/${changeId}/tasks.md`) || '',
1274
- design: fileExists(`openspec/changes/${changeId}/design.md`)
1275
- ? Read(`openspec/changes/${changeId}/design.md`) : '',
1276
- packageJson: fileExists('package.json') ? Read('package.json') : '',
1277
- requirementsTxt: fileExists('requirements.txt') ? Read('requirements.txt') : '',
1278
- pyprojectToml: fileExists('pyproject.toml') ? Read('pyproject.toml') : '',
1279
- cargoToml: fileExists('Cargo.toml') ? Read('Cargo.toml') : '',
1280
- goMod: fileExists('go.mod') ? Read('go.mod') : '',
1281
- composerJson: fileExists('composer.json') ? Read('composer.json') : '',
1282
- gemfile: fileExists('Gemfile') ? Read('Gemfile') : ''
1283
- }
479
+ #### Step 2.6.7: Write pre-work-context.md
1284
480
 
1285
- const allText = Object.values(textSources).join('\n')
481
+ **Create `openspec/changes/{changeId}/pre-work-context.md`:**
1286
482
 
1287
- // 2. Extract library names using semantic analysis (TRUE zero-maintenance)
1288
- // WHY: Pattern-based extraction still requires maintenance.
1289
- // Instead, use Claude's understanding to identify libraries from context.
1290
- output(` 🧠 Analyzing text semantically...`)
483
+ ```markdown
484
+ # Pre-Work Context: {changeId}
1291
485
 
1292
- const potentialLibraries = await extractLibrariesSemantically(allText)
486
+ > **Generated:** {date}
487
+ > **Purpose:** All context agents need before implementation
488
+ > **Read by:** All agents in STEP 0
1293
489
 
1294
- output(` šŸ“ Found ${potentialLibraries.length} potential libraries to verify`)
1295
-
1296
- // 3. Resolve each potential library with Context7 (validate it's a real library)
1297
- const resolvedLibraries = []
1298
- const resolutionCache = new Map()
1299
-
1300
- for (const candidate of potentialLibraries) {
1301
- if (resolutionCache.has(candidate.toLowerCase())) continue
1302
-
1303
- try {
1304
- const result = await mcp__context7__resolve_library_id({
1305
- libraryName: candidate
1306
- })
1307
-
1308
- const bestMatch = parseContext7Response(result, candidate)
1309
-
1310
- if (bestMatch && bestMatch.score >= 60) {
1311
- resolvedLibraries.push({
1312
- name: candidate,
1313
- context7Id: bestMatch.id,
1314
- title: bestMatch.title,
1315
- snippets: bestMatch.snippets,
1316
- score: bestMatch.score
1317
- })
1318
- resolutionCache.set(candidate.toLowerCase(), bestMatch)
1319
- output(` āœ… ${candidate} → ${bestMatch.id} (${bestMatch.snippets} snippets)`)
1320
- }
1321
- } catch (error) {
1322
- resolutionCache.set(candidate.toLowerCase(), null)
1323
- }
1324
- }
1325
-
1326
- output(`\nšŸ“Š Verified Libraries: ${resolvedLibraries.length}`)
1327
- resolvedLibraries.forEach(lib => {
1328
- output(` - ${lib.title} (${lib.context7Id})`)
1329
- })
1330
-
1331
- // 4. If no libraries detected, ask user for guidance
1332
- if (resolvedLibraries.length === 0) {
1333
- output(`\nāš ļø No libraries auto-detected from spec files`)
1334
-
1335
- const answer = await askUserQuestion({
1336
- questions: [{
1337
- question: 'Enter the main libraries/frameworks for this project (comma-separated):',
1338
- header: 'Libraries',
1339
- options: [
1340
- { label: 'Skip', description: 'Continue without library-specific best practices' },
1341
- { label: 'React, Next.js', description: 'Common frontend stack' },
1342
- { label: 'FastAPI, SQLAlchemy, Pydantic', description: 'Python API stack' },
1343
- { label: 'Express, Prisma', description: 'Node.js backend stack' }
1344
- ],
1345
- multiSelect: false
1346
- }]
1347
- })
1348
-
1349
- if (!answer.includes('Skip')) {
1350
- // Parse user input and resolve each
1351
- const userLibraries = answer.split(',').map(s => s.trim()).filter(Boolean)
1352
- for (const lib of userLibraries) {
1353
- const result = await mcp__context7__resolve_library_id({ libraryName: lib })
1354
- const bestMatch = parseContext7Response(result, lib)
1355
- if (bestMatch) {
1356
- resolvedLibraries.push({
1357
- name: lib,
1358
- context7Id: bestMatch.id,
1359
- title: bestMatch.title,
1360
- snippets: bestMatch.snippets,
1361
- score: bestMatch.score
1362
- })
1363
- }
1364
- }
1365
- }
1366
- }
1367
-
1368
- // 5. Generate best-practices files for resolved libraries
1369
- // v2.5.0: Smart Topic Query - include other library names for cross-library integration docs
1370
- const bpDir = '.claude/contexts/domain/project/best-practices/'
1371
- const existingBp = fileExists(bpDir) ? listFiles(bpDir) : []
1372
-
1373
- // Filter to libraries that don't have best-practices yet
1374
- const newLibraries = resolvedLibraries.filter(lib => {
1375
- const safeName = lib.name.toLowerCase().replace(/[^a-z0-9]/g, '-')
1376
- return !existingBp.some(f => f.toLowerCase().includes(safeName))
1377
- })
1378
-
1379
- if (newLibraries.length > 0) {
1380
- output(`\nšŸ“š Generating Best Practices from Context7...`)
1381
- output(` šŸ’” Using Smart Topic Query for cross-library integration docs`)
490
+ ---
1382
491
 
1383
- // Create directory if needed
1384
- if (!fileExists(bpDir)) {
1385
- mkdir(bpDir)
1386
- }
492
+ ## 1. Change Analysis
1387
493
 
1388
- // Collect all integration risks for summary
1389
- const integrationRisks = []
1390
-
1391
- for (const lib of newLibraries) {
1392
- output(` šŸ“– Fetching ${lib.title} best practices...`)
1393
-
1394
- try {
1395
- // v2.5.0: Smart Topic Query - include other library names in topic
1396
- // WHY: This captures integration docs (e.g., "drizzle adapter" in auth.js docs)
1397
- const otherLibNames = resolvedLibraries
1398
- .filter(l => l.name !== lib.name)
1399
- .map(l => l.name.toLowerCase())
1400
- .slice(0, 5) // Limit to 5 to avoid topic overflow
1401
- .join(', ')
1402
-
1403
- const smartTopic = [
1404
- 'best practices',
1405
- 'patterns',
1406
- 'anti-patterns',
1407
- 'common mistakes',
1408
- 'adapter', // Key for ORM + Auth integrations
1409
- 'integration', // Key for multi-library setups
1410
- 'schema', // Key for database + auth column naming
1411
- 'configuration', // Key for setup requirements
1412
- otherLibNames // Include other detected libraries
1413
- ].filter(Boolean).join(', ')
1414
-
1415
- const docs = await mcp__context7__get_library_docs({
1416
- context7CompatibleLibraryID: lib.context7Id,
1417
- topic: smartTopic,
1418
- mode: 'code'
1419
- })
494
+ | Aspect | Value |
495
+ |--------|-------|
496
+ | Type | {primaryType} |
497
+ | Complexity | {complexity}/10 |
498
+ | Risk Level | {riskLevel} |
499
+ | Domains | {domains or "General"} |
500
+ | Features | {features or "None detected"} |
1420
501
 
1421
- // v2.5.0: Detect integration risks from docs content
1422
- const risks = detectIntegrationRisks(docs, lib.name, resolvedLibraries)
1423
- if (risks.length > 0) {
1424
- integrationRisks.push(...risks)
1425
- output(` āš ļø Found ${risks.length} integration pattern(s) to review`)
1426
- }
502
+ ---
1427
503
 
1428
- const bpContent = generateBestPracticesFile(lib.title, docs, lib.context7Id)
1429
- const safeName = lib.name.toLowerCase().replace(/[^a-z0-9]/g, '-')
1430
- Write(`${bpDir}${safeName}.md`, bpContent)
504
+ ## 2. Library Best Practices
1431
505
 
1432
- output(` āœ… ${safeName}.md generated`)
1433
- } catch (error) {
1434
- output(` āš ļø ${lib.title} - failed to fetch docs, skipping`)
1435
- }
1436
- }
506
+ ### {Library 1}
1437
507
 
1438
- // v2.5.0: Generate integration risk summary if any found
1439
- if (integrationRisks.length > 0) {
1440
- generateIntegrationRiskSummary(integrationRisks, bpDir, resolvedLibraries)
1441
- output(`\nāš ļø Integration Risk Summary generated (${integrationRisks.length} items)`)
1442
- output(` šŸ“„ ${bpDir}INTEGRATION_RISKS.md`)
1443
- }
508
+ **Source:** Context7 ({context7Id})
1444
509
 
1445
- // Generate/update index.md
1446
- generateBestPracticesIndex(resolvedLibraries, changeId)
1447
- output(` āœ… index.md updated`)
510
+ **DO:**
511
+ - {best practice 1}
512
+ - {best practice 2}
1448
513
 
1449
- output(`\nāœ… Best Practices Setup Complete!`)
1450
- output(` New files: ${newLibraries.length}`)
1451
- output(` Location: ${bpDir}`)
1452
- } else if (resolvedLibraries.length > 0) {
1453
- output(`\nāœ… Best Practices: Already configured for detected libraries`)
1454
- } else {
1455
- output(`\nāœ… Best Practices: Skipped (no libraries to configure)`)
1456
- }
514
+ **DON'T:**
515
+ - {anti-pattern 1}
516
+ - {anti-pattern 2}
1457
517
 
1458
- // 6. Store resolved stack for context.md generation
1459
- const stackForContext = {
1460
- detected: resolvedLibraries.map(l => l.name),
1461
- resolved: resolvedLibraries,
1462
- bestPracticesPath: bpDir,
1463
- files: existingBp.concat(newLibraries.map(l => `${l.name.toLowerCase().replace(/[^a-z0-9]/g, '-')}.md`))
1464
- }
518
+ **Code Example:**
519
+ ```{lang}
520
+ {example code from Context7}
1465
521
  ```
1466
522
 
523
+ ### {Library 2}
524
+ {repeat structure}
525
+
1467
526
  ---
1468
527
 
1469
- ### Helper: extractLibrariesSemantically() - PRIMARY
528
+ ## 3. Research Findings
1470
529
 
1471
- > **TRUE Zero-Maintenance:** Uses Claude's semantic understanding to extract library names from any text.
1472
- > **WHY:** Pattern-based regexes always have edge cases; Claude understands context and meaning.
530
+ ### L1: {Layer Name}
1473
531
 
1474
- ```typescript
1475
- async function extractLibrariesSemantically(text: string): Promise<string[]> {
1476
- // Truncate text if too long (keep first 8000 chars for efficiency)
1477
- const truncatedText = text.length > 8000 ? text.slice(0, 8000) + '\n...[truncated]' : text
1478
-
1479
- // Use Claude to semantically extract library/framework names
1480
- // This is the MAIN Claude running /csetup, so we just analyze inline
1481
- const prompt = `
1482
- Analyze this text and extract ALL programming libraries, frameworks, and tools mentioned.
1483
-
1484
- TEXT:
1485
- ${truncatedText}
1486
-
1487
- INSTRUCTIONS:
1488
- 1. Look for any technology names (libraries, frameworks, ORMs, databases, tools)
1489
- 2. Include both explicit mentions ("using Mastra") and implicit ones ("PostgreSQL database")
1490
- 3. Return ONLY the library/framework names, one per line
1491
- 4. Do NOT include generic terms like "API", "REST", "database" (unless it's a specific product name)
1492
- 5. Do NOT include programming languages (Python, JavaScript, etc.) unless they're library names
1493
- 6. Include package names from any language (npm, pip, cargo, go, etc.)
1494
-
1495
- OUTPUT FORMAT (one name per line, nothing else):
1496
- `
532
+ **Key Questions:**
533
+ - {question 1}
534
+ - {question 2}
1497
535
 
1498
- // Since this runs within Main Claude context, we analyze inline
1499
- // The output parsing happens in the calling code
1500
- // For now, return a simple extraction that Main Claude will process
536
+ **Recommendations:**
537
+ - {recommendation based on domain knowledge}
1501
538
 
1502
- // Fallback: Use pattern-based extraction as baseline
1503
- // Main Claude will enhance this with semantic understanding
1504
- const patternBased = extractPotentialLibraryNames(text)
539
+ **Warnings:**
540
+ - {warning if applicable}
1505
541
 
1506
- // Main Claude: When you execute this, also semantically analyze the text
1507
- // and add any libraries you identify from context that patterns missed.
1508
- // Example: "The system uses Mastra for orchestration" → add "Mastra"
542
+ {repeat for each layer}
1509
543
 
1510
- return patternBased
1511
- }
1512
- ```
544
+ ---
1513
545
 
1514
- **Execution Note for Main Claude:**
546
+ ## 4. Integration Warnings
1515
547
 
1516
- When running `/csetup`, after calling `extractLibrariesSemantically()`:
1517
- 1. Review the spec text yourself (proposal.md, design.md, tasks.md)
1518
- 2. Identify any library/framework names the patterns might have missed
1519
- 3. Add them to the `potentialLibraries` array before Context7 validation
548
+ āš ļø **{Library A} + {Library B}:**
549
+ {warning description}
1520
550
 
1521
- This hybrid approach ensures:
1522
- - Pattern extraction catches obvious cases quickly
1523
- - Claude's semantic understanding catches edge cases
1524
- - Context7 validates everything (filters out false positives)
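A minimal sketch of that merge step might look like this; `semanticLibraries` (the names Main Claude identifies by reading the spec) and the merge helper itself are assumptions, while `extractPotentialLibraryNames()` is the pattern-based helper defined below:

```typescript
// Sketch: combine pattern-based candidates with names Main Claude found semantically,
// then hand the merged list to Context7 validation to filter false positives.
function mergeLibraryCandidates(specText: string, semanticLibraries: string[]): string[] {
  const patternBased = extractPotentialLibraryNames(specText)
  // Deduplicate case-insensitively so "Mastra" and "mastra" collapse to one entry
  const seen = new Set<string>()
  return [...patternBased, ...semanticLibraries].filter(name => {
    const key = name.toLowerCase()
    if (seen.has(key)) return false
    seen.add(key)
    return true
  })
}
```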
551
+ āš ļø **{Another combination}:**
552
+ {warning description}
1525
553
 
1526
554
  ---
1527
555
 
1528
- ### Helper: extractPotentialLibraryNames() - FALLBACK
556
+ ## 5. Critical Checklist
1529
557
 
1530
- > **Pattern-based fallback:** Provides baseline extraction. Main Claude enhances with semantic analysis.
558
+ > **MUST complete before marking phase done**
1531
559
 
1532
- ```typescript
1533
- function extractPotentialLibraryNames(text: string): string[] {
1534
- const candidates = new Set<string>()
1535
-
1536
- // === Pattern 1: Package file dependencies ===
1537
- // package.json: "react": "^18.0.0" → react
1538
- const npmDeps = text.match(/"([a-z@][a-z0-9._/-]*)"\s*:\s*"[\^~]?\d/gi) || []
1539
- npmDeps.forEach(m => {
1540
- const match = m.match(/"([^"]+)"/)
1541
- if (match) candidates.add(match[1].replace(/^@[^/]+\//, '')) // Strip scope
1542
- })
560
+ ### Security
561
+ {security items if applicable}
1543
562
 
1544
- // requirements.txt: sqlalchemy==2.0.0 → sqlalchemy
1545
- // Allow optional leading whitespace for indented requirements
1546
- const pyDeps = text.match(/^\s*([a-zA-Z][a-zA-Z0-9_-]*)\s*[=<>~!]/gm) || []
1547
- pyDeps.forEach(m => {
1548
- const match = m.match(/([a-zA-Z][a-zA-Z0-9_-]*)\s*[=<>~!]/)
1549
- if (match) candidates.add(match[1])
1550
- })
563
+ ### Compliance
564
+ {compliance items if applicable}
1551
565
 
1552
- // Cargo.toml: tokio = "1.0" → tokio
1553
- // Allow optional leading whitespace for indented dependencies
1554
- const rustDeps = text.match(/^\s*([a-z][a-z0-9_-]*)\s*=/gm) || []
1555
- rustDeps.forEach(m => {
1556
- const match = m.match(/([a-z][a-z0-9_-]*)\s*=/)
1557
- if (match) candidates.add(match[1])
1558
- })
566
+ ### Data Protection
567
+ {data items if applicable}
1559
568
 
1560
- // go.mod: require github.com/gin-gonic/gin → gin
1561
- const goDeps = text.match(/(?:require\s+)?github\.com\/[^/\s]+\/([a-z][a-z0-9_-]*)/gi) || []
1562
- goDeps.forEach(m => {
1563
- const match = m.match(/\/([a-z][a-z0-9_-]*)$/i)
1564
- if (match) candidates.add(match[1])
1565
- })
1566
-
1567
- // === Pattern 2: Import statements ===
1568
- // Python: from sqlalchemy import, import pydantic
1569
- const pyImports = text.match(/(?:from|import)\s+([a-zA-Z][a-zA-Z0-9_]*)/g) || []
1570
- pyImports.forEach(m => {
1571
- const match = m.match(/(?:from|import)\s+([a-zA-Z][a-zA-Z0-9_]*)/)
1572
- if (match) candidates.add(match[1])
1573
- })
1574
-
1575
- // JS/TS: import X from 'Y', require('Y')
1576
- const jsImports = text.match(/(?:from|require\s*\(\s*)['"]([a-zA-Z@][a-zA-Z0-9._/-]*)['"]/g) || []
1577
- jsImports.forEach(m => {
1578
- const match = m.match(/['"]([^'"]+)['"]/)
1579
- if (match) {
1580
- const pkg = match[1].replace(/^@[^/]+\//, '').split('/')[0]
1581
- candidates.add(pkg)
1582
- }
1583
- })
569
+ ---
1584
570
 
1585
- // Rust: use tokio::, extern crate serde
1586
- const rustImports = text.match(/(?:use|extern\s+crate)\s+([a-z][a-z0-9_]*)/g) || []
1587
- rustImports.forEach(m => {
1588
- const match = m.match(/(?:use|extern\s+crate)\s+([a-z][a-z0-9_]*)/)
1589
- if (match) candidates.add(match[1])
1590
- })
571
+ ## 6. Quick Reference
1591
572
 
1592
- // === Pattern 3: Tech mentions in prose ===
1593
- // "using FastAPI", "with Prisma", "powered by Mastra"
1594
- const techMentions = text.match(/(?:using|with|via|built with|powered by)\s+([A-Z][a-zA-Z0-9.]*)/gi) || []
1595
- techMentions.forEach(m => {
1596
- const match = m.match(/\s([A-Z][a-zA-Z0-9.]*)$/i)
1597
- if (match) candidates.add(match[1])
1598
- })
573
+ **Package Manager:** {from tech-stack.md or detected}
574
+ **Test Command:** {detected test script}
575
+ **Build Command:** {detected build script}
1599
576
 
1600
- // CamelCase words (FastAPI, NextAuth)
1601
- const camelCase = text.match(/\b([A-Z][a-z]+(?:[A-Z][a-z]+)+)\b/g) || []
1602
- camelCase.forEach(w => candidates.add(w))
1603
-
1604
- // Mixed case words (SQLAlchemy, PostgreSQL, GraphQL - uppercase prefix + CamelCase)
1605
- const mixedCase = text.match(/\b([A-Z]{2,}[a-z]+[A-Za-z]*)\b/g) || []
1606
- mixedCase.forEach(w => candidates.add(w))
1607
-
1608
- // === Pattern 3.5: PascalCase single words after tech keywords ===
1609
- // "Framework: Mastra", "ORM: Prisma", "Database: PostgreSQL"
1610
- // WHY: Many library names are single PascalCase words (Mastra, Prisma, Django, Flask)
1611
- const techKeywordPatterns = [
1612
- /(?:framework|library|orm|database|db|backend|frontend|ui|css|styling)[:\s]+([A-Z][a-z]+\w*)/gi,
1613
- /(?:built\s+with|powered\s+by|using|via|with)\s+([A-Z][a-z]+\w*)/gi,
1614
- /\*\*(?:framework|library|orm|database|backend|frontend)\*\*[:\s]+([A-Z][a-z]+\w*)/gi
1615
- ]
1616
- techKeywordPatterns.forEach(pattern => {
1617
- const matches = text.matchAll(pattern)
1618
- for (const match of matches) {
1619
- if (match[1]) candidates.add(match[1])
1620
- }
1621
- })
577
+ ---
1622
578
 
1623
- // === Pattern 3.6: Standalone PascalCase in markdown lists ===
1624
- // "- Mastra for AI agents", "- PostgreSQL database", "* React frontend"
1625
- const mdListItems = text.match(/^[\s]*[-*]\s+([A-Z][a-z]+\w*)(?:\s|$)/gm) || []
1626
- mdListItems.forEach(m => {
1627
- const match = m.match(/[-*]\s+([A-Z][a-z]+\w*)/)
1628
- if (match) candidates.add(match[1])
1629
- })
579
+ **Agents: Read this file in STEP 0 before implementation.**
580
+ ```
1630
581
 
1631
- // === Pattern 3.7: Words after "for" in tech context ===
1632
- // "Mastra for AI orchestration", "Drizzle for database"
1633
- const forPattern = text.match(/([A-Z][a-z]+\w*)\s+for\s+(?:ai|database|backend|frontend|api|web|mobile|server|client)/gi) || []
1634
- forPattern.forEach(m => {
1635
- const match = m.match(/^([A-Z][a-z]+\w*)/)
1636
- if (match) candidates.add(match[1])
1637
- })
582
+ ---
1638
583
 
1639
- // Known framework patterns: Next.js, Vue.js, Express.js
1640
- const dotJs = text.match(/\b([A-Z][a-z]+)\.js\b/gi) || []
1641
- dotJs.forEach(m => {
1642
- const match = m.match(/([A-Z][a-z]+)/i)
1643
- if (match) candidates.add(match[1])
1644
- })
584
+ #### Step 2.6.8: Output Summary
1645
585
 
1646
- // === Pattern 4: Explicit tech stack sections ===
1647
- // "Tech Stack:", "Technologies:", "Built with:"
1648
- const techSection = text.match(/(?:tech\s*stack|technologies|built\s*with|dependencies)[:\s]+([^\n]+)/gi) || []
1649
- techSection.forEach(section => {
1650
- const items = section.split(/[,\s]+/)
1651
- items.forEach(item => {
1652
- const cleaned = item.replace(/[^a-zA-Z0-9.-]/g, '')
1653
- if (cleaned.length > 2) candidates.add(cleaned)
1654
- })
1655
- })
586
+ ```
587
+ āœ… Pre-Work Context Generated!
1656
588
 
1657
- // === Pattern 4.5: Markdown bold/code tech mentions ===
1658
- // "**Mastra**", "`prisma`", "**Framework:** Mastra"
1659
- const boldWords = text.match(/\*\*([A-Z][a-z]+\w*)\*\*/g) || []
1660
- boldWords.forEach(m => {
1661
- const match = m.match(/\*\*([A-Z][a-z]+\w*)\*\*/)
1662
- if (match) candidates.add(match[1])
1663
- })
589
+ šŸ“„ File: openspec/changes/{changeId}/pre-work-context.md
1664
590
 
1665
- const codeWords = text.match(/`([a-zA-Z][a-zA-Z0-9_-]+)`/g) || []
1666
- codeWords.forEach(m => {
1667
- const match = m.match(/`([a-zA-Z][a-zA-Z0-9_-]+)`/)
1668
- if (match && match[1].length > 2) candidates.add(match[1])
1669
- })
591
+ Contents:
592
+ - Change Analysis: {type}, {complexity}/10, {risk}
593
+ - Libraries: {count} ({names})
594
+ - Research Layers: {count}
595
+ - Integration Warnings: {count}
596
+ - Critical Checklist Items: {count}
1670
597
 
1671
- // === Filter out noise ===
1672
- // Use lowercase for case-insensitive comparison
1673
- const stopWords = new Set([
1674
- // Common English words (all lowercase for comparison)
1675
- 'the', 'this', 'that', 'with', 'from', 'using', 'for', 'and', 'but', 'not',
1676
- 'all', 'any', 'can', 'could', 'should', 'would', 'will', 'may', 'might',
1677
- 'each', 'every', 'some', 'many', 'most', 'other', 'such', 'only', 'just',
1678
- 'also', 'well', 'back', 'even', 'still', 'already', 'always', 'never',
1679
- // Common programming terms that aren't libraries
1680
- 'api', 'rest', 'http', 'https', 'json', 'xml', 'html', 'css', 'sql',
1681
- 'get', 'post', 'put', 'delete', 'patch', 'url', 'uri', 'uuid', 'id',
1682
- 'true', 'false', 'none', 'null', 'undefined', 'error', 'exception',
1683
- 'class', 'function', 'method', 'object', 'array', 'string', 'number',
1684
- 'boolean', 'int', 'float', 'double', 'char', 'byte', 'long', 'short',
1685
- 'public', 'private', 'protected', 'static', 'final', 'const', 'let', 'var',
1686
- 'import', 'export', 'module', 'package', 'interface', 'type', 'enum',
1687
- 'test', 'tests', 'spec', 'specs', 'mock', 'stub', 'fake', 'spy',
1688
- 'config', 'configuration', 'settings', 'options', 'params', 'args',
1689
- 'user', 'users', 'admin', 'auth', 'login', 'logout', 'session', 'token',
1690
- 'data', 'database', 'table', 'column', 'row', 'index', 'key', 'value',
1691
- 'file', 'files', 'path', 'dir', 'directory', 'folder', 'name', 'size',
1692
- 'create', 'read', 'update', 'delete', 'list', 'get', 'set', 'add', 'remove',
1693
- 'start', 'stop', 'run', 'build', 'deploy', 'install', 'setup', 'init',
1694
- // Version/date patterns
1695
- 'version', 'release', 'beta', 'alpha', 'stable', 'latest', 'current'
1696
- ])
1697
-
1698
- return [...candidates]
1699
- .filter(w => w.length > 2 && w.length < 30)
1700
- .filter(w => !stopWords.has(w.toLowerCase())) // Case-insensitive comparison
1701
- .filter(w => !/^\d+$/.test(w)) // Not pure numbers
1702
- .filter(w => !/^v?\d+\.\d+/.test(w)) // Not version numbers
1703
- .slice(0, 50) // Limit to avoid too many API calls
1704
- }
598
+ šŸ“Œ Agents will read this in STEP 0
1705
599
  ```
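For illustration only, a call to the `extractPotentialLibraryNames()` helper above on a short spec excerpt might behave as follows; the exact candidate list depends on the text, so the result shown is a plausible outcome rather than a guaranteed one:

```typescript
// Hypothetical usage of extractPotentialLibraryNames() on a small spec excerpt.
const sample = `
Tech Stack: Next.js, Drizzle, PostgreSQL
- Mastra for AI orchestration
"better-auth": "^1.0.0"
`
const candidates = extractPotentialLibraryNames(sample)
// Plausible candidates include: 'Next.js', 'Drizzle', 'PostgreSQL', 'Mastra', 'better-auth',
// plus some noise such as 'Tech' or 'Stack' that Context7 validation later discards.
```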
1706
600
 
1707
601
  ---
1708
602
 
1709
- ### Helper: parseContext7Response()
603
+ #### Step 2.6.9: Skip Conditions
1710
604
 
1711
- > **WHY:** Context7 returns multiple matches. Select the best one based on relevance score and snippet count.
605
+ **Skip this step entirely if:**
606
+ - Change is trivial (complexity = 1, risk = LOW, no special features)
607
+ - Output: `āœ… Pre-Work Context: Skipped (trivial change)`
1712
608
 
1713
- ```typescript
1714
- function parseContext7Response(response: string, searchTerm: string): {
1715
- id: string
1716
- title: string
1717
- snippets: number
1718
- score: number
1719
- } | null {
1720
- // Parse the Context7 response text to extract library info
1721
- // Response format includes lines like:
1722
- // - Title: SQLAlchemy
1723
- // - Context7-compatible library ID: /sqlalchemy/sqlalchemy
1724
- // - Code Snippets: 2830
1725
- // - Benchmark Score: 84.4
1726
-
1727
- const libraries = []
1728
- const blocks = response.split('----------').filter(b => b.trim())
1729
-
1730
- for (const block of blocks) {
1731
- const titleMatch = block.match(/Title:\s*(.+)/i)
1732
- const idMatch = block.match(/Context7-compatible library ID:\s*(\S+)/i)
1733
- const snippetsMatch = block.match(/Code Snippets:\s*(\d+)/i)
1734
- const scoreMatch = block.match(/Benchmark Score:\s*([\d.]+)/i)
1735
-
1736
- if (titleMatch && idMatch) {
1737
- libraries.push({
1738
- title: titleMatch[1].trim(),
1739
- id: idMatch[1].trim(),
1740
- snippets: snippetsMatch ? parseInt(snippetsMatch[1]) : 0,
1741
- score: scoreMatch ? parseFloat(scoreMatch[1]) : 50
1742
- })
1743
- }
1744
- }
609
+ **Skip library lookup if:**
610
+ - No new libraries detected
611
+ - All libraries already have Context7 cache
1745
612
 
1746
- if (libraries.length === 0) return null
1747
-
1748
- // Prefer exact title match, then highest score with good snippet count
1749
- const searchLower = searchTerm.toLowerCase()
1750
-
1751
- // First: exact match
1752
- const exactMatch = libraries.find(l =>
1753
- l.title.toLowerCase() === searchLower ||
1754
- l.id.toLowerCase().includes(searchLower)
1755
- )
1756
- if (exactMatch) return exactMatch
1757
-
1758
- // Second: partial match with good score
1759
- const partialMatches = libraries.filter(l =>
1760
- l.title.toLowerCase().includes(searchLower) ||
1761
- searchLower.includes(l.title.toLowerCase())
1762
- )
1763
- if (partialMatches.length > 0) {
1764
- return partialMatches.sort((a, b) => b.score - a.score)[0]
1765
- }
613
+ ---
1766
614
 
1767
- // Third: best overall score (only if snippets > 100 for quality)
1768
- const qualityLibs = libraries.filter(l => l.snippets > 100)
1769
- if (qualityLibs.length > 0) {
1770
- return qualityLibs.sort((a, b) => b.score - a.score)[0]
1771
- }
615
+ **āš ļø IMPORTANT:** This step requires YOU (Main Claude) to:
616
+ 1. Actually read the spec files
617
+ 2. Actually call Context7 MCP tools
618
+ 3. Actually write the pre-work-context.md file
1772
619
 
1773
- // Fallback: first result
1774
- return libraries[0]
1775
- }
1776
- ```
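As a rough usage sketch for `parseContext7Response()`, the response text below is invented but follows the field names listed in the comments above:

```typescript
// Hypothetical Context7 search response containing two candidate libraries.
const response = [
  '- Title: Drizzle ORM',
  '- Context7-compatible library ID: /drizzle-team/drizzle-orm',
  '- Code Snippets: 1500',
  '- Benchmark Score: 82.1',
  '----------',
  '- Title: Drizzle Kit',
  '- Context7-compatible library ID: /drizzle-team/drizzle-kit',
  '- Code Snippets: 300',
  '- Benchmark Score: 70.5'
].join('\n')

const best = parseContext7Response(response, 'drizzle')
// The first entry whose ID contains "drizzle" is treated as an exact match:
// { id: '/drizzle-team/drizzle-orm', title: 'Drizzle ORM', snippets: 1500, score: 82.1 }
```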
620
+ Do NOT treat this as pseudocode. EXECUTE these instructions.
1777
621
 
1778
622
  ---
1779
623
 
1780
- ### Step 2.8: Library Capability Validation (v2.2.0)
624
+ ### Step 2.7: Library Capability Validation (v2.2.0)
1781
625
 
1782
626
  > **NEW:** Verify chosen libraries support ALL spec requirements before proceeding
1783
627
  > **WHY:** Prevents spec drift: discovering mid-implementation that a library doesn't support a requirement
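The `capabilityAnalysis` block referenced in the hunk below is not shown in this diff; as a loose sketch of the idea, a check might pair each spec requirement with evidence found in the library's Context7 docs. All names here are illustrative, not the real shape of that object:

```typescript
// Sketch only: map each spec requirement to supporting evidence in the fetched docs.
// `requirements` and `docsByLibrary` are assumed inputs.
function findUnsupportedRequirements(
  requirements: string[],
  docsByLibrary: Record<string, string>
): string[] {
  return requirements.filter(req =>
    !Object.values(docsByLibrary).some(docs =>
      docs.toLowerCase().includes(req.toLowerCase())
    )
  )
}
// Any requirement returned here should be flagged before implementation starts,
// which is exactly the "spec drift" this step is meant to prevent.
```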
@@ -2014,351 +858,6 @@ const capabilityAnalysis = {
2014
858
  }
2015
859
  ```
2016
860
 
2017
- **Helper: generateBestPracticesFile()**
2018
-
2019
- > **Updated v2.3.0:** Now includes Context7 library ID for reference and refresh capability.
2020
-
2021
- ```typescript
2022
- function generateBestPracticesFile(
2023
- tech: string,
2024
- context7Docs: string,
2025
- context7Id: string
2026
- ): string {
2027
- return `# ${tech} Best Practices
2028
-
2029
- > **Source:** Context7 MCP
2030
- > **Library ID:** \`${context7Id}\`
2031
- > **Generated:** ${new Date().toISOString().split('T')[0]}
2032
- > **Refresh:** Query Context7 with the Library ID above to update this file
2033
-
2034
- ---
2035
-
2036
- ## Best Practices
2037
-
2038
- ${extractBestPractices(context7Docs)}
2039
-
2040
- ---
2041
-
2042
- ## Anti-Patterns to Avoid
2043
-
2044
- ${extractAntiPatterns(context7Docs)}
2045
-
2046
- ---
2047
-
2048
- ## Code Examples
2049
-
2050
- ${extractCodeExamples(context7Docs)}
2051
-
2052
- ---
2053
-
2054
- ## Quick Checklist
2055
-
2056
- Before committing ${tech} code:
2057
- ${extractChecklist(context7Docs)}
2058
-
2059
- ---
2060
-
2061
- **Agents read this file in STEP 0 before implementation.**
2062
- `
2063
- }
2064
-
2065
- // Helper: Extract best practices from Context7 docs
2066
- function extractBestPractices(docs: string): string {
2067
- // Look for sections about best practices, recommendations, patterns
2068
- const patterns = [
2069
- /best\s*practices?[:\s]+([^#]+?)(?=##|$)/gi,
2070
- /recommend(?:ed|ations)?[:\s]+([^#]+?)(?=##|$)/gi,
2071
- /(?:do|should)[:\s]+([^#]+?)(?=##|$)/gi
2072
- ]
2073
-
2074
- let extracted = ''
2075
- for (const pattern of patterns) {
2076
- const matches = docs.match(pattern)
2077
- if (matches) {
2078
- extracted += matches.join('\n\n')
2079
- }
2080
- }
2081
-
2082
- return extracted || docs.slice(0, 2000) // Fallback to first 2000 chars
2083
- }
2084
-
2085
- // Helper: Extract anti-patterns from Context7 docs
2086
- function extractAntiPatterns(docs: string): string {
2087
- const patterns = [
2088
- /anti-?patterns?[:\s]+([^#]+?)(?=##|$)/gi,
2089
- /avoid[:\s]+([^#]+?)(?=##|$)/gi,
2090
- /(?:don'?t|should\s*not)[:\s]+([^#]+?)(?=##|$)/gi,
2091
- /common\s*mistakes?[:\s]+([^#]+?)(?=##|$)/gi
2092
- ]
2093
-
2094
- let extracted = ''
2095
- for (const pattern of patterns) {
2096
- const matches = docs.match(pattern)
2097
- if (matches) {
2098
- extracted += matches.join('\n\n')
2099
- }
2100
- }
2101
-
2102
- return extracted || 'Review Context7 documentation for anti-patterns specific to your use case.'
2103
- }
2104
-
2105
- // Helper: Extract code examples from Context7 docs
2106
- function extractCodeExamples(docs: string): string {
2107
- // Extract code blocks
2108
- const codeBlocks = docs.match(/\`\`\`[\s\S]*?\`\`\`/g) || []
2109
- return codeBlocks.slice(0, 5).join('\n\n') || 'See Context7 documentation for code examples.'
2110
- }
2111
-
2112
- // Helper: Generate checklist from docs
2113
- function extractChecklist(docs: string): string {
2114
- // Look for checklist items or generate from best practices
2115
- const checklistPatterns = [
2116
- /- \[[ x]\][^\n]+/gi,
2117
- /\d+\.\s+[^\n]+/gi
2118
- ]
2119
-
2120
- let items = []
2121
- for (const pattern of checklistPatterns) {
2122
- const matches = docs.match(pattern)
2123
- if (matches) {
2124
- items = items.concat(matches.slice(0, 10))
2125
- }
2126
- }
2127
-
2128
- if (items.length > 0) {
2129
- return items.map(item => `- [ ] ${item.replace(/^[\d.-\[\]x\s]+/i, '')}`).join('\n')
2130
- }
2131
-
2132
- // Fallback: generic checklist
2133
- return `- [ ] Follow official documentation patterns
2134
- - [ ] Handle errors appropriately
2135
- - [ ] Add proper typing/validation
2136
- - [ ] Write tests for new code
2137
- - [ ] Review for security concerns`
2138
- }
2139
- ```
2140
-
2141
- ---
2142
-
2143
- ### Helper: detectIntegrationRisks() - v2.5.0
2144
-
2145
- > **Smart Risk Detection:** Scans Context7 docs for integration patterns that require attention.
2146
- > **WHY:** Proactively catch integration requirements BEFORE implementation, not at runtime.
2147
-
2148
- ```typescript
2149
- function detectIntegrationRisks(
2150
- docs: string,
2151
- currentLib: string,
2152
- allLibs: Array<{ name: string; title: string }>
2153
- ): Array<{ library: string; risk: string; pattern: string; recommendation: string }> {
2154
- const risks = []
2155
- const docsLower = docs.toLowerCase()
2156
-
2157
- // Integration risk patterns to detect
2158
- const riskPatterns = [
2159
- // Adapter patterns (ORM + Auth)
2160
- {
2161
- keywords: ['adapter', 'drizzleadapter', 'prismaadapter'],
2162
- risk: 'Database adapter configuration required',
2163
- pattern: 'adapter',
2164
- recommendation: 'Verify adapter schema matches expected column names'
2165
- },
2166
- // Column naming patterns
2167
- {
2168
- keywords: ['column', 'columnname', 'snake_case', 'camelcase', 'mapping'],
2169
- risk: 'Column naming convention mismatch possible',
2170
- pattern: 'schema',
2171
- recommendation: 'Check column naming between ORM schema and library expectations'
2172
- },
2173
- // Schema patterns
2174
- {
2175
- keywords: ['userstable', 'accountstable', 'sessionstable', 'schema'],
2176
- risk: 'Custom table schema required',
2177
- pattern: 'schema',
2178
- recommendation: 'Ensure table schemas match library documentation exactly'
2179
- },
2180
- // Sync/Migration patterns
2181
- {
2182
- keywords: ['sync', 'migrate', 'migration', 'syncurl', 'embedded replica'],
2183
- risk: 'Data synchronization setup required',
2184
- pattern: 'sync',
2185
- recommendation: 'Configure sync intervals and handle offline scenarios'
2186
- },
2187
- // Webhook patterns
2188
- {
2189
- keywords: ['webhook', 'webhookendpoint', 'webhooksecret'],
2190
- risk: 'Webhook endpoint configuration required',
2191
- pattern: 'webhook',
2192
- recommendation: 'Set up webhook endpoints and verify signatures'
2193
- },
2194
- // API Key patterns
2195
- {
2196
- keywords: ['apikey', 'secretkey', 'authtoken', 'bearer'],
2197
- risk: 'API credentials setup required',
2198
- pattern: 'credentials',
2199
- recommendation: 'Store credentials securely in environment variables'
2200
- },
2201
- // Lifecycle patterns
2202
- {
2203
- keywords: ['beforeall', 'afterall', 'beforeeach', 'aftereach', 'setup', 'teardown'],
2204
- risk: 'Test lifecycle hooks required',
2205
- pattern: 'lifecycle',
2206
- recommendation: 'Implement proper setup/teardown in test configuration'
2207
- }
2208
- ]
2209
-
2210
- for (const rp of riskPatterns) {
2211
- const found = rp.keywords.some(kw => docsLower.includes(kw.toLowerCase()))
2212
- if (found) {
2213
- // Check if this risk involves other detected libraries
2214
- const involvedLibs = allLibs
2215
- .filter(l => l.name !== currentLib)
2216
- .filter(l => docsLower.includes(l.name.toLowerCase()))
2217
- .map(l => l.name)
2218
-
2219
- risks.push({
2220
- library: currentLib,
2221
- risk: rp.risk,
2222
- pattern: rp.pattern,
2223
- recommendation: rp.recommendation,
2224
- involvedLibraries: involvedLibs
2225
- })
2226
- }
2227
- }
2228
-
2229
- return risks
2230
- }
2231
- ```
2232
-
2233
- ---
2234
-
2235
- ### Helper: generateIntegrationRiskSummary() - v2.5.0
2236
-
2237
- > **Risk Summary Output:** Creates INTEGRATION_RISKS.md with all detected cross-library concerns.
2238
- > **WHY:** Agents can review this BEFORE implementation to avoid common integration mistakes.
2239
-
2240
- ```typescript
2241
- function generateIntegrationRiskSummary(
2242
- risks: Array<{
2243
- library: string
2244
- risk: string
2245
- pattern: string
2246
- recommendation: string
2247
- involvedLibraries?: string[]
2248
- }>,
2249
- bpDir: string,
2250
- allLibs: Array<{ name: string; title: string }>
2251
- ): void {
2252
- // Group risks by pattern type
2253
- const byPattern = {}
2254
- for (const r of risks) {
2255
- if (!byPattern[r.pattern]) byPattern[r.pattern] = []
2256
- byPattern[r.pattern].push(r)
2257
- }
2258
-
2259
- const content = `# Integration Risk Summary
2260
-
2261
- > **Generated:** ${new Date().toISOString().split('T')[0]}
2262
- > **Template Version:** 2.5.0 - Smart Topic Query
2263
- > **Detected Libraries:** ${allLibs.map(l => l.name).join(', ')}
2264
-
2265
- ---
2266
-
2267
- ## āš ļø Review Before Implementation
2268
-
2269
- The following integration patterns were detected from library documentation.
2270
- **Agents should review these items in STEP 0 before writing code.**
2271
-
2272
- ---
2273
-
2274
- ${Object.entries(byPattern).map(([pattern, patternRisks]) => `
2275
- ### ${pattern.toUpperCase()} Patterns
2276
-
2277
- | Library | Risk | Recommendation |
2278
- |---------|------|----------------|
2279
- ${patternRisks.map(r => `| ${r.library} | ${r.risk} | ${r.recommendation} |`).join('\n')}
2280
- ${patternRisks.some(r => r.involvedLibraries?.length > 0) ? `
2281
- **Cross-library concerns:**
2282
- ${patternRisks.filter(r => r.involvedLibraries?.length > 0).map(r => `- ${r.library} ↔ ${r.involvedLibraries.join(', ')}: ${r.risk}`).join('\n')}
2283
- ` : ''}
2284
- `).join('\n')}
2285
-
2286
- ---
2287
-
2288
- ## Quick Checklist
2289
-
2290
- Before implementing integrations:
2291
-
2292
- ${[...new Set(risks.map(r => r.recommendation))].map(rec => `- [ ] ${rec}`).join('\n')}
2293
-
2294
- ---
2295
-
2296
- **This file is auto-generated by /csetup v2.5.0**
2297
- **Agents read this in STEP 0 alongside best-practices files**
2298
- `
2299
-
2300
- Write(`${bpDir}INTEGRATION_RISKS.md`, content)
2301
- }
2302
- ```
2303
-
2304
- ---
2305
-
2306
- ### Helper: generateBestPracticesIndex() - v2.5.0
2307
-
2308
- > **Index File:** Creates index.md registry of all best practices files.
2309
- > **v2.5.0:** Now includes INTEGRATION_RISKS.md if present.
2310
-
2311
- ```typescript
2312
- function generateBestPracticesIndex(
2313
- resolvedLibraries: Array<{ name: string; title: string; context7Id: string }>,
2314
- changeId: string
2315
- ): void {
2316
- const bpDir = '.claude/contexts/domain/project/best-practices/'
2317
- const hasIntegrationRisks = fileExists(`${bpDir}INTEGRATION_RISKS.md`)
2318
-
2319
- const content = `# Best Practices Index
2320
-
2321
- > **Generated:** ${new Date().toISOString().split('T')[0]}
2322
- > **Template Version:** 2.5.0 - Smart Topic Query
2323
- > **Change:** ${changeId}
2324
-
2325
- ---
2326
-
2327
- ## šŸ“š Library Best Practices
2328
-
2329
- | Library | File | Context7 ID |
2330
- |---------|------|-------------|
2331
- ${resolvedLibraries.map(lib => {
2332
- const safeName = lib.name.toLowerCase().replace(/[^a-z0-9]/g, '-')
2333
- return `| ${lib.title} | [${safeName}.md](./${safeName}.md) | \`${lib.context7Id}\` |`
2334
- }).join('\n')}
2335
-
2336
- ---
2337
-
2338
- ${hasIntegrationRisks ? `## āš ļø Integration Risks
2339
-
2340
- Cross-library integration concerns detected. **Review before implementation.**
2341
-
2342
- → [INTEGRATION_RISKS.md](./INTEGRATION_RISKS.md)
2343
-
2344
- ---
2345
-
2346
- ` : ''}## Usage
2347
-
2348
- Agents read these files in **STEP 0** before implementation:
2349
-
2350
- 1. Read \`index.md\` (this file) for overview
2351
- 2. Read relevant \`{library}.md\` files for specific best practices
2352
- ${hasIntegrationRisks ? '3. Read `INTEGRATION_RISKS.md` for cross-library concerns' : ''}
2353
-
2354
- ---
2355
-
2356
- **Auto-generated by /csetup v2.5.0**
2357
- `
2358
-
2359
- Write(`${bpDir}index.md`, content)
2360
- }
2361
- ```
2362
861
 
2363
862
  ---
2364
863
 
@@ -2707,15 +1206,27 @@ function generatePhaseSection(phase, phaseTasks) {
2707
1206
  const dominantAgent = getMostCommonAgent(phaseTasks)
2708
1207
  const hasIncremental = phaseTasks.some(t => t.milestones)
2709
1208
  const maxRisk = getMaxRisk(phaseTasks)
2710
- const needsTDD = phaseTasks.some(t => t.risk === 'HIGH' || t.complexity >= 7)
1209
+
1210
+ // v3.1.0: Use TDD classification from task-analyzer.md (Step 2.6)
1211
+ // Each task now has task.tdd = { tdd_required, workflow, reason, confidence }
1212
+ const tddTasks = phaseTasks.filter(t => t.tdd?.tdd_required === true)
1213
+ const needsTDD = tddTasks.length > 0
1214
+ const tddReasons = [...new Set(tddTasks.map(t => t.tdd?.reason).filter(Boolean))]
2711
1215
 
2712
1216
  let section = `## Phase ${phase.number}: ${phase.name}
2713
1217
 
2714
1218
  **Agent:** ${dominantAgent}
2715
1219
  **Strategy:** ${hasIncremental ? 'šŸ”„ INCREMENTAL' : 'Standard'}
2716
1220
  **Risk:** ${maxRisk}
2717
- ${needsTDD ? '**TDD Required:** Yes' : ''}
2718
-
1221
+ ${needsTDD ? `**TDD Required:** āœ… YES
1222
+ **TDD Reason:** ${tddReasons.slice(0, 2).join('; ')}
1223
+ **TDD Workflow:** red-green-refactor
1224
+
1225
+ āš ļø **TDD WORKFLOW REQUIRED:**
1226
+ 1. šŸ”“ RED: Write tests FIRST (they should fail)
1227
+ 2. āœ… GREEN: Write minimal implementation to pass tests
1228
+ 3. šŸ”§ REFACTOR: Improve code quality while keeping tests green
1229
+ ` : ''}
2719
1230
  `
2720
1231
 
2721
1232
  // Group tasks: incremental tasks get milestone sections, others get simple list
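For reference, the `task.tdd` object described in the comment above could be typed roughly like this; only the four field names come from that comment, and the value types are assumptions:

```typescript
// Assumed shape of the TDD classification attached by task-analyzer.md (Step 2.6).
interface TddClassification {
  tdd_required: boolean
  workflow: string            // e.g. 'red-green-refactor'
  reason: string              // why TDD was (or was not) required for this task
  confidence: string | number // exact representation not shown in this diff
}
```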
@@ -3102,20 +1613,10 @@ function detectChangeType(tasks: AnalyzedTask[]): string {
3102
1613
  }
3103
1614
  ```
3104
1615
 
3105
- ### detectAdditionalTech() - DEPRECATED
3106
-
3107
- > **Note:** This function is deprecated in v2.3.0. Use `extractPotentialLibraryNames()` + Context7 resolution instead.
3108
- > The dynamic approach automatically detects any library without hardcoded patterns.
1616
+ ### detectAdditionalTech() - REMOVED (v3.1.0)
3109
1617
 
3110
- ```typescript
3111
- // DEPRECATED: Kept for backwards compatibility only
3112
- // Use extractPotentialLibraryNames() for new implementations
3113
- function detectAdditionalTech(proposal: string, tasks: string): string[] {
3114
- // Now delegates to the dynamic detection system
3115
- const combined = proposal + ' ' + tasks
3116
- return extractPotentialLibraryNames(combined)
3117
- }
3118
- ```
1618
+ > **Note:** This function was removed in v3.1.0. Use Step 2.7's direct Context7 instructions instead.
1619
+ > Main Claude now directly calls Context7 MCP tools to detect and resolve libraries.
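As a rough sketch of that direct flow: the `get_library_docs` call shape appears earlier in this file, while the `resolve_library_id` tool name and its parameter are assumptions about the Context7 MCP interface:

```typescript
// Sketch of the direct flow that replaces detectAdditionalTech().
// Step 1: resolve a human-readable name to a Context7-compatible ID (tool name assumed).
const resolveResponse = await mcp__context7__resolve_library_id({ libraryName: 'drizzle' })
// Step 2: fetch focused docs for the resolved ID (call shape shown earlier in this file).
const docs = await mcp__context7__get_library_docs({
  context7CompatibleLibraryID: '/drizzle-team/drizzle-orm', // illustrative ID parsed from resolveResponse
  topic: 'best practices, adapter, integration, schema',
  mode: 'code'
})
```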
3119
1620
 
3120
1621
  ---
3121
1622