the-grid-cc 1.7.14 → 1.7.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/DAEMON_VALIDATION.md +354 -0
- package/DAEMON_WIRING_COMPLETE.md +176 -0
- package/PERSISTENCE_COMPLETE.md +309 -0
- package/README.md +36 -6
- package/RESEARCH_FIRST_DELIVERABLES.md +604 -0
- package/commands/grid/VERSION +1 -1
- package/commands/grid/help.md +29 -0
- package/commands/grid/init.md +35 -1
- package/commands/grid/mc.md +61 -14
- package/docs/CONFIG_SCHEMA.md +479 -0
- package/docs/GIT_AUTONOMY_INTEGRATION.md +343 -0
- package/docs/INTEGRATION_SUMMARY.md +316 -0
- package/docs/MC_RESEARCH_INTEGRATION.md +716 -0
- package/docs/PERSISTENCE_FLOW.md +483 -0
- package/docs/PERSISTENCE_IMPLEMENTATION.md +361 -0
- package/docs/PERSISTENCE_QUICKSTART.md +283 -0
- package/docs/RESEARCH_CONFIG.md +511 -0
- package/docs/RESEARCH_INFRASTRUCTURE.md +429 -0
- package/docs/WIRING_VERIFICATION.md +389 -0
- package/package.json +1 -1
- package/templates/daemon-checkpoint.json +51 -0
- package/templates/daemon-config.json +28 -0
- package/templates/git-config.json +65 -0
- package/templates/grid-state/.gitignore-entry +3 -0
- package/templates/grid-state/BLOCK-SUMMARY.md +66 -0
- package/templates/grid-state/BLOCKERS.md +31 -0
- package/templates/grid-state/CHECKPOINT.md +59 -0
- package/templates/grid-state/DECISIONS.md +30 -0
- package/templates/grid-state/README.md +138 -0
- package/templates/grid-state/SCRATCHPAD.md +29 -0
- package/templates/grid-state/STATE.md +47 -0
- package/templates/grid-state/WARMTH.md +48 -0
- package/templates/grid-state/config.json +24 -0
|
@@ -0,0 +1,716 @@
|
|
|
1
|
+
# Master Control Research Integration
|
|
2
|
+
|
|
3
|
+
**Technical Implementation Guide**
|
|
4
|
+
**Version:** 1.0
|
|
5
|
+
**Date:** 2026-01-23
|
|
6
|
+
|
|
7
|
+
---
|
|
8
|
+
|
|
9
|
+
## Overview
|
|
10
|
+
|
|
11
|
+
This document specifies how Master Control integrates the Research-First Architecture into its workflow. It describes the exact spawn sequence, prompt templates, and context passing between MC → Scout/Researcher → Planner.
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
## MC Research Phase Flow
|
|
16
|
+
|
|
17
|
+
### High-Level Sequence
|
|
18
|
+
|
|
19
|
+
```
|
|
20
|
+
User Request
|
|
21
|
+
↓
|
|
22
|
+
MC receives request
|
|
23
|
+
↓
|
|
24
|
+
MC checks: Should research run?
|
|
25
|
+
↓
|
|
26
|
+
├─ NO → Spawn Planner directly (legacy path)
|
|
27
|
+
│
|
|
28
|
+
└─ YES → Research Phase
|
|
29
|
+
↓
|
|
30
|
+
┌────┴────┐
|
|
31
|
+
↓ ↓
|
|
32
|
+
Scout Researcher(s)
|
|
33
|
+
│ │
|
|
34
|
+
│ (parallel spawn)
|
|
35
|
+
│ │
|
|
36
|
+
└────┬────┘
|
|
37
|
+
↓
|
|
38
|
+
Context Assembly
|
|
39
|
+
↓
|
|
40
|
+
Spawn Planner
|
|
41
|
+
(with research context)
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
---
|
|
45
|
+
|
|
46
|
+
## 1. Research Decision Logic
|
|
47
|
+
|
|
48
|
+
### MC's Initial Check
|
|
49
|
+
|
|
50
|
+
```python
|
|
51
|
+
def handle_user_request(user_request: str):
    """MC's entry point for all user requests."""
    # Fall back to defaults when no config file is present.
    config = load_config() or get_default_config()

    # Decide whether the research phase applies to this request.
    research_decision = should_run_research(user_request, config)

    if research_decision['run']:
        # Research-first path: gather context, then plan with it.
        research_context = execute_research_phase(user_request, config)
        return spawn_planner(user_request, context=research_context)

    # Legacy path - straight to Planner with no research context.
    return spawn_planner(user_request, context={})
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
### Skip Conditions
|
|
70
|
+
|
|
71
|
+
```python
|
|
72
|
+
def should_run_research(user_request: str, config: dict) -> dict:
    """Determine if research phase should execute."""

    def skip(reason: str) -> dict:
        # Uniform shape for every "don't research" outcome.
        return {'run': False, 'reason': reason}

    research_cfg = config.get('research', {})
    if not research_cfg.get('enabled', True):
        return skip('Research disabled in config')

    # User explicitly opted out of research.
    if 'skip research' in user_request.lower():
        return skip('User requested skip')

    # Small, simple changes (≤5 files) go straight to planning.
    if quick_mode_eligible(user_request):
        return skip('Quick mode eligible')

    # Bug-fixing work involves no new technology to research.
    if is_debug_request(user_request):
        return skip('Debug mode')

    # Every mentioned technology already has cached research.
    techs = extract_technologies(user_request)
    if techs and all(is_cached(tech, config) for tech in techs):
        return skip('All tech cached')

    return {'run': True, 'reason': 'Research needed'}
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
---
|
|
99
|
+
|
|
100
|
+
## 2. Scout Spawning
|
|
101
|
+
|
|
102
|
+
### When to Spawn Scout
|
|
103
|
+
|
|
104
|
+
Scout runs when:
|
|
105
|
+
- `research.scout.enabled == true` (default)
|
|
106
|
+
- Existing codebase detected (`os.listdir(os.getcwd())` shows files)
|
|
107
|
+
- Not a greenfield project
|
|
108
|
+
|
|
109
|
+
### Scout Spawn Template
|
|
110
|
+
|
|
111
|
+
```python
|
|
112
|
+
def spawn_scout(config: dict) -> str:
    """Spawn Scout for codebase reconnaissance."""
    # Pull Scout's tunables from config, falling back to defaults.
    scout_config = config.get('research', {}).get('scout', {})
    timeout = scout_config.get('timeout_seconds', 120)
    max_files = scout_config.get('max_files_analyzed', 1000)
    depth_limit = scout_config.get('depth_limit', 4)
    skip_dirs = scout_config.get('skip_dirs', ['node_modules', '.git', 'dist', 'build'])

    prompt = f"""
First, read ~/.claude/agents/grid-scout.md for your complete role definition.

You are Scout, a reconnaissance program on The Grid. Your mission: rapid codebase analysis in <2 minutes.

## Mission Parameters

<config>
project_root: {os.getcwd()}
timeout_seconds: {timeout}
max_files_analyzed: {max_files}
depth_limit: {depth_limit}
skip_directories: {skip_dirs}
</config>

## Your Objectives

1. **Structure Scan** (30s max)
   - Map directory tree (depth ≤ {depth_limit})
   - Count file types
   - Identify package managers

2. **Technology Detection** (30s max)
   - Parse package.json / pyproject.toml / go.mod / etc.
   - Detect frameworks and versions
   - Identify databases and ORMs

3. **Pattern Detection** (30s max)
   - Recognize architectural patterns (App Router, MVC, etc.)
   - Detect coding conventions (naming, imports)
   - Identify file organization patterns

4. **Constraint Discovery** (30s max)
   - Find locked dependencies
   - Locate existing schemas
   - Map API routes/contracts
   - Note config requirements

## Output Requirements

Write scout report to: `.grid/scout/RECON_{datetime.now().isoformat()}.md`

Report must include:
- Executive summary
- Technology stack table
- Detected patterns with confidence levels
- Constraints that planning MUST respect
- Key files for planning reference
- Recommendations for Planner

## Critical Rules

- SPEED OVER COMPLETENESS - 2 minute hard limit
- Sample first {max_files} files only
- Skip {skip_dirs}
- Parallel operations wherever possible
- Early exit when patterns clear

Begin reconnaissance. End of Line.
"""

    # Hand the mission to a Scout sub-agent.
    scout_result = Task(
        prompt=prompt,
        subagent_type="general-purpose",
        description="Scout: Codebase reconnaissance"
    )

    # Return the newest recon report, if one was produced.
    # (Filenames embed ISO timestamps, so lexical max == most recent.)
    report_files = glob('.grid/scout/RECON_*.md')
    if not report_files:
        return None
    return read(max(report_files))
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
---
|
|
199
|
+
|
|
200
|
+
## 3. Researcher Spawning
|
|
201
|
+
|
|
202
|
+
### Research Needs Extraction
|
|
203
|
+
|
|
204
|
+
```python
|
|
205
|
+
def extract_research_needs(user_request: str, scout_context: str = None) -> list[dict]:
    """Extract what needs to be researched from user request and Scout findings."""
    needs = []

    def add_need(kind: str, topic: str, queries: list) -> None:
        # One research mission per topic.
        needs.append({'type': kind, 'topic': topic, 'queries': queries})

    # Technologies named in the user request.
    for tech in extract_technologies(user_request):
        add_need('technology', tech, [
            f"{tech} best practices 2024 2025",
            f"{tech} production patterns",
            f"{tech} common mistakes to avoid",
        ])

    # Architectural/design patterns named in the request.
    for pattern in extract_patterns(user_request):
        add_need('pattern', pattern, [
            f"{pattern} implementation guide",
            f"{pattern} real world examples",
            f"when not to use {pattern}",
        ])

    # Third-party APIs / integrations named in the request.
    for integration in extract_integrations(user_request):
        add_need('api_docs', integration, [
            f"{integration} API documentation",
            f"{integration} integration examples",
            f"{integration} best practices",
        ])

    # Patterns Scout saw in the codebase but could not classify.
    if scout_context:
        for pattern in extract_unknown_patterns(scout_context):
            add_need('pattern', pattern, [f"{pattern} explanation", f"{pattern} usage"])

    return needs
|
|
260
|
+
```
|
|
261
|
+
|
|
262
|
+
### Cache Check
|
|
263
|
+
|
|
264
|
+
```python
|
|
265
|
+
def check_research_cache(needs: list[dict], config: dict) -> tuple[list, list]:
    """Check cache for existing research, return (cached, needed)."""
    if not config.get('research', {}).get('cache', {}).get('enabled', True):
        return [], needs

    # No index on disk means nothing has been cached yet.
    cache_index_path = '.grid/research_cache/index.json'
    if not os.path.exists(cache_index_path):
        return [], needs

    with open(cache_index_path) as f:
        cache_index = json.load(f)

    def find_hit(need: dict):
        # A cached entry matches when its queries overlap enough
        # (Jaccard similarity > 0.7) AND it has not yet expired.
        for entry in cache_index.get('entries', []):
            if jaccard_similarity(entry['queries'], need['queries']) > 0.7:
                if datetime.now() < datetime.fromisoformat(entry['expires']):
                    return entry
        return None

    cached, needed = [], []
    for need in needs:
        hit = find_hit(need)
        if hit is None:
            needed.append(need)
            continue
        # Cache hit: read the stored research body.
        with open(f".grid/research_cache/{hit['file']}") as f:
            cached.append({
                'topic': need['topic'],
                'content': f.read(),
                'source': 'cache'
            })
        # Track usage so the cache can report/evict sensibly.
        hit['hit_count'] = hit.get('hit_count', 0) + 1

    # Persist updated hit counts back to the index.
    with open(cache_index_path, 'w') as f:
        json.dump(cache_index, f, indent=2)

    return cached, needed
|
|
314
|
+
```
|
|
315
|
+
|
|
316
|
+
### Researcher Spawn Template
|
|
317
|
+
|
|
318
|
+
```python
|
|
319
|
+
def spawn_researchers(needs: list[dict], config: dict) -> list[str]:
    """Spawn Researcher programs for uncached research needs."""
    researcher_config = config.get('research', {}).get('researcher', {})
    timeout = researcher_config.get('timeout_seconds', 300)
    max_queries = researcher_config.get('max_queries_per_topic', 10)
    max_parallel = researcher_config.get('max_researchers_parallel', 3)
    tool_preference = researcher_config.get('search_tool_preference', [
        'mcp__exa__get_code_context_exa',
        'WebSearch',
        'WebFetch'
    ])

    # Cap fan-out at the configured parallel limit.
    needs = needs[:max_parallel]

    research_results = []
    for need in needs:
        prompt = f"""
First, read ~/.claude/agents/grid-researcher.md for your complete role definition.

You are Researcher, an intelligence gathering program on The Grid. Your mission: gather external context for informed planning.

## Mission Parameters

<config>
timeout_seconds: {timeout}
max_queries_per_topic: {max_queries}
search_tools: {tool_preference}
cache_enabled: true
</config>

<research_request>
mission_type: {need['type']}
topic: {need['topic']}
suggested_queries: {need['queries']}
</research_request>

## Your Objectives

1. **Query Generation** - Create effective search queries
2. **Parallel Search** - Execute searches using preferred tools
3. **Context Assembly** - Structure findings for Planner consumption
4. **Caching** - Save to `.grid/research_cache/{slugify(need['topic'])}.md`

## Output Requirements

Create structured research context with:
- Executive summary (2-3 sentences)
- Best practices table (practice, source, confidence)
- Recommended patterns (what, when, why, source)
- Anti-patterns to avoid (what, why bad, instead, source)
- Code examples with sources
- API reference (if applicable)
- Confidence assessment (HIGH/MEDIUM/LOW per topic)
- Sources (all URLs cited)

## Cache Output

Write to: `.grid/research_cache/{slugify(need['topic'])}.md`

Update cache index: `.grid/research_cache/index.json`

## Critical Rules

- CITE EVERYTHING - No source = LOW confidence
- RECENT > OLD - Prefer 2024-2025 sources
- OFFICIAL > BLOG - Prefer official docs
- CODE REQUIRED - Include working examples
- TIME-BOX - 5 minutes max, move on if stuck
- PARALLEL SEARCHES - Independent queries at once
- HONEST REPORTING - Say what you couldn't find

Begin research. End of Line.
"""

        # Each Researcher runs as its own Task (they run in parallel).
        research_results.append(Task(
            prompt=prompt,
            subagent_type="general-purpose",
            description=f"Research: {need['topic']}"
        ))

    # All researchers run in parallel; MC waits for completion,
    # then collects what each one wrote into the research cache.
    outputs = []
    for need in needs:
        cache_file = f".grid/research_cache/{slugify(need['topic'])}.md"
        if os.path.exists(cache_file):
            with open(cache_file) as f:
                outputs.append({
                    'topic': need['topic'],
                    'content': f.read(),
                    'source': 'fresh'
                })

    return outputs
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
---
|
|
424
|
+
|
|
425
|
+
## 4. Context Assembly
|
|
426
|
+
|
|
427
|
+
### Combining Scout + Researcher Outputs
|
|
428
|
+
|
|
429
|
+
```python
|
|
430
|
+
def execute_research_phase(user_request: str, config: dict) -> dict:
    """Execute complete research phase, return assembled context."""
    research_cfg = config.get('research', {})
    context = {
        'research_executed': True,
        'scout': None,
        'cached_research': [],
        'fresh_research': []
    }

    # Phase 1: Scout reconnaissance (only meaningful on an existing codebase).
    if research_cfg.get('scout', {}).get('enabled', True) and has_existing_code():
        context['scout'] = spawn_scout(config)

    # Phase 2: Work out what we need to learn.
    needs = extract_research_needs(user_request, context['scout'])

    # Phase 3: Reuse previously gathered research where possible.
    needed = needs
    if research_cfg.get('cache', {}).get('enabled', True):
        context['cached_research'], needed = check_research_cache(needs, config)

    # Phase 4: Gather the rest fresh, in parallel.
    if needed and research_cfg.get('researcher', {}).get('enabled', True):
        context['fresh_research'] = spawn_researchers(needed, config)

    return context
|
|
460
|
+
```
|
|
461
|
+
|
|
462
|
+
---
|
|
463
|
+
|
|
464
|
+
## 5. Planner Integration
|
|
465
|
+
|
|
466
|
+
### Planner Spawn with Research Context
|
|
467
|
+
|
|
468
|
+
```python
|
|
469
|
+
def spawn_planner(user_request: str, context: dict):
    """Spawn Planner with research context.

    Args:
        user_request: The raw request from the User.
        context: Research-phase output. May contain 'scout' (report text or
            None), plus 'cached_research' and 'fresh_research' (lists of
            dicts with 'topic' and 'content' keys).

    Returns:
        The Task handle for the spawned Planner.
    """
    # BUG FIX: execute_research_phase() stores 'scout': None for greenfield
    # projects, and dict.get() applies its default only when the key is
    # *absent* — the original interpolated the string "None" into the
    # prompt. Use a falsy-aware fallback instead.
    codebase_context = context.get('scout') or 'No existing codebase detected. Greenfield project.'

    # Assemble research sections; join once rather than += in a loop.
    sections = []
    for cached in context.get('cached_research', []):
        sections.append(f"\n## Cached Research: {cached['topic']}\n{cached['content']}\n")
    for fresh in context.get('fresh_research', []):
        sections.append(f"\n## Fresh Research: {fresh['topic']}\n{fresh['content']}\n")
    research_context = "".join(sections)

    if not research_context:
        research_context = "No external research performed. Use training knowledge."

    # NOTE: {{cluster}} and {{N}} below are escaped so they reach the
    # Planner as literal template placeholders. They are not variables in
    # this scope — the unescaped originals raised NameError the moment the
    # f-string was evaluated.
    prompt = f"""
First, read ~/.claude/agents/grid-planner.md for your complete role definition.

You are Planner, a planning program on The Grid. Your mission: create execution plans from User intent, informed by research context.

## Context from Research Phase

<codebase_context>
{codebase_context}
</codebase_context>

<research_context>
{research_context}
</research_context>

<user_request>
{user_request}
</user_request>

## Your Objectives

Create an execution plan that:
1. **Respects codebase constraints** (from Scout)
   - Preserves existing API contracts
   - Follows detected conventions
   - Works with locked dependencies
   - Extends existing patterns

2. **Leverages research findings** (from Researchers)
   - Uses recommended patterns
   - Avoids documented anti-patterns
   - Follows 2024-2025 best practices
   - Incorporates code examples

3. **Decomposes into Blocks and Threads**
   - 2-3 Threads per Block (50% context budget)
   - Wave numbers for parallel execution
   - Dependencies clearly marked

4. **Derives must-haves from goals**
   - Goal-backward verification
   - Acceptance criteria per Block

## Output Format

Write plan to: `.grid/plans/{{cluster}}-PLAN-SUMMARY.md`

Then create Block plans: `.grid/plans/{{cluster}}-block-{{N}}.md`

## Critical Rules

- CONSTRAINTS ARE SACRED - Scout findings are non-negotiable
- RESEARCH INFORMS - Best practices guide, don't dictate
- CONTEXT BUDGET - 2-3 Threads/Block, never more
- WAVE ASSIGNMENT - Compute during planning, not execution

Begin planning. End of Line.
"""

    return Task(
        prompt=prompt,
        subagent_type="general-purpose",
        description="Planner: Create execution plan"
    )
|
|
549
|
+
```
|
|
550
|
+
|
|
551
|
+
---
|
|
552
|
+
|
|
553
|
+
## 6. MC Orchestration Example
|
|
554
|
+
|
|
555
|
+
### Complete Research-First Flow
|
|
556
|
+
|
|
557
|
+
```python
|
|
558
|
+
def mc_orchestrate_research_first(user_request: str):
    """Master Control's research-first orchestration."""
    print("Master Control online.")
    print("Analyzing request...")

    # Fall back to a sensible built-in config when none is on disk.
    default_config = {
        'research': {
            'enabled': True,
            'scout': {'enabled': True, 'timeout_seconds': 120},
            'researcher': {'enabled': True, 'max_researchers_parallel': 3},
            'cache': {'enabled': True, 'ttl_hours': 24}
        }
    }
    config = load_config() or default_config

    # Bail out early when research does not apply to this request.
    decision = should_run_research(user_request, config)
    if not decision['run']:
        print(f"Skipping research: {decision['reason']}")
        print("Spawning Planner...")
        return spawn_planner(user_request, context={})

    print("Research phase initiated...")

    # Scout only makes sense against an existing codebase.
    scout_result = None
    if config['research']['scout']['enabled'] and has_existing_code():
        print("└─ Spawning Scout for reconnaissance...")
        scout_result = spawn_scout(config)
        print("   └─ Scout report complete")
    else:
        print("└─ No existing codebase detected")

    # Work out what to research, reusing cached findings where possible.
    needs = extract_research_needs(user_request, scout_result)
    print(f"└─ {len(needs)} research topics identified")

    cached, needed = check_research_cache(needs, config)
    print(f"   ├─ Cache hits: {len(cached)}")
    print(f"   └─ Fresh research needed: {len(needed)}")

    fresh_research = []
    if needed:
        print(f"└─ Spawning {len(needed)} Researcher(s) in parallel...")
        fresh_research = spawn_researchers(needed, config)
        print("   └─ All research complete")

    context = {
        'scout': scout_result,
        'cached_research': cached,
        'fresh_research': fresh_research
    }

    print("Research phase complete.")
    print("Spawning Planner with research context...")

    return spawn_planner(user_request, context=context)
|
|
621
|
+
```
|
|
622
|
+
|
|
623
|
+
---
|
|
624
|
+
|
|
625
|
+
## 7. Utility Functions
|
|
626
|
+
|
|
627
|
+
### Technology Extraction
|
|
628
|
+
|
|
629
|
+
```python
|
|
630
|
+
def extract_technologies(text: str) -> list[str]:
    """Extract technology mentions from text."""
    # Case-insensitive alternations for well-known frameworks, ORMs,
    # databases and UI kits; findall returns the captured group text.
    tech_patterns = (
        r'\b(Next\.js|React|Vue|Angular|Svelte)\b',
        r'\b(Express|FastAPI|Django|Flask|Rails)\b',
        r'\b(Prisma|TypeORM|Sequelize|Mongoose|SQLAlchemy)\b',
        r'\b(PostgreSQL|MySQL|MongoDB|Redis|SQLite)\b',
        r'\b(Tailwind|Bootstrap|Material UI|Chakra UI)\b',
    )

    # Set dedupes repeat mentions across patterns.
    matches = {
        m
        for pattern in tech_patterns
        for m in re.findall(pattern, text, re.IGNORECASE)
    }
    return list(matches)
|
|
647
|
+
```
|
|
648
|
+
|
|
649
|
+
### Quick Mode Eligibility
|
|
650
|
+
|
|
651
|
+
```python
|
|
652
|
+
def quick_mode_eligible(user_request: str) -> bool:
    """Check if request is simple enough for quick mode."""
    # Phrases that signal a small, mechanical change.
    simple_patterns = (
        r'\bfix\b.*\bbug\b',
        r'\bupdate\b.*\bversion\b',
        r'\badd\b.*\btype\b',
        r'\brefactor\b',
        r'\brename\b',
    )
    if any(re.search(p, user_request, re.IGNORECASE) for p in simple_patterns):
        return True

    # An explicitly small scope ("... 3 files ...") also qualifies.
    file_mentions = re.findall(r'(\d+)\s*(files?|components?)', user_request, re.IGNORECASE)
    return bool(file_mentions) and int(file_mentions[0][0]) <= 5
|
|
676
|
+
```
|
|
677
|
+
|
|
678
|
+
### Cache Helpers
|
|
679
|
+
|
|
680
|
+
```python
|
|
681
|
+
def slugify(text: str) -> str:
    """Convert text to filesystem-safe slug."""
    # Collapse every run of non-alphanumerics into one hyphen,
    # then trim any hyphens left at either end.
    collapsed = re.sub(r'[^a-z0-9]+', '-', text.lower())
    return collapsed.strip('-')
|
|
686
|
+
|
|
687
|
+
def jaccard_similarity(list1: list, list2: list) -> float:
    """Compute Jaccard similarity between two lists."""
    set1, set2 = set(list1), set(list2)
    union = set1 | set2
    if not union:
        # Two empty collections: defined here as 0.0 (no overlap signal).
        return 0.0
    return len(set1 & set2) / len(union)
|
|
694
|
+
```
|
|
695
|
+
|
|
696
|
+
---
|
|
697
|
+
|
|
698
|
+
## Summary
|
|
699
|
+
|
|
700
|
+
MC integrates research-first by:
|
|
701
|
+
|
|
702
|
+
1. **Checking skip conditions** before every planning request
|
|
703
|
+
2. **Spawning Scout** (if existing codebase) for constraints
|
|
704
|
+
3. **Extracting research needs** from user request + Scout findings
|
|
705
|
+
4. **Checking cache** for existing research (24h TTL)
|
|
706
|
+
5. **Spawning Researchers** (parallel) for uncached needs
|
|
707
|
+
6. **Assembling context** from Scout + cached + fresh research
|
|
708
|
+
7. **Spawning Planner** with complete research context
|
|
709
|
+
|
|
710
|
+
Research context enables Planner to:
|
|
711
|
+
- Respect existing codebase patterns
|
|
712
|
+
- Use current best practices (2024-2025)
|
|
713
|
+
- Avoid known anti-patterns
|
|
714
|
+
- Include working code examples
|
|
715
|
+
|
|
716
|
+
**End of Line.**
|