mapify-cli 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mapify_cli/__init__.py +1946 -0
- mapify_cli/playbook_manager.py +517 -0
- mapify_cli/recitation_manager.py +551 -0
- mapify_cli/semantic_search.py +405 -0
- mapify_cli/templates/agents/CHANGELOG.md +108 -0
- mapify_cli/templates/agents/MCP-PATTERNS.md +343 -0
- mapify_cli/templates/agents/README.md +183 -0
- mapify_cli/templates/agents/actor.md +650 -0
- mapify_cli/templates/agents/curator.md +1155 -0
- mapify_cli/templates/agents/documentation-reviewer.md +1282 -0
- mapify_cli/templates/agents/evaluator.md +843 -0
- mapify_cli/templates/agents/monitor.md +977 -0
- mapify_cli/templates/agents/predictor.md +965 -0
- mapify_cli/templates/agents/reflector.md +1048 -0
- mapify_cli/templates/agents/task-decomposer.md +1169 -0
- mapify_cli/templates/agents/test-generator.md +1175 -0
- mapify_cli/templates/commands/map-debug.md +315 -0
- mapify_cli/templates/commands/map-feature.md +454 -0
- mapify_cli/templates/commands/map-refactor.md +317 -0
- mapify_cli/templates/commands/map-review.md +29 -0
- mapify_cli/templates/hooks/README.md +55 -0
- mapify_cli/templates/hooks/validate-agent-templates.sh +94 -0
- mapify_cli/templates/settings.hooks.json +20 -0
- mapify_cli/workflow_logger.py +411 -0
- mapify_cli-1.0.0.dist-info/METADATA +310 -0
- mapify_cli-1.0.0.dist-info/RECORD +28 -0
- mapify_cli-1.0.0.dist-info/WHEEL +4 -0
- mapify_cli-1.0.0.dist-info/entry_points.txt +2 -0
mapify_cli/__init__.py
ADDED
|
@@ -0,0 +1,1946 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# /// script
|
|
3
|
+
# requires-python = ">=3.11"
|
|
4
|
+
# dependencies = [
|
|
5
|
+
# "typer",
|
|
6
|
+
# "rich",
|
|
7
|
+
# "platformdirs",
|
|
8
|
+
# "readchar",
|
|
9
|
+
# "httpx",
|
|
10
|
+
# "truststore",
|
|
11
|
+
# ]
|
|
12
|
+
# ///
|
|
13
|
+
"""
|
|
14
|
+
Mapify CLI - Setup tool for MAP Framework projects
|
|
15
|
+
|
|
16
|
+
Usage:
|
|
17
|
+
uvx mapify init <project-name>
|
|
18
|
+
uvx mapify init .
|
|
19
|
+
|
|
20
|
+
Or install globally:
|
|
21
|
+
uv tool install --from git+https://github.com/azalio/map-framework.git mapify-cli
|
|
22
|
+
mapify init <project-name>
|
|
23
|
+
mapify check
|
|
24
|
+
"""
|
|
25
|
+
|
|
26
|
+
__version__ = "1.0.0"
|
|
27
|
+
|
|
28
|
+
import os
|
|
29
|
+
import subprocess
|
|
30
|
+
import sys
|
|
31
|
+
import shutil
|
|
32
|
+
import json
|
|
33
|
+
from pathlib import Path
|
|
34
|
+
from typing import Optional, List, Dict, Any
|
|
35
|
+
|
|
36
|
+
import typer
|
|
37
|
+
import httpx
|
|
38
|
+
import readchar
|
|
39
|
+
import ssl
|
|
40
|
+
# Optional dependency: `truststore` makes SSL use the OS-native certificate
# store. Absence is tolerated; create_ssl_context() falls back to stdlib SSL.
try:
    import truststore
    HAS_TRUSTSTORE = True
except ImportError:
    HAS_TRUSTSTORE = False
|
|
45
|
+
|
|
46
|
+
from rich.console import Console
|
|
47
|
+
from rich.panel import Panel
|
|
48
|
+
from rich.text import Text
|
|
49
|
+
from rich.live import Live
|
|
50
|
+
from rich.align import Align
|
|
51
|
+
from rich.table import Table
|
|
52
|
+
from rich.tree import Tree
|
|
53
|
+
from typer.core import TyperGroup
|
|
54
|
+
|
|
55
|
+
# Create secure SSL context with proper fallback
def create_ssl_context():
    """Return an SSL context that enforces certificate validation.

    Prefers a truststore-backed context (native OS certificate store) when
    the optional ``truststore`` package imported successfully; any runtime
    failure there falls back to Python's default context. Both paths force
    hostname checking and required certificate verification.
    """
    if HAS_TRUSTSTORE:
        try:
            ctx = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
            ctx.check_hostname = True
            ctx.verify_mode = ssl.CERT_REQUIRED
            return ctx
        except Exception:
            # truststore failed at runtime — fall through to the stdlib context.
            pass

    # Fallback to standard SSL context
    fallback = ssl.create_default_context()
    fallback.check_hostname = True
    fallback.verify_mode = ssl.CERT_REQUIRED
    return fallback

# Shared context used by HTTPS requests issued from this module.
ssl_context = create_ssl_context()
|
|
74
|
+
|
|
75
|
+
# Constants

# Top-level choices offered by the interactive MCP-server selection menu.
# Keys are option identifiers; values are human-readable descriptions.
MCP_SERVER_CHOICES = {
    "all": "All available MCP servers",
    "essential": "Essential (cipher, claude-reviewer, sequential-thinking)",
    "docs": "Documentation (context7, deepwiki)",
    "custom": "Select individually",
    "none": "Skip MCP setup"
}

# Individual servers shown when the user picks "custom" above.
INDIVIDUAL_MCP_SERVERS = {
    "cipher": "Knowledge management system",
    "claude-reviewer": "Professional code review",
    "sequential-thinking": "Chain-of-thought reasoning",
    "codex-bridge": "AI code generation",
    "context7": "Library documentation",
    "deepwiki": "GitHub repository intelligence"
}

# ASCII Art Banner
BANNER = """
╔╦╗╔═╗╔═╗ ╦╔═╦╔╦╗
║║║╠═╣╠═╝ ╠╩╗║ ║
╩ ╩╩ ╩╩ ╩ ╩╩ ╩
"""

TAGLINE = "MAP Kit - Modular Agentic Planner Framework for Claude Code"

# Shared Rich console used for all terminal output in this module.
console = Console()
|
|
103
|
+
|
|
104
|
+
class StepTracker:
    """Track named workflow steps and render their progress as a Rich tree.

    Each step is stored as a dict with keys ``key``, ``label``, ``status``
    (pending/running/done/error/skipped) and ``detail``. An optional refresh
    callback (e.g. a Live display updater) is invoked after every mutation.
    """

    # Status -> colored bullet used by render(); unknown statuses get a blank.
    _SYMBOLS = {
        "done": "[green]●[/green]",
        "pending": "[green dim]○[/green dim]",
        "running": "[cyan]○[/cyan]",
        "error": "[red]●[/red]",
        "skipped": "[yellow]○[/yellow]",
    }

    def __init__(self, title: str):
        self.title = title
        # Ordered list of {key, label, status, detail} dicts.
        self.steps: List[Dict[str, Any]] = []
        self._refresh_cb = None

    def attach_refresh(self, cb):
        """Register a callback invoked after every state change."""
        self._refresh_cb = cb

    def add(self, key: str, label: str):
        """Append a new pending step unless *key* is already tracked."""
        known = {s["key"] for s in self.steps}
        if key not in known:
            self.steps.append({"key": key, "label": label, "status": "pending", "detail": ""})
            self._maybe_refresh()

    def start(self, key: str, detail: str = ""):
        self._update(key, status="running", detail=detail)

    def complete(self, key: str, detail: str = ""):
        self._update(key, status="done", detail=detail)

    def error(self, key: str, detail: str = ""):
        self._update(key, status="error", detail=detail)

    def skip(self, key: str, detail: str = ""):
        self._update(key, status="skipped", detail=detail)

    def _update(self, key: str, status: str, detail: str):
        """Set status/detail on an existing step, or auto-register *key*."""
        for step in self.steps:
            if step["key"] != key:
                continue
            step["status"] = status
            # An empty detail preserves whatever detail was set previously.
            if detail:
                step["detail"] = detail
            self._maybe_refresh()
            return
        # Unknown key: register it on the fly, using the key as its label.
        self.steps.append({"key": key, "label": key, "status": status, "detail": detail})
        self._maybe_refresh()

    def _maybe_refresh(self):
        # Rendering failures must never break the tracked workflow.
        if self._refresh_cb is None:
            return
        try:
            self._refresh_cb()
        except Exception:
            pass

    def render(self):
        """Build and return a Rich Tree reflecting the current step states."""
        tree = Tree(f"[cyan]{self.title}[/cyan]", guide_style="grey50")
        for step in self.steps:
            label = step["label"]
            detail_text = step["detail"].strip() if step["detail"] else ""
            symbol = self._SYMBOLS.get(step["status"], " ")

            if step["status"] == "pending":
                # Entire line rendered dim for steps that have not started yet.
                body = f"{label} ({detail_text})" if detail_text else label
                line = f"{symbol} [bright_black]{body}[/bright_black]"
            elif detail_text:
                # Label white, detail light gray in parentheses.
                line = f"{symbol} [white]{label}[/white] [bright_black]({detail_text})[/bright_black]"
            else:
                line = f"{symbol} [white]{label}[/white]"

            tree.add(line)
        return tree
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def get_key():
    """Read one keypress and normalize it to a logical key name.

    Returns 'up', 'down', 'enter', 'space', 'escape', or the raw key string
    for anything unrecognized. Raises KeyboardInterrupt on Ctrl+C.
    """
    key = readchar.readkey()

    # Navigation: arrow keys plus Emacs-style Ctrl-P / Ctrl-N bindings.
    if key in (readchar.key.UP, readchar.key.CTRL_P):
        return 'up'
    if key in (readchar.key.DOWN, readchar.key.CTRL_N):
        return 'down'

    # Enter varies by platform/terminal: the ENTER constant, bare CR or LF,
    # and (on some readchar versions) dedicated CR/LF key constants.
    enter_keys = {readchar.key.ENTER, '\r', '\n'}
    for attr in ('CR', 'LF'):
        const = getattr(readchar.key, attr, None)
        if const is not None:
            enter_keys.add(const)
    if key in enter_keys:
        return 'enter'

    # Space toggles selection in multi-select menus.
    if key == ' ':
        return 'space'

    if key == readchar.key.ESC:
        return 'escape'

    if key == readchar.key.CTRL_C:
        raise KeyboardInterrupt

    return key
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def select_with_arrows(options: dict, prompt_text: str = "Select an option", default_key: Optional[str] = None) -> str:
    """Interactive single-choice selection using arrow keys.

    Args:
        options: Mapping of option key -> human-readable description.
        prompt_text: Title shown on the selection panel.
        default_key: Option to pre-highlight when the menu opens.

    Returns:
        The chosen option key.

    Raises:
        typer.Exit: if the user cancels with Esc or Ctrl+C.
    """
    option_keys = list(options.keys())
    # Start the cursor on the default option when a valid one was supplied.
    if default_key and default_key in option_keys:
        selected_index = option_keys.index(default_key)
    else:
        selected_index = 0

    selected_key = None

    def create_selection_panel():
        """Create the selection panel with current selection highlighted."""
        table = Table.grid(padding=(0, 2))
        table.add_column(style="cyan", justify="left", width=3)
        table.add_column(style="white", justify="left")

        for i, key in enumerate(option_keys):
            # "▶" marks the row the cursor is currently on.
            if i == selected_index:
                table.add_row("▶", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")
            else:
                table.add_row(" ", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")

        table.add_row("", "")
        table.add_row("", "[dim]Use ↑/↓ to navigate, Enter to select, Esc to cancel[/dim]")

        return Panel(
            table,
            title=f"[bold]{prompt_text}[/bold]",
            border_style="cyan",
            padding=(1, 2)
        )

    console.print()

    # transient=True clears the menu from the terminal once selection ends.
    with Live(create_selection_panel(), console=console, transient=True, auto_refresh=False) as live:
        while True:
            try:
                key = get_key()
                if key == 'up':
                    # Wrap around at the edges of the list.
                    selected_index = (selected_index - 1) % len(option_keys)
                elif key == 'down':
                    selected_index = (selected_index + 1) % len(option_keys)
                elif key == 'enter':
                    selected_key = option_keys[selected_index]
                    break
                elif key == 'escape':
                    console.print("\n[yellow]Selection cancelled[/yellow]")
                    raise typer.Exit(1)

                # Re-render after every keystroke (auto_refresh is off).
                live.update(create_selection_panel(), refresh=True)

            except KeyboardInterrupt:
                console.print("\n[yellow]Selection cancelled[/yellow]")
                raise typer.Exit(1)

    return selected_key
|
|
278
|
+
|
|
279
|
+
|
|
280
|
+
def select_multiple_with_arrows(options: dict, prompt_text: str = "Select options") -> List[str]:
    """Interactive multi-choice selection using arrow keys and Space.

    Args:
        options: Mapping of option key -> human-readable description.
        prompt_text: Title shown on the selection panel.

    Returns:
        The toggled-on option keys, in the order they appear in *options*
        (a set used to back the selection previously made the order
        hash-dependent and unpredictable).

    Raises:
        typer.Exit: if the user cancels with Esc or Ctrl+C.
    """
    option_keys = list(options.keys())
    selected_index = 0
    selected_items: set[str] = set()

    def create_selection_panel():
        """Create the selection panel with checkboxes"""
        table = Table.grid(padding=(0, 2))
        table.add_column(style="cyan", justify="left", width=3)
        table.add_column(style="white", justify="left")

        for i, key in enumerate(option_keys):
            # "[x]" marks toggled options; "▶" marks the cursor row.
            checkbox = "[x]" if key in selected_items else "[ ]"
            if i == selected_index:
                table.add_row("▶", f"{checkbox} [cyan]{key}[/cyan] [dim]({options[key]})[/dim]")
            else:
                table.add_row(" ", f"{checkbox} [cyan]{key}[/cyan] [dim]({options[key]})[/dim]")

        table.add_row("", "")
        table.add_row("", f"[dim]Selected: {len(selected_items)}/{len(options)}[/dim]")
        table.add_row("", "[dim]Use ↑/↓ to navigate, Space to toggle, Enter to confirm, Esc to cancel[/dim]")

        return Panel(
            table,
            title=f"[bold]{prompt_text}[/bold]",
            border_style="cyan",
            padding=(1, 2)
        )

    console.print()

    # transient=True clears the menu from the terminal once selection ends.
    with Live(create_selection_panel(), console=console, transient=True, auto_refresh=False) as live:
        while True:
            try:
                key = get_key()
                if key == 'up':
                    # Wrap around at the edges of the list.
                    selected_index = (selected_index - 1) % len(option_keys)
                elif key == 'down':
                    selected_index = (selected_index + 1) % len(option_keys)
                elif key == 'space':
                    current_key = option_keys[selected_index]
                    if current_key in selected_items:
                        selected_items.remove(current_key)
                    else:
                        selected_items.add(current_key)
                elif key == 'enter':
                    break
                elif key == 'escape':
                    console.print("\n[yellow]Selection cancelled[/yellow]")
                    raise typer.Exit(1)

                # Re-render after every keystroke (auto_refresh is off).
                live.update(create_selection_panel(), refresh=True)

            except KeyboardInterrupt:
                console.print("\n[yellow]Selection cancelled[/yellow]")
                raise typer.Exit(1)

    # Preserve menu order rather than arbitrary set-iteration order.
    return [k for k in option_keys if k in selected_items]
|
|
339
|
+
|
|
340
|
+
|
|
341
|
+
class BannerGroup(TyperGroup):
    """Custom group that shows banner before help."""

    def format_help(self, ctx, formatter):
        # Print the ASCII banner first so `mapify --help` output is branded,
        # then delegate to Typer's normal help rendering.
        show_banner()
        super().format_help(ctx, formatter)
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
# Root Typer application. invoke_without_command lets the bare `mapify`
# invocation reach callback() (which prints the banner); BannerGroup brands
# the --help output.
app = typer.Typer(
    name="mapify",
    help="Setup tool for MAP Framework projects",
    add_completion=False,
    invoke_without_command=True,
    cls=BannerGroup,
)

# Create subcommand groups
recitation_app = typer.Typer(name="recitation", help="Manage task execution plans (recitation pattern)")
playbook_app = typer.Typer(name="playbook", help="Manage and search playbook patterns")

# Mount the groups as `mapify recitation ...` and `mapify playbook ...`.
app.add_typer(recitation_app, name="recitation")
app.add_typer(playbook_app, name="playbook")
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
def show_banner():
    """Print the centered ASCII-art banner and tagline to the console."""
    rows = BANNER.strip().split('\n')
    palette = ["bright_blue", "blue", "cyan"]

    art = Text()
    for idx, row in enumerate(rows):
        # Cycle through the palette so successive banner rows alternate colors.
        art.append(row + "\n", style=palette[idx % len(palette)])

    console.print(Align.center(art))
    console.print(Align.center(Text(TAGLINE, style="italic bright_yellow")))
    console.print()
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
def version_callback(value: bool):
    """Eager `--version` option callback: print the version and stop."""
    if not value:
        return
    console.print(f"mapify-cli version {__version__}")
    raise typer.Exit()
|
|
386
|
+
|
|
387
|
+
|
|
388
|
+
@app.callback()
def callback(
    ctx: typer.Context,
    version: Optional[bool] = typer.Option(None, "--version", callback=version_callback, is_eager=True, help="Show version and exit")
):
    """Show banner when no subcommand is provided."""
    # Only show the bare banner for a plain `mapify` invocation: skip it when
    # a subcommand ran, when help was requested (checked via sys.argv because
    # Typer intercepts --help itself), or when --version already printed.
    if ctx.invoked_subcommand is None and "--help" not in sys.argv and "-h" not in sys.argv and not version:
        show_banner()
        console.print(Align.center("[dim]Run 'mapify --help' for usage information[/dim]"))
        console.print()
|
|
398
|
+
|
|
399
|
+
|
|
400
|
+
def check_tool(tool: str) -> bool:
    """Return True if *tool* is installed and discoverable.

    Looks the tool up on PATH via shutil.which. The Claude CLI gets special
    handling: its conventional local install at ~/.claude/local/claude is
    accepted even though that directory is not normally on PATH.
    """
    if tool == "claude":
        claude_local_path = Path.home() / ".claude" / "local" / "claude"
        # is_file() already implies existence, so a separate exists() check
        # (as previously written) is redundant.
        if claude_local_path.is_file():
            return True

    return shutil.which(tool) is not None
|
|
409
|
+
|
|
410
|
+
|
|
411
|
+
def check_mcp_server(server: str) -> bool:
    """Report whether an MCP server is available/configured.

    Placeholder implementation: actual MCP configuration is not inspected
    yet, so every server is optimistically reported as available.
    """
    return True
|
|
416
|
+
|
|
417
|
+
|
|
418
|
+
def is_debug_enabled(debug_flag: Optional[bool] = None) -> bool:
    """Decide whether debug logging should be on.

    Args:
        debug_flag: CLI --debug flag value (None, True, or False).
            An explicit flag always wins over the environment.

    Returns:
        True if debug logging should be enabled. With no flag, the
        MAP_DEBUG environment variable enables debug when set to any of
        'true', '1', 'yes', 'on' (case-insensitive).
    """
    if debug_flag is not None:
        return debug_flag
    return os.environ.get('MAP_DEBUG', '').lower() in ('true', '1', 'yes', 'on')
|
|
435
|
+
|
|
436
|
+
|
|
437
|
+
def get_templates_dir() -> Path:
    """Locate the bundled templates directory.

    Resolution order:
      1. Package resources via importlib.resources (installed wheel).
      2. A ``templates`` folder next to this module.
      3. Parent directories (editable/development checkouts).

    Returns:
        Path to an existing templates directory.

    Raises:
        RuntimeError: if no templates directory can be found.
    """
    import importlib.resources
    try:
        # Python 3.11+ with importlib.resources.files
        if hasattr(importlib.resources, 'files'):
            candidate = Path(str(importlib.resources.files('mapify_cli') / 'templates'))
            # Only trust the resource path if it actually exists; previously a
            # resolvable package with missing templates returned a bogus path
            # and skipped the filesystem fallbacks below.
            if candidate.exists():
                return candidate
    except Exception:
        pass

    # Fallback to module directory
    module_dir = Path(__file__).parent
    templates_dir = module_dir / "templates"
    if templates_dir.exists():
        return templates_dir

    # Development mode - check parent directories
    for parent in (module_dir.parent, module_dir.parent.parent):
        templates_dir = parent / "templates"
        if templates_dir.exists():
            return templates_dir

    raise RuntimeError("Templates directory not found. Please reinstall mapify-cli.")
|
|
460
|
+
|
|
461
|
+
|
|
462
|
+
def create_agent_files(project_path: Path, mcp_servers: List[str]) -> None:
    """Create MAP agent definition files under ``<project>/.claude/agents/``.

    Prefers copying the full agent templates bundled with the package
    (this preserves their template variables). If the bundled templates are
    missing, falls back to generating simplified agent files tailored to the
    selected MCP servers.

    Args:
        project_path: Root directory of the target project.
        mcp_servers: MCP server names chosen during setup; controls which
            MCP-integration sections the fallback generators emit.
    """
    agents_dir = project_path / ".claude" / "agents"
    agents_dir.mkdir(parents=True, exist_ok=True)

    templates_dir = get_templates_dir()
    agents_template_dir = templates_dir / "agents"

    if agents_template_dir.exists():
        # Copy original agent files from templates (preserves template
        # variables!). The redundant function-local `import shutil` that
        # shadowed the module-level import has been removed.
        # These files live alongside the agents but are documentation, not agents:
        exclude_files = {"README.md", "CHANGELOG.md", "MCP-PATTERNS.md"}

        for agent_template in agents_template_dir.glob("*.md"):
            if agent_template.name in exclude_files:
                continue
            shutil.copy2(agent_template, agents_dir / agent_template.name)
    else:
        # Fallback: generate simplified versions if templates not found.
        # NOTE: orchestrator removed (moved to slash commands in production architecture)
        agents = {
            "task-decomposer": create_task_decomposer_content(mcp_servers),
            "actor": create_actor_content(mcp_servers),
            "monitor": create_monitor_content(mcp_servers),
            "predictor": create_predictor_content(mcp_servers),
            "evaluator": create_evaluator_content(mcp_servers),
            "reflector": create_reflector_content(mcp_servers),
            "curator": create_curator_content(mcp_servers),
            "test-generator": create_test_generator_content(mcp_servers),
            "documentation-reviewer": create_documentation_reviewer_content(mcp_servers)
        }

        for name, content in agents.items():
            (agents_dir / f"{name}.md").write_text(content)
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
def create_task_decomposer_content(mcp_servers: List[str]) -> str:
    """Build the fallback task-decomposer agent markdown.

    Emits a Claude Code agent definition (YAML frontmatter + body) with an
    MCP-integration section included only for the servers the user selected.
    """
    mcp_section = ""
    if any(s in mcp_servers for s in ["cipher", "sequential-thinking", "deepwiki", "context7"]):
        mcp_section = """
## MCP Integration

**ALWAYS use these MCP tools:**
"""
        if "cipher" in mcp_servers:
            mcp_section += """
1. **mcp__cipher__cipher_memory_search** - Search for similar features/patterns
   - Query: "feature implementation [feature_name]"
   - Query: "task decomposition [similar_goal]"
"""
        if "sequential-thinking" in mcp_servers:
            mcp_section += """
2. **mcp__sequential-thinking__sequentialthinking** - For complex planning
   - Use when goal is ambiguous or has many dependencies
"""
        if "deepwiki" in mcp_servers:
            mcp_section += """
3. **mcp__deepwiki__ask_question** - Get insights from GitHub repositories
   - Ask: "How does [repo] implement [feature]?"
"""
        if "context7" in mcp_servers:
            mcp_section += """
4. **mcp__context7__get-library-docs** - Get up-to-date library documentation
   - First use resolve-library-id to find the library
"""

    # Frontmatter + body consumed by Claude Code as an agent definition.
    return f"""---
name: task-decomposer
description: Breaks complex goals into atomic, testable subtasks (MAP)
tools: Read, Grep, Glob
model: sonnet
---

# Role: Task Decomposition Specialist (MAP)

You are a software architect who turns high-level feature goals into clear, atomic, testable subtasks with explicit dependencies and acceptance criteria.
{mcp_section}
## Responsibilities

- Analyze the goal and repository context
- Identify prerequisites and dependencies
- Produce a logically ordered list of atomic subtasks
- Include affected files, risks, and acceptance criteria

## Output Format (JSON only)

Return a valid JSON document with subtasks, dependencies, and acceptance criteria.
"""
|
|
557
|
+
|
|
558
|
+
|
|
559
|
+
def create_actor_content(mcp_servers: List[str]) -> str:
    """Build the fallback actor agent markdown.

    Emits a Claude Code agent definition (YAML frontmatter + body) with an
    MCP-integration section included only for the servers the user selected.
    """
    mcp_section = ""
    if any(s in mcp_servers for s in ["cipher", "codex-bridge", "context7", "deepwiki"]):
        mcp_section = """
# MCP INTEGRATION

**ALWAYS use these MCP tools:**
"""
        if "cipher" in mcp_servers:
            mcp_section += """
1. **mcp__cipher__cipher_memory_search** - Search for code patterns
   - Query: "implementation pattern [feature_type]"
   - Store successful implementations after validation
"""
        if "codex-bridge" in mcp_servers:
            mcp_section += """
2. **mcp__codex-bridge__consult_codex** - Generate optimized code solutions
   - Use for complex algorithms or unfamiliar APIs
   - NOTE: Set timeout=600 (10 minutes) for complex operations
   - Example: consult_codex(query="...", directory=".", timeout=600)
"""
        if "context7" in mcp_servers:
            mcp_section += """
3. **mcp__context7__get-library-docs** - Get current library documentation
   - Essential when using external libraries/frameworks
"""
        if "deepwiki" in mcp_servers:
            mcp_section += """
4. **mcp__deepwiki__read_wiki_contents** - Study implementation patterns
   - Learn from production code examples
"""

    # NOTE: {{}} inside the f-string renders as a literal {} in the output.
    return f"""---
name: actor
description: Generates production-ready implementation proposals (MAP)
tools: Read, Write, Edit, Bash, Grep, Glob
model: sonnet
---

# IDENTITY

You are a senior software engineer who writes clean, efficient, production-ready code.
{mcp_section}
# SOURCE OF TRUTH (CRITICAL FOR DOCUMENTATION)

**IF writing or updating documentation, ALWAYS find and read source documents FIRST:**

## Discovery Process

1. **Find design documents** via Glob:
   - **/tech-design.md, **/architecture.md, **/design-doc.md, **/api-spec.md
   - Look in: docs/, docs/private/, docs/architecture/, project root
   - Check parent directories if in decomposition subfolder

2. **Read source BEFORE writing**:
   - Extract API structures (spec, status fields, exact types)
   - Extract lifecycle logic (enabled/disabled, install/uninstall triggers)
   - Extract component responsibilities (who installs, who owns CRDs)
   - Extract integration patterns (data flows, adapters needed)

3. **Use source as authority**:
   - DON'T generalize from examples or DOD scenarios
   - DON'T assume partial patterns apply globally
   - DON'T write critical sections without verifying against source
   - DO quote exact field names, types, logic from source

## Common Mistakes to Avoid

❌ Wrong: Using presets: [] (empty array for one engine) when source defines engines: {{}} (empty map for all engines)
❌ Wrong: Generalizing from DOD scenario to Uninstallation logic
❌ Wrong: Writing "triggers deletion" without checking what exactly gets deleted

✅ Right: Read tech-design.md → Find definitions → Use exact syntax
✅ Right: Check lifecycle section in source → Verify behavior → Document accurately
✅ Right: Look up component responsibilities → State correctly if source says so

## When Writing Documentation

- Step 1: Find source documents (Glob for **/tech-design.md, etc.)
- Step 2: Read source completely (don't just search for keywords)
- Step 3: Extract authoritative definitions (API, lifecycle, responsibilities)
- Step 4: Write section using source definitions
- Step 5: Cross-reference: Does my text match source? Line by line?

Remember: tech-design.md is source of truth, NOT DOD scenarios, NOT examples, NOT your interpretation.

# TASK

Implement the subtask with clean, testable code following project patterns.

# OUTPUT FORMAT

Provide implementation with approach, code changes, trade-offs, and testing considerations.
"""
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
def create_monitor_content(mcp_servers: List[str]) -> str:
    """Build the fallback monitor (code reviewer) agent markdown.

    Emits a Claude Code agent definition (YAML frontmatter + body); the
    MCP-integration section is only included when claude-reviewer was selected.
    """
    mcp_section = ""
    if "claude-reviewer" in mcp_servers:
        mcp_section = """
# MCP INTEGRATION

**ALWAYS use these MCP tools for comprehensive review:**

1. **mcp__claude-reviewer__request_review** - Get professional AI code review
   - Use FIRST to get baseline review, then add your analysis
"""

    # NOTE: {{}} inside the f-string renders as a literal {} in the output.
    return f"""---
name: monitor
description: Reviews code for correctness, standards, security, and testability (MAP)
tools: Read, Grep, Bash, Glob
model: sonnet
---

# IDENTITY

You are a meticulous code reviewer and security expert. Your mission is to catch bugs, vulnerabilities, and violations before code reaches production.
{mcp_section}
# REVIEW CHECKLIST

Work through: Correctness, Security, Code Quality, Performance, Testability, Maintainability

## DOCUMENTATION CONSISTENCY (CRITICAL)

**When reviewing decomposition/implementation documents:**

- Find source of truth (tech-design.md, architecture.md):
  * Use Glob: **/tech-design.md, **/architecture.md, **/design-doc.md
  * Look in parent directories if reviewing decomposition

- Read source document FIRST
- Verify API consistency:
  * All spec fields match source?
  * All status fields match source?
  * Field types and defaults consistent?
  * Example: engines: {{}} vs presets: [] - different semantics!

- Verify lifecycle consistency:
  * Does enabled: false behavior match source?
  * Are uninstallation triggers correct?
  * Are state transitions consistent?
  * Check two-level patterns (e.g., enabled: false vs engines: {{}})

- Verify component responsibilities:
  * Installation ownership matches source?
  * CRD ownership consistent?
  * Integration patterns same as source?

Red flags - mark as CRITICAL issue:
- Decomposition contradicts tech-design on lifecycle logic
- Missing critical spec/status fields from source
- Wrong component ownership
- Lifecycle levels confused (partial vs global state)
- Not using tech-design definitions (generalizing from examples instead)

# OUTPUT FORMAT (JSON)

Return strictly valid JSON with validation results and specific issues.
"""
|
|
721
|
+
|
|
722
|
+
|
|
723
|
+
def create_predictor_content(mcp_servers: List[str]) -> str:
    """Create predictor agent content.

    Builds the markdown definition for the predictor sub-agent; an MCP
    integration section is included only when at least one of the known
    MCP servers was selected.
    """
    # Per-server documentation snippets, keyed by server name. Dict order
    # fixes the emission order so output is stable for a given selection.
    snippets = {
        "cipher": """
1. **mcp__cipher__cipher_memory_search** - Find similar impact patterns
- Query: "impact analysis [change_type]"
- Learn from past breaking changes
""",
        "codex-bridge": """
2. **mcp__codex-bridge__consult_codex** - Analyze complex dependency chains
- Use for deep code analysis and impact prediction
- NOTE: Set timeout=600 (10 minutes) for thorough analysis
- Example: consult_codex(query="analyze impact of...", directory=".", timeout=600)
""",
        "deepwiki": """
3. **mcp__deepwiki__ask_question** - Check how repos handle similar changes
- Ask: "What breaks when changing [component]?"
""",
        "context7": """
4. **mcp__context7__get-library-docs** - Check library compatibility
- Verify API changes against current documentation
""",
    }

    mcp_section = ""
    if any(server in mcp_servers for server in snippets):
        header = """
## MCP Integration

**ALWAYS use these MCP tools:**
"""
        body = "".join(
            text for server, text in snippets.items() if server in mcp_servers
        )
        mcp_section = header + body

    return f"""---
name: predictor
description: Predicts consequences and dependency impact of changes (MAP)
tools: Read, Grep, Glob, Bash
model: sonnet
---

# Role: Impact Analysis Specialist (MAP)

You analyze proposed changes to predict their effects across the codebase.
{mcp_section}
## Analysis Process

1. Read the proposed code changes
2. Identify directly modified files and APIs
3. Trace dependencies using Grep/Glob
4. Predict the resulting state and risks

## Output Format (JSON only)

Return JSON with predicted state, affected components, breaking changes, and risk assessment.
"""
|
|
778
|
+
|
|
779
|
+
|
|
780
|
+
def create_evaluator_content(mcp_servers: List[str]) -> str:
    """Create evaluator agent content.

    The evaluator template is static; *mcp_servers* is accepted only so
    the signature stays uniform with the other create_*_content helpers.
    """
    template = """---
name: evaluator
description: Evaluates solution quality and completeness (MAP)
tools: Read, Bash, Grep
model: sonnet
---

# Role: Solution Quality Evaluator (MAP)

You provide objective scoring based on multi-dimensional quality criteria.

## Evaluation Criteria (0–10)

1. Functionality — meets requirements
2. Code Quality — readability, maintainability
3. Performance — efficiency
4. Security — best practices
5. Testability — ease of testing
6. Completeness — tests/docs/error handling

## Output Format (JSON only)

Return JSON with scores, strengths, weaknesses, and recommendation (proceed|improve|reconsider).
"""
    return template
|
|
806
|
+
|
|
807
|
+
|
|
808
|
+
def create_reflector_content(mcp_servers: List[str]) -> str:
    """Create reflector agent content.

    The MCP section is emitted only when cipher was selected, since the
    reflector's only MCP integration is cipher memory search.
    """
    cipher_block = """
# MCP INTEGRATION

**ALWAYS use cipher for knowledge management:**

1. **mcp__cipher__cipher_memory_search** - Check existing patterns
- Query: "lesson learned [topic]"
- Avoid duplicating existing knowledge
"""
    mcp_section = cipher_block if "cipher" in mcp_servers else ""

    return f"""---
name: reflector
description: Extracts structured lessons from execution attempts (ACE)
tools: Read, Grep, Glob
model: sonnet
---

# IDENTITY

You are a reflection specialist who analyzes execution attempts to extract structured, actionable lessons learned.
{mcp_section}
# ROLE

Analyze Actor implementations and Monitor feedback to identify:
- What worked well (success patterns)
- What failed and why (failure patterns)
- Reusable insights for future implementations
- Anti-patterns to avoid

## Output Format (JSON)

Return JSON with:
- key_insight: Main lesson learned
- success_patterns: What worked well
- failure_patterns: What went wrong
- suggested_new_bullets: Playbook entries to add
- confidence: How reliable this insight is
"""
|
|
850
|
+
|
|
851
|
+
|
|
852
|
+
def create_curator_content(mcp_servers: List[str]) -> str:
    """Create curator agent content.

    Only cipher feeds the curator's MCP section (used for playbook
    deduplication); other servers do not affect the template.
    """
    cipher_block = """
# MCP INTEGRATION

**Use cipher for deduplication:**

1. **mcp__cipher__cipher_memory_search** - Check for duplicate patterns
- Prevents adding redundant playbook entries
"""
    mcp_section = cipher_block if "cipher" in mcp_servers else ""

    return f"""---
name: curator
description: Manages structured playbook with incremental updates (ACE)
tools: Read, Write, Edit
model: sonnet
---

# IDENTITY

You are a knowledge curator who maintains the ACE playbook by integrating Reflector insights.
{mcp_section}
# ROLE

Integrate Reflector insights into playbook using delta operations:
- ADD: New pattern bullets
- UPDATE: Increment helpful/harmful counters
- DEPRECATE: Remove harmful patterns

## Quality Gates

- Content length ≥ 100 characters
- Code examples for technical patterns
- Deduplication via semantic similarity
- Technology-specific (not generic advice)

## Output Format (JSON)

Return JSON with:
- reasoning: Why these operations improve playbook
- operations: Array of ADD/UPDATE/DEPRECATE operations
- deduplication_check: What duplicates were found
"""
|
|
897
|
+
|
|
898
|
+
|
|
899
|
+
def create_test_generator_content(mcp_servers: List[str]) -> str:
    """Create test-generator agent content.

    The MCP section lists cipher and/or context7 snippets depending on
    which servers were selected; other servers are ignored here.
    """
    # Snippets in fixed emission order (cipher before context7).
    snippets = {
        "cipher": """
1. **mcp__cipher__cipher_memory_search** - Find similar test patterns
- Query: "test pattern [feature_type]"
""",
        "context7": """
2. **mcp__context7__get-library-docs** - Verify testing framework usage
- Ensure correct test syntax for language/framework
""",
    }

    mcp_section = ""
    if any(server in mcp_servers for server in snippets):
        mcp_section = """
# MCP INTEGRATION

**Use these tools for test generation:**
""" + "".join(text for server, text in snippets.items() if server in mcp_servers)

    return f"""---
name: test-generator
description: Generates comprehensive test suites for Actor output
tools: Read, Write, Edit, Bash, Grep, Glob
model: sonnet
---

# IDENTITY

You are a test automation specialist who creates comprehensive, maintainable test suites.
{mcp_section}
# ROLE

Generate tests for Actor implementations covering:
- Unit tests (individual functions)
- Integration tests (component interactions)
- Edge cases and error handling
- Security-critical paths (100% coverage required)

## Test Strategy

1. **AAA Pattern**: Arrange, Act, Assert
2. **Coverage Targets**:
- Critical code: 100%
- High priority: 90%
- Medium priority: 80%
3. **Edge Cases**: Empty inputs, null values, boundaries

## Output Format (JSON)

Return JSON with:
- approach: Test strategy for this code
- test_files: Array of {{file_path, content, test_type}}
- coverage_analysis: Expected coverage percentage
- testing_notes: Special considerations
"""
|
|
955
|
+
|
|
956
|
+
|
|
957
|
+
def create_documentation_reviewer_content(mcp_servers: List[str]) -> str:
    """Create documentation-reviewer agent content.

    Emits cipher/context7/deepwiki MCP snippets for whichever of those
    servers were selected; the rest of the template is static.
    """
    snippets = {
        "cipher": """
1. **mcp__cipher__cipher_memory_search** - Check for known patterns
- Query: "external dependency detection [technology]"
- Query: "CRD installation pattern [project]"
""",
        "context7": """
2. **mcp__context7__get-library-docs** - Verify library requirements
- Check official docs for installation requirements
- Validate version compatibility
""",
        "deepwiki": """
3. **mcp__deepwiki__ask_question** - Compare with similar projects
- Ask: "How do other projects handle [integration]?"
- Learn from successful implementations
""",
    }

    mcp_section = ""
    if any(server in mcp_servers for server in snippets):
        mcp_section = """
# MCP INTEGRATION

**ALWAYS use these tools for documentation review:**
""" + "".join(text for server, text in snippets.items() if server in mcp_servers)

    return f"""---
name: documentation-reviewer
description: Reviews technical documentation for completeness, external dependencies, and architectural consistency
tools: Read, Grep, Glob, Fetch
model: sonnet
---

# IDENTITY

You are a technical documentation expert specialized in architecture reviews and dependency analysis.
{mcp_section}
# REVIEW CHECKLIST

## 1. EXTERNAL DEPENDENCIES SCAN
- Extract all URLs via pattern matching
- Use Fetch tool (10s timeout) to verify each URL
- Check for CRDs, Helm charts, installation instructions
- Determine installation responsibility
- Verify documentation completeness

## 2. CRD DETECTION LOGIC
Look for:
- YAML with apiVersion: apiextensions.k8s.io/v1
- kind: CustomResourceDefinition
- Mentions of "custom resource"
- Controller/operator projects

## 3. CONSISTENCY WITH SOURCE OF TRUTH (CRITICAL)

**ALWAYS verify decomposition documents against tech-design/architecture:**

### Source of Truth Discovery
- Find source documents via Glob: **/tech-design.md, **/architecture.md, **/design-doc.md
- Look in parent directories: docs/, docs/private/, project root
- Read source documents FIRST before reviewing decomposition
- Extract key concepts: API structures, lifecycle states, component responsibilities, integration patterns

### Consistency Validation
For each section in target document, verify against source:
- API fields match exactly (all spec and status fields present, types consistent)
* Example: engines: {{}} (empty map) vs engines.kyverno.presets: [] (empty array) - different semantics!
- Lifecycle logic matches (installation/uninstallation triggers same as in source)
* Check: Does enabled: false delete all? Does engines: {{}} delete ClusterPolicySet only?
- Component responsibilities match (who installs what, who owns CRDs, who triggers actions)
- Integration patterns match (data flow direction, adapter requirements, API versions)

### Red Flags (Auto-fail if found)
❌ Critical inconsistencies:
- Target document contradicts source on lifecycle logic
- Missing critical spec/status fields from source
- Wrong component ownership (e.g., "User installs" when source says "Component Manager installs")
- Lifecycle levels confused (e.g., using presets: [] when should be engines: {{}})

❌ Common mistakes to catch:
- Generalizing from DOD scenarios instead of using tech-design definitions
- Mixing partial state (presets: [] for one engine) with global state (engines: {{}} for all)
- Missing "two-level" patterns (e.g., enabled: false vs engines: {{}})
- Not reading tech-design before writing critical sections

## OUTPUT FORMAT (JSON)

Return strictly valid JSON with:
- valid: boolean
- summary: string
- external_dependencies_checked: array
- missing_requirements: array
- consistency_check: object with source_document, sections_verified, overall_consistency
- score: number (0-10)
- recommendation: "proceed|improve|reconsider"

# DECISION RULES

Return valid=false if:
- Any critical issues found
- External dependencies cannot be verified and are critical
- CRD installation completely undefined
- **Consistency check fails** (overall_consistency: "inconsistent")
- **Source document not read** before reviewing decomposition
- **Critical lifecycle logic mismatch** with source

# CONSTRAINTS

- Be PROACTIVE: Fetch EVERY external URL (with timeout protection)
- Handle errors gracefully: Don't fail on transient network issues
- Security conscious: Validate URLs (no private IPs, localhost)
- Performance aware: Cache results, parallel fetch up to 5 URLs
- Output strictly JSON
"""
|
|
1073
|
+
|
|
1074
|
+
|
|
1075
|
+
def create_command_files(project_path: Path) -> None:
    """Create MAP slash commands in .claude/commands/.

    Prefers copying the bundled markdown templates; falls back to
    generating minimal inline command files when no templates ship
    with the package.
    """
    import shutil

    commands_dir = project_path / ".claude" / "commands"
    commands_dir.mkdir(parents=True, exist_ok=True)

    commands_template_dir = get_templates_dir() / "commands"

    if commands_template_dir.exists():
        # Preferred path: copy every bundled command template verbatim.
        for command_template in commands_template_dir.glob("*.md"):
            shutil.copy2(command_template, commands_dir / command_template.name)
        return

    # Fallback: inline minimal command bodies (templates missing).
    fallback_commands = {
        "map-feature": """---
description: Implement new feature using full MAP workflow
---

Use the orchestrator agent to implement the following feature:

$ARGUMENTS

Start with task decomposition, then iterate through actor-monitor-predictor-evaluator for each subtask.
Store successful patterns in knowledge base for future reuse.
""",
        "map-debug": """---
description: Debug issue using MAP analysis
---

Use the orchestrator agent to debug the following issue:

$ARGUMENTS

Decompose the debugging process, implement fixes, validate with monitor, and assess impact.
""",
        "map-refactor": """---
description: Refactor code with MAP impact analysis
---

Use the orchestrator agent to refactor:

$ARGUMENTS

Use predictor to analyze all dependencies, actor to refactor, and evaluator to ensure quality.
""",
        "map-review": """---
description: Comprehensive MAP review of changes
---

Use monitor, predictor, and evaluator agents to review current changes.

Provide detailed analysis of code quality, potential impacts, and quality scores.
""",
    }

    for command_name, body in fallback_commands.items():
        (commands_dir / f"{command_name}.md").write_text(body)
|
|
1137
|
+
|
|
1138
|
+
|
|
1139
|
+
def install_hooks(project_path: Path, with_hooks: bool = True) -> int:
    """Install Claude Code hooks in .claude/hooks/.

    Copies bundled *.sh hook scripts (marking them executable), the hooks
    README, and the settings.hooks.json wiring file.

    Returns:
        Number of hook scripts installed (0 when disabled or when no
        hook templates are bundled).
    """
    import shutil
    import stat

    if not with_hooks:
        return 0

    hooks_dir = project_path / ".claude" / "hooks"
    hooks_dir.mkdir(parents=True, exist_ok=True)

    templates_dir = get_templates_dir()
    hooks_template_dir = templates_dir / "hooks"
    if not hooks_template_dir.exists():
        # Nothing bundled to install; skip silently.
        return 0

    # Executable bits for owner/group/other, applied on top of whatever
    # mode the copied file already has.
    exec_bits = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH

    installed = 0
    for script in hooks_template_dir.glob("*.sh"):
        target = hooks_dir / script.name
        shutil.copy2(script, target)
        target.chmod(target.stat().st_mode | exec_bits)
        installed += 1

    # Ship the hooks README alongside the scripts when available.
    readme_src = hooks_template_dir / "README.md"
    if readme_src.exists():
        shutil.copy2(readme_src, hooks_dir / "README.md")

    # Hook wiring for Claude Code lives next to the hooks directory.
    settings_src = templates_dir / "settings.hooks.json"
    if settings_src.exists():
        shutil.copy2(settings_src, project_path / ".claude" / "settings.hooks.json")

    return installed
|
|
1184
|
+
|
|
1185
|
+
|
|
1186
|
+
def configure_global_permissions() -> None:
    """Configure global Claude Code permissions for read-only commands.

    Merges a default allow-list of read-only shell/file patterns into
    ~/.claude/settings.json, preserving any permissions the user already
    has. Fix vs. previous version: the summary message now reports the
    number of patterns actually added, not the size of the full default
    list (which was misleading on re-runs where nothing changed).
    """
    claude_dir = Path.home() / ".claude"
    settings_file = claude_dir / "settings.json"

    # Create .claude directory if it doesn't exist
    claude_dir.mkdir(exist_ok=True)

    # Default permissions for read-only commands
    default_permissions = {
        "allow": [
            "Bash(git status:*)",
            "Bash(git log:*)",
            "Bash(git diff:*)",
            "Bash(git show:*)",
            "Bash(git check-ignore:*)",
            "Bash(git branch --show-current:*)",
            "Bash(git branch -a:*)",
            "Bash(git ls-files:*)",
            "Bash(ls :*)",
            "Bash(cat :*)",
            "Bash(head :*)",
            "Bash(tail :*)",
            "Bash(wc :*)",
            "Bash(grep :*)",
            "Bash(find :*)",
            "Bash(sort :*)",
            "Bash(uniq :*)",
            "Bash(jq :*)",
            "Bash(which :*)",
            "Bash(echo :*)",
            "Bash(pwd:*)",
            "Bash(whoami:*)",
            "Bash(python:* -m mapify_cli.recitation_manager:*)",
            "Bash(ruby -c :*)",
            "Bash(go fmt /tmp/:*)",
            "Bash(gofmt -l :*)",
            "Bash(gofmt -d :*)",
            "Bash(go vet :*)",
            "Bash(go build:*)",
            "Bash(go test -c:*)",
            "Bash(go mod download:*)",
            "Bash(go mod tidy:*)",
            "Bash(chmod +x:*)",
            "Read(//Users/**)",
            "Read(//private/tmp/**)",
            "Glob(**)"
        ],
        "deny": []
    }

    # Read existing settings or create new; a corrupted file is replaced.
    if settings_file.exists():
        try:
            with open(settings_file, 'r') as f:
                settings = json.load(f)
        except json.JSONDecodeError:
            console.print("[yellow]Warning:[/yellow] Corrupted settings.json, will recreate")
            settings = {}
    else:
        settings = {}

    # Merge permissions (preserve user's custom permissions) and track
    # how many defaults were genuinely new.
    if "permissions" not in settings:
        settings["permissions"] = default_permissions
        added = len(default_permissions["allow"])
    else:
        allow_list = settings["permissions"].setdefault("allow", [])
        existing_allow = set(allow_list)
        added = 0
        for perm in default_permissions["allow"]:
            if perm not in existing_allow:
                allow_list.append(perm)
                existing_allow.add(perm)
                added += 1

    # Write back
    with open(settings_file, 'w') as f:
        json.dump(settings, f, indent=2)

    console.print(f"[green]✓[/green] Configured global permissions in {settings_file}")
    console.print(f"[dim] Added {added} read-only command patterns[/dim]")
|
|
1264
|
+
|
|
1265
|
+
|
|
1266
|
+
def create_mcp_config(project_path: Path, mcp_servers: List[str]) -> None:
    """Create the MCP configuration file at .claude/mcp_config.json.

    Writes the selected server configurations, the agent→server wiring,
    and workflow settings. Fix vs. previous version: six copy-pasted
    per-server mapping branches are replaced by one table-driven loop
    (identical output, far less duplication).

    Args:
        project_path: Project root; config goes under its .claude/ dir.
        mcp_servers: Names of selected MCP servers; unknown names are
            ignored for server configs but never raise.
    """
    all_agents = [
        "task-decomposer",
        "actor",
        "monitor",
        "predictor",
        "evaluator",
        "orchestrator",
        "reflector",
        "curator",
        "documentation-reviewer",
        "test-generator",
    ]

    config: Dict[str, Any] = {
        "mcp_servers": {},
        "agent_mcp_mappings": {agent: [] for agent in all_agents},
        "workflow_settings": {
            "always_retrieve_knowledge": True,
            "store_successful_patterns": True,
            "use_professional_review": True,
            "enable_sequential_thinking": True,
            "knowledge_cache_ttl": 3600,
        },
    }

    # Static descriptions/settings for every server we know how to wire up.
    server_configs = {
        "claude-reviewer": {
            "enabled": True,
            "description": "Professional AI code review",
            "config": {
                "auto_review": True,
                "focus_areas": ["security", "performance", "testing"],
                "severity_threshold": "medium",
            },
        },
        "sequential-thinking": {
            "enabled": True,
            "description": "Chain-of-thought reasoning",
            "config": {
                "max_thoughts": 10,
                "branch_exploration": True,
                "hypothesis_verification": True,
            },
        },
        "cipher": {
            "enabled": True,
            "description": "Knowledge management system",
            "config": {
                "auto_store": True,
                "retrieval_limit": 5,
                "conflict_resolution": "manual",
            },
        },
        "codex-bridge": {
            "enabled": True,
            "description": "AI code generation",
            "config": {
                "format": "json",
                "timeout": 600,  # 10 minutes required for complex operations
                "batch_size": 5,
            },
        },
        "context7": {
            "enabled": True,
            "description": "Up-to-date library documentation",
            "config": {
                "tokens": 5000,
                "auto_resolve": True,
                "cache_duration": 3600,
            },
        },
        "deepwiki": {
            "enabled": True,
            "description": "GitHub repository intelligence",
            "config": {
                "auto_structure": True,
                "max_depth": 3,
                "cache_repos": True,
            },
        },
    }

    # Add selected servers (unknown names silently skipped).
    for server in mcp_servers:
        if server in server_configs:
            config["mcp_servers"][server] = server_configs[server]

    # Agent targets per server; None means "wire to every agent".
    # Iterated in this fixed order so each agent's mapping list is
    # deterministic regardless of the order of mcp_servers.
    server_agent_targets: Dict[str, Any] = {
        "cipher": None,
        "sequential-thinking": ["task-decomposer", "monitor", "evaluator", "orchestrator", "reflector"],
        "claude-reviewer": ["monitor", "evaluator", "orchestrator"],
        "codex-bridge": ["actor", "predictor", "test-generator"],
        "context7": None,
        "deepwiki": None,
    }
    for server, targets in server_agent_targets.items():
        if server not in mcp_servers:
            continue
        for agent in (all_agents if targets is None else targets):
            if agent in config["agent_mcp_mappings"]:
                config["agent_mcp_mappings"][agent].append(server)

    # Write config file
    config_file = project_path / ".claude" / "mcp_config.json"
    config_file.parent.mkdir(parents=True, exist_ok=True)
    config_file.write_text(json.dumps(config, indent=2))
|
|
1386
|
+
|
|
1387
|
+
|
|
1388
|
+
def init_git_repo(project_path: Path, quiet: bool = False) -> bool:
    """Initialize a git repository in *project_path* with an initial commit.

    Fix vs. previous version: git is invoked with ``cwd=project_path``
    instead of ``os.chdir`` — the caller's working directory (global
    process state) is never mutated, so no try/finally restore is needed
    and concurrent callers are not affected.

    Returns:
        True on success (including the benign "nothing to commit" case),
        False when git fails or is not installed.
    """
    def _git(args: List[str], check: bool = False) -> subprocess.CompletedProcess:
        # All git invocations run inside the target project directory.
        return subprocess.run(
            ["git", *args],
            cwd=project_path,
            check=check,
            capture_output=True,
            text=True,
        )

    try:
        if not quiet:
            console.print("[cyan]Initializing git repository...[/cyan]")

        # Initialize repository
        _git(["init"], check=True)

        # Check if user has configured git identity
        try:
            user_email = _git(["config", "user.email"]).stdout.strip()
            user_name = _git(["config", "user.name"]).stdout.strip()

            if not user_email or not user_name:
                if not quiet:
                    console.print("[yellow]Git identity not configured.[/yellow]")
                    console.print("Setting temporary git identity for initial commit...")

                # Set temporary identity for this repository only
                _git(["config", "--local", "user.email", "map-framework@example.com"], check=True)
                _git(["config", "--local", "user.name", "MAP Framework"], check=True)

                if not quiet:
                    console.print("[yellow]Note: Please configure your git identity with:[/yellow]")
                    console.print(" git config --global user.email 'your.email@example.com'")
                    console.print(" git config --global user.name 'Your Name'")
        except subprocess.CalledProcessError:
            # If we can't check config, set temporary values (best effort).
            _git(["config", "--local", "user.email", "map-framework@example.com"])
            _git(["config", "--local", "user.name", "MAP Framework"])

        # Add files and create initial commit
        _git(["add", "."], check=True)

        result = _git(["commit", "-m", "Initial commit from MAP Framework"])
        if result.returncode != 0:
            # "nothing to commit" is benign (all files may be gitignored).
            if "nothing to commit" in result.stdout or "nothing to commit" in result.stderr:
                if not quiet:
                    console.print("[yellow]⚠[/yellow] No files to commit (check .gitignore)")
                return True
            raise subprocess.CalledProcessError(
                result.returncode, result.args, result.stdout, result.stderr
            )

        if not quiet:
            console.print("[green]✓[/green] Git repository initialized")
        return True
    except subprocess.CalledProcessError as e:
        if not quiet:
            error_msg = e.stderr if getattr(e, 'stderr', None) else str(e)
            console.print(f"[red]Error initializing git repository:[/red] {error_msg}")
            console.print("[yellow]Tip: You can skip git initialization with --no-git[/yellow]")
        return False
    except FileNotFoundError:
        if not quiet:
            console.print("[red]Git is not installed or not in PATH.[/red]")
            console.print("[yellow]Please install git or use --no-git to skip repository initialization[/yellow]")
        return False
|
|
1486
|
+
|
|
1487
|
+
|
|
1488
|
+
def is_git_repo(path: Optional[Path] = None) -> bool:
    """Check whether *path* (default: current directory) is inside a git work tree.

    Returns False both when git reports "not a repository" and when the
    git binary is not installed at all.
    """
    target = Path.cwd() if path is None else path
    try:
        subprocess.run(
            ["git", "rev-parse", "--is-inside-work-tree"],
            check=True,
            capture_output=True,
            cwd=target,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True
|
|
1503
|
+
|
|
1504
|
+
|
|
1505
|
+
def is_command(cmd_list: List[str]) -> bool:
    """Check if a command exists on the system.

    Only the first element of *cmd_list* (the executable name) is
    examined; any arguments are ignored.

    Args:
        cmd_list: Command line as a list, e.g. ["git", "status"].

    Returns:
        True if the executable can be found, False for an empty list or
        an unknown command.
    """
    if not cmd_list:
        return False
    # shutil.which is portable (the previous implementation shelled out
    # to the external `which` binary, which does not exist on Windows)
    # and avoids spawning a subprocess for a simple PATH lookup.
    import shutil
    return shutil.which(cmd_list[0]) is not None
|
|
1518
|
+
|
|
1519
|
+
|
|
1520
|
+
def get_latest_release(owner: str, repo: str) -> Optional[Dict[str, Any]]:
    """Fetch metadata for the latest GitHub release of *owner*/*repo*.

    Best-effort lookup: network, TLS, or parsing failures -- and any
    non-200 response -- all yield None rather than raising.
    """
    endpoint = f"https://api.github.com/repos/{owner}/{repo}/releases/latest"
    try:
        with httpx.Client(verify=create_ssl_context()) as client:
            reply = client.get(endpoint)
            if reply.status_code == 200:
                return reply.json()
    except Exception:
        # Deliberate best-effort: callers treat None as "no release info".
        pass
    return None
|
|
1531
|
+
|
|
1532
|
+
|
|
1533
|
+
def create_commands_dir(project_path: Path) -> None:
    """Create the .claude/commands directory and seed it with a README.

    Args:
        project_path: Root directory of the project being initialized.

    Idempotent: existing directories are reused and the README is
    rewritten with the current template on every call.
    """
    commands_dir = project_path / ".claude" / "commands"
    commands_dir.mkdir(parents=True, exist_ok=True)

    readme = commands_dir / "README.md"
    # Explicit encoding so output does not depend on the user's locale
    # (Path.write_text otherwise uses the locale-preferred encoding).
    readme.write_text("""# Claude Code Commands

This directory contains custom slash commands for Claude Code.

## Available Commands

- `/map-review` - Comprehensive review of changes using MAP framework
- `/map-refactor` - Refactor code with MAP impact analysis
- `/map-debug` - Debug issues using MAP analysis
- `/map-feature` - Implement new features using full MAP workflow

## Creating Custom Commands

Create a new `.md` file in this directory with the following format:

```markdown
---
description: Brief description of your command
---

Your command prompt here
```

The filename becomes the command name (without the `.md` extension).
""", encoding="utf-8")
|
|
1564
|
+
|
|
1565
|
+
|
|
1566
|
+
@app.command()
def init(
    project_name: Optional[str] = typer.Argument(None, help="Name for your new project directory (use '.' for current directory)"),
    mcp: Optional[str] = typer.Option(None, "--mcp", help="MCP servers to enable: all, essential, docs, none, or comma-separated list"),
    no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"),
    force: bool = typer.Option(False, "--force", help="Force merge/overwrite when using '.' in non-empty directory"),
    with_hooks: bool = typer.Option(True, "--with-hooks/--no-hooks", help="Install Claude Code hooks (default: yes)"),
    debug: bool = typer.Option(False, "--debug", help="Enable debug logging (creates .map/logs/workflow_*.log)"),
):
    """
    Initialize a new MAP Framework project.

    This command will:
    1. Check that required tools are installed
    2. Create MCP configuration files
    3. Create MAP agents and commands
    4. Initialize a git repository (optional)

    Examples:
        mapify init my-project
        mapify init my-project --mcp all
        mapify init my-project --mcp "cipher,context7"
        mapify init .
        mapify init . --force  # Force init in non-empty current directory
        mapify init --debug  # Enable workflow logging
    """
    # Show banner
    show_banner()

    # Initialize workflow logger if debug mode is enabled
    # NOTE(review): workflow_logger is not referenced again after this
    # block; presumably starting a session is enough for events to be
    # recorded -- confirm against mapify_cli/workflow_logger.py.
    workflow_logger = None
    if is_debug_enabled(debug):
        # Imported lazily so the logger module is only loaded in debug runs.
        from mapify_cli.workflow_logger import MapWorkflowLogger
        workflow_logger = MapWorkflowLogger(Path.cwd(), enabled=True)
        log_file = workflow_logger.start_session(task_id=f"mapify_init_{datetime.now().strftime('%Y%m%d_%H%M%S')}")
        console.print(f"[dim]Debug logging enabled: {log_file}[/dim]")
        workflow_logger.log_event("command_start", f"mapify init {project_name or '.'}", metadata={"debug": debug, "mcp": mcp})

    # Handle '.' as shorthand for current directory
    use_current_dir = (project_name == ".")

    if use_current_dir:
        project_name = None

    # Validate arguments
    if not use_current_dir and not project_name:
        console.print("[red]Error:[/red] Must specify either a project name or use '.' for current directory")
        raise typer.Exit(1)

    # Determine project directory
    if use_current_dir:
        # Reuse the current directory; its basename becomes the project name.
        project_name = Path.cwd().name
        project_path = Path.cwd()

        # Check if current directory has any files
        existing_items = list(project_path.iterdir())
        if existing_items:
            console.print(f"[yellow]Warning:[/yellow] Current directory is not empty ({len(existing_items)} items)")
            if not force:
                # --force skips this confirmation prompt.
                response = typer.confirm("Do you want to continue?")
                if not response:
                    console.print("[yellow]Operation cancelled[/yellow]")
                    raise typer.Exit(0)
    else:
        # Fresh project directory: refuse to clobber an existing one.
        project_path = Path(project_name).resolve()
        if project_path.exists():
            console.print(f"[red]Error:[/red] Directory '{project_name}' already exists")
            raise typer.Exit(1)
        project_path.mkdir(parents=True)

    # Setup tracker
    tracker = StepTracker("Initialize MAP Framework Project")

    # Check tools
    tracker.add("check-tools", "Check required tools")
    tracker.start("check-tools")

    git_available = check_tool("git")
    claude_available = check_tool("claude")

    # Summarise which tools were found; neither is strictly required to proceed.
    if claude_available:
        tracker.complete("check-tools", "git, claude")
    elif git_available:
        tracker.complete("check-tools", "git")
    else:
        tracker.complete("check-tools", "minimal")

    # Use Claude Code (the only supported AI assistant)
    tracker.add("ai-select", "Select AI assistant")
    selected_ai = "claude"
    tracker.complete("ai-select", selected_ai)

    # Select MCP servers
    tracker.add("mcp-select", "Select MCP servers")
    tracker.start("mcp-select")

    selected_mcp_servers = []

    # --mcp accepts the presets "all"/"essential"/"docs"/"none" or a
    # comma-separated list of server names; unknown names are dropped
    # silently. With no --mcp, fall through to interactive selection.
    if mcp == "all":
        selected_mcp_servers = list(INDIVIDUAL_MCP_SERVERS.keys())
    elif mcp == "essential":
        selected_mcp_servers = ["cipher", "claude-reviewer", "sequential-thinking"]
    elif mcp == "docs":
        selected_mcp_servers = ["context7", "deepwiki"]
    elif mcp == "none":
        selected_mcp_servers = []
    elif mcp:
        # Parse comma-separated list
        selected_mcp_servers = [s.strip() for s in mcp.split(",") if s.strip() in INDIVIDUAL_MCP_SERVERS]
    else:
        # Interactive selection
        mcp_choice = select_with_arrows(MCP_SERVER_CHOICES, "Choose MCP configuration:", "essential")

        if mcp_choice == "all":
            selected_mcp_servers = list(INDIVIDUAL_MCP_SERVERS.keys())
        elif mcp_choice == "essential":
            selected_mcp_servers = ["cipher", "claude-reviewer", "sequential-thinking"]
        elif mcp_choice == "docs":
            selected_mcp_servers = ["context7", "deepwiki"]
        elif mcp_choice == "custom":
            selected_mcp_servers = select_multiple_with_arrows(INDIVIDUAL_MCP_SERVERS, "Select MCP servers:")
        else:
            selected_mcp_servers = []

    tracker.complete("mcp-select", f"{len(selected_mcp_servers)} servers")

    # Create MAP files
    tracker.add("create-agents", "Create MAP agents")
    tracker.start("create-agents")
    create_agent_files(project_path, selected_mcp_servers)
    tracker.complete("create-agents", "9 agents")

    tracker.add("create-commands", "Create slash commands")
    tracker.start("create-commands")
    create_command_files(project_path)
    tracker.complete("create-commands", "4 commands")

    # Install Claude Code hooks
    if with_hooks:
        tracker.add("install-hooks", "Install Claude Code hooks")
        tracker.start("install-hooks")
        hooks_count = install_hooks(project_path, with_hooks=True)
        # Singular/plural agreement for the status message.
        hooks_word = "hook" if hooks_count == 1 else "hooks"
        tracker.complete("install-hooks", f"{hooks_count} {hooks_word} installed")

    # MCP config is only written when at least one server was selected.
    if selected_mcp_servers:
        tracker.add("mcp-config", "Create MCP config file")
        tracker.start("mcp-config")
        create_mcp_config(project_path, selected_mcp_servers)
        tracker.complete("mcp-config", f"{len(selected_mcp_servers)} servers")

    # Initialize git
    if not no_git and git_available:
        tracker.add("git", "Initialize git repository")
        tracker.start("git")
        if is_git_repo(project_path):
            # Already inside a repository -- nothing to do.
            tracker.complete("git", "existing repo")
        else:
            if init_git_repo(project_path, quiet=True):
                tracker.complete("git", "initialized")
            else:
                # Init failure is reported but does not abort the command.
                tracker.error("git", "failed")

    tracker.add("finalize", "Finalize")
    tracker.complete("finalize", "project ready")

    # Configure global permissions for read-only commands
    console.print()  # Add spacing
    configure_global_permissions()

    # Show final tree
    # NOTE(review): this Live context exits immediately after attaching
    # the refresh callback, leaving the callback bound to a stopped Live
    # display; the tree is actually rendered by the console.print below.
    # Confirm whether the Live wrapper is still intentional.
    with Live(tracker.render(), console=console, transient=True) as live:
        tracker.attach_refresh(lambda: live.update(tracker.render()))

    console.print(tracker.render())
    console.print("\n[bold green]✅ Project ready![/bold green]")

    # Next steps
    steps_lines = []
    if not use_current_dir:
        steps_lines.append(f"1. Go to the project folder: [cyan]cd {project_name}[/cyan]")
        step_num = 2
    else:
        steps_lines.append("1. You're already in the project directory!")
        step_num = 2

    steps_lines.append(f"{step_num}. Start using MAP commands with Claude Code:")
    steps_lines.append(" • [cyan]/map-feature[/] - Implement new feature with MAP workflow")
    steps_lines.append(" • [cyan]/map-debug[/] - Debug issue using MAP analysis")
    steps_lines.append(" • [cyan]/map-refactor[/] - Refactor with impact analysis")
    steps_lines.append(" • [cyan]/map-review[/] - Full MAP review of changes")
    steps_lines.append(f"{step_num + 1}. Or use orchestrator directly:")
    steps_lines.append(' [cyan]"Use orchestrator agent to implement [feature]"[/]')

    steps_panel = Panel("\n".join(steps_lines), title="Next Steps", border_style="cyan", padding=(1, 2))
    console.print()
    console.print(steps_panel)
|
|
1763
|
+
|
|
1764
|
+
|
|
1765
|
+
@app.command()
def check(
    debug: bool = typer.Option(False, "--debug", help="Enable debug logging")
):
    """Check that all required tools are installed."""
    # Optional debug session, mirroring `mapify init --debug`.
    if is_debug_enabled(debug):
        from mapify_cli.workflow_logger import MapWorkflowLogger
        logger = MapWorkflowLogger(Path.cwd(), enabled=True)
        session_log = logger.start_session(task_id=f"mapify_check_{datetime.now().strftime('%Y%m%d_%H%M%S')}")
        console.print(f"[dim]Debug logging enabled: {session_log}[/dim]")
        logger.log_event("command_start", "mapify check", metadata={"debug": debug})
    show_banner()
    console.print("[bold]Checking for installed tools...[/bold]\n")

    tracker = StepTracker("Check Available Tools")

    tools = [
        ("git", "Git version control"),
        ("claude", "Claude Code CLI"),
    ]

    # Register every tool with the tracker up front so the rendered tree
    # shows the full list before probing begins.
    for name, label in tools:
        tracker.add(name, label)

    # Probe each tool and record its availability.
    availability = {}
    for name, _ in tools:
        found = check_tool(name)
        availability[name] = found
        if found:
            tracker.complete(name, "available")
        else:
            tracker.error(name, "not found")

    console.print(tracker.render())
    console.print()

    if all(availability.values()):
        console.print("[bold green]All tools are installed! MAP Framework is ready to use.[/bold green]")
    else:
        # Point the user at install instructions for whatever is missing.
        console.print("[yellow]Some tools are missing:[/yellow]")
        if not availability.get("git"):
            console.print(" • Install git: https://git-scm.com/downloads")
        if not availability.get("claude"):
            console.print(" • Install Claude Code: https://docs.anthropic.com/en/docs/claude-code/setup")
|
|
1812
|
+
|
|
1813
|
+
|
|
1814
|
+
@app.command()
def upgrade():
    """Upgrade MAP agents to the latest version."""
    show_banner()
    console.print("[cyan]Checking for updates...[/cyan]")

    # Placeholder implementation. A complete version would:
    #   1. fetch the latest release from GitHub,
    #   2. compare it with the installed version, and
    #   3. update the agents when a newer release exists.
    console.print("[yellow]Upgrade feature coming soon![/yellow]")
    console.print("For now, run: [cyan]mapify init . --force[/cyan] to update agents")
|
|
1827
|
+
|
|
1828
|
+
|
|
1829
|
+
# Recitation commands
|
|
1830
|
+
|
|
1831
|
+
@recitation_app.command("create")
def recitation_create(
    task_id: str,
    goal: str,
    subtasks_json: str,
    force: bool = typer.Option(False, "--force", help="Overwrite existing plan")
):
    """Create a new task execution plan"""
    from mapify_cli.recitation_manager import RecitationManager

    manager = RecitationManager(Path.cwd())
    try:
        parsed_subtasks = json.loads(subtasks_json)
        plan = manager.create_plan(task_id, goal, parsed_subtasks, force=force)
        payload = {
            "status": "success",
            "message": "Plan created",
            "plan_file": str(manager.plan_file),
            "subtasks_count": len(plan.subtasks),
        }
        console.print_json(data=payload)
    except (ValueError, json.JSONDecodeError) as exc:
        # Covers malformed JSON and domain errors raised by create_plan.
        console.print_json(data={"status": "error", "message": str(exc)})
        raise typer.Exit(1)
|
|
1849
|
+
|
|
1850
|
+
@recitation_app.command("update")
def recitation_update(
    subtask_id: int,
    status: str,
    error: Optional[str] = typer.Argument(None)
):
    """Update subtask status"""
    from mapify_cli.recitation_manager import RecitationManager

    manager = RecitationManager(Path.cwd())
    try:
        plan = manager.update_subtask_status(subtask_id, status, error)
        payload = {
            "status": "success",
            "message": f"Subtask {subtask_id} updated to {status}",
            "current_subtask": plan.current_subtask_id,
            "updated_at": plan.updated_at,
        }
        console.print_json(data=payload)
    except Exception as exc:
        # Any failure is reported as a JSON error payload with exit code 1.
        console.print_json(data={"status": "error", "message": str(exc)})
        raise typer.Exit(1)
|
|
1866
|
+
|
|
1867
|
+
@recitation_app.command("get-context")
def recitation_get_context():
    """Get current plan context as markdown"""
    from mapify_cli.recitation_manager import RecitationManager

    markdown = RecitationManager(Path.cwd()).get_current_context()
    # Guard clause: an empty context means there is no active plan.
    if not markdown:
        console.print("# No active plan\n\nNo recitation plan is currently active.")
        raise typer.Exit(1)
    console.print(markdown)
|
|
1878
|
+
|
|
1879
|
+
@recitation_app.command("stats")
def recitation_stats():
    """Show plan statistics"""
    from mapify_cli.recitation_manager import RecitationManager

    statistics = RecitationManager(Path.cwd()).get_statistics()
    # Guard clause: no active plan -> error payload and exit code 1.
    if not statistics:
        console.print_json(data={"status": "error", "message": "No active plan"})
        raise typer.Exit(1)
    console.print_json(data=statistics)
|
|
1890
|
+
|
|
1891
|
+
@recitation_app.command("clear")
def recitation_clear():
    """Clear active plan"""
    from mapify_cli.recitation_manager import RecitationManager

    # Delegate to the manager, then report success as JSON.
    RecitationManager(Path.cwd()).clear_plan()
    console.print_json(data={"status": "success", "message": "Plan cleared"})
|
|
1898
|
+
|
|
1899
|
+
# Playbook commands
|
|
1900
|
+
|
|
1901
|
+
@playbook_app.command("stats")
def playbook_stats():
    """Show playbook statistics.

    Emits a JSON object with the total bullet count, the number of
    sections, and the playbook metadata. Exits with status 1 (and a
    JSON error payload) when the playbook is missing or unreadable.
    """
    playbook_path = Path.cwd() / ".claude" / "playbook.json"
    if not playbook_path.exists():
        console.print_json(data={"error": "Playbook not found"})
        raise typer.Exit(1)
    try:
        playbook = json.loads(playbook_path.read_text())
    except (OSError, json.JSONDecodeError) as e:
        # A corrupt or unreadable playbook should produce a clean JSON
        # error (like the missing-file case), not a raw traceback.
        console.print_json(data={"error": f"Playbook unreadable: {e}"})
        raise typer.Exit(1)
    sections = playbook.get("sections", {})
    # .get("bullets", []) tolerates sections without a bullets list
    # (section["bullets"] previously raised KeyError on such sections).
    total = sum(len(section.get("bullets", [])) for section in sections.values())
    stats = {"total_bullets": total, "sections": len(sections), "metadata": playbook.get("metadata", {})}
    console.print_json(data=stats)
|
|
1912
|
+
|
|
1913
|
+
@playbook_app.command("search")
def playbook_search(query: str, top_k: int = typer.Option(5, help="Number of results")):
    """Search playbook for relevant patterns.

    Prints a JSON payload with up to *top_k* matching bullets, or a
    plain message when the playbook is absent or nothing matches.
    """
    from mapify_cli.playbook_manager import PlaybookManager
    playbook_path = Path.cwd() / ".claude" / "playbook.json"
    if not playbook_path.exists():
        console.print("No patterns found (playbook not initialized)")
        return
    manager = PlaybookManager(playbook_path)
    results = manager.get_relevant_bullets(query, limit=top_k)
    if not results:
        console.print("No patterns found matching your query")
        return

    def _preview(bullet):
        # Guard against bullets with a missing/None "content" key
        # (b.get("content")[:100] previously raised TypeError on None),
        # and only append an ellipsis when text was actually truncated.
        text = bullet.get("content") or ""
        return (text[:100] + "...") if len(text) > 100 else text

    console.print_json(data={
        "query": query,
        "count": len(results),
        "results": [{"id": b.get("id"), "content": _preview(b)} for b in results],
    })
|
|
1927
|
+
|
|
1928
|
+
@playbook_app.command("sync")
def playbook_sync(threshold: int = typer.Option(5, help="Minimum helpful count")):
    """Show high-quality patterns ready for cross-project sync"""
    from mapify_cli.playbook_manager import PlaybookManager

    playbook_file = Path.cwd() / ".claude" / "playbook.json"
    # Guard clause: a missing playbook is an error for this command.
    if not playbook_file.exists():
        console.print_json(data={"status": "error", "message": "Playbook not found"})
        raise typer.Exit(1)

    bullets = PlaybookManager(playbook_file).get_bullets_for_sync(threshold=threshold)
    summary = [{"id": b.get("id"), "helpful_count": b.get("helpful_count")} for b in bullets]
    console.print_json(data={"threshold": threshold, "count": len(bullets), "patterns": summary})
|
|
1939
|
+
|
|
1940
|
+
|
|
1941
|
+
def main():
    """Entry point for the `mapify` console script: run the Typer app."""
    app()
|
|
1943
|
+
|
|
1944
|
+
|
|
1945
|
+
# Support direct execution of the module (e.g. `python __init__.py`).
if __name__ == "__main__":
    main()
|