livepilot 1.9.23 → 1.9.24

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/AGENTS.md +2 -2
  3. package/CHANGELOG.md +46 -0
  4. package/README.md +94 -0
  5. package/livepilot/.Codex-plugin/plugin.json +1 -1
  6. package/livepilot/.claude-plugin/plugin.json +1 -1
  7. package/livepilot/skills/livepilot-core/references/overview.md +1 -1
  8. package/livepilot/skills/livepilot-release/SKILL.md +14 -0
  9. package/livepilot.mcpb +0 -0
  10. package/manifest.json +1 -1
  11. package/mcp_server/__init__.py +1 -1
  12. package/mcp_server/hook_hunter/analyzer.py +23 -0
  13. package/mcp_server/hook_hunter/models.py +1 -0
  14. package/mcp_server/hook_hunter/tools.py +4 -2
  15. package/mcp_server/memory/taste_graph.py +68 -1
  16. package/mcp_server/memory/tools.py +15 -4
  17. package/mcp_server/musical_intelligence/detectors.py +14 -1
  18. package/mcp_server/musical_intelligence/tools.py +11 -8
  19. package/mcp_server/persistence/__init__.py +1 -0
  20. package/mcp_server/persistence/base_store.py +82 -0
  21. package/mcp_server/persistence/project_store.py +106 -0
  22. package/mcp_server/persistence/taste_store.py +122 -0
  23. package/mcp_server/preview_studio/models.py +1 -0
  24. package/mcp_server/preview_studio/tools.py +56 -13
  25. package/mcp_server/runtime/capability.py +66 -0
  26. package/mcp_server/runtime/capability_probe.py +118 -0
  27. package/mcp_server/runtime/execution_router.py +139 -0
  28. package/mcp_server/runtime/remote_commands.py +82 -0
  29. package/mcp_server/semantic_moves/mix_moves.py +41 -41
  30. package/mcp_server/semantic_moves/performance_moves.py +13 -13
  31. package/mcp_server/semantic_moves/sound_design_moves.py +15 -15
  32. package/mcp_server/semantic_moves/tools.py +18 -17
  33. package/mcp_server/semantic_moves/transition_moves.py +16 -16
  34. package/mcp_server/services/__init__.py +1 -0
  35. package/mcp_server/services/motif_service.py +67 -0
  36. package/mcp_server/session_continuity/tracker.py +29 -1
  37. package/mcp_server/song_brain/builder.py +28 -1
  38. package/mcp_server/song_brain/models.py +4 -0
  39. package/mcp_server/song_brain/tools.py +20 -2
  40. package/mcp_server/wonder_mode/tools.py +6 -1
  41. package/package.json +1 -1
  42. package/remote_script/LivePilot/__init__.py +1 -1
  43. package/scripts/sync_metadata.py +132 -0
@@ -0,0 +1,106 @@
1
+ """Per-project persistent state — threads, turns, Wonder outcomes.
2
+
3
+ Stores session continuity data scoped to a project identity.
4
+ Located at ~/.livepilot/projects/<hash>/state.json.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import hashlib
10
+ import time
11
+ from pathlib import Path
12
+ from typing import Optional
13
+
14
+ from .base_store import PersistentJsonStore
15
+
16
+
17
+ _PROJECTS_DIR = Path.home() / ".livepilot" / "projects"
18
+ _MAX_TURNS = 50
19
+ _MAX_WONDER_OUTCOMES = 10
20
+
21
+
22
def project_hash(session_info: dict) -> str:
    """Derive a short, stable fingerprint for a project from session info.

    The seed combines tempo, track count, and sorted track names. This is
    imperfect, but stable enough to key per-song state within a session.
    """
    bpm = session_info.get("tempo", 120.0)
    track_list = session_info.get("tracks", [])
    # Only dict entries contribute names; malformed entries still count
    # toward the track total (same as the original seed layout).
    names = [entry.get("name", "") for entry in track_list if isinstance(entry, dict)]
    names.sort()
    seed = f"{bpm:.1f}|{len(track_list)}|" + "|".join(names)
    return hashlib.sha256(seed.encode()).hexdigest()[:12]
33
+
34
+
35
class ProjectStore:
    """Persistent per-project state (threads, turns, Wonder outcomes).

    Backed by a PersistentJsonStore at <base>/<project_id>/state.json.
    Documents are versioned; any payload that is not version 1 is replaced
    by the default empty shape. Every mutator stamps ``last_updated_ms``
    so callers can tell when the project was last touched (previously only
    ``save_turn`` did, which made the timestamp unreliable).
    """

    def __init__(self, project_id: str, base_dir: Optional[Path] = None):
        """Open (lazily create) the state file for *project_id*.

        ``base_dir`` overrides ~/.livepilot/projects — mainly for tests.
        """
        base = base_dir or _PROJECTS_DIR
        self._store = PersistentJsonStore(base / project_id / "state.json")
        self._project_id = project_id

    @property
    def project_id(self) -> str:
        return self._project_id

    def get_all(self) -> dict:
        """Return the full state document (defaults if missing/unversioned)."""
        data = self._store.read()
        return data if data.get("version") == 1 else self._default()

    def save_thread(self, thread: dict) -> None:
        """Save or update a creative thread, matched by thread_id."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            threads = data.setdefault("threads", [])
            # Replace an existing thread in place, else append.
            for i, existing in enumerate(threads):
                if existing.get("thread_id") == thread.get("thread_id"):
                    threads[i] = thread
                    break
            else:
                threads.append(thread)
            # Stamp on every write, consistent with save_turn.
            data["last_updated_ms"] = int(time.time() * 1000)
            return data
        self._store.update(_update)

    def save_turn(self, turn: dict) -> None:
        """Save a turn resolution (capped at _MAX_TURNS, oldest dropped)."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            turns = data.setdefault("turns", [])
            turns.append(turn)
            if len(turns) > _MAX_TURNS:
                data["turns"] = turns[-_MAX_TURNS:]
            data["last_updated_ms"] = int(time.time() * 1000)
            return data
        self._store.update(_update)

    def save_wonder_outcome(self, outcome: dict) -> None:
        """Save a Wonder session outcome (capped at _MAX_WONDER_OUTCOMES)."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            outcomes = data.setdefault("wonder_outcomes", [])
            outcomes.append(outcome)
            if len(outcomes) > _MAX_WONDER_OUTCOMES:
                data["wonder_outcomes"] = outcomes[-_MAX_WONDER_OUTCOMES:]
            # Stamp on every write, consistent with save_turn.
            data["last_updated_ms"] = int(time.time() * 1000)
            return data
        self._store.update(_update)

    def get_threads(self) -> list[dict]:
        """All saved creative threads."""
        return self.get_all().get("threads", [])

    def get_turns(self) -> list[dict]:
        """All saved turn resolutions (most recent last)."""
        return self.get_all().get("turns", [])

    def get_wonder_outcomes(self) -> list[dict]:
        """All saved Wonder outcomes (most recent last)."""
        return self.get_all().get("wonder_outcomes", [])

    @staticmethod
    def _default() -> dict:
        # Canonical empty version-1 document shape.
        return {
            "version": 1,
            "threads": [],
            "turns": [],
            "wonder_outcomes": [],
            "last_updated_ms": 0,
        }
@@ -0,0 +1,122 @@
1
+ """Persistent taste state — survives server restart.
2
+
3
+ Stores move outcomes, novelty preference, device affinity,
4
+ anti-preferences, and dimension weights. Located at
5
+ ~/.livepilot/taste.json.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import time
11
+ from pathlib import Path
12
+ from typing import Optional
13
+
14
+ from .base_store import PersistentJsonStore
15
+
16
+
17
+ _DEFAULT_PATH = Path.home() / ".livepilot" / "taste.json"
18
+
19
+
20
class PersistentTasteStore:
    """Persistent backing for TasteGraph data (~/.livepilot/taste.json).

    Holds move outcomes, novelty band, device affinities, anti-preferences,
    and dimension weights. Documents are versioned; any payload that is not
    version 1 is replaced by the default shape. Every mutator stamps
    ``last_updated_ms`` via ``_touch`` so staleness can be detected
    (previously only ``record_move_outcome`` stamped it).
    """

    def __init__(self, path: Optional[Path] = None):
        # path override exists primarily for tests.
        self._store = PersistentJsonStore(path or _DEFAULT_PATH)

    def get_all(self) -> dict:
        """Get all persisted taste data (defaults if missing/unversioned)."""
        data = self._store.read()
        return data if data.get("version") == 1 else self._default()

    @staticmethod
    def _touch(data: dict) -> None:
        # Stamp the write time; shared by all mutators for consistency.
        data["last_updated_ms"] = int(time.time() * 1000)

    def record_move_outcome(
        self, move_id: str, family: str, kept: bool, score: float = 0.0,
    ) -> None:
        """Persist a move outcome (kept vs. undone) keyed by move_id."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            outcomes = data.setdefault("move_outcomes", {})
            entry = outcomes.setdefault(move_id, {
                "family": family, "kept_count": 0, "undone_count": 0,
            })
            entry["family"] = family  # refresh in case the family changed
            if kept:
                entry["kept_count"] = entry.get("kept_count", 0) + 1
            else:
                entry["undone_count"] = entry.get("undone_count", 0) + 1
            data["evidence_count"] = data.get("evidence_count", 0) + 1
            self._touch(data)
            return data
        self._store.update(_update)

    def update_novelty(self, chose_bold: bool) -> None:
        """Nudge the novelty band up (bold choice) or down, clamped to [0, 1]."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            band = data.get("novelty_band", 0.5)
            if chose_bold:
                data["novelty_band"] = min(1.0, band + 0.05)
            else:
                data["novelty_band"] = max(0.0, band - 0.05)
            data["evidence_count"] = data.get("evidence_count", 0) + 1
            self._touch(data)  # previously missing — keep timestamps consistent
            return data
        self._store.update(_update)

    def record_device_use(self, device_name: str, positive: bool = True) -> None:
        """Persist device affinity: slow gain on use, faster loss on rejection."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            affinities = data.setdefault("device_affinities", {})
            entry = affinities.setdefault(device_name, {
                "affinity": 0.0, "use_count": 0,
            })
            entry["use_count"] = entry.get("use_count", 0) + 1
            aff = entry.get("affinity", 0.0)
            if positive:
                entry["affinity"] = min(1.0, aff + 0.05)
            else:
                entry["affinity"] = max(-1.0, aff - 0.08)
            data["evidence_count"] = data.get("evidence_count", 0) + 1
            self._touch(data)  # previously missing — keep timestamps consistent
            return data
        self._store.update(_update)

    def record_anti_preference(self, dimension: str, direction: str) -> None:
        """Persist an anti-preference; repeats strengthen it (capped at 1.0)."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            antis = data.setdefault("anti_preferences", [])
            existing = next(
                (a for a in antis if a["dimension"] == dimension and a["direction"] == direction),
                None,
            )
            if existing:
                existing["count"] = existing.get("count", 0) + 1
                existing["strength"] = min(1.0, existing["count"] * 0.2)
            else:
                antis.append({
                    "dimension": dimension, "direction": direction,
                    "count": 1, "strength": 0.2,
                })
            data["evidence_count"] = data.get("evidence_count", 0) + 1
            self._touch(data)  # previously missing — keep timestamps consistent
            return data
        self._store.update(_update)

    def record_dimension_weight(self, dimension: str, value: float) -> None:
        """Persist a dimension weight update (does not count as evidence)."""
        def _update(data: dict) -> dict:
            data = data if data.get("version") == 1 else self._default()
            data.setdefault("dimension_weights", {})[dimension] = round(value, 3)
            self._touch(data)  # previously missing — keep timestamps consistent
            return data
        self._store.update(_update)

    @staticmethod
    def _default() -> dict:
        # Canonical empty version-1 document shape.
        return {
            "version": 1,
            "move_outcomes": {},
            "novelty_band": 0.5,
            "device_affinities": {},
            "anti_preferences": [],
            "dimension_weights": {},
            "evidence_count": 0,
            "last_updated_ms": 0,
        }
@@ -35,6 +35,7 @@ class PreviewVariant:
35
35
 
36
36
  # State
37
37
  status: str = "pending" # pending, rendered, committed, discarded
38
+ preview_mode: str = "" # audible_preview, metadata_only_preview, analytical_preview
38
39
  created_at_ms: int = 0
39
40
 
40
41
  def to_dict(self) -> dict:
@@ -154,15 +154,20 @@ def create_preview_set(
154
154
  import sys
155
155
  print(f"LivePilot: SongBrain unavailable in preview_studio: {_e}", file=sys.stderr)
156
156
 
157
- # Get taste graph — use session-scoped stores, extract numeric weights
157
+ # Get taste graph — session + persistent stores
158
158
  taste_graph: dict = {}
159
159
  try:
160
160
  from ..memory.taste_graph import build_taste_graph
161
161
  from ..memory.taste_memory import TasteMemoryStore
162
162
  from ..memory.anti_memory import AntiMemoryStore
163
+ from ..persistence.taste_store import PersistentTasteStore
163
164
  taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
164
165
  anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
165
- graph = build_taste_graph(taste_store=taste_store, anti_store=anti_store)
166
+ persistent = ctx.lifespan_context.setdefault("persistent_taste", PersistentTasteStore())
167
+ graph = build_taste_graph(
168
+ taste_store=taste_store, anti_store=anti_store,
169
+ persistent_store=persistent,
170
+ )
166
171
  taste_graph = graph.to_dict()
167
172
  except Exception:
168
173
  pass
@@ -269,14 +274,19 @@ def commit_preview_variant(
269
274
  except Exception:
270
275
  pass
271
276
 
272
- # Update taste graph
277
+ # Update taste graph (with persistent backing)
273
278
  try:
274
279
  from ..memory.taste_graph import build_taste_graph
275
280
  from ..memory.taste_memory import TasteMemoryStore
276
281
  from ..memory.anti_memory import AntiMemoryStore
282
+ from ..persistence.taste_store import PersistentTasteStore
277
283
  taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
278
284
  anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
279
- graph = build_taste_graph(taste_store=taste_store, anti_store=anti_store)
285
+ persistent = ctx.lifespan_context.setdefault("persistent_taste", PersistentTasteStore())
286
+ graph = build_taste_graph(
287
+ taste_store=taste_store, anti_store=anti_store,
288
+ persistent_store=persistent,
289
+ )
280
290
  # Look up family from WonderSession's variant list
281
291
  family = ""
282
292
  for v in ws.variants:
@@ -349,18 +359,17 @@ def render_preview_variant(
349
359
  # compiled_plan may be a list (from semantic moves) or a dict with "steps" key
350
360
  plan = variant.compiled_plan
351
361
  steps = plan if isinstance(plan, list) else plan.get("steps", [])
362
+
363
+ from ..runtime.execution_router import execute_plan_steps
364
+
352
365
  applied_count = 0
353
366
  try:
354
367
  # Capture before state
355
368
  before_info = ableton.send_command("get_session_info", {})
356
369
 
357
- # Apply the plan steps, tracking how many succeed
358
- for step in steps:
359
- cmd = step.get("tool") or step.get("command")
360
- args = step.get("params") or step.get("args", {})
361
- if cmd:
362
- ableton.send_command(cmd, args)
363
- applied_count += 1
370
+ # Execute through unified router
371
+ exec_results = execute_plan_steps(steps, ableton=ableton, ctx=ctx)
372
+ applied_count = sum(1 for r in exec_results if r.ok)
364
373
 
365
374
  # Capture after state
366
375
  after_info = ableton.send_command("get_session_info", {})
@@ -374,29 +383,63 @@ def render_preview_variant(
374
383
  except Exception:
375
384
  break
376
385
 
386
+ # Determine preview mode: audible (M4L available) or metadata-only
387
+ preview_mode = "metadata_only_preview"
388
+ spectral_before = None
389
+ spectral_after = None
390
+
391
+ # Try audible preview — capture spectrum via M4L spectral cache
392
+ try:
393
+ from ..m4l_bridge import SpectralCache
394
+ cache = ctx.lifespan_context.get("spectral_cache")
395
+ if cache and isinstance(cache, SpectralCache) and cache.has_data():
396
+ spectral_before = cache.get_snapshot()
397
+ # Play for the requested bar count
398
+ tempo = before_info.get("tempo", 120)
399
+ play_seconds = bars * (60.0 / tempo) * 4 # bars * beat_duration * 4 beats
400
+ ableton.send_command("start_playback", {})
401
+ import time as _time
402
+ _time.sleep(min(play_seconds, 8.0)) # cap at 8 seconds
403
+ spectral_after = cache.get_snapshot()
404
+ ableton.send_command("stop_playback", {})
405
+ preview_mode = "audible_preview"
406
+ except Exception:
407
+ pass # fall back to metadata_only
408
+
377
409
  variant.status = "rendered"
410
+ variant.preview_mode = preview_mode
378
411
  variant.render_ref = f"render_{variant_id}_{bars}bars"
379
412
 
380
- return {
413
+ result = {
381
414
  "rendered": True,
382
415
  "variant_id": variant_id,
383
416
  "label": variant.label,
384
417
  "bars": bars,
418
+ "preview_mode": preview_mode,
385
419
  "before_summary": {"tempo": before_info.get("tempo"), "tracks": before_info.get("track_count")},
386
420
  "after_summary": {"tempo": after_info.get("tempo"), "tracks": after_info.get("track_count")},
387
421
  "identity_effect": variant.identity_effect,
388
422
  "what_changed": variant.what_changed,
389
423
  "what_preserved": variant.what_preserved,
390
424
  }
425
+
426
+ if spectral_before and spectral_after:
427
+ result["spectral_comparison"] = {
428
+ "before": spectral_before,
429
+ "after": spectral_after,
430
+ }
431
+
432
+ return result
391
433
  else:
392
434
  # Analytical preview — no live render
393
435
  variant.status = "rendered"
436
+ variant.preview_mode = "analytical_preview"
394
437
  return {
395
438
  "rendered": True,
396
439
  "variant_id": variant_id,
397
440
  "label": variant.label,
398
441
  "bars": bars,
399
- "mode": "analytical",
442
+ "preview_mode": "analytical_preview",
400
443
  "intent": variant.intent,
401
444
  "novelty_level": variant.novelty_level,
402
445
  "identity_effect": variant.identity_effect,
@@ -0,0 +1,66 @@
1
+ """Capability and degradation reporting for advanced tools.
2
+
3
+ Every advanced tool reports its operational state so callers know
4
+ what data was available, what was missing, and how much to trust
5
+ the result.
6
+
7
+ Levels:
8
+ full — all required data sources available
9
+ fallback — some data missing, result is degraded but useful
10
+ analytical_only — no live data, pure heuristic
11
+ unavailable — cannot operate at all
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import dataclass, field
17
+
18
+
19
@dataclass
class CapabilityReport:
    """Describes how fully an advanced tool was able to operate."""

    # One of: full, fallback, analytical_only, unavailable.
    level: str = "full"
    confidence: float = 1.0
    available_sources: list[str] = field(default_factory=list)
    missing_sources: list[str] = field(default_factory=list)
    fallback_used: str = ""
    reason: str = ""

    def to_dict(self) -> dict:
        """Serialize for tool responses; optional fields appear only when set."""
        payload: dict = {
            "capability": self.level,
            "confidence": round(self.confidence, 2),
        }
        optional_fields = (
            ("missing", self.missing_sources),
            ("fallback", self.fallback_used),
            ("reason", self.reason),
        )
        for key, value in optional_fields:
            if value:
                payload[key] = value
        return payload
39
+
40
+
41
def build_capability(
    required: list[str],
    available: dict[str, bool],
) -> CapabilityReport:
    """Summarize required vs. actually-available data sources.

    Full when everything is present, analytical_only when nothing is,
    otherwise a fallback report whose confidence scales with coverage.
    """
    present: list[str] = []
    missing: list[str] = []
    for source in required:
        (present if available.get(source, False) else missing).append(source)

    if not missing:
        return CapabilityReport(
            level="full", confidence=1.0, available_sources=present,
        )

    if not present:
        return CapabilityReport(
            level="analytical_only", confidence=0.2,
            available_sources=[], missing_sources=missing,
            reason="No required data sources available",
        )

    coverage = len(present) / len(required)
    return CapabilityReport(
        level="fallback", confidence=round(coverage * 0.8, 2),
        available_sources=present, missing_sources=missing,
        fallback_used="degraded inference from partial data",
    )
@@ -0,0 +1,118 @@
1
+ """Runtime capability probe — detects what's available at startup.
2
+
3
+ Reports capability tiers: Core Control, Analyzer-Enhanced,
4
+ Offline Analysis, Creative Intelligence, Persistent Memory.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import os
10
+ from pathlib import Path
11
+ from typing import Any
12
+
13
+
14
def probe_capabilities(
    ableton: Any = None,
    ctx: Any = None,
) -> dict:
    """Probe runtime capabilities and return a structured report.

    Can be called at startup or on demand via --doctor.
    """
    report: dict[str, dict] = {}

    # 1. Ableton reachability — a successful ping means the TCP link is live.
    live_connection = False
    if ableton is not None:
        try:
            live_connection = ableton.send_command("ping") is not None
        except Exception:
            live_connection = False
    report["ableton"] = {
        "status": "ok" if live_connection else "unavailable",
        "detail": "TCP 9878 connection active" if live_connection else "Not connected",
    }

    # 2. Remote Script parity — number of registered remote commands.
    from .remote_commands import REMOTE_COMMANDS
    command_total = len(REMOTE_COMMANDS)
    report["remote_script"] = {
        "status": "ok",
        "command_count": command_total,
        "detail": f"{command_total} registered commands",
    }

    # 3. M4L bridge presence in the lifespan context.
    bridge_present = False
    if ctx is not None and hasattr(ctx, "lifespan_context"):
        bridge_present = getattr(ctx, "lifespan_context", {}).get("m4l_bridge") is not None
    report["m4l_bridge"] = {
        "status": "ok" if bridge_present else "unavailable",
        "detail": "UDP 9880 / OSC 9881 active" if bridge_present else "Not connected — 30 analyzer tools unavailable",
    }

    # 4. Offline perception depends on numpy being importable.
    try:
        import numpy  # noqa: F401
        numpy_present = True
    except ImportError:
        numpy_present = False
    report["offline_perception"] = {
        "status": "ok" if numpy_present else "degraded",
        "detail": "numpy available" if numpy_present else "numpy not installed — offline analysis unavailable",
    }

    # 5. Persistence — ~/.livepilot must exist and be writable.
    state_dir = Path.home() / ".livepilot"
    writable = state_dir.exists() and os.access(state_dir, os.W_OK)
    report["persistence"] = {
        "status": "ok" if writable else "unavailable",
        "detail": f"~/.livepilot/ {'writable' if writable else 'not found'}",
        "taste_store": (state_dir / "taste.json").exists(),
        "technique_store": (state_dir / "memory" / "techniques.json").exists(),
    }

    # 6. Active tier — highest tier the current connections support.
    if live_connection and bridge_present:
        active_tier = "analyzer_enhanced"
    elif live_connection:
        active_tier = "core_control"
    else:
        active_tier = "creative_intelligence"  # heuristic-only, no Ableton connection

    report["tier"] = {
        "active": active_tier,
        "levels": {
            "core_control": live_connection,
            "analyzer_enhanced": live_connection and bridge_present,
            "offline_analysis": numpy_present,
            "creative_intelligence": True,  # always available
            "persistent_memory": writable,
        },
    }

    return report
99
+
100
+
101
def format_doctor_report(report: dict) -> str:
    """Format capability report for --doctor output."""
    status_icons = {"ok": " PASS", "unavailable": " FAIL", "degraded": " WARN"}
    sections = ["ableton", "remote_script", "m4l_bridge", "offline_perception", "persistence"]

    out = ["LivePilot Capability Report", "=" * 40]
    for section in sections:
        entry = report.get(section, {})
        # Unknown statuses render as " ????" so a malformed report is visible.
        badge = status_icons.get(entry.get("status", "unknown"), " ????")
        out.append(f"{badge} {section}: {entry.get('detail', '')}")

    active = report.get("tier", {}).get("active", "unknown")
    out.extend(["", f"Active tier: {active}"])
    return "\n".join(out)