livepilot 1.10.0 → 1.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +214 -0
  4. package/CONTRIBUTING.md +2 -2
  5. package/LICENSE +62 -21
  6. package/README.md +264 -286
  7. package/livepilot/.Codex-plugin/plugin.json +2 -2
  8. package/livepilot/.claude-plugin/plugin.json +2 -2
  9. package/livepilot/skills/livepilot-arrangement/SKILL.md +18 -1
  10. package/livepilot/skills/livepilot-core/SKILL.md +5 -5
  11. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  12. package/livepilot/skills/livepilot-devices/SKILL.md +23 -2
  13. package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +1 -1
  14. package/livepilot/skills/livepilot-release/SKILL.md +21 -17
  15. package/livepilot/skills/livepilot-sample-engine/SKILL.md +2 -1
  16. package/livepilot/skills/livepilot-wonder/SKILL.md +8 -6
  17. package/livepilot.mcpb +0 -0
  18. package/m4l_device/LivePilot_Analyzer.adv +0 -0
  19. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  20. package/m4l_device/livepilot_bridge.js +1 -1
  21. package/manifest.json +4 -4
  22. package/mcp_server/__init__.py +1 -1
  23. package/mcp_server/composer/engine.py +249 -169
  24. package/mcp_server/composer/sample_resolver.py +153 -0
  25. package/mcp_server/composer/tools.py +97 -87
  26. package/mcp_server/memory/taste_accessors.py +47 -0
  27. package/mcp_server/preview_studio/engine.py +9 -2
  28. package/mcp_server/preview_studio/tools.py +78 -35
  29. package/mcp_server/project_brain/tools.py +34 -0
  30. package/mcp_server/runtime/execution_router.py +180 -38
  31. package/mcp_server/runtime/mcp_dispatch.py +46 -0
  32. package/mcp_server/runtime/remote_commands.py +4 -1
  33. package/mcp_server/runtime/tools.py +55 -32
  34. package/mcp_server/sample_engine/moves.py +12 -12
  35. package/mcp_server/sample_engine/slice_workflow.py +190 -0
  36. package/mcp_server/sample_engine/tools.py +104 -1
  37. package/mcp_server/semantic_moves/device_creation_moves.py +7 -7
  38. package/mcp_server/semantic_moves/mix_moves.py +8 -8
  39. package/mcp_server/semantic_moves/models.py +7 -7
  40. package/mcp_server/semantic_moves/performance_moves.py +4 -4
  41. package/mcp_server/semantic_moves/sample_compilers.py +14 -9
  42. package/mcp_server/semantic_moves/sound_design_moves.py +4 -4
  43. package/mcp_server/semantic_moves/tools.py +63 -10
  44. package/mcp_server/semantic_moves/transition_moves.py +4 -4
  45. package/mcp_server/server.py +20 -1
  46. package/mcp_server/session_continuity/tracker.py +4 -1
  47. package/mcp_server/tools/_conductor.py +16 -0
  48. package/mcp_server/tools/_planner_engine.py +24 -0
  49. package/mcp_server/tools/analyzer.py +2 -0
  50. package/mcp_server/tools/planner.py +3 -0
  51. package/mcp_server/wonder_mode/engine.py +59 -13
  52. package/mcp_server/wonder_mode/tools.py +33 -1
  53. package/package.json +8 -8
  54. package/remote_script/LivePilot/__init__.py +1 -1
  55. package/remote_script/LivePilot/devices.py +10 -0
@@ -0,0 +1,153 @@
1
+ """Local-first sample resolution for composer plans.
2
+
3
+ Moves sample resolution from execution time (where the old pseudo-tool
4
+ _agent_pick_best_sample was supposed to "figure it out") to plan time.
5
+
6
+ Async because splice_remote downloads real samples over gRPC. Filesystem-only
7
+ callers still work synchronously from an async perspective — the function
8
+ only awaits when it actually has to hit the network.
9
+
10
+ Returns (local_path, source) where source is one of:
11
+ 'filesystem' — hit in a provided search_root directory (no network)
12
+ 'splice_local' — Splice catalog hit that's already downloaded (no credit spend)
13
+ 'splice_remote' — Splice catalog hit that required download (1 credit)
14
+ 'browser' — Ableton browser match with a local path
15
+ 'unresolved' — no match; caller drops the layer from the plan and warns
16
+
17
+ Preference order is fixed: filesystem > splice_local > splice_remote > browser.
18
+ Filesystem wins even if Splice has a faster hit — local files are free.
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ from pathlib import Path
24
+ from typing import Optional, Tuple
25
+
26
+ from .layer_planner import LayerSpec
27
+
28
+
29
+ _AUDIO_EXTENSIONS = (".wav", ".aif", ".aiff", ".flac")
30
+
31
+
32
+ def _query_tokens(query: str) -> list[str]:
33
+ """Return lowercase query tokens meaningful for matching (len > 2)."""
34
+ return [t.lower() for t in query.split() if len(t) > 2]
35
+
36
+
37
def _iter_candidates(root: Path):
    """Yield every audio file (any _AUDIO_EXTENSIONS suffix) beneath *root*, recursively.

    A missing root yields nothing rather than raising.
    """
    if root.exists():
        for ext in _AUDIO_EXTENSIONS:
            for hit in root.rglob(f"*{ext}"):
                yield hit
43
+
44
+
45
def _filesystem_match(layer: LayerSpec, search_roots: list[Path]) -> Optional[str]:
    """Return the first file under *search_roots* whose filename contains the
    layer's role or any significant query token; None when nothing matches.

    Sync helper — no network, no async needed.
    """
    wanted_tokens = _query_tokens(layer.search_query)
    wanted_role = layer.role.lower()
    for root in search_roots:
        for candidate in _iter_candidates(Path(root)):
            filename = candidate.name.lower()
            role_hit = bool(wanted_role) and wanted_role in filename
            token_hit = any(tok in filename for tok in wanted_tokens)
            if role_hit or token_hit:
                return str(candidate)
    return None
60
+
61
+
62
+ async def _splice_resolve(
63
+ layer: LayerSpec,
64
+ splice_client: object,
65
+ credit_budget: int,
66
+ ) -> Tuple[Optional[str], str]:
67
+ """Query Splice for the layer. Returns (path, source) or (None, 'unresolved').
68
+
69
+ Tries local hits first (free), then remote downloads (1 credit each,
70
+ respecting the hard floor). Stops on first success.
71
+ """
72
+ if splice_client is None or not getattr(splice_client, "connected", False):
73
+ return None, "unresolved"
74
+
75
+ try:
76
+ result = await splice_client.search_samples(
77
+ query=layer.search_query,
78
+ per_page=5,
79
+ )
80
+ except Exception:
81
+ return None, "unresolved"
82
+
83
+ samples = list(result.samples) if result and hasattr(result, "samples") else []
84
+ if not samples:
85
+ return None, "unresolved"
86
+
87
+ # 1. Prefer already-local Splice hits (zero credit spend)
88
+ for sample in samples:
89
+ lp = getattr(sample, "local_path", "") or ""
90
+ if lp and Path(lp).exists():
91
+ return lp, "splice_local"
92
+
93
+ # 2. Remote download — respect the credit hard floor
94
+ for sample in samples:
95
+ if getattr(sample, "local_path", ""):
96
+ continue # already handled above
97
+ file_hash = getattr(sample, "file_hash", "")
98
+ if not file_hash:
99
+ continue
100
+ try:
101
+ can, _remaining = await splice_client.can_afford(1, credit_budget)
102
+ if not can:
103
+ break # credit floor hit — stop trying, don't try next sample
104
+ downloaded = await splice_client.download_sample(file_hash)
105
+ if downloaded and Path(downloaded).exists():
106
+ return downloaded, "splice_remote"
107
+ except Exception:
108
+ continue # try next hit
109
+
110
+ return None, "unresolved"
111
+
112
+
113
async def resolve_sample_for_layer(
    layer: LayerSpec,
    search_roots: Optional[list] = None,
    splice_client: object = None,
    browser_client: object = None,
    credit_budget: int = 1,
) -> Tuple[Optional[str], str]:
    """Resolve a layer's sample to a concrete local file path.

    Preference order: filesystem > splice_local > splice_remote > browser.
    Unresolved layers return (None, 'unresolved'); callers drop them from
    the plan and surface a warning.

    search_roots accepts Path or str entries. Missing dirs are silently
    skipped. None entries are filtered out.
    """
    normalized_roots = [Path(entry) for entry in (search_roots or []) if entry]

    # 1. Filesystem — always try first, no network
    local_hit = _filesystem_match(layer, normalized_roots)
    if local_hit:
        return local_hit, "filesystem"

    # 2 & 3. Splice (local hits + remote download)
    splice_path, splice_source = await _splice_resolve(layer, splice_client, credit_budget)
    if splice_path is not None:
        return splice_path, splice_source

    # 4. Browser (sync, optional)
    if browser_client is not None:
        try:
            search_fn = getattr(browser_client, "search", None)
            results = search_fn(layer.search_query, limit=5) if callable(search_fn) else []
            for entry in results or []:
                candidate = entry.get("file_path") if isinstance(entry, dict) else None
                if candidate and Path(candidate).exists():
                    return candidate, "browser"
        except Exception:
            # Browser lookup is best-effort; fall through to unresolved.
            pass

    return None, "unresolved"
@@ -20,6 +20,61 @@ from .engine import ComposerEngine
20
20
  _engine = ComposerEngine()
21
21
 
22
22
 
23
+ def _get_search_roots(ctx: Context) -> list:
24
+ """Pull sample-search roots from ctx (if the server wired any) plus
25
+ environment fallbacks.
26
+ """
27
+ roots = []
28
+ try:
29
+ cfg = ctx.lifespan_context.get("sample_search_roots") if hasattr(ctx, "lifespan_context") else None
30
+ if cfg:
31
+ roots.extend(cfg)
32
+ except Exception:
33
+ pass
34
+ return roots
35
+
36
+
37
+ async def _credit_safety_prelude(splice_client, max_credits: int) -> tuple[int, int | None, list[str]]:
38
+ """Apply the hard floor / budget trimming rules upfront.
39
+
40
+ Returns (adjusted_max_credits, credits_remaining_or_None, warnings).
41
+ """
42
+ warnings: list[str] = []
43
+ credits_remaining: int | None = None
44
+
45
+ if splice_client is None or not getattr(splice_client, "connected", False):
46
+ warnings.append(
47
+ "Splice not connected. Plan will use browser/filesystem fallback "
48
+ "for sample search."
49
+ )
50
+ return max_credits, None, warnings
51
+
52
+ try:
53
+ info = await splice_client.get_credits()
54
+ credits_remaining = getattr(info, "credits", None)
55
+ except Exception:
56
+ credits_remaining = None
57
+
58
+ if credits_remaining is None:
59
+ return max_credits, None, warnings
60
+
61
+ if credits_remaining <= 5:
62
+ warnings.append(
63
+ f"Splice credits critically low ({credits_remaining}). "
64
+ f"Using downloaded samples only."
65
+ )
66
+ max_credits = 0
67
+ elif max_credits > credits_remaining - 5:
68
+ safe_budget = max(0, credits_remaining - 5)
69
+ warnings.append(
70
+ f"Budget capped at {safe_budget} credits "
71
+ f"(remaining: {credits_remaining}, floor: 5)."
72
+ )
73
+ max_credits = safe_budget
74
+
75
+ return max_credits, credits_remaining, warnings
76
+
77
+
23
78
  @mcp.tool()
24
79
  async def compose(
25
80
  ctx: Context,
@@ -27,61 +82,33 @@ async def compose(
27
82
  max_credits: int = 50,
28
83
  dry_run: bool = False,
29
84
  ) -> dict:
30
- """Create a full multi-layer composition from a text prompt.
85
+ """Plan a full multi-layer composition from a text prompt.
31
86
 
32
- Searches Splice's catalog, selects matching samples with critic scoring,
33
- downloads them, loads into Ableton, applies processing techniques, and
34
- arranges into genre-appropriate sections.
87
+ Parses the prompt into genre/mood/tempo/key, plans layers using role
88
+ templates, and compiles an executable plan of tool calls. Does NOT
89
+ execute — returns the plan for the agent to step through.
35
90
 
36
91
  prompt: "dark minimal techno 128bpm with industrial textures and ghostly vocals"
37
- max_credits: maximum Splice credits to spend (default 50, 0 = use only downloaded)
38
- dry_run: if True, return the plan without executing (same as get_composition_plan)
92
+ max_credits: maximum Splice credits budget for the plan (default 50, 0 = downloaded only)
93
+ dry_run: if True, return the plan without credit checks
39
94
 
40
- Returns a compiled plan with all execution steps. When dry_run is False,
41
- the plan is ready for step-by-step execution by the agent.
95
+ Returns a compiled plan with step-by-step tool calls. The agent
96
+ executes each step by calling the referenced tools in sequence.
42
97
  """
43
- # Parse the prompt into structured intent
44
98
  intent = parse_prompt(prompt)
45
99
 
46
- # Credit safety check
47
- splice_client = None
48
- credits_remaining = None
49
- try:
50
- lifespan = ctx.lifespan_context
51
- if lifespan and "splice" in lifespan:
52
- splice_client = lifespan["splice"]
53
- if splice_client and splice_client.connected:
54
- credits_remaining = await splice_client.get_credits_remaining()
55
- except Exception:
56
- pass
57
-
58
- warnings: list[str] = []
59
-
60
- if credits_remaining is not None:
61
- if credits_remaining <= 5:
62
- warnings.append(
63
- f"Splice credits critically low ({credits_remaining}). "
64
- f"Using downloaded samples only."
65
- )
66
- max_credits = 0
67
- elif max_credits > credits_remaining - 5:
68
- safe_budget = max(0, credits_remaining - 5)
69
- warnings.append(
70
- f"Budget capped at {safe_budget} credits "
71
- f"(remaining: {credits_remaining}, floor: 5)."
72
- )
73
- max_credits = safe_budget
74
-
75
- if splice_client is None or not getattr(splice_client, "connected", False):
76
- warnings.append(
77
- "Splice not connected. Plan will use browser/filesystem fallback "
78
- "for sample search."
79
- )
100
+ splice_client = ctx.lifespan_context.get("splice_client") if hasattr(ctx, "lifespan_context") else None
101
+ search_roots = _get_search_roots(ctx)
80
102
 
81
- # Compose
82
- result = _engine.compose(intent, dry_run=dry_run, max_credits=max_credits)
103
+ max_credits, credits_remaining, warnings = await _credit_safety_prelude(splice_client, max_credits)
83
104
 
84
- # Merge warnings
105
+ result = await _engine.compose(
106
+ intent,
107
+ dry_run=dry_run,
108
+ max_credits=max_credits,
109
+ search_roots=search_roots,
110
+ splice_client=splice_client,
111
+ )
85
112
  result.warnings.extend(warnings)
86
113
 
87
114
  output = result.to_dict()
@@ -101,48 +128,24 @@ async def augment_with_samples(
101
128
  max_credits: int = 10,
102
129
  max_layers: int = 3,
103
130
  ) -> dict:
104
- """Add sample-based layers to the existing session.
131
+ """Plan sample-based layers to add to the existing session.
105
132
 
106
- Analyzes the request, searches Splice for complementary samples,
107
- and creates a plan to add new tracks with appropriate processing.
133
+ Parses the request and builds a plan for new tracks with sample
134
+ search queries, processing techniques, and volume/pan settings.
135
+ Does NOT execute — returns the plan for the agent to step through.
108
136
 
109
137
  request: "add organic textures" or "layer a vocal chop over the verse"
110
- max_credits: maximum Splice credits to spend (default 10)
111
- max_layers: maximum number of new tracks to add (default 3)
138
+ max_credits: maximum Splice credits budget for the plan (default 10)
139
+ max_layers: maximum number of new tracks in the plan (default 3)
112
140
 
113
- Returns a compiled plan for adding new layers to the session.
141
+ Returns a compiled plan with step-by-step tool calls.
114
142
  """
115
- # Credit safety
116
- splice_client = None
117
- credits_remaining = None
118
- try:
119
- lifespan = ctx.lifespan_context
120
- if lifespan and "splice" in lifespan:
121
- splice_client = lifespan["splice"]
122
- if splice_client and splice_client.connected:
123
- credits_remaining = await splice_client.get_credits_remaining()
124
- except Exception:
125
- pass
143
+ splice_client = ctx.lifespan_context.get("splice_client") if hasattr(ctx, "lifespan_context") else None
144
+ search_roots = _get_search_roots(ctx)
126
145
 
127
- warnings: list[str] = []
146
+ max_credits, credits_remaining, warnings = await _credit_safety_prelude(splice_client, max_credits)
128
147
 
129
- if credits_remaining is not None:
130
- if credits_remaining <= 5:
131
- warnings.append(
132
- f"Splice credits critically low ({credits_remaining}). "
133
- f"Using downloaded samples only."
134
- )
135
- max_credits = 0
136
- elif max_credits > credits_remaining - 5:
137
- safe_budget = max(0, credits_remaining - 5)
138
- max_credits = safe_budget
139
-
140
- if splice_client is None or not getattr(splice_client, "connected", False):
141
- warnings.append(
142
- "Splice not connected. Will use browser/filesystem fallback."
143
- )
144
-
145
- # Get current session info for context
148
+ # Pull current session info for tempo context
146
149
  session_context: dict = {}
147
150
  try:
148
151
  ableton = ctx.lifespan_context.get("ableton")
@@ -153,14 +156,14 @@ async def augment_with_samples(
153
156
  except Exception:
154
157
  pass
155
158
 
156
- # Augment
157
- result = _engine.augment(
159
+ result = await _engine.augment(
158
160
  request=request,
159
161
  max_credits=max_credits,
160
162
  max_layers=max_layers,
163
+ search_roots=search_roots,
164
+ splice_client=splice_client,
161
165
  )
162
166
 
163
- # Override tempo from session if available
164
167
  if session_context.get("tempo"):
165
168
  result.intent.tempo = int(session_context["tempo"])
166
169
 
@@ -192,10 +195,17 @@ async def get_composition_plan(
192
195
  prompt: "dark minimal techno 128bpm with industrial textures"
193
196
  """
194
197
  intent = parse_prompt(prompt)
195
- plan = _engine.get_plan(intent)
198
+ splice_client = ctx.lifespan_context.get("splice_client") if hasattr(ctx, "lifespan_context") else None
199
+ search_roots = _get_search_roots(ctx)
200
+ plan = await _engine.get_plan(
201
+ intent,
202
+ search_roots=search_roots,
203
+ splice_client=splice_client,
204
+ )
196
205
  plan["prompt"] = prompt
197
206
  plan["note"] = (
198
- "This is a dry run. No samples searched, downloaded, or loaded. "
199
- "Use compose() to execute this plan."
207
+ "This is a dry run. No samples searched or loaded. "
208
+ "Use compose() to get the full plan with credit checks, "
209
+ "then step through each tool call in sequence."
200
210
  )
201
211
  return plan
@@ -0,0 +1,47 @@
1
+ """Shared accessors for reading from a taste-graph dict.
2
+
3
+ Three shapes exist in the wild:
4
+ canonical: {"dimension_weights": {"dim": 0.3, ...}, ...} TasteGraph.to_dict()
5
+ legacy flat: {"dim": 0.3, ...} arbitrary caller dicts
6
+ legacy obj: {"dim": {"value": 0.3, ...}} TasteDimension.to_dict()
7
+
8
+ Every consumer that wants to read a dimension preference MUST route through
9
+ get_dimension_pref so new callers standardize on the canonical path and
10
+ pre-existing dicts keep working until fully migrated.
11
+
12
+ Do not add new shapes. If you find yourself writing a fourth shape, fix the
13
+ producer instead.
14
+ """
15
+
16
+ from __future__ import annotations
17
+
18
+
19
def get_dimension_pref(
    taste_graph: object,
    dimension: str,
    default: float = 0.5,
) -> float:
    """Read a dimension preference from a taste graph dict, regardless of shape.

    Returns default for non-dict input, missing dimensions, or non-numeric values.
    """
    if not isinstance(taste_graph, dict):
        return default

    def _as_float(candidate):
        # None signals "not a usable number" to the lookups below.
        return float(candidate) if isinstance(candidate, (int, float)) else None

    # Canonical shape wins
    weights = taste_graph.get("dimension_weights")
    if isinstance(weights, dict) and dimension in weights:
        numeric = _as_float(weights[dimension])
        if numeric is not None:
            return numeric

    # Legacy flat shapes
    raw = taste_graph.get(dimension)
    numeric = _as_float(raw)
    if numeric is not None:
        return numeric
    if isinstance(raw, dict):
        numeric = _as_float(raw.get("value"))
        if numeric is not None:
            return numeric

    return default
@@ -121,7 +121,11 @@ def _build_triptych(
121
121
  compiled_plan = None
122
122
  if moves and i < len(moves):
123
123
  move_id = moves[i].get("move_id", "")
124
- compiled_plan = moves[i].get("compile_plan")
124
+ # Compile through the semantic compiler — single source of truth
125
+ from ..wonder_mode.engine import _compile_variant_plan
126
+ kernel = {"session_info": {"tempo": 120, "tracks": []}, "mode": "improve"}
127
+ compiled_plan = _compile_variant_plan(moves[i], kernel)
128
+ # No fallback to plan_template — uncompilable moves stay analytical
125
129
 
126
130
  variants.append(PreviewVariant(
127
131
  variant_id=f"{set_id}_{profile['label']}",
@@ -264,7 +268,10 @@ def _compute_set_id(request_text: str, kernel_id: str) -> str:
264
268
 
265
269
  def _estimate_taste_fit(novelty: float, taste_graph: dict) -> float:
266
270
  """Estimate how well a novelty level fits user taste."""
267
- boldness = taste_graph.get("transition_boldness", 0.5)
271
+ # Routes through the canonical accessor so dimension_weights.transition_boldness
272
+ # is honored. Previously read the top-level key directly and always got 0.5.
273
+ from ..memory.taste_accessors import get_dimension_pref
274
+ boldness = get_dimension_pref(taste_graph, "transition_boldness", default=0.5)
268
275
  # Users who like boldness prefer higher novelty
269
276
  fit = 1.0 - abs(novelty - boldness) * 0.5
270
277
  return round(max(0.0, min(1.0, fit)), 3)
@@ -23,7 +23,15 @@ def _get_ableton(ctx: Context):
23
23
 
24
24
  def _should_refuse_analytical(compiled_plan, wonder_linked: bool) -> bool:
25
25
  """Check if an analytical variant should be refused in Wonder context."""
26
- return compiled_plan is None and wonder_linked
26
+ if not wonder_linked:
27
+ return False
28
+ if compiled_plan is None:
29
+ return True
30
+ if isinstance(compiled_plan, dict):
31
+ return len(compiled_plan.get("steps", [])) == 0
32
+ if isinstance(compiled_plan, list):
33
+ return len(compiled_plan) == 0
34
+ return True
27
35
 
28
36
 
29
37
  def _find_wonder_session_by_preview(set_id: str):
@@ -308,7 +316,7 @@ def commit_preview_variant(
308
316
 
309
317
 
310
318
  @mcp.tool()
311
- def render_preview_variant(
319
+ async def render_preview_variant(
312
320
  ctx: Context,
313
321
  set_id: str = "",
314
322
  variant_id: str = "",
@@ -352,7 +360,7 @@ def render_preview_variant(
352
360
  "analytical_only": True,
353
361
  }
354
362
 
355
- # If the variant has a compiled plan, we could apply-capture-undo.
363
+ # If the variant has a compiled plan, apply -> capture audible -> undo.
356
364
  # Without a compiled plan, return the variant's analytical preview.
357
365
  if variant.compiled_plan:
358
366
  ableton = _get_ableton(ctx)
@@ -360,52 +368,87 @@ def render_preview_variant(
360
368
  plan = variant.compiled_plan
361
369
  steps = plan if isinstance(plan, list) else plan.get("steps", [])
362
370
 
363
- from ..runtime.execution_router import execute_plan_steps
371
+ from ..runtime.execution_router import execute_plan_steps_async
364
372
 
365
373
  applied_count = 0
366
- try:
367
- # Capture before state
368
- before_info = ableton.send_command("get_session_info", {})
374
+ playback_started = False
375
+ preview_mode = "metadata_only_preview"
376
+ spectral_before: Optional[dict] = None
377
+ spectral_after: Optional[dict] = None
378
+ before_info: dict = {}
379
+ after_info: dict = {}
380
+
381
+ bridge = ctx.lifespan_context.get("m4l")
382
+ mcp_registry = ctx.lifespan_context.get("mcp_dispatch", {})
369
383
 
370
- # Execute through unified router
371
- exec_results = execute_plan_steps(steps, ableton=ableton, ctx=ctx)
384
+ try:
385
+ # ── 1. Capture BEFORE metadata ──
386
+ before_info = ableton.send_command("get_session_info", {}) or {}
387
+
388
+ # ── 2. Apply the variant ──
389
+ exec_results = await execute_plan_steps_async(
390
+ steps,
391
+ ableton=ableton,
392
+ bridge=bridge,
393
+ mcp_registry=mcp_registry,
394
+ ctx=ctx,
395
+ )
372
396
  applied_count = sum(1 for r in exec_results if r.ok)
397
+ if applied_count == 0 and steps:
398
+ return {
399
+ "error": "Variant failed to apply any steps",
400
+ "variant_id": variant_id,
401
+ "step_errors": [r.error for r in exec_results if not r.ok],
402
+ }
403
+
404
+ # ── 3. Capture AFTER metadata (variant is live) ──
405
+ after_info = ableton.send_command("get_session_info", {}) or {}
406
+
407
+ # ── 4. Audible capture WHILE variant is still applied ──
408
+ # This is the critical ordering fix: previously this block ran AFTER
409
+ # the finally's undo loop, so "audible_preview" captured pre-variant
410
+ # audio and lied about it. Now playback + spectrum sampling happens
411
+ # while the variant is actually in effect, then the finally undoes it.
412
+ try:
413
+ from ..m4l_bridge import SpectralCache
414
+ cache = ctx.lifespan_context.get("spectral")
415
+ if cache and isinstance(cache, SpectralCache) and cache.is_connected:
416
+ spectral_before = cache.get_all()
417
+
418
+ tempo = before_info.get("tempo", 120) or 120
419
+ play_seconds = min(bars * (60.0 / tempo) * 4, 8.0)
420
+
421
+ ableton.send_command("start_playback", {})
422
+ playback_started = True
423
+
424
+ import time as _time
425
+ _time.sleep(play_seconds)
426
+
427
+ spectral_after = cache.get_all()
428
+
429
+ ableton.send_command("stop_playback", {})
430
+ playback_started = False
431
+
432
+ preview_mode = "audible_preview"
433
+ except Exception:
434
+ # Spectral capture is best-effort; keep preview_mode as metadata_only
435
+ pass
373
436
 
374
- # Capture after state
375
- after_info = ableton.send_command("get_session_info", {})
376
437
  except Exception as e:
377
438
  return {"error": f"Render failed: {e}", "variant_id": variant_id}
378
439
  finally:
379
- # Undo all applied changes regardless of success/failure
440
+ # ── 5. Cleanup: stop playback if still running, then undo everything ──
441
+ if playback_started:
442
+ try:
443
+ ableton.send_command("stop_playback", {})
444
+ except Exception:
445
+ pass
380
446
  for _ in range(applied_count):
381
447
  try:
382
448
  ableton.send_command("undo")
383
449
  except Exception:
384
450
  break
385
451
 
386
- # Determine preview mode: audible (M4L available) or metadata-only
387
- preview_mode = "metadata_only_preview"
388
- spectral_before = None
389
- spectral_after = None
390
-
391
- # Try audible preview — capture spectrum via M4L spectral cache
392
- try:
393
- from ..m4l_bridge import SpectralCache
394
- cache = ctx.lifespan_context.get("spectral")
395
- if cache and isinstance(cache, SpectralCache) and cache.is_connected:
396
- spectral_before = cache.get_all()
397
- # Play for the requested bar count
398
- tempo = before_info.get("tempo", 120)
399
- play_seconds = bars * (60.0 / tempo) * 4 # bars * beat_duration * 4 beats
400
- ableton.send_command("start_playback", {})
401
- import time as _time
402
- _time.sleep(min(play_seconds, 8.0)) # cap at 8 seconds
403
- spectral_after = cache.get_all()
404
- ableton.send_command("stop_playback", {})
405
- preview_mode = "audible_preview"
406
- except Exception:
407
- pass # fall back to metadata_only
408
-
409
452
  variant.status = "rendered"
410
453
  variant.preview_mode = preview_mode
411
454
  variant.render_ref = f"render_{variant_id}_{bars}bars"
@@ -78,6 +78,39 @@ def build_project_brain(ctx: Context) -> dict:
78
78
  except Exception:
79
79
  pass
80
80
 
81
+ # 5b. Build notes_map for role inference.
82
+ # Shape: {section_id: {track_index: [notes]}}. Without this, role_graph
83
+ # falls back to "assume all tracks active in every section" which destroys
84
+ # section-scoped role confidence.
85
+ notes_map: dict[str, dict[int, list[dict]]] = {}
86
+ try:
87
+ for scene_idx, scene in enumerate(scenes or []):
88
+ section_id = str(
89
+ scene.get("section_id")
90
+ or scene.get("name")
91
+ or f"scene_{scene_idx}"
92
+ )
93
+ per_track: dict[int, list[dict]] = {}
94
+ for track in tracks:
95
+ t_idx = track.get("index", 0)
96
+ try:
97
+ notes_resp = ableton.send_command("get_notes", {
98
+ "track_index": t_idx,
99
+ "clip_index": scene_idx,
100
+ })
101
+ if isinstance(notes_resp, dict):
102
+ notes = notes_resp.get("notes", [])
103
+ if notes:
104
+ per_track[t_idx] = notes
105
+ except Exception:
106
+ # Individual note fetch failing is fine — continue with others
107
+ continue
108
+ if per_track:
109
+ notes_map[section_id] = per_track
110
+ except Exception:
111
+ # Overall failure: empty map, degrade to "all tracks active" fallback
112
+ notes_map = {}
113
+
81
114
  # 6. Probe capabilities (direct SpectralCache access, not TCP)
82
115
  analyzer_ok = False
83
116
  analyzer_fresh = False
@@ -103,6 +136,7 @@ def build_project_brain(ctx: Context) -> dict:
103
136
  scenes=scenes if scenes and clip_matrix else None,
104
137
  clip_matrix=clip_matrix if clip_matrix else None,
105
138
  track_infos=track_infos if track_infos else None,
139
+ notes_map=notes_map if notes_map else None,
106
140
  arrangement_clips=arrangement_clips if arrangement_clips else None,
107
141
  analyzer_ok=analyzer_ok,
108
142
  flucoma_ok=flucoma_ok,