livepilot 1.10.6 → 1.10.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcp.json.disabled +9 -0
  3. package/.mcpbignore +3 -0
  4. package/AGENTS.md +3 -3
  5. package/BUGS.md +1570 -0
  6. package/CHANGELOG.md +42 -0
  7. package/CONTRIBUTING.md +1 -1
  8. package/README.md +7 -7
  9. package/bin/livepilot.js +28 -8
  10. package/livepilot/.Codex-plugin/plugin.json +2 -2
  11. package/livepilot/.claude-plugin/plugin.json +2 -2
  12. package/livepilot/skills/livepilot-core/SKILL.md +4 -4
  13. package/livepilot/skills/livepilot-core/references/overview.md +2 -2
  14. package/livepilot/skills/livepilot-release/SKILL.md +8 -8
  15. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  16. package/m4l_device/LivePilot_Analyzer.amxd.pre-presentation-backup +0 -0
  17. package/m4l_device/LivePilot_Analyzer.maxproj +53 -0
  18. package/m4l_device/livepilot_bridge.js +214 -2
  19. package/manifest.json +3 -3
  20. package/mcp_server/__init__.py +1 -1
  21. package/mcp_server/atlas/__init__.py +93 -26
  22. package/mcp_server/creative_constraints/tools.py +206 -33
  23. package/mcp_server/experiment/engine.py +7 -9
  24. package/mcp_server/hook_hunter/analyzer.py +62 -9
  25. package/mcp_server/hook_hunter/tools.py +60 -9
  26. package/mcp_server/m4l_bridge.py +21 -6
  27. package/mcp_server/musical_intelligence/detectors.py +32 -0
  28. package/mcp_server/performance_engine/tools.py +112 -29
  29. package/mcp_server/preview_studio/engine.py +89 -8
  30. package/mcp_server/preview_studio/tools.py +22 -6
  31. package/mcp_server/project_brain/automation_graph.py +71 -19
  32. package/mcp_server/project_brain/builder.py +2 -0
  33. package/mcp_server/project_brain/tools.py +55 -5
  34. package/mcp_server/reference_engine/profile_builder.py +129 -3
  35. package/mcp_server/reference_engine/tools.py +47 -6
  36. package/mcp_server/runtime/execution_router.py +50 -0
  37. package/mcp_server/runtime/mcp_dispatch.py +75 -3
  38. package/mcp_server/runtime/remote_commands.py +4 -2
  39. package/mcp_server/sample_engine/analyzer.py +131 -4
  40. package/mcp_server/sample_engine/critics.py +29 -8
  41. package/mcp_server/sample_engine/models.py +20 -1
  42. package/mcp_server/sample_engine/tools.py +48 -14
  43. package/mcp_server/semantic_moves/sound_design_compilers.py +22 -59
  44. package/mcp_server/semantic_moves/transition_compilers.py +12 -19
  45. package/mcp_server/server.py +68 -2
  46. package/mcp_server/session_continuity/models.py +4 -0
  47. package/mcp_server/session_continuity/tracker.py +14 -1
  48. package/mcp_server/song_brain/builder.py +110 -12
  49. package/mcp_server/song_brain/tools.py +77 -13
  50. package/mcp_server/sound_design/tools.py +112 -1
  51. package/mcp_server/stuckness_detector/detector.py +90 -0
  52. package/mcp_server/stuckness_detector/tools.py +41 -0
  53. package/mcp_server/tools/_agent_os_engine/critics.py +24 -0
  54. package/mcp_server/tools/_composition_engine/__init__.py +2 -2
  55. package/mcp_server/tools/_composition_engine/harmony.py +90 -0
  56. package/mcp_server/tools/_composition_engine/sections.py +47 -4
  57. package/mcp_server/tools/_harmony_engine.py +52 -8
  58. package/mcp_server/tools/_research_engine.py +98 -19
  59. package/mcp_server/tools/_theory_engine.py +138 -9
  60. package/mcp_server/tools/agent_os.py +20 -3
  61. package/mcp_server/tools/analyzer.py +98 -0
  62. package/mcp_server/tools/clips.py +45 -0
  63. package/mcp_server/tools/composition.py +66 -23
  64. package/mcp_server/tools/devices.py +22 -1
  65. package/mcp_server/tools/harmony.py +115 -14
  66. package/mcp_server/tools/midi_io.py +13 -1
  67. package/mcp_server/tools/mixing.py +35 -1
  68. package/mcp_server/tools/motif.py +49 -3
  69. package/mcp_server/tools/research.py +24 -0
  70. package/mcp_server/tools/theory.py +108 -16
  71. package/mcp_server/transition_engine/critics.py +18 -11
  72. package/package.json +2 -2
  73. package/remote_script/LivePilot/__init__.py +57 -2
  74. package/remote_script/LivePilot/clips.py +69 -0
  75. package/remote_script/LivePilot/mixing.py +117 -0
  76. package/remote_script/LivePilot/router.py +13 -1
  77. package/scripts/generate_tool_catalog.py +13 -38
  78. package/scripts/sync_metadata.py +231 -14
@@ -135,7 +135,18 @@ def develop_hook(
135
135
  # Look up the actual hook to adapt strategies by type
136
136
  hook_type = "melodic" # default
137
137
  hook_description = "the hook"
138
- if hook_id:
138
+ # BUG-B31: when no hook_id is provided, default to the session's primary
139
+ # hook. Previously the tool emitted generic advice even though
140
+ # find_primary_hook was already available — users had to manually chain
141
+ # find_primary_hook → develop_hook to get type-specific tactics.
142
+ if not hook_id:
143
+ tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
144
+ primary = analyzer.find_primary_hook(tracks, motif_data, scenes)
145
+ if primary is not None:
146
+ hook_id = primary.hook_id
147
+ hook_type = primary.hook_type
148
+ hook_description = primary.description
149
+ elif hook_id:
139
150
  tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
140
151
  candidates = analyzer.find_hook_candidates(tracks, motif_data, scenes)
141
152
  match = [c for c in candidates if c.hook_id == hook_id]
@@ -333,7 +344,14 @@ def suggest_payoff_repair(ctx: Context) -> dict:
333
344
 
334
345
 
335
346
  def _get_section_data(ableton) -> list[dict]:
336
- """Build section data from Ableton scenes with real energy/density/has_drums."""
347
+ """Build section data from Ableton scenes with real energy/density/has_drums.
348
+
349
+ BUG-B51 fix: also fetches per-section note signals (unique pitch
350
+ count, note count, velocity-variance) so compare_phrase_impact can
351
+ differentiate two sections that share energy/density but have
352
+ different clip contents. Without these, the old comparator emitted
353
+ identical scores for every pair of same-density sections.
354
+ """
337
355
  sections: list[dict] = []
338
356
  try:
339
357
  matrix = ableton.send_command("get_scene_matrix")
@@ -342,11 +360,6 @@ def _get_section_data(ableton) -> list[dict]:
342
360
 
343
361
  # Detect drum track indices by name
344
362
  drum_keywords = {"drum", "beat", "kick", "hat", "perc", "snare"}
345
- track_names = []
346
- # tracks may be in matrix metadata or session_info
347
- for ti, row_entry in enumerate(matrix_rows[0] if matrix_rows else []):
348
- track_names.append("") # placeholder — we'll use scenes_list tracks if available
349
- # Use scene matrix track info if available
350
363
  track_info = matrix.get("tracks", [])
351
364
  drum_indices = set()
352
365
  for ti, track in enumerate(track_info):
@@ -361,16 +374,49 @@ def _get_section_data(ableton) -> list[dict]:
361
374
  clip_count = sum(1 for c in row if c)
362
375
  total_tracks = max(len(row), 1)
363
376
 
364
- # has_drums: check if any drum track has a clip in this scene
365
377
  has_drums = any(
366
378
  di < len(row) and row[di]
367
379
  for di in drum_indices
368
380
  ) if drum_indices else False
369
381
 
370
382
  density = min(1.0, clip_count / total_tracks)
371
- # energy: density + drum bonus
372
383
  energy = min(1.0, density + (0.1 if has_drums else 0.0))
373
384
 
385
+ # BUG-B51: cheap per-section note signals. Sample up to 3
386
+ # non-drum tracks in this scene for a flavor of the
387
+ # section's harmonic/rhythmic content. Keeps the call
388
+ # count bounded so compare_phrase_impact doesn't explode.
389
+ unique_pitches: set = set()
390
+ note_count = 0
391
+ velocity_variance = 0.0
392
+ sampled = 0
393
+ for t_idx, cell in enumerate(row):
394
+ if sampled >= 3 or not cell:
395
+ continue
396
+ if t_idx in drum_indices:
397
+ continue
398
+ try:
399
+ notes_resp = ableton.send_command("get_notes", {
400
+ "track_index": t_idx, "clip_index": i,
401
+ })
402
+ except Exception:
403
+ continue
404
+ notes = notes_resp.get("notes", []) if isinstance(
405
+ notes_resp, dict
406
+ ) else []
407
+ if not notes:
408
+ continue
409
+ sampled += 1
410
+ note_count += len(notes)
411
+ for n in notes:
412
+ unique_pitches.add(int(n.get("pitch", 0)) % 12)
413
+ vels = [int(n.get("velocity", 0)) for n in notes]
414
+ if len(vels) >= 2:
415
+ mean_v = sum(vels) / len(vels)
416
+ velocity_variance += sum(
417
+ (v - mean_v) ** 2 for v in vels
418
+ ) / len(vels)
419
+
374
420
  sections.append({
375
421
  "id": f"scene_{i}",
376
422
  "name": scene.get("name", f"Scene {i}"),
@@ -378,6 +424,11 @@ def _get_section_data(ableton) -> list[dict]:
378
424
  "energy": round(energy, 3),
379
425
  "density": round(density, 3),
380
426
  "has_drums": has_drums,
427
+ # BUG-B51: these three differentiate otherwise-identical
428
+ # sections. Downstream phrase scorer reads them.
429
+ "unique_pitch_classes": len(unique_pitches),
430
+ "note_count": note_count,
431
+ "velocity_variance": round(velocity_variance, 3),
381
432
  })
382
433
  except Exception as exc:
383
434
  logger.debug("_get_section_data failed: %s", exc)
@@ -278,14 +278,26 @@ class SpectralReceiver(asyncio.DatagramProtocol):
278
278
  self._handle_chunk(int(args[0]), int(args[1]), str(args[2]))
279
279
 
280
280
  def _handle_response(self, encoded: str) -> None:
281
- """Decode a single-packet base64 response."""
281
+ """Decode a single-packet base64 response.
282
+
283
+ Resolves _response_callback exactly once, then clears it. Without the
284
+ clear, a second late packet could overwrite a future belonging to a
285
+ different in-flight command. The protocol has no request id yet
286
+ (livepilot_bridge.js:666 emits bare /response), so correlation relies
287
+ on the single-command-in-flight invariant enforced by M4LBridge._cmd_lock
288
+ plus this one-shot clear.
289
+ """
282
290
  try:
283
291
  # URL-safe base64 decode (- and _ instead of + and /)
284
292
  padded = encoded + "=" * (-len(encoded) % 4)
285
293
  decoded = base64.urlsafe_b64decode(padded).decode('utf-8')
286
294
  result = _normalize_bridge_payload(json.loads(decoded))
287
- if self._response_callback and not self._response_callback.done():
288
- self._response_callback.set_result(result)
295
+ cb = self._response_callback
296
+ if cb and not cb.done():
297
+ cb.set_result(result)
298
+ # Clear regardless — either we consumed it, or it was already
299
+ # done/abandoned. Future packets with no owner get dropped.
300
+ self._response_callback = None
289
301
  except Exception as exc:
290
302
  import sys
291
303
  print(f"LivePilot: failed to decode bridge response: {exc}", file=sys.stderr)
@@ -376,11 +388,14 @@ class M4LBridge:
376
388
  result = await asyncio.wait_for(future, timeout=timeout)
377
389
  return result
378
390
  except asyncio.TimeoutError:
379
- # Clear the stale future so a delayed response doesn't resolve
380
- # a future that no caller is waiting on
391
+ return {"error": "M4L bridge timeout device may be busy or removed"}
392
+ finally:
393
+ # Always clear the future — on success the receiver has already
394
+ # cleared it inside _handle_response, but calling again is a
395
+ # no-op. On timeout this is what prevents a delayed packet from
396
+ # resolving a future belonging to the next command.
381
397
  if self.receiver:
382
398
  self.receiver.set_response_future(None)
383
- return {"error": "M4L bridge timeout — device may be busy or removed"}
384
399
 
385
400
  async def send_capture(self, command: str, *args: Any, timeout: float = 35.0) -> dict:
386
401
  """Send a capture command to the M4L device and wait for /capture_complete."""
@@ -208,11 +208,43 @@ def detect_role_conflicts(
208
208
  "Layer drum parts into one Drum Rack or pan them apart"),
209
209
  }
210
210
 
211
+ # BUG-B1 fix: intentional drum + percussion layering is the core
212
+ # aesthetic in hip-hop / Dilla / lo-fi / beat-scene music, not a
213
+ # conflict. Heuristic to demote drum-role conflicts when the track
214
+ # names make that layering obvious (one "DRUMS" + one "PERC/CONGA/
215
+ # SHAKER" is distinct instruments, not a fight for the same role).
216
+ _PERC_NAMES = {
217
+ "perc", "percussion", "conga", "congas", "shaker",
218
+ "tambourine", "cowbell", "triangle", "bongo",
219
+ "djembe", "claves", "hi-hat", "hihat", "hat",
220
+ }
221
+
222
+ def _looks_like_layering(group: list[dict]) -> bool:
223
+ """True if at least one of the tracks has a percussion-specific
224
+ name (distinct from the main drum kit)."""
225
+ if len(group) < 2:
226
+ return False
227
+ perc_track_count = 0
228
+ for track in group:
229
+ name = str(track.get("name", "")).lower()
230
+ if any(tok in name for tok in _PERC_NAMES):
231
+ perc_track_count += 1
232
+ # Needs at least one main "drums" track AND one perc track
233
+ return 1 <= perc_track_count < len(group)
234
+
211
235
  conflicts = []
212
236
  for role, (desc, rec) in UNIQUE_ROLES.items():
213
237
  group = role_groups.get(role, [])
214
238
  if len(group) > 1:
215
239
  severity = min(0.9, 0.3 + (len(group) - 1) * 0.2)
240
+ if role == "drums" and _looks_like_layering(group):
241
+ # Demote severity — this looks intentional, not a conflict
242
+ severity = max(0.1, severity - 0.4)
243
+ rec = (
244
+ "Drum + percussion layering detected — if this is "
245
+ "intentional (hip-hop / Dilla / lo-fi), ignore. "
246
+ "Otherwise: " + rec
247
+ )
216
248
  conflicts.append(RoleConflict(
217
249
  role=role,
218
250
  tracks=group,
@@ -21,34 +21,59 @@ logger = logging.getLogger(__name__)
21
21
  # ── Helpers ─────────────────────────────────────────────────────────
22
22
 
23
23
 
24
- def _infer_role(name: str, index: int, scene_count: int) -> str:
25
- """Infer a scene's role from its name or position."""
26
- lower = name.lower()
27
- for role in ("intro", "verse", "chorus", "build", "drop", "breakdown", "outro", "transition"):
28
- if role in lower:
29
- return role
30
- # Positional fallback
24
+ # BUG-E4 / E5 fix: performance_engine used to have its own _infer_role() keyword
25
+ list and _infer_energy() static {role: number} table. Those diverged from
26
+ # _composition_engine's richer section classifier, which caused
27
+ # get_performance_state and analyze_composition to label the same scenes
28
+ # differently (Deep Flow: drop vs verse, Sun Peak: drop vs chorus) and to
29
+ # report dissimilar energies (composition derived from active-track density,
30
+ # performance looked up a hard-coded 0.2/0.4/0.7 table). Now performance
31
+ # consumes composition's section graph as the source of truth and only keeps
32
+ # a positional fallback for scenes without enough data.
33
+ _POSITIONAL_FALLBACK_ROLES = {
34
+ "first": "intro",
35
+ "last": "outro",
36
+ "early": "intro",
37
+ "middle_low": "verse",
38
+ "middle_high": "chorus",
39
+ "late": "outro",
40
+ "default": "verse",
41
+ }
42
+
43
+
44
+ def _positional_fallback_role(index: int, scene_count: int) -> str:
45
+ """Map a scene index to a role when no composition data is available.
46
+
47
+ Kept only as a last-resort so we still produce a sensible answer for
48
+ unnamed scenes or when build_section_graph_from_scenes returns empty.
49
+ Callers should prefer the composition-engine result when it exists.
50
+ """
51
+ if scene_count <= 0:
52
+ return _POSITIONAL_FALLBACK_ROLES["default"]
31
53
  if index == 0:
32
- return "intro"
54
+ return _POSITIONAL_FALLBACK_ROLES["first"]
33
55
  if index == scene_count - 1:
34
- return "outro"
56
+ return _POSITIONAL_FALLBACK_ROLES["last"]
35
57
  if scene_count > 4:
36
- quarter = scene_count / 4
58
+ quarter = scene_count / 4.0
37
59
  if index < quarter:
38
- return "intro"
39
- elif index < quarter * 2:
40
- return "verse"
41
- elif index < quarter * 3:
42
- return "chorus"
43
- else:
44
- return "outro"
45
- return "verse"
60
+ return _POSITIONAL_FALLBACK_ROLES["early"]
61
+ if index < quarter * 2:
62
+ return _POSITIONAL_FALLBACK_ROLES["middle_low"]
63
+ if index < quarter * 3:
64
+ return _POSITIONAL_FALLBACK_ROLES["middle_high"]
65
+ return _POSITIONAL_FALLBACK_ROLES["late"]
66
+ return _POSITIONAL_FALLBACK_ROLES["default"]
67
+
46
68
 
69
+ def _positional_fallback_energy(role: str) -> float:
70
+ """Static energy map used only when density is unavailable.
47
71
 
48
- def _infer_energy(role: str) -> float:
49
- """Infer energy level from scene role."""
50
- energy_map = {
51
- "intro": 0.2,
72
+ Kept tiny and explicit so the fallback path is obvious — the primary
73
+ source of energy is _composition_engine's density-based value.
74
+ """
75
+ return {
76
+ "intro": 0.3,
52
77
  "verse": 0.4,
53
78
  "build": 0.6,
54
79
  "chorus": 0.7,
@@ -56,23 +81,82 @@ def _infer_energy(role: str) -> float:
56
81
  "breakdown": 0.3,
57
82
  "transition": 0.5,
58
83
  "outro": 0.2,
59
- }
60
- return energy_map.get(role, 0.5)
84
+ }.get(role, 0.5)
61
85
 
62
86
 
63
87
  def _fetch_scene_data(ctx: Context) -> tuple[list[SceneRole], int]:
64
- """Fetch scene info from Ableton and build SceneRole list."""
88
+ """Fetch scene info + composition graph from Ableton and build SceneRole list.
89
+
90
+ BUG-E4 / E5 fix: roles + energies now flow from composition_engine's
91
+ build_section_graph_from_scenes, which uses keyword matching + active-
92
+ track density for energy. Unnamed scenes fall back to the positional
93
+ heuristic. This keeps get_performance_state in sync with
94
+ get_section_graph / analyze_composition.
95
+ """
96
+ from ..tools._composition_engine import (
97
+ build_section_graph_from_scenes,
98
+ SectionNode as CESectionNode,
99
+ )
100
+
65
101
  ableton = ctx.lifespan_context["ableton"]
66
102
 
67
103
  scenes_info = ableton.send_command("get_scenes_info", {})
68
104
  scenes_list = scenes_info.get("scenes", [])
69
105
  scene_count = len(scenes_list)
70
106
 
107
+ # Pull session topology + clip matrix so composition engine can compute
108
+ # active-track density. If any of these fails we fall back to the
109
+ # positional heuristic — preserving the old behavior as a safety net.
110
+ track_count = 0
111
+ clip_matrix: list[list[dict]] = []
112
+ try:
113
+ session_info = ableton.send_command("get_session_info", {})
114
+ track_count = int(session_info.get("track_count", 0))
115
+ except Exception as exc:
116
+ logger.debug("_fetch_scene_data session_info failed: %s", exc)
117
+ try:
118
+ mtx = ableton.send_command("get_scene_matrix", {})
119
+ if isinstance(mtx, dict):
120
+ clip_matrix = mtx.get("matrix", []) or []
121
+ except Exception as exc:
122
+ logger.debug("_fetch_scene_data scene_matrix failed: %s", exc)
123
+
124
+ # Build the composition section graph. Each SectionNode has
125
+ # section_id = f"sec_{raw_enumerate_index:02d}" per BUG-E1 fix, so we
126
+ # can index by scene position directly.
127
+ ce_sections: list[CESectionNode] = []
128
+ try:
129
+ if scenes_list and clip_matrix and track_count > 0:
130
+ ce_sections = build_section_graph_from_scenes(
131
+ scenes_list, clip_matrix, track_count,
132
+ )
133
+ except Exception as exc:
134
+ logger.debug("_fetch_scene_data section graph failed: %s", exc)
135
+
136
+ ce_by_scene_idx: dict[int, CESectionNode] = {}
137
+ for sec in ce_sections:
138
+ # section_id format "sec_02" → scene index 2 (raw enumerate index)
139
+ sid = str(sec.section_id)
140
+ if sid.startswith("sec_"):
141
+ try:
142
+ ce_by_scene_idx[int(sid[4:])] = sec
143
+ except ValueError:
144
+ pass
145
+
71
146
  scene_roles: list[SceneRole] = []
72
147
  for i, scene_data in enumerate(scenes_list):
73
148
  name = scene_data.get("name", f"Scene {i}")
74
- role = _infer_role(name, i, scene_count)
75
- energy = _infer_energy(role)
149
+ ce_sec = ce_by_scene_idx.get(i)
150
+ if ce_sec is not None:
151
+ # SectionType is an enum; .value gives the string vocabulary
152
+ stype = ce_sec.section_type
153
+ role = stype.value if hasattr(stype, "value") else str(stype)
154
+ energy = float(ce_sec.energy)
155
+ else:
156
+ # Unnamed scene or build failed — positional fallback
157
+ role = _positional_fallback_role(i, scene_count)
158
+ energy = _positional_fallback_energy(role)
159
+
76
160
  scene_roles.append(SceneRole(
77
161
  scene_index=i,
78
162
  name=name,
@@ -85,14 +169,13 @@ def _fetch_scene_data(ctx: Context) -> tuple[list[SceneRole], int]:
85
169
  current_scene = 0
86
170
  try:
87
171
  session_info = ableton.send_command("get_session_info", {})
88
- # Check if any scene is marked as triggered/playing
89
172
  session_scenes = session_info.get("scenes", [])
90
173
  for i, s in enumerate(session_scenes):
91
174
  if s.get("is_triggered", False):
92
175
  current_scene = i
93
176
  break
94
177
  except Exception as exc:
95
- logger.debug("_fetch_scene_data failed: %s", exc)
178
+ logger.debug("_fetch_scene_data current_scene failed: %s", exc)
96
179
 
97
180
  return scene_roles, current_scene
98
181
 
@@ -42,11 +42,17 @@ def create_preview_set(
42
42
  available_moves: Optional[list[dict]] = None,
43
43
  song_brain: Optional[dict] = None,
44
44
  taste_graph: Optional[dict] = None,
45
+ kernel: Optional[dict] = None,
45
46
  ) -> PreviewSet:
46
47
  """Create a preview set with variant slots.
47
48
 
48
49
  For creative_triptych, generates 3 variants: safe, strong, unexpected.
49
50
  Each variant gets a move_id from available_moves ranked by novelty.
51
+
52
+ kernel: the live session kernel (track topology + device chains). Compilers
53
+ resolve targets from it — without it, variants degrade into no-ops or
54
+ generic reads. Callers that have a `ctx` should fetch a real kernel
55
+ via runtime.tools.get_session_kernel(ctx).
50
56
  """
51
57
  set_id = _compute_set_id(request_text, kernel_id)
52
58
  now = int(time.time() * 1000)
@@ -56,11 +62,15 @@ def create_preview_set(
56
62
  taste_graph = taste_graph or {}
57
63
 
58
64
  if strategy == "creative_triptych":
59
- variants = _build_triptych(request_text, moves, song_brain, taste_graph, set_id, now)
65
+ variants = _build_triptych(
66
+ request_text, moves, song_brain, taste_graph, set_id, now, kernel,
67
+ )
60
68
  elif strategy == "binary":
61
69
  variants = _build_binary(request_text, moves, song_brain, set_id, now)
62
70
  else:
63
- variants = _build_triptych(request_text, moves, song_brain, taste_graph, set_id, now)
71
+ variants = _build_triptych(
72
+ request_text, moves, song_brain, taste_graph, set_id, now, kernel,
73
+ )
64
74
 
65
75
  ps = PreviewSet(
66
76
  set_id=set_id,
@@ -81,6 +91,7 @@ def _build_triptych(
81
91
  taste_graph: dict,
82
92
  set_id: str,
83
93
  now: int,
94
+ kernel: Optional[dict] = None,
84
95
  ) -> list[PreviewVariant]:
85
96
  """Build safe / strong / unexpected variants."""
86
97
  identity = song_brain.get("identity_core", "")
@@ -114,20 +125,34 @@ def _build_triptych(
114
125
  },
115
126
  ]
116
127
 
128
+ # Normalize kernel for the compiler. If the caller supplied a real kernel
129
+ # use it; otherwise fall back to an empty-but-valid shape so compilers
130
+ # degrade to no-op steps and emit warnings instead of crashing.
131
+ compile_kernel = kernel if kernel else {
132
+ "session_info": {"tempo": 120, "tracks": []},
133
+ "mode": "improve",
134
+ }
135
+
117
136
  variants = []
118
137
  for i, profile in enumerate(profiles):
119
138
  # Pick a move if available
120
139
  move_id = ""
121
140
  compiled_plan = None
122
- if moves and i < len(moves):
123
- move_id = moves[i].get("move_id", "")
141
+ move = moves[i] if moves and i < len(moves) else None
142
+ if move is not None:
143
+ move_id = move.get("move_id", "")
124
144
  # Compile through the semantic compiler — single source of truth
125
145
  from ..wonder_mode.engine import _compile_variant_plan
126
- kernel = {"session_info": {"tempo": 120, "tracks": []}, "mode": "improve"}
127
- compiled_plan = _compile_variant_plan(moves[i], kernel)
146
+ compiled_plan = _compile_variant_plan(move, compile_kernel)
128
147
  # No fallback to plan_template — uncompilable moves stay analytical
129
148
 
130
- variants.append(PreviewVariant(
149
+ # BUG-B44 / B45: populate user-facing description fields and flag
150
+ # variants that lack a compiled_plan as not-executable (so callers
151
+ # don't commit shells).
152
+ description = _describe_variant(move, compiled_plan, profile)
153
+ executable = compiled_plan is not None and bool(move_id)
154
+
155
+ variant = PreviewVariant(
131
156
  variant_id=f"{set_id}_{profile['label']}",
132
157
  label=profile["label"],
133
158
  intent=profile["intent"],
@@ -139,11 +164,67 @@ def _build_triptych(
139
164
  compiled_plan=compiled_plan,
140
165
  taste_fit=_estimate_taste_fit(profile["novelty"], taste_graph),
141
166
  created_at_ms=now,
142
- ))
167
+ what_changed=description["what_changed"],
168
+ summary=description["summary"],
169
+ )
170
+ # Non-executable variants get status='blocked' so callers know to
171
+ # skip preview/commit. Stored as status since executable/blocked_reason
172
+ # aren't modeled yet.
173
+ if not executable:
174
+ variant.status = "blocked"
175
+ variants.append(variant)
143
176
 
144
177
  return variants
145
178
 
146
179
 
180
+ def _describe_variant(
181
+ move: Optional[dict],
182
+ compiled_plan: Optional[dict],
183
+ profile: dict,
184
+ ) -> dict:
185
+ """Build user-facing description fields for a variant (BUG-B45).
186
+
187
+ Priority order:
188
+ 1. Move's `intent` or `description` — the authored one-liner
189
+ 2. Compiled plan's step descriptions joined with " → "
190
+ 3. The profile label + novelty level as a last-resort fallback
191
+
192
+ Returns {"what_changed": str, "summary": str}.
193
+ """
194
+ what_changed = ""
195
+ summary = ""
196
+ if move:
197
+ # Move-level narrative beats plan-level — captures intent, not execution
198
+ move_intent = str(move.get("intent") or move.get("description") or "")
199
+ if move_intent:
200
+ what_changed = move_intent
201
+ summary = move_intent[:120]
202
+
203
+ if not what_changed and compiled_plan:
204
+ steps = compiled_plan.get("steps") or []
205
+ step_descriptions = [
206
+ str(s.get("description") or s.get("summary") or s.get("intent") or "")
207
+ for s in steps
208
+ ]
209
+ step_descriptions = [d for d in step_descriptions if d]
210
+ if step_descriptions:
211
+ what_changed = " → ".join(step_descriptions[:4])
212
+ summary = (
213
+ step_descriptions[0][:120]
214
+ if step_descriptions else ""
215
+ )
216
+
217
+ if not what_changed:
218
+ # Final fallback — describe the profile so the UI has something
219
+ what_changed = (
220
+ f"{profile['label'].title()} variant at novelty "
221
+ f"{profile['novelty']:.1f} (no executable plan available)"
222
+ )
223
+ summary = what_changed
224
+
225
+ return {"what_changed": what_changed, "summary": summary}
226
+
227
+
147
228
  def _build_binary(
148
229
  request_text: str,
149
230
  moves: list[dict],
@@ -182,6 +182,16 @@ def create_preview_set(
182
182
  except Exception as exc:
183
183
  logger.debug("create_preview_set failed: %s", exc)
184
184
 
185
+ # Fetch a real session kernel so compilers resolve targets from the live
186
+ # set instead of an empty placeholder. Degrades gracefully when Ableton
187
+ # is unreachable (unit tests, no-connection environments).
188
+ live_kernel: dict = {}
189
+ try:
190
+ from ..runtime.tools import get_session_kernel
191
+ live_kernel = get_session_kernel(ctx, request_text=request_text) or {}
192
+ except Exception as exc:
193
+ logger.debug("create_preview_set: could not fetch session kernel: %s", exc)
194
+
185
195
  ps = engine.create_preview_set(
186
196
  request_text=request_text,
187
197
  kernel_id=kernel_id,
@@ -189,6 +199,7 @@ def create_preview_set(
189
199
  available_moves=available_moves,
190
200
  song_brain=song_brain,
191
201
  taste_graph=taste_graph,
202
+ kernel=live_kernel,
192
203
  )
193
204
 
194
205
  return ps.to_dict()
@@ -436,7 +447,12 @@ async def render_preview_variant(
436
447
  plan = variant.compiled_plan
437
448
  steps = plan if isinstance(plan, list) else plan.get("steps", [])
438
449
 
439
- from ..runtime.execution_router import execute_plan_steps_async
450
+ from ..runtime.execution_router import execute_plan_steps_async, filter_apply_steps
451
+
452
+ # Read-only verification steps (meters/spectrum/info) don't create undo
453
+ # points in Ableton — counting them and then undoing walks back earlier
454
+ # user edits. Separate writes from reads before the apply pass.
455
+ apply_steps = filter_apply_steps(steps)
440
456
 
441
457
  applied_count = 0
442
458
  playback_started = False
@@ -453,16 +469,16 @@ async def render_preview_variant(
453
469
  # ── 1. Capture BEFORE metadata ──
454
470
  before_info = ableton.send_command("get_session_info", {}) or {}
455
471
 
456
- # ── 2. Apply the variant ──
472
+ # ── 2. Apply the variant (write steps only) ──
457
473
  exec_results = await execute_plan_steps_async(
458
- steps,
474
+ apply_steps,
459
475
  ableton=ableton,
460
476
  bridge=bridge,
461
477
  mcp_registry=mcp_registry,
462
478
  ctx=ctx,
463
479
  )
464
480
  applied_count = sum(1 for r in exec_results if r.ok)
465
- if applied_count == 0 and steps:
481
+ if applied_count == 0 and apply_steps:
466
482
  return {
467
483
  "error": "Variant failed to apply any steps",
468
484
  "variant_id": variant_id,
@@ -489,9 +505,9 @@ async def render_preview_variant(
489
505
  ableton.send_command("start_playback", {})
490
506
  playback_started = True
491
507
 
492
- import time as _time
508
+ import asyncio as _asyncio
493
509
 
494
- _time.sleep(play_seconds)
510
+ await _asyncio.sleep(play_seconds)
495
511
 
496
512
  spectral_after = cache.get_all()
497
513