livepilot 1.10.0 → 1.10.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +214 -0
  4. package/CONTRIBUTING.md +2 -2
  5. package/LICENSE +62 -21
  6. package/README.md +264 -286
  7. package/livepilot/.Codex-plugin/plugin.json +2 -2
  8. package/livepilot/.claude-plugin/plugin.json +2 -2
  9. package/livepilot/skills/livepilot-arrangement/SKILL.md +18 -1
  10. package/livepilot/skills/livepilot-core/SKILL.md +5 -5
  11. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  12. package/livepilot/skills/livepilot-devices/SKILL.md +23 -2
  13. package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +1 -1
  14. package/livepilot/skills/livepilot-release/SKILL.md +21 -17
  15. package/livepilot/skills/livepilot-sample-engine/SKILL.md +2 -1
  16. package/livepilot/skills/livepilot-wonder/SKILL.md +8 -6
  17. package/livepilot.mcpb +0 -0
  18. package/m4l_device/LivePilot_Analyzer.adv +0 -0
  19. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  20. package/m4l_device/livepilot_bridge.js +1 -1
  21. package/manifest.json +4 -4
  22. package/mcp_server/__init__.py +1 -1
  23. package/mcp_server/composer/engine.py +249 -169
  24. package/mcp_server/composer/sample_resolver.py +153 -0
  25. package/mcp_server/composer/tools.py +97 -87
  26. package/mcp_server/memory/taste_accessors.py +47 -0
  27. package/mcp_server/preview_studio/engine.py +9 -2
  28. package/mcp_server/preview_studio/tools.py +78 -35
  29. package/mcp_server/project_brain/tools.py +34 -0
  30. package/mcp_server/runtime/execution_router.py +180 -38
  31. package/mcp_server/runtime/mcp_dispatch.py +46 -0
  32. package/mcp_server/runtime/remote_commands.py +4 -1
  33. package/mcp_server/runtime/tools.py +55 -32
  34. package/mcp_server/sample_engine/moves.py +12 -12
  35. package/mcp_server/sample_engine/slice_workflow.py +190 -0
  36. package/mcp_server/sample_engine/tools.py +104 -1
  37. package/mcp_server/semantic_moves/device_creation_moves.py +7 -7
  38. package/mcp_server/semantic_moves/mix_moves.py +8 -8
  39. package/mcp_server/semantic_moves/models.py +7 -7
  40. package/mcp_server/semantic_moves/performance_moves.py +4 -4
  41. package/mcp_server/semantic_moves/sample_compilers.py +14 -9
  42. package/mcp_server/semantic_moves/sound_design_moves.py +4 -4
  43. package/mcp_server/semantic_moves/tools.py +63 -10
  44. package/mcp_server/semantic_moves/transition_moves.py +4 -4
  45. package/mcp_server/server.py +20 -1
  46. package/mcp_server/session_continuity/tracker.py +4 -1
  47. package/mcp_server/tools/_conductor.py +16 -0
  48. package/mcp_server/tools/_planner_engine.py +24 -0
  49. package/mcp_server/tools/analyzer.py +2 -0
  50. package/mcp_server/tools/planner.py +3 -0
  51. package/mcp_server/wonder_mode/engine.py +59 -13
  52. package/mcp_server/wonder_mode/tools.py +33 -1
  53. package/package.json +8 -8
  54. package/remote_script/LivePilot/__init__.py +1 -1
  55. package/remote_script/LivePilot/devices.py +10 -0
@@ -27,13 +27,15 @@ def list_semantic_moves(
27
27
  Semantic moves express WHAT to achieve musically, not HOW parametrically.
28
28
  Each move compiles into a sequence of existing deterministic tools.
29
29
 
30
- domain: filter by family (mix, arrangement, transition, sound_design, performance)
30
+ domain: filter by family (e.g. mix, arrangement, transition, sound_design, sample, performance)
31
31
  style: filter by genre/style (reserved for future use)
32
32
 
33
33
  Returns: list of moves with move_id, family, intent, targets, risk_level.
34
34
  """
35
35
  moves = registry.list_moves(domain=domain, style=style)
36
- return {"moves": moves, "count": len(moves), "available_domains": ["mix", "arrangement"]}
36
+ all_moves = registry.list_moves()
37
+ domains = sorted({m.get("family", "") for m in all_moves if m.get("family")})
38
+ return {"moves": moves, "count": len(moves), "available_domains": domains}
37
39
 
38
40
 
39
41
  @mcp.tool()
@@ -43,9 +45,13 @@ def preview_semantic_move(
43
45
  ) -> dict:
44
46
  """Preview what a semantic move will do before applying it.
45
47
 
46
- Returns the full compile plan (tool sequence), verification plan,
47
- targets, protection constraints, and risk level. Use this to understand
48
- the impact before committing.
48
+ Returns the static plan_template + verification_plans, PLUS an additive
49
+ compiled_plan field built by compiling the move against a lightweight
50
+ kernel of the current session. Use compiled_plan to inspect the concrete
51
+ tool calls the move would emit right now; use plan_template to understand
52
+ the move's shape independent of session state.
53
+
54
+ Existing callers reading plan_template are unaffected by the addition.
49
55
  """
50
56
  move = registry.get_move(move_id)
51
57
  if not move:
@@ -55,7 +61,46 @@ def preview_semantic_move(
55
61
  "available_moves": available,
56
62
  }
57
63
 
58
- return move.to_full_dict()
64
+ result = move.to_full_dict()
65
+
66
+ # Additive: compile against a lightweight kernel so callers get an
67
+ # executable representation alongside the static plan_template.
68
+ try:
69
+ from ..runtime.session_kernel import build_session_kernel
70
+ from ..runtime.capability_state import build_capability_state
71
+ from . import compiler as move_compiler
72
+
73
+ ableton = None
74
+ if hasattr(ctx, "lifespan_context"):
75
+ ableton = ctx.lifespan_context.get("ableton")
76
+
77
+ session_info: dict = {}
78
+ if ableton is not None:
79
+ try:
80
+ info = ableton.send_command("get_session_info")
81
+ if isinstance(info, dict):
82
+ session_info = info
83
+ except Exception:
84
+ session_info = {}
85
+
86
+ state = build_capability_state(
87
+ session_ok=bool(session_info),
88
+ analyzer_ok=False,
89
+ memory_ok=True,
90
+ )
91
+ kernel = build_session_kernel(
92
+ session_info=session_info,
93
+ capability_state=state.to_dict(),
94
+ )
95
+ plan = move_compiler.compile(move, kernel.to_dict())
96
+ result["compiled_plan"] = plan.to_dict()
97
+ result["compiled_plan_executable"] = bool(plan.executable)
98
+ except Exception as e:
99
+ result["compiled_plan"] = None
100
+ result["compiled_plan_executable"] = False
101
+ result["compiled_plan_error"] = str(e)
102
+
103
+ return result
59
104
 
60
105
 
61
106
  @mcp.tool()
@@ -124,7 +169,7 @@ def propose_next_best_move(
124
169
 
125
170
 
126
171
  @mcp.tool()
127
- def apply_semantic_move(
172
+ async def apply_semantic_move(
128
173
  ctx: Context,
129
174
  move_id: str,
130
175
  mode: str = "improve",
@@ -177,14 +222,22 @@ def apply_semantic_move(
177
222
  result["note"] = "Awaiting approval — present the plan to the user, then execute steps individually"
178
223
  return result
179
224
 
180
- # explore mode — execute through unified router
181
- from ..runtime.execution_router import execute_plan_steps
225
+ # explore mode — execute through the async router
226
+ from ..runtime.execution_router import execute_plan_steps_async
182
227
 
183
228
  step_dicts = [
184
229
  {"tool": step.tool, "params": step.params, "description": step.description}
185
230
  for step in plan.steps
186
231
  ]
187
- exec_results = execute_plan_steps(step_dicts, ableton=ableton, ctx=ctx)
232
+ bridge = ctx.lifespan_context.get("m4l")
233
+ mcp_registry = ctx.lifespan_context.get("mcp_dispatch", {})
234
+ exec_results = await execute_plan_steps_async(
235
+ step_dicts,
236
+ ableton=ableton,
237
+ bridge=bridge,
238
+ mcp_registry=mcp_registry,
239
+ ctx=ctx,
240
+ )
188
241
 
189
242
  executed_steps = []
190
243
  for i, er in enumerate(exec_results):
@@ -10,7 +10,7 @@ INCREASE_FORWARD_MOTION = SemanticMove(
10
10
  targets={"motion": 0.5, "energy": 0.3, "tension": 0.2},
11
11
  protect={"clarity": 0.6},
12
12
  risk_level="low",
13
- compile_plan=[
13
+ plan_template=[
14
14
  {"tool": "apply_automation_shape", "params": {"curve_type": "exponential", "description": "Rising filter cutoff over 4 bars"}, "description": "Rising filter sweep", "backend": "mcp_tool"},
15
15
  {"tool": "set_track_volume", "params": {"description": "Push rhythm elements +5-8%"}, "description": "Push rhythm forward", "backend": "remote_command"},
16
16
  {"tool": "apply_automation_shape", "params": {"curve_type": "linear", "description": "Rising reverb send for anticipation"}, "description": "Build reverb wash", "backend": "mcp_tool"},
@@ -27,7 +27,7 @@ OPEN_CHORUS = SemanticMove(
27
27
  targets={"energy": 0.4, "width": 0.3, "contrast": 0.3},
28
28
  protect={"clarity": 0.6, "cohesion": 0.5},
29
29
  risk_level="medium",
30
- compile_plan=[
30
+ plan_template=[
31
31
  {"tool": "set_track_volume", "params": {"description": "Push all melodic tracks +10-15%"}, "description": "Push chorus energy", "backend": "remote_command"},
32
32
  {"tool": "set_track_pan", "params": {"description": "Widen stereo field on chords/pads"}, "description": "Widen stereo", "backend": "remote_command"},
33
33
  {"tool": "set_track_send", "params": {"description": "Increase reverb/delay sends for spaciousness"}, "description": "Add space", "backend": "remote_command"},
@@ -45,7 +45,7 @@ CREATE_BREAKDOWN = SemanticMove(
45
45
  targets={"contrast": 0.5, "depth": 0.3, "clarity": 0.2},
46
46
  protect={"cohesion": 0.5},
47
47
  risk_level="medium",
48
- compile_plan=[
48
+ plan_template=[
49
49
  {"tool": "set_track_volume", "params": {"description": "Pull drums to 20-30%"}, "description": "Strip drums", "backend": "remote_command"},
50
50
  {"tool": "set_track_volume", "params": {"description": "Pull bass to 30-40%"}, "description": "Reduce bass", "backend": "remote_command"},
51
51
  {"tool": "set_track_send", "params": {"description": "Increase reverb send on remaining elements"}, "description": "Add reverb depth", "backend": "remote_command"},
@@ -62,7 +62,7 @@ BRIDGE_SECTIONS = SemanticMove(
62
62
  targets={"motion": 0.4, "contrast": 0.3, "cohesion": 0.3},
63
63
  protect={"clarity": 0.6},
64
64
  risk_level="low",
65
- compile_plan=[
65
+ plan_template=[
66
66
  {"tool": "apply_automation_shape", "params": {"curve_type": "cosine", "description": "Gentle filter sweep across bridge"}, "description": "Bridge filter motion", "backend": "mcp_tool"},
67
67
  {"tool": "set_track_volume", "params": {"description": "Gentle volume crossfade between section elements"}, "description": "Crossfade elements", "backend": "remote_command"},
68
68
  ],
@@ -76,11 +76,24 @@ async def _warm_analyzer_bridge(
76
76
 
77
77
  @asynccontextmanager
78
78
  async def lifespan(server):
79
- """Create and yield the shared AbletonConnection + M4L bridge."""
79
+ """Create and yield the shared AbletonConnection + M4L bridge + registries."""
80
+ from .runtime.mcp_dispatch import build_mcp_dispatch_registry
81
+ from .splice_client.client import SpliceGRPCClient
82
+
80
83
  ableton = AbletonConnection()
81
84
  spectral = SpectralCache()
82
85
  receiver = SpectralReceiver(spectral)
83
86
  m4l = M4LBridge(spectral, receiver)
87
+ mcp_dispatch = build_mcp_dispatch_registry()
88
+
89
+ # Splice gRPC client — graceful degradation if Splice desktop isn't
90
+ # running or grpcio isn't installed. .connected will be False in that
91
+ # case and sample_resolver treats it as "no splice hits".
92
+ splice_client = SpliceGRPCClient()
93
+ try:
94
+ await splice_client.connect()
95
+ except Exception:
96
+ pass # client remains in disconnected state
84
97
 
85
98
  # Start UDP listener for incoming M4L spectral data (port 9880)
86
99
  loop = asyncio.get_running_loop()
@@ -121,12 +134,18 @@ async def lifespan(server):
121
134
  "spectral": spectral,
122
135
  "m4l": m4l,
123
136
  "_bridge_state": bridge_state,
137
+ "mcp_dispatch": mcp_dispatch,
138
+ "splice_client": splice_client,
124
139
  }
125
140
  finally:
126
141
  if bridge_state["transport"]:
127
142
  bridge_state["transport"].close()
128
143
  m4l.close()
129
144
  ableton.disconnect()
145
+ try:
146
+ await splice_client.disconnect()
147
+ except Exception:
148
+ pass
130
149
 
131
150
 
132
151
  mcp = FastMCP("LivePilot", lifespan=lifespan)
@@ -210,7 +210,10 @@ def rank_by_taste_and_identity(
210
210
  identity_effect = candidate.get("identity_effect", "preserves")
211
211
 
212
212
  # Taste score — how well does this fit cross-session preferences?
213
- boldness_pref = taste_graph.get("transition_boldness", 0.5)
213
+ # Routed through the canonical accessor so dimension_weights.transition_boldness
214
+ # is honored. Previously read the top-level key directly and always got 0.5.
215
+ from ..memory.taste_accessors import get_dimension_pref
216
+ boldness_pref = get_dimension_pref(taste_graph, "transition_boldness", default=0.5)
214
217
  taste_score = 1.0 - abs(novelty - boldness_pref) * 0.8
215
218
  taste_score = round(max(0.0, min(1.0, taste_score)), 3)
216
219
 
@@ -110,6 +110,12 @@ _ROUTING_PATTERNS: list[tuple[str, str, str, str, list[str]]] = [
110
110
  # Research requests
111
111
  (r"research|how.?to|technique|tutorial|learn", "research", "research", "research_technique", []),
112
112
  (r"style.?tactic|production.?style|genre.?approach", "research", "research", "get_style_tactics", []),
113
+
114
+ # Sample requests
115
+ (r"sample|splice|loop|chop|flip|break(?:beat)?|one.?shot", "sample_engine", "sample", "search_samples", ["analyze_sample", "plan_sample_workflow"]),
116
+ (r"slice|transient.?hit|slice.?mode", "sample_engine", "sample", "plan_slice_workflow", ["search_samples"]),
117
+ (r"vocal.?sample|foley|field.?record|found.?sound", "sample_engine", "sample", "search_samples", ["analyze_sample"]),
118
+ (r"texture.?sample|ambient.?sample|atmo.?sample", "sample_engine", "sample", "search_samples", ["suggest_sample_technique"]),
113
119
  ]
114
120
 
115
121
 
@@ -164,6 +170,16 @@ def _infer_workflow_mode(request_lower: str) -> str:
164
170
  if re.search(r"fix|quick|just|only|undo|revert|simple", request_lower):
165
171
  return "quick_fix"
166
172
 
173
+ # Slice workflow
174
+ if re.search(r"slice|chop|transient.?hit", request_lower):
175
+ return "slice_workflow"
176
+
177
+ # Sample workflows
178
+ if re.search(r"sample|splice|foley|found.?sound|one.?shot|break(?:beat)?|flip|loop", request_lower):
179
+ if re.search(r"arrange|section|verse|chorus|drop|bridge|hook", request_lower):
180
+ return "sample_plus_arrangement"
181
+ return "sample_discovery"
182
+
167
183
  # Agentic loop keywords (full autonomous)
168
184
  if re.search(r"autonomous|auto|full|everything|deep|polish|finish", request_lower):
169
185
  return "agentic_loop"
@@ -164,6 +164,8 @@ class SectionPlan:
164
164
  tracks_entering: list[int] # new elements introduced in this section
165
165
  tracks_exiting: list[int] # elements removed in this section
166
166
 
167
+ sample_hints: list[str] = field(default_factory=list)
168
+
167
169
  def length_bars(self) -> int:
168
170
  return self.end_bar - self.start_bar
169
171
 
@@ -196,6 +198,28 @@ class ArrangementPlan:
196
198
  }
197
199
 
198
200
 
201
+ # ── Section Sample Hints ─────────────────────────────────────────────
202
+
203
+ _SECTION_SAMPLE_DEFAULTS: dict[str, list[str]] = {
204
+ "intro": ["texture_bed", "fill_one_shot"],
205
+ "verse": ["texture_bed", "fill_one_shot"],
206
+ "pre_chorus": ["transition_fx", "texture_bed"],
207
+ "chorus": ["hook_sample", "break_layer", "fill_one_shot"],
208
+ "drop": ["hook_sample", "break_layer", "fill_one_shot"],
209
+ "build": ["transition_fx", "texture_bed"],
210
+ "bridge": ["texture_bed", "transition_fx"],
211
+ "breakdown": ["texture_bed"],
212
+ "outro": ["texture_bed", "fill_one_shot"],
213
+ }
214
+
215
+
216
+ def add_sample_hints(plan: "ArrangementPlan") -> None:
217
+ """Populate sample_hints on each section based on section type."""
218
+ for section in plan.sections:
219
+ section_key = section.section_type.value.lower()
220
+ section.sample_hints = _SECTION_SAMPLE_DEFAULTS.get(section_key, ["texture_bed"])
221
+
222
+
199
223
  # ── Core Planner ─────────────────────────────────────────────────────
200
224
 
201
225
  def plan_arrangement_from_loop(
@@ -381,6 +381,8 @@ async def load_sample_to_simpler(
381
381
  return {"error": "Sample replacement failed after bootstrap"}
382
382
 
383
383
  result["method"] = "bootstrap_and_replace"
384
+ result["device_index"] = actual_device_index # additive — for step-result binding
385
+ result["track_index"] = track_index
384
386
  return result
385
387
 
386
388
 
@@ -88,6 +88,9 @@ def plan_arrangement(
88
88
  style=style,
89
89
  )
90
90
 
91
+ # Add section-level sample role hints
92
+ planner_engine.add_sample_hints(plan)
93
+
91
94
  result = plan.to_dict()
92
95
  result["loop_identity"] = loop_identity.to_dict()
93
96
  result["available_styles"] = sorted(planner_engine.VALID_STYLES)
@@ -25,11 +25,11 @@ def discover_moves(
25
25
  """Find semantic moves relevant to the request.
26
26
 
27
27
  Uses keyword scoring + optional taste reranking + constraint filtering.
28
- Returns full move dicts including compile_plan (via registry.get_move).
28
+ Returns full move dicts including plan_template (via registry.get_move).
29
29
  """
30
30
  from ..semantic_moves import registry
31
31
 
32
- all_moves = registry.list_moves() # returns to_dict() — no compile_plan
32
+ all_moves = registry.list_moves() # returns to_dict() — no plan_template
33
33
  if not all_moves:
34
34
  return []
35
35
 
@@ -77,7 +77,7 @@ def discover_moves(
77
77
 
78
78
  scored.sort(key=lambda x: -x[1])
79
79
 
80
- # Enrich with full compile_plan via get_move()
80
+ # Enrich with full plan_template via get_move()
81
81
  result = []
82
82
  for move_dict, score in scored:
83
83
  full_move = registry.get_move(move_dict["move_id"])
@@ -98,7 +98,7 @@ def discover_moves(
98
98
  for move in result:
99
99
  plan = {"steps": [
100
100
  {"action": step.get("tool", ""), **step}
101
- for step in (move.get("compile_plan") or [])
101
+ for step in (move.get("plan_template") or [])
102
102
  ]}
103
103
  validation = validate_plan_against_constraints(plan, active_constraints)
104
104
  if validation["valid"]:
@@ -137,9 +137,9 @@ def _with_envelope(move: dict, tier: str) -> dict:
137
137
  # ── Distinctness selection ───────────────────────────────────────
138
138
 
139
139
 
140
- def _compile_plan_shape(move: dict) -> frozenset[str]:
141
- """Extract the set of tool names from a move's compile_plan."""
142
- plan = move.get("compile_plan") or []
140
+ def _plan_template_shape(move: dict) -> frozenset[str]:
141
+ """Extract the set of tool names from a move's plan_template."""
142
+ plan = move.get("plan_template") or []
143
143
  return frozenset(step.get("tool", "") for step in plan if step.get("tool"))
144
144
 
145
145
 
@@ -147,7 +147,7 @@ def select_distinct_variants(scored_moves: list[dict]) -> list[dict]:
147
147
  """Select genuinely distinct moves for variant generation.
148
148
 
149
149
  Each selected move must differ from all previously selected moves by
150
- at least one of: move_id, family, or compile_plan shape.
150
+ at least one of: move_id, family, or plan_template shape.
151
151
  Returns 0-3 moves.
152
152
  """
153
153
  if not scored_moves:
@@ -160,7 +160,7 @@ def select_distinct_variants(scored_moves: list[dict]) -> list[dict]:
160
160
  for move in scored_moves:
161
161
  mid = move.get("move_id", "")
162
162
  family = move.get("family", "")
163
- shape = _compile_plan_shape(move)
163
+ shape = _plan_template_shape(move)
164
164
 
165
165
  # Skip duplicate move_ids
166
166
  if mid in used_ids:
@@ -190,14 +190,45 @@ _NOVELTY_LEVELS = {"safe": 0.25, "strong": 0.55, "unexpected": 0.85}
190
190
  _RISK_TO_EFFECT = {"low": "preserves", "medium": "evolves", "high": "contrasts"}
191
191
 
192
192
 
193
+ def _compile_variant_plan(move_dict: dict, kernel: dict | None) -> dict | None:
194
+ """Compile a move through the semantic compiler if possible.
195
+
196
+ Returns CompiledPlan.to_dict() or None if no compiler is registered.
197
+ """
198
+ if kernel is None:
199
+ return None
200
+
201
+ move_id = move_dict.get("move_id", "")
202
+ from ..semantic_moves.compiler import compile as sem_compile, _COMPILERS
203
+ from ..semantic_moves import registry
204
+
205
+ if move_id not in _COMPILERS:
206
+ return None
207
+
208
+ move_obj = registry.get_move(move_id)
209
+ if move_obj is None:
210
+ return None
211
+
212
+ try:
213
+ plan = sem_compile(move_obj, kernel)
214
+ return plan.to_dict()
215
+ except Exception:
216
+ return None
217
+
218
+
193
219
  def build_variant(
194
220
  label: str,
195
221
  move_dict: dict,
196
222
  song_brain: Optional[dict] = None,
197
223
  novelty_level: float = 0.5,
198
224
  variant_id: str = "",
225
+ kernel: dict | None = None,
199
226
  ) -> dict:
200
- """Build a variant dict from a real move + SongBrain context."""
227
+ """Build a variant dict from a real move + SongBrain context.
228
+
229
+ If kernel is provided, compiles the move through the semantic compiler
230
+ for an executable plan. Otherwise falls back to plan_template metadata.
231
+ """
201
232
  song_brain = song_brain or {}
202
233
  targets = move_dict.get("targets", {})
203
234
  protect = move_dict.get("protect", {})
@@ -226,6 +257,10 @@ def build_variant(
226
257
  if sacred and identity_effect == "preserves":
227
258
  why += f". Preserves {sacred[0].get('description', 'sacred elements')}"
228
259
 
260
+ # Compile through semantic compiler if kernel available
261
+ compiled = _compile_variant_plan(move_dict, kernel)
262
+ analytical = compiled is None
263
+
229
264
  return {
230
265
  "variant_id": variant_id,
231
266
  "label": label,
@@ -239,11 +274,11 @@ def build_variant(
239
274
  "novelty_level": novelty_level,
240
275
  "taste_fit": 0.5,
241
276
  "targets_snapshot": dict(targets),
242
- "compiled_plan": move_dict.get("compile_plan"),
277
+ "compiled_plan": compiled,
243
278
  "score": 0.0,
244
279
  "rank": 0,
245
280
  "score_breakdown": {},
246
- "analytical_only": False,
281
+ "analytical_only": analytical,
247
282
  "distinctness_reason": "",
248
283
  }
249
284
 
@@ -479,6 +514,8 @@ def generate_wonder_variants(
479
514
  song_brain: dict | None = None,
480
515
  taste_graph: object = None,
481
516
  active_constraints: object = None,
517
+ session_info: dict | None = None,
518
+ sample_context: dict | None = None,
482
519
  ) -> dict:
483
520
  """Full wonder mode pipeline: discover -> select distinct -> build -> taste -> rank."""
484
521
  song_brain = song_brain or {}
@@ -495,6 +532,14 @@ def generate_wonder_variants(
495
532
  # Load corpus intelligence for variant enrichment
496
533
  corpus_hints = _get_corpus_hints(request_text, diagnosis)
497
534
 
535
+ # Build kernel for variant compilation
536
+ kernel = {
537
+ "session_info": session_info or {},
538
+ "mode": "improve",
539
+ }
540
+ if sample_context:
541
+ kernel.update(sample_context)
542
+
498
543
  # Build executable variants from distinct moves
499
544
  for i, move in enumerate(distinct):
500
545
  label = labels[i]
@@ -505,6 +550,7 @@ def generate_wonder_variants(
505
550
  song_brain=song_brain,
506
551
  novelty_level=_NOVELTY_LEVELS.get(label, 0.5),
507
552
  variant_id=f"{set_prefix}_{label}",
553
+ kernel=kernel,
508
554
  )
509
555
  if taste_graph is not None:
510
556
  # Score taste on envelope-adjusted move for consistency with targets_snapshot
@@ -567,7 +613,7 @@ def _explain_distinctness(move: dict, all_moves: list[dict], index: int) -> str:
567
613
 
568
614
  if family not in other_families:
569
615
  return f"Different family: {family}"
570
- shape = _compile_plan_shape(move)
616
+ shape = _plan_template_shape(move)
571
617
  return f"Different approach: {', '.join(sorted(shape))}"
572
618
 
573
619
 
@@ -126,14 +126,46 @@ def enter_wonder_mode(
126
126
  action_ledger=action_ledger,
127
127
  )
128
128
 
129
+ # 1b. If diagnosis includes sample domains, search for candidates
130
+ sample_context = {}
131
+ diag_dict = diagnosis.to_dict()
132
+ candidate_domains = diag_dict.get("candidate_domains") or []
133
+ if "sample" in candidate_domains:
134
+ try:
135
+ from ..sample_engine.tools import get_sample_opportunities, search_samples
136
+ opportunities = get_sample_opportunities(ctx)
137
+ if opportunities.get("opportunities"):
138
+ opp = opportunities["opportunities"][0]
139
+ query = opp.get("search_query", opp.get("description", "sample"))
140
+ results = search_samples(ctx, query=query, max_results=3)
141
+ candidates = results.get("results", [])
142
+ if candidates:
143
+ best = candidates[0]
144
+ sample_context["sample_file_path"] = best.get("file_path", "")
145
+ sample_context["sample_name"] = best.get("name", "")
146
+ sample_context["material_type"] = best.get("material_type", "")
147
+ except Exception:
148
+ pass # Graceful degradation — analytical variants still work
149
+
150
+ # 1c. Get session info for kernel
151
+ session_info = {}
152
+ try:
153
+ ableton = ctx.lifespan_context.get("ableton")
154
+ if ableton:
155
+ session_info = ableton.send_command("get_session_info", {})
156
+ except Exception:
157
+ pass
158
+
129
159
  # 2. Generate variants
130
160
  result = engine.generate_wonder_variants(
131
161
  request_text=request_text,
132
- diagnosis=diagnosis.to_dict(),
162
+ diagnosis=diag_dict,
133
163
  kernel_id=kernel_id,
134
164
  song_brain=song_brain,
135
165
  taste_graph=taste_graph,
136
166
  active_constraints=active_constraints,
167
+ session_info=session_info,
168
+ sample_context=sample_context,
137
169
  )
138
170
 
139
171
  # 3. Create WonderSession (unique per invocation, not deterministic)
package/package.json CHANGED
@@ -1,10 +1,10 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.10.0",
3
+ "version": "1.10.2",
4
4
  "mcpName": "io.github.dreamrec/livepilot",
5
- "description": "Agentic production system for Ableton Live 12 — 316 tools, 43 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
5
+ "description": "Agentic production system for Ableton Live 12 — 317 tools, 43 domains. Device atlas (1305 devices), sample engine (Splice + browser + filesystem), auto-composition, spectral perception, technique memory, creative intelligence (12 engines)",
6
6
  "author": "Pilot Studio",
7
- "license": "MIT",
7
+ "license": "BSL-1.1",
8
8
  "type": "commonjs",
9
9
  "bin": {
10
10
  "livepilot": "./bin/livepilot.js"
@@ -18,10 +18,6 @@
18
18
  "url": "https://github.com/dreamrec/LivePilot/issues"
19
19
  },
20
20
  "funding": [
21
- {
22
- "type": "patreon",
23
- "url": "https://www.patreon.com/c/dreamrec"
24
- },
25
21
  {
26
22
  "type": "github",
27
23
  "url": "https://github.com/sponsors/dreamrec"
@@ -39,7 +35,11 @@
39
35
  "ai",
40
36
  "sound-design",
41
37
  "mixing",
42
- "arrangement"
38
+ "arrangement",
39
+ "splice",
40
+ "sample-engine",
41
+ "auto-composition",
42
+ "device-atlas"
43
43
  ],
44
44
  "engines": {
45
45
  "node": ">=18.0.0"
@@ -5,7 +5,7 @@ Entry point for the ControlSurface. Ableton calls create_instance(c_instance)
5
5
  when this script is selected in Preferences > Link, Tempo & MIDI.
6
6
  """
7
7
 
8
- __version__ = "1.10.0"
8
+ __version__ = "1.10.2"
9
9
 
10
10
  from _Framework.ControlSurface import ControlSurface
11
11
  from .server import LivePilotServer
@@ -508,21 +508,31 @@ def insert_device(song, params):
508
508
  device = chain.insert_device(canonical, position)
509
509
  else:
510
510
  device = chain.insert_device(canonical)
511
+ container_devices = list(chain.devices)
511
512
  else:
512
513
  # Track-level insertion
513
514
  if position >= 0:
514
515
  device = track.insert_device(canonical, position)
515
516
  else:
516
517
  device = track.insert_device(canonical)
518
+ container_devices = list(track.devices)
517
519
  finally:
518
520
  song.end_undo_step()
519
521
 
522
+ # Resolve the index the newly-inserted device landed at so callers can
523
+ # bind later parameter/chain operations to it (composer plans rely on this).
524
+ try:
525
+ inserted_index = container_devices.index(device)
526
+ except ValueError:
527
+ inserted_index = len(container_devices) - 1
528
+
520
529
  # Read back the device info — use "loaded" key to match
521
530
  # the convention expected by _postflight_loaded_device on MCP side
522
531
  result = {
523
532
  "loaded": device.name,
524
533
  "class_name": device.class_name,
525
534
  "track_index": track_index,
535
+ "device_index": inserted_index, # additive — for step-result binding
526
536
  "parameter_count": len(list(device.parameters)),
527
537
  }
528
538
  if position >= 0: