livepilot 1.10.7 → 1.10.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. package/CHANGELOG.md +126 -0
  2. package/README.md +11 -9
  3. package/bin/livepilot.js +146 -28
  4. package/installer/install.js +117 -11
  5. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  6. package/m4l_device/livepilot_bridge.js +1 -1
  7. package/mcp_server/__init__.py +1 -1
  8. package/mcp_server/atlas/__init__.py +39 -7
  9. package/mcp_server/atlas/tools.py +56 -15
  10. package/mcp_server/composer/layer_planner.py +27 -0
  11. package/mcp_server/composer/prompt_parser.py +15 -6
  12. package/mcp_server/connection.py +11 -3
  13. package/mcp_server/corpus/__init__.py +14 -4
  14. package/mcp_server/m4l_bridge.py +48 -7
  15. package/mcp_server/runtime/execution_router.py +16 -2
  16. package/mcp_server/runtime/remote_commands.py +6 -0
  17. package/mcp_server/sample_engine/models.py +22 -3
  18. package/mcp_server/semantic_moves/__init__.py +1 -0
  19. package/mcp_server/semantic_moves/compiler.py +9 -1
  20. package/mcp_server/semantic_moves/device_creation_compilers.py +47 -0
  21. package/mcp_server/semantic_moves/mix_compilers.py +170 -0
  22. package/mcp_server/semantic_moves/mix_moves.py +1 -1
  23. package/mcp_server/semantic_moves/models.py +5 -0
  24. package/mcp_server/semantic_moves/tools.py +15 -4
  25. package/mcp_server/server.py +7 -3
  26. package/mcp_server/services/singletons.py +68 -0
  27. package/mcp_server/splice_client/client.py +29 -8
  28. package/mcp_server/tools/analyzer.py +7 -6
  29. package/mcp_server/tools/clips.py +1 -1
  30. package/mcp_server/tools/midi_io.py +10 -0
  31. package/mcp_server/tools/tracks.py +1 -1
  32. package/mcp_server/tools/transport.py +1 -1
  33. package/mcp_server/translation_engine/tools.py +8 -4
  34. package/package.json +25 -3
  35. package/remote_script/LivePilot/__init__.py +29 -9
  36. package/remote_script/LivePilot/arrangement.py +12 -2
  37. package/remote_script/LivePilot/browser.py +16 -6
  38. package/remote_script/LivePilot/devices.py +10 -5
  39. package/remote_script/LivePilot/notes.py +13 -2
  40. package/remote_script/LivePilot/server.py +51 -13
  41. package/remote_script/LivePilot/version_detect.py +7 -4
  42. package/server.json +20 -0
  43. package/.claude-plugin/marketplace.json +0 -21
  44. package/.mcp.json.disabled +0 -9
  45. package/.mcpbignore +0 -60
  46. package/AGENTS.md +0 -46
  47. package/BUGS.md +0 -1570
  48. package/CODE_OF_CONDUCT.md +0 -27
  49. package/CONTRIBUTING.md +0 -131
  50. package/SECURITY.md +0 -48
  51. package/livepilot/.Codex-plugin/plugin.json +0 -8
  52. package/livepilot/.claude-plugin/plugin.json +0 -8
  53. package/livepilot/agents/livepilot-producer/AGENT.md +0 -313
  54. package/livepilot/commands/arrange.md +0 -47
  55. package/livepilot/commands/beat.md +0 -77
  56. package/livepilot/commands/evaluate.md +0 -49
  57. package/livepilot/commands/memory.md +0 -22
  58. package/livepilot/commands/mix.md +0 -44
  59. package/livepilot/commands/perform.md +0 -42
  60. package/livepilot/commands/session.md +0 -13
  61. package/livepilot/commands/sounddesign.md +0 -43
  62. package/livepilot/skills/livepilot-arrangement/SKILL.md +0 -155
  63. package/livepilot/skills/livepilot-composition-engine/SKILL.md +0 -107
  64. package/livepilot/skills/livepilot-composition-engine/references/form-patterns.md +0 -97
  65. package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +0 -102
  66. package/livepilot/skills/livepilot-core/SKILL.md +0 -184
  67. package/livepilot/skills/livepilot-core/references/ableton-workflow-patterns.md +0 -831
  68. package/livepilot/skills/livepilot-core/references/automation-atlas.md +0 -272
  69. package/livepilot/skills/livepilot-core/references/device-atlas/00-index.md +0 -110
  70. package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +0 -687
  71. package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +0 -753
  72. package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +0 -525
  73. package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +0 -402
  74. package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +0 -963
  75. package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +0 -874
  76. package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +0 -571
  77. package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +0 -714
  78. package/livepilot/skills/livepilot-core/references/device-atlas/synths-native.md +0 -953
  79. package/livepilot/skills/livepilot-core/references/device-knowledge/00-index.md +0 -34
  80. package/livepilot/skills/livepilot-core/references/device-knowledge/automation-as-music.md +0 -204
  81. package/livepilot/skills/livepilot-core/references/device-knowledge/chains-genre.md +0 -173
  82. package/livepilot/skills/livepilot-core/references/device-knowledge/creative-thinking.md +0 -211
  83. package/livepilot/skills/livepilot-core/references/device-knowledge/effects-distortion.md +0 -188
  84. package/livepilot/skills/livepilot-core/references/device-knowledge/effects-space.md +0 -162
  85. package/livepilot/skills/livepilot-core/references/device-knowledge/effects-spectral.md +0 -229
  86. package/livepilot/skills/livepilot-core/references/device-knowledge/instruments-synths.md +0 -243
  87. package/livepilot/skills/livepilot-core/references/m4l-devices.md +0 -352
  88. package/livepilot/skills/livepilot-core/references/memory-guide.md +0 -107
  89. package/livepilot/skills/livepilot-core/references/midi-recipes.md +0 -402
  90. package/livepilot/skills/livepilot-core/references/mixing-patterns.md +0 -578
  91. package/livepilot/skills/livepilot-core/references/overview.md +0 -290
  92. package/livepilot/skills/livepilot-core/references/sample-manipulation.md +0 -724
  93. package/livepilot/skills/livepilot-core/references/sound-design-deep.md +0 -140
  94. package/livepilot/skills/livepilot-core/references/sound-design.md +0 -393
  95. package/livepilot/skills/livepilot-devices/SKILL.md +0 -169
  96. package/livepilot/skills/livepilot-evaluation/SKILL.md +0 -156
  97. package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +0 -118
  98. package/livepilot/skills/livepilot-evaluation/references/evaluation-contracts.md +0 -121
  99. package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +0 -110
  100. package/livepilot/skills/livepilot-mix-engine/SKILL.md +0 -123
  101. package/livepilot/skills/livepilot-mix-engine/references/mix-critics.md +0 -143
  102. package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +0 -105
  103. package/livepilot/skills/livepilot-mixing/SKILL.md +0 -157
  104. package/livepilot/skills/livepilot-notes/SKILL.md +0 -130
  105. package/livepilot/skills/livepilot-performance-engine/SKILL.md +0 -122
  106. package/livepilot/skills/livepilot-performance-engine/references/performance-safety.md +0 -98
  107. package/livepilot/skills/livepilot-release/SKILL.md +0 -130
  108. package/livepilot/skills/livepilot-sample-engine/SKILL.md +0 -105
  109. package/livepilot/skills/livepilot-sample-engine/references/sample-critics.md +0 -87
  110. package/livepilot/skills/livepilot-sample-engine/references/sample-philosophy.md +0 -51
  111. package/livepilot/skills/livepilot-sample-engine/references/sample-techniques.md +0 -131
  112. package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +0 -168
  113. package/livepilot/skills/livepilot-sound-design-engine/references/patch-model.md +0 -119
  114. package/livepilot/skills/livepilot-sound-design-engine/references/sound-design-critics.md +0 -118
  115. package/livepilot/skills/livepilot-wonder/SKILL.md +0 -79
  116. package/m4l_device/LivePilot_Analyzer.amxd.pre-presentation-backup +0 -0
  117. package/m4l_device/LivePilot_Analyzer.maxpat +0 -2705
  118. package/m4l_device/LivePilot_Analyzer.maxproj +0 -53
  119. package/manifest.json +0 -91
  120. package/mcp_server/splice_client/protos/app_pb2.pyi +0 -1153
  121. package/scripts/generate_tool_catalog.py +0 -106
  122. package/scripts/sync_metadata.py +0 -349
@@ -1,2 +1,2 @@
1
1
  """LivePilot MCP Server — bridges MCP protocol to Ableton Live."""
2
- __version__ = "1.10.7"
2
+ __version__ = "1.10.8"
@@ -411,14 +411,46 @@ class AtlasManager:
411
411
 
412
412
 
413
413
  # ── Module-level lazy loader ───────────────────────────────────────
414
+ #
415
+ # Thread-safe via services.singletons.Singleton. The previous check-then-set
416
+ # pattern raced under FastMCP concurrency (two handlers could both construct
417
+ # AtlasManager) and never refreshed the in-memory index after a rebuild of
418
+ # device_atlas.json on disk. The Singleton helper handles both.
419
+ #
420
+ # The ``_atlas_instance`` module attribute is preserved for backward
421
+ # compatibility with call sites that read it directly (atlas/tools.py),
422
+ # but new code should call ``get_atlas()`` / ``invalidate_atlas()`` instead.
414
423
 
415
- _atlas_instance: Optional[AtlasManager] = None
424
+ from pathlib import Path
425
+ from ..services.singletons import Singleton
416
426
 
427
+ ATLAS_PATH = Path(__file__).parent / "device_atlas.json"
417
428
 
418
- def _load_atlas() -> AtlasManager:
419
- """Lazy-load the atlas from device_atlas.json in the same directory."""
429
+ _atlas_instance: Optional[AtlasManager] = None # kept for legacy imports
430
+
431
+
432
+ def _build_atlas() -> AtlasManager:
433
+ return AtlasManager(str(ATLAS_PATH))
434
+
435
+
436
+ _atlas_holder = Singleton(_build_atlas)
437
+
438
+
439
+ def get_atlas() -> AtlasManager:
440
+ """Thread-safe accessor. Re-reads device_atlas.json if its mtime advanced."""
420
441
  global _atlas_instance
421
- if _atlas_instance is None:
422
- atlas_path = os.path.join(os.path.dirname(__file__), "device_atlas.json")
423
- _atlas_instance = AtlasManager(atlas_path)
424
- return _atlas_instance
442
+ instance = _atlas_holder.get(reload_if_newer=ATLAS_PATH)
443
+ _atlas_instance = instance # keep legacy attribute in sync
444
+ return instance
445
+
446
+
447
+ def invalidate_atlas() -> None:
448
+ """Force the next get_atlas() to re-read device_atlas.json from disk."""
449
+ global _atlas_instance
450
+ _atlas_holder.invalidate()
451
+ _atlas_instance = None
452
+
453
+
454
+ def _load_atlas() -> AtlasManager:
455
+ """Legacy shim — kept so atlas/tools.py still works. Prefer get_atlas()."""
456
+ return get_atlas()
@@ -19,15 +19,17 @@ def _get_ableton(ctx: Context):
19
19
 
20
20
 
21
21
  def _get_atlas():
22
- """Get the global AtlasManager instance, loading lazily if needed."""
23
- from . import _atlas_instance, _load_atlas
24
- if _atlas_instance is None:
25
- try:
26
- _load_atlas()
27
- except FileNotFoundError:
28
- return None
29
- from . import _atlas_instance as inst
30
- return inst
22
+ """Get the global AtlasManager instance, loading lazily if needed.
23
+
24
+ Uses the thread-safe singleton helper — concurrent FastMCP tool calls no
25
+ longer race on the check-then-set, and the atlas auto-reloads from disk
26
+ if device_atlas.json's mtime advanced (e.g. after scan_full_library).
27
+ """
28
+ from . import get_atlas
29
+ try:
30
+ return get_atlas()
31
+ except FileNotFoundError:
32
+ return None
31
33
 
32
34
 
33
35
  @mcp.tool()
@@ -197,23 +199,44 @@ def scan_full_library(ctx: Context, force: bool = False) -> dict:
197
199
  stats[cat] = stats.get(cat, 0) + 1
198
200
  stats["enriched_devices"] = sum(1 for d in devices if d.get("enriched"))
199
201
 
202
+ # Read the actual running Live version from the session rather than
203
+ # hardcoding "12.3.6" — the hardcoded string was baking last year's
204
+ # version into every new user's atlas until they forced a rescan.
205
+ try:
206
+ session_info = ableton.send_command("get_session_info", {}) or {}
207
+ live_version = session_info.get("live_version", "unknown")
208
+ except Exception:
209
+ live_version = "unknown"
210
+
200
211
  # Build atlas
201
212
  atlas_data = {
202
213
  "version": "2.0.0",
203
- "live_version": "12.3.6",
214
+ "live_version": live_version,
204
215
  "scanned_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
205
216
  "stats": stats,
206
217
  "devices": devices,
207
218
  "packs": [],
208
219
  }
209
220
 
210
- # Write
211
- with open(atlas_path, "w") as f:
221
+ # Atomic write: tmp + rename. Same pattern as PersistentJsonStore. Previous
222
+ # version used open(atlas_path, "w") + json.dump with no fsync, so a crash
223
+ # mid-write produced a truncated JSON file that the next AtlasManager init
224
+ # silently read as empty-dict — devices vanished from memory.
225
+ tmp_path = atlas_path + ".tmp"
226
+ with open(tmp_path, "w") as f:
212
227
  json.dump(atlas_data, f, indent=2)
213
-
214
- # Reload into global
228
+ f.flush()
229
+ try:
230
+ os.fsync(f.fileno())
231
+ except OSError:
232
+ # fsync may be unavailable on some filesystems/Windows paths —
233
+ # best-effort; the rename below is still atomic on POSIX.
234
+ pass
235
+ os.replace(tmp_path, atlas_path)
236
+
237
+ # Invalidate singleton so next get_atlas() picks up the new file.
215
238
  import mcp_server.atlas as atlas_mod
216
- atlas_mod._atlas_instance = AtlasManager(atlas_path)
239
+ atlas_mod.invalidate_atlas()
217
240
 
218
241
  return {
219
242
  "status": "scanned",
@@ -222,3 +245,21 @@ def scan_full_library(ctx: Context, force: bool = False) -> dict:
222
245
  "stats": stats,
223
246
  "atlas_path": atlas_path,
224
247
  }
248
+
249
+
250
+ @mcp.tool()
251
+ def reload_atlas(ctx: Context) -> dict:
252
+ """Force the atlas to re-read device_atlas.json from disk.
253
+
254
+ Useful after an out-of-band rebuild (e.g. a manual edit to the JSON file,
255
+ or a scan that crashed before invalidating the cache). The next search /
256
+ suggest / compare call will see the fresh data. No-op if the atlas has
257
+ never been loaded — the first real call will load it fresh anyway.
258
+ """
259
+ from . import invalidate_atlas, get_atlas
260
+ invalidate_atlas()
261
+ atlas = get_atlas()
262
+ return {
263
+ "reloaded": True,
264
+ "device_count": atlas.device_count if atlas else 0,
265
+ }
@@ -403,6 +403,33 @@ def plan_sections(intent: CompositionIntent) -> list[dict]:
403
403
  })
404
404
  current_bar += scaled_bars
405
405
 
406
+ # Clamp overshoot. Rounding each section up to the nearest 4 bars plus
407
+ # the min-of-4-bars floor means a short duration_bars (e.g. 16) against
408
+ # a 6-section template could produce 24+ bars of sections — a 50%
409
+ # overshoot that pushed arrangement clips into unexpected territory.
410
+ # Trim from the longest non-intro section until we fit.
411
+ total_placed = sum(s["bars"] for s in sections)
412
+ overshoot = total_placed - intent.duration_bars
413
+ if overshoot > 0 and sections:
414
+ # Sort indices by section length desc, skipping the first section
415
+ # (usually intro) which we'd rather preserve at its snapped length.
416
+ trimmable = sorted(
417
+ range(1, len(sections)),
418
+ key=lambda i: -sections[i]["bars"],
419
+ ) or [0]
420
+ i = 0
421
+ while overshoot > 0 and i < len(trimmable) * 4:
422
+ idx = trimmable[i % len(trimmable)]
423
+ if sections[idx]["bars"] > 4:
424
+ sections[idx]["bars"] -= 4
425
+ overshoot -= 4
426
+ i += 1
427
+ # Recompute start_bar values after any trim
428
+ running = 0
429
+ for s in sections:
430
+ s["start_bar"] = running
431
+ running += s["bars"]
432
+
406
433
  return sections
407
434
 
408
435
 
@@ -202,9 +202,15 @@ _ELEMENT_PATTERNS: list[tuple[str, str]] = [
202
202
 
203
203
  _TEMPO_RE = re.compile(r"\b(\d{2,3})\s*bpm\b", re.IGNORECASE)
204
204
 
205
- # Key patterns: C, Cm, C#, C# minor, Db, Dbm, F# minor, Bb major
205
+ # Key patterns: must have either an accidental (C#, Db) OR an explicit
206
+ # quality word (C minor, F major, Am). The previous regex made the
207
+ # quality group optional AND allowed a bare letter — so "dark ambient"
208
+ # matched D as a key root, silently overwriting any mood-inferred key.
206
209
  _KEY_RE = re.compile(
207
- r"\b([A-Ga-g][#b]?)\s*(minor|major|min|maj|m)?\b"
210
+ # Case 1: root + quality word (explicit minor/major/min/maj/m suffix)
211
+ r"\b([A-Ga-g])\s*(minor|major|min|maj|m)\b"
212
+ # Case 2: root + accidental (optional quality)
213
+ r"|\b([A-Ga-g][#b])\s*(minor|major|min|maj|m)?\b"
208
214
  )
209
215
 
210
216
 
@@ -228,19 +234,22 @@ def parse_prompt(text: str) -> CompositionIntent:
228
234
  intent.tempo = int(tempo_match.group(1))
229
235
 
230
236
  # 2. Extract key (search original text to preserve case)
237
+ # Regex has TWO alternations (root+quality OR root-with-accidental
238
+ # +optional-quality). Take whichever branch matched.
231
239
  key_match = _KEY_RE.search(text)
232
240
  if key_match:
233
- root = key_match.group(1)
234
- # Normalize root: uppercase first letter
241
+ root = key_match.group(1) or key_match.group(3)
242
+ quality = key_match.group(2) or key_match.group(4) or ""
243
+ # Normalize root: uppercase first letter, preserve accidental
235
244
  root = root[0].upper() + root[1:] if len(root) > 1 else root.upper()
236
- quality = key_match.group(2) or ""
237
245
  quality_lower = quality.lower()
238
246
  if quality_lower in ("minor", "min", "m"):
239
247
  intent.key = f"{root}m"
240
248
  elif quality_lower in ("major", "maj"):
241
249
  intent.key = root
242
250
  else:
243
- # Standalone note check if followed by 'm' in the original
251
+ # Only reached when Case 2 matched without a quality word — an
252
+ # accidental was present (C#, Db), so this IS a legit key root.
244
253
  intent.key = root
245
254
 
246
255
  # 3. Match genre (check aliases first, then canonical names)
@@ -213,9 +213,17 @@ class AbletonConnection:
213
213
  # The single-client guard can briefly reject an immediate reconnect
214
214
  # after this process closes a previous socket. Retry once after a
215
215
  # short delay when the command was rejected before execution.
216
- if fresh_connect and _is_single_client_state_error(response):
217
- self.disconnect()
218
- time.sleep(SINGLE_CLIENT_RETRY_DELAY)
216
+ #
217
+ # IMPORTANT: release the lock around the sleep so concurrent tool
218
+ # calls are not blocked on an idle timer. The previous version
219
+ # slept 250ms while holding the lock, which stalled every other
220
+ # async MCP handler in the server.
221
+ needs_retry = fresh_connect and _is_single_client_state_error(response)
222
+
223
+ if needs_retry:
224
+ self.disconnect()
225
+ time.sleep(SINGLE_CLIENT_RETRY_DELAY)
226
+ with self._lock:
219
227
  self.connect()
220
228
  response = self._send_raw(
221
229
  command,
@@ -365,13 +365,23 @@ def load_corpus() -> Corpus:
365
365
 
366
366
 
367
367
  # ── Module-level lazy singleton ─────────────────────────────────────────
368
+ #
369
+ # Thread-safe via services.singletons.Singleton — concurrent FastMCP
370
+ # handlers can no longer both trigger load_corpus() (which did heavy
371
+ # filesystem I/O) on a cold start.
368
372
 
373
+ from ..services.singletons import Singleton
374
+
375
+ _corpus_holder = Singleton(load_corpus)
376
+
377
+ # Preserved for backward compatibility with any code that reads the legacy
378
+ # attribute directly.
369
379
  _corpus_instance: Optional[Corpus] = None
370
380
 
371
381
 
372
382
  def get_corpus() -> Corpus:
373
- """Get the global corpus instance (lazy-loaded on first call)."""
383
+ """Get the global corpus instance (lazy-loaded, thread-safe)."""
374
384
  global _corpus_instance
375
- if _corpus_instance is None:
376
- _corpus_instance = load_corpus()
377
- return _corpus_instance
385
+ instance = _corpus_holder.get()
386
+ _corpus_instance = instance
387
+ return instance
@@ -303,13 +303,43 @@ class SpectralReceiver(asyncio.DatagramProtocol):
303
303
  print(f"LivePilot: failed to decode bridge response: {exc}", file=sys.stderr)
304
304
 
305
305
  def _handle_chunk(self, index: int, total: int, encoded: str) -> None:
306
- """Reassemble chunked responses."""
306
+ """Reassemble chunked responses.
307
+
308
+ The previous implementation incremented ``_chunk_id`` only when
309
+ ``index == 0`` and assumed the first chunk always arrived first.
310
+ Under UDP reordering (rare on loopback but possible under system
311
+ load), a chunk with ``index > 0`` arriving before ``index 0`` would
312
+ be dropped into the PREVIOUS sequence's bucket — silently corrupting
313
+ that earlier response's payload.
314
+
315
+ Until the wire protocol adds an explicit sequence id, the safer
316
+ behavior is: if we see an out-of-order first-chunk (``index > 0``
317
+ with no open bucket), start a fresh bucket but log a warning. That
318
+ way we never poison a prior sequence, and the problem surfaces in
319
+ logs if it happens.
320
+ """
307
321
  if index == 0:
308
322
  self._chunk_id += 1
309
- key = str(self._chunk_id)
310
- if key not in self._chunks:
323
+ key = str(self._chunk_id)
311
324
  self._chunks[key] = {"parts": {}, "total": total}
312
325
  self._chunk_times[key] = time.monotonic()
326
+ else:
327
+ key = str(self._chunk_id)
328
+ if key not in self._chunks:
329
+ # Out-of-order arrival. Start a new bucket rather than append
330
+ # to the previous sequence's parts — that's the corruption
331
+ # path. Log once so it's diagnosable.
332
+ import sys
333
+ print(
334
+ f"LivePilot: chunk index={index}/{total} arrived before "
335
+ f"index=0 — starting fresh bucket. UDP reordering on "
336
+ f"loopback suggests system load.",
337
+ file=sys.stderr,
338
+ )
339
+ self._chunk_id += 1
340
+ key = str(self._chunk_id)
341
+ self._chunks[key] = {"parts": {}, "total": total}
342
+ self._chunk_times[key] = time.monotonic()
313
343
 
314
344
  self._chunks[key]["parts"][index] = encoded
315
345
 
@@ -369,14 +399,26 @@ class M4LBridge:
369
399
  if not self.cache.is_connected:
370
400
  return {"error": "LivePilot Analyzer not connected. Drop it on the master track."}
371
401
 
402
+ # Fail fast if there is no receiver to correlate the response. The
403
+ # previous version sent the OSC packet anyway, dropped the reply
404
+ # inside _handle_response (no future registered), and waited out
405
+ # the full 5s timeout before returning a misleading "device may be
406
+ # busy or removed" error. The real cause was "no receiver wired",
407
+ # which the caller should see immediately.
408
+ if self.receiver is None:
409
+ return {
410
+ "error": "M4L bridge has no active receiver — the UDP 9880 "
411
+ "listener did not start. Check server startup logs "
412
+ "for a bind failure on port 9880."
413
+ }
414
+
372
415
  if self._cmd_lock is None:
373
416
  self._cmd_lock = asyncio.Lock()
374
417
  async with self._cmd_lock:
375
418
  # Create a future for the response
376
419
  loop = asyncio.get_running_loop()
377
420
  future = loop.create_future()
378
- if self.receiver:
379
- self.receiver.set_response_future(future)
421
+ self.receiver.set_response_future(future)
380
422
 
381
423
  # Build and send OSC message (no leading / — Max udpreceive
382
424
  # passes messagename with / intact to JS, breaking dispatch)
@@ -394,8 +436,7 @@ class M4LBridge:
394
436
  # cleared it inside _handle_response, but calling again is a
395
437
  # no-op. On timeout this is what prevents a delayed packet from
396
438
  # resolving a future belonging to the next command.
397
- if self.receiver:
398
- self.receiver.set_response_future(None)
439
+ self.receiver.set_response_future(None)
399
440
 
400
441
  async def send_capture(self, command: str, *args: Any, timeout: float = 35.0) -> dict:
401
442
  """Send a capture command to the M4L device and wait for /capture_complete."""
@@ -326,8 +326,22 @@ async def execute_plan_steps_async(
326
326
  results.append(result)
327
327
 
328
328
  # Record successful step result for future bindings
329
- if result.ok and step_id and isinstance(result.result, dict):
330
- step_results[step_id] = result.result
329
+ if result.ok and step_id:
330
+ if isinstance(result.result, dict):
331
+ step_results[step_id] = result.result
332
+ else:
333
+ # Log but DO NOT silently drop the binding without telling
334
+ # anyone — the previous version let non-dict results slip
335
+ # past, which meant any downstream {"$from_step": step_id}
336
+ # reference blew up with a confusing "step_id not found"
337
+ # instead of the real "result wasn't a dict" cause.
338
+ import logging as _logging
339
+ _logging.getLogger(__name__).warning(
340
+ "step_results: dropping non-dict result for "
341
+ "step_id=%s tool=%s type=%s. Any $from_step refs to "
342
+ "this step_id will fail with 'step_id not found'.",
343
+ step_id, tool, type(result.result).__name__,
344
+ )
331
345
 
332
346
  if not result.ok and stop_on_failure:
333
347
  break
@@ -82,6 +82,12 @@ BRIDGE_COMMANDS: frozenset[str] = frozenset({
82
82
  "remove_warp_marker", "capture_audio", "capture_stop",
83
83
  "check_flucoma", "scrub_clip", "stop_scrub", "get_display_values",
84
84
  "get_plugin_params", "map_plugin_param", "get_plugin_presets",
85
+ # Deep-LOM writes that the Python Remote Script cannot reach (live on
86
+ # the sample child object or require device-selection semantics that
87
+ # only Max JS LiveAPI exposes). See mcp_server/tools/analyzer.py for
88
+ # the matching MCP tools that route through bridge.send_command.
89
+ "simpler_set_warp",
90
+ "compressor_set_sidechain",
85
91
  # NOTE: load_sample_to_simpler used to live here, but it's actually an
86
92
  # async Python MCP tool in mcp_server/tools/analyzer.py, not a bridge
87
93
  # command. It has no case in livepilot_bridge.js and no @register handler
@@ -139,11 +139,30 @@ class SampleFitReport:
139
139
 
140
140
  @property
141
141
  def overall_score(self) -> float:
142
+ """Average over AVAILABLE critics only.
143
+
144
+ BUG-B38 reshaped frequency_fit to report ``-1.0`` with
145
+ ``available=False`` when no mix snapshot is present. The previous
146
+ aggregator mean-folded that sentinel into the overall score,
147
+ dropping it by ~17 points (one critic out of six). The fix is to
148
+ respect the ``available`` flag — same contract every other caller
149
+ uses.
150
+ """
142
151
  if not self.critics:
143
152
  return 0.0
144
- scores = [c.score if isinstance(c, CriticResult) else c.get("score", 0)
145
- for c in self.critics.values()]
146
- return sum(scores) / len(scores) if scores else 0.0
153
+ available_scores = []
154
+ for c in self.critics.values():
155
+ if isinstance(c, CriticResult):
156
+ if c.available is False:
157
+ continue
158
+ available_scores.append(c.score)
159
+ else: # legacy dict shape
160
+ if c.get("available") is False:
161
+ continue
162
+ available_scores.append(c.get("score", 0))
163
+ if not available_scores:
164
+ return 0.0
165
+ return sum(available_scores) / len(available_scores)
147
166
 
148
167
  def to_dict(self) -> dict:
149
168
  return {
@@ -14,3 +14,4 @@ from . import transition_compilers # noqa: F401
14
14
  from . import sound_design_compilers # noqa: F401
15
15
  from . import performance_compilers # noqa: F401
16
16
  from . import sample_compilers # noqa: F401
17
+ from . import device_creation_compilers # noqa: F401
@@ -24,14 +24,22 @@ class CompiledStep:
24
24
  params: dict # Concrete params, e.g. {"track_index": 0, "volume": 0.72}
25
25
  description: str # Human-readable, e.g. "Push Drums from 0.65 → 0.72"
26
26
  verify_after: bool = True # Whether to check meters after this step
27
+ # Optional explicit backend. If set, the execution router uses it verbatim
28
+ # and skips classify_step(). Leave None to let the router auto-classify at
29
+ # dispatch time — safe because test_move_annotations enforces every
30
+ # registered move's steps map to a known backend.
31
+ backend: Optional[str] = None
27
32
 
28
33
  def to_dict(self) -> dict:
29
- return {
34
+ d = {
30
35
  "tool": self.tool,
31
36
  "params": self.params,
32
37
  "description": self.description,
33
38
  "verify_after": self.verify_after,
34
39
  }
40
+ if self.backend:
41
+ d["backend"] = self.backend
42
+ return d
35
43
 
36
44
 
37
45
  @dataclass
@@ -0,0 +1,47 @@
1
+ """Family compiler for device-creation semantic moves.
2
+
3
+ Device-creation moves generate custom M4L devices via the Device Forge
4
+ (``generate_m4l_effect``). Unlike mix/sound-design moves — where the
5
+ compiler inspects the kernel's track topology — device-creation moves
6
+ are parametric: the plan_template already contains the tool call and
7
+ concrete arguments.
8
+
9
+ We therefore use a single family-level compiler that just maps
10
+ ``plan_template`` → ``CompiledStep`` objects. This keeps the registry
11
+ honest (every move is either compilable or analytical_only) without
12
+ duplicating templates into per-move compilers.
13
+ """
14
+ from __future__ import annotations
15
+
16
+ from .compiler import CompiledPlan, CompiledStep, register_family_compiler
17
+ from .models import SemanticMove
18
+
19
+
20
+ def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
21
+ """Map plan_template steps straight to CompiledStep.
22
+
23
+ plan_template is trusted for this family: each step already has
24
+ ``tool``, ``params``, ``description``, and ``backend`` annotated.
25
+ """
26
+ steps: list[CompiledStep] = []
27
+ for step in move.plan_template:
28
+ steps.append(CompiledStep(
29
+ tool=step.get("tool", ""),
30
+ params=step.get("params", {}),
31
+ description=step.get("description", ""),
32
+ verify_after=bool(step.get("verify_after", True)),
33
+ backend=step.get("backend"),
34
+ ))
35
+
36
+ return CompiledPlan(
37
+ move_id=move.move_id,
38
+ intent=move.intent,
39
+ steps=steps,
40
+ risk_level=move.risk_level,
41
+ summary=move.intent,
42
+ requires_approval=(kernel.get("mode", "improve") != "explore"),
43
+ warnings=[],
44
+ )
45
+
46
+
47
+ register_family_compiler("device_creation", _compile_device_creation)