livepilot 1.10.7 → 1.10.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +254 -0
- package/README.md +19 -17
- package/bin/livepilot.js +146 -28
- package/installer/install.js +117 -11
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/atlas/__init__.py +39 -7
- package/mcp_server/atlas/tools.py +56 -15
- package/mcp_server/composer/layer_planner.py +27 -0
- package/mcp_server/composer/prompt_parser.py +15 -6
- package/mcp_server/connection.py +11 -3
- package/mcp_server/corpus/__init__.py +14 -4
- package/mcp_server/evaluation/fabric.py +62 -1
- package/mcp_server/m4l_bridge.py +63 -12
- package/mcp_server/project_brain/automation_graph.py +23 -1
- package/mcp_server/project_brain/builder.py +2 -0
- package/mcp_server/project_brain/models.py +20 -1
- package/mcp_server/project_brain/tools.py +10 -3
- package/mcp_server/runtime/execution_router.py +16 -2
- package/mcp_server/runtime/remote_commands.py +6 -0
- package/mcp_server/sample_engine/models.py +22 -3
- package/mcp_server/semantic_moves/__init__.py +1 -0
- package/mcp_server/semantic_moves/compiler.py +9 -1
- package/mcp_server/semantic_moves/device_creation_compilers.py +47 -0
- package/mcp_server/semantic_moves/mix_compilers.py +170 -0
- package/mcp_server/semantic_moves/mix_moves.py +1 -1
- package/mcp_server/semantic_moves/models.py +5 -0
- package/mcp_server/semantic_moves/tools.py +154 -35
- package/mcp_server/server.py +147 -17
- package/mcp_server/services/singletons.py +68 -0
- package/mcp_server/session_continuity/models.py +13 -0
- package/mcp_server/session_continuity/tools.py +2 -0
- package/mcp_server/session_continuity/tracker.py +93 -0
- package/mcp_server/splice_client/client.py +29 -8
- package/mcp_server/tools/_analyzer_engine/__init__.py +39 -0
- package/mcp_server/tools/_analyzer_engine/context.py +103 -0
- package/mcp_server/tools/_analyzer_engine/flucoma.py +23 -0
- package/mcp_server/tools/_analyzer_engine/sample.py +122 -0
- package/mcp_server/tools/_motif_engine.py +19 -4
- package/mcp_server/tools/analyzer.py +25 -180
- package/mcp_server/tools/clips.py +240 -2
- package/mcp_server/tools/midi_io.py +10 -0
- package/mcp_server/tools/tracks.py +1 -1
- package/mcp_server/tools/transport.py +59 -4
- package/mcp_server/translation_engine/tools.py +8 -4
- package/package.json +25 -3
- package/remote_script/LivePilot/__init__.py +36 -9
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/remote_script/LivePilot/browser.py +16 -6
- package/remote_script/LivePilot/devices.py +10 -5
- package/remote_script/LivePilot/notes.py +13 -2
- package/remote_script/LivePilot/server.py +51 -13
- package/remote_script/LivePilot/version_detect.py +7 -4
- package/server.json +20 -0
- package/.claude-plugin/marketplace.json +0 -21
- package/.mcp.json.disabled +0 -9
- package/.mcpbignore +0 -60
- package/AGENTS.md +0 -46
- package/BUGS.md +0 -1570
- package/CODE_OF_CONDUCT.md +0 -27
- package/CONTRIBUTING.md +0 -131
- package/SECURITY.md +0 -48
- package/livepilot/.Codex-plugin/plugin.json +0 -8
- package/livepilot/.claude-plugin/plugin.json +0 -8
- package/livepilot/agents/livepilot-producer/AGENT.md +0 -313
- package/livepilot/commands/arrange.md +0 -47
- package/livepilot/commands/beat.md +0 -77
- package/livepilot/commands/evaluate.md +0 -49
- package/livepilot/commands/memory.md +0 -22
- package/livepilot/commands/mix.md +0 -44
- package/livepilot/commands/perform.md +0 -42
- package/livepilot/commands/session.md +0 -13
- package/livepilot/commands/sounddesign.md +0 -43
- package/livepilot/skills/livepilot-arrangement/SKILL.md +0 -155
- package/livepilot/skills/livepilot-composition-engine/SKILL.md +0 -107
- package/livepilot/skills/livepilot-composition-engine/references/form-patterns.md +0 -97
- package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +0 -102
- package/livepilot/skills/livepilot-core/SKILL.md +0 -184
- package/livepilot/skills/livepilot-core/references/ableton-workflow-patterns.md +0 -831
- package/livepilot/skills/livepilot-core/references/automation-atlas.md +0 -272
- package/livepilot/skills/livepilot-core/references/device-atlas/00-index.md +0 -110
- package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +0 -687
- package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +0 -753
- package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +0 -525
- package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +0 -402
- package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +0 -963
- package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +0 -874
- package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +0 -571
- package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +0 -714
- package/livepilot/skills/livepilot-core/references/device-atlas/synths-native.md +0 -953
- package/livepilot/skills/livepilot-core/references/device-knowledge/00-index.md +0 -34
- package/livepilot/skills/livepilot-core/references/device-knowledge/automation-as-music.md +0 -204
- package/livepilot/skills/livepilot-core/references/device-knowledge/chains-genre.md +0 -173
- package/livepilot/skills/livepilot-core/references/device-knowledge/creative-thinking.md +0 -211
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-distortion.md +0 -188
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-space.md +0 -162
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-spectral.md +0 -229
- package/livepilot/skills/livepilot-core/references/device-knowledge/instruments-synths.md +0 -243
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +0 -352
- package/livepilot/skills/livepilot-core/references/memory-guide.md +0 -107
- package/livepilot/skills/livepilot-core/references/midi-recipes.md +0 -402
- package/livepilot/skills/livepilot-core/references/mixing-patterns.md +0 -578
- package/livepilot/skills/livepilot-core/references/overview.md +0 -290
- package/livepilot/skills/livepilot-core/references/sample-manipulation.md +0 -724
- package/livepilot/skills/livepilot-core/references/sound-design-deep.md +0 -140
- package/livepilot/skills/livepilot-core/references/sound-design.md +0 -393
- package/livepilot/skills/livepilot-devices/SKILL.md +0 -169
- package/livepilot/skills/livepilot-evaluation/SKILL.md +0 -156
- package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +0 -118
- package/livepilot/skills/livepilot-evaluation/references/evaluation-contracts.md +0 -121
- package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +0 -110
- package/livepilot/skills/livepilot-mix-engine/SKILL.md +0 -123
- package/livepilot/skills/livepilot-mix-engine/references/mix-critics.md +0 -143
- package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +0 -105
- package/livepilot/skills/livepilot-mixing/SKILL.md +0 -157
- package/livepilot/skills/livepilot-notes/SKILL.md +0 -130
- package/livepilot/skills/livepilot-performance-engine/SKILL.md +0 -122
- package/livepilot/skills/livepilot-performance-engine/references/performance-safety.md +0 -98
- package/livepilot/skills/livepilot-release/SKILL.md +0 -130
- package/livepilot/skills/livepilot-sample-engine/SKILL.md +0 -105
- package/livepilot/skills/livepilot-sample-engine/references/sample-critics.md +0 -87
- package/livepilot/skills/livepilot-sample-engine/references/sample-philosophy.md +0 -51
- package/livepilot/skills/livepilot-sample-engine/references/sample-techniques.md +0 -131
- package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +0 -168
- package/livepilot/skills/livepilot-sound-design-engine/references/patch-model.md +0 -119
- package/livepilot/skills/livepilot-sound-design-engine/references/sound-design-critics.md +0 -118
- package/livepilot/skills/livepilot-wonder/SKILL.md +0 -79
- package/m4l_device/LivePilot_Analyzer.amxd.pre-presentation-backup +0 -0
- package/m4l_device/LivePilot_Analyzer.maxpat +0 -2705
- package/m4l_device/LivePilot_Analyzer.maxproj +0 -53
- package/manifest.json +0 -91
- package/mcp_server/splice_client/protos/app_pb2.pyi +0 -1153
- package/scripts/generate_tool_catalog.py +0 -106
- package/scripts/sync_metadata.py +0 -349
package/mcp_server/m4l_bridge.py
CHANGED
|
@@ -303,13 +303,43 @@ class SpectralReceiver(asyncio.DatagramProtocol):
|
|
|
303
303
|
print(f"LivePilot: failed to decode bridge response: {exc}", file=sys.stderr)
|
|
304
304
|
|
|
305
305
|
def _handle_chunk(self, index: int, total: int, encoded: str) -> None:
|
|
306
|
-
"""Reassemble chunked responses.
|
|
306
|
+
"""Reassemble chunked responses.
|
|
307
|
+
|
|
308
|
+
The previous implementation incremented ``_chunk_id`` only when
|
|
309
|
+
``index == 0`` and assumed the first chunk always arrived first.
|
|
310
|
+
Under UDP reordering (rare on loopback but possible under system
|
|
311
|
+
load), a chunk with ``index > 0`` arriving before ``index 0`` would
|
|
312
|
+
be dropped into the PREVIOUS sequence's bucket — silently corrupting
|
|
313
|
+
that earlier response's payload.
|
|
314
|
+
|
|
315
|
+
Until the wire protocol adds an explicit sequence id, the safer
|
|
316
|
+
behavior is: if we see an out-of-order first-chunk (``index > 0``
|
|
317
|
+
with no open bucket), start a fresh bucket but log a warning. That
|
|
318
|
+
way we never poison a prior sequence, and the problem surfaces in
|
|
319
|
+
logs if it happens.
|
|
320
|
+
"""
|
|
307
321
|
if index == 0:
|
|
308
322
|
self._chunk_id += 1
|
|
309
|
-
|
|
310
|
-
if key not in self._chunks:
|
|
323
|
+
key = str(self._chunk_id)
|
|
311
324
|
self._chunks[key] = {"parts": {}, "total": total}
|
|
312
325
|
self._chunk_times[key] = time.monotonic()
|
|
326
|
+
else:
|
|
327
|
+
key = str(self._chunk_id)
|
|
328
|
+
if key not in self._chunks:
|
|
329
|
+
# Out-of-order arrival. Start a new bucket rather than append
|
|
330
|
+
# to the previous sequence's parts — that's the corruption
|
|
331
|
+
# path. Log once so it's diagnosable.
|
|
332
|
+
import sys
|
|
333
|
+
print(
|
|
334
|
+
f"LivePilot: chunk index={index}/{total} arrived before "
|
|
335
|
+
f"index=0 — starting fresh bucket. UDP reordering on "
|
|
336
|
+
f"loopback suggests system load.",
|
|
337
|
+
file=sys.stderr,
|
|
338
|
+
)
|
|
339
|
+
self._chunk_id += 1
|
|
340
|
+
key = str(self._chunk_id)
|
|
341
|
+
self._chunks[key] = {"parts": {}, "total": total}
|
|
342
|
+
self._chunk_times[key] = time.monotonic()
|
|
313
343
|
|
|
314
344
|
self._chunks[key]["parts"][index] = encoded
|
|
315
345
|
|
|
@@ -369,14 +399,26 @@ class M4LBridge:
|
|
|
369
399
|
if not self.cache.is_connected:
|
|
370
400
|
return {"error": "LivePilot Analyzer not connected. Drop it on the master track."}
|
|
371
401
|
|
|
402
|
+
# Fail fast if there is no receiver to correlate the response. The
|
|
403
|
+
# previous version sent the OSC packet anyway, dropped the reply
|
|
404
|
+
# inside _handle_response (no future registered), and waited out
|
|
405
|
+
# the full 5s timeout before returning a misleading "device may be
|
|
406
|
+
# busy or removed" error. The real cause was "no receiver wired",
|
|
407
|
+
# which the caller should see immediately.
|
|
408
|
+
if self.receiver is None:
|
|
409
|
+
return {
|
|
410
|
+
"error": "M4L bridge has no active receiver — the UDP 9880 "
|
|
411
|
+
"listener did not start. Check server startup logs "
|
|
412
|
+
"for a bind failure on port 9880."
|
|
413
|
+
}
|
|
414
|
+
|
|
372
415
|
if self._cmd_lock is None:
|
|
373
416
|
self._cmd_lock = asyncio.Lock()
|
|
374
417
|
async with self._cmd_lock:
|
|
375
418
|
# Create a future for the response
|
|
376
419
|
loop = asyncio.get_running_loop()
|
|
377
420
|
future = loop.create_future()
|
|
378
|
-
|
|
379
|
-
self.receiver.set_response_future(future)
|
|
421
|
+
self.receiver.set_response_future(future)
|
|
380
422
|
|
|
381
423
|
# Build and send OSC message (no leading / — Max udpreceive
|
|
382
424
|
# passes messagename with / intact to JS, breaking dispatch)
|
|
@@ -394,25 +436,35 @@ class M4LBridge:
|
|
|
394
436
|
# cleared it inside _handle_response, but calling again is a
|
|
395
437
|
# no-op. On timeout this is what prevents a delayed packet from
|
|
396
438
|
# resolving a future belonging to the next command.
|
|
397
|
-
|
|
398
|
-
self.receiver.set_response_future(None)
|
|
439
|
+
self.receiver.set_response_future(None)
|
|
399
440
|
|
|
400
441
|
async def send_capture(self, command: str, *args: Any, timeout: float = 35.0) -> dict:
|
|
401
442
|
"""Send a capture command to the M4L device and wait for /capture_complete."""
|
|
402
443
|
if not self.cache.is_connected:
|
|
403
444
|
return {"error": "LivePilot Analyzer not connected. Drop it on the master track."}
|
|
404
445
|
|
|
446
|
+
# Fail fast if there is no receiver to correlate the reply. Prior
|
|
447
|
+
# versions sent the OSC packet anyway, never registered a future,
|
|
448
|
+
# and then waited out the full 35s timeout with a misleading
|
|
449
|
+
# "device may be busy or removed" diagnosis — the real cause was
|
|
450
|
+
# "no receiver wired" (UDP 9880 failed to bind at startup).
|
|
451
|
+
if self.receiver is None:
|
|
452
|
+
return {
|
|
453
|
+
"error": "M4L bridge has no active receiver — the UDP 9880 "
|
|
454
|
+
"listener did not start. Check server startup logs "
|
|
455
|
+
"for a bind failure on port 9880."
|
|
456
|
+
}
|
|
457
|
+
|
|
405
458
|
if self._cmd_lock is None:
|
|
406
459
|
self._cmd_lock = asyncio.Lock()
|
|
407
460
|
async with self._cmd_lock:
|
|
408
461
|
# Cancel any stale capture future before creating a new one
|
|
409
|
-
if self.receiver
|
|
462
|
+
if self.receiver._capture_future and not self.receiver._capture_future.done():
|
|
410
463
|
self.receiver._capture_future.cancel()
|
|
411
464
|
|
|
412
465
|
loop = asyncio.get_running_loop()
|
|
413
466
|
future = loop.create_future()
|
|
414
|
-
|
|
415
|
-
self.receiver.set_capture_future(future)
|
|
467
|
+
self.receiver.set_capture_future(future)
|
|
416
468
|
|
|
417
469
|
osc_data = self._build_osc(command, args)
|
|
418
470
|
self._sock.sendto(osc_data, self._m4l_addr)
|
|
@@ -422,8 +474,7 @@ class M4LBridge:
|
|
|
422
474
|
return result
|
|
423
475
|
except asyncio.TimeoutError:
|
|
424
476
|
# Clean up the dangling future
|
|
425
|
-
|
|
426
|
-
self.receiver._capture_future = None
|
|
477
|
+
self.receiver._capture_future = None
|
|
427
478
|
return {"error": "M4L capture timeout — device may be busy or removed"}
|
|
428
479
|
|
|
429
480
|
async def cancel_capture_future(self) -> None:
|
|
@@ -12,6 +12,7 @@ def build_automation_graph(
|
|
|
12
12
|
track_infos: list[dict],
|
|
13
13
|
sections: list[dict] | None = None,
|
|
14
14
|
clip_automation: list[dict] | None = None,
|
|
15
|
+
clips_scanned: int = 0,
|
|
15
16
|
) -> AutomationGraph:
|
|
16
17
|
"""Build an AutomationGraph covering both device-parameter automation
|
|
17
18
|
hints and real clip envelopes (BUG-E2).
|
|
@@ -27,11 +28,17 @@ def build_automation_graph(
|
|
|
27
28
|
parameter_name, parameter_type, device_name}].
|
|
28
29
|
This is the ground truth — `device.parameters[i].is_automated`
|
|
29
30
|
only reflects mapping state, not the presence of an envelope.
|
|
31
|
+
clips_scanned: total number of session clips the caller actually
|
|
32
|
+
probed for envelopes. Used to compute ``coverage_pct``; pass 0
|
|
33
|
+
when the caller couldn't enumerate clips (unknown → 0.0).
|
|
30
34
|
|
|
31
35
|
Returns:
|
|
32
|
-
AutomationGraph with automated_params and
|
|
36
|
+
AutomationGraph with automated_params, density_by_section, and
|
|
37
|
+
the v1.10.9 coverage signals (coverage_pct, clip_envelope_count,
|
|
38
|
+
clips_scanned).
|
|
33
39
|
"""
|
|
34
40
|
graph = AutomationGraph()
|
|
41
|
+
graph.clips_scanned = max(0, int(clips_scanned))
|
|
35
42
|
|
|
36
43
|
if not track_infos and not clip_automation:
|
|
37
44
|
return graph
|
|
@@ -121,4 +128,19 @@ def build_automation_graph(
|
|
|
121
128
|
else:
|
|
122
129
|
graph.density_by_section[section_id] = 0.0
|
|
123
130
|
|
|
131
|
+
# BUG-D2 coverage signals.
|
|
132
|
+
# clip_envelope_count = distinct (track, clip) slots containing any envelope.
|
|
133
|
+
clip_slots_with_envelope: set[tuple[int, int | None]] = set()
|
|
134
|
+
for env in clip_automation or []:
|
|
135
|
+
clip_slots_with_envelope.add(
|
|
136
|
+
(int(env.get("track_index", -1)), env.get("clip_index"))
|
|
137
|
+
)
|
|
138
|
+
graph.clip_envelope_count = len(clip_slots_with_envelope)
|
|
139
|
+
if graph.clips_scanned > 0:
|
|
140
|
+
graph.coverage_pct = min(
|
|
141
|
+
1.0, graph.clip_envelope_count / float(graph.clips_scanned)
|
|
142
|
+
)
|
|
143
|
+
else:
|
|
144
|
+
graph.coverage_pct = 0.0
|
|
145
|
+
|
|
124
146
|
return graph
|
|
@@ -24,6 +24,7 @@ def build_project_state_from_data(
|
|
|
24
24
|
notes_map: Optional[dict[str, dict[int, list[dict]]]] = None,
|
|
25
25
|
arrangement_clips: Optional[dict] = None,
|
|
26
26
|
clip_automation: Optional[list[dict]] = None,
|
|
27
|
+
clips_scanned: int = 0,
|
|
27
28
|
analyzer_ok: bool = False,
|
|
28
29
|
flucoma_ok: bool = False,
|
|
29
30
|
plugin_health: Optional[dict[str, Any]] = None,
|
|
@@ -107,6 +108,7 @@ def build_project_state_from_data(
|
|
|
107
108
|
track_infos=track_infos or [],
|
|
108
109
|
sections=section_dicts_for_auto,
|
|
109
110
|
clip_automation=clip_automation or [],
|
|
111
|
+
clips_scanned=clips_scanned,
|
|
110
112
|
)
|
|
111
113
|
state.automation_graph.freshness.mark_fresh(state.revision)
|
|
112
114
|
|
|
@@ -205,16 +205,35 @@ class RoleGraph:
|
|
|
205
205
|
|
|
206
206
|
@dataclass
|
|
207
207
|
class AutomationGraph:
|
|
208
|
-
"""Automation presence and gesture density.
|
|
208
|
+
"""Automation presence and gesture density.
|
|
209
|
+
|
|
210
|
+
``coverage_pct`` is the fraction of scanned clips that have at least
|
|
211
|
+
one automation envelope (0.0–1.0). Introduced in v1.10.9 to close
|
|
212
|
+
BUG-D2's "is this session missing automation?" signal — downstream
|
|
213
|
+
engines (Wonder Mode, Sound Design, etc.) can branch on a low
|
|
214
|
+
coverage value to recommend filter sweeps, volume crescendos, and
|
|
215
|
+
dub-style handoffs that the producer hasn't written yet.
|
|
216
|
+
|
|
217
|
+
``clip_envelope_count`` is the raw total of per-clip envelopes
|
|
218
|
+
discovered; distinguishes "no automation in the project at all"
|
|
219
|
+
(count=0) from "automation exists but is lightly used" (count>0 but
|
|
220
|
+
coverage_pct<0.2).
|
|
221
|
+
"""
|
|
209
222
|
|
|
210
223
|
automated_params: list[dict] = field(default_factory=list)
|
|
211
224
|
density_by_section: dict[str, float] = field(default_factory=dict)
|
|
225
|
+
coverage_pct: float = 0.0
|
|
226
|
+
clip_envelope_count: int = 0
|
|
227
|
+
clips_scanned: int = 0
|
|
212
228
|
freshness: FreshnessInfo = field(default_factory=FreshnessInfo)
|
|
213
229
|
|
|
214
230
|
def to_dict(self) -> dict:
|
|
215
231
|
return {
|
|
216
232
|
"automated_params": list(self.automated_params),
|
|
217
233
|
"density_by_section": dict(self.density_by_section),
|
|
234
|
+
"coverage_pct": round(self.coverage_pct, 3),
|
|
235
|
+
"clip_envelope_count": self.clip_envelope_count,
|
|
236
|
+
"clips_scanned": self.clips_scanned,
|
|
218
237
|
"freshness": self.freshness.to_dict(),
|
|
219
238
|
}
|
|
220
239
|
|
|
@@ -131,11 +131,15 @@ def build_project_brain(ctx: Context) -> dict:
|
|
|
131
131
|
# automation actually lives on each clip (session + arrangement). We
|
|
132
132
|
# walk every clip slot that has a clip and ask get_clip_automation, then
|
|
133
133
|
# aggregate into a flat list keyed by section.
|
|
134
|
+
#
|
|
135
|
+
# clips_scanned is the denominator for coverage_pct (BUG-D2) — it
|
|
136
|
+
# counts how many (track, scene) slots we probed, regardless of
|
|
137
|
+
# whether an envelope came back. Without this, a session with zero
|
|
138
|
+
# automation would be indistinguishable from a session where we
|
|
139
|
+
# failed to probe, which is exactly the ambiguity BUG-D2 flagged.
|
|
134
140
|
clip_automation: list[dict] = []
|
|
141
|
+
clips_scanned = 0
|
|
135
142
|
try:
|
|
136
|
-
# Iterate session scenes x tracks, plus arrangement clips we already have.
|
|
137
|
-
# Use the raw enumerate index for section_id so it stays aligned with
|
|
138
|
-
# arrangement_graph sections (which use the same scheme — see E1 fix).
|
|
139
143
|
for scene_idx, scene in enumerate(scenes or []):
|
|
140
144
|
scene_name = str(scene.get("name", "")).strip()
|
|
141
145
|
if not scene_name:
|
|
@@ -143,6 +147,7 @@ def build_project_brain(ctx: Context) -> dict:
|
|
|
143
147
|
section_id = f"sec_{scene_idx:02d}"
|
|
144
148
|
for track in tracks:
|
|
145
149
|
t_idx = track.get("index", 0)
|
|
150
|
+
clips_scanned += 1
|
|
146
151
|
try:
|
|
147
152
|
auto_resp = ableton.send_command("get_clip_automation", {
|
|
148
153
|
"track_index": t_idx,
|
|
@@ -196,6 +201,7 @@ def build_project_brain(ctx: Context) -> dict:
|
|
|
196
201
|
notes_map=notes_map if notes_map else None,
|
|
197
202
|
arrangement_clips=arrangement_clips if arrangement_clips else None,
|
|
198
203
|
clip_automation=clip_automation if clip_automation else None,
|
|
204
|
+
clips_scanned=clips_scanned,
|
|
199
205
|
analyzer_ok=analyzer_ok,
|
|
200
206
|
flucoma_ok=flucoma_ok,
|
|
201
207
|
session_ok=True,
|
|
@@ -230,6 +236,7 @@ def get_project_brain_summary(ctx: Context) -> dict:
|
|
|
230
236
|
"section_count": len(state.arrangement_graph.sections),
|
|
231
237
|
"role_count": len(state.role_graph.roles),
|
|
232
238
|
"automated_param_count": len(state.automation_graph.automated_params),
|
|
239
|
+
"automation_coverage_pct": round(state.automation_graph.coverage_pct, 3),
|
|
233
240
|
"tempo": state.session_graph.tempo,
|
|
234
241
|
"time_signature": state.session_graph.time_signature,
|
|
235
242
|
"is_stale": state.is_stale(),
|
|
@@ -326,8 +326,22 @@ async def execute_plan_steps_async(
|
|
|
326
326
|
results.append(result)
|
|
327
327
|
|
|
328
328
|
# Record successful step result for future bindings
|
|
329
|
-
if result.ok and step_id
|
|
330
|
-
|
|
329
|
+
if result.ok and step_id:
|
|
330
|
+
if isinstance(result.result, dict):
|
|
331
|
+
step_results[step_id] = result.result
|
|
332
|
+
else:
|
|
333
|
+
# Log but DO NOT silently drop the binding without telling
|
|
334
|
+
# anyone — the previous version let non-dict results slip
|
|
335
|
+
# past, which meant any downstream {"$from_step": step_id}
|
|
336
|
+
# reference blew up with a confusing "step_id not found"
|
|
337
|
+
# instead of the real "result wasn't a dict" cause.
|
|
338
|
+
import logging as _logging
|
|
339
|
+
_logging.getLogger(__name__).warning(
|
|
340
|
+
"step_results: dropping non-dict result for "
|
|
341
|
+
"step_id=%s tool=%s type=%s. Any $from_step refs to "
|
|
342
|
+
"this step_id will fail with 'step_id not found'.",
|
|
343
|
+
step_id, tool, type(result.result).__name__,
|
|
344
|
+
)
|
|
331
345
|
|
|
332
346
|
if not result.ok and stop_on_failure:
|
|
333
347
|
break
|
|
@@ -82,6 +82,12 @@ BRIDGE_COMMANDS: frozenset[str] = frozenset({
|
|
|
82
82
|
"remove_warp_marker", "capture_audio", "capture_stop",
|
|
83
83
|
"check_flucoma", "scrub_clip", "stop_scrub", "get_display_values",
|
|
84
84
|
"get_plugin_params", "map_plugin_param", "get_plugin_presets",
|
|
85
|
+
# Deep-LOM writes that the Python Remote Script cannot reach (live on
|
|
86
|
+
# the sample child object or require device-selection semantics that
|
|
87
|
+
# only Max JS LiveAPI exposes). See mcp_server/tools/analyzer.py for
|
|
88
|
+
# the matching MCP tools that route through bridge.send_command.
|
|
89
|
+
"simpler_set_warp",
|
|
90
|
+
"compressor_set_sidechain",
|
|
85
91
|
# NOTE: load_sample_to_simpler used to live here, but it's actually an
|
|
86
92
|
# async Python MCP tool in mcp_server/tools/analyzer.py, not a bridge
|
|
87
93
|
# command. It has no case in livepilot_bridge.js and no @register handler
|
|
@@ -139,11 +139,30 @@ class SampleFitReport:
|
|
|
139
139
|
|
|
140
140
|
@property
|
|
141
141
|
def overall_score(self) -> float:
|
|
142
|
+
"""Average over AVAILABLE critics only.
|
|
143
|
+
|
|
144
|
+
BUG-B38 reshaped frequency_fit to report ``-1.0`` with
|
|
145
|
+
``available=False`` when no mix snapshot is present. The previous
|
|
146
|
+
aggregator mean-folded that sentinel into the overall score,
|
|
147
|
+
dropping it by ~17 points (one critic out of six). The fix is to
|
|
148
|
+
respect the ``available`` flag — same contract every other caller
|
|
149
|
+
uses.
|
|
150
|
+
"""
|
|
142
151
|
if not self.critics:
|
|
143
152
|
return 0.0
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
153
|
+
available_scores = []
|
|
154
|
+
for c in self.critics.values():
|
|
155
|
+
if isinstance(c, CriticResult):
|
|
156
|
+
if c.available is False:
|
|
157
|
+
continue
|
|
158
|
+
available_scores.append(c.score)
|
|
159
|
+
else: # legacy dict shape
|
|
160
|
+
if c.get("available") is False:
|
|
161
|
+
continue
|
|
162
|
+
available_scores.append(c.get("score", 0))
|
|
163
|
+
if not available_scores:
|
|
164
|
+
return 0.0
|
|
165
|
+
return sum(available_scores) / len(available_scores)
|
|
147
166
|
|
|
148
167
|
def to_dict(self) -> dict:
|
|
149
168
|
return {
|
|
@@ -24,14 +24,22 @@ class CompiledStep:
|
|
|
24
24
|
params: dict # Concrete params, e.g. {"track_index": 0, "volume": 0.72}
|
|
25
25
|
description: str # Human-readable, e.g. "Push Drums from 0.65 → 0.72"
|
|
26
26
|
verify_after: bool = True # Whether to check meters after this step
|
|
27
|
+
# Optional explicit backend. If set, the execution router uses it verbatim
|
|
28
|
+
# and skips classify_step(). Leave None to let the router auto-classify at
|
|
29
|
+
# dispatch time — safe because test_move_annotations enforces every
|
|
30
|
+
# registered move's steps map to a known backend.
|
|
31
|
+
backend: Optional[str] = None
|
|
27
32
|
|
|
28
33
|
def to_dict(self) -> dict:
|
|
29
|
-
|
|
34
|
+
d = {
|
|
30
35
|
"tool": self.tool,
|
|
31
36
|
"params": self.params,
|
|
32
37
|
"description": self.description,
|
|
33
38
|
"verify_after": self.verify_after,
|
|
34
39
|
}
|
|
40
|
+
if self.backend:
|
|
41
|
+
d["backend"] = self.backend
|
|
42
|
+
return d
|
|
35
43
|
|
|
36
44
|
|
|
37
45
|
@dataclass
|
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
"""Family compiler for device-creation semantic moves.
|
|
2
|
+
|
|
3
|
+
Device-creation moves generate custom M4L devices via the Device Forge
|
|
4
|
+
(``generate_m4l_effect``). Unlike mix/sound-design moves — where the
|
|
5
|
+
compiler inspects the kernel's track topology — device-creation moves
|
|
6
|
+
are parametric: the plan_template already contains the tool call and
|
|
7
|
+
concrete arguments.
|
|
8
|
+
|
|
9
|
+
We therefore use a single family-level compiler that just maps
|
|
10
|
+
``plan_template`` → ``CompiledStep`` objects. This keeps the registry
|
|
11
|
+
honest (every move is either compilable or analytical_only) without
|
|
12
|
+
duplicating templates into per-move compilers.
|
|
13
|
+
"""
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
from .compiler import CompiledPlan, CompiledStep, register_family_compiler
|
|
17
|
+
from .models import SemanticMove
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
|
|
21
|
+
"""Map plan_template steps straight to CompiledStep.
|
|
22
|
+
|
|
23
|
+
plan_template is trusted for this family: each step already has
|
|
24
|
+
``tool``, ``params``, ``description``, and ``backend`` annotated.
|
|
25
|
+
"""
|
|
26
|
+
steps: list[CompiledStep] = []
|
|
27
|
+
for step in move.plan_template:
|
|
28
|
+
steps.append(CompiledStep(
|
|
29
|
+
tool=step.get("tool", ""),
|
|
30
|
+
params=step.get("params", {}),
|
|
31
|
+
description=step.get("description", ""),
|
|
32
|
+
verify_after=bool(step.get("verify_after", True)),
|
|
33
|
+
backend=step.get("backend"),
|
|
34
|
+
))
|
|
35
|
+
|
|
36
|
+
return CompiledPlan(
|
|
37
|
+
move_id=move.move_id,
|
|
38
|
+
intent=move.intent,
|
|
39
|
+
steps=steps,
|
|
40
|
+
risk_level=move.risk_level,
|
|
41
|
+
summary=move.intent,
|
|
42
|
+
requires_approval=(kernel.get("mode", "improve") != "explore"),
|
|
43
|
+
warnings=[],
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
register_family_compiler("device_creation", _compile_device_creation)
|
|
@@ -282,6 +282,173 @@ def _compile_reduce_repetition(move: SemanticMove, kernel: dict) -> CompiledPlan
|
|
|
282
282
|
)
|
|
283
283
|
|
|
284
284
|
|
|
285
|
+
def _compile_make_kick_bass_lock(move: SemanticMove, kernel: dict) -> CompiledPlan:
|
|
286
|
+
"""Compile 'make_kick_bass_lock': carve space between kick and bass.
|
|
287
|
+
|
|
288
|
+
Strategy: reduce bass level slightly (clears sub for kick), verify both
|
|
289
|
+
tracks remain active. Sidechain compressor insertion is left as a future
|
|
290
|
+
step — it requires device selection + parameter mapping that varies too
|
|
291
|
+
much across projects to hardcode safely.
|
|
292
|
+
"""
|
|
293
|
+
steps: list[CompiledStep] = []
|
|
294
|
+
warnings: list[str] = []
|
|
295
|
+
descriptions: list[str] = []
|
|
296
|
+
|
|
297
|
+
bass_tracks = resolvers.find_tracks_by_role(kernel, ["bass"])
|
|
298
|
+
kick_tracks = resolvers.find_tracks_by_role(kernel, ["drums", "percussion"])
|
|
299
|
+
|
|
300
|
+
if not bass_tracks:
|
|
301
|
+
warnings.append("No bass track found — cannot lock kick and bass")
|
|
302
|
+
if not kick_tracks:
|
|
303
|
+
warnings.append("No kick/drum track found — reference track missing")
|
|
304
|
+
|
|
305
|
+
steps.append(CompiledStep(
|
|
306
|
+
tool="get_master_spectrum",
|
|
307
|
+
params={},
|
|
308
|
+
description="Read current sub/low balance before carving",
|
|
309
|
+
verify_after=False,
|
|
310
|
+
))
|
|
311
|
+
|
|
312
|
+
if bass_tracks:
|
|
313
|
+
bass = bass_tracks[0]
|
|
314
|
+
idx = bass["index"]
|
|
315
|
+
steps.append(CompiledStep(
|
|
316
|
+
tool="set_track_volume",
|
|
317
|
+
params={"track_index": idx, "volume": 0.60},
|
|
318
|
+
description=f"Pull {bass['name']} to 0.60 to clear sub for kick",
|
|
319
|
+
))
|
|
320
|
+
descriptions.append(f"Pull {bass['name']} to 0.60")
|
|
321
|
+
|
|
322
|
+
steps.append(CompiledStep(
|
|
323
|
+
tool="get_track_meters",
|
|
324
|
+
params={"include_stereo": True},
|
|
325
|
+
description="Verify kick and bass both still producing audio",
|
|
326
|
+
))
|
|
327
|
+
|
|
328
|
+
return CompiledPlan(
|
|
329
|
+
move_id=move.move_id,
|
|
330
|
+
intent=move.intent,
|
|
331
|
+
steps=steps,
|
|
332
|
+
before_reads=[{"tool": "get_master_spectrum", "params": {}}],
|
|
333
|
+
after_reads=[
|
|
334
|
+
{"tool": "get_master_spectrum", "params": {}},
|
|
335
|
+
{"tool": "get_track_meters", "params": {"include_stereo": True}},
|
|
336
|
+
],
|
|
337
|
+
risk_level="low",
|
|
338
|
+
summary="; ".join(descriptions) if descriptions else "No kick/bass changes compiled",
|
|
339
|
+
requires_approval=(kernel.get("mode", "improve") != "explore"),
|
|
340
|
+
warnings=warnings,
|
|
341
|
+
)
|
|
342
|
+
|
|
343
|
+
|
|
344
|
+
def _compile_create_buildup_tension(move: SemanticMove, kernel: dict) -> CompiledPlan:
    """Compile 'create_buildup_tension' into reversible volume gestures.

    Lifts the lead percussion track and ducks the lead harmony track so the
    contrast between them reads as rising tension.  Filter rises and send
    ramps belong to an automation recipe; this compiler deliberately limits
    itself to volume moves an agent can undo trivially.
    """
    compiled_steps: list[CompiledStep] = []
    issue_notes: list[str] = []
    change_log: list[str] = []

    percussion = resolvers.find_tracks_by_role(kernel, ["drums", "percussion"])
    harmony = resolvers.find_tracks_by_role(kernel, ["chords", "pad"])

    if not (percussion or harmony):
        issue_notes.append("No percussion or harmony tracks found — cannot build tension")

    # Lift the first percussion track to drive energy upward.
    if percussion:
        perc = percussion[0]
        compiled_steps.append(CompiledStep(
            tool="set_track_volume",
            params={"track_index": perc["index"], "volume": 0.78},
            description=f"Push {perc['name']} to 0.78 for rising energy",
        ))
        change_log.append(f"Push {perc['name']} to 0.78")

    # Duck the first harmony track so the percussion lift lands as contrast.
    if harmony:
        harm = harmony[0]
        compiled_steps.append(CompiledStep(
            tool="set_track_volume",
            params={"track_index": harm["index"], "volume": 0.35},
            description=f"Pull {harm['name']} to 0.35 to create harmonic vacuum before drop",
        ))
        change_log.append(f"Pull {harm['name']} to 0.35")

    # Always finish with a meter read so execution can confirm nothing went silent.
    compiled_steps.append(CompiledStep(
        tool="get_track_meters",
        params={"include_stereo": True},
        description="Verify tension steps did not silence any track",
    ))

    plan_summary = "; ".join(change_log) if change_log else "No tracks to ratchet"
    return CompiledPlan(
        move_id=move.move_id,
        intent=move.intent,
        steps=compiled_steps,
        before_reads=[{"tool": "get_emotional_arc", "params": {}}],
        after_reads=[
            {"tool": "get_emotional_arc", "params": {}},
            {"tool": "get_track_meters", "params": {"include_stereo": True}},
        ],
        risk_level="medium",
        summary=plan_summary,
        requires_approval=(kernel.get("mode", "improve") != "explore"),
        warnings=issue_notes,
    )
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
def _compile_smooth_scene_handoff(move: SemanticMove, kernel: dict) -> CompiledPlan:
    """Compile 'smooth_scene_handoff' as a conservative master-volume dip.

    Scene indices are not yet threaded through the kernel, so the only safe
    generic gesture is a gentle, easily reversed master dip bracketed by
    master-meter reads.  A scene-aware crossfade should arrive in a later
    version that reads from_scene/to_scene out of kernel.intent_context.
    """
    # Read → dip → read: bracket the change so the agent can verify and revert.
    dip_steps: list[CompiledStep] = [
        CompiledStep(
            tool="get_master_meters",
            params={},
            description="Record current master level for handoff reference",
            verify_after=False,
        ),
        CompiledStep(
            tool="set_master_volume",
            params={"volume": 0.78},
            description="Gentle master dip for transition",
        ),
        CompiledStep(
            tool="get_master_meters",
            params={},
            description="Verify master dip applied without clipping",
        ),
    ]

    applied = ["Master dip to 0.78"]
    # Flag the limitation explicitly so callers know this is a fallback plan.
    caveats = [
        "Scene-aware handoff (from_scene/to_scene) not yet compiled — "
        "this is a conservative energy-dip fallback"
    ]

    return CompiledPlan(
        move_id=move.move_id,
        intent=move.intent,
        steps=dip_steps,
        before_reads=[{"tool": "get_emotional_arc", "params": {}}],
        after_reads=[{"tool": "get_emotional_arc", "params": {}}],
        risk_level="low",
        summary="; ".join(applied),
        requires_approval=(kernel.get("mode", "improve") != "explore"),
        warnings=caveats,
    )
|
|
450
|
+
|
|
451
|
+
|
|
285
452
|
# ── Register all compilers ──────────────────────────────────────────────────
|
|
286
453
|
|
|
287
454
|
register_compiler("make_punchier", _compile_make_punchier)
|
|
@@ -289,3 +456,6 @@ register_compiler("tighten_low_end", _compile_tighten_low_end)
|
|
|
289
456
|
register_compiler("widen_stereo", _compile_widen_stereo)
|
|
290
457
|
register_compiler("darken_without_losing_width", _compile_darken_mix)
|
|
291
458
|
register_compiler("reduce_repetition_fatigue", _compile_reduce_repetition)
|
|
459
|
+
register_compiler("make_kick_bass_lock", _compile_make_kick_bass_lock)
|
|
460
|
+
register_compiler("create_buildup_tension", _compile_create_buildup_tension)
|
|
461
|
+
register_compiler("smooth_scene_handoff", _compile_smooth_scene_handoff)
|
|
@@ -92,7 +92,7 @@ REDUCE_REPETITION = SemanticMove(
|
|
|
92
92
|
],
|
|
93
93
|
verification_plan=[
|
|
94
94
|
{"tool": "get_track_meters", "check": "all tracks still producing audio", "backend": "remote_command"},
|
|
95
|
-
{"tool": "capture_audio", "check": "LRA > 2 LU (dynamic range should increase)", "backend": "
|
|
95
|
+
{"tool": "capture_audio", "check": "LRA > 2 LU (dynamic range should increase)", "backend": "bridge_command"},
|
|
96
96
|
],
|
|
97
97
|
)
|
|
98
98
|
|
|
@@ -24,6 +24,10 @@ class SemanticMove:
|
|
|
24
24
|
plan_template: list = field(default_factory=list) # [{tool, params, description}] — static metadata, NOT runtime truth
|
|
25
25
|
verification_plan: list = field(default_factory=list) # [{tool, check}]
|
|
26
26
|
confidence: float = 0.7
|
|
27
|
+
# analytical_only: move is intentionally metadata-only — no compiler is
|
|
28
|
+
# expected. Surfaces in discovery/wonder_mode but never executes. Set this
|
|
29
|
+
# to True for moves that are deliberate "hints" rather than orphan-by-bug.
|
|
30
|
+
analytical_only: bool = False
|
|
27
31
|
|
|
28
32
|
def to_dict(self) -> dict:
|
|
29
33
|
return {
|
|
@@ -36,6 +40,7 @@ class SemanticMove:
|
|
|
36
40
|
"required_capabilities": self.required_capabilities,
|
|
37
41
|
"plan_template_steps": len(self.plan_template),
|
|
38
42
|
"confidence": self.confidence,
|
|
43
|
+
"analytical_only": self.analytical_only,
|
|
39
44
|
}
|
|
40
45
|
|
|
41
46
|
def to_full_dict(self) -> dict:
|