livepilot 1.10.9 → 1.12.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. package/CHANGELOG.md +245 -0
  2. package/README.md +7 -7
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/m4l_bridge.py +488 -13
  7. package/mcp_server/runtime/execution_router.py +7 -0
  8. package/mcp_server/runtime/mcp_dispatch.py +32 -0
  9. package/mcp_server/runtime/remote_commands.py +54 -0
  10. package/mcp_server/sample_engine/slice_classifier.py +169 -0
  11. package/mcp_server/server.py +11 -3
  12. package/mcp_server/tools/analyzer.py +187 -7
  13. package/mcp_server/tools/clips.py +65 -0
  14. package/mcp_server/tools/devices.py +517 -5
  15. package/mcp_server/tools/diagnostics.py +42 -0
  16. package/mcp_server/tools/follow_actions.py +202 -0
  17. package/mcp_server/tools/grooves.py +142 -0
  18. package/mcp_server/tools/miditool.py +280 -0
  19. package/mcp_server/tools/scales.py +126 -0
  20. package/mcp_server/tools/take_lanes.py +135 -0
  21. package/mcp_server/tools/tracks.py +46 -3
  22. package/mcp_server/tools/transport.py +62 -1
  23. package/package.json +2 -2
  24. package/remote_script/LivePilot/__init__.py +8 -4
  25. package/remote_script/LivePilot/clips.py +62 -0
  26. package/remote_script/LivePilot/devices.py +444 -0
  27. package/remote_script/LivePilot/diagnostics.py +52 -1
  28. package/remote_script/LivePilot/follow_actions.py +235 -0
  29. package/remote_script/LivePilot/grooves.py +185 -0
  30. package/remote_script/LivePilot/scales.py +138 -0
  31. package/remote_script/LivePilot/take_lanes.py +175 -0
  32. package/remote_script/LivePilot/tracks.py +59 -1
  33. package/remote_script/LivePilot/transport.py +90 -1
  34. package/remote_script/LivePilot/version_detect.py +9 -0
  35. package/server.json +3 -3
@@ -48,6 +48,21 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
48
48
  "insert_device", # 12.3+ native device insertion
49
49
  "insert_rack_chain", # 12.3+ rack chain insertion
50
50
  "set_drum_chain_note", # 12.3+ drum chain note assignment
51
+ # rack variations + macro CRUD (Live 11+)
52
+ "get_rack_variations", "store_rack_variation",
53
+ "recall_rack_variation", "delete_rack_variation",
54
+ "randomize_rack_macros", "add_rack_macro",
55
+ "remove_rack_macro", "set_rack_visible_macros",
56
+ # simpler slice CRUD (Live 11+)
57
+ "insert_simpler_slice", "move_simpler_slice",
58
+ "remove_simpler_slice", "clear_simpler_slices",
59
+ "reset_simpler_slices", "import_slices_from_onsets",
60
+ # wavetable modulation matrix (Live 11+)
61
+ "get_wavetable_mod_targets", "add_wavetable_mod_route",
62
+ "set_wavetable_mod_amount", "get_wavetable_mod_amount",
63
+ "get_wavetable_mod_matrix",
64
+ # device A/B compare (Live 12.3+)
65
+ "get_device_ab_state", "toggle_device_ab", "copy_device_state",
51
66
  # clip_automation (3)
52
67
  "get_clip_automation", "set_clip_automation", "clear_clip_automation",
53
68
  # browser (6)
@@ -65,8 +80,39 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
65
80
  "capture_midi", "start_recording", "stop_recording",
66
81
  "get_cue_points", "jump_to_cue", "toggle_cue_point",
67
82
  "back_to_arranger", "force_arrangement",
83
+ # scales — Song + per-clip scale awareness (Live 12.0+)
84
+ "get_song_scale", "set_song_scale", "set_song_scale_mode",
85
+ "list_available_scales",
86
+ "get_clip_scale", "set_clip_scale", "set_clip_scale_mode",
87
+ # tuning system (Live 12.1+)
88
+ "get_tuning_system", "set_tuning_reference_pitch",
89
+ "set_tuning_note", "reset_tuning_system",
90
+ # follow actions — clip (Live 12.0 revamp) + scene (Live 12.2+)
91
+ "get_clip_follow_action", "set_clip_follow_action",
92
+ "clear_clip_follow_action", "list_follow_action_types",
93
+ "apply_follow_action_preset",
94
+ "get_scene_follow_action", "set_scene_follow_action",
95
+ "clear_scene_follow_action",
96
+ # groove pool (Live 11+)
97
+ "list_grooves", "get_groove_info", "set_groove_params",
98
+ "assign_clip_groove", "get_clip_groove",
99
+ "get_song_groove_amount", "set_song_groove_amount",
100
+ # take lanes (Live 12.0 UI / 12.2 API)
101
+ "get_take_lanes", "create_take_lane", "set_take_lane_name",
102
+ "create_audio_clip_on_take_lane", "create_midi_clip_on_take_lane",
103
+ "get_take_lane_clips",
68
104
  # diagnostics (1)
69
105
  "get_session_diagnostics",
106
+ # control surfaces (diagnostic)
107
+ "list_control_surfaces", "get_control_surface_info",
108
+ # song primitives — transport/link
109
+ "tap_tempo", "nudge_tempo",
110
+ "set_exclusive_arm", "set_exclusive_solo",
111
+ "capture_and_insert_scene", "set_count_in_duration",
112
+ "get_link_state", "set_link_enabled", "force_link_beat_time",
113
+ # track primitives
114
+ "jump_in_session_clip", "get_track_performance_impact",
115
+ "get_appointed_device",
70
116
  # ping (built-in)
71
117
  "ping",
72
118
  })
@@ -92,6 +138,14 @@ BRIDGE_COMMANDS: frozenset[str] = frozenset({
92
138
  # async Python MCP tool in mcp_server/tools/analyzer.py, not a bridge
93
139
  # command. It has no case in livepilot_bridge.js and no @register handler
94
140
  # in remote_script. See mcp_server/runtime/execution_router.MCP_TOOLS.
141
+ # NOTE: MIDI Tool bridge commands (Live 12.0+ MIDI Generators /
142
+ # Transformations, requires LivePilot_MIDITool.amxd) do NOT belong in
143
+ # this set. They ride OSC prefixes /miditool/request, /miditool/ready
144
+ # (bridge→server) and miditool/config, miditool/response (server→bridge),
145
+ # dispatched through m4l_device/miditool_bridge.js (a separate JS, not
146
+ # livepilot_bridge.js) and pushed directly via M4LBridge.send_miditool_*
147
+ # helpers rather than through send_command. BRIDGE_COMMANDS is reserved
148
+ # for send_command targets that dispatch inside livepilot_bridge.js.
95
149
  })
96
150
 
97
151
  # Combined: all valid send_command targets
@@ -0,0 +1,169 @@
1
+ """Spectral classification of Simpler-slice drum break segments.
2
+
3
+ Lessons from the 2026-04-18 creative session that drove this module:
4
+
5
+ - Never assume slice 0 is a kick. Slice content depends on transient
6
+ detection order in the source audio, not drum-rack convention.
7
+ - Snares ALWAYS have 20-35% mid-frequency energy (drum body resonance).
8
+ Hi-hats / cymbals have <25% mid (they're thin metal discs with no
9
+ resonant body). This mid-content threshold is the critical separator
10
+ that tripped up the first classification pass.
11
+ - Ghosts are defined by low peak amplitude (<0.35) regardless of spectrum.
12
+
13
+ Thresholds validated against "Break Ghosts 90 bpm" (Ableton Core Library)
14
+ in the session. See ``feedback_analyze_slices_before_programming`` memory
15
+ entry for the full story and the specific slice map for that sample.
16
+
17
+ This module is pure Python (numpy only). No LivePilot / Ableton /
18
+ FastMCP dependencies — testable and runnable in isolation.
19
+ """
20
+ from __future__ import annotations
21
+
22
+ from typing import List, Literal, Sequence, TypedDict
23
+
24
+ import numpy as np
25
+
26
+
27
+ Label = Literal["KICK", "SNARE", "HAT", "ghost"]
28
+
29
+
30
class SliceFeatures(TypedDict):
    """Classification outcome for one slice, bundled with its evidence.

    Carries the decided ``label`` together with the amplitude and band-energy
    features that drove the decision, so callers can audit (or re-threshold)
    the result without re-running the FFT.
    """

    index: int       # position of the slice in the sample's slice list
    label: Label     # "KICK" / "SNARE" / "HAT" / "ghost" verdict
    peak: float      # max absolute sample value in the segment
    rms: float       # root-mean-square level of the segment
    sub_pct: float   # energy fraction in the sub band (below _SUB_MAX)
    low_pct: float   # energy fraction in the low band (_SUB_MAX.._LOW_MAX)
    mid_pct: float   # energy fraction in the mid band (_LOW_MAX.._MID_MAX)
    high_pct: float  # energy fraction above _MID_MAX
42
+
43
# ---------------------------------------------------------------------------
# Band boundaries (Hz). Tuned against the 2026-04-18 session's working set.
# Each value is the exclusive upper edge of its band; see _band_energy.
# ---------------------------------------------------------------------------

_SUB_MAX = 100.0  # sub-bass ceiling (kick fundamentals live here)
_LOW_MAX = 300.0  # body / low-mid ceiling (kick "thud" + bass fundamentals)
_MID_MAX = 3000.0  # presence / drum body / vocals ceiling
# Everything above _MID_MAX is "high" (sizzle / air / cymbal).


# ---------------------------------------------------------------------------
# Classification thresholds. DO NOT loosen these without re-validating on a
# real break — the 2026-04-18 session proved that relaxing the HAT mid-cap
# to 32% misclassifies loud snares as hats.
# ---------------------------------------------------------------------------

_GHOST_PEAK = 0.35  # below this peak -> ghost regardless of spectrum
_KICK_SUB_LOW_MIN = 0.45  # sub + low energy fraction must be this dominant
_KICK_HIGH_MAX = 0.40  # kicks never have >40% high-band content
_SNARE_MID_MIN = 0.25  # snares HAVE a drum body (mid content)
_SNARE_HIGH_MIN = 0.40  # ...plus sizzle on top
_SNARE_PEAK_MIN = 0.60  # ...and are loud enough to not be a ghost
_HAT_HIGH_MIN = 0.70  # hats are thin metal — overwhelmingly high-band
_HAT_MID_MAX = 0.25  # hats have almost no drum body
67
+
68
+
69
def _band_energy(segment: np.ndarray, sr: int) -> tuple[float, float, float, float]:
    """Split a segment's rFFT magnitude into (sub, low, mid, high) fractions.

    The four fractions sum to ~1.0. Degenerate input (fewer than two
    samples, or an all-zero spectrum) yields equal quarters so downstream
    logic never hits a zero-sum edge case — callers still see silence
    through the peak/rms fields.
    """
    flat = (0.25, 0.25, 0.25, 0.25)
    if len(segment) < 2:
        return flat
    magnitude = np.abs(np.fft.rfft(segment))
    norm = float(magnitude.sum())
    if norm <= 0:
        return flat
    freqs = np.fft.rfftfreq(len(segment), 1.0 / sr)
    # rfftfreq bins are ascending, so the three band edges partition the
    # spectrum into four contiguous index ranges ("left" search keeps the
    # bin equal to an edge in the upper band, matching freqs < edge logic).
    i_sub, i_low, i_mid = np.searchsorted(freqs, (_SUB_MAX, _LOW_MAX, _MID_MAX))
    return (
        float(magnitude[:i_sub].sum() / norm),
        float(magnitude[i_sub:i_low].sum() / norm),
        float(magnitude[i_low:i_mid].sum() / norm),
        float(magnitude[i_mid:].sum() / norm),
    )
88
+
89
+
90
def classify_segment(segment: np.ndarray, sr: int) -> Label:
    """Label one audio segment as "KICK", "SNARE", "HAT", or "ghost".

    Decision order matters: ghost (quiet) is checked first, then kick,
    hat, snare, a broadband-loud snare fallback, and finally hat as the
    catch-all for loud-but-ambiguous content. See the module docstring
    for the session notes behind each threshold.
    """
    if len(segment) < 2:
        return "ghost"
    peak = float(np.max(np.abs(segment)))
    if peak < _GHOST_PEAK:
        # A quiet hit is a ghost no matter what its spectrum looks like.
        return "ghost"

    sub, low, mid, high = _band_energy(segment, sr)
    loud = peak >= _SNARE_PEAK_MIN

    # Kick: sub+low dominance with limited top end.
    if sub + low >= _KICK_SUB_LOW_MIN and high < _KICK_HIGH_MAX:
        return "KICK"

    # Hat: overwhelmingly high-frequency, essentially no drum-body mids.
    if high >= _HAT_HIGH_MIN and mid <= _HAT_MID_MAX:
        return "HAT"

    # Snare: broadband (mid body + high sizzle) AND loud...
    if loud and mid >= _SNARE_MID_MIN and high >= _SNARE_HIGH_MIN:
        return "SNARE"

    # ...or loud with combined mid/high dominance — usually snares with an
    # unusual spectrum (rim-shots, piccolo snare).
    if loud and mid + high >= 0.70:
        return "SNARE"

    # Anything left still carries real energy, so call it a hat rather
    # than a ghost (ghost is reserved for quiet hits).
    return "HAT"
128
+
129
+
130
def classify_slices(
    audio: np.ndarray,
    sr: int,
    frame_boundaries: Sequence[int],
) -> List[SliceFeatures]:
    """Classify every slice delimited by ``frame_boundaries``.

    ``frame_boundaries`` holds N+1 frame positions defining N slices: for
    slices starting at frames [0, 1000, 3000, 5000] in a sample of 10000
    frames total, pass [0, 1000, 3000, 5000, 10000].

    Stereo input is downmixed to mono (mean of the channels) first.

    Returns one ``SliceFeatures`` per slice, in slice-index order.
    """
    if audio.ndim > 1:
        audio = audio.mean(axis=1)

    bounds = [int(b) for b in frame_boundaries]
    out: List[SliceFeatures] = []
    # Walk consecutive boundary pairs: (bounds[i], bounds[i+1]) is slice i.
    for idx, (start, stop) in enumerate(zip(bounds, bounds[1:])):
        segment = audio[start:stop]
        if len(segment):
            amplitude = float(np.max(np.abs(segment)))
            level = float(np.sqrt(np.mean(segment ** 2)))
        else:
            amplitude = level = 0.0
        sub, low, mid, high = _band_energy(segment, sr)
        out.append(
            SliceFeatures(
                index=idx,
                label=classify_segment(segment, sr),
                peak=amplitude,
                rms=level,
                sub_pct=sub,
                low_pct=low,
                mid_pct=mid,
                high_pct=high,
            )
        )
    return out
@@ -9,7 +9,7 @@ import subprocess
9
9
  from fastmcp import FastMCP, Context # noqa: F401
10
10
 
11
11
  from .connection import AbletonConnection
12
- from .m4l_bridge import SpectralCache, SpectralReceiver, M4LBridge
12
+ from .m4l_bridge import SpectralCache, SpectralReceiver, M4LBridge, MidiToolCache
13
13
 
14
14
  # Logger must be defined before any function uses it — several module-level
15
15
  # helpers below (e.g. _master_has_livepilot_analyzer) call logger.debug on
@@ -174,8 +174,9 @@ async def lifespan(server):
174
174
 
175
175
  ableton = AbletonConnection()
176
176
  spectral = SpectralCache()
177
- receiver = SpectralReceiver(spectral)
178
- m4l = M4LBridge(spectral, receiver)
177
+ miditool = MidiToolCache()
178
+ receiver = SpectralReceiver(spectral, miditool_cache=miditool)
179
+ m4l = M4LBridge(spectral, receiver, miditool_cache=miditool)
179
180
  mcp_dispatch = build_mcp_dispatch_registry()
180
181
 
181
182
  # Splice gRPC client — graceful degradation if Splice desktop isn't
@@ -234,6 +235,7 @@ async def lifespan(server):
234
235
  yield {
235
236
  "ableton": ableton,
236
237
  "spectral": spectral,
238
+ "miditool": miditool,
237
239
  "m4l": m4l,
238
240
  "_bridge_state": bridge_state,
239
241
  "mcp_dispatch": mcp_dispatch,
@@ -257,6 +259,10 @@ from .tools import clips # noqa: F401, E402
257
259
  from .tools import notes # noqa: F401, E402
258
260
  from .tools import devices # noqa: F401, E402
259
261
  from .tools import scenes # noqa: F401, E402
262
+ from .tools import scales # noqa: F401, E402
263
+ from .tools import follow_actions # noqa: F401, E402
264
+ from .tools import grooves # noqa: F401, E402
265
+ from .tools import take_lanes # noqa: F401, E402
260
266
  from .tools import mixing # noqa: F401, E402
261
267
  from .tools import browser # noqa: F401, E402
262
268
  from .tools import arrangement # noqa: F401, E402
@@ -299,6 +305,8 @@ from .device_forge import tools as device_forge_tools # noqa: F401, E40
299
305
  from .sample_engine import tools as sample_engine_tools # noqa: F401, E402
300
306
  from .atlas import tools as atlas_tools # noqa: F401, E402
301
307
  from .composer import tools as composer_tools # noqa: F401, E402
308
+ from .tools import diagnostics # noqa: F401, E402
309
+ from .tools import miditool # noqa: F401, E402
302
310
 
303
311
  # ---------------------------------------------------------------------------
304
312
  # Schema coercion patch — accept strings for numeric parameters
@@ -34,6 +34,36 @@ logger = logging.getLogger(__name__)
34
34
  CAPTURE_DIR = os.path.expanduser("~/Documents/LivePilot/captures")
35
35
 
36
36
 
37
+ # Live 12 Simpler Slice mode maps slice N to MIDI pitch 36+N (C1 base).
38
+ # This is NOT exposed by the Remote Script API and is a common source of
39
+ # silent audio bugs (BUG-F2). See feedback_analyze_slices_before_programming
40
+ # memory for context.
41
+ SIMPLER_SLICE_BASE_PITCH = 36
42
+
43
+
44
+ def _enrich_slice_response(response: Optional[dict]) -> Optional[dict]:
45
+ """Add base_midi_pitch field + per-slice midi_pitch to bridge response (BUG-F2).
46
+
47
+ The Remote Script returns slice indices only. Users then have to know
48
+ that slice N plays at MIDI pitch 36+N — a fact that's undocumented in
49
+ both Ableton's and LivePilot's public API. This enrichment makes the
50
+ mapping explicit so MIDI pattern generation doesn't silently produce
51
+ out-of-range notes.
52
+ """
53
+ if response is None:
54
+ return None
55
+ enriched = dict(response)
56
+ enriched["base_midi_pitch"] = SIMPLER_SLICE_BASE_PITCH
57
+ slices = enriched.get("slices") or []
58
+ # BUG-audit-M2: fall back to positional index when the bridge response
59
+ # omits the `index` field (protects against bridge version skew).
60
+ enriched["slices"] = [
61
+ {**s, "midi_pitch": SIMPLER_SLICE_BASE_PITCH + s.get("index", i)}
62
+ for i, s in enumerate(slices)
63
+ ]
64
+ return enriched
65
+
66
+
37
67
  @mcp.tool()
38
68
  async def reconnect_bridge(ctx: Context) -> dict:
39
69
  """Attempt to reconnect the M4L UDP bridge (port 9880).
@@ -97,11 +127,36 @@ def get_master_spectrum(ctx: Context) -> dict:
97
127
  return result
98
128
 
99
129
 
130
+ def _sanitize_pitch(pitch: Optional[dict]) -> Optional[dict]:
131
+ """Validate a pitch reading from the M4L analyzer (BUG-F1).
132
+
133
+ The polyphonic pitch detector can emit out-of-range MIDI notes
134
+ (e.g., 319, -50, 128+) when it can't latch onto a single
135
+ fundamental — typical for dense mixes. The amplitude field is the
136
+ reliable confidence signal: if the detector was sure of its
137
+ reading, amplitude is non-zero.
138
+
139
+ Returns the original dict if the reading is usable, None otherwise.
140
+ """
141
+ if not pitch:
142
+ return None
143
+ amplitude = pitch.get("amplitude")
144
+ midi_note = pitch.get("midi_note")
145
+ if amplitude is None or amplitude <= 0:
146
+ return None
147
+ if midi_note is None or midi_note < 0 or midi_note > 127:
148
+ return None
149
+ return pitch
150
+
151
+
100
152
  @mcp.tool()
101
153
  def get_master_rms(ctx: Context) -> dict:
102
154
  """Get real-time RMS and peak levels from the master bus.
103
155
 
104
156
  More accurate than LOM meters — includes true RMS (not just peak hold).
157
+ Pitch readings are validated: the field is only present when the
158
+ polyphonic pitch detector produced a reading with non-zero
159
+ amplitude and a MIDI note in [0, 127] (BUG-F1).
105
160
  Requires LivePilot Analyzer on master track.
106
161
  """
107
162
  cache = _get_spectral(ctx)
@@ -117,9 +172,11 @@ def get_master_rms(ctx: Context) -> dict:
117
172
  if peak:
118
173
  result["peak"] = peak["value"]
119
174
 
120
- pitch = cache.get("pitch")
121
- if pitch:
122
- result["pitch"] = pitch["value"]
175
+ pitch_entry = cache.get("pitch")
176
+ if pitch_entry:
177
+ clean = _sanitize_pitch(pitch_entry.get("value"))
178
+ if clean is not None:
179
+ result["pitch"] = clean
123
180
 
124
181
  return result
125
182
 
@@ -414,15 +471,138 @@ async def get_simpler_slices(
414
471
  ) -> dict:
415
472
  """Get slice point positions from a Simpler device.
416
473
 
417
- Returns each slice's position in frames and seconds, plus sample metadata
418
- (sample rate, length, playback mode). Use this to understand the rhythmic
419
- structure of a sliced sample and program MIDI patterns targeting slices.
474
+ Returns each slice's position in frames and seconds, the MIDI pitch
475
+ that triggers it (slice 0 = C1 / MIDI 36, slice 1 = C#1 / MIDI 37, etc.
476
+ per BUG-F2), plus sample metadata (sample rate, length, playback mode).
477
+
478
+ **Always use the returned `midi_pitch` when programming MIDI notes to
479
+ trigger slices.** The Live 12 Simpler Slice-mode base note is C1,
480
+ NOT C3 — writing notes at pitch 60+ on a sample with <24 slices
481
+ triggers nothing and produces silent output.
482
+
483
+ Use this to understand the rhythmic structure of a sliced sample
484
+ and program MIDI patterns targeting slices. Requires LivePilot
485
+ Analyzer on master track.
486
+ """
487
+ cache = _get_spectral(ctx)
488
+ _require_analyzer(cache)
489
+ bridge = _get_m4l(ctx)
490
+ raw = await bridge.send_command("get_simpler_slices", track_index, device_index)
491
+ return _enrich_slice_response(raw)
492
+
493
+
494
+ @mcp.tool()
495
+ async def classify_simpler_slices(
496
+ ctx: Context,
497
+ track_index: int,
498
+ device_index: int = 0,
499
+ file_path: Optional[str] = None,
500
+ ) -> dict:
501
+ """Classify each Simpler slice as KICK / SNARE / HAT / ghost via FFT analysis.
502
+
503
+ Reads slice positions via ``get_simpler_slices``, loads the backing
504
+ WAV file, and runs 4-band spectral classification on each segment.
505
+ Returns the enriched slice list with a ``label`` field per entry
506
+ plus feature breakdown (peak, rms, sub_pct, low_pct, mid_pct,
507
+ high_pct).
508
+
509
+ **Always run this before programming drum patterns on a sliced
510
+ break.** Slice content depends on transient detection order in the
511
+ source audio — slice 0 is NOT guaranteed to be a kick. Assuming
512
+ drum-rack convention produces wrong grooves that take iterations to
513
+ diagnose (see 2026-04-18 creative session for the canonical case).
514
+
515
+ Classification rules (validated on "Break Ghosts 90 bpm"):
516
+ - KICK: sub+low >= 45%, high < 40%
517
+ - HAT: high >= 70% AND mid < 25% (thin metal disc = no drum body)
518
+ - SNARE: mid >= 25% AND high >= 40% AND peak >= 0.6 (broadband loud)
519
+ - ghost: peak < 0.35
520
+
521
+ Parameters:
522
+ track_index, device_index: the Simpler to analyze
523
+ file_path: (optional) explicit WAV path. If omitted, attempts
524
+ lookup via the bridge. Bridge-native resolution is limited in
525
+ v1.11 — when the sample lives in the Core Library, pass the
526
+ absolute path explicitly.
527
+
528
+ Returns: dict with ``slices`` list. Each slice entry has:
529
+ index, frame, seconds, midi_pitch (36+index), label, peak, rms,
530
+ sub_pct, low_pct, mid_pct, high_pct.
531
+
420
532
  Requires LivePilot Analyzer on master track.
421
533
  """
534
+ import soundfile as sf
535
+
536
+ from ..sample_engine.slice_classifier import classify_slices
537
+
422
538
  cache = _get_spectral(ctx)
423
539
  _require_analyzer(cache)
424
540
  bridge = _get_m4l(ctx)
425
- return await bridge.send_command("get_simpler_slices", track_index, device_index)
541
+
542
+ # 1. Get slice positions
543
+ raw_slices = await bridge.send_command(
544
+ "get_simpler_slices", track_index, device_index
545
+ )
546
+ enriched = _enrich_slice_response(raw_slices)
547
+ if enriched is None:
548
+ return {"error": "Bridge returned no slice data"}
549
+
550
+ # 2. Resolve file path
551
+ wav_path = file_path
552
+ if not wav_path:
553
+ try:
554
+ file_info = await bridge.send_command(
555
+ "get_simpler_file_path", track_index, device_index
556
+ )
557
+ if isinstance(file_info, dict):
558
+ wav_path = file_info.get("file_path")
559
+ except Exception: # noqa: BLE001 — bridge command may not exist yet
560
+ wav_path = None
561
+
562
+ if not wav_path:
563
+ return {
564
+ **enriched,
565
+ "error": (
566
+ "No file_path available — pass file_path= explicitly. "
567
+ "Bridge-based lookup for Simpler sample paths is a v1.12 "
568
+ "follow-up."
569
+ ),
570
+ }
571
+
572
+ # 3. Load WAV and build frame boundaries
573
+ try:
574
+ audio, sr = sf.read(wav_path)
575
+ except (sf.LibsndfileError, sf.SoundFileError, RuntimeError, OSError) as exc:
576
+ # BUG-audit-C3: corrupt / missing / non-audio files must return a
577
+ # structured error dict instead of raising through the MCP framework
578
+ # (inconsistent with every other tool in this module).
579
+ return {
580
+ **enriched,
581
+ "error": f"Could not load WAV at {wav_path!r}: {exc}",
582
+ }
583
+ slices = enriched["slices"]
584
+ frame_boundaries = [s["frame"] for s in slices] + [len(audio)]
585
+
586
+ # 4. Classify
587
+ classifications = classify_slices(audio, sr, frame_boundaries)
588
+
589
+ # 5. Merge classification into each slice entry
590
+ merged_slices = []
591
+ for slice_entry, features in zip(slices, classifications):
592
+ merged_slices.append({
593
+ **slice_entry,
594
+ "label": features["label"],
595
+ "peak": features["peak"],
596
+ "rms": features["rms"],
597
+ "sub_pct": features["sub_pct"],
598
+ "low_pct": features["low_pct"],
599
+ "mid_pct": features["mid_pct"],
600
+ "high_pct": features["high_pct"],
601
+ })
602
+
603
+ enriched["slices"] = merged_slices
604
+ enriched["classifier_version"] = "v1.0"
605
+ return enriched
426
606
 
427
607
 
428
608
  @mcp.tool()
@@ -503,3 +503,68 @@ async def check_clip_key_consistency(
503
503
  f"{mode_note}"
504
504
  ),
505
505
  }
506
+
507
+
508
+ @mcp.tool()
509
+ def get_clip_scale(ctx: Context, track_index: int, clip_index: int) -> dict:
510
+ """Read a clip's per-clip scale override (Live 12.0+).
511
+
512
+ Per-clip scales are independent of Song.scale_*. A clip can have
513
+ Scale Mode enabled with a different root/name than the Song.
514
+
515
+ Returns {root_note (0-11), scale_mode (bool), scale_name (str)}.
516
+ Raises if the clip slot is empty.
517
+ """
518
+ return _get_ableton(ctx).send_command("get_clip_scale", {
519
+ "track_index": track_index,
520
+ "clip_index": clip_index,
521
+ })
522
+
523
+
524
+ @mcp.tool()
525
+ def set_clip_scale(
526
+ ctx: Context,
527
+ track_index: int,
528
+ clip_index: int,
529
+ root_note: int,
530
+ scale_name: str,
531
+ ) -> dict:
532
+ """Set a clip's per-clip scale override (Live 12.0+).
533
+
534
+ Overrides the Song-level scale for this clip only. Useful for
535
+ key changes within a set, or for clips that live in a different
536
+ mode than the rest of the arrangement.
537
+
538
+ root_note: 0-11 (C=0, C#=1, ... B=11)
539
+ scale_name: must match one of Live's built-in scales
540
+ (call list_available_scales() if unsure)
541
+ """
542
+ if not 0 <= root_note <= 11:
543
+ raise ValueError("root_note must be 0-11")
544
+ if not scale_name.strip():
545
+ raise ValueError("scale_name cannot be empty")
546
+ return _get_ableton(ctx).send_command("set_clip_scale", {
547
+ "track_index": track_index,
548
+ "clip_index": clip_index,
549
+ "root_note": root_note,
550
+ "scale_name": scale_name,
551
+ })
552
+
553
+
554
+ @mcp.tool()
555
+ def set_clip_scale_mode(
556
+ ctx: Context,
557
+ track_index: int,
558
+ clip_index: int,
559
+ enabled: bool,
560
+ ) -> dict:
561
+ """Enable or disable Scale Mode on a single clip (Live 12.0+).
562
+
563
+ When enabled on a clip, its notes are constrained/highlighted
564
+ by the clip's own root_note + scale_name (set via set_clip_scale).
565
+ """
566
+ return _get_ableton(ctx).send_command("set_clip_scale_mode", {
567
+ "track_index": track_index,
568
+ "clip_index": clip_index,
569
+ "enabled": enabled,
570
+ })