livepilot 1.9.17 → 1.9.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/.claude-plugin/marketplace.json +1 -1
  2. package/AGENTS.md +2 -2
  3. package/CHANGELOG.md +35 -1
  4. package/README.md +2 -2
  5. package/livepilot/.Codex-plugin/plugin.json +1 -1
  6. package/livepilot/.claude-plugin/plugin.json +1 -1
  7. package/livepilot/skills/livepilot-core/SKILL.md +4 -8
  8. package/livepilot/skills/livepilot-core/references/overview.md +1 -1
  9. package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +1 -1
  10. package/livepilot/skills/livepilot-mixing/SKILL.md +2 -3
  11. package/livepilot/skills/livepilot-release/SKILL.md +10 -10
  12. package/m4l_device/livepilot_bridge.js +1 -1
  13. package/mcp_server/__init__.py +1 -1
  14. package/mcp_server/connection.py +29 -22
  15. package/mcp_server/evaluation/tools.py +1 -1
  16. package/mcp_server/m4l_bridge.py +7 -4
  17. package/mcp_server/mix_engine/tools.py +1 -1
  18. package/mcp_server/performance_engine/tools.py +1 -1
  19. package/mcp_server/reference_engine/tools.py +1 -1
  20. package/mcp_server/sound_design/tools.py +1 -1
  21. package/mcp_server/tools/_harmony_engine.py +36 -2
  22. package/mcp_server/tools/_theory_engine.py +92 -35
  23. package/mcp_server/tools/analyzer.py +4 -3
  24. package/mcp_server/tools/tracks.py +3 -3
  25. package/mcp_server/translation_engine/tools.py +1 -1
  26. package/package.json +1 -1
  27. package/remote_script/LivePilot/__init__.py +1 -1
  28. package/remote_script/LivePilot/arrangement.py +9 -2
  29. package/remote_script/LivePilot/browser.py +1 -0
  30. package/remote_script/LivePilot/clip_automation.py +6 -0
  31. package/remote_script/LivePilot/clips.py +2 -0
  32. package/remote_script/LivePilot/devices.py +1 -0
  33. package/remote_script/LivePilot/mixing.py +12 -5
  34. package/remote_script/LivePilot/tracks.py +6 -0
  35. package/remote_script/LivePilot/utils.py +2 -2
@@ -10,7 +10,7 @@
10
10
  {
11
11
  "name": "livepilot",
12
12
  "description": "Agentic production system for Ableton Live 12 — 236 tools, 32 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
13
- "version": "1.9.17",
13
+ "version": "1.9.19",
14
14
  "author": {
15
15
  "name": "Pilot Studio"
16
16
  },
package/AGENTS.md CHANGED
@@ -1,4 +1,4 @@
1
- # LivePilot v1.9.17 — Ableton Live 12
1
+ # LivePilot v1.9.18 — Ableton Live 12
2
2
 
3
3
  ## Project
4
4
  - **Repo:** This directory (LivePilot)
@@ -22,7 +22,7 @@
22
22
  ## Key Rules
23
23
  - ALL Live Object Model (LOM) calls must execute on Ableton's main thread via schedule_message queue
24
24
  - Live 12 minimum — use modern note API (add_new_notes, get_notes_extended, apply_note_modifications)
25
- - 236 tools across 32 domains: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, motif, research, planner, project_brain, runtime, evaluation, memory_fabric, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine
25
+ - 236 tools across 31 domains: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, motif, research, planner, project_brain, runtime, evaluation, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine
26
26
  - JSON over TCP, newline-delimited, port 9878
27
27
  - Structured errors with codes: INDEX_ERROR, NOT_FOUND, INVALID_PARAM, STATE_ERROR, TIMEOUT, INTERNAL
28
28
 
package/CHANGELOG.md CHANGED
@@ -1,5 +1,39 @@
1
1
  # Changelog
2
2
 
3
+ ## 1.9.19 — Theory Engine & Meters Fix Pass (April 2026)
4
+
5
+ ### Bug Fixes
6
+ - **fix(mixing):** `get_track_meters` crashed on tracks with MIDI-only output — now guards `output_meter_*` with `has_audio_output` check
7
+ - **fix(mixing):** `get_mix_snapshot` same crash on MIDI-output tracks — same guard applied
8
+ - **fix(tracks):** `create_midi_track` / `create_audio_track` left newly created tracks armed — now auto-disarms unless `arm=true` param is passed
9
+ - **fix(theory):** `roman_numeral()` failed to recognize 7th chords (Dm7, Gm7, Bbmaj7) — now detects 7th intervals via triad-subset matching with scored best-match selection
10
+ - **fix(theory):** `roman_figure_to_pitches()` produced out-of-key pitches (C#, G#) for jazz figures in minor keys — now uses scale-derived chord quality and scale-derived 7th intervals instead of forcing quality from Roman numeral case
11
+ - **fix(harmony):** `parse_chord()` rejected "minor seventh", "dominant seventh" and other extended chord qualities — now normalizes to base triad for neo-Riemannian analysis
12
+ - **fix(harmony):** `classify_transform_sequence()` only detected single P/L/R transforms — now tries 2-step compound transforms (PL, PR, RL, etc.)
13
+ - **fix(theory):** `roman_numeral()` picked wrong chord when 7th created ambiguity (Bbmaj7 matched as Dm instead of Bb) — scoring prefers highest overlap + root-position bonus
14
+
15
+ ## 1.9.18 — Deep Audit Fix Pass (April 2026)
16
+
17
+ ### Critical Fixes
18
+ - **fix(tracks):** monitoring enum mismatch — MCP advertised `0=Off,1=In,2=Auto` but Remote Script uses `0=In,1=Auto,2=Off`; clients deterministically chose wrong mode
19
+ - **fix(connection):** retry logic could replay mutating commands after `sendall` succeeded — added `_send_completed` flag to prevent double mutations
20
- **fix(m4l_bridge):** `capture_stop` cancelled the in-flight capture future instead of resolving it — callers got `CancelledError` instead of a partial result
21
+
22
+ ### Medium Fixes
23
+ - **fix(skills):** removed 6 phantom tool names from speed tiers (`analyze_dynamic_range`, `analyze_spectral_evolution`, `separate_stems`, `diagnose_mix`, `transcribe_to_midi`, `compare_loudness`)
24
+ - **fix(clip_automation):** added `int()` casts to `send_index`, `device_index`, `parameter_index` — prevented TypeError when MCP sends strings
25
+ - **fix(arrangement):** `add_arrangement_notes` now supports `probability`, `velocity_deviation`, `release_velocity` (parity with session `add_notes`)
26
+ - **fix(devices/browser):** reset `_iterations` counter per category in URI search — prevented premature cutoff for devices in later categories
27
+ - **fix(imports):** standardized 6 engine files from `mcp.server.fastmcp` to `fastmcp` import path
28
+ - **fix(docs):** corrected domain count from 32 to 31 (`memory_fabric` is an alias for `memory`) across 17 files
29
+ - **fix(server.json):** added missing `, MIDI I/O` to description to match package.json
30
+
31
+ ### Low Fixes
32
+ - **fix(clips):** `delete_clip` now checks `has_clip` before deleting
33
+ - **fix(arrangement):** `back_to_arranger` no longer reads write-only trigger property
34
+ - **fix(utils):** return track error message no longer shows `(0..-1)` when none exist
35
+ - **fix(connection):** removed dead `send_command_async` and unused `asyncio` import
36
+
3
37
  ## 1.9.17 — Skills Architecture V2 (April 2026)
4
38
 
5
39
  ### Skills (9 new, 1 slimmed)
@@ -106,7 +140,7 @@
106
140
  - Fix(Med): Ledger key mismatch — memory promotion now reads correct 'action_ledger' key
107
141
 
108
142
  ### Stats
109
- - 236 tools across 32 domains (was 194)
143
+ - 236 tools across 31 domains (was 194)
110
144
  - 1,014 tests passing (was ~400)
111
145
  - 12 new engine packages
112
146
  - 36 new MCP tools
package/README.md CHANGED
@@ -50,7 +50,7 @@
50
50
  │ ▼ │
51
51
  │ ┌─────────────────┐ │
52
52
  │ │ 236 MCP Tools │ │
53
- │ │ 32 domains │ │
53
+ │ │ 31 domains │ │
54
54
  │ └────────┬────────┘ │
55
55
  │ │ │
56
56
  │ Remote Script ──┤── TCP 9878 │
@@ -79,7 +79,7 @@ All three feed into 236 deterministic tools that execute on Ableton's main threa
79
79
 
80
80
  ## Tools
81
81
 
82
- 236 tools across 32 domains. Highlights below — [full catalog here](docs/manual/tool-catalog.md).
82
+ 236 tools across 31 domains. Highlights below — [full catalog here](docs/manual/tool-catalog.md).
83
83
 
84
84
  <br>
85
85
 
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.9.17",
3
+ "version": "1.9.19",
4
4
  "description": "Agentic production system for Ableton Live 12 — 236 tools, 32 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
5
5
  "author": {
6
6
  "name": "Pilot Studio"
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.9.17",
3
+ "version": "1.9.19",
4
4
  "description": "Agentic production system for Ableton Live 12 — 236 tools, 32 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
5
5
  "author": {
6
6
  "name": "Pilot Studio"
@@ -34,20 +34,16 @@ Agentic production system for Ableton Live 12. 236 tools across 32 domains, thre
34
34
  All 236 core tools plus M4L perception tools.
35
35
 
36
36
  ### Fast (1-5s) — Use freely
37
- `analyze_loudness` · `analyze_dynamic_range` · `compare_loudness`
37
+ `analyze_loudness` · `analyze_mix` · `analyze_sound_design`
38
38
 
39
39
  ### Slow (5-15s) — Tell the user first
40
- `analyze_spectral_evolution` · `compare_to_reference` · `transcribe_to_midi`
41
-
42
- ### Heavy (30-120s) — ALWAYS ask first
43
- `separate_stems` · `diagnose_mix` — GPU-intensive. Never call speculatively.
40
+ `compare_to_reference` · `analyze_spectrum_offline` · `read_audio_metadata`
44
41
 
45
42
  **Escalation pattern:** Start fast, escalate only with consent:
46
43
  ```
47
44
  Level 1 (instant): get_master_spectrum + get_track_meters
48
- Level 2 (fast): analyze_loudness + analyze_dynamic_range
49
- Level 3 (slow): analyze_spectral_evolution + compare_to_reference
50
- Level 4 (heavy): separate_stems → diagnose_mix
45
+ Level 2 (fast): analyze_loudness + analyze_mix
46
+ Level 3 (slow): compare_to_reference + analyze_spectrum_offline
51
47
  ```
52
48
 
53
49
  ## Error Handling Protocol
@@ -1,4 +1,4 @@
1
- # LivePilot v1.9.17 — Architecture & Tool Reference
1
+ # LivePilot v1.9.19 — Architecture & Tool Reference
2
2
 
3
3
  Agentic production system for Ableton Live 12. 236 tools across 32 domains. Device atlas (280+ devices), spectral perception (M4L analyzer), technique memory, automation intelligence (16 curve types, 15 recipes), music theory (Krumhansl-Schmuckler, species counterpoint), generative algorithms (Euclidean rhythm, tintinnabuli, phase shift, additive process), neo-Riemannian harmony (PRL transforms, Tonnetz), MIDI file I/O.
4
4
 
@@ -104,7 +104,7 @@ Call `get_capability_state` at the start of any evaluation session. The response
104
104
  {
105
105
  "mode": "normal",
106
106
  "analyzer_connected": true,
107
- "bridge_version": "1.9.17",
107
+ "bridge_version": "1.9.18",
108
108
  "spectral_cache_age_ms": 1200,
109
109
  "flucoma_available": false,
110
110
  "session_connected": true
@@ -145,9 +145,8 @@ Use the mix engine when the user wants a critical evaluation of their mix, not j
145
145
  Follow this progression — start fast, go deeper only when needed:
146
146
 
147
147
  1. **Instant:** `get_master_spectrum` + `get_track_meters` — frequency balance + levels. Answers 80% of mix questions.
148
- 2. **Fast (1-5s):** `analyze_loudness` + `analyze_dynamic_range` — LUFS, true peak, LRA, crest factor. For mastering prep.
149
- 3. **Slow (5-15s):** `analyze_spectral_evolution` + `compare_to_reference` — timbral trends, reference matching. Ask the user first.
150
- 4. **Heavy (30-120s):** `separate_stems` + `diagnose_mix` — full diagnostic. Requires explicit user consent.
148
+ 2. **Fast (1-5s):** `analyze_loudness` + `analyze_mix` — LUFS, true peak, and full mix analysis. For mastering prep.
149
+ 3. **Slow (5-15s):** `compare_to_reference` + `analyze_spectrum_offline` — reference matching, offline spectral analysis. Ask the user first.
151
150
 
152
151
  Never skip levels. Start at the lowest appropriate level and offer to go deeper.
153
152
 
@@ -28,21 +28,21 @@ Run this checklist EVERY time the user says "update everything", "push", "releas
28
28
 
29
29
  ## 2. Tool Count (must ALL match)
30
30
 
31
- Current: **236 tools across 32 domains**.
31
+ Current: **236 tools across 31 domains**.
32
32
  Core (no M4L): **149**. Analyzer (M4L): **29**. Perception (offline): **4**.
33
33
 
34
34
  Verify: `grep -rc "@mcp.tool" mcp_server/tools/ | grep -v ":0" | awk -F: '{sum+=$2} END{print sum}'`
35
35
 
36
36
  Files that reference tool count:
37
37
  - [ ] `README.md` — header, PERCEPTION section ("207 core...29 analyzer"), Analyzer table header "(29)", Perception table header "(4)"
38
- - [ ] `package.json` → `"description"` (236 tools, 32 domains)
38
+ - [ ] `package.json` → `"description"` (236 tools, 31 domains)
39
39
  - [ ] `server.json` → `"description"`
40
40
  - [ ] `livepilot/.Codex-plugin/plugin.json` → `"description"` (primary Codex manifest)
41
41
  - [ ] `livepilot/.claude-plugin/plugin.json` → `"description"` (must match Codex plugin)
42
42
  - [ ] `.claude-plugin/marketplace.json` → `"description"`
43
- - [ ] `CLAUDE.md` → "236 tools across 32 domains"
44
- - [ ] `livepilot/skills/livepilot-core/SKILL.md` — "236 tools across 32 domains", Analyzer (29), Perception (4)
45
- - [ ] `livepilot/skills/livepilot-core/references/overview.md` — "236 tools across 32 domains"
43
+ - [ ] `CLAUDE.md` → "236 tools across 31 domains"
44
+ - [ ] `livepilot/skills/livepilot-core/SKILL.md` — "236 tools across 31 domains", Analyzer (29), Perception (4)
45
+ - [ ] `livepilot/skills/livepilot-core/references/overview.md` — "236 tools across 31 domains"
46
46
  - [ ] `docs/manual/index.md` — domain table: Analyzer (29), Perception (4)
47
47
  - [ ] `docs/manual/getting-started.md` — "207 core tools...29 analyzer"
48
48
  - [ ] `docs/manual/tool-reference.md` — all domains present with correct counts
@@ -56,10 +56,10 @@ Files that reference tool count:
56
56
 
57
57
  ## 3. Domain Count
58
58
 
59
- Current: **32 domains**: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, research, planner, project_brain, runtime, evaluation, memory_fabric, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine.
59
+ Current: **31 domains**: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, motif, research, planner, project_brain, runtime, evaluation, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine.
60
60
 
61
- - [ ] All files that mention domain count say "32 domains"
62
- - [ ] Domain lists include ALL 32 (especially newer domains — they're the most often omitted)
61
+ - [ ] All files that mention domain count say "31 domains"
62
+ - [ ] Domain lists include ALL 31 (especially newer domains — they're the most often omitted)
63
63
 
64
64
  ## 4. npm Registry
65
65
 
@@ -89,8 +89,8 @@ Current: **32 domains**: transport, tracks, clips, notes, devices, scenes, mixin
89
89
 
90
90
  - [ ] `README.md` — features match current capabilities, "Coming" section is accurate
91
91
  - [ ] `docs/manual/getting-started.md` — install instructions current
92
- - [ ] `docs/manual/tool-reference.md` — all 32 domains listed, all 236 tools present
93
- - [ ] `docs/TOOL_REFERENCE.md` — all 32 domains present
92
+ - [ ] `docs/manual/tool-reference.md` — all 31 domains listed, all 236 tools present
93
+ - [ ] `docs/TOOL_REFERENCE.md` — all 31 domains present
94
94
  - [ ] `docs/M4L_BRIDGE.md` — architecture accurate, core tool count correct
95
95
 
96
96
  ## 9. Derived Artifacts
@@ -84,7 +84,7 @@ function anything() {
84
84
  function dispatch(cmd, args) {
85
85
  switch(cmd) {
86
86
  case "ping":
87
- send_response({"ok": true, "version": "1.9.17"});
87
+ send_response({"ok": true, "version": "1.9.19"});
88
88
  break;
89
89
  case "get_params":
90
90
  cmd_get_params(args);
@@ -1,2 +1,2 @@
1
1
  """LivePilot MCP Server — bridges MCP protocol to Ableton Live."""
2
- __version__ = "1.9.17"
2
+ __version__ = "1.9.19"
@@ -2,7 +2,6 @@
2
2
 
3
3
  from __future__ import annotations
4
4
 
5
- import asyncio
6
5
  import json
7
6
  import os
8
7
  import socket
@@ -162,15 +161,20 @@ class AbletonConnection:
162
161
  try:
163
162
  response = self._send_raw(command)
164
163
  except AbletonConnectionError as exc:
165
- # Don't retry timeouts Ableton may have processed the command
164
+ # If the send phase succeeded (data left this process),
165
+ # Ableton may have already applied the command. Never
166
+ # replay — the duplicate mutation is worse than the error.
167
+ if getattr(exc, '_send_completed', False):
168
+ raise
169
+ # Don't retry timeouts either
166
170
  if "Timeout" in str(exc):
167
171
  raise
168
- # Retry once with a fresh connection for non-timeout errors
172
+ # Send itself failed — safe to retry with a fresh connection
169
173
  self.disconnect()
170
174
  self.connect()
171
175
  response = self._send_raw(command)
172
176
  except OSError:
173
- # Retry once with a fresh connection
177
+ # Socket error before send — safe to retry
174
178
  self.disconnect()
175
179
  self.connect()
176
180
  response = self._send_raw(command)
@@ -196,15 +200,6 @@ class AbletonConnection:
196
200
  self._command_log.append(log_entry)
197
201
  return response.get("result", {})
198
202
 
199
- async def send_command_async(self, command_type: str, params: Optional[dict] = None) -> dict:
200
- """Async wrapper around send_command that avoids blocking the event loop.
201
-
202
- Runs the blocking TCP send/receive in a thread pool executor so the
203
- asyncio event loop remains responsive to other concurrent MCP tools.
204
- """
205
- loop = asyncio.get_running_loop()
206
- return await loop.run_in_executor(None, self.send_command, command_type, params)
207
-
208
203
  # ------------------------------------------------------------------
209
204
  # Command log
210
205
  # ------------------------------------------------------------------
@@ -234,7 +229,9 @@ class AbletonConnection:
234
229
  self.disconnect()
235
230
  raise AbletonConnectionError(f"Failed to send command: {exc}") from exc
236
231
 
237
- # Read until newline, preserving any trailing bytes in _recv_buf
232
+ # Read until newline, preserving any trailing bytes in _recv_buf.
233
+ # Any error past this point means the send already reached Ableton,
234
+ # so callers must NOT retry the command (it may have been applied).
238
235
  buf = self._recv_buf
239
236
  try:
240
237
  while b"\n" not in buf:
@@ -242,31 +239,41 @@ class AbletonConnection:
242
239
  if not chunk:
243
240
  self._recv_buf = b""
244
241
  self.disconnect()
245
- raise AbletonConnectionError("Connection closed by Ableton")
242
+ err = AbletonConnectionError("Connection closed by Ableton")
243
+ err._send_completed = True
244
+ raise err
246
245
  buf += chunk
247
246
  if len(buf) > 10 * 1024 * 1024: # 10 MB
248
247
  self._recv_buf = b""
249
248
  self.disconnect()
250
- raise AbletonConnectionError("Response too large (>10 MB)")
249
+ err = AbletonConnectionError("Response too large (>10 MB)")
250
+ err._send_completed = True
251
+ raise err
251
252
  except socket.timeout as exc:
252
253
  self._recv_buf = buf
253
254
  self.disconnect()
254
255
  other_client = _identify_other_tcp_client(self.host, self.port)
255
256
  if other_client:
256
- raise AbletonConnectionError(
257
+ err = AbletonConnectionError(
257
258
  "Timeout waiting for response from Ableton. "
258
259
  f"Another LivePilot client appears to be connected on {self.host}:{self.port} "
259
260
  f"({other_client}). Disconnect the other client and retry."
260
- ) from exc
261
- raise AbletonConnectionError(
261
+ )
262
+ err._send_completed = True
263
+ raise err from exc
264
+ err = AbletonConnectionError(
262
265
  f"Timeout waiting for response from Ableton ({RECV_TIMEOUT}s)"
263
- ) from exc
266
+ )
267
+ err._send_completed = True
268
+ raise err from exc
264
269
  except OSError as exc:
265
270
  self._recv_buf = b""
266
271
  self.disconnect()
267
- raise AbletonConnectionError(
272
+ err = AbletonConnectionError(
268
273
  f"Socket error reading response: {exc}"
269
- ) from exc
274
+ )
275
+ err._send_completed = True
276
+ raise err from exc
270
277
 
271
278
  line, remainder = buf.split(b"\n", 1)
272
279
  self._recv_buf = remainder
@@ -6,7 +6,7 @@ to the appropriate engine-specific evaluator via fabric.evaluate().
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from ..tools._evaluation_contracts import EvaluationRequest, EvaluationResult
@@ -400,15 +400,18 @@ class M4LBridge:
400
400
  return {"error": "M4L capture timeout — device may be busy or removed"}
401
401
 
402
402
  async def cancel_capture_future(self) -> None:
403
- """Cancel any in-progress capture future (called by capture_stop).
403
+ """Resolve any in-progress capture future with a stopped result.
404
404
 
405
405
  Does NOT acquire _cmd_lock — send_capture holds it during recording.
406
- Cancelling the future causes send_capture's wait_for to raise
407
- CancelledError, which releases the lock naturally.
406
+ Resolving (not cancelling) the future lets send_capture return a
407
+ clean partial-result dict instead of raising CancelledError.
408
408
  """
409
409
  if self.receiver and self.receiver._capture_future \
410
410
  and not self.receiver._capture_future.done():
411
- self.receiver._capture_future.cancel()
411
+ self.receiver._capture_future.set_result({
412
+ "ok": True,
413
+ "stopped_early": True,
414
+ })
412
415
  self.receiver._capture_future = None
413
416
 
414
417
  def _build_osc(self, address: str, args: tuple) -> bytes:
@@ -6,7 +6,7 @@ then delegates to pure-computation modules.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from ..tools._evaluation_contracts import EvaluationRequest
@@ -6,7 +6,7 @@ then delegates to pure-computation modules.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from .models import EnergyWindow, SceneRole
@@ -6,7 +6,7 @@ then delegates to pure-computation modules.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from ..tools._research_engine import get_style_tactics
@@ -6,7 +6,7 @@ then delegates to pure-computation modules.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from .models import (
@@ -29,8 +29,26 @@ def chord_to_str(root_pc: int, quality: str) -> str:
29
29
  def parse_chord(chord_str: str) -> tuple[int, str]:
30
30
  """Parse 'C major' → (0, 'major'), 'F# minor' → (6, 'minor').
31
31
 
32
- Uses _theory_engine.parse_key() internally check what it returns!
32
+ Also handles 7th chord qualities by reducing to base triad:
33
+ 'D minor seventh' → (2, 'minor'), 'G dominant seventh' → (7, 'major').
34
+ Neo-Riemannian transforms operate on triads, so we strip extensions.
33
35
  """
36
+ # Normalize extended chord quality names to base triad
37
+ s = chord_str.strip()
38
+ _QUALITY_MAP = {
39
+ "minor seventh": "minor", "minor 7th": "minor", "minor7": "minor",
40
+ "major seventh": "major", "major 7th": "major", "major7": "major",
41
+ "dominant seventh": "major", "dominant 7th": "major", "dominant7": "major",
42
+ "diminished seventh": "minor", "diminished 7th": "minor",
43
+ "half-diminished seventh": "minor", "half-diminished": "minor",
44
+ }
45
+ for ext, base in _QUALITY_MAP.items():
46
+ if ext in s.lower():
47
+ # Extract root (everything before the quality)
48
+ idx = s.lower().index(ext)
49
+ root = s[:idx].strip() or s.split()[0]
50
+ return (engine.parse_key(f"{root} {base}")["tonic"], base)
51
+
34
52
  parsed = engine.parse_key(chord_str)
35
53
  mode = parsed["mode"]
36
54
  if mode not in ("major", "minor"):
@@ -177,14 +195,30 @@ def find_shortest_path(
177
195
  # ---------------------------------------------------------------------------
178
196
 
179
197
  def classify_transform_sequence(chords: list[tuple[int, str]]) -> list[str]:
180
- """Identify the PRL transform between each consecutive pair of chords."""
198
+ """Identify the PRL transform between each consecutive pair of chords.
199
+
200
+ Tries single transforms (P, L, R) first, then 2-step compound
201
+ transforms (PL, PR, LP, LR, RP, RL) for richer classification.
202
+ """
203
+ _COMPOUNDS = ["PL", "PR", "LP", "LR", "RP", "RL",
204
+ "PP", "LL", "RR"]
181
205
  result = []
182
206
  for i in range(len(chords) - 1):
183
207
  found = "?"
208
+ # Try single transforms first
184
209
  for label, fn in TRANSFORMS.items():
185
210
  if fn(*chords[i]) == chords[i + 1]:
186
211
  found = label
187
212
  break
213
+ # Try 2-step compound transforms
214
+ if found == "?":
215
+ for compound in _COMPOUNDS:
216
+ try:
217
+ if apply_transforms(*chords[i], compound) == chords[i + 1]:
218
+ found = compound
219
+ break
220
+ except (ValueError, KeyError):
221
+ continue
188
222
  result.append(found)
189
223
  return result
190
224
 
@@ -231,33 +231,73 @@ def chord_name(midi_pitches: list[int]) -> str:
231
231
 
232
232
 
233
233
  def roman_numeral(chord_pcs: list[int], tonic: int, mode: str) -> dict:
234
- """Match chord pitch classes -> Roman numeral figure."""
234
+ """Match chord pitch classes -> Roman numeral figure.
235
+
236
+ Recognizes triads and 7th chords by checking if the input contains
237
+ a scale-degree triad, then detecting the 7th (if any).
238
+ """
235
239
  pcs_set = set(pc % 12 for pc in chord_pcs)
236
240
  bass_pc = chord_pcs[0] % 12 if chord_pcs else 0
237
241
 
238
242
  best = {"figure": "?", "quality": "unknown", "degree": 0,
239
243
  "inversion": 0, "root_name": NOTE_NAMES[tonic]}
244
+ best_score = -1
240
245
 
241
246
  for degree in range(7):
242
247
  triad = build_chord(degree, tonic, mode)
243
248
  triad_set = set(triad["pitch_classes"])
244
- if pcs_set == triad_set or pcs_set.issubset(triad_set):
245
- quality = triad["quality"]
246
- label = ROMAN_LABELS[degree]
247
- if quality in ("minor", "diminished"):
248
- label = label.lower()
249
- if quality == "diminished":
250
- label += "\u00b0"
251
- # Detect inversion
252
- inv = 0
253
- if bass_pc != triad["root_pc"]:
254
- if bass_pc == triad["pitch_classes"][1]:
255
- inv = 1
256
- elif bass_pc == triad["pitch_classes"][2]:
257
- inv = 2
258
- best = {"figure": label, "quality": quality, "degree": degree,
259
- "inversion": inv, "root_name": triad["root_name"]}
260
- break
249
+ # Match: exact triad, triad is subset of input (7th chord),
250
+ # or input is subset of triad (power chord / omitted note)
251
+ if not (pcs_set == triad_set or triad_set.issubset(pcs_set)
252
+ or pcs_set.issubset(triad_set)):
253
+ continue
254
+
255
+ # Score: prefer matches with more overlap and bass-note match
256
+ overlap = len(pcs_set & triad_set)
257
+ score = overlap * 10
258
+ if bass_pc == triad["root_pc"]:
259
+ score += 5 # root position bonus
260
+
261
+ if score <= best_score:
262
+ continue
263
+
264
+ quality = triad["quality"]
265
+ label = ROMAN_LABELS[degree]
266
+ if quality in ("minor", "diminished"):
267
+ label = label.lower()
268
+ if quality == "diminished":
269
+ label += "\u00b0"
270
+
271
+ # Detect 7th: extra pitch class beyond the triad
272
+ extra_pcs = pcs_set - triad_set
273
+ if extra_pcs:
274
+ seventh_interval = (list(extra_pcs)[0] - triad["root_pc"]) % 12
275
+ if seventh_interval == 10: # minor/dominant 7th
276
+ label += "7"
277
+ if quality == "diminished":
278
+ quality = "half-diminished seventh"
279
+ elif quality == "minor":
280
+ quality = "minor seventh"
281
+ else:
282
+ quality = "dominant seventh"
283
+ elif seventh_interval == 11: # major 7th
284
+ label += "maj7"
285
+ quality = "major seventh"
286
+ elif seventh_interval == 9: # diminished 7th
287
+ label += "o7"
288
+ quality = "diminished seventh"
289
+
290
+ # Detect inversion
291
+ inv = 0
292
+ if bass_pc != triad["root_pc"]:
293
+ if bass_pc == triad["pitch_classes"][1]:
294
+ inv = 1
295
+ elif bass_pc == triad["pitch_classes"][2]:
296
+ inv = 2
297
+
298
+ best = {"figure": label, "quality": quality, "degree": degree,
299
+ "inversion": inv, "root_name": triad["root_name"]}
300
+ best_score = score
261
301
 
262
302
  return best
263
303
 
@@ -302,31 +342,48 @@ def roman_figure_to_pitches(figure: str, tonic: int, mode: str) -> dict:
302
342
  chord = build_chord(degree, tonic, mode)
303
343
  root_pc = (chord["root_pc"] + chromatic_shift) % 12
304
344
 
305
- # Build pitch classes based on quality
306
- if is_minor_quality:
307
- pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 7) % 12]
345
+ # Build pitch classes based on quality.
346
+ # When there's no chromatic alteration, use scale-derived quality so
347
+ # that e.g. "vi7" in D minor correctly yields Bb major 7th, not Bb minor.
348
+ # Only force minor from case when there's an explicit accidental.
349
+ if chromatic_shift != 0 and is_minor_quality:
308
350
  quality = "minor"
351
+ elif chromatic_shift != 0 and not is_minor_quality:
352
+ quality = "major"
309
353
  else:
310
- # Use scale-derived quality
311
354
  quality = chord["quality"]
312
- if quality == "minor":
313
- pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 7) % 12]
314
- elif quality == "diminished":
315
- pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 6) % 12]
316
- elif quality == "augmented":
317
- pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 8) % 12]
318
- else:
319
- pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 7) % 12]
320
355
 
321
- # Handle suffix
356
+ if quality == "minor":
357
+ pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 7) % 12]
358
+ elif quality == "diminished":
359
+ pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 6) % 12]
360
+ elif quality == "augmented":
361
+ pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 8) % 12]
362
+ else:
363
+ pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 7) % 12]
364
+
365
+ # Handle suffix — derive 7th from the scale when possible
322
366
  suffix = remaining.lower()
323
367
  if suffix == "7":
324
- seventh = (root_pc + 10) % 12 # dominant/minor 7th
368
+ # Use scale-derived 7th: pitch class a diatonic 7th above the root
369
+ scale = get_scale_pitches(tonic, mode)
370
+ seventh_degree = (degree + 6) % 7 # 7th of the chord = 6 steps up
371
+ seventh = scale[seventh_degree]
372
+ seventh_interval = (seventh - root_pc) % 12
325
373
  pcs.append(seventh)
326
- if quality == "minor":
327
- quality = "minor seventh"
374
+ if seventh_interval == 11:
375
+ quality = "major seventh"
376
+ elif seventh_interval == 10:
377
+ if quality == "diminished":
378
+ quality = "half-diminished seventh"
379
+ elif quality == "minor":
380
+ quality = "minor seventh"
381
+ else:
382
+ quality = "dominant seventh"
383
+ elif seventh_interval == 9:
384
+ quality = "diminished seventh"
328
385
  else:
329
- quality = "dominant seventh"
386
+ quality = "minor seventh" if quality == "minor" else "dominant seventh"
330
387
  elif suffix == "o7":
331
388
  seventh = (root_pc + 9) % 12 # diminished 7th
332
389
  pcs.append(seventh)
@@ -634,14 +634,15 @@ async def capture_audio(
634
634
  async def capture_stop(ctx: Context) -> dict:
635
635
  """Stop an in-progress audio capture early.
636
636
 
637
- Cancels the running buffer~ recording and returns whatever audio has
638
- been captured so far. The partial file is still written to disk.
637
+ Tells the M4L bridge to stop buffer~ recording and resolves the
638
+ in-flight capture_audio call with a partial result (stopped_early=True).
639
+ The partial file is still written to disk by the bridge.
639
640
  Requires LivePilot Analyzer on master track.
640
641
  """
641
642
  cache = _get_spectral(ctx)
642
643
  _require_analyzer(cache)
643
644
  bridge = _get_m4l(ctx)
644
- # Cancel the capture future so send_capture doesn't hang forever
645
+ # Resolve the capture future so send_capture returns cleanly
645
646
  await bridge.cancel_capture_future()
646
647
  return await bridge.send_command("capture_stop")
647
648
 
@@ -176,10 +176,10 @@ def set_group_fold(ctx: Context, track_index: int, folded: bool) -> dict:
176
176
 
177
177
  @mcp.tool()
178
178
  def set_track_input_monitoring(ctx: Context, track_index: int, state: int) -> dict:
179
- """Set input monitoring (0=Off, 1=In, 2=Auto). Only for regular tracks, not return tracks."""
180
- _validate_track_index(track_index)
179
+ """Set input monitoring (0=In, 1=Auto, 2=Off). Only for regular tracks, not return tracks."""
180
+ _validate_track_index(track_index, allow_return=False)
181
181
  if state not in (0, 1, 2):
182
- raise ValueError("Monitoring state must be 0=Off, 1=In, or 2=Auto")
182
+ raise ValueError("Monitoring state must be 0=In, 1=Auto, or 2=Off")
183
183
  return _get_ableton(ctx).send_command("set_track_input_monitoring", {
184
184
  "track_index": track_index,
185
185
  "state": state,
@@ -6,7 +6,7 @@ then delegates to pure-computation critics.
6
6
 
7
7
  from __future__ import annotations
8
8
 
9
- from mcp.server.fastmcp import Context
9
+ from fastmcp import Context
10
10
 
11
11
  from ..server import mcp
12
12
  from .critics import build_translation_report, run_all_translation_critics
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.9.17",
3
+ "version": "1.9.19",
4
4
  "mcpName": "io.github.dreamrec/livepilot",
5
5
  "description": "Agentic production system for Ableton Live 12 — 236 tools, 32 domains, device atlas, spectral perception, technique memory, neo-Riemannian harmony, Euclidean rhythm, species counterpoint, MIDI I/O",
6
6
  "author": "Pilot Studio",
@@ -5,7 +5,7 @@ Entry point for the ControlSurface. Ableton calls create_instance(c_instance)
5
5
  when this script is selected in Preferences > Link, Tempo & MIDI.
6
6
  """
7
7
 
8
- __version__ = "1.9.17"
8
+ __version__ = "1.9.19"
9
9
 
10
10
  from _Framework.ControlSurface import ControlSurface
11
11
  from .server import LivePilotServer
@@ -190,13 +190,20 @@ def add_arrangement_notes(song, params):
190
190
  try:
191
191
  note_specs = []
192
192
  for note in notes:
193
- spec = Live.Clip.MidiNoteSpecification(
193
+ kwargs = dict(
194
194
  pitch=int(note["pitch"]),
195
195
  start_time=float(note["start_time"]),
196
196
  duration=float(note["duration"]),
197
197
  velocity=float(note.get("velocity", 100)),
198
198
  mute=bool(note.get("mute", False)),
199
199
  )
200
+ if "probability" in note:
201
+ kwargs["probability"] = float(note["probability"])
202
+ if "velocity_deviation" in note:
203
+ kwargs["velocity_deviation"] = float(note["velocity_deviation"])
204
+ if "release_velocity" in note:
205
+ kwargs["release_velocity"] = float(note["release_velocity"])
206
+ spec = Live.Clip.MidiNoteSpecification(**kwargs)
200
207
  note_specs.append(spec)
201
208
  clip.add_new_notes(tuple(note_specs))
202
209
  finally:
@@ -703,4 +710,4 @@ def toggle_cue_point(song, params):
703
710
  def back_to_arranger(song, params):
704
711
  """Switch playback from session clips back to the arrangement timeline."""
705
712
  song.back_to_arranger = True
706
- return {"back_to_arranger": song.back_to_arranger}
713
+ return {"back_to_arranger": True}
@@ -263,6 +263,7 @@ def load_browser_item(song, params):
263
263
  return None
264
264
 
265
265
  for category in categories:
266
+ _iterations[0] = 0 # Reset counter per category to avoid premature cutoff
266
267
  found = find_by_uri(category, uri)
267
268
  if found is not None:
268
269
  song.view.selected_track = track
@@ -100,6 +100,7 @@ def set_clip_automation(song, params):
100
100
  elif parameter_type == "send":
101
101
  if send_index is None:
102
102
  raise ValueError("send_index required for send automation")
103
+ send_index = int(send_index)
103
104
  sends = list(track.mixer_device.sends)
104
105
  if send_index < 0 or send_index >= len(sends):
105
106
  raise IndexError("send_index %d out of range" % send_index)
@@ -107,6 +108,8 @@ def set_clip_automation(song, params):
107
108
  elif parameter_type == "device":
108
109
  if device_index is None or parameter_index is None:
109
110
  raise ValueError("device_index and parameter_index required")
111
+ device_index = int(device_index)
112
+ parameter_index = int(parameter_index)
110
113
  devices = list(track.devices)
111
114
  if device_index < 0 or device_index >= len(devices):
112
115
  raise IndexError("device_index %d out of range" % device_index)
@@ -180,6 +183,7 @@ def clear_clip_automation(song, params):
180
183
  send_index = params.get("send_index")
181
184
  if send_index is None:
182
185
  raise ValueError("send_index required for send automation")
186
+ send_index = int(send_index)
183
187
  sends = list(track.mixer_device.sends)
184
188
  if send_index < 0 or send_index >= len(sends):
185
189
  raise IndexError("send_index %d out of range" % send_index)
@@ -189,6 +193,8 @@ def clear_clip_automation(song, params):
189
193
  parameter_index = params.get("parameter_index")
190
194
  if device_index is None or parameter_index is None:
191
195
  raise ValueError("device_index and parameter_index required")
196
+ device_index = int(device_index)
197
+ parameter_index = int(parameter_index)
192
198
  devices = list(track.devices)
193
199
  if device_index < 0 or device_index >= len(devices):
194
200
  raise IndexError("device_index %d out of range" % device_index)
@@ -72,6 +72,8 @@ def delete_clip(song, params):
72
72
  track_index = int(params["track_index"])
73
73
  clip_index = int(params["clip_index"])
74
74
  clip_slot = get_clip_slot(song, track_index, clip_index)
75
+ if not clip_slot.has_clip:
76
+ raise ValueError("No clip in slot %d on track %d" % (clip_index, track_index))
75
77
  clip_slot.delete_clip()
76
78
  return {"track_index": track_index, "clip_index": clip_index, "deleted": True}
77
79
 
@@ -270,6 +270,7 @@ def load_device_by_uri(song, params):
270
270
  return None
271
271
 
272
272
  for category in categories:
273
+ _iterations[0] = 0 # Reset counter per category to avoid premature cutoff
273
274
  found = find_by_uri(category, uri)
274
275
  if found is not None:
275
276
  song.view.selected_track = track
@@ -132,11 +132,18 @@ def get_track_meters(song, params):
132
132
  entry = {
133
133
  "index": idx,
134
134
  "name": track.name,
135
- "level": track.output_meter_level,
136
135
  }
137
- if include_stereo:
138
- entry["left"] = track.output_meter_left
139
- entry["right"] = track.output_meter_right
136
+ if track.has_audio_output:
137
+ entry["level"] = track.output_meter_level
138
+ if include_stereo:
139
+ entry["left"] = track.output_meter_left
140
+ entry["right"] = track.output_meter_right
141
+ else:
142
+ entry["level"] = 0.0
143
+ entry["has_audio_output"] = False
144
+ if include_stereo:
145
+ entry["left"] = 0.0
146
+ entry["right"] = 0.0
140
147
  return entry
141
148
 
142
149
  if track_index is not None:
@@ -170,7 +177,7 @@ def get_mix_snapshot(song, params):
170
177
  tracks.append({
171
178
  "index": i,
172
179
  "name": track.name,
173
- "meter_level": track.output_meter_level,
180
+ "meter_level": track.output_meter_level if track.has_audio_output else 0.0,
174
181
  "volume": track.mixer_device.volume.value,
175
182
  "pan": track.mixer_device.panning.value,
176
183
  "mute": track.mute,
@@ -122,6 +122,9 @@ def create_midi_track(song, params):
122
122
  track.name = str(params["name"])
123
123
  if "color_index" in params:
124
124
  track.color_index = int(params["color_index"])
125
+ # Ableton auto-arms newly created tracks — disarm to avoid surprises
126
+ if track.arm and not params.get("arm", False):
127
+ track.arm = False
125
128
  return {"index": new_index, "name": track.name}
126
129
 
127
130
 
@@ -139,6 +142,9 @@ def create_audio_track(song, params):
139
142
  track.name = str(params["name"])
140
143
  if "color_index" in params:
141
144
  track.color_index = int(params["color_index"])
145
+ # Ableton auto-arms newly created tracks — disarm to avoid surprises
146
+ if track.arm and not params.get("arm", False):
147
+ track.arm = False
142
148
  return {"index": new_index, "name": track.name}
143
149
 
144
150
 
@@ -54,8 +54,8 @@ def get_track(song, track_index):
54
54
  ri = abs(track_index) - 1
55
55
  if ri >= len(return_tracks):
56
56
  raise IndexError(
57
- "Return track index %d out of range (0..%d)"
58
- % (ri, len(return_tracks) - 1)
57
+ "Return track index %d out of range — %d return tracks available"
58
+ % (ri, len(return_tracks))
59
59
  )
60
60
  return return_tracks[ri]
61
61
  if track_index >= len(tracks):