livepilot 1.9.13 → 1.9.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/AGENTS.md +3 -3
- package/CHANGELOG.md +51 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +7 -7
- package/bin/livepilot.js +32 -8
- package/installer/install.js +21 -2
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
- package/livepilot/skills/livepilot-core/SKILL.md +81 -6
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
- package/livepilot/skills/livepilot-core/references/overview.md +3 -3
- package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
- package/livepilot/skills/livepilot-release/SKILL.md +13 -13
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +6 -3
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/curves.py +11 -3
- package/mcp_server/evaluation/__init__.py +1 -0
- package/mcp_server/evaluation/fabric.py +575 -0
- package/mcp_server/evaluation/feature_extractors.py +84 -0
- package/mcp_server/evaluation/policy.py +67 -0
- package/mcp_server/evaluation/tools.py +53 -0
- package/mcp_server/memory/__init__.py +11 -2
- package/mcp_server/memory/anti_memory.py +78 -0
- package/mcp_server/memory/promotion.py +94 -0
- package/mcp_server/memory/session_memory.py +108 -0
- package/mcp_server/memory/taste_memory.py +158 -0
- package/mcp_server/memory/technique_store.py +2 -1
- package/mcp_server/memory/tools.py +112 -0
- package/mcp_server/mix_engine/__init__.py +1 -0
- package/mcp_server/mix_engine/critics.py +299 -0
- package/mcp_server/mix_engine/models.py +152 -0
- package/mcp_server/mix_engine/planner.py +103 -0
- package/mcp_server/mix_engine/state_builder.py +316 -0
- package/mcp_server/mix_engine/tools.py +214 -0
- package/mcp_server/performance_engine/__init__.py +1 -0
- package/mcp_server/performance_engine/models.py +148 -0
- package/mcp_server/performance_engine/planner.py +267 -0
- package/mcp_server/performance_engine/safety.py +162 -0
- package/mcp_server/performance_engine/tools.py +183 -0
- package/mcp_server/project_brain/__init__.py +6 -0
- package/mcp_server/project_brain/arrangement_graph.py +64 -0
- package/mcp_server/project_brain/automation_graph.py +72 -0
- package/mcp_server/project_brain/builder.py +123 -0
- package/mcp_server/project_brain/capability_graph.py +64 -0
- package/mcp_server/project_brain/models.py +282 -0
- package/mcp_server/project_brain/refresh.py +80 -0
- package/mcp_server/project_brain/role_graph.py +103 -0
- package/mcp_server/project_brain/session_graph.py +51 -0
- package/mcp_server/project_brain/tools.py +144 -0
- package/mcp_server/reference_engine/__init__.py +1 -0
- package/mcp_server/reference_engine/gap_analyzer.py +239 -0
- package/mcp_server/reference_engine/models.py +105 -0
- package/mcp_server/reference_engine/profile_builder.py +149 -0
- package/mcp_server/reference_engine/tactic_router.py +117 -0
- package/mcp_server/reference_engine/tools.py +235 -0
- package/mcp_server/runtime/__init__.py +1 -0
- package/mcp_server/runtime/action_ledger.py +117 -0
- package/mcp_server/runtime/action_ledger_models.py +84 -0
- package/mcp_server/runtime/action_tools.py +57 -0
- package/mcp_server/runtime/capability_state.py +218 -0
- package/mcp_server/runtime/safety_kernel.py +339 -0
- package/mcp_server/runtime/safety_tools.py +42 -0
- package/mcp_server/runtime/tools.py +64 -0
- package/mcp_server/server.py +23 -1
- package/mcp_server/sound_design/__init__.py +1 -0
- package/mcp_server/sound_design/critics.py +297 -0
- package/mcp_server/sound_design/models.py +147 -0
- package/mcp_server/sound_design/planner.py +104 -0
- package/mcp_server/sound_design/tools.py +297 -0
- package/mcp_server/tools/_agent_os_engine.py +947 -0
- package/mcp_server/tools/_composition_engine.py +1530 -0
- package/mcp_server/tools/_conductor.py +199 -0
- package/mcp_server/tools/_conductor_budgets.py +222 -0
- package/mcp_server/tools/_evaluation_contracts.py +91 -0
- package/mcp_server/tools/_form_engine.py +416 -0
- package/mcp_server/tools/_motif_engine.py +351 -0
- package/mcp_server/tools/_planner_engine.py +516 -0
- package/mcp_server/tools/_research_engine.py +542 -0
- package/mcp_server/tools/_research_provider.py +185 -0
- package/mcp_server/tools/_snapshot_normalizer.py +49 -0
- package/mcp_server/tools/agent_os.py +440 -0
- package/mcp_server/tools/analyzer.py +18 -0
- package/mcp_server/tools/automation.py +25 -10
- package/mcp_server/tools/composition.py +563 -0
- package/mcp_server/tools/motif.py +104 -0
- package/mcp_server/tools/planner.py +144 -0
- package/mcp_server/tools/research.py +223 -0
- package/mcp_server/tools/tracks.py +18 -3
- package/mcp_server/tools/transport.py +10 -2
- package/mcp_server/transition_engine/__init__.py +6 -0
- package/mcp_server/transition_engine/archetypes.py +167 -0
- package/mcp_server/transition_engine/critics.py +340 -0
- package/mcp_server/transition_engine/models.py +90 -0
- package/mcp_server/transition_engine/tools.py +291 -0
- package/mcp_server/translation_engine/__init__.py +5 -0
- package/mcp_server/translation_engine/critics.py +297 -0
- package/mcp_server/translation_engine/models.py +27 -0
- package/mcp_server/translation_engine/tools.py +74 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/requirements.txt +1 -1
|
@@ -43,8 +43,11 @@ def get_clip_automation(
|
|
|
43
43
|
parameter name, and type (mixer/send/device). Use this to see
|
|
44
44
|
what's already automated before writing new curves.
|
|
45
45
|
"""
|
|
46
|
-
if track_index < 0:
|
|
47
|
-
raise ValueError(
|
|
46
|
+
if track_index < -99:
|
|
47
|
+
raise ValueError(
|
|
48
|
+
"track_index must be >= 0 for regular tracks, "
|
|
49
|
+
"or -1..-99 for return tracks (-1=A, -2=B)"
|
|
50
|
+
)
|
|
48
51
|
if clip_index < 0:
|
|
49
52
|
raise ValueError("clip_index must be >= 0")
|
|
50
53
|
return _get_ableton(ctx).send_command("get_clip_automation", {
|
|
@@ -76,8 +79,11 @@ def set_clip_automation(
|
|
|
76
79
|
Tip: Use apply_automation_shape to generate points from curves/recipes
|
|
77
80
|
instead of calculating points manually.
|
|
78
81
|
"""
|
|
79
|
-
if track_index < 0:
|
|
80
|
-
raise ValueError(
|
|
82
|
+
if track_index < -99:
|
|
83
|
+
raise ValueError(
|
|
84
|
+
"track_index must be >= 0 for regular tracks, "
|
|
85
|
+
"or -1..-99 for return tracks (-1=A, -2=B)"
|
|
86
|
+
)
|
|
81
87
|
if clip_index < 0:
|
|
82
88
|
raise ValueError("clip_index must be >= 0")
|
|
83
89
|
if parameter_type not in ("device", "volume", "panning", "send"):
|
|
@@ -117,8 +123,11 @@ def clear_clip_automation(
|
|
|
117
123
|
If parameter_type is omitted, clears ALL envelopes.
|
|
118
124
|
If provided, clears only that parameter's envelope.
|
|
119
125
|
"""
|
|
120
|
-
if track_index < 0:
|
|
121
|
-
raise ValueError(
|
|
126
|
+
if track_index < -99:
|
|
127
|
+
raise ValueError(
|
|
128
|
+
"track_index must be >= 0 for regular tracks, "
|
|
129
|
+
"or -1..-99 for return tracks (-1=A, -2=B)"
|
|
130
|
+
)
|
|
122
131
|
if clip_index < 0:
|
|
123
132
|
raise ValueError("clip_index must be >= 0")
|
|
124
133
|
params: dict = {
|
|
@@ -217,8 +226,11 @@ def apply_automation_shape(
|
|
|
217
226
|
- Tremolo/pan: use sine with frequency in musical divisions
|
|
218
227
|
"""
|
|
219
228
|
# Validate indices and parameter_type (same rules as set_clip_automation)
|
|
220
|
-
if track_index < 0:
|
|
221
|
-
raise ValueError(
|
|
229
|
+
if track_index < -99:
|
|
230
|
+
raise ValueError(
|
|
231
|
+
"track_index must be >= 0 for regular tracks, "
|
|
232
|
+
"or -1..-99 for return tracks (-1=A, -2=B)"
|
|
233
|
+
)
|
|
222
234
|
if clip_index < 0:
|
|
223
235
|
raise ValueError("clip_index must be >= 0")
|
|
224
236
|
if parameter_type not in ("device", "volume", "panning", "send"):
|
|
@@ -312,8 +324,11 @@ def apply_automation_recipe(
|
|
|
312
324
|
- stereo_narrow: collapse to mono before drop
|
|
313
325
|
"""
|
|
314
326
|
# Validate indices and parameter_type (same rules as set_clip_automation)
|
|
315
|
-
if track_index < 0:
|
|
316
|
-
raise ValueError(
|
|
327
|
+
if track_index < -99:
|
|
328
|
+
raise ValueError(
|
|
329
|
+
"track_index must be >= 0 for regular tracks, "
|
|
330
|
+
"or -1..-99 for return tracks (-1=A, -2=B)"
|
|
331
|
+
)
|
|
317
332
|
if clip_index < 0:
|
|
318
333
|
raise ValueError("clip_index must be >= 0")
|
|
319
334
|
if parameter_type not in ("device", "volume", "panning", "send"):
|
|
@@ -0,0 +1,563 @@
|
|
|
1
|
+
"""Composition Engine V1 MCP tools — structural and musical intelligence.
|
|
2
|
+
|
|
3
|
+
5 tools that connect the pure-computation engine (_composition_engine.py) to the
|
|
4
|
+
live Ableton session via the existing MCP infrastructure.
|
|
5
|
+
|
|
6
|
+
These tools power the composition intelligence layer:
|
|
7
|
+
analyze_composition — full structural analysis (sections, phrases, roles, issues)
|
|
8
|
+
get_section_graph — lightweight section inference only
|
|
9
|
+
get_phrase_grid — phrase boundaries for a section
|
|
10
|
+
plan_gesture — map musical intent to concrete automation plan
|
|
11
|
+
evaluate_composition_move — composition-specific keep/undo scoring
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
from typing import Optional
|
|
18
|
+
|
|
19
|
+
from fastmcp import Context
|
|
20
|
+
|
|
21
|
+
from ..server import mcp
|
|
22
|
+
from . import _composition_engine as engine
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _get_ableton(ctx: Context):
|
|
26
|
+
return ctx.lifespan_context["ableton"]
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _parse_json_param(value, name: str) -> dict:
|
|
30
|
+
"""Parse a dict, JSON string, or None parameter."""
|
|
31
|
+
if value is None:
|
|
32
|
+
return {}
|
|
33
|
+
if isinstance(value, str):
|
|
34
|
+
try:
|
|
35
|
+
return json.loads(value)
|
|
36
|
+
except json.JSONDecodeError as exc:
|
|
37
|
+
raise ValueError(f"Invalid JSON in {name}: {exc}") from exc
|
|
38
|
+
if isinstance(value, dict):
|
|
39
|
+
return value
|
|
40
|
+
raise ValueError(f"{name} must be a dict or JSON string")
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def _build_clip_matrix(ableton, scene_count: int, track_count: int) -> list[list]:
|
|
44
|
+
"""Build the clip matrix from scene_matrix data."""
|
|
45
|
+
try:
|
|
46
|
+
matrix_data = ableton.send_command("get_scene_matrix")
|
|
47
|
+
raw_matrix = matrix_data.get("matrix", [])
|
|
48
|
+
return raw_matrix
|
|
49
|
+
except Exception:
|
|
50
|
+
return [[] for _ in range(scene_count)]
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
# ── analyze_composition ───────────────────────────────────────────────
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@mcp.tool()
def analyze_composition(ctx: Context) -> dict:
    """Run full composition analysis on the current Ableton session.

    Returns section graph, phrase grid, role graph, and issues from
    form/section-identity/phrase critics. This is the "one call to
    understand the arrangement structure."

    Uses scene names + clip activity to infer sections, note data for
    phrases, and track names + note patterns for role assignment.

    The issues section contains actionable structural recommendations.
    """
    bridge = _get_ableton(ctx)

    # Session overview: scenes, tracks, and track count drive everything below.
    overview = bridge.send_command("get_session_info")
    scene_list = overview.get("scenes", [])
    track_list = overview.get("tracks", [])
    n_tracks = overview.get("track_count", 0)

    # Clip matrix feeds section inference.
    matrix = _build_clip_matrix(bridge, len(scene_list), n_tracks)

    # Primary section inference comes from the session view (scenes).
    sections = engine.build_section_graph_from_scenes(
        scene_list, matrix, n_tracks,
    )

    # Arrangement-view clips serve as a fallback data source.
    arrangement_clips: dict = {}
    for entry in track_list:
        try:
            response = bridge.send_command("get_arrangement_clips", {
                "track_index": entry["index"]
            })
            found = response.get("clips", [])
            if found:
                arrangement_clips[entry["index"]] = found
        except Exception:
            pass  # best-effort: a track without arrangement data is fine

    if not sections and arrangement_clips:
        sections = engine.build_section_graph_from_arrangement(
            arrangement_clips, n_tracks,
        )

    # Per-track detail for role inference; degrade to a stub on failure.
    track_data = []
    for entry in track_list:
        try:
            track_data.append(bridge.send_command("get_track_info", {
                "track_index": entry["index"]
            }))
        except Exception:
            track_data.append({"index": entry["index"], "name": entry.get("name", ""),
                               "devices": []})

    # Gather note data per track across every scene slot.
    notes_per_track: dict[int, list] = {}
    for entry in track_list:
        idx = entry["index"]
        collected: list = []
        for slot in range(len(scene_list)):
            try:
                reply = bridge.send_command("get_notes", {
                    "track_index": idx, "clip_index": slot
                })
                collected.extend(reply.get("notes", []))
            except Exception:
                pass  # empty/missing clip slots are expected
        notes_per_track[idx] = collected

    # Project each track's notes onto the sections where that track is active.
    section_notes_map: dict[str, dict[int, list]] = {
        sec.section_id: {t: notes_per_track.get(t, []) for t in sec.tracks_active}
        for sec in sections
    }

    # Phrase detection, section by section.
    phrases_all: list = []
    for sec in sections:
        per_track = {t: notes_per_track.get(t, []) for t in sec.tracks_active}
        phrases_all.extend(engine.detect_phrases(sec, per_track))

    # Role graph + the three critics, concatenated in a fixed order.
    roles = engine.build_role_graph(sections, track_data, section_notes_map)
    issues = (engine.run_form_critic(sections)
              + engine.run_section_identity_critic(sections, roles)
              + engine.run_phrase_critic(phrases_all))

    return engine.CompositionAnalysis(
        sections=sections,
        phrases=phrases_all,
        roles=roles,
        issues=issues,
    ).to_dict()
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
# ── get_section_graph ─────────────────────────────────────────────────
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
@mcp.tool()
def get_section_graph(ctx: Context) -> dict:
    """Get just the section graph — lightweight structural overview.

    Infers sections from scene names and clip activity. Returns
    section types, energy levels, density, and active tracks per section.
    Faster than analyze_composition when you only need structure.
    """
    bridge = _get_ableton(ctx)
    overview = bridge.send_command("get_session_info")
    scene_list = overview.get("scenes", [])
    n_tracks = overview.get("track_count", 0)

    matrix = _build_clip_matrix(bridge, len(scene_list), n_tracks)
    sections = engine.build_section_graph_from_scenes(
        scene_list, matrix, n_tracks,
    )

    return {
        "sections": [node.to_dict() for node in sections],
        "section_count": len(sections),
        "has_energy_arc": _has_energy_arc(sections),
    }
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def _has_energy_arc(sections: list[engine.SectionNode]) -> bool:
|
|
197
|
+
if len(sections) < 2:
|
|
198
|
+
return False
|
|
199
|
+
energies = [s.energy for s in sections]
|
|
200
|
+
return (max(energies) - min(energies)) >= 0.15
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
# ── get_phrase_grid ───────────────────────────────────────────────────
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
@mcp.tool()
def get_phrase_grid(
    ctx: Context,
    section_index: int = 0,
) -> dict:
    """Get phrase boundaries for a specific section.

    section_index: which section to analyze (0-based, from get_section_graph).
    Returns phrase boundaries, cadence strengths, and note densities.
    """
    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    # (dropped an unused `tracks` local — section.tracks_active drives the loop)
    track_count = session.get("track_count", 0)

    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = engine.build_section_graph_from_scenes(
        scenes, clip_matrix, track_count,
    )

    if section_index < 0 or section_index >= len(sections):
        return {"error": f"section_index {section_index} out of range (0-{len(sections) - 1})"}

    section = sections[section_index]

    # Collect notes for each track active in this section; a missing or
    # empty clip slot simply yields no notes.
    notes_by_track: dict[int, list] = {}
    for t_idx in section.tracks_active:
        try:
            result = ableton.send_command("get_notes", {
                "track_index": t_idx,
                "clip_index": section_index,
            })
            notes_by_track[t_idx] = result.get("notes", [])
        except Exception:
            notes_by_track[t_idx] = []

    phrases = engine.detect_phrases(section, notes_by_track)
    return {
        "section": section.to_dict(),
        "phrases": [p.to_dict() for p in phrases],
        "phrase_count": len(phrases),
    }
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
# ── plan_gesture ──────────────────────────────────────────────────────
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
@mcp.tool()
def plan_gesture(
    ctx: Context,
    intent: str,
    target_tracks: list | str = "[]",
    start_bar: int = 0,
    duration_bars: int = 0,
    foreground: bool = False,
) -> dict:
    """Plan a musical gesture — map abstract intent to concrete automation.

    intent: reveal | conceal | handoff | inhale | release | lift | sink | punctuate | drift
    target_tracks: list of track indices the gesture applies to
    start_bar: where the gesture begins
    duration_bars: how long (0 = use gesture default)
    foreground: is this a focal point or background motion?

    Returns a GesturePlan with: curve_family, parameter_hints, direction,
    and timing — ready for use with apply_automation_shape.

    Example: plan_gesture(intent="reveal", target_tracks=[6], start_bar=8)
    → exponential curve on filter_cutoff, sweep up over 4 bars
    """
    # Parse intent. `from None` suppresses the enum's own ValueError so the
    # caller sees only the friendly message listing the valid intents,
    # instead of a confusing "During handling of the above exception" chain.
    try:
        gesture_intent = engine.GestureIntent(intent)
    except ValueError:
        valid = [g.value for g in engine.GestureIntent]
        raise ValueError(f"Unknown intent '{intent}'. Valid: {valid}") from None

    # Parse target_tracks (accept a list or a JSON-encoded list); malformed
    # JSON degrades to "no specific tracks" rather than failing the call.
    if isinstance(target_tracks, str):
        try:
            target_tracks = json.loads(target_tracks)
        except json.JSONDecodeError:
            target_tracks = []

    # 0 means "use the gesture's own default duration".
    duration = duration_bars if duration_bars > 0 else None
    gesture = engine.plan_gesture(
        intent=gesture_intent,
        target_tracks=target_tracks,
        start_bar=start_bar,
        duration_bars=duration,
        foreground=foreground,
    )
    return gesture.to_dict()
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# ── evaluate_composition_move ─────────────────────────────────────────
|
|
304
|
+
|
|
305
|
+
|
|
306
|
+
@mcp.tool()
def evaluate_composition_move(
    ctx: Context,
    before_issues: list | str,
    after_issues: list | str,
    target_dimensions: dict | str = "{}",
    protect: dict | str = "{}",
) -> dict:
    """Evaluate whether a composition move improved the arrangement.

    Takes before/after issue lists (from analyze_composition) and compares
    severity and count. Returns a score and keep/undo recommendation.

    before_issues: issues list from analyze_composition BEFORE the move
    after_issues: issues list from analyze_composition AFTER the move
    target_dimensions: optional composition dimensions being targeted
    protect: optional dimensions to preserve

    Returns: {score, keep_change, issue_delta, severity_improvement, notes}
    """
    # Parse inputs — issue lists may arrive JSON-encoded.
    if isinstance(before_issues, str):
        before_issues = json.loads(before_issues)
    if isinstance(after_issues, str):
        after_issues = json.loads(after_issues)

    targets = _parse_json_param(target_dimensions, "target_dimensions")
    prot = _parse_json_param(protect, "protect")

    # Only these keys are accepted by the CompositionIssue constructor;
    # stray fields (e.g. serialization extras) are dropped so **-expansion
    # cannot fail on an unexpected keyword.
    fields = ("issue_type", "critic", "severity", "confidence",
              "scope", "recommended_moves", "evidence")

    def _to_issues(raw: list) -> list:
        """Convert raw issue dicts back into CompositionIssue objects."""
        return [
            engine.CompositionIssue(**{k: v for k, v in item.items() if k in fields})
            for item in raw
        ]

    before = _to_issues(before_issues)
    after = _to_issues(after_issues)

    return engine.evaluate_composition_move(before, after, targets, prot)
|
|
346
|
+
|
|
347
|
+
|
|
348
|
+
# ── get_harmony_field (Round 1) ───────────────────────────────────────
|
|
349
|
+
|
|
350
|
+
|
|
351
|
+
@mcp.tool()
def get_harmony_field(
    ctx: Context,
    section_index: int = 0,
) -> dict:
    """Analyze the harmonic content of a section — key, chords, voice-leading, tension.

    Combines identify_scale, analyze_harmony, classify_progression, and
    find_voice_leading_path into a single structured HarmonyField.

    section_index: which section to analyze (0-based, from get_section_graph).
    Returns: key, mode, chord_progression, voice_leading_quality, instability,
    resolution_potential.
    """
    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    # (dropped an unused `tracks` local — section.tracks_active drives the loop)
    track_count = session.get("track_count", 0)

    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = engine.build_section_graph_from_scenes(scenes, clip_matrix, track_count)

    if section_index < 0 or section_index >= len(sections):
        return {"error": f"section_index {section_index} out of range (0-{len(sections) - 1})"}

    section = sections[section_index]

    # Probe the section's active tracks until both a scale match and a
    # harmony analysis are found; partial results are kept as we go.
    scale_info = None
    harmony_analysis = None
    progression_info = None
    # NOTE(review): voice-leading analysis is not wired up here yet — this
    # stays None; build_harmony_field is expected to tolerate the missing
    # input. TODO: call find_voice_leading_path when available.
    voice_leading_info = None

    for t_idx in section.tracks_active:
        try:
            # Try identify_scale
            si = ableton.send_command("identify_scale", {
                "track_index": t_idx, "clip_index": section_index
            })
            if si.get("top_match"):
                scale_info = si

            # Try analyze_harmony
            ha = ableton.send_command("analyze_harmony", {
                "track_index": t_idx, "clip_index": section_index
            })
            if ha.get("chords"):
                harmony_analysis = ha

                # Classify the progression when at least two named chords exist.
                chord_names = [c.get("chord_name", "")
                               for c in ha.get("chords", []) if c.get("chord_name")]
                if len(chord_names) >= 2:
                    try:
                        progression_info = ableton.send_command("classify_progression", {
                            "chords": chord_names[:8]  # cap prompt size at 8 chords
                        })
                    except Exception:
                        pass  # progression classification is optional

            if scale_info and harmony_analysis:
                break
        except Exception:
            continue  # this track had no usable clip/notes — try the next

    hf = engine.build_harmony_field(
        section_id=section.section_id,
        harmony_analysis=harmony_analysis,
        scale_info=scale_info,
        progression_info=progression_info,
        voice_leading_info=voice_leading_info,
    )
    result = hf.to_dict()
    result["section_name"] = section.name
    return result
|
|
426
|
+
|
|
427
|
+
|
|
428
|
+
# ── get_transition_analysis (Round 1) ─────────────────────────────────
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
@mcp.tool()
def get_transition_analysis(ctx: Context) -> dict:
    """Analyze transition quality between all adjacent sections.

    Checks for: hard cuts, missing pre-arrival subtraction, groove breaks,
    harmonic non-sequiturs, and weak builds without role rotation.

    Returns issues with recommended composition moves for each boundary.
    """
    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    tracks = session.get("tracks", [])
    track_count = session.get("track_count", 0)

    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = engine.build_section_graph_from_scenes(scenes, clip_matrix, track_count)

    if len(sections) < 2:
        return {"issues": [], "note": "Need at least 2 sections for transition analysis"}

    # Build role graph for the transition critic. Note lists are left empty
    # here — roles come from track names/devices only in this lightweight pass.
    track_data = []
    for track in tracks:
        t_idx = track["index"]
        try:
            track_data.append(ableton.send_command("get_track_info", {"track_index": t_idx}))
        except Exception:
            track_data.append({"index": t_idx, "name": track.get("name", ""), "devices": []})

    notes_map: dict[str, dict[int, list]] = {
        section.section_id: {t_idx: [] for t_idx in section.tracks_active}
        for section in sections
    }

    roles = engine.build_role_graph(sections, track_data, notes_map)

    # Harmony fields are placeholders (lightweight — harmonic probing skipped);
    # was a loop with an unused enumerate index.
    harmony_fields = [engine.HarmonyField(section_id=section.section_id)
                      for section in sections]

    issues = engine.run_transition_critic(sections, roles, harmony_fields)

    return {
        "transition_count": len(sections) - 1,
        "issues": [i.to_dict() for i in issues],
        "issue_count": len(issues),
    }
|
|
483
|
+
|
|
484
|
+
|
|
485
|
+
# ── apply_gesture_template (Round 2) ──────────────────────────────────
|
|
486
|
+
|
|
487
|
+
|
|
488
|
+
@mcp.tool()
def apply_gesture_template(
    ctx: Context,
    template_name: str,
    target_tracks: list | str = "[]",
    anchor_bar: int = 0,
    foreground: bool = False,
) -> dict:
    """Apply a compound gesture template — multiple coordinated automation gestures.

    template_name: pre_arrival_vacuum | sectional_width_bloom | phrase_end_throw |
        turnaround_accent | outro_decay_dissolve | bass_tuck_before_kick |
        harmonic_tint_rise | response_echo | texture_drift_bed |
        tension_ratchet | re_entry_spotlight
    target_tracks: list of track indices
    anchor_bar: reference point (section boundary bar number)
    foreground: is this a focal point?

    Returns: list of GesturePlans — execute each with apply_automation_shape.
    """
    # Accept either a list or a JSON-encoded list; bad JSON degrades to [].
    if isinstance(target_tracks, str):
        try:
            target_tracks = json.loads(target_tracks)
        except json.JSONDecodeError:
            target_tracks = []

    plans = engine.resolve_gesture_template(
        template_name, target_tracks, anchor_bar, foreground,
    )
    template_meta = engine.GESTURE_TEMPLATES[template_name]
    return {
        "template": template_name,
        "description": template_meta["description"],
        "gesture_count": len(plans),
        "gestures": [plan.to_dict() for plan in plans],
    }
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
# ── get_section_outcomes (Round 2) ────────────────────────────────────
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
@mcp.tool()
def get_section_outcomes(
    ctx: Context,
    section_type: str = "",
    limit: int = 50,
) -> dict:
    """Get composition move success rates grouped by section type.

    Analyzes stored composition outcomes to answer: which moves work
    best in which section types? Use before making structural changes
    to learn from past sessions.

    section_type: filter to a specific type (intro, verse, chorus, etc.)
        Leave empty for all types.
    """
    bridge = _get_ableton(ctx)

    # Pull stored outcomes from memory (best-effort — missing memory store
    # simply yields an empty analysis).
    try:
        stored = bridge.send_command("memory_list", {
            "type": "composition_outcome",
            "limit": limit,
            "sort_by": "updated_at",
        })
        entries = stored.get("techniques", [])
    except Exception:
        entries = []

    payloads = [e.get("payload", {}) for e in entries if isinstance(e.get("payload"), dict)]

    result = engine.analyze_section_outcomes(payloads)

    # Optional narrowing to a single section type.
    by_section = result.get("outcomes_by_section", {})
    if section_type and section_type in by_section:
        result["filtered_section"] = section_type
        result["section_moves"] = by_section[section_type]

    return result
|