livepilot 1.9.13 → 1.9.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +51 -0
  4. package/CONTRIBUTING.md +1 -1
  5. package/README.md +7 -7
  6. package/bin/livepilot.js +32 -8
  7. package/installer/install.js +21 -2
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
  11. package/livepilot/skills/livepilot-core/SKILL.md +81 -6
  12. package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
  13. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  14. package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
  15. package/livepilot/skills/livepilot-release/SKILL.md +13 -13
  16. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  17. package/m4l_device/livepilot_bridge.js +6 -3
  18. package/mcp_server/__init__.py +1 -1
  19. package/mcp_server/curves.py +11 -3
  20. package/mcp_server/evaluation/__init__.py +1 -0
  21. package/mcp_server/evaluation/fabric.py +575 -0
  22. package/mcp_server/evaluation/feature_extractors.py +84 -0
  23. package/mcp_server/evaluation/policy.py +67 -0
  24. package/mcp_server/evaluation/tools.py +53 -0
  25. package/mcp_server/memory/__init__.py +11 -2
  26. package/mcp_server/memory/anti_memory.py +78 -0
  27. package/mcp_server/memory/promotion.py +94 -0
  28. package/mcp_server/memory/session_memory.py +108 -0
  29. package/mcp_server/memory/taste_memory.py +158 -0
  30. package/mcp_server/memory/technique_store.py +2 -1
  31. package/mcp_server/memory/tools.py +112 -0
  32. package/mcp_server/mix_engine/__init__.py +1 -0
  33. package/mcp_server/mix_engine/critics.py +299 -0
  34. package/mcp_server/mix_engine/models.py +152 -0
  35. package/mcp_server/mix_engine/planner.py +103 -0
  36. package/mcp_server/mix_engine/state_builder.py +316 -0
  37. package/mcp_server/mix_engine/tools.py +214 -0
  38. package/mcp_server/performance_engine/__init__.py +1 -0
  39. package/mcp_server/performance_engine/models.py +148 -0
  40. package/mcp_server/performance_engine/planner.py +267 -0
  41. package/mcp_server/performance_engine/safety.py +162 -0
  42. package/mcp_server/performance_engine/tools.py +183 -0
  43. package/mcp_server/project_brain/__init__.py +6 -0
  44. package/mcp_server/project_brain/arrangement_graph.py +64 -0
  45. package/mcp_server/project_brain/automation_graph.py +72 -0
  46. package/mcp_server/project_brain/builder.py +123 -0
  47. package/mcp_server/project_brain/capability_graph.py +64 -0
  48. package/mcp_server/project_brain/models.py +282 -0
  49. package/mcp_server/project_brain/refresh.py +80 -0
  50. package/mcp_server/project_brain/role_graph.py +103 -0
  51. package/mcp_server/project_brain/session_graph.py +51 -0
  52. package/mcp_server/project_brain/tools.py +144 -0
  53. package/mcp_server/reference_engine/__init__.py +1 -0
  54. package/mcp_server/reference_engine/gap_analyzer.py +239 -0
  55. package/mcp_server/reference_engine/models.py +105 -0
  56. package/mcp_server/reference_engine/profile_builder.py +149 -0
  57. package/mcp_server/reference_engine/tactic_router.py +117 -0
  58. package/mcp_server/reference_engine/tools.py +235 -0
  59. package/mcp_server/runtime/__init__.py +1 -0
  60. package/mcp_server/runtime/action_ledger.py +117 -0
  61. package/mcp_server/runtime/action_ledger_models.py +84 -0
  62. package/mcp_server/runtime/action_tools.py +57 -0
  63. package/mcp_server/runtime/capability_state.py +218 -0
  64. package/mcp_server/runtime/safety_kernel.py +339 -0
  65. package/mcp_server/runtime/safety_tools.py +42 -0
  66. package/mcp_server/runtime/tools.py +64 -0
  67. package/mcp_server/server.py +23 -1
  68. package/mcp_server/sound_design/__init__.py +1 -0
  69. package/mcp_server/sound_design/critics.py +297 -0
  70. package/mcp_server/sound_design/models.py +147 -0
  71. package/mcp_server/sound_design/planner.py +104 -0
  72. package/mcp_server/sound_design/tools.py +297 -0
  73. package/mcp_server/tools/_agent_os_engine.py +947 -0
  74. package/mcp_server/tools/_composition_engine.py +1530 -0
  75. package/mcp_server/tools/_conductor.py +199 -0
  76. package/mcp_server/tools/_conductor_budgets.py +222 -0
  77. package/mcp_server/tools/_evaluation_contracts.py +91 -0
  78. package/mcp_server/tools/_form_engine.py +416 -0
  79. package/mcp_server/tools/_motif_engine.py +351 -0
  80. package/mcp_server/tools/_planner_engine.py +516 -0
  81. package/mcp_server/tools/_research_engine.py +542 -0
  82. package/mcp_server/tools/_research_provider.py +185 -0
  83. package/mcp_server/tools/_snapshot_normalizer.py +49 -0
  84. package/mcp_server/tools/agent_os.py +440 -0
  85. package/mcp_server/tools/analyzer.py +18 -0
  86. package/mcp_server/tools/automation.py +25 -10
  87. package/mcp_server/tools/composition.py +563 -0
  88. package/mcp_server/tools/motif.py +104 -0
  89. package/mcp_server/tools/planner.py +144 -0
  90. package/mcp_server/tools/research.py +223 -0
  91. package/mcp_server/tools/tracks.py +18 -3
  92. package/mcp_server/tools/transport.py +10 -2
  93. package/mcp_server/transition_engine/__init__.py +6 -0
  94. package/mcp_server/transition_engine/archetypes.py +167 -0
  95. package/mcp_server/transition_engine/critics.py +340 -0
  96. package/mcp_server/transition_engine/models.py +90 -0
  97. package/mcp_server/transition_engine/tools.py +291 -0
  98. package/mcp_server/translation_engine/__init__.py +5 -0
  99. package/mcp_server/translation_engine/critics.py +297 -0
  100. package/mcp_server/translation_engine/models.py +27 -0
  101. package/mcp_server/translation_engine/tools.py +74 -0
  102. package/package.json +2 -2
  103. package/remote_script/LivePilot/__init__.py +1 -1
  104. package/remote_script/LivePilot/arrangement.py +12 -2
  105. package/requirements.txt +1 -1
@@ -0,0 +1,104 @@
1
+ """Motif MCP tools — pattern detection and transformation.
2
+
3
+ 2 tools: get_motif_graph (detect patterns) and transform_motif (apply transformations).
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+
10
+ from fastmcp import Context
11
+
12
+ from ..server import mcp
13
+ from . import _motif_engine as motif_engine
14
+
15
+
16
def _get_ableton(ctx: Context):
    """Return the shared Ableton connection object stored in the FastMCP lifespan context."""
    return ctx.lifespan_context["ableton"]
18
+
19
+
20
@mcp.tool()
def get_motif_graph(ctx: Context) -> dict:
    """Detect recurring melodic and rhythmic patterns across all tracks.

    Scans note data from all session clips to find repeated interval
    patterns. Returns motifs sorted by salience (most memorable first),
    with occurrence locations, fatigue risk, and suggested transformations.

    Use this to understand what musical ideas are present and which
    ones need development or variation.
    """
    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")

    # Gather notes per MIDI track across every session clip slot.
    notes_by_track: dict[int, list[dict]] = {}
    for track in session.get("tracks", []):
        if not track.get("has_midi_input", False):
            continue  # audio tracks carry no note data
        t_idx = track["index"]
        collected: list[dict] = []
        for clip_idx in range(session.get("scene_count", 8)):
            # Best-effort scan: empty/missing clip slots may error — skip them.
            try:
                payload = ableton.send_command("get_notes", {
                    "track_index": t_idx,
                    "clip_index": clip_idx,
                })
                collected.extend(payload.get("notes", []))
            except Exception:
                pass
        if collected:
            notes_by_track[t_idx] = collected

    motifs = motif_engine.detect_motifs(notes_by_track)
    return {
        "motifs": [m.to_dict() for m in motifs],
        "motif_count": len(motifs),
        "tracks_analyzed": len(notes_by_track),
    }
61
+
62
+
63
@mcp.tool()
def transform_motif(
    ctx: Context,
    motif_intervals: list | str,
    transformation: str,
    reference_pitch: int = 60,
) -> dict:
    """Transform a musical motif using classical composition techniques.

    motif_intervals: interval pattern (list of semitone distances, e.g., [2, -1, 3])
        Get this from get_motif_graph → motif.intervals
    transformation: inversion | retrograde | augmentation | diminution |
        fragmentation | register_shift_up | register_shift_down
    reference_pitch: starting MIDI pitch for output (default: C4=60)

    Returns: list of notes ready for add_notes.

    Raises:
        ValueError: if motif_intervals is not valid JSON or does not decode
            to a list.

    Example: transform_motif([2, 2, -1, 2], "inversion", 60)
        → notes descending instead of ascending
    """
    if isinstance(motif_intervals, str):
        try:
            motif_intervals = json.loads(motif_intervals)
        except json.JSONDecodeError as exc:
            raise ValueError(f"Invalid JSON in motif_intervals: {exc}") from exc

    # Reject non-list payloads early: a JSON string/number/object would
    # otherwise flow into MotifUnit and fail much later with a confusing error.
    if not isinstance(motif_intervals, list):
        raise ValueError("motif_intervals must be a list of semitone intervals")

    # Build a temporary MotifUnit purely as the transformation input.
    motif = motif_engine.MotifUnit(
        motif_id="transform_input",
        kind="melodic",
        intervals=motif_intervals,
        rhythm=[],
        representative_pitches=[reference_pitch],
    )

    notes = motif_engine.transform_motif(motif, transformation, reference_pitch)
    return {
        "notes": notes,
        "note_count": len(notes),
        "transformation": transformation,
        "original_intervals": motif_intervals,
    }
@@ -0,0 +1,144 @@
1
+ """Planner MCP tools — loop-to-song arrangement planning.
2
+
3
+ 2 tools that connect the planner engine (_planner_engine.py) to the
4
+ live Ableton session.
5
+
6
+ plan_arrangement — transform a loop into a full arrangement blueprint
7
+ get_emotional_arc — (in research.py, shares composition data)
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import json
13
+ from typing import Optional
14
+
15
+ from fastmcp import Context
16
+
17
+ from ..server import mcp
18
+ from . import _composition_engine as comp_engine
19
+ from . import _planner_engine as planner_engine
20
+
21
+
22
def _get_ableton(ctx: Context):
    """Return the shared Ableton connection object stored in the FastMCP lifespan context."""
    return ctx.lifespan_context["ableton"]
24
+
25
+
26
@mcp.tool()
def plan_arrangement(
    ctx: Context,
    target_bars: int = 128,
    style: str = "electronic",
) -> dict:
    """Transform the current loop/session into a full arrangement blueprint.

    Analyzes the existing tracks and their roles, then proposes:
    - Section sequence (intro → verse → build → drop → etc.)
    - Element reveal order (what enters/exits when)
    - Gesture automation suggestions for transitions
    - Orchestration plan (which tracks play in which sections)

    target_bars: desired total arrangement length (default: 128 bars)
    style: electronic | hiphop | pop | ambient | techno

    Returns: full ArrangementPlan with actionable section-by-section instructions.
    """
    if style not in planner_engine.VALID_STYLES:
        return {"error": f"Unknown style '{style}'. Valid: {sorted(planner_engine.VALID_STYLES)}"}

    ableton = _get_ableton(ctx)

    # Snapshot the live session once; everything below derives from it.
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    tracks = session.get("tracks", [])
    track_count = session.get("track_count", 0)

    # Reconstruct the current section graph from the scene/clip layout.
    from .composition import _build_clip_matrix

    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = comp_engine.build_section_graph_from_scenes(scenes, clip_matrix, track_count)

    # Per-track device/metadata, tolerating tracks that fail to respond.
    track_data = []
    for track in tracks:
        t_idx = track["index"]
        try:
            track_data.append(ableton.send_command("get_track_info", {"track_index": t_idx}))
        except Exception:
            track_data.append({"index": t_idx, "name": track.get("name", ""), "devices": []})

    # Role inference here only needs the section/track topology, so each
    # active track maps to an empty note list.
    notes_map: dict[str, dict[int, list]] = {
        section.section_id: {t_idx: [] for t_idx in section.tracks_active}
        for section in sections
    }

    roles = comp_engine.build_role_graph(sections, track_data, notes_map)
    loop_identity = planner_engine.analyze_loop_identity(roles, sections)

    plan = planner_engine.plan_arrangement_from_loop(
        loop_identity,
        target_duration_bars=target_bars,
        style=style,
    )

    result = plan.to_dict()
    result["loop_identity"] = loop_identity.to_dict()
    result["available_styles"] = sorted(planner_engine.VALID_STYLES)
    return result
95
+
96
+
97
+ # ── transform_section (Round 4) ─────────────────────────────────────
98
+
99
+
100
@mcp.tool()
def transform_section(
    ctx: Context,
    transformation: str,
    section_index: int = -1,
    bars: int = 8,
) -> dict:
    """Apply a structural transformation to the arrangement.

    Proposes radical structural moves — reorder sections, expand loops,
    compress verbose arrangements, insert bridges. Returns the proposed
    new section graph without modifying the actual session.

    transformation: insert_bridge_before_final_chorus | swap_verse_positions |
        extend_section | compress_section | insert_breakdown |
        duplicate_section | remove_section | reverse_section_order |
        split_section
    section_index: which section to transform (required for targeted operations, -1 = auto)
    bars: how many bars for extend/compress/insert operations

    Returns: before/after section graphs with description and bar delta.
    """
    from . import _form_engine as form_engine
    from .composition import _build_clip_matrix

    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    track_count = session.get("track_count", 0)

    # Rebuild the section graph from the current scene/clip layout.
    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = comp_engine.build_section_graph_from_scenes(scenes, clip_matrix, track_count)
    if not sections:
        return {"error": "No sections detected in the arrangement"}

    # -1 means "let the engine pick a target"; any non-negative index is explicit.
    target = None if section_index < 0 else section_index

    try:
        proposal = form_engine.transform_section_order(
            sections, transformation, target_index=target, bars=bars,
        )
    except ValueError as e:
        return {"error": str(e)}
    return proposal.to_dict()
@@ -0,0 +1,223 @@
1
+ """Research MCP tools — targeted and deep technique research.
2
+
3
+ 2 tools that connect the research engine (_research_engine.py) to the
4
+ live session context via device atlas and memory.
5
+
6
+ research_technique — search corpus + memory for production answers
7
+ deep_research — multi-source synthesis (adds web if available)
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ import json
13
+ from typing import Optional
14
+
15
+ from fastmcp import Context
16
+
17
+ from ..server import mcp
18
+ from . import _research_engine as research_engine
19
+
20
+
21
def _get_ableton(ctx: Context):
    """Return the shared Ableton connection object stored in the FastMCP lifespan context."""
    return ctx.lifespan_context["ableton"]
23
+
24
+
25
@mcp.tool()
def research_technique(
    ctx: Context,
    query: str,
    scope: str = "targeted",
) -> dict:
    """Research a production technique — search device atlas + memory for answers.

    Synthesizes findings from the device atlas (built-in device knowledge),
    technique memory (past session learnings), and reference corpus into
    a structured TechniqueCard with devices, method, and verification steps.

    query: what you want to learn (e.g., "how to sidechain bass to kick")
    scope: "targeted" (device atlas + memory) or "deep" (adds web search)

    Returns: findings ranked by relevance, synthesized technique card, confidence.
    """
    if not query or not query.strip():
        return {"error": "query cannot be empty"}

    if scope not in ("targeted", "deep"):
        return {"error": f"scope must be 'targeted' or 'deep', got '{scope}'"}

    ableton = _get_ableton(ctx)

    # 1. Analyze query to predict relevant devices.
    query_info = research_engine.analyze_query(query)

    # 2. Search device atlas for relevant devices (best-effort per device).
    # NOTE(review): always searches the "instruments" category, even for
    # effect-oriented queries — confirm search_browser category semantics.
    device_atlas_results = []
    for device_name in query_info.get("likely_devices", [])[:5]:
        try:
            ref = ableton.send_command("search_browser", {"query": device_name, "category": "instruments"})
            if ref and not ref.get("error"):
                device_atlas_results.append(ref)
        except Exception:
            pass

    # 3. Search memory: technique cards first, then research memories.
    # Both lookups are best-effort; a missing/unavailable memory store
    # degrades to corpus-only research rather than failing the tool.
    memory_results = []
    for memory_query in (
        {"type": "technique_card", "limit": 10, "sort_by": "updated_at"},
        {"type": "research", "limit": 5, "sort_by": "updated_at"},
    ):
        try:
            mem = ableton.send_command("memory_list", memory_query)
            memory_results.extend(mem.get("techniques", []))
        except Exception:
            pass

    if scope == "targeted":
        result = research_engine.targeted_research(
            query, device_atlas_results, memory_results,
        )
    else:
        # Deep research — web search requires external integration; the agent
        # injects web results when available (graceful degradation otherwise).
        result = research_engine.deep_research(
            query,
            web_results=[],
            device_atlas_results=device_atlas_results,
            memory_results=memory_results,
        )

    return result.to_dict()
102
+
103
+
104
@mcp.tool()
def get_emotional_arc(ctx: Context) -> dict:
    """Analyze the emotional arc of the arrangement — tension, climax, resolution.

    Checks for: monotone energy, all-climax (no rest), build without payoff,
    no resolution at the end, peak too early.

    Returns: tension curve and issues with recommended composition moves.
    """
    from . import _composition_engine as engine
    from .composition import _build_clip_matrix

    ableton = _get_ableton(ctx)
    session = ableton.send_command("get_session_info")
    scenes = session.get("scenes", [])
    track_count = session.get("track_count", 0)

    # Build the section graph from the scene/clip layout.
    clip_matrix = _build_clip_matrix(ableton, len(scenes), track_count)
    sections = engine.build_section_graph_from_scenes(scenes, clip_matrix, track_count)

    if len(sections) < 3:
        return {
            "issues": [],
            "tension_curve": [],
            "note": "Need at least 3 sections for emotional arc analysis",
        }

    # Try to build harmony fields for richer analysis; sections where scale
    # identification fails keep an empty HarmonyField.
    harmony_fields = []
    for i, section in enumerate(sections):
        hf = engine.HarmonyField(section_id=section.section_id)
        # NOTE(review): uses the section's enumeration index as clip_index —
        # assumes scene i corresponds to clip slot i; confirm.
        for t_idx in section.tracks_active[:3]:
            try:
                si = ableton.send_command("identify_scale", {
                    "track_index": t_idx, "clip_index": i,
                })
                if si.get("top_match"):
                    hf.key = si["top_match"].get("tonic", "")
                    hf.mode = si["top_match"].get("mode", "")
                    hf.confidence = si["top_match"].get("confidence", 0.0)
                    break
            except Exception:
                continue
        harmony_fields.append(hf)

    issues = engine.run_emotional_arc_critic(sections, harmony_fields)

    # Tension curve for visualization: weighted blend of section energy,
    # density, and harmonic instability (0.3 default when harmony unknown).
    tension_curve = []
    for section in sections:
        hf_match = next((hf for hf in harmony_fields if hf.section_id == section.section_id), None)
        instability = hf_match.instability if hf_match else 0.3
        tension = round(section.energy * 0.5 + section.density * 0.3 + instability * 0.2, 3)
        tension_curve.append({
            "section": section.name or section.section_id,
            "section_type": section.section_type.value,
            "tension": tension,
            "energy": section.energy,
            "density": section.density,
        })

    return {
        "tension_curve": tension_curve,
        "issues": [i.to_dict() for i in issues],
        "issue_count": len(issues),
        "section_count": len(sections),
    }
174
+
175
+
176
+ # ── get_style_tactics (Round 4) ─────────────────────────────────────
177
+
178
+
179
@mcp.tool()
def get_style_tactics(
    ctx: Context,
    artist_or_genre: str,
) -> dict:
    """Get production tactics for a specific artist style or genre.

    Returns structured recipe cards with device chains, arrangement patterns,
    automation gestures, and verification steps.

    artist_or_genre: e.g., "burial", "techno", "daft punk", "ambient", "trap"

    Returns: list of StyleTactic cards with actionable production instructions.
    """
    if not artist_or_genre or not artist_or_genre.strip():
        return {"error": "artist_or_genre cannot be empty"}

    ableton = _get_ableton(ctx)

    # Saved tactics from user memory are merged with the built-ins; the
    # lookup is best-effort and failure degrades to built-ins only.
    memory_tactics: list = []
    try:
        stored = ableton.send_command("memory_list", {
            "type": "style_tactic",
            "limit": 10,
        })
        memory_tactics = stored.get("techniques", [])
    except Exception:
        pass

    tactics = research_engine.get_style_tactics(artist_or_genre, memory_tactics)

    if tactics:
        return {
            "query": artist_or_genre,
            "tactics": [t.to_dict() for t in tactics],
            "tactic_count": len(tactics),
        }
    return {
        "query": artist_or_genre,
        "tactics": [],
        "note": f"No tactics found for '{artist_or_genre}'. "
        f"Available built-in styles: burial, daft punk, techno, ambient, trap, lo-fi",
    }
@@ -17,10 +17,21 @@ def _get_ableton(ctx: Context):
17
17
  return ctx.lifespan_context["ableton"]
18
18
 
19
19
 
20
- def _validate_track_index(track_index: int):
21
- """Validate track index. Must be >= 0 for regular tracks."""
20
+ def _validate_track_index(track_index: int, allow_return: bool = True):
21
+ """Validate track index.
22
+
23
+ Regular tracks: >= 0. Return tracks: -1 (A), -2 (B), etc.
24
+ Set allow_return=False for operations that only work on regular tracks
25
+ (e.g., create_scene, set_group_fold).
26
+ """
22
27
  if track_index < 0:
23
- raise ValueError("track_index must be >= 0")
28
+ if not allow_return:
29
+ raise ValueError("track_index must be >= 0 (return tracks not supported for this operation)")
30
+ if track_index < -99:
31
+ raise ValueError(
32
+ "track_index must be >= 0 for regular tracks, "
33
+ "or -1..-99 for return tracks (-1=A, -2=B)"
34
+ )
24
35
 
25
36
 
26
37
  def _validate_color_index(color_index: int):
@@ -185,6 +196,10 @@ def freeze_track(ctx: Context, track_index: int) -> dict:
185
196
  Freeze is async in Ableton: this initiates the render and returns
186
197
  immediately. Poll get_freeze_status to check when it's done.
187
198
  Freezing a track that's already frozen is a no-op.
199
+
200
+ Note: freeze() is not available via ControlSurface API in all Live
201
+ versions. If this fails, use Ableton's Freeze Track menu command
202
+ (Cmd+F on Mac) manually instead.
188
203
  """
189
204
  _validate_track_index(track_index)
190
205
  return _get_ableton(ctx).send_command("freeze_track", {
@@ -73,8 +73,16 @@ def continue_playback(ctx: Context) -> dict:
73
73
 
74
74
 
75
75
@mcp.tool()
def toggle_metronome(ctx: Context, enabled: Optional[bool] = None) -> dict:
    """Enable or disable the metronome click.

    If enabled is omitted, toggles the current state (true toggle).
    If enabled is provided, sets to that value explicitly.
    """
    if enabled is None:
        # No explicit target state: read the session and invert the current value.
        session_state = _get_ableton(ctx).send_command("get_session_info")
        enabled = not session_state.get("metronome", False)
    return _get_ableton(ctx).send_command("toggle_metronome", {"enabled": enabled})
79
87
 
80
88
 
@@ -0,0 +1,6 @@
1
+ """Transition Engine V1 — section-boundary intelligence.
2
+
3
+ Arrivals, exits, handoffs, payoff scoring, archetype library.
4
+ Builds on top of the composition engine's transition critic and
5
+ gesture templates. Pure computation, zero I/O.
6
+ """