livepilot 1.4.5 → 1.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,431 @@
1
+ """Automation MCP tools — clip envelope CRUD + intelligent curve generation.
2
+
3
+ 8 tools for writing, reading, and generating automation curves on session clips.
4
+ Combines the clip automation handlers (Remote Script) with the curve generation
5
+ engine (curves.py) for musically intelligent automation.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from typing import Any, Optional
11
+
12
+ from fastmcp import Context
13
+
14
+ from ..curves import generate_curve, generate_from_recipe, list_recipes
15
+ from ..server import mcp
16
+
17
+
18
+ def _get_ableton(ctx: Context):
19
+ return ctx.lifespan_context["ableton"]
20
+
21
+
22
+ def _ensure_list(v: Any) -> list:
23
+ if isinstance(v, str):
24
+ import json
25
+ return json.loads(v)
26
+ return list(v)
27
+
28
+
29
@mcp.tool()
def get_clip_automation(
    ctx: Context,
    track_index: int,
    clip_index: int,
) -> dict:
    """List all automation envelopes on a session clip.

    Returns which parameters have automation, including device name,
    parameter name, and type (mixer/send/device). Use this to see
    what's already automated before writing new curves.
    """
    payload = {
        "track_index": track_index,
        "clip_index": clip_index,
    }
    ableton = _get_ableton(ctx)
    return ableton.send_command("get_clip_automation", payload)
45
+
46
+
47
@mcp.tool()
def set_clip_automation(
    ctx: Context,
    track_index: int,
    clip_index: int,
    parameter_type: str,
    points: Any,
    device_index: Optional[int] = None,
    parameter_index: Optional[int] = None,
    send_index: Optional[int] = None,
) -> dict:
    """Write automation points to a session clip envelope.

    parameter_type: "device", "volume", "panning", or "send"
    points: [{time, value, duration?}] — time relative to clip start (beats)
    values: 0.0-1.0 normalized (or parameter's actual min/max range)

    For device params: provide device_index + parameter_index.
    For sends: provide send_index (0=A, 1=B, etc).

    Tip: Use apply_automation_shape to generate points from curves/recipes
    instead of calculating points manually.
    """
    params: dict = {
        "track_index": track_index,
        "clip_index": clip_index,
        "parameter_type": parameter_type,
        "points": _ensure_list(points),
    }
    # Forward only the optional target selectors the caller actually set.
    selectors = {
        "device_index": device_index,
        "parameter_index": parameter_index,
        "send_index": send_index,
    }
    params.update({key: val for key, val in selectors.items() if val is not None})
    return _get_ableton(ctx).send_command("set_clip_automation", params)
83
+
84
+
85
@mcp.tool()
def clear_clip_automation(
    ctx: Context,
    track_index: int,
    clip_index: int,
    parameter_type: Optional[str] = None,
    device_index: Optional[int] = None,
    parameter_index: Optional[int] = None,
    send_index: Optional[int] = None,
) -> dict:
    """Clear automation envelopes from a session clip.

    If parameter_type is omitted, clears ALL envelopes.
    If provided, clears only that parameter's envelope.
    """
    params: dict = {
        "track_index": track_index,
        "clip_index": clip_index,
    }
    # Forward only the optional selectors the caller actually supplied.
    optional = {
        "parameter_type": parameter_type,
        "device_index": device_index,
        "parameter_index": parameter_index,
        "send_index": send_index,
    }
    params.update({key: val for key, val in optional.items() if val is not None})
    return _get_ableton(ctx).send_command("clear_clip_automation", params)
113
+
114
+
115
@mcp.tool()
def apply_automation_shape(
    ctx: Context,
    track_index: int,
    clip_index: int,
    parameter_type: str,
    curve_type: str,
    duration: float = 4.0,
    density: int = 16,
    device_index: Optional[int] = None,
    parameter_index: Optional[int] = None,
    send_index: Optional[int] = None,
    start: float = 0.0,
    end: float = 1.0,
    center: float = 0.5,
    amplitude: float = 0.5,
    frequency: float = 1.0,
    phase: float = 0.0,
    peak: float = 1.0,
    decay: float = 4.0,
    low: float = 0.0,
    high: float = 1.0,
    factor: float = 3.0,
    invert: bool = False,
    time_offset: float = 0.0,
) -> dict:
    """Generate and apply an automation curve to a session clip.

    Combines curve generation with clip automation writing in one call.

    curve_type: linear, exponential, logarithmic, s_curve, sine,
                sawtooth, spike, square, steps, perlin, brownian,
                spring, bezier, easing, euclidean, stochastic
    duration: curve length in beats
    density: number of automation points
    time_offset: shift the entire curve forward by N beats

    Curve-specific params:
    - linear/exp/log: start, end, factor (steepness 2-6)
    - sine: center, amplitude, frequency, phase
    - sawtooth: start, end, frequency (resets per duration)
    - spike: peak, decay (higher = faster)
    - square: low, high, frequency
    - s_curve: start, end

    Musical guidance:
    - Filter sweeps: use exponential (perceptually even)
    - Volume fades: use logarithmic (matches ear's response)
    - Crossfades: use s_curve (natural acceleration/deceleration)
    - Pumping: use sawtooth with frequency matching beat divisions
    - Throws: use spike with short duration (1-2 beats)
    - Tremolo/pan: use sine with frequency in musical divisions
    """
    # Build the curve via the generation engine.
    curve_kwargs = dict(
        curve_type=curve_type,
        duration=duration,
        density=density,
        start=start,
        end=end,
        center=center,
        amplitude=amplitude,
        frequency=frequency,
        phase=phase,
        peak=peak,
        decay=decay,
        low=low,
        high=high,
        factor=factor,
        invert=invert,
    )
    points = generate_curve(**curve_kwargs)

    # Shift every point forward when a positive offset was requested.
    if time_offset > 0:
        for point in points:
            point["time"] = point["time"] + time_offset

    # Assemble the write command, forwarding only the selectors provided.
    params: dict = {
        "track_index": track_index,
        "clip_index": clip_index,
        "parameter_type": parameter_type,
        "points": points,
    }
    selectors = {
        "device_index": device_index,
        "parameter_index": parameter_index,
        "send_index": send_index,
    }
    params.update({key: val for key, val in selectors.items() if val is not None})

    result = _get_ableton(ctx).send_command("set_clip_automation", params)
    result.update(curve_type=curve_type, curve_points=len(points))
    return result
205
+
206
+
207
@mcp.tool()
def apply_automation_recipe(
    ctx: Context,
    track_index: int,
    clip_index: int,
    parameter_type: str,
    recipe: str,
    duration: float = 4.0,
    density: int = 16,
    device_index: Optional[int] = None,
    parameter_index: Optional[int] = None,
    send_index: Optional[int] = None,
    time_offset: float = 0.0,
) -> dict:
    """Apply a named automation recipe to a session clip.

    Recipes are predefined curve shapes for common production techniques.
    Use get_automation_recipes to list all available recipes.

    Available recipes:
    - filter_sweep_up: LP filter opening (exponential, 8-32 bars)
    - filter_sweep_down: LP filter closing (logarithmic, 4-16 bars)
    - dub_throw: send spike for reverb/delay throw (1-2 beats)
    - tape_stop: pitch dropping to zero (0.5-2 beats)
    - build_rise: tension build on HP filter + volume (8-32 bars)
    - sidechain_pump: volume ducking per beat (sawtooth, 1 beat loop)
    - fade_in / fade_out: perceptually smooth volume fades
    - tremolo: periodic volume oscillation
    - auto_pan: stereo movement via pan sine
    - stutter: rapid on/off gating
    - breathing: subtle filter movement (acoustic instrument feel)
    - washout: reverb/delay feedback increasing
    - vinyl_crackle: slow bit reduction movement
    - stereo_narrow: collapse to mono before drop
    """
    points = generate_from_recipe(recipe, duration=duration, density=density)

    # Shift every point forward when a positive offset was requested.
    if time_offset > 0:
        for point in points:
            point["time"] = point["time"] + time_offset

    # Assemble the write command, forwarding only the selectors provided.
    params: dict = {
        "track_index": track_index,
        "clip_index": clip_index,
        "parameter_type": parameter_type,
        "points": points,
    }
    selectors = {
        "device_index": device_index,
        "parameter_index": parameter_index,
        "send_index": send_index,
    }
    params.update({key: val for key, val in selectors.items() if val is not None})

    result = _get_ableton(ctx).send_command("set_clip_automation", params)
    result.update(recipe=recipe, curve_points=len(points))
    return result
265
+
266
+
267
@mcp.tool()
def get_automation_recipes(ctx: Context) -> dict:
    """List all available automation recipes with descriptions.

    Each recipe includes: curve type, description, typical duration,
    and recommended target parameter. Use apply_automation_recipe
    to apply any recipe to a clip.
    """
    recipes = list_recipes()
    return {"recipes": recipes}
276
+
277
+
278
@mcp.tool()
def generate_automation_curve(
    ctx: Context,
    curve_type: str,
    duration: float = 4.0,
    density: int = 16,
    start: float = 0.0,
    end: float = 1.0,
    center: float = 0.5,
    amplitude: float = 0.5,
    frequency: float = 1.0,
    phase: float = 0.0,
    peak: float = 1.0,
    decay: float = 4.0,
    low: float = 0.0,
    high: float = 1.0,
    factor: float = 3.0,
    invert: bool = False,
) -> dict:
    """Generate automation curve points WITHOUT writing them.

    Returns the points array for preview/inspection. Use this to see
    what a curve looks like before committing it to a clip.
    Pass the returned points to set_clip_automation or
    set_arrangement_automation to write them.
    """
    points = generate_curve(
        curve_type=curve_type,
        duration=duration,
        density=density,
        start=start,
        end=end,
        center=center,
        amplitude=amplitude,
        frequency=frequency,
        phase=phase,
        peak=peak,
        decay=decay,
        low=low,
        high=high,
        factor=factor,
        invert=invert,
    )
    # Single pass over the values for the preview range summary.
    values = [point["value"] for point in points]
    return {
        "curve_type": curve_type,
        "duration": duration,
        "point_count": len(points),
        "points": points,
        "value_range": {
            "min": min(values) if values else 0,
            "max": max(values) if values else 0,
        },
    }
326
+
327
+
328
@mcp.tool()
def analyze_for_automation(
    ctx: Context,
    track_index: int,
) -> dict:
    """Analyze a track's spectrum and suggest automation targets.

    Reads the track's current spectral data and device chain,
    then suggests which parameters would benefit from automation
    based on the frequency content and device types present.

    Requires LivePilot Analyzer on master track and audio playing.
    """
    ableton = _get_ableton(ctx)

    # Get track devices
    track_info = ableton.send_command("get_track_info", {
        "track_index": track_index,
    })

    # Get current spectrum (best-effort: stays empty if analyzer not connected)
    spectral = ctx.lifespan_context.get("spectral")
    spectrum = {}
    if spectral and spectral.is_connected:
        spectrum = spectral.get_spectrum()

    # Get meter level
    meters = ableton.send_command("get_track_meters", {
        "track_index": track_index,
    })

    devices = track_info.get("devices", [])
    suggestions = []

    # Analyze based on device class names (lowercased for keyword matching)
    for i, dev in enumerate(devices):
        dev_class = dev.get("class_name", "").lower()

        # Filter devices — suggest sweep automation
        if any(kw in dev_class for kw in ["autofilter", "eq8", "filter"]):
            suggestions.append({
                "device_index": i,
                "device_name": dev.get("name"),
                "suggestion": "filter_sweep",
                "reason": "Filter detected — automate cutoff for movement",
                "recipe": "filter_sweep_up",
            })

        # Reverb/delay — suggest send throws or washout
        if any(kw in dev_class for kw in ["reverb", "delay", "hybrid", "echo"]):
            suggestions.append({
                "device_index": i,
                "device_name": dev.get("name"),
                "suggestion": "spatial_automation",
                "reason": "Space effect — automate mix/decay for depth changes",
                "recipe": "washout",
            })

        # Distortion — suggest drive automation
        if any(kw in dev_class for kw in ["saturator", "overdrive", "pedal", "amp"]):
            suggestions.append({
                "device_index": i,
                "device_name": dev.get("name"),
                "suggestion": "drive_automation",
                "reason": "Distortion — automate drive for dynamic saturation",
                "recipe": "breathing",
            })

        # Synths — suggest wavetable/macro automation
        if any(kw in dev_class for kw in ["wavetable", "drift", "analog", "operator"]):
            suggestions.append({
                "device_index": i,
                "device_name": dev.get("name"),
                "suggestion": "timbre_evolution",
                "reason": "Synth — automate timbre params for evolving sound",
                "recipe": "breathing",
            })

    # Mixer suggestions based on spectrum
    if spectrum:
        sub = spectrum.get("sub", 0)
        if sub > 0.15:
            suggestions.append({
                "suggestion": "high_pass_automation",
                "reason": "Heavy sub content (%.2f) — HP filter sweep for builds" % sub,
                "recipe": "build_rise",
            })

    # Always suggest send automation for spatial depth
    suggestions.append({
        "suggestion": "send_throws",
        "reason": "Reverb/delay sends — automate for dub throws and spatial variation",
        "recipe": "dub_throw",
    })

    # .get("tracks", [{}]) only guards a MISSING key — an empty list would
    # still raise IndexError on [0], so fall back explicitly.
    track_meters = meters.get("tracks") or [{}]

    return {
        "track_index": track_index,
        "track_name": track_info.get("name", ""),
        "device_count": len(devices),
        "current_level": track_meters[0].get("level", 0),
        "spectrum": spectrum,
        "suggestions": suggestions,
    }
@@ -144,27 +144,31 @@ def set_clip_loop(
144
144
  ctx: Context,
145
145
  track_index: int,
146
146
  clip_index: int,
147
- enabled: bool,
148
- start: Optional[float] = None,
149
- end: Optional[float] = None,
147
+ enabled: Optional[bool] = None,
148
+ loop_start: Optional[float] = None,
149
+ loop_end: Optional[float] = None,
150
150
  ) -> dict:
151
- """Enable/disable clip looping and optionally set loop start/end (in beats)."""
151
+ """Enable/disable clip looping and optionally set loop start/end (in beats).
152
+ All parameters are optional but at least one must be provided."""
152
153
  _validate_track_index(track_index)
153
154
  _validate_clip_index(clip_index)
155
+ if enabled is None and loop_start is None and loop_end is None:
156
+ raise ValueError("At least one of enabled, loop_start, or loop_end must be provided")
154
157
  params = {
155
158
  "track_index": track_index,
156
159
  "clip_index": clip_index,
157
- "enabled": enabled,
158
160
  }
159
- if start is not None:
160
- if start < 0:
161
+ if enabled is not None:
162
+ params["enabled"] = enabled
163
+ if loop_start is not None:
164
+ if loop_start < 0:
161
165
  raise ValueError("Loop start must be >= 0")
162
- params["start"] = start
163
- if end is not None:
164
- if end <= 0:
166
+ params["start"] = loop_start
167
+ if loop_end is not None:
168
+ if loop_end <= 0:
165
169
  raise ValueError("Loop end must be > 0")
166
- params["end"] = end
167
- if start is not None and end is not None and start >= end:
170
+ params["end"] = loop_end
171
+ if loop_start is not None and loop_end is not None and loop_start >= loop_end:
168
172
  raise ValueError("Loop start must be less than loop end")
169
173
  return _get_ableton(ctx).send_command("set_clip_loop", params)
170
174
 
@@ -106,9 +106,9 @@ def batch_set_parameters(
106
106
  ctx: Context,
107
107
  track_index: int,
108
108
  device_index: int,
109
- parameters: list,
109
+ parameters: Any,
110
110
  ) -> dict:
111
- """Set multiple device parameters in one call. parameters is a list of objects: [{"name_or_index": "Dry/Wet", "value": 0.5}, ...].
111
+ """Set multiple device parameters in one call. parameters is a JSON array of objects: [{"name_or_index": "Dry/Wet", "value": 0.5}, ...].
112
112
  track_index: 0+ for regular tracks, -1/-2/... for return tracks (A/B/...), -1000 for master."""
113
113
  _validate_track_index(track_index)
114
114
  _validate_device_index(device_index)
@@ -1,6 +1,6 @@
1
- """Mixing MCP tools — volume, pan, sends, routing, master.
1
+ """Mixing MCP tools — volume, pan, sends, routing, master, metering.
2
2
 
3
- 8 tools matching the Remote Script mixing domain.
3
+ 11 tools matching the Remote Script mixing domain.
4
4
  """
5
5
 
6
6
  from __future__ import annotations
@@ -87,6 +87,42 @@ def set_master_volume(ctx: Context, volume: float) -> dict:
87
87
  return _get_ableton(ctx).send_command("set_master_volume", {"volume": volume})
88
88
 
89
89
 
90
@mcp.tool()
def get_track_meters(
    ctx: Context,
    track_index: Optional[int] = None,
    include_stereo: bool = False,
) -> dict:
    """Read real-time output meter levels for tracks.

    Returns peak level (0.0-1.0) for each track. Call while playing to
    check levels, detect clipping, or verify a track is producing sound.

    track_index: specific track (omit for all tracks)
    include_stereo: include left/right channel meters (adds GUI load)
    """
    payload: dict = {}
    if track_index is not None:
        payload["track_index"] = track_index
    # Stereo meters are opt-in because they add GUI load.
    if include_stereo:
        payload["include_stereo"] = include_stereo
    ableton = _get_ableton(ctx)
    return ableton.send_command("get_track_meters", payload)
110
+
111
+
112
@mcp.tool()
def get_master_meters(ctx: Context) -> dict:
    """Read real-time output meter levels for the master track (left, right, peak)."""
    ableton = _get_ableton(ctx)
    return ableton.send_command("get_master_meters")
116
+
117
+
118
@mcp.tool()
def get_mix_snapshot(ctx: Context) -> dict:
    """Get a complete mix snapshot: all track meters, volumes, pans, mute/solo,
    return tracks, and master levels. One call to assess the full mix state.
    Call while playing for meaningful meter readings."""
    ableton = _get_ableton(ctx)
    return ableton.send_command("get_mix_snapshot")
124
+
125
+
90
126
  @mcp.tool()
91
127
  def get_track_routing(ctx: Context, track_index: int) -> dict:
92
128
  """Get input/output routing info for a track. Use negative track_index for return tracks (-1=A, -2=B)."""
@@ -100,22 +136,22 @@ def get_track_routing(ctx: Context, track_index: int) -> dict:
100
136
  def set_track_routing(
101
137
  ctx: Context,
102
138
  track_index: int,
103
- input_type: Optional[str] = None,
104
- input_channel: Optional[str] = None,
105
- output_type: Optional[str] = None,
106
- output_channel: Optional[str] = None,
139
+ input_routing_type: Optional[str] = None,
140
+ input_routing_channel: Optional[str] = None,
141
+ output_routing_type: Optional[str] = None,
142
+ output_routing_channel: Optional[str] = None,
107
143
  ) -> dict:
108
144
  """Set input/output routing for a track by display name. Use negative track_index for return tracks (-1=A, -2=B)."""
109
145
  _validate_track_index(track_index)
110
146
  params = {"track_index": track_index}
111
- if input_type is not None:
112
- params["input_type"] = input_type
113
- if input_channel is not None:
114
- params["input_channel"] = input_channel
115
- if output_type is not None:
116
- params["output_type"] = output_type
117
- if output_channel is not None:
118
- params["output_channel"] = output_channel
147
+ if input_routing_type is not None:
148
+ params["input_type"] = input_routing_type
149
+ if input_routing_channel is not None:
150
+ params["input_channel"] = input_routing_channel
151
+ if output_routing_type is not None:
152
+ params["output_type"] = output_routing_type
153
+ if output_routing_channel is not None:
154
+ params["output_channel"] = output_routing_channel
119
155
  if len(params) == 1:
120
156
  raise ValueError("At least one routing parameter must be provided")
121
157
  return _get_ableton(ctx).send_command("set_track_routing", params)
@@ -127,12 +127,12 @@ def set_track_mute(ctx: Context, track_index: int, muted: bool) -> dict:
127
127
 
128
128
 
129
129
  @mcp.tool()
130
- def set_track_solo(ctx: Context, track_index: int, soloed: bool) -> dict:
130
+ def set_track_solo(ctx: Context, track_index: int, solo: bool) -> dict:
131
131
  """Solo or unsolo a track."""
132
132
  _validate_track_index(track_index)
133
133
  return _get_ableton(ctx).send_command("set_track_solo", {
134
134
  "track_index": track_index,
135
- "solo": soloed,
135
+ "solo": solo,
136
136
  })
137
137
 
138
138
 
package/package.json CHANGED
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.4.5",
3
+ "version": "1.6.1",
4
4
  "mcpName": "io.github.dreamrec/livepilot",
5
- "description": "AI copilot for Ableton Live 12 — 104 MCP tools for music production, sound design, and mixing",
5
+ "description": "AI copilot for Ableton Live 12 — 135 tools, device atlas (280+ devices), real-time audio analysis, automation intelligence, and technique memory",
6
6
  "author": "Pilot Studio",
7
7
  "license": "MIT",
8
8
  "type": "commonjs",
@@ -27,7 +27,6 @@
27
27
  "midi",
28
28
  "daw",
29
29
  "ai",
30
- "claude",
31
30
  "sound-design",
32
31
  "mixing",
33
32
  "arrangement"
@@ -24,8 +24,9 @@ Given a high-level description, you:
24
24
  6. **Program patterns** — write MIDI notes that fit the genre and style
25
25
  7. **Add effects** — load and configure effect chains for the desired sound
26
26
  8. **HEALTH CHECK** — verify effects aren't pass-throughs (Dry/Wet > 0, Drive set, etc.)
27
- 9. **Mix** — balance volumes, set panning, configure sends
28
- 10. **Final verify** — `get_session_info`, fire scenes, confirm audio output
27
+ 9. **Automate** — add movement and evolution to the mix (see Automation Phase below)
28
+ 10. **Mix** — balance volumes, set panning, configure sends
29
+ 11. **Final verify** — `get_session_info`, fire scenes, confirm audio output
29
30
 
30
31
  ## Mandatory Track Health Checks
31
32
 
@@ -49,6 +50,35 @@ After loading any instrument, run this checklist:
49
50
  - **For synths, use `search_browser` → `load_browser_item`** with exact URI. `find_and_load_device` can match sample files before the actual instrument (e.g., "Drift" matches a .wav sample first)
50
51
  - **After loading any effect**, set its key parameters to non-default values. A Saturator with Drive=0, a Reverb with Dry/Wet=0, or a Compressor with Threshold at max are all pass-throughs.
51
52
 
53
+ ## Automation Phase (after writing notes, before mixing)
54
+
55
+ ### Step 1: Spectral Diagnosis
56
+ - Solo each track -> `get_master_spectrum` -> build spectral map
57
+ - Identify frequency overlaps between tracks (masking)
58
+ - Note problem areas: resonances, mud, harshness
59
+
60
+ ### Step 2: Per-Track Analysis
61
+ - `analyze_for_automation` on each track -> get device-specific suggestions
62
+ - Cross-reference with spectral map: which suggestions address the problems found?
63
+
64
+ ### Step 3: Write Automation (perception-action loop)
65
+ For each automation decision:
66
+ 1. Read spectrum BEFORE
67
+ 2. Apply recipe or custom curve
68
+ 3. Read spectrum AFTER
69
+ 4. Compare: did it improve? If not, clear and adjust
70
+ 5. Store the final working automation parameters in memory
71
+
72
+ ### Step 4: Spatial Design
73
+ - Add send automation for depth (dub throws, reverb washes)
74
+ - Consider complementary automation: as one track's filter opens, another's narrows
75
+ - Use cross-track spectral awareness to avoid new masking from automation
76
+
77
+ ### Step 5: Generative/Evolving Textures
78
+ - Consider polyrhythmic automation for non-repeating evolution
79
+ - Unlinked envelopes with prime-number beat lengths (3, 5, 7 beats)
80
+ - Spectral-driven automation: use analyzer data to inform how parameters are modulated in real time
81
+
52
82
  ## Rules
53
83
 
54
84
  - Always use the livepilot-core skill for guidance on tool usage
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.4.5",
4
- "description": "AI copilot for Ableton Live 12 — 104 MCP tools for music production, sound design, and mixing",
3
+ "version": "1.6.1",
4
+ "description": "AI copilot for Ableton Live 12 — 135 tools, device atlas (280+ devices), real-time audio analysis, automation intelligence, and technique memory",
5
5
  "author": "Pilot Studio",
6
6
  "skills": [
7
7
  "skills/livepilot-core"