livepilot 1.10.6 → 1.10.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcp.json.disabled +9 -0
  3. package/.mcpbignore +3 -0
  4. package/AGENTS.md +3 -3
  5. package/BUGS.md +1570 -0
  6. package/CHANGELOG.md +42 -0
  7. package/CONTRIBUTING.md +1 -1
  8. package/README.md +7 -7
  9. package/bin/livepilot.js +28 -8
  10. package/livepilot/.Codex-plugin/plugin.json +2 -2
  11. package/livepilot/.claude-plugin/plugin.json +2 -2
  12. package/livepilot/skills/livepilot-core/SKILL.md +4 -4
  13. package/livepilot/skills/livepilot-core/references/overview.md +2 -2
  14. package/livepilot/skills/livepilot-release/SKILL.md +8 -8
  15. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  16. package/m4l_device/LivePilot_Analyzer.amxd.pre-presentation-backup +0 -0
  17. package/m4l_device/LivePilot_Analyzer.maxproj +53 -0
  18. package/m4l_device/livepilot_bridge.js +214 -2
  19. package/manifest.json +3 -3
  20. package/mcp_server/__init__.py +1 -1
  21. package/mcp_server/atlas/__init__.py +93 -26
  22. package/mcp_server/creative_constraints/tools.py +206 -33
  23. package/mcp_server/experiment/engine.py +7 -9
  24. package/mcp_server/hook_hunter/analyzer.py +62 -9
  25. package/mcp_server/hook_hunter/tools.py +60 -9
  26. package/mcp_server/m4l_bridge.py +21 -6
  27. package/mcp_server/musical_intelligence/detectors.py +32 -0
  28. package/mcp_server/performance_engine/tools.py +112 -29
  29. package/mcp_server/preview_studio/engine.py +89 -8
  30. package/mcp_server/preview_studio/tools.py +22 -6
  31. package/mcp_server/project_brain/automation_graph.py +71 -19
  32. package/mcp_server/project_brain/builder.py +2 -0
  33. package/mcp_server/project_brain/tools.py +55 -5
  34. package/mcp_server/reference_engine/profile_builder.py +129 -3
  35. package/mcp_server/reference_engine/tools.py +47 -6
  36. package/mcp_server/runtime/execution_router.py +50 -0
  37. package/mcp_server/runtime/mcp_dispatch.py +75 -3
  38. package/mcp_server/runtime/remote_commands.py +4 -2
  39. package/mcp_server/sample_engine/analyzer.py +131 -4
  40. package/mcp_server/sample_engine/critics.py +29 -8
  41. package/mcp_server/sample_engine/models.py +20 -1
  42. package/mcp_server/sample_engine/tools.py +48 -14
  43. package/mcp_server/semantic_moves/sound_design_compilers.py +22 -59
  44. package/mcp_server/semantic_moves/transition_compilers.py +12 -19
  45. package/mcp_server/server.py +68 -2
  46. package/mcp_server/session_continuity/models.py +4 -0
  47. package/mcp_server/session_continuity/tracker.py +14 -1
  48. package/mcp_server/song_brain/builder.py +110 -12
  49. package/mcp_server/song_brain/tools.py +77 -13
  50. package/mcp_server/sound_design/tools.py +112 -1
  51. package/mcp_server/stuckness_detector/detector.py +90 -0
  52. package/mcp_server/stuckness_detector/tools.py +41 -0
  53. package/mcp_server/tools/_agent_os_engine/critics.py +24 -0
  54. package/mcp_server/tools/_composition_engine/__init__.py +2 -2
  55. package/mcp_server/tools/_composition_engine/harmony.py +90 -0
  56. package/mcp_server/tools/_composition_engine/sections.py +47 -4
  57. package/mcp_server/tools/_harmony_engine.py +52 -8
  58. package/mcp_server/tools/_research_engine.py +98 -19
  59. package/mcp_server/tools/_theory_engine.py +138 -9
  60. package/mcp_server/tools/agent_os.py +20 -3
  61. package/mcp_server/tools/analyzer.py +98 -0
  62. package/mcp_server/tools/clips.py +45 -0
  63. package/mcp_server/tools/composition.py +66 -23
  64. package/mcp_server/tools/devices.py +22 -1
  65. package/mcp_server/tools/harmony.py +115 -14
  66. package/mcp_server/tools/midi_io.py +13 -1
  67. package/mcp_server/tools/mixing.py +35 -1
  68. package/mcp_server/tools/motif.py +49 -3
  69. package/mcp_server/tools/research.py +24 -0
  70. package/mcp_server/tools/theory.py +108 -16
  71. package/mcp_server/transition_engine/critics.py +18 -11
  72. package/package.json +2 -2
  73. package/remote_script/LivePilot/__init__.py +57 -2
  74. package/remote_script/LivePilot/clips.py +69 -0
  75. package/remote_script/LivePilot/mixing.py +117 -0
  76. package/remote_script/LivePilot/router.py +13 -1
  77. package/scripts/generate_tool_catalog.py +13 -38
  78. package/scripts/sync_metadata.py +231 -14
@@ -188,14 +188,30 @@ def build_profile_from_filename(
188
188
  source: str = "filesystem",
189
189
  duration_seconds: float = 0.0,
190
190
  ) -> SampleProfile:
191
- """Build a SampleProfile from filename metadata only (no spectral analysis).
192
-
193
- This is the fallback when M4L bridge is unavailable.
191
+ """Build a SampleProfile from filename metadata + offline spectral
192
+ analysis (BUG-B49 fix).
193
+
194
+ Filename still supplies key / bpm / material-type hints when
195
+ present, but we now ALSO open the audio file via soundfile and
196
+ compute:
197
+ - duration_seconds (exact)
198
+ - frequency_center / frequency_spread (FFT-based centroid)
199
+ - brightness (high-band energy ratio)
200
+ - transient_density (RMS-gradient peak count)
201
+ - has_clear_downbeat (peak-interval consistency)
202
+ These used to be zeros regardless of file contents — downstream
203
+ critics had no real data.
204
+
205
+ If soundfile isn't available or the file can't be decoded, we
206
+ gracefully fall back to the filename-only path (legacy behavior).
194
207
  """
195
208
  name = os.path.splitext(os.path.basename(file_path))[0]
196
209
  metadata = parse_filename_metadata(file_path)
197
210
  material = classify_material_from_name(name)
198
211
 
212
+ # Offline spectral analysis — best-effort, never raises.
213
+ spectral = _analyze_audio_file(file_path)
214
+
199
215
  profile = SampleProfile(
200
216
  source=source,
201
217
  file_path=file_path,
@@ -206,7 +222,14 @@ def build_profile_from_filename(
206
222
  bpm_confidence=0.5 if metadata.get("bpm") else 0.0,
207
223
  material_type=material,
208
224
  material_confidence=0.4, # filename-only is low confidence
209
- duration_seconds=duration_seconds,
225
+ duration_seconds=(
226
+ spectral.get("duration_seconds") or duration_seconds
227
+ ),
228
+ frequency_center=spectral.get("frequency_center", 0.0),
229
+ frequency_spread=spectral.get("frequency_spread", 0.0),
230
+ brightness=spectral.get("brightness", 0.0),
231
+ transient_density=spectral.get("transient_density", 0.0),
232
+ has_clear_downbeat=spectral.get("has_clear_downbeat", False),
210
233
  )
211
234
 
212
235
  profile.suggested_mode = suggest_simpler_mode(profile)
@@ -214,3 +237,107 @@ def build_profile_from_filename(
214
237
  profile.suggested_warp_mode = suggest_warp_mode(profile)
215
238
 
216
239
  return profile
240
+
241
+
242
+ def _analyze_audio_file(file_path: str) -> dict:
243
+ """Read an audio file and compute lightweight spectral/temporal
244
+ features via numpy. Returns {} if the file can't be decoded.
245
+
246
+ Uses soundfile (already a dependency) + numpy FFT — no librosa
247
+ required. Falls back cleanly so file-not-found / unsupported
248
+ format doesn't break the analyzer.
249
+ """
250
+ try:
251
+ import soundfile as sf
252
+ import numpy as np
253
+ except ImportError:
254
+ return {}
255
+
256
+ if not file_path or not os.path.exists(file_path):
257
+ return {}
258
+
259
+ try:
260
+ data, samplerate = sf.read(file_path, dtype="float32")
261
+ except Exception:
262
+ return {}
263
+
264
+ # Downmix to mono
265
+ if data.ndim > 1:
266
+ data = data.mean(axis=1)
267
+ if data.size == 0 or samplerate <= 0:
268
+ return {}
269
+
270
+ duration = float(data.size) / float(samplerate)
271
+
272
+ # Spectral centroid via magnitude-weighted frequency average.
273
+ # Use a Welch-style average over ~50ms windows to stabilize.
274
+ win_len = max(1024, int(samplerate * 0.05))
275
+ hop = win_len // 2
276
+ centroids: list[float] = []
277
+ spreads: list[float] = []
278
+ frames = range(0, max(len(data) - win_len, 1), hop)
279
+ for start in frames:
280
+ frame = data[start:start + win_len]
281
+ if len(frame) < 32:
282
+ continue
283
+ # Hann-window + FFT
284
+ mags = np.abs(np.fft.rfft(frame * np.hanning(len(frame))))
285
+ total = mags.sum()
286
+ if total <= 0:
287
+ continue
288
+ freqs = np.linspace(0, samplerate / 2, len(mags))
289
+ c = float((mags * freqs).sum() / total)
290
+ centroids.append(c)
291
+ # Spectral spread = sqrt(sum(mags * (freqs - c)**2) / total)
292
+ s = float(np.sqrt(((mags * (freqs - c) ** 2).sum()) / total))
293
+ spreads.append(s)
294
+
295
+ if not centroids:
296
+ return {"duration_seconds": duration}
297
+
298
+ frequency_center = float(np.mean(centroids))
299
+ frequency_spread = float(np.mean(spreads))
300
+ # Brightness: fraction of energy above 4kHz
301
+ # Use a single FFT on the whole signal for this (cheap)
302
+ full_mags = np.abs(np.fft.rfft(data * np.hanning(len(data))))
303
+ full_freqs = np.linspace(0, samplerate / 2, len(full_mags))
304
+ total_energy = full_mags.sum() or 1.0
305
+ high_energy = full_mags[full_freqs >= 4000].sum()
306
+ brightness = float(high_energy / total_energy)
307
+
308
+ # Transient density: peak count in rectified-RMS gradient
309
+ # Coarse envelope over ~20ms windows
310
+ env_win = max(256, int(samplerate * 0.02))
311
+ envelope = np.array([
312
+ float(np.sqrt(np.mean(data[i:i + env_win] ** 2)))
313
+ for i in range(0, len(data), env_win)
314
+ ])
315
+ if envelope.size > 1:
316
+ diffs = np.diff(envelope)
317
+ # Count upward transitions above a dynamic threshold
318
+ thresh = max(envelope.std() * 1.5, 1e-4)
319
+ peaks = int(np.sum(diffs > thresh))
320
+ transient_density = float(peaks / max(duration, 0.001))
321
+ else:
322
+ transient_density = 0.0
323
+
324
+ # Clear downbeat: peaks evenly spaced
325
+ has_clear_downbeat = False
326
+ if envelope.size > 4:
327
+ # Find top-N peaks and check interval stddev
328
+ peak_positions = np.argsort(envelope)[-8:]
329
+ peak_positions.sort()
330
+ if len(peak_positions) >= 3:
331
+ intervals = np.diff(peak_positions)
332
+ if intervals.size > 0 and float(np.mean(intervals)) > 0:
333
+ cv = float(np.std(intervals)) / float(np.mean(intervals))
334
+ has_clear_downbeat = cv < 0.5 # low variation → steady
335
+
336
+ return {
337
+ "duration_seconds": duration,
338
+ "frequency_center": frequency_center,
339
+ "frequency_spread": frequency_spread,
340
+ "brightness": brightness,
341
+ "transient_density": transient_density,
342
+ "has_clear_downbeat": has_clear_downbeat,
343
+ }
@@ -164,21 +164,36 @@ def run_frequency_fit_critic(
164
164
  ) -> CriticResult:
165
165
  """Score frequency fit against existing mix.
166
166
 
167
- Without mix_snapshot (no M4L bridge), returns neutral 0.5.
167
+ BUG-B38 fix: the old stub branch returned a neutral 0.5 "fair"
168
+ score even when the analyzer had no spectral data at all —
169
+ misleading the user into thinking the sample was a middling fit
170
+ when in reality the critic couldn't evaluate anything. We now
171
+ mark the result as explicitly unavailable (score=-1 sentinel +
172
+ available=False + rating="unavailable") so downstream aggregators
173
+ can skip this critic rather than fold a fake 0.5 into the overall
174
+ score.
168
175
  """
169
176
  if mix_snapshot is None or not mix_snapshot:
170
177
  return CriticResult(
171
- critic_name="frequency_fit", score=0.5,
172
- recommendation="No spectral data — verify frequency fit by ear",
173
- adjustments=[{"note": "stub — spectral overlap analysis not yet implemented"}],
178
+ critic_name="frequency_fit",
179
+ score=-1.0,
180
+ available=False,
181
+ rating_override="unavailable",
182
+ recommendation=(
183
+ "No mix snapshot available — load LivePilot_Analyzer on "
184
+ "master and call get_mix_snapshot first. Falling back to "
185
+ "by-ear verification."
186
+ ),
174
187
  )
175
188
 
176
189
  # Basic frequency overlap check using mix_snapshot track data
177
- # mix_snapshot expected shape: {"tracks": [{"name": ..., "peak_frequency": ...}]}
178
190
  tracks = mix_snapshot.get("tracks", [])
179
191
  if not tracks:
180
192
  return CriticResult(
181
- critic_name="frequency_fit", score=0.5,
193
+ critic_name="frequency_fit",
194
+ score=-1.0,
195
+ available=False,
196
+ rating_override="unavailable",
182
197
  recommendation="Mix snapshot has no track data",
183
198
  )
184
199
 
@@ -186,8 +201,14 @@ def run_frequency_fit_critic(
186
201
  sample_center = profile.frequency_center
187
202
  if sample_center <= 0:
188
203
  return CriticResult(
189
- critic_name="frequency_fit", score=0.5,
190
- recommendation="Sample has no spectral data — verify by ear",
204
+ critic_name="frequency_fit",
205
+ score=-1.0,
206
+ available=False,
207
+ rating_override="unavailable",
208
+ recommendation=(
209
+ "Sample has no spectral data — analyze_sample couldn't "
210
+ "decode the file, or it's a clip-only reference."
211
+ ),
191
212
  )
192
213
 
193
214
  # Count tracks with energy near the sample's center frequency
@@ -82,15 +82,32 @@ class SampleIntent:
82
82
 
83
83
  @dataclass
84
84
  class CriticResult:
85
- """Result from a single sample critic."""
85
+ """Result from a single sample critic.
86
+
87
+ BUG-B38: added `available` + rating override so critics can
88
+ explicitly mark themselves as unevaluated (e.g. no mix snapshot
89
+ for frequency_fit) rather than returning a misleading 0.5 score.
90
+ Downstream aggregators check `available` before folding a critic's
91
+ score into the composite.
92
+ """
86
93
 
87
94
  critic_name: str
88
95
  score: float
89
96
  recommendation: str
90
97
  adjustments: list = field(default_factory=list)
98
+ # Explicit availability flag — False when critic couldn't evaluate
99
+ # (score will be -1.0 sentinel; aggregators should skip)
100
+ available: bool = True
101
+ # Optional hand-set rating label — overrides the score-based
102
+ # default when provided (used for "unavailable" status)
103
+ rating_override: str = ""
91
104
 
92
105
  @property
93
106
  def rating(self) -> str:
107
+ if self.rating_override:
108
+ return self.rating_override
109
+ if not self.available:
110
+ return "unavailable"
94
111
  if self.score >= 0.8:
95
112
  return "excellent"
96
113
  if self.score >= 0.6:
@@ -102,6 +119,8 @@ class CriticResult:
102
119
  def to_dict(self) -> dict:
103
120
  d = asdict(self)
104
121
  d["rating"] = self.rating
122
+ # Strip internal override from payload (not for consumers)
123
+ d.pop("rating_override", None)
105
124
  return d
106
125
 
107
126
 
@@ -104,30 +104,64 @@ def evaluate_sample_fit(
104
104
  logger.debug("get_track_info(%d) skipped: %s", i, exc)
105
105
  continue
106
106
 
107
- # Detect key from MIDI tracks
107
+ # Detect key from MIDI tracks.
108
+ # BUG-B37 fix: the old code checked clip_info.get("is_midi") but
109
+ # the Remote Script returns is_midi_clip (different field name),
110
+ # so the check always failed and song_key stayed None —
111
+ # key_fit then reported "Song key unknown" even on obvious
112
+ # Dm sessions. Now we check both field names for safety AND
113
+ # aggregate notes from all harmonic tracks via harmonic_score
114
+ # (Batch 5 helper), so key detection uses the richest signal.
108
115
  try:
109
116
  from ..tools._theory_engine import detect_key
110
- for i in range(min(track_count, 8)):
117
+ from ..tools._composition_engine.harmony import harmonic_score
118
+
119
+ # Collect all tracks' notes, scored by harmonic-ness
120
+ harmonic_pool: list[dict] = []
121
+ for i in range(min(track_count, 16)):
111
122
  try:
112
123
  clip_info = ableton.send_command("get_clip_info", {
113
124
  "track_index": i, "clip_index": 0,
114
125
  })
115
- if clip_info.get("is_midi"):
116
- notes_result = ableton.send_command("get_notes", {
117
- "track_index": i, "clip_index": 0,
118
- })
119
- notes = notes_result.get("notes", [])
120
- if notes:
121
- key_result = detect_key(notes)
122
- mode = key_result.get("mode", "")
123
- mode_suffix = "m" if "minor" in mode else ""
124
- song_key = f"{key_result['tonic_name']}{mode_suffix}"
125
- break
126
126
  except Exception as exc:
127
- logger.debug("key detection on track %d skipped: %s", i, exc)
127
+ logger.debug("get_clip_info(%d) skipped: %s", i, exc)
128
+ continue
129
+ # Accept either the new is_midi_clip field or the legacy
130
+ # is_midi (in case some install combines versions)
131
+ is_midi = (
132
+ clip_info.get("is_midi_clip")
133
+ or clip_info.get("is_midi")
134
+ or False
135
+ )
136
+ if not is_midi:
128
137
  continue
138
+ try:
139
+ notes_result = ableton.send_command("get_notes", {
140
+ "track_index": i, "clip_index": 0,
141
+ })
142
+ except Exception as exc:
143
+ logger.debug("get_notes(%d) skipped: %s", i, exc)
144
+ continue
145
+ notes = notes_result.get("notes", []) if isinstance(
146
+ notes_result, dict
147
+ ) else []
148
+ if not notes:
149
+ continue
150
+ track_name = (
151
+ existing_roles[i] if i < len(existing_roles) else ""
152
+ )
153
+ if harmonic_score(notes, track_name) >= 0.3:
154
+ harmonic_pool.extend(notes)
155
+
156
+ if harmonic_pool:
157
+ key_result = detect_key(harmonic_pool)
158
+ mode = key_result.get("mode", "")
159
+ mode_suffix = "m" if "minor" in mode else ""
160
+ song_key = f"{key_result['tonic_name']}{mode_suffix}"
129
161
  except ImportError:
130
162
  pass
163
+ except Exception as exc:
164
+ logger.debug("key aggregation failed: %s", exc)
131
165
  except Exception as exc:
132
166
  logger.warning("session context for evaluate_sample_fit failed: %s", exc)
133
167
 
@@ -14,9 +14,11 @@ from . import resolvers
14
14
  def _compile_add_warmth(move: SemanticMove, kernel: dict) -> CompiledPlan:
15
15
  """Compile 'add_warmth': volume boost + reverb send for perceived warmth.
16
16
 
17
- SAFETY: Never blindly set device parameters device_index=0, parameter_index=0
18
- can kill audio if the first device isn't a Saturator. Only adjust device params
19
- when find_device_on_track confirms a Saturator is present.
17
+ SAFETY: Never target device parameters by raw index. Ableton's parameter
18
+ index 0 is "Device On" on every device, so set_device_parameter(idx=0)
19
+ with any fractional value rounds to 0 and DISABLES the device. Use sends
20
+ and volume for warmth; device-param automation is only safe once the
21
+ resolver can look parameters up by name.
20
22
  """
21
23
  steps = []
22
24
  descriptions = []
@@ -31,24 +33,6 @@ def _compile_add_warmth(move: SemanticMove, kernel: dict) -> CompiledPlan:
31
33
  idx = t["index"]
32
34
  name = t["name"]
33
35
 
34
- # Try to find a Saturator on the track (safe device adjustment)
35
- saturator = resolvers.find_device_on_track(kernel, idx, "Saturator")
36
- if saturator:
37
- steps.append(CompiledStep(
38
- tool="set_device_parameter",
39
- params={
40
- "track_index": idx,
41
- "device_index": saturator["device_index"],
42
- "parameter_index": 0,
43
- "value": 0.3,
44
- },
45
- description=f"Gentle Saturator drive on {name}",
46
- ))
47
- descriptions.append(f"Saturate {name}")
48
- else:
49
- # No Saturator found — use volume + send instead of risky device params
50
- warnings.append(f"No Saturator on {name} — using volume+reverb for warmth")
51
-
52
36
  # Boost volume slightly for perceived warmth
53
37
  steps.append(CompiledStep(
54
38
  tool="set_track_volume",
@@ -84,32 +68,22 @@ def _compile_add_warmth(move: SemanticMove, kernel: dict) -> CompiledPlan:
84
68
 
85
69
 
86
70
  def _compile_add_texture(move: SemanticMove, kernel: dict) -> CompiledPlan:
87
- """Compile 'add_texture': perlin filter motion + delay send."""
71
+ """Compile 'add_texture': delay send for spatial texture.
72
+
73
+ Device-parameter automation (perlin filter motion) was removed because it
74
+ targeted device_index=0, parameter_index=0 without a resolver check — that
75
+ hits "Device On" on every Ableton device and would silently disable the
76
+ first device. Re-enable once resolvers.find_device_parameter lands.
77
+ """
88
78
  steps = []
89
79
  descriptions = []
80
+ warnings = []
90
81
 
91
82
  targets = resolvers.find_tracks_by_role(kernel, ["pad", "chords", "lead"])
92
83
 
93
84
  for t in targets[:1]:
94
85
  idx = t["index"]
95
86
  name = t["name"]
96
- steps.append(CompiledStep(
97
- tool="apply_automation_shape",
98
- params={
99
- "track_index": idx,
100
- "clip_index": 0,
101
- "parameter_type": "device",
102
- "device_index": 0,
103
- "parameter_index": 0,
104
- "curve_type": "perlin",
105
- "center": 0.4,
106
- "amplitude": 0.2,
107
- "duration": 8,
108
- "density": 16,
109
- },
110
- description=f"Perlin filter motion on {name} for organic texture",
111
- ))
112
- descriptions.append(f"Perlin filter on {name}")
113
87
 
114
88
  # Add delay send
115
89
  steps.append(CompiledStep(
@@ -119,6 +93,9 @@ def _compile_add_texture(move: SemanticMove, kernel: dict) -> CompiledPlan:
119
93
  ))
120
94
  descriptions.append(f"Delay texture on {name}")
121
95
 
96
+ if not targets:
97
+ warnings.append("No pad/chords/lead tracks — texture needs a melodic bed")
98
+
122
99
  steps.append(CompiledStep(
123
100
  tool="get_track_meters",
124
101
  params={"include_stereo": True},
@@ -132,14 +109,17 @@ def _compile_add_texture(move: SemanticMove, kernel: dict) -> CompiledPlan:
132
109
  risk_level="medium",
133
110
  summary="; ".join(descriptions) if descriptions else "No tracks for texture",
134
111
  requires_approval=(kernel.get("mode", "improve") != "explore"),
112
+ warnings=warnings,
135
113
  )
136
114
 
137
115
 
138
116
  def _compile_shape_transients(move: SemanticMove, kernel: dict) -> CompiledPlan:
139
117
  """Compile 'shape_transients': push drum volume for punch, adjust sends.
140
118
 
141
- SAFETY: Never blindly set device parameters. Only adjust Compressor params
142
- when find_device_on_track confirms one exists. Otherwise use volume for punch.
119
+ SAFETY: Never target device parameters by raw index. Index 0 on every
120
+ Ableton device is "Device On" — writing 0.2 rounds to 0 and disables the
121
+ device. Punch is achieved via volume + send shaping; Compressor attack
122
+ automation is only safe once the resolver can look parameters up by name.
143
123
  """
144
124
  steps = []
145
125
  descriptions = []
@@ -158,24 +138,7 @@ def _compile_shape_transients(move: SemanticMove, kernel: dict) -> CompiledPlan:
158
138
  idx = dt["index"]
159
139
  name = dt["name"]
160
140
 
161
- # Try to find a Compressor on the track
162
- compressor = resolvers.find_device_on_track(kernel, idx, "Compressor")
163
- if compressor:
164
- steps.append(CompiledStep(
165
- tool="set_device_parameter",
166
- params={
167
- "track_index": idx,
168
- "device_index": compressor["device_index"],
169
- "parameter_index": 0,
170
- "value": 0.2,
171
- },
172
- description=f"Faster Compressor attack on {name} for snap",
173
- ))
174
- descriptions.append(f"Shape {name} compressor")
175
- else:
176
- warnings.append(f"No Compressor on {name} — using volume push for punch")
177
-
178
- # Push volume for transient punch regardless
141
+ # Push volume for transient punch
179
142
  steps.append(CompiledStep(
180
143
  tool="set_track_volume",
181
144
  params={"track_index": idx, "volume": 0.75},
@@ -8,31 +8,20 @@ from . import resolvers
8
8
 
9
9
 
10
10
  def _compile_increase_forward_motion(move: SemanticMove, kernel: dict) -> CompiledPlan:
11
- """Compile 'increase_forward_motion': rising filter + rhythm push."""
11
+ """Compile 'increase_forward_motion': rhythm push + reverb wash.
12
+
13
+ Device-parameter automation (rising filter sweep) was removed: targeting
14
+ device_index=0, parameter_index=0 without a resolver lookup hits "Device
15
+ On" on every Ableton device and would disable the first effect. Re-enable
16
+ once resolvers.find_device_parameter can locate a filter cutoff by name.
17
+ """
12
18
  steps = []
13
19
  descriptions = []
20
+ warnings = []
14
21
 
15
22
  melodic = resolvers.find_tracks_by_role(kernel, ["chords", "lead", "pad"])
16
23
  drums = resolvers.find_tracks_by_role(kernel, ["drums", "percussion"])
17
24
 
18
- for mt in melodic[:1]:
19
- steps.append(CompiledStep(
20
- tool="apply_automation_shape",
21
- params={
22
- "track_index": mt["index"],
23
- "clip_index": 0,
24
- "parameter_type": "device",
25
- "device_index": 0,
26
- "parameter_index": 0,
27
- "curve_type": "exponential",
28
- "start": 0.2,
29
- "end": 0.7,
30
- "duration": 4,
31
- },
32
- description=f"Rising filter sweep on {mt['name']} over 4 bars",
33
- ))
34
- descriptions.append(f"Rising filter on {mt['name']}")
35
-
36
25
  for dt in drums[:1]:
37
26
  steps.append(CompiledStep(
38
27
  tool="set_track_volume",
@@ -49,6 +38,9 @@ def _compile_increase_forward_motion(move: SemanticMove, kernel: dict) -> Compil
49
38
  ))
50
39
  descriptions.append(f"Reverb wash on {mt['name']}")
51
40
 
41
+ if not drums and not melodic:
42
+ warnings.append("No drum or melodic tracks — cannot build forward motion")
43
+
52
44
  steps.append(CompiledStep(
53
45
  tool="get_track_meters",
54
46
  params={"include_stereo": True},
@@ -62,6 +54,7 @@ def _compile_increase_forward_motion(move: SemanticMove, kernel: dict) -> Compil
62
54
  risk_level="low",
63
55
  summary="; ".join(descriptions) if descriptions else "No melodic tracks for motion",
64
56
  requires_approval=(kernel.get("mode", "improve") != "explore"),
57
+ warnings=warnings,
65
58
  )
66
59
 
67
60
 
@@ -33,13 +33,76 @@ def _identify_port_holder(port: int) -> str | None:
33
33
  text=True, timeout=2,
34
34
  ).strip()
35
35
  return f"{pid} ({cmdline[:60]})"
36
- except (subprocess.CalledProcessError, FileNotFoundError):
36
+ except (subprocess.CalledProcessError,
37
+ subprocess.TimeoutExpired,
38
+ FileNotFoundError):
37
39
  return str(pid)
38
40
  return None
39
- except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
41
+ except (subprocess.CalledProcessError,
42
+ subprocess.TimeoutExpired,
43
+ FileNotFoundError,
44
+ ValueError):
45
+ # TimeoutExpired catches the busy-system case where lsof exceeds
46
+ # the 3-second budget; we treat it as "can't identify" and return
47
+ # None so startup never stalls for slow host diagnostics.
40
48
  return None
41
49
 
42
50
 
51
+ def _check_remote_script_version(ableton: AbletonConnection) -> None:
52
+ """BUG-A1: detect stale Remote Script installs at startup.
53
+
54
+ The installed Remote Script is loaded by Ableton at its own launch time
55
+ and cached in Python's module system — source-tree edits don't take
56
+ effect until the user reinstalls + restarts Live. When the installed
57
+ copy lags behind the MCP-server source, commands added after the install
58
+ date (e.g. ``insert_device`` in v1.10.6) return "Unknown command type".
59
+
60
+ This check pings the Remote Script, compares its reported version to
61
+ the MCP server version, and logs a loud warning on mismatch. We don't
62
+ abort — the server should still work for whatever handlers the older
63
+ Remote Script does support — but we make the drift visible.
64
+ """
65
+ import sys
66
+
67
+ try:
68
+ from . import __version__ as mcp_version
69
+ except ImportError:
70
+ mcp_version = "unknown"
71
+
72
+ try:
73
+ pong = ableton.send_command("ping")
74
+ except Exception as exc:
75
+ import logging as _logging
76
+ _logging.getLogger(__name__).debug(
77
+ "Remote Script version check failed: %s", exc,
78
+ )
79
+ return
80
+
81
+ if not isinstance(pong, dict):
82
+ return
83
+ rs_version = pong.get("remote_script_version")
84
+ if rs_version is None:
85
+ # Remote Script is old enough that it doesn't even embed its version
86
+ # in ping responses — definitely stale.
87
+ msg = (
88
+ "LivePilot: Remote Script is out of date (pre-version-handshake). "
89
+ "Run 'npx livepilot --install' and restart Ableton Live to fix "
90
+ "'Unknown command type' errors for newer tools (insert_device, "
91
+ "set_clip_pitch, etc)."
92
+ )
93
+ print(msg, file=sys.stderr)
94
+ return
95
+
96
+ if str(rs_version) != str(mcp_version):
97
+ msg = (
98
+ f"LivePilot: Remote Script version {rs_version} does not match "
99
+ f"MCP server version {mcp_version}. Newer tools may fail with "
100
+ f"'Unknown command type'. Run 'npx livepilot --install' and "
101
+ f"restart Ableton Live to resync."
102
+ )
103
+ print(msg, file=sys.stderr)
104
+
105
+
43
106
  def _master_has_livepilot_analyzer(ableton: AbletonConnection) -> bool:
44
107
  """Check whether the analyzer device is currently on the master track."""
45
108
  try:
@@ -128,6 +191,9 @@ async def lifespan(server):
128
191
  }
129
192
 
130
193
  try:
194
+ # BUG-A1: detect stale Remote Script installs early so the user
195
+ # sees a clear message instead of cryptic "Unknown command type" errors.
196
+ _check_remote_script_version(ableton)
131
197
  if bridge_state["transport"] is not None:
132
198
  await _warm_analyzer_bridge(ableton, spectral)
133
199
  yield {
@@ -50,6 +50,9 @@ class SessionStory:
50
50
  """The narrative of the current session."""
51
51
 
52
52
  song_id: str = ""
53
+ # BUG-B16: link back to the SongBrain snapshot that generated the
54
+ # identity_summary so callers can tell which brain was used.
55
+ song_brain_id: str = ""
53
56
  identity_summary: str = ""
54
57
  what_changed_last: str = ""
55
58
  what_still_feels_open: list[str] = field(default_factory=list)
@@ -60,6 +63,7 @@ class SessionStory:
60
63
  def to_dict(self) -> dict:
61
64
  return {
62
65
  "song_id": self.song_id,
66
+ "song_brain_id": self.song_brain_id,
63
67
  "identity_summary": self.identity_summary,
64
68
  "what_changed_last": self.what_changed_last,
65
69
  "what_still_feels_open": self.what_still_feels_open,
@@ -50,10 +50,23 @@ def reset_story() -> None:
50
50
  def get_session_story(
51
51
  song_brain: Optional[dict] = None,
52
52
  ) -> SessionStory:
53
- """Get the current session story with identity summary."""
53
+ """Get the current session story with identity summary.
54
+
55
+ BUG-B16: now also populates song_brain_id from the passed brain so
56
+ callers can tell which brain generated the identity_summary.
57
+ Previously the field was empty and users got a half-populated
58
+ response that read as "something's wrong" even though the partial
59
+ data was correct for a fresh session.
60
+ """
54
61
  song_brain = song_brain or {}
55
62
 
56
63
  _story.identity_summary = song_brain.get("identity_core", "")
64
+ _story.song_brain_id = str(song_brain.get("brain_id", "") or "")
65
+ # Carry song_id through when present on the brain — fresh sessions
66
+ # leave this empty, which is documented below.
67
+ if not _story.song_id and song_brain.get("song_id"):
68
+ _story.song_id = str(song_brain.get("song_id"))
69
+
57
70
  _story.threads = [t for t in _threads.values() if t.status == "open"]
58
71
  _story.turns = _turns
59
72
  _story.what_still_feels_open = [