livepilot 1.9.21 → 1.9.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcpbignore +40 -0
  3. package/AGENTS.md +2 -2
  4. package/CHANGELOG.md +47 -0
  5. package/CONTRIBUTING.md +1 -1
  6. package/README.md +47 -72
  7. package/bin/livepilot.js +135 -0
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
  11. package/livepilot/commands/arrange.md +42 -14
  12. package/livepilot/commands/beat.md +68 -21
  13. package/livepilot/commands/evaluate.md +23 -13
  14. package/livepilot/commands/mix.md +35 -11
  15. package/livepilot/commands/perform.md +31 -19
  16. package/livepilot/commands/sounddesign.md +38 -17
  17. package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
  18. package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
  19. package/livepilot/skills/livepilot-core/SKILL.md +60 -4
  20. package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
  21. package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
  22. package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
  23. package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
  24. package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
  25. package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
  26. package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
  27. package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
  28. package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
  29. package/livepilot/skills/livepilot-core/references/overview.md +4 -4
  30. package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
  31. package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
  32. package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
  33. package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
  34. package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
  35. package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
  36. package/livepilot/skills/livepilot-release/SKILL.md +15 -15
  37. package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
  38. package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
  39. package/livepilot.mcpb +0 -0
  40. package/m4l_device/livepilot_bridge.js +1 -1
  41. package/manifest.json +91 -0
  42. package/mcp_server/__init__.py +1 -1
  43. package/mcp_server/creative_constraints/__init__.py +6 -0
  44. package/mcp_server/creative_constraints/engine.py +277 -0
  45. package/mcp_server/creative_constraints/models.py +75 -0
  46. package/mcp_server/creative_constraints/tools.py +341 -0
  47. package/mcp_server/experiment/__init__.py +6 -0
  48. package/mcp_server/experiment/engine.py +213 -0
  49. package/mcp_server/experiment/models.py +120 -0
  50. package/mcp_server/experiment/tools.py +263 -0
  51. package/mcp_server/hook_hunter/__init__.py +5 -0
  52. package/mcp_server/hook_hunter/analyzer.py +342 -0
  53. package/mcp_server/hook_hunter/models.py +57 -0
  54. package/mcp_server/hook_hunter/tools.py +586 -0
  55. package/mcp_server/memory/taste_graph.py +261 -0
  56. package/mcp_server/memory/tools.py +88 -0
  57. package/mcp_server/mix_engine/critics.py +2 -2
  58. package/mcp_server/mix_engine/models.py +1 -1
  59. package/mcp_server/mix_engine/state_builder.py +2 -2
  60. package/mcp_server/musical_intelligence/__init__.py +8 -0
  61. package/mcp_server/musical_intelligence/detectors.py +421 -0
  62. package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
  63. package/mcp_server/musical_intelligence/tools.py +221 -0
  64. package/mcp_server/preview_studio/__init__.py +5 -0
  65. package/mcp_server/preview_studio/engine.py +280 -0
  66. package/mcp_server/preview_studio/models.py +73 -0
  67. package/mcp_server/preview_studio/tools.py +423 -0
  68. package/mcp_server/runtime/session_kernel.py +96 -0
  69. package/mcp_server/runtime/tools.py +90 -1
  70. package/mcp_server/semantic_moves/__init__.py +13 -0
  71. package/mcp_server/semantic_moves/compiler.py +116 -0
  72. package/mcp_server/semantic_moves/mix_compilers.py +291 -0
  73. package/mcp_server/semantic_moves/mix_moves.py +157 -0
  74. package/mcp_server/semantic_moves/models.py +46 -0
  75. package/mcp_server/semantic_moves/performance_compilers.py +208 -0
  76. package/mcp_server/semantic_moves/performance_moves.py +81 -0
  77. package/mcp_server/semantic_moves/registry.py +32 -0
  78. package/mcp_server/semantic_moves/resolvers.py +126 -0
  79. package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
  80. package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
  81. package/mcp_server/semantic_moves/tools.py +204 -0
  82. package/mcp_server/semantic_moves/transition_compilers.py +222 -0
  83. package/mcp_server/semantic_moves/transition_moves.py +76 -0
  84. package/mcp_server/server.py +10 -0
  85. package/mcp_server/session_continuity/__init__.py +6 -0
  86. package/mcp_server/session_continuity/models.py +86 -0
  87. package/mcp_server/session_continuity/tools.py +230 -0
  88. package/mcp_server/session_continuity/tracker.py +235 -0
  89. package/mcp_server/song_brain/__init__.py +6 -0
  90. package/mcp_server/song_brain/builder.py +477 -0
  91. package/mcp_server/song_brain/models.py +132 -0
  92. package/mcp_server/song_brain/tools.py +294 -0
  93. package/mcp_server/stuckness_detector/__init__.py +5 -0
  94. package/mcp_server/stuckness_detector/detector.py +400 -0
  95. package/mcp_server/stuckness_detector/models.py +66 -0
  96. package/mcp_server/stuckness_detector/tools.py +195 -0
  97. package/mcp_server/tools/_conductor.py +104 -6
  98. package/mcp_server/tools/analyzer.py +1 -1
  99. package/mcp_server/tools/devices.py +34 -0
  100. package/mcp_server/wonder_mode/__init__.py +6 -0
  101. package/mcp_server/wonder_mode/diagnosis.py +84 -0
  102. package/mcp_server/wonder_mode/engine.py +493 -0
  103. package/mcp_server/wonder_mode/session.py +114 -0
  104. package/mcp_server/wonder_mode/tools.py +285 -0
  105. package/package.json +2 -2
  106. package/remote_script/LivePilot/__init__.py +1 -1
  107. package/remote_script/LivePilot/browser.py +4 -1
  108. package/remote_script/LivePilot/devices.py +29 -0
  109. package/remote_script/LivePilot/tracks.py +11 -4
  110. package/scripts/generate_tool_catalog.py +131 -0
@@ -0,0 +1,586 @@
1
+ """Hook Hunter MCP tools — 9 tools for hook and phrase intelligence.
2
+
3
+ find_primary_hook — detect the most salient hook in the session
4
+ rank_hook_candidates — list and rank all hook candidates
5
+ develop_hook — suggest development strategies for a hook
6
+ measure_hook_salience — score a specific hook's salience
7
+ score_phrase_impact — score a section's emotional landing
8
+ detect_payoff_failure — find where the song should deliver but doesn't
9
+ suggest_payoff_repair — generate repair strategies for payoff failures
10
+ detect_hook_neglect — check if a strong hook is underused across sections
11
+ compare_phrase_impact — compare emotional impact across multiple sections
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from fastmcp import Context
17
+
18
+ from ..server import mcp
19
+ from . import analyzer
20
+
21
+
22
+ def _get_ableton(ctx: Context):
23
+ return ctx.lifespan_context["ableton"]
24
+
25
+
26
def _fetch_tracks_and_scenes(ctx: Context) -> tuple[list[dict], list[dict], dict]:
    """Fetch tracks, scenes, and motif data from Ableton.

    Motif data comes from the motif engine (get_motif_graph). When available,
    it enables the analyzer's strongest path: motif recurrence and salience
    scoring. Every fetch here is best-effort — any bridge failure leaves that
    piece at its empty default, and callers fall back to track-name +
    clip-reuse heuristics (e.g. when clips have no MIDI notes).
    """
    bridge = _get_ableton(ctx)

    tracks: list[dict] = []
    try:
        session_info = bridge.send_command("get_session_info", {})
        tracks = session_info.get("tracks", [])
    except Exception:
        pass

    scenes: list[dict] = []
    try:
        matrix = bridge.send_command("get_scene_matrix")
        # Build into a local first so a mid-build failure leaves `scenes` empty.
        collected: list[dict] = []
        paired = zip(matrix.get("scenes", []), matrix.get("matrix", []))
        for idx, (meta, clip_row) in enumerate(paired):
            collected.append({"name": meta.get("name", f"Scene {idx}"), "clips": clip_row})
        scenes = collected
    except Exception:
        pass

    motif_data: dict = {}
    try:
        motif_data = bridge.send_command("get_motif_graph")
    except Exception:
        pass  # Motif graph requires notes in clips; empty dict is valid fallback

    return tracks, scenes, motif_data
63
+
64
+
65
@mcp.tool()
def find_primary_hook(ctx: Context) -> dict:
    """Find the most salient hook in the current session.

    Analyzes melodic motifs, distinctive rhythmic cells, and signature
    textures to identify what the track is most "about."

    Returns the primary hook with salience scores, or a note if no
    clear hook is detected.
    """
    tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
    primary = analyzer.find_primary_hook(tracks, motif_data, scenes)

    # Guard clause: no detectable hook yet — point the user at next steps.
    if not primary:
        return {
            "found": False,
            "note": "No clear primary hook detected — consider developing a defining element",
            "suggestion": "Try creating a memorable melodic phrase, distinctive rhythm, or signature texture",
        }

    result = {"found": True}
    result.update(primary.to_dict())
    return result
89
+
90
+
91
@mcp.tool()
def rank_hook_candidates(ctx: Context, limit: int = 5) -> dict:
    """List and rank all hook candidates in the session.

    Candidates come back sorted by salience — a composite of memorability,
    recurrence, contrast potential, and development potential.

    limit: max candidates to return (default 5)
    """
    tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
    ranked = analyzer.find_hook_candidates(tracks, motif_data, scenes)
    visible = ranked[:limit]

    serialized = []
    for candidate in visible:
        serialized.append(candidate.to_dict())

    return {
        "candidates": serialized,
        "total_found": len(ranked),
        "shown": len(visible),
    }
110
+
111
+
112
@mcp.tool()
def develop_hook(
    ctx: Context,
    hook_id: str = "",
    mode: str = "chorus",
) -> dict:
    """Suggest development strategies for a hook.

    hook_id: the hook to develop (from rank_hook_candidates).
             If provided, strategies are adapted to the hook's type
             (melodic, rhythmic, timbral, harmonic, textural).
    mode: development style — "chorus" (lift/strengthen), "variation"
          (melodic variation), "counterline" (complementary line),
          "breakdown" (stripped version), "fill" (ornamental version)

    Returns development strategies with musical explanations, or an
    error dict listing the available modes when `mode` is unknown.
    """
    # Validate mode up front so an invalid request never pays for the
    # Ableton round-trip and candidate analysis below. The tuple order
    # matches the strategies dict so the error payload is unchanged.
    _modes = ("chorus", "variation", "counterline", "breakdown", "fill")
    if mode not in _modes:
        return {
            "error": f"Unknown mode: {mode}",
            "available_modes": list(_modes),
        }

    # Look up the actual hook to adapt strategies by type
    hook_type = "melodic"  # default when no hook_id given or not found
    hook_description = "the hook"
    if hook_id:
        tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
        candidates = analyzer.find_hook_candidates(tracks, motif_data, scenes)
        match = [c for c in candidates if c.hook_id == hook_id]
        if match:
            hook_type = match[0].hook_type
            hook_description = match[0].description

    # Type-specific focus areas used to phrase the tactics below.
    _type_focus = {
        "melodic": {"dimension": "melodic contour and pitch", "double": "octave or harmony", "strip": "melodic core", "ornament": "grace notes and embellishments"},
        "rhythmic": {"dimension": "rhythmic pattern and groove", "double": "layered percussion or polyrhythm", "strip": "rhythmic skeleton", "ornament": "ghost notes and syncopation"},
        "timbral": {"dimension": "timbre and texture", "double": "parallel processing or layered timbres", "strip": "raw unprocessed sound", "ornament": "modulation and movement"},
        "harmonic": {"dimension": "harmonic movement and voicing", "double": "extended voicings or inversions", "strip": "root notes only", "ornament": "passing tones and suspensions"},
        "textural": {"dimension": "spatial and textural quality", "double": "stereo widening or reverb layers", "strip": "dry mono version", "ornament": "granular or delay effects"},
    }
    focus = _type_focus.get(hook_type, _type_focus["melodic"])

    strategies = {
        "chorus": {
            "approach": f"Lift and strengthen the {hook_type} hook for maximum impact",
            "tactics": [
                f"Double {hook_description} with {focus['double']}",
                f"Add supporting harmonic movement underneath the {focus['dimension']}",
                f"Increase rhythmic density around {hook_description}",
                f"Layer complementary textures that frame the {focus['dimension']}",
            ],
            "identity_effect": "preserves — amplifies the core idea",
        },
        "variation": {
            "approach": f"Create {hook_type} variations of {hook_description}",
            "tactics": [
                f"Transpose or shift the {focus['dimension']} to a different register",
                f"Invert or retrograde the {focus['dimension']}",
                "Apply rhythmic displacement (shift by 1/8 or 1/16)",
                f"Fragment {hook_description} — use only the first half or last half",
            ],
            "identity_effect": "evolves — develops the idea further",
        },
        "counterline": {
            "approach": f"Write a complementary line that dialogues with the {hook_type} hook",
            "tactics": [
                f"Use contrary motion against the {focus['dimension']}",
                f"Fill rhythmic gaps in {hook_description} with the counterline",
                "Match the harmonic context but use different intervals or timbre",
                f"Use a contrasting {hook_type} character to distinguish the counter",
            ],
            "identity_effect": "evolves — adds depth without replacing the core",
        },
        "breakdown": {
            "approach": f"Create a stripped-down version of {hook_description} for contrast",
            "tactics": [
                f"Isolate the {focus['strip']} — remove everything else",
                "Use a different instrument/timbre for the stripped version",
                "Slow down or halve the rhythmic density",
                "Add space and reverb to create distance",
            ],
            "identity_effect": "preserves — the hook is still recognizable in reduced form",
        },
        "fill": {
            "approach": f"Create ornamental variations of {hook_description} for transitions",
            "tactics": [
                f"Add {focus['ornament']}",
                "Create a call-and-response pattern",
                f"Use the hook's rhythm with new {focus['dimension']} material",
                f"Build a riser or fill from {hook_description} fragments",
            ],
            "identity_effect": "evolves — decorates without replacing",
        },
    }

    strategy = strategies[mode]
    return {
        "hook_id": hook_id,
        "hook_type": hook_type,
        "hook_description": hook_description,
        "mode": mode,
        **strategy,
    }
217
+
218
+
219
@mcp.tool()
def measure_hook_salience(ctx: Context, hook_id: str = "") -> dict:
    """Measure the salience of a specific hook or the primary hook.

    Returns detailed scores for memorability, recurrence, contrast
    potential, and development potential.
    """
    tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)
    candidates = analyzer.find_hook_candidates(tracks, motif_data, scenes)

    if hook_id:
        # Look for an exact hook_id match among the ranked candidates.
        hook = next((c for c in candidates if c.hook_id == hook_id), None)
        if hook is None:
            return {
                "error": f"Hook {hook_id} not found",
                "available_hooks": [c.hook_id for c in candidates[:5]],
            }
    elif candidates:
        hook = candidates[0]
    else:
        return {"error": "No hooks detected in the session"}

    payload = dict(hook.to_dict())
    payload["interpretation"] = _interpret_salience(hook)
    return payload
246
+
247
+
248
@mcp.tool()
def score_phrase_impact(
    ctx: Context,
    section_index: int = 0,
    target: str = "hook",
) -> dict:
    """Score a section's emotional impact as a musical phrase.

    Evaluates arrival strength, anticipation, contrast, groove
    continuity, and payoff balance. Phrase-level judgment outranks
    parameter-only evaluation for arrangement and transition decisions.

    section_index: which section/scene to evaluate (0-based)
    target: what it should function as — "hook", "drop", "chorus",
            "transition", or "loop"

    Returns the impact scores as a dict, or an error dict when
    section_index is out of range.
    """
    ableton = _get_ableton(ctx)

    # Build section data from scenes
    sections = _get_section_data(ableton)
    # Reject negative indices too: Python would silently index from the
    # end while prev_section logic assumes a non-negative position.
    if section_index < 0 or section_index >= len(sections):
        return {"error": f"Section index {section_index} out of range (have {len(sections)})"}

    section = sections[section_index]
    prev_section = sections[section_index - 1] if section_index > 0 else {}

    # Get song brain for context
    song_brain = _get_song_brain_dict()

    impact = analyzer.score_phrase_impact(section, target, song_brain, prev_section)
    return impact.to_dict()
279
+
280
+
281
@mcp.tool()
def detect_payoff_failure(ctx: Context) -> dict:
    """Detect where the song should deliver a payoff but doesn't.

    Checks chorus, drop, and hook sections for flat arrivals,
    weak contrast, missing setups, and absent hooks.

    Returns failures with severity and repair suggestions.
    """
    bridge = _get_ableton(ctx)
    sections = _get_section_data(bridge)
    brain = _get_song_brain_dict()

    failures = analyzer.detect_payoff_failures(sections, brain)

    # Health verdict: clean / a couple of issues / many issues.
    if not failures:
        health = "healthy"
    elif len(failures) <= 2:
        health = "needs_attention"
    else:
        health = "significant_issues"

    return {
        "failures": [failure.to_dict() for failure in failures],
        "failure_count": len(failures),
        "overall_health": health,
    }
303
+
304
+
305
@mcp.tool()
def suggest_payoff_repair(ctx: Context) -> dict:
    """Generate repair strategies for detected payoff failures.

    Runs payoff detection first, then suggests specific fixes
    for each failure.
    """
    bridge = _get_ableton(ctx)
    sections = _get_section_data(bridge)
    brain = _get_song_brain_dict()

    failures = analyzer.detect_payoff_failures(sections, brain)

    # Nothing to repair — report success instead of an empty list.
    if not failures:
        return {"note": "No payoff failures detected — the song delivers where expected"}

    repairs = analyzer.suggest_payoff_repairs(failures)
    return {"repairs": repairs, "repair_count": len(repairs)}
325
+
326
+
327
+ # ── Helpers ───────────────────────────────────────────────────────
328
+
329
+
330
+ def _get_section_data(ableton) -> list[dict]:
331
+ """Build section data from Ableton scenes with real energy/density/has_drums."""
332
+ sections: list[dict] = []
333
+ try:
334
+ matrix = ableton.send_command("get_scene_matrix")
335
+ scenes_list = matrix.get("scenes", [])
336
+ matrix_rows = matrix.get("matrix", [])
337
+
338
+ # Detect drum track indices by name
339
+ drum_keywords = {"drum", "beat", "kick", "hat", "perc", "snare"}
340
+ track_names = []
341
+ # tracks may be in matrix metadata or session_info
342
+ for ti, row_entry in enumerate(matrix_rows[0] if matrix_rows else []):
343
+ track_names.append("") # placeholder — we'll use scenes_list tracks if available
344
+ # Use scene matrix track info if available
345
+ track_info = matrix.get("tracks", [])
346
+ drum_indices = set()
347
+ for ti, track in enumerate(track_info):
348
+ name_lower = track.get("name", "").lower() if isinstance(track, dict) else ""
349
+ if any(kw in name_lower for kw in drum_keywords):
350
+ drum_indices.add(ti)
351
+
352
+ for i, scene in enumerate(scenes_list):
353
+ row = matrix_rows[i] if i < len(matrix_rows) else []
354
+ if not isinstance(row, list):
355
+ row = []
356
+ clip_count = sum(1 for c in row if c)
357
+ total_tracks = max(len(row), 1)
358
+
359
+ # has_drums: check if any drum track has a clip in this scene
360
+ has_drums = any(
361
+ di < len(row) and row[di]
362
+ for di in drum_indices
363
+ ) if drum_indices else False
364
+
365
+ density = min(1.0, clip_count / total_tracks)
366
+ # energy: density + drum bonus
367
+ energy = min(1.0, density + (0.1 if has_drums else 0.0))
368
+
369
+ sections.append({
370
+ "id": f"scene_{i}",
371
+ "name": scene.get("name", f"Scene {i}"),
372
+ "label": scene.get("name", "").lower(),
373
+ "energy": round(energy, 3),
374
+ "density": round(density, 3),
375
+ "has_drums": has_drums,
376
+ })
377
+ except Exception:
378
+ pass
379
+
380
+ return sections
381
+
382
+
383
+ def _get_song_brain_dict() -> dict:
384
+ """Get current SongBrain as dict, or empty dict."""
385
+ try:
386
+ from ..song_brain.tools import _current_brain
387
+ if _current_brain is not None:
388
+ return _current_brain.to_dict()
389
+ except Exception as _e:
390
+ if __debug__:
391
+ import sys
392
+ print(f"LivePilot: SongBrain unavailable in hook_hunter: {_e}", file=sys.stderr)
393
+ return {}
394
+
395
+
396
@mcp.tool()
def detect_hook_neglect(ctx: Context) -> dict:
    """Detect if a strong hook exists but is underused across sections.

    Checks whether the primary hook appears in enough sections to
    create adequate repetition and memorability. A hook that only
    appears in one section is "neglected" — it needs to recur.

    Returns neglect analysis with underused sections and suggestions.

    Fix vs. previous version: the eligible-section denominator used to be
    len(scenes) - 1 unconditionally, even though the first scene is only
    skipped when its name contains "intro" — so without an intro scene the
    presence ratio could exceed 1.0. Eligible scenes are now counted in
    the loop itself.
    """
    tracks, scenes, motif_data = _fetch_tracks_and_scenes(ctx)

    hook = analyzer.find_primary_hook(tracks, motif_data, scenes)
    if not hook:
        return {
            "neglected": False,
            "note": "No primary hook detected — hook neglect N/A",
            "suggestion": "Create a defining hook before checking for neglect",
        }

    # Check per-track hook presence across scenes using scene matrix
    hook_location = hook.location if hook.location else ""
    ableton = _get_ableton(ctx)

    try:
        matrix = ableton.send_command("get_scene_matrix")
    except Exception:
        return {
            "neglected": False,
            "hook": hook.to_dict(),
            "note": "Could not fetch scene matrix to assess neglect",
        }

    scenes_list = matrix.get("scenes", [])
    matrix_rows = matrix.get("matrix", [])
    track_info = matrix.get("tracks", [])

    if not scenes_list or not hook_location:
        return {
            "neglected": False,
            "hook": hook.to_dict(),
            "note": "Insufficient section data to assess neglect",
        }

    # Find the hook's track index by matching location to track names
    hook_track_idx = None
    hook_loc_lower = hook_location.lower()
    for ti, track in enumerate(track_info):
        track_name = track.get("name", "") if isinstance(track, dict) else ""
        if track_name.lower() == hook_loc_lower or hook_loc_lower in track_name.lower():
            hook_track_idx = ti
            break

    if hook_track_idx is None:
        # Fallback: can't find the track, use density proxy
        sections = _get_section_data(ableton)
        present_count = sum(1 for s in sections if s.get("density", 0) > 0.3)
        total = max(len(sections), 1)
        return {
            "neglected": present_count / total < 0.5 and hook.salience > 0.3,
            "hook": hook.to_dict(),
            "presence_ratio": round(present_count / total, 2),
            "note": f"Could not find track '{hook_location}' — used density fallback",
        }

    # Check each scene for hook track clip presence
    present_count = 0
    eligible_count = 0
    absent_sections = []
    for i, scene in enumerate(scenes_list):
        scene_name = scene.get("name", f"Scene {i}")
        # Skip intro — hook absence there is normal
        if i == 0 and "intro" in scene_name.lower():
            continue
        eligible_count += 1

        row = matrix_rows[i] if i < len(matrix_rows) else []
        if isinstance(row, list) and hook_track_idx < len(row) and row[hook_track_idx]:
            present_count += 1
        else:
            absent_sections.append(scene_name)

    # Denominator counts only scenes actually considered above.
    total_eligible = max(eligible_count, 1)
    presence_ratio = present_count / total_eligible

    neglected = presence_ratio < 0.5 and hook.salience > 0.3

    return {
        "neglected": neglected,
        "hook": hook.to_dict(),
        "hook_track": hook_location,
        "hook_track_index": hook_track_idx,
        "presence_ratio": round(presence_ratio, 2),
        "present_in_sections": present_count,
        "absent_from": absent_sections,
        "suggestion": (
            f"The hook ({hook.description}) on track '{hook_location}' only has clips in "
            f"{presence_ratio:.0%} of sections. Consider adding variations in: {', '.join(absent_sections)}"
        ) if neglected else "Hook track has clips in most sections — well-distributed",
    }
494
+
495
+
496
@mcp.tool()
def compare_phrase_impact(
    ctx: Context,
    section_indices: list[int] | None = None,
    target: str = "hook",
) -> dict:
    """Compare phrase-level emotional impact across multiple sections.

    Runs score_phrase_impact for each section and returns a ranked
    comparison with delta analysis between the strongest and weakest.

    section_indices: list of 0-based section indices to compare
    target: what the sections should function as — "hook", "drop",
            "chorus", "transition", or "loop"
    """
    if not section_indices or len(section_indices) < 2:
        return {"error": "Provide at least 2 section_indices to compare"}

    ableton = _get_ableton(ctx)
    sections = _get_section_data(ableton)
    song_brain = _get_song_brain_dict()

    results = []
    for idx in section_indices:
        # Reject negative indices too: Python would silently index from the
        # end while the prev_section lookup assumes a non-negative position.
        if idx < 0 or idx >= len(sections):
            results.append({
                "section_index": idx,
                "error": f"Index {idx} out of range (have {len(sections)} sections)",
            })
            continue

        section = sections[idx]
        prev_section = sections[idx - 1] if idx > 0 else {}
        impact = analyzer.score_phrase_impact(section, target, song_brain, prev_section)
        results.append({
            "section_index": idx,
            "section_name": section.get("name", f"Section {idx}"),
            **impact.to_dict(),
        })

    # Rank by composite impact (error entries carry no score and drop out)
    valid = [r for r in results if "composite_impact" in r]
    valid.sort(key=lambda r: r.get("composite_impact", 0), reverse=True)

    # Delta analysis between best and worst
    delta = {}
    if len(valid) >= 2:
        best, worst = valid[0], valid[-1]
        delta = {
            "strongest": best["section_name"],
            "weakest": worst["section_name"],
            "composite_delta": round(
                best.get("composite_impact", 0) - worst.get("composite_impact", 0), 3
            ),
            "biggest_gap_dimension": _find_biggest_gap(best, worst),
        }

    return {
        "target": target,
        "rankings": valid,
        "delta_analysis": delta,
        "section_count": len(section_indices),
    }
559
+
560
+
561
+ def _find_biggest_gap(best: dict, worst: dict) -> str:
562
+ """Find which impact dimension has the biggest gap between best and worst."""
563
+ dimensions = [
564
+ "arrival_strength", "anticipation_strength", "contrast_quality",
565
+ "groove_continuity", "payoff_balance", "section_clarity",
566
+ ]
567
+ max_gap = 0.0
568
+ max_dim = ""
569
+ for dim in dimensions:
570
+ gap = abs(best.get(dim, 0) - worst.get(dim, 0))
571
+ if gap > max_gap:
572
+ max_gap = gap
573
+ max_dim = dim
574
+ return max_dim
575
+
576
+
577
+ def _interpret_salience(hook) -> str:
578
+ """Human-readable interpretation of salience score."""
579
+ if hook.salience > 0.7:
580
+ return "Strong hook — this is clearly the track's defining element"
581
+ elif hook.salience > 0.4:
582
+ return "Moderate hook — recognizable but could be developed further"
583
+ elif hook.salience > 0.2:
584
+ return "Emerging hook — has potential but needs more prominence"
585
+ else:
586
+ return "Weak hook candidate — consider strengthening or replacing"