livepilot 1.9.13 → 1.9.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +51 -0
  4. package/CONTRIBUTING.md +1 -1
  5. package/README.md +7 -7
  6. package/bin/livepilot.js +32 -8
  7. package/installer/install.js +21 -2
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
  11. package/livepilot/skills/livepilot-core/SKILL.md +81 -6
  12. package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
  13. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  14. package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
  15. package/livepilot/skills/livepilot-release/SKILL.md +13 -13
  16. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  17. package/m4l_device/livepilot_bridge.js +6 -3
  18. package/mcp_server/__init__.py +1 -1
  19. package/mcp_server/curves.py +11 -3
  20. package/mcp_server/evaluation/__init__.py +1 -0
  21. package/mcp_server/evaluation/fabric.py +575 -0
  22. package/mcp_server/evaluation/feature_extractors.py +84 -0
  23. package/mcp_server/evaluation/policy.py +67 -0
  24. package/mcp_server/evaluation/tools.py +53 -0
  25. package/mcp_server/memory/__init__.py +11 -2
  26. package/mcp_server/memory/anti_memory.py +78 -0
  27. package/mcp_server/memory/promotion.py +94 -0
  28. package/mcp_server/memory/session_memory.py +108 -0
  29. package/mcp_server/memory/taste_memory.py +158 -0
  30. package/mcp_server/memory/technique_store.py +2 -1
  31. package/mcp_server/memory/tools.py +112 -0
  32. package/mcp_server/mix_engine/__init__.py +1 -0
  33. package/mcp_server/mix_engine/critics.py +299 -0
  34. package/mcp_server/mix_engine/models.py +152 -0
  35. package/mcp_server/mix_engine/planner.py +103 -0
  36. package/mcp_server/mix_engine/state_builder.py +316 -0
  37. package/mcp_server/mix_engine/tools.py +214 -0
  38. package/mcp_server/performance_engine/__init__.py +1 -0
  39. package/mcp_server/performance_engine/models.py +148 -0
  40. package/mcp_server/performance_engine/planner.py +267 -0
  41. package/mcp_server/performance_engine/safety.py +162 -0
  42. package/mcp_server/performance_engine/tools.py +183 -0
  43. package/mcp_server/project_brain/__init__.py +6 -0
  44. package/mcp_server/project_brain/arrangement_graph.py +64 -0
  45. package/mcp_server/project_brain/automation_graph.py +72 -0
  46. package/mcp_server/project_brain/builder.py +123 -0
  47. package/mcp_server/project_brain/capability_graph.py +64 -0
  48. package/mcp_server/project_brain/models.py +282 -0
  49. package/mcp_server/project_brain/refresh.py +80 -0
  50. package/mcp_server/project_brain/role_graph.py +103 -0
  51. package/mcp_server/project_brain/session_graph.py +51 -0
  52. package/mcp_server/project_brain/tools.py +144 -0
  53. package/mcp_server/reference_engine/__init__.py +1 -0
  54. package/mcp_server/reference_engine/gap_analyzer.py +239 -0
  55. package/mcp_server/reference_engine/models.py +105 -0
  56. package/mcp_server/reference_engine/profile_builder.py +149 -0
  57. package/mcp_server/reference_engine/tactic_router.py +117 -0
  58. package/mcp_server/reference_engine/tools.py +235 -0
  59. package/mcp_server/runtime/__init__.py +1 -0
  60. package/mcp_server/runtime/action_ledger.py +117 -0
  61. package/mcp_server/runtime/action_ledger_models.py +84 -0
  62. package/mcp_server/runtime/action_tools.py +57 -0
  63. package/mcp_server/runtime/capability_state.py +218 -0
  64. package/mcp_server/runtime/safety_kernel.py +339 -0
  65. package/mcp_server/runtime/safety_tools.py +42 -0
  66. package/mcp_server/runtime/tools.py +64 -0
  67. package/mcp_server/server.py +23 -1
  68. package/mcp_server/sound_design/__init__.py +1 -0
  69. package/mcp_server/sound_design/critics.py +297 -0
  70. package/mcp_server/sound_design/models.py +147 -0
  71. package/mcp_server/sound_design/planner.py +104 -0
  72. package/mcp_server/sound_design/tools.py +297 -0
  73. package/mcp_server/tools/_agent_os_engine.py +947 -0
  74. package/mcp_server/tools/_composition_engine.py +1530 -0
  75. package/mcp_server/tools/_conductor.py +199 -0
  76. package/mcp_server/tools/_conductor_budgets.py +222 -0
  77. package/mcp_server/tools/_evaluation_contracts.py +91 -0
  78. package/mcp_server/tools/_form_engine.py +416 -0
  79. package/mcp_server/tools/_motif_engine.py +351 -0
  80. package/mcp_server/tools/_planner_engine.py +516 -0
  81. package/mcp_server/tools/_research_engine.py +542 -0
  82. package/mcp_server/tools/_research_provider.py +185 -0
  83. package/mcp_server/tools/_snapshot_normalizer.py +49 -0
  84. package/mcp_server/tools/agent_os.py +440 -0
  85. package/mcp_server/tools/analyzer.py +18 -0
  86. package/mcp_server/tools/automation.py +25 -10
  87. package/mcp_server/tools/composition.py +563 -0
  88. package/mcp_server/tools/motif.py +104 -0
  89. package/mcp_server/tools/planner.py +144 -0
  90. package/mcp_server/tools/research.py +223 -0
  91. package/mcp_server/tools/tracks.py +18 -3
  92. package/mcp_server/tools/transport.py +10 -2
  93. package/mcp_server/transition_engine/__init__.py +6 -0
  94. package/mcp_server/transition_engine/archetypes.py +167 -0
  95. package/mcp_server/transition_engine/critics.py +340 -0
  96. package/mcp_server/transition_engine/models.py +90 -0
  97. package/mcp_server/transition_engine/tools.py +291 -0
  98. package/mcp_server/translation_engine/__init__.py +5 -0
  99. package/mcp_server/translation_engine/critics.py +297 -0
  100. package/mcp_server/translation_engine/models.py +27 -0
  101. package/mcp_server/translation_engine/tools.py +74 -0
  102. package/package.json +2 -2
  103. package/remote_script/LivePilot/__init__.py +1 -1
  104. package/remote_script/LivePilot/arrangement.py +12 -2
  105. package/requirements.txt +1 -1
@@ -0,0 +1,1530 @@
1
+ """Composition Engine V1 — structural and musical intelligence for arrangement.
2
+
3
+ Pure-computation core: section inference, phrase boundary detection,
4
+ section-aware role assignment, composition critics, gesture planning,
5
+ and composition-specific evaluation.
6
+
7
+ Zero external dependencies beyond stdlib. The MCP tool wrappers in
8
+ composition.py handle data fetching; this module handles computation.
9
+
10
+ Design: spec at docs/COMPOSITION_ENGINE_V1.md, sections 7-15.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import math
16
+ import re
17
+ from dataclasses import asdict, dataclass, field
18
+ from enum import Enum
19
+ from typing import Any, Optional
20
+
21
+
22
+ # ── Enums ─────────────────────────────────────────────────────────────
23
+
24
class SectionType(str, Enum):
    """Closed set of arrangement section labels.

    Mixes in ``str`` so members compare equal to (and serialize as) their
    lowercase string values, e.g. in ``SectionNode.to_dict``.
    """
    LOOP = "loop"
    INTRO = "intro"
    VERSE = "verse"
    PRE_CHORUS = "pre_chorus"
    CHORUS = "chorus"
    BUILD = "build"
    DROP = "drop"
    BRIDGE = "bridge"
    BREAKDOWN = "breakdown"
    OUTRO = "outro"
    # Fallback when neither name patterns nor energy heuristics match.
    UNKNOWN = "unknown"
36
+
37
+
38
class RoleType(str, Enum):
    """Closed set of musical roles a track can play within a section.

    Mixes in ``str`` so members serialize as their string values
    (see ``RoleNode.to_dict``).
    """
    KICK_ANCHOR = "kick_anchor"
    BASS_ANCHOR = "bass_anchor"
    HOOK = "hook"
    LEAD = "lead"
    HARMONY_BED = "harmony_bed"
    RHYTHMIC_TEXTURE = "rhythmic_texture"
    TEXTURE_WASH = "texture_wash"
    TRANSITION_FX = "transition_fx"
    UTILITY = "utility"
    # Fallback when no name/device/note heuristic applies.
    UNKNOWN = "unknown"
49
+
50
+
51
class GestureIntent(str, Enum):
    """Abstract musical gestures that the planner maps to automation moves.

    Each member keys an entry in ``_GESTURE_MAPPINGS`` describing parameter
    hints, curve family, direction, and typical duration.
    """
    REVEAL = "reveal"
    CONCEAL = "conceal"
    HANDOFF = "handoff"
    INHALE = "inhale"
    RELEASE = "release"
    LIFT = "lift"
    SINK = "sink"
    PUNCTUATE = "punctuate"
    DRIFT = "drift"
61
+
62
+
63
+ # ── Section Graph ─────────────────────────────────────────────────────
64
+
65
# Patterns for inferring section type from scene/clip names.
# Applied via re.search on the lowercased name (see
# _infer_section_type_from_name), so these are SUBSTRING matches — e.g.
# "end" inside an unrelated word would classify as OUTRO. Order matters:
# the first row whose pattern matches wins, so "loop" is deliberately last.
_SECTION_NAME_PATTERNS: list[tuple[str, SectionType]] = [
    (r"intro", SectionType.INTRO),
    (r"verse|vrs", SectionType.VERSE),
    (r"pre[\s\-]?chorus", SectionType.PRE_CHORUS),
    (r"chorus|hook|chrs", SectionType.CHORUS),
    (r"build|riser|tension", SectionType.BUILD),
    (r"drop|main|peak", SectionType.DROP),
    (r"bridge|brg", SectionType.BRIDGE),
    (r"break(?:down)?|strip", SectionType.BREAKDOWN),
    (r"outro|end|fade", SectionType.OUTRO),
    (r"loop", SectionType.LOOP),
]
78
+
79
+
80
@dataclass
class SectionNode:
    """A section of the arrangement with inferred type and energy."""
    section_id: str
    start_bar: int
    end_bar: int
    section_type: SectionType
    confidence: float  # 0.0-1.0
    energy: float  # 0.0-1.0 (relative within the track)
    density: float  # 0.0-1.0 (how many tracks are active)
    tracks_active: list[int] = field(default_factory=list)
    name: str = ""

    def length_bars(self) -> int:
        """Section length in bars (end_bar is exclusive)."""
        return self.end_bar - self.start_bar

    def to_dict(self) -> dict:
        """Serialize to a plain dict, flattening the type enum to its
        string value and including the derived bar length."""
        payload = asdict(self)
        payload.update(
            section_type=self.section_type.value,
            length_bars=self.length_bars(),
        )
        return payload
101
+
102
+
103
def _infer_section_type_from_name(name: str) -> tuple[SectionType, float]:
    """Infer section type from a scene or clip name. Returns (type, confidence).

    First pattern in _SECTION_NAME_PATTERNS that matches the lowercased,
    stripped name wins; no match yields (UNKNOWN, 0.0).
    """
    needle = name.lower().strip()
    hit = next(
        (stype for pattern, stype in _SECTION_NAME_PATTERNS
         if re.search(pattern, needle)),
        None,
    )
    if hit is None:
        return SectionType.UNKNOWN, 0.0
    return hit, 0.85
110
+
111
+
112
def _infer_section_type_from_energy(
    energy: float, density: float, position_ratio: float, total_sections: int,
) -> tuple[SectionType, float]:
    """Infer section type from energy/density/position heuristics.

    Checks run in priority order: sparse sections at the track's edges
    first (intro/outro), then energy/density extremes (drop/breakdown),
    then a mid-energy verse fallback. Returns (type, confidence);
    (UNKNOWN, 0.0) when nothing applies.
    """
    sparse = density < 0.4

    # Position-based heuristics: quiet material at the edges of the track.
    if sparse and position_ratio < 0.1:
        return SectionType.INTRO, 0.6
    if sparse and position_ratio > 0.9:
        return SectionType.OUTRO, 0.6

    # Energy-based heuristics.
    if energy > 0.8 and density > 0.7:
        return SectionType.DROP, 0.5
    if energy < 0.3 and density < 0.3:
        return SectionType.BREAKDOWN, 0.5
    if 0.4 <= energy <= 0.7:
        return SectionType.VERSE, 0.4

    return SectionType.UNKNOWN, 0.0
131
+
132
+
133
def build_section_graph_from_scenes(
    scenes: list[dict],
    clip_matrix: list[list[dict]],
    track_count: int,
    beats_per_bar: int = 4,
) -> list[SectionNode]:
    """Build section graph from session view scenes.

    scenes: list of {index, name, tempo, color_index}
    clip_matrix: [scene_index][track_index] = {state, name, ...} or None

    Unnamed scenes are skipped. Each named scene becomes one SectionNode
    with an assumed fixed 8-bar length (clip lengths are not consulted).
    Track density doubles as the energy proxy for type inference.
    """
    sections: list[SectionNode] = []
    current_bar = 0

    # Hoisted out of the loop: number of named scenes.
    # Bug fix: position_ratio must be computed from the index among *named*
    # scenes. The previous code divided the raw enumerate index (over ALL
    # scenes) by the named-scene count, so skipped unnamed scenes let the
    # ratio drift past 1.0 and misclassified mid-track scenes as outros.
    total = sum(1 for s in scenes if s.get("name", "").strip())
    named_idx = 0

    for i, scene in enumerate(scenes):
        scene_name = scene.get("name", "")
        if not scene_name.strip():
            continue  # Skip unnamed empty scenes

        # Count active tracks in this scene.
        active_tracks = []
        if i < len(clip_matrix):
            for t_idx in range(min(track_count, len(clip_matrix[i]))):
                slot = clip_matrix[i][t_idx]
                if slot and slot.get("state") in ("playing", "stopped", "triggered"):
                    if slot.get("has_clip", True):
                        active_tracks.append(t_idx)

        density = len(active_tracks) / max(track_count, 1)

        # Estimate section length (default 32 beats = 8 bars).
        section_length_bars = 8
        start_bar = current_bar
        end_bar = start_bar + section_length_bars

        # Infer type from the scene name first; fall back to
        # energy/position heuristics when the name is uninformative.
        stype, confidence = _infer_section_type_from_name(scene_name)
        if stype == SectionType.UNKNOWN:
            position_ratio = named_idx / max(total - 1, 1) if total > 1 else 0.5
            stype, confidence = _infer_section_type_from_energy(
                energy=density, density=density,
                position_ratio=position_ratio, total_sections=total,
            )

        sections.append(SectionNode(
            section_id=f"sec_{i:02d}",
            start_bar=start_bar,
            end_bar=end_bar,
            section_type=stype,
            confidence=confidence,
            energy=density,  # density as energy proxy
            density=density,
            tracks_active=active_tracks,
            name=scene_name,
        ))
        current_bar = end_bar
        named_idx += 1

    return sections
194
+
195
+
196
def build_section_graph_from_arrangement(
    arrangement_clips: dict[int, list[dict]],
    track_count: int,
    beats_per_bar: int = 4,
) -> list[SectionNode]:
    """Build section graph from arrangement view clips.

    arrangement_clips: {track_index: [{start_time, end_time, length, name}, ...]}

    Segments are delimited by every clip start/end edge; segments shorter
    than one bar are dropped. Type inference uses density/position only
    (no names are available at segment level).
    """
    if not arrangement_clips:
        return []

    # Every clip edge is a candidate segment boundary.
    edges: set[float] = set()
    for track_clips in arrangement_clips.values():
        for c in track_clips:
            begin = c.get("start_time", 0)
            edges.add(begin)
            edges.add(c.get("end_time", begin + c.get("length", 0)))

    ordered = sorted(edges)
    if len(ordered) < 2:
        return []

    total_segments = len(ordered) - 1
    result: list[SectionNode] = []

    for seg_idx, (start_beat, end_beat) in enumerate(zip(ordered, ordered[1:])):
        if end_beat - start_beat < beats_per_bar:
            continue  # Skip very short segments

        start_bar = int(start_beat / beats_per_bar)
        end_bar = int(end_beat / beats_per_bar)
        if end_bar <= start_bar:
            continue

        # A track is active in this segment if any of its clips overlaps it.
        playing = []
        for t_idx, track_clips in arrangement_clips.items():
            overlaps = any(
                c.get("start_time", 0) < end_beat
                and c.get("end_time", c.get("start_time", 0) + c.get("length", 0)) > start_beat
                for c in track_clips
            )
            if overlaps:
                playing.append(t_idx)

        density = len(playing) / max(track_count, 1)
        if total_segments > 1:
            position_ratio = seg_idx / max(total_segments - 1, 1)
        else:
            position_ratio = 0.5

        stype, confidence = _infer_section_type_from_energy(
            energy=density, density=density,
            position_ratio=position_ratio, total_sections=total_segments,
        )

        result.append(SectionNode(
            section_id=f"arr_{seg_idx:02d}",
            start_bar=start_bar,
            end_bar=end_bar,
            section_type=stype,
            confidence=confidence,
            energy=density,
            density=density,
            tracks_active=playing,
        ))

    return result
262
+
263
+
264
+ # ── Phrase Grid ───────────────────────────────────────────────────────
265
+
266
@dataclass
class PhraseUnit:
    """A musical phrase within a section."""
    phrase_id: str
    section_id: str
    start_bar: int
    end_bar: int
    cadence_strength: float  # 0.0-1.0 (how strongly it resolves)
    note_density: float  # notes per bar
    has_variation: bool  # differs from adjacent phrases

    def length_bars(self) -> int:
        """Phrase length in bars (end_bar is exclusive)."""
        return self.end_bar - self.start_bar

    def to_dict(self) -> dict:
        """Serialize to a plain dict with the derived bar length included."""
        payload = asdict(self)
        payload["length_bars"] = self.length_bars()
        return payload
284
+
285
+
286
def detect_phrases(
    section: SectionNode,
    notes_by_track: dict[int, list[dict]],
    default_phrase_length: int = 4,
    beats_per_bar: int = 4,
) -> list[PhraseUnit]:
    """Detect phrase boundaries within a section from note data.

    Uses note density changes and gap detection to find phrase boundaries.
    Falls back to regular grid (4 or 8 bar phrases).

    NOTE(review): note "start_time" values appear to be treated as beats
    relative to the section start (they are offset by section.start_bar
    below) — confirm against the callers that supply notes_by_track.
    """
    section_length = section.length_bars()
    if section_length <= 0:
        return []

    # Aggregate all notes into a bar-level density map (note count per bar).
    bar_densities: dict[int, int] = {}
    for bar in range(section.start_bar, section.end_bar):
        bar_densities[bar] = 0

    for track_notes in notes_by_track.values():
        for note in track_notes:
            start_beat = note.get("start_time", 0)
            note_bar = section.start_bar + int(start_beat / beats_per_bar)
            # Notes landing outside the section's bar range are ignored.
            if section.start_bar <= note_bar < section.end_bar:
                bar_densities[note_bar] = bar_densities.get(note_bar, 0) + 1

    # Find phrase boundaries using density drops (gaps)
    boundaries = [section.start_bar]
    bars = sorted(bar_densities.keys())

    for i in range(1, len(bars)):
        prev_density = bar_densities.get(bars[i - 1], 0)
        curr_density = bar_densities.get(bars[i], 0)

        # A phrase boundary is where density drops significantly or a gap exists
        if prev_density > 0 and curr_density == 0:
            boundaries.append(bars[i])
        elif (bars[i] - section.start_bar) % default_phrase_length == 0:
            # Regular grid fallback: also cut on every default_phrase_length
            # bar offset, so phrases never exceed the grid size.
            if bars[i] not in boundaries:
                boundaries.append(bars[i])

    boundaries.append(section.end_bar)
    # Dedupe and sort (gap cuts may coincide with grid cuts).
    boundaries = sorted(set(boundaries))

    # Build phrases from consecutive boundary pairs.
    phrases = []
    for i in range(len(boundaries) - 1):
        start = boundaries[i]
        end = boundaries[i + 1]
        if end <= start:
            continue

        # Calculate note density (notes per bar) for this phrase.
        total_notes = sum(bar_densities.get(b, 0) for b in range(start, end))
        phrase_bars = end - start
        density = total_notes / max(phrase_bars, 1)

        # Cadence strength: higher if the last bar has lower density than the
        # phrase average (interpreted as a resolution); clamped to [0, 1].
        # A silent phrase gets a neutral 0.3.
        last_bar_density = bar_densities.get(end - 1, 0)
        avg_density = density
        cadence = max(0.0, min(1.0, 1.0 - (last_bar_density / max(avg_density, 0.1)))) if avg_density > 0 else 0.3

        phrases.append(PhraseUnit(
            phrase_id=f"{section.section_id}_phr_{i:02d}",
            section_id=section.section_id,
            start_bar=start,
            end_bar=end,
            cadence_strength=round(cadence, 3),
            note_density=round(density, 2),
            has_variation=False,  # Computed later by phrase critic
        ))

    # Mark variation: a phrase differs from its predecessor when their
    # per-bar densities differ by more than 1 note/bar.
    for i in range(1, len(phrases)):
        density_diff = abs(phrases[i].note_density - phrases[i - 1].note_density)
        if density_diff > 1.0:
            phrases[i].has_variation = True

    return phrases
367
+
368
+
369
+ # ── Role Inference ────────────────────────────────────────────────────
370
+
371
@dataclass
class RoleNode:
    """A track's musical role within a specific section."""
    track_index: int
    track_name: str
    section_id: str
    role: RoleType
    confidence: float  # 0.0-1.0
    foreground: bool  # is this a focal element?

    def to_dict(self) -> dict:
        """Serialize to a plain dict, flattening the role enum to its value."""
        payload = asdict(self)
        payload["role"] = self.role.value
        return payload
385
+
386
+
387
# Name-based role hints (extends _agent_os_engine.infer_track_role).
# Matched via re.search on the lowercased track name in infer_role_for_track,
# so these are SUBSTRING matches and the first matching row wins — e.g.
# "Kick" matches the first row before "bass" could match the second.
_ROLE_NAME_HINTS: list[tuple[str, RoleType]] = [
    (r"kick|bd|bass\s*drum", RoleType.KICK_ANCHOR),
    (r"sub\s*bass|sub|bass", RoleType.BASS_ANCHOR),
    (r"lead|melody|mel|hook|synth\s*lead", RoleType.LEAD),
    (r"pad|atmosphere|atmo|ambient|drone|chord|keys", RoleType.HARMONY_BED),
    (r"h(?:i)?[\s\-]?hat|hh|hat|perc|percussion|clap|snare|rim", RoleType.RHYTHMIC_TEXTURE),
    (r"fx|sfx|riser|sweep|noise|texture|tape", RoleType.TEXTURE_WASH),
    (r"resamp|bounce|bus|group|master|return", RoleType.UTILITY),
]
397
+
398
+
399
def infer_role_for_track(
    track_name: str,
    notes: list[dict],
    device_class: str = "",
    beats_per_bar: int = 4,
) -> tuple[RoleType, float, bool]:
    """Infer a track's role from name, notes, and device class.

    Returns (role, confidence, is_foreground). Checks run in priority
    order: name hints (highest confidence), then device class, then
    note statistics (register, duration, count).
    """
    foreground_roles = (RoleType.LEAD, RoleType.HOOK, RoleType.KICK_ANCHOR)

    # 1. Name-based inference (highest confidence).
    name_key = track_name.lower().strip()
    for pattern, hinted in _ROLE_NAME_HINTS:
        if re.search(pattern, name_key):
            return hinted, 0.80, hinted in foreground_roles

    # 2. Device-class inference.
    klass = device_class.lower()
    if "drumgroup" in klass or "drum" in klass:
        return RoleType.RHYTHMIC_TEXTURE, 0.70, False
    if "simpler" in klass and not notes:
        return RoleType.TEXTURE_WASH, 0.50, False

    # 3. Note-based inference — needs at least one note.
    if not notes:
        return RoleType.UNKNOWN, 0.0, False

    mean_pitch = sum(n.get("pitch", 60) for n in notes) / len(notes)
    mean_duration = sum(n.get("duration", 0.5) for n in notes) / len(notes)

    # Sub-bass register (< MIDI 48 = C3).
    if mean_pitch < 48:
        return RoleType.BASS_ANCHOR, 0.65, False

    # Very long sustained notes → harmony bed.
    if mean_duration > 4.0:
        return RoleType.HARMONY_BED, 0.60, False

    # Dense short notes → lead if high register, else rhythmic texture.
    if mean_duration < 0.5 and len(notes) > 8:
        if mean_pitch > 60:
            return RoleType.LEAD, 0.55, True
        return RoleType.RHYTHMIC_TEXTURE, 0.55, False

    # Medium density, mid register → could be hook or lead.
    if 55 <= mean_pitch <= 80 and 0.5 <= mean_duration <= 2.0:
        return RoleType.HOOK, 0.45, True

    return RoleType.UNKNOWN, 0.3, False
453
+
454
+
455
def build_role_graph(
    sections: list[SectionNode],
    track_data: list[dict],
    notes_by_section_track: dict[str, dict[int, list[dict]]],
) -> list[RoleNode]:
    """Build role graph: what each track does in each section.

    track_data: [{index, name, devices: [{class_name, ...}]}]
    notes_by_section_track: {section_id: {track_index: [notes]}}

    Only tracks listed in a section's tracks_active get a role node there.
    """
    graph: list[RoleNode] = []

    for section in sections:
        active = set(section.tracks_active)
        section_notes_map = notes_by_section_track.get(section.section_id, {})

        for track in track_data:
            idx = track.get("index", 0)
            if idx not in active:
                continue

            name = track.get("name", "")
            devices = track.get("devices", [])
            # Only the first device's class is consulted as a hint.
            first_device_class = devices[0].get("class_name", "") if devices else ""

            role, confidence, foreground = infer_role_for_track(
                name, section_notes_map.get(idx, []), first_device_class,
            )

            graph.append(RoleNode(
                track_index=idx,
                track_name=name,
                section_id=section.section_id,
                role=role,
                confidence=confidence,
                foreground=foreground,
            ))

    return graph
492
+
493
+
494
+ # ── Composition Critics ───────────────────────────────────────────────
495
+
496
@dataclass
class CompositionIssue:
    """A structural or musical problem detected by a critic."""
    issue_type: str
    critic: str  # "form", "section_identity", "phrase"
    severity: float  # 0.0-1.0
    confidence: float  # 0.0-1.0
    scope: dict = field(default_factory=dict)  # e.g., {"section_id": "sec_01"}
    recommended_moves: list[str] = field(default_factory=list)
    evidence: str = ""

    def to_dict(self) -> dict:
        """Serialize to a plain dict (all fields are already JSON-friendly)."""
        return asdict(self)
509
+
510
+
511
def run_form_critic(sections: list[SectionNode]) -> list[CompositionIssue]:
    """Critique the overall form/structure of the arrangement.

    Checks: presence of sections, section count, energy arc across the
    track, contrast between adjacent sections, and opener density.
    """
    # Empty arrangement short-circuits with a single high-severity issue.
    if not sections:
        return [CompositionIssue(
            issue_type="no_sections",
            critic="form",
            severity=0.8,
            confidence=1.0,
            evidence="No sections detected in the arrangement",
            recommended_moves=["create_sections", "add_scene_structure"],
        )]

    findings: list[CompositionIssue] = []

    # 1. Too few sections for a full track.
    if len(sections) < 3:
        findings.append(CompositionIssue(
            issue_type="too_few_sections",
            critic="form",
            severity=0.6,
            confidence=0.8,
            evidence=f"Only {len(sections)} section(s) detected",
            recommended_moves=["section_expansion", "add_contrast_section"],
        ))

    # 2. No energy arc (all sections similar energy).
    if len(sections) >= 2:
        energies = [s.energy for s in sections]
        energy_range = max(energies) - min(energies)
        if energy_range < 0.15:
            findings.append(CompositionIssue(
                issue_type="flat_energy_arc",
                critic="form",
                severity=0.7,
                confidence=0.75,
                evidence=f"Energy range: {energy_range:.2f} (all sections similar density)",
                recommended_moves=["vary_track_count", "add_build_section", "create_breakdown"],
            ))

    # 3. No contrast between adjacent sections (pairwise walk).
    for prev, curr in zip(sections, sections[1:]):
        similar_energy = abs(curr.energy - prev.energy) < 0.1
        similar_density = abs(curr.density - prev.density) < 0.1
        if similar_energy and similar_density:
            findings.append(CompositionIssue(
                issue_type="no_adjacent_contrast",
                critic="form",
                severity=0.5,
                confidence=0.7,
                scope={"sections": [prev.section_id, curr.section_id]},
                evidence=f"Sections '{prev.name or prev.section_id}' and '{curr.name or curr.section_id}' have similar energy/density",
                recommended_moves=["thin_one_section", "add_element_to_one", "vary_automation"],
            ))

    # 4. First section too dense (reveals too much too early).
    opener = sections[0]
    if opener.density > 0.7:
        findings.append(CompositionIssue(
            issue_type="intro_too_dense",
            critic="form",
            severity=0.5,
            confidence=0.65,
            scope={"section_id": opener.section_id},
            evidence=f"First section density: {opener.density:.2f} (reveals too much)",
            recommended_moves=["remove_elements_from_intro", "defer_reveal"],
        ))

    return findings
580
+
581
+
582
def run_section_identity_critic(
    sections: list[SectionNode],
    roles: list[RoleNode],
) -> list[CompositionIssue]:
    """Critique individual section identity and clarity.

    Per section: a focal (foreground) element should exist but not be
    crowded out by too many competitors. Cross-section: the strongest
    chorus should out-energize the strongest verse.
    """
    findings: list[CompositionIssue] = []

    for section in sections:
        in_section = [r for r in roles if r.section_id == section.section_id]
        fg_total = len([r for r in in_section if r.foreground])

        # 1. Tracks present but no clear foreground element.
        if fg_total == 0 and in_section:
            findings.append(CompositionIssue(
                issue_type="no_foreground",
                critic="section_identity",
                severity=0.6,
                confidence=0.70,
                scope={"section_id": section.section_id},
                evidence=f"Section '{section.name or section.section_id}' has {len(in_section)} tracks but none inferred as foreground",
                recommended_moves=["assign_lead_role", "add_hook_element"],
            ))

        # 2. Too many competing foreground voices.
        if fg_total > 3:
            findings.append(CompositionIssue(
                issue_type="too_many_foregrounds",
                critic="section_identity",
                severity=0.5,
                confidence=0.65,
                scope={"section_id": section.section_id},
                evidence=f"Section has {fg_total} foreground elements (max recommended: 3)",
                recommended_moves=["background_some_elements", "thin_section", "use_automation_to_rotate"],
            ))

    # 3. Cross-section type check: chorus should beat verse on energy.
    choruses = [s for s in sections if s.section_type == SectionType.CHORUS]
    verses = [s for s in sections if s.section_type == SectionType.VERSE]
    if choruses and verses:
        chorus_energy = max(s.energy for s in choruses)
        verse_energy = max(s.energy for s in verses)
        if chorus_energy <= verse_energy:
            findings.append(CompositionIssue(
                issue_type="chorus_not_stronger_than_verse",
                critic="section_identity",
                severity=0.6,
                confidence=0.60,
                evidence=f"Chorus energy ({chorus_energy:.2f}) <= verse energy ({verse_energy:.2f})",
                recommended_moves=["add_elements_to_chorus", "thin_verse", "add_chorus_hook"],
            ))

    return findings
637
+
638
+
639
def run_phrase_critic(phrases: list[PhraseUnit]) -> list[CompositionIssue]:
    """Critique phrase structure within sections.

    Requires at least two phrases; checks length uniformity, cadence
    strength, and phrase-to-phrase variation.
    """
    findings: list[CompositionIssue] = []
    count = len(phrases)
    if count < 2:
        return findings

    # 1. Every phrase the same length → no structural variation.
    lengths = [p.length_bars() for p in phrases]
    if count > 3 and len(set(lengths)) == 1:
        findings.append(CompositionIssue(
            issue_type="uniform_phrase_lengths",
            critic="phrase",
            severity=0.4,
            confidence=0.60,
            evidence=f"All {count} phrases are {lengths[0]} bars — no structural variation",
            recommended_moves=["extend_one_phrase", "add_pickup", "truncate_for_surprise"],
        ))

    # 2. Most cadences weak (> 70% of phrases below 0.2 strength).
    weak = [p for p in phrases if p.cadence_strength < 0.2]
    if len(weak) > count * 0.7:
        findings.append(CompositionIssue(
            issue_type="weak_cadences",
            critic="phrase",
            severity=0.5,
            confidence=0.55,
            evidence=f"{len(weak)}/{count} phrases have weak cadence (< 0.2)",
            recommended_moves=["add_resolution_notes", "create_turnaround", "vary_last_bar"],
        ))

    # 3. Nearly all phrases identical to their neighbor.
    unchanged = sum(1 for p in phrases if not p.has_variation)
    if count > 2 and unchanged >= count - 1:
        findings.append(CompositionIssue(
            issue_type="no_phrase_variation",
            critic="phrase",
            severity=0.5,
            confidence=0.60,
            evidence=f"{unchanged}/{count} phrases identical to their neighbor",
            recommended_moves=["add_fill", "vary_notes", "create_response_phrase"],
        ))

    return findings
684
+
685
+
686
+ # ── Gesture Planner ───────────────────────────────────────────────────
687
+
688
# Maps gesture intents to automation parameters and curve families.
# Value schema per intent: description (human-readable), parameter_hints
# (candidate automation targets, in preference order), curve_family
# (shape name consumed by the gesture planner), default_direction,
# and typical_duration_bars (fallback used when no duration is given).
_GESTURE_MAPPINGS: dict[GestureIntent, dict] = {
    GestureIntent.REVEAL: {
        "description": "Open filter, introduce width, grow send level, unmask harmonics",
        "parameter_hints": ["filter_cutoff", "send_level", "utility_width"],
        "curve_family": "exponential",
        "default_direction": "up",
        "typical_duration_bars": 4,
    },
    GestureIntent.CONCEAL: {
        "description": "Close filter, narrow image, reduce send, darken support",
        "parameter_hints": ["filter_cutoff", "volume", "utility_width"],
        "curve_family": "logarithmic",
        "default_direction": "down",
        "typical_duration_bars": 4,
    },
    GestureIntent.HANDOFF: {
        "description": "One voice dims while another emerges",
        "parameter_hints": ["volume", "send_level"],
        "curve_family": "s_curve",
        "default_direction": "crossfade",
        "typical_duration_bars": 2,
    },
    GestureIntent.INHALE: {
        "description": "Pull energy back before impact — pre-drop vacuum",
        "parameter_hints": ["volume", "filter_cutoff", "send_level"],
        "curve_family": "exponential",
        "default_direction": "down",
        "typical_duration_bars": 2,
    },
    GestureIntent.RELEASE: {
        "description": "Restore weight, width, or harmonic color after tension",
        "parameter_hints": ["filter_cutoff", "utility_width", "volume"],
        "curve_family": "spring",
        "default_direction": "up",
        "typical_duration_bars": 1,
    },
    GestureIntent.LIFT: {
        "description": "HP filter rise, reverb send increase — upward energy",
        "parameter_hints": ["hp_filter", "send_level", "reverb_mix"],
        "curve_family": "exponential",
        "default_direction": "up",
        "typical_duration_bars": 8,
    },
    GestureIntent.SINK: {
        "description": "LP filter close, remove highs, settle into sub",
        "parameter_hints": ["filter_cutoff", "eq_high"],
        "curve_family": "logarithmic",
        "default_direction": "down",
        "typical_duration_bars": 4,
    },
    GestureIntent.PUNCTUATE: {
        "description": "Dub throw spike, beat repeat burst — accent a moment",
        "parameter_hints": ["send_level", "beat_repeat"],
        "curve_family": "spike",
        "default_direction": "burst",
        "typical_duration_bars": 1,
    },
    GestureIntent.DRIFT: {
        "description": "Subtle organic movement — perlin noise on parameters",
        "parameter_hints": ["filter_cutoff", "pan", "send_level"],
        "curve_family": "perlin",
        "default_direction": "oscillate",
        "typical_duration_bars": 8,
    },
}
754
+
755
+
756
@dataclass
class GesturePlan:
    """A concrete automation plan derived from a musical gesture intent.

    Produced by plan_gesture(); start_bar/end_bar are absolute bar
    positions, and foreground marks whether the motion is a musical
    focus or background movement.
    """
    gesture_id: str
    intent: GestureIntent
    description: str
    target_tracks: list[int]
    parameter_hints: list[str]
    curve_family: str
    direction: str
    start_bar: int
    end_bar: int
    foreground: bool  # is this a musical focus or background motion?

    def to_dict(self) -> dict:
        """Serialize, flattening the enum value and adding duration_bars."""
        payload = asdict(self)
        payload["intent"] = self.intent.value
        payload["duration_bars"] = self.end_bar - self.start_bar
        return payload
775
+
776
+
777
def plan_gesture(
    intent: GestureIntent,
    target_tracks: list[int],
    start_bar: int,
    duration_bars: Optional[int] = None,
    foreground: bool = False,
) -> GesturePlan:
    """Create a gesture plan from a musical intent.

    Maps the abstract intent to concrete automation parameters and curve type
    via _GESTURE_MAPPINGS. The agent uses this plan with
    apply_automation_shape to execute.

    Args:
        intent: the musical gesture to realize.
        target_tracks: track indices the automation should touch.
        start_bar: absolute bar where the gesture begins.
        duration_bars: explicit length in bars; when None, falls back to the
            intent's typical_duration_bars.
        foreground: whether this motion is a musical focus.

    Raises:
        ValueError: if the intent has no entry in _GESTURE_MAPPINGS.
    """
    mapping = _GESTURE_MAPPINGS.get(intent)
    if mapping is None:
        raise ValueError(f"Unknown gesture intent: {intent}")

    # Explicit None check: the previous `duration_bars or default` form
    # silently discarded an intentional duration of 0 (falsy) and used
    # the default instead.
    if duration_bars is not None:
        actual_duration = duration_bars
    else:
        actual_duration = mapping["typical_duration_bars"]

    return GesturePlan(
        gesture_id=f"gest_{intent.value}_{start_bar}",
        intent=intent,
        description=mapping["description"],
        target_tracks=target_tracks,
        parameter_hints=mapping["parameter_hints"],
        curve_family=mapping["curve_family"],
        direction=mapping["default_direction"],
        start_bar=start_bar,
        end_bar=start_bar + actual_duration,
        foreground=foreground,
    )
807
+
808
+
809
# ── Gesture Templates (Round 2) ───────────────────────────────────────

# Compound gesture sequences for common arrangement patterns.
# Each template is a list of gesture steps with relative offsets.
# Step schema (see resolve_gesture_template):
#   intent        — GestureIntent value name (string form)
#   offset_bars   — bars relative to the anchor bar; negative = before anchor
#   duration_bars — explicit length passed through to plan_gesture
# "best_for" is advisory metadata describing where the template fits.
GESTURE_TEMPLATES: dict[str, dict] = {
    "pre_arrival_vacuum": {
        "description": "Pull energy back before impact — classic build technique",
        "steps": [
            {"intent": "inhale", "offset_bars": -4, "duration_bars": 3},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["pre_drop", "pre_chorus", "turnaround"],
    },
    "sectional_width_bloom": {
        "description": "Narrow then widen — creates sense of opening up",
        "steps": [
            {"intent": "conceal", "offset_bars": -2, "duration_bars": 2},
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 4},
            {"intent": "drift", "offset_bars": 4, "duration_bars": 8},
        ],
        "best_for": ["chorus_entry", "verse_to_chorus", "section_expansion"],
    },
    "phrase_end_throw": {
        "description": "Accent the end of a phrase with a dub throw",
        "steps": [
            {"intent": "punctuate", "offset_bars": -1, "duration_bars": 1},
        ],
        "best_for": ["phrase_cadence", "hook_accent", "transition"],
    },
    "turnaround_accent": {
        "description": "Mark turnaround with lift then settle",
        "steps": [
            {"intent": "lift", "offset_bars": -2, "duration_bars": 2},
            {"intent": "sink", "offset_bars": 0, "duration_bars": 2},
        ],
        "best_for": ["loop_turnaround", "phrase_repeat", "section_end"],
    },
    "outro_decay_dissolve": {
        "description": "Gradual dissolution for endings",
        "steps": [
            {"intent": "conceal", "offset_bars": 0, "duration_bars": 8},
            {"intent": "sink", "offset_bars": 4, "duration_bars": 8},
        ],
        "best_for": ["outro", "fade_out", "ending"],
    },
    "bass_tuck_before_kick": {
        "description": "Duck bass before kick re-entry",
        "steps": [
            {"intent": "inhale", "offset_bars": -1, "duration_bars": 1},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["kick_reentry", "drop", "bass_return"],
    },
    "harmonic_tint_rise": {
        "description": "Gradually introduce harmonic color via filter opening",
        "steps": [
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 8},
        ],
        "best_for": ["verse_development", "pad_introduction", "harmonic_shift"],
    },
    "response_echo": {
        "description": "Echo gesture — punctuate then drift the tail",
        "steps": [
            {"intent": "punctuate", "offset_bars": 0, "duration_bars": 1},
            {"intent": "drift", "offset_bars": 1, "duration_bars": 4},
        ],
        "best_for": ["call_and_response", "hook_echo", "delay_throw"],
    },
    "texture_drift_bed": {
        "description": "Subtle ongoing motion for background textures",
        "steps": [
            {"intent": "drift", "offset_bars": 0, "duration_bars": 16},
        ],
        "best_for": ["pad_movement", "background_texture", "atmosphere"],
    },
    "tension_ratchet": {
        "description": "Stepped tension increase — reveal in stages",
        "steps": [
            {"intent": "reveal", "offset_bars": 0, "duration_bars": 4},
            {"intent": "reveal", "offset_bars": 4, "duration_bars": 4},
            {"intent": "lift", "offset_bars": 8, "duration_bars": 4},
        ],
        "best_for": ["long_build", "riser", "gradual_intensification"],
    },
    "re_entry_spotlight": {
        "description": "Spotlight a returning element",
        "steps": [
            {"intent": "conceal", "offset_bars": -2, "duration_bars": 2},
            {"intent": "release", "offset_bars": 0, "duration_bars": 1},
        ],
        "best_for": ["hook_return", "melody_reentry", "element_spotlight"],
    },
}
902
+
903
+
904
def resolve_gesture_template(
    template_name: str,
    target_tracks: list[int],
    anchor_bar: int,
    foreground: bool = False,
) -> list[GesturePlan]:
    """Resolve a gesture template into a sequence of concrete GesturePlans.

    anchor_bar is the reference point (e.g. a section boundary bar number);
    steps with negative offset_bars land before the anchor.

    Raises:
        ValueError: if template_name is not in GESTURE_TEMPLATES.
    """
    template = GESTURE_TEMPLATES.get(template_name)
    if template is None:
        valid = list(GESTURE_TEMPLATES.keys())
        raise ValueError(f"Unknown template '{template_name}'. Valid: {valid}")

    resolved = []
    for index, step in enumerate(template["steps"]):
        step_start = anchor_bar + step.get("offset_bars", 0)
        plan = plan_gesture(
            GestureIntent(step["intent"]),
            target_tracks,
            step_start,
            step.get("duration_bars"),
            foreground,
        )
        # Stamp a template-derived id so sequence membership stays traceable.
        plan.gesture_id = f"{template_name}_{index:02d}_{step_start}"
        resolved.append(plan)

    return resolved
930
+
931
+
932
# ── Section Outcome Analysis (Round 2) ────────────────────────────────

def analyze_section_outcomes(
    outcomes: list[dict],
) -> dict:
    """Analyze composition outcomes grouped by section type.

    outcomes: list of composition_outcome payloads (dicts with optional
    "section_type", "move_name", "score", and "kept" keys).
    Returns: {section_types, outcomes_by_section, total_outcomes} where
    outcomes_by_section maps section_type → move_name →
    {avg_score, count, keep_rate}.
    """
    grouped: dict[str, list[dict]] = {}
    for outcome in outcomes:
        grouped.setdefault(outcome.get("section_type", "unknown"), []).append(outcome)

    per_section = {}
    for stype, members in grouped.items():
        # Accumulate raw tallies per move name first.
        tallies: dict[str, dict] = {}
        for outcome in members:
            entry = tallies.setdefault(
                outcome.get("move_name", "unknown"),
                {"scores": [], "kept": 0, "total": 0},
            )
            entry["scores"].append(outcome.get("score", 0))
            entry["total"] += 1
            if outcome.get("kept", False):
                entry["kept"] += 1

        # Then reduce tallies to the reported summary statistics.
        summary = {}
        for move, entry in tallies.items():
            scores = entry["scores"]
            total = entry["total"]
            summary[move] = {
                "avg_score": round(sum(scores) / len(scores), 3) if scores else 0,
                "count": total,
                "keep_rate": round(entry["kept"] / total, 3) if total > 0 else 0,
            }
        per_section[stype] = summary

    return {
        "section_types": list(per_section.keys()),
        "outcomes_by_section": per_section,
        "total_outcomes": sum(len(v) for v in grouped.values()),
    }
973
+
974
+
975
# ── Composition Evaluation ────────────────────────────────────────────

# Closed set of dimension names recognized by composition evaluation.
# frozenset keeps it immutable and cheap for membership tests.
# NOTE(review): not referenced elsewhere in this chunk — presumably used
# to validate target_dimensions payloads; confirm against callers.
COMPOSITION_DIMENSIONS = frozenset({
    "section_clarity", "phrase_completion", "narrative_pacing",
    "transition_strength", "orchestration_clarity", "tension_release",
})
981
+
982
+
983
def evaluate_composition_move(
    before_issues: list[CompositionIssue],
    after_issues: list[CompositionIssue],
    target_dimensions: dict[str, float],
    protect: dict[str, float],
) -> dict:
    """Evaluate whether a composition move improved the arrangement.

    Compares issue counts and severity totals before vs. after the move.
    The move is kept only if it does not raise total severity, does not
    introduce more than one net new issue, and scores at least 0.40.

    NOTE(review): target_dimensions and protect are accepted but never
    read in this body — presumably reserved for dimension-aware scoring;
    confirm before relying on them.

    Returns: {score, keep_change, issue_delta, before_issue_count,
    after_issue_count, severity_improvement, notes, consecutive_undo_hint}.
    """
    remarks: list[str] = []

    n_before = len(before_issues)
    n_after = len(after_issues)
    resolved_delta = n_before - n_after

    # Severity-weighted comparison; positive gain means things got better.
    sev_before = sum(issue.severity for issue in before_issues)
    sev_after = sum(issue.severity for issue in after_issues)
    sev_gain = sev_before - sev_after

    if n_before > 0:
        # Relative improvement; denominator floored at 0.01 to avoid /0.
        ratio = sev_gain / max(sev_before, 0.01)
    else:
        # Nothing was wrong before: neutral if still clean, penalized otherwise.
        ratio = 0.0 if n_after == 0 else -0.5

    # Map the ratio into [0, 1] around a 0.5 neutral midpoint.
    score = max(0.0, min(1.0, 0.5 + ratio * 0.5))

    keep_change = True

    if sev_gain < 0:
        keep_change = False
        remarks.append(f"WORSE: total severity increased by {-sev_gain:.2f}")

    if n_after > n_before + 1:
        keep_change = False
        remarks.append(f"NEW ISSUES: {n_after - n_before} new issues introduced")

    if score < 0.40:
        keep_change = False
        remarks.append(f"SCORE: {score:.3f} below 0.40 threshold")

    if keep_change and sev_gain > 0:
        remarks.append(f"IMPROVED: resolved {resolved_delta} issue(s), severity reduced by {sev_gain:.2f}")

    return {
        "score": round(score, 4),
        "keep_change": keep_change,
        "issue_delta": resolved_delta,
        "before_issue_count": n_before,
        "after_issue_count": n_after,
        "severity_improvement": round(sev_gain, 4),
        "notes": remarks,
        "consecutive_undo_hint": not keep_change,
    }
1043
+
1044
+
1045
# ── Full Analysis Pipeline ────────────────────────────────────────────

@dataclass
class CompositionAnalysis:
    """Complete composition analysis result bundling all critic outputs."""
    sections: list[SectionNode]
    phrases: list[PhraseUnit]
    roles: list[RoleNode]
    issues: list[CompositionIssue]

    def to_dict(self) -> dict:
        """Serialize the full analysis, including a per-critic issue tally."""
        # Only these four critics are broken out in the summary; issues
        # from other critics still appear in "issues" but are not tallied.
        critic_tally = {"form": 0, "section_identity": 0, "phrase": 0, "transition": 0}
        for issue in self.issues:
            if issue.critic in critic_tally:
                critic_tally[issue.critic] += 1

        return {
            "sections": [s.to_dict() for s in self.sections],
            "section_count": len(self.sections),
            "phrases": [p.to_dict() for p in self.phrases],
            "phrase_count": len(self.phrases),
            "roles": [r.to_dict() for r in self.roles],
            "role_count": len(self.roles),
            "issues": [i.to_dict() for i in self.issues],
            "issue_count": len(self.issues),
            "issue_summary": critic_tally,
        }
1072
+
1073
+
1074
# ── Harmony Field (Round 1) ──────────────────────────────────────────

@dataclass
class HarmonyField:
    """Harmonic analysis of a section — key, chords, voice-leading, tension."""
    section_id: str
    key: str = ""            # tonic name; empty when undetected
    mode: str = ""           # mode name; empty when undetected
    confidence: float = 0.0  # key-detection confidence
    chord_progression: list[str] = field(default_factory=list)
    voice_leading_quality: float = 0.5  # 0=rough, 1=smooth
    instability: float = 0.0  # 0=stable/tonic, 1=highly unstable
    resolution_potential: float = 0.5  # tendency toward resolution

    def to_dict(self) -> dict:
        """Serialize every field into a plain dict."""
        return asdict(self)
1090
+
1091
+
1092
def build_harmony_field(
    section_id: str,
    harmony_analysis: Optional[dict] = None,
    scale_info: Optional[dict] = None,
    progression_info: Optional[dict] = None,
    voice_leading_info: Optional[dict] = None,
) -> HarmonyField:
    """Build a HarmonyField from theory/harmony tool outputs.

    All parameters are optional — missing inputs simply leave the
    corresponding HarmonyField defaults in place (graceful degradation).
    """
    result = HarmonyField(section_id=section_id)

    # Scale / key info from the top-ranked scale match.
    if scale_info:
        best = scale_info.get("top_match", {})
        result.key = best.get("tonic", "")
        result.mode = best.get("mode", "")
        result.confidence = best.get("confidence", 0.0)

    # Chord progression, instability, and resolution from chord analysis.
    if harmony_analysis:
        chords = harmony_analysis.get("chords", [])
        result.chord_progression = [c.get("chord_name", "?") for c in chords]

        numerals = [c.get("roman_numeral", "?") for c in chords]
        if numerals:
            # Instability: share of chords away from the tonic.
            away = sum(1 for numeral in numerals if numeral not in ("i", "I", "?"))
            result.instability = away / len(numerals)
            # Resolution potential: ending on tonic pulls toward rest.
            result.resolution_potential = 1.0 if numerals[-1] in ("i", "I") else 0.3

    # Progression classification nudges instability either way.
    # "diatonic" = more stable, "free neo-Riemannian" = more unstable.
    if progression_info:
        label = progression_info.get("classification", "").lower()
        if "diatonic" in label:
            result.instability = max(0.0, result.instability - 0.1)
        elif "free" in label:
            result.instability = min(1.0, result.instability + 0.1)

    # Voice leading: fewer steps = smoother.
    if voice_leading_info:
        step_count = voice_leading_info.get("steps", 0)
        if voice_leading_info.get("found", False) and step_count > 0:
            result.voice_leading_quality = max(0.0, 1.0 - (step_count - 1) * 0.15)

    return result
1145
+
1146
+
1147
# ── Transition Critic (Round 1) ──────────────────────────────────────

def run_transition_critic(
    sections: list[SectionNode],
    roles: list[RoleNode],
    harmony_fields: Optional[list[HarmonyField]] = None,
) -> list[CompositionIssue]:
    """Analyze boundaries between adjacent sections for transition quality.

    Walks each adjacent (prev, curr) pair in arrangement order and emits a
    CompositionIssue for:
      1. hard_cut_transition        — no energy/density movement at the boundary
      2. no_pre_arrival_subtraction — high-energy section with no dip before it
      3. groove_break_at_transition — every rhythmic track drops out at once
      4. harmonic_non_sequitur      — unprepared key change (needs harmony_fields)
      5. weak_build                 — energy rises but the same voices stay in front

    Returns an empty list when there are fewer than two sections.
    """
    issues = []
    if len(sections) < 2:
        return issues

    # Index harmony fields by section id for O(1) boundary lookups.
    harmony_map = {}
    if harmony_fields:
        harmony_map = {hf.section_id: hf for hf in harmony_fields}

    for i in range(1, len(sections)):
        prev = sections[i - 1]
        curr = sections[i]

        # 1. Hard cut — no energy or density change at boundary
        energy_delta = abs(curr.energy - prev.energy)
        density_delta = abs(curr.density - prev.density)

        if energy_delta < 0.05 and density_delta < 0.05:
            issues.append(CompositionIssue(
                issue_type="hard_cut_transition",
                critic="transition",
                severity=0.5,
                confidence=0.70,
                scope={"from": prev.section_id, "to": curr.section_id},
                evidence=f"No energy/density change between '{prev.name or prev.section_id}' and '{curr.name or curr.section_id}'",
                recommended_moves=["add_transition_fx", "create_fill", "vary_density_at_boundary"],
            ))

        # 2. No pre-arrival subtraction before high-energy section
        if curr.energy > 0.7 and prev.energy > 0.6:
            issues.append(CompositionIssue(
                issue_type="no_pre_arrival_subtraction",
                critic="transition",
                severity=0.6,
                confidence=0.65,
                scope={"from": prev.section_id, "to": curr.section_id},
                evidence=f"High-energy section '{curr.name or curr.section_id}' (E={curr.energy:.2f}) not preceded by subtraction (prev E={prev.energy:.2f})",
                recommended_moves=["thin_preceding_section", "add_breakdown_before_peak", "inhale_gesture"],
            ))

        # 3. Groove break — rhythmic elements drop out at boundary
        prev_rhythm = {r.track_index for r in roles
                       if r.section_id == prev.section_id
                       and r.role in (RoleType.KICK_ANCHOR, RoleType.RHYTHMIC_TEXTURE)}
        curr_rhythm = {r.track_index for r in roles
                       if r.section_id == curr.section_id
                       and r.role in (RoleType.KICK_ANCHOR, RoleType.RHYTHMIC_TEXTURE)}

        # Fires only when the previous section had rhythm and the next has none.
        if prev_rhythm and not curr_rhythm:
            issues.append(CompositionIssue(
                issue_type="groove_break_at_transition",
                critic="transition",
                severity=0.5,
                confidence=0.60,
                scope={"from": prev.section_id, "to": curr.section_id},
                evidence=f"All rhythmic elements ({len(prev_rhythm)} tracks) drop out at '{curr.name or curr.section_id}'",
                recommended_moves=["carry_one_rhythm_element", "add_transition_percussion"],
            ))

        # 4. Harmonic non-sequitur — key change without voice-leading support
        prev_hf = harmony_map.get(prev.section_id)
        curr_hf = harmony_map.get(curr.section_id)

        if prev_hf and curr_hf and prev_hf.key and curr_hf.key:
            if prev_hf.key != curr_hf.key:
                # Key change: check if it's prepared.
                # Low resolution potential going out + high instability coming
                # in is read as an unprepared jump.
                if prev_hf.resolution_potential < 0.5 and curr_hf.instability > 0.5:
                    issues.append(CompositionIssue(
                        issue_type="harmonic_non_sequitur",
                        critic="transition",
                        severity=0.6,
                        confidence=0.55,
                        scope={"from": prev.section_id, "to": curr.section_id},
                        evidence=f"Key change {prev_hf.key} → {curr_hf.key} without harmonic preparation",
                        recommended_moves=["add_pivot_chord", "use_chromatic_mediant", "prepare_with_dominant"],
                    ))

        # 5. Weak build — energy rises but no role rotation
        if curr.energy > prev.energy + 0.2:
            prev_fg = {r.track_index for r in roles
                       if r.section_id == prev.section_id and r.foreground}
            curr_fg = {r.track_index for r in roles
                       if r.section_id == curr.section_id and r.foreground}

            if prev_fg == curr_fg and prev_fg:
                issues.append(CompositionIssue(
                    issue_type="weak_build",
                    critic="transition",
                    severity=0.4,
                    confidence=0.55,
                    scope={"from": prev.section_id, "to": curr.section_id},
                    evidence=f"Energy rises but same foreground voices ({len(prev_fg)} tracks) — no role rotation",
                    recommended_moves=["rotate_foreground_voice", "add_new_element", "handoff_gesture"],
                ))

    return issues
1250
+
1251
+
1252
# ── Emotional Arc Critic (Round 3) ──────────────────────────────────

def run_emotional_arc_critic(
    sections: list[SectionNode],
    harmony_fields: Optional[list["HarmonyField"]] = None,
) -> list[CompositionIssue]:
    """Judge whether the arrangement traces a satisfying emotional arc.

    A per-section tension value is composited from energy (weight 0.5),
    density (0.3), and harmonic instability (0.2; neutral 0.3 when no
    HarmonyField is available). The curve is then screened for: monotone
    arc, wall-to-wall climax, a peak with no build, missing resolution at
    the end, and a climax landing in the first third.
    """
    issues = []
    if len(sections) < 3:
        # Too few sections to speak of an "arc" at all.
        return issues

    # Index harmony fields by section for O(1) lookup while building the curve.
    fields_by_section = {}
    if harmony_fields:
        fields_by_section = {hf.section_id: hf for hf in harmony_fields}

    # Composite tension per section, rounded to 3 places.
    curve: list[float] = []
    for node in sections:
        hf = fields_by_section.get(node.section_id)
        harmonic = hf.instability if hf else 0.3  # neutral default
        curve.append(round(node.energy * 0.5 + node.density * 0.3 + harmonic * 0.2, 3))

    # 1. Monotone arc — tension barely varies across the whole piece.
    spread = max(curve) - min(curve)
    if spread < 0.15:
        issues.append(CompositionIssue(
            issue_type="monotone_arc",
            critic="emotional_arc",
            severity=0.7,
            confidence=0.70,
            evidence=f"Tension range: {spread:.2f} — arrangement feels static",
            recommended_moves=[
                "add_breakdown_section", "create_energy_contrast",
                "thin_one_section", "add_build_before_peak",
            ],
        ))

    # 2. All-climax — most sections sit above 0.7 tension; nothing rests.
    hot = sum(1 for t in curve if t > 0.7)
    if hot > len(curve) * 0.6:
        issues.append(CompositionIssue(
            issue_type="all_climax",
            critic="emotional_arc",
            severity=0.6,
            confidence=0.65,
            evidence=f"{hot}/{len(curve)} sections have tension > 0.7 — no rest",
            recommended_moves=[
                "add_low_energy_section", "create_breakdown",
                "reduce_density_in_verse", "strip_back_intro",
            ],
        ))

    # 3. Peak without a build — climax arrives with no rising approach.
    peak_pos = curve.index(max(curve))
    if peak_pos < len(curve) - 1:
        # A "build" is any step up of more than 0.1 on the way to the peak.
        rising = any(
            curve[j] > curve[j - 1] + 0.1 for j in range(1, peak_pos + 1)
        )
        if not rising and len(curve) > 4:
            issues.append(CompositionIssue(
                issue_type="no_clear_build",
                critic="emotional_arc",
                severity=0.5,
                confidence=0.55,
                evidence="No gradual tension increase before peak — peak arrives without anticipation",
                recommended_moves=[
                    "add_build_section", "tension_ratchet_gesture",
                    "gradual_element_addition", "harmonic_tint_rise",
                ],
            ))

    # 4. No resolution — the piece ends nearly as tense as its peak.
    if len(curve) >= 3:  # always true after the early return; kept for clarity
        closing = curve[-1]
        summit = max(curve)
        if closing > summit * 0.8 and summit > 0.5:
            issues.append(CompositionIssue(
                issue_type="no_resolution",
                critic="emotional_arc",
                severity=0.5,
                confidence=0.60,
                evidence=f"Final tension ({closing:.2f}) nearly as high as peak ({summit:.2f}) — no release",
                recommended_moves=[
                    "add_outro", "create_energy_drop_at_end",
                    "outro_decay_dissolve_gesture", "strip_elements_gradually",
                ],
            ))

    # 5. Peak too early — climax lands in the first third of the piece.
    if peak_pos < len(curve) / 3 and len(curve) > 4:
        issues.append(CompositionIssue(
            issue_type="peak_too_early",
            critic="emotional_arc",
            severity=0.5,
            confidence=0.55,
            evidence=f"Peak tension at section {peak_pos + 1}/{len(curve)} — climax in first third",
            recommended_moves=[
                "move_peak_elements_later", "add_second_bigger_climax",
                "reorder_sections", "save_hook_reveal_for_later",
            ],
        ))

    return issues
1370
+
1371
+
1372
# ── Cross-Section Critic (Round 4) ──────────────────────────────────

def run_cross_section_critic(
    sections: list[SectionNode],
    roles: list[RoleNode],
    harmony_fields: Optional[list["HarmonyField"]] = None,
    motif_count: int = 0,
) -> list[CompositionIssue]:
    """Reason across the entire arrangement for cross-section coherence.

    Checks that the arrangement works as a whole, not just per-section:
    - Clear reveal order (elements shouldn't all appear at once)
    - Foreground voice rotation (same lead everywhere = fatigue)
    - Harmonic pacing (rapid key changes everywhere = chaos)
    - Element variety across sections

    Harmonic checks (3 and 4) require harmony_fields; the motif check (5)
    only runs when motif_count > 0. Returns [] below three sections.
    """
    issues = []
    if len(sections) < 3:
        return issues

    # 1. All elements appear from the start — no reveal order
    # Compares the active-track sets of the first three sections only.
    if len(sections) >= 3:
        first_active = set(sections[0].tracks_active)
        second_active = set(sections[1].tracks_active)
        third_active = set(sections[2].tracks_active)
        if first_active == second_active == third_active and first_active:
            issues.append(CompositionIssue(
                issue_type="no_reveal_order",
                critic="cross_section",
                severity=0.6,
                confidence=0.65,
                evidence=f"First 3 sections all have same {len(first_active)} active tracks — no staggered reveal",
                recommended_moves=[
                    "defer_elements_to_later_sections", "strip_intro",
                    "create_reveal_sequence", "mute_tracks_in_early_sections",
                ],
            ))

    # 2. Same foreground voices in every section — no rotation
    fg_by_section: list[set[int]] = []
    for section in sections:
        fg = {r.track_index for r in roles
              if r.section_id == section.section_id and r.foreground}
        fg_by_section.append(fg)

    if len(fg_by_section) >= 3:
        all_same = all(fg == fg_by_section[0] for fg in fg_by_section[1:])
        if all_same and fg_by_section[0]:
            issues.append(CompositionIssue(
                issue_type="no_foreground_rotation",
                critic="cross_section",
                severity=0.5,
                confidence=0.60,
                evidence=f"Same foreground voices ({len(fg_by_section[0])} tracks) in all {len(sections)} sections",
                recommended_moves=[
                    "alternate_lead_voice", "handoff_gesture_between_sections",
                    "mute_lead_in_bridge", "introduce_new_hook_element",
                ],
            ))

    # 3. Harmonic monotony — same key across all sections
    if harmony_fields:
        # Sections whose key was never detected (empty string) are skipped.
        keys = [hf.key for hf in harmony_fields if hf.key]
        if len(keys) >= 3 and len(set(keys)) == 1:
            issues.append(CompositionIssue(
                issue_type="harmonic_monotony",
                critic="cross_section",
                severity=0.4,
                confidence=0.50,
                evidence=f"All {len(keys)} sections in same key ({keys[0]}) — consider modulation",
                recommended_moves=[
                    "modulate_for_bridge", "use_chromatic_mediant",
                    "borrow_from_parallel_key", "transpose_final_chorus",
                ],
            ))

        # 4. Harmonic chaos — different key in every section
        # (mutually exclusive in practice with check 3: >70% unique keys)
        unique_keys = set(keys)
        if len(unique_keys) > len(keys) * 0.7 and len(keys) >= 4:
            issues.append(CompositionIssue(
                issue_type="harmonic_chaos",
                critic="cross_section",
                severity=0.5,
                confidence=0.45,
                evidence=f"{len(unique_keys)} different keys across {len(keys)} sections — hard to follow",
                recommended_moves=[
                    "consolidate_to_two_keys", "use_pivot_chords",
                    "establish_home_key", "group_related_sections",
                ],
            ))

    # 5. No motif development (if motifs exist but aren't varied)
    if motif_count > 0:
        # Check if sections have varying density (proxy for development);
        # densities are bucketed to one decimal place before comparing.
        densities = [s.density for s in sections]
        unique_densities = len(set(round(d, 1) for d in densities))
        if unique_densities <= 2 and len(sections) > 4:
            issues.append(CompositionIssue(
                issue_type="static_arrangement",
                critic="cross_section",
                severity=0.4,
                confidence=0.50,
                evidence=f"Only {unique_densities} distinct density levels across {len(sections)} sections with {motif_count} motifs",
                recommended_moves=[
                    "vary_motif_density_per_section", "fragment_motif_in_bridge",
                    "augment_motif_in_outro", "register_shift_for_variety",
                ],
            ))

    return issues
1482
+
1483
+
1484
# ── Composition Taste Model (Round 4) ───────────────────────────────

def build_composition_taste_model(
    section_outcomes: list[dict],
) -> dict:
    """Build per-section-type preferences from composition outcome history.

    Aggregates section outcomes to learn: what density, foreground count,
    and move types does this user prefer for each section type? Only
    outcomes with kept=True contribute to the preference averages.

    Returns: {section_types: {type: {preferred_density,
    preferred_foreground_count, top_moves, keep_rate, sample_size}},
    sample_size}.
    """
    if not section_outcomes:
        return {"section_types": {}, "sample_size": 0}

    grouped: dict[str, list[dict]] = {}
    for outcome in section_outcomes:
        grouped.setdefault(outcome.get("section_type", "unknown"), []).append(outcome)

    profiles: dict[str, dict] = {}
    for stype, group in grouped.items():
        accepted = [o for o in group if o.get("kept", False)]
        density_vals = [o["density"] for o in accepted if "density" in o]
        fg_vals = [o["foreground_count"] for o in accepted if "foreground_count" in o]

        # Tally which move types the user actually kept.
        counts: dict[str, int] = {}
        for o in accepted:
            name = o.get("move_name", "unknown")
            counts[name] = counts.get(name, 0) + 1
        # Stable sort keeps insertion order among ties; top three only.
        ranked = sorted(counts.items(), key=lambda item: -item[1])[:3]

        profiles[stype] = {
            "preferred_density": round(sum(density_vals) / len(density_vals), 2) if density_vals else 0.5,
            "preferred_foreground_count": round(sum(fg_vals) / len(fg_vals), 1) if fg_vals else 1.0,
            "top_moves": [{"move": name, "count": tally} for name, tally in ranked],
            "keep_rate": round(len(accepted) / len(group), 3) if group else 0,
            "sample_size": len(group),
        }

    return {
        "section_types": profiles,
        "sample_size": sum(len(g) for g in grouped.values()),
    }