livepilot 1.9.21 → 1.9.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcpbignore +40 -0
  3. package/AGENTS.md +2 -2
  4. package/CHANGELOG.md +47 -0
  5. package/CONTRIBUTING.md +1 -1
  6. package/README.md +47 -72
  7. package/bin/livepilot.js +135 -0
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
  11. package/livepilot/commands/arrange.md +42 -14
  12. package/livepilot/commands/beat.md +68 -21
  13. package/livepilot/commands/evaluate.md +23 -13
  14. package/livepilot/commands/mix.md +35 -11
  15. package/livepilot/commands/perform.md +31 -19
  16. package/livepilot/commands/sounddesign.md +38 -17
  17. package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
  18. package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
  19. package/livepilot/skills/livepilot-core/SKILL.md +60 -4
  20. package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
  21. package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
  22. package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
  23. package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
  24. package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
  25. package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
  26. package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
  27. package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
  28. package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
  29. package/livepilot/skills/livepilot-core/references/overview.md +4 -4
  30. package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
  31. package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
  32. package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
  33. package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
  34. package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
  35. package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
  36. package/livepilot/skills/livepilot-release/SKILL.md +15 -15
  37. package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
  38. package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
  39. package/livepilot.mcpb +0 -0
  40. package/m4l_device/livepilot_bridge.js +1 -1
  41. package/manifest.json +91 -0
  42. package/mcp_server/__init__.py +1 -1
  43. package/mcp_server/creative_constraints/__init__.py +6 -0
  44. package/mcp_server/creative_constraints/engine.py +277 -0
  45. package/mcp_server/creative_constraints/models.py +75 -0
  46. package/mcp_server/creative_constraints/tools.py +341 -0
  47. package/mcp_server/experiment/__init__.py +6 -0
  48. package/mcp_server/experiment/engine.py +213 -0
  49. package/mcp_server/experiment/models.py +120 -0
  50. package/mcp_server/experiment/tools.py +263 -0
  51. package/mcp_server/hook_hunter/__init__.py +5 -0
  52. package/mcp_server/hook_hunter/analyzer.py +342 -0
  53. package/mcp_server/hook_hunter/models.py +57 -0
  54. package/mcp_server/hook_hunter/tools.py +586 -0
  55. package/mcp_server/memory/taste_graph.py +261 -0
  56. package/mcp_server/memory/tools.py +88 -0
  57. package/mcp_server/mix_engine/critics.py +2 -2
  58. package/mcp_server/mix_engine/models.py +1 -1
  59. package/mcp_server/mix_engine/state_builder.py +2 -2
  60. package/mcp_server/musical_intelligence/__init__.py +8 -0
  61. package/mcp_server/musical_intelligence/detectors.py +421 -0
  62. package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
  63. package/mcp_server/musical_intelligence/tools.py +221 -0
  64. package/mcp_server/preview_studio/__init__.py +5 -0
  65. package/mcp_server/preview_studio/engine.py +280 -0
  66. package/mcp_server/preview_studio/models.py +73 -0
  67. package/mcp_server/preview_studio/tools.py +423 -0
  68. package/mcp_server/runtime/session_kernel.py +96 -0
  69. package/mcp_server/runtime/tools.py +90 -1
  70. package/mcp_server/semantic_moves/__init__.py +13 -0
  71. package/mcp_server/semantic_moves/compiler.py +116 -0
  72. package/mcp_server/semantic_moves/mix_compilers.py +291 -0
  73. package/mcp_server/semantic_moves/mix_moves.py +157 -0
  74. package/mcp_server/semantic_moves/models.py +46 -0
  75. package/mcp_server/semantic_moves/performance_compilers.py +208 -0
  76. package/mcp_server/semantic_moves/performance_moves.py +81 -0
  77. package/mcp_server/semantic_moves/registry.py +32 -0
  78. package/mcp_server/semantic_moves/resolvers.py +126 -0
  79. package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
  80. package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
  81. package/mcp_server/semantic_moves/tools.py +204 -0
  82. package/mcp_server/semantic_moves/transition_compilers.py +222 -0
  83. package/mcp_server/semantic_moves/transition_moves.py +76 -0
  84. package/mcp_server/server.py +10 -0
  85. package/mcp_server/session_continuity/__init__.py +6 -0
  86. package/mcp_server/session_continuity/models.py +86 -0
  87. package/mcp_server/session_continuity/tools.py +230 -0
  88. package/mcp_server/session_continuity/tracker.py +235 -0
  89. package/mcp_server/song_brain/__init__.py +6 -0
  90. package/mcp_server/song_brain/builder.py +477 -0
  91. package/mcp_server/song_brain/models.py +132 -0
  92. package/mcp_server/song_brain/tools.py +294 -0
  93. package/mcp_server/stuckness_detector/__init__.py +5 -0
  94. package/mcp_server/stuckness_detector/detector.py +400 -0
  95. package/mcp_server/stuckness_detector/models.py +66 -0
  96. package/mcp_server/stuckness_detector/tools.py +195 -0
  97. package/mcp_server/tools/_conductor.py +104 -6
  98. package/mcp_server/tools/analyzer.py +1 -1
  99. package/mcp_server/tools/devices.py +34 -0
  100. package/mcp_server/wonder_mode/__init__.py +6 -0
  101. package/mcp_server/wonder_mode/diagnosis.py +84 -0
  102. package/mcp_server/wonder_mode/engine.py +493 -0
  103. package/mcp_server/wonder_mode/session.py +114 -0
  104. package/mcp_server/wonder_mode/tools.py +285 -0
  105. package/package.json +2 -2
  106. package/remote_script/LivePilot/__init__.py +1 -1
  107. package/remote_script/LivePilot/browser.py +4 -1
  108. package/remote_script/LivePilot/devices.py +29 -0
  109. package/remote_script/LivePilot/tracks.py +11 -4
  110. package/scripts/generate_tool_catalog.py +131 -0
@@ -0,0 +1,477 @@
1
+ """SongBrain builder — pure computation, zero I/O.
2
+
3
+ Constructs a SongBrain from project brain data, scene/clip analysis,
4
+ motif data, and session memory. MCP tool wrappers call this with
5
+ pre-fetched data from Ableton.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import hashlib
11
+ import json
12
+ from collections import Counter
13
+ from typing import Optional
14
+
15
+ from .models import (
16
+ IdentityDrift,
17
+ OpenQuestion,
18
+ SacredElement,
19
+ SectionPurpose,
20
+ SongBrain,
21
+ )
22
+
23
+
24
+ # ── Main builder ──────────────────────────────────────────────────
25
+
26
+
27
def build_song_brain(
    session_info: dict,
    scenes: Optional[list[dict]] = None,
    tracks: Optional[list[dict]] = None,
    motif_data: Optional[dict] = None,
    composition_analysis: Optional[dict] = None,
    role_graph: Optional[dict] = None,
    recent_moves: Optional[list[dict]] = None,
    taste_graph: Optional[dict] = None,
) -> SongBrain:
    """Assemble a SongBrain snapshot from whatever session data exists.

    Every input except ``session_info`` may be None or empty — each
    inference degrades gracefully, producing lower-confidence results
    instead of failing. ``built_from`` records which sources actually
    contributed.

    NOTE(review): ``taste_graph`` is accepted but never read in this
    function — presumably reserved for future use; confirm with callers.
    """
    # Normalize all optional inputs to empty containers up front.
    scenes = scenes or []
    tracks = tracks or []
    motif_data = motif_data or {}
    composition_analysis = composition_analysis or {}
    role_graph = role_graph or {}
    recent_moves = recent_moves or []

    # Provenance map so consumers can judge result quality.
    provenance: dict[str, bool] = {
        "session_info": True,
        "scenes": bool(scenes),
        "tracks": bool(tracks),
        "motif_data": bool(motif_data),
        "composition_analysis": bool(composition_analysis),
        "role_graph": bool(role_graph),
        "recent_moves": bool(recent_moves),
    }

    core, core_confidence = _infer_identity_core(
        tracks, motif_data, composition_analysis, role_graph
    )
    sacred = _detect_sacred_elements(
        tracks, motif_data, composition_analysis, role_graph
    )
    sections = _infer_section_purposes(scenes, composition_analysis)

    # All helpers are pure, so evaluation order here is immaterial.
    return SongBrain(
        brain_id=_compute_brain_id(session_info, scenes),
        identity_core=core,
        identity_confidence=core_confidence,
        sacred_elements=sacred,
        section_purposes=sections,
        energy_arc=_build_energy_arc(scenes, sections),
        identity_drift_risk=_estimate_drift_risk(recent_moves, sacred),
        payoff_targets=[s.section_id for s in sections if s.is_payoff],
        open_questions=_detect_open_questions(
            sections, sacred, core, tracks, composition_analysis
        ),
        built_from=provenance,
    )
89
+
90
+
91
+ # ── Identity core inference ───────────────────────────────────────
92
+
93
+
94
+ def _infer_identity_core(
95
+ tracks: list[dict],
96
+ motif_data: dict,
97
+ composition: dict,
98
+ role_graph: dict,
99
+ ) -> tuple[str, float]:
100
+ """Infer the single strongest defining idea in the session.
101
+
102
+ Returns (description, confidence).
103
+ """
104
+ candidates: list[tuple[str, float]] = []
105
+
106
+ # From motif data — most salient recurring motif
107
+ motifs = motif_data.get("motifs", [])
108
+ if motifs:
109
+ top_motif = max(motifs, key=lambda m: m.get("salience", 0))
110
+ salience = top_motif.get("salience", 0)
111
+ if salience > 0.3:
112
+ desc = top_motif.get("description", top_motif.get("name", "recurring motif"))
113
+ candidates.append((f"Recurring motif: {desc}", min(0.9, salience)))
114
+
115
+ # From composition — dominant emotional arc
116
+ arc_type = composition.get("arc_type", "")
117
+ if arc_type:
118
+ candidates.append((f"Emotional arc: {arc_type}", 0.6))
119
+
120
+ # From role graph — dominant texture
121
+ # role_graph format: {track_name: {index: int, role: str}}
122
+ if role_graph:
123
+ role_counts = Counter(
124
+ info.get("role", "unknown")
125
+ for info in role_graph.values()
126
+ if isinstance(info, dict)
127
+ )
128
+ role_counts.pop("unknown", None)
129
+ if role_counts:
130
+ dominant_role = role_counts.most_common(1)[0]
131
+ candidates.append((f"Dominant texture: {dominant_role[0]}", 0.5))
132
+
133
+ # From track analysis — genre/style cues
134
+ track_names = [t.get("name", "").lower() for t in tracks]
135
+ genre_cues = _detect_genre_cues(track_names)
136
+ if genre_cues:
137
+ candidates.append((f"Style: {genre_cues}", 0.4))
138
+
139
+ if not candidates:
140
+ # Fallback: describe by track count and tempo
141
+ return ("Emerging piece — identity not yet established", 0.2)
142
+
143
+ best = max(candidates, key=lambda c: c[1])
144
+ return best
145
+
146
+
147
+ def _detect_genre_cues(track_names: list[str]) -> str:
148
+ """Simple genre/style detection from track naming patterns."""
149
+ cue_map = {
150
+ "808": "trap/hip-hop",
151
+ "kick": "beat-driven",
152
+ "pad": "atmospheric",
153
+ "strings": "orchestral",
154
+ "bass": "bass-forward",
155
+ "vocal": "vocal-driven",
156
+ "synth": "synth-based",
157
+ "guitar": "guitar-based",
158
+ "piano": "keys-driven",
159
+ "ambient": "ambient",
160
+ "drone": "drone/textural",
161
+ }
162
+ found = Counter()
163
+ for name in track_names:
164
+ for keyword, cue in cue_map.items():
165
+ if keyword in name:
166
+ found[cue] += 1
167
+
168
+ if not found:
169
+ return ""
170
+ top = found.most_common(2)
171
+ return ", ".join(c[0] for c in top)
172
+
173
+
174
+ # ── Sacred elements ───────────────────────────────────────────────
175
+
176
+
177
def _detect_sacred_elements(
    tracks: list[dict],
    motif_data: dict,
    composition: dict,
    role_graph: dict,
) -> list[SacredElement]:
    """List elements that should not be casually damaged.

    Deliberately conservative: only clearly central material — salient
    motifs, lead-role tracks, and the primary groove — gets flagged.
    """
    protected: list[SacredElement] = []

    # Motifs above the salience threshold are part of the song's identity.
    for motif in motif_data.get("motifs", []):
        if motif.get("salience", 0) <= 0.5:
            continue
        protected.append(SacredElement(
            element_type="motif",
            description=motif.get("description", motif.get("name", "motif")),
            location=motif.get("location", ""),
            salience=motif.get("salience", 0.6),
            confidence=0.7,
        ))

    # Tracks carrying the "lead" role ({track_name: {index, role}}).
    for name, entry in role_graph.items():
        if isinstance(entry, dict) and entry.get("role", "") in ("lead",):
            protected.append(SacredElement(
                element_type="texture",
                description=f"{name} (lead role)",
                location=name,
                salience=0.7,
                confidence=0.6,
            ))

    # First rhythm-looking track stands in for the primary groove.
    rhythm_keywords = ("drum", "beat", "kick", "hat", "perc")
    rhythm = [
        t for t in tracks
        if any(kw in t.get("name", "").lower() for kw in rhythm_keywords)
    ]
    if rhythm:
        protected.append(SacredElement(
            element_type="groove",
            description="Primary rhythmic foundation",
            location=rhythm[0].get("name", "drums"),
            salience=0.6,
            confidence=0.5,
        ))

    return protected
231
+
232
+
233
+ # ── Section purposes ──────────────────────────────────────────────
234
+
235
+
236
def _infer_section_purposes(
    scenes: list[dict],
    composition: dict,
) -> list[SectionPurpose]:
    """Work out what each section is trying to do emotionally.

    Prefers explicit sections from composition analysis (higher
    confidence); otherwise falls back to classifying scene names.
    """
    comp_sections = composition.get("sections", [])
    if comp_sections:
        # Analysis-backed sections carry more confidence.
        return [
            SectionPurpose(
                section_id=sec.get("id", sec.get("name", "")),
                label=sec.get("label", sec.get("name", "")),
                emotional_intent=sec.get("intent", sec.get("purpose", "")),
                energy_level=sec.get("energy", 0.5),
                is_payoff=sec.get("is_payoff", False),
                confidence=0.7,
            )
            for sec in comp_sections
        ]

    # Fallback: derive purpose from each scene's name and position.
    inferred: list[SectionPurpose] = []
    total = len(scenes)
    for idx, scene in enumerate(scenes):
        scene_name = scene.get("name", f"Scene {idx}")
        label, intent, energy, payoff = _classify_scene_name(scene_name, idx, total)
        inferred.append(SectionPurpose(
            section_id=f"scene_{idx}",
            label=label,
            emotional_intent=intent,
            energy_level=energy,
            is_payoff=payoff,
            confidence=0.4,
        ))

    return inferred
271
+
272
+
273
+ def _classify_scene_name(
274
+ name: str, index: int, total: int
275
+ ) -> tuple[str, str, float, bool]:
276
+ """Classify a scene by its name into (label, intent, energy, is_payoff)."""
277
+ name_lower = name.lower()
278
+
279
+ patterns = {
280
+ "intro": ("intro", "establish mood", 0.3, False),
281
+ "verse": ("verse", "develop narrative", 0.5, False),
282
+ "chorus": ("chorus", "deliver hook", 0.8, True),
283
+ "drop": ("drop", "peak energy release", 0.9, True),
284
+ "bridge": ("bridge", "contrast and transition", 0.5, False),
285
+ "break": ("breakdown", "reduce and create anticipation", 0.3, False),
286
+ "build": ("buildup", "create tension", 0.6, False),
287
+ "outro": ("outro", "resolve and fade", 0.2, False),
288
+ "hook": ("hook", "deliver memorable idea", 0.8, True),
289
+ }
290
+
291
+ for keyword, (label, intent, energy, payoff) in patterns.items():
292
+ if keyword in name_lower:
293
+ return label, intent, energy, payoff
294
+
295
+ # Position-based fallback
296
+ position = index / max(total - 1, 1)
297
+ if position < 0.15:
298
+ return "opening", "establish mood", 0.3, False
299
+ elif position > 0.85:
300
+ return "closing", "resolve", 0.3, False
301
+ else:
302
+ return "section", "develop", 0.5, False
303
+
304
+
305
+ # ── Energy arc ────────────────────────────────────────────────────
306
+
307
+
308
+ def _build_energy_arc(
309
+ scenes: list[dict],
310
+ sections: list[SectionPurpose],
311
+ ) -> list[float]:
312
+ """Build ordered energy levels across sections."""
313
+ if sections:
314
+ return [s.energy_level for s in sections]
315
+ return [0.5] * len(scenes) if scenes else []
316
+
317
+
318
+ # ── Open questions ────────────────────────────────────────────────
319
+
320
+
321
def _detect_open_questions(
    sections: list[SectionPurpose],
    sacred: list[SacredElement],
    identity_core: str,
    tracks: list[dict],
    composition: dict,
) -> list[OpenQuestion]:
    """Surface unresolved creative questions about the song.

    Each heuristic contributes at most one question; result order
    follows the checks below rather than a strict priority sort.
    """
    questions: list[OpenQuestion] = []

    def ask(text: str, domain: str, priority: float) -> None:
        # Keeps each heuristic body to a single readable call.
        questions.append(OpenQuestion(question=text, domain=domain, priority=priority))

    # Identity still unresolved?
    if "not yet established" in identity_core.lower():
        ask("What is this track's defining idea?", "identity", 0.9)

    # Does any section deliver a payoff?
    if sections and not any(s.is_payoff for s in sections):
        ask(
            "No section is marked as a payoff/arrival — where does the song deliver?",
            "arrangement",
            0.8,
        )

    # Single-section project with real material: is form intended?
    if len(sections) <= 1 and len(tracks) > 2:
        ask(
            "The track appears to be a single loop — is there intended form?",
            "arrangement",
            0.7,
        )

    # Nothing flagged as sacred yet?
    if not sacred:
        ask(
            "No clearly sacred elements detected — what should be preserved?",
            "identity",
            0.6,
        )

    # Common structural gap: a multi-section piece with no intro.
    if len(sections) > 3 and all(s.label != "intro" for s in sections):
        ask("No intro section — does the track need an opening?", "arrangement", 0.4)

    return questions
374
+
375
+
376
+ # ── Drift estimation ──────────────────────────────────────────────
377
+
378
+
379
+ def _estimate_drift_risk(
380
+ recent_moves: list[dict],
381
+ sacred: list[SacredElement],
382
+ ) -> float:
383
+ """Estimate how much recent edits are moving the song away from itself.
384
+
385
+ Checks two signals:
386
+ 1. Moves that touch sacred element locations (scope.track matches)
387
+ 2. Moves that were undone (kept=False) — instability signal
388
+ """
389
+ if not recent_moves:
390
+ return 0.0
391
+
392
+ sacred_locations = {e.location.lower() for e in sacred if e.location}
393
+ sacred_types = {e.element_type.lower() for e in sacred if e.element_type}
394
+ drift_signals = 0
395
+ total_moves = len(recent_moves)
396
+
397
+ for move in recent_moves:
398
+ # Check if the move's scope touches a sacred track
399
+ scope_track = move.get("scope", {}).get("track", "")
400
+ if scope_track and scope_track.lower() in sacred_locations:
401
+ drift_signals += 1
402
+ continue
403
+
404
+ # Check if move engine/intent relates to sacred element types
405
+ intent = move.get("intent", "").lower()
406
+ for stype in sacred_types:
407
+ if stype in intent:
408
+ drift_signals += 1
409
+ break
410
+
411
+ # Undone moves are a mild drift signal (instability)
412
+ if move.get("kept") is False:
413
+ drift_signals += 0.5
414
+
415
+ if total_moves == 0:
416
+ return 0.0
417
+ return min(1.0, drift_signals / max(total_moves, 1) * 1.5)
418
+
419
+
420
+ # ── Identity drift detection ─────────────────────────────────────
421
+
422
+
423
def detect_identity_drift(
    before: SongBrain,
    after: SongBrain,
) -> IdentityDrift:
    """Compare two SongBrain snapshots and score how far identity moved.

    Contributions: +0.3 for a changed identity core, +0.2 per lost
    sacred element, and the mean energy-arc shift scaled by 0.2. The
    total is clamped to 1.0 and mapped to a recommendation string.
    """
    report = IdentityDrift()

    # Did the defining idea itself change?
    if before.identity_core != after.identity_core:
        report.changed_elements.append("identity_core")
        report.drift_score += 0.3

    # Sacred elements present before but missing after count as damage.
    missing = (
        {e.description for e in before.sacred_elements}
        - {e.description for e in after.sacred_elements}
    )
    if missing:
        report.sacred_damage = list(missing)
        report.drift_score += 0.2 * len(missing)

    # Mean absolute per-section energy change over the comparable prefix.
    if before.energy_arc and after.energy_arc:
        span = min(len(before.energy_arc), len(after.energy_arc))
        if span > 0:
            mean_shift = sum(
                abs(b - a)
                for b, a in zip(before.energy_arc[:span], after.energy_arc[:span])
            ) / span
            report.energy_arc_shift = round(mean_shift, 3)
            report.drift_score += mean_shift * 0.2

    report.drift_score = min(1.0, round(report.drift_score, 3))

    # Translate the score into an actionable recommendation.
    if report.drift_score < 0.15:
        report.recommendation = "safe"
    elif report.drift_score < 0.4:
        report.recommendation = "caution"
    else:
        report.recommendation = "rollback_suggested"

    return report
465
+
466
+
467
+ # ── Helpers ───────────────────────────────────────────────────────
468
+
469
+
470
+ def _compute_brain_id(session_info: dict, scenes: list[dict]) -> str:
471
+ """Deterministic brain ID from session state."""
472
+ seed = json.dumps({
473
+ "tempo": session_info.get("tempo"),
474
+ "track_count": session_info.get("track_count"),
475
+ "scene_count": len(scenes),
476
+ }, sort_keys=True)
477
+ return hashlib.sha256(seed.encode()).hexdigest()[:12]
@@ -0,0 +1,132 @@
1
+ """SongBrain data models — pure dataclasses, zero I/O.
2
+
3
+ SongBrain is the runtime object that captures the musical identity of the
4
+ current piece. It is distinct from project topology and from cross-session
5
+ user taste.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from dataclasses import asdict, dataclass, field
11
+ from typing import Optional
12
+
13
+
14
@dataclass
class SacredElement:
    """A musical element that should not be casually damaged."""

    # One of: "motif", "texture", "groove", "progression", "timbre".
    element_type: str = ""
    # Human-readable description of the element.
    description: str = ""
    # Track/clip reference where the element lives.
    location: str = ""
    # 0-1: how central the element is to the song's identity.
    salience: float = 0.0
    # 0-1: detector confidence in this entry.
    confidence: float = 0.5

    def to_dict(self) -> dict:
        """Return a plain-dict representation of this element."""
        return asdict(self)
26
+
27
+
28
@dataclass
class SectionPurpose:
    """What a section is trying to do emotionally."""

    section_id: str = ""
    # e.g. "intro", "verse", "chorus", "bridge", "breakdown", "outro".
    label: str = ""
    # e.g. "build tension", "release", "establish mood".
    emotional_intent: str = ""
    # 0-1: relative energy of the section.
    energy_level: float = 0.5
    # True when the section should feel like an arrival point.
    is_payoff: bool = False
    # 0-1: confidence in this classification.
    confidence: float = 0.5

    def to_dict(self) -> dict:
        """Return a plain-dict representation of this section purpose."""
        return asdict(self)
41
+
42
+
43
@dataclass
class OpenQuestion:
    """An unresolved creative question about the song."""

    question: str = ""
    # One of: "arrangement", "mix", "harmony", "sound_design", "identity".
    domain: str = ""
    # 0-1: urgency of answering this question.
    priority: float = 0.5

    def to_dict(self) -> dict:
        """Return a plain-dict representation of this question."""
        return asdict(self)
53
+
54
+
55
@dataclass
class SongBrain:
    """The musical identity of the current piece.

    Built from project brain, composition analysis, motif data, phrase
    similarity, role graph, and recent accepted moves.
    """

    brain_id: str = ""

    # The strongest defining idea in the session, and how sure we are.
    identity_core: str = ""
    identity_confidence: float = 0.5  # 0-1

    # Material that should not be casually damaged.
    sacred_elements: list[SacredElement] = field(default_factory=list)

    # What each section is trying to do emotionally.
    section_purposes: list[SectionPurpose] = field(default_factory=list)

    # Ordered energy levels (0-1) across sections.
    energy_arc: list[float] = field(default_factory=list)

    # 0 = stable, 1 = drifting away from its own identity.
    identity_drift_risk: float = 0.0

    # Section ids that should feel like arrival points.
    payoff_targets: list[str] = field(default_factory=list)

    # Creative questions the song has not yet answered.
    open_questions: list[OpenQuestion] = field(default_factory=list)

    # Which data sources contributed to this snapshot.
    built_from: dict = field(default_factory=dict)

    def to_dict(self) -> dict:
        """Return a JSON-ready dict; nested models are expanded."""
        return {
            "brain_id": self.brain_id,
            "identity_core": self.identity_core,
            "identity_confidence": self.identity_confidence,
            "sacred_elements": [e.to_dict() for e in self.sacred_elements],
            "section_purposes": [s.to_dict() for s in self.section_purposes],
            "energy_arc": self.energy_arc,
            "identity_drift_risk": self.identity_drift_risk,
            "payoff_targets": self.payoff_targets,
            "open_questions": [q.to_dict() for q in self.open_questions],
            "built_from": self.built_from,
        }

    @property
    def summary(self) -> str:
        """Human-readable one-line summary of the brain's state."""
        pieces: list[str] = []
        if self.identity_core:
            pieces.append(f"Identity: {self.identity_core}")
        if self.sacred_elements:
            pieces.append(f"{len(self.sacred_elements)} sacred element(s)")
        if self.section_purposes:
            pieces.append(f"{len(self.section_purposes)} section(s)")
        if self.identity_drift_risk > 0.3:
            pieces.append(f"drift risk {self.identity_drift_risk:.0%}")
        return " | ".join(pieces) if pieces else "No identity established yet"
119
+
120
+
121
@dataclass
class IdentityDrift:
    """Result of comparing two SongBrain snapshots."""

    # 0 = identical, 1 = completely different.
    drift_score: float = 0.0
    # Names of changed top-level aspects (e.g. "identity_core").
    changed_elements: list[str] = field(default_factory=list)
    # Descriptions of sacred elements affected between snapshots.
    sacred_damage: list[str] = field(default_factory=list)
    # Mean absolute per-section energy change.
    energy_arc_shift: float = 0.0
    # "safe", "caution", or "rollback_suggested".
    recommendation: str = ""

    def to_dict(self) -> dict:
        """Return a plain-dict representation of this drift report."""
        return asdict(self)