livepilot 1.9.21 → 1.9.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/.mcpbignore +40 -0
- package/AGENTS.md +2 -2
- package/CHANGELOG.md +47 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +47 -72
- package/bin/livepilot.js +135 -0
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
- package/livepilot/commands/arrange.md +42 -14
- package/livepilot/commands/beat.md +68 -21
- package/livepilot/commands/evaluate.md +23 -13
- package/livepilot/commands/mix.md +35 -11
- package/livepilot/commands/perform.md +31 -19
- package/livepilot/commands/sounddesign.md +38 -17
- package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
- package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
- package/livepilot/skills/livepilot-core/SKILL.md +60 -4
- package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
- package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
- package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
- package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
- package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
- package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
- package/livepilot/skills/livepilot-core/references/overview.md +4 -4
- package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
- package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
- package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
- package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
- package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
- package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
- package/livepilot/skills/livepilot-release/SKILL.md +15 -15
- package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
- package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
- package/livepilot.mcpb +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/manifest.json +91 -0
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/creative_constraints/__init__.py +6 -0
- package/mcp_server/creative_constraints/engine.py +277 -0
- package/mcp_server/creative_constraints/models.py +75 -0
- package/mcp_server/creative_constraints/tools.py +341 -0
- package/mcp_server/experiment/__init__.py +6 -0
- package/mcp_server/experiment/engine.py +213 -0
- package/mcp_server/experiment/models.py +120 -0
- package/mcp_server/experiment/tools.py +263 -0
- package/mcp_server/hook_hunter/__init__.py +5 -0
- package/mcp_server/hook_hunter/analyzer.py +342 -0
- package/mcp_server/hook_hunter/models.py +57 -0
- package/mcp_server/hook_hunter/tools.py +586 -0
- package/mcp_server/memory/taste_graph.py +261 -0
- package/mcp_server/memory/tools.py +88 -0
- package/mcp_server/mix_engine/critics.py +2 -2
- package/mcp_server/mix_engine/models.py +1 -1
- package/mcp_server/mix_engine/state_builder.py +2 -2
- package/mcp_server/musical_intelligence/__init__.py +8 -0
- package/mcp_server/musical_intelligence/detectors.py +421 -0
- package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
- package/mcp_server/musical_intelligence/tools.py +221 -0
- package/mcp_server/preview_studio/__init__.py +5 -0
- package/mcp_server/preview_studio/engine.py +280 -0
- package/mcp_server/preview_studio/models.py +73 -0
- package/mcp_server/preview_studio/tools.py +423 -0
- package/mcp_server/runtime/session_kernel.py +96 -0
- package/mcp_server/runtime/tools.py +90 -1
- package/mcp_server/semantic_moves/__init__.py +13 -0
- package/mcp_server/semantic_moves/compiler.py +116 -0
- package/mcp_server/semantic_moves/mix_compilers.py +291 -0
- package/mcp_server/semantic_moves/mix_moves.py +157 -0
- package/mcp_server/semantic_moves/models.py +46 -0
- package/mcp_server/semantic_moves/performance_compilers.py +208 -0
- package/mcp_server/semantic_moves/performance_moves.py +81 -0
- package/mcp_server/semantic_moves/registry.py +32 -0
- package/mcp_server/semantic_moves/resolvers.py +126 -0
- package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
- package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
- package/mcp_server/semantic_moves/tools.py +204 -0
- package/mcp_server/semantic_moves/transition_compilers.py +222 -0
- package/mcp_server/semantic_moves/transition_moves.py +76 -0
- package/mcp_server/server.py +10 -0
- package/mcp_server/session_continuity/__init__.py +6 -0
- package/mcp_server/session_continuity/models.py +86 -0
- package/mcp_server/session_continuity/tools.py +230 -0
- package/mcp_server/session_continuity/tracker.py +235 -0
- package/mcp_server/song_brain/__init__.py +6 -0
- package/mcp_server/song_brain/builder.py +477 -0
- package/mcp_server/song_brain/models.py +132 -0
- package/mcp_server/song_brain/tools.py +294 -0
- package/mcp_server/stuckness_detector/__init__.py +5 -0
- package/mcp_server/stuckness_detector/detector.py +400 -0
- package/mcp_server/stuckness_detector/models.py +66 -0
- package/mcp_server/stuckness_detector/tools.py +195 -0
- package/mcp_server/tools/_conductor.py +104 -6
- package/mcp_server/tools/analyzer.py +1 -1
- package/mcp_server/tools/devices.py +34 -0
- package/mcp_server/wonder_mode/__init__.py +6 -0
- package/mcp_server/wonder_mode/diagnosis.py +84 -0
- package/mcp_server/wonder_mode/engine.py +493 -0
- package/mcp_server/wonder_mode/session.py +114 -0
- package/mcp_server/wonder_mode/tools.py +285 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +4 -1
- package/remote_script/LivePilot/devices.py +29 -0
- package/remote_script/LivePilot/tracks.py +11 -4
- package/scripts/generate_tool_catalog.py +131 -0
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
"""Musical intelligence detectors — pure computation, no I/O.
|
|
2
|
+
|
|
3
|
+
Each detector takes session data dicts and returns structured findings.
|
|
4
|
+
These feed into arrangement, transition, and diagnostic workflows.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from collections import Counter, defaultdict
|
|
10
|
+
from dataclasses import dataclass, field
|
|
11
|
+
from typing import Optional
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
15
|
+
# Repetition Fatigue
|
|
16
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
17
|
+
|
|
18
|
+
@dataclass
class FatigueReport:
    """Structured summary of repetition fatigue across the arrangement.

    fatigue_level runs from 0.0 (fresh material) to 1.0 (extremely
    fatigued); issues, section_staleness, and recommendations carry the
    supporting detail.
    """
    fatigue_level: float = 0.0  # 0 = fresh, 1 = extremely fatigued
    issues: list[dict] = field(default_factory=list)
    section_staleness: dict[str, float] = field(default_factory=dict)
    recommendations: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Serialize the report for tool/JSON output."""
        payload = {
            "fatigue_level": round(self.fatigue_level, 3),
            "issue_count": len(self.issues),
        }
        payload["issues"] = self.issues
        payload["section_staleness"] = self.section_staleness
        payload["recommendations"] = self.recommendations
        return payload
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def detect_repetition_fatigue(
    scenes: list[dict],
    motif_graph: Optional[dict] = None,
) -> FatigueReport:
    """Detect repetition fatigue from scene/clip data.

    Analyzes:
    - How many scenes share the same clips (pattern reuse)
    - Motif overuse from motif_graph if available
    - Density stability (everything at same level = fatiguing)

    Args:
        scenes: list of scene dicts with clip names per track
        motif_graph: optional output from get_motif_graph

    Returns:
        A FatigueReport with a 0-1 fatigue_level, per-issue details,
        per-scene staleness scores, and actionable recommendations.
    """
    report = FatigueReport()

    if not scenes:
        return report

    # 1. Clip reuse across scenes: count how often each named clip appears.
    clip_usage = Counter()
    for scene in scenes:
        clips = scene.get("clips", [])
        if isinstance(clips, list):
            for clip in clips:
                name = clip.get("name", "") if isinstance(clip, dict) else str(clip)
                if name:
                    clip_usage[name] += 1

    overused = {name: count for name, count in clip_usage.items() if count >= 3}
    if overused:
        report.issues.append({
            "type": "clip_overuse",
            "severity": min(0.8, len(overused) * 0.15),
            "detail": f"{len(overused)} clip(s) used 3+ times",
            "clips": dict(overused),
        })

    # 2. Scene similarity: a scene whose sorted clip-name tuple was already
    # produced by an earlier scene counts as a duplicate. A seen-set makes
    # this O(n) instead of the previous O(n^2) list.index() scan.
    scene_fingerprints = []
    for scene in scenes:
        clips = scene.get("clips", [])
        names = sorted(
            (c.get("name", "") if isinstance(c, dict) else str(c))
            for c in (clips if isinstance(clips, list) else [])
            if (c.get("name", "") if isinstance(c, dict) else str(c))
        )
        scene_fingerprints.append(tuple(names))

    seen: set[tuple] = set()
    duplicate_scenes = 0
    for fp in scene_fingerprints:
        # Empty fingerprints (scenes with no named clips) never count.
        if fp:
            if fp in seen:
                duplicate_scenes += 1
            else:
                seen.add(fp)
    if duplicate_scenes > 0:
        report.issues.append({
            "type": "duplicate_scenes",
            "severity": min(0.7, duplicate_scenes * 0.2),
            "detail": f"{duplicate_scenes} scene(s) are identical to earlier ones",
        })

    # 3. Motif fatigue, if the caller supplied a motif graph.
    if motif_graph:
        motifs = motif_graph.get("motifs", [])
        for motif in motifs:
            fatigue_risk = motif.get("fatigue_risk", 0)
            if fatigue_risk > 0.6:
                report.issues.append({
                    "type": "motif_overuse",
                    "severity": fatigue_risk,
                    "detail": f"Motif {motif.get('motif_id', '?')} fatigue risk {fatigue_risk:.2f}",
                    "motif_id": motif.get("motif_id"),
                })

    # 4. Section staleness: average reuse of a scene's clips relative to the
    # whole set. NOTE(review): unlike passes 1-2, non-dict clips are treated
    # as unnamed here ("" instead of str(clip)) — preserved as-is; confirm
    # whether that asymmetry is intentional.
    for i, scene in enumerate(scenes):
        name = scene.get("name", f"Scene {i}")
        if not name:
            continue
        clips = scene.get("clips", [])
        clip_names = [
            (c.get("name", "") if isinstance(c, dict) else "")
            for c in (clips if isinstance(clips, list) else [])
        ]
        reuse_count = sum(clip_usage.get(n, 0) for n in clip_names if n)
        # total is clamped to >= 1, so the division below is always safe.
        total = max(1, len([n for n in clip_names if n]))
        staleness = min(1.0, (reuse_count / total - 1) * 0.3)
        report.section_staleness[name] = round(max(0, staleness), 3)

    # Overall fatigue level: mean severity across issues, clamped to 1.0.
    if report.issues:
        report.fatigue_level = min(
            1.0, sum(i["severity"] for i in report.issues) / max(1, len(report.issues))
        )

    # Recommendations keyed off the aggregate findings.
    if report.fatigue_level > 0.5:
        report.recommendations.append("Add variation clips to overused patterns")
        report.recommendations.append("Use transform_motif (inversion, retrograde) to refresh stale melodic ideas")
    if duplicate_scenes > 1:
        report.recommendations.append("Create unique clip variations for duplicate scenes")
    if report.fatigue_level > 0.3:
        report.recommendations.append("Add perlin automation for organic movement within loops")

    return report
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
141
|
+
# Role Conflict Detection
|
|
142
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
143
|
+
|
|
144
|
+
@dataclass
class RoleConflict:
    """A detected conflict where multiple tracks compete for the same musical role."""
    role: str
    tracks: list[dict]  # each entry: {"index": ..., "name": ...}
    severity: float = 0.0
    recommendation: str = ""

    def to_dict(self) -> dict:
        """Serialize for tool output; severity is rounded to 3 places."""
        return dict(
            role=self.role,
            tracks=self.tracks,
            severity=round(self.severity, 3),
            recommendation=self.recommendation,
        )
|
|
159
|
+
|
|
160
|
+
|
|
161
|
+
def detect_role_conflicts(
    tracks: list[dict],
    role_fn=None,
) -> list[RoleConflict]:
    """Detect tracks competing for the same musical role.

    Roles that should be unique: sub_anchor (only 1 bass), foreground
    (only 1 lead), transient_anchor (only 1 main drum track).

    tracks: list of track dicts with at least 'name' and 'index'
    role_fn: optional function(track_name) -> role_str
    """
    if role_fn is None:
        from ..semantic_moves.resolvers import infer_role
        role_fn = infer_role

    # Bucket tracks by their inferred role; "unknown" roles are ignored.
    by_role: dict[str, list[dict]] = defaultdict(list)
    for entry in tracks:
        track_name = entry.get("name", "")
        inferred = role_fn(track_name)
        if inferred == "unknown":
            continue
        by_role[inferred].append({
            "index": entry.get("index", 0),
            "name": track_name,
        })

    # Roles that should be held by exactly one track. The first tuple item
    # is a human-readable description of the conflict (currently unused);
    # the second is the recommendation surfaced to the caller.
    UNIQUE_ROLES = {
        "bass": ("Sub/bass conflict — multiple bass tracks compete for the low end",
                 "Consider merging bass parts or using EQ to give each a distinct range"),
        "lead": ("Lead conflict — multiple foreground melodies compete for attention",
                 "Mute one lead or use arrangement to alternate them across sections"),
        "drums": ("Drum conflict — multiple drum tracks may mask each other's transients",
                  "Layer drum parts into one Drum Rack or pan them apart"),
    }

    findings = []
    for unique_role, (_desc, advice) in UNIQUE_ROLES.items():
        contenders = by_role.get(unique_role, [])
        if len(contenders) <= 1:
            continue
        findings.append(RoleConflict(
            role=unique_role,
            tracks=contenders,
            # 0.3 base plus 0.2 per extra contender, capped at 0.9.
            severity=min(0.9, 0.3 + (len(contenders) - 1) * 0.2),
            recommendation=advice,
        ))

    # A missing essential role is also reported (empty track list).
    essential = {"bass", "drums"}
    for needed in essential:
        if needed not in by_role:
            findings.append(RoleConflict(
                role=needed,
                tracks=[],
                severity=0.3,
                recommendation=f"No {needed} track detected — the mix may lack foundation",
            ))

    return findings
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
225
|
+
# Section Purpose Inference
|
|
226
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
227
|
+
|
|
228
|
+
@dataclass
class SectionPurpose:
    """Inferred musical purpose of a section/scene."""
    name: str
    # One of: setup | tension | payoff | contrast | release | outro |
    # development | unknown
    purpose: str
    energy: float = 0.0  # 0-1
    density: float = 0.0  # 0-1: fraction of tracks with active clips
    confidence: float = 0.5  # heuristic confidence in the inferred purpose

    def to_dict(self) -> dict:
        """Serialize with all float fields rounded to 3 decimal places."""
        rounded = {
            key: round(getattr(self, key), 3)
            for key in ("energy", "density", "confidence")
        }
        return {"name": self.name, "purpose": self.purpose, **rounded}
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
def infer_section_purposes(
    scenes: list[dict],
    total_tracks: int = 6,
) -> list[SectionPurpose]:
    """Infer the musical purpose of each scene based on density and position.

    Heuristics:
    - Low density at start → setup/intro
    - Rising density → tension/build
    - Maximum density → payoff/drop
    - Sudden density drop → contrast/breakdown
    - Low density at end → release/outro
    - Falling density at the very end → outro/dissolve

    scenes: list of scene dicts with name and clip count
    total_tracks: total track count for density calculation
    """
    if not scenes:
        return []

    # Density = fraction of tracks holding a non-empty clip in the scene.
    densities: list[float] = []
    for scn in scenes:
        clip_list = scn.get("clips", [])
        active_count = 0
        if isinstance(clip_list, list):
            for entry in clip_list:
                if isinstance(entry, dict) and entry.get("state") not in ("empty", None):
                    active_count += 1
        densities.append(active_count / max(1, total_tracks))

    last_index = max(1, len(scenes) - 1)
    inferred: list[SectionPurpose] = []

    for idx, scn in enumerate(scenes):
        level = densities[idx]
        pos = idx / last_index  # 0 = first scene, 1 = last
        # Change relative to the previous scene (first scene compares to 0).
        delta = level - (densities[idx - 1] if idx > 0 else 0)

        # Heuristic decision ladder — order matters: earlier rules win.
        if pos < 0.15 and level < 0.5:
            verdict = ("setup", 0.7)
        elif delta > 0.2:
            verdict = ("tension", 0.6)
        elif level >= 0.8:
            verdict = ("payoff", 0.65)
        elif delta < -0.3:
            verdict = ("contrast", 0.6)
        elif pos > 0.8 and level < 0.5:
            verdict = ("release", 0.65)
        elif pos > 0.85 and delta < 0:
            verdict = ("outro", 0.6)
        else:
            verdict = ("development", 0.4)

        inferred.append(SectionPurpose(
            name=scn.get("name", f"Scene {idx}"),
            purpose=verdict[0],
            energy=level,  # energy mirrors density in this heuristic
            density=level,
            confidence=verdict[1],
        ))

    return inferred
|
|
323
|
+
|
|
324
|
+
|
|
325
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
326
|
+
# Emotional Arc Scoring
|
|
327
|
+
# ═══════════════════════════════════════════════════════════════════════
|
|
328
|
+
|
|
329
|
+
@dataclass
class ArcScore:
    """Score for the overall emotional arc of the arrangement."""
    arc_clarity: float = 0.0      # how clear the build → climax → resolve shape is
    contrast: float = 0.0         # how different sections are from each other
    payoff_strength: float = 0.0  # whether the climax feels earned
    resolution: float = 0.0       # whether the ending resolves tension
    issues: list[str] = field(default_factory=list)

    @property
    def overall(self) -> float:
        """Unweighted mean of the four component scores, rounded to 3 places."""
        components = (self.arc_clarity, self.contrast, self.payoff_strength, self.resolution)
        return round(sum(components) / 4, 3)

    def to_dict(self) -> dict:
        """Serialize for tool output with components rounded to 3 places."""
        result = {"overall": self.overall}
        for attr in ("arc_clarity", "contrast", "payoff_strength", "resolution"):
            result[attr] = round(getattr(self, attr), 3)
        result["issues"] = self.issues
        return result
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def score_emotional_arc(sections: list[SectionPurpose]) -> ArcScore:
    """Score the emotional arc from inferred section purposes.

    Checks for:
    - Build before payoff (tension should precede climax)
    - Variety of purposes (not all the same energy level)
    - Resolution at the end (shouldn't end at peak tension)
    - Clear climax point (should have at least one payoff section)
    """
    score = ArcScore()

    if not sections:
        score.issues.append("No sections to analyze")
        return score

    purposes = [sec.purpose for sec in sections]
    energies = [sec.energy for sec in sections]

    # Arc clarity: one quarter point per arc stage that is present.
    has_setup = "setup" in purposes
    has_tension = "tension" in purposes
    has_payoff = "payoff" in purposes
    has_release = ("release" in purposes) or ("outro" in purposes)
    score.arc_clarity = sum([has_setup, has_tension, has_payoff, has_release]) / 4

    if not has_payoff:
        score.issues.append("No clear climax/payoff section")
    if not (has_setup or has_tension):
        score.issues.append("No build — payoff arrives without anticipation")

    # Contrast: spread of section energies, scaled up and capped at 1.0.
    if len(energies) < 2:
        score.contrast = 0.0
    else:
        spread = max(energies) - min(energies)
        score.contrast = min(1.0, spread * 1.5)
        if spread < 0.2:
            score.issues.append("Low contrast — sections are too similar in energy")

    # Payoff strength: the section before the first payoff should be quieter.
    if not has_payoff:
        score.payoff_strength = 0.0
    else:
        climax = purposes.index("payoff")
        earned = climax > 0 and sections[climax - 1].energy < sections[climax].energy
        if earned:
            score.payoff_strength = 0.8
        else:
            score.payoff_strength = 0.4
            score.issues.append("Payoff doesn't feel earned — no energy build before it")

    # Resolution: the final section should sit well below the peak energy.
    if len(energies) < 3:
        score.resolution = 0.3
    else:
        final_energy = energies[-1]
        peak_energy = max(energies)
        if final_energy < peak_energy * 0.7:
            score.resolution = 0.8
        elif final_energy < peak_energy:
            score.resolution = 0.5
        else:
            score.resolution = 0.2
            score.issues.append("No resolution — ending at or near peak energy")

    return score
|
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""Phrase-level evaluation — judges musical phrases, not just parameter deltas.
|
|
2
|
+
|
|
3
|
+
Operates on 8-16 bar windows. Analyzes arc clarity, contrast, fatigue risk,
|
|
4
|
+
payoff strength, and translation risk from audio captures and spectral data.
|
|
5
|
+
|
|
6
|
+
Pure computation — receives analysis data, returns structured critique.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from typing import Optional
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class PhraseCritique:
    """Evaluation of a rendered musical phrase."""
    render_id: str = ""
    arc_clarity: float = 0.0        # clarity of the phrase's tension shape
    contrast: float = 0.0           # difference between beginning and end
    fatigue_risk: float = 0.0       # how repetitive the material is
    payoff_strength: float = 0.0    # whether the phrase delivers on its promise
    identity_strength: float = 0.0  # distinctiveness vs other phrases
    translation_risk: float = 0.0   # likelihood of sounding bad on small speakers
    notes: list[str] = field(default_factory=list)

    @property
    def overall(self) -> float:
        """Mean of the six components; the two risk scores are inverted first."""
        components = [
            self.arc_clarity,
            self.contrast,
            1.0 - self.fatigue_risk,
            self.payoff_strength,
            self.identity_strength,
            1.0 - self.translation_risk,
        ]
        return round(sum(components) / len(components), 3)

    def to_dict(self) -> dict:
        """Serialize for tool output with every score rounded to 3 places."""
        out = {"render_id": self.render_id, "overall": self.overall}
        for attr in (
            "arc_clarity", "contrast", "fatigue_risk",
            "payoff_strength", "identity_strength", "translation_risk",
        ):
            out[attr] = round(getattr(self, attr), 3)
        out["notes"] = self.notes
        return out
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def analyze_phrase(
    loudness_data: Optional[dict] = None,
    spectrum_data: Optional[dict] = None,
    target: str = "loop",
) -> PhraseCritique:
    """Analyze a captured phrase from loudness and spectral data.

    loudness_data: output from analyze_loudness (LUFS, LRA, peak, short_term_lufs)
    spectrum_data: output from analyze_spectrum_offline (centroid, rolloff, balance)
    target: what the phrase is supposed to be: "loop", "drop", "chorus",
        "transition", "intro", "outro"
    """
    critique = PhraseCritique()

    # Nothing to judge without at least one analysis payload.
    if not loudness_data and not spectrum_data:
        critique.notes.append("No analysis data — capture audio first")
        return critique

    if loudness_data:
        _score_loudness(critique, loudness_data)
    if spectrum_data:
        _score_spectrum(critique, spectrum_data)

    # Payoff expectation is a fixed prior keyed on the target type:
    # drops and choruses must deliver, intros/outros barely need to.
    critique.payoff_strength = {
        "drop": 0.8,
        "chorus": 0.7,
        "loop": 0.5,
        "transition": 0.4,
        "intro": 0.3,
        "outro": 0.3,
    }.get(target, 0.5)

    return critique


def _score_loudness(critique: PhraseCritique, loudness: dict) -> None:
    """Fill in arc clarity, fatigue risk, and translation risk from loudness data."""
    short_term = loudness.get("short_term_lufs", [])
    if len(short_term) >= 3:
        swing = max(short_term) - min(short_term)
        # A readable arc shows 2-8 LU of short-term variation.
        if 2 <= swing <= 8:
            critique.arc_clarity = 0.8
        elif swing > 8:
            critique.arc_clarity = 0.5
            critique.notes.append("Loudness variation too extreme — may feel chaotic")
        else:
            critique.arc_clarity = 0.3 + swing * 0.1
            if swing < 1:
                critique.notes.append("Very flat dynamics — phrase sounds static")

    # Loudness range: tiny LRA reads as relentless repetition.
    lra = loudness.get("lra_lu", 0)
    if lra < 1:
        critique.fatigue_risk = 0.8
        critique.notes.append(f"LRA {lra:.1f} LU — extremely repetitive")
    elif lra < 3:
        critique.fatigue_risk = 0.5
    else:
        critique.fatigue_risk = max(0, 0.3 - lra * 0.03)

    # True peak close to full scale risks clipping on consumer playback.
    peak = loudness.get("true_peak_dbtp", 0)
    if peak > -1:
        critique.translation_risk = 0.7
        critique.notes.append(f"True peak {peak:.1f} dBTP — clipping risk on playback")
    elif peak > -3:
        critique.translation_risk = 0.3
    else:
        critique.translation_risk = 0.1


def _score_spectrum(critique: PhraseCritique, spectrum: dict) -> None:
    """Fill in identity strength and contrast from band balance and centroid."""
    bands = spectrum.get("band_balance", {})
    sub = bands.get("sub_60hz", 0)
    mid = bands.get("mid_2khz", 0)
    high = bands.get("high_8khz", 0)

    # Identity strength: which band dominates the spectral shape?
    if sub > 0.5:
        critique.identity_strength = 0.6
        critique.notes.append("Sub-heavy identity — bass-driven phrase")
    elif mid > 0.5:
        critique.identity_strength = 0.7
        critique.notes.append("Mid-focused — melodic/harmonic identity")
    elif high > 0.3:
        critique.identity_strength = 0.5
        critique.notes.append("Bright character — texture-driven")
    else:
        critique.identity_strength = 0.4

    # Spectral centroid as a rough proxy for brightness/contrast.
    centroid = spectrum.get("centroid_hz", 500)
    if centroid < 200:
        critique.contrast = 0.3
        critique.notes.append("Very dark — limited spectral contrast")
    elif centroid > 2000:
        critique.contrast = 0.6
    else:
        critique.contrast = 0.5
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def compare_phrases(critiques: list[PhraseCritique]) -> list[dict]:
    """Rank multiple phrase critiques from best to worst overall score."""
    ordered = sorted(critiques, key=lambda item: item.overall, reverse=True)
    summaries = []
    for position, item in enumerate(ordered, start=1):
        summaries.append({
            "rank": position,
            "render_id": item.render_id,
            "overall": item.overall,
            "arc_clarity": item.arc_clarity,
            "fatigue_risk": item.fatigue_risk,
            "notes": item.notes[:3],  # cap at the three most relevant notes
        })
    return summaries
|