livepilot 1.9.13 → 1.9.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/AGENTS.md +3 -3
- package/CHANGELOG.md +51 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +7 -7
- package/bin/livepilot.js +32 -8
- package/installer/install.js +21 -2
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
- package/livepilot/skills/livepilot-core/SKILL.md +81 -6
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
- package/livepilot/skills/livepilot-core/references/overview.md +3 -3
- package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
- package/livepilot/skills/livepilot-release/SKILL.md +13 -13
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +6 -3
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/curves.py +11 -3
- package/mcp_server/evaluation/__init__.py +1 -0
- package/mcp_server/evaluation/fabric.py +575 -0
- package/mcp_server/evaluation/feature_extractors.py +84 -0
- package/mcp_server/evaluation/policy.py +67 -0
- package/mcp_server/evaluation/tools.py +53 -0
- package/mcp_server/memory/__init__.py +11 -2
- package/mcp_server/memory/anti_memory.py +78 -0
- package/mcp_server/memory/promotion.py +94 -0
- package/mcp_server/memory/session_memory.py +108 -0
- package/mcp_server/memory/taste_memory.py +158 -0
- package/mcp_server/memory/technique_store.py +2 -1
- package/mcp_server/memory/tools.py +112 -0
- package/mcp_server/mix_engine/__init__.py +1 -0
- package/mcp_server/mix_engine/critics.py +299 -0
- package/mcp_server/mix_engine/models.py +152 -0
- package/mcp_server/mix_engine/planner.py +103 -0
- package/mcp_server/mix_engine/state_builder.py +316 -0
- package/mcp_server/mix_engine/tools.py +214 -0
- package/mcp_server/performance_engine/__init__.py +1 -0
- package/mcp_server/performance_engine/models.py +148 -0
- package/mcp_server/performance_engine/planner.py +267 -0
- package/mcp_server/performance_engine/safety.py +162 -0
- package/mcp_server/performance_engine/tools.py +183 -0
- package/mcp_server/project_brain/__init__.py +6 -0
- package/mcp_server/project_brain/arrangement_graph.py +64 -0
- package/mcp_server/project_brain/automation_graph.py +72 -0
- package/mcp_server/project_brain/builder.py +123 -0
- package/mcp_server/project_brain/capability_graph.py +64 -0
- package/mcp_server/project_brain/models.py +282 -0
- package/mcp_server/project_brain/refresh.py +80 -0
- package/mcp_server/project_brain/role_graph.py +103 -0
- package/mcp_server/project_brain/session_graph.py +51 -0
- package/mcp_server/project_brain/tools.py +144 -0
- package/mcp_server/reference_engine/__init__.py +1 -0
- package/mcp_server/reference_engine/gap_analyzer.py +239 -0
- package/mcp_server/reference_engine/models.py +105 -0
- package/mcp_server/reference_engine/profile_builder.py +149 -0
- package/mcp_server/reference_engine/tactic_router.py +117 -0
- package/mcp_server/reference_engine/tools.py +235 -0
- package/mcp_server/runtime/__init__.py +1 -0
- package/mcp_server/runtime/action_ledger.py +117 -0
- package/mcp_server/runtime/action_ledger_models.py +84 -0
- package/mcp_server/runtime/action_tools.py +57 -0
- package/mcp_server/runtime/capability_state.py +218 -0
- package/mcp_server/runtime/safety_kernel.py +339 -0
- package/mcp_server/runtime/safety_tools.py +42 -0
- package/mcp_server/runtime/tools.py +64 -0
- package/mcp_server/server.py +23 -1
- package/mcp_server/sound_design/__init__.py +1 -0
- package/mcp_server/sound_design/critics.py +297 -0
- package/mcp_server/sound_design/models.py +147 -0
- package/mcp_server/sound_design/planner.py +104 -0
- package/mcp_server/sound_design/tools.py +297 -0
- package/mcp_server/tools/_agent_os_engine.py +947 -0
- package/mcp_server/tools/_composition_engine.py +1530 -0
- package/mcp_server/tools/_conductor.py +199 -0
- package/mcp_server/tools/_conductor_budgets.py +222 -0
- package/mcp_server/tools/_evaluation_contracts.py +91 -0
- package/mcp_server/tools/_form_engine.py +416 -0
- package/mcp_server/tools/_motif_engine.py +351 -0
- package/mcp_server/tools/_planner_engine.py +516 -0
- package/mcp_server/tools/_research_engine.py +542 -0
- package/mcp_server/tools/_research_provider.py +185 -0
- package/mcp_server/tools/_snapshot_normalizer.py +49 -0
- package/mcp_server/tools/agent_os.py +440 -0
- package/mcp_server/tools/analyzer.py +18 -0
- package/mcp_server/tools/automation.py +25 -10
- package/mcp_server/tools/composition.py +563 -0
- package/mcp_server/tools/motif.py +104 -0
- package/mcp_server/tools/planner.py +144 -0
- package/mcp_server/tools/research.py +223 -0
- package/mcp_server/tools/tracks.py +18 -3
- package/mcp_server/tools/transport.py +10 -2
- package/mcp_server/transition_engine/__init__.py +6 -0
- package/mcp_server/transition_engine/archetypes.py +167 -0
- package/mcp_server/transition_engine/critics.py +340 -0
- package/mcp_server/transition_engine/models.py +90 -0
- package/mcp_server/transition_engine/tools.py +291 -0
- package/mcp_server/translation_engine/__init__.py +5 -0
- package/mcp_server/translation_engine/critics.py +297 -0
- package/mcp_server/translation_engine/models.py +27 -0
- package/mcp_server/translation_engine/tools.py +74 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/requirements.txt +1 -1
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
"""Role graph builder — infers musical function per track per section.
|
|
2
|
+
|
|
3
|
+
Reuses _composition_engine.build_role_graph and infer_role_for_track
|
|
4
|
+
for the heavy inference, then converts composition RoleNodes to brain
|
|
5
|
+
RoleNodes and computes an overall confidence summary.
|
|
6
|
+
|
|
7
|
+
Pure computation, zero I/O.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
from ..tools._composition_engine import (
|
|
13
|
+
SectionNode as CESectionNode,
|
|
14
|
+
SectionType as CESectionType,
|
|
15
|
+
build_role_graph as _ce_build_role_graph,
|
|
16
|
+
)
|
|
17
|
+
from .models import ConfidenceInfo, RoleGraph, RoleNode
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def build_role_graph(
    sections: list[dict],
    track_data: list[dict],
    notes_map: dict[str, dict[int, list[dict]]],
) -> RoleGraph:
    """Build a RoleGraph from brain sections, track data, and note data.

    Args:
        sections: list of brain SectionNode.to_dict() or raw dicts with
            section_id, start_bar, end_bar, section_type, energy, density.
        track_data: [{index, name, devices: [{class_name, ...}]}].
        notes_map: {section_id: {track_index: [notes]}}.

    Returns:
        RoleGraph with role assignments and overall confidence.
    """
    graph = RoleGraph()

    # Nothing to infer without both sections and tracks.
    if not sections or not track_data:
        return graph

    # Translate brain section dicts into composition-engine SectionNodes.
    ce_sections = []
    for raw_section in sections:
        sid = raw_section.get("section_id", raw_section.get("id", ""))

        # A track counts as active when it has note data in this section.
        per_track_notes = notes_map.get(sid, {})
        active = [idx for idx, notes in per_track_notes.items() if notes]

        # With no note data at all, fall back to treating every track as active.
        if not active and not notes_map:
            active = [t.get("index", 0) for t in track_data]

        try:
            section_type = CESectionType(raw_section.get("section_type", "unknown"))
        except ValueError:
            section_type = CESectionType.UNKNOWN

        ce_sections.append(
            CESectionNode(
                section_id=sid,
                start_bar=raw_section.get("start_bar", 0),
                end_bar=raw_section.get("end_bar", 0),
                section_type=section_type,
                confidence=raw_section.get("confidence", 0.5),
                energy=raw_section.get("energy", 0.0),
                density=raw_section.get("density", 0.0),
                tracks_active=active,
            )
        )

    # The heavy role inference is delegated to the composition engine.
    ce_roles = _ce_build_role_graph(ce_sections, track_data, notes_map)

    # Mirror composition RoleNodes as brain RoleNodes while accumulating
    # confidence statistics in the same pass.
    weak_nodes: list[str] = []
    total_confidence = 0.0
    for src in ce_roles:
        role_label = src.role.value if hasattr(src.role, "value") else str(src.role)
        graph.add_role(
            RoleNode(
                track_index=src.track_index,
                section_id=src.section_id,
                role=role_label,
                confidence=src.confidence,
                foreground=src.foreground,
            )
        )
        total_confidence += src.confidence
        if src.confidence < 0.5:
            weak_nodes.append(f"t{src.track_index}@{src.section_id}")

    # Overall confidence is the mean per-node confidence (0.0 for no nodes).
    graph.confidence = ConfidenceInfo(
        overall=round(total_confidence / max(len(ce_roles), 1), 3),
        low_confidence_nodes=weak_nodes,
    )

    return graph
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
"""Session graph builder — transforms raw get_session_info into SessionGraph.
|
|
2
|
+
|
|
3
|
+
Pure computation, zero I/O.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
from .models import FreshnessInfo, SessionGraph, TrackNode
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def build_session_graph(session_info: dict) -> SessionGraph:
    """Build a SessionGraph from raw get_session_info output.

    Args:
        session_info: dict from Ableton's get_session_info command.

    Returns:
        Populated SessionGraph with freshness marked fresh (revision 0).
    """
    graph = SessionGraph()

    # One TrackNode per raw entry; accept either key spelling for the
    # midi/audio capability flags.
    for entry in session_info.get("tracks", []):
        midi_capable = entry.get("has_midi_input", False) or entry.get("has_midi", False)
        audio_capable = entry.get("has_audio_input", False) or entry.get("has_audio", False)
        graph.add_track(
            TrackNode(
                index=entry.get("index", 0),
                name=entry.get("name", ""),
                has_midi=midi_capable,
                has_audio=audio_capable,
                mute=entry.get("mute", False),
                solo=entry.get("solo", False),
                arm=entry.get("arm", False),
                group_index=entry.get("group_track_index", None),
            )
        )

    # Carry over returns, scenes, and global timing metadata verbatim.
    graph.return_tracks = session_info.get("return_tracks", [])
    graph.scenes = session_info.get("scenes", [])
    graph.tempo = session_info.get("tempo", 120.0)
    numerator = session_info.get("time_signature_numerator", 4)
    denominator = session_info.get("time_signature_denominator", 4)
    graph.time_signature = f"{numerator}/{denominator}"

    # A freshly built graph starts at revision 0 and is, by definition, fresh.
    graph.freshness.mark_fresh(revision=0)

    return graph
|
|
@@ -0,0 +1,144 @@
|
|
|
1
|
+
"""Project Brain MCP tools — build and query the shared state substrate.
|
|
2
|
+
|
|
3
|
+
2 tools:
|
|
4
|
+
build_project_brain — full build from live Ableton session
|
|
5
|
+
get_project_brain_summary — lightweight summary without full rebuild
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from fastmcp import Context
|
|
11
|
+
|
|
12
|
+
from ..server import mcp
|
|
13
|
+
from .builder import build_project_state_from_data
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _get_ableton(ctx: Context):
    """Return the shared Ableton connection stored under the "ableton" key
    of the server's lifespan context.

    Raises:
        KeyError: if the lifespan context has no "ableton" entry.
    """
    return ctx.lifespan_context["ableton"]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@mcp.tool()
def build_project_brain(ctx: Context) -> dict:
    """Build a full Project Brain snapshot from the current Ableton session.

    Gathers session info, scenes, clip matrix, track infos with device data,
    builds all five subgraphs (session, arrangement, role, automation,
    capability), and returns the canonical project state.

    This is the primary entry point for engines that need a coherent view
    of the project. Call once at session start, then use scoped refreshes.
    """
    ableton = _get_ableton(ctx)

    # 1. Get session info — this is the only command allowed to fail loudly;
    #    everything below is best-effort.
    session_info = ableton.send_command("get_session_info")
    tracks = session_info.get("tracks", [])

    # 2. Get scenes info; fall back to whatever get_session_info already
    #    reported if the dedicated command fails.
    scenes = []
    try:
        scenes_resp = ableton.send_command("get_scenes_info")
        scenes = scenes_resp.get("scenes", [])
    except Exception:
        scenes = session_info.get("scenes", [])

    # 3. Get clip matrix (scene_matrix); empty on failure.
    clip_matrix = []
    try:
        matrix_resp = ableton.send_command("get_scene_matrix")
        clip_matrix = matrix_resp.get("matrix", [])
    except Exception:
        pass

    # 4. Gather per-track info with devices. A failing track still gets a
    #    placeholder entry so track_infos stays aligned with `tracks`.
    track_infos = []
    for track in tracks:
        try:
            info = ableton.send_command("get_track_info", {
                "track_index": track["index"],
            })
            track_infos.append(info)
        except Exception:
            track_infos.append({
                "index": track.get("index", 0),
                "name": track.get("name", ""),
                "devices": [],
            })

    # 5. Gather arrangement clips per track (legacy path). Tracks with no
    #    clips — or whose command fails — are simply omitted from the map.
    arrangement_clips = {}
    for track in tracks:
        try:
            arr = ableton.send_command("get_arrangement_clips", {
                "track_index": track["index"],
            })
            clips = arr.get("clips", [])
            if clips:
                arrangement_clips[track["index"]] = clips
        except Exception:
            pass

    # 6. Probe capabilities (analyzer bridge and FluCoMa availability).
    analyzer_ok = False
    analyzer_fresh = False
    flucoma_ok = False
    try:
        # Check if M4L bridge is responding via spectral cache
        bridge = ctx.lifespan_context.get("spectral_cache")
        if bridge:
            analyzer_ok = True
            # is_stale() may not exist on every cache implementation.
            analyzer_fresh = not bridge.is_stale() if hasattr(bridge, "is_stale") else False
    except Exception:
        pass

    try:
        flucoma_resp = ableton.send_command("check_flucoma")
        flucoma_ok = flucoma_resp.get("available", False)
    except Exception:
        pass

    # 7. Build state.
    # NOTE(review): scenes are forwarded only when a clip matrix was ALSO
    # obtained — presumably the builder needs both to correlate them, but
    # confirm this coupling is intentional; otherwise scene data is dropped
    # whenever get_scene_matrix fails.
    state = build_project_state_from_data(
        session_info=session_info,
        scenes=scenes if scenes and clip_matrix else None,
        clip_matrix=clip_matrix if clip_matrix else None,
        track_infos=track_infos if track_infos else None,
        arrangement_clips=arrangement_clips if arrangement_clips else None,
        analyzer_ok=analyzer_ok,
        flucoma_ok=flucoma_ok,
        session_ok=True,
        analyzer_fresh=analyzer_fresh,
        previous_revision=0,
    )

    return state.to_dict()
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
@mcp.tool()
def get_project_brain_summary(ctx: Context) -> dict:
    """Get a lightweight Project Brain summary — track count, section count, stale status.

    Faster than build_project_brain when you just need an overview.
    Builds session graph only, skips deep inference.
    """
    # Only the raw session info is fetched — no per-track or clip queries.
    connection = _get_ableton(ctx)
    raw_info = connection.send_command("get_session_info")

    state = build_project_state_from_data(
        session_info=raw_info,
        previous_revision=0,
    )

    session = state.session_graph
    summary = {
        "project_id": state.project_id,
        "revision": state.revision,
        "track_count": len(session.tracks),
        "return_track_count": len(session.return_tracks),
        "scene_count": len(session.scenes),
        "section_count": len(state.arrangement_graph.sections),
        "role_count": len(state.role_graph.roles),
        "automated_param_count": len(state.automation_graph.automated_params),
        "tempo": session.tempo,
        "time_signature": session.time_signature,
        "is_stale": state.is_stale(),
    }
    return summary
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Reference Engine V1 — reference-aware production intelligence."""
|
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
"""Gap analyzer — compute and classify gaps between project and reference.
|
|
2
|
+
|
|
3
|
+
Pure functions, zero I/O.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import math
|
|
9
|
+
|
|
10
|
+
from .models import GapEntry, GapReport, ReferenceProfile
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# ── Domain thresholds ──────────────────────────────────────────────
|
|
14
|
+
|
|
15
|
+
# Minimum delta magnitude for a gap to count as meaningful, per domain.
# Units differ by domain (loudness is in LU; the others are normalized).
_RELEVANCE_THRESHOLDS: dict[str, float] = {
    "spectral": 0.01,
    "loudness": 1.0,  # 1 LU
    "density": 0.1,
    "width": 0.05,
    "pacing": 0.15,  # pacing deltas are whole section-count differences, so any nonzero difference clears this
    "harmonic": 0.0,  # always relevant if different
}

# When a gap exceeds this fraction of the project value, closing it
# risks flattening identity.
_IDENTITY_WARNING_THRESHOLD = 0.6
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# ── Main analysis ──────────────────────────────────────────────────
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def analyze_gaps(
    project_snapshot: dict,
    reference: ReferenceProfile,
) -> GapReport:
    """Compare a project snapshot against a reference profile.

    Args:
        project_snapshot: dict with keys matching ReferenceProfile fields:
            loudness (float), spectral (dict with band_balance),
            width (float), density (float or list), pacing (list),
            harmonic_character (str).
        reference: The target ReferenceProfile.

    Returns:
        GapReport with all detected gaps.
    """
    gaps: list[GapEntry] = []
    # Plain attribute access; the previous f-string wrapper was a no-op.
    ref_id = reference.source_type

    # 1. Loudness gap — emitted unless both sides are exactly 0.0
    #    (0.0 is treated as "no measurement", not a real level).
    proj_loudness = project_snapshot.get("loudness", 0.0)
    if reference.loudness_posture != 0.0 or proj_loudness != 0.0:
        delta = proj_loudness - reference.loudness_posture
        gaps.append(GapEntry(
            domain="loudness",
            delta=round(delta, 2),
            relevant=abs(delta) >= _RELEVANCE_THRESHOLDS["loudness"],
            identity_warning=False,
            suggested_tactic=_suggest_loudness_tactic(delta),
        ))

    # 2. Spectral gaps (per-band) — union of bands present on either side;
    #    a band missing from one side contributes 0.0 energy.
    proj_spectral = project_snapshot.get("spectral", {})
    proj_bands = proj_spectral.get("band_balance", {})
    ref_bands = reference.spectral_contour.get("band_balance", {})

    all_bands = set(list(proj_bands.keys()) + list(ref_bands.keys()))
    for band in sorted(all_bands):
        proj_val = proj_bands.get(band, 0.0)
        ref_val = ref_bands.get(band, 0.0)
        delta = proj_val - ref_val
        if abs(delta) >= _RELEVANCE_THRESHOLDS["spectral"]:
            gaps.append(GapEntry(
                domain="spectral",
                delta=round(delta, 6),
                relevant=True,
                identity_warning=_is_identity_risk(proj_val, delta),
                suggested_tactic=_suggest_spectral_tactic(band, delta),
            ))

    # 3. Width gap
    proj_width = project_snapshot.get("width", 0.0)
    ref_width = reference.width_depth.get("stereo_width", 0.0)
    if proj_width != 0.0 or ref_width != 0.0:
        delta = proj_width - ref_width
        gaps.append(GapEntry(
            domain="width",
            delta=round(delta, 4),
            relevant=abs(delta) >= _RELEVANCE_THRESHOLDS["width"],
            identity_warning=_is_identity_risk(proj_width, delta),
            suggested_tactic=_suggest_width_tactic(delta),
        ))

    # 4. Density gap — a per-section density list is collapsed to its mean
    proj_density = project_snapshot.get("density", 0.0)
    if isinstance(proj_density, list):
        proj_density = sum(proj_density) / max(len(proj_density), 1)
    ref_density = (
        sum(reference.density_arc) / max(len(reference.density_arc), 1)
        if reference.density_arc
        else 0.0
    )
    if proj_density != 0.0 or ref_density != 0.0:
        delta = proj_density - ref_density
        gaps.append(GapEntry(
            domain="density",
            delta=round(delta, 3),
            relevant=abs(delta) >= _RELEVANCE_THRESHOLDS["density"],
            identity_warning=_is_identity_risk(proj_density, delta),
            suggested_tactic=_suggest_density_tactic(delta),
        ))

    # 5. Pacing gap — delta is the difference in section COUNTS, so with a
    #    0.15 threshold any nonzero count difference is relevant
    proj_pacing = project_snapshot.get("pacing", [])
    ref_pacing = reference.section_pacing
    if proj_pacing or ref_pacing:
        delta = len(proj_pacing) - len(ref_pacing)
        gaps.append(GapEntry(
            domain="pacing",
            delta=float(delta),
            relevant=abs(delta) >= _RELEVANCE_THRESHOLDS["pacing"],
            identity_warning=False,
            suggested_tactic=_suggest_pacing_tactic(delta),
        ))

    # 6. Harmonic gap — categorical; skipped entirely when the reference
    #    states no harmonic character
    proj_harmonic = project_snapshot.get("harmonic_character", "")
    ref_harmonic = reference.harmonic_character
    if ref_harmonic and proj_harmonic != ref_harmonic:
        gaps.append(GapEntry(
            domain="harmonic",
            # The guard above already guarantees a mismatch, so the delta is
            # always 1.0 (the previous conditional's else-branch was dead).
            delta=1.0,
            relevant=True,
            identity_warning=True,  # harmonic identity is core
            suggested_tactic=f"Consider {ref_harmonic} voicings",
        ))

    # Aggregate all relevant deltas into a single distance metric
    overall = _compute_overall_distance(gaps)

    return GapReport(
        reference_id=ref_id,
        gaps=gaps,
        overall_distance=round(overall, 3),
    )
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
# ── Relevance classification ──────────────────────────────────────
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def classify_gap_relevance(
    gap: GapEntry,
    goal_dimensions: list[str],
) -> bool:
    """Reclassify a gap's relevance against the user's stated goal dimensions.

    Args:
        gap: The GapEntry to classify.
        goal_dimensions: list of domain names the user cares about
            (e.g. ["spectral", "width"]).

    Returns:
        True if the gap is relevant to the goal.
    """
    # An explicit goal narrows relevance to exactly the named domains;
    # otherwise the analyzer's original classification stands.
    if goal_dimensions:
        return gap.domain in goal_dimensions
    return gap.relevant
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
# ── Identity warnings ──────────────────────────────────────────────
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def detect_identity_warnings(gaps: list[GapEntry]) -> list[str]:
    """Detect gaps where closing them would destroy project identity.

    Returns human-readable warning strings.
    """
    # One warning per flagged gap, in the order the gaps appear.
    return [
        f"[{flagged.domain}] delta={flagged.delta:+.3f}: closing this gap "
        f"risks flattening your project's unique {flagged.domain} character"
        for flagged in gaps
        if flagged.identity_warning
    ]
|
|
189
|
+
|
|
190
|
+
|
|
191
|
+
# ── Internal helpers ──────────────────────────────────────────────
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _is_identity_risk(project_value: float, delta: float) -> bool:
    """Return True when the gap is so large relative to the project's own
    value that closing it would significantly alter character."""
    magnitude = abs(project_value)
    # A near-zero project value has no character to protect (and would
    # divide by zero), so it is never flagged.
    if magnitude < 1e-9:
        return False
    return abs(delta) / magnitude > _IDENTITY_WARNING_THRESHOLD
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def _compute_overall_distance(gaps: list[GapEntry]) -> float:
|
|
203
|
+
"""Euclidean-like distance across all relevant gap deltas."""
|
|
204
|
+
relevant = [g for g in gaps if g.relevant]
|
|
205
|
+
if not relevant:
|
|
206
|
+
return 0.0
|
|
207
|
+
sum_sq = sum(g.delta ** 2 for g in relevant)
|
|
208
|
+
return math.sqrt(sum_sq)
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
def _suggest_loudness_tactic(delta: float) -> str:
|
|
212
|
+
if delta > 2.0:
|
|
213
|
+
return "Reduce master gain or limiter ceiling"
|
|
214
|
+
elif delta < -2.0:
|
|
215
|
+
return "Increase gain staging or limiter drive"
|
|
216
|
+
return "Loudness is close — fine-tune with limiter"
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _suggest_spectral_tactic(band: str, delta: float) -> str:
|
|
220
|
+
direction = "cut" if delta > 0 else "boost"
|
|
221
|
+
return f"EQ {direction} in {band} range"
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def _suggest_width_tactic(delta: float) -> str:
|
|
225
|
+
if delta > 0:
|
|
226
|
+
return "Narrow stereo image — check Utility width or mono bass"
|
|
227
|
+
return "Widen stereo image — try chorus, haas, or panning spread"
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
def _suggest_density_tactic(delta: float) -> str:
|
|
231
|
+
if delta > 0:
|
|
232
|
+
return "Thin arrangement — mute or remove layers"
|
|
233
|
+
return "Add layers or textural elements for density"
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _suggest_pacing_tactic(delta: float) -> str:
|
|
237
|
+
if delta > 0:
|
|
238
|
+
return "Consolidate sections — fewer, longer sections"
|
|
239
|
+
return "Add more section variety or transitions"
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
"""Reference Engine state models — all dataclasses with to_dict().
|
|
2
|
+
|
|
3
|
+
Pure data structures for reference profiles, gap reports, and tactic plans.
|
|
4
|
+
Zero I/O.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from dataclasses import asdict, dataclass, field
|
|
10
|
+
from typing import Optional
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# ── Reference Profile ──────────────────────────────────────────────
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
@dataclass
class ReferenceProfile:
    """Multi-dimensional snapshot of a reference target.

    Built from audio analysis, style tactic data, user descriptions,
    or project-internal section comparisons.
    """

    source_type: str = "audio"  # "audio", "style", "user_described", "internal_section"
    loudness_posture: float = 0.0  # integrated LUFS
    spectral_contour: dict = field(default_factory=dict)  # band_balance + centroid
    width_depth: dict = field(default_factory=dict)  # stereo width, depth hints
    density_arc: list[float] = field(default_factory=list)  # per-section density 0-1
    section_pacing: list[dict] = field(default_factory=list)  # [{label, bars, energy}]
    harmonic_character: str = ""  # e.g. "minor_modal", "chromatic", "diatonic_major"
    transition_tendencies: list[str] = field(default_factory=list)  # gesture names

    def to_dict(self) -> dict:
        """Serialize to a plain dict (recursive, via dataclasses.asdict)."""
        return asdict(self)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
# ── Gap Entry / Report ─────────────────────────────────────────────
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class GapEntry:
    """A single measured difference between project and reference."""

    domain: str = ""  # "spectral", "loudness", "density", "width", "pacing", "harmonic"
    delta: float = 0.0  # signed difference (project - reference)
    relevant: bool = True  # whether the user's goal cares about this
    identity_warning: bool = False  # closing this gap risks flattening identity
    suggested_tactic: str = ""  # short tactic label

    def to_dict(self) -> dict:
        """Serialize to a plain dict (via dataclasses.asdict)."""
        return asdict(self)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class GapReport:
    """All gaps between a project snapshot and a reference profile."""

    reference_id: str = ""
    gaps: list[GapEntry] = field(default_factory=list)
    overall_distance: float = 0.0  # aggregate distance metric

    @property
    def relevant_gaps(self) -> list[GapEntry]:
        """Return only gaps marked as relevant to the user's goal."""
        return [entry for entry in self.gaps if entry.relevant]

    @property
    def identity_warnings(self) -> list[str]:
        """Return warning messages for gaps that threaten project identity."""
        return [
            f"Closing the {entry.domain} gap (delta={entry.delta:+.2f}) "
            f"may flatten your project's identity"
            for entry in self.gaps
            if entry.identity_warning
        ]

    def to_dict(self) -> dict:
        """Serialize the report, including the derived views."""
        serialized_gaps = [entry.to_dict() for entry in self.gaps]
        serialized_relevant = [entry.to_dict() for entry in self.relevant_gaps]
        return {
            "reference_id": self.reference_id,
            "gaps": serialized_gaps,
            "relevant_gaps": serialized_relevant,
            "identity_warnings": self.identity_warnings,
            "overall_distance": self.overall_distance,
        }
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
# ── Reference Plan ─────────────────────────────────────────────────
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
@dataclass
class ReferencePlan:
    """Actionable plan derived from a gap report."""

    gap_report: GapReport = field(default_factory=GapReport)  # the gaps this plan addresses
    ranked_tactics: list[dict] = field(default_factory=list)  # tactic dicts — presumably highest priority first; confirm with the tactic router
    target_engines: list[str] = field(default_factory=list)  # names of engines expected to execute the tactics

    def to_dict(self) -> dict:
        """Serialize to a plain dict.

        The list fields are shallow-copied so callers cannot mutate the
        plan's internal state through the returned dict.
        """
        return {
            "gap_report": self.gap_report.to_dict(),
            "ranked_tactics": list(self.ranked_tactics),
            "target_engines": list(self.target_engines),
        }
|