livepilot 1.9.13 → 1.9.15
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/AGENTS.md +3 -3
- package/CHANGELOG.md +51 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +7 -7
- package/bin/livepilot.js +32 -8
- package/installer/install.js +21 -2
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
- package/livepilot/skills/livepilot-core/SKILL.md +81 -6
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
- package/livepilot/skills/livepilot-core/references/overview.md +3 -3
- package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
- package/livepilot/skills/livepilot-release/SKILL.md +13 -13
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +6 -3
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/curves.py +11 -3
- package/mcp_server/evaluation/__init__.py +1 -0
- package/mcp_server/evaluation/fabric.py +575 -0
- package/mcp_server/evaluation/feature_extractors.py +84 -0
- package/mcp_server/evaluation/policy.py +67 -0
- package/mcp_server/evaluation/tools.py +53 -0
- package/mcp_server/memory/__init__.py +11 -2
- package/mcp_server/memory/anti_memory.py +78 -0
- package/mcp_server/memory/promotion.py +94 -0
- package/mcp_server/memory/session_memory.py +108 -0
- package/mcp_server/memory/taste_memory.py +158 -0
- package/mcp_server/memory/technique_store.py +2 -1
- package/mcp_server/memory/tools.py +112 -0
- package/mcp_server/mix_engine/__init__.py +1 -0
- package/mcp_server/mix_engine/critics.py +299 -0
- package/mcp_server/mix_engine/models.py +152 -0
- package/mcp_server/mix_engine/planner.py +103 -0
- package/mcp_server/mix_engine/state_builder.py +316 -0
- package/mcp_server/mix_engine/tools.py +214 -0
- package/mcp_server/performance_engine/__init__.py +1 -0
- package/mcp_server/performance_engine/models.py +148 -0
- package/mcp_server/performance_engine/planner.py +267 -0
- package/mcp_server/performance_engine/safety.py +162 -0
- package/mcp_server/performance_engine/tools.py +183 -0
- package/mcp_server/project_brain/__init__.py +6 -0
- package/mcp_server/project_brain/arrangement_graph.py +64 -0
- package/mcp_server/project_brain/automation_graph.py +72 -0
- package/mcp_server/project_brain/builder.py +123 -0
- package/mcp_server/project_brain/capability_graph.py +64 -0
- package/mcp_server/project_brain/models.py +282 -0
- package/mcp_server/project_brain/refresh.py +80 -0
- package/mcp_server/project_brain/role_graph.py +103 -0
- package/mcp_server/project_brain/session_graph.py +51 -0
- package/mcp_server/project_brain/tools.py +144 -0
- package/mcp_server/reference_engine/__init__.py +1 -0
- package/mcp_server/reference_engine/gap_analyzer.py +239 -0
- package/mcp_server/reference_engine/models.py +105 -0
- package/mcp_server/reference_engine/profile_builder.py +149 -0
- package/mcp_server/reference_engine/tactic_router.py +117 -0
- package/mcp_server/reference_engine/tools.py +235 -0
- package/mcp_server/runtime/__init__.py +1 -0
- package/mcp_server/runtime/action_ledger.py +117 -0
- package/mcp_server/runtime/action_ledger_models.py +84 -0
- package/mcp_server/runtime/action_tools.py +57 -0
- package/mcp_server/runtime/capability_state.py +218 -0
- package/mcp_server/runtime/safety_kernel.py +339 -0
- package/mcp_server/runtime/safety_tools.py +42 -0
- package/mcp_server/runtime/tools.py +64 -0
- package/mcp_server/server.py +23 -1
- package/mcp_server/sound_design/__init__.py +1 -0
- package/mcp_server/sound_design/critics.py +297 -0
- package/mcp_server/sound_design/models.py +147 -0
- package/mcp_server/sound_design/planner.py +104 -0
- package/mcp_server/sound_design/tools.py +297 -0
- package/mcp_server/tools/_agent_os_engine.py +947 -0
- package/mcp_server/tools/_composition_engine.py +1530 -0
- package/mcp_server/tools/_conductor.py +199 -0
- package/mcp_server/tools/_conductor_budgets.py +222 -0
- package/mcp_server/tools/_evaluation_contracts.py +91 -0
- package/mcp_server/tools/_form_engine.py +416 -0
- package/mcp_server/tools/_motif_engine.py +351 -0
- package/mcp_server/tools/_planner_engine.py +516 -0
- package/mcp_server/tools/_research_engine.py +542 -0
- package/mcp_server/tools/_research_provider.py +185 -0
- package/mcp_server/tools/_snapshot_normalizer.py +49 -0
- package/mcp_server/tools/agent_os.py +440 -0
- package/mcp_server/tools/analyzer.py +18 -0
- package/mcp_server/tools/automation.py +25 -10
- package/mcp_server/tools/composition.py +563 -0
- package/mcp_server/tools/motif.py +104 -0
- package/mcp_server/tools/planner.py +144 -0
- package/mcp_server/tools/research.py +223 -0
- package/mcp_server/tools/tracks.py +18 -3
- package/mcp_server/tools/transport.py +10 -2
- package/mcp_server/transition_engine/__init__.py +6 -0
- package/mcp_server/transition_engine/archetypes.py +167 -0
- package/mcp_server/transition_engine/critics.py +340 -0
- package/mcp_server/transition_engine/models.py +90 -0
- package/mcp_server/transition_engine/tools.py +291 -0
- package/mcp_server/translation_engine/__init__.py +5 -0
- package/mcp_server/translation_engine/critics.py +297 -0
- package/mcp_server/translation_engine/models.py +27 -0
- package/mcp_server/translation_engine/tools.py +74 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/requirements.txt +1 -1
|
@@ -0,0 +1,291 @@
|
|
|
1
|
+
"""Transition Engine MCP tools β 3 tools for boundary analysis and planning.
|
|
2
|
+
|
|
3
|
+
Each tool fetches section data from Ableton via the shared connection,
|
|
4
|
+
builds TransitionBoundary objects, then delegates to pure-computation modules.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from fastmcp import Context
|
|
10
|
+
|
|
11
|
+
from ..server import mcp
|
|
12
|
+
from ..tools import _composition_engine as comp_engine
|
|
13
|
+
|
|
14
|
+
from .archetypes import TRANSITION_ARCHETYPES, select_archetype
|
|
15
|
+
from .critics import run_all_transition_critics
|
|
16
|
+
from .models import TransitionBoundary, TransitionPlan, TransitionScore
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# ββ Helpers βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _build_sections_from_ableton(ctx: Context) -> list[comp_engine.SectionNode]:
    """Query Ableton for session data and assemble the section graph.

    Falls back to an empty clip matrix when the scene-matrix command
    is unavailable, so section building can still proceed.
    """
    ableton = ctx.lifespan_context["ableton"]
    session_info = ableton.send_command("get_session_info")

    scene_list = session_info.get("scenes", [])
    n_tracks = session_info.get("track_count", 0)

    # Best-effort: older bridges may not support the scene-matrix command.
    try:
        clip_matrix = ableton.send_command("get_scene_matrix").get("matrix", [])
    except Exception:
        clip_matrix = [[] for _ in scene_list]

    return comp_engine.build_section_graph_from_scenes(
        scene_list, clip_matrix, n_tracks,
    )
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def _find_section_pair(
|
|
41
|
+
sections: list[comp_engine.SectionNode],
|
|
42
|
+
from_section: str,
|
|
43
|
+
to_section: str,
|
|
44
|
+
) -> tuple[comp_engine.SectionNode | None, comp_engine.SectionNode | None]:
|
|
45
|
+
"""Find two sections by ID or name."""
|
|
46
|
+
from_node = None
|
|
47
|
+
to_node = None
|
|
48
|
+
for s in sections:
|
|
49
|
+
sid = s.section_id.lower()
|
|
50
|
+
sname = (s.name or "").lower()
|
|
51
|
+
if sid == from_section.lower() or sname == from_section.lower():
|
|
52
|
+
from_node = s
|
|
53
|
+
if sid == to_section.lower() or sname == to_section.lower():
|
|
54
|
+
to_node = s
|
|
55
|
+
return from_node, to_node
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
def _build_boundary(
    from_node: comp_engine.SectionNode,
    to_node: comp_engine.SectionNode,
) -> TransitionBoundary:
    """Assemble a TransitionBoundary describing the seam between two sections."""
    # Signed contrasts: positive means the music rises into the new section.
    energy_shift = to_node.energy - from_node.energy
    density_shift = to_node.density - from_node.density

    return TransitionBoundary(
        from_section_id=from_node.section_id,
        to_section_id=to_node.section_id,
        # The boundary sits where the arriving section begins.
        boundary_bar=to_node.start_bar,
        from_type=from_node.section_type.value,
        to_type=to_node.section_type.value,
        energy_delta=energy_shift,
        density_delta=density_shift,
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _score_boundary(boundary: TransitionBoundary) -> TransitionScore:
    """Derive a multi-dimensional TransitionScore from boundary deltas.

    Pure heuristic arithmetic — no I/O and no session access.
    """
    energy_mag = abs(boundary.energy_delta)
    density_mag = abs(boundary.density_delta)

    # Clarity: larger energy/density contrast makes the seam more audible.
    clarity = min(1.0, energy_mag * 2.0 + density_mag * 1.5)

    # Payoff: the direction of the energy change picks the heuristic.
    delta = boundary.energy_delta
    if delta > 0.2:
        # Rising arrival — payoff scales with the size of the lift.
        payoff = min(1.0, delta * 1.5)
    elif delta < -0.2:
        # Falling arrival — the drop itself is the breath/relief payoff.
        payoff = min(1.0, -delta * 1.2)
    else:
        # Near-flat energy — density contrast has to carry the moment.
        payoff = min(1.0, density_mag * 1.5)

    # Redirection: how much does the energy actually shift?
    redirection = min(1.0, energy_mag * 2.5)

    # Similar density before/after reads as "same character preserved".
    identity = max(0.0, 1.0 - density_mag * 2.0)

    # Textbook pairings carry a higher risk of sounding formulaic.
    well_worn = {
        ("build", "drop"), ("verse", "chorus"), ("pre_chorus", "chorus"),
    }
    cliche = 0.5 if (boundary.from_type, boundary.to_type) in well_worn else 0.2

    # Weighted blend; cliche contributes inverted (lower risk scores higher).
    overall = (
        clarity * 0.25
        + payoff * 0.30
        + redirection * 0.20
        + identity * 0.10
        + (1.0 - cliche) * 0.15
    )

    return TransitionScore(
        boundary_clarity=round(clarity, 3),
        payoff_strength=round(payoff, 3),
        energy_redirection=round(redirection, 3),
        identity_preservation=round(identity, 3),
        cliche_risk=round(cliche, 3),
        overall=round(overall, 3),
    )
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _build_plan(
    boundary: TransitionBoundary,
    archetype=None,
) -> TransitionPlan:
    """Turn a boundary (plus archetype) into a concrete TransitionPlan.

    When no archetype is supplied, one is chosen for the boundary via
    select_archetype().
    """
    if archetype is None:
        archetype = select_archetype(boundary)

    # These gesture intents prepare the listener before the seam; every
    # other intent lands on the arrival itself.
    lead_in_intents = ("inhale", "conceal", "lift", "punctuate")

    lead_in: list[dict] = []
    arrival: list[dict] = []
    for intent in archetype.gestures:
        before_seam = intent in lead_in_intents
        placed = {
            "intent": intent,
            "archetype": archetype.name,
            # Lead-ins start two bars ahead of the boundary; arrivals on it.
            "offset_bars": -2 if before_seam else 0,
            "duration_bars": 2,
        }
        (lead_in if before_seam else arrival).append(placed)

    # Riskier archetypes discount the expected payoff.
    penalty = {"low": 0.0, "medium": 0.1, "high": 0.2}.get(
        archetype.risk_profile, 0.0,
    )
    raw_payoff = abs(boundary.energy_delta) * 1.5 + 0.3 - penalty
    payoff = min(1.0, max(0.0, raw_payoff))

    return TransitionPlan(
        boundary=boundary,
        archetype=archetype,
        lead_in_gestures=lead_in,
        arrival_gestures=arrival,
        payoff_estimate=round(payoff, 3),
    )
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
# ββ MCP Tools βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
@mcp.tool()
def analyze_transition(
    ctx: Context,
    from_section: str,
    to_section: str,
) -> dict:
    """Analyze the transition boundary between two sections.

    Builds a TransitionBoundary, selects an archetype, scores the
    boundary, and runs all 5 transition critics.

    Args:
        from_section: Name or ID of the outgoing section.
        to_section: Name or ID of the arriving section.

    Returns: boundary, archetype, score, issues, and recommended moves.
    """
    sections = _build_sections_from_ableton(ctx)
    from_node, to_node = _find_section_pair(sections, from_section, to_section)

    # Report the available sections whenever either endpoint fails to resolve.
    for wanted, node in ((from_section, from_node), (to_section, to_node)):
        if not node:
            return {
                "error": f"Section '{wanted}' not found",
                "available": [s.name or s.section_id for s in sections],
            }

    boundary = _build_boundary(from_node, to_node)
    archetype = select_archetype(boundary)
    score = _score_boundary(boundary)
    plan = _build_plan(boundary, archetype)
    issues = run_all_transition_critics(boundary, plan, score)

    return {
        "boundary": boundary.to_dict(),
        "archetype": archetype.to_dict(),
        "score": score.to_dict(),
        "issues": [issue.to_dict() for issue in issues],
        "issue_count": len(issues),
    }
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
@mcp.tool()
def plan_transition(
    ctx: Context,
    from_section: str,
    to_section: str,
) -> dict:
    """Plan a transition between two sections with concrete gestures.

    Selects the best archetype for the boundary and generates
    lead-in and arrival gesture sequences.

    Args:
        from_section: Name or ID of the outgoing section.
        to_section: Name or ID of the arriving section.

    Returns: plan with archetype, gestures, payoff estimate, and issues.
    """
    sections = _build_sections_from_ableton(ctx)
    from_node, to_node = _find_section_pair(sections, from_section, to_section)

    # Report the available sections whenever either endpoint fails to resolve.
    for wanted, node in ((from_section, from_node), (to_section, to_node)):
        if not node:
            return {
                "error": f"Section '{wanted}' not found",
                "available": [s.name or s.section_id for s in sections],
            }

    boundary = _build_boundary(from_node, to_node)
    plan = _build_plan(boundary)
    score = _score_boundary(boundary)
    issues = run_all_transition_critics(boundary, plan, score)

    return {
        "plan": plan.to_dict(),
        "score": score.to_dict(),
        "issues": [issue.to_dict() for issue in issues],
        "issue_count": len(issues),
        "available_archetypes": list(TRANSITION_ARCHETYPES.keys()),
    }
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
@mcp.tool()
def score_transition(
    ctx: Context,
    from_section: str,
    to_section: str,
) -> dict:
    """Score the transition quality between two sections.

    Produces a multi-dimensional score: boundary clarity, payoff strength,
    energy redirection, identity preservation, and cliche risk.

    Args:
        from_section: Name or ID of the outgoing section.
        to_section: Name or ID of the arriving section.

    Returns: score breakdown and overall rating.
    """
    sections = _build_sections_from_ableton(ctx)
    from_node, to_node = _find_section_pair(sections, from_section, to_section)

    # Report the available sections whenever either endpoint fails to resolve.
    for wanted, node in ((from_section, from_node), (to_section, to_node)):
        if not node:
            return {
                "error": f"Section '{wanted}' not found",
                "available": [s.name or s.section_id for s in sections],
            }

    boundary = _build_boundary(from_node, to_node)
    return {
        "boundary": boundary.to_dict(),
        "score": _score_boundary(boundary).to_dict(),
    }
|
|
@@ -0,0 +1,297 @@
|
|
|
1
|
+
"""Translation Engine critics β detect playback robustness issues.
|
|
2
|
+
|
|
3
|
+
Five critics: mono_collapse, small_speaker, harshness,
|
|
4
|
+
low_end_instability, front_element.
|
|
5
|
+
All pure computation, zero I/O.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from dataclasses import asdict, dataclass, field
|
|
11
|
+
from typing import List
|
|
12
|
+
|
|
13
|
+
from .models import TranslationReport
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ββ TranslationIssue ββββββββββββββββββββββββββββββββββββββββββββββ
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class TranslationIssue:
    """A single detected translation/playback issue.

    Produced by the individual critic functions in this module and
    aggregated by run_all_translation_critics(); serialized with
    to_dict() for MCP tool output.
    """

    issue_type: str = ""  # machine-readable tag, e.g. "mono_collapse"
    critic: str = ""  # name of the critic that raised this issue
    severity: float = 0.0  # 0-1
    confidence: float = 0.0  # 0-1
    evidence: str = ""  # human-readable explanation of the detection
    recommended_moves: List[str] = field(default_factory=list)  # corrective move names

    def to_dict(self) -> dict:
        # dataclasses.asdict yields a plain, JSON-friendly dict view.
        return asdict(self)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# ββ Mono Collapse Critic ββββββββββββββββββββββββββββββββββββββββββ
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def run_mono_collapse_critic(
    stereo_width: float,
    center_strength: float,
) -> List[TranslationIssue]:
    """Flag mixes whose wide stereo image would collapse badly to mono.

    Fires when stereo_width > 0.7 while center_strength < 0.4: wide
    side content with a weak center loses significant material when
    the mix is summed to mono.
    """
    # Guard clause: only the wide-and-hollow combination is risky.
    if stereo_width <= 0.7 or center_strength >= 0.4:
        return []

    raw = (stereo_width - 0.5) * (0.5 - center_strength) * 4.0
    return [TranslationIssue(
        issue_type="mono_collapse",
        critic="mono_collapse",
        severity=max(0.0, min(1.0, raw)),
        confidence=0.7,
        evidence=(
            f"Stereo width {stereo_width:.2f} with center strength "
            f"{center_strength:.2f} β mono playback will lose "
            f"significant stereo content"
        ),
        recommended_moves=["narrow_stereo_width", "strengthen_center"],
    )]
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
# ββ Small Speaker Critic ββββββββββββββββββββββββββββββββββββββββββ
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def run_small_speaker_critic(
    sub_energy: float,
    low_energy: float,
) -> List[TranslationIssue]:
    """Flag low-end balances dominated by sub frequencies.

    Small speakers cannot reproduce sub content, so when the sub band
    is more than half of the combined low-end (sub + low) the mix's
    foundation disappears on them.
    """
    total_low = sub_energy + low_energy
    if total_low <= 0:
        return []

    sub_ratio = sub_energy / total_low
    if sub_ratio <= 0.5:
        return []

    return [TranslationIssue(
        issue_type="small_speaker_loss",
        critic="small_speaker",
        severity=max(0.0, min(1.0, (sub_ratio - 0.5) * 4.0)),
        confidence=0.65,
        evidence=(
            f"Sub energy {sub_energy:.2f} is {sub_ratio:.0%} of total "
            f"low-end ({total_low:.2f}) β small speakers will lose "
            f"the low-end foundation"
        ),
        recommended_moves=["add_harmonics_to_bass", "reduce_sub_energy"],
    )]
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
# ββ Harshness Critic ββββββββββββββββββββββββββββββββββββββββββββββ
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def run_harshness_critic(
    high_energy: float,
    presence_energy: float,
) -> List[TranslationIssue]:
    """Flag excessive brightness (high + presence energy above 0.75).

    Too much top-end causes listening fatigue, especially on earbuds
    and small speakers with boosted treble response.
    """
    combined = high_energy + presence_energy
    if combined <= 0.75:
        return []

    return [TranslationIssue(
        issue_type="harshness_risk",
        critic="harshness",
        severity=max(0.0, min(1.0, (combined - 0.75) * 4.0)),
        confidence=0.6,
        evidence=(
            f"High energy {high_energy:.2f} + presence energy "
            f"{presence_energy:.2f} = {combined:.2f} β "
            f"likely harsh on earbuds and small speakers"
        ),
        recommended_moves=["reduce_high_shelf", "tame_presence_peak"],
    )]
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
# ββ Low End Instability Critic ββββββββββββββββββββββββββββββββββββ
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def run_low_end_instability_critic(
    sub_energy: float,
    low_mid_energy: float,
) -> List[TranslationIssue]:
    """Flag competing sub and low-mid energy.

    When both bands are high (each above 0.4) they mask each other and
    the low end turns muddy.
    """
    if sub_energy <= 0.4 or low_mid_energy <= 0.4:
        return []

    raw = (sub_energy + low_mid_energy - 0.8) * 2.5
    return [TranslationIssue(
        issue_type="low_end_instability",
        critic="low_end_instability",
        severity=max(0.0, min(1.0, raw)),
        confidence=0.6,
        evidence=(
            f"Sub energy {sub_energy:.2f} and low-mid energy "
            f"{low_mid_energy:.2f} are both high β "
            f"competing low frequencies cause muddiness"
        ),
        recommended_moves=["high_pass_non_bass", "eq_low_mid_cut"],
    )]
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
# ββ Front Element Critic ββββββββββββββββββββββββββββββββββββββββββ
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def run_front_element_critic(
    has_foreground: bool,
    foreground_masked: bool,
) -> List[TranslationIssue]:
    """Flag a missing or buried front element (vocal, lead, melody).

    The front element must stay present and unmasked for the mix to
    translate; at most one of the two issues below can fire.
    """
    found: List[TranslationIssue] = []

    if not has_foreground:
        found.append(TranslationIssue(
            issue_type="no_front_element",
            critic="front_element",
            severity=0.7,
            confidence=0.5,
            evidence="No foreground element detected β mix lacks a focal point",
            recommended_moves=["add_lead_element", "boost_vocal"],
        ))
    elif foreground_masked:
        # Element exists but is buried under competing content.
        found.append(TranslationIssue(
            issue_type="front_element_masked",
            critic="front_element",
            severity=0.6,
            confidence=0.6,
            evidence=(
                "Foreground element is present but masked β "
                "lead/vocal is buried in the mix"
            ),
            recommended_moves=["eq_pocket_for_vocal", "reduce_competing_mids"],
        ))

    return found
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
# ββ Run all translation critics βββββββββββββββββββββββββββββββββββ
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
def run_all_translation_critics(mix_snapshot: dict) -> List[TranslationIssue]:
    """Run all 5 translation critics against a mix snapshot.

    Expected mix_snapshot keys (all optional; safe defaults applied):
        stereo_width, center_strength, sub_energy, low_energy,
        low_mid_energy, high_energy, presence_energy — floats in 0-1
        has_foreground, foreground_masked — booleans
    """
    get = mix_snapshot.get

    per_critic = (
        run_mono_collapse_critic(
            stereo_width=get("stereo_width", 0.0),
            center_strength=get("center_strength", 0.5),
        ),
        run_small_speaker_critic(
            sub_energy=get("sub_energy", 0.0),
            low_energy=get("low_energy", 0.0),
        ),
        run_harshness_critic(
            high_energy=get("high_energy", 0.0),
            presence_energy=get("presence_energy", 0.0),
        ),
        run_low_end_instability_critic(
            sub_energy=get("sub_energy", 0.0),
            low_mid_energy=get("low_mid_energy", 0.0),
        ),
        run_front_element_critic(
            has_foreground=get("has_foreground", True),
            foreground_masked=get("foreground_masked", False),
        ),
    )

    # Flatten in critic order, matching the sequential extends this replaces.
    return [issue for batch in per_critic for issue in batch]
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
# ββ Build TranslationReport βββββββββββββββββββββββββββββββββββββββ
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
def build_translation_report(mix_snapshot: dict) -> TranslationReport:
    """Run every critic and compile the results into a TranslationReport."""
    issues = run_all_translation_critics(mix_snapshot)
    found_types = {i.issue_type for i in issues}

    # Safety flags: True when the corresponding critic raised nothing.
    mono_safe = "mono_collapse" not in found_types
    small_speaker_safe = "small_speaker_loss" not in found_types
    low_end_stable = "low_end_instability" not in found_types
    front_element_present = found_types.isdisjoint(
        {"no_front_element", "front_element_masked"}
    )

    # Harshness is reported as a graded severity rather than a boolean.
    harshness_risk = max(
        (i.severity for i in issues if i.issue_type == "harshness_risk"),
        default=0.0,
    )

    # De-duplicate suggested moves while keeping first-seen order.
    suggested = list(dict.fromkeys(
        move for issue in issues for move in issue.recommended_moves
    ))

    # Overall classification from the worst severity and issue volume.
    worst = max((i.severity for i in issues), default=0.0)
    if worst >= 0.7 or len(issues) >= 3:
        verdict = "critical"
    elif worst >= 0.4 or len(issues) >= 2:
        verdict = "fragile"
    else:
        verdict = "robust"

    return TranslationReport(
        mono_safe=mono_safe,
        small_speaker_safe=small_speaker_safe,
        harshness_risk=harshness_risk,
        low_end_stable=low_end_stable,
        front_element_present=front_element_present,
        overall_robustness=verdict,
        issues=[i.to_dict() for i in issues],
        suggested_moves=suggested,
    )
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
"""Translation Engine models β all dataclasses with to_dict().
|
|
2
|
+
|
|
3
|
+
Pure data structures for playback robustness analysis.
|
|
4
|
+
Zero I/O.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from dataclasses import asdict, dataclass, field
|
|
10
|
+
from typing import List
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@dataclass
class TranslationReport:
    """Full playback robustness report.

    Serialized with to_dict() for MCP tool responses.
    """

    mono_safe: bool = True  # no mono_collapse issue detected
    small_speaker_safe: bool = True  # no small_speaker_loss issue detected
    harshness_risk: float = 0.0  # 0-1
    low_end_stable: bool = True  # no low_end_instability issue detected
    front_element_present: bool = True  # no missing/masked front-element issue
    overall_robustness: str = "robust"  # "robust", "fragile", "critical"
    issues: List[dict] = field(default_factory=list)  # serialized issue dicts
    suggested_moves: List[str] = field(default_factory=list)  # de-duplicated corrective moves

    def to_dict(self) -> dict:
        # dataclasses.asdict yields a plain, JSON-friendly dict view.
        return asdict(self)
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
"""Translation Engine MCP tools β 2 tools for playback robustness.
|
|
2
|
+
|
|
3
|
+
Each tool fetches data from Ableton via the shared connection,
|
|
4
|
+
then delegates to pure-computation critics.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from mcp.server.fastmcp import Context
|
|
10
|
+
|
|
11
|
+
from ..server import mcp
|
|
12
|
+
from .critics import build_translation_report, run_all_translation_critics
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# ββ Helpers βββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _fetch_translation_data(ctx: Context) -> dict:
|
|
19
|
+
"""Fetch mix snapshot data needed for translation analysis."""
|
|
20
|
+
ableton = ctx.lifespan_context["ableton"]
|
|
21
|
+
|
|
22
|
+
# Get mix snapshot β contains spectral and stereo info
|
|
23
|
+
snapshot = {}
|
|
24
|
+
try:
|
|
25
|
+
snapshot = ableton.send_command("get_mix_snapshot", {})
|
|
26
|
+
except Exception:
|
|
27
|
+
pass
|
|
28
|
+
|
|
29
|
+
# Extract spectral bands from snapshot
|
|
30
|
+
spectrum = snapshot.get("spectrum", {})
|
|
31
|
+
stereo = snapshot.get("stereo", {})
|
|
32
|
+
|
|
33
|
+
return {
|
|
34
|
+
"stereo_width": stereo.get("side_activity", 0.0),
|
|
35
|
+
"center_strength": stereo.get("center_strength", 0.5),
|
|
36
|
+
"sub_energy": spectrum.get("sub", 0.0),
|
|
37
|
+
"low_energy": spectrum.get("low", 0.0),
|
|
38
|
+
"low_mid_energy": spectrum.get("low_mid", 0.0),
|
|
39
|
+
"high_energy": spectrum.get("high", 0.0),
|
|
40
|
+
"presence_energy": spectrum.get("presence", 0.0),
|
|
41
|
+
"has_foreground": snapshot.get("has_foreground", True),
|
|
42
|
+
"foreground_masked": snapshot.get("foreground_masked", False),
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# ββ MCP Tools βββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@mcp.tool()
def check_translation(ctx: Context) -> dict:
    """Check playback robustness — mono safety, small speakers, harshness.

    Returns the full translation report: robustness classification
    (robust/fragile/critical), boolean safety flags, and suggested
    corrective moves.
    """
    snapshot = _fetch_translation_data(ctx)
    return build_translation_report(snapshot).to_dict()
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@mcp.tool()
def get_translation_issues(ctx: Context) -> dict:
    """Get just the translation issues without the full report.

    Lighter than check_translation — returns only the issues detected
    by the 5 playback robustness critics.
    """
    detected = run_all_translation_critics(_fetch_translation_data(ctx))
    return {
        "issues": [issue.to_dict() for issue in detected],
        "issue_count": len(detected),
    }
|