livepilot 1.12.2 → 1.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +82 -0
- package/README.md +3 -3
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/branches/__init__.py +32 -0
- package/mcp_server/branches/types.py +230 -0
- package/mcp_server/composer/__init__.py +10 -1
- package/mcp_server/composer/branch_producer.py +229 -0
- package/mcp_server/evaluation/policy.py +129 -2
- package/mcp_server/experiment/engine.py +47 -11
- package/mcp_server/experiment/models.py +72 -7
- package/mcp_server/experiment/tools.py +231 -35
- package/mcp_server/memory/taste_graph.py +84 -11
- package/mcp_server/persistence/taste_store.py +21 -5
- package/mcp_server/runtime/session_kernel.py +46 -0
- package/mcp_server/runtime/tools.py +29 -3
- package/mcp_server/synthesis_brain/__init__.py +53 -0
- package/mcp_server/synthesis_brain/adapters/__init__.py +34 -0
- package/mcp_server/synthesis_brain/adapters/analog.py +167 -0
- package/mcp_server/synthesis_brain/adapters/base.py +86 -0
- package/mcp_server/synthesis_brain/adapters/drift.py +166 -0
- package/mcp_server/synthesis_brain/adapters/meld.py +151 -0
- package/mcp_server/synthesis_brain/adapters/operator.py +169 -0
- package/mcp_server/synthesis_brain/adapters/wavetable.py +228 -0
- package/mcp_server/synthesis_brain/engine.py +91 -0
- package/mcp_server/synthesis_brain/models.py +121 -0
- package/mcp_server/synthesis_brain/timbre.py +194 -0
- package/mcp_server/tools/_conductor.py +144 -0
- package/mcp_server/wonder_mode/engine.py +324 -0
- package/mcp_server/wonder_mode/tools.py +153 -1
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/server.json +2 -2
|
@@ -0,0 +1,151 @@
|
|
|
1
|
+
"""Meld adapter — Ableton 12's newest FM/granular hybrid.
|
|
2
|
+
|
|
3
|
+
Meld pairs two "Engines" with per-engine algorithms and a shared
|
|
4
|
+
modulation / amp / filter section. PR10 ships one canned proposer:
|
|
5
|
+
engine_algo_swap — changes Engine 1's algorithm to produce a
|
|
6
|
+
materially different core timbre without disturbing the envelope
|
|
7
|
+
or filter. Later PRs add engine-blend, unison, and modulation-matrix
|
|
8
|
+
variants.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import hashlib
|
|
14
|
+
from typing import Optional
|
|
15
|
+
|
|
16
|
+
from ...branches import BranchSeed, freeform_seed
|
|
17
|
+
from ..models import (
|
|
18
|
+
SynthProfile,
|
|
19
|
+
TimbralFingerprint,
|
|
20
|
+
ModulationGraph,
|
|
21
|
+
ArticulationProfile,
|
|
22
|
+
NATIVE,
|
|
23
|
+
)
|
|
24
|
+
from .base import register_adapter
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
_KNOWN_PARAMS = {
|
|
28
|
+
"Engine 1 Algorithm",
|
|
29
|
+
"Engine 2 Algorithm",
|
|
30
|
+
"Engine 1 Level",
|
|
31
|
+
"Engine 2 Level",
|
|
32
|
+
"Engine 1 Morph",
|
|
33
|
+
"Engine 2 Morph",
|
|
34
|
+
"Filter Freq",
|
|
35
|
+
"Filter Res",
|
|
36
|
+
"Amp A",
|
|
37
|
+
"Amp D",
|
|
38
|
+
"Amp S",
|
|
39
|
+
"Amp R",
|
|
40
|
+
}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
@register_adapter
class MeldAdapter:
    """Adapter for Ableton's native Meld (dual-engine hybrid synth)."""

    device_name: str = "Meld"

    def extract_profile(
        self,
        track_index: int,
        device_index: int,
        parameter_state: dict,
        display_values: Optional[dict] = None,
        role_hint: str = "",
    ) -> SynthProfile:
        """Build a SynthProfile from Meld's raw parameter snapshot.

        Keeps only parameters listed in ``_KNOWN_PARAMS``; engine output
        levels are recorded as coarse modulation routes so downstream
        consumers can see the engine mix balance.
        """
        observations: list[str] = []

        # Flag the depth-limiting case where both engines run the same algorithm.
        algo_1 = parameter_state.get("Engine 1 Algorithm")
        algo_2 = parameter_state.get("Engine 2 Algorithm")
        if algo_1 is not None and algo_2 is not None and algo_1 == algo_2:
            observations.append(
                "Both Engines on same algorithm — consider differentiating for depth"
            )

        articulation = ArticulationProfile(
            attack_ms=float(parameter_state.get("Amp A", 0.0) or 0.0),
            release_ms=float(parameter_state.get("Amp R", 0.0) or 0.0),
        )

        # Meld has many internal mod routes; PR10 just records engine levels
        # as rough "sources" so downstream can see the mix balance.
        mod = ModulationGraph()
        for engine in ("Engine 1", "Engine 2"):
            level = parameter_state.get(f"{engine} Level", 0.0)
            if level and level > 0:
                mod.routes.append(
                    {"source": engine, "target": "output", "amount": level}
                )

        focused_state = {
            name: value
            for name, value in parameter_state.items()
            if name in _KNOWN_PARAMS
        }
        focused_display: dict = {}
        if display_values:
            focused_display = {
                name: value
                for name, value in display_values.items()
                if name in _KNOWN_PARAMS
            }

        return SynthProfile(
            device_name=self.device_name,
            opacity=NATIVE,
            track_index=track_index,
            device_index=device_index,
            parameter_state=focused_state,
            display_values=focused_display,
            role_hint=role_hint,
            modulation=mod,
            articulation=articulation,
            notes=observations,
        )

    def propose_branches(
        self,
        profile: SynthProfile,
        target: TimbralFingerprint,
        kernel: Optional[dict] = None,
    ) -> list[tuple[BranchSeed, dict]]:
        """Propose the single PR10 branch: an Engine 1 algorithm swap."""
        opts = kernel or {}
        freshness = float(opts.get("freshness", 0.5) or 0.5)
        track, device = profile.track_index, profile.device_index

        # Algorithm indices are device-internal; propose a relative shift
        # modulo a safe ceiling of 10 (Meld has 12 algos but we're
        # conservative). Higher freshness widens the jump to +3.
        current_algo = int(profile.parameter_state.get("Engine 1 Algorithm", 0) or 0)
        shift = 3 if freshness >= 0.5 else 1
        new_algo = (current_algo + shift) % 10

        seed = freeform_seed(
            seed_id=_short_id("ml_algo", f"{track}:{device}:{new_algo}"),
            hypothesis=(
                f"Meld Engine 1 algorithm swap: {current_algo} → {new_algo} "
                f"for a materially different core timbre"
            ),
            source="synthesis",
            novelty_label="strong" if shift == 1 else "unexpected",
            risk_label="medium",
            affected_scope={
                "track_indices": [track],
                "device_paths": [f"track/{track}/device/{device}"],
            },
            distinctness_reason="only Meld seed that changes Engine 1 algorithm",
        )
        swap_step = {
            "tool": "set_device_parameter",
            "params": {
                "track_index": track,
                "device_index": device,
                "parameter_name": "Engine 1 Algorithm",
                "value": new_algo,
            },
        }
        plan = {
            "steps": [swap_step],
            "step_count": 1,
            "summary": f"Engine 1 Algorithm {current_algo} → {new_algo}",
        }
        return [(seed, plan)]
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _short_id(prefix: str, key: str) -> str:
|
|
150
|
+
h = hashlib.sha256(f"{prefix}:{key}".encode()).hexdigest()[:10]
|
|
151
|
+
return f"{prefix}_{h}"
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""Operator adapter — native-synth-aware branch production for Ableton's Operator.
|
|
2
|
+
|
|
3
|
+
FM synthesis is defined by operator ratios + algorithm topology + per-op
|
|
4
|
+
envelopes. PR9 ships one canned proposer that shifts a carrier/modulator
|
|
5
|
+
ratio, which is the highest-leverage single parameter change for FM tone.
|
|
6
|
+
Later PRs add algorithm swaps, envelope reshaping, and feedback variants.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import hashlib
|
|
12
|
+
from typing import Optional
|
|
13
|
+
|
|
14
|
+
from ...branches import BranchSeed, freeform_seed
|
|
15
|
+
from ..models import (
|
|
16
|
+
SynthProfile,
|
|
17
|
+
TimbralFingerprint,
|
|
18
|
+
ModulationGraph,
|
|
19
|
+
ArticulationProfile,
|
|
20
|
+
NATIVE,
|
|
21
|
+
)
|
|
22
|
+
from .base import register_adapter
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
_KNOWN_PARAMS = {
|
|
26
|
+
"Algorithm",
|
|
27
|
+
"Oscillator A Coarse",
|
|
28
|
+
"Oscillator B Coarse",
|
|
29
|
+
"Oscillator C Coarse",
|
|
30
|
+
"Oscillator D Coarse",
|
|
31
|
+
"Oscillator A Fine",
|
|
32
|
+
"Oscillator B Fine",
|
|
33
|
+
"Oscillator A Level",
|
|
34
|
+
"Oscillator B Level",
|
|
35
|
+
"Oscillator C Level",
|
|
36
|
+
"Oscillator D Level",
|
|
37
|
+
"Oscillator A Attack",
|
|
38
|
+
"Oscillator A Release",
|
|
39
|
+
"Filter Frequency",
|
|
40
|
+
"Filter Resonance",
|
|
41
|
+
"Time", # global envelope time
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@register_adapter
class OperatorAdapter:
    """Adapter for Ableton's native Operator (4-operator FM synth)."""

    device_name: str = "Operator"

    def extract_profile(
        self,
        track_index: int,
        device_index: int,
        parameter_state: dict,
        display_values: Optional[dict] = None,
        role_hint: str = "",
    ) -> SynthProfile:
        """Build a SynthProfile from Operator's raw parameter snapshot."""
        observations: list[str] = []

        algo = parameter_state.get("Algorithm", 0)
        if algo is not None:
            observations.append(
                f"Algorithm={algo} — topology governs which ops are carriers vs modulators"
            )

        # Crude modulator-detection: any oscillator with Coarse > 1 and
        # Level > 0 is treated as a modulator. Precise detection needs
        # algorithm decoding, which lands in PR10.
        routes = []
        for osc in "ABCD":
            coarse = parameter_state.get(f"Oscillator {osc} Coarse", 1)
            level = parameter_state.get(f"Oscillator {osc} Level", 0)
            if not (coarse and coarse > 1):
                continue
            if not (level and level > 0):
                continue
            routes.append(
                {
                    "source": f"Oscillator {osc}",
                    "target": "(per algorithm)",
                    "amount": level,
                    "range": None,
                    "coarse": coarse,
                }
            )
        mod = ModulationGraph(routes=routes)

        articulation = ArticulationProfile(
            attack_ms=float(parameter_state.get("Oscillator A Attack", 0.0) or 0.0),
            release_ms=float(parameter_state.get("Oscillator A Release", 0.0) or 0.0),
        )

        focused_state = {
            name: value
            for name, value in parameter_state.items()
            if name in _KNOWN_PARAMS
        }
        focused_display: dict = {}
        if display_values:
            focused_display = {
                name: value
                for name, value in display_values.items()
                if name in _KNOWN_PARAMS
            }

        return SynthProfile(
            device_name=self.device_name,
            opacity=NATIVE,
            track_index=track_index,
            device_index=device_index,
            parameter_state=focused_state,
            display_values=focused_display,
            role_hint=role_hint,
            modulation=mod,
            articulation=articulation,
            notes=observations,
        )

    def propose_branches(
        self,
        profile: SynthProfile,
        target: TimbralFingerprint,
        kernel: Optional[dict] = None,
    ) -> list[tuple[BranchSeed, dict]]:
        """Propose the single PR9 branch: a Coarse-ratio shift on Osc B."""
        opts = kernel or {}
        freshness = float(opts.get("freshness", 0.5) or 0.5)
        track, device = profile.track_index, profile.device_index

        # ── Branch A: ratio_shift on modulator B ─────────────────────
        # Pick a new Coarse ratio for Oscillator B (a common modulator slot)
        # that contrasts with current. +1 coarse step for low freshness,
        # +2 otherwise; step downward instead when already at the ceiling.
        current_coarse = int(profile.parameter_state.get("Oscillator B Coarse", 1) or 1)
        step = 2 if freshness >= 0.5 else 1
        new_coarse = min(24, current_coarse + step)
        if new_coarse == current_coarse:
            new_coarse = max(1, current_coarse - step)

        subtlety = "subtle" if step == 1 else "significant"
        seed_a = freeform_seed(
            seed_id=_short_id("op_ratio", f"{track}:{device}:{new_coarse}"),
            hypothesis=(
                f"Shift Operator Osc B Coarse from {current_coarse} to {new_coarse} "
                f"for a {subtlety} FM tone change"
            ),
            source="synthesis",
            novelty_label="unexpected" if step == 2 else "strong",
            risk_label="medium",
            affected_scope={
                "track_indices": [track],
                "device_paths": [f"track/{track}/device/{device}"],
            },
            distinctness_reason=(
                "only seed that changes the modulator/carrier ratio on Osc B"
            ),
        )
        plan_a = {
            "steps": [
                {
                    "tool": "set_device_parameter",
                    "params": {
                        "track_index": track,
                        "device_index": device,
                        "parameter_name": "Oscillator B Coarse",
                        "value": new_coarse,
                    },
                },
            ],
            "step_count": 1,
            "summary": f"Osc B Coarse {current_coarse} → {new_coarse}",
        }
        return [(seed_a, plan_a)]
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def _short_id(prefix: str, key: str) -> str:
|
|
168
|
+
h = hashlib.sha256(f"{prefix}:{key}".encode()).hexdigest()[:10]
|
|
169
|
+
return f"{prefix}_{h}"
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
"""Wavetable adapter — native-synth-aware branch production for Ableton's Wavetable.
|
|
2
|
+
|
|
3
|
+
Knows the relevant parameter names and proposes two canned variant
|
|
4
|
+
branches per call:
|
|
5
|
+
- osc_position_shift: moves Osc 1 position to create a timbral contrast
|
|
6
|
+
- voice_width_variant: increases unison voices and detune for width
|
|
7
|
+
|
|
8
|
+
Later PRs add: modulation-matrix inversion, filter-envelope reshaping,
|
|
9
|
+
tuning-table variants, sub/body-layer variants.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from __future__ import annotations
|
|
13
|
+
|
|
14
|
+
import hashlib
|
|
15
|
+
from typing import Optional
|
|
16
|
+
|
|
17
|
+
from ...branches import BranchSeed, freeform_seed
|
|
18
|
+
from ..models import (
|
|
19
|
+
SynthProfile,
|
|
20
|
+
TimbralFingerprint,
|
|
21
|
+
ModulationGraph,
|
|
22
|
+
ArticulationProfile,
|
|
23
|
+
NATIVE,
|
|
24
|
+
)
|
|
25
|
+
from .base import register_adapter
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# Parameter names we know and care about. Extracted from the Wavetable
|
|
29
|
+
# corpus (see skills/livepilot-core/references/device-knowledge/
|
|
30
|
+
# instruments-synths.md). PR9 uses a small subset; later PRs extend.
|
|
31
|
+
_KNOWN_PARAMS = {
|
|
32
|
+
"Osc 1 Position",
|
|
33
|
+
"Osc 2 Position",
|
|
34
|
+
"Osc 1 Transpose",
|
|
35
|
+
"Osc 2 Transpose",
|
|
36
|
+
"Voices",
|
|
37
|
+
"Voices Detune",
|
|
38
|
+
"Filter Freq",
|
|
39
|
+
"Filter Res",
|
|
40
|
+
"Filter Drive",
|
|
41
|
+
"Amp Attack",
|
|
42
|
+
"Amp Release",
|
|
43
|
+
"LFO 1 Rate",
|
|
44
|
+
"LFO 1 Amount",
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@register_adapter
class WavetableAdapter:
    """Adapter for Ableton's native Wavetable."""

    device_name: str = "Wavetable"

    def extract_profile(
        self,
        track_index: int,
        device_index: int,
        parameter_state: dict,
        display_values: Optional[dict] = None,
        role_hint: str = "",
    ) -> SynthProfile:
        """Build a SynthProfile from Wavetable's raw parameter snapshot.

        Records richness hints (voices/detune) as notes, derives a minimal
        modulation graph (LFO 1 only, PR9), and keeps only parameters
        listed in ``_KNOWN_PARAMS``.
        """
        observations: list[str] = []

        voices = parameter_state.get("Voices", 0)
        detune = parameter_state.get("Voices Detune", 0.0)
        if voices and voices >= 4 and detune and detune > 0.1:
            observations.append(
                f"voices={voices}, detune={detune:.2f} — already rich, avoid over-thickening"
            )
        if voices and voices <= 1:
            observations.append("mono voice mode — width variants must add voices")

        # Articulation comes from the amp envelope when present.
        articulation = ArticulationProfile(
            attack_ms=float(parameter_state.get("Amp Attack", 0.0) or 0.0),
            release_ms=float(parameter_state.get("Amp Release", 0.0) or 0.0),
        )

        # Modulation graph — minimal in PR9: record LFO 1 only when audible.
        mod = ModulationGraph()
        lfo_amount = parameter_state.get("LFO 1 Amount", 0.0)
        if lfo_amount and abs(lfo_amount) > 0.01:
            mod.routes.append(
                {
                    "source": "LFO 1",
                    "target": "(destination inferred from patch)",
                    "amount": lfo_amount,
                    "range": None,
                }
            )

        # Keep only the known parameters for a compact, focused profile —
        # callers still hold the full raw dict if they need it.
        focused_state = {
            name: value
            for name, value in parameter_state.items()
            if name in _KNOWN_PARAMS
        }
        focused_display: dict = {}
        if display_values:
            focused_display = {
                name: value
                for name, value in display_values.items()
                if name in _KNOWN_PARAMS
            }

        return SynthProfile(
            device_name=self.device_name,
            opacity=NATIVE,
            track_index=track_index,
            device_index=device_index,
            parameter_state=focused_state,
            display_values=focused_display,
            role_hint=role_hint,
            modulation=mod,
            articulation=articulation,
            notes=observations,
        )

    def propose_branches(
        self,
        profile: SynthProfile,
        target: TimbralFingerprint,
        kernel: Optional[dict] = None,
    ) -> list[tuple[BranchSeed, dict]]:
        """Propose the two PR9 branches: position shift and voice width."""
        opts = kernel or {}
        freshness = float(opts.get("freshness", 0.5) or 0.5)
        track, device = profile.track_index, profile.device_index

        proposals: list[tuple[BranchSeed, dict]] = []

        # ── Branch A: osc_position_shift ──────────────────────────────
        # Moves Osc 1 position to a contrasting point. Safe / incremental
        # when freshness < 0.5; more aggressive shift when higher.
        current_pos = float(profile.parameter_state.get("Osc 1 Position", 0.0) or 0.0)
        shift = 0.45 if freshness >= 0.5 else 0.2
        # Stay within [0, 1]; shift downward when the upward move would clip.
        if current_pos + shift > 1.0:
            new_pos = max(0.0, current_pos - shift)
        else:
            new_pos = min(1.0, current_pos + shift)
        seed_a = freeform_seed(
            seed_id=_short_id("wt_pos", f"{track}:{device}:{new_pos:.2f}"),
            hypothesis=(
                f"Shift Wavetable Osc 1 Position from {current_pos:.2f} to {new_pos:.2f} "
                f"for a contrasting harmonic spectrum"
            ),
            source="synthesis",
            novelty_label="unexpected" if freshness >= 0.7 else "strong",
            risk_label="low",
            affected_scope={
                "track_indices": [track],
                "device_paths": [f"track/{track}/device/{device}"],
            },
            distinctness_reason="only seed that changes Osc 1 Position",
        )
        plan_a = {
            "steps": [
                {
                    "tool": "set_device_parameter",
                    "params": {
                        "track_index": track,
                        "device_index": device,
                        "parameter_name": "Osc 1 Position",
                        "value": round(new_pos, 3),
                    },
                },
            ],
            "step_count": 1,
            "summary": f"Osc 1 Position {current_pos:.2f} → {new_pos:.2f}",
        }
        proposals.append((seed_a, plan_a))

        # ── Branch B: voice_width_variant ─────────────────────────────
        # Push Voices + Detune for a richer stereo image — skipped when
        # extract_profile already flagged the patch as thick.
        already_thick = any("over-thickening" in note for note in profile.notes)
        if not already_thick:
            current_voices = float(profile.parameter_state.get("Voices", 1) or 1)
            current_detune = float(profile.parameter_state.get("Voices Detune", 0.0) or 0.0)
            new_voices = min(8.0, max(current_voices, 4.0))
            new_detune = min(0.5, max(current_detune + 0.1, 0.15))
            seed_b = freeform_seed(
                seed_id=_short_id("wt_width", f"{track}:{device}:{new_voices}:{new_detune:.2f}"),
                hypothesis=(
                    f"Increase Wavetable voices to {int(new_voices)} with detune "
                    f"{new_detune:.2f} for a wider, richer image"
                ),
                source="synthesis",
                novelty_label="safe",
                risk_label="low",
                affected_scope={
                    "track_indices": [track],
                    "device_paths": [f"track/{track}/device/{device}"],
                },
                distinctness_reason=(
                    "only seed that changes voice count + detune; focuses on "
                    "width rather than spectrum"
                ),
            )
            plan_b = {
                "steps": [
                    {
                        "tool": "set_device_parameter",
                        "params": {
                            "track_index": track,
                            "device_index": device,
                            "parameter_name": "Voices",
                            "value": new_voices,
                        },
                    },
                    {
                        "tool": "set_device_parameter",
                        "params": {
                            "track_index": track,
                            "device_index": device,
                            "parameter_name": "Voices Detune",
                            "value": round(new_detune, 3),
                        },
                    },
                ],
                "step_count": 2,
                "summary": f"Voices → {int(new_voices)}, Detune → {new_detune:.2f}",
            }
            proposals.append((seed_b, plan_b))

        return proposals
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def _short_id(prefix: str, key: str) -> str:
|
|
227
|
+
h = hashlib.sha256(f"{prefix}:{key}".encode()).hexdigest()[:10]
|
|
228
|
+
return f"{prefix}_{h}"
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""Synthesis-brain engine — dispatches to the right adapter based on device name.
|
|
2
|
+
|
|
3
|
+
Two primary entry points:
|
|
4
|
+
analyze_synth_patch(device_name, ...) -> SynthProfile
|
|
5
|
+
propose_synth_branches(device_name, ...) -> list[(BranchSeed, compiled_plan)]
|
|
6
|
+
|
|
7
|
+
Both are pure Python — no @mcp.tool() decorators in PR9. PR12 wires
|
|
8
|
+
dedicated MCP tools and does the tool-count metadata sweep.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
from ..branches import BranchSeed
|
|
16
|
+
from .adapters import get_adapter, registered_devices
|
|
17
|
+
from .models import SynthProfile, TimbralFingerprint, OPAQUE
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def supported_devices() -> list[str]:
    """List devices that synthesis_brain has an adapter for.

    Thin pass-through to the adapter registry; reflects whichever
    adapters have registered themselves (via ``@register_adapter``).
    """
    return registered_devices()
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def analyze_synth_patch(
    device_name: str,
    track_index: int,
    device_index: int,
    parameter_state: dict,
    display_values: Optional[dict] = None,
    role_hint: str = "",
) -> SynthProfile:
    """Extract a SynthProfile from live parameter state.

    Dispatches to the registered adapter for ``device_name``. When no
    adapter exists, an opaque SynthProfile is returned instead — it still
    carries parameter_state + display_values so callers can inspect the
    raw patch, just without device-specific structure. Opacity lets the
    composer / Wonder / user-facing layer decide how to handle
    unsupported devices.
    """
    adapter = get_adapter(device_name)
    if adapter is not None:
        return adapter.extract_profile(
            track_index=track_index,
            device_index=device_index,
            parameter_state=parameter_state or {},
            display_values=display_values or {},
            role_hint=role_hint,
        )

    # No adapter registered — fall back to an opaque, structure-free profile.
    fallback_note = (
        f"No synthesis_brain adapter for '{device_name}'; "
        f"falling back to opaque profile"
    )
    return SynthProfile(
        device_name=device_name,
        opacity=OPAQUE,
        track_index=track_index,
        device_index=device_index,
        parameter_state=dict(parameter_state or {}),
        display_values=dict(display_values or {}),
        role_hint=role_hint,
        notes=[fallback_note],
    )
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def propose_synth_branches(
    profile: SynthProfile,
    target: Optional[TimbralFingerprint] = None,
    kernel: Optional[dict] = None,
) -> list[tuple[BranchSeed, dict]]:
    """Emit synthesis-source branch seeds for the given profile.

    Returns a list of (seed, compiled_plan) tuples. Seeds carry
    source="synthesis" and a distinctness reason; compiled_plan is the
    execution_router-ready dict with pre-filled steps. Both can be
    handed to create_experiment(seeds=[seed_dicts], compiled_plans=[plans])
    with no further compilation needed.

    When the profile is opaque (no adapter), returns an empty list.
    Callers can fall back to analytical-only seeds in that case.
    """
    # Fix: compare against the shared NATIVE constant instead of a
    # hard-coded "native" literal, so this gate stays in sync with the
    # opacity values defined in models (the adapters already import
    # NATIVE from ..models). Function-scope import to avoid touching
    # the module header.
    from .models import NATIVE

    if profile.opacity != NATIVE:
        return []
    adapter = get_adapter(profile.device_name)
    if adapter is None:
        # Defensive: a native-opacity profile should always have an
        # adapter, but registry state can drift; fail soft with no seeds.
        return []
    return adapter.propose_branches(
        profile=profile,
        target=target or TimbralFingerprint(),
        kernel=kernel or {},
    )
|