livepilot 1.12.2 → 1.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +219 -0
  2. package/README.md +7 -7
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/branches/__init__.py +34 -0
  7. package/mcp_server/branches/types.py +286 -0
  8. package/mcp_server/composer/__init__.py +10 -1
  9. package/mcp_server/composer/branch_producer.py +349 -0
  10. package/mcp_server/composer/tools.py +58 -1
  11. package/mcp_server/evaluation/policy.py +227 -2
  12. package/mcp_server/experiment/engine.py +47 -11
  13. package/mcp_server/experiment/models.py +112 -8
  14. package/mcp_server/experiment/tools.py +502 -38
  15. package/mcp_server/memory/taste_graph.py +84 -11
  16. package/mcp_server/persistence/taste_store.py +21 -5
  17. package/mcp_server/runtime/session_kernel.py +46 -0
  18. package/mcp_server/runtime/tools.py +29 -3
  19. package/mcp_server/server.py +1 -0
  20. package/mcp_server/synthesis_brain/__init__.py +53 -0
  21. package/mcp_server/synthesis_brain/adapters/__init__.py +34 -0
  22. package/mcp_server/synthesis_brain/adapters/analog.py +273 -0
  23. package/mcp_server/synthesis_brain/adapters/base.py +86 -0
  24. package/mcp_server/synthesis_brain/adapters/drift.py +271 -0
  25. package/mcp_server/synthesis_brain/adapters/meld.py +261 -0
  26. package/mcp_server/synthesis_brain/adapters/operator.py +292 -0
  27. package/mcp_server/synthesis_brain/adapters/wavetable.py +364 -0
  28. package/mcp_server/synthesis_brain/engine.py +91 -0
  29. package/mcp_server/synthesis_brain/models.py +121 -0
  30. package/mcp_server/synthesis_brain/timbre.py +194 -0
  31. package/mcp_server/synthesis_brain/tools.py +231 -0
  32. package/mcp_server/tools/_conductor.py +144 -0
  33. package/mcp_server/wonder_mode/engine.py +324 -0
  34. package/mcp_server/wonder_mode/tools.py +153 -1
  35. package/package.json +2 -2
  36. package/remote_script/LivePilot/__init__.py +1 -1
  37. package/server.json +3 -3
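
Items 20–31 add the new synthesis_brain package; the three hunks reproduced below are its Wavetable adapter, its dispatch engine, and its data models. As a rough sketch of how they appear to compose — import paths are inferred from the file layout above, and all concrete values are hypothetical:

```python
from mcp_server.synthesis_brain.engine import (
    analyze_synth_patch,
    propose_synth_branches,
)
from mcp_server.synthesis_brain.models import TimbralFingerprint

# 1. Turn raw device parameter state into a structured SynthProfile.
profile = analyze_synth_patch(
    device_name="Wavetable",
    track_index=0,
    device_index=1,
    parameter_state={"Osc 1 Position": 0.12, "Voices": 1.0},  # hypothetical patch
)

# 2. Ask the device's adapter for branch seeds biased toward brightness.
for seed, plan in propose_synth_branches(
    profile,
    target=TimbralFingerprint(brightness=0.6),
    kernel={"freshness": 0.8},
):
    # 3. Each plan is execution-ready; see the engine docstring below for
    #    the create_experiment handoff.
    print(plan["summary"], "-", f'{plan["step_count"]} step(s)')
```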
package/mcp_server/synthesis_brain/adapters/wavetable.py
@@ -0,0 +1,364 @@
+"""Wavetable adapter — native-synth-aware branch production for Ableton's Wavetable.
+
+Knows the relevant parameter names. PR9 shipped two canned proposers;
+PR2/v2 adds position-region classification so the shift direction and
+magnitude depend on *where* in the wavetable the patch currently sits,
+not just freshness.
+
+Strategies (selected based on profile + region):
+  - osc_position_to_bright: shift toward the bright/complex end when
+    the current position is sub_region or mid_region and the target
+    timbre asks for brightness.
+  - osc_position_to_dark: shift toward sub/mid when starting bright and
+    the profile or target prefers warmth.
+  - voice_width_variant: increase unison voices + detune for width,
+    unless the patch is already over-thickened.
+
+Each seed's producer_payload captures:
+    {schema_version, device_name, track_index, device_index,
+     strategy, topology_hint: {current_region, target_region,
+     current_pos, new_pos}}
+so PR4 render-verification and future position-to-spectrum mappings can
+refine the heuristic without losing provenance.
+
+Known limitation: region classification is a coarse heuristic on the
+raw Osc 1 Pos float. Specific factory wavetables don't always follow
+the "low value = simple, high value = complex" rule. PR4's render-based
+mapping will refine per-wavetable — producer_payload's topology_hint
+is the contract for that upgrade.
+"""
+
+from __future__ import annotations
+
+import hashlib
+from typing import Optional
+
+from ...branches import BranchSeed, freeform_seed
+from ..models import (
+    SynthProfile,
+    TimbralFingerprint,
+    ModulationGraph,
+    ArticulationProfile,
+    NATIVE,
+)
+from .base import register_adapter
+
+
+# Parameter names we know and care about. Extracted from the Wavetable
+# corpus (see skills/livepilot-core/references/device-knowledge/
+# instruments-synths.md). PR9 uses a small subset; later PRs extend.
+_KNOWN_PARAMS = {
+    "Osc 1 Position",
+    "Osc 2 Position",
+    "Osc 1 Transpose",
+    "Osc 2 Transpose",
+    "Voices",
+    "Voices Detune",
+    "Filter Freq",
+    "Filter Res",
+    "Filter Drive",
+    "Amp Attack",
+    "Amp Release",
+    "LFO 1 Rate",
+    "LFO 1 Amount",
+}
+
+
+# Coarse position → region mapping. Most Ableton factory wavetables fade
+# from low-harmonic (position 0) toward high-harmonic (position 1), but
+# this is approximate. PR4 will refine with render-based spectral mapping.
+_WAVETABLE_REGIONS: list[tuple[float, float, str]] = [
+    (0.0, 0.25, "sub_region"),
+    (0.25, 0.5, "mid_region"),
+    (0.5, 0.75, "bright_region"),
+    (0.75, 1.01, "complex_region"),
+]
+
+
+def _classify_position(pos: float) -> str:
+    """Map an Osc 1 Pos float to a coarse spectral region name."""
+    for lo, hi, region in _WAVETABLE_REGIONS:
+        if lo <= pos < hi:
+            return region
+    return "complex_region"
+
+
+def _choose_target_region(
+    current_region: str,
+    target: "TimbralFingerprint",
+) -> str:
+    """Pick a contrasting region based on the target fingerprint.
+
+    When the target asks for more brightness, move toward
+    bright_region/complex_region. When it asks for more warmth or less
+    brightness (negative target.brightness), move toward
+    sub_region/mid_region. When the target is neutral, shift one region
+    away from current for contrast.
+    """
+    want_bright = target.brightness
+    if abs(want_bright) < 0.1:
+        # Neutral target — shift one region away for variety.
+        fallback_map = {
+            "sub_region": "mid_region",
+            "mid_region": "bright_region",
+            "bright_region": "mid_region",
+            "complex_region": "bright_region",
+        }
+        return fallback_map.get(current_region, "mid_region")
+
+    if want_bright > 0:
+        # Bias brighter.
+        upshift = {
+            "sub_region": "mid_region",
+            "mid_region": "bright_region",
+            "bright_region": "complex_region",
+            "complex_region": "complex_region",
+        }
+        return upshift.get(current_region, "bright_region")
+
+    # want_bright < 0 — bias darker.
+    downshift = {
+        "complex_region": "bright_region",
+        "bright_region": "mid_region",
+        "mid_region": "sub_region",
+        "sub_region": "sub_region",
+    }
+    return downshift.get(current_region, "sub_region")
+
+
+def _region_center(region: str) -> float:
+    """Middle of the region's position range — the target for a shift."""
+    for lo, hi, name in _WAVETABLE_REGIONS:
+        if name == region:
+            return round((lo + min(hi, 1.0)) / 2.0, 3)
+    return 0.5
+
+
+@register_adapter
+class WavetableAdapter:
+    """Adapter for Ableton's native Wavetable."""
+
+    device_name: str = "Wavetable"
+
+    def extract_profile(
+        self,
+        track_index: int,
+        device_index: int,
+        parameter_state: dict,
+        display_values: Optional[dict] = None,
+        role_hint: str = "",
+    ) -> SynthProfile:
+        notes: list[str] = []
+
+        voices = parameter_state.get("Voices", 0)
+        detune = parameter_state.get("Voices Detune", 0.0)
+        if voices and voices >= 4 and detune and detune > 0.1:
+            notes.append(
+                f"voices={voices}, detune={detune:.2f} — already rich, avoid over-thickening"
+            )
+        if voices and voices <= 1:
+            notes.append("mono voice mode — width variants must add voices")
+
+        # Articulation from amp envelope when present
+        articulation = ArticulationProfile(
+            attack_ms=float(parameter_state.get("Amp Attack", 0.0) or 0.0),
+            release_ms=float(parameter_state.get("Amp Release", 0.0) or 0.0),
+        )
+
+        # Modulation graph — minimal in PR9, just LFO 1 if it has amount > 0
+        mod = ModulationGraph()
+        lfo_amount = parameter_state.get("LFO 1 Amount", 0.0)
+        if lfo_amount and abs(lfo_amount) > 0.01:
+            mod.routes.append({
+                "source": "LFO 1",
+                "target": "(destination inferred from patch)",
+                "amount": lfo_amount,
+                "range": None,
+            })
+
+        # Filter only the known parameters into parameter_state for a compact
+        # profile — full state is available to callers via the raw dict they
+        # already have. This keeps the profile focused on what adapters use.
+        focused_state = {
+            k: v for k, v in parameter_state.items() if k in _KNOWN_PARAMS
+        }
+        focused_display = (
+            {k: v for k, v in (display_values or {}).items() if k in _KNOWN_PARAMS}
+            if display_values
+            else {}
+        )
+
+        return SynthProfile(
+            device_name=self.device_name,
+            opacity=NATIVE,
+            track_index=track_index,
+            device_index=device_index,
+            parameter_state=focused_state,
+            display_values=focused_display,
+            role_hint=role_hint,
+            modulation=mod,
+            articulation=articulation,
+            notes=notes,
+        )
+
+    def propose_branches(
+        self,
+        profile: SynthProfile,
+        target: TimbralFingerprint,
+        kernel: Optional[dict] = None,
+    ) -> list[tuple[BranchSeed, dict]]:
+        kernel = kernel or {}
+        freshness = float(kernel.get("freshness", 0.5) or 0.5)
+        track = profile.track_index
+        device = profile.device_index
+
+        results: list[tuple[BranchSeed, dict]] = []
+
+        # ── Branch A: region-aware Osc 1 Position shift ──────────────
+        # Classify current position into a spectral region, pick a
+        # contrasting target region based on the timbral target, then
+        # shift to that region's center. The actual shift magnitude
+        # (how close to the center) scales with freshness — low
+        # freshness stops partway, high freshness commits fully.
+        current_pos = float(profile.parameter_state.get("Osc 1 Position", 0.0) or 0.0)
+        current_region = _classify_position(current_pos)
+        target_region = _choose_target_region(current_region, target)
+        region_target_pos = _region_center(target_region)
+
+        # Blend: low freshness only moves partway toward the target region,
+        # high freshness commits fully.
+        blend = 0.4 if freshness < 0.5 else 1.0
+        new_pos = round(
+            current_pos + (region_target_pos - current_pos) * blend, 3
+        )
+        new_pos = max(0.0, min(1.0, new_pos))
+
+        # Strategy name reflects the direction of the actual move.
+        if new_pos > current_pos:
+            strategy = "osc_position_to_bright"
+        elif new_pos < current_pos:
+            strategy = "osc_position_to_dark"
+        else:
+            strategy = "osc_position_shift"
+
+        topology_hint = {
+            "current_region": current_region,
+            "target_region": target_region,
+            "current_pos": current_pos,
+            "new_pos": new_pos,
+        }
+
+        seed_a = freeform_seed(
+            seed_id=_short_id(
+                "wt_pos", f"{track}:{device}:{current_region}:{target_region}:{new_pos:.2f}"
+            ),
+            hypothesis=(
+                f"Shift Wavetable Osc 1 Position {current_pos:.2f} ({current_region}) → "
+                f"{new_pos:.2f} ({target_region}) for a {strategy.split('_to_')[-1]} spectrum"
+            ),
+            source="synthesis",
+            novelty_label="strong" if freshness < 0.7 else "unexpected",
+            risk_label="low",
+            affected_scope={
+                "track_indices": [track],
+                "device_paths": [f"track/{track}/device/{device}"],
+            },
+            distinctness_reason=(
+                f"moves Osc 1 Position from {current_region} to {target_region}"
+            ),
+            producer_payload={
+                "device_name": self.device_name,
+                "track_index": track,
+                "device_index": device,
+                "strategy": strategy,
+                "topology_hint": topology_hint,
+            },
+        )
+        plan_a = {
+            "steps": [
+                {
+                    "tool": "set_device_parameter",
+                    "params": {
+                        "track_index": track,
+                        "device_index": device,
+                        "parameter_name": "Osc 1 Position",
+                        "value": new_pos,
+                    },
+                },
+            ],
+            "step_count": 1,
+            "summary": f"Osc 1 Position {current_pos:.2f} ({current_region}) → {new_pos:.2f} ({target_region})",
+        }
+        results.append((seed_a, plan_a))
+
+        # ── Branch B: voice_width_variant ─────────────────────────────
+        # Push Voices + Detune for a richer stereo image — unless profile
+        # notes flag that voices are already high (avoid over-thickening).
+        skip_width = any("over-thickening" in n for n in profile.notes)
+        if not skip_width:
+            current_voices = float(profile.parameter_state.get("Voices", 1) or 1)
+            current_detune = float(profile.parameter_state.get("Voices Detune", 0.0) or 0.0)
+            new_voices = min(8.0, max(current_voices, 4.0))
+            new_detune = min(0.5, max(current_detune + 0.1, 0.15))
+            seed_b = freeform_seed(
+                seed_id=_short_id("wt_width", f"{track}:{device}:{new_voices}:{new_detune:.2f}"),
+                hypothesis=(
+                    f"Increase Wavetable voices to {int(new_voices)} with detune "
+                    f"{new_detune:.2f} for a wider, richer image"
+                ),
+                source="synthesis",
+                novelty_label="safe",
+                risk_label="low",
+                affected_scope={
+                    "track_indices": [track],
+                    "device_paths": [f"track/{track}/device/{device}"],
+                },
+                distinctness_reason=(
+                    "only seed that changes voice count + detune; focuses on "
+                    "width rather than spectrum"
+                ),
+                producer_payload={
+                    "device_name": self.device_name,
+                    "track_index": track,
+                    "device_index": device,
+                    "strategy": "voice_width_variant",
+                    "topology_hint": {
+                        "current_voices": int(current_voices),
+                        "current_detune": current_detune,
+                        "new_voices": int(new_voices),
+                        "new_detune": new_detune,
+                    },
+                },
+            )
+            plan_b = {
+                "steps": [
+                    {
+                        "tool": "set_device_parameter",
+                        "params": {
+                            "track_index": track,
+                            "device_index": device,
+                            "parameter_name": "Voices",
+                            "value": new_voices,
+                        },
+                    },
+                    {
+                        "tool": "set_device_parameter",
+                        "params": {
+                            "track_index": track,
+                            "device_index": device,
+                            "parameter_name": "Voices Detune",
+                            "value": round(new_detune, 3),
+                        },
+                    },
+                ],
+                "step_count": 2,
+                "summary": f"Voices → {int(new_voices)}, Detune → {new_detune:.2f}",
+            }
+            results.append((seed_b, plan_b))
+
+        return results


def _short_id(prefix: str, key: str) -> str:
    h = hashlib.sha256(f"{prefix}:{key}".encode()).hexdigest()[:10]
    return f"{prefix}_{h}"
package/mcp_server/synthesis_brain/engine.py
@@ -0,0 +1,91 @@
+"""Synthesis-brain engine — dispatches to the right adapter based on device name.
+
+Two primary entry points:
+    analyze_synth_patch(device_name, ...) -> SynthProfile
+    propose_synth_branches(profile, ...) -> list[(BranchSeed, compiled_plan)]
+
+Both are pure Python — no @mcp.tool() decorators in PR9. PR12 wires
+dedicated MCP tools and does the tool-count metadata sweep.
+"""
+
+from __future__ import annotations
+
+from typing import Optional
+
+from ..branches import BranchSeed
+from .adapters import get_adapter, registered_devices
+from .models import SynthProfile, TimbralFingerprint, OPAQUE
+
+
+def supported_devices() -> list[str]:
+    """List devices that synthesis_brain has an adapter for."""
+    return registered_devices()
+
+
+def analyze_synth_patch(
+    device_name: str,
+    track_index: int,
+    device_index: int,
+    parameter_state: dict,
+    display_values: Optional[dict] = None,
+    role_hint: str = "",
+) -> SynthProfile:
+    """Extract a SynthProfile from live parameter state.
+
+    When no adapter exists for the device, returns an opaque SynthProfile —
+    the profile still carries parameter_state + display_values so callers
+    can inspect the raw patch, but without device-specific structure.
+    Opacity lets the composer / Wonder / user-facing layer decide how to
+    handle unsupported devices.
+    """
+    adapter = get_adapter(device_name)
+    if adapter is None:
+        return SynthProfile(
+            device_name=device_name,
+            opacity=OPAQUE,
+            track_index=track_index,
+            device_index=device_index,
+            parameter_state=dict(parameter_state or {}),
+            display_values=dict(display_values or {}),
+            role_hint=role_hint,
+            notes=[
+                f"No synthesis_brain adapter for '{device_name}'; "
+                "falling back to opaque profile"
+            ],
+        )
+    return adapter.extract_profile(
+        track_index=track_index,
+        device_index=device_index,
+        parameter_state=parameter_state or {},
+        display_values=display_values or {},
+        role_hint=role_hint,
+    )
+
+
+def propose_synth_branches(
+    profile: SynthProfile,
+    target: Optional[TimbralFingerprint] = None,
+    kernel: Optional[dict] = None,
+) -> list[tuple[BranchSeed, dict]]:
+    """Emit synthesis-source branch seeds for the given profile.
+
+    Returns a list of (seed, compiled_plan) tuples. Seeds carry
+    source="synthesis" and a distinctness reason; compiled_plan is the
+    execution_router-ready dict with pre-filled steps. Both can be
+    handed to create_experiment(seeds=[seed_dicts], compiled_plans=[plans])
+    with no further compilation needed.
+
+    When the profile is opaque (no adapter), returns an empty list.
+    Callers can fall back to analytical-only seeds in that case.
+    """
+    if profile.opacity != "native":
+        return []
+    adapter = get_adapter(profile.device_name)
+    if adapter is None:
+        return []
+    target = target or TimbralFingerprint()
+    return adapter.propose_branches(
+        profile=profile,
+        target=target,
+        kernel=kernel or {},
+    )
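
The opaque fallback is the engine's main contract. A minimal sketch of the miss path, using a hypothetical third-party plugin name and the import layout assumed above:

```python
from mcp_server.synthesis_brain.engine import (
    analyze_synth_patch,
    propose_synth_branches,
)
from mcp_server.synthesis_brain.models import OPAQUE

# Hypothetical plugin name with no registered adapter.
plugin = analyze_synth_patch(
    device_name="SomeVSTSynth",
    track_index=2,
    device_index=0,
    parameter_state={"Macro 1": 0.4},
)
assert plugin.opacity == OPAQUE                # raw state kept, no structure
assert propose_synth_branches(plugin) == []    # caller falls back to analytical seeds
```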
package/mcp_server/synthesis_brain/models.py
@@ -0,0 +1,121 @@
+"""Synthesis-brain data models.
+
+Pure dataclasses — zero I/O. Shape is intentionally minimal in PR9;
+later PRs firm up fields as adapters discover what's actually useful.
+"""
+
+from __future__ import annotations
+
+from dataclasses import asdict, dataclass, field
+from typing import Literal
+
+
+# Device opacity markers — natives are inspectable via device parameters,
+# opaque plugins (AU / VST) are not. Adapters are registered for natives only.
+NATIVE = "native"
+OPAQUE = "opaque"
+
+DeviceOpacity = Literal["native", "opaque"]
+
+
+@dataclass
+class TimbralFingerprint:
+    """A compact per-device timbre target.
+
+    All dimensions are floats in [-1.0, 1.0]; 0.0 means "no change from
+    whatever the source patch is". This intentionally mirrors the existing
+    TimbralGoalVector in sound_design.models so the two subsystems can
+    share goal inputs.
+    """
+
+    brightness: float = 0.0
+    warmth: float = 0.0
+    bite: float = 0.0
+    softness: float = 0.0
+    instability: float = 0.0
+    width: float = 0.0
+    texture_density: float = 0.0
+    movement: float = 0.0
+    polish: float = 0.0
+
+    def to_dict(self) -> dict:
+        return asdict(self)
+
+
+@dataclass
+class ModulationGraph:
+    """Flat list of modulation routes on a single device.
+
+    Each route: {source, target, amount, range}. Shape is deliberately
+    loose because natives differ (Wavetable has LFO routing, Operator
+    has a per-osc modulation matrix, Analog has FM + Envelope routing).
+    Adapters populate it in a device-consistent way.
+    """
+
+    routes: list[dict] = field(default_factory=list)
+
+    def to_dict(self) -> dict:
+        return {"routes": list(self.routes)}
+
+
+@dataclass
+class ArticulationProfile:
+    """How a patch responds to note-on / note-off / velocity.
+
+    attack_ms / release_ms are rough envelope times; velocity_mapping is
+    a tag ("linear", "exponential", "flat"); mono indicates mono-only
+    mode (portamento hints live here in later PRs).
+    """
+
+    attack_ms: float = 0.0
+    release_ms: float = 0.0
+    velocity_mapping: str = "linear"
+    mono: bool = False
+
+    def to_dict(self) -> dict:
+        return asdict(self)
+
+
+@dataclass
+class SynthProfile:
+    """Extracted per-device patch state.
+
+    Fields:
+      device_name: the Ableton device name ("Wavetable", "Operator", ...)
+      opacity: NATIVE ⇒ adapter knows this device; OPAQUE ⇒ fallback path
+      track_index / device_index: where the device lives in the session
+      parameter_state: raw ``{name: value}`` dict from get_device_parameters;
+        adapters translate this into structured knowledge
+      display_values: parallel ``{name: value_string}`` when available
+        (lets adapters reason about actual Hz / dB / % rather than 0-1 floats)
+      role_hint: caller-supplied role ("pad", "lead", "bass", "perc", ...) or ""
+      modulation: the device's current modulation graph
+      articulation: envelope + velocity response
+      notes: free-form observations the adapter wants to record for downstream
+        reasoning (e.g. "voices=4, detune=0.12 — subtly rich already")
+    """
+
+    device_name: str = ""
+    opacity: DeviceOpacity = OPAQUE
+    track_index: int = -1
+    device_index: int = -1
+    parameter_state: dict = field(default_factory=dict)
+    display_values: dict = field(default_factory=dict)
+    role_hint: str = ""
+    modulation: ModulationGraph = field(default_factory=ModulationGraph)
+    articulation: ArticulationProfile = field(default_factory=ArticulationProfile)
+    notes: list[str] = field(default_factory=list)
+
+    def to_dict(self) -> dict:
+        return {
+            "device_name": self.device_name,
+            "opacity": self.opacity,
+            "track_index": self.track_index,
+            "device_index": self.device_index,
+            "parameter_state": dict(self.parameter_state),
+            "display_values": dict(self.display_values),
+            "role_hint": self.role_hint,
+            "modulation": self.modulation.to_dict(),
+            "articulation": self.articulation.to_dict(),
+            "notes": list(self.notes),
+        }
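
Because every `to_dict` flattens to plain dicts, lists, and scalars, a profile serializes straight to JSON. A minimal sketch with illustrative values, assuming the import path matches the file layout:

```python
import json

from mcp_server.synthesis_brain.models import (
    ArticulationProfile, ModulationGraph, NATIVE, SynthProfile,
)

# Build a small native profile by hand (values are illustrative only).
profile = SynthProfile(
    device_name="Wavetable",
    opacity=NATIVE,
    track_index=0,
    device_index=1,
    parameter_state={"Osc 1 Position": 0.375, "Voices": 4.0},
    modulation=ModulationGraph(routes=[
        {"source": "LFO 1", "target": "(destination inferred from patch)",
         "amount": 0.2, "range": None},
    ]),
    articulation=ArticulationProfile(attack_ms=5.0, release_ms=120.0),
    notes=["illustrative profile"],
)

# Every nested model flattens to plain types, so json.dumps just works.
print(json.dumps(profile.to_dict(), indent=2))
```

The copies in `to_dict` (`dict(...)`, `list(...)`) mean the serialized snapshot does not alias the live profile, which matters when seeds are persisted while the session keeps mutating.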