livepilot 1.10.0 → 1.10.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/AGENTS.md +3 -3
- package/CHANGELOG.md +214 -0
- package/CONTRIBUTING.md +2 -2
- package/LICENSE +62 -21
- package/README.md +264 -286
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/skills/livepilot-arrangement/SKILL.md +18 -1
- package/livepilot/skills/livepilot-core/SKILL.md +5 -5
- package/livepilot/skills/livepilot-core/references/overview.md +3 -3
- package/livepilot/skills/livepilot-devices/SKILL.md +23 -2
- package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +1 -1
- package/livepilot/skills/livepilot-release/SKILL.md +21 -17
- package/livepilot/skills/livepilot-sample-engine/SKILL.md +2 -1
- package/livepilot/skills/livepilot-wonder/SKILL.md +8 -6
- package/livepilot.mcpb +0 -0
- package/m4l_device/LivePilot_Analyzer.adv +0 -0
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/manifest.json +4 -4
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/composer/engine.py +249 -169
- package/mcp_server/composer/sample_resolver.py +153 -0
- package/mcp_server/composer/tools.py +97 -87
- package/mcp_server/memory/taste_accessors.py +47 -0
- package/mcp_server/preview_studio/engine.py +9 -2
- package/mcp_server/preview_studio/tools.py +78 -35
- package/mcp_server/project_brain/tools.py +34 -0
- package/mcp_server/runtime/execution_router.py +180 -38
- package/mcp_server/runtime/mcp_dispatch.py +46 -0
- package/mcp_server/runtime/remote_commands.py +4 -1
- package/mcp_server/runtime/tools.py +55 -32
- package/mcp_server/sample_engine/moves.py +12 -12
- package/mcp_server/sample_engine/slice_workflow.py +190 -0
- package/mcp_server/sample_engine/tools.py +104 -1
- package/mcp_server/semantic_moves/device_creation_moves.py +7 -7
- package/mcp_server/semantic_moves/mix_moves.py +8 -8
- package/mcp_server/semantic_moves/models.py +7 -7
- package/mcp_server/semantic_moves/performance_moves.py +4 -4
- package/mcp_server/semantic_moves/sample_compilers.py +14 -9
- package/mcp_server/semantic_moves/sound_design_moves.py +4 -4
- package/mcp_server/semantic_moves/tools.py +63 -10
- package/mcp_server/semantic_moves/transition_moves.py +4 -4
- package/mcp_server/server.py +20 -1
- package/mcp_server/session_continuity/tracker.py +4 -1
- package/mcp_server/tools/_conductor.py +16 -0
- package/mcp_server/tools/_planner_engine.py +24 -0
- package/mcp_server/tools/analyzer.py +2 -0
- package/mcp_server/tools/planner.py +3 -0
- package/mcp_server/wonder_mode/engine.py +59 -13
- package/mcp_server/wonder_mode/tools.py +33 -1
- package/package.json +8 -8
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/devices.py +10 -0
|
@@ -0,0 +1,190 @@
|
|
|
1
|
+
"""Slice workflow planner — generates MIDI patterns for sliced samples.
|
|
2
|
+
|
|
3
|
+
Given a slice count and musical intent, produces a complete workflow plan:
|
|
4
|
+
- Create a clip
|
|
5
|
+
- Program MIDI notes mapped to Simpler slices
|
|
6
|
+
- Suggest follow-up techniques
|
|
7
|
+
|
|
8
|
+
This is pure computation — does not execute against Ableton.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import hashlib
|
|
14
|
+
|
|
15
|
+
# Simpler maps slices to MIDI notes starting at C3 (60)
|
|
16
|
+
SLICE_BASE_NOTE = 60
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def plan_slice_steps(
    slice_count: int,
    intent: str = "rhythm",
    bars: int = 4,
    tempo: float = 120.0,
    track_index: int = 0,
    clip_index: int = 0,
) -> dict:
    """Generate a slice workflow plan with real MIDI notes.

    Pure computation — does not execute against Ableton.

    Args:
        slice_count: number of Simpler slices available; must be >= 1.
        intent: pattern style — rhythm, hook, texture, percussion, melodic
            (unknown values fall back to rhythm inside _generate_notes).
        bars: pattern length in 4/4 bars; must be >= 1.
        tempo: session tempo in BPM — carried for context, not used in
            note placement (times are in beats, tempo-independent).
        track_index: session track that receives the clip.
        clip_index: clip slot on that track.

    Returns:
        dict with steps (ordered tool calls), note_map, and suggestions.

    Raises:
        ValueError: if slice_count or bars is < 1. Previously such input
            surfaced as an IndexError deep inside the pattern generators
            (note_map[0] on an empty map); fail fast with a clear message.
    """
    if slice_count < 1:
        raise ValueError(f"slice_count must be >= 1, got {slice_count}")
    if bars < 1:
        raise ValueError(f"bars must be >= 1, got {bars}")

    note_map = _build_note_map(slice_count)
    beats = bars * 4  # 4/4 time
    notes = _generate_notes(note_map, intent, beats, slice_count)

    # Two-step plan: make the clip, then fill it with the slice pattern.
    steps = [
        {
            "tool": "create_clip",
            "params": {
                "track_index": track_index,
                "clip_index": clip_index,
                "length": float(beats),
            },
            "description": f"Create {bars}-bar clip for {intent} slice pattern",
        },
        {
            "tool": "add_notes",
            "params": {
                "track_index": track_index,
                "clip_index": clip_index,
                "notes": notes,
            },
            "description": f"Program {len(notes)} notes across {slice_count} slices",
        },
    ]

    return {
        "steps": steps,
        "note_map": note_map,
        "slice_count": slice_count,
        "intent": intent,
        "bars": bars,
        "note_count": len(notes),
        "suggested_techniques": _suggest_techniques(intent),
    }
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def _build_note_map(slice_count: int) -> list[dict]:
    """Assign each slice a sequential MIDI note starting at SLICE_BASE_NOTE."""
    entries: list[dict] = []
    for slot in range(slice_count):
        entries.append({
            "slice_index": slot,
            "midi_note": SLICE_BASE_NOTE + slot,
            "label": f"Slice {slot + 1}",
        })
    return entries
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def _generate_notes(
    note_map: list[dict], intent: str, beats: int, slice_count: int,
) -> list[dict]:
    """Dispatch to the deterministic pattern generator for this intent.

    Unrecognized intents fall back to the rhythm generator.
    """
    dispatch = {
        "rhythm": _gen_rhythm,
        "hook": _gen_hook,
        "texture": _gen_texture,
        "percussion": _gen_percussion,
        "melodic": _gen_melodic,
    }
    chosen = dispatch.get(intent, _gen_rhythm)
    return chosen(note_map, beats, slice_count)
|
89
|
+
|
|
90
|
+
|
|
91
|
+
def _gen_rhythm(note_map: list, beats: int, sc: int) -> list[dict]:
|
|
92
|
+
"""Sparse groove — hits on downbeats and off-beats."""
|
|
93
|
+
notes = []
|
|
94
|
+
step = 0.5 # 8th notes
|
|
95
|
+
for t in range(int(beats / step)):
|
|
96
|
+
time = t * step
|
|
97
|
+
if t % 4 == 0:
|
|
98
|
+
idx = 0
|
|
99
|
+
elif t % 4 == 2:
|
|
100
|
+
idx = min(1, sc - 1)
|
|
101
|
+
elif t % 8 in (3, 7) and sc > 2:
|
|
102
|
+
idx = min(2 + (t % 3), sc - 1)
|
|
103
|
+
else:
|
|
104
|
+
continue
|
|
105
|
+
vel = 100 - (t % 4) * 5 # Downbeats louder
|
|
106
|
+
notes.append({
|
|
107
|
+
"pitch": note_map[idx]["midi_note"],
|
|
108
|
+
"start_time": time,
|
|
109
|
+
"duration": step * 0.8,
|
|
110
|
+
"velocity": max(60, min(127, vel)),
|
|
111
|
+
})
|
|
112
|
+
return notes
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def _gen_hook(note_map: list, beats: int, sc: int) -> list[dict]:
|
|
116
|
+
"""Repeated motif contour — short phrase looped."""
|
|
117
|
+
phrase_len = min(4.0, beats)
|
|
118
|
+
motif_slices = list(range(min(4, sc)))
|
|
119
|
+
notes = []
|
|
120
|
+
reps = max(1, int(beats / phrase_len))
|
|
121
|
+
for rep in range(reps):
|
|
122
|
+
offset = rep * phrase_len
|
|
123
|
+
for i, idx in enumerate(motif_slices):
|
|
124
|
+
notes.append({
|
|
125
|
+
"pitch": note_map[idx]["midi_note"],
|
|
126
|
+
"start_time": offset + i * (phrase_len / len(motif_slices)),
|
|
127
|
+
"duration": phrase_len / len(motif_slices) * 0.9,
|
|
128
|
+
"velocity": 100 - i * 5,
|
|
129
|
+
})
|
|
130
|
+
return notes
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _gen_texture(note_map: list, beats: int, sc: int) -> list[dict]:
|
|
134
|
+
"""Sparse, long notes — sustained atmosphere."""
|
|
135
|
+
notes = []
|
|
136
|
+
used = min(3, sc)
|
|
137
|
+
spacing = beats / max(used, 1)
|
|
138
|
+
for i in range(used):
|
|
139
|
+
notes.append({
|
|
140
|
+
"pitch": note_map[i]["midi_note"],
|
|
141
|
+
"start_time": i * spacing,
|
|
142
|
+
"duration": spacing * 0.95,
|
|
143
|
+
"velocity": 65,
|
|
144
|
+
})
|
|
145
|
+
return notes
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def _gen_percussion(note_map: list, beats: int, sc: int) -> list[dict]:
|
|
149
|
+
"""Kick/snare/hat-like distribution."""
|
|
150
|
+
notes = []
|
|
151
|
+
step = 0.25 # 16th notes
|
|
152
|
+
for t in range(int(beats / step)):
|
|
153
|
+
time = t * step
|
|
154
|
+
if t % 8 == 0 and sc > 0:
|
|
155
|
+
notes.append({"pitch": note_map[0]["midi_note"], "start_time": time,
|
|
156
|
+
"duration": 0.2, "velocity": 110})
|
|
157
|
+
if t % 8 == 4 and sc > 1:
|
|
158
|
+
notes.append({"pitch": note_map[min(1, sc - 1)]["midi_note"], "start_time": time,
|
|
159
|
+
"duration": 0.2, "velocity": 100})
|
|
160
|
+
if t % 2 == 0 and sc > 2:
|
|
161
|
+
notes.append({"pitch": note_map[min(2, sc - 1)]["midi_note"], "start_time": time,
|
|
162
|
+
"duration": 0.15, "velocity": 70})
|
|
163
|
+
return notes
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def _gen_melodic(note_map: list, beats: int, sc: int) -> list[dict]:
|
|
167
|
+
"""Pitch contour phrase — ascending/descending motion."""
|
|
168
|
+
notes = []
|
|
169
|
+
phrase_notes = min(8, sc)
|
|
170
|
+
step = beats / max(phrase_notes, 1)
|
|
171
|
+
for i in range(phrase_notes):
|
|
172
|
+
notes.append({
|
|
173
|
+
"pitch": note_map[i % sc]["midi_note"],
|
|
174
|
+
"start_time": i * step,
|
|
175
|
+
"duration": step * 0.85,
|
|
176
|
+
"velocity": 85,
|
|
177
|
+
})
|
|
178
|
+
return notes
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def _suggest_techniques(intent: str) -> list[str]:
|
|
182
|
+
"""Suggest follow-up techniques based on intent."""
|
|
183
|
+
suggestions = {
|
|
184
|
+
"rhythm": ["quantize_clip", "add reverb send for depth", "layer with acoustic hits"],
|
|
185
|
+
"hook": ["duplicate for variation", "add filter automation", "pitch shift for call-response"],
|
|
186
|
+
"texture": ["heavy reverb send", "low-pass filter automation", "pan automation"],
|
|
187
|
+
"percussion": ["parallel compression", "transient shaping", "short room reverb send"],
|
|
188
|
+
"melodic": ["add delay send", "pitch correction if needed", "double with octave layer"],
|
|
189
|
+
}
|
|
190
|
+
return suggestions.get(intent, ["quantize_clip", "add effects"])
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
"""Sample Engine MCP tools —
|
|
1
|
+
"""Sample Engine MCP tools — 7 intelligence-layer tools.
|
|
2
2
|
|
|
3
3
|
No new Ableton communication — these orchestrate existing tools
|
|
4
4
|
through the analyzer, critics, planner, and technique library.
|
|
@@ -319,6 +319,8 @@ def plan_sample_workflow(
|
|
|
319
319
|
intent: str = "rhythm",
|
|
320
320
|
philosophy: str = "auto",
|
|
321
321
|
target_track: Optional[int] = None,
|
|
322
|
+
section_type: Optional[str] = None,
|
|
323
|
+
desired_role: Optional[str] = None,
|
|
322
324
|
) -> dict:
|
|
323
325
|
"""Full end-to-end sample workflow: analyze, critique, select technique, compile plan.
|
|
324
326
|
|
|
@@ -328,6 +330,8 @@ def plan_sample_workflow(
|
|
|
328
330
|
intent: rhythm, texture, layer, melody, vocal, atmosphere, transform
|
|
329
331
|
philosophy: surgeon, alchemist, auto
|
|
330
332
|
target_track: existing track index, or None for new track
|
|
333
|
+
section_type: optional section context (intro, verse, chorus, drop, etc.)
|
|
334
|
+
desired_role: optional sample role (hook_sample, texture_bed, break_layer, etc.)
|
|
331
335
|
"""
|
|
332
336
|
if file_path is None and search_query is None:
|
|
333
337
|
return {"error": "Provide either file_path or search_query"}
|
|
@@ -440,3 +444,102 @@ def get_sample_opportunities(ctx: Context) -> dict:
|
|
|
440
444
|
"opportunities": opportunities,
|
|
441
445
|
"track_count": track_count,
|
|
442
446
|
}
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
@mcp.tool()
def plan_slice_workflow(
    ctx: Context,
    file_path: Optional[str] = None,
    track_index: Optional[int] = None,
    device_index: int = 0,
    intent: str = "rhythm",
    target_section: Optional[str] = None,
    target_track: Optional[int] = None,
    bars: int = 4,
    style_hint: str = "",
) -> dict:
    """Plan an end-to-end slice workflow for a sample.

    Builds a Simpler slice strategy, MIDI note mapping, and starter
    pattern for the requested musical intent. The result is a compiled
    workflow plan only — nothing is executed here; the agent walks the
    returned tool calls in order.

    Provide either file_path (new sample to load) or track_index +
    device_index (existing Simpler with loaded sample).

    intent: rhythm | hook | texture | percussion | melodic
    bars: number of bars for the pattern (default 4)
    target_section: optional section name for arrangement hints
    style_hint: optional genre/style context (e.g. "dilla", "burial")
    """
    from .slice_workflow import plan_slice_steps

    # Read the session tempo when a live connection exists; otherwise
    # keep the 120 BPM default. Best-effort — never fail the plan.
    tempo = 120.0
    try:
        ableton = ctx.lifespan_context.get("ableton")
        if ableton:
            session = ableton.send_command("get_session_info", {})
            tempo = float(session.get("tempo", 120.0))
    except Exception:
        pass

    # Slice count: query the existing Simpler when a track is given,
    # otherwise assume the default transient slice count of 8.
    slice_count = 8
    if track_index is not None:
        try:
            ableton = ctx.lifespan_context.get("ableton")
            if ableton:
                reply = ableton.send_command("get_simpler_slices", {
                    "track_index": track_index, "device_index": device_index,
                })
                if isinstance(reply, dict) and reply.get("slice_count"):
                    slice_count = reply["slice_count"]
        except Exception:
            pass  # Fall back to default

    workflow = plan_slice_steps(
        slice_count=slice_count,
        intent=intent,
        bars=bars,
        tempo=tempo,
        track_index=target_track if target_track is not None else 0,
    )

    # A brand-new sample needs a track plus a Simpler in slice mode
    # before the clip steps can run — prepend those setup calls.
    if file_path:
        dest = target_track or 0
        setup = [
            {
                "tool": "create_midi_track",
                "params": {"name": f"Slice {intent.title()}"},
                "description": "Create track for sliced sample",
            },
            {
                "tool": "load_sample_to_simpler",
                "params": {"track_index": dest, "file_path": file_path},
                "description": f"Load sample into Simpler: {file_path}",
            },
            {
                "tool": "set_simpler_playback_mode",
                "params": {"track_index": dest, "device_index": 0, "playback_mode": 2},
                "description": "Set Simpler to Slice mode",
            },
        ]
        workflow["steps"] = setup + workflow["steps"]

    if target_section:
        workflow["arrangement_hints"] = {
            "target_section": target_section,
            "suggested_placement": f"Place slice pattern in {target_section}",
        }

    # Echo the caller's inputs so the agent can correlate plan and source.
    workflow["file_path"] = file_path
    workflow["track_index"] = track_index
    workflow["device_index"] = device_index
    workflow["style_hint"] = style_hint

    return workflow
|
|
@@ -15,7 +15,7 @@ CREATE_CHAOS_MODULATOR = SemanticMove(
|
|
|
15
15
|
targets={"novelty": 0.8, "motion": 0.6, "surprise": 0.7},
|
|
16
16
|
protect={"clarity": 0.5},
|
|
17
17
|
risk_level="medium",
|
|
18
|
-
|
|
18
|
+
plan_template=[
|
|
19
19
|
{
|
|
20
20
|
"tool": "generate_m4l_effect",
|
|
21
21
|
"params": {
|
|
@@ -46,7 +46,7 @@ CREATE_FEEDBACK_RESONATOR = SemanticMove(
|
|
|
46
46
|
targets={"depth": 0.6, "texture": 0.5, "novelty": 0.4},
|
|
47
47
|
protect={"clarity": 0.6, "punch": 0.5},
|
|
48
48
|
risk_level="low",
|
|
49
|
-
|
|
49
|
+
plan_template=[
|
|
50
50
|
{
|
|
51
51
|
"tool": "generate_m4l_effect",
|
|
52
52
|
"params": {
|
|
@@ -77,7 +77,7 @@ CREATE_WAVEFOLDER_EFFECT = SemanticMove(
|
|
|
77
77
|
targets={"edge": 0.6, "novelty": 0.5, "energy": 0.4},
|
|
78
78
|
protect={"warmth": 0.5},
|
|
79
79
|
risk_level="medium",
|
|
80
|
-
|
|
80
|
+
plan_template=[
|
|
81
81
|
{
|
|
82
82
|
"tool": "generate_m4l_effect",
|
|
83
83
|
"params": {
|
|
@@ -108,7 +108,7 @@ CREATE_BITCRUSHER_EFFECT = SemanticMove(
|
|
|
108
108
|
targets={"edge": 0.5, "novelty": 0.4, "texture": 0.4},
|
|
109
109
|
protect={"clarity": 0.4},
|
|
110
110
|
risk_level="low",
|
|
111
|
-
|
|
111
|
+
plan_template=[
|
|
112
112
|
{
|
|
113
113
|
"tool": "generate_m4l_effect",
|
|
114
114
|
"params": {
|
|
@@ -139,7 +139,7 @@ CREATE_KARPLUS_STRING = SemanticMove(
|
|
|
139
139
|
targets={"novelty": 0.5, "texture": 0.6, "depth": 0.4},
|
|
140
140
|
protect={"clarity": 0.6},
|
|
141
141
|
risk_level="low",
|
|
142
|
-
|
|
142
|
+
plan_template=[
|
|
143
143
|
{
|
|
144
144
|
"tool": "generate_m4l_effect",
|
|
145
145
|
"params": {
|
|
@@ -170,7 +170,7 @@ CREATE_STOCHASTIC_TEXTURE = SemanticMove(
|
|
|
170
170
|
targets={"texture": 0.7, "novelty": 0.6, "motion": 0.5},
|
|
171
171
|
protect={"clarity": 0.4},
|
|
172
172
|
risk_level="medium",
|
|
173
|
-
|
|
173
|
+
plan_template=[
|
|
174
174
|
{
|
|
175
175
|
"tool": "generate_m4l_effect",
|
|
176
176
|
"params": {
|
|
@@ -201,7 +201,7 @@ CREATE_FDN_REVERB = SemanticMove(
|
|
|
201
201
|
targets={"depth": 0.7, "width": 0.5, "novelty": 0.4},
|
|
202
202
|
protect={"punch": 0.5, "clarity": 0.5},
|
|
203
203
|
risk_level="low",
|
|
204
|
-
|
|
204
|
+
plan_template=[
|
|
205
205
|
{
|
|
206
206
|
"tool": "generate_m4l_effect",
|
|
207
207
|
"params": {
|
|
@@ -10,7 +10,7 @@ TIGHTEN_LOW_END = SemanticMove(
|
|
|
10
10
|
targets={"weight": 0.4, "punch": 0.3, "clarity": 0.3},
|
|
11
11
|
protect={"warmth": 0.6},
|
|
12
12
|
risk_level="low",
|
|
13
|
-
|
|
13
|
+
plan_template=[
|
|
14
14
|
{"tool": "get_master_spectrum", "params": {}, "description": "Check current sub/low balance", "backend": "mcp_tool"},
|
|
15
15
|
{"tool": "set_device_parameter", "params": {"description": "High-pass sub bass around 30-40 Hz"}, "description": "HP filter sub rumble", "backend": "remote_command"},
|
|
16
16
|
{"tool": "set_track_volume", "params": {"description": "Reduce sub bass volume 5-10% if sub > 50%"}, "description": "Reduce sub volume", "backend": "remote_command"},
|
|
@@ -29,7 +29,7 @@ WIDEN_STEREO = SemanticMove(
|
|
|
29
29
|
targets={"width": 0.5, "clarity": 0.3, "depth": 0.2},
|
|
30
30
|
protect={"cohesion": 0.7},
|
|
31
31
|
risk_level="low",
|
|
32
|
-
|
|
32
|
+
plan_template=[
|
|
33
33
|
{"tool": "analyze_mix", "params": {}, "description": "Check current stereo state", "backend": "mcp_tool"},
|
|
34
34
|
{"tool": "set_track_pan", "params": {"description": "Pan harmonic elements wider: +/-25-40%"}, "description": "Pan harmonics wider", "backend": "remote_command"},
|
|
35
35
|
{"tool": "set_track_pan", "params": {"description": "Pan percussion subtly: +/-10-20%"}, "description": "Pan perc subtly", "backend": "remote_command"},
|
|
@@ -48,7 +48,7 @@ MAKE_PUNCHIER = SemanticMove(
|
|
|
48
48
|
targets={"punch": 0.5, "energy": 0.3, "contrast": 0.2},
|
|
49
49
|
protect={"clarity": 0.7, "warmth": 0.5},
|
|
50
50
|
risk_level="low",
|
|
51
|
-
|
|
51
|
+
plan_template=[
|
|
52
52
|
{"tool": "get_track_meters", "params": {"include_stereo": True}, "description": "Read current levels", "backend": "remote_command"},
|
|
53
53
|
{"tool": "set_track_volume", "params": {"description": "Push drum track +5-8%"}, "description": "Push drum level", "backend": "remote_command"},
|
|
54
54
|
{"tool": "set_track_volume", "params": {"description": "Pull pad/texture -5-10%"}, "description": "Pull back pads", "backend": "remote_command"},
|
|
@@ -67,7 +67,7 @@ DARKEN_MIX = SemanticMove(
|
|
|
67
67
|
targets={"warmth": 0.5, "depth": 0.3, "width": 0.2},
|
|
68
68
|
protect={"width": 0.7, "clarity": 0.5},
|
|
69
69
|
risk_level="low",
|
|
70
|
-
|
|
70
|
+
plan_template=[
|
|
71
71
|
{"tool": "get_master_spectrum", "params": {}, "description": "Check current tonal balance", "backend": "mcp_tool"},
|
|
72
72
|
{"tool": "set_device_parameter", "params": {"description": "Lower EQ/Auto Filter high shelf -2-4dB on bright tracks"}, "description": "Roll off highs", "backend": "remote_command"},
|
|
73
73
|
{"tool": "set_track_send", "params": {"description": "Increase reverb send on darkened elements for depth compensation"}, "description": "Compensate depth", "backend": "remote_command"},
|
|
@@ -85,7 +85,7 @@ REDUCE_REPETITION = SemanticMove(
|
|
|
85
85
|
targets={"motion": 0.4, "novelty": 0.3, "contrast": 0.3},
|
|
86
86
|
protect={"cohesion": 0.6},
|
|
87
87
|
risk_level="medium",
|
|
88
|
-
|
|
88
|
+
plan_template=[
|
|
89
89
|
{"tool": "apply_automation_shape", "params": {"curve_type": "perlin", "description": "Perlin noise on filter cutoff"}, "description": "Add organic filter drift", "backend": "mcp_tool"},
|
|
90
90
|
{"tool": "apply_automation_shape", "params": {"curve_type": "perlin", "description": "Perlin noise on send levels"}, "description": "Add depth movement", "backend": "mcp_tool"},
|
|
91
91
|
{"tool": "set_device_parameter", "params": {"description": "Increase Beat Repeat variation or chance"}, "description": "Add rhythmic variation", "backend": "remote_command"},
|
|
@@ -103,7 +103,7 @@ MAKE_KICK_BASS_LOCK = SemanticMove(
|
|
|
103
103
|
targets={"weight": 0.4, "punch": 0.3, "groove": 0.3},
|
|
104
104
|
protect={"warmth": 0.6, "cohesion": 0.7},
|
|
105
105
|
risk_level="low",
|
|
106
|
-
|
|
106
|
+
plan_template=[
|
|
107
107
|
{"tool": "get_device_parameters", "params": {"description": "Read bass EQ/filter state"}, "description": "Inspect bass chain", "backend": "remote_command"},
|
|
108
108
|
{"tool": "set_device_parameter", "params": {"description": "High-pass bass at 40-60 Hz to clear space for kick sub"}, "description": "HP bass for kick clearance", "backend": "remote_command"},
|
|
109
109
|
{"tool": "set_device_parameter", "params": {"description": "Sidechain compressor or volume duck on bass from kick"}, "description": "Duck bass on kick hits", "backend": "remote_command"},
|
|
@@ -121,7 +121,7 @@ CREATE_BUILDUP_TENSION = SemanticMove(
|
|
|
121
121
|
targets={"tension": 0.5, "energy": 0.3, "contrast": 0.2},
|
|
122
122
|
protect={"clarity": 0.6},
|
|
123
123
|
risk_level="medium",
|
|
124
|
-
|
|
124
|
+
plan_template=[
|
|
125
125
|
{"tool": "apply_gesture_template", "params": {"template_name": "tension_ratchet"}, "description": "Apply staged tension ratchet", "backend": "mcp_tool"},
|
|
126
126
|
{"tool": "apply_automation_shape", "params": {"curve_type": "exponential", "description": "Rising HP filter over 4-8 bars"}, "description": "HP filter rise", "backend": "mcp_tool"},
|
|
127
127
|
{"tool": "set_track_send", "params": {"description": "Increase reverb send for wash effect"}, "description": "Add reverb wash", "backend": "remote_command"},
|
|
@@ -139,7 +139,7 @@ SMOOTH_SCENE_HANDOFF = SemanticMove(
|
|
|
139
139
|
targets={"cohesion": 0.5, "motion": 0.3, "contrast": 0.2},
|
|
140
140
|
protect={"clarity": 0.7},
|
|
141
141
|
risk_level="low",
|
|
142
|
-
|
|
142
|
+
plan_template=[
|
|
143
143
|
{"tool": "apply_gesture_template", "params": {"template_name": "pre_arrival_vacuum"}, "description": "Pull energy back before transition", "backend": "mcp_tool"},
|
|
144
144
|
{"tool": "apply_gesture_template", "params": {"template_name": "re_entry_spotlight"}, "description": "Spotlight returning elements", "backend": "mcp_tool"},
|
|
145
145
|
],
|
|
@@ -1,8 +1,8 @@
|
|
|
1
1
|
"""SemanticMove — high-level musical intent that compiles to tool sequences.
|
|
2
2
|
|
|
3
3
|
A semantic move expresses WHAT to achieve musically, not HOW to achieve it
|
|
4
|
-
parametrically. Each move has a
|
|
5
|
-
|
|
4
|
+
parametrically. Each move has a plan_template (static metadata for discovery)
|
|
5
|
+
and is compiled at runtime through compiler.compile() for executable plans.
|
|
6
6
|
"""
|
|
7
7
|
|
|
8
8
|
from __future__ import annotations
|
|
@@ -15,13 +15,13 @@ class SemanticMove:
|
|
|
15
15
|
"""A musical action expressed as intent, not parameters."""
|
|
16
16
|
|
|
17
17
|
move_id: str
|
|
18
|
-
family: str # mix, arrangement, transition, sound_design, performance
|
|
18
|
+
family: str # mix, arrangement, transition, sound_design, performance, sample
|
|
19
19
|
intent: str # human-readable description of the musical goal
|
|
20
20
|
targets: dict = field(default_factory=dict) # dimension -> weight
|
|
21
21
|
protect: dict = field(default_factory=dict) # dimension -> threshold
|
|
22
22
|
risk_level: str = "low" # low, medium, high
|
|
23
23
|
required_capabilities: list = field(default_factory=list)
|
|
24
|
-
|
|
24
|
+
plan_template: list = field(default_factory=list) # [{tool, params, description}] — static metadata, NOT runtime truth
|
|
25
25
|
verification_plan: list = field(default_factory=list) # [{tool, check}]
|
|
26
26
|
confidence: float = 0.7
|
|
27
27
|
|
|
@@ -34,13 +34,13 @@ class SemanticMove:
|
|
|
34
34
|
"protect": self.protect,
|
|
35
35
|
"risk_level": self.risk_level,
|
|
36
36
|
"required_capabilities": self.required_capabilities,
|
|
37
|
-
"
|
|
37
|
+
"plan_template_steps": len(self.plan_template),
|
|
38
38
|
"confidence": self.confidence,
|
|
39
39
|
}
|
|
40
40
|
|
|
41
41
|
def to_full_dict(self) -> dict:
|
|
42
|
-
"""Full representation including
|
|
42
|
+
"""Full representation including plan template and verification plans."""
|
|
43
43
|
d = self.to_dict()
|
|
44
|
-
d["
|
|
44
|
+
d["plan_template"] = self.plan_template
|
|
45
45
|
d["verification_plan"] = self.verification_plan
|
|
46
46
|
return d
|
|
@@ -15,7 +15,7 @@ RECOVER_ENERGY = SemanticMove(
|
|
|
15
15
|
protect={"clarity": 0.7},
|
|
16
16
|
risk_level="low",
|
|
17
17
|
required_capabilities=["session"],
|
|
18
|
-
|
|
18
|
+
plan_template=[
|
|
19
19
|
{"tool": "set_track_volume", "params": {"description": "Gradually restore drum volume"}, "description": "Bring drums back", "backend": "remote_command"},
|
|
20
20
|
{"tool": "set_track_volume", "params": {"description": "Restore bass volume"}, "description": "Bring bass back", "backend": "remote_command"},
|
|
21
21
|
{"tool": "set_track_send", "params": {"description": "Reduce reverb send to tighten mix"}, "description": "Tighten reverb", "backend": "remote_command"},
|
|
@@ -33,7 +33,7 @@ DECOMPRESS_TENSION = SemanticMove(
|
|
|
33
33
|
protect={"cohesion": 0.6},
|
|
34
34
|
risk_level="low",
|
|
35
35
|
required_capabilities=["session"],
|
|
36
|
-
|
|
36
|
+
plan_template=[
|
|
37
37
|
{"tool": "set_track_volume", "params": {"description": "Pull back high-energy elements 15-20%"}, "description": "Pull energy down", "backend": "remote_command"},
|
|
38
38
|
{"tool": "set_track_send", "params": {"description": "Increase reverb for spaciousness"}, "description": "Open space", "backend": "remote_command"},
|
|
39
39
|
],
|
|
@@ -50,7 +50,7 @@ SAFE_SPOTLIGHT = SemanticMove(
|
|
|
50
50
|
protect={"cohesion": 0.7, "energy": 0.5},
|
|
51
51
|
risk_level="low",
|
|
52
52
|
required_capabilities=["session"],
|
|
53
|
-
|
|
53
|
+
plan_template=[
|
|
54
54
|
{"tool": "set_track_volume", "params": {"description": "Pull non-spotlight tracks to 30-40%"}, "description": "Pull background", "backend": "remote_command"},
|
|
55
55
|
{"tool": "set_track_volume", "params": {"description": "Push spotlight track to 80-85%"}, "description": "Push spotlight", "backend": "remote_command"},
|
|
56
56
|
],
|
|
@@ -67,7 +67,7 @@ EMERGENCY_SIMPLIFY = SemanticMove(
|
|
|
67
67
|
protect={"energy": 0.3},
|
|
68
68
|
risk_level="low",
|
|
69
69
|
required_capabilities=["session"],
|
|
70
|
-
|
|
70
|
+
plan_template=[
|
|
71
71
|
{"tool": "set_track_volume", "params": {"description": "Pull all non-rhythm tracks to 10-15%"}, "description": "Strip to essentials", "backend": "remote_command"},
|
|
72
72
|
{"tool": "set_track_volume", "params": {"description": "Keep drums at current level"}, "description": "Maintain rhythm", "backend": "remote_command"},
|
|
73
73
|
],
|
|
@@ -11,6 +11,11 @@ from .models import SemanticMove
|
|
|
11
11
|
from . import resolvers
|
|
12
12
|
|
|
13
13
|
|
|
14
|
+
def _resolve_sample_path(kernel: dict) -> str:
|
|
15
|
+
"""Get the sample file path from kernel, or return placeholder."""
|
|
16
|
+
return kernel.get("sample_file_path", "{sample_file_path}")
|
|
17
|
+
|
|
18
|
+
|
|
14
19
|
def _compile_sample_chop_rhythm(move: SemanticMove, kernel: dict) -> CompiledPlan:
|
|
15
20
|
"""Compile 'sample_chop_rhythm': load, slice, and chop a sample for rhythm."""
|
|
16
21
|
steps = []
|
|
@@ -34,13 +39,13 @@ def _compile_sample_chop_rhythm(move: SemanticMove, kernel: dict) -> CompiledPla
|
|
|
34
39
|
|
|
35
40
|
steps.append(CompiledStep(
|
|
36
41
|
tool="load_sample_to_simpler",
|
|
37
|
-
params={"track_index": new_idx},
|
|
42
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
38
43
|
description="Load sample into Simpler for slicing",
|
|
39
44
|
))
|
|
40
45
|
|
|
41
46
|
steps.append(CompiledStep(
|
|
42
47
|
tool="set_simpler_playback_mode",
|
|
43
|
-
params={"track_index": new_idx, "
|
|
48
|
+
params={"track_index": new_idx, "device_index": 0, "playback_mode": 2},
|
|
44
49
|
description="Switch to slice mode for rhythmic chopping",
|
|
45
50
|
))
|
|
46
51
|
descriptions.append("Slice sample")
|
|
@@ -88,7 +93,7 @@ def _compile_sample_texture_layer(move: SemanticMove, kernel: dict) -> CompiledP
|
|
|
88
93
|
|
|
89
94
|
steps.append(CompiledStep(
|
|
90
95
|
tool="load_sample_to_simpler",
|
|
91
|
-
params={"track_index": new_idx},
|
|
96
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
92
97
|
description="Load textural sample into Simpler",
|
|
93
98
|
))
|
|
94
99
|
descriptions.append("Load texture sample")
|
|
@@ -141,7 +146,7 @@ def _compile_sample_vocal_ghost(move: SemanticMove, kernel: dict) -> CompiledPla
|
|
|
141
146
|
|
|
142
147
|
steps.append(CompiledStep(
|
|
143
148
|
tool="load_sample_to_simpler",
|
|
144
|
-
params={"track_index": new_idx},
|
|
149
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
145
150
|
description="Load vocal sample into Simpler",
|
|
146
151
|
))
|
|
147
152
|
|
|
@@ -205,13 +210,13 @@ def _compile_sample_break_layer(move: SemanticMove, kernel: dict) -> CompiledPla
|
|
|
205
210
|
|
|
206
211
|
steps.append(CompiledStep(
|
|
207
212
|
tool="load_sample_to_simpler",
|
|
208
|
-
params={"track_index": new_idx},
|
|
213
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
209
214
|
description="Load breakbeat into Simpler",
|
|
210
215
|
))
|
|
211
216
|
|
|
212
217
|
steps.append(CompiledStep(
|
|
213
218
|
tool="set_simpler_playback_mode",
|
|
214
|
-
params={"track_index": new_idx, "
|
|
219
|
+
params={"track_index": new_idx, "device_index": 0, "playback_mode": 2},
|
|
215
220
|
description="Slice break by transients for individual hits",
|
|
216
221
|
))
|
|
217
222
|
descriptions.append("Slice break")
|
|
@@ -262,7 +267,7 @@ def _compile_sample_resample_destroy(move: SemanticMove, kernel: dict) -> Compil
|
|
|
262
267
|
|
|
263
268
|
steps.append(CompiledStep(
|
|
264
269
|
tool="load_sample_to_simpler",
|
|
265
|
-
params={"track_index": new_idx},
|
|
270
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
266
271
|
description="Load sample for destruction",
|
|
267
272
|
))
|
|
268
273
|
descriptions.append("Load source")
|
|
@@ -321,13 +326,13 @@ def _compile_sample_one_shot_accent(move: SemanticMove, kernel: dict) -> Compile
|
|
|
321
326
|
|
|
322
327
|
steps.append(CompiledStep(
|
|
323
328
|
tool="load_sample_to_simpler",
|
|
324
|
-
params={"track_index": new_idx},
|
|
329
|
+
params={"track_index": new_idx, "file_path": _resolve_sample_path(kernel)},
|
|
325
330
|
description="Load one-shot into Simpler",
|
|
326
331
|
))
|
|
327
332
|
|
|
328
333
|
steps.append(CompiledStep(
|
|
329
334
|
tool="set_simpler_playback_mode",
|
|
330
|
-
params={"track_index": new_idx, "
|
|
335
|
+
params={"track_index": new_idx, "device_index": 0, "playback_mode": 1},
|
|
331
336
|
description="One-shot mode for trigger playback",
|
|
332
337
|
))
|
|
333
338
|
descriptions.append("One-shot mode")
|
|
@@ -13,7 +13,7 @@ ADD_WARMTH = SemanticMove(
|
|
|
13
13
|
targets={"warmth": 0.5, "depth": 0.3, "cohesion": 0.2},
|
|
14
14
|
protect={"clarity": 0.6, "punch": 0.5},
|
|
15
15
|
risk_level="low",
|
|
16
|
-
|
|
16
|
+
plan_template=[
|
|
17
17
|
{"tool": "set_device_parameter", "params": {"description": "Add Saturator drive +2-4dB for harmonic warmth"}, "description": "Add saturation", "backend": "remote_command"},
|
|
18
18
|
{"tool": "set_device_parameter", "params": {"description": "Boost EQ low-mid shelf +1-2dB"}, "description": "Low-mid warmth", "backend": "remote_command"},
|
|
19
19
|
],
|
|
@@ -30,7 +30,7 @@ ADD_TEXTURE = SemanticMove(
|
|
|
30
30
|
targets={"motion": 0.4, "novelty": 0.3, "depth": 0.3},
|
|
31
31
|
protect={"clarity": 0.6},
|
|
32
32
|
risk_level="medium",
|
|
33
|
-
|
|
33
|
+
plan_template=[
|
|
34
34
|
{"tool": "apply_automation_shape", "params": {"curve_type": "perlin", "description": "Perlin noise on filter cutoff for organic texture"}, "description": "Organic filter motion", "backend": "mcp_tool"},
|
|
35
35
|
{"tool": "set_track_send", "params": {"description": "Increase delay send for spatial texture"}, "description": "Spatial texture via delay", "backend": "remote_command"},
|
|
36
36
|
],
|
|
@@ -46,7 +46,7 @@ SHAPE_TRANSIENTS = SemanticMove(
|
|
|
46
46
|
targets={"punch": 0.5, "clarity": 0.3, "groove": 0.2},
|
|
47
47
|
protect={"warmth": 0.5},
|
|
48
48
|
risk_level="low",
|
|
49
|
-
|
|
49
|
+
plan_template=[
|
|
50
50
|
{"tool": "set_device_parameter", "params": {"description": "Adjust Compressor attack time (faster = sharper transients, slower = rounder)"}, "description": "Shape attack", "backend": "remote_command"},
|
|
51
51
|
{"tool": "set_device_parameter", "params": {"description": "Adjust Compressor release for rhythmic pumping"}, "description": "Shape release", "backend": "remote_command"},
|
|
52
52
|
],
|
|
@@ -62,7 +62,7 @@ ADD_SPACE = SemanticMove(
|
|
|
62
62
|
targets={"depth": 0.5, "width": 0.3, "clarity": 0.2},
|
|
63
63
|
protect={"punch": 0.6, "clarity": 0.5},
|
|
64
64
|
risk_level="low",
|
|
65
|
-
|
|
65
|
+
plan_template=[
|
|
66
66
|
{"tool": "set_track_send", "params": {"description": "Increase reverb send to 25-35%"}, "description": "Add reverb depth", "backend": "remote_command"},
|
|
67
67
|
{"tool": "set_track_send", "params": {"description": "Add subtle delay send 10-15%"}, "description": "Add delay texture", "backend": "remote_command"},
|
|
68
68
|
{"tool": "set_track_pan", "params": {"description": "Widen pan slightly for spatial presence"}, "description": "Widen spatial field", "backend": "remote_command"},
|