livepilot 1.20.3 → 1.21.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +290 -0
- package/README.md +62 -33
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/affordances/__init__.py +31 -0
- package/mcp_server/affordances/_schema.py +143 -0
- package/mcp_server/affordances/devices/auto-filter.yaml +14 -0
- package/mcp_server/affordances/devices/delay.yaml +18 -0
- package/mcp_server/affordances/devices/reverb.yaml +16 -0
- package/mcp_server/affordances/presets.py +74 -0
- package/mcp_server/experiment/tools.py +149 -12
- package/mcp_server/memory/tools.py +10 -0
- package/mcp_server/runtime/tools.py +7 -0
- package/mcp_server/semantic_moves/device_creation_compilers.py +37 -3
- package/mcp_server/semantic_moves/device_creation_moves.py +7 -7
- package/mcp_server/semantic_moves/device_mutation_compilers.py +66 -12
- package/mcp_server/semantic_moves/performance_compilers.py +157 -0
- package/mcp_server/semantic_moves/performance_moves.py +46 -1
- package/mcp_server/semantic_moves/tools.py +8 -5
- package/mcp_server/song_brain/tools.py +6 -0
- package/mcp_server/stuckness_detector/tools.py +4 -0
- package/mcp_server/tools/_agent_os_engine/taste.py +6 -0
- package/mcp_server/tools/memory.py +7 -0
- package/mcp_server/wonder_mode/tools.py +4 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/server.json +3 -3

package/mcp_server/affordances/_schema.py
@@ -0,0 +1,143 @@
+"""Schema validator for affordance-device preset YAML files.
+
+Enforces at test-time that every seed preset file is well-formed. The
+runtime loader (``presets.py``) is tolerant of missing optional fields
+so the product never crashes on a bad preset; this validator is the
+pre-ship gate run by ``tests/test_affordance_presets.py``.
+
+Schema:
+    device_slug: str                   # REQUIRED — must match filename stem
+    device_class_name: str             # REQUIRED — Ableton's class_name
+    presets:                           # REQUIRED — dict[name → PresetRecord]
+      <name>:
+        description: str               # REQUIRED — human-readable
+        param_overrides: dict          # REQUIRED — {name: number|bool|int}, ≥1 entry
+        risk_notes: str                # optional
+        suggested_pairings: list[str]  # optional
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any
+
+import yaml
+
+
+_REQUIRED_TOPLEVEL = {"device_slug", "device_class_name", "presets"}
+_REQUIRED_PER_PRESET = {"description", "param_overrides"}
+_OPTIONAL_PER_PRESET = {"risk_notes", "suggested_pairings"}
+
+
+def validate_preset_file(path: Path) -> list[str]:
+    """Return a list of validation errors. Empty list = valid.
+
+    Errors are human-readable one-liners suitable for test failure output.
+    """
+    errors: list[str] = []
+
+    try:
+        data = yaml.safe_load(path.read_text(encoding="utf-8"))
+    except yaml.YAMLError as exc:
+        return [f"{path.name}: YAML parse error — {exc}"]
+
+    if not isinstance(data, dict):
+        return [
+            f"{path.name}: top-level must be a mapping, "
+            f"got {type(data).__name__}"
+        ]
+
+    missing = _REQUIRED_TOPLEVEL - set(data.keys())
+    if missing:
+        errors.append(
+            f"{path.name}: missing required top-level fields {sorted(missing)}"
+        )
+
+    slug = data.get("device_slug")
+    if slug is not None and not isinstance(slug, str):
+        errors.append(f"{path.name}: device_slug must be a string")
+    elif slug and slug != path.stem:
+        errors.append(
+            f"{path.name}: device_slug {slug!r} must match "
+            f"filename stem {path.stem!r}"
+        )
+
+    class_name = data.get("device_class_name")
+    if class_name is not None and not isinstance(class_name, str):
+        errors.append(f"{path.name}: device_class_name must be a string")
+
+    presets = data.get("presets")
+    if presets is not None:
+        if not isinstance(presets, dict):
+            errors.append(f"{path.name}: presets must be a mapping")
+        elif not presets:
+            errors.append(f"{path.name}: presets dict is empty")
+        else:
+            for preset_name, preset in presets.items():
+                errors.extend(_validate_preset(path.name, preset_name, preset))
+
+    return errors
+
+
+def _validate_preset(filename: str, name: str, preset: Any) -> list[str]:
+    errors: list[str] = []
+    if not isinstance(preset, dict):
+        return [f"{filename}: preset {name!r} must be a mapping"]
+
+    missing = _REQUIRED_PER_PRESET - set(preset.keys())
+    if missing:
+        errors.append(
+            f"{filename}: preset {name!r} missing required fields {sorted(missing)}"
+        )
+
+    description = preset.get("description")
+    if description is not None and not isinstance(description, str):
+        errors.append(f"{filename}: preset {name!r} description must be a string")
+
+    overrides = preset.get("param_overrides")
+    if overrides is not None:
+        if not isinstance(overrides, dict):
+            errors.append(
+                f"{filename}: preset {name!r} param_overrides must be a dict"
+            )
+        elif not overrides:
+            errors.append(
+                f"{filename}: preset {name!r} param_overrides is empty"
+            )
+        else:
+            for k, v in overrides.items():
+                if not isinstance(k, str):
+                    errors.append(
+                        f"{filename}: preset {name!r} param_overrides key "
+                        f"{k!r} must be a string"
+                    )
+                if not isinstance(v, (int, float, bool)):
+                    errors.append(
+                        f"{filename}: preset {name!r} param_overrides value for "
+                        f"{k!r} must be number|bool, got {type(v).__name__}"
+                    )
+
+    pairings = preset.get("suggested_pairings")
+    if pairings is not None:
+        if not isinstance(pairings, list):
+            errors.append(
+                f"{filename}: preset {name!r} suggested_pairings must be a list"
+            )
+        elif not all(isinstance(p, str) for p in pairings):
+            errors.append(
+                f"{filename}: preset {name!r} suggested_pairings entries "
+                f"must be strings"
+            )
+
+    risk_notes = preset.get("risk_notes")
+    if risk_notes is not None and not isinstance(risk_notes, str):
+        errors.append(f"{filename}: preset {name!r} risk_notes must be a string")
+
+    extras = set(preset.keys()) - _REQUIRED_PER_PRESET - _OPTIONAL_PER_PRESET
+    if extras:
+        errors.append(
+            f"{filename}: preset {name!r} has unknown fields {sorted(extras)} "
+            f"(allowed: {sorted(_REQUIRED_PER_PRESET | _OPTIONAL_PER_PRESET)})"
+        )
+
+    return errors
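
The validator above is wired into the test suite rather than the runtime path. A minimal sketch of how tests/test_affordance_presets.py might drive it (that test file is not included in this diff, so the import path and directory constant below are assumptions):

    # Hypothetical pre-ship gate: parametrize over every seeded YAML file and
    # fail with the validator's own one-line error messages.
    from pathlib import Path

    import pytest

    from mcp_server.affordances._schema import validate_preset_file

    # Assumed checkout layout: tests/ sits next to mcp_server/.
    DEVICES_DIR = Path(__file__).parent.parent / "mcp_server" / "affordances" / "devices"

    @pytest.mark.parametrize(
        "yaml_path",
        sorted(DEVICES_DIR.glob("*.yaml")),
        ids=lambda p: p.stem,
    )
    def test_preset_file_is_well_formed(yaml_path: Path) -> None:
        errors = validate_preset_file(yaml_path)
        assert not errors, "\n".join(errors)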

package/mcp_server/affordances/devices/auto-filter.yaml
@@ -0,0 +1,14 @@
+device_slug: auto-filter
+device_class_name: AutoFilter2
+presets:
+  slow-sweep:
+    description: >
+      Slow LFO sweep for return chains. Synced rate (bar-long), moderate
+      resonance, low-pass type. Layers well with dub-cathedral reverb.
+    param_overrides:
+      Filter Type: 0      # LP
+      LFO Amount: 0.60
+      LFO Sync: 1
+      LFO Rate: 4         # bar-long sweep
+      Resonance: 0.25
+    suggested_pairings: ["Echo", "Reverb"]

package/mcp_server/affordances/devices/delay.yaml
@@ -0,0 +1,18 @@
+device_slug: delay
+device_class_name: Delay
+presets:
+  ping-pong-dub:
+    description: >
+      Dotted-eighth ping-pong for dub-techno sends. Moderate feedback,
+      high wet, HP+LP filtered to stay out of the kick's sub and the
+      vocal's top end.
+    param_overrides:
+      Sync: 1
+      16th Note: 6        # dotted 8th
+      Feedback: 0.45
+      Filter On: 1
+      HP Freq: 0.20       # ~100 Hz cut below
+      LP Freq: 0.65       # mid-top rolloff
+      Dry/Wet: 0.55
+    risk_notes: "Feedback above 0.6 self-oscillates; keep the return track's output watchable."
+    suggested_pairings: ["Auto Filter", "Saturator"]

package/mcp_server/affordances/devices/reverb.yaml
@@ -0,0 +1,16 @@
+device_slug: reverb
+device_class_name: Reverb
+presets:
+  dub-cathedral:
+    description: >
+      Basic Channel-adjacent huge-space reverb. Long decay, large room,
+      noticeable predelay, high diffusion. Pairs well with Echo + Auto
+      Filter on a return chain for dub-techno sends.
+    param_overrides:
+      Decay Time: 0.85
+      Room Size: 0.95
+      Dry/Wet: 0.40
+      Predelay: 0.45
+      Diffusion: 0.80
+    risk_notes: "Long decay masks transients; avoid routing percussion here."
+    suggested_pairings: ["Echo", "Auto Filter"]

package/mcp_server/affordances/presets.py
@@ -0,0 +1,74 @@
+"""Preset loader for affordance-device YAML files.
+
+Runtime resolution only — schema validation lives in ``_schema.py`` and
+fires at test-time. The loader is tolerant of malformed files (returns
+None rather than raising) so production code never crashes on a bad
+preset; the schema validator catches those pre-ship.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any, Optional
+
+import yaml
+
+
+_AFFORDANCES_ROOT = Path(__file__).parent / "devices"
+
+
+def _load_device_yaml(device_slug: str) -> Optional[dict]:
+    """Load and parse a device's YAML file. Returns None on any error."""
+    path = _AFFORDANCES_ROOT / f"{device_slug}.yaml"
+    if not path.exists():
+        return None
+    try:
+        data = yaml.safe_load(path.read_text(encoding="utf-8"))
+    except yaml.YAMLError:
+        return None
+    return data if isinstance(data, dict) else None
+
+
+def resolve_preset(device_slug: str, preset_name: str) -> Optional[dict[str, Any]]:
+    """Return the ``param_overrides`` dict for a named preset, or None.
+
+    Returns None on: missing device file, missing preset, unparseable YAML,
+    or a preset record without ``param_overrides``.
+    """
+    data = _load_device_yaml(device_slug)
+    if data is None:
+        return None
+    preset = data.get("presets", {}).get(preset_name)
+    if not isinstance(preset, dict):
+        return None
+    overrides = preset.get("param_overrides")
+    return dict(overrides) if isinstance(overrides, dict) else None
+
+
+def get_preset_metadata(device_slug: str, preset_name: str) -> Optional[dict]:
+    """Return the full preset record (description + pairings + risk_notes
+    + param_overrides) or None."""
+    data = _load_device_yaml(device_slug)
+    if data is None:
+        return None
+    preset = data.get("presets", {}).get(preset_name)
+    return dict(preset) if isinstance(preset, dict) else None
+
+
+def list_devices() -> list[str]:
+    """Return device slugs with available preset files, sorted."""
+    if not _AFFORDANCES_ROOT.exists():
+        return []
+    return sorted(p.stem for p in _AFFORDANCES_ROOT.glob("*.yaml"))
+
+
+def list_presets(device_slug: str) -> list[str]:
+    """Return preset names for a given device slug, sorted. Empty list
+    on missing device or malformed YAML."""
+    data = _load_device_yaml(device_slug)
+    if data is None:
+        return []
+    presets = data.get("presets", {})
+    if not isinstance(presets, dict):
+        return []
+    return sorted(presets.keys())
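
With the three seed files above in place, the loader resolves them roughly like this (a sketch assuming the package is importable as mcp_server; the outputs follow from the YAML shown in this diff):

    from mcp_server.affordances import presets

    presets.list_devices()
    # -> ['auto-filter', 'delay', 'reverb']

    presets.list_presets("delay")
    # -> ['ping-pong-dub']

    presets.resolve_preset("reverb", "dub-cathedral")
    # -> {'Decay Time': 0.85, 'Room Size': 0.95, 'Dry/Wet': 0.4,
    #     'Predelay': 0.45, 'Diffusion': 0.8}

    presets.resolve_preset("reverb", "no-such-preset")
    # -> None (missing presets, missing files, and unparseable YAML all return None)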

package/mcp_server/experiment/tools.py
@@ -630,6 +630,54 @@ def compare_experiments(
     }
 
 
+# v1.21: helpers for commit_experiment's ledger-write block. Mirrors the
+# v1.20 apply_semantic_move pattern (commit 0b3489b) — both writers feed
+# the same SessionLedger, so anti-repetition filters downstream see a
+# unified recency log regardless of which surface executed the move.
+
+_TOOL_TO_FAMILY: dict[str, str] = {
+    # Minimal first-step-tool → family mapping. Used only when a branch
+    # lacks an explicit seed.family. Uncovered tools fall through to
+    # default "mix" (same safe default apply_semantic_move would use).
+    "set_track_volume": "mix",
+    "set_track_pan": "mix",
+    "set_track_send": "mix",
+    "set_device_parameter": "sound_design",
+    "batch_set_parameters": "sound_design",
+    "create_clip": "arrangement",
+    "add_notes": "arrangement",
+    "create_scene": "arrangement",
+    "set_scene_tempo": "arrangement",
+    "create_midi_track": "arrangement",
+    "find_and_load_device": "device_creation",
+    "generate_m4l_effect": "device_creation",
+    "apply_gesture_template": "transition",
+    "set_track_arm": "performance",
+    "load_sample_to_simpler": "sample",
+}
+
+
+def _infer_move_family(target) -> str:
+    """Determine move_class for a commit_experiment ledger entry.
+
+    Priority:
+      1. ``target.seed.family`` — explicit seed classification.
+      2. First compiled_plan step's tool via _TOOL_TO_FAMILY lookup.
+      3. Default "mix" — safe fallback.
+    """
+    seed = getattr(target, "seed", None)
+    if seed is not None and getattr(seed, "family", None):
+        return seed.family
+
+    plan = getattr(target, "compiled_plan", None) or {}
+    steps = plan.get("steps", []) or []
+    if steps:
+        first_tool = steps[0].get("tool", "")
+        return _TOOL_TO_FAMILY.get(first_tool, "mix")
+
+    return "mix"
+
+
 @mcp.tool()
 async def commit_experiment(
     ctx: Context,
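
Since ExperimentBranch itself is not shown in this hunk, here is a rough sketch of the priority order using stand-in objects that only carry the two attributes _infer_move_family actually reads:

    from types import SimpleNamespace

    with_seed = SimpleNamespace(
        seed=SimpleNamespace(family="transition", source="composer"),
        compiled_plan={"steps": [{"tool": "set_track_send"}]},
    )
    without_seed = SimpleNamespace(
        seed=None,
        compiled_plan={"steps": [{"tool": "find_and_load_device"}]},
    )
    empty_plan = SimpleNamespace(seed=None, compiled_plan=None)

    _infer_move_family(with_seed)     # -> "transition" (seed.family wins over the step tool)
    _infer_move_family(without_seed)  # -> "device_creation" (first-step tool lookup)
    _infer_move_family(empty_plan)    # -> "mix" (safe default)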

@@ -647,23 +695,47 @@ async def commit_experiment(
     if not experiment:
         return {"error": f"Experiment {experiment_id} not found"}
 
-    #
-    #
-    #
-    #
-    #
+    # v1.21.1 fix (external audit 2026-04-24): accept ONLY status='evaluated'.
+    # Pre-fix, the check was an exclusion list —
+    # `if target.status in ("rejected", "analytical", "failed"):` — which
+    # implicitly allowed 'pending', 'running', 'discarded', and
+    # 'interesting_but_failed' to commit even though
+    # compare_experiments() never ranks them. The code's inline comment
+    # below ("only status='evaluated' branches are ranking candidates")
+    # already described the correct contract; this fix flips the
+    # polarity so the implementation matches. See
+    # docs/plans/v1.21-impl-status.md Appendix C for the audit-response log.
+    #
+    # Status semantics (from ExperimentBranch lifecycle):
+    #   pending     — create_experiment landed; run_experiment hasn't touched it
+    #   running     — run_experiment is mid-flight on this branch
+    #   evaluated   — run_experiment finished; ranking candidate ✓
+    #   rejected    — hard-rule classifier rolled back (protect violation, etc.)
+    #   analytical  — no executable plan (seed was analytical_only)
+    #   failed      — zero steps applied successfully
+    #   committed   — already committed (re-commit is wrong)
+    #   discarded   — caller explicitly threw it out
+    #   interesting_but_failed — exploration-mode audit trail; not ranked
     target = experiment.get_branch(branch_id)
     if target is None:
         return {"error": f"Branch {branch_id} not found"}
-    if target.status in ("rejected", "analytical", "failed"):
+    if target.status != "evaluated":
         return {
             "error": (
-                f"Cannot commit branch with status '{target.status}'
-                f"'
-                f"
-                f"'
-                f"
-                f"
+                f"Cannot commit branch with status '{target.status}' — "
+                f"only status='evaluated' branches are commit candidates. "
+                f"Reason depends on current status: "
+                f"'pending' / 'running' = run_experiment hasn't evaluated "
+                f"this branch yet (run it first); "
+                f"'rejected' = hard-rule classifier rolled it back; "
+                f"'analytical' = no executable plan (analytical_only seed); "
+                f"'failed' = zero steps applied successfully during run; "
+                f"'committed' = already committed (don't re-run); "
+                f"'discarded' = caller explicitly threw this branch out; "
+                f"'interesting_but_failed' = kept for audit in "
+                f"exploration mode, but classifier excluded from ranking. "
+                f"Use compare_experiments to see eligible (ranked) "
+                f"winners — they are always status='evaluated'."
             ),
             "branch_id": branch_id,
             "branch_status": target.status,

@@ -759,6 +831,65 @@ async def commit_experiment(
         ctx=ctx,
     )
 
+    # v1.21: write the committed experiment to the SessionLedger so
+    # get_last_move / anti-repetition can see it. Best-effort — a
+    # ledger write failure is logged but does not fail the commit.
+    ledger_entry_id: Optional[str] = None
+    if isinstance(commit_result, dict) and commit_result.get("committed") is True:
+        try:
+            # store_purpose: writer (v1.21 commit_experiment auto-ledger
+            # write; shape mirrors apply_semantic_move commit 0b3489b).
+            from ..runtime.action_ledger import SessionLedger
+            ledger = ctx.lifespan_context.setdefault(
+                "action_ledger", SessionLedger()
+            )
+            # Engine tag reflects branch SOURCE (not escalation success).
+            # A composer-sourced branch that fell back to scaffold is still
+            # a composer-engine commit; the escalation-success detail is
+            # captured in target.evaluation["composer_escalation"], and
+            # doubling up on the engine tag would be noise for the
+            # anti-repetition filters downstream.
+            engine_tag = (
+                "composer"
+                if (
+                    target.seed is not None
+                    and target.seed.source == "composer"
+                )
+                else "experiment"
+            )
+            move_class = _infer_move_family(target)
+            ledger_entry_id = ledger.start_move(
+                engine=engine_tag,
+                move_class=move_class,
+                intent=(
+                    f"{experiment_id}/{branch_id}: "
+                    f"{target.name or 'committed winner'}"
+                ),
+                undo_scope="micro",
+            )
+            # Actions from the POST-escalation plan (execution_log is the
+            # router's actual execution record — captures the swapped plan
+            # when composer escalation fired successfully).
+            for er in (commit_result.get("execution_log") or []):
+                if er.get("ok"):
+                    ledger.append_action(
+                        ledger_entry_id,
+                        tool_name=er.get("tool", ""),
+                        summary=er.get("tool", "") or "step",
+                    )
+            steps_executed = int(commit_result.get("steps_executed", 0))
+            steps_failed = int(commit_result.get("steps_failed", 0))
+            total = steps_executed + steps_failed
+            ledger.finalize_move(
+                ledger_entry_id,
+                kept=(steps_failed == 0),
+                score=(float(steps_executed) / total) if total else 0.0,
+                memory_candidate=False,
+            )
+        except Exception as exc:  # pragma: no cover — ledger is best-effort
+            logger.warning("commit_experiment ledger write failed: %s", exc)
+            ledger_entry_id = None
+
     # Surface escalation details on the commit response so the caller
     # sees whether a scaffold or resolved plan was applied.
     if escalation_info is not None and isinstance(commit_result, dict):
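
The finalize_move call records a simple success ratio for the committed plan; a worked example of that arithmetic (numbers here are illustrative, not taken from a real commit):

    steps_executed, steps_failed = 3, 1
    total = steps_executed + steps_failed                      # 4
    kept = steps_failed == 0                                   # False (one step failed)
    score = float(steps_executed) / total if total else 0.0    # 0.75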

@@ -770,6 +901,12 @@ async def commit_experiment(
             "warnings": escalation_info.get("warnings", []),
         }
 
+    # Surface ledger_entry_id on the commit response so callers can
+    # correlate their MCP response with the ledger entry for post-hoc
+    # evaluation. Same pattern as apply_semantic_move.
+    if ledger_entry_id is not None and isinstance(commit_result, dict):
+        commit_result["ledger_entry_id"] = ledger_entry_id
+
     return commit_result
 
 

package/mcp_server/memory/tools.py
@@ -55,6 +55,10 @@ def record_anti_preference(
 @mcp.tool()
 def get_promotion_candidates(ctx: Context, limit: int = 10) -> dict:
     """Check the session ledger for entries eligible for memory promotion."""
+    # store_purpose: audit_readonly
+    # Reads the ledger to find entries already flagged as
+    # memory-promotion candidates — an audit/export surface, NOT an
+    # anti-repetition recency read.
     ledger = ctx.lifespan_context.get("action_ledger")
     if ledger is None:
         return {"candidates": [], "count": 0, "note": "no session ledger active"}

@@ -75,6 +79,12 @@ def get_promotion_candidates(ctx: Context, limit: int = 10) -> dict:
 # ── Session Memory ──────────────────────────────────────────────────
 
 
+# store_purpose: mcp_tool_definition
+# get_session_memory is the MCP tool that surfaces session-scoped
+# ephemeral observations/decisions. It is NOT the action ledger and
+# NOT the persistent technique library — use the right tool for
+# recency (SessionLedger.get_recent_moves / get_action_ledger_summary)
+# or for learned techniques (memory_list).
 @mcp.tool()
 def get_session_memory(
     ctx: Context, limit: int = 10, category: str = ""

package/mcp_server/runtime/tools.py
@@ -215,6 +215,13 @@ def get_session_kernel(
     session_mem: list = []
     kernel_warnings: list[str] = []
 
+    # store_purpose: audit_readonly
+    # The world-model kernel builder surfaces ledger state (total moves,
+    # memory candidates, last_move, recent_moves) as diagnostic data for
+    # downstream consumers. Not an anti-repetition reader — it's a
+    # kernel-assembly surface; consumers that want recency should either
+    # call SessionLedger.get_recent_moves directly (annotated as
+    # anti_repetition) or use get_action_ledger_summary.
     try:
         from .action_ledger import SessionLedger
         ledger = ctx.lifespan_context.get("action_ledger")

package/mcp_server/semantic_moves/device_creation_compilers.py
@@ -44,7 +44,33 @@ _MOVE_TO_TEMPLATE: dict[str, str] = {
 
 def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
     """Map plan_template steps to CompiledStep, injecting Device Forge
-    `gen_code`
+    `gen_code` and threading `track_index` through `find_and_load_device`."""
+    # v1.21 parity-gate fix: thread track_index from seed_args into
+    # find_and_load_device steps. Pre-fix, plan_templates emitted the
+    # ergonomic key ``query`` with no track_index — broken at runtime
+    # since pre-v1.20 because the ``remote_command`` backend bypasses
+    # MCP normalization and Ableton's handler requires
+    # ``track_index`` + ``device_name``.
+    seed_args = kernel.get("seed_args") or {}
+    track_index = seed_args.get("track_index")
+    needs_load_step = any(
+        s.get("tool") == "find_and_load_device" for s in move.plan_template
+    )
+    if needs_load_step and track_index is None:
+        return CompiledPlan(
+            move_id=move.move_id,
+            intent=move.intent,
+            steps=[],
+            risk_level=move.risk_level,
+            summary=f"{move.move_id} requires seed_args.track_index",
+            warnings=[
+                f"{move.move_id} requires seed_args.track_index (int) to "
+                "load the generated device onto a track. Example: "
+                f"apply_semantic_move(\"{move.move_id}\", mode=\"explore\", "
+                "args={\"track_index\": 0})"
+            ],
+        )
+
     # Resolve the GenExpr template once per compile (idempotent).
     template_code: str | None = None
     template_id = _MOVE_TO_TEMPLATE.get(move.move_id)
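
A rough sketch of the new guard's behaviour, using plain dicts as stand-ins for the SemanticMove / CompiledPlan dataclasses (which this diff does not define):

    plan_template = [
        {"tool": "generate_m4l_effect"},
        {"tool": "find_and_load_device", "params": {"device_name": "Wonder Chaos Mod"}},
    ]

    def needs_track_index(template, seed_args):
        # Mirrors the guard: a load step with no seed_args.track_index means
        # the compiler returns an empty plan plus an actionable warning.
        has_load = any(s.get("tool") == "find_and_load_device" for s in template)
        return has_load and seed_args.get("track_index") is None

    needs_track_index(plan_template, {})                  # True  -> empty plan + warning
    needs_track_index(plan_template, {"track_index": 0})  # False -> compile proceeds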

@@ -60,14 +86,22 @@ def _compile_device_creation(move: SemanticMove, kernel: dict) -> CompiledPlan:
     steps: list[CompiledStep] = []
     for step in move.plan_template:
         params = dict(step.get("params") or {})
+        tool = step.get("tool", "")
 
         # Inject gen_code for Device Forge moves. Done BEFORE CompiledStep
         # construction so the step snapshot is correct, not mutated later.
-        if template_code is not None and
+        if template_code is not None and tool == "generate_m4l_effect":
             params["gen_code"] = template_code
 
+        # v1.21: inject track_index into find_and_load_device. plan_templates
+        # now emit {"device_name": ...} (wire-format key); compiler adds
+        # {"track_index": ...} from seed_args so the remote_command backend
+        # sends a handler-compatible payload.
+        if tool == "find_and_load_device":
+            params["track_index"] = track_index
+
         steps.append(CompiledStep(
-            tool=
+            tool=tool,
             params=params,
             description=step.get("description", ""),
             verify_after=bool(step.get("verify_after", True)),
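
For the Device Forge moves below, the net effect of the two injections is that a template step carrying only a device name is compiled into the wire-format payload the remote_command backend forwards to Ableton's handler. A minimal sketch with plain dicts, assuming seed_args carries track_index=0:

    template_step = {
        "tool": "find_and_load_device",
        "params": {"device_name": "Wonder Chaos Mod"},
        "backend": "remote_command",
    }
    seed_args = {"track_index": 0}

    params = dict(template_step["params"])
    if template_step["tool"] == "find_and_load_device":
        params["track_index"] = seed_args.get("track_index")

    params
    # -> {'device_name': 'Wonder Chaos Mod', 'track_index': 0}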

package/mcp_server/semantic_moves/device_creation_moves.py
@@ -28,7 +28,7 @@ CREATE_CHAOS_MODULATOR = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder Chaos Mod"},
             "description": "Load generated device onto target track",
             "backend": "remote_command",
         },

@@ -59,7 +59,7 @@ CREATE_FEEDBACK_RESONATOR = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder Resonator"},
             "description": "Load resonator onto target track",
             "backend": "remote_command",
         },

@@ -90,7 +90,7 @@ CREATE_WAVEFOLDER_EFFECT = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder Wavefolder"},
             "description": "Load wavefolder onto target track",
             "backend": "remote_command",
         },

@@ -121,7 +121,7 @@ CREATE_BITCRUSHER_EFFECT = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder Bitcrusher"},
             "description": "Load bitcrusher onto target track",
             "backend": "remote_command",
         },

@@ -152,7 +152,7 @@ CREATE_KARPLUS_STRING = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder String"},
             "description": "Load string synth onto target track",
             "backend": "remote_command",
         },

@@ -183,7 +183,7 @@ CREATE_STOCHASTIC_TEXTURE = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder Stochastic"},
             "description": "Load stochastic texture device onto target track",
             "backend": "remote_command",
         },

@@ -214,7 +214,7 @@ CREATE_FDN_REVERB = SemanticMove(
         },
         {
             "tool": "find_and_load_device",
-            "params": {"
+            "params": {"device_name": "Wonder FDN Verb"},
             "description": "Load FDN reverb onto target track",
             "backend": "remote_command",
         },