livepilot 1.10.8 → 1.12.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. package/CHANGELOG.md +373 -0
  2. package/README.md +16 -16
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/evaluation/fabric.py +62 -1
  7. package/mcp_server/m4l_bridge.py +503 -18
  8. package/mcp_server/project_brain/automation_graph.py +23 -1
  9. package/mcp_server/project_brain/builder.py +2 -0
  10. package/mcp_server/project_brain/models.py +20 -1
  11. package/mcp_server/project_brain/tools.py +10 -3
  12. package/mcp_server/runtime/execution_router.py +7 -0
  13. package/mcp_server/runtime/mcp_dispatch.py +32 -0
  14. package/mcp_server/runtime/remote_commands.py +54 -0
  15. package/mcp_server/sample_engine/slice_classifier.py +169 -0
  16. package/mcp_server/semantic_moves/tools.py +139 -31
  17. package/mcp_server/server.py +151 -17
  18. package/mcp_server/session_continuity/models.py +13 -0
  19. package/mcp_server/session_continuity/tools.py +2 -0
  20. package/mcp_server/session_continuity/tracker.py +93 -0
  21. package/mcp_server/tools/_analyzer_engine/__init__.py +39 -0
  22. package/mcp_server/tools/_analyzer_engine/context.py +103 -0
  23. package/mcp_server/tools/_analyzer_engine/flucoma.py +23 -0
  24. package/mcp_server/tools/_analyzer_engine/sample.py +122 -0
  25. package/mcp_server/tools/_motif_engine.py +19 -4
  26. package/mcp_server/tools/analyzer.py +204 -180
  27. package/mcp_server/tools/clips.py +304 -1
  28. package/mcp_server/tools/devices.py +517 -5
  29. package/mcp_server/tools/diagnostics.py +42 -0
  30. package/mcp_server/tools/follow_actions.py +202 -0
  31. package/mcp_server/tools/grooves.py +142 -0
  32. package/mcp_server/tools/miditool.py +280 -0
  33. package/mcp_server/tools/scales.py +126 -0
  34. package/mcp_server/tools/take_lanes.py +135 -0
  35. package/mcp_server/tools/tracks.py +46 -3
  36. package/mcp_server/tools/transport.py +120 -4
  37. package/package.json +2 -2
  38. package/remote_script/LivePilot/__init__.py +15 -4
  39. package/remote_script/LivePilot/clips.py +62 -0
  40. package/remote_script/LivePilot/devices.py +444 -0
  41. package/remote_script/LivePilot/diagnostics.py +52 -1
  42. package/remote_script/LivePilot/follow_actions.py +235 -0
  43. package/remote_script/LivePilot/grooves.py +185 -0
  44. package/remote_script/LivePilot/scales.py +138 -0
  45. package/remote_script/LivePilot/take_lanes.py +175 -0
  46. package/remote_script/LivePilot/tracks.py +59 -1
  47. package/remote_script/LivePilot/transport.py +90 -1
  48. package/remote_script/LivePilot/version_detect.py +9 -0
  49. package/server.json +3 -3
```diff
@@ -205,16 +205,35 @@ class RoleGraph:
 
 @dataclass
 class AutomationGraph:
-    """Automation presence and gesture density."""
+    """Automation presence and gesture density.
+
+    ``coverage_pct`` is the fraction of scanned clips that have at least
+    one automation envelope (0.0–1.0). Introduced in v1.10.9 to close
+    BUG-D2's "is this session missing automation?" signal — downstream
+    engines (Wonder Mode, Sound Design, etc.) can branch on a low
+    coverage value to recommend filter sweeps, volume crescendos, and
+    dub-style handoffs that the producer hasn't written yet.
+
+    ``clip_envelope_count`` is the raw total of per-clip envelopes
+    discovered; distinguishes "no automation in the project at all"
+    (count=0) from "automation exists but is lightly used" (count>0 but
+    coverage_pct<0.2).
+    """
 
     automated_params: list[dict] = field(default_factory=list)
     density_by_section: dict[str, float] = field(default_factory=dict)
+    coverage_pct: float = 0.0
+    clip_envelope_count: int = 0
+    clips_scanned: int = 0
     freshness: FreshnessInfo = field(default_factory=FreshnessInfo)
 
     def to_dict(self) -> dict:
         return {
             "automated_params": list(self.automated_params),
             "density_by_section": dict(self.density_by_section),
+            "coverage_pct": round(self.coverage_pct, 3),
+            "clip_envelope_count": self.clip_envelope_count,
+            "clips_scanned": self.clips_scanned,
             "freshness": self.freshness.to_dict(),
         }
 
```
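The docstring above spells out how a consumer is meant to branch on these fields. A minimal sketch of such a consumer, assuming only the `to_dict()` shape shown in this hunk (the function name and the suggested moves are illustrative, not part of the package; the 0.2 threshold comes from the docstring):

```python
# Hypothetical consumer of AutomationGraph.to_dict() output; a sketch, not
# the package's actual Wonder Mode / Sound Design code.
def suggest_automation_moves(automation_graph: dict) -> list[str]:
    scanned = automation_graph.get("clips_scanned", 0)
    envelopes = automation_graph.get("clip_envelope_count", 0)
    coverage = automation_graph.get("coverage_pct", 0.0)

    if scanned == 0:
        # Nothing was probed; "no data" is not the same as "no automation".
        return []
    if envelopes == 0:
        # No automation anywhere in the project.
        return ["add a filter sweep on the lead", "write a volume crescendo into the drop"]
    if coverage < 0.2:
        # Automation exists but is lightly used.
        return ["extend existing envelopes into neighboring sections"]
    return []
```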
```diff
@@ -131,11 +131,15 @@ def build_project_brain(ctx: Context) -> dict:
     # automation actually lives on each clip (session + arrangement). We
     # walk every clip slot that has a clip and ask get_clip_automation, then
     # aggregate into a flat list keyed by section.
+    #
+    # clips_scanned is the denominator for coverage_pct (BUG-D2) — it
+    # counts how many (track, scene) slots we probed, regardless of
+    # whether an envelope came back. Without this, a session with zero
+    # automation would be indistinguishable from a session where we
+    # failed to probe, which is exactly the ambiguity BUG-D2 flagged.
     clip_automation: list[dict] = []
+    clips_scanned = 0
     try:
-        # Iterate session scenes x tracks, plus arrangement clips we already have.
-        # Use the raw enumerate index for section_id so it stays aligned with
-        # arrangement_graph sections (which use the same scheme — see E1 fix).
         for scene_idx, scene in enumerate(scenes or []):
             scene_name = str(scene.get("name", "")).strip()
             if not scene_name:
@@ -143,6 +147,7 @@ def build_project_brain(ctx: Context) -> dict:
             section_id = f"sec_{scene_idx:02d}"
             for track in tracks:
                 t_idx = track.get("index", 0)
+                clips_scanned += 1
                 try:
                     auto_resp = ableton.send_command("get_clip_automation", {
                         "track_index": t_idx,
@@ -196,6 +201,7 @@ def build_project_brain(ctx: Context) -> dict:
         notes_map=notes_map if notes_map else None,
         arrangement_clips=arrangement_clips if arrangement_clips else None,
         clip_automation=clip_automation if clip_automation else None,
+        clips_scanned=clips_scanned,
         analyzer_ok=analyzer_ok,
         flucoma_ok=flucoma_ok,
         session_ok=True,
@@ -230,6 +236,7 @@ def get_project_brain_summary(ctx: Context) -> dict:
         "section_count": len(state.arrangement_graph.sections),
         "role_count": len(state.role_graph.roles),
         "automated_param_count": len(state.automation_graph.automated_params),
+        "automation_coverage_pct": round(state.automation_graph.coverage_pct, 3),
         "tempo": state.session_graph.tempo,
         "time_signature": state.session_graph.time_signature,
         "is_stale": state.is_stale(),
```
```diff
@@ -47,6 +47,13 @@ MCP_TOOLS: frozenset[str] = frozenset({
     "generate_m4l_effect",
     "install_m4l_device",
     "list_genexpr_templates",
+    # MIDI Tool bridge (v1.12.0+) — these run entirely in the MCP server:
+    # config dispatch via OSC to m4l_bridge, cache state reads, filesystem
+    # copy. No TCP remote command, no bridge TCP round-trip.
+    "install_miditool_device",
+    "set_miditool_target",
+    "get_miditool_context",
+    "list_miditool_generators",
 })
 
 
```
```diff
@@ -95,6 +95,33 @@ async def _list_genexpr_templates(params: dict, ctx: Any = None) -> dict:
     return await _call(list_genexpr_templates, ctx, params)
 
 
+# ── MIDI Tool bridge (v1.12.0+) ───────────────────────────────────────────
+#
+# These four run entirely in-process: install_miditool_device copies .amxd
+# files, set_miditool_target writes to MidiToolCache + OSC-sends config,
+# get_miditool_context reads the cache, list_miditool_generators reads the
+# GENERATOR_METADATA dict. None of them need TCP or bridge round-trips.
+
+async def _install_miditool_device(params: dict, ctx: Any = None) -> dict:
+    from ..tools.miditool import install_miditool_device
+    return await _call(install_miditool_device, ctx, params)
+
+
+async def _set_miditool_target(params: dict, ctx: Any = None) -> dict:
+    from ..tools.miditool import set_miditool_target
+    return await _call(set_miditool_target, ctx, params)
+
+
+async def _get_miditool_context(params: dict, ctx: Any = None) -> dict:
+    from ..tools.miditool import get_miditool_context
+    return await _call(get_miditool_context, ctx, params)
+
+
+async def _list_miditool_generators(params: dict, ctx: Any = None) -> dict:
+    from ..tools.miditool import list_miditool_generators
+    return await _call(list_miditool_generators, ctx, params)
+
+
 def build_mcp_dispatch_registry() -> dict[str, Callable]:
     """Return the canonical registry of MCP-only tools for plan execution.
 
@@ -115,4 +142,9 @@ def build_mcp_dispatch_registry() -> dict[str, Callable]:
         "generate_m4l_effect": _generate_m4l_effect,
         "install_m4l_device": _install_m4l_device,
         "list_genexpr_templates": _list_genexpr_templates,
+        # v1.12.0 MIDI Tool bridge
+        "install_miditool_device": _install_miditool_device,
+        "set_miditool_target": _set_miditool_target,
+        "get_miditool_context": _get_miditool_context,
+        "list_miditool_generators": _list_miditool_generators,
     }
```
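Taken together with the MCP_TOOLS hunk above, the registry keeps MIDI Tool handling out of the TCP/bridge path entirely: plan execution looks the tool up and awaits the in-process handler. A usage sketch, assuming the module path implied by the file list (the step shape and the `execute_mcp_step` helper are illustrative, not part of the package):

```python
from typing import Any

# Assumed module path based on the file list above.
from mcp_server.runtime.mcp_dispatch import build_mcp_dispatch_registry


async def execute_mcp_step(step: dict, ctx: Any = None) -> dict:
    # Each handler takes (params, ctx) and returns a dict, as in the hunk above.
    registry = build_mcp_dispatch_registry()
    handler = registry.get(step["tool"])
    if handler is None:
        return {"error": f"unknown MCP-only tool: {step['tool']}"}
    return await handler(step.get("params", {}), ctx)
```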
```diff
@@ -48,6 +48,21 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
     "insert_device",  # 12.3+ native device insertion
     "insert_rack_chain",  # 12.3+ rack chain insertion
     "set_drum_chain_note",  # 12.3+ drum chain note assignment
+    # rack variations + macro CRUD (Live 11+)
+    "get_rack_variations", "store_rack_variation",
+    "recall_rack_variation", "delete_rack_variation",
+    "randomize_rack_macros", "add_rack_macro",
+    "remove_rack_macro", "set_rack_visible_macros",
+    # simpler slice CRUD (Live 11+)
+    "insert_simpler_slice", "move_simpler_slice",
+    "remove_simpler_slice", "clear_simpler_slices",
+    "reset_simpler_slices", "import_slices_from_onsets",
+    # wavetable modulation matrix (Live 11+)
+    "get_wavetable_mod_targets", "add_wavetable_mod_route",
+    "set_wavetable_mod_amount", "get_wavetable_mod_amount",
+    "get_wavetable_mod_matrix",
+    # device A/B compare (Live 12.3+)
+    "get_device_ab_state", "toggle_device_ab", "copy_device_state",
     # clip_automation (3)
     "get_clip_automation", "set_clip_automation", "clear_clip_automation",
     # browser (6)
@@ -65,8 +80,39 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
     "capture_midi", "start_recording", "stop_recording",
     "get_cue_points", "jump_to_cue", "toggle_cue_point",
     "back_to_arranger", "force_arrangement",
+    # scales — Song + per-clip scale awareness (Live 12.0+)
+    "get_song_scale", "set_song_scale", "set_song_scale_mode",
+    "list_available_scales",
+    "get_clip_scale", "set_clip_scale", "set_clip_scale_mode",
+    # tuning system (Live 12.1+)
+    "get_tuning_system", "set_tuning_reference_pitch",
+    "set_tuning_note", "reset_tuning_system",
+    # follow actions — clip (Live 12.0 revamp) + scene (Live 12.2+)
+    "get_clip_follow_action", "set_clip_follow_action",
+    "clear_clip_follow_action", "list_follow_action_types",
+    "apply_follow_action_preset",
+    "get_scene_follow_action", "set_scene_follow_action",
+    "clear_scene_follow_action",
+    # groove pool (Live 11+)
+    "list_grooves", "get_groove_info", "set_groove_params",
+    "assign_clip_groove", "get_clip_groove",
+    "get_song_groove_amount", "set_song_groove_amount",
+    # take lanes (Live 12.0 UI / 12.2 API)
+    "get_take_lanes", "create_take_lane", "set_take_lane_name",
+    "create_audio_clip_on_take_lane", "create_midi_clip_on_take_lane",
+    "get_take_lane_clips",
     # diagnostics (1)
     "get_session_diagnostics",
+    # control surfaces (diagnostic)
+    "list_control_surfaces", "get_control_surface_info",
+    # song primitives — transport/link
+    "tap_tempo", "nudge_tempo",
+    "set_exclusive_arm", "set_exclusive_solo",
+    "capture_and_insert_scene", "set_count_in_duration",
+    "get_link_state", "set_link_enabled", "force_link_beat_time",
+    # track primitives
+    "jump_in_session_clip", "get_track_performance_impact",
+    "get_appointed_device",
     # ping (built-in)
     "ping",
 })
@@ -92,6 +138,14 @@ BRIDGE_COMMANDS: frozenset[str] = frozenset({
     # async Python MCP tool in mcp_server/tools/analyzer.py, not a bridge
     # command. It has no case in livepilot_bridge.js and no @register handler
    # in remote_script. See mcp_server/runtime/execution_router.MCP_TOOLS.
+    # NOTE: MIDI Tool bridge commands (Live 12.0+ MIDI Generators /
+    # Transformations, requires LivePilot_MIDITool.amxd) do NOT belong in
+    # this set. They ride OSC prefixes /miditool/request, /miditool/ready
+    # (bridge→server) and miditool/config, miditool/response (server→bridge),
+    # dispatched through m4l_device/miditool_bridge.js (a separate JS, not
+    # livepilot_bridge.js) and pushed directly via M4LBridge.send_miditool_*
+    # helpers rather than through send_command. BRIDGE_COMMANDS is reserved
+    # for send_command targets that dispatch inside livepilot_bridge.js.
 })
 
 # Combined: all valid send_command targets
```
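The note above draws a hard line: REMOTE_COMMANDS go to the Remote Script over TCP, BRIDGE_COMMANDS dispatch inside livepilot_bridge.js, and MIDI Tool traffic bypasses send_command altogether. A pre-flight check along those lines might look like this (the helper and the import path are illustrative, not part of the package):

```python
# Assumed module path based on the file list above.
from mcp_server.runtime.remote_commands import BRIDGE_COMMANDS, REMOTE_COMMANDS


def route_send_command(command: str) -> str:
    """Return which layer should handle a send_command target, or raise."""
    if command in REMOTE_COMMANDS:
        return "remote_script"  # TCP round-trip to the LivePilot Remote Script
    if command in BRIDGE_COMMANDS:
        return "m4l_bridge"     # dispatched inside livepilot_bridge.js
    raise ValueError(f"{command!r} is not a valid send_command target")
```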
```diff
@@ -0,0 +1,169 @@
+"""Spectral classification of Simpler-slice drum break segments.
+
+Lessons from the 2026-04-18 creative session that drove this module:
+
+- Never assume slice 0 is a kick. Slice content depends on transient
+  detection order in the source audio, not drum-rack convention.
+- Snares ALWAYS have 20-35% mid-frequency energy (drum body resonance).
+  Hi-hats / cymbals have <25% mid (they're thin metal discs with no
+  resonant body). This mid-content threshold is the critical separator
+  that tripped up the first classification pass.
+- Ghosts are defined by low peak amplitude (<0.35) regardless of spectrum.
+
+Thresholds validated against "Break Ghosts 90 bpm" (Ableton Core Library)
+in the session. See ``feedback_analyze_slices_before_programming`` memory
+entry for the full story and the specific slice map for that sample.
+
+This module is pure Python (numpy only). No LivePilot / Ableton /
+FastMCP dependencies — testable and runnable in isolation.
+"""
+from __future__ import annotations
+
+from typing import List, Literal, Sequence, TypedDict
+
+import numpy as np
+
+
+Label = Literal["KICK", "SNARE", "HAT", "ghost"]
+
+
+class SliceFeatures(TypedDict):
+    """Per-slice classification result plus the features that drove the decision."""
+
+    index: int
+    label: Label
+    peak: float
+    rms: float
+    sub_pct: float
+    low_pct: float
+    mid_pct: float
+    high_pct: float
+
+
+# ---------------------------------------------------------------------------
+# Band boundaries (Hz). Tuned against the 2026-04-18 session's working set.
+# ---------------------------------------------------------------------------
+
+_SUB_MAX = 100.0   # sub-bass (kick fundamentals live here)
+_LOW_MAX = 300.0   # body / low-mid (kick "thud" + bass fundamentals)
+_MID_MAX = 3000.0  # presence / drum body / vocals
+# Everything above _MID_MAX is "high" (sizzle / air / cymbal).
+
+
+# ---------------------------------------------------------------------------
+# Classification thresholds. DO NOT loosen these without re-validating on a
+# real break — the 2026-04-18 session proved that relaxing the HAT mid-cap
+# to 32% misclassifies loud snares as hats.
+# ---------------------------------------------------------------------------
+
+_GHOST_PEAK = 0.35        # Below this peak → ghost regardless of spectrum
+_KICK_SUB_LOW_MIN = 0.45  # sub + low must be this dominant
+_KICK_HIGH_MAX = 0.40     # kicks never have >40% high
+_SNARE_MID_MIN = 0.25     # snares HAVE a drum body (mid content)
+_SNARE_HIGH_MIN = 0.40    # + sizzle
+_SNARE_PEAK_MIN = 0.60    # + loud enough to not be a ghost
+_HAT_HIGH_MIN = 0.70      # hats are thin metal — overwhelmingly high
+_HAT_MID_MAX = 0.25       # hats have almost no drum body
+
+
+def _band_energy(segment: np.ndarray, sr: int) -> tuple[float, float, float, float]:
+    """Return (sub, low, mid, high) energy fractions that sum to ~1.0.
+
+    Uses an rFFT. If the segment is empty or silent, returns equal quarters
+    so downstream logic doesn't have to handle zero-sum edge cases (the
+    caller still sees silence via the peak/rms fields).
+    """
+    if len(segment) < 2:
+        return 0.25, 0.25, 0.25, 0.25
+    spec = np.abs(np.fft.rfft(segment))
+    total = float(spec.sum())
+    if total <= 0:
+        return 0.25, 0.25, 0.25, 0.25
+    freqs = np.fft.rfftfreq(len(segment), 1.0 / sr)
+    sub = float(spec[freqs < _SUB_MAX].sum() / total)
+    low = float(spec[(freqs >= _SUB_MAX) & (freqs < _LOW_MAX)].sum() / total)
+    mid = float(spec[(freqs >= _LOW_MAX) & (freqs < _MID_MAX)].sum() / total)
+    high = float(spec[freqs >= _MID_MAX].sum() / total)
+    return sub, low, mid, high
+
+
+def classify_segment(segment: np.ndarray, sr: int) -> Label:
+    """Classify a single audio segment as KICK / SNARE / HAT / ghost.
+
+    Returns the label string. See module docstring for the reasoning behind
+    each threshold.
+    """
+    if len(segment) < 2:
+        return "ghost"
+    peak = float(np.max(np.abs(segment)))
+    if peak < _GHOST_PEAK:
+        return "ghost"
+
+    sub, low, mid, high = _band_energy(segment, sr)
+
+    # KICK: sub+low dominance with limited high content.
+    if (sub + low) >= _KICK_SUB_LOW_MIN and high < _KICK_HIGH_MAX:
+        return "KICK"
+
+    # HAT: overwhelmingly high-freq, almost no drum-body mid content.
+    if high >= _HAT_HIGH_MIN and mid <= _HAT_MID_MAX:
+        return "HAT"
+
+    # SNARE: broadband (mid body + high sizzle) AND loud.
+    if (
+        mid >= _SNARE_MID_MIN
+        and high >= _SNARE_HIGH_MIN
+        and peak >= _SNARE_PEAK_MIN
+    ):
+        return "SNARE"
+
+    # Fallback for ambiguous mid/high dominant loud hits — usually
+    # snares with unusual spectrum (e.g., rim-shots, piccolo snare).
+    if peak >= _SNARE_PEAK_MIN and (mid + high) >= 0.70:
+        return "SNARE"
+
+    # If nothing else matched but there's real energy, call it a hat
+    # rather than a ghost (ghost is reserved for quiet hits).
+    return "HAT"
+
+
+def classify_slices(
+    audio: np.ndarray,
+    sr: int,
+    frame_boundaries: Sequence[int],
+) -> List[SliceFeatures]:
+    """Classify every slice defined by ``frame_boundaries``.
+
+    ``frame_boundaries`` is a list of N+1 frame positions defining N slices.
+    For a sample with slices starting at frames [0, 1000, 3000, 5000] and
+    total length 10000 frames, pass [0, 1000, 3000, 5000, 10000].
+
+    Stereo input is auto-downmixed (mean of the two channels).
+
+    Returns a list of ``SliceFeatures`` in slice-index order.
+    """
+    if audio.ndim > 1:
+        audio = audio.mean(axis=1)
+
+    results: List[SliceFeatures] = []
+    for i in range(len(frame_boundaries) - 1):
+        start = int(frame_boundaries[i])
+        end = int(frame_boundaries[i + 1])
+        segment = audio[start:end]
+        label = classify_segment(segment, sr)
+        peak = float(np.max(np.abs(segment))) if len(segment) else 0.0
+        rms = float(np.sqrt(np.mean(segment ** 2))) if len(segment) else 0.0
+        sub, low, mid, high = _band_energy(segment, sr)
+        results.append(
+            SliceFeatures(
+                index=i,
+                label=label,
+                peak=peak,
+                rms=rms,
+                sub_pct=sub,
+                low_pct=low,
+                mid_pct=mid,
+                high_pct=high,
+            )
+        )
+    return results
```
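Because the classifier has no Ableton or FastMCP dependencies, it can be smoke-tested on synthetic audio. A minimal sketch, assuming the import path implied by the file list; the sine/noise test tones stand in for real slices, and with the thresholds above they should come out KICK, HAT, ghost:

```python
import numpy as np

# Assumed import path from the file list: mcp_server/sample_engine/slice_classifier.py
from mcp_server.sample_engine.slice_classifier import classify_slices

sr = 44100
t = np.linspace(0.0, 0.25, int(sr * 0.25), endpoint=False)

kick = 0.9 * np.sin(2 * np.pi * 60.0 * t)                     # sub-heavy hit
hat = 0.5 * np.random.default_rng(0).standard_normal(t.size)  # broadband, high-dominant
ghost = 0.1 * np.sin(2 * np.pi * 200.0 * t)                   # quiet hit

audio = np.concatenate([kick, hat, ghost])
n = t.size
boundaries = [0, n, 2 * n, 3 * n]  # N+1 boundaries for N = 3 slices

for feat in classify_slices(audio, sr, boundaries):
    print(feat["index"], feat["label"], round(feat["peak"], 2))
```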
```diff
@@ -107,16 +107,137 @@ def preview_semantic_move(
     return result
 
 
+def _build_taste_context(ctx: Context) -> dict:
+    """Pull the active taste graph for ranking, with defensive fallbacks.
+
+    Returns a dict with ``dimension_weights``, ``dimension_avoidances``,
+    ``move_family_scores`` (family → score), and ``evidence_count``.
+    Empty dicts when no taste has been recorded yet — the ranker then
+    collapses to pure keyword matching, which is the correct behavior for
+    a cold-start user with no history.
+    """
+    try:
+        from ..memory.taste_graph import build_taste_graph
+        from ..memory.taste_memory import TasteMemoryStore
+        from ..memory.anti_memory import AntiMemoryStore
+
+        taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
+        anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
+        graph = build_taste_graph(taste_store=taste_store, anti_store=anti_store)
+
+        move_family_scores: dict[str, float] = {}
+        for family, entry in getattr(graph, "move_family_scores", {}).items():
+            score = getattr(entry, "score", None)
+            if isinstance(score, (int, float)):
+                move_family_scores[family] = float(score)
+
+        return {
+            "dimension_weights": dict(getattr(graph, "dimension_weights", {}) or {}),
+            "dimension_avoidances": dict(getattr(graph, "dimension_avoidances", {}) or {}),
+            "move_family_scores": move_family_scores,
+            "evidence_count": int(getattr(graph, "evidence_count", 0) or 0),
+        }
+    except Exception as exc:
+        logger.debug("_build_taste_context failed: %s", exc)
+        return {
+            "dimension_weights": {},
+            "dimension_avoidances": {},
+            "move_family_scores": {},
+            "evidence_count": 0,
+        }
+
+
+def _score_move_for_request(move, request_lower: str, request_words: set, taste: dict) -> tuple[float, dict]:
+    """Compute the composite score for a single move.
+
+    Composition:
+        0.55 × keyword overlap (intent + move_id + targets)
+        0.30 × taste alignment (from taste_graph.dimension_weights on move.targets)
+        0.15 × (1 - anti avoidance penalty) (from dimension_avoidances)
+
+    ± up to 0.10 family bonus/penalty from move_family_scores[family].
+
+    When the user has no recorded taste (evidence_count == 0), the taste
+    and anti-penalty components collapse to neutral 0.5 so cold-start
+    behavior stays identical to the old keyword-only ranker.
+    """
+    # ── Keyword overlap component (0..1) ──────────────────────────────
+    intent_lower = move.intent.lower()
+    move_words = set(move.move_id.replace("_", " ").split())
+    intent_words = set(intent_lower.split())
+
+    overlap = request_words & (move_words | intent_words)
+    keyword_score = min(1.0, len(overlap) * 0.3)
+
+    for dim in move.targets:
+        if dim.lower() in request_lower:
+            keyword_score = min(1.0, keyword_score + 0.2)
+
+    if move.move_id.replace("_", " ") in request_lower:
+        keyword_score = 1.0
+
+    # ── Taste alignment component (0..1) ──────────────────────────────
+    evidence_count = taste["evidence_count"]
+    dim_weights = taste["dimension_weights"]
+    dim_avoid = taste["dimension_avoidances"]
+
+    if evidence_count > 0 and move.targets:
+        # Average dimension_weights for this move's targets; weights are
+        # -1..1 with 0 meaning unknown. Remap to 0..1 so "neutral" is 0.5.
+        raw_taste = [
+            dim_weights.get(dim, 0.0) for dim in move.targets
+        ]
+        taste_alignment = sum((w + 1.0) / 2.0 for w in raw_taste) / len(raw_taste)
+        avoidance = sum(
+            dim_avoid.get(dim, 0.0) for dim in move.targets
+        ) / len(move.targets)
+        avoidance = max(0.0, min(1.0, avoidance))
+    else:
+        taste_alignment = 0.5
+        avoidance = 0.0
+
+    composite = (
+        0.55 * keyword_score
+        + 0.30 * taste_alignment
+        + 0.15 * (1.0 - avoidance)
+    )
+
+    # ── Family bonus/penalty (±0.1) ────────────────────────────────────
+    family_bonus = 0.0
+    family_score = taste["move_family_scores"].get(move.family)
+    if family_score is not None:
+        # family score is 0..1 with 0.5 neutral; remap to -0.1..+0.1
+        family_bonus = (family_score - 0.5) * 0.2
+    composite += family_bonus
+
+    composite = max(0.0, min(1.0, composite))
+
+    breakdown = {
+        "keyword_score": round(keyword_score, 3),
+        "taste_alignment": round(taste_alignment, 3),
+        "avoidance_penalty": round(avoidance, 3),
+        "family_bonus": round(family_bonus, 3),
+        "evidence_count": evidence_count,
+    }
+    return composite, breakdown
+
+
 @mcp.tool()
 def propose_next_best_move(
     ctx: Context,
     request_text: str,
     limit: int = 3,
 ) -> dict:
-    """Propose the best semantic moves for a natural language request.
+    """Propose the best semantic moves for a natural language request, ranked
+    by keyword fit AND the active taste graph.
 
-    Analyzes the request text and ranks available semantic moves by
-    relevance. Returns up to `limit` suggestions with confidence scores.
+    Shipped in v1.10.9: ranking is no longer pure keyword overlap — it now
+    blends keyword match with taste alignment (``dimension_weights`` on each
+    move's targets), an anti-preference penalty (``dimension_avoidances``),
+    and a small family bonus from ``move_family_scores``. Cold-start users
+    with zero recorded evidence get the same ranking as before; users with
+    history see recommendations pulled toward dimensions they've kept and
+    away from ones they've undone.
 
     request_text: what the user wants (e.g., "make this punchier",
         "tighten the low end", "reduce repetition")
@@ -125,50 +246,37 @@ def propose_next_best_move(
     if not request_text.strip():
         return {"error": "request_text cannot be empty"}
 
-    # Simple keyword matching for now — will be replaced by conductor
-    # routing + taste ranking in V2 Step 7
     request_lower = request_text.lower()
+    request_words = set(request_lower.split())
+    taste = _build_taste_context(ctx)
     all_moves = list(registry._REGISTRY.values())
 
-    scored = []
+    scored: list[tuple[object, float, dict]] = []
    for move in all_moves:
-        score = 0.0
-        # Match keywords from intent and move_id
-        intent_lower = move.intent.lower()
-        move_words = set(move.move_id.replace("_", " ").split())
-        intent_words = set(intent_lower.split())
-        request_words = set(request_lower.split())
-
-        # Word overlap scoring
-        overlap = request_words & (move_words | intent_words)
-        score += len(overlap) * 0.3
-
-        # Dimension matching
-        for dim in move.targets:
-            if dim in request_lower:
-                score += 0.2
-
-        # Boost exact intent matches
-        if move.move_id.replace("_", " ") in request_lower:
-            score += 1.0
-
-        if score > 0:
-            scored.append((move, min(score, 1.0)))
-
-    # Sort by score descending
+        score, breakdown = _score_move_for_request(
+            move, request_lower, request_words, taste,
+        )
+        # Keep only moves that had any keyword signal or strong taste pull —
+        # a move with zero keyword overlap AND neutral taste would be noise.
+        if breakdown["keyword_score"] > 0 or taste["evidence_count"] >= 5:
+            scored.append((move, score, breakdown))
+
     scored.sort(key=lambda x: -x[1])
     top = scored[:limit]
 
     suggestions = []
-    for move, score in top:
+    for move, score, breakdown in top:
        d = move.to_dict()
        d["match_score"] = round(score, 3)
+        d["score_breakdown"] = breakdown
        suggestions.append(d)
 
    return {
        "request": request_text,
        "suggestions": suggestions,
        "count": len(suggestions),
+        "taste_active": taste["evidence_count"] > 0,
+        "taste_evidence_count": taste["evidence_count"],
    }
 
```
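The composite weighting is easiest to sanity-check with concrete numbers. A worked sketch of the blend (all inputs are made up; the keyword stage is taken as already computed):

```python
# Suppose the keyword stage lands at 0.8 and the move targets two dimensions
# the user has rated +0.6 and +0.2 on the -1..1 scale (0 = unknown).
keyword_score = 0.8
taste_alignment = ((0.6 + 1.0) / 2 + (0.2 + 1.0) / 2) / 2  # 0.70 after remapping to 0..1
avoidance = 0.0                                            # no anti-preference recorded
family_bonus = (0.7 - 0.5) * 0.2                           # family score 0.7 gives +0.04

composite = (
    0.55 * keyword_score
    + 0.30 * taste_alignment
    + 0.15 * (1.0 - avoidance)
    + family_bonus
)
print(round(composite, 3))  # 0.84
```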