livepilot 1.10.9 → 1.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/CHANGELOG.md +327 -0
  2. package/README.md +7 -7
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/branches/__init__.py +32 -0
  7. package/mcp_server/branches/types.py +230 -0
  8. package/mcp_server/composer/__init__.py +10 -1
  9. package/mcp_server/composer/branch_producer.py +229 -0
  10. package/mcp_server/evaluation/policy.py +129 -2
  11. package/mcp_server/experiment/engine.py +47 -11
  12. package/mcp_server/experiment/models.py +72 -7
  13. package/mcp_server/experiment/tools.py +231 -35
  14. package/mcp_server/m4l_bridge.py +488 -13
  15. package/mcp_server/memory/taste_graph.py +84 -11
  16. package/mcp_server/persistence/taste_store.py +21 -5
  17. package/mcp_server/runtime/execution_router.py +7 -0
  18. package/mcp_server/runtime/mcp_dispatch.py +32 -0
  19. package/mcp_server/runtime/remote_commands.py +54 -0
  20. package/mcp_server/runtime/session_kernel.py +46 -0
  21. package/mcp_server/runtime/tools.py +29 -3
  22. package/mcp_server/sample_engine/slice_classifier.py +169 -0
  23. package/mcp_server/server.py +11 -3
  24. package/mcp_server/synthesis_brain/__init__.py +53 -0
  25. package/mcp_server/synthesis_brain/adapters/__init__.py +34 -0
  26. package/mcp_server/synthesis_brain/adapters/analog.py +167 -0
  27. package/mcp_server/synthesis_brain/adapters/base.py +86 -0
  28. package/mcp_server/synthesis_brain/adapters/drift.py +166 -0
  29. package/mcp_server/synthesis_brain/adapters/meld.py +151 -0
  30. package/mcp_server/synthesis_brain/adapters/operator.py +169 -0
  31. package/mcp_server/synthesis_brain/adapters/wavetable.py +228 -0
  32. package/mcp_server/synthesis_brain/engine.py +91 -0
  33. package/mcp_server/synthesis_brain/models.py +121 -0
  34. package/mcp_server/synthesis_brain/timbre.py +194 -0
  35. package/mcp_server/tools/_conductor.py +144 -0
  36. package/mcp_server/tools/analyzer.py +187 -7
  37. package/mcp_server/tools/clips.py +65 -0
  38. package/mcp_server/tools/devices.py +517 -5
  39. package/mcp_server/tools/diagnostics.py +42 -0
  40. package/mcp_server/tools/follow_actions.py +202 -0
  41. package/mcp_server/tools/grooves.py +142 -0
  42. package/mcp_server/tools/miditool.py +280 -0
  43. package/mcp_server/tools/scales.py +126 -0
  44. package/mcp_server/tools/take_lanes.py +135 -0
  45. package/mcp_server/tools/tracks.py +46 -3
  46. package/mcp_server/tools/transport.py +62 -1
  47. package/mcp_server/wonder_mode/engine.py +324 -0
  48. package/mcp_server/wonder_mode/tools.py +153 -1
  49. package/package.json +2 -2
  50. package/remote_script/LivePilot/__init__.py +8 -4
  51. package/remote_script/LivePilot/clips.py +62 -0
  52. package/remote_script/LivePilot/devices.py +444 -0
  53. package/remote_script/LivePilot/diagnostics.py +52 -1
  54. package/remote_script/LivePilot/follow_actions.py +235 -0
  55. package/remote_script/LivePilot/grooves.py +185 -0
  56. package/remote_script/LivePilot/scales.py +138 -0
  57. package/remote_script/LivePilot/take_lanes.py +175 -0
  58. package/remote_script/LivePilot/tracks.py +59 -1
  59. package/remote_script/LivePilot/transport.py +90 -1
  60. package/remote_script/LivePilot/version_detect.py +9 -0
  61. package/server.json +3 -3
@@ -75,13 +75,37 @@ class TasteGraph:
75
75
  # Device preferences
76
76
  device_affinities: dict[str, DeviceAffinity] = field(default_factory=dict)
77
77
 
78
- # Novelty tolerance: 0 = very conservative, 1 = very experimental
79
- novelty_band: float = 0.5
78
+ # PR8 per-goal-mode novelty bands. Canonical source of truth.
79
+ # Keys are goal modes ("improve", "explore", or any string a caller
80
+ # supplies). novelty_band (below, as a property) reads/writes
81
+ # novelty_bands["improve"] — one storage, two access paths.
82
+ novelty_bands: dict = field(
83
+ default_factory=lambda: {"improve": 0.5, "explore": 0.5}
84
+ )
85
+
86
+ # PR8 — when True, rank_moves returns uniform taste scores (0.5) to
87
+ # bypass taste filtering during branch generation. Callers flip this
88
+ # for fresh / surprise-me mode so novelty survives to post-hoc ranking.
89
+ bypass_taste_in_generation: bool = False
80
90
 
81
91
  # Total evidence count (how many decisions informed this graph)
82
92
  evidence_count: int = 0
83
93
  last_updated_ms: int = 0
84
94
 
95
+ @property
96
+ def novelty_band(self) -> float:
97
+ """Legacy flat novelty band — mirrors novelty_bands["improve"].
98
+
99
+ Kept for back-compat with callers that set/read novelty_band
100
+ directly. Setting this property writes through to the bands dict
101
+ so there's no dual source of truth.
102
+ """
103
+ return self.novelty_bands.get("improve", 0.5)
104
+
105
+ @novelty_band.setter
106
+ def novelty_band(self, value: float) -> None:
107
+ self.novelty_bands["improve"] = float(value)
108
+
85
109
  def to_dict(self) -> dict:
86
110
  return {
87
111
  "dimension_weights": self.dimension_weights,
@@ -96,7 +120,11 @@ class TasteGraph:
96
120
  key=lambda x: -x[1].affinity,
97
121
  )[:10] # Top 10 only
98
122
  },
123
+ # novelty_band kept for legacy consumers that read it directly;
124
+ # novelty_bands is the canonical per-goal-mode shape going forward.
99
125
  "novelty_band": round(self.novelty_band, 3),
126
+ "novelty_bands": {k: round(v, 3) for k, v in self.novelty_bands.items()},
127
+ "bypass_taste_in_generation": self.bypass_taste_in_generation,
100
128
  "evidence_count": self.evidence_count,
101
129
  }
102
130
 
@@ -154,21 +182,53 @@ class TasteGraph:
154
182
  self.evidence_count += 1
155
183
  self.last_updated_ms = now
156
184
 
157
- def update_novelty_from_experiment(self, chose_bold: bool) -> None:
158
- """Shift novelty band based on experiment choices."""
185
+ def update_novelty_from_experiment(
186
+ self, chose_bold: bool, goal_mode: str = "improve",
187
+ ) -> None:
188
+ """Shift novelty band for a given goal mode based on experiment choices.
189
+
190
+ PR8: goal_mode defaults to "improve" so legacy callers land on the
191
+ same band they updated before. Pass "explore" to shift the
192
+ exploration-mode band without touching improve-mode preference.
193
+ (novelty_band is a property view over novelty_bands["improve"], so
194
+ improve-mode updates automatically surface there too.)
195
+ """
196
+ current = self.novelty_bands.get(goal_mode, 0.5)
159
197
  if chose_bold:
160
- self.novelty_band = min(1.0, self.novelty_band + 0.05)
198
+ new_val = min(1.0, current + 0.05)
161
199
  else:
162
- self.novelty_band = max(0.0, self.novelty_band - 0.05)
200
+ new_val = max(0.0, current - 0.05)
201
+ self.novelty_bands[goal_mode] = new_val
163
202
 
164
203
  # ── Ranking ──────────────────────────────────────────────────────
165
204
 
166
- def rank_moves(self, move_specs: list[dict]) -> list[dict]:
205
+ def rank_moves(
206
+ self,
207
+ move_specs: list[dict],
208
+ goal_mode: str = "improve",
209
+ ) -> list[dict]:
167
210
  """Rank a list of semantic move dicts by taste fit.
168
211
 
169
212
  Each move dict should have: move_id, family, targets, risk_level.
170
213
  Returns the same dicts with added 'taste_score' field, sorted desc.
214
+
215
+ PR8 additions:
216
+ goal_mode (str, default "improve"): which novelty band to use for
217
+ risk alignment. "improve" respects the user's conservative history;
218
+ "explore" uses the explore-mode band so past timid choices don't
219
+ punish surprise-me branch generation.
220
+ bypass_taste_in_generation (instance flag): when True, every move
221
+ scores a uniform 0.5. Used during branch generation so taste
222
+ doesn't prune novelty before the user has a chance to audition.
223
+ Ranking order is preserved from input when this flag is on.
171
224
  """
225
+ if self.bypass_taste_in_generation:
226
+ return [dict(move, taste_score=0.5) for move in move_specs]
227
+
228
+ # Read the band for the requested mode. Falls back to the improve
229
+ # band (via self.novelty_band property) when the mode is unknown.
230
+ novelty_band = self.novelty_bands.get(goal_mode, self.novelty_band)
231
+
172
232
  ranked = []
173
233
  for move in move_specs:
174
234
  taste_score = 0.5 # Neutral baseline
@@ -190,10 +250,10 @@ class TasteGraph:
190
250
  if dim in self.dimension_avoidances:
191
251
  taste_score -= 0.3
192
252
 
193
- # Novelty/risk alignment
253
+ # Novelty/risk alignment (PR8: per-mode band)
194
254
  risk = move.get("risk_level", "low")
195
255
  risk_val = {"low": 0.2, "medium": 0.5, "high": 0.8}.get(risk, 0.5)
196
- novelty_match = 1.0 - abs(risk_val - self.novelty_band)
256
+ novelty_match = 1.0 - abs(risk_val - novelty_band)
197
257
  taste_score += novelty_match * 0.1
198
258
 
199
259
  # Clamp
@@ -297,8 +357,21 @@ def build_taste_graph(
297
357
  if total > 0:
298
358
  fam.score = round((fam.kept_count - fam.undone_count) / total, 3)
299
359
 
300
- # Novelty band
301
- graph.novelty_band = persisted.get("novelty_band", 0.5)
360
+ # Novelty band — migrate from flat float if present, OR read
361
+ # per-mode dict if newer persistence format has it (PR8).
362
+ persisted_band = persisted.get("novelty_band", 0.5)
363
+ persisted_bands = persisted.get("novelty_bands")
364
+ if isinstance(persisted_bands, dict) and persisted_bands:
365
+ graph.novelty_bands = {
366
+ k: float(v) for k, v in persisted_bands.items() if isinstance(v, (int, float))
367
+ }
368
+ # Ensure both canonical keys are present with sensible defaults.
369
+ graph.novelty_bands.setdefault("improve", persisted_band)
370
+ graph.novelty_bands.setdefault("explore", persisted_band)
371
+ graph.novelty_band = graph.novelty_bands["improve"]
372
+ else:
373
+ graph.novelty_band = persisted_band
374
+ graph.novelty_bands = {"improve": persisted_band, "explore": persisted_band}
302
375
 
303
376
  # Device affinities
304
377
  for dev_name, dev_data in persisted.get("device_affinities", {}).items():
@@ -48,15 +48,29 @@ class PersistentTasteStore:
48
48
  return data
49
49
  self._store.update(_update)
50
50
 
51
- def update_novelty(self, chose_bold: bool) -> None:
52
- """Update novelty band from experiment choice."""
51
+ def update_novelty(self, chose_bold: bool, goal_mode: str = "improve") -> None:
52
+ """Update novelty band from experiment choice for a given goal mode.
53
+
54
+ PR8: goal_mode defaults to "improve" so legacy callers land on the
55
+ same band they updated before. The per-mode dict ``novelty_bands``
56
+ is maintained alongside the flat ``novelty_band`` field; the flat
57
+ field mirrors the "improve" band.
58
+ """
53
59
  def _update(data: dict) -> dict:
54
60
  data = data if data.get("version") == 1 else self._default()
55
- band = data.get("novelty_band", 0.5)
61
+ # Ensure the per-mode dict exists (migrating from legacy shape).
62
+ bands = data.get("novelty_bands")
63
+ if not isinstance(bands, dict) or not bands:
64
+ flat = data.get("novelty_band", 0.5)
65
+ bands = {"improve": flat, "explore": flat}
66
+ current = bands.get(goal_mode, 0.5)
56
67
  if chose_bold:
57
- data["novelty_band"] = min(1.0, band + 0.05)
68
+ bands[goal_mode] = min(1.0, current + 0.05)
58
69
  else:
59
- data["novelty_band"] = max(0.0, band - 0.05)
70
+ bands[goal_mode] = max(0.0, current - 0.05)
71
+ data["novelty_bands"] = bands
72
+ # Mirror the improve band onto the flat field for back-compat.
73
+ data["novelty_band"] = bands.get("improve", 0.5)
60
74
  data["evidence_count"] = data.get("evidence_count", 0) + 1
61
75
  return data
62
76
  self._store.update(_update)
@@ -114,6 +128,8 @@ class PersistentTasteStore:
114
128
  "version": 1,
115
129
  "move_outcomes": {},
116
130
  "novelty_band": 0.5,
131
+ # PR8 — per-goal-mode novelty bands; novelty_band mirrors "improve"
132
+ "novelty_bands": {"improve": 0.5, "explore": 0.5},
117
133
  "device_affinities": {},
118
134
  "anti_preferences": [],
119
135
  "dimension_weights": {},
@@ -47,6 +47,13 @@ MCP_TOOLS: frozenset[str] = frozenset({
47
47
  "generate_m4l_effect",
48
48
  "install_m4l_device",
49
49
  "list_genexpr_templates",
50
+ # MIDI Tool bridge (v1.12.0+) — these run entirely in the MCP server:
51
+ # config dispatch via OSC to m4l_bridge, cache state reads, filesystem
52
+ # copy. No TCP remote command, no bridge TCP round-trip.
53
+ "install_miditool_device",
54
+ "set_miditool_target",
55
+ "get_miditool_context",
56
+ "list_miditool_generators",
50
57
  })
51
58
 
52
59
 
@@ -95,6 +95,33 @@ async def _list_genexpr_templates(params: dict, ctx: Any = None) -> dict:
95
95
  return await _call(list_genexpr_templates, ctx, params)
96
96
 
97
97
 
98
+ # ── MIDI Tool bridge (v1.12.0+) ───────────────────────────────────────────
99
+ #
100
+ # These four run entirely in-process: install_miditool_device copies .amxd
101
+ # files, set_miditool_target writes to MidiToolCache + OSC-sends config,
102
+ # get_miditool_context reads the cache, list_miditool_generators reads the
103
+ # GENERATOR_METADATA dict. None of them need TCP or bridge round-trips.
104
+
105
+ async def _install_miditool_device(params: dict, ctx: Any = None) -> dict:
106
+ from ..tools.miditool import install_miditool_device
107
+ return await _call(install_miditool_device, ctx, params)
108
+
109
+
110
+ async def _set_miditool_target(params: dict, ctx: Any = None) -> dict:
111
+ from ..tools.miditool import set_miditool_target
112
+ return await _call(set_miditool_target, ctx, params)
113
+
114
+
115
+ async def _get_miditool_context(params: dict, ctx: Any = None) -> dict:
116
+ from ..tools.miditool import get_miditool_context
117
+ return await _call(get_miditool_context, ctx, params)
118
+
119
+
120
+ async def _list_miditool_generators(params: dict, ctx: Any = None) -> dict:
121
+ from ..tools.miditool import list_miditool_generators
122
+ return await _call(list_miditool_generators, ctx, params)
123
+
124
+
98
125
  def build_mcp_dispatch_registry() -> dict[str, Callable]:
99
126
  """Return the canonical registry of MCP-only tools for plan execution.
100
127
 
@@ -115,4 +142,9 @@ def build_mcp_dispatch_registry() -> dict[str, Callable]:
115
142
  "generate_m4l_effect": _generate_m4l_effect,
116
143
  "install_m4l_device": _install_m4l_device,
117
144
  "list_genexpr_templates": _list_genexpr_templates,
145
+ # v1.12.0 MIDI Tool bridge
146
+ "install_miditool_device": _install_miditool_device,
147
+ "set_miditool_target": _set_miditool_target,
148
+ "get_miditool_context": _get_miditool_context,
149
+ "list_miditool_generators": _list_miditool_generators,
118
150
  }
@@ -48,6 +48,21 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
48
48
  "insert_device", # 12.3+ native device insertion
49
49
  "insert_rack_chain", # 12.3+ rack chain insertion
50
50
  "set_drum_chain_note", # 12.3+ drum chain note assignment
51
+ # rack variations + macro CRUD (Live 11+)
52
+ "get_rack_variations", "store_rack_variation",
53
+ "recall_rack_variation", "delete_rack_variation",
54
+ "randomize_rack_macros", "add_rack_macro",
55
+ "remove_rack_macro", "set_rack_visible_macros",
56
+ # simpler slice CRUD (Live 11+)
57
+ "insert_simpler_slice", "move_simpler_slice",
58
+ "remove_simpler_slice", "clear_simpler_slices",
59
+ "reset_simpler_slices", "import_slices_from_onsets",
60
+ # wavetable modulation matrix (Live 11+)
61
+ "get_wavetable_mod_targets", "add_wavetable_mod_route",
62
+ "set_wavetable_mod_amount", "get_wavetable_mod_amount",
63
+ "get_wavetable_mod_matrix",
64
+ # device A/B compare (Live 12.3+)
65
+ "get_device_ab_state", "toggle_device_ab", "copy_device_state",
51
66
  # clip_automation (3)
52
67
  "get_clip_automation", "set_clip_automation", "clear_clip_automation",
53
68
  # browser (6)
@@ -65,8 +80,39 @@ REMOTE_COMMANDS: frozenset[str] = frozenset({
65
80
  "capture_midi", "start_recording", "stop_recording",
66
81
  "get_cue_points", "jump_to_cue", "toggle_cue_point",
67
82
  "back_to_arranger", "force_arrangement",
83
+ # scales — Song + per-clip scale awareness (Live 12.0+)
84
+ "get_song_scale", "set_song_scale", "set_song_scale_mode",
85
+ "list_available_scales",
86
+ "get_clip_scale", "set_clip_scale", "set_clip_scale_mode",
87
+ # tuning system (Live 12.1+)
88
+ "get_tuning_system", "set_tuning_reference_pitch",
89
+ "set_tuning_note", "reset_tuning_system",
90
+ # follow actions — clip (Live 12.0 revamp) + scene (Live 12.2+)
91
+ "get_clip_follow_action", "set_clip_follow_action",
92
+ "clear_clip_follow_action", "list_follow_action_types",
93
+ "apply_follow_action_preset",
94
+ "get_scene_follow_action", "set_scene_follow_action",
95
+ "clear_scene_follow_action",
96
+ # groove pool (Live 11+)
97
+ "list_grooves", "get_groove_info", "set_groove_params",
98
+ "assign_clip_groove", "get_clip_groove",
99
+ "get_song_groove_amount", "set_song_groove_amount",
100
+ # take lanes (Live 12.0 UI / 12.2 API)
101
+ "get_take_lanes", "create_take_lane", "set_take_lane_name",
102
+ "create_audio_clip_on_take_lane", "create_midi_clip_on_take_lane",
103
+ "get_take_lane_clips",
68
104
  # diagnostics (1)
69
105
  "get_session_diagnostics",
106
+ # control surfaces (diagnostic)
107
+ "list_control_surfaces", "get_control_surface_info",
108
+ # song primitives — transport/link
109
+ "tap_tempo", "nudge_tempo",
110
+ "set_exclusive_arm", "set_exclusive_solo",
111
+ "capture_and_insert_scene", "set_count_in_duration",
112
+ "get_link_state", "set_link_enabled", "force_link_beat_time",
113
+ # track primitives
114
+ "jump_in_session_clip", "get_track_performance_impact",
115
+ "get_appointed_device",
70
116
  # ping (built-in)
71
117
  "ping",
72
118
  })
@@ -92,6 +138,14 @@ BRIDGE_COMMANDS: frozenset[str] = frozenset({
92
138
  # async Python MCP tool in mcp_server/tools/analyzer.py, not a bridge
93
139
  # command. It has no case in livepilot_bridge.js and no @register handler
94
140
  # in remote_script. See mcp_server/runtime/execution_router.MCP_TOOLS.
141
+ # NOTE: MIDI Tool bridge commands (Live 12.0+ MIDI Generators /
142
+ # Transformations, requires LivePilot_MIDITool.amxd) do NOT belong in
143
+ # this set. They ride OSC prefixes /miditool/request, /miditool/ready
144
+ # (bridge→server) and miditool/config, miditool/response (server→bridge),
145
+ # dispatched through m4l_device/miditool_bridge.js (a separate JS, not
146
+ # livepilot_bridge.js) and pushed directly via M4LBridge.send_miditool_*
147
+ # helpers rather than through send_command. BRIDGE_COMMANDS is reserved
148
+ # for send_command targets that dispatch inside livepilot_bridge.js.
95
149
  })
96
150
 
97
151
  # Combined: all valid send_command targets
@@ -45,6 +45,37 @@ class SessionKernel:
45
45
  recommended_engines: list = field(default_factory=list)
46
46
  recommended_workflow: str = ""
47
47
 
48
+ # ── Creative controls (PR2 — branch-native migration) ──────────────
49
+ # All optional. Producers (Wonder, synthesis_brain, composer) read these
50
+ # to bias branch generation. Pre-PR2 callers leave them at defaults and
51
+ # nothing changes. PR6 (Wonder refactor) and PR9 (synthesis_brain) start
52
+ # reading them in earnest.
53
+
54
+ # 0.0 = conservative / don't surprise me; 1.0 = surprise me.
55
+ # Distinct from aggression (which is about execution boldness).
56
+ freshness: float = 0.5
57
+
58
+ # Shorthand producer philosophy tag. The sample_engine already uses
59
+ # "surgeon" / "alchemist" (see livepilot-sample-engine); synth work
60
+ # may add "sculptor". Empty string = producer picks a default.
61
+ creativity_profile: str = ""
62
+
63
+ # Caller-asserted sacred elements. Normally sacred elements come from
64
+ # song_brain; this lets the user or a skill override. Shape matches
65
+ # song_brain.sacred_elements entries: {element_type, description, salience}.
66
+ sacred_elements: list = field(default_factory=list)
67
+
68
+ # Hints for synthesis_brain: which tracks/devices to focus on and what
69
+ # target timbre to aim for. Shape is open in PR2 and will be firmed up
70
+ # when PR9 adds the first adapters.
71
+ # {
72
+ # "track_indices": [int, ...],
73
+ # "device_paths": ["track/Wavetable", ...],
74
+ # "target_timbre": {"brightness": +0.3, "width": +0.2, ...},
75
+ # "preferred_devices": ["Wavetable", "Operator", ...],
76
+ # }
77
+ synth_hints: dict = field(default_factory=dict)
78
+
48
79
  def to_dict(self) -> dict:
49
80
  return asdict(self)
50
81
 
@@ -60,12 +91,23 @@ def build_session_kernel(
60
91
  taste_graph: Optional[dict] = None,
61
92
  anti_preferences: Optional[list] = None,
62
93
  protected_dimensions: Optional[dict] = None,
94
+ # PR2 — creative controls. All optional; legacy callers unaffected.
95
+ freshness: float = 0.5,
96
+ creativity_profile: str = "",
97
+ sacred_elements: Optional[list] = None,
98
+ synth_hints: Optional[dict] = None,
63
99
  ) -> SessionKernel:
64
100
  """Build a SessionKernel from raw data.
65
101
 
66
102
  All optional fields degrade gracefully to empty defaults.
67
103
  The kernel_id is deterministic from the core inputs so it's stable
68
104
  within the same turn context.
105
+
106
+ The PR2 creative-control fields (freshness, creativity_profile,
107
+ sacred_elements, synth_hints) are intentionally excluded from the
108
+ kernel_id hash so existing callers see no identity changes. Producers
109
+ that need these fields to influence identity can compose their own
110
+ derived id downstream.
69
111
  """
70
112
  # Deterministic kernel_id from inputs
71
113
  id_seed = json.dumps(
@@ -93,4 +135,8 @@ def build_session_kernel(
93
135
  taste_graph=taste_graph or {},
94
136
  anti_preferences=anti_preferences or [],
95
137
  protected_dimensions=protected_dimensions or {},
138
+ freshness=freshness,
139
+ creativity_profile=creativity_profile,
140
+ sacred_elements=sacred_elements or [],
141
+ synth_hints=synth_hints or {},
96
142
  )
@@ -7,6 +7,8 @@ Tools:
7
7
 
8
8
  from __future__ import annotations
9
9
 
10
+ from typing import Optional
11
+
10
12
  from fastmcp import Context
11
13
 
12
14
  from ..server import mcp
@@ -79,6 +81,10 @@ def get_session_kernel(
79
81
  request_text: str = "",
80
82
  mode: str = "improve",
81
83
  aggression: float = 0.5,
84
+ freshness: float = 0.5,
85
+ creativity_profile: str = "",
86
+ sacred_elements: Optional[list] = None,
87
+ synth_hints: Optional[dict] = None,
82
88
  ) -> dict:
83
89
  """Build the unified turn snapshot for V2 orchestration.
84
90
 
@@ -86,11 +92,27 @@ def get_session_kernel(
86
92
  Assembles: session info, capability state, action ledger, taste profile,
87
93
  anti-preferences, and session memory into one canonical snapshot.
88
94
 
89
- mode: observe | improve | explore | finish | diagnose
90
- aggression: 0.0 (subtle) to 1.0 (bold)
95
+ Core params:
96
+ mode: observe | improve | explore | finish | diagnose
97
+ aggression: 0.0 (subtle) to 1.0 (bold) — execution boldness.
98
+
99
+ Creative controls (PR2 — branch-native migration, optional):
100
+ freshness: 0.0 (don't surprise me) to 1.0 (surprise me). Read by
101
+ producers (Wonder, synthesis_brain, composer) to bias branch
102
+ generation. Distinct from aggression, which is about applying
103
+ a single move boldly; freshness is about how far to roam.
104
+ creativity_profile: shorthand producer philosophy tag. Known values
105
+ include "surgeon" (targeted), "alchemist" (transformative),
106
+ "sculptor" (synthesis-focused). Empty ⇒ producer picks a default.
107
+ sacred_elements: caller-asserted list of sacred elements that
108
+ override or augment what song_brain infers. Shape matches
109
+ song_brain entries: {element_type, description, salience}.
110
+ synth_hints: focus hints for synthesis_brain; shape is open in PR2
111
+ and firms up in PR9. Typical keys: track_indices, device_paths,
112
+ target_timbre, preferred_devices.
91
113
 
92
114
  Returns: SessionKernel dict with kernel_id, session topology, capabilities,
93
- memory context, and routing hints.
115
+ memory context, routing hints, and (if provided) creative controls.
94
116
  """
95
117
  from .session_kernel import build_session_kernel
96
118
 
@@ -179,6 +201,10 @@ def get_session_kernel(
179
201
  session_memory=session_mem,
180
202
  taste_graph=taste_graph,
181
203
  anti_preferences=anti_prefs,
204
+ freshness=freshness,
205
+ creativity_profile=creativity_profile,
206
+ sacred_elements=sacred_elements,
207
+ synth_hints=synth_hints,
182
208
  )
183
209
 
184
210
  # Populate routing hints from conductor when request context is available
@@ -0,0 +1,169 @@
1
+ """Spectral classification of Simpler-slice drum break segments.
2
+
3
+ Lessons from the 2026-04-18 creative session that drove this module:
4
+
5
+ - Never assume slice 0 is a kick. Slice content depends on transient
6
+ detection order in the source audio, not drum-rack convention.
7
+ - Snares ALWAYS have 20-35% mid-frequency energy (drum body resonance).
8
+ Hi-hats / cymbals have <25% mid (they're thin metal discs with no
9
+ resonant body). This mid-content threshold is the critical separator
10
+ that tripped up the first classification pass.
11
+ - Ghosts are defined by low peak amplitude (<0.35) regardless of spectrum.
12
+
13
+ Thresholds validated against "Break Ghosts 90 bpm" (Ableton Core Library)
14
+ in the session. See ``feedback_analyze_slices_before_programming`` memory
15
+ entry for the full story and the specific slice map for that sample.
16
+
17
+ This module is pure Python (numpy only). No LivePilot / Ableton /
18
+ FastMCP dependencies — testable and runnable in isolation.
19
+ """
20
+ from __future__ import annotations
21
+
22
+ from typing import List, Literal, Sequence, TypedDict
23
+
24
+ import numpy as np
25
+
26
+
27
+ Label = Literal["KICK", "SNARE", "HAT", "ghost"]
28
+
29
+
30
+ class SliceFeatures(TypedDict):
31
+ """Per-slice classification result plus the features that drove the decision."""
32
+
33
+ index: int
34
+ label: Label
35
+ peak: float
36
+ rms: float
37
+ sub_pct: float
38
+ low_pct: float
39
+ mid_pct: float
40
+ high_pct: float
41
+
42
+
43
+ # ---------------------------------------------------------------------------
44
+ # Band boundaries (Hz). Tuned against the 2026-04-18 session's working set.
45
+ # ---------------------------------------------------------------------------
46
+
47
+ _SUB_MAX = 100.0 # sub-bass (kick fundamentals live here)
48
+ _LOW_MAX = 300.0 # body / low-mid (kick "thud" + bass fundamentals)
49
+ _MID_MAX = 3000.0 # presence / drum body / vocals
50
+ # Everything above _MID_MAX is "high" (sizzle / air / cymbal).
51
+
52
+
53
+ # ---------------------------------------------------------------------------
54
+ # Classification thresholds. DO NOT loosen these without re-validating on a
55
+ # real break — the 2026-04-18 session proved that relaxing the HAT mid-cap
56
+ # to 32% misclassifies loud snares as hats.
57
+ # ---------------------------------------------------------------------------
58
+
59
+ _GHOST_PEAK = 0.35 # Below this peak → ghost regardless of spectrum
60
+ _KICK_SUB_LOW_MIN = 0.45 # sub + low must be this dominant
61
+ _KICK_HIGH_MAX = 0.40 # kicks never have >40% high
62
+ _SNARE_MID_MIN = 0.25 # snares HAVE a drum body (mid content)
63
+ _SNARE_HIGH_MIN = 0.40 # + sizzle
64
+ _SNARE_PEAK_MIN = 0.60 # + loud enough to not be a ghost
65
+ _HAT_HIGH_MIN = 0.70 # hats are thin metal — overwhelmingly high
66
+ _HAT_MID_MAX = 0.25 # hats have almost no drum body
67
+
68
+
69
+ def _band_energy(segment: np.ndarray, sr: int) -> tuple[float, float, float, float]:
70
+ """Return (sub, low, mid, high) energy fractions that sum to ~1.0.
71
+
72
+ Uses an rFFT. If the segment is empty or silent, returns equal quarters
73
+ so downstream logic doesn't have to handle zero-sum edge cases (the
74
+ caller still sees silence via the peak/rms fields).
75
+ """
76
+ if len(segment) < 2:
77
+ return 0.25, 0.25, 0.25, 0.25
78
+ spec = np.abs(np.fft.rfft(segment))
79
+ total = float(spec.sum())
80
+ if total <= 0:
81
+ return 0.25, 0.25, 0.25, 0.25
82
+ freqs = np.fft.rfftfreq(len(segment), 1.0 / sr)
83
+ sub = float(spec[freqs < _SUB_MAX].sum() / total)
84
+ low = float(spec[(freqs >= _SUB_MAX) & (freqs < _LOW_MAX)].sum() / total)
85
+ mid = float(spec[(freqs >= _LOW_MAX) & (freqs < _MID_MAX)].sum() / total)
86
+ high = float(spec[freqs >= _MID_MAX].sum() / total)
87
+ return sub, low, mid, high
88
+
89
+
90
+ def classify_segment(segment: np.ndarray, sr: int) -> Label:
91
+ """Classify a single audio segment as KICK / SNARE / HAT / ghost.
92
+
93
+ Returns the label string. See module docstring for the reasoning behind
94
+ each threshold.
95
+ """
96
+ if len(segment) < 2:
97
+ return "ghost"
98
+ peak = float(np.max(np.abs(segment)))
99
+ if peak < _GHOST_PEAK:
100
+ return "ghost"
101
+
102
+ sub, low, mid, high = _band_energy(segment, sr)
103
+
104
+ # KICK: sub+low dominance with limited high content.
105
+ if (sub + low) >= _KICK_SUB_LOW_MIN and high < _KICK_HIGH_MAX:
106
+ return "KICK"
107
+
108
+ # HAT: overwhelmingly high-freq, almost no drum-body mid content.
109
+ if high >= _HAT_HIGH_MIN and mid <= _HAT_MID_MAX:
110
+ return "HAT"
111
+
112
+ # SNARE: broadband (mid body + high sizzle) AND loud.
113
+ if (
114
+ mid >= _SNARE_MID_MIN
115
+ and high >= _SNARE_HIGH_MIN
116
+ and peak >= _SNARE_PEAK_MIN
117
+ ):
118
+ return "SNARE"
119
+
120
+ # Fallback for ambiguous mid/high dominant loud hits — usually
121
+ # snares with unusual spectrum (e.g., rim-shots, piccolo snare).
122
+ if peak >= _SNARE_PEAK_MIN and (mid + high) >= 0.70:
123
+ return "SNARE"
124
+
125
+ # If nothing else matched but there's real energy, call it a hat
126
+ # rather than a ghost (ghost is reserved for quiet hits).
127
+ return "HAT"
128
+
129
+
130
+ def classify_slices(
131
+ audio: np.ndarray,
132
+ sr: int,
133
+ frame_boundaries: Sequence[int],
134
+ ) -> List[SliceFeatures]:
135
+ """Classify every slice defined by ``frame_boundaries``.
136
+
137
+ ``frame_boundaries`` is a list of N+1 frame positions defining N slices.
138
+ For a sample with slices starting at frames [0, 1000, 3000, 5000] and
139
+ total length 10000 frames, pass [0, 1000, 3000, 5000, 10000].
140
+
141
+ Stereo input is auto-downmixed (mean of the two channels).
142
+
143
+ Returns a list of ``SliceFeatures`` in slice-index order.
144
+ """
145
+ if audio.ndim > 1:
146
+ audio = audio.mean(axis=1)
147
+
148
+ results: List[SliceFeatures] = []
149
+ for i in range(len(frame_boundaries) - 1):
150
+ start = int(frame_boundaries[i])
151
+ end = int(frame_boundaries[i + 1])
152
+ segment = audio[start:end]
153
+ label = classify_segment(segment, sr)
154
+ peak = float(np.max(np.abs(segment))) if len(segment) else 0.0
155
+ rms = float(np.sqrt(np.mean(segment ** 2))) if len(segment) else 0.0
156
+ sub, low, mid, high = _band_energy(segment, sr)
157
+ results.append(
158
+ SliceFeatures(
159
+ index=i,
160
+ label=label,
161
+ peak=peak,
162
+ rms=rms,
163
+ sub_pct=sub,
164
+ low_pct=low,
165
+ mid_pct=mid,
166
+ high_pct=high,
167
+ )
168
+ )
169
+ return results