livepilot 1.17.0 → 1.17.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,12 +19,16 @@ from .models import TimbralFingerprint
19
19
 
20
20
  # ── Band-based brightness / warmth mapping ──────────────────────────────
21
21
  #
22
- # The M4L analyzer returns an 8-band spectrum by default. When a full
23
- # spectrum dict is passed, we look for these band keys in order. If the
24
- # raw {freq: magnitude} shape is passed instead, we fall back to a coarser
25
- # low/mid/high split.
22
+ # Two upstream producers feed this extractor with different band schemas:
23
+ # 1. get_master_spectrum (M4L analyzer) — v1.16+: 9 bands (sub_low,
24
+ # sub, low, low_mid, mid, high_mid, high, presence, air);
25
+ # pre-v1.16: 8 bands (no sub_low).
26
+ # 2. analyze_spectrum_offline — 8 bands with legacy names
27
+ # (sub, low, low_mid, mid, high_mid, high, very_high, ultra).
28
+ # We index the union of both name sets below; `_band_energy` uses dict.get
29
+ # so missing bands simply return 0 without complaint.
26
30
 
27
- _BANDS = ("sub", "low", "low_mid", "mid", "high_mid", "high", "very_high", "ultra")
31
+ _BANDS = ("sub_low", "sub", "low", "low_mid", "mid", "high_mid", "high", "presence", "air", "very_high", "ultra")
28
32
 
29
33
 
30
34
  def _band_energy(spectrum: Optional[dict], band: str) -> float:
@@ -55,9 +59,11 @@ def extract_timbre_fingerprint(
55
59
  Inputs are all optional — the function degrades gracefully when only
56
60
  some dimensions are measurable.
57
61
 
58
- spectrum: either {sub, low, low_mid, mid, high_mid, high, very_high, ultra}
59
- or {"bands": {...}} the 8-band shape returned by get_master_spectrum /
60
- analyze_spectrum_offline. Missing bands default to 0.
62
+ spectrum: either the 9-band shape from get_master_spectrum
63
+ ({sub_low, sub, low, low_mid, mid, high_mid, high, presence, air}),
64
+ the legacy 8-band shape from analyze_spectrum_offline
65
+ ({sub, low, low_mid, mid, high_mid, high, very_high, ultra}),
66
+ or {"bands": {...}} wrapping either. Missing bands default to 0.
61
67
  loudness: {"rms": float, "peak": float, "lufs": float, "lra": float} —
62
68
  output shape from analyze_loudness.
63
69
  spectral_shape: FluCoMa descriptors when available — {"centroid", "flatness",
@@ -35,6 +35,12 @@ from .taste import (
35
35
  compute_taste_fit,
36
36
  get_taste_profile,
37
37
  )
38
+ from .iteration import (
39
+ iterate_toward_goal_engine,
40
+ iterate_toward_goal_engine_async,
41
+ IterationResult,
42
+ IterationStep,
43
+ )
38
44
 
39
45
  __all__ = [
40
46
  "QUALITY_DIMENSIONS", "MEASURABLE_PROXIES",
@@ -49,4 +55,8 @@ __all__ = [
49
55
  "analyze_outcome_history",
50
56
  "compute_taste_fit",
51
57
  "get_taste_profile",
58
+ "iterate_toward_goal_engine",
59
+ "iterate_toward_goal_engine_async",
60
+ "IterationResult",
61
+ "IterationStep",
52
62
  ]
@@ -0,0 +1,344 @@
1
+ """Iteration engine — closes the evaluation loop by running experiments
2
+ repeatedly against a compiled GoalVector until threshold or timeout.
3
+
4
+ Pure-python: takes callables for experiment create/run/commit/discard so
5
+ tests can substitute in-memory fakes without an Ableton connection. The
6
+ callables may be sync or async — the engine uses `iterate_toward_goal_engine`
7
+ (sync) for the former and `iterate_toward_goal_engine_async` for the latter.
8
+ """
9
+ from __future__ import annotations
10
+
11
+ import inspect
12
+ from dataclasses import dataclass, field
13
+ from typing import Any, Awaitable, Callable, Optional, Union
14
+
15
+
16
@dataclass
class IterationStep:
    """Record of a single pass of the outer loop (one experiment)."""
    iteration: int                          # 0-based outer-loop index
    experiment_id: str                      # id returned by create_experiment_fn
    winner_branch_id: Optional[str]         # None when no branch survived
    winner_score: float                     # top-ranked branch's score
    threshold_met: bool                     # True when this step triggered a commit
    note: str = ""                          # human-readable summary of the outcome

    def to_dict(self) -> dict:
        """Serialize to a plain dict (safe for JSON transport)."""
        fields_in_order = (
            "iteration",
            "experiment_id",
            "winner_branch_id",
            "winner_score",
            "threshold_met",
            "note",
        )
        return {name: getattr(self, name) for name in fields_in_order}
35
+
36
+
37
@dataclass
class IterationResult:
    """Final outcome of iterate_toward_goal.

    status values:
      - "committed"         — a winner hit threshold and was committed permanently
      - "exhausted"         — max_iterations reached; best-so-far committed
                              (on_timeout=commit_best)
      - "timeout_no_commit" — max_iterations reached; nothing committed
                              (on_timeout=discard_on_timeout)
      - "no_candidates"     — caller provided empty candidate_move_sets
      - "error"             — unrecoverable error; see reason
    """
    status: str
    iterations_run: int
    committed_experiment_id: Optional[str]
    committed_branch_id: Optional[str]
    final_score: float
    steps: list[IterationStep] = field(default_factory=list)
    reason: str = ""

    def to_dict(self) -> dict:
        """Serialize to a plain dict; each step is serialized recursively."""
        payload = {
            name: getattr(self, name)
            for name in (
                "status",
                "iterations_run",
                "committed_experiment_id",
                "committed_branch_id",
                "final_score",
            )
        }
        payload["steps"] = [step.to_dict() for step in self.steps]
        payload["reason"] = self.reason
        return payload
66
+
67
+
68
def iterate_toward_goal_engine(
    candidate_move_sets: list,
    threshold: float,
    max_iterations: int,
    create_experiment_fn: Callable[[list], str],
    run_experiment_fn: Callable[[str], Any],
    commit_fn: Callable[[str, str], dict],
    discard_fn: Callable[[str], dict],
    on_timeout: str = "commit_best",
) -> IterationResult:
    """Drive experiments until winner_score >= threshold or the budget runs out.

    Pure orchestration — every side effect goes through the injected
    callbacks, which may be sync or async. When any callback is a coroutine
    function the whole loop runs under `asyncio.run`; otherwise the plain
    sync core is used directly, avoiding event-loop overhead in tests.

    See the module docstring for the full contract. Invariant: never issues
    raw undo calls — per-branch undo is the responsibility of
    run_experiment_fn. This loop only chooses commit vs discard.
    """
    import asyncio

    loop_kwargs = dict(
        candidate_move_sets=candidate_move_sets,
        threshold=threshold,
        max_iterations=max_iterations,
        create_experiment_fn=create_experiment_fn,
        run_experiment_fn=run_experiment_fn,
        commit_fn=commit_fn,
        discard_fn=discard_fn,
        on_timeout=on_timeout,
    )

    callbacks = (create_experiment_fn, run_experiment_fn, commit_fn, discard_fn)
    if any(inspect.iscoroutinefunction(cb) for cb in callbacks):
        # At least one coroutine callback — run the async core to completion.
        return asyncio.run(_iterate_async_core(**loop_kwargs))
    return _iterate_sync_core(**loop_kwargs)
122
+
123
+
124
async def iterate_toward_goal_engine_async(
    candidate_move_sets: list,
    threshold: float,
    max_iterations: int,
    create_experiment_fn: Callable[[list], Any],
    run_experiment_fn: Callable[[str], Any],
    commit_fn: Callable[[str, str], Any],
    discard_fn: Callable[[str], Any],
    on_timeout: str = "commit_best",
) -> IterationResult:
    """Async entry point for callers already inside an event loop
    (the MCP tool wrapper) — delegates straight to the shared async core."""
    result = await _iterate_async_core(
        candidate_move_sets,
        threshold,
        max_iterations,
        create_experiment_fn,
        run_experiment_fn,
        commit_fn,
        discard_fn,
        on_timeout,
    )
    return result
145
+
146
+
147
+ # ── Internal cores ─────────────────────────────────────────────────────────
148
+
149
def _iterate_sync_core(
    candidate_move_sets,
    threshold,
    max_iterations,
    create_experiment_fn,
    run_experiment_fn,
    commit_fn,
    discard_fn,
    on_timeout,
) -> IterationResult:
    """Synchronous loop body; see iterate_toward_goal_engine for the contract."""
    if not candidate_move_sets:
        return IterationResult(
            status="no_candidates",
            iterations_run=0,
            committed_experiment_id=None,
            committed_branch_id=None,
            final_score=0.0,
            reason="candidate_move_sets is empty",
        )

    history: list[IterationStep] = []
    # -1.0 sentinel so a 0.0-scoring winner still registers as best-so-far.
    top_score = -1.0
    top_exp: Optional[str] = None
    top_branch: Optional[str] = None
    limit = min(max_iterations, len(candidate_move_sets))

    for idx in range(limit):
        exp_id = create_experiment_fn(candidate_move_sets[idx])
        branch_id, score = run_experiment_fn(exp_id)

        hit = score >= threshold and branch_id is not None
        if hit:
            summary = f"committed on iteration {idx}"
        else:
            summary = f"below threshold (need {threshold}, got {score})"
        history.append(IterationStep(
            iteration=idx,
            experiment_id=exp_id,
            winner_branch_id=branch_id,
            winner_score=score,
            threshold_met=hit,
            note=summary,
        ))

        if hit:
            # Discard any superseded best-so-far before the permanent commit —
            # otherwise the old non-winning experiment leaks in the store.
            if top_exp is not None and top_exp != exp_id:
                discard_fn(top_exp)
            commit_fn(exp_id, branch_id)
            return IterationResult(
                status="committed",
                iterations_run=idx + 1,
                committed_experiment_id=exp_id,
                committed_branch_id=branch_id,
                final_score=score,
                steps=history,
                reason=f"threshold {threshold} met on iteration {idx}",
            )

        if branch_id is not None and score > top_score:
            # New best-so-far supersedes the previous one; free its slot.
            if top_exp is not None:
                discard_fn(top_exp)
            top_score, top_exp, top_branch = score, exp_id, branch_id
        else:
            discard_fn(exp_id)

    if on_timeout == "commit_best" and top_exp and top_branch:
        commit_fn(top_exp, top_branch)
        return IterationResult(
            status="exhausted",
            iterations_run=limit,
            committed_experiment_id=top_exp,
            committed_branch_id=top_branch,
            final_score=top_score,
            steps=history,
            reason=(
                f"max_iterations={limit} reached, threshold {threshold} never met; "
                f"committed best-so-far with score {top_score}"
            ),
        )

    # Either policy forbids committing, or nothing ever scored at all.
    if top_exp:
        discard_fn(top_exp)
    return IterationResult(
        status="timeout_no_commit",
        iterations_run=limit,
        committed_experiment_id=None,
        committed_branch_id=None,
        final_score=max(top_score, 0.0),
        steps=history,
        reason=f"max_iterations={limit} reached, policy={on_timeout}, no commit issued",
    )
245
+
246
+
247
async def _iterate_async_core(
    candidate_move_sets,
    threshold,
    max_iterations,
    create_experiment_fn,
    run_experiment_fn,
    commit_fn,
    discard_fn,
    on_timeout,
) -> IterationResult:
    """Async loop body; callback results are awaited when awaitable."""

    async def _resolve(value):
        # Accept both plain values and awaitables from the injected callbacks.
        if inspect.isawaitable(value):
            return await value
        return value

    if not candidate_move_sets:
        return IterationResult(
            status="no_candidates",
            iterations_run=0,
            committed_experiment_id=None,
            committed_branch_id=None,
            final_score=0.0,
            reason="candidate_move_sets is empty",
        )

    history: list[IterationStep] = []
    # -1.0 sentinel so a 0.0-scoring winner still registers as best-so-far.
    top_score = -1.0
    top_exp: Optional[str] = None
    top_branch: Optional[str] = None
    limit = min(max_iterations, len(candidate_move_sets))

    for idx in range(limit):
        exp_id = await _resolve(create_experiment_fn(candidate_move_sets[idx]))
        branch_id, score = await _resolve(run_experiment_fn(exp_id))

        hit = score >= threshold and branch_id is not None
        if hit:
            summary = f"committed on iteration {idx}"
        else:
            summary = f"below threshold (need {threshold}, got {score})"
        history.append(IterationStep(
            iteration=idx,
            experiment_id=exp_id,
            winner_branch_id=branch_id,
            winner_score=score,
            threshold_met=hit,
            note=summary,
        ))

        if hit:
            # Discard any superseded best-so-far before the permanent commit.
            if top_exp is not None and top_exp != exp_id:
                await _resolve(discard_fn(top_exp))
            await _resolve(commit_fn(exp_id, branch_id))
            return IterationResult(
                status="committed",
                iterations_run=idx + 1,
                committed_experiment_id=exp_id,
                committed_branch_id=branch_id,
                final_score=score,
                steps=history,
                reason=f"threshold {threshold} met on iteration {idx}",
            )

        if branch_id is not None and score > top_score:
            # New best-so-far supersedes the previous one; free its slot.
            if top_exp is not None:
                await _resolve(discard_fn(top_exp))
            top_score, top_exp, top_branch = score, exp_id, branch_id
        else:
            await _resolve(discard_fn(exp_id))

    if on_timeout == "commit_best" and top_exp and top_branch:
        await _resolve(commit_fn(top_exp, top_branch))
        return IterationResult(
            status="exhausted",
            iterations_run=limit,
            committed_experiment_id=top_exp,
            committed_branch_id=top_branch,
            final_score=top_score,
            steps=history,
            reason=(
                f"max_iterations={limit} reached, threshold {threshold} never met; "
                f"committed best-so-far with score {top_score}"
            ),
        )

    # Either policy forbids committing, or nothing ever scored at all.
    if top_exp:
        await _resolve(discard_fn(top_exp))
    return IterationResult(
        status="timeout_no_commit",
        iterations_run=limit,
        committed_experiment_id=None,
        committed_branch_id=None,
        final_score=max(top_score, 0.0),
        steps=history,
        reason=f"max_iterations={limit} reached, policy={on_timeout}, no commit issued",
    )
@@ -240,9 +240,9 @@ def evaluate_move(
240
240
  Takes before/after sonic snapshots and the active GoalVector.
241
241
  Returns a score and keep/undo recommendation.
242
242
 
243
- Snapshots should contain: spectrum (8-band dict), rms, peak.
244
- Get these from get_master_spectrum + get_master_rms before and after
245
- making changes.
243
+ Snapshots should contain: spectrum (9-band dict sub_low → air, or
244
+ 8-band from pre-v1.16 .amxd builds), rms, peak. Get these from
245
+ get_master_spectrum + get_master_rms before and after making changes.
246
246
 
247
247
  Hard rules enforce undo when:
248
248
  - No measurable improvement (delta <= 0)
@@ -471,3 +471,194 @@ def route_request(
471
471
 
472
472
  plan = conductor.classify_request(request)
473
473
  return plan.to_dict()
474
+
475
+
476
# ── iterate_toward_goal (closed evaluation loop) ──────────────────────


@mcp.tool()
async def iterate_toward_goal(
    ctx: Context,
    goal_vector: dict | str,
    candidate_move_sets: list,
    threshold: float = 0.70,
    max_iterations: int = 3,
    on_timeout: str = "commit_best",
    render_verify: bool = False,
) -> dict:
    """Close the evaluation loop: run experiments until threshold or timeout.

    Each iteration creates an experiment from one candidate_move_sets entry,
    runs all branches (which auto-undo per-branch via the experiment engine),
    and checks the top-ranked branch's score against the GoalVector. If score
    >= threshold, commit that branch permanently and stop. Otherwise discard
    the experiment and try the next candidate set. On timeout, commit the
    best-so-far (on_timeout='commit_best') or commit nothing
    (on_timeout='discard_on_timeout').

    Args:
        goal_vector: Compiled GoalVector dict (from compile_goal_vector) or
            JSON string. Provides the scoring target passed through to the
            evaluation scorer inside each run_experiment call.
        candidate_move_sets: List of move_id lists — one per iteration.
            Example: [["make_punchier", "widen_stereo"], ["tighten_low_end"]].
            Iteration 0 tries the first list, iteration 1 the second, etc.
            If shorter than max_iterations, iteration stops when exhausted.
        threshold: Winner score required to commit early. 0.0–1.0. Default 0.70.
        max_iterations: Hard cap on outer-loop iterations. Default 3.
        on_timeout: "commit_best" (commit highest-scoring experiment at end)
            or "discard_on_timeout" (no commit if threshold never met).
        render_verify: When True each branch captures + analyzes audio
            (~6s extra per branch). Default False.

    Returns: IterationResult dict with status, iterations_run,
        committed_experiment_id, committed_branch_id, final_score, steps,
        reason.

    Safety: Only commits when threshold_met OR (on_timeout='commit_best' AND
        best-so-far exists). Never double-undoes — per-branch undo is handled
        inside run_experiment; this tool only issues commit or discard.
    """
    # Function-scope imports keep the heavy experiment machinery off the
    # module import path (presumably to avoid import cycles — confirm).
    import time as _time
    from ..branches import seed_from_move_id
    from ..experiment import engine as exp_engine
    from ..experiment.tools import (
        _capture_snapshot,
        _capture_snapshot_with_render_verify,
    )
    from ..semantic_moves import registry, compiler
    from ..evaluation.policy import classify_branch_outcome
    from ._agent_os_engine import iterate_toward_goal_engine_async

    # Accept either a dict or a JSON string for goal_vector.
    gv_dict = _parse_json_param(goal_vector, "goal_vector")

    # Shape check: candidate_move_sets must be list[list[str]].
    if not isinstance(candidate_move_sets, list) or not all(
        isinstance(s, list) and all(isinstance(m, str) for m in s)
        for s in candidate_move_sets
    ):
        return {
            "error": (
                "candidate_move_sets must be a list of lists of move_id strings"
            )
        }

    ableton = _get_ableton(ctx)
    bridge = ctx.lifespan_context.get("m4l")
    mcp_registry = ctx.lifespan_context.get("mcp_dispatch", {})

    # Pre-validate the GoalVector once — the eval_fn closure reuses this.
    goal = engine.validate_goal_vector(
        request_text=gv_dict.get("request_text", "iterate_toward_goal"),
        targets=gv_dict.get("targets", {}),
        protect=gv_dict.get("protect", {}),
        mode=gv_dict.get("mode", "improve"),
        aggression=float(gv_dict.get("aggression", 0.5)),
        research_mode=gv_dict.get("research_mode", "none"),
    )

    # ── Callbacks wire the pure-logic engine to real experiment I/O ──

    async def _create(move_ids: list[str]) -> str:
        # One experiment per candidate set; seeds come from semantic move ids.
        seeds = [seed_from_move_id(mid) for mid in move_ids]
        # NOTE(review): second-resolution timestamp — two _create calls within
        # the same second share a kernel_id; confirm uniqueness isn't required.
        kernel_id = f"iter_kern_{int(_time.time())}"
        exp = exp_engine.create_experiment_from_seeds(
            request_text=gv_dict.get("request_text", "iterate_toward_goal"),
            seeds=seeds,
            kernel_id=kernel_id,
        )
        return exp.experiment_id

    async def _run(experiment_id: str):
        # Returns (winner_branch_id, winner_score); (None, 0.0) when the
        # experiment is unknown or no branch survived evaluation.
        experiment = exp_engine.get_experiment(experiment_id)
        if experiment is None:
            return None, 0.0

        # render_verify trades ~6s per branch for an audio-backed snapshot.
        if render_verify:
            capture_fn = lambda: _capture_snapshot_with_render_verify(ctx, 2.0)
        else:
            capture_fn = lambda: _capture_snapshot(ctx)

        for branch in experiment.branches:
            if branch.status != "pending":
                continue

            # Compile plan from semantic move when branch doesn't carry one
            if branch.compiled_plan is None and branch.move_id:
                move = registry.get_move(branch.move_id)
                if move is None:
                    branch.status = "failed"
                    continue
                session_info = ableton.send_command("get_session_info")
                kernel = {"session_info": session_info, "mode": "explore"}
                plan = compiler.compile(move, kernel)
                branch.compiled_plan = plan.to_dict()

            # Still no plan after the compile attempt — nothing to execute.
            if branch.compiled_plan is None:
                branch.status = "failed"
                continue

            await exp_engine.run_branch_async(
                branch=branch,
                ableton=ableton,
                compiled_plan=branch.compiled_plan,
                capture_fn=capture_fn,
                bridge=bridge,
                mcp_registry=mcp_registry,
                ctx=ctx,
            )

            # NOTE(review): eval_fn closes only over loop-invariant names
            # (goal, engine, classify_branch_outcome) — re-created per branch;
            # hoisting it above the loop would be equivalent.
            def eval_fn(before, after):
                score_result = engine.compute_evaluation_score(goal, before, after)
                # Precedence: (not keep_change) AND ("protected" in notes) —
                # i.e. only a keep_change=False verdict that mentions a
                # protected dimension counts as a protection violation.
                outcome = classify_branch_outcome(
                    score=score_result.get("score", 0.0),
                    protection_violated=not score_result.get("keep_change", True)
                    and "protected" in " ".join(score_result.get("notes", [])).lower(),
                    measurable_count=0,
                    target_count=0,
                    goal_progress=score_result.get("goal_progress", 0.0),
                    exploration_rules=False,
                )
                return {
                    "score": outcome.score,
                    "keep_change": outcome.keep_change,
                    "status": outcome.status,
                    "note": outcome.note,
                    "dimension_changes": score_result.get("dimension_changes", {}),
                }

            exp_engine.evaluate_branch(branch, eval_fn)
            # Map evaluation verdict onto branch status for ranking.
            if branch.evaluation and branch.evaluation.get("status") == "keep":
                branch.status = "evaluated"
            elif branch.evaluation and branch.evaluation.get("status") == "undo":
                branch.status = "rejected"

        ranked = experiment.ranked_branches()
        if not ranked:
            return None, 0.0
        top = ranked[0]
        return top.branch_id, float(top.score or 0.0)

    async def _commit(experiment_id: str, branch_id: str) -> dict:
        # Permanent commit of the winning branch — the engine calls this at
        # most once per iterate_toward_goal invocation.
        return await exp_engine.commit_branch_async(
            exp_engine.get_experiment(experiment_id),
            branch_id,
            ableton,
            bridge=bridge,
            mcp_registry=mcp_registry,
            ctx=ctx,
        )

    async def _discard(experiment_id: str) -> dict:
        # Async for signature symmetry with the other callbacks;
        # discard_experiment itself is called synchronously here.
        return exp_engine.discard_experiment(experiment_id)

    result = await iterate_toward_goal_engine_async(
        candidate_move_sets=candidate_move_sets,
        threshold=float(threshold),
        max_iterations=int(max_iterations),
        create_experiment_fn=_create,
        run_experiment_fn=_run,
        commit_fn=_commit,
        discard_fn=_discard,
        on_timeout=on_timeout,
    )
    return result.to_dict()
@@ -214,11 +214,24 @@ async def get_master_spectrum(
214
214
  samples: int = 0,
215
215
  sub_detail: bool = False,
216
216
  ) -> dict:
217
- """Get 8-band frequency analysis of the master bus.
218
-
219
- Returns band energies: sub (20-60Hz), low (60-200Hz), low_mid (200-500Hz),
220
- mid (500-2kHz), high_mid (2-4kHz), high (4-8kHz), presence (8-12kHz),
221
- air (12-20kHz). Values 0.0-1.0.
217
+ """Get 9-band frequency analysis of the master bus.
218
+
219
+ Returns band energies (fffb~ center frequencies shown in parens):
220
+ sub_low 20-60 Hz (~35 Hz center) — kick fundamentals, Villalobos subs
221
+ sub 60-120 Hz (~85 Hz) — 808s, sub-bass body
222
+ low 120-250 Hz (~175 Hz) — bass body, warmth
223
+ low_mid 250-500 Hz (~350 Hz) — mud zone, male vocal lows
224
+ mid 500-1 kHz (~700 Hz) — vocal presence, snare body
225
+ high_mid 1-2 kHz (~1.4 kHz) — consonants, pick attack
226
+ high 2-4 kHz (~2.8 kHz) — presence, vocal intelligibility
227
+ presence 4-8 kHz (~5.6 kHz) — cymbal definition, breathiness
228
+ air 8-20 kHz (~12 kHz) — shimmer, sparkle
229
+ Values 0.0-1.0.
230
+
231
+ Older .amxd builds (pre-v1.16) emit the legacy 8-band layout without the
232
+ explicit `sub_low` split — the server auto-detects band count from the OSC
233
+ payload and picks the right name set. Re-freeze the Max device to get the
234
+ 9-band resolution.
222
235
 
223
236
  Also returns detected key/scale if enough audio has been analyzed.
224
237
  Requires LivePilot Analyzer on master track.
@@ -242,7 +255,7 @@ async def get_master_spectrum(
242
255
  Pass `sub_detail=True` to attach a `sub_detail` dict with three
243
256
  finer buckets: `sub_deep` (20-45 Hz), `sub_mid` (45-60 Hz),
244
257
  `sub_high` (60-80 Hz). Derived from the FluCoMa mel spectrum
245
- (40 bands) rather than the 8-band cache, so it requires FluCoMa
258
+ (40 bands) rather than the 9-band cache, so it requires FluCoMa
246
259
  to be active. When FluCoMa is unavailable, sub_detail is omitted
247
260
  with a `sub_detail_warning` field explaining why.
248
261
  """
package/package.json CHANGED
@@ -1,8 +1,8 @@
1
1
  {
2
2
  "name": "livepilot",
3
- "version": "1.17.0",
3
+ "version": "1.17.2",
4
4
  "mcpName": "io.github.dreamrec/livepilot",
5
- "description": "Agentic production system for Ableton Live 12 — 426 tools, 52 domains. Device atlas (1305 devices), sample engine (Splice + browser + filesystem), auto-composition, spectral perception, technique memory, creative intelligence (12 engines)",
5
+ "description": "Agentic production system for Ableton Live 12 — 427 tools, 52 domains. Device atlas (1305 devices), sample engine (Splice + browser + filesystem), auto-composition, spectral perception, technique memory, creative intelligence (12 engines)",
6
6
  "author": "Pilot Studio",
7
7
  "license": "BSL-1.1",
8
8
  "type": "commonjs",
@@ -5,7 +5,7 @@ Entry point for the ControlSurface. Ableton calls create_instance(c_instance)
5
5
  when this script is selected in Preferences > Link, Tempo & MIDI.
6
6
  """
7
7
 
8
- __version__ = "1.17.0"
8
+ __version__ = "1.17.2"
9
9
 
10
10
  from _Framework.ControlSurface import ControlSurface
11
11
  from . import router