livepilot 1.23.3 → 1.23.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/CHANGELOG.md +93 -0
  2. package/README.md +106 -8
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/atlas/cross_pack_chain.py +658 -0
  7. package/mcp_server/atlas/demo_story.py +700 -0
  8. package/mcp_server/atlas/extract_chain.py +786 -0
  9. package/mcp_server/atlas/macro_fingerprint.py +554 -0
  10. package/mcp_server/atlas/overlays.py +95 -3
  11. package/mcp_server/atlas/pack_aware_compose.py +1255 -0
  12. package/mcp_server/atlas/preset_resolver.py +238 -0
  13. package/mcp_server/atlas/tools.py +1001 -31
  14. package/mcp_server/atlas/transplant.py +1177 -0
  15. package/mcp_server/mix_engine/state_builder.py +44 -1
  16. package/mcp_server/runtime/capability_state.py +34 -3
  17. package/mcp_server/server.py +45 -24
  18. package/mcp_server/tools/agent_os.py +33 -9
  19. package/mcp_server/tools/analyzer.py +38 -7
  20. package/mcp_server/tools/browser.py +20 -1
  21. package/mcp_server/tools/devices.py +78 -11
  22. package/mcp_server/tools/perception.py +5 -1
  23. package/mcp_server/tools/tracks.py +39 -2
  24. package/mcp_server/user_corpus/__init__.py +48 -0
  25. package/mcp_server/user_corpus/manifest.py +142 -0
  26. package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
  27. package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
  28. package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
  29. package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
  30. package/mcp_server/user_corpus/runner.py +261 -0
  31. package/mcp_server/user_corpus/scanner.py +115 -0
  32. package/mcp_server/user_corpus/scanners/__init__.py +18 -0
  33. package/mcp_server/user_corpus/scanners/adg.py +79 -0
  34. package/mcp_server/user_corpus/scanners/als.py +144 -0
  35. package/mcp_server/user_corpus/scanners/amxd.py +374 -0
  36. package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
  37. package/mcp_server/user_corpus/tools.py +904 -0
  38. package/mcp_server/user_corpus/wizard.py +224 -0
  39. package/package.json +2 -2
  40. package/remote_script/LivePilot/__init__.py +1 -1
  41. package/remote_script/LivePilot/browser.py +7 -2
  42. package/requirements.txt +3 -3
  43. package/server.json +2 -2
@@ -0,0 +1,1177 @@
1
+ """Pack-Atlas Phase C — Transplant Engine.
2
+
3
+ Adapt a structure (demo track set, preset chain, workflow recipe) from one
4
+ musical context to another. Returns a structured translation plan + prose
5
+ reasoning artifact. All data comes from the local JSON sidecar layer —
6
+ no Live connection required.
7
+
8
+ Real sidecar schema (discovered 2026-04-27):
9
+ Demo sidecars (~/.livepilot/atlas-overlays/packs/_demo_parses/<slug>.json):
10
+ {file, name, bpm, scale{root_note:str, name:str},
11
+ tracks[{name, type, id, device_count, devices[{class, user_name, params,
12
+ macros[{index, value}]}],
13
+ routing}],
14
+ scenes[...]}
15
+ NOTE: demo-track macro entries have {index, value} ONLY — no "name" field.
16
+ Macro names are in _preset_parses sidecars, not demo sidecars.
17
+
18
+ Preset sidecars (~/.livepilot/atlas-overlays/packs/_preset_parses/<pack>/<slug>.json):
19
+ {file, name, preset_type, rack_class,
20
+ macros[{index, value:str, name:str}], chains, device_summary, branch_counts}
21
+ NOTE: "name" in macros is the producer-assigned macro label.
22
+
23
+ File naming: demo sidecar files use hyphens in pack and demo slugs,
24
+ e.g. drone-lab__earth.json. The spec uses underscores in entity_id strings
25
+ (drone_lab__earth) — this module translates automatically.
26
+ """
27
+
28
+ from __future__ import annotations
29
+
30
+ import json
31
+ import re
32
+ from functools import lru_cache
33
+ from pathlib import Path
34
+ from typing import Any
35
+
36
# ─── Paths ────────────────────────────────────────────────────────────────────
# Roots of the local JSON sidecar layer under the user's home directory.
# This module only reads these trees; they are produced by the pack scanner.

# Demo-track sidecars: one JSON per demo set, <pack>__<demo>.json
# (hyphenated slugs — see module docstring on underscore/hyphen translation).
DEMO_PARSES_ROOT = (
    Path.home() / ".livepilot" / "atlas-overlays" / "packs" / "_demo_parses"
)
# Preset sidecars: one JSON per preset, grouped in per-pack subdirectories.
PRESET_PARSES_ROOT = (
    Path.home() / ".livepilot" / "atlas-overlays" / "packs" / "_preset_parses"
)
44
+
45
# ─── Mode-degree tables ───────────────────────────────────────────────────────
# Semitone intervals from root for each mode.
# Keys are lower-case mode names as matched in _remap_pitch_class (which
# lower-cases its input and falls back to "major"/"minor" for unknown modes).
# Each value lists the seven scale degrees as semitone offsets from the root.

_MODE_DEGREES: dict[str, list[int]] = {
    "major": [0, 2, 4, 5, 7, 9, 11],
    "minor": [0, 2, 3, 5, 7, 8, 10],
    "phrygian": [0, 1, 3, 5, 7, 8, 10],
    "dorian": [0, 2, 3, 5, 7, 9, 10],
    "mixolydian": [0, 2, 4, 5, 7, 9, 10],
    "lydian": [0, 2, 4, 6, 7, 9, 11],
    "locrian": [0, 1, 3, 5, 6, 8, 10],
    # Aliases — ionian ≡ major, aeolian ≡ minor.
    "ionian": [0, 2, 4, 5, 7, 9, 11],
    "aeolian": [0, 2, 3, 5, 7, 8, 10],
}
60
+
61
# ─── Aesthetic-replace rules ──────────────────────────────────────────────────
# Tuple: (source_device_keywords, target_aesthetic_keywords, replace_action)
# Evaluated in order; first match wins.
#
# Matching (see _evaluate_replace_rules): a rule fires when ANY device keyword
# is a case-insensitive substring of ANY inventory entry AND any aesthetic
# keyword is a substring of the target-aesthetic string. The action dict's
# "action" field is one of "replace" (delete + insert), "remove" (delete only),
# or "enhance" (insert only) — consumed by _build_replace_steps.

_REPLACE_RULES: list[tuple[list[str], list[str], dict]] = [
    # Vinyl Distortion + clean/cinematic target → Saturator (gentle warmth)
    (
        ["vinyl distortion", "vinyl"],
        ["clean", "cinematic", "mood-reel", "sublime", "romantic", "orchestral"],
        {
            "action": "replace",
            "remove_device": "Vinyl Distortion",
            "add_device": "Saturator",
            "parameters": [{"name": "Drive", "value": 0.4}, {"name": "Dry/Wet", "value": 0.6}],
            "rationale_fragment": "Vinyl Distortion conflicts with clean/cinematic aesthetic — replaced with Saturator (drive 0.4) for subtle harmonic warmth.",
        },
    ),
    # Erosion + clean/cinematic target → remove
    (
        ["erosion"],
        ["clean", "cinematic", "mood-reel", "sublime", "romantic"],
        {
            "action": "remove",
            "remove_device": "Erosion",
            "add_device": None,
            "parameters": [],
            "rationale_fragment": "Erosion (digital degradation) removed — incompatible with clean/cinematic aesthetic.",
        },
    ),
    # Redux + clean target → remove
    (
        ["redux"],
        ["clean", "cinematic", "mood-reel", "sublime", "romantic"],
        {
            "action": "remove",
            "remove_device": "Redux",
            "add_device": None,
            "parameters": [],
            "rationale_fragment": "Redux (bit-crusher) removed — incompatible with clean/cinematic aesthetic.",
        },
    ),
    # Saturator high-drive + lo-fi/grit target → keep + add Erosion
    (
        ["saturator"],
        ["lo-fi", "grit", "tape", "dusty", "vintage"],
        {
            "action": "enhance",
            "remove_device": None,
            "add_device": "Erosion",
            "parameters": [{"name": "Amount", "value": 0.2}, {"name": "Dry/Wet", "value": 0.35}],
            "rationale_fragment": "Saturator retained; Erosion added for tape-noise texture consistent with lo-fi/vintage aesthetic.",
        },
    ),
    # Convolution Reverb → short Reverb when mood is intimate/dry
    (
        ["convolution reverb"],
        ["dry", "intimate", "mono", "direct"],
        {
            "action": "replace",
            "remove_device": "Convolution Reverb",
            "add_device": "Reverb",
            "parameters": [{"name": "Decay Time", "value": 0.8}, {"name": "Dry/Wet", "value": 0.25}],
            "rationale_fragment": "Convolution Reverb shortened — intimate/dry aesthetic calls for tight reflections.",
        },
    ),
]
127
+
128
# ─── Producer vocabulary anchors ─────────────────────────────────────────────
# Maps pack/aesthetic keywords to known producer vocabulary from
# livepilot-core/references/artist-vocabularies.md.
#
# Keys are matched as case-insensitive substrings against both the target
# aesthetic and the source entity_id/namespace (see _detect_producer_anchor).
# Pack keys appear in BOTH hyphenated and underscored spellings because
# entity ids may arrive in either form; _detect_producer_anchor deduplicates
# by anchor text so a double match still emits the sentence once.

_PRODUCER_ANCHORS: dict[str, str] = {
    "drone-lab": "Drone Lab invites the Villalobos / Henke texture-first philosophy: "
    "harmonic drones as rhythmic structures, macro-controlled spectral erosion "
    "and recovery, patient evolution over silence.",
    "drone_lab": "Drone Lab invites the Villalobos / Henke texture-first philosophy: "
    "harmonic drones as rhythmic structures, macro-controlled spectral erosion "
    "and recovery, patient evolution over silence.",
    "mood-reel": "Mood Reel occupies the Arca / Mica Levi register: cinematic suspension, "
    "lush stereo field, emotional restraint over grit.",
    "mood_reel": "Mood Reel occupies the Arca / Mica Levi register: cinematic suspension, "
    "lush stereo field, emotional restraint over grit.",
    "inspired-by-nature": "Inspired by Nature channels Dillon Bastan's "
    "generative-ecological aesthetic: physical-model resonance, "
    "slow self-organizing change, biological unpredictability.",
    "inspired_by_nature": "Inspired by Nature channels Dillon Bastan's "
    "generative-ecological aesthetic: physical-model resonance, "
    "slow self-organizing change, biological unpredictability.",
    "tree_tone": "Tree Tone (Inspired by Nature) — generative physical-model oscillator; "
    "reaches for Dillon Bastan's ecosystem aesthetic: filter branching "
    "that reads as breath, tuning drift that reads as micro-weather.",
    "tree tone": "Tree Tone (Inspired by Nature) — generative physical-model oscillator; "
    "reaches for Dillon Bastan's ecosystem aesthetic.",
    "henke": "Robert Henke / Monolake: minimal dub-techno pulse with deep spectral field, "
    "granular erosion as composition, restraint as maximalism.",
    "monolake": "Monolake: minimal dub-techno pulse with deep spectral field.",
    "boc": "Boards of Canada: chromatic degradation that reads as memory — "
    "cassette saturation, slowed-pitch warble, modal simplicity.",
    "boards of canada": "Boards of Canada: chromatic degradation that reads as memory.",
    "burial": "Burial: urban desolation through UK garage rhythm displaced by one-tick, "
    "shredded vocal as texture, sidechain 'wobble' as structural element.",
    "arca": "Arca: body-horror via pitch extremity, metallic percussion, "
    "surgical stereo placement.",
    "mica levi": "Mica Levi: sustained orchestral dread — slow-moving string harmonics, "
    "unconventional intonation, silence as pressure.",
}
167
+
168
# BPM ratio sanity-clamp threshold
# Bounds on target_bpm / source_bpm. Ratios outside [_BPM_RATIO_MIN,
# _BPM_RATIO_CLAMP] are treated as extreme by _compute_translation_decisions:
# rhythmic-density scaling is clamped to 1:1 and a warning is emitted.
_BPM_RATIO_CLAMP = 2.0  # ratios outside [0.5, 2.0] trigger warning + conservative clamp
_BPM_RATIO_MIN = 0.5
171
+
172
+
173
+ # ─── Slug normalisation helpers ───────────────────────────────────────────────
174
+
175
+ def _entity_id_to_slug(entity_id: str) -> str:
176
+ """Normalise an entity_id like 'drone_lab__earth' to 'drone-lab__earth'.
177
+
178
+ The spec uses underscores; sidecar files use hyphens. The convention is:
179
+ - pack portion: underscores → hyphens
180
+ - demo portion after '__': underscores → hyphens
181
+
182
+ We replace underscores with hyphens unless the separator '__' is involved.
183
+ Strategy: split on '__', hyphenate each part, rejoin with '__'.
184
+ """
185
+ parts = entity_id.split("__")
186
+ return "__".join(p.replace("_", "-") for p in parts)
187
+
188
+
189
def _resolve_demo_slug(entity_id: str) -> Path | None:
    """Locate the demo-sidecar JSON for *entity_id*, or None if absent.

    Sidecar filenames may use either hyphenated or underscored slugs, so
    three spellings are probed in order:
      1. the entity_id verbatim (e.g. 'drone-lab__earth')
      2. its hyphenated form via _entity_id_to_slug
      3. its fully underscored form
    The first existing file wins.
    """
    if not DEMO_PARSES_ROOT.exists():
        return None
    for variant in (
        entity_id,
        _entity_id_to_slug(entity_id),
        entity_id.replace("-", "_"),
    ):
        candidate = DEMO_PARSES_ROOT / f"{variant}.json"
        if candidate.exists():
            return candidate
    return None
209
+
210
+
211
def _resolve_preset_slug(pack_slug: str, preset_path: str) -> tuple[str, str] | None:
    """Locate a preset sidecar; return its (pack_dir_name, file_stem) or None.

    Both the pack slug and the preset path tolerate hyphen/underscore
    spelling differences, and preset paths may use '/' where the filename
    uses '_'. Every existing pack-directory variant is paired with every
    preset-stem variant until a file is found.
    """
    if not PRESET_PARSES_ROOT.exists():
        return None
    pack_variants = (
        pack_slug,
        pack_slug.replace("_", "-"),
        pack_slug.replace("-", "_"),
    )
    stem_variants = (
        preset_path,
        preset_path.replace("/", "_"),
        preset_path.replace("-", "_"),
        preset_path.replace("_", "-"),
    )
    for pack_name in pack_variants:
        pack_dir = PRESET_PARSES_ROOT / pack_name
        if not pack_dir.exists():
            continue
        for stem in stem_variants:
            if (pack_dir / f"{stem}.json").exists():
                return (pack_name, stem)
    return None
240
+
241
+
242
+ # ─── Sidecar loaders ─────────────────────────────────────────────────────────
243
+
244
@lru_cache(maxsize=None)
def _load_demo_sidecar(entity_id: str) -> dict | None:
    """Load a demo sidecar by entity_id (handles slug translation).

    Cached per entity_id string. Returns None if the sidecar is missing,
    unreadable, or contains invalid JSON.

    Fix: previously a corrupt or unreadable sidecar raised straight through
    (and lru_cache does not cache failures, so every call re-raised), even
    though the documented contract is "None if not found". A bad sidecar
    should degrade to "no data", not abort the transplant.

    NOTE: the returned dict is the cached object itself — callers must
    treat it as read-only.
    """
    p = _resolve_demo_slug(entity_id)
    if p is None:
        return None
    try:
        # Sidecars are produced locally; UTF-8 is the JSON default encoding.
        with p.open(encoding="utf-8") as fh:
            return json.load(fh)
    except (OSError, json.JSONDecodeError):
        return None
255
+
256
+
257
@lru_cache(maxsize=None)
def _load_preset_sidecar_cached(pack_slug: str, preset_path_slug: str) -> dict | None:
    """Load a preset sidecar. Wraps macro_fingerprint._load_preset_sidecar.

    Memoised per (pack_slug, preset_path_slug) pair. The cached dict is
    shared between callers — treat it as read-only. The relative import is
    deferred to call time, presumably to avoid a circular import between
    this module and macro_fingerprint — TODO confirm.
    """
    from .macro_fingerprint import _load_preset_sidecar
    return _load_preset_sidecar(pack_slug, preset_path_slug)
262
+
263
+
264
+ # ─── Source structure extraction ─────────────────────────────────────────────
265
+
266
+ def _walk_device_chain(devices: list[dict], depth: int = 0) -> list[str]:
267
+ """Recursively walk a device chain, collecting class + user_name for each
268
+ device including those nested inside rack chains.
269
+
270
+ BUG-C#2 fix: previously only top-level rack class names ("InstrumentGroupDevice",
271
+ "AudioEffectGroupDevice") landed in the inventory, so REPLACE rules that
272
+ target inner-chain devices (Vinyl Distortion, Erosion, Redux — the canonical
273
+ aesthetic-incompatibility cases) had nothing to match against.
274
+
275
+ Now uses the v1.23.5 sidecar `chains` field (Schema A: nested) to surface
276
+ every inner device class up to a sane recursion depth.
277
+ """
278
+ inventory: list[str] = []
279
+ if depth > 8: # defensive cap; real corpus never exceeds 4
280
+ return inventory
281
+ for dev in devices or []:
282
+ class_name = dev.get("class", "") or ""
283
+ user_name = dev.get("user_name") or ""
284
+ if class_name:
285
+ inventory.append(class_name)
286
+ if user_name:
287
+ inventory.append(user_name)
288
+ for chain in dev.get("chains") or []:
289
+ inventory.extend(
290
+ _walk_device_chain(chain.get("devices") or [], depth + 1)
291
+ )
292
+ return inventory
293
+
294
+
295
def _extract_source_structure(sidecar: dict) -> dict:
    """Summarise the musical structure of a demo sidecar.

    Returns a dict with:
        bpm              — float; defaults to 120.0 when absent or falsy
        scale            — {root_note: int, name: str}; defaults 0 / "Major"
        tracks_summary   — all track names (return tracks included)
        device_inventory — every device class + user_name, recursive
                           across rack chains (see _walk_device_chain)
        track_count      — number of tracks
        scene_count      — number of scenes
        return_tracks    — names of tracks whose type is "ReturnTrack"
    """
    tempo = float(sidecar.get("bpm") or 120.0)

    raw_scale = sidecar.get("scale") or {}
    try:
        # root_note may arrive as an int or a numeric string.
        root = int(str(raw_scale.get("root_note", "0")))
    except (ValueError, TypeError):
        root = 0
    mode = raw_scale.get("name", "Major") or "Major"

    all_tracks = sidecar.get("tracks") or []
    names: list[str] = []
    inventory: list[str] = []
    returns: list[str] = []
    for trk in all_tracks:
        track_name = trk.get("name", "")
        if trk.get("type", "") in ("ReturnTrack",):
            returns.append(track_name)
        # Return tracks are also counted in the general summary.
        names.append(track_name)
        inventory.extend(_walk_device_chain(trk.get("devices") or []))

    return {
        "bpm": tempo,
        "scale": {"root_note": root, "name": mode},
        "tracks_summary": names,
        "device_inventory": inventory,
        "track_count": len(all_tracks),
        "scene_count": len(sidecar.get("scenes") or []),
        "return_tracks": returns,
    }
343
+
344
+
345
+ # ─── Scale transposition helpers ─────────────────────────────────────────────
346
+
347
def _remap_pitch_class(
    pitch: int,
    src_root: int,
    src_mode: str,
    tgt_root: int,
    tgt_mode: str,
) -> int:
    """Translate a MIDI pitch from the source scale into the target scale.

    The pitch is decomposed into octave + pitch-class relative to src_root,
    snapped to the nearest src_mode degree (circular semitone distance;
    ties keep the lowest degree index), and rebuilt on the same degree
    index of tgt_mode above tgt_root. Unknown mode names fall back to
    major (source) and minor (target).
    """
    source_scale = _MODE_DEGREES.get(src_mode.lower(), _MODE_DEGREES["major"])
    target_scale = _MODE_DEGREES.get(tgt_mode.lower(), _MODE_DEGREES["minor"])

    offset = pitch - src_root
    pitch_class = offset % 12   # 0-11 relative to the source root
    octaves = offset // 12      # floor division, matching Python's % above

    def _circular_distance(degree: int) -> int:
        # Distance around the 12-semitone circle.
        linear = abs(pitch_class - degree)
        return min(linear, 12 - linear)

    # min() returns the first index on ties — same tie-break as a strict
    # "<" comparison scan.
    degree_index = min(
        range(len(source_scale)),
        key=lambda i: _circular_distance(source_scale[i]),
    )

    # Guard against a (hypothetical) shorter target mode table.
    degree_index = min(degree_index, len(target_scale) - 1)

    return tgt_root + octaves * 12 + target_scale[degree_index]
386
+
387
+
388
+ # ─── Aesthetic-replace evaluation ────────────────────────────────────────────
389
+
390
def _evaluate_replace_rules(
    device_inventory: list[str],
    target_aesthetic: str,
) -> list[dict]:
    """Match _REPLACE_RULES against an inventory and a target aesthetic.

    A rule fires when any of its device keywords is a substring of any
    inventory entry AND any of its aesthetic keywords is a substring of
    the target-aesthetic string (all comparisons case-insensitive).
    Rules are evaluated in table order, and at most one rule may remove a
    given device — later rules naming an already-removed device are
    skipped. Each fired action dict is returned as a shallow copy.
    """
    aesthetic = target_aesthetic.lower()
    inventory = [entry.lower() for entry in device_inventory]
    fired: list[dict] = []
    removed: set[str] = set()

    for device_keywords, aesthetic_keywords, action in _REPLACE_RULES:
        device_hit = any(
            keyword in entry
            for keyword in device_keywords
            for entry in inventory
        )
        if not device_hit:
            continue
        if not any(keyword in aesthetic for keyword in aesthetic_keywords):
            continue
        removal = (action.get("remove_device") or "").lower()
        if removal:
            if removal in removed:
                continue  # a prior rule already removes this device
            removed.add(removal)
        fired.append(dict(action))

    return fired
426
+
427
+
428
+ # ─── Translation decisions ────────────────────────────────────────────────────
429
+
430
def _compute_translation_decisions(
    source_struct: dict,
    target_bpm: float | None,
    target_scale_root: int | None,
    target_scale_name: str,
    target_aesthetic: str,
    preserve_pitch_intervals: bool,
    preserve_macro_ratios: bool,
    source_sidecar: dict | None = None,
) -> tuple[list[dict], list[str]]:
    """Generate per-element translation decisions.

    Returns (decisions_list, warnings_list). Each decision dict carries
    element / decision / detail / rationale / executable_steps keys.

    Decision types:
        PRESERVE — keep as-is (pitch intervals, macro ratios)
        SCALE — scale rhythmic density by BPM ratio
        REMAP — scale-locked notes via pitch-class-set transform
        REPLACE — swap aesthetic-incompatible device

    Unset targets fall back to the source values, so a call with no target
    overrides yields no SCALE/REMAP decision.
    """
    decisions: list[dict] = []
    warnings: list[str] = []

    src_bpm = source_struct["bpm"]
    src_scale = source_struct["scale"]
    src_root = src_scale["root_note"]  # int
    src_mode = src_scale["name"]

    # Fall back to source values whenever a target dimension is unspecified.
    tgt_root = target_scale_root if target_scale_root is not None else src_root
    tgt_mode = target_scale_name if target_scale_name else src_mode
    tgt_bpm = target_bpm if target_bpm is not None else src_bpm

    # ── BPM ratio + SCALE decision ─────────────────────────────────────────
    # Guard against a zero/negative source BPM (treat as no tempo change).
    bpm_ratio = tgt_bpm / src_bpm if src_bpm > 0 else 1.0
    clamp_applied = False

    if bpm_ratio < _BPM_RATIO_MIN or bpm_ratio > _BPM_RATIO_CLAMP:
        warnings.append(
            f"BPM ratio {bpm_ratio:.2f} is outside the safe range "
            f"[{_BPM_RATIO_MIN}, {_BPM_RATIO_CLAMP}] "
            f"(source {src_bpm:.0f} BPM → target {tgt_bpm:.0f} BPM). "
            "Rhythmic-density scaling clamped to conservative 1:1 — "
            "manual note-density adjustment recommended."
        )
        effective_bpm_ratio = 1.0  # conservative clamp
        clamp_applied = True
    else:
        effective_bpm_ratio = bpm_ratio

    # Only emit a SCALE decision when the tempo actually changes (>1% delta).
    if abs(bpm_ratio - 1.0) > 0.01:
        decisions.append({
            "element": "Global tempo mapping",
            "decision": "SCALE",
            "detail": {
                "source_bpm": src_bpm,
                "target_bpm": tgt_bpm,
                "bpm_ratio": round(bpm_ratio, 4),
                "effective_bpm_ratio": round(effective_bpm_ratio, 4),
                "clamp_applied": clamp_applied,
                "rhythmic_density_multiplier": round(effective_bpm_ratio, 4),
            },
            "rationale": (
                f"Source {src_bpm:.0f} BPM → target {tgt_bpm:.0f} BPM "
                f"(ratio {bpm_ratio:.3f}). "
                + ("Density CLAMPED to 1:1 due to extreme ratio. " if clamp_applied else "")
                + "Delay/echo times scale inversely with BPM ratio to maintain "
                "rhythmic feel (e.g. 1/4 dotted stays proportional to bar length)."
            ),
            "executable_steps": [
                {
                    "action": "set_tempo",
                    "bpm": tgt_bpm,
                    "comment": f"Set project tempo to {tgt_bpm} BPM",
                }
            ],
        })

    # ── Scale REMAP decision ──────────────────────────────────────────────
    # Mode comparison is case-insensitive; root comparison is exact.
    scale_changed = (tgt_root != src_root) or (tgt_mode.lower() != src_mode.lower())
    if scale_changed:
        root_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
        src_name = f"{root_names[src_root % 12]} {src_mode}"
        tgt_name = f"{root_names[tgt_root % 12]} {tgt_mode}"

        # Example: remap a middle C (60) to show what the pitch shift looks like
        example_remapped = _remap_pitch_class(60, src_root, src_mode, tgt_root, tgt_mode)
        semitone_shift = tgt_root - src_root

        decisions.append({
            "element": "Scale / key transposition",
            "decision": "REMAP",
            "detail": {
                "source_scale": src_name,
                "target_scale": tgt_name,
                "root_shift_semitones": semitone_shift,
                "mode_remap": f"{src_mode} → {tgt_mode}",
                "example": f"MIDI 60 (C4 in {src_name}) → MIDI {example_remapped} in {tgt_name}",
            },
            "rationale": (
                f"All MIDI clips transposed: {src_name} → {tgt_name}. "
                + (f"Root shift {abs(semitone_shift)} semitones "
                   f"({'up' if semitone_shift > 0 else 'down'}). " if semitone_shift != 0 else "")
                + (f"Mode remapped {src_mode} → {tgt_mode}: scale-degree relationships "
                   f"preserved by mapping each degree to nearest equivalent."
                   if src_mode.lower() != tgt_mode.lower() else
                   "Same mode — pitch-class shift only.")
            ),
            "executable_steps": [
                {
                    "action": "set_song_scale",
                    "root": tgt_root,
                    "name": tgt_mode,
                    "comment": (
                        f"Set song scale to {tgt_name}. "
                        "Live's scale-snap will remap scale-locked clips per degree. "
                        "For non-scale-locked clips, manually transpose by "
                        f"{semitone_shift:+d} semitones after setting the scale."
                    ),
                },
            ],
            # LIMITATION: demo sidecars expose only macro {index, value} — no clip
            # note data. Per-note _remap_pitch_class offsets (Option A) therefore
            # cannot be pre-computed here. Option B is used: set_song_scale activates
            # Live's built-in per-degree remapping for scale-locked clips.
            # Non-scale-locked clips still need manual transposition (see comment above).
        })

    # ── PRESERVE: pitch intervals ─────────────────────────────────────────
    if preserve_pitch_intervals:
        decisions.append({
            "element": "Pitch interval relationships",
            "decision": "PRESERVE",
            "detail": {"mode": "interval-relative"},
            "rationale": (
                "Pitch intervals within each voice preserved (preserve_pitch_intervals=True). "
                "Transposition applied as a global shift; voicings retain original shape."
            ),
            "executable_steps": [],
        })

    # ── PRESERVE: macro ratios ────────────────────────────────────────────
    if preserve_macro_ratios and source_sidecar:
        tracks = source_sidecar.get("tracks") or []
        macro_notes = []
        for track in tracks:
            devices = track.get("devices") or []
            for dev in devices:
                macros = dev.get("macros") or []
                # NOTE(review): assumes macro "value" is numeric or a numeric
                # string; a non-numeric value would raise ValueError here —
                # confirm against the demo-sidecar schema.
                nonzero = [
                    m for m in macros
                    if float(str(m.get("value", "0"))) != 0.0
                ]
                if nonzero:
                    user_name = dev.get("user_name") or dev.get("class", "device")
                    macro_notes.append(
                        f"{track.get('name','track')} / {user_name}: "
                        f"{len(nonzero)} non-default macros preserved as ratios"
                    )
        decisions.append({
            "element": "Macro values (non-default)",
            "decision": "PRESERVE",
            # Cap the human-readable note list at 8 entries.
            "detail": {"mode": "ratio", "macro_notes": macro_notes[:8]},
            "rationale": (
                "Non-default macro values preserved as normalised ratios [0-127 → 0-1]. "
                "These encode the author's committed artistic decisions; carry them forward "
                "even when the target preset has different raw parameter ranges."
            ),
            "executable_steps": [
                {
                    "action": "set_device_parameter",
                    "note": "Apply per-track macro values after loading each preset; "
                    "use normalised ratio × 127 to convert back to raw values.",
                }
            ],
        })

    # ── Per-track decisions ───────────────────────────────────────────────
    if source_sidecar:
        tracks = source_sidecar.get("tracks") or []
        for track in tracks:
            t_name = track.get("name", "Unknown Track")
            t_type = track.get("type", "")
            # Return/master tracks are excluded from per-track decisions.
            if t_type in ("ReturnTrack", "MasterTrack"):
                continue
            devices = track.get("devices") or []
            if not devices:
                continue

            # Only the FIRST device on each track drives its decision.
            dev = devices[0]
            dev_class = dev.get("class", "")
            user_name = dev.get("user_name") or dev_class

            # Check aesthetic-replace rules for this device
            track_inventory = [dev_class, user_name]
            replace_decisions = _evaluate_replace_rules(track_inventory, target_aesthetic)

            if replace_decisions:
                for rd in replace_decisions:
                    steps = _build_replace_steps(rd, t_name, user_name)
                    decisions.append({
                        "element": f"{t_name} ({user_name})",
                        "decision": "REPLACE",
                        "detail": rd,
                        "rationale": rd.get("rationale_fragment", "Aesthetic replacement."),
                        "executable_steps": steps,
                    })
            else:
                # Default: PRESERVE the track structure
                # BUG-NEW#1: emit_load_step adds browser_search_hint so the agent
                # can resolve a URI before calling load_browser_item.
                try:
                    from .preset_resolver import emit_load_step as _emit_load_step
                    # NOTE(review): assumes the sidecar "file" path's
                    # second-to-last segment is the pack slug — a path with
                    # fewer segments falls through to the except branch.
                    _pack_slug = source_sidecar.get("file", "").split("/")[-2] if source_sidecar.get("file") else ""
                    preserve_step = _emit_load_step(_pack_slug, dev_class, user_name, -1)
                except Exception:
                    # Fallback if preset_resolver unavailable
                    preserve_step = {
                        "action": "load_browser_item",
                        "name": user_name,
                        "browser_search_hint": {
                            "name_filter": user_name,
                            "suggested_path": "sounds",
                        },
                        "comment": f"Load {user_name} preset from source pack",
                    }
                decisions.append({
                    "element": f"{t_name} ({user_name})",
                    "decision": "PRESERVE",
                    "detail": {
                        "device_class": dev_class,
                        "user_name": user_name,
                    },
                    "rationale": (
                        f"{user_name} is aesthetically compatible with the target context. "
                        "Load via browser URI; apply preserved macro values."
                    ),
                    "executable_steps": [preserve_step],
                })

    # ── Global aesthetic replace (whole-sidecar device inventory) ─────────
    if source_sidecar and target_aesthetic:
        all_inventory = source_struct.get("device_inventory", [])
        global_replaces = _evaluate_replace_rules(all_inventory, target_aesthetic)
        # Skip devices already handled by a per-track REPLACE decision above.
        already_covered = {
            d.get("detail", {}).get("remove_device") or ""
            for d in decisions
            if d.get("decision") == "REPLACE"
        }
        for rd in global_replaces:
            if (rd.get("remove_device") or "") not in already_covered:
                decisions.append({
                    "element": f"Global effect chain ({rd.get('remove_device', 'device')})",
                    "decision": "REPLACE",
                    "detail": rd,
                    "rationale": rd.get("rationale_fragment", "Aesthetic replacement."),
                    "executable_steps": _build_replace_steps(rd, "global", rd.get("remove_device", "")),
                })

    return decisions, warnings
689
+
690
+
691
+ def _build_replace_steps(
692
+ rd: dict,
693
+ track_context: str,
694
+ user_name: str,
695
+ ) -> list[dict]:
696
+ """Build executable_steps list for a REPLACE decision."""
697
+ steps = []
698
+ if rd.get("action") in ("replace", "remove") and rd.get("remove_device"):
699
+ steps.append({
700
+ "action": "delete_device",
701
+ "filter": rd["remove_device"],
702
+ "track_context": track_context,
703
+ "comment": f"Remove {rd['remove_device']}",
704
+ })
705
+ if rd.get("action") in ("replace", "enhance") and rd.get("add_device"):
706
+ steps.append({
707
+ "action": "insert_device",
708
+ "device_name": rd["add_device"],
709
+ "track_context": track_context,
710
+ "comment": f"Insert {rd['add_device']}",
711
+ })
712
+ for param in rd.get("parameters", []):
713
+ steps.append({
714
+ "action": "set_device_parameter",
715
+ "name": param["name"],
716
+ "value": param["value"],
717
+ "track_context": track_context,
718
+ })
719
+ return steps
720
+
721
+
722
+ # ─── Producer vocabulary anchor detection ────────────────────────────────────
723
+
724
def _detect_producer_anchor(
    source_entity_id: str,
    target_aesthetic: str,
    source_namespace: str,
) -> str:
    """Collect producer-vocabulary anchors for the reasoning artifact.

    The target aesthetic is scanned before the source id/namespace so the
    user's stated intent is surfaced first; anchors matched from both are
    emitted (deduplicated by anchor text, since hyphen/underscore key
    variants map to the same sentence). Returns matching anchors joined by
    newlines, or "" when nothing matches.
    """
    # Ordered haystacks: user intent first, then source context.
    haystacks = (
        target_aesthetic.lower(),
        f"{source_entity_id} {source_namespace}".lower(),
    )

    emitted: set[str] = set()
    anchors: list[str] = []
    for haystack in haystacks:
        for keyword, anchor_text in _PRODUCER_ANCHORS.items():
            if keyword.lower() in haystack and anchor_text not in emitted:
                emitted.add(anchor_text)
                anchors.append(anchor_text)

    return "\n".join(anchors)
755
+
756
+
757
# ─── Reasoning artifact generation ───────────────────────────────────────────

def _generate_reasoning_artifact(
    source_struct: dict,
    target_bpm: float | None,
    target_scale_root: int | None,
    target_scale_name: str,
    target_aesthetic: str,
    decisions: list[dict],
    warnings: list[str],
    depth: str,
    source_entity_id: str,
    source_namespace: str,
) -> str:
    """Generate a prose reasoning artifact describing the transplant plan.

    Args:
        source_struct: Extracted source structure; must provide "bpm" and
            "scale" ({"root_note", "name"}), may provide "track_count" /
            "scene_count".
        target_bpm: Target tempo; None (or 0) keeps the source BPM.
        target_scale_root: Target root note; None keeps the source root.
        target_scale_name: Target mode; "" keeps the source mode.
        target_aesthetic: Free-text aesthetic, folded into anchor detection.
        decisions: Translation decision dicts with "decision", "element",
            optional "detail", "rationale" and "executable_steps".
        warnings: Accumulated warnings — only the first is surfaced in the
            terse/standard outputs; all appear in verbose.
        depth: "terse" | "standard" | "verbose" (any other value → verbose).
        source_entity_id: Resolved entity id used in headings and anchors.
        source_namespace: Source namespace, used for anchor detection only.

    Returns:
        A short sentence (terse), a paragraph (standard), or a Markdown
        document (verbose).
    """
    root_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    src_bpm = source_struct["bpm"]
    src_root = source_struct["scale"]["root_note"]
    src_mode = source_struct["scale"]["name"]
    src_scale_str = f"{root_names[src_root % 12]} {src_mode}"

    # Fall back to the source value for every unspecified target field.
    # NOTE(review): `target_bpm or src_bpm` also treats 0.0 as "unset"; a
    # 0 BPM target is never meaningful, so that is accepted here.
    tgt_bpm = target_bpm or src_bpm
    tgt_root = target_scale_root if target_scale_root is not None else src_root
    tgt_mode = target_scale_name or src_mode
    tgt_scale_str = f"{root_names[tgt_root % 12]} {tgt_mode}"

    bpm_ratio = tgt_bpm / src_bpm if src_bpm > 0 else 1.0  # guard div-by-zero
    n_tracks = source_struct.get("track_count", 0)
    n_replace = sum(1 for d in decisions if d.get("decision") == "REPLACE")
    n_preserve = sum(1 for d in decisions if d.get("decision") == "PRESERVE")
    n_remap = sum(1 for d in decisions if d.get("decision") == "REMAP")

    # Producer anchor ("" when no vocabulary keyword matches)
    producer_anchor = _detect_producer_anchor(source_entity_id, target_aesthetic, source_namespace)

    if depth == "terse":
        parts = [
            f"Transplant {source_entity_id} ({src_bpm:.0f} BPM {src_scale_str}) → "
            f"{tgt_bpm:.0f} BPM {tgt_scale_str}."
        ]
        if n_replace:
            parts.append(f"{n_replace} device(s) replaced for aesthetic fit.")
        if warnings:
            parts.append(f"Warning: {warnings[0]}")
        return " ".join(parts)

    if depth == "standard":
        body = (
            f"This transplant adapts {source_entity_id} from {src_bpm:.0f} BPM {src_scale_str} "
            f"to {tgt_bpm:.0f} BPM {tgt_scale_str} (BPM ratio {bpm_ratio:.3f}). "
        )
        if producer_anchor:
            body += producer_anchor + " "
        if n_remap:
            body += (
                f"Key transposition shifts all MIDI clips: {src_scale_str} → {tgt_scale_str}. "
            )
        if n_replace:
            replace_elements = [
                d.get("detail", {}).get("remove_device", "device")
                for d in decisions
                if d.get("decision") == "REPLACE"
            ]
            body += (
                f"{n_replace} aesthetic-incompatible device(s) swapped "
                f"({', '.join(filter(None, replace_elements))}). "
            )
        body += (
            f"{n_preserve} structural element(s) preserved as-is. "
            f"The plan covers {n_tracks} source tracks; execute sequentially."
        )
        if warnings:
            body += f" NOTE: {warnings[0]}"
        return body.strip()

    # verbose (default for any unrecognized depth value)
    # FIX: placeholder-free f-strings (f"", f"### ...") replaced with plain
    # string literals — identical output, no pointless formatting (ruff F541).
    lines = [
        f"## Transplant Plan: {source_entity_id}",
        "",
        f"**Source:** {src_bpm:.0f} BPM, {src_scale_str}, "
        f"{n_tracks} tracks, {source_struct.get('scene_count', 0)} scenes",
        f"**Target:** {tgt_bpm:.0f} BPM, {tgt_scale_str}, aesthetic: {target_aesthetic or '(none)'}",
        "",
    ]
    if producer_anchor:
        lines += ["### Producer Vocabulary Anchor", "", producer_anchor, ""]

    lines += ["### Translation Summary", ""]
    for i, d in enumerate(decisions, 1):
        lines.append(
            f"{i}. **{d['decision']}** — {d['element']}: {d.get('rationale', '')}"
        )

    if warnings:
        lines += ["", "### Warnings", ""]
        for w in warnings:
            lines.append(f"- {w}")

    lines += ["", "### Executable Plan", ""]
    for d in decisions:
        steps = d.get("executable_steps", [])
        if steps:
            lines.append(f"**{d['element']}**:")
            for step in steps:
                lines.append(f"  - `{step.get('action')}` {step.get('comment', '')}")

    return "\n".join(lines)
867
+
868
+
869
+ # ─── Find compatible target preset (Phase D integration) ─────────────────────
870
+
871
+ def _find_compatible_preset_targets(
872
+ source_sidecar: dict | None,
873
+ target_aesthetic: str,
874
+ top_k: int = 3,
875
+ ) -> list[dict]:
876
+ """Use Phase D's macro_fingerprint matcher to find compatible target presets.
877
+
878
+ Returns a list of {pack_slug, preset_path, preset_name, similarity_score}.
879
+ Used when source_track_or_preset is a preset sidecar.
880
+ """
881
+ if source_sidecar is None:
882
+ return []
883
+ try:
884
+ from .macro_fingerprint import (
885
+ _extract_fingerprint,
886
+ _compute_similarity,
887
+ _iter_all_preset_sidecars,
888
+ _generate_rationale,
889
+ )
890
+ except ImportError:
891
+ return []
892
+
893
+ source_fp = _extract_fingerprint(source_sidecar)
894
+ if not source_fp:
895
+ return []
896
+
897
+ # Build target aesthetic pack filter heuristic
898
+ tgt_lower = target_aesthetic.lower()
899
+ # Prefer packs that match aesthetic keywords
900
+ scored: list[tuple[float, str, str, dict, list[dict]]] = []
901
+ for cand_pack, cand_slug, cand_sidecar in _iter_all_preset_sidecars():
902
+ cand_fp = _extract_fingerprint(cand_sidecar)
903
+ if len(cand_fp) < 2:
904
+ continue
905
+ score, matched = _compute_similarity(source_fp, cand_fp)
906
+ if score >= 0.1:
907
+ # Boost if pack name appears in aesthetic
908
+ boost = 0.05 if any(
909
+ kw in tgt_lower
910
+ for kw in cand_pack.replace("-", " ").split()
911
+ ) else 0.0
912
+ scored.append((score + boost, cand_pack, cand_slug, cand_sidecar, matched))
913
+
914
+ scored.sort(key=lambda x: x[0], reverse=True)
915
+ results = []
916
+ for score, cand_pack, cand_slug, cand_sidecar, matched in scored[:top_k]:
917
+ rationale = _generate_rationale(
918
+ source_pack=source_sidecar.get("file", "").split("/")[-2] if source_sidecar.get("file") else "",
919
+ source_name=source_sidecar.get("name", ""),
920
+ cand_pack=cand_pack,
921
+ cand_name=cand_sidecar.get("name", ""),
922
+ matching_macros=matched,
923
+ )
924
+ results.append({
925
+ "pack_slug": cand_pack,
926
+ "preset_path": cand_slug,
927
+ "preset_name": cand_sidecar.get("name", ""),
928
+ "similarity_score": score,
929
+ "rationale": rationale,
930
+ })
931
+ return results
932
+
933
+
934
# ─── Main transplant function ─────────────────────────────────────────────────

def transplant(
    source_namespace: str,
    source_entity_id: str,
    source_track_or_preset: str = "",
    target_bpm: float | None = None,
    target_scale_root: int | None = None,
    target_scale_name: str = "",
    target_aesthetic: str = "",
    preserve_macro_ratios: bool = True,
    preserve_pitch_intervals: bool = True,
    explanation_depth: str = "standard",
) -> dict:
    """Core transplant logic — returns the structured plan dict.

    Called directly by the MCP tool registration in tools.py.
    Separated from the tool wrapper to allow direct unit-testing.

    Args:
        source_namespace: Must be "packs", "m4l-devices" or "elektron";
            anything else returns an {"error", "status": "error"} dict.
        source_entity_id: Pack/demo identifier (demo ids contain "__").
        source_track_or_preset: Optional preset name/slug inside the pack;
            triggers preset-sidecar loading and macro-fingerprint matching.
        target_bpm: Desired tempo; None keeps the source BPM.
        target_scale_root: Desired root note; None — or any negative value,
            normalized below — keeps the source root.
        target_scale_name: Desired mode; "" keeps the source mode.
        target_aesthetic: Free-text target aesthetic.
        preserve_macro_ratios: Forwarded to the decision engine.
        preserve_pitch_intervals: Forwarded to the decision engine.
        explanation_depth: "terse" | "standard" | "verbose" reasoning prose.

    Returns:
        Dict with keys "source", "target", "translation_plan",
        "reasoning_artifact", "warnings", "sources" — or the error dict for
        an unknown namespace.
    """
    sources_cited: list[str] = []  # provenance strings for the "sources" key
    warnings: list[str] = []       # non-fatal issues accumulated throughout

    # ── 0a. Normalize sentinel values ────────────────────────────────────
    # BUG-EDGE#6: -1 is the "keep source root" sentinel from the tools.py wrapper.
    # If the inner function is called directly with target_scale_root=-1, treat it
    # as None (no remapping) rather than emitting an invalid set_song_scale step.
    if target_scale_root is not None and target_scale_root < 0:
        target_scale_root = None

    # ── 0. Validate source_namespace ─────────────────────────────────────
    _ALLOWED_NAMESPACES = ["packs", "m4l-devices", "elektron"]
    if source_namespace not in _ALLOWED_NAMESPACES:
        # Error dict (not an exception) so the MCP wrapper can return it as-is.
        return {
            "error": (
                f"Unknown source_namespace: '{source_namespace}'. "
                f"Allowed: {_ALLOWED_NAMESPACES}"
            ),
            "status": "error",
        }

    # ── 1. Load source data ───────────────────────────────────────────────
    source_sidecar: dict | None = None   # full demo (set-level) sidecar
    preset_sidecar: dict | None = None   # single-preset sidecar
    entity_id_resolved = source_entity_id

    if source_namespace == "packs":
        # Try demo sidecar first (entity_id like "drone_lab__earth")
        if "__" in source_entity_id:
            source_sidecar = _load_demo_sidecar(source_entity_id)
            if source_sidecar:
                sidecar_path = _resolve_demo_slug(source_entity_id)
                # BUG-C#5: resolve to canonical hyphenated slug, not raw input form
                entity_id_resolved = _entity_id_to_slug(source_entity_id)
                sources_cited.append(
                    f"als-parse: {sidecar_path} [SOURCE: als-parse]"
                )
            else:
                warnings.append(
                    f"Demo sidecar not found for entity_id='{source_entity_id}'. "
                    "Checked slug variants: underscore ↔ hyphen. "
                    "Falling back to minimal structure."
                )

        # If source_track_or_preset provided, try to load the preset sidecar
        if source_track_or_preset:
            # entity_id may be the pack slug (e.g. "drone_lab")
            # NOTE(review): after replace("__", "_") no "__" remains, so the
            # following split("__")[0] is a no-op — presumably meant to strip
            # a demo suffix before the replace; verify intent.
            pack_slug_guess = source_entity_id.replace("__", "_").split("__")[0]
            resolved = _resolve_preset_slug(pack_slug_guess, source_track_or_preset)
            if resolved:
                preset_sidecar = _load_preset_sidecar_cached(*resolved)
                entity_id_resolved = resolved[1]
                sources_cited.append(
                    f"adg-parse: {PRESET_PARSES_ROOT / resolved[0] / resolved[1]}.json "
                    f"[SOURCE: adg-parse]"
                )
            if preset_sidecar is None:
                # Try harder: source_entity_id itself might be the pack slug
                for pack_guess in [
                    source_entity_id,
                    source_entity_id.replace("_", "-"),
                    source_entity_id.split("__")[0].replace("_", "-"),
                ]:
                    resolved2 = _resolve_preset_slug(pack_guess, source_track_or_preset)
                    if resolved2:
                        preset_sidecar = _load_preset_sidecar_cached(*resolved2)
                        sources_cited.append(
                            f"adg-parse: preset/{resolved2[0]}/{resolved2[1]} "
                            f"[SOURCE: adg-parse]"
                        )
                        entity_id_resolved = resolved2[1]
                        break
            if preset_sidecar is None:
                warnings.append(
                    f"Preset sidecar not found for source_track_or_preset='{source_track_or_preset}' "
                    f"in pack '{source_entity_id}'."
                )

    # Choose which sidecar drives the structure: demo wins over preset.
    primary_sidecar = source_sidecar if source_sidecar else preset_sidecar

    # ── 2. Extract source structure ───────────────────────────────────────
    if primary_sidecar:
        source_struct = _extract_source_structure(primary_sidecar)
    elif preset_sidecar:
        # NOTE(review): this branch looks unreachable — primary_sidecar is
        # preset_sidecar whenever source_sidecar is falsy, so a truthy
        # preset_sidecar always takes the branch above; confirm whether
        # _extract_preset_structure was meant to handle preset-only sources.
        source_struct = _extract_preset_structure(preset_sidecar)
    else:
        # Minimal fallback — no sidecar found
        source_struct = {
            "bpm": 120.0,
            "scale": {"root_note": 0, "name": "Major"},
            "tracks_summary": [],
            "device_inventory": [],
            "track_count": 0,
            "scene_count": 0,
            "return_tracks": [],
        }
        warnings.append("No sidecar data found — structural inference not possible.")

    # Override BPM / scale from preset sidecar if that's our primary source
    if preset_sidecar and not source_sidecar:
        # Preset sidecars don't have BPM/scale — keep defaults
        pass

    # ── 3. Find compatible targets via macro-fingerprint if preset source ─
    compatible_targets: list[dict] = []
    if preset_sidecar and target_aesthetic:
        compatible_targets = _find_compatible_preset_targets(
            preset_sidecar, target_aesthetic, top_k=3
        )
        if compatible_targets:
            sources_cited.append(
                f"adg-parse: macro-fingerprint scan across {len(compatible_targets)} "
                f"best matches [SOURCE: adg-parse]"
            )

    # ── 4. Compute translation decisions ──────────────────────────────────
    decisions, decision_warnings = _compute_translation_decisions(
        source_struct=source_struct,
        target_bpm=target_bpm,
        target_scale_root=target_scale_root,
        target_scale_name=target_scale_name,
        target_aesthetic=target_aesthetic,
        preserve_pitch_intervals=preserve_pitch_intervals,
        preserve_macro_ratios=preserve_macro_ratios,
        source_sidecar=primary_sidecar,
    )
    warnings.extend(decision_warnings)

    # ── 5. Build translation_plan (spec return shape) ─────────────────────
    translation_plan = []
    for dec in decisions:
        translation_plan.append({
            "element": dec["element"],
            "decision": dec["decision"],
            "detail": dec.get("detail"),  # BUG-INT#3: was dropped; must include for REPLACE decisions
            "rationale": dec.get("rationale", ""),
            "executable_steps": dec.get("executable_steps", []),
        })

    # Inject compatible targets into plan if found
    if compatible_targets:
        # NOTE(review): target_names is never used — candidate for removal.
        target_names = [ct["preset_name"] for ct in compatible_targets]
        translation_plan.append({
            "element": "Compatible target presets (macro-fingerprint matched)",
            "decision": "REPLACE",  # Preset-swap suggestion — not a scale-degree transform
            "rationale": (
                f"Macro-fingerprint similarity search found {len(compatible_targets)} "
                f"compatible target preset(s) for target aesthetic '{target_aesthetic}': "
                + "; ".join(
                    f"{ct['preset_name']} ({ct['pack_slug']}, score {ct['similarity_score']:.2f})"
                    for ct in compatible_targets
                )
            ),
            "executable_steps": [
                {
                    "action": "load_browser_item",
                    "name": ct["preset_name"],
                    "pack": ct["pack_slug"],
                    "comment": f"Load compatible target: {ct['preset_name']} (score {ct['similarity_score']:.2f})",
                }
                for ct in compatible_targets
            ],
        })

    # ── 6. Generate reasoning artifact ───────────────────────────────────
    reasoning = _generate_reasoning_artifact(
        source_struct=source_struct,
        target_bpm=target_bpm,
        target_scale_root=target_scale_root,
        target_scale_name=target_scale_name,
        target_aesthetic=target_aesthetic,
        decisions=decisions,
        warnings=warnings,
        depth=explanation_depth,
        source_entity_id=entity_id_resolved,
        source_namespace=source_namespace,
    )
    sources_cited.append("agent-inference: translation decisions [SOURCE: agent-inference]")

    # ── Build output ──────────────────────────────────────────────────────
    # NOTE(review): root_names is unused in the output build — candidate for removal.
    root_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
    src_root = source_struct["scale"]["root_note"]

    return {
        "source": {
            "namespace": source_namespace,
            "entity_id": entity_id_resolved,
            "bpm": source_struct["bpm"],
            "scale": source_struct["scale"],
            "tracks_summary": source_struct["tracks_summary"],
        },
        "target": {
            # Target fields default to the corresponding source values.
            "bpm": target_bpm if target_bpm is not None else source_struct["bpm"],
            "scale": {
                "root_note": target_scale_root if target_scale_root is not None else src_root,
                "name": target_scale_name or source_struct["scale"]["name"],
            },
            "aesthetic": target_aesthetic,
        },
        "translation_plan": translation_plan,
        "reasoning_artifact": reasoning,
        "warnings": warnings,
        "sources": sources_cited,
    }
1158
+
1159
+
1160
+ def _extract_preset_structure(sidecar: dict) -> dict:
1161
+ """Extract minimal structure from a preset sidecar (no BPM/scale available)."""
1162
+ macros = sidecar.get("macros") or []
1163
+ named_macros = [
1164
+ m.get("name", "")
1165
+ for m in macros
1166
+ if m.get("name") and not m["name"].startswith("Macro ")
1167
+ ]
1168
+ return {
1169
+ "bpm": 120.0,
1170
+ "scale": {"root_note": 0, "name": "Major"},
1171
+ "tracks_summary": [sidecar.get("name", "preset")],
1172
+ "device_inventory": sidecar.get("device_summary") or [],
1173
+ "track_count": 1,
1174
+ "scene_count": 0,
1175
+ "return_tracks": [],
1176
+ "named_macros": named_macros,
1177
+ }