livepilot 1.23.2 → 1.23.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +124 -0
  2. package/README.md +108 -10
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +39 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/atlas/cross_pack_chain.py +658 -0
  7. package/mcp_server/atlas/demo_story.py +700 -0
  8. package/mcp_server/atlas/extract_chain.py +786 -0
  9. package/mcp_server/atlas/macro_fingerprint.py +554 -0
  10. package/mcp_server/atlas/overlays.py +95 -3
  11. package/mcp_server/atlas/pack_aware_compose.py +1255 -0
  12. package/mcp_server/atlas/preset_resolver.py +238 -0
  13. package/mcp_server/atlas/tools.py +1001 -31
  14. package/mcp_server/atlas/transplant.py +1177 -0
  15. package/mcp_server/mix_engine/state_builder.py +44 -1
  16. package/mcp_server/runtime/capability_state.py +34 -3
  17. package/mcp_server/runtime/remote_commands.py +10 -0
  18. package/mcp_server/server.py +45 -24
  19. package/mcp_server/tools/agent_os.py +33 -9
  20. package/mcp_server/tools/analyzer.py +84 -23
  21. package/mcp_server/tools/browser.py +20 -1
  22. package/mcp_server/tools/devices.py +78 -11
  23. package/mcp_server/tools/perception.py +5 -1
  24. package/mcp_server/tools/tracks.py +39 -2
  25. package/mcp_server/user_corpus/__init__.py +48 -0
  26. package/mcp_server/user_corpus/manifest.py +142 -0
  27. package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
  28. package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
  29. package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
  30. package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
  31. package/mcp_server/user_corpus/runner.py +261 -0
  32. package/mcp_server/user_corpus/scanner.py +115 -0
  33. package/mcp_server/user_corpus/scanners/__init__.py +18 -0
  34. package/mcp_server/user_corpus/scanners/adg.py +79 -0
  35. package/mcp_server/user_corpus/scanners/als.py +144 -0
  36. package/mcp_server/user_corpus/scanners/amxd.py +374 -0
  37. package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
  38. package/mcp_server/user_corpus/tools.py +904 -0
  39. package/mcp_server/user_corpus/wizard.py +224 -0
  40. package/package.json +2 -2
  41. package/remote_script/LivePilot/__init__.py +1 -1
  42. package/remote_script/LivePilot/browser.py +7 -2
  43. package/remote_script/LivePilot/devices.py +9 -0
  44. package/remote_script/LivePilot/simpler_sample.py +98 -0
  45. package/requirements.txt +3 -3
  46. package/server.json +2 -2
package/mcp_server/atlas/demo_story.py
@@ -0,0 +1,700 @@
+ """Pack-Atlas Phase E — Demo Story.
+ 
+ Synthesizes a track-by-track narrative + production-sequence inference for a
+ demo .als sidecar. Turns the 104 parsed demo files into interactive learning
+ artifacts. All data from local JSON sidecars — no Live connection required.
+ 
+ Real sidecar schema (from Phase C appendix, 2026-04-27):
+     Demo sidecars (~/.livepilot/atlas-overlays/packs/_demo_parses/<slug>.json):
+         top-level: {file, name, bpm, time_signature, scale, tracks, scenes}
+         scale: {root_note: str, name: str} — root_note is a STRING, cast to int
+         bpm: float
+         tracks[*]: {name, type, id, device_count, devices[{class, user_name,
+                     params, macros}], routing}
+         devices[*].macros: [{index, value}] — NO name field in demo macros
+         ReturnTrack type: "ReturnTrack"
+         GroupTrack type: "GroupTrack"
+ 
+ Demo macro entries have no names — macro names live in preset sidecars only.
+ """
+ 
+ from __future__ import annotations
+ 
+ import re
+ from functools import lru_cache
+ from typing import Any
+ 
+ # Re-use path constants and loading helpers from Phase C — no duplication.
+ from .transplant import (
+     DEMO_PARSES_ROOT,
+     _load_demo_sidecar,
+     _resolve_demo_slug,
+     _PRODUCER_ANCHORS,
+     _detect_producer_anchor,
+ )
+ 
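For orientation, a minimal sidecar shaped like the schema notes in the module docstring above might look like the sketch below. The values are illustrative only; the track and device names reuse examples that appear elsewhere in this file.

# Illustrative sidecar shape only; all values are hypothetical, following the docstring schema.
# Note that root_note arrives as a string and demo macro entries carry index/value but no name.
EXAMPLE_SIDECAR = {
    "file": "Earth.als",
    "name": "Earth",
    "bpm": 110.0,
    "time_signature": "4/4",
    "scale": {"root_note": "9", "name": "Minor"},
    "tracks": [
        {
            "name": "1-Pioneer Drone",
            "type": "MidiTrack",
            "id": 12,
            "device_count": 1,
            "devices": [
                {
                    "class": "InstrumentGroupDevice",
                    "user_name": "Saturn Ascends",
                    "params": {},
                    "macros": [{"index": 1, "value": "63.0"}],
                }
            ],
            "routing": {},
        }
    ],
    "scenes": [],
}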
+ # ─── Track-role classifier ────────────────────────────────────────────────────
+ 
+ # Device classes that indicate rhythmic content
+ _DRUM_CLASSES = {"DrumGroupDevice", "DrumRack"}
+ _DRUM_NAME_RE = re.compile(r"\b(drum|kick|snare|hat|perc|beat|rhythm|kit)\b", re.IGNORECASE)
+ 
+ # Device classes that are clearly utility/spatial (not harmonic content)
+ _SPATIAL_CLASSES = {"Reverb", "Delay", "Echo", "Chorus", "Phaser", "Flanger"}
+ _FX_BUS_CLASSES = {"Limiter", "Compressor2", "EQ8", "EquallyLoud", "GlueCompressor"}
+ 
+ # GroupDevice rack classes
+ _INSTRUMENT_GROUP = "InstrumentGroupDevice"
+ _AUDIO_EFFECT_GROUP = "AudioEffectGroupDevice"
+ _MIDI_EFFECT_CLASSES = {"MidiRandom", "MidiArpeggiator", "MidiChord", "MidiPitcher",
+                         "MidiScale", "MidiVelocity"}
+ 
+ 
+ def _count_nonzero_macros(devices: list[dict]) -> int:
+     """Count total non-zero macro values across all devices on a track."""
+     total = 0
+     for dev in devices:
+         for m in dev.get("macros") or []:
+             try:
+                 if float(str(m.get("value", "0"))) != 0.0:
+                     total += 1
+             except (ValueError, TypeError):
+                 pass
+     return total
+ 
+ 
+ def _classify_track_role(track: dict, all_tracks: list[dict]) -> str:
+     """Classify a track into one of six semantic roles.
+ 
+     Returns one of:
+         "harmonic-foundation" — primary harmonic/melodic source (most devices,
+                                 instrument rack, or lead MIDI track)
+         "rhythmic-driver"     — drum rack or percussion-named track
+         "texture"             — additional MIDI instrument layers (no drums)
+         "spatial-glue"        — return/send tracks with reverb/delay
+         "fx-bus"              — group/return tracks with bus processing
+         "decoration"          — audio tracks with no MIDI clips, or minimal devices
+     """
+     t_type = track.get("type", "")
+     t_name = track.get("name", "")
+     devices = track.get("devices") or []
+     device_count = track.get("device_count", 0)
+ 
+     # Return tracks → spatial-glue or fx-bus
+     if t_type == "ReturnTrack":
+         device_classes = [d.get("class", "") for d in devices]
+         if any(c in _SPATIAL_CLASSES for c in device_classes):
+             return "spatial-glue"
+         return "fx-bus"
+ 
+     # Group tracks with only audio-effect rack → fx-bus
+     if t_type == "GroupTrack":
+         device_classes = [d.get("class", "") for d in devices]
+         if all(c in {_AUDIO_EFFECT_GROUP} | _FX_BUS_CLASSES | _SPATIAL_CLASSES
+                for c in device_classes if c):
+             return "fx-bus"
+         return "texture"
+ 
+     # Check device classes
+     device_classes = [d.get("class", "") for d in devices]
+ 
+     # Drum rack → rhythmic-driver
+     if any(c in _DRUM_CLASSES for c in device_classes):
+         return "rhythmic-driver"
+ 
+     # Name-based drum detection
+     if _DRUM_NAME_RE.search(t_name):
+         return "rhythmic-driver"
+ 
+     # Audio track with no devices → decoration
+     if t_type == "AudioTrack" and device_count == 0:
+         return "decoration"
+ 
+     # Audio track with only effects, no instruments → decoration
+     if t_type == "AudioTrack":
+         has_instrument = any(
+             c in (_INSTRUMENT_GROUP, "Simpler", "Sampler", "Operator",
+                   "Drift", "Wavetable", "AnalogSynth", "Tension", "Electric",
+                   "Collision", "Mallet", "DrumGroupDevice")
+             for c in device_classes
+         )
+         if not has_instrument:
+             return "decoration"
+ 
+     # MIDI track with instrument rack — figure out harmonic vs texture
+     # Strategy: among all non-return, non-drum MIDI tracks, the one with
+     # the most devices (and/or the first/longest named track) is harmonic-foundation.
+     midi_instrument_tracks = [
+         t for t in all_tracks
+         if t.get("type") in ("MidiTrack", "GroupTrack")
+         and t.get("type") != "ReturnTrack"
+         and not _DRUM_NAME_RE.search(t.get("name", ""))
+         and any(
+             d.get("class", "") not in _DRUM_CLASSES
+             and d.get("class", "") not in _MIDI_EFFECT_CLASSES
+             for d in (t.get("devices") or [])
+         )
+     ]
+ 
+     if not midi_instrument_tracks:
+         return "texture"
+ 
+     # First numbered track (e.g. "1-Pioneer Drone") is usually harmonic spine
+     # Track with highest device_count is usually harmonic foundation
+     max_devices = max(
+         (t.get("device_count", 0) for t in midi_instrument_tracks),
+         default=0,
+     )
+ 
+     # Check for numbered prefix — "1-" is harmonic anchor
+     numbered_first = None
+     for t in midi_instrument_tracks:
+         if re.match(r"^1[-\s]", t.get("name", "")):
+             numbered_first = t
+             break
+ 
+     # This track is harmonic-foundation if:
+     #   - it's the numbered-first track, OR
+     #   - it has the most devices among non-drum MIDI tracks
+     if numbered_first and track.get("name") == numbered_first.get("name"):
+         return "harmonic-foundation"
+     if (not numbered_first
+             and track.get("device_count", 0) == max_devices
+             and max_devices > 0):
+         return "harmonic-foundation"
+ 
+     return "texture"
+ 
+ 
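As a quick sanity check of the heuristics above, here is a sketch with three hypothetical track dicts (only the fields the classifier reads are filled in) and the roles the rules above would assign.

# Hypothetical track dicts following the sidecar schema; only classifier-relevant fields set.
_toy_tracks = [
    {"name": "1-Pioneer Drone", "type": "MidiTrack", "device_count": 2,
     "devices": [{"class": "InstrumentGroupDevice", "macros": []},
                 {"class": "Delay", "macros": []}]},
    {"name": "Kit Percussion", "type": "MidiTrack", "device_count": 1,
     "devices": [{"class": "DrumGroupDevice", "macros": []}]},
    {"name": "A-Reverb", "type": "ReturnTrack", "device_count": 1,
     "devices": [{"class": "Reverb", "macros": []}]},
]
# Expected roles under the rules above:
#   "1-Pioneer Drone" → "harmonic-foundation" (numbered "1-" prefix, instrument rack)
#   "Kit Percussion"  → "rhythmic-driver"     (DrumGroupDevice, plus the drum-name regex)
#   "A-Reverb"        → "spatial-glue"        (ReturnTrack carrying a Reverb)
# e.g. _classify_track_role(_toy_tracks[0], _toy_tracks) == "harmonic-foundation"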
+ # ─── Macro signature builder ──────────────────────────────────────────────────
+ 
+ def _extract_macro_signature(track: dict) -> str:
+     """Build a human-readable macro signature for a demo track.
+ 
+     Demo macros lack names (per Phase C appendix — names only exist in preset
+     sidecars). We report the structural commitment pattern:
+       - device class + user_name
+       - how many non-zero macros are committed
+       - top 3 non-zero macro index/value pairs
+ 
+     Returns a concise prose string or empty string if no devices.
+     """
+     devices = track.get("devices") or []
+     if not devices:
+         return ""
+ 
+     parts = []
+     for dev in devices:
+         cls = dev.get("class", "")
+         uname = dev.get("user_name") or ""
+         label = uname or cls or "device"
+ 
+         macros = dev.get("macros") or []
+         nonzero = [
+             m for m in macros
+             if _safe_float(m.get("value", "0")) != 0.0
+         ]
+ 
+         if nonzero:
+             top3 = nonzero[:3]
+             vals = ", ".join(
+                 f"M{m['index']}={int(round(_safe_float(m.get('value', '0'))))}"
+                 for m in top3
+             )
+             if len(nonzero) > 3:
+                 vals += f" (+ {len(nonzero) - 3} more)"
+             parts.append(f"{label}: {len(nonzero)} macros committed ({vals})")
+         else:
+             parts.append(f"{label}: all macros at default")
+ 
+     return "; ".join(parts)
+ 
+ 
+ def _safe_float(v: Any) -> float:
+     try:
+         return float(str(v))
+     except (ValueError, TypeError):
+         return 0.0
+ 
+ 
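A small sketch of the signature string this helper would produce for a hypothetical device entry shaped like the demo macros described above.

# Hypothetical device entry; demo macros carry index/value pairs but no names.
_toy_track = {
    "devices": [
        {"class": "InstrumentGroupDevice", "user_name": "Saturn Ascends",
         "macros": [{"index": 1, "value": "63.0"},
                    {"index": 2, "value": "0.0"},
                    {"index": 5, "value": "90.2"}]},
    ]
}
# _extract_macro_signature(_toy_track) would read:
#   "Saturn Ascends: 2 macros committed (M1=63, M5=90)"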
+ # ─── Production sequence inference ───────────────────────────────────────────
+ 
+ def _infer_production_sequence(tracks: list[dict]) -> list[str]:
+     """Infer the likely creation order + production decisions as narrative steps.
+ 
+     Heuristics (per spec §E algorithm):
+       1. Numbered prefix tracks ("1-", "2-") → creation order follows numbering
+       2. Non-numbered tracks → assumed added in list order
+       3. Return tracks → created after the tracks that send to them
+       4. Tracks with many non-zero macros → "producer settled on specific values"
+     """
+     steps: list[str] = []
+ 
+     regular = [t for t in tracks if t.get("type") not in ("ReturnTrack",)]
+     returns = [t for t in tracks if t.get("type") == "ReturnTrack"]
+ 
+     # Sort by numeric prefix if present
+     def sort_key(t: dict) -> tuple[int, str]:
+         m = re.match(r"^(\d+)[-\s]", t.get("name", ""))
+         return (int(m.group(1)), t["name"]) if m else (999, t.get("name", ""))
+ 
+     ordered = sorted(regular, key=sort_key)
+ 
+     step_idx = 1
+     for t in ordered:
+         t_name = t.get("name", "Unknown")
+         device_count = t.get("device_count", 0)
+         devices = t.get("devices") or []
+         cls_list = [d.get("class", "") for d in devices]
+         uname_list = [d.get("user_name") or "" for d in devices]
+ 
+         primary_dev = (uname_list[0] or cls_list[0]) if devices else "device"
+         nonzero = _count_nonzero_macros(devices)
+ 
+         if device_count == 0:
+             steps.append(
+                 f"Step {step_idx}: '{t_name}' — audio source "
+                 f"(no devices; clip-based content)"
+             )
+         elif any(c in _DRUM_CLASSES for c in cls_list):
+             steps.append(
+                 f"Step {step_idx}: '{t_name}' — rhythmic foundation "
+                 f"via {primary_dev}"
+             )
+         else:
+             commit_note = (
+                 f" — {nonzero} macro(s) committed from default"
+                 if nonzero else " — macros at default (exploratory preset)"
+             )
+             steps.append(
+                 f"Step {step_idx}: '{t_name}' — {primary_dev}{commit_note}"
+             )
+         step_idx += 1
+ 
+     if returns:
+         return_names = ", ".join(t.get("name", "Return") for t in returns)
+         steps.append(
+             f"Step {step_idx}: Return tracks wired ({return_names}) — "
+             "shared spatial processing applied across sends"
+         )
+ 
+     # Global macro note
+     macro_committed = sum(
+         _count_nonzero_macros(t.get("devices") or []) for t in regular
+     )
+     if macro_committed > 0:
+         steps.append(
+             f"Macro note: {macro_committed} total non-default macro values "
+             "across tracks — these encode the producer's committed artistic decisions."
+         )
+ 
+     return steps
+ 
+ 
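The numeric-prefix ordering used by sort_key above can be illustrated with a standalone copy of the same rule, applied to hypothetical track names.

import re

def _toy_sort_key(name: str) -> tuple[int, str]:
    # Same prefix rule as sort_key above: "1-", "2 " etc. sort first; 999 = unnumbered.
    m = re.match(r"^(\d+)[-\s]", name)
    return (int(m.group(1)), name) if m else (999, name)

# Numbered tracks come first in prefix order, then the rest in name order:
# ['1-Pioneer Drone', '2 Pad Layer', 'Field Recording']
print(sorted(["Field Recording", "2 Pad Layer", "1-Pioneer Drone"], key=_toy_sort_key))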
+ # ─── Learning path ────────────────────────────────────────────────────────────
+ 
+ def _suggest_learning_path(track_breakdown: list[dict]) -> list[str]:
+     """Build a solo-each-then-add learning sequence.
+ 
+     Start with harmonic-foundation, add layers in production order,
+     end with spatial-glue / fx-bus returns.
+     """
+     path: list[str] = []
+ 
+     # Separate by role priority
+     foundation = [t for t in track_breakdown if t["role"] == "harmonic-foundation"]
+     rhythmic = [t for t in track_breakdown if t["role"] == "rhythmic-driver"]
+     textures = [t for t in track_breakdown if t["role"] == "texture"]
+     decoration = [t for t in track_breakdown if t["role"] == "decoration"]
+     spatial = [t for t in track_breakdown
+                if t["role"] in ("spatial-glue", "fx-bus")]
+ 
+     # Build ordered sequence
+     ordered = foundation + rhythmic + textures + decoration
+ 
+     if not ordered:
+         return ["Open the demo and listen through once before deconstructing."]
+ 
+     # Step 1: mute everything, listen to foundation solo
+     first = ordered[0]
+     path.append(
+         f"Open the demo. Mute every track except '{first['name']}'. "
+         f"Listen: this is the {first['role']} — {first['device_chain_summary']}."
+     )
+ 
+     # Add layers one by one
+     cumulative = [first["name"]]
+     for t in ordered[1:]:
+         unmuted = "' + '".join(cumulative)
+         path.append(
+             f"Unmute '{t['name']}' (keep '{unmuted}' playing). "
+             f"Role: {t['role']}. "
+             f"Notice: {t['device_chain_summary']}."
+         )
+         cumulative.append(t["name"])
+ 
+     # Final: add returns
+     if spatial:
+         return_names = "' and '".join(t["name"] for t in spatial)
+         path.append(
+             f"Enable the return tracks ('{return_names}'). "
+             "Hear how shared spatial processing glues the layers together."
+         )
+ 
+     path.append(
+         "Rebuild the sequence from scratch in a new set — this order "
+         "is the producer's creation path."
+     )
+ 
+     return path
+ 
+ 
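A compact sketch of the ordering this helper applies, using hypothetical breakdown rows (only the fields the function reads are filled in).

# Hypothetical track_breakdown rows; only "name", "role", "device_chain_summary" are read.
_toy_breakdown = [
    {"name": "A-Reverb", "role": "spatial-glue", "device_chain_summary": "Reverb"},
    {"name": "Kit", "role": "rhythmic-driver", "device_chain_summary": "DrumGroupDevice"},
    {"name": "1-Pioneer Drone", "role": "harmonic-foundation",
     "device_chain_summary": "InstrumentGroupDevice (Saturn Ascends)"},
]
# _suggest_learning_path(_toy_breakdown) starts with the foundation solo
# ("Mute every track except '1-Pioneer Drone' ..."), then unmutes 'Kit',
# then enables the 'A-Reverb' return, and ends with the rebuild prompt.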
+ # ─── Chain summary builder ────────────────────────────────────────────────────
+ 
+ _MAX_CHAIN_DEPTH = 4  # matches transplant and extract_chain caps
+ 
+ 
+ def _build_chain_summary(devices: list[dict], _depth: int = 0) -> str:
+     """Build a concise device-chain string from a track's device list.
+ 
+     BUG-INT#2 fix: recurses into dev.chains (Schema A — nested) so that inner
+     rack devices (e.g. InstrumentVector, Pedal, Erosion, Limiter inside an
+     InstrumentGroupDevice) appear in the chain summary rather than being hidden
+     behind the rack's top-level class name.
+ 
+     Returns e.g.:
+         "InstrumentGroupDevice (Saturn Ascends) [InstrumentVector → Pedal → Erosion → Limiter] → Delay"
+     """
+     if not devices:
+         return "(no devices)"
+ 
+     parts = []
+     for dev in devices:
+         cls = dev.get("class", "")
+         uname = dev.get("user_name") or ""
+         if uname and uname != cls:
+             label = f"{cls} ({uname})"
+         elif uname:
+             label = uname
+         elif cls:
+             label = cls
+         else:
+             label = ""
+ 
+         # Recurse into inner chains if present and within depth cap
+         inner_parts: list[str] = []
+         if _depth < _MAX_CHAIN_DEPTH:
+             for chain in dev.get("chains") or []:
+                 for inner_dev in chain.get("devices") or []:
+                     inner_cls = inner_dev.get("class", "")
+                     inner_uname = inner_dev.get("user_name") or ""
+                     if inner_uname and inner_uname != inner_cls:
+                         inner_parts.append(f"{inner_cls} ({inner_uname})")
+                     elif inner_uname:
+                         inner_parts.append(inner_uname)
+                     elif inner_cls:
+                         inner_parts.append(inner_cls)
+                     # One more level of nesting (depth+2) for racks inside racks
+                     if _depth + 1 < _MAX_CHAIN_DEPTH:
+                         for sub_chain in inner_dev.get("chains") or []:
+                             for sub_dev in sub_chain.get("devices") or []:
+                                 sub_cls = sub_dev.get("class", "") or ""
+                                 sub_uname = sub_dev.get("user_name") or ""
+                                 inner_parts.append(sub_uname if sub_uname else sub_cls)
+ 
+         if inner_parts:
+             label = f"{label} [{' → '.join(inner_parts)}]"
+ 
+         if label:
+             parts.append(label)
+ 
+     return " → ".join(parts) if parts else "(empty chain)"
+ 
+ 
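A concrete sketch of the nested walk above, using a hypothetical rack shaped like the Schema A chains the docstring refers to; the expected output matches the docstring's example string.

# Hypothetical nested-rack device list (Schema A: dev["chains"][*]["devices"]).
_toy_devices = [
    {"class": "InstrumentGroupDevice", "user_name": "Saturn Ascends",
     "chains": [{"devices": [{"class": "InstrumentVector"},
                             {"class": "Pedal"},
                             {"class": "Erosion"},
                             {"class": "Limiter"}]}]},
    {"class": "Delay"},
]
# _build_chain_summary(_toy_devices) →
# "InstrumentGroupDevice (Saturn Ascends) [InstrumentVector → Pedal → Erosion → Limiter] → Delay"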
+ # ─── Narrative synthesis ──────────────────────────────────────────────────────
+ 
+ _ROOT_NAMES = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
+ 
+ 
+ def _synthesize_narrative(
+     demo_meta: dict,
+     track_breakdown: list[dict],
+     prod_sequence: list[str],
+     depth: str,
+ ) -> str:
+     """Generate prose narrative about the demo.
+ 
+     depth: "terse" | "standard" | "verbose"
+     """
+     name = demo_meta.get("name", "Demo")
+     bpm = demo_meta.get("bpm", 0)
+     scale = demo_meta.get("scale", "Unknown")
+     track_count = demo_meta.get("track_count", 0)
+     scene_count = demo_meta.get("scene_count", 0)
+ 
+     # Detect producer anchor from entity_id
+     entity_id = demo_meta.get("entity_id", "")
+     producer_anchor = _detect_producer_anchor(entity_id, "", "packs")
+ 
+     # Summarise roles
+     role_counts: dict[str, int] = {}
+     for t in track_breakdown:
+         role_counts[t["role"]] = role_counts.get(t["role"], 0) + 1
+ 
+     foundation_tracks = [t for t in track_breakdown if t["role"] == "harmonic-foundation"]
+     rhythmic_tracks = [t for t in track_breakdown if t["role"] == "rhythmic-driver"]
+     return_tracks = [t for t in track_breakdown if t["role"] in ("spatial-glue", "fx-bus")]
+ 
+     if depth == "terse":
+         lines = [
+             f"{name} — {bpm:.0f} BPM, {scale}, {track_count} tracks.",
+         ]
+         if producer_anchor:
+             lines.append(producer_anchor)
+         if foundation_tracks:
+             lines.append(
+                 f"Harmonic spine: {foundation_tracks[0]['name']} "
+                 f"({foundation_tracks[0]['device_chain_summary']})."
+             )
+         if rhythmic_tracks:
+             lines.append(
+                 f"Rhythm: {rhythmic_tracks[0]['name']}."
+             )
+         lines.append(f"{len(prod_sequence)} production steps inferred. [SOURCE: agent-inference]")
+         return " ".join(lines)
+ 
+     if depth == "standard":
+         intro = (
+             f"'{name}' is a {bpm:.0f} BPM demo in {scale} with "
+             f"{track_count} track{'s' if track_count != 1 else ''} "
+             f"and {scene_count} scene{'s' if scene_count != 1 else ''}. "
+         )
+         if producer_anchor:
+             intro += producer_anchor + " "
+         if foundation_tracks:
+             intro += (
+                 f"The harmonic spine is '{foundation_tracks[0]['name']}' "
+                 f"({foundation_tracks[0]['device_chain_summary']}). "
+             )
+         texture_count = role_counts.get("texture", 0)
+         deco_count = role_counts.get("decoration", 0)
+         if texture_count:
+             intro += f"{texture_count} texture layer(s) build over the foundation. "
+         if rhythmic_tracks:
+             intro += (
+                 f"Rhythm comes from '{rhythmic_tracks[0]['name']}' "
+                 f"({rhythmic_tracks[0]['device_chain_summary']}). "
+             )
+         if return_tracks:
+             return_names = ", ".join(f"'{t['name']}'" for t in return_tracks)
+             intro += f"Spatial cohesion via return tracks: {return_names}. "
+         intro += "[SOURCE: als-parse] [SOURCE: agent-inference]"
+         return intro.strip()
+ 
+     # verbose
+     lines = [
+         f"## Demo Analysis: {name}",
+         f"",
+         f"**{bpm:.0f} BPM · {scale} · {track_count} tracks · "
+         f"{scene_count} scene{'s' if scene_count != 1 else ''}**",
+         f"",
+     ]
+     if producer_anchor:
+         lines += [
+             "### Producer Vocabulary",
+             "",
+             producer_anchor,
+             "",
+         ]
+ 
+     lines += ["### Track Architecture", ""]
+     for t in track_breakdown:
+         lines.append(
+             f"- **{t['name']}** — *{t['role']}*: "
+             f"{t['device_chain_summary']}. "
+             f"{t.get('macro_signature', '')}"
+         )
+ 
+     lines += ["", "### Inferred Production Sequence", ""]
+     for step in prod_sequence:
+         lines.append(f"- {step}")
+ 
+     lines += ["", f"*[SOURCE: als-parse] [SOURCE: agent-inference]*"]
+     return "\n".join(lines)
+ 
+ 
+ # ─── Main entry point ─────────────────────────────────────────────────────────
+ 
+ def demo_story(
+     demo_entity_id: str,
+     focus_tracks: list[str] | None = None,
+     detail_level: str = "standard",
+ ) -> dict:
+     """Generate track-by-track narrative + production-sequence for a demo.
+ 
+     Called by the MCP tool wrapper in tools.py. Separated for direct unit tests.
+ 
+     Parameters
+     ----------
+     demo_entity_id : str
+         Entity ID, e.g. "drone_lab__earth" or "drone-lab__earth".
+     focus_tracks : list[str] | None
+         Optional list of track names to include in breakdown (others omitted).
+         None = include all tracks.
+     detail_level : str
+         "terse" | "standard" | "verbose" — controls narrative verbosity.
+ 
+     Returns
+     -------
+     dict matching the spec return shape.
+     """
+     # ── 1. Load sidecar ───────────────────────────────────────────────────────
+     sidecar = _load_demo_sidecar(demo_entity_id)
+     if sidecar is None:
+         # Enumerate real demo IDs from disk for a helpful error message
+         _available: list[str] = []
+         try:
+             import pathlib
+             _demo_root = pathlib.Path(DEMO_PARSES_ROOT)
+             if _demo_root.exists():
+                 _jsons = sorted(_demo_root.glob("*.json"))
+                 _available = [p.stem for p in _jsons[:10]]
+         except Exception:
+             pass
+         return {
+             "error": (
+                 f"Demo sidecar not found for entity_id='{demo_entity_id}'. "
+                 "Check that the pack + demo slug exist under "
+                 f"~/.livepilot/atlas-overlays/packs/_demo_parses/."
+             ),
+             "entity_id": demo_entity_id,
+             "available_demos": _available,
+             "sources": [],
+         }
+ 
+     sidecar_path = _resolve_demo_slug(demo_entity_id)
+ 
+     # ── 2. Extract metadata ───────────────────────────────────────────────────
+     bpm = float(sidecar.get("bpm") or 120.0)
+     scale_raw = sidecar.get("scale") or {}
+     try:
+         root_note_int = int(str(scale_raw.get("root_note", "0")))
+     except (ValueError, TypeError):
+         root_note_int = 0
+     scale_name = scale_raw.get("name", "Major") or "Major"
+     scale_str = f"{_ROOT_NAMES[root_note_int % 12]} {scale_name}"
+ 
+     all_tracks = sidecar.get("tracks") or []
+     scenes = sidecar.get("scenes") or []
+ 
+     # ── 3. Build per-track breakdown ─────────────────────────────────────────
+     track_breakdown: list[dict] = []
+     for t in all_tracks:
+         t_name = t.get("name", "Unnamed")
+         t_type = t.get("type", "")
+ 
+         # Focus filter — substring match on any token
+         if focus_tracks and not any(tok.lower() in t_name.lower() for tok in focus_tracks):
+             continue
+ 
+         devices = t.get("devices") or []
+         role = _classify_track_role(t, all_tracks)
+         chain_summary = _build_chain_summary(devices)
+         macro_sig = _extract_macro_signature(t)
+ 
+         # Production decision prose
+         primary_device = (devices[0] if devices else {})
+         primary_cls = primary_device.get("class", "")
+         primary_uname = primary_device.get("user_name") or ""
+         nonzero = _count_nonzero_macros(devices)
+ 
+         if t_type == "ReturnTrack":
+             prod_decision = (
+                 f"{primary_cls} shared return — applies uniformly across all sends."
+             )
+         elif role == "harmonic-foundation":
+             prod_decision = (
+                 f"{primary_uname or primary_cls} chosen as harmonic spine; "
+                 f"{nonzero} macro(s) committed to specific values, "
+                 "suggesting deliberate timbral targeting."
+             )
+         elif role == "rhythmic-driver":
+             prod_decision = (
+                 f"Drum rack '{primary_uname or primary_cls}' provides rhythmic drive. "
+                 f"{nonzero} non-default macro values indicate custom tuning."
+             )
+         elif role == "texture":
+             prod_decision = (
+                 f"'{primary_uname or t_name}' adds textural density. "
+                 + (f"{nonzero} macro(s) dialed in." if nonzero else
+                    "Macros at default — exploratory layer.")
+             )
+         elif role == "decoration":
+             prod_decision = (
+                 "Audio source or effects-only layer — provides colour rather than melodic content."
+             )
+         else:
+             prod_decision = f"{role.replace('-', ' ').title()} role."
+ 
+         # Narrative role (how it fits the overall piece)
+         if role == "harmonic-foundation":
+             narrative_role = "carries the harmonic spine; all other layers decorate this"
+         elif role == "rhythmic-driver":
+             narrative_role = "provides rhythmic pulse and groove engine"
+         elif role == "texture":
+             narrative_role = "adds timbral complexity and spectral density"
+         elif role == "spatial-glue":
+             narrative_role = "shared reverb/delay — glues layers into a single acoustic space"
+         elif role == "fx-bus":
+             narrative_role = "bus processing — shapes the collective output of routed tracks"
+         elif role == "decoration":
+             narrative_role = "decorative layer — accent or atmospherics"
+         else:
+             narrative_role = role
+ 
+         track_breakdown.append({
+             "name": t_name,
+             "type": t_type,
+             "role": role,
+             "device_chain_summary": chain_summary,
+             "macro_signature": macro_sig,
+             "production_decision": prod_decision,
+             "narrative_role": narrative_role,
+         })
+ 
+     # ── 4. Infer production sequence ─────────────────────────────────────────
+     # Use all tracks (not just focus filtered) for sequence
+     prod_sequence = _infer_production_sequence(all_tracks)
+ 
+     # ── 5. Suggest learning path ──────────────────────────────────────────────
+     learning_path = _suggest_learning_path(track_breakdown)
+ 
+     # ── 6. Synthesize narrative ───────────────────────────────────────────────
+     demo_meta = {
+         "entity_id": demo_entity_id,
+         "name": sidecar.get("name", demo_entity_id),
+         "bpm": bpm,
+         "scale": scale_str,
+         "track_count": len(all_tracks),
+         "scene_count": len(scenes),
+     }
+     narrative = _synthesize_narrative(demo_meta, track_breakdown, prod_sequence, detail_level)
+ 
+     return {
+         "demo": {
+             "entity_id": demo_entity_id,
+             "name": sidecar.get("name", demo_entity_id),
+             "bpm": bpm,
+             "scale": scale_str,
+             "track_count": len(all_tracks),
+             "scene_count": len(scenes),
+         },
+         "narrative": narrative,
+         "track_breakdown": track_breakdown,
+         "production_sequence_inference": prod_sequence,
+         "suggested_learning_path": learning_path,
+         "sources": [
+             f"als-parse: {sidecar_path} [SOURCE: als-parse]",
+             "agent-inference: role classification + narrative synthesis [SOURCE: agent-inference]",
+         ],
+     }
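For reference, a minimal way to exercise this entry point directly. The entity ID is the docstring's own example, the import path assumes the mcp_server package root is importable, and a parsed sidecar must already exist on disk.

# Minimal direct-call sketch; "drone_lab__earth" is the docstring's example ID and assumes
# a parsed sidecar exists under ~/.livepilot/atlas-overlays/packs/_demo_parses/.
from mcp_server.atlas.demo_story import demo_story

result = demo_story("drone_lab__earth", detail_level="terse")
if "error" in result:
    # Missing sidecar: the error dict lists up to ten available demo slugs.
    print(result["error"], result.get("available_demos"))
else:
    print(result["narrative"])
    for step in result["production_sequence_inference"]:
        print("-", step)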