livepilot 1.23.2 → 1.23.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +124 -0
- package/README.md +108 -10
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +39 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/atlas/cross_pack_chain.py +658 -0
- package/mcp_server/atlas/demo_story.py +700 -0
- package/mcp_server/atlas/extract_chain.py +786 -0
- package/mcp_server/atlas/macro_fingerprint.py +554 -0
- package/mcp_server/atlas/overlays.py +95 -3
- package/mcp_server/atlas/pack_aware_compose.py +1255 -0
- package/mcp_server/atlas/preset_resolver.py +238 -0
- package/mcp_server/atlas/tools.py +1001 -31
- package/mcp_server/atlas/transplant.py +1177 -0
- package/mcp_server/mix_engine/state_builder.py +44 -1
- package/mcp_server/runtime/capability_state.py +34 -3
- package/mcp_server/runtime/remote_commands.py +10 -0
- package/mcp_server/server.py +45 -24
- package/mcp_server/tools/agent_os.py +33 -9
- package/mcp_server/tools/analyzer.py +84 -23
- package/mcp_server/tools/browser.py +20 -1
- package/mcp_server/tools/devices.py +78 -11
- package/mcp_server/tools/perception.py +5 -1
- package/mcp_server/tools/tracks.py +39 -2
- package/mcp_server/user_corpus/__init__.py +48 -0
- package/mcp_server/user_corpus/manifest.py +142 -0
- package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
- package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
- package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
- package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
- package/mcp_server/user_corpus/runner.py +261 -0
- package/mcp_server/user_corpus/scanner.py +115 -0
- package/mcp_server/user_corpus/scanners/__init__.py +18 -0
- package/mcp_server/user_corpus/scanners/adg.py +79 -0
- package/mcp_server/user_corpus/scanners/als.py +144 -0
- package/mcp_server/user_corpus/scanners/amxd.py +374 -0
- package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
- package/mcp_server/user_corpus/tools.py +904 -0
- package/mcp_server/user_corpus/wizard.py +224 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +7 -2
- package/remote_script/LivePilot/devices.py +9 -0
- package/remote_script/LivePilot/simpler_sample.py +98 -0
- package/requirements.txt +3 -3
- package/server.json +2 -2
|
@@ -36,34 +36,92 @@ def _get_atlas():
|
|
|
36
36
|
def atlas_search(ctx: Context, query: str, category: str = "all", limit: int = 10) -> dict:
|
|
37
37
|
"""Search the device atlas for instruments, effects, kits, or plugins.
|
|
38
38
|
|
|
39
|
+
Searches BOTH:
|
|
40
|
+
1. The bundled factory atlas (5,264 devices across 33 packs)
|
|
41
|
+
2. The user-local overlay corpus (~/.livepilot/atlas-overlays/) — including
|
|
42
|
+
user-scanned Max devices, racks, plugin presets, and AI-synthesized
|
|
43
|
+
plugin identity yamls. This is the wiring that lets LivePilot reason
|
|
44
|
+
over the user's PERSONAL library, not just Ableton's defaults.
|
|
45
|
+
|
|
39
46
|
query: natural language search — name, sonic character, use case, or genre
|
|
40
|
-
Examples: "warm analog bass", "reverb", "808 kit", "granular"
|
|
47
|
+
Examples: "warm analog bass", "reverb", "808 kit", "granular",
|
|
48
|
+
"my arpeggiator", "the polyrhythmic sequencer in my user library"
|
|
41
49
|
category: filter by category (all, instruments, audio_effects, midi_effects,
|
|
42
|
-
max_for_live, drum_kits, plugins)
|
|
43
|
-
|
|
50
|
+
max_for_live, drum_kits, plugins). For user-corpus content, pass
|
|
51
|
+
"all" — overlay entity_types are surfaced regardless of category.
|
|
52
|
+
limit: max combined results (default 10). Per-source limits are split
|
|
53
|
+
proportionally; factory + user content interleave by score.
|
|
44
54
|
"""
|
|
45
55
|
atlas = _get_atlas()
|
|
46
|
-
|
|
47
|
-
|
|
56
|
+
factory_results = []
|
|
57
|
+
if atlas is not None:
|
|
58
|
+
factory_results = atlas.search(query, category=category, limit=limit)
|
|
59
|
+
|
|
60
|
+
# Also search user-local overlay namespaces (v1.23.6+). All non-bundled
|
|
61
|
+
# namespaces (user, m4l-devices, elektron, etc.) get queried — the overlay
|
|
62
|
+
# system stores everything from corpus_scan + corpus_emit_synthesis_briefs +
|
|
63
|
+
# the v1.23.0 extension overlays here.
|
|
64
|
+
overlay_results = []
|
|
65
|
+
try:
|
|
66
|
+
from .overlays import get_overlay_index
|
|
67
|
+
idx = get_overlay_index()
|
|
68
|
+
# Search all non-`packs` namespaces; `packs` is already in the bundled atlas
|
|
69
|
+
for ns in idx.list_namespaces():
|
|
70
|
+
if ns == "packs":
|
|
71
|
+
continue
|
|
72
|
+
overlay_results.extend(idx.search(query, namespace=ns, limit=limit))
|
|
73
|
+
except Exception: # noqa: BLE001 — never fail atlas_search over an overlay glitch
|
|
74
|
+
pass
|
|
75
|
+
|
|
76
|
+
# Allocate the result budget so the user corpus actually surfaces alongside
|
|
77
|
+
# the factory atlas — split limit roughly 50/50 when both sources have hits.
|
|
78
|
+
# If only one source has results, it gets the full limit.
|
|
79
|
+
has_factory = len(factory_results) > 0
|
|
80
|
+
has_overlay = len(overlay_results) > 0
|
|
81
|
+
if has_factory and has_overlay:
|
|
82
|
+
factory_budget = (limit + 1) // 2 # rounds up — factory gets the extra slot
|
|
83
|
+
overlay_budget = limit // 2
|
|
84
|
+
elif has_factory:
|
|
85
|
+
factory_budget = limit
|
|
86
|
+
overlay_budget = 0
|
|
87
|
+
else:
|
|
88
|
+
factory_budget = 0
|
|
89
|
+
overlay_budget = limit
|
|
90
|
+
|
|
91
|
+
results: list[dict] = [
|
|
92
|
+
{
|
|
93
|
+
"id": r["device"].get("id", ""),
|
|
94
|
+
"name": r["device"].get("name", ""),
|
|
95
|
+
"uri": r["device"].get("uri", ""),
|
|
96
|
+
"category": r["device"].get("category", ""),
|
|
97
|
+
"sonic_description": r["device"].get("sonic_description", "")[:120],
|
|
98
|
+
"character_tags": r["device"].get("character_tags", [])[:5],
|
|
99
|
+
"enriched": r["device"].get("enriched", False),
|
|
100
|
+
"score": r.get("score", 0),
|
|
101
|
+
"source": "factory_atlas",
|
|
102
|
+
}
|
|
103
|
+
for r in factory_results[:factory_budget]
|
|
104
|
+
]
|
|
105
|
+
for entry in overlay_results[:overlay_budget]:
|
|
106
|
+
results.append({
|
|
107
|
+
"id": entry.entity_id,
|
|
108
|
+
"name": entry.name,
|
|
109
|
+
"uri": "", # overlay entries don't have Live browser URIs — caller resolves via search_browser
|
|
110
|
+
"category": entry.entity_type,
|
|
111
|
+
"sonic_description": (entry.description or "")[:120],
|
|
112
|
+
"character_tags": list(entry.tags)[:5],
|
|
113
|
+
"enriched": True,
|
|
114
|
+
"score": 0, # overlay search has its own ranking; surfaced with no factory-comparable score
|
|
115
|
+
"source": f"user_overlay:{entry.namespace}",
|
|
116
|
+
})
|
|
48
117
|
|
|
49
|
-
results = atlas.search(query, category=category, limit=limit)
|
|
50
118
|
return {
|
|
51
119
|
"query": query,
|
|
52
120
|
"category": category,
|
|
53
121
|
"count": len(results),
|
|
54
|
-
"
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
"name": r["device"].get("name", ""),
|
|
58
|
-
"uri": r["device"].get("uri", ""),
|
|
59
|
-
"category": r["device"].get("category", ""),
|
|
60
|
-
"sonic_description": r["device"].get("sonic_description", "")[:120],
|
|
61
|
-
"character_tags": r["device"].get("character_tags", [])[:5],
|
|
62
|
-
"enriched": r["device"].get("enriched", False),
|
|
63
|
-
"score": r.get("score", 0),
|
|
64
|
-
}
|
|
65
|
-
for r in results
|
|
66
|
-
],
|
|
122
|
+
"factory_count": len(factory_results),
|
|
123
|
+
"overlay_count": len(overlay_results),
|
|
124
|
+
"results": results,
|
|
67
125
|
}
|
|
68
126
|
|
|
69
127
|
|
|
@@ -124,6 +182,10 @@ def atlas_suggest(
|
|
|
124
182
|
def atlas_chain_suggest(ctx: Context, role: str, genre: str = "") -> dict:
|
|
125
183
|
"""Suggest a full device chain for a track role.
|
|
126
184
|
|
|
185
|
+
Searches BOTH the bundled factory atlas AND user-local overlay namespaces
|
|
186
|
+
(e.g., m4l-devices, elektron, user). User-corpus devices (PEACH, Particle-Reverb,
|
|
187
|
+
te.drone, etc.) are surfaced when their tags match the role+genre keywords.
|
|
188
|
+
|
|
127
189
|
role: the musical role — "bass", "lead", "pad", "drums", "percussion", "texture"
|
|
128
190
|
genre: target genre for style-appropriate choices
|
|
129
191
|
"""
|
|
@@ -131,7 +193,42 @@ def atlas_chain_suggest(ctx: Context, role: str, genre: str = "") -> dict:
|
|
|
131
193
|
if atlas is None:
|
|
132
194
|
return {"error": "Atlas not loaded. Run scan_full_library first."}
|
|
133
195
|
|
|
134
|
-
|
|
196
|
+
factory_result = atlas.chain_suggest(role, genre=genre)
|
|
197
|
+
|
|
198
|
+
# Also search user-local overlay namespaces for devices that match this role+genre.
|
|
199
|
+
# Merge any hits as additional overlay_suggestions on top of the factory chain.
|
|
200
|
+
overlay_suggestions = []
|
|
201
|
+
try:
|
|
202
|
+
from .overlays import get_overlay_index
|
|
203
|
+
idx = get_overlay_index()
|
|
204
|
+
# Build a query from role + genre keywords
|
|
205
|
+
query_parts = [role]
|
|
206
|
+
if genre:
|
|
207
|
+
query_parts.append(genre)
|
|
208
|
+
query = " ".join(query_parts)
|
|
209
|
+
|
|
210
|
+
for ns in idx.list_namespaces():
|
|
211
|
+
if ns == "packs":
|
|
212
|
+
continue
|
|
213
|
+
hits = idx.search(query, namespace=ns, limit=5)
|
|
214
|
+
for entry in hits:
|
|
215
|
+
overlay_suggestions.append({
|
|
216
|
+
"namespace": entry.namespace,
|
|
217
|
+
"entity_id": entry.entity_id,
|
|
218
|
+
"name": entry.name,
|
|
219
|
+
"description": (entry.description or "")[:120],
|
|
220
|
+
"tags": list(entry.tags)[:5],
|
|
221
|
+
"source": f"user_overlay:{entry.namespace}",
|
|
222
|
+
"note": "Load via search_browser or extension_atlas_get; no Live URI.",
|
|
223
|
+
})
|
|
224
|
+
except Exception: # noqa: BLE001 — never fail chain_suggest over an overlay glitch
|
|
225
|
+
pass
|
|
226
|
+
|
|
227
|
+
result = dict(factory_result)
|
|
228
|
+
result["overlay_suggestions"] = overlay_suggestions
|
|
229
|
+
result["factory_count"] = len(factory_result.get("chain", []))
|
|
230
|
+
result["overlay_count"] = len(overlay_suggestions)
|
|
231
|
+
return result
|
|
135
232
|
|
|
136
233
|
|
|
137
234
|
@mcp.tool()
|
|
@@ -264,7 +361,15 @@ def atlas_describe_chain(
|
|
|
264
361
|
if not (t in seen or seen.add(t))
|
|
265
362
|
]
|
|
266
363
|
|
|
267
|
-
# ── Build per-role suggestions via atlas.suggest
|
|
364
|
+
# ── Build per-role suggestions via atlas.suggest + overlay search ─
|
|
365
|
+
# Load overlay index once for all role iterations (graceful no-op on failure)
|
|
366
|
+
_overlay_idx = None
|
|
367
|
+
try:
|
|
368
|
+
from .overlays import get_overlay_index
|
|
369
|
+
_overlay_idx = get_overlay_index()
|
|
370
|
+
except Exception: # noqa: BLE001
|
|
371
|
+
pass
|
|
372
|
+
|
|
268
373
|
per_role_suggestions = []
|
|
269
374
|
for role in detected_roles:
|
|
270
375
|
# Build an intent string that combines role + aesthetic cues
|
|
@@ -277,19 +382,44 @@ def atlas_describe_chain(
|
|
|
277
382
|
energy="medium",
|
|
278
383
|
limit=int(limit_per_role),
|
|
279
384
|
)
|
|
385
|
+
factory_suggestions = [
|
|
386
|
+
{
|
|
387
|
+
"device_id": r["device"].get("id", ""),
|
|
388
|
+
"device_name": r["device"].get("name", ""),
|
|
389
|
+
"uri": r["device"].get("uri", ""),
|
|
390
|
+
"rationale": r.get("rationale", ""),
|
|
391
|
+
"recipe": r.get("recipe"),
|
|
392
|
+
"source": "factory_atlas",
|
|
393
|
+
}
|
|
394
|
+
for r in results
|
|
395
|
+
]
|
|
396
|
+
|
|
397
|
+
# Query overlay namespaces for matching user-corpus devices
|
|
398
|
+
overlay_hits = []
|
|
399
|
+
if _overlay_idx is not None:
|
|
400
|
+
try:
|
|
401
|
+
for ns in _overlay_idx.list_namespaces():
|
|
402
|
+
if ns == "packs":
|
|
403
|
+
continue
|
|
404
|
+
hits = _overlay_idx.search(intent, namespace=ns, limit=limit_per_role)
|
|
405
|
+
for entry in hits:
|
|
406
|
+
overlay_hits.append({
|
|
407
|
+
"device_id": entry.entity_id,
|
|
408
|
+
"device_name": entry.name,
|
|
409
|
+
"uri": "",
|
|
410
|
+
"rationale": (entry.description or "")[:120],
|
|
411
|
+
"recipe": None,
|
|
412
|
+
"source": f"user_overlay:{entry.namespace}",
|
|
413
|
+
})
|
|
414
|
+
except Exception: # noqa: BLE001
|
|
415
|
+
pass
|
|
416
|
+
|
|
280
417
|
per_role_suggestions.append({
|
|
281
418
|
"role": role,
|
|
282
419
|
"intent_used": intent,
|
|
283
|
-
"suggestions":
|
|
284
|
-
|
|
285
|
-
|
|
286
|
-
"device_name": r["device"].get("name", ""),
|
|
287
|
-
"uri": r["device"].get("uri", ""),
|
|
288
|
-
"rationale": r.get("rationale", ""),
|
|
289
|
-
"recipe": r.get("recipe"),
|
|
290
|
-
}
|
|
291
|
-
for r in results
|
|
292
|
-
],
|
|
420
|
+
"suggestions": factory_suggestions + overlay_hits,
|
|
421
|
+
"factory_count": len(factory_suggestions),
|
|
422
|
+
"overlay_count": len(overlay_hits),
|
|
293
423
|
})
|
|
294
424
|
|
|
295
425
|
# ── Propose a simple chain from the highest-ranked suggestions ─
|
|
@@ -686,3 +816,843 @@ def extension_atlas_list(ctx: Context, namespace: str = "") -> dict:
|
|
|
686
816
|
"namespaces": idx.list_namespaces(),
|
|
687
817
|
"counts": idx.stats(),
|
|
688
818
|
}
|
|
819
|
+
|
|
820
|
+
|
|
821
|
+
@mcp.tool()
|
|
822
|
+
def atlas_macro_fingerprint(
|
|
823
|
+
ctx: Context,
|
|
824
|
+
source_pack_slug: str = "",
|
|
825
|
+
source_preset_path: str = "",
|
|
826
|
+
source_live_track: int = -1,
|
|
827
|
+
source_live_device: int = -1,
|
|
828
|
+
rack_class_filter: str = "",
|
|
829
|
+
pack_filter: list = None,
|
|
830
|
+
top_k: int = 8,
|
|
831
|
+
min_named_macros: int = 3,
|
|
832
|
+
similarity_threshold: float = 0.4,
|
|
833
|
+
) -> dict:
|
|
834
|
+
"""Find presets with similar macro state to the source — 'more like this' search.
|
|
835
|
+
|
|
836
|
+
Source must be a known corpus preset (via source_pack_slug + source_preset_path).
|
|
837
|
+
Live-device source via source_live_track/source_live_device is stubbed and returns
|
|
838
|
+
an error; only the corpus path works currently (as of v1.23.4).
|
|
839
|
+
|
|
840
|
+
Similarity is computed as:
|
|
841
|
+
0.6 × macro-name-overlap-ratio (synonym-aware: 'Filter Control' ≈ 'Filter Cutoff')
|
|
842
|
+
+ 0.4 × (1 − mean value distance)
|
|
843
|
+
|
|
844
|
+
Parameters
|
|
845
|
+
----------
|
|
846
|
+
source_pack_slug : Pack directory name, e.g. "drone-lab".
|
|
847
|
+
source_preset_path : Sidecar filename stem, e.g.
|
|
848
|
+
"instruments_laboratory_razor-wire-drone".
|
|
849
|
+
Use underscores for directory separators (matches
|
|
850
|
+
the sidecar naming convention from als_deep_parse.py).
|
|
851
|
+
source_live_track : Track index in the live session (0-based). Used only
|
|
852
|
+
when source_pack_slug is empty.
|
|
853
|
+
source_live_device : Device index on that track. Used only when
|
|
854
|
+
source_pack_slug is empty.
|
|
855
|
+
rack_class_filter : Filter candidates by rack class. One of:
|
|
856
|
+
"InstrumentGroupDevice", "AudioEffectGroupDevice",
|
|
857
|
+
"DrumGroupDevice", "MidiEffectGroupDevice".
|
|
858
|
+
Empty string = all classes.
|
|
859
|
+
pack_filter : Optional list of pack slugs to restrict the candidate
|
|
860
|
+
scan (e.g. ["drone-lab", "mood-reel"]).
|
|
861
|
+
top_k : Maximum number of matches to return (default 8).
|
|
862
|
+
min_named_macros : Require source to have at least this many
|
|
863
|
+
producer-named macros; also applied to candidates.
|
|
864
|
+
Below this floor the fingerprint is too weak to be
|
|
865
|
+
useful (default 3).
|
|
866
|
+
similarity_threshold : Drop matches below this score (default 0.4).
|
|
867
|
+
|
|
868
|
+
Returns
|
|
869
|
+
-------
|
|
870
|
+
{
|
|
871
|
+
"source": {
|
|
872
|
+
"pack_slug": str,
|
|
873
|
+
"preset_path": str,
|
|
874
|
+
"rack_class": str,
|
|
875
|
+
"macros_named": [{"index", "name", "value"}, ...],
|
|
876
|
+
"fingerprint_strength": "strong" | "moderate" | "weak"
|
|
877
|
+
},
|
|
878
|
+
"matches": [
|
|
879
|
+
{
|
|
880
|
+
"pack_slug": str,
|
|
881
|
+
"preset_path": str,
|
|
882
|
+
"preset_name": str,
|
|
883
|
+
"rack_class": str,
|
|
884
|
+
"similarity_score": float,
|
|
885
|
+
"matching_macros": [{"name_overlap", "value_distance", ...}, ...],
|
|
886
|
+
"rationale": str
|
|
887
|
+
},
|
|
888
|
+
...
|
|
889
|
+
],
|
|
890
|
+
"sources": ["adg-parse: N sidecars across M packs"]
|
|
891
|
+
}
|
|
892
|
+
|
|
893
|
+
Citation tags: [SOURCE: adg-parse] for all preset data, [SOURCE: agent-inference]
|
|
894
|
+
for rationale prose.
|
|
895
|
+
"""
|
|
896
|
+
# BUG-EDGE#1: coerce string args that MCP may pass as strings
|
|
897
|
+
try:
|
|
898
|
+
top_k = int(top_k) if top_k is not None else 8
|
|
899
|
+
except (ValueError, TypeError):
|
|
900
|
+
top_k = 8
|
|
901
|
+
try:
|
|
902
|
+
min_named_macros = int(min_named_macros) if min_named_macros is not None else 3
|
|
903
|
+
except (ValueError, TypeError):
|
|
904
|
+
min_named_macros = 3
|
|
905
|
+
try:
|
|
906
|
+
similarity_threshold = float(similarity_threshold) if similarity_threshold is not None else 0.4
|
|
907
|
+
except (ValueError, TypeError):
|
|
908
|
+
similarity_threshold = 0.4
|
|
909
|
+
|
|
910
|
+
from .macro_fingerprint import (
|
|
911
|
+
_extract_fingerprint,
|
|
912
|
+
_compute_similarity,
|
|
913
|
+
_generate_rationale,
|
|
914
|
+
_fingerprint_strength,
|
|
915
|
+
_load_preset_sidecar,
|
|
916
|
+
_iter_all_preset_sidecars,
|
|
917
|
+
PRESET_PARSES_ROOT,
|
|
918
|
+
)
|
|
919
|
+
import json as _json
|
|
920
|
+
from pathlib import Path as _Path
|
|
921
|
+
|
|
922
|
+
# User-corpus rack sidecar root: ~/.livepilot/atlas-overlays/user/racks/_parses/<id>.json
|
|
923
|
+
USER_RACK_PARSES_ROOT = _Path.home() / ".livepilot" / "atlas-overlays" / "user" / "racks" / "_parses"
|
|
924
|
+
|
|
925
|
+
def _load_user_rack_sidecar(entity_id: str) -> dict | None:
|
|
926
|
+
"""Load a user-corpus rack sidecar by entity_id. Returns None if absent."""
|
|
927
|
+
p = USER_RACK_PARSES_ROOT / f"{entity_id}.json"
|
|
928
|
+
if not p.exists():
|
|
929
|
+
return None
|
|
930
|
+
try:
|
|
931
|
+
return _json.loads(p.read_text())
|
|
932
|
+
except (OSError, _json.JSONDecodeError):
|
|
933
|
+
return None
|
|
934
|
+
|
|
935
|
+
def _iter_user_rack_sidecars():
|
|
936
|
+
"""Yield (namespace_slug, preset_path_slug, sidecar_dict) for user rack parses."""
|
|
937
|
+
if not USER_RACK_PARSES_ROOT.exists():
|
|
938
|
+
return
|
|
939
|
+
for sidecar_path in sorted(USER_RACK_PARSES_ROOT.glob("*.json")):
|
|
940
|
+
try:
|
|
941
|
+
sidecar = _json.loads(sidecar_path.read_text())
|
|
942
|
+
except (OSError, _json.JSONDecodeError):
|
|
943
|
+
continue
|
|
944
|
+
yield "user", sidecar_path.stem, sidecar
|
|
945
|
+
|
|
946
|
+
# Overlay namespace IDs that pack_filter may contain — separate from pack slugs.
|
|
947
|
+
# We'll resolve these to the user-rack sidecar iterator instead of the factory iterator.
|
|
948
|
+
_OVERLAY_NAMESPACE_IDS = {"user", "m4l-devices", "elektron"}
|
|
949
|
+
|
|
950
|
+
# ── 1. Resolve source fingerprint ─────────────────────────────────────────
|
|
951
|
+
|
|
952
|
+
source_sidecar: dict | None = None
|
|
953
|
+
source_pack_resolved = source_pack_slug
|
|
954
|
+
source_path_resolved = source_preset_path
|
|
955
|
+
|
|
956
|
+
if source_pack_slug and source_preset_path:
|
|
957
|
+
# Check if source_pack_slug is an overlay namespace (user rack corpus)
|
|
958
|
+
if source_pack_slug in _OVERLAY_NAMESPACE_IDS:
|
|
959
|
+
source_sidecar = _load_user_rack_sidecar(source_preset_path)
|
|
960
|
+
if source_sidecar is None:
|
|
961
|
+
slug_attempt = source_preset_path.replace("/", "_")
|
|
962
|
+
source_sidecar = _load_user_rack_sidecar(slug_attempt)
|
|
963
|
+
if source_sidecar is not None:
|
|
964
|
+
source_path_resolved = slug_attempt
|
|
965
|
+
if source_sidecar is None:
|
|
966
|
+
available = (
|
|
967
|
+
[p.stem for p in sorted(USER_RACK_PARSES_ROOT.glob("*.json"))[:10]]
|
|
968
|
+
if USER_RACK_PARSES_ROOT.exists()
|
|
969
|
+
else []
|
|
970
|
+
)
|
|
971
|
+
return {
|
|
972
|
+
"error": (
|
|
973
|
+
f"User-corpus rack sidecar not found: {source_pack_slug}/{source_preset_path}.json. "
|
|
974
|
+
f"Expected under {USER_RACK_PARSES_ROOT}."
|
|
975
|
+
),
|
|
976
|
+
"available_user_rack_sidecars": available,
|
|
977
|
+
"hint": "Run corpus_scan to generate user-corpus rack sidecars.",
|
|
978
|
+
}
|
|
979
|
+
else:
|
|
980
|
+
# Corpus path: load from bundled _preset_parses
|
|
981
|
+
source_sidecar = _load_preset_sidecar(source_pack_slug, source_preset_path)
|
|
982
|
+
if source_sidecar is None:
|
|
983
|
+
# Try converting "/" separators to "_"
|
|
984
|
+
slug_attempt = source_preset_path.replace("/", "_")
|
|
985
|
+
source_sidecar = _load_preset_sidecar(source_pack_slug, slug_attempt)
|
|
986
|
+
if source_sidecar is not None:
|
|
987
|
+
source_path_resolved = slug_attempt
|
|
988
|
+
if source_sidecar is None:
|
|
989
|
+
return {
|
|
990
|
+
"error": (
|
|
991
|
+
f"Sidecar not found: {source_pack_slug}/{source_preset_path}.json. "
|
|
992
|
+
"Check that the pack slug and preset path match the _preset_parses "
|
|
993
|
+
"directory layout. Use underscores as separators, e.g. "
|
|
994
|
+
"'instruments_laboratory_razor-wire-drone'."
|
|
995
|
+
),
|
|
996
|
+
"hint": (
|
|
997
|
+
f"Available files in {source_pack_slug}: "
|
|
998
|
+
+ ", ".join(
|
|
999
|
+
p.stem
|
|
1000
|
+
for p in sorted(
|
|
1001
|
+
(PRESET_PARSES_ROOT / source_pack_slug).glob("*.json")
|
|
1002
|
+
)[:10]
|
|
1003
|
+
)
|
|
1004
|
+
if (PRESET_PARSES_ROOT / source_pack_slug).is_dir()
|
|
1005
|
+
else "pack directory not found"
|
|
1006
|
+
),
|
|
1007
|
+
}
|
|
1008
|
+
|
|
1009
|
+
elif source_live_track >= 0 and source_live_device >= 0:
|
|
1010
|
+
# TODO(Phase D follow-up): live-Live path — reads macro names/values from
|
|
1011
|
+
# a running Ableton session via the get_device_parameters MCP tool.
|
|
1012
|
+
# Not implemented in this release; corpus path is fully operational.
|
|
1013
|
+
return {
|
|
1014
|
+
"error": (
|
|
1015
|
+
"Live-device source path is not yet implemented (Phase D follow-up). "
|
|
1016
|
+
"Please use source_pack_slug + source_preset_path to query from the "
|
|
1017
|
+
"corpus instead."
|
|
1018
|
+
),
|
|
1019
|
+
"hint": (
|
|
1020
|
+
"To use a live device as the source, save the rack as a .adg preset "
|
|
1021
|
+
"via Ableton's browser and re-run als_deep_parse.py to generate a "
|
|
1022
|
+
"sidecar, then reference it by pack_slug + preset_path."
|
|
1023
|
+
),
|
|
1024
|
+
}
|
|
1025
|
+
else:
|
|
1026
|
+
return {
|
|
1027
|
+
"error": (
|
|
1028
|
+
"Provide either (source_pack_slug + source_preset_path) for corpus "
|
|
1029
|
+
"lookup, or (source_live_track + source_live_device) for a live "
|
|
1030
|
+
"device source."
|
|
1031
|
+
)
|
|
1032
|
+
}
|
|
1033
|
+
|
|
1034
|
+
# ── 2. Build source fingerprint ───────────────────────────────────────────
|
|
1035
|
+
|
|
1036
|
+
source_fp = _extract_fingerprint(source_sidecar)
|
|
1037
|
+
n_named_source = len(source_fp)
|
|
1038
|
+
|
|
1039
|
+
if n_named_source < min_named_macros:
|
|
1040
|
+
named_display = [
|
|
1041
|
+
m for m in source_sidecar.get("macros", [])
|
|
1042
|
+
if m.get("name", "") and not m["name"].startswith("Macro ")
|
|
1043
|
+
]
|
|
1044
|
+
return {
|
|
1045
|
+
"error": (
|
|
1046
|
+
f"Source preset has only {n_named_source} producer-named macro(s) "
|
|
1047
|
+
f"(min_named_macros={min_named_macros}). "
|
|
1048
|
+
"Fingerprint is too weak for reliable matching."
|
|
1049
|
+
),
|
|
1050
|
+
"source_named_macros": [m["name"] for m in named_display],
|
|
1051
|
+
"suggestion": (
|
|
1052
|
+
"Lower min_named_macros, or choose a source preset with more "
|
|
1053
|
+
"producer-named macros."
|
|
1054
|
+
),
|
|
1055
|
+
}
|
|
1056
|
+
|
|
1057
|
+
# ── 3. Scan candidates ────────────────────────────────────────────────────
|
|
1058
|
+
|
|
1059
|
+
pack_whitelist = set(pack_filter) if pack_filter else None
|
|
1060
|
+
|
|
1061
|
+
# Split pack_whitelist into overlay namespaces vs. bundled pack slugs so
|
|
1062
|
+
# pack_filter=["user"] scans user rack sidecars, not bundled _preset_parses.
|
|
1063
|
+
overlay_whitelist = (
|
|
1064
|
+
{ns for ns in pack_whitelist if ns in _OVERLAY_NAMESPACE_IDS}
|
|
1065
|
+
if pack_whitelist else None
|
|
1066
|
+
)
|
|
1067
|
+
bundled_whitelist = (
|
|
1068
|
+
{slug for slug in pack_whitelist if slug not in _OVERLAY_NAMESPACE_IDS}
|
|
1069
|
+
if pack_whitelist else None
|
|
1070
|
+
)
|
|
1071
|
+
|
|
1072
|
+
candidates_scanned = 0
|
|
1073
|
+
packs_seen: set[str] = set()
|
|
1074
|
+
scored: list[tuple[float, str, str, dict, list[dict]]] = []
|
|
1075
|
+
|
|
1076
|
+
# Decide which iterators to run based on pack_filter contents:
|
|
1077
|
+
# - No filter → run both bundled + user-rack
|
|
1078
|
+
# - Filter with only overlay namespaces → run only user-rack
|
|
1079
|
+
# - Filter with only bundled slugs → run only bundled
|
|
1080
|
+
# - Mixed → run both, each filtered to its respective whitelist
|
|
1081
|
+
run_bundled = (pack_whitelist is None) or bool(bundled_whitelist)
|
|
1082
|
+
run_user_racks = (pack_whitelist is None) or bool(overlay_whitelist)
|
|
1083
|
+
|
|
1084
|
+
def _scan_candidates(iterator):
|
|
1085
|
+
nonlocal candidates_scanned
|
|
1086
|
+
for cand_pack, cand_slug, cand_sidecar in iterator:
|
|
1087
|
+
# Skip the source itself
|
|
1088
|
+
if (cand_pack == source_pack_resolved
|
|
1089
|
+
and cand_slug == source_path_resolved):
|
|
1090
|
+
continue
|
|
1091
|
+
|
|
1092
|
+
# Rack class filter
|
|
1093
|
+
if rack_class_filter:
|
|
1094
|
+
if cand_sidecar.get("rack_class", "") != rack_class_filter:
|
|
1095
|
+
continue
|
|
1096
|
+
|
|
1097
|
+
# Candidate must also have enough named macros
|
|
1098
|
+
cand_fp = _extract_fingerprint(cand_sidecar)
|
|
1099
|
+
if len(cand_fp) < min_named_macros:
|
|
1100
|
+
continue
|
|
1101
|
+
|
|
1102
|
+
candidates_scanned += 1
|
|
1103
|
+
packs_seen.add(cand_pack)
|
|
1104
|
+
|
|
1105
|
+
score, matched = _compute_similarity(source_fp, cand_fp)
|
|
1106
|
+
if score >= similarity_threshold:
|
|
1107
|
+
scored.append((score, cand_pack, cand_slug, cand_sidecar, matched))
|
|
1108
|
+
|
|
1109
|
+
if run_bundled:
|
|
1110
|
+
def _bundled_iter():
|
|
1111
|
+
for cand_pack, cand_slug, cand_sidecar in _iter_all_preset_sidecars():
|
|
1112
|
+
if bundled_whitelist and cand_pack not in bundled_whitelist:
|
|
1113
|
+
continue
|
|
1114
|
+
yield cand_pack, cand_slug, cand_sidecar
|
|
1115
|
+
_scan_candidates(_bundled_iter())
|
|
1116
|
+
|
|
1117
|
+
if run_user_racks:
|
|
1118
|
+
try:
|
|
1119
|
+
_scan_candidates(_iter_user_rack_sidecars())
|
|
1120
|
+
except Exception: # noqa: BLE001 — never fail over a missing user-rack corpus
|
|
1121
|
+
pass
|
|
1122
|
+
|
|
1123
|
+
# Sort descending by score
|
|
1124
|
+
scored.sort(key=lambda x: x[0], reverse=True)
|
|
1125
|
+
top = scored[:top_k]
|
|
1126
|
+
|
|
1127
|
+
# ── 4. Format matches ─────────────────────────────────────────────────────
|
|
1128
|
+
|
|
1129
|
+
matches_out = []
|
|
1130
|
+
for score, cand_pack, cand_slug, cand_sidecar, matched in top:
|
|
1131
|
+
rationale = _generate_rationale(
|
|
1132
|
+
source_pack=source_pack_resolved,
|
|
1133
|
+
source_name=source_sidecar.get("name", ""),
|
|
1134
|
+
cand_pack=cand_pack,
|
|
1135
|
+
cand_name=cand_sidecar.get("name", ""),
|
|
1136
|
+
matching_macros=matched,
|
|
1137
|
+
)
|
|
1138
|
+
matches_out.append({
|
|
1139
|
+
"pack_slug": cand_pack,
|
|
1140
|
+
"preset_path": cand_slug,
|
|
1141
|
+
"preset_name": cand_sidecar.get("name", ""),
|
|
1142
|
+
"rack_class": cand_sidecar.get("rack_class", ""),
|
|
1143
|
+
"similarity_score": score,
|
|
1144
|
+
"matching_macros": matched[:5], # show up to 5; see total_matching_macros for full count
|
|
1145
|
+
"total_matching_macros": len(matched),
|
|
1146
|
+
"rationale": rationale, # [SOURCE: agent-inference]
|
|
1147
|
+
})
|
|
1148
|
+
|
|
1149
|
+
# ── 5. Format source block ────────────────────────────────────────────────
|
|
1150
|
+
|
|
1151
|
+
source_macros_named = [
|
|
1152
|
+
{
|
|
1153
|
+
"index": m.get("index"),
|
|
1154
|
+
"name": m.get("name"),
|
|
1155
|
+
"value": m.get("value"),
|
|
1156
|
+
}
|
|
1157
|
+
for m in source_sidecar.get("macros", [])
|
|
1158
|
+
if m.get("name", "") and not m["name"].startswith("Macro ")
|
|
1159
|
+
]
|
|
1160
|
+
|
|
1161
|
+
overlay_count = sum(
|
|
1162
|
+
1 for _, cand_pack, _slug, _sd, _m in top
|
|
1163
|
+
if cand_pack in _OVERLAY_NAMESPACE_IDS
|
|
1164
|
+
)
|
|
1165
|
+
factory_match_count = len(matches_out) - overlay_count
|
|
1166
|
+
|
|
1167
|
+
return {
|
|
1168
|
+
"source": {
|
|
1169
|
+
"pack_slug": source_pack_resolved,
|
|
1170
|
+
"preset_path": source_path_resolved,
|
|
1171
|
+
"rack_class": source_sidecar.get("rack_class", ""),
|
|
1172
|
+
"macros_named": source_macros_named,
|
|
1173
|
+
"fingerprint_strength": _fingerprint_strength(n_named_source),
|
|
1174
|
+
},
|
|
1175
|
+
"matches": matches_out,
|
|
1176
|
+
"candidates_scanned": candidates_scanned,
|
|
1177
|
+
"factory_count": factory_match_count,
|
|
1178
|
+
"overlay_count": overlay_count,
|
|
1179
|
+
"sources": [
|
|
1180
|
+
f"adg-parse: {candidates_scanned} candidate sidecars across "
|
|
1181
|
+
f"{len(packs_seen)} packs/namespaces [SOURCE: adg-parse]",
|
|
1182
|
+
f"user-corpus: {'checked' if run_user_racks else 'skipped'} "
|
|
1183
|
+
f"(~/.livepilot/atlas-overlays/user/racks/_parses/)",
|
|
1184
|
+
],
|
|
1185
|
+
}
|
|
1186
|
+
|
|
1187
|
+
|
|
1188
|
+
@mcp.tool()
def atlas_transplant(
    ctx: Context,
    source_namespace: str,
    source_entity_id: str,
    source_track_or_preset: str = "",
    target_bpm: float = 0.0,
    target_scale_root: int = -1,
    target_scale_name: str = "",
    target_aesthetic: str = "",
    preserve_macro_ratios: bool = True,
    preserve_pitch_intervals: bool = True,
    explanation_depth: str = "standard",
) -> dict:
    """Adapt a structure from one musical context to another (Pack-Atlas Phase C).

    Takes a demo project, preset chain, or workflow recipe from the Pack-Atlas
    corpus and translates it to a new musical context (different BPM, scale,
    aesthetic register). Returns a structured plan with executable tool calls
    — agent applies the plan via load_browser_item, set_device_parameter,
    set_clip_pitch, etc. No Live connection required; all data from local
    JSON sidecars.

    Parameters
    ----------
    source_namespace : str
        Namespace to look up the source entity. Use "packs" for demo projects
        and Factory Pack presets; "m4l-devices" for M4L vendor devices;
        "elektron" for Elektron signature chains.

    source_entity_id : str
        Entity identifier. For demo projects use the form "pack-slug__demo-slug"
        or "pack_slug__demo_slug" (hyphens and underscores are normalised).
        Examples: "drone_lab__earth", "drone-lab__emergent-planes",
        "mood-reel__mood-reel-demo".
        For pack presets (with source_track_or_preset): use the pack slug,
        e.g. "drone_lab".

    source_track_or_preset : str, optional
        Sub-selector within a demo or pack. For pack presets: the preset
        file path slug such as "instruments_laboratory_razor-wire-drone"
        (underscores or hyphens both accepted). Omit when targeting the
        whole demo project.

    target_bpm : float, optional
        Target BPM. Pass 0.0 to keep source BPM.

    target_scale_root : int, optional
        Target scale root note as MIDI pitch-class (0=C, 1=C#, … 11=B).
        Pass -1 to keep source root.

    target_scale_name : str, optional
        Target scale mode name. Supported: "Major", "Minor", "Dorian",
        "Phrygian", "Mixolydian", "Lydian", "Locrian". Empty string = keep
        source mode.

    target_aesthetic : str, optional
        Free-text aesthetic descriptor. Used to detect aesthetic-incompatible
        devices and drive REPLACE decisions. Examples: "mood-reel cinematic",
        "inspired_by_nature tree_tone", "lo-fi dusty tape", "clean orchestral".

    preserve_macro_ratios : bool, default True
        When True, non-default macro values from the source are carried forward
        as normalised ratios [0-1] even when the target has different raw ranges.

    preserve_pitch_intervals : bool, default True
        When True, pitch interval relationships within each voice are preserved
        and only a global transposition is applied (scale shift stays parallel).

    explanation_depth : str, default "standard"
        Controls verbosity of the reasoning_artifact field.
        "terse" — 1-2 sentence summary.
        "standard" — 1 paragraph with key decisions enumerated.
        "verbose" — full per-decision narrative with producer-vocabulary
        anchors where applicable.

    Returns
    -------
    dict with keys:
        source — source musical context (bpm, scale, tracks_summary)
        target — target context (bpm, scale, aesthetic)
        translation_plan — list of per-element decisions with executable_steps
        reasoning_artifact — prose explanation of the plan
        warnings — list of caution strings (BPM ratio, missing sidecars)
        sources — citation list with [SOURCE: als-parse | adg-parse |
                  agent-inference] tags

    Example
    -------
    atlas_transplant(
        source_namespace="packs",
        source_entity_id="drone_lab__earth",
        target_bpm=130,
        target_scale_root=5,  # F
        target_scale_name="Minor",
        target_aesthetic="mood-reel cinematic",
        explanation_depth="standard"
    )
    """
    from .transplant import transplant as _transplant

    # Normalise optional params — FastMCP declares typed defaults, but some
    # MCP clients serialise numbers as strings, so every numeric input is
    # coerced before any comparison.

    # BUG-EDGE#4: target_bpm may arrive as a string (e.g. "130.0") when the MCP
    # client serialises the value. Cast to float BEFORE the > 0 comparison to
    # avoid TypeError: '>' not supported between instances of 'str' and 'int'.
    resolved_bpm = None
    if target_bpm:
        try:
            fbpm = float(target_bpm)
        except (ValueError, TypeError):
            pass  # invalid string — treat as unset
        else:
            if fbpm > 0:
                resolved_bpm = fbpm

    # BUG-EDGE#7: out-of-range root (e.g. 99) must be rejected; -1 is the
    # "keep source" sentinel and resolves to None (not passed to the inner
    # function).
    # FIX: target_scale_root is subject to the same string serialisation as
    # target_bpm — coerce to int BEFORE the 0 <= x <= 11 comparison, which
    # would otherwise raise TypeError on a string input. int(float(...))
    # accepts "5", "5.0", and 5.0 alike; an uncoercible value is treated as
    # "keep source".
    root = None
    if target_scale_root is not None:
        try:
            root = int(float(target_scale_root))
        except (ValueError, TypeError):
            root = None  # uncoercible — treat as unset / keep source
    if root is not None and not (0 <= root <= 11):
        if root != -1:
            return {
                "error": (
                    f"target_scale_root={target_scale_root} is out of range. "
                    "Valid values: 0–11 (pitch-class, C=0 … B=11), or -1 to keep source root."
                ),
                "status": "error",
            }
        resolved_root = None  # -1 sentinel → keep source
    else:
        resolved_root = root if root is not None and root >= 0 else None

    return _transplant(
        source_namespace=source_namespace,
        source_entity_id=source_entity_id,
        source_track_or_preset=source_track_or_preset,
        target_bpm=resolved_bpm,
        target_scale_root=resolved_root,
        target_scale_name=target_scale_name,
        target_aesthetic=target_aesthetic,
        preserve_macro_ratios=preserve_macro_ratios,
        preserve_pitch_intervals=preserve_pitch_intervals,
        explanation_depth=explanation_depth,
    )
|
|
1330
|
+
|
|
1331
|
+
|
|
1332
|
+
@mcp.tool()
def atlas_demo_story(
    ctx: Context,
    demo_entity_id: str,
    focus_tracks: list = None,
    detail_level: str = "standard",
) -> dict:
    """Generate a track-by-track narrative + production-sequence for a demo .als (Pack-Atlas Phase E).

    Turns the 104 parsed demo files into interactive learning artifacts. Reads
    from local JSON sidecars — no Live connection required.

    Parameters
    ----------
    demo_entity_id : str
        Entity ID for the demo. Use the form "pack_slug__demo_slug" or the
        hyphenated variant — both are normalised.
        Examples: "drone_lab__earth", "drone-lab__emergent-planes",
        "mood_reel__the_killer_awaits_gmin_135_bpm".

    focus_tracks : list of str, optional
        Narrow the track_breakdown to only these track names (exact or fuzzy
        matched). Pass None (default) to include all tracks.

    detail_level : str, default "standard"
        Controls narrative verbosity.
        "terse" — 2-3 sentence summary.
        "standard" — 1 paragraph narrative + structured breakdown.
        "verbose" — full markdown narrative with producer-vocabulary anchors,
        track architecture table, production sequence, learning path.

    Returns
    -------
    dict with keys:
        demo — {entity_id, name, bpm, scale, track_count, scene_count}
        narrative — prose synthesis of the demo [SOURCE: als-parse,
                    agent-inference]
        track_breakdown — list of per-track dicts:
            {name, type, role, device_chain_summary,
             macro_signature, production_decision, narrative_role}
        production_sequence_inference — ordered list of inferred creation steps
        suggested_learning_path — solo-each-then-add sequence for study
        sources — citation list with [SOURCE: als-parse | agent-inference]
        error — (only on failure) error message

    Track roles:
        "harmonic-foundation" — primary instrument/melodic source
        "rhythmic-driver" — drum rack or percussion-named track
        "texture" — additional instrument layers
        "spatial-glue" — return tracks with reverb/delay
        "fx-bus" — group/return tracks with bus processing
        "decoration" — audio sources or effects-only layers

    Example
    -------
    atlas_demo_story(
        demo_entity_id="drone_lab__earth",
        detail_level="verbose"
    )
    """
    from .demo_story import demo_story as _impl

    # An empty/None focus_tracks selector means "all tracks"; otherwise copy
    # the sequence so the inner implementation cannot mutate the caller's list.
    selected = None
    if focus_tracks:
        selected = list(focus_tracks)

    return _impl(
        demo_entity_id=demo_entity_id,
        focus_tracks=selected,
        detail_level=detail_level,
    )
|
|
1398
|
+
|
|
1399
|
+
|
|
1400
|
+
@mcp.tool()
def atlas_extract_chain(
    ctx: Context,
    demo_entity_id: str,
    track_name: str,
    target_track_index: int = -1,
    parameter_fidelity: str = "exact",
) -> dict:
    """Rebuild a specific demo track's device chain as an executable plan (Pack-Atlas Phase E).

    Reads from local JSON sidecars — no Live connection required for planning.
    Always returns a dry-run plan (executed: false). Execute the plan manually
    via the listed MCP tool calls (load_browser_item, insert_device,
    set_device_parameter) or pass target_track_index >= 0 to target an existing
    track in the returned plan.

    Parameters
    ----------
    demo_entity_id : str
        Entity ID for the demo, e.g. "drone_lab__emergent_planes".

    track_name : str
        Name of the track to extract. Fuzzy matched (case-insensitive substring,
        token match). Example: "Mindless Self-Encounters", "Pioneer Drone",
        "mindless" (partial match accepted).

    target_track_index : int, default -1
        Target track in the current project. -1 = plan includes a new-track
        creation step. >= 0 = plan targets the existing track at that index.
        (Phase E ships dry-run only — use the plan to drive manual execution.)

    parameter_fidelity : str, default "exact"
        Controls how many parameters are included in set_device_parameter steps.
        "exact" — emit set_device_parameter for every non-default macro
        "approximate" — top 5 macros by deviation from zero (most production-
        meaningful committed values)
        "structure-only" — chain topology only; no parameter steps

    Returns
    -------
    dict with keys:
        source — {demo, track, track_type, device_count, device_chain}
            device_chain: [{class, user_name, chain_depth, macros?}]
        execution_plan — list of action dicts. Action types:
            "create_midi_track" | "create_audio_track" |
            "target_existing_track" |
            "load_browser_item" | "insert_device" |
            "set_device_parameter" | "manual_rebuild"
        executed — always False (Phase E is dry-run only)
        parameter_fidelity — echoed back
        warnings — list of caution strings (unknown classes, unnamed racks)
        sources — citation list
        error — (only on failure) error message with available_tracks

    Citation tags: [SOURCE: als-parse] for sidecar data, [SOURCE: agent-inference]
    for step generation logic.

    Example
    -------
    atlas_extract_chain(
        demo_entity_id="drone_lab__emergent_planes",
        track_name="Mindless Self-Encounters",
        target_track_index=-1,
        parameter_fidelity="approximate"
    )
    """
    from .extract_chain import extract_chain as _impl

    # Thin delegation wrapper — all planning logic lives in extract_chain.
    kwargs = {
        "demo_entity_id": demo_entity_id,
        "track_name": track_name,
        "target_track_index": target_track_index,
        "parameter_fidelity": parameter_fidelity,
    }
    return _impl(**kwargs)
|
|
1473
|
+
|
|
1474
|
+
|
|
1475
|
+
@mcp.tool()
def atlas_pack_aware_compose(
    ctx: Context,
    aesthetic_brief: str,
    target_bpm: float = 0.0,
    target_scale: str = "",
    track_count: int = 6,
    pack_diversity: str = "coherent",
) -> dict:
    """Bootstrap a project with pack-coherent track selection given an aesthetic brief (Pack-Atlas Phase F).

    Parses the aesthetic brief against the artist/genre vocabulary files and the pack
    atlas overlay, builds a pack cohort (which Factory Packs best serve this brief),
    selects real presets from the corpus for each track role via macro-fingerprint
    similarity, and returns a full executable plan.

    Parameters
    ----------
    aesthetic_brief : str
        Free-text aesthetic description.
        Examples: "dub-techno spectral drone bed monolake henke",
        "BoC pastoral decayed pad", "footwork breakcore",
        "orchestral dread Mica Levi".

    target_bpm : float, optional
        Target project BPM. Pass 0.0 to omit.

    target_scale : str, optional
        Target scale string, e.g. "Cmin", "Fmaj", "Fmin". Pass "" to omit.

    track_count : int, default 6
        Number of tracks to propose.

    pack_diversity : str, default "coherent"
        "coherent" — all tracks from packs aligned to the brief's aesthetic.
        "eclectic" — deliberately spans conflicting aesthetics (Eclectic Mode
                     reasoning: picks packs whose anti_patterns conflict,
                     explains tension_resolution in reasoning_artifact).

    Returns
    -------
    dict with keys:
        brief_analysis : {
            primary_aesthetic: str,
            secondary_aesthetics: list[str],
            anchor_producers: list[str],
            anchor_genres: list[str],
            pack_cohort: list[str]  # Factory Pack slugs
        }
        track_proposal : list of {
            track_name: str,
            role: str,        # e.g. "harmonic-foundation"
            preset: str,      # "pack-slug/preset-path-slug"
            preset_name: str,
            rationale: str    # [SOURCE: adg-parse | agent-inference]
        }
        suggested_routing : list[str]   # routing hints + cross-pack workflow refs
        executable_steps : list[dict]   # create_track + load_browser_item + set_device_parameter
        sources : list[str]             # citation list
        reasoning_artifact: dict        # only present in eclectic mode

    Citation tags: [SOURCE: adg-parse] for corpus preset data,
    [SOURCE: artist-vocabularies.md] / [SOURCE: genre-vocabularies.md] for
    vocabulary lookups, [SOURCE: cross_pack_workflow.yaml] for routing hints,
    [SOURCE: agent-inference] for role/step generation.

    Integrations (Phase F uses C+D+E):
    - Phase D: _extract_fingerprint + _fingerprint_strength for preset selection
    - Phase E: _emit_execution_steps step structure for executable plan
    - Phase C: transplant aesthetic-replace rules (via target_scale/customize_aesthetic)

    Example
    -------
    atlas_pack_aware_compose(
        aesthetic_brief="dub-techno spectral drone bed monolake",
        target_bpm=130,
        track_count=4
    )
    """
    from .pack_aware_compose import (
        _coerce_float,
        _coerce_int,
        pack_aware_compose as _impl,
    )

    # BUG-EDGE#2/#3: coerce numeric inputs before comparing so string-typed
    # values from MCP clients cannot raise TypeError on the > 0 check.
    bpm = _coerce_float(target_bpm, 0.0)
    resolved_bpm = None
    if bpm > 0:
        resolved_bpm = bpm

    return _impl(
        aesthetic_brief=aesthetic_brief,
        target_bpm=resolved_bpm,
        target_scale=target_scale,
        track_count=_coerce_int(track_count, 6),
        pack_diversity=pack_diversity,
    )
|
|
1567
|
+
|
|
1568
|
+
|
|
1569
|
+
@mcp.tool()
def atlas_cross_pack_chain(
    ctx: Context,
    workflow_entity_id: str,
    target_track_index: int = -1,
    customize_aesthetic: dict = None,
) -> dict:
    """Execute a cross-pack signature recipe step-by-step (Pack-Atlas Phase F).

    Reads a cross_pack_workflow entry from the Pack-Atlas overlay, parses its
    signal_flow body into structured actions, and returns a dry-run execution log.
    All 15 cross-pack workflow recipes are supported.

    Parameters
    ----------
    workflow_entity_id : str
        Entity ID of the workflow. Use underscores or hyphens interchangeably.
        Examples:
        "dub_techno_spectral_drone_bed" (HDG → PitchLoop89 → ConvReverb → AutoFilter)
        "boc_decayed_pad" (Tree Tone → Bad Speaker → Echo → Reverb)
        "mica_levi_orchestral_dread" (Strings → Bass Clarinet → AutoPan → ConvReverb)
        "henke_full_granular_chain"
        "footwork_breakcore_drum_chain"
        Use atlas_cross_pack_chain(workflow_entity_id="") with an invalid ID to
        see the list of available workflows in the error.available_workflows field.

    target_track_index : int, default -1
        -1 = dry run. All steps returned with result: "dry-run".
        >= 0 = plan targets an existing track at that index (still dry-run in Phase F;
        live execution gated on Remote Script connection).

    customize_aesthetic : dict, optional
        Optional aesthetic-shift parameters. Supported keys:
        - "target_scale": str — insert set_song_scale step (e.g. "Fmin")
        - "target_bpm": float — insert set_tempo step
        - "transpose_semitones": float — shift numeric pitch parameter values

    Returns
    -------
    dict with keys:
        workflow : {
            entity_id: str,
            name: str,
            packs_used: list[str],
            description: str,
            when_to_reach: str,
            gotcha: str
        }
        executed_steps : list of {
            step: int,
            action: str,          # load_browser_item | insert_device |
                                  # set_device_parameter | fire_clip |
                                  # set_track_send | manual_step |
                                  # set_song_scale | set_tempo
            device_name: str?,
            parameter_name: str?,
            value: float?,
            raw_text: str,        # original signal_flow line
            result: "dry-run",
            target_track_index: int?  # only when target_track_index >= 0
        }
        warnings : list[str]      # gotcha + avoid text from workflow YAML
        sources : list[str]       # citation list
        error : str               # only on failure; also has available_workflows

    Signal-flow verb → action mapping:
        "load" / "open" / "import" → load_browser_item
        "insert" / "add" → insert_device
        "set" / "tweak" / "configure"→ set_device_parameter
        "fire" / "play" / "trigger" → fire_clip
        "chain" / "route" / "→" → set_track_send
        anything else → manual_step

    Citation tags: [SOURCE: cross_pack_workflow.yaml] for workflow YAML data,
    [SOURCE: agent-inference] for parsing logic.

    Example
    -------
    atlas_cross_pack_chain(
        workflow_entity_id="dub_techno_spectral_drone_bed",
        target_track_index=-1
    )
    """
    from .cross_pack_chain import cross_pack_chain as _impl

    # None (or any falsy value) for customize_aesthetic normalises to an empty
    # dict so the inner parser can read keys unconditionally.
    aesthetic = customize_aesthetic if customize_aesthetic else {}

    return _impl(
        workflow_entity_id=workflow_entity_id,
        target_track_index=target_track_index,
        customize_aesthetic=aesthetic,
    )
|