livepilot 1.23.3 → 1.23.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +93 -0
- package/README.md +106 -8
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/atlas/cross_pack_chain.py +658 -0
- package/mcp_server/atlas/demo_story.py +700 -0
- package/mcp_server/atlas/extract_chain.py +786 -0
- package/mcp_server/atlas/macro_fingerprint.py +554 -0
- package/mcp_server/atlas/overlays.py +95 -3
- package/mcp_server/atlas/pack_aware_compose.py +1255 -0
- package/mcp_server/atlas/preset_resolver.py +238 -0
- package/mcp_server/atlas/tools.py +1001 -31
- package/mcp_server/atlas/transplant.py +1177 -0
- package/mcp_server/mix_engine/state_builder.py +44 -1
- package/mcp_server/runtime/capability_state.py +34 -3
- package/mcp_server/server.py +45 -24
- package/mcp_server/tools/agent_os.py +33 -9
- package/mcp_server/tools/analyzer.py +38 -7
- package/mcp_server/tools/browser.py +20 -1
- package/mcp_server/tools/devices.py +78 -11
- package/mcp_server/tools/perception.py +5 -1
- package/mcp_server/tools/tracks.py +39 -2
- package/mcp_server/user_corpus/__init__.py +48 -0
- package/mcp_server/user_corpus/manifest.py +142 -0
- package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
- package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
- package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
- package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
- package/mcp_server/user_corpus/runner.py +261 -0
- package/mcp_server/user_corpus/scanner.py +115 -0
- package/mcp_server/user_corpus/scanners/__init__.py +18 -0
- package/mcp_server/user_corpus/scanners/adg.py +79 -0
- package/mcp_server/user_corpus/scanners/als.py +144 -0
- package/mcp_server/user_corpus/scanners/amxd.py +374 -0
- package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
- package/mcp_server/user_corpus/tools.py +904 -0
- package/mcp_server/user_corpus/wizard.py +224 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +7 -2
- package/requirements.txt +3 -3
- package/server.json +2 -2
|
@@ -0,0 +1,1255 @@
|
|
|
1
|
+
"""Pack-Atlas Phase F — Pack-Aware Composer.
|
|
2
|
+
|
|
3
|
+
Bootstrap a project with pack-coherent track selection given an aesthetic brief.
|
|
4
|
+
Parses the brief against artist/genre vocabularies, builds a pack cohort,
|
|
5
|
+
selects presets via macro-fingerprint similarity, and emits an executable plan.
|
|
6
|
+
|
|
7
|
+
All data comes from local JSON sidecars + the overlay index — no Live connection
|
|
8
|
+
required.
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from __future__ import annotations
|
|
12
|
+
|
|
13
|
+
import re
|
|
14
|
+
from functools import lru_cache
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import Any
|
|
17
|
+
|
|
18
|
+
# ─── Paths ────────────────────────────────────────────────────────────────────
|
|
19
|
+
|
|
20
|
+
# Directory of the aesthetic-vocabulary markdown references shipped with the
# package: <package root>/livepilot/skills/livepilot-core/references
# (three .parent hops up from mcp_server/atlas/ — TODO confirm against layout).
_VOCAB_DIR = (
    Path(__file__).parent.parent.parent
    / "livepilot"
    / "skills"
    / "livepilot-core"
    / "references"
)
# Markdown sources parsed by _load_artist_vocab / _load_genre_vocab.
_ARTIST_VOCAB_PATH = _VOCAB_DIR / "artist-vocabularies.md"
_GENRE_VOCAB_PATH = _VOCAB_DIR / "genre-vocabularies.md"

# Per-user overlay location for cross-pack workflow YAMLs.
# NOTE(review): not read anywhere in this visible chunk — presumably consumed
# by composer code further down the file; verify before removing.
_CROSS_WORKFLOWS_DIR = (
    Path.home()
    / ".livepilot"
    / "atlas-overlays"
    / "packs"
    / "cross_workflows"
)
|
|
37
|
+
|
|
38
|
+
# ─── Vocabulary parsers ───────────────────────────────────────────────────────
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
@lru_cache(maxsize=1)
def _load_artist_vocab() -> dict[str, dict]:
    """Load and parse the artist vocabulary markdown (cached for the process).

    Returns a ``{artist_slug: entry}`` mapping whose entries carry
    sonic_fingerprint, reach_for, avoid, key_techniques, genre_affinity, and a
    derived pack_anchors list.  Returns ``{}`` when the reference file is absent.
    """
    source = _ARTIST_VOCAB_PATH
    if source.exists():
        return _parse_artist_section(source.read_text(encoding="utf-8"))
    return {}
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@lru_cache(maxsize=1)
def _load_genre_vocab() -> dict[str, dict]:
    """Load and parse the genre vocabulary markdown (cached for the process).

    Returns ``{genre_slug: entry}``; ``{}`` when the reference file is absent.
    """
    source = _GENRE_VOCAB_PATH
    if source.exists():
        return _parse_genre_section(source.read_text(encoding="utf-8"))
    return {}
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
def _coerce_int(v: object, default: int) -> int:
|
|
64
|
+
"""Coerce v to int, returning default on None or invalid input."""
|
|
65
|
+
if v is None:
|
|
66
|
+
return default
|
|
67
|
+
try:
|
|
68
|
+
return int(v)
|
|
69
|
+
except (ValueError, TypeError):
|
|
70
|
+
return default
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _coerce_float(v: object, default: float) -> float:
|
|
74
|
+
"""Coerce v to float, returning default on None or invalid input."""
|
|
75
|
+
if v is None:
|
|
76
|
+
return default
|
|
77
|
+
try:
|
|
78
|
+
return float(v)
|
|
79
|
+
except (ValueError, TypeError):
|
|
80
|
+
return default
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def _slugify(name: str) -> str:
|
|
84
|
+
"""Normalise an artist/genre name to a match key."""
|
|
85
|
+
return re.sub(r"[^a-z0-9]+", "_", name.lower().strip()).strip("_")
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
def _extract_pack_anchors_from_reach(reach_text: str) -> list[str]:
|
|
89
|
+
"""Extract pack slugs from a 'Reach for:' text block.
|
|
90
|
+
|
|
91
|
+
Looks for known pack-keyword patterns:
|
|
92
|
+
Drone Lab → drone-lab, Lost and Found → lost-and-found, etc.
|
|
93
|
+
"""
|
|
94
|
+
pack_patterns: list[tuple[str, str]] = [
|
|
95
|
+
(r"drone\s+lab", "drone-lab"),
|
|
96
|
+
(r"mood[\s\-]reel", "mood-reel"),
|
|
97
|
+
(r"pitchloop\s*89", "pitchloop89"),
|
|
98
|
+
(r"convolution\s+reverb", "convolution-reverb"),
|
|
99
|
+
(r"lost\s+and\s+found", "lost-and-found"),
|
|
100
|
+
(r"inspired[\s\-]by[\s\-]nature", "inspired-by-nature-by-dillon-bastan"),
|
|
101
|
+
(r"voice\s+box", "voice-box"),
|
|
102
|
+
(r"chop\s+and\s+swing", "chop-and-swing"),
|
|
103
|
+
(r"latin\s+percussion", "latin-percussion"),
|
|
104
|
+
(r"glitch\s+and\s+wash", "glitch-and-wash"),
|
|
105
|
+
(r"creative\s+extensions", "creative-extensions"),
|
|
106
|
+
(r"electric\s+keyboards", "electric-keyboards"),
|
|
107
|
+
(r"drum\s+essentials", "drum-essentials"),
|
|
108
|
+
(r"orchestral\s+strings", "orchestral-strings"),
|
|
109
|
+
(r"orchestral\s+woodwinds", "orchestral-woodwinds"),
|
|
110
|
+
(r"orchestral\s+brass", "orchestral-brass"),
|
|
111
|
+
(r"synth\s+essentials", "synth-essentials"),
|
|
112
|
+
(r"cv[\s\-]tools", "cv-tools"),
|
|
113
|
+
(r"build\s+and\s+drop", "build-and-drop"),
|
|
114
|
+
(r"punch\s+and\s+tilt", "punch-and-tilt"),
|
|
115
|
+
(r"session\s+drums\s+club", "session-drums-club"),
|
|
116
|
+
(r"session\s+drums\s+studio", "session-drums-studio"),
|
|
117
|
+
(r"beat\s+tools", "beat-tools"),
|
|
118
|
+
(r"drive\s+and\s+glow", "drive-and-glow"),
|
|
119
|
+
]
|
|
120
|
+
found: list[str] = []
|
|
121
|
+
lower = reach_text.lower()
|
|
122
|
+
for pattern, slug in pack_patterns:
|
|
123
|
+
if re.search(pattern, lower):
|
|
124
|
+
if slug not in found:
|
|
125
|
+
found.append(slug)
|
|
126
|
+
return found
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _parse_artist_section(text: str) -> dict[str, dict]:
    """Parse artist-vocabularies.md into a dict.

    Each artist block starts with ### <Name> and ends at the next ###/## heading.

    Returns:
        {slug: {name, sonic_fingerprint, reach_for, avoid, key_techniques,
        genre_affinity, pack_anchors}}.  Parenthetical aliases (e.g. "Gas" in
        "Wolfgang Voigt (Gas)") are also registered as keys pointing at the
        same entry dict.
    """
    vocab: dict[str, dict] = {}

    # Split on ### headings — these are individual artist entries
    # Also handle ## (section headings we skip)
    # Lookahead split keeps the "### " prefix on each block.
    blocks = re.split(r"(?=^###\s+)", text, flags=re.MULTILINE)

    for block in blocks:
        if not block.startswith("###"):
            continue
        # First line is the artist name
        first_line_end = block.index("\n") if "\n" in block else len(block)
        # NOTE(review): the [4:] slice assumes exactly one space after "###" —
        # confirm the markdown source never uses "###  Name".
        artist_name = block[4:first_line_end].strip()  # strip "### "
        # Strip parenthetical aliases and slash-joined aliases from the slug
        # e.g. "Robert Henke (Monolake)" → slug "robert_henke"
        # e.g. "Wolfgang Voigt (Gas)" → slug "wolfgang_voigt"
        # e.g. "Basic Channel / Rhythm & Sound (...)" → slug "basic_channel_rhythm_sound"
        # This keeps vocab keys simple and matching what _ARTIST_ALIASES expects.
        slug_source = re.sub(r"\s*\([^)]*\)", "", artist_name).strip()
        slug = _slugify(slug_source)

        # Extract fields using regex
        # (Closure captures the loop's `block`; safe because it is only called
        # within the same iteration it was defined in.)
        def extract_field(field_label: str) -> str:
            # Matches "**Label:** value…" up to the next bold field or EOF.
            pattern = rf"\*\*{re.escape(field_label)}:\*\*\s*(.+?)(?=\n\*\*|\Z)"
            m = re.search(pattern, block, re.DOTALL)
            if m:
                return m.group(1).strip()
            return ""

        reach_text = extract_field("Reach for")
        avoid_text = extract_field("Avoid")
        fingerprint_text = extract_field("Sonic fingerprint")
        techniques_text = extract_field("Key techniques")
        genre_text = extract_field("Genre affinity")

        # Pack slugs inferred from pack names mentioned in the Reach-for text.
        pack_anchors = _extract_pack_anchors_from_reach(reach_text)

        # Extract genres from genre_affinity backtick list
        genres = re.findall(r"`([^`]+)`", genre_text)

        vocab[slug] = {
            "name": artist_name,
            "sonic_fingerprint": fingerprint_text,
            "reach_for": reach_text,
            "avoid": avoid_text,
            "key_techniques": techniques_text,
            "genre_affinity": genres,
            "pack_anchors": pack_anchors,
        }

        # Also register common alternate slugs (e.g. "gas" → Wolfgang Voigt)
        # Handle parenthetical aliases like "Wolfgang Voigt (Gas)"
        # The alias key shares the SAME dict object as the main slug.
        alias_match = re.search(r"\(([^)]+)\)", artist_name)
        if alias_match:
            alias_slug = _slugify(alias_match.group(1))
            if alias_slug not in vocab:
                vocab[alias_slug] = vocab[slug]

    return vocab
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def _parse_genre_section(text: str) -> dict[str, dict]:
    """Parse genre-vocabularies.md into a dict.

    Each genre block starts with ## <Genre Name> (after the intro headers).

    Returns:
        {slug: {name, reach_for, avoid, pack_anchors, canonical_artists}}.
    """
    vocab: dict[str, dict] = {}

    # Lookahead split keeps the "## " prefix; [^#] excludes "###" headings.
    blocks = re.split(r"(?=^##\s+[^#])", text, flags=re.MULTILINE)

    for block in blocks:
        if not block.startswith("##"):
            continue
        # Skip the intro header (if it contains "Genre Vocabularies" in the title)
        first_line_end = block.index("\n") if "\n" in block else len(block)
        genre_name = block[3:first_line_end].strip()  # strip "## "

        # NOTE(review): the "Genre Vocabularies" check is subsumed by the
        # broader "Vocabularies" check — kept for clarity.
        if not genre_name or "Genre Vocabularies" in genre_name or "Vocabularies" in genre_name:
            continue

        slug = _slugify(genre_name)

        # (Closure over the loop's `block`; only called within this iteration.)
        def extract_field(field_label: str) -> str:
            # Labels may end with ":" or "/" in this file, hence [:/].
            pattern = rf"\*\*{re.escape(field_label)}[:/]\*\*\s*(.+?)(?=\n\*\*|\Z)"
            m = re.search(pattern, block, re.DOTALL)
            if m:
                return m.group(1).strip()
            return ""

        reach_text = extract_field("Reach for")
        avoid_text = extract_field("Avoid")

        # Extract packs from reach_for text
        pack_anchors = _extract_pack_anchors_from_reach(reach_text)

        # Extract canonical artists (comma-separated list)
        canonical_text = extract_field("Canonical artists")
        artists = [a.strip() for a in re.split(r",\s*", canonical_text) if a.strip()]

        vocab[slug] = {
            "name": genre_name,
            "reach_for": reach_text,
            "avoid": avoid_text,
            "pack_anchors": pack_anchors,
            "canonical_artists": artists,
        }

    return vocab
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
# ─── Brief parsing ─────────────────────────────────────────────────────────────
|
|
244
|
+
|
|
245
|
+
# Genre keyword → vocabulary slug mappings (for matching in brief text)
|
|
246
|
+
# All slug values must match keys produced by _parse_genre_section (which slugifies the
|
|
247
|
+
# "## Genre Name" heading — e.g. "## Dub Techno" → "dub_techno",
|
|
248
|
+
# "## Modern Classical / Cinematic" → "modern_classical_cinematic").
|
|
249
|
+
# NOTE(review): _parse_brief matches these keys as plain substrings of the
# lowercased brief, so short keys can over-match ("ambient" also fires on
# "ambiental"; "trap" fires inside "contraption") — acceptable here, but keep
# in mind when adding short aliases.
_GENRE_ALIASES: dict[str, str] = {
    "dub techno": "dub_techno",
    "dub-techno": "dub_techno",
    "dubtech": "dub_techno",
    "microhouse": "microhouse",
    "micro house": "microhouse",
    "micro-house": "microhouse",
    "minimal techno": "minimal_techno",
    "minimal-techno": "minimal_techno",
    "deep minimal": "deep_minimal_villalobos_school",
    "deep-minimal": "deep_minimal_villalobos_school",
    "ambient": "ambient_drone",  # "## Ambient / Drone"
    "drone": "ambient_drone",
    "idm": "idm",
    "hip hop": "hip_hop_boom_bap_lo_fi",  # "## Hip-Hop (Boom Bap / Lo-Fi)"
    "hip-hop": "hip_hop_boom_bap_lo_fi",
    "hiphop": "hip_hop_boom_bap_lo_fi",
    "boom bap": "hip_hop_boom_bap_lo_fi",
    "trap": "trap_modern_hip_hop",  # "## Trap / Modern Hip-Hop"
    "trap 808": "trap_modern_hip_hop",
    "dubstep": "dubstep_bass_music_modern",  # "## Dubstep / Bass Music (Modern)"
    "bass music": "dubstep_bass_music_modern",
    "house": "house_deep_house",  # "## House / Deep House"
    "deep house": "house_deep_house",
    "dnb": "drum_and_bass_jungle",  # "## Drum and Bass / Jungle"
    "drum and bass": "drum_and_bass_jungle",
    "drum & bass": "drum_and_bass_jungle",
    "jungle": "drum_and_bass_jungle",
    "neurofunk": "drum_and_bass_jungle",
    "garage": "garage_uk_garage_2_step",  # "## Garage / UK Garage / 2-Step"
    "uk garage": "garage_uk_garage_2_step",
    "2-step": "garage_uk_garage_2_step",
    "experimental": "experimental_noise_found_sound",  # "## Experimental / Noise / Found-Sound"
    "noise": "experimental_noise_found_sound",
    "found sound": "experimental_noise_found_sound",
    "synthwave": "synthwave",  # not in vocab; keyword still acts as aesthetic tag
    # Additional genres from cross-workflow YAMLs
    "footwork": "footwork",  # not in vocab; pack lookup via _KEYWORD_TO_PACK
    "juke": "footwork",
    "breakcore": "breakcore",
    "break core": "breakcore",
    "modern classical": "modern_classical_cinematic",  # "## Modern Classical / Cinematic"
    "neo-classical": "modern_classical_cinematic",
    "orchestral": "modern_classical_cinematic",
    "cinematic": "modern_classical_cinematic",
    "microtonal": "experimental_noise_found_sound",
    "bedroom pop": "experimental_noise_found_sound",
    "lo-fi pop": "hip_hop_boom_bap_lo_fi",
    "lo fi": "hip_hop_boom_bap_lo_fi",
    "lofi": "hip_hop_boom_bap_lo_fi",
    "eurorack": "experimental_noise_found_sound",
    "modular": "experimental_noise_found_sound",
}
|
|
302
|
+
|
|
303
|
+
# Artist keyword → vocabulary slug
|
|
304
|
+
# Slugs here must match the keys produced by _parse_artist_section, which strips
|
|
305
|
+
# parenthetical aliases before slugifying:
|
|
306
|
+
# "Robert Henke (Monolake)" → "robert_henke"
|
|
307
|
+
# "Wolfgang Voigt (Gas)" → "wolfgang_voigt"
|
|
308
|
+
# "Aphex Twin (Richard D. James)" → "aphex_twin"
|
|
309
|
+
# "Arca / SOPHIE" → "arca_sophie" (slash-joins remain)
|
|
310
|
+
_ARTIST_ALIASES: dict[str, str] = {
|
|
311
|
+
"villalobos": "ricardo_villalobos",
|
|
312
|
+
"akufen": "akufen", # heading "Akufen (Marc Leclair)" → strip parens → "akufen"
|
|
313
|
+
"marc leclair": "akufen",
|
|
314
|
+
"isolee": "isol_e_luomo",
|
|
315
|
+
"luomo": "isol_e_luomo",
|
|
316
|
+
"basic channel": "basic_channel_rhythm_sound",
|
|
317
|
+
"rhythm and sound": "basic_channel_rhythm_sound",
|
|
318
|
+
"gas": "wolfgang_voigt", # heading "Wolfgang Voigt (Gas)" → strip → "wolfgang_voigt"
|
|
319
|
+
"voigt": "wolfgang_voigt",
|
|
320
|
+
"shackleton": "shackleton",
|
|
321
|
+
"basinski": "william_basinski",
|
|
322
|
+
"tim hecker": "tim_hecker",
|
|
323
|
+
"hecker": "tim_hecker",
|
|
324
|
+
"autechre": "autechre",
|
|
325
|
+
"aphex": "aphex_twin", # heading "Aphex Twin (Richard D. James)" → strip → "aphex_twin"
|
|
326
|
+
"aphex twin": "aphex_twin",
|
|
327
|
+
"burial": "burial", # heading "Burial" → slug "burial"
|
|
328
|
+
"william bevan": "burial",
|
|
329
|
+
"henke": "robert_henke", # heading "Robert Henke (Monolake)" → strip → "robert_henke"
|
|
330
|
+
"robert henke": "robert_henke",
|
|
331
|
+
"monolake": "robert_henke",
|
|
332
|
+
"dilla": "j_dilla",
|
|
333
|
+
"j dilla": "j_dilla",
|
|
334
|
+
"madlib": "madlib",
|
|
335
|
+
"hawtin": "richie_hawtin", # heading "Richie Hawtin (Plastikman)" → strip → "richie_hawtin"
|
|
336
|
+
"plastikman": "richie_hawtin",
|
|
337
|
+
"richie hawtin": "richie_hawtin",
|
|
338
|
+
"boards of canada": "boards_of_canada",
|
|
339
|
+
"boc": "boards_of_canada",
|
|
340
|
+
"arca": "arca_sophie", # heading "Arca / SOPHIE" → "arca_sophie"
|
|
341
|
+
"sophie": "arca_sophie",
|
|
342
|
+
"opn": "oneohtrix_point_never", # heading "Oneohtrix Point Never (Daniel Lopatin)" → strip → "oneohtrix_point_never"
|
|
343
|
+
"oneohtrix": "oneohtrix_point_never",
|
|
344
|
+
"com truise": "com_truise", # heading "Com Truise / Tycho" → "com_truise_tycho"... but alias "com truise" is fine
|
|
345
|
+
"tycho": "com_truise_tycho",
|
|
346
|
+
"photek": "photek", # heading "Photek / Source Direct (jungle / atmospheric DnB)" → "photek_source_direct"
|
|
347
|
+
"source direct": "photek_source_direct",
|
|
348
|
+
# Additional artists from cross-workflows not in artist vocab but with alias lookup
|
|
349
|
+
"mica levi": "mica_levi",
|
|
350
|
+
"mica": "mica_levi",
|
|
351
|
+
"bibio": "bibio",
|
|
352
|
+
"caterina barbieri": "caterina_barbieri",
|
|
353
|
+
"barbieri": "caterina_barbieri",
|
|
354
|
+
"henderson": "henderson",
|
|
355
|
+
"iftah": "iftah",
|
|
356
|
+
"reich": "reich",
|
|
357
|
+
"reznor": "reznor_ross",
|
|
358
|
+
"ross": "reznor_ross",
|
|
359
|
+
"traxman": "rashad_spinn_traxman", # heading "Rashad / Spinn / Traxman (footwork)" → strip → "rashad_spinn_traxman"
|
|
360
|
+
"rashad": "rashad_spinn_traxman",
|
|
361
|
+
"spinn": "rashad_spinn_traxman",
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
|
|
365
|
+
def _parse_brief(brief_text: str) -> dict[str, Any]:
    """Parse free-text brief into structured aesthetic analysis.

    Matching is case-insensitive plain-substring search against the alias
    tables and vocabulary names — no tokenisation, so short aliases can fire
    inside longer words.

    Returns:
        primary_aesthetic: str — first matched genre/producer/keyword, or
            "ambient" when nothing matched at all
        secondary_aesthetics: list[str] — the remaining matches, in order
        anchor_producers: list[str] — artist vocab slugs found in the brief
        anchor_genres: list[str] — genre vocab slugs found in the brief
        pack_cohort: list[str] — pack slugs gathered from matched vocab
            entries plus direct keyword fallbacks, order-preserving, deduped
    """
    lower = brief_text.lower()
    artist_vocab = _load_artist_vocab()
    genre_vocab = _load_genre_vocab()

    # ── Match producers ───────────────────────────────────────────────────────
    anchor_producers: list[str] = []
    for alias, slug in _ARTIST_ALIASES.items():
        if alias in lower:
            if slug not in anchor_producers:
                anchor_producers.append(slug)
            # Deliberately stop at the first alias hit: only one producer is
            # taken from the alias table (dict insertion order decides ties).
            break  # one match is enough for primary producer detection

    # Also try direct slug matching in the vocab
    # (full artist-heading names, so multiple producers CAN accumulate here)
    for slug in artist_vocab:
        name = artist_vocab[slug].get("name", "").lower()
        if name and name in lower and slug not in anchor_producers:
            anchor_producers.append(slug)

    # ── Match genres ──────────────────────────────────────────────────────────
    # Unlike producers, every matching genre alias is collected (no break).
    anchor_genres: list[str] = []
    for alias, slug in _GENRE_ALIASES.items():
        if alias in lower:
            if slug not in anchor_genres:
                anchor_genres.append(slug)

    # Also try direct slug matching
    for slug in genre_vocab:
        name = genre_vocab[slug].get("name", "").lower()
        if name and name in lower and slug not in anchor_genres:
            anchor_genres.append(slug)

    # ── Collect pack cohort from matched entries ───────────────────────────────
    pack_cohort: list[str] = []

    for producer_slug in anchor_producers:
        entry = artist_vocab.get(producer_slug, {})
        for pack in entry.get("pack_anchors", []):
            if pack not in pack_cohort:
                pack_cohort.append(pack)

    for genre_slug in anchor_genres:
        entry = genre_vocab.get(genre_slug, {})
        for pack in entry.get("pack_anchors", []):
            if pack not in pack_cohort:
                pack_cohort.append(pack)

    # ── Direct keyword → pack fallbacks ───────────────────────────────────────
    # NOTE(review): this literal is rebuilt on every call; hoisting it to a
    # module-level constant would be a cheap win if _parse_brief gets hot.
    _KEYWORD_TO_PACK: dict[str, str] = {
        "drone lab": "drone-lab",
        "drone-lab": "drone-lab",
        "pitchloop": "pitchloop89",
        "pitchloop89": "pitchloop89",
        "convolution": "convolution-reverb",
        "mood reel": "mood-reel",
        "mood-reel": "mood-reel",
        "inspired by nature": "inspired-by-nature-by-dillon-bastan",
        "granulator": "inspired-by-nature-by-dillon-bastan",
        "granular": "inspired-by-nature-by-dillon-bastan",
        "tree tone": "inspired-by-nature-by-dillon-bastan",
        "lost and found": "lost-and-found",
        "voice box": "voice-box",
        "vocal": "voice-box",
        "latin percussion": "latin-percussion",
        "latin": "latin-percussion",
        "spectral": "drone-lab",
        "drone": "drone-lab",
        "monolake": "pitchloop89",
        "dub": "drone-lab",
        "pastoral": "inspired-by-nature-by-dillon-bastan",
        "ambient": "mood-reel",
        "cinematic": "mood-reel",
        "lo-fi": "lost-and-found",
        "lofi": "lost-and-found",
        # Cross-workflow pack keywords (from cross-workflow YAML sweep)
        "footwork": "beat-tools",
        "juke": "beat-tools",
        "breakcore": "skitter-and-step",
        "beat tools": "beat-tools",
        "beat-tools": "beat-tools",
        "skitter": "skitter-and-step",
        "skitter and step": "skitter-and-step",
        "skitter-and-step": "skitter-and-step",
        "chop and swing": "chop-and-swing",
        "chop-and-swing": "chop-and-swing",
        "hip hop": "golden-era-hip-hop-drums-by-sound-oracle",
        "hip-hop": "golden-era-hip-hop-drums-by-sound-oracle",
        "golden era": "golden-era-hip-hop-drums-by-sound-oracle",
        "bedroom pop": "drive-and-glow",
        "guitar": "guitar-and-bass",
        "bass guitar": "guitar-and-bass",
        "orchestral strings": "orchestral-strings",
        "strings": "orchestral-strings",
        "orchestral woodwinds": "orchestral-woodwinds",
        "woodwinds": "orchestral-woodwinds",
        "orchestral brass": "orchestral-brass",
        "brass": "orchestral-brass",
        "brass quartet": "brass-quartet-by-spitfire-audio",
        "orchestral mallets": "orchestral-mallets",
        "mallets": "orchestral-mallets",
        "cv tools": "cv-tools",
        "cv-tools": "cv-tools",
        "eurorack": "cv-tools",
        "modular": "cv-tools",
        "microtuner": "microtuner",
        "microtonal": "microtuner",
        "generators iftah": "generators-by-iftah",
        "iftah": "generators-by-iftah",
        "midi tools": "midi-tools-by-philip-meyer",
        "algorithmic": "midi-tools-by-philip-meyer",
        "trap drums": "trap-drums-by-sound-oracle",
        "trap 808": "trap-drums-by-sound-oracle",
        "808": "trap-drums-by-sound-oracle",
        "granulator iii": "granulator-iii",
        "glitch and wash": "glitch-and-wash",
        "glitch-and-wash": "glitch-and-wash",
    }
    for keyword, pack in _KEYWORD_TO_PACK.items():
        if keyword in lower and pack not in pack_cohort:
            pack_cohort.append(pack)

    # ── Determine primary and secondary aesthetics ────────────────────────────
    # Genres first; producers are re-slugified from their vocab display name
    # (falling back to the raw slug for aliases missing from the vocab).
    all_aesthetics: list[str] = anchor_genres + [
        _slugify(artist_vocab[p]["name"]) if p in artist_vocab else p
        for p in anchor_producers
    ]

    # Extract any free-form aesthetic keywords from brief
    _AESTHETIC_KEYWORDS = [
        "spectral", "dub", "ambient", "drone", "minimal", "techno", "experimental",
        "cinematic", "orchestral", "lo-fi", "lofi", "dusty", "granular",
        "pastoral", "dark", "warm", "cold", "lush",
    ]
    for kw in _AESTHETIC_KEYWORDS:
        if kw in lower and kw not in all_aesthetics:
            all_aesthetics.append(kw)

    primary_aesthetic = all_aesthetics[0] if all_aesthetics else "ambient"
    secondary_aesthetics = all_aesthetics[1:] if len(all_aesthetics) > 1 else []

    return {
        "primary_aesthetic": primary_aesthetic,
        "secondary_aesthetics": secondary_aesthetics,
        "anchor_producers": anchor_producers,
        "anchor_genres": anchor_genres,
        "pack_cohort": pack_cohort,
    }
|
|
521
|
+
|
|
522
|
+
|
|
523
|
+
# ─── Track role determination ─────────────────────────────────────────────────
|
|
524
|
+
|
|
525
|
+
# Role → keywords that trigger it
|
|
526
|
+
# Role → keywords that trigger it
# Matched as plain substrings of the lowercased brief by _determine_track_roles.
# NOTE(review): "arpegio" is a misspelling of "arpeggio", but it is harmless —
# both spellings contain "arp", which is also listed, so substring matching
# already fires on either.
_ROLE_KEYWORDS: dict[str, list[str]] = {
    "harmonic-foundation": [
        "drone bed", "drone", "pad", "wash", "harmonic", "chord", "foundation",
        "spectral bed", "bed", "texture",
    ],
    "rhythmic-driver": [
        "kick", "drums", "rhythm", "beat", "percussion", "drum", "808",
        "groove",
    ],
    "melodic": [
        "melody", "melodic", "lead", "hook", "theme", "motif", "bass line",
        "arp", "arpegio",
    ],
    "bass": [
        "bass", "sub", "low end", "bottom", "808 bass",
    ],
    "fx-bus": [
        "reverb bus", "fx bus", "fx", "bus", "send", "effect chain",
        "spectral send", "spectral",
    ],
    "wash": [
        "wash", "atmosphere", "atmos", "ambience", "ambient layer",
        "noise", "texture layer",
    ],
}
|
|
551
|
+
|
|
552
|
+
# Ordered (role, suggested track name) pairs used by _determine_track_roles to
# pad keyword-triggered roles up to the requested track count.  Order encodes
# priority: foundation and rhythm first, then melodic/FX layers.
_DEFAULT_ROLE_MIX: list[tuple[str, str]] = [
    ("harmonic-foundation", "Drone Bed"),
    ("rhythmic-driver", "Rhythm"),
    ("melodic", "Melody A"),
    ("melodic", "Melody B"),
    ("fx-bus", "FX Bus"),
    ("wash", "Wash"),
    ("melodic", "Melody C"),
    ("bass", "Bass"),
    ("melodic", "Melody D"),
    ("wash", "Wash 2"),
    ("fx-bus", "FX Bus 2"),
    ("rhythmic-driver", "Rhythm 2"),
    # Extended entries for track_count > 12 (up to 20)
    ("melodic", "Melody E"),
    ("harmonic-foundation", "Harmonic Layer 2"),
    ("wash", "Atmos"),
    ("melodic", "Melody F"),
    ("bass", "Sub Layer"),
    ("fx-bus", "FX Bus 3"),
    ("rhythmic-driver", "Percussion"),
    ("wash", "Texture"),
]
# Maximum track count the default mix can satisfy.
_MAX_ROLE_MIX_COUNT = len(_DEFAULT_ROLE_MIX)
|
|
577
|
+
|
|
578
|
+
|
|
579
|
+
def _determine_track_roles(brief: str, track_count: int) -> list[dict]:
    """Map brief keywords to track roles.

    Detects roles whose _ROLE_KEYWORDS fire as substrings of the lowercased
    brief, then pads from _DEFAULT_ROLE_MIX until *track_count* entries exist.

    Args:
        brief: Free-text aesthetic brief.
        track_count: Desired number of track-role entries.

    Returns:
        A list of {"role": str, "suggested_name": str} dicts, length
        ``track_count`` whenever the keyword + default pool can supply it.
    """
    lower = brief.lower()
    triggered_roles: list[tuple[str, str]] = []

    # Detect specific roles from keywords — one hit per role is enough.
    for role, keywords in _ROLE_KEYWORDS.items():
        if any(kw in lower for kw in keywords):
            triggered_roles.append((role, _role_to_name(role)))

    # Pad or trim to track_count using the default mix.
    # First pass: avoid duplicates (except melodic).
    # Second pass: allow all duplicates if we still haven't reached track_count
    # (this covers large track_count values that exceed the unique-role count).
    # Maintain the seen-role set incrementally instead of rebuilding a list on
    # every inner iteration (was accidentally quadratic).
    seen_roles: set[str] = {role for role, _ in triggered_roles}
    for allow_duplicates in (False, True):
        if len(triggered_roles) >= track_count:
            break
        for default_role, default_name in _DEFAULT_ROLE_MIX:
            if len(triggered_roles) >= track_count:
                break
            if (
                allow_duplicates
                or default_role == "melodic"
                or default_role not in seen_roles
            ):
                triggered_roles.append((default_role, default_name))
                seen_roles.add(default_role)

    # Truncate in case keyword detection alone overshot track_count
    triggered_roles = triggered_roles[:track_count]

    return [{"role": r, "suggested_name": n} for r, n in triggered_roles]
|
|
618
|
+
|
|
619
|
+
|
|
620
|
+
def _role_to_name(role: str) -> str:
|
|
621
|
+
return {
|
|
622
|
+
"harmonic-foundation": "Drone Bed",
|
|
623
|
+
"rhythmic-driver": "Rhythm",
|
|
624
|
+
"melodic": "Melody",
|
|
625
|
+
"bass": "Bass",
|
|
626
|
+
"fx-bus": "FX Bus",
|
|
627
|
+
"wash": "Wash",
|
|
628
|
+
"spectral-processing": "Spectral",
|
|
629
|
+
}.get(role, role.title())
|
|
630
|
+
|
|
631
|
+
|
|
632
|
+
# ─── Preset selection ─────────────────────────────────────────────────────────
|
|
633
|
+
|
|
634
|
+
# Role → preferred preset_type values
|
|
635
|
+
# Role → preferred preset_type values
# Order encodes preference: _select_preset_for_role filters candidate presets
# by membership in this list (racks generally preferred over raw devices).
_ROLE_TO_PRESET_TYPES: dict[str, list[str]] = {
    "harmonic-foundation": ["instrument_rack", "instrument"],
    "rhythmic-driver": ["drum_rack", "instrument_rack"],
    "melodic": ["instrument_rack", "instrument"],
    "bass": ["instrument_rack", "instrument"],
    "fx-bus": ["audio_effect_rack", "audio_effect"],
    "wash": ["audio_effect_rack", "instrument_rack", "instrument"],
    "spectral-processing": ["audio_effect_rack"],
}
|
|
644
|
+
|
|
645
|
+
# Pack → role compatibility hints (preferred roles for each pack)
|
|
646
|
+
# Pack → role compatibility hints (preferred roles for each pack)
# Used by _select_preset_for_role to sort cohort packs so that packs hinted
# for the requested role are searched first.  Packs absent from this table
# are still searched, just after the hinted ones.
_PACK_ROLE_HINTS: dict[str, list[str]] = {
    "drone-lab": ["harmonic-foundation", "wash", "spectral-processing"],
    "mood-reel": ["harmonic-foundation", "melodic", "wash"],
    "pitchloop89": ["fx-bus", "spectral-processing", "wash"],
    "convolution-reverb": ["fx-bus", "wash"],
    "inspired-by-nature-by-dillon-bastan": ["harmonic-foundation", "melodic", "wash"],
    "lost-and-found": ["rhythmic-driver", "wash", "melodic"],
    "voice-box": ["melodic", "wash"],
    "chop-and-swing": ["rhythmic-driver", "melodic"],
    "latin-percussion": ["rhythmic-driver"],
    "glitch-and-wash": ["fx-bus", "wash", "spectral-processing"],
    "creative-extensions": ["fx-bus", "melodic", "wash"],
    "electric-keyboards": ["melodic", "harmonic-foundation"],
    "orchestral-strings": ["harmonic-foundation", "wash"],
    "orchestral-woodwinds": ["melodic", "harmonic-foundation"],
    "orchestral-brass": ["melodic", "harmonic-foundation"],
    "drum-essentials": ["rhythmic-driver"],
    "session-drums-club": ["rhythmic-driver"],
    "synth-essentials": ["melodic", "bass", "harmonic-foundation"],
    "build-and-drop": ["fx-bus"],
    "drive-and-glow": ["fx-bus", "wash"],
}
|
|
668
|
+
|
|
669
|
+
|
|
670
|
+
def _iter_all_preset_sidecars_for_pack(pack_slug: str):
    """Yield (pack_slug, stem, sidecar) for each parseable preset sidecar of one pack.

    Scans ``PRESET_PARSES_ROOT / pack_slug`` for ``*.json`` files in sorted
    order; files that fail to read or parse are skipped silently.
    """
    import json

    from .macro_fingerprint import PRESET_PARSES_ROOT

    pack_dir = PRESET_PARSES_ROOT / pack_slug
    if not pack_dir.is_dir():
        # Unknown pack (or parses not generated yet) → empty iterator.
        return
    for json_path in sorted(pack_dir.glob("*.json")):
        try:
            payload = json.loads(json_path.read_text())
        except (json.JSONDecodeError, OSError):
            continue
        yield pack_slug, json_path.stem, payload
|
|
683
|
+
|
|
684
|
+
|
|
685
|
+
def _select_preset_for_role(
    role: str,
    brief: str,
    pack_cohort: list[str],
    used_presets: set[str] | None = None,
) -> dict | None:
    """Pick a real preset from cohort packs that best matches the role.

    Packs whose _PACK_ROLE_HINTS include *role* are scanned first (stable
    sort keeps the cohort order otherwise). Each candidate sidecar must not
    appear in *used_presets* and must have a preset_type / rack_class
    compatible with _ROLE_TO_PRESET_TYPES[role]. Candidates are ranked by
    macro-fingerprint strength (strong > moderate > weak); a strong match
    inside a role-hinted pack short-circuits the scan, otherwise the best
    candidate seen overall is returned.

    Returns {pack_slug, preset_path, preset_name, rationale,
    fingerprint_strength} or None when nothing qualifies.
    """
    from .macro_fingerprint import _extract_fingerprint, _fingerprint_strength

    wanted_types = _ROLE_TO_PRESET_TYPES.get(role, ["instrument_rack"])
    excluded = set() if used_presets is None else used_presets

    strength_ranks = {"strong": 2, "moderate": 1, "weak": 0}
    top_pick: dict | None = None
    top_rank = -1

    # Role-compatible packs first.
    scan_order = sorted(
        pack_cohort,
        key=lambda slug: 0 if role in _PACK_ROLE_HINTS.get(slug, []) else 1,
    )

    for slug in scan_order:
        for _, stem, sidecar in _iter_all_preset_sidecars_for_pack(slug):
            # Skip presets already assigned to another track (dedup).
            if stem in excluded:
                continue

            ptype = sidecar.get("preset_type", "")
            rclass = sidecar.get("rack_class", "")
            lowered_class = rclass.lower()
            # Substring match: e.g. "instrument" also accepts "instrument_rack".
            if not any(t in ptype or t in lowered_class for t in wanted_types):
                continue

            strength = _fingerprint_strength(len(_extract_fingerprint(sidecar)))
            rank = strength_ranks.get(strength, -1)
            if rank <= top_rank:
                continue

            top_rank = rank
            label = sidecar.get("name", stem)
            top_pick = {
                "pack_slug": slug,
                "preset_path": stem,
                "preset_name": label,
                "rationale": (
                    f"{label} from {slug} — {strength} fingerprint, "
                    f"preset_type={ptype or rclass} "
                    f"[SOURCE: adg-parse]"
                ),
                "fingerprint_strength": strength,
            }
            # A strong match in a role-compatible pack ends the scan early.
            if rank == 2 and role in _PACK_ROLE_HINTS.get(slug, []):
                return top_pick

    return top_pick
|
|
757
|
+
|
|
758
|
+
|
|
759
|
+
def _build_track_proposal(
    role_defs: list[dict],
    pack_cohort: list[str],
    brief: str,
) -> list[dict]:
    """Assemble per-track entries with role + preset + rationale.

    For each role definition, pick the best not-yet-used preset from the
    cohort via _select_preset_for_role. Roles with no match are still
    emitted with preset=None and an explanatory rationale so the caller
    can see the gap.

    Args:
        role_defs: dicts with "role" and "suggested_name" keys.
        pack_cohort: pack slugs to draw presets from.
        brief: the aesthetic brief, forwarded to preset selection.

    Returns:
        One dict per role: track_name, role, preset, preset_name, rationale.
    """
    proposal: list[dict] = []
    used_presets: set[str] = set()

    for role_def in role_defs:
        role = role_def["role"]
        name = role_def["suggested_name"]

        # Pass used_presets directly so _select_preset_for_role skips duplicates
        preset = _select_preset_for_role(role, brief, pack_cohort, used_presets=used_presets)

        # Fixed: the original checked `if preset:` twice in a row; the two
        # branches are merged here. Also dropped the unused enumerate index.
        if preset:
            # Record the pick so later roles cannot reuse the same preset.
            used_presets.add(preset["preset_path"])
            proposal.append({
                "track_name": name,
                "role": role,
                "preset": f"{preset['pack_slug']}/{preset['preset_path']}",
                "preset_name": preset["preset_name"],
                "rationale": preset["rationale"],
            })
        else:
            # No preset found — still include the track with a fallback note
            proposal.append({
                "track_name": name,
                "role": role,
                "preset": None,
                "preset_name": None,
                "rationale": (
                    f"No matching preset found in cohort packs {pack_cohort} "
                    f"for role '{role}'. Add packs or broaden brief. "
                    "[SOURCE: agent-inference]"
                ),
            })

    return proposal
|
|
800
|
+
|
|
801
|
+
|
|
802
|
+
def _suggest_routing(
    track_proposal: list[dict],
    brief: str,
    pack_cohort: list[str],
) -> list[str]:
    """Suggest send routing based on cross_pack_workflow recipes and brief.

    Checks if any cross-workflow recipe matches the cohort, and surfaces
    its routing suggestions. Falls back to generic suggestions.

    Args:
        track_proposal: entries with at least "role" and "track_name" keys.
        brief: the aesthetic brief (currently unused; kept for signature
            parity with the other proposal helpers).
        pack_cohort: pack slugs chosen for this composition.

    Returns:
        Human-readable routing suggestion strings tagged with [SOURCE: ...].
    """
    suggestions: list[str] = []

    # Find cross-workflow recipes that match the cohort
    if _CROSS_WORKFLOWS_DIR.exists():
        # Fixed: import yaml once (was re-imported every loop iteration).
        # Missing PyYAML degrades to "no recipe suggestions", as before.
        try:
            import yaml
        except ImportError:
            yaml = None
        if yaml is not None:
            for wf_file in sorted(_CROSS_WORKFLOWS_DIR.glob("*.yaml")):
                try:
                    wf = yaml.safe_load(wf_file.read_text())
                except Exception:
                    continue
                # Fixed: safe_load returns None for empty files (and scalars
                # for degenerate ones); previously that raised AttributeError
                # at wf.get() and killed all routing suggestions.
                if not isinstance(wf, dict):
                    continue
                wf_packs = set(wf.get("packs_used", []))
                # Check if ≥2 cohort packs overlap with this workflow's packs
                overlap = wf_packs & set(pack_cohort)
                if len(overlap) >= 2:
                    when = wf.get("when_to_reach", "")
                    if when:
                        suggestions.append(
                            f"Cross-pack workflow '{wf.get('name', wf_file.stem)}' "
                            f"matches cohort ({', '.join(sorted(overlap))}): "
                            f"use `atlas_cross_pack_chain(workflow_entity_id=\"{wf.get('entity_id', '')}\")` "
                            f"[SOURCE: cross_pack_workflow.yaml]"
                        )

    # Generic routing based on detected roles
    roles_in_proposal = [t["role"] for t in track_proposal]
    track_names = {t["role"]: t["track_name"] for t in track_proposal}

    if "harmonic-foundation" in roles_in_proposal and "fx-bus" in roles_in_proposal:
        suggestions.append(
            f"{track_names['harmonic-foundation']} → {track_names['fx-bus']} "
            f"(via Send A) [SOURCE: agent-inference]"
        )
    if "harmonic-foundation" in roles_in_proposal and "wash" in roles_in_proposal:
        suggestions.append(
            f"{track_names['harmonic-foundation']} → {track_names['wash']} "
            f"(via Send B — ambient swell) [SOURCE: agent-inference]"
        )
    if "rhythmic-driver" in roles_in_proposal and "fx-bus" in roles_in_proposal:
        suggestions.append(
            f"{track_names['rhythmic-driver']} → {track_names['fx-bus']} "
            f"(bus sidechain for cohesion) [SOURCE: agent-inference]"
        )

    # Spectral routing hint if pitchloop89 in cohort
    if "pitchloop89" in pack_cohort:
        foundation = track_names.get("harmonic-foundation", "Drone Bed")
        suggestions.append(
            f"{foundation} → PitchLoop89 Send (via Return track with "
            f"PitchLoop89 loaded) — the spectral send technique "
            f"[SOURCE: cross_pack_workflow.yaml]"
        )

    return suggestions
|
|
865
|
+
|
|
866
|
+
|
|
867
|
+
def _build_executable_steps(
|
|
868
|
+
track_proposal: list[dict],
|
|
869
|
+
suggested_routing: list[str],
|
|
870
|
+
target_bpm: float | None,
|
|
871
|
+
target_scale: str,
|
|
872
|
+
) -> list[dict]:
|
|
873
|
+
"""Emit create_audio_track + load_browser_item + set_device_parameter steps.
|
|
874
|
+
|
|
875
|
+
Reuses Phase E's step structure conventions for load_browser_item actions.
|
|
876
|
+
"""
|
|
877
|
+
from .extract_chain import _emit_execution_steps
|
|
878
|
+
|
|
879
|
+
steps: list[dict] = []
|
|
880
|
+
|
|
881
|
+
# Step 0 (conditional): set BPM
|
|
882
|
+
if target_bpm and target_bpm > 0:
|
|
883
|
+
steps.append({
|
|
884
|
+
"action": "set_tempo",
|
|
885
|
+
"value": target_bpm,
|
|
886
|
+
"comment": f"Set project BPM to {target_bpm} [SOURCE: agent-inference]",
|
|
887
|
+
})
|
|
888
|
+
|
|
889
|
+
# Step 1 (conditional): set scale
|
|
890
|
+
if target_scale:
|
|
891
|
+
steps.append({
|
|
892
|
+
"action": "set_song_scale",
|
|
893
|
+
"scale": target_scale,
|
|
894
|
+
"comment": f"Set project scale to {target_scale} [SOURCE: agent-inference]",
|
|
895
|
+
})
|
|
896
|
+
|
|
897
|
+
# Per-track steps
|
|
898
|
+
for i, track in enumerate(track_proposal):
|
|
899
|
+
track_name = track["track_name"]
|
|
900
|
+
role = track["role"]
|
|
901
|
+
preset_path = track.get("preset")
|
|
902
|
+
preset_name = track.get("preset_name", "")
|
|
903
|
+
|
|
904
|
+
# Determine track type from role
|
|
905
|
+
if role in ("fx-bus", "wash") and track.get("preset"):
|
|
906
|
+
# Check preset type from slug
|
|
907
|
+
track_type = "audio"
|
|
908
|
+
else:
|
|
909
|
+
track_type = "midi" # most instrument tracks are midi-driven
|
|
910
|
+
|
|
911
|
+
# Create track
|
|
912
|
+
create_action = "create_midi_track" if track_type == "midi" else "create_audio_track"
|
|
913
|
+
steps.append({
|
|
914
|
+
"action": create_action,
|
|
915
|
+
"name": track_name,
|
|
916
|
+
"comment": f"Create {role} track '{track_name}' [SOURCE: agent-inference]",
|
|
917
|
+
})
|
|
918
|
+
|
|
919
|
+
# Load preset if available — emit URI-resolution hint via preset_resolver.
|
|
920
|
+
# BUG-F1#2: load_browser_item requires `uri` at execution time. Live's
|
|
921
|
+
# browser URIs are FileId-keyed (runtime-only), so we emit a
|
|
922
|
+
# browser_search_hint that the agent passes to search_browser before
|
|
923
|
+
# the actual load_browser_item call.
|
|
924
|
+
if preset_path and preset_name:
|
|
925
|
+
from .preset_resolver import resolve_preset_for_device
|
|
926
|
+
pack_slug = preset_path.split("/")[0] if "/" in preset_path else ""
|
|
927
|
+
res = resolve_preset_for_device(pack_slug, "InstrumentGroupDevice", preset_name)
|
|
928
|
+
load_step = {
|
|
929
|
+
"action": "load_browser_item",
|
|
930
|
+
"track_index": i + (1 if target_bpm and target_bpm > 0 else 0)
|
|
931
|
+
+ (1 if target_scale else 0),
|
|
932
|
+
"name": preset_name,
|
|
933
|
+
"device_class": "InstrumentGroupDevice",
|
|
934
|
+
"comment": (
|
|
935
|
+
f"Load '{preset_name}' on track '{track_name}'. "
|
|
936
|
+
"Resolve URI via search_browser(**browser_search_hint) "
|
|
937
|
+
"before calling load_browser_item. [SOURCE: adg-parse]"
|
|
938
|
+
),
|
|
939
|
+
"device_index": 0,
|
|
940
|
+
}
|
|
941
|
+
if res.get("found") and res.get("browser_search_hint"):
|
|
942
|
+
load_step["browser_search_hint"] = res["browser_search_hint"]
|
|
943
|
+
load_step["preset_match"] = res["match_type"]
|
|
944
|
+
else:
|
|
945
|
+
load_step["browser_search_hint"] = {
|
|
946
|
+
"name_filter": preset_name,
|
|
947
|
+
"suggested_path": "sounds",
|
|
948
|
+
}
|
|
949
|
+
load_step["preset_match"] = "none"
|
|
950
|
+
steps.append(load_step)
|
|
951
|
+
|
|
952
|
+
# Routing steps (suggest only — as set_track_send comments)
|
|
953
|
+
for routing_hint in suggested_routing:
|
|
954
|
+
if "→" in routing_hint and "[SOURCE: agent-inference]" in routing_hint:
|
|
955
|
+
steps.append({
|
|
956
|
+
"action": "set_track_send",
|
|
957
|
+
"comment": f"Manual routing: {routing_hint}",
|
|
958
|
+
"note": "Execute manually after tracks are created",
|
|
959
|
+
})
|
|
960
|
+
|
|
961
|
+
return steps
|
|
962
|
+
|
|
963
|
+
|
|
964
|
+
# ─── Eclectic mode ────────────────────────────────────────────────────────────
|
|
965
|
+
|
|
966
|
+
def _select_eclectic_presets(
    role_defs: list[dict],
    brief: str,
    pack_cohort: list[str],
) -> tuple[list[dict], str]:
    """Select presets that deliberately create tension across conflicting aesthetics.

    Returns (track_proposal, tension_resolution_text).

    Per spec: pair packs whose anti_patterns conflict. The tension_resolution
    field explains how the conflict resolves into a coherent new aesthetic
    (Eclectic Mode 4-point quality bar: coherent new aesthetic, not chaos).

    Fixes in this revision: removed the unused PRESET_PARSES_ROOT import and
    the unused _load_pack_anti_patterns() result; removed the duplicate
    "glitch-and-wash" key in _PACK_AESTHETIC_AXES (it silently overwrote the
    identical entry above); guarded the alternating-pack loop against a
    cohort with fewer than two packs (previously conflict_packs[1] raised
    IndexError).
    """
    # Identify packs in cohort that conflict (e.g., clean Mood Reel vs dark Drone Lab)
    _PACK_AESTHETIC_AXES: dict[str, dict] = {
        "drone-lab": {"axis": "dark_industrial", "acoustic_synthetic_axis": 1.0},
        "mood-reel": {"axis": "clean_cinematic", "acoustic_synthetic_axis": 0.5},
        "lost-and-found": {"axis": "dusty_lo_fi", "acoustic_synthetic_axis": 0.0},
        "inspired-by-nature-by-dillon-bastan": {"axis": "organic_pastoral", "acoustic_synthetic_axis": -0.5},
        "glitch-and-wash": {"axis": "glitch_experimental", "acoustic_synthetic_axis": 1.0},
        "build-and-drop": {"axis": "commercial_edm", "acoustic_synthetic_axis": 1.0},
        "voice-box": {"axis": "vocal_processed", "acoustic_synthetic_axis": 0.0},
        "pitchloop89": {"axis": "spectral_experimental", "acoustic_synthetic_axis": 0.8},
        # Orchestral packs: fully acoustic, sit at -1.0 on the acoustic_synthetic_axis
        "orchestral-strings": {"axis": "acoustic_orchestral", "acoustic_synthetic_axis": -1.0},
        "orchestral-woodwinds": {"axis": "acoustic_orchestral", "acoustic_synthetic_axis": -1.0},
        "orchestral-brass": {"axis": "acoustic_orchestral", "acoustic_synthetic_axis": -1.0},
        "brass-quartet-by-spitfire-audio": {"axis": "acoustic_orchestral", "acoustic_synthetic_axis": -1.0},
        "orchestral-mallets": {"axis": "acoustic_orchestral", "acoustic_synthetic_axis": -1.0},
        "skitter-and-step": {"axis": "rhythmic_digital", "acoustic_synthetic_axis": 0.8},
        "beat-tools": {"axis": "rhythmic_digital", "acoustic_synthetic_axis": 0.7},
    }

    _CONFLICTING_PAIRS: list[tuple[str, str]] = [
        ("drone-lab", "mood-reel"),
        ("glitch-and-wash", "inspired-by-nature-by-dillon-bastan"),
        ("lost-and-found", "build-and-drop"),
        ("drone-lab", "voice-box"),
        # Orchestral vs electronic conflicts
        ("orchestral-strings", "drone-lab"),
        ("orchestral-strings", "skitter-and-step"),
        ("orchestral-strings", "glitch-and-wash"),
        ("orchestral-woodwinds", "drone-lab"),
        ("orchestral-brass", "glitch-and-wash"),
    ]

    # Find first conflicting pair present in cohort
    conflict_pair: tuple[str, str] | None = None
    for a, b in _CONFLICTING_PAIRS:
        if a in pack_cohort and b in pack_cohort:
            conflict_pair = (a, b)
            break

    # If no conflict found, artificially add a conflicting pack
    if conflict_pair is None and pack_cohort:
        primary_pack = pack_cohort[0]
        primary_entry = _PACK_AESTHETIC_AXES.get(primary_pack, {})
        primary_axis = primary_entry.get("axis", "") if isinstance(primary_entry, dict) else primary_entry
        for pack, axis_entry in _PACK_AESTHETIC_AXES.items():
            pack_axis = axis_entry.get("axis", "") if isinstance(axis_entry, dict) else axis_entry
            if pack != primary_pack and pack_axis != primary_axis and pack not in pack_cohort:
                pack_cohort = pack_cohort + [pack]
                conflict_pair = (primary_pack, pack)
                break

    # Build the track proposal (alternating between conflicting packs)
    eclectic_proposal: list[dict] = []
    used_presets: set[str] = set()

    conflict_packs = list(conflict_pair) if conflict_pair else pack_cohort[:2]
    other_packs = [p for p in pack_cohort if p not in conflict_packs]

    for i, role_def in enumerate(role_defs):
        role = role_def["role"]
        name = role_def["suggested_name"]

        # Alternate between conflicting packs for eclectic texture.
        # Guard against a degenerate cohort (<2 packs): fall back to whatever
        # packs exist instead of indexing past conflict_packs.
        target_packs: list[str]
        if len(conflict_packs) >= 2:
            target_packs = [conflict_packs[i % 2]] + other_packs
        elif conflict_packs:
            target_packs = conflict_packs + other_packs
        else:
            target_packs = list(pack_cohort)

        preset = _select_preset_for_role(role, brief, target_packs, used_presets=used_presets)

        if preset:
            used_presets.add(preset["preset_path"])
            eclectic_proposal.append({
                "track_name": name,
                "role": role,
                "preset": f"{preset['pack_slug']}/{preset['preset_path']}",
                "preset_name": preset["preset_name"],
                "rationale": (
                    f"[ECLECTIC] {preset['rationale']} — "
                    f"intentionally from {preset['pack_slug']} "
                    f"(conflicting aesthetic) "
                    f"[SOURCE: adg-parse]"
                ),
            })
        else:
            eclectic_proposal.append({
                "track_name": name,
                "role": role,
                "preset": None,
                "preset_name": None,
                "rationale": f"[ECLECTIC] No preset found for role '{role}' [SOURCE: agent-inference]",
            })

    # Generate tension_resolution reasoning — interpolated from actual cohort
    if conflict_pair:
        axis_entry_a = _PACK_AESTHETIC_AXES.get(conflict_pair[0], {})
        axis_entry_b = _PACK_AESTHETIC_AXES.get(conflict_pair[1], {})
        axis_a = axis_entry_a.get("axis", conflict_pair[0]) if isinstance(axis_entry_a, dict) else conflict_pair[0]
        axis_b = axis_entry_b.get("axis", conflict_pair[1]) if isinstance(axis_entry_b, dict) else conflict_pair[1]

        # Derive a synthesized aesthetic name from the two poles
        axis_a_label = axis_a.replace("_", " ")
        axis_b_label = axis_b.replace("_", " ")
        # Build a pack-cohort summary for the resolution text
        other_packs_str = (
            ", ".join(p for p in pack_cohort if p not in conflict_pair)
            if len(pack_cohort) > 2 else ""
        )
        supporting_context = (
            f" Supporting packs in cohort: {other_packs_str}." if other_packs_str else ""
        )
        tension_resolution = (
            f"Eclectic tension: {conflict_pair[0]} ({axis_a_label}) vs "
            f"{conflict_pair[1]} ({axis_b_label}).{supporting_context} "
            f"Resolution: the conflict resolves as timbral contrast — "
            f"{conflict_pair[0]}'s {axis_a_label} character becomes the substrate "
            f"while {conflict_pair[1]}'s {axis_b_label} elements provide foreground "
            f"presence. The cohort [{', '.join(pack_cohort)}] synthesizes into "
            f"an aesthetic that is neither '{axis_a_label}' nor '{axis_b_label}' alone. "
            f"Passes Eclectic Mode 4-point quality bar: (1) coherent new aesthetic "
            f"identified from actual cohort packs, (2) tension serves the brief not just "
            f"genre-mixing, (3) specific tracks assigned to each pole, "
            f"(4) resolution principle stated. "
            f"[SOURCE: agent-inference]"
        )
    else:
        tension_resolution = (
            f"Eclectic mode: no strong aesthetic conflict found in available packs "
            f"[{', '.join(pack_cohort)}] — diversified across available cohort packs "
            f"for maximum textural variety. "
            "[SOURCE: agent-inference]"
        )

    return eclectic_proposal, tension_resolution
|
|
1121
|
+
|
|
1122
|
+
|
|
1123
|
+
@lru_cache(maxsize=1)
def _load_pack_anti_patterns() -> dict[str, list[str]]:
    """Load anti_patterns from cross_workflow YAMLs (best proxy available without overlay read).

    Maps pack slug → list of combined "avoid"/"gotcha" strings drawn from
    every workflow that lists the pack in packs_used. Cached for the process
    lifetime (maxsize=1, no arguments).

    Fixes in this revision: exceptions are now handled per file instead of
    one try around the whole loop — previously a single malformed or empty
    YAML file (safe_load returns None → AttributeError on .get) silently
    aborted processing of all remaining files. Non-dict documents are
    skipped, and the loop-invariant combined string is built once per file
    rather than once per pack.
    """
    anti_patterns: dict[str, list[str]] = {}
    if not _CROSS_WORKFLOWS_DIR.exists():
        return anti_patterns
    try:
        import yaml
    except ImportError:
        # Best-effort: no PyYAML → no anti-pattern data.
        return anti_patterns
    for wf_file in _CROSS_WORKFLOWS_DIR.glob("*.yaml"):
        try:
            wf = yaml.safe_load(wf_file.read_text())
        except Exception:
            continue
        if not isinstance(wf, dict):
            continue
        combined = f"{wf.get('avoid', '')} {wf.get('gotcha', '')}".strip()
        for pack in wf.get("packs_used", []):
            bucket = anti_patterns.setdefault(pack, [])
            if combined:
                bucket.append(combined)
    return anti_patterns
|
|
1144
|
+
|
|
1145
|
+
|
|
1146
|
+
# ─── Main entry point ─────────────────────────────────────────────────────────
|
|
1147
|
+
|
|
1148
|
+
def pack_aware_compose(
    aesthetic_brief: str,
    target_bpm: float | None = None,
    target_scale: str = "",
    track_count: int = 6,
    pack_diversity: str = "coherent",
) -> dict:
    """Bootstrap a project with pack-coherent track selection.

    Called by the MCP tool wrapper in tools.py.

    Pipeline: parse the brief into a pack cohort → derive per-track roles →
    select presets (eclectic or coherent mode) → suggest routing → emit an
    executable step plan.
    """
    # An empty or whitespace-only brief cannot drive pack selection.
    if not (aesthetic_brief and aesthetic_brief.strip()):
        return {
            "error": "aesthetic_brief is required",
            "status": "error",
        }

    # BUG-EDGE#2: coerce track_count (MCP may pass as string)
    track_count = _coerce_int(track_count, 6)
    # BUG-EDGE#3: coerce target_bpm (MCP may pass as string); invalid strings → None
    if target_bpm is not None:
        target_bpm = _coerce_float(target_bpm, 0.0) or None

    # ── 1. Parse brief ────────────────────────────────────────────────────────
    analysis = _parse_brief(aesthetic_brief)
    if not analysis["pack_cohort"]:
        # No cohort identified — fall back to a generic, common-pack set.
        analysis["pack_cohort"] = ["drone-lab", "mood-reel", "lost-and-found"]
        analysis["primary_aesthetic"] = "ambient"
    cohort = analysis["pack_cohort"]

    # ── 2. Determine track roles ──────────────────────────────────────────────
    # Cap at the maximum supported by _DEFAULT_ROLE_MIX.
    effective_count = min(track_count, _MAX_ROLE_MIX_COUNT)
    roles = _determine_track_roles(aesthetic_brief, effective_count)

    # ── 3. Build track proposal ───────────────────────────────────────────────
    tension: str | None = None
    if pack_diversity == "eclectic":
        proposal, tension = _select_eclectic_presets(roles, aesthetic_brief, list(cohort))
    else:
        proposal = _build_track_proposal(roles, list(cohort), aesthetic_brief)

    # ── 4. Suggest routing / 5. Build executable plan ────────────────────────
    routing = _suggest_routing(proposal, aesthetic_brief, cohort)
    steps = _build_executable_steps(proposal, routing, target_bpm, target_scale)

    result: dict = {
        "brief_analysis": analysis,
        # BUG-NEW#4: expose pack_cohort at top level for easy caller access
        "pack_cohort": analysis["pack_cohort"],
        "track_proposal": proposal,
        "suggested_routing": routing,
        "executable_steps": steps,
        "warnings": [],
        "sources": [
            "packs namespace queries [SOURCE: adg-parse]",
            "artist-vocabularies.md [SOURCE: artist-vocabularies.md]",
            "genre-vocabularies.md [SOURCE: genre-vocabularies.md]",
            "cross_pack_workflow recipes [SOURCE: cross_pack_workflow.yaml]",
            "agent-inference: role assignment + step generation [SOURCE: agent-inference]",
        ],
    }

    # Report truncation when track_count exceeds the role-mix cap
    if track_count != effective_count:
        result["requested_vs_returned"] = {
            "requested": track_count,
            "returned": effective_count,
            "note": (
                f"track_count={track_count} exceeds maximum supported role-mix size "
                f"({_MAX_ROLE_MIX_COUNT}). Returned {effective_count} tracks. "
                "To extend, add more entries to _DEFAULT_ROLE_MIX."
            ),
        }
        # BUG-EDGE#9: surface truncation in warnings list
        result["warnings"].append(
            f"track_count={track_count} was capped at {effective_count} "
            f"(the supported maximum). See requested_vs_returned for details."
        )

    if tension:
        result["reasoning_artifact"] = {
            "mode": "eclectic",
            "tension_resolution": tension,
            "note": (
                "This composition was generated in Eclectic Mode. "
                "The packs were deliberately chosen for aesthetic conflict "
                "with a stated resolution principle. "
                "See MEMORY.md §feedback_eclectic_mode_invocation for context."
            ),
        }

    return result
|