livepilot 1.23.2 → 1.23.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +124 -0
- package/README.md +108 -10
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +39 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/atlas/cross_pack_chain.py +658 -0
- package/mcp_server/atlas/demo_story.py +700 -0
- package/mcp_server/atlas/extract_chain.py +786 -0
- package/mcp_server/atlas/macro_fingerprint.py +554 -0
- package/mcp_server/atlas/overlays.py +95 -3
- package/mcp_server/atlas/pack_aware_compose.py +1255 -0
- package/mcp_server/atlas/preset_resolver.py +238 -0
- package/mcp_server/atlas/tools.py +1001 -31
- package/mcp_server/atlas/transplant.py +1177 -0
- package/mcp_server/mix_engine/state_builder.py +44 -1
- package/mcp_server/runtime/capability_state.py +34 -3
- package/mcp_server/runtime/remote_commands.py +10 -0
- package/mcp_server/server.py +45 -24
- package/mcp_server/tools/agent_os.py +33 -9
- package/mcp_server/tools/analyzer.py +84 -23
- package/mcp_server/tools/browser.py +20 -1
- package/mcp_server/tools/devices.py +78 -11
- package/mcp_server/tools/perception.py +5 -1
- package/mcp_server/tools/tracks.py +39 -2
- package/mcp_server/user_corpus/__init__.py +48 -0
- package/mcp_server/user_corpus/manifest.py +142 -0
- package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
- package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
- package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
- package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
- package/mcp_server/user_corpus/runner.py +261 -0
- package/mcp_server/user_corpus/scanner.py +115 -0
- package/mcp_server/user_corpus/scanners/__init__.py +18 -0
- package/mcp_server/user_corpus/scanners/adg.py +79 -0
- package/mcp_server/user_corpus/scanners/als.py +144 -0
- package/mcp_server/user_corpus/scanners/amxd.py +374 -0
- package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
- package/mcp_server/user_corpus/tools.py +904 -0
- package/mcp_server/user_corpus/wizard.py +224 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +7 -2
- package/remote_script/LivePilot/devices.py +9 -0
- package/remote_script/LivePilot/simpler_sample.py +98 -0
- package/requirements.txt +3 -3
- package/server.json +2 -2
|
@@ -0,0 +1,658 @@
|
|
|
1
|
+
"""Pack-Atlas Phase F — Cross-Pack Chain Executor.
|
|
2
|
+
|
|
3
|
+
Execute a cross_pack_workflow recipe step-by-step by parsing its signal_flow
|
|
4
|
+
field into structured executable actions.
|
|
5
|
+
|
|
6
|
+
Real YAML schema (discovered 2026-04-27):
|
|
7
|
+
entity_id: dub_techno_spectral_drone_bed
|
|
8
|
+
entity_type: cross_pack_workflow
|
|
9
|
+
name: Dub Techno Spectral Drone Bed
|
|
10
|
+
description: ...
|
|
11
|
+
packs_used: [drone-lab, pitchloop89, convolution-reverb]
|
|
12
|
+
devices_used: [harmonic_drone_generator, pitchloop89, convolution_reverb]
|
|
13
|
+
signal_flow: |-
|
|
14
|
+
1. <step text>
|
|
15
|
+
2. → <step text>
|
|
16
|
+
...
|
|
17
|
+
when_to_reach: ...
|
|
18
|
+
gotcha: ...
|
|
19
|
+
avoid: ...
|
|
20
|
+
|
|
21
|
+
signal_flow is a multi-line string with numbered steps. Steps beginning with
|
|
22
|
+
"→" are continuation steps (chained into the previous device).
|
|
23
|
+
|
|
24
|
+
All workflow execution in Phase F is DRY-RUN only. Live execution is gated
|
|
25
|
+
on an active Remote Script connection.
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
from __future__ import annotations
|
|
29
|
+
|
|
30
|
+
import re
|
|
31
|
+
from pathlib import Path
|
|
32
|
+
from typing import Any
|
|
33
|
+
|
|
34
|
+
# ─── Paths ────────────────────────────────────────────────────────────────────

# User-level overlay directory holding cross-pack workflow YAML files,
# e.g. ~/.livepilot/atlas-overlays/packs/cross_workflows/<slug>.yaml.
# Filenames in this directory use hyphenated slugs (see _entity_id_to_filename).
_CROSS_WORKFLOWS_DIR = (
    Path.home()
    / ".livepilot"
    / "atlas-overlays"
    / "packs"
    / "cross_workflows"
)
|
|
43
|
+
|
|
44
|
+
# ─── Slug normalization ───────────────────────────────────────────────────────
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _normalize_slug(workflow_entity_id: str) -> str:
|
|
48
|
+
"""Normalize entity_id: convert underscores to hyphens for filename lookup,
|
|
49
|
+
or hyphens to underscores for entity_id matching.
|
|
50
|
+
|
|
51
|
+
The YAML files use hyphens in filenames but entity_id fields use underscores.
|
|
52
|
+
"""
|
|
53
|
+
return workflow_entity_id.replace("_", "-")
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _entity_id_to_filename(workflow_entity_id: str) -> str:
    """Map an entity_id (underscore form) to its YAML filename (hyphen form)."""
    slug = workflow_entity_id.replace("_", "-")
    return f"{slug}.yaml"
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
# ─── Workflow loader ──────────────────────────────────────────────────────────


def _load_workflow(workflow_entity_id: str) -> dict | None:
    """Load a cross-pack workflow YAML by entity id.

    Lookup order (hyphens and underscores are interchangeable):
      1. hyphenated filename:  dub-techno-spectral-drone-bed.yaml
      2. underscore filename:  dub_techno_spectral_drone_bed.yaml
      3. entity_id field match across all YAMLs in the directory

    Returns the parsed YAML dict, or None when the directory is missing,
    PyYAML is unavailable, or no matching/parseable workflow exists.
    """
    if not _CROSS_WORKFLOWS_DIR.exists():
        return None

    try:
        import yaml
    except ImportError:
        # PyYAML is an optional dependency — degrade to "not found".
        return None

    # 1 + 2. Direct filename candidates: hyphen form first, then underscore
    # (some workflows may be stored with underscore filenames).
    candidates = [
        _CROSS_WORKFLOWS_DIR / _entity_id_to_filename(workflow_entity_id),
        _CROSS_WORKFLOWS_DIR / (workflow_entity_id.replace("-", "_") + ".yaml"),
    ]
    for path in candidates:
        if path.exists():
            try:
                return yaml.safe_load(path.read_text())
            except Exception:
                # Unreadable or corrupt YAML at the expected path — give up,
                # matching the original behavior of returning None here.
                return None

    # 3. Scan all YAMLs for a matching entity_id field. Compare in
    # hyphen-normalized form: the exact-match case is subsumed by it.
    wanted = workflow_entity_id.replace("_", "-")
    for wf_file in sorted(_CROSS_WORKFLOWS_DIR.glob("*.yaml")):
        try:
            wf = yaml.safe_load(wf_file.read_text())
        except Exception:
            continue  # unparseable file — keep scanning
        # Explicit guard instead of relying on AttributeError for non-mapping
        # YAML payloads (empty file -> None, or a top-level list/scalar).
        if not isinstance(wf, dict):
            continue
        if str(wf.get("entity_id", "")).replace("_", "-") == wanted:
            return wf

    return None
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
# ─── Signal flow parser ───────────────────────────────────────────────────────
|
|
117
|
+
|
|
118
|
+
# Verb → action mapping for signal_flow step parsing.
|
|
119
|
+
# Each entry is (list_of_verb_strings, action).
|
|
120
|
+
# Verb strings are matched with `in text_lower` — they MUST include a trailing
|
|
121
|
+
# space so that substring-of-word matches (e.g. "chain" inside "Sidechain") are
|
|
122
|
+
# avoided. For the routing semantic, require "chain to " or "chain into " so
|
|
123
|
+
# that "Sidechain" and "master-bus chain" are not caught.
|
|
124
|
+
_VERB_TO_ACTION: list[tuple[list[str], str]] = [
|
|
125
|
+
# Load / open / import
|
|
126
|
+
(["load ", "open ", "import ", "place "], "load_browser_item"),
|
|
127
|
+
# Insert / add
|
|
128
|
+
(["insert ", "add ", "drop "], "insert_device"),
|
|
129
|
+
# Set / tweak / configure / adjust
|
|
130
|
+
(["set ", "tweak ", "configure ", "adjust ", "tune ", "dial ", "use "], "set_device_parameter"),
|
|
131
|
+
# Automate
|
|
132
|
+
(["automate ", "modulate ", "lfo "], "set_device_parameter"),
|
|
133
|
+
# Fire / play / trigger / activate
|
|
134
|
+
(["fire ", "play ", "trigger ", "activate ", "launch "], "fire_clip"),
|
|
135
|
+
# Chain / route / send / feed — require "chain to"/"chain into" to avoid
|
|
136
|
+
# matching "Sidechain" or "master-bus chain"
|
|
137
|
+
(["chain to ", "chain into ", "route ", "feed ", "send to "], "set_track_send"),
|
|
138
|
+
# Anything else
|
|
139
|
+
]
|
|
140
|
+
|
|
141
|
+
# Known device name fragments that indicate a "load" step even without a load verb.
|
|
142
|
+
# Used when a step begins with a noun (device name) rather than a verb.
|
|
143
|
+
_KNOWN_DEVICE_FRAGMENTS: list[str] = [
|
|
144
|
+
"harmonic drone generator",
|
|
145
|
+
"pitchloop89",
|
|
146
|
+
"convolution reverb",
|
|
147
|
+
"auto filter",
|
|
148
|
+
"tree tone",
|
|
149
|
+
"bad speaker",
|
|
150
|
+
"reverb",
|
|
151
|
+
"delay",
|
|
152
|
+
"echo",
|
|
153
|
+
"saturator",
|
|
154
|
+
"granulator",
|
|
155
|
+
"sampler",
|
|
156
|
+
"simpler",
|
|
157
|
+
"operator",
|
|
158
|
+
"drift",
|
|
159
|
+
"wavetable",
|
|
160
|
+
]
|
|
161
|
+
|
|
162
|
+
# BUG-INT#1 / BUG-NEW#3: Fragments that map to a more specific suggested_path
|
|
163
|
+
# rather than the broad "sounds" default. If a fragment appears in this dict,
|
|
164
|
+
# its path is used instead. Native Ableton audio-effect fragments use
|
|
165
|
+
# "audio_effects"; instrument/synth fragments use "instruments".
|
|
166
|
+
_FRAGMENT_TO_SUGGESTED_PATH: dict[str, str] = {
|
|
167
|
+
"echo": "audio_effects",
|
|
168
|
+
"reverb": "audio_effects",
|
|
169
|
+
"convolution reverb": "audio_effects",
|
|
170
|
+
"delay": "audio_effects",
|
|
171
|
+
"auto filter": "audio_effects",
|
|
172
|
+
"saturator": "audio_effects",
|
|
173
|
+
"granulator": "instruments",
|
|
174
|
+
"sampler": "instruments",
|
|
175
|
+
"simpler": "instruments",
|
|
176
|
+
"operator": "instruments",
|
|
177
|
+
"drift": "instruments",
|
|
178
|
+
"wavetable": "instruments",
|
|
179
|
+
"harmonic drone generator": "instruments",
|
|
180
|
+
"tree tone": "instruments",
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
# Patterns to extract device/clip names from step text
|
|
184
|
+
_DEVICE_EXTRACT_PATTERNS: list[re.Pattern] = [
|
|
185
|
+
re.compile(r"`([^`]+)`"), # backtick-quoted names
|
|
186
|
+
re.compile(r'"([^"]+)"'), # double-quoted names
|
|
187
|
+
re.compile(r"'([^']+)'"), # single-quoted names
|
|
188
|
+
re.compile(r"\b([A-Z][A-Za-z0-9\s\-]+(?:Reverb|Delay|Filter|Loop|Echo|Generator|Device|89))\b"),
|
|
189
|
+
]
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def _classify_step_verb(text_lower: str) -> str:
|
|
193
|
+
"""Classify a signal_flow step text into an action type.
|
|
194
|
+
|
|
195
|
+
Handles three patterns:
|
|
196
|
+
- verb-first ("Load HDG from...")
|
|
197
|
+
- noun-first ("Harmonic Drone Generator (Drone Lab) tuned to...")
|
|
198
|
+
- pack-prefix-first ("Inspired by Nature `tree_tone` on a sustained Cmaj7...")
|
|
199
|
+
→ BUG-F2#4: pack-name prefix shadows device-name match. Fix by
|
|
200
|
+
checking the device fragment anywhere in the first 80 chars
|
|
201
|
+
rather than only at the start.
|
|
202
|
+
"""
|
|
203
|
+
for verbs, action in _VERB_TO_ACTION:
|
|
204
|
+
for verb in verbs:
|
|
205
|
+
if verb in text_lower:
|
|
206
|
+
return action
|
|
207
|
+
|
|
208
|
+
# Noun-first device mentions — startswith first (cheap), then in-prefix substring
|
|
209
|
+
for fragment in _KNOWN_DEVICE_FRAGMENTS:
|
|
210
|
+
if text_lower.startswith(fragment) or text_lower.startswith("→ " + fragment):
|
|
211
|
+
return "load_browser_item"
|
|
212
|
+
|
|
213
|
+
# Pack-prefix or quoted device-name pattern — substring search in first 80 chars.
|
|
214
|
+
# Catches "Inspired by Nature `tree_tone`...", "Lost and Found `Bad Speaker`...".
|
|
215
|
+
# Underscore-normalize so `tree_tone` matches the `tree tone` fragment.
|
|
216
|
+
head = text_lower[:80].replace("_", " ")
|
|
217
|
+
for fragment in _KNOWN_DEVICE_FRAGMENTS:
|
|
218
|
+
if fragment in head:
|
|
219
|
+
return "load_browser_item"
|
|
220
|
+
|
|
221
|
+
return "manual_step"
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def _extract_device_name(text: str) -> str | None:
    """Return the first device/clip name captured from *text*, or None.

    Patterns are tried in declaration order (backtick-, double-, single-quoted
    names, then the TitleCase device-suffix heuristic); first capture wins.
    """
    hits = (rx.search(text) for rx in _DEVICE_EXTRACT_PATTERNS)
    first = next((m for m in hits if m is not None), None)
    return first.group(1).strip() if first is not None else None
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def _extract_numeric_value(text: str) -> float | None:
|
|
234
|
+
"""Extract the first numeric value from text (for set_device_parameter).
|
|
235
|
+
|
|
236
|
+
Requires a word/whitespace/sign boundary before the digit so that digits
|
|
237
|
+
embedded in device names (e.g. 'PitchLoop89') are not matched.
|
|
238
|
+
"""
|
|
239
|
+
m = re.search(r"(?<![A-Za-z\d])([-+]?(?:\d+\.?\d*|\.\d+)(?:[eE][-+]?\d+)?)", text)
|
|
240
|
+
if m:
|
|
241
|
+
try:
|
|
242
|
+
return float(m.group(1))
|
|
243
|
+
except ValueError:
|
|
244
|
+
pass
|
|
245
|
+
return None
|
|
246
|
+
|
|
247
|
+
|
|
248
|
+
def _extract_parameter_name(text: str) -> str | None:
|
|
249
|
+
"""Extract a parameter name from step text.
|
|
250
|
+
|
|
251
|
+
Looks for patterns like "Pitch A +0.05", "Feedback A ... 0.85",
|
|
252
|
+
"Drive 0.3-0.4", "LFO at 0.1Hz".
|
|
253
|
+
"""
|
|
254
|
+
# Pattern: "Parameter Name value" where Parameter Name is Title Case words
|
|
255
|
+
# or backtick-quoted
|
|
256
|
+
param_patterns = [
|
|
257
|
+
re.compile(r"`([^`]+)`\s+[-+]?[\d.]+"), # `Pitch A` +0.05
|
|
258
|
+
re.compile(r"\b((?:[A-Z][a-zA-Z]+\s+){1,3})[-+]?[\d.]"), # Pitch A 0.05
|
|
259
|
+
re.compile(r"\b([A-Z][a-zA-Z]+(?:\s+[A-Z])?)\s+(?:at\s+)?[-+]?[\d.]"),
|
|
260
|
+
]
|
|
261
|
+
for pattern in param_patterns:
|
|
262
|
+
m = pattern.search(text)
|
|
263
|
+
if m:
|
|
264
|
+
name = m.group(1).strip()
|
|
265
|
+
# Filter out generic English words that aren't parameter names
|
|
266
|
+
if name not in ("Set", "Use", "Load", "Add", "With", "For", "The", "A", "An"):
|
|
267
|
+
return name
|
|
268
|
+
return None
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def _parse_signal_flow(signal_flow: str | list | None) -> list[dict]:
    """Parse the signal_flow field into structured action steps.

    Handles three formats:
    1. Multi-line string with numbered steps (most common in corpus)
    2. List of strings
    3. List of dicts (uses their "text" or "description" field)

    Each output step:
    {
        step: int,
        action: str,  # load_browser_item | insert_device | set_device_parameter
                      # fire_clip | set_track_send | manual_step
        device_name: str?,
        parameter_name: str?,
        value: float?,
        raw_text: str,
        result: "dry-run"
    }
    """
    if not signal_flow:
        return []

    # Normalize to a list of raw text strings
    raw_lines: list[str] = []

    if isinstance(signal_flow, str):
        # Split on numbered steps: "1.", "2.", etc.
        # Also handle continuation lines starting with "→" or "->"
        for line in signal_flow.splitlines():
            line = line.strip()
            if not line:
                continue
            # Split compound steps that have mid-line "→" (e.g. "HDG ... → fire clip")
            # Lines that START with digits+dot are the main step; any "→" within
            # the line after the first segment is a sub-step.
            # Only split if the line doesn't START with "→" (those are handled below)
            if not line.lstrip().startswith("→"):
                # Strip the leading step number to get the content
                content = re.sub(r"^\d+[.)]\s*", "", line).strip()
                # Split on mid-line "→" markers to produce sub-steps,
                # but ONLY when the arrow is NOT inside parentheses.
                # Strategy: mask text inside (...) before splitting, then restore.
                # The mask replaces parenthesized interiors with 'X' runs of the
                # SAME length, so offsets found in `masked` are valid in `content`.
                masked = re.sub(r"\([^)]*\)", lambda m: "(" + "X" * (len(m.group(0)) - 2) + ")", content)
                split_positions = [m.start() for m in re.finditer(r"\s+→\s+", masked)]
                if split_positions:
                    # Reconstruct sub_parts using original content at split positions
                    parts: list[str] = []
                    prev = 0
                    for pos in split_positions:
                        # Find the actual end of the arrow in original (lengths match,
                        # so the search from `pos` hits the same arrow as in `masked`)
                        arrow_m = re.search(r"\s+→\s+", content[pos:])
                        if arrow_m:
                            parts.append(content[prev:pos].strip())
                            prev = pos + arrow_m.end()
                    parts.append(content[prev:].strip())
                    if len(parts) > 1:
                        # First segment is the main step; the rest become "→ "-prefixed
                        # continuation lines so the loop below treats them as sub-steps.
                        raw_lines.append(parts[0])
                        for sub in parts[1:]:
                            raw_lines.append("→ " + sub)
                        continue
                # No split happened — fall through with number-stripped content
                raw_lines.append(content)
                continue
            raw_lines.append(line)

    elif isinstance(signal_flow, list):
        for item in signal_flow:
            if isinstance(item, str):
                raw_lines.append(item.strip())
            elif isinstance(item, dict):
                # Dict item — convert to text representation
                text = item.get("text") or item.get("description") or str(item)
                raw_lines.append(text.strip())
    else:
        # Fallback: stringify
        raw_lines = [str(signal_flow)]

    # Parse each line into a structured step
    steps: list[dict] = []
    step_counter = 0

    for line in raw_lines:
        if not line:
            continue

        # Strip leading step number (e.g. "1." or "1)")
        clean = re.sub(r"^\d+[.)]\s*", "", line).strip()
        # Strip leading "→" or "->"
        clean = re.sub(r"^[→\->]+\s*", "", clean).strip()

        if not clean:
            continue

        step_counter += 1
        clean_lower = clean.lower()

        action = _classify_step_verb(clean_lower)
        device_name = _extract_device_name(clean)
        param_name = None
        value = None

        if action == "set_device_parameter":
            param_name = _extract_parameter_name(clean)
            value = _extract_numeric_value(clean)
            if not param_name and device_name:
                # The "device_name" might actually be the parameter
                param_name = device_name
                device_name = None

        step: dict = {
            "step": step_counter,
            "action": action,
            # raw_text keeps the pre-cleaning line (with number/arrow prefix)
            # so callers can show the original recipe wording.
            "raw_text": line,
            "result": "dry-run",
        }

        # BUG-INT#1 / BUG-NEW#3: if the regex-based extractor returned None for a
        # load_browser_item step (e.g. "Echo with subtle wow/flutter" has no
        # backtick/quote/TitleCase device name), fall back to scanning the first
        # 80 chars of the line for any known device fragment and use that as the
        # name_filter.
        if action == "load_browser_item" and device_name is None:
            head = clean_lower[:80].replace("_", " ")
            for fragment in _KNOWN_DEVICE_FRAGMENTS:
                if fragment in head:
                    device_name = fragment  # lower-case is fine for name_filter
                    break

        if device_name:
            step["device_name"] = device_name
            # Consistency with extract_chain + pack_aware_compose: every
            # load_browser_item step gets a browser_search_hint the executor
            # can pass to search_browser to resolve the runtime FileId-keyed
            # URI. Cross-pack workflows have no pack context (the YAML names
            # the device by free-form prose), so suggested_path defaults to
            # the broadest browser category.
            if action == "load_browser_item":
                # Use a more precise suggested_path if the fragment is a known
                # native audio-effect or instrument (BUG-INT#1 / BUG-NEW#3).
                hint_name = device_name
                hint_path = _FRAGMENT_TO_SUGGESTED_PATH.get(
                    device_name.lower(), "sounds"
                )
                step["browser_search_hint"] = {
                    "name_filter": hint_name,
                    "suggested_path": hint_path,
                }
        if param_name:
            step["parameter_name"] = param_name
        if value is not None:
            step["value"] = value

        steps.append(step)

    return steps
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
# ─── Aesthetic overrides ──────────────────────────────────────────────────────


def _apply_aesthetic_overrides(
    steps: list[dict],
    customize_aesthetic: dict | None,
) -> list[dict]:
    """Apply aesthetic customizations to parsed steps.

    Currently handles:
    - target_scale: inserts a set_song_scale step at the top
    - target_bpm: inserts a set_tempo step at the top
    - transpose_semitones: adjusts any numeric pitch values

    NOTE: the transpose path mutates *steps* in place (replaces pitch-step
    entries with copies and may append a trailing manual_step); callers
    should rely on the returned list, which is overrides + steps.
    """
    if not customize_aesthetic:
        return steps

    overrides: list[dict] = []

    # Scale override
    target_scale = customize_aesthetic.get("target_scale", "")
    if target_scale:
        overrides.append({
            "step": 0,
            "action": "set_song_scale",
            "scale": target_scale,
            "raw_text": f"[OVERRIDE] Set scale to {target_scale}",
            "result": "dry-run",
            "comment": f"Aesthetic override: target_scale={target_scale} [SOURCE: agent-inference]",
        })

    # BPM override
    # BUG-EDGE#5: guard against non-numeric target_bpm (e.g. "not-a-number" string)
    target_bpm = customize_aesthetic.get("target_bpm")
    if target_bpm is not None:
        try:
            bpm_val = float(target_bpm)
            overrides.append({
                "step": 0,
                "action": "set_tempo",
                "value": bpm_val,
                "raw_text": f"[OVERRIDE] Set BPM to {target_bpm}",
                "result": "dry-run",
                "comment": f"Aesthetic override: target_bpm={target_bpm} [SOURCE: agent-inference]",
            })
        except (ValueError, TypeError):
            pass  # malformed target_bpm — skip silently

    # Transpose override for pitch-related parameter steps
    transpose_st = customize_aesthetic.get("transpose_semitones")
    if transpose_st is not None:
        try:
            st = float(transpose_st)
            mutated_count = 0
            for i, step in enumerate(steps):
                if step.get("action") == "set_device_parameter":
                    pname = (step.get("parameter_name") or "").lower()
                    if any(k in pname for k in ("pitch", "note", "transpose", "tune")):
                        if step.get("value") is not None:
                            step = dict(step)  # copy — don't mutate caller's dict
                            step["value"] = round(step["value"] + st, 3)
                            step["comment"] = (
                                f"Pitch transposed by {st:+.1f} semitones "
                                f"[SOURCE: agent-inference]"
                            )
                            steps[i] = step  # write copy back into list
                            mutated_count += 1

            # BUG-NEW#2: if no numeric pitch steps were found to mutate, emit a
            # manual_step so the caller knows the transpose was requested but
            # couldn't be applied automatically.
            if st != 0 and mutated_count == 0:
                sign = "+" if st > 0 else ""
                steps.append({
                    "step": len(steps) + 1,
                    "action": "manual_step",
                    "raw_text": f"[OVERRIDE] Transpose {sign}{st} semitones",
                    "comment": (
                        "No numeric pitch parameters were parsed from this workflow's "
                        "signal_flow. Apply the transpose manually after loading the chain. "
                        "[SOURCE: agent-inference]"
                    ),
                    "result": "dry-run",
                })
        except (TypeError, ValueError):
            pass

    # Renumber: overrides go first (negative step numbers), then original
    for i, ov in enumerate(overrides):
        ov["step"] = -(len(overrides) - i)  # -N, -(N-1), ..., -1

    return overrides + steps
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
# ─── Dry-run executor ─────────────────────────────────────────────────────────
|
|
526
|
+
|
|
527
|
+
|
|
528
|
+
def _execute_or_dry_run(
|
|
529
|
+
steps: list[dict],
|
|
530
|
+
target_track_index: int,
|
|
531
|
+
) -> list[dict]:
|
|
532
|
+
"""For Phase F, all execution is dry-run only.
|
|
533
|
+
|
|
534
|
+
Marks each step result: "dry-run". If target_track_index >= 0,
|
|
535
|
+
adds a target_track annotation to device-loading steps.
|
|
536
|
+
"""
|
|
537
|
+
executed: list[dict] = []
|
|
538
|
+
for step in steps:
|
|
539
|
+
out = dict(step)
|
|
540
|
+
out["result"] = "dry-run"
|
|
541
|
+
if target_track_index >= 0:
|
|
542
|
+
if out.get("action") in ("load_browser_item", "insert_device"):
|
|
543
|
+
out["target_track_index"] = target_track_index
|
|
544
|
+
executed.append(out)
|
|
545
|
+
return executed
|
|
546
|
+
|
|
547
|
+
|
|
548
|
+
# ─── Main entry point ─────────────────────────────────────────────────────────


def cross_pack_chain(
    workflow_entity_id: str,
    target_track_index: int = -1,
    customize_aesthetic: dict | None = None,
) -> dict:
    """Execute (dry-run) a cross-pack workflow recipe step-by-step.

    Called by the MCP tool wrapper in tools.py.

    Parameters
    ----------
    workflow_entity_id : str
        Entity ID from the cross_pack_workflow namespace.
        E.g. "dub_techno_spectral_drone_bed", "boc_decayed_pad".
        Hyphens and underscores are interchangeable.

    target_track_index : int
        -1 = dry run (all steps marked result: "dry-run").
        >= 0 = target existing track (Phase F ships dry-run only for both modes).

    customize_aesthetic : dict, optional
        Optional overrides. Supported keys:
        - "target_scale": str (e.g. "Fmin") — inserts set_song_scale step
        - "target_bpm": float — inserts set_tempo step
        - "transpose_semitones": float — adjusts numeric pitch parameter values

    Returns
    -------
    dict with keys:
        workflow: {entity_id, name, packs_used, description, when_to_reach, gotcha}
        executed_steps: list of action dicts with result: "dry-run"
        warnings: list of caution strings
        sources: citation list
        error: (only on failure) error message
    """
    # ── 1. Load workflow ──────────────────────────────────────────────────────
    wf = _load_workflow(workflow_entity_id)

    # Robustness fix: a YAML file that parses to a list/scalar (not a mapping)
    # would crash every wf.get() below — treat it the same as "not found".
    # isinstance also covers the original `wf is None` case.
    if not isinstance(wf, dict):
        # List available workflows for helpful error message; report the
        # entity_id (underscore form), not the filename.
        available: list[str] = []
        if _CROSS_WORKFLOWS_DIR.exists():
            available = [
                f.stem.replace("-", "_")
                for f in sorted(_CROSS_WORKFLOWS_DIR.glob("*.yaml"))
            ]

        return {
            "error": (
                f"Cross-pack workflow '{workflow_entity_id}' not found. "
                "Check ~/.livepilot/atlas-overlays/packs/cross_workflows/."
            ),
            "available_workflows": available,
            "hint": (
                "Entity IDs use underscores (e.g. 'dub_techno_spectral_drone_bed'). "
                "Filenames use hyphens (dub-techno-spectral-drone-bed.yaml). "
                "Both forms are accepted."
            ),
            "sources": [],
        }

    # ── 2. Parse signal_flow ──────────────────────────────────────────────────
    signal_flow_raw = wf.get("signal_flow", "")
    parsed_steps = _parse_signal_flow(signal_flow_raw)

    warnings: list[str] = []

    if not parsed_steps:
        warnings.append(
            f"signal_flow field is empty or could not be parsed for "
            f"'{workflow_entity_id}'. The workflow YAML may need updating."
        )

    # ── 3. Apply aesthetic overrides ──────────────────────────────────────────
    if customize_aesthetic:
        parsed_steps = _apply_aesthetic_overrides(parsed_steps, customize_aesthetic)

    # ── 4. Execute (dry-run) ──────────────────────────────────────────────────
    executed_steps = _execute_or_dry_run(parsed_steps, target_track_index)

    # ── 5. Warn about gotchas ─────────────────────────────────────────────────
    gotcha = wf.get("gotcha", "")
    avoid = wf.get("avoid", "")
    if gotcha:
        warnings.append(f"Gotcha: {gotcha}")
    if avoid:
        warnings.append(f"Avoid: {avoid}")

    # ── 6. Build return shape ─────────────────────────────────────────────────
    workflow_meta = {
        "entity_id": wf.get("entity_id", workflow_entity_id),
        "name": wf.get("name", ""),
        "packs_used": wf.get("packs_used", []),
        "devices_used": wf.get("devices_used", []),
        "description": wf.get("description", ""),
        "when_to_reach": wf.get("when_to_reach", ""),
        "gotcha": gotcha,
    }

    return {
        "workflow": workflow_meta,
        "executed_steps": executed_steps,
        "warnings": warnings,
        "sources": [
            # Fix: these carried an f-prefix with no placeholders (lint F541).
            "cross_pack_workflow body.signal_flow [SOURCE: cross_pack_workflow.yaml]",
            "agent-inference: verb parsing + action classification [SOURCE: agent-inference]",
        ],
    }
|