livepilot 1.23.6 → 1.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/CHANGELOG.md +107 -0
  2. package/README.md +60 -14
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +1 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/atlas/__init__.py +17 -3
  7. package/mcp_server/atlas/explore_tools.py +332 -0
  8. package/mcp_server/atlas/tools.py +161 -0
  9. package/mcp_server/audit/__init__.py +6 -0
  10. package/mcp_server/audit/checks.py +618 -0
  11. package/mcp_server/audit/tools.py +232 -0
  12. package/mcp_server/composer/branch_producer.py +5 -2
  13. package/mcp_server/composer/develop/__init__.py +19 -0
  14. package/mcp_server/composer/develop/apply.py +217 -0
  15. package/mcp_server/composer/develop/brief_builder.py +269 -0
  16. package/mcp_server/composer/develop/seed_introspector.py +195 -0
  17. package/mcp_server/composer/engine.py +15 -521
  18. package/mcp_server/composer/fast/__init__.py +62 -0
  19. package/mcp_server/composer/fast/apply.py +533 -0
  20. package/mcp_server/composer/fast/brief_builder.py +1479 -0
  21. package/mcp_server/composer/fast/tier_classification.py +159 -0
  22. package/mcp_server/composer/framework/__init__.py +0 -0
  23. package/mcp_server/composer/framework/applier.py +179 -0
  24. package/mcp_server/composer/framework/artist_loader.py +63 -0
  25. package/mcp_server/composer/framework/atlas_resolver.py +554 -0
  26. package/mcp_server/composer/framework/brief.py +79 -0
  27. package/mcp_server/composer/framework/event_lexicon.py +71 -0
  28. package/mcp_server/composer/framework/genre_loader.py +77 -0
  29. package/mcp_server/composer/framework/intent_source.py +137 -0
  30. package/mcp_server/composer/framework/knowledge_pack.py +140 -0
  31. package/mcp_server/composer/framework/plan_compiler.py +10 -0
  32. package/mcp_server/composer/full/__init__.py +10 -0
  33. package/mcp_server/composer/full/apply.py +1139 -0
  34. package/mcp_server/composer/full/brief_builder.py +227 -0
  35. package/mcp_server/composer/full/engine.py +541 -0
  36. package/mcp_server/composer/full/layer_planner.py +491 -0
  37. package/mcp_server/composer/layer_planner.py +19 -465
  38. package/mcp_server/composer/sample_resolver.py +80 -7
  39. package/mcp_server/composer/tools.py +626 -28
  40. package/mcp_server/server.py +1 -0
  41. package/mcp_server/splice_client/client.py +7 -0
  42. package/mcp_server/tools/_analyzer_engine/sample.py +172 -7
  43. package/mcp_server/tools/_planner_engine.py +25 -63
  44. package/mcp_server/tools/analyzer.py +10 -4
  45. package/mcp_server/tools/browser.py +102 -19
  46. package/package.json +2 -2
  47. package/remote_script/LivePilot/__init__.py +1 -1
  48. package/server.json +3 -3
@@ -0,0 +1,269 @@
1
+ """Develop-mode brief builder — Phase 1 of the LLM-creative two-phase flow.
2
+
3
+ Takes a SeedState (from seed_introspector) and an optional prompt directive,
4
+ returns a brief carrying VOCABULARY for the agent to design variants from.
5
+
6
+ CRITICAL: The brief MUST NOT contain predetermined section sequences, bar
7
+ counts, or fixed variant taxonomies. The agent decides those per call.
8
+ The framework only provides:
9
+ - The existing seed (read-only context)
10
+ - Genre/artist character vocabulary (descriptive)
11
+ - The 42-event structural lexicon (named primitives, not a sequence)
12
+ - Atlas instrument alternates (for sample-trigger swaps)
13
+ - Research hooks (WebSearch directives for niche styles)
14
+ - An open-ended design_targets text describing the variation surface
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import re
20
+ from pathlib import Path
21
+ from typing import Any, Optional
22
+
23
+
24
+ # ── artist vocabulary loader ────────────────────────────────────────
25
+
26
+ # Cached after first load; ARTIST_NAMES is a tuple of producer names parsed from
27
+ # artist-vocabularies.md (matches the file's ### heading lines).
28
+ _ARTIST_NAMES_CACHE: Optional[tuple[str, ...]] = None
29
+
30
+
31
+ def _load_artist_names() -> tuple[str, ...]:
32
+ """Parse artist-vocabularies.md for known producer names (cached).
33
+
34
+ Artist entries use ### headings (h3). Section groupings use ## (h2).
35
+ We parse ### lines only.
36
+ """
37
+ global _ARTIST_NAMES_CACHE
38
+ if _ARTIST_NAMES_CACHE is not None:
39
+ return _ARTIST_NAMES_CACHE
40
+
41
+ # Locate the markdown file relative to repo root
42
+ here = Path(__file__).resolve()
43
+ # Walk up to find livepilot/skills/livepilot-core/references/artist-vocabularies.md
44
+ for parent in here.parents:
45
+ candidate = (
46
+ parent
47
+ / "livepilot"
48
+ / "skills"
49
+ / "livepilot-core"
50
+ / "references"
51
+ / "artist-vocabularies.md"
52
+ )
53
+ if candidate.exists():
54
+ text = candidate.read_text(encoding="utf-8")
55
+ names = []
56
+ for line in text.splitlines():
57
+ # Artist entries are ### headings, e.g. "### Burial"
58
+ # or "### Aphex Twin (Richard D. James)"
59
+ m = re.match(r"^###\s+(.+?)\s*$", line)
60
+ if m:
61
+ raw = m.group(1).strip()
62
+ # Strip parenthetical aliases: "Aphex Twin (Richard D. James)" → "Aphex Twin"
63
+ # Keep the primary name only
64
+ primary = re.sub(r"\s*\(.*?\)\s*$", "", raw).strip()
65
+ if primary:
66
+ names.append(primary)
67
+ # Also add alias if present (the part inside parens)
68
+ alias_m = re.search(r"\(([^)]+)\)", raw)
69
+ if alias_m:
70
+ alias = alias_m.group(1).strip()
71
+ # Only add meaningful aliases (not descriptive phrases)
72
+ if alias and len(alias.split()) <= 4:
73
+ names.append(alias)
74
+ _ARTIST_NAMES_CACHE = tuple(names)
75
+ return _ARTIST_NAMES_CACHE
76
+
77
+ # Fallback if the markdown isn't found: empty tuple
78
+ _ARTIST_NAMES_CACHE = ()
79
+ return _ARTIST_NAMES_CACHE
80
+
81
+
82
def extract_artist_refs(prompt: str) -> list[str]:
    """Find producer names in the prompt (case-insensitive substring match).

    Args:
        prompt: Free-text directive to scan; falsy values yield [].

    Returns:
        Names found in the prompt, in artist-vocabularies.md order,
        deduplicated case-insensitively, spelled as they appear in the
        vocabulary file. NOTE: `_load_artist_names()` returns both primary
        names AND short parenthetical aliases, so an alias spelling (e.g.
        "Richard D. James") is returned as-is when that is what matched —
        callers must not assume primary-name-only output.
    """
    if not prompt:
        return []
    names = _load_artist_names()
    if not names:
        return []
    prompt_lower = prompt.lower()
    found: list[str] = []
    seen_lower: set[str] = set()
    for name in names:
        name_lower = name.lower()
        # Skip case-insensitive duplicates already emitted.
        if name_lower in seen_lower:
            continue
        if name_lower in prompt_lower:
            found.append(name)
            seen_lower.add(name_lower)
    return found
104
+
105
+
106
# ── research hooks ──────────────────────────────────────────────────

# Common-genre terms that DON'T need research (LLM training data covers them)
_COMMON_GENRE_TERMS = {
    "techno",
    "house",
    "ambient",
    "hiphop",
    "hip-hop",
    "trap",
    "pop",
    "rock",
    "jazz",
    "edm",
    "electronic",
    "dance",
    "downtempo",
    "minimal",
    "deep house",
    "minor",
    "major",
    "key",
    "tempo",
    "bpm",
    "lo-fi",
    "lofi",
    "dark",
}

# Niche terms that warrant research (not exhaustive — heuristic)
_NICHE_GENRE_HINTS = (
    "wonky",
    "uk funky",
    "footwork",
    "juke",
    "kuduro",
    "gqom",
    "speed garage",
    "hyperpop",
    "vapor",
    "chillwave",
    "future garage",
    "dubstep wobble",
    "psy-trance",
    "psytrance",
    "balearic",
    "italo",
    "freestyle",
    "screwed",
    "chopped and screwed",
    "phonk",
    "drift phonk",
)


def detect_research_hooks(prompt: str) -> list[str]:
    """Identify niche style terms the agent should research before designing.

    Scans the prompt for `_NICHE_GENRE_HINTS` entries and excludes anything
    in `_COMMON_GENRE_TERMS`. (The two collections are currently disjoint,
    so the exclusion is a safety net against future overlapping edits —
    previously the common set was declared but never consulted.)
    Heuristic — agent uses the returned terms as WebSearch directives.

    Args:
        prompt: Free-text directive; falsy values yield [].

    Returns:
        Matched niche terms, in `_NICHE_GENRE_HINTS` order (possibly empty).
    """
    if not prompt:
        return []
    prompt_lower = prompt.lower()
    return [
        hint
        for hint in _NICHE_GENRE_HINTS
        if hint in prompt_lower and hint not in _COMMON_GENRE_TERMS
    ]
175
+
176
+
177
+ # ── atlas alternates (stub for Phase 4 enrichment) ─────────────────
178
+
179
+ def _atlas_alternates_per_role(seed_state: dict) -> dict:
180
+ """For sample-trigger roles, return alternate sample suggestions.
181
+
182
+ Phase 1 stub — returns empty dict per role. Phase 4 KnowledgePack
183
+ integration will populate with real atlas_search results.
184
+ """
185
+ alternates: dict = {}
186
+ for track in seed_state.get("tracks", []):
187
+ if track.get("classification") == "sample_trigger":
188
+ alternates[track["role"]] = [] # populated in Phase 4
189
+ return alternates
190
+
191
+
192
+ # ── genre context ──────────────────────────────────────────────────
193
+
194
+ def _genre_context_for(prompt_directive: Optional[str]) -> dict:
195
+ """Phase 1 stub — full genre-vocabularies.md loading happens in Phase 4.
196
+
197
+ Returns empty dict shape; Phase 4 will populate with the descriptive
198
+ character data (kick, bass register, harmonic palette, devices).
199
+ """
200
+ return {}
201
+
202
+
203
# ── identity preservation ──────────────────────────────────────────

_IDENTITY_DIRECTIVE = (
    "Preserve the existing seed identity. Existing samples MUST NOT be replaced "
    "(except where you intentionally schedule a sample swap as part of a variant). "
    "Existing notes in scene 0 MUST NOT be overwritten — write variants to NEW scenes "
    "(scene_index >= 1). Existing automation curves MUST be preserved as the 'main' "
    "state. The original loop must still play identically when fired. New material "
    "extends the loop; it does not replace it."
)


# ── design targets ─────────────────────────────────────────────────

_DESIGN_TARGETS = (
    "Design a set of variant clips for the seed loop that allow it to develop into a "
    "fuller arrangement. You decide: how many variants per layer, what sections those "
    "variants serve, the section sequence and length, where the hook lands, when to "
    "withhold and restate. Use the seed's identity (key, tempo, role classification) "
    "as the unbreakable foundation. For midi_riff layers, design fresh per-variant "
    "MIDI rooted in the same scale and tonal center. For sample_trigger layers, "
    "consider sample swaps in fills and breakdowns — but only when the variation "
    "genuinely benefits, not as default. Drum dropouts, sustained pad swells, and "
    "filter sweeps are valid structural moves drawn from the event lexicon. The form "
    "is yours to design — vocabularies tell you what a genre or artist sounds like, "
    "they do not tell you the bar count of an intro."
)


# ── main entry point ───────────────────────────────────────────────

def build_develop_brief(
    ctx: Any,
    seed_state: dict,
    prompt_directive: Optional[str] = None,
) -> dict:
    """Build a Phase-1 develop brief.

    Args:
        ctx: Lifespan context (lifespan_context.ableton, etc.) — not heavily
            used in Phase 1; Phase 4 KnowledgePack consumes it.
        seed_state: SeedState dict from introspect_seed() — read-only.
        prompt_directive: optional free-text directive ("extend in microhouse
            style", "make it sound like Burial", etc.)

    Returns:
        Dict carrying vocabulary fields only. NEVER returns form-prescriptive
        fields (section sequences, bar counts, variant taxonomies).
    """
    prompt_text = prompt_directive or ""
    matched_artists = extract_artist_refs(prompt_text)

    return {
        "mode": "develop",
        "tempo": seed_state.get("tempo", 120.0),
        "key": seed_state.get("key"),
        "seed_state": seed_state,
        "identity_preservation_directive": _IDENTITY_DIRECTIVE,
        "design_targets": _DESIGN_TARGETS,
        "genre_context": _genre_context_for(prompt_directive),
        # Phase 4 fills in per-artist character data.
        "artist_context": {artist: {} for artist in matched_artists},
        "atlas_alternates_per_role": _atlas_alternates_per_role(seed_state),
        "research_hooks": detect_research_hooks(prompt_text),
        "prompt_directive": prompt_directive,
    }
@@ -0,0 +1,195 @@
1
+ """SeedIntrospector — read-only classifier for the existing loop in a live session.
2
+
3
+ Takes the project's current state (focused on a single scene, scene 0 by
4
+ default) and produces a SeedState dict describing what's there. Callers
5
+ (DevelopBrief builder, develop_apply) consume this to know what to extend.
6
+
7
+ Role classification:
8
+ - Name-match first: track.name lower-cased matches against role keyword sets
9
+ - Fallback to register heuristic when name is unrecognized
10
+
11
+ Sample-trigger vs MIDI-riff:
12
+ - A track is sample_trigger if ALL of: exactly 1 note, pitch == 60, duration >= clip_length
13
+ - Otherwise: midi_riff (multiple notes OR pitch != 60 OR duration < clip_length)
14
+ - Empty clip returns 'empty'
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import logging
20
+ import re
21
+ from typing import Any, Optional
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
# Role-keyword maps. Order matters within each list — check more specific names first
_ROLE_KEYWORDS: dict[str, tuple[str, ...]] = {
    "drums": ("drum", "kick", "snare", "hat", "hi-hat", "hihat", "cymbal", "perc", "percussion", "clap", "ride", "tom"),
    "bass": ("bass", "sub", "808"),
    "lead": ("lead", "melody", "synth lead", "arp"),
    "pad": ("pad", "string", "chord", "harm"),
    "texture": ("texture", "atmos", "ambient", "noise", "drone"),
    "fx": ("fx", "riser", "impact", "swell", "sweep"),
    "vocal": ("vocal", "voice", "vox", "chop", "ad-lib", "adlib"),
}


def infer_role_from_name(name: str) -> str:
    """Match track name (case-insensitive) against role keywords.

    Returns one of: drums, bass, lead, pad, texture, fx, vocal, unknown.
    Caller may fall back to register heuristic when this returns 'unknown'.
    """
    if not name:
        return "unknown"
    lowered = name.lower()
    # Prefix word-boundary match: still catches "drums" / "strings" /
    # "chops", while avoiding false hits like "tom" inside "MyCustomTrack".
    for role, keywords in _ROLE_KEYWORDS.items():
        if any(re.search(r'\b' + re.escape(kw), lowered) for kw in keywords):
            return role
    return "unknown"
54
+
55
+
56
def classify_track(notes: list[dict], clip_length: float) -> str:
    """Classify a track as sample_trigger / midi_riff / empty.

    Heuristic per v1.24 spec:
      - empty: no notes
      - sample_trigger: exactly 1 note, pitch == 60, duration >= clip_length
      - midi_riff: anything else
    """
    if not notes:
        return "empty"
    if len(notes) != 1:
        return "midi_riff"
    only = notes[0]
    # A single full-length middle-C note is the sample-trigger convention.
    is_trigger = (
        int(only.get("pitch", -1)) == 60
        and float(only.get("duration", 0.0)) >= clip_length
    )
    return "sample_trigger" if is_trigger else "midi_riff"
71
+
72
+
73
def introspect_seed(ctx: Any, scene_index: int = 0) -> dict:
    """Build a SeedState dict from the live session.

    Reads tempo, time signature, song scale, and per-track clip content
    for the given scene. Returns dict shape:

    {
        "scene_index": int,
        "tempo": float,
        "clip_length": float,       # bars-in-beats; 4.0 = 1 bar at 4/4
        "time_signature": str,      # e.g. "4/4" (omitted if unavailable)
        "key": str | None,
        "scale_mode": str | None,
        "tracks": [
            {
                "index": int,
                "name": str,
                "role": str,            # from name-match
                "classification": str,  # sample_trigger | midi_riff | empty
                "notes": list[dict],
                "muted": bool,
            },
            ...
        ],
        "status": str | None,       # "no_seed_found" if scene has no clips
        "error": str | None,
    }

    On missing ableton context: returns {"error": "..."}.
    """
    ableton = ctx.lifespan_context.get("ableton") if hasattr(ctx, "lifespan_context") else None
    if ableton is None:
        return {"error": "ableton client not available in ctx"}

    try:
        session = ableton.send_command("get_session_info", {})
    except Exception as exc:
        return {"error": f"get_session_info failed: {exc}"}

    seed: dict = {
        "scene_index": scene_index,
        "tempo": float(session.get("tempo", 120.0)),
        "tracks": [],
    }
    sig_num = session.get("signature_numerator")
    sig_den = session.get("signature_denominator")
    if sig_num and sig_den:
        seed["time_signature"] = f"{sig_num}/{sig_den}"

    # Try to read song scale (Live 12.4) — best-effort, older Lives lack it.
    try:
        scale_result = ableton.send_command("get_song_scale", {})
        if scale_result and not scale_result.get("error"):
            seed["key"] = scale_result.get("root_note") or scale_result.get("key")
            seed["scale_mode"] = scale_result.get("scale_name") or scale_result.get("mode")
    except Exception as exc:
        logger.debug("introspect_seed: get_song_scale unavailable: %s", exc)

    track_descriptors = session.get("tracks", []) or []
    clip_length_seen: Optional[float] = None
    populated_track_count = 0

    for td in track_descriptors:
        ti = int(td.get("index", -1))
        name = td.get("name", "")
        muted = bool(td.get("mute", False))

        # Read the clip in this scene
        try:
            clip_info = ableton.send_command(
                "get_clip_info",
                {"track_index": ti, "clip_index": scene_index},
            )
        except Exception as exc:
            logger.debug("introspect_seed: get_clip_info(%d, %d) failed: %s", ti, scene_index, exc)
            continue

        # FIX: also guard against non-dict replies (consistent with the
        # notes_result isinstance check below) — previously a non-dict
        # clip_info raised an uncaught AttributeError on .get().
        if not isinstance(clip_info, dict) or clip_info.get("error"):
            # No usable clip in this slot — include the track stub for completeness
            seed["tracks"].append({
                "index": ti,
                "name": name,
                "role": infer_role_from_name(name),
                "classification": "empty",
                "notes": [],
                "muted": muted,
            })
            continue

        clip_length = float(clip_info.get("length", 0.0))
        # The first positive clip length seen defines the seed's loop length.
        if clip_length_seen is None and clip_length > 0:
            clip_length_seen = clip_length

        try:
            notes_result = ableton.send_command(
                "get_notes",
                {"track_index": ti, "clip_index": scene_index},
            )
            notes = notes_result.get("notes", []) if isinstance(notes_result, dict) else []
        except Exception as exc:
            logger.debug("introspect_seed: get_notes(%d, %d) failed: %s", ti, scene_index, exc)
            notes = []

        classification = classify_track(notes, clip_length) if clip_length > 0 else "empty"
        if classification != "empty":
            populated_track_count += 1

        seed["tracks"].append({
            "index": ti,
            "name": name,
            "role": infer_role_from_name(name),
            "classification": classification,
            "notes": notes,
            "muted": muted,
        })

    seed["clip_length"] = clip_length_seen if clip_length_seen is not None else 0.0

    if populated_track_count == 0:
        seed["status"] = "no_seed_found"
        seed["tracks"] = []  # Per spec: empty result tracks list

    return seed