livepilot 1.16.1 → 1.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/CHANGELOG.md +269 -0
  2. package/README.md +16 -15
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/mcp_server/__init__.py +1 -1
  5. package/mcp_server/atlas/__init__.py +85 -0
  6. package/mcp_server/atlas/device_atlas.json +3183 -382
  7. package/mcp_server/atlas/device_techniques_index.json +1510 -0
  8. package/mcp_server/atlas/enrichments/__init__.py +1 -0
  9. package/mcp_server/atlas/enrichments/audio_effects/amp.yaml +112 -0
  10. package/mcp_server/atlas/enrichments/audio_effects/audio_effect_rack.yaml +77 -0
  11. package/mcp_server/atlas/enrichments/audio_effects/cabinet.yaml +81 -0
  12. package/mcp_server/atlas/enrichments/audio_effects/corpus.yaml +128 -0
  13. package/mcp_server/atlas/enrichments/audio_effects/envelope_follower.yaml +99 -0
  14. package/mcp_server/atlas/enrichments/audio_effects/external_audio_effect.yaml +64 -0
  15. package/mcp_server/atlas/enrichments/audio_effects/looper.yaml +85 -0
  16. package/mcp_server/atlas/enrichments/audio_effects/resonators.yaml +121 -0
  17. package/mcp_server/atlas/enrichments/audio_effects/snipper.yaml +17 -0
  18. package/mcp_server/atlas/enrichments/audio_effects/spectrum.yaml +61 -0
  19. package/mcp_server/atlas/enrichments/audio_effects/tuner.yaml +43 -0
  20. package/mcp_server/atlas/enrichments/audio_effects/utility.yaml +118 -0
  21. package/mcp_server/atlas/enrichments/audio_effects/vocoder.yaml +94 -0
  22. package/mcp_server/atlas/enrichments/instruments/analog.yaml +11 -0
  23. package/mcp_server/atlas/enrichments/instruments/bass.yaml +11 -0
  24. package/mcp_server/atlas/enrichments/instruments/bell_tower.yaml +38 -0
  25. package/mcp_server/atlas/enrichments/instruments/collision.yaml +11 -0
  26. package/mcp_server/atlas/enrichments/instruments/drift.yaml +11 -0
  27. package/mcp_server/atlas/enrichments/instruments/drum_rack.yaml +142 -0
  28. package/mcp_server/atlas/enrichments/instruments/electric.yaml +11 -0
  29. package/mcp_server/atlas/enrichments/instruments/emit.yaml +11 -0
  30. package/mcp_server/atlas/enrichments/instruments/meld.yaml +11 -0
  31. package/mcp_server/atlas/enrichments/instruments/operator.yaml +11 -0
  32. package/mcp_server/atlas/enrichments/instruments/poli.yaml +11 -0
  33. package/mcp_server/atlas/enrichments/instruments/sampler.yaml +12 -0
  34. package/mcp_server/atlas/enrichments/instruments/simpler.yaml +15 -0
  35. package/mcp_server/atlas/enrichments/instruments/tension.yaml +11 -0
  36. package/mcp_server/atlas/enrichments/instruments/vector_fm.yaml +11 -0
  37. package/mcp_server/atlas/enrichments/instruments/vector_grain.yaml +11 -0
  38. package/mcp_server/atlas/enrichments/instruments/wavetable.yaml +11 -0
  39. package/mcp_server/atlas/enrichments/midi_effects/filler.yaml +17 -0
  40. package/mcp_server/atlas/enrichments/utility/performer.yaml +15 -0
  41. package/mcp_server/atlas/enrichments/utility/vector_map.yaml +21 -0
  42. package/mcp_server/atlas/tools.py +291 -0
  43. package/mcp_server/m4l_bridge.py +19 -2
  44. package/mcp_server/sample_engine/tools.py +140 -68
  45. package/mcp_server/splice_client/http_bridge.py +319 -116
  46. package/mcp_server/tools/automation.py +168 -0
  47. package/package.json +2 -2
  48. package/remote_script/LivePilot/__init__.py +1 -1
  49. package/remote_script/LivePilot/arrangement.py +216 -1
  50. package/server.json +3 -3
@@ -149,6 +149,297 @@ def atlas_compare(ctx: Context, device_a: str, device_b: str, role: str = "") ->
149
149
  return atlas.compare(device_a, device_b, role=role)
150
150
 
151
151
 
152
+ @mcp.tool()
153
+ def atlas_describe_chain(
154
+ ctx: Context,
155
+ description: str,
156
+ genre: str = "",
157
+ limit_per_role: int = 3,
158
+ ) -> dict:
159
+ """Free-text describe-a-chain: "a granular pad that sounds like Tim Hecker"
160
+ → device chain proposal.
161
+
162
+ The mirror of `splice_describe_sound` for the device library. Where
163
+ `atlas_chain_suggest(role, genre)` takes structured inputs, this takes
164
+ a free-form sentence and proposes a chain by:
165
+
166
+ 1. Parsing role hints from the description ("bass", "pad", "lead",
167
+ "percussion", "drum", "texture", "vocal", "keys")
168
+ 2. Parsing aesthetic hints (artist names → `artist-vocabularies.md`,
169
+ genre names → `genre-vocabularies.md`, character words → atlas tags)
170
+ 3. Searching the atlas with those terms
171
+ 4. Proposing the top devices per role with brief rationale
172
+
173
+ This does NOT autoload anything — it returns a proposal the caller can
174
+ review, adjust, then execute with `load_browser_item` + a chain of FX.
175
+
176
+ description: free text. Examples:
177
+ "a granular pad that sounds like Tim Hecker"
178
+ "warm analog bass for minimal techno, deep and dubby"
179
+ "chopped vocal melody, Akufen-style microhouse"
180
+ "brittle mallet percussion with long reverb, Stars of the Lid territory"
181
+ genre: optional genre bias if the description is genre-agnostic
182
+ limit_per_role: max devices to suggest per detected role (default 3)
183
+
184
+ Returns {description, detected_roles, detected_aesthetic,
185
+ per_role_suggestions: [...], chain_proposal: [...]}.
186
+ """
187
+ atlas = _get_atlas()
188
+ if atlas is None:
189
+ return {"error": "Atlas not loaded. Run scan_full_library first."}
190
+ if not description or not description.strip():
191
+ return {"error": "description is required"}
192
+
193
+ desc_lower = description.lower().strip()
194
+
195
+ # ── Detect roles ──────────────────────────────────────────────
196
+ ROLE_KEYWORDS = {
197
+ "bass": ["bass", "sub", "808", "low end", "bottom"],
198
+ "lead": ["lead", "melody", "topline", "hook"],
199
+ "pad": ["pad", "texture", "atmosphere", "atmos", "drone", "ambient"],
200
+ "keys": ["keys", "piano", "rhodes", "wurli", "wurly", "chord"],
201
+ "percussion": ["percussion", "perc", "shaker", "conga", "claves", "tambourine"],
202
+ "drums": ["drums", "drum kit", "kick", "snare", "hat", "hi-hat", "hihat", "break"],
203
+ "vocal": ["vocal", "vox", "voice", "chop", "chant"],
204
+ "fx": ["fx", "riser", "downlifter", "sweep", "whoosh", "impact"],
205
+ }
206
+ detected_roles = []
207
+ for role, keywords in ROLE_KEYWORDS.items():
208
+ if any(k in desc_lower for k in keywords):
209
+ detected_roles.append(role)
210
+ if not detected_roles:
211
+ detected_roles = ["pad"] # sensible default
212
+
213
+ # ── Detect aesthetic / artist cues ────────────────────────────
214
+ ARTIST_TO_TAGS = {
215
+ "villalobos": ["minimal_techno", "deep_minimal"],
216
+ "hawtin": ["minimal_techno", "deep_minimal"],
217
+ "plastikman": ["minimal_techno"],
218
+ "basic channel": ["dub_techno", "dub"],
219
+ "rhythm and sound": ["dub_techno", "dub"],
220
+ "voigt": ["ambient", "dub_techno"],
221
+ "gas": ["ambient"],
222
+ "basinski": ["ambient", "drone"],
223
+ "stars of the lid": ["ambient", "drone", "modern_classical"],
224
+ "hecker": ["ambient", "drone", "experimental"],
225
+ "aphex": ["idm", "experimental"],
226
+ "autechre": ["idm", "experimental"],
227
+ "dilla": ["hip_hop", "lo_fi"],
228
+ "burial": ["dubstep", "uk_garage", "ambient"],
229
+ "akufen": ["microhouse"],
230
+ "isolee": ["microhouse", "deep_house"],
231
+ "henke": ["minimal_techno", "experimental"],
232
+ "monolake": ["minimal_techno", "experimental"],
233
+ "tycho": ["synthwave", "electronica"],
234
+ "boards of canada": ["downtempo", "lo_fi"],
235
+ }
236
+ CHARACTER_TAGS = [
237
+ "warm", "cold", "bright", "dark", "lush", "thin", "fat", "metallic",
238
+ "granular", "glitch", "gritty", "clean", "wet", "dry", "resonant",
239
+ "breathy", "analog", "digital", "vintage", "modern", "organic", "synthetic",
240
+ ]
241
+ GENRE_KEYWORDS = [
242
+ "microhouse", "minimal", "techno", "house", "deep house", "ambient",
243
+ "drone", "idm", "experimental", "dubstep", "dnb", "drum and bass",
244
+ "hip hop", "hip-hop", "lo-fi", "lo fi", "lofi", "trap", "garage",
245
+ "dub techno", "dub", "jazz", "classical", "cinematic", "synthwave",
246
+ "vaporwave", "ambient techno", "deep minimal",
247
+ ]
248
+ detected_aesthetic = []
249
+ for artist, tags in ARTIST_TO_TAGS.items():
250
+ if artist in desc_lower:
251
+ detected_aesthetic.extend(tags)
252
+ for tag in CHARACTER_TAGS:
253
+ if f" {tag}" in f" {desc_lower}":
254
+ detected_aesthetic.append(tag)
255
+ for g in GENRE_KEYWORDS:
256
+ if g in desc_lower:
257
+ detected_aesthetic.append(g.replace(" ", "_").replace("-", "_"))
258
+ if genre:
259
+ detected_aesthetic.append(genre.lower())
260
+ # Dedupe preserving order
261
+ seen = set()
262
+ detected_aesthetic = [
263
+ t for t in detected_aesthetic
264
+ if not (t in seen or seen.add(t))
265
+ ]
266
+
267
+ # ── Build per-role suggestions via atlas.suggest ─────────────
268
+ per_role_suggestions = []
269
+ for role in detected_roles:
270
+ # Build an intent string that combines role + aesthetic cues
271
+ intent_parts = [role]
272
+ intent_parts.extend(detected_aesthetic[:3]) # top 3 aesthetic tags
273
+ intent = " ".join(intent_parts)
274
+ results = atlas.suggest(
275
+ intent=intent,
276
+ genre=(detected_aesthetic[0] if detected_aesthetic else genre),
277
+ energy="medium",
278
+ limit=int(limit_per_role),
279
+ )
280
+ per_role_suggestions.append({
281
+ "role": role,
282
+ "intent_used": intent,
283
+ "suggestions": [
284
+ {
285
+ "device_id": r["device"].get("id", ""),
286
+ "device_name": r["device"].get("name", ""),
287
+ "uri": r["device"].get("uri", ""),
288
+ "rationale": r.get("rationale", ""),
289
+ "recipe": r.get("recipe"),
290
+ }
291
+ for r in results
292
+ ],
293
+ })
294
+
295
+ # ── Propose a simple chain from the highest-ranked suggestions ─
296
+ chain_proposal = []
297
+ position = 0
298
+ for role_block in per_role_suggestions:
299
+ if not role_block["suggestions"]:
300
+ continue
301
+ top = role_block["suggestions"][0]
302
+ chain_proposal.append({
303
+ "position": position,
304
+ "role": role_block["role"],
305
+ "device_name": top["device_name"],
306
+ "device_id": top["device_id"],
307
+ "uri": top["uri"],
308
+ "why": top["rationale"],
309
+ })
310
+ position += 1
311
+
312
+ # ── Cross-reference aesthetic to the vocabulary files ──────────
313
+ next_steps = []
314
+ if any("villalobos" in desc_lower or a in detected_aesthetic for a in
315
+ ("microhouse", "deep_minimal", "minimal_techno", "dub_techno",
316
+ "ambient", "drone", "idm", "experimental")):
317
+ next_steps.append(
318
+ "Cross-reference "
319
+ "`livepilot/skills/livepilot-core/references/artist-vocabularies.md` "
320
+ "and `genre-vocabularies.md` for deeper aesthetic guidance."
321
+ )
322
+ if not detected_aesthetic:
323
+ next_steps.append(
324
+ "No aesthetic or genre cues detected. If the description "
325
+ "should have matched, add it to the ARTIST_TO_TAGS map or "
326
+ "provide genre= explicitly."
327
+ )
328
+ next_steps.append(
329
+ "Call `atlas_techniques_for_device(device_id)` on any proposal "
330
+ "to see what techniques reference it."
331
+ )
332
+
333
+ return {
334
+ "description": description,
335
+ "detected_roles": detected_roles,
336
+ "detected_aesthetic": detected_aesthetic,
337
+ "per_role_suggestions": per_role_suggestions,
338
+ "chain_proposal": chain_proposal,
339
+ "next_steps": next_steps,
340
+ }
341
+
342
+
343
+ @mcp.tool()
344
+ def atlas_techniques_for_device(ctx: Context, device_id: str) -> dict:
345
+ """Reverse-lookup: what techniques / principles reference this device?
346
+
347
+ Answers questions like "what can I do with Granulator III?" by returning
348
+ every technique across the knowledge base that mentions this device —
349
+ the device's own `signature_techniques`, sample-manipulation principles
350
+ that use it, sound-design-deep.md references. Complements
351
+ `atlas_device_info` (which returns the device's own curated fields) by
352
+ showing the device's OUTWARD connections — how it fits into techniques
353
+ that weren't written from the device's perspective.
354
+
355
+ device_id: atlas ID (e.g. "granulator_iii", "simpler", "analog"). Use
356
+ `atlas_search` or `atlas_device_info` to discover IDs.
357
+
358
+ Returns {device_id, technique_count, techniques: [...]}, where each
359
+ technique entry has:
360
+ - technique: short name (e.g. "Vocal micro-chop (Akufen)")
361
+ - description: one-line
362
+ - aesthetic: list of aesthetic/genre tags
363
+ - source: where this technique lives (`atlas/<id>`,
364
+ `sample-techniques.md`, `sound-design-deep.md`)
365
+ - kind: signature_technique | sample_technique | sound_design_principle
366
+
367
+ Index is auto-generated from the knowledge base; regenerate via the
368
+ companion script when adding new techniques (rare — most additions
369
+ happen through enrichment YAMLs, which the index reads directly).
370
+ """
371
+ import json, os
372
+ index_path = os.path.join(
373
+ os.path.dirname(os.path.abspath(__file__)),
374
+ "device_techniques_index.json",
375
+ )
376
+ if not os.path.isfile(index_path):
377
+ return {
378
+ "error": "device_techniques_index.json not found",
379
+ "hint": "regenerate via the post-v1.17 reverse-index builder script",
380
+ }
381
+ try:
382
+ with open(index_path, "r") as f:
383
+ data = json.load(f)
384
+ except (OSError, json.JSONDecodeError) as exc:
385
+ return {"error": f"Failed to load index: {exc}"}
386
+
387
+ if not device_id:
388
+ # Return a summary of indexed devices
389
+ devices = data.get("devices", {})
390
+ return {
391
+ "indexed_device_count": len(devices),
392
+ "total_cross_references": data.get("entry_count", 0),
393
+ "devices": sorted(devices.keys()),
394
+ "hint": "Pass a device_id for per-device techniques",
395
+ }
396
+
397
+ entries = data.get("devices", {}).get(device_id)
398
+ if entries is None:
399
+ return {
400
+ "device_id": device_id,
401
+ "technique_count": 0,
402
+ "techniques": [],
403
+ "hint": (
404
+ "No techniques indexed for this device. Try a different ID "
405
+ "or use `atlas_search` to find the correct one. Devices "
406
+ "with no cross-references either haven't been enriched yet "
407
+ "or aren't referenced in any technique doc."
408
+ ),
409
+ }
410
+
411
+ return {
412
+ "device_id": device_id,
413
+ "technique_count": len(entries),
414
+ "techniques": entries,
415
+ }
416
+
417
+
418
+ @mcp.tool()
419
+ def atlas_pack_info(ctx: Context, pack_name: str = "") -> dict:
420
+ """Inspect a single Ableton pack — device list + enrichment coverage.
421
+
422
+ pack_name: the pack name (e.g., "Drone Lab", "Core Library",
423
+ "Creative Extensions", "Inspired by Nature"). Case-insensitive.
424
+ Pass an empty string to get the full list of packs known to
425
+ the atlas with device counts.
426
+
427
+ Returns {pack, device_count, enriched_count, devices[...]} for a
428
+ specific pack, or {packs: [...]} when called with no name.
429
+
430
+ Use this to answer questions like "what's in Drone Lab?" or "how
431
+ much of Creative Extensions do we have aesthetic knowledge about?"
432
+ """
433
+ atlas = _get_atlas()
434
+ if atlas is None:
435
+ return {"error": "Atlas not loaded. Run scan_full_library first."}
436
+
437
+ if not pack_name:
438
+ return {"packs": atlas.list_packs()}
439
+
440
+ return atlas.pack_info(pack_name)
441
+
442
+
152
443
  @mcp.tool()
153
444
  def scan_full_library(
154
445
  ctx: Context,
@@ -479,7 +479,16 @@ class SpectralReceiver(asyncio.DatagramProtocol):
479
479
  /response_chunk i i s — chunked response (index, total, data)
480
480
  """
481
481
 
482
- BAND_NAMES = ["sub", "low", "low_mid", "mid", "high_mid", "high", "presence", "air"]
482
+ # Band names keyed by how many bands the .amxd emits. 8 bands is the v1.x
483
+ # layout (sub starts at 20 Hz, ~octave per band). 9 bands is v1.16.x+
484
+ # with an explicit sub_low (20-60 Hz) split off so Villalobos-style kicks
485
+ # at 40-50 Hz are no longer hidden inside the sub band. The .amxd is the
486
+ # source of truth for band count — this server picks the right names
487
+ # based on how many floats actually arrive on /spectrum.
488
+ BAND_NAMES_8 = ["sub", "low", "low_mid", "mid", "high_mid", "high", "presence", "air"]
489
+ BAND_NAMES_9 = ["sub_low", "sub", "low", "low_mid", "mid", "high_mid", "high", "presence", "air"]
490
+ # Default alias kept for any external reader.
491
+ BAND_NAMES = BAND_NAMES_9
483
492
 
484
493
  def __init__(self, cache: SpectralCache, miditool_cache: Optional["MidiToolCache"] = None):
485
494
  self.cache = cache
@@ -571,8 +580,16 @@ class SpectralReceiver(asyncio.DatagramProtocol):
571
580
 
572
581
  def _handle_message(self, address: str, args: list) -> None:
573
582
  if address == "/spectrum" and len(args) >= 8:
583
+ # Pick the right name set based on how many bands the .amxd emits.
584
+ # 9-band payloads come from v1.16.x+ devices with the sub_low split.
585
+ # 8-band payloads come from older frozen .amxd builds — we keep
586
+ # working against them until every user has re-frozen.
587
+ if len(args) >= 9:
588
+ names = self.BAND_NAMES_9
589
+ else:
590
+ names = self.BAND_NAMES_8
574
591
  bands = {}
575
- for i, name in enumerate(self.BAND_NAMES):
592
+ for i, name in enumerate(names):
576
593
  if i < len(args):
577
594
  bands[name] = round(float(args[i]), 4)
578
595
  self.cache.update("spectrum", bands)
@@ -1496,23 +1496,29 @@ async def splice_describe_sound(
1496
1496
  bpm: Optional[int] = None,
1497
1497
  key: Optional[str] = None,
1498
1498
  limit: int = 20,
1499
+ rephrase: bool = True,
1499
1500
  ) -> dict:
1500
1501
  """Natural-language sample search — the Sounds Plugin's "Describe a Sound".
1501
1502
 
1502
1503
  Splice's AI matches free-form descriptions like "dark ambient pad with
1503
- shimmer" or "tight 90s house hi-hat" to catalog samples. This is NOT
1504
- on the local gRPC — the bridge proxies to api.splice.com using your
1505
- session token.
1504
+ shimmer" or "tight 90s house hi-hat" to catalog samples. Hits the
1505
+ GraphQL `SamplesSearch` operation on `surfaces-graphql.splice.com`
1506
+ with `semantic=1` + `rephrase=true` enabled.
1506
1507
 
1507
- **Status: scaffolding complete, endpoint pending real-traffic capture.**
1508
- Until `SPLICE_DESCRIBE_ENDPOINT` env var is set (or
1509
- `SPLICE_ALLOW_UNVERIFIED_ENDPOINTS=1`), this tool returns a structured
1510
- ENDPOINT_NOT_CONFIGURED error with actionable setup steps.
1508
+ **Status: LIVE** as of 2026-04-22. Endpoint captured via mitmproxy
1509
+ against Splice desktop 5.4.9 + Sounds Plugin.
1511
1510
 
1512
1511
  description: free-text prompt ("warm analog bass under 80bpm")
1513
1512
  bpm: optional BPM filter
1514
1513
  key: optional musical key ("Dm", "F#")
1515
1514
  limit: max results (default 20)
1515
+ rephrase: let Splice's ML rephrase the query for better matches
1516
+ (default True). Returned as `rephrased_query_string`.
1517
+
1518
+ Returns `{ok, query, samples[], total_hits, rephrased_query_string,
1519
+ tag_summary[], ...}`. Each sample has uuid/name/bpm/key/duration/
1520
+ instrument/tags/pack_name/files. Use the uuid with
1521
+ `splice_download_sample(uuid)` to pull the audio file.
1516
1522
  """
1517
1523
  bridge, err = _build_http_bridge(ctx)
1518
1524
  if err:
@@ -1524,95 +1530,161 @@ async def splice_describe_sound(
1524
1530
  result = await bridge.describe_sound(
1525
1531
  description=description.strip(),
1526
1532
  bpm=bpm, key=key, limit=int(limit),
1533
+ rephrase=bool(rephrase),
1527
1534
  )
1528
1535
  except SpliceHTTPError as exc:
1529
1536
  return exc.to_dict()
1530
1537
  except Exception as exc:
1531
1538
  return {"ok": False, "error": f"describe_sound failed: {exc}"}
1532
- return {"ok": True, "query": description, **(result if isinstance(result, dict) else {"raw": result})}
1539
+ # Don't expose the full GraphQL `raw` dict in the user-facing response
1540
+ # unless they asked — it adds ~270KB noise per call. Keep it for
1541
+ # power users via an explicit future flag.
1542
+ out = dict(result) if isinstance(result, dict) else {"raw": result}
1543
+ out.pop("raw", None)
1544
+ return {"ok": True, "query": description, **out}
1533
1545
 
1534
1546
 
1535
1547
  @mcp.tool()
1536
1548
  async def splice_generate_variation(
1537
1549
  ctx: Context,
1538
- file_hash: str,
1539
- target_key: Optional[str] = None,
1540
- target_bpm: Optional[int] = None,
1541
- count: int = 1,
1550
+ uuid: str,
1551
+ is_legacy: bool = True,
1542
1552
  ) -> dict:
1543
- """Generate AI variations of a Splice sample — the Sounds Plugin's "Variations".
1544
-
1545
- Splice's AI produces unique re-keyed / re-tempo'd versions of any
1546
- sample. Costs additional credits per variation (on top of the base
1547
- license). NOT on the local gRPC bridged via api.splice.com.
1548
-
1549
- **Status: scaffolding complete, endpoint pending real-traffic capture.**
1550
- Until `SPLICE_VARIATION_ENDPOINT` env var is set (or
1551
- `SPLICE_ALLOW_UNVERIFIED_ENDPOINTS=1`), this tool returns a structured
1552
- ENDPOINT_NOT_CONFIGURED error with actionable setup steps.
1553
-
1554
- file_hash: sample identifier (from search results)
1555
- target_key: desired key (e.g. "Am")
1556
- target_bpm: desired tempo
1557
- count: number of variations to generate (1-5)
1558
-
1559
- WARNING: this WILL spend credits when the endpoint is live.
1560
- Consider previewing the source sample with splice_preview_sample first.
1553
+ """Find catalog samples similar to a given Splice sample — the "Variations" feature.
1554
+
1555
+ Splice's right-click "Variations" menu item surfaces other catalog
1556
+ samples with similar sonic character. The GraphQL operation name
1557
+ is `AssetSimilarSoundsQuery`. Up to 10 results per call. No credit
1558
+ cost (this is a recommender lookup, not AI audio synthesis — the
1559
+ original naming in the handoff was aspirational).
1560
+
1561
+ **Status: LIVE** as of 2026-04-22. Endpoint captured via mitmproxy
1562
+ against Splice desktop v5.4.9.
1563
+
1564
+ uuid: source sample's catalog uuid (from `splice_describe_sound`
1565
+ results or any other Splice metadata call)
1566
+ is_legacy: match how Splice's own client sets it — default True is
1567
+ correct for all mainstream catalog samples; set False only
1568
+ if working with post-catalog-v2 assets
1569
+
1570
+ Returns `{ok, uuid, similar_samples[], count}`. Each entry has the
1571
+ same flat shape as a describe_sound sample (uuid/name/bpm/key/
1572
+ duration/tags/pack_name/files). Use the uuid of any result with
1573
+ `splice_download_sample()` to pull the audio.
1561
1574
  """
1562
1575
  bridge, err = _build_http_bridge(ctx)
1563
1576
  if err:
1564
1577
  return err
1565
1578
  from ..splice_client.http_bridge import SpliceHTTPError
1566
- if not file_hash or not file_hash.strip():
1567
- return {"ok": False, "error": "file_hash is required"}
1568
- if count < 1 or count > 5:
1569
- return {"ok": False, "error": "count must be 1-5"}
1579
+ if not uuid or not uuid.strip():
1580
+ return {"ok": False, "error": "uuid is required"}
1570
1581
  try:
1571
1582
  result = await bridge.generate_variation(
1572
- file_hash=file_hash.strip(),
1573
- target_key=target_key,
1574
- target_bpm=target_bpm,
1575
- count=int(count),
1583
+ uuid=uuid.strip(),
1584
+ is_legacy=bool(is_legacy),
1576
1585
  )
1577
1586
  except SpliceHTTPError as exc:
1578
1587
  return exc.to_dict()
1579
1588
  except Exception as exc:
1580
1589
  return {"ok": False, "error": f"generate_variation failed: {exc}"}
1581
- return {"ok": True, "file_hash": file_hash, **(result if isinstance(result, dict) else {"raw": result})}
1590
+ out = dict(result) if isinstance(result, dict) else {"raw": result}
1591
+ out.pop("raw", None) # drop verbose debug payload
1592
+ return {"ok": True, "uuid": uuid, **out}
1582
1593
 
1583
1594
 
1584
- @mcp.tool()
1585
- async def splice_search_with_sound(
1586
- ctx: Context,
1587
- audio_path: str,
1588
- limit: int = 20,
1589
- ) -> dict:
1590
- """Reference-audio search — the Sounds Plugin's "Search with Sound".
1595
+ # NOTE: splice_search_with_sound was removed 2026-04-22 — user does this
1596
+ # in-Splice manually. If someone wants to resurrect it, the capture recipe
1597
+ # is still at docs/2026-04-22-splice-https-capture-recipe.md.
1591
1598
 
1592
- Uploads a local audio file to Splice's AI and returns catalog samples
1593
- with similar character. Complements `splice_describe_sound` (text)
1594
- and `search_samples` (keyword).
1595
1599
 
1596
- **Status: scaffolding complete, wiring pending real-traffic capture
1597
- (multipart upload shape is the most uncertain part of the bridge).**
1598
- Until `SPLICE_SEARCH_WITH_SOUND_ENDPOINT` is set, returns a structured
1599
- NOT_YET_IMPLEMENTED error.
1600
+ @mcp.tool()
1601
+ async def splice_http_diagnose(ctx: Context) -> dict:
1602
+ """Diagnose the Splice HTTPS bridge configuration and readiness.
1600
1603
 
1601
- audio_path: absolute path to a local audio file (.wav, .mp3, .flac)
1602
- limit: max results (default 20)
1604
+ Reports which endpoints are configured, whether a session token is
1605
+ reachable from the gRPC client, and what the next step is to unblock
1606
+ `splice_describe_sound` and `splice_generate_variation`.
1607
+
1608
+ Use this BEFORE calling either tool if you want a clear readout of
1609
+ "what's missing, and how do I fix it" instead of per-tool
1610
+ ENDPOINT_NOT_CONFIGURED errors.
1603
1611
  """
1604
- bridge, err = _build_http_bridge(ctx)
1605
- if err:
1606
- return err
1607
- from ..splice_client.http_bridge import SpliceHTTPError
1608
- if not audio_path or not os.path.isfile(audio_path):
1609
- return {"ok": False, "error": f"audio_path not found: {audio_path}"}
1612
+ from ..splice_client.http_bridge import SpliceHTTPConfig
1613
+
1614
+ cfg = SpliceHTTPConfig.from_env()
1615
+ endpoints = {
1616
+ "describe": cfg.describe_endpoint,
1617
+ "variation": cfg.variation_endpoint,
1618
+ }
1619
+ verified = {
1620
+ "describe": cfg.describe_verified,
1621
+ "variation": cfg.variation_verified,
1622
+ }
1623
+ unverified = [name for name, ok in verified.items() if not ok]
1624
+ configured_count = sum(1 for v in endpoints.values() if v not in (None, ""))
1625
+
1626
+ # Try to read the session token via the gRPC client the SAME way
1627
+ # the real tools do — reach into ctx.lifespan_context["splice_client"]
1628
+ # and actually attempt a GetSession fetch. Walking a different
1629
+ # engine-nested path (earlier mistake) reported "token unavailable"
1630
+ # while the bridge's real request path succeeded — a misleading
1631
+ # diagnostic is worse than no diagnostic.
1632
+ session_token_available = False
1633
+ session_token_error = None
1634
+ grpc_client = None
1610
1635
  try:
1611
- result = await bridge.search_with_sound(
1612
- audio_path=audio_path, limit=int(limit),
1636
+ grpc_client = ctx.lifespan_context.get("splice_client")
1637
+ except AttributeError:
1638
+ pass
1639
+ if grpc_client is None or not getattr(grpc_client, "connected", False):
1640
+ session_token_error = "Splice gRPC not connected"
1641
+ else:
1642
+ # Connection is up; confirm a token actually comes back.
1643
+ from ..splice_client.http_bridge import fetch_session_token
1644
+ try:
1645
+ token = await fetch_session_token(grpc_client)
1646
+ if token:
1647
+ session_token_available = True
1648
+ else:
1649
+ session_token_error = (
1650
+ "GetSession RPC returned no token — user may be "
1651
+ "logged out or gRPC schema drifted"
1652
+ )
1653
+ except Exception as exc:
1654
+ session_token_error = f"GetSession call failed: {exc}"
1655
+
1656
+ next_steps: list = []
1657
+ if "describe" in unverified:
1658
+ next_steps.append(
1659
+ "Describe endpoint unverified — reset config to defaults "
1660
+ "(delete ~/.livepilot/splice.json or unset env vars) so the "
1661
+ "captured surfaces-graphql.splice.com/graphql endpoint is used."
1613
1662
  )
1614
- except SpliceHTTPError as exc:
1615
- return exc.to_dict()
1616
- except Exception as exc:
1617
- return {"ok": False, "error": f"search_with_sound failed: {exc}"}
1618
- return {"ok": True, "audio_path": audio_path, **(result if isinstance(result, dict) else {"raw": result})}
1663
+ if "variation" in unverified:
1664
+ next_steps.append(
1665
+ "Variation GraphQL operation not yet captured. Right-click "
1666
+ "a Splice sample and click Variations with mitmproxy running. "
1667
+ "See docs/2026-04-22-splice-https-capture-recipe.md."
1668
+ )
1669
+ if not session_token_available:
1670
+ next_steps.append(
1671
+ "Splice desktop app is not reachable — the bridge reads the "
1672
+ "session token via gRPC GetSession RPC. Ensure the app is "
1673
+ "running and logged in."
1674
+ )
1675
+ if not next_steps:
1676
+ next_steps.append("Bridge fully ready — test with splice_describe_sound.")
1677
+
1678
+ return {
1679
+ "ok": True,
1680
+ "base_url": cfg.base_url,
1681
+ "endpoints": endpoints,
1682
+ "verified": verified,
1683
+ "configured_count": configured_count,
1684
+ "unverified_endpoints": unverified,
1685
+ "is_user_configured": cfg.is_user_configured,
1686
+ "session_token_available": session_token_available,
1687
+ "session_token_error": session_token_error,
1688
+ "next_steps": next_steps,
1689
+ "docs": "docs/2026-04-22-splice-https-capture-recipe.md",
1690
+ }