livepilot 1.23.2 → 1.23.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/CHANGELOG.md +124 -0
  2. package/README.md +108 -10
  3. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  4. package/m4l_device/livepilot_bridge.js +39 -1
  5. package/mcp_server/__init__.py +1 -1
  6. package/mcp_server/atlas/cross_pack_chain.py +658 -0
  7. package/mcp_server/atlas/demo_story.py +700 -0
  8. package/mcp_server/atlas/extract_chain.py +786 -0
  9. package/mcp_server/atlas/macro_fingerprint.py +554 -0
  10. package/mcp_server/atlas/overlays.py +95 -3
  11. package/mcp_server/atlas/pack_aware_compose.py +1255 -0
  12. package/mcp_server/atlas/preset_resolver.py +238 -0
  13. package/mcp_server/atlas/tools.py +1001 -31
  14. package/mcp_server/atlas/transplant.py +1177 -0
  15. package/mcp_server/mix_engine/state_builder.py +44 -1
  16. package/mcp_server/runtime/capability_state.py +34 -3
  17. package/mcp_server/runtime/remote_commands.py +10 -0
  18. package/mcp_server/server.py +45 -24
  19. package/mcp_server/tools/agent_os.py +33 -9
  20. package/mcp_server/tools/analyzer.py +84 -23
  21. package/mcp_server/tools/browser.py +20 -1
  22. package/mcp_server/tools/devices.py +78 -11
  23. package/mcp_server/tools/perception.py +5 -1
  24. package/mcp_server/tools/tracks.py +39 -2
  25. package/mcp_server/user_corpus/__init__.py +48 -0
  26. package/mcp_server/user_corpus/manifest.py +142 -0
  27. package/mcp_server/user_corpus/plugin_engine/__init__.py +39 -0
  28. package/mcp_server/user_corpus/plugin_engine/detector.py +579 -0
  29. package/mcp_server/user_corpus/plugin_engine/manual.py +347 -0
  30. package/mcp_server/user_corpus/plugin_engine/research.py +247 -0
  31. package/mcp_server/user_corpus/runner.py +261 -0
  32. package/mcp_server/user_corpus/scanner.py +115 -0
  33. package/mcp_server/user_corpus/scanners/__init__.py +18 -0
  34. package/mcp_server/user_corpus/scanners/adg.py +79 -0
  35. package/mcp_server/user_corpus/scanners/als.py +144 -0
  36. package/mcp_server/user_corpus/scanners/amxd.py +374 -0
  37. package/mcp_server/user_corpus/scanners/plugin_preset.py +202 -0
  38. package/mcp_server/user_corpus/tools.py +904 -0
  39. package/mcp_server/user_corpus/wizard.py +224 -0
  40. package/package.json +2 -2
  41. package/remote_script/LivePilot/__init__.py +1 -1
  42. package/remote_script/LivePilot/browser.py +7 -2
  43. package/remote_script/LivePilot/devices.py +9 -0
  44. package/remote_script/LivePilot/simpler_sample.py +98 -0
  45. package/requirements.txt +3 -3
  46. package/server.json +2 -2
@@ -0,0 +1,261 @@
1
+ """Scan runner — orchestrates per-file scans across all configured sources.
2
+
3
+ Reads a Manifest, walks each source's directory, dispatches to the registered
4
+ Scanner for that source's type_id, and writes:
5
+ - <output_root>/<scanner.output_subdir>/_parses/<slug>.json (full sidecar)
6
+ - <output_root>/<scanner.output_subdir>/<slug>.yaml (search wrapper)
7
+
8
+ Per-file errors are logged and counted but never abort the whole scan.
9
+ mtime-based incremental skipping is on by default.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import hashlib
15
+ import json
16
+ import logging
17
+ import os
18
+ from dataclasses import dataclass, field
19
+ from datetime import datetime, timezone
20
+ from pathlib import Path
21
+ from typing import Iterable
22
+
23
+ import yaml
24
+
25
+ from .manifest import Manifest, Source, save_manifest
26
+ from .scanner import Scanner, get_scanner
27
+
28
+ logger = logging.getLogger(__name__)
29
+
30
+
31
+ # ─── Result types ────────────────────────────────────────────────────────────
32
+
33
+
34
@dataclass
class FileScanResult:
    """Outcome of scanning a single file.

    NOTE(review): not referenced anywhere else in this module — appears
    reserved for per-file reporting by callers; confirm before removing.
    """

    # The source file that was (or would have been) scanned.
    path: Path
    # JSON sidecar written for this file, when the scan succeeded.
    sidecar_path: Path | None = None
    # YAML search wrapper written for this file, when the scan succeeded.
    wrapper_path: Path | None = None
    # True when the file was skipped as unchanged.
    skipped: bool = False
    # Error message when the scan failed, else None.
    error: str | None = None
41
+
42
+
43
@dataclass
class SourceScanResult:
    """Aggregated counters for one manifest source's scan pass."""

    # Source.id from the manifest.
    source_id: str
    # Scanner type handling this source (Source.type).
    type_id: str
    # Files parsed and written this pass.
    files_scanned: int = 0
    # Files skipped as unchanged (recorded mtime matched).
    files_skipped: int = 0
    # Files whose scan raised; details collected in `errors`.
    files_errored: int = 0
    errors: list[tuple[str, str]] = field(default_factory=list)  # (path, msg)
    # Wall-clock duration of this source's scan, rounded to 2 decimals.
    elapsed_sec: float = 0.0
52
+
53
+
54
@dataclass
class ScanResult:
    """Whole-run aggregate across all scanned sources."""

    # One entry per source actually scanned (skipped types are omitted).
    sources: list[SourceScanResult] = field(default_factory=list)
    # Sum of files_scanned across `sources`.
    total_scanned: int = 0
    # Sum of files_skipped across `sources`.
    total_skipped: int = 0
    # Sum of files_errored across `sources`.
    total_errored: int = 0
60
+
61
+
62
+ # ─── Entry point ─────────────────────────────────────────────────────────────
63
+
64
+
65
def run_scan(
    manifest: Manifest,
    only_source_id: str | None = None,
    update_manifest_path: Path | None = None,
) -> ScanResult:
    """Scan every source in *manifest* (optionally restricted to one source id).

    Sidecars and wrappers are always written under the manifest's output root.
    Sources whose type has no registered scanner are skipped with a warning.
    When `update_manifest_path` is given, per-source `last_scanned` /
    `file_count` metadata is persisted back to disk after the run.
    """
    skip_unchanged = bool(manifest.options.get("skip_unchanged", True))
    output_root = manifest.output_root
    result = ScanResult()

    for source in manifest.sources:
        if only_source_id and source.id != only_source_id:
            continue

        try:
            scanner = get_scanner(source.type)
        except KeyError:
            logger.warning(
                "No scanner registered for type_id=%s (source %s) — skipping",
                source.type, source.id,
            )
            continue

        per_source = _scan_source(source, scanner, output_root, skip_unchanged)
        result.sources.append(per_source)
        result.total_scanned += per_source.files_scanned
        result.total_skipped += per_source.files_skipped
        result.total_errored += per_source.files_errored

        # Record scan time + how many files this source currently yields.
        per_source_total = per_source.files_scanned + per_source.files_skipped
        source.mark_scanned(file_count=per_source_total)

    if update_manifest_path is not None:
        save_manifest(manifest, update_manifest_path)
    return result
103
+
104
+
105
+ # ─── Per-source scan ─────────────────────────────────────────────────────────
106
+
107
+
108
def _scan_source(
    source: Source,
    scanner: Scanner,
    output_root: Path,
    skip_unchanged: bool,
) -> SourceScanResult:
    """Scan one source directory; per-file failures are recorded, never raised."""
    import time
    started = time.time()
    result = SourceScanResult(source_id=source.id, type_id=source.type)

    root = source.resolved_path
    if not root.exists():
        result.errors.append((str(root), "Source path does not exist"))
        return result

    exclude_globs = source.exclude_globs or []
    candidates = list(_iter_files(root, scanner, source.recursive, exclude_globs))

    sub_root = output_root / scanner.output_subdir
    parses_dir = sub_root / "_parses"
    parses_dir.mkdir(parents=True, exist_ok=True)

    for file_path in candidates:
        try:
            slug = f"{source.id}__{scanner.slug(file_path)}"
            sidecar_path = parses_dir / f"{slug}.json"
            wrapper_path = sub_root / f"{slug}.yaml"

            if skip_unchanged and _is_up_to_date(file_path, sidecar_path):
                result.files_skipped += 1
                continue

            sidecar = _build_sidecar(scanner, file_path, source)
            sidecar_path.write_text(
                json.dumps(sidecar, indent=2, ensure_ascii=False),
                encoding="utf-8",
            )

            wrapper = _build_wrapper(scanner, source, sidecar, sidecar_path)
            wrapper_path.write_text(
                yaml.dump(wrapper, sort_keys=False, default_flow_style=False,
                          width=200, allow_unicode=True),
                encoding="utf-8",
            )
            result.files_scanned += 1
        except Exception as exc:  # noqa: BLE001 — never abort over one bad file
            result.files_errored += 1
            result.errors.append((str(file_path), f"{type(exc).__name__}: {exc}"))
            logger.warning("Scan error %s: %s", file_path, exc)

    result.elapsed_sec = round(time.time() - started, 2)
    return result
160
+
161
+
162
+ # ─── Helpers ─────────────────────────────────────────────────────────────────
163
+
164
+
165
+ def _iter_files(
166
+ root: Path, scanner: Scanner, recursive: bool, excludes: list[str],
167
+ ) -> Iterable[Path]:
168
+ """Yield files matching scanner extensions, honoring excludes + recursion.
169
+
170
+ Filters out:
171
+ - macOS AppleDouble metadata files (filenames starting with "._")
172
+ - Hidden files (filenames starting with ".") — usually OS/cache cruft
173
+ - Anything matching a caller-supplied exclude_glob
174
+ """
175
+ walker = root.rglob("*") if recursive else root.glob("*")
176
+ for p in walker:
177
+ if not p.is_file():
178
+ continue
179
+ # Skip macOS resource-fork siblings and hidden files
180
+ if p.name.startswith("._") or p.name.startswith("."):
181
+ continue
182
+ if not scanner.is_applicable(p):
183
+ continue
184
+ if any(p.match(g) for g in excludes):
185
+ continue
186
+ yield p
187
+
188
+
189
+ def _is_up_to_date(source_file: Path, sidecar_path: Path) -> bool:
190
+ """True iff the sidecar exists AND its source mtime matches what's recorded."""
191
+ if not sidecar_path.exists():
192
+ return False
193
+ try:
194
+ sidecar = json.loads(sidecar_path.read_text(encoding="utf-8"))
195
+ except Exception: # noqa: BLE001
196
+ return False
197
+ recorded_mtime = sidecar.get("source_mtime")
198
+ if recorded_mtime is None:
199
+ return False
200
+ try:
201
+ return int(recorded_mtime) == int(source_file.stat().st_mtime)
202
+ except OSError:
203
+ return False
204
+
205
+
206
def _build_sidecar(scanner: Scanner, path: Path, source: Source) -> dict:
    """Invoke the scanner on *path* and attach provenance metadata around its output."""
    payload = scanner.scan_one(path)
    stat = path.stat()
    stamped_at = datetime.now(timezone.utc).isoformat(timespec="seconds")
    return {
        "schema_version": scanner.schema_version,
        "scanner": scanner.type_id,
        "source_id": source.id,
        "source_path": str(path),
        "source_mtime": int(stat.st_mtime),
        "source_sha256": _sha256_short(path),
        "scan_timestamp": stamped_at,
        "data": payload,
    }
219
+
220
+
221
+ def _sha256_short(path: Path, chunk: int = 1 << 20) -> str:
222
+ """First 16 hex chars of SHA-256. Cheap fingerprint for change detection."""
223
+ try:
224
+ h = hashlib.sha256()
225
+ with path.open("rb") as fh:
226
+ while True:
227
+ buf = fh.read(chunk)
228
+ if not buf:
229
+ break
230
+ h.update(buf)
231
+ return h.hexdigest()[:16]
232
+ except OSError:
233
+ return ""
234
+
235
+
236
def _build_wrapper(
    scanner: Scanner, source: Source, sidecar: dict, sidecar_path: Path,
) -> dict:
    """Assemble the searchable YAML wrapper that the overlay loader indexes."""
    data = sidecar.get("data") or {}
    slug = sidecar_path.stem
    description = scanner.derive_description(data)
    tags = _common_tags(source, scanner) + scanner.derive_tags(data)
    # Path stored relative to the scanner's output subdirectory root.
    rel_sidecar = sidecar_path.relative_to(sidecar_path.parent.parent)
    return {
        "entity_id": slug,
        "entity_type": f"user_{scanner.type_id}",
        "namespace": f"user.{source.id}",
        "name": data.get("name") or slug,
        "description": description,
        "tags": tags,
        "sidecar_path": str(rel_sidecar),
        "schema_version": scanner.schema_version,
    }
253
+
254
+
255
+ def _common_tags(source: Source, scanner: Scanner) -> list[str]:
256
+ """Tags every wrapper gets regardless of scanner type."""
257
+ return [
258
+ f"scanner:{scanner.type_id}",
259
+ f"source:{source.id}",
260
+ f"namespace:user.{source.id}",
261
+ ]
@@ -0,0 +1,115 @@
1
+ """Scanner abstract base class + registry.
2
+
3
+ A Scanner reads ONE file of its type (e.g. one .als, one .adg, one .amxd) and
4
+ returns a JSON-serializable sidecar dict. It also derives searchable tags +
5
+ description from that sidecar so the corpus runner can build a YAML wrapper.
6
+
7
+ Plug in your own:
8
+
9
+ from mcp_server.user_corpus.scanner import Scanner, register_scanner
10
+
11
+ @register_scanner
12
+ class MyScanner(Scanner):
13
+ type_id = "my-format"
14
+ file_extensions = [".myx"]
15
+ output_subdir = "my_format"
16
+
17
+ def scan_one(self, path):
18
+ return {"data": ...}
19
+
20
+ def derive_tags(self, sidecar):
21
+ return ["my-format", "..."]
22
+
23
+ def derive_description(self, sidecar):
24
+ return "..."
25
+
26
+ The decorator self-registers in the global SCANNERS dict; the runner discovers
27
+ it automatically as long as the module is imported somewhere.
28
+ """
29
+
30
+ from __future__ import annotations
31
+
32
+ from abc import ABC, abstractmethod
33
+ from pathlib import Path
34
+ from typing import ClassVar
35
+
36
+
37
class Scanner(ABC):
    """Base class for per-file content scanners.

    Required class attributes:
        type_id          — unique short identifier, e.g. "als"
        file_extensions  — lowercase suffixes handled, e.g. [".als"]
        output_subdir    — subdirectory under the output root, e.g. "projects"
        schema_version   — bump whenever the sidecar shape changes (default 1)

    Required methods:
        scan_one(path)              — parse one file into a dict
        derive_tags(sidecar)        — searchable tags for the wrapper
        derive_description(sidecar) — human-readable one-liner

    Optional overrides:
        slug(path)        — sidecar filename stem (default: normalized stem)
        is_applicable(p)  — applicability predicate (default: suffix match)
    """

    type_id: ClassVar[str]
    file_extensions: ClassVar[list[str]]
    output_subdir: ClassVar[str]
    schema_version: ClassVar[int] = 1

    @abstractmethod
    def scan_one(self, path: Path) -> dict:
        ...

    @abstractmethod
    def derive_tags(self, sidecar: dict) -> list[str]:
        ...

    @abstractmethod
    def derive_description(self, sidecar: dict) -> str:
        ...

    def is_applicable(self, path: Path) -> bool:
        """Default applicability test: lowercase suffix is in file_extensions."""
        return path.suffix.lower() in self.file_extensions

    def slug(self, path: Path) -> str:
        """Filesystem-safe slug derived from the file's stem.

        Lowercases, maps spaces and underscores to hyphens, and drops empty
        segments. Override when bare stems would collide (e.g. prepend the
        parent folder name).
        """
        normalized = path.stem.lower().replace(" ", "-").replace("_", "-")
        return "-".join(filter(None, normalized.split("-")))
85
+
86
+
87
# ─── Registry ────────────────────────────────────────────────────────────────

SCANNERS: dict[str, type[Scanner]] = {}


def register_scanner(cls: type[Scanner]) -> type[Scanner]:
    """Class decorator: record *cls* in SCANNERS under its type_id.

    Re-registering an existing type_id silently replaces the previous entry
    (handy for hot reload during development; do NOT depend on replacement
    order for production correctness).
    """
    if not issubclass(cls, Scanner):
        raise TypeError(f"{cls.__name__} is not a Scanner subclass")
    if not getattr(cls, "type_id", None):
        raise ValueError(f"{cls.__name__} must define type_id")
    SCANNERS[cls.type_id] = cls
    return cls


def get_scanner(type_id: str) -> Scanner:
    """Return a fresh instance of the scanner registered for *type_id*.

    Raises KeyError when nothing is registered under that id.
    """
    return SCANNERS[type_id]()


def list_scanners() -> list[str]:
    """All registered scanner type_ids, in registration order."""
    return [*SCANNERS]
@@ -0,0 +1,18 @@
1
+ """Built-in scanners for the user corpus builder.
2
+
3
+ Each module in this package registers its Scanner subclass via the
4
+ @register_scanner decorator. The package __init__ in mcp_server.user_corpus
5
+ imports all of them eagerly so the registry is populated at import time.
6
+
7
+ To add your own scanner:
8
+
9
+ # mcp_server/user_corpus/scanners/my_format.py
10
+ from ..scanner import Scanner, register_scanner
11
+
12
+ @register_scanner
13
+ class MyScanner(Scanner):
14
+ type_id = "my-format"
15
+ ...
16
+
17
+ Then reference it in your manifest as `type: my-format`.
18
+ """
@@ -0,0 +1,79 @@
1
+ """ADG scanner — wraps scripts/als_deep_parse.parse_adg for one-file use.
2
+
3
+ Reuses the production-grade preset parser:
4
+ - Macro display names via KeyMidi binding resolution (BUG-PARSER#2 fix)
5
+ - Branch / chain extraction
6
+ - Device summary
7
+
8
+ Same module as the .als parser; we share the lazy importer.
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ import re
14
+ from pathlib import Path
15
+
16
+ from ..scanner import Scanner, register_scanner
17
+ from .als import _als_parse # share the lazy importer
18
+
19
+
20
@register_scanner
class AdgScanner(Scanner):
    """Scanner for Ableton rack presets (.adg) and device presets (.adv)."""

    type_id = "adg"
    file_extensions = [".adg", ".adv"]
    output_subdir = "racks"
    schema_version = 1

    def scan_one(self, path: Path) -> dict:
        """Parse one preset via the shared als_deep_parse module."""
        return _als_parse().parse_adg(str(path))

    def derive_tags(self, sidecar: dict) -> list[str]:
        """Searchable tags: rack class, preset type, named macros, chain shape."""
        tags: list[str] = ["rack-preset"]
        if rack_class := sidecar.get("rack_class"):
            tags.append(_class_to_tag(rack_class))
        if preset_type := sidecar.get("preset_type"):
            tags.append(preset_type)

        # Up to 8 producer-named macros (display names already canonicalized
        # by the parser); default "Macro N" placeholders are skipped.
        for macro in (sidecar.get("macros") or [])[:8]:
            macro_name = (macro.get("name") or "").strip()
            if macro_name and not macro_name.startswith("Macro "):
                if macro_slug := _slug(macro_name):
                    tags.append(f"macro:{macro_slug}")

        if sidecar.get("chains"):
            tags.append("has-chains")
        branch_counts = sidecar.get("branch_counts") or {}
        if isinstance(branch_counts, dict) and sum(branch_counts.values()) > 1:
            tags.append("multi-branch")
        return tags

    def derive_description(self, sidecar: dict) -> str:
        """One-liner: rack class, macro counts, branch total."""
        rack_class = sidecar.get("rack_class") or "rack"
        macros = sidecar.get("macros") or []
        named = [
            m for m in macros
            if (m.get("name") or "").strip() and not (m.get("name") or "").startswith("Macro ")
        ]
        branch_total = sum((sidecar.get("branch_counts") or {}).values())
        return (
            f"{rack_class} preset with {len(macros)} macros "
            f"({len(named)} producer-named), {branch_total} branches"
        )
67
+
68
+
69
+ def _slug(s: str) -> str:
70
+ return re.sub(r"[^a-z0-9]+", "-", s.lower()).strip("-")
71
+
72
+
73
def _class_to_tag(rack_class: str) -> str:
    """Map a Live rack device class to a friendly tag; slugify unknown classes."""
    known = {
        "InstrumentGroupDevice": "instrument-rack",
        "AudioEffectGroupDevice": "audio-effect-rack",
        "MidiEffectGroupDevice": "midi-effect-rack",
        "DrumGroupDevice": "drum-rack",
    }
    return known.get(rack_class, _slug(rack_class))
@@ -0,0 +1,144 @@
1
+ """ALS scanner — wraps scripts/als_deep_parse.parse_als for one-file use.
2
+
3
+ Reuses the production-grade parser already used by the factory pack atlas:
4
+ - Macro-value extraction (BUG-PARSER#1 fix: direct child scan, not recursive iter)
5
+ - Numeric scale-mode decode for Live 9/10 files (BUG-PARSER#3)
6
+ - Filename-key fallback for construction-kit packs (BUG-PARSER#4)
7
+ - Nested chain recursion (BUG-C#2 schema A)
8
+ - PluginDevice metadata (BUG-PARSER#5)
9
+
10
+ The user corpus inherits all of these for free.
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import importlib.util
16
+ import re
17
+ import sys
18
+ from pathlib import Path
19
+ from typing import Any
20
+
21
+ from ..scanner import Scanner, register_scanner
22
+
23
+
24
# ─── Lazy import of scripts/als_deep_parse.py ────────────────────────────────

_ALS_PARSE_MODULE = None


def _als_parse():
    """Return the shared scripts/als_deep_parse.py module, loading it on first use.

    The parser lives outside the mcp_server package, so it is loaded by file
    path via importlib and cached in a module-level global.
    """
    global _ALS_PARSE_MODULE
    if _ALS_PARSE_MODULE is None:
        repo_root = Path(__file__).resolve().parents[3]
        parser_path = repo_root / "scripts" / "als_deep_parse.py"
        if not parser_path.exists():
            raise ImportError(f"als_deep_parse.py not found at {parser_path}")
        spec = importlib.util.spec_from_file_location(
            "_als_deep_parse_user_corpus", parser_path,
        )
        if spec is None or spec.loader is None:
            raise ImportError(f"Could not load {parser_path}")
        module = importlib.util.module_from_spec(spec)
        # Standard importlib recipe: register in sys.modules before exec so
        # the module can be found during its own import-time code.
        sys.modules["_als_deep_parse_user_corpus"] = module
        spec.loader.exec_module(module)
        _ALS_PARSE_MODULE = module
    return _ALS_PARSE_MODULE
51
+
52
+
53
+ # ─── Scanner ────────────────────────────────────────────────────────────────
54
+
55
+
56
@register_scanner
class AlsScanner(Scanner):
    """Scanner for Ableton Live sets (.als), reusing the shared deep parser."""

    type_id = "als"
    file_extensions = [".als"]
    output_subdir = "projects"
    schema_version = 1

    def scan_one(self, path: Path) -> dict:
        """Parse one Live set via the shared als_deep_parse module."""
        return _als_parse().parse_als(str(path))

    def derive_tags(self, sidecar: dict) -> list[str]:
        """Searchable tags: tempo, scale, dominant device classes, session size."""
        tags: list[str] = ["als-project"]
        bpm = sidecar.get("bpm")
        if isinstance(bpm, (int, float)):
            tags.append(f"{int(round(bpm))}bpm")

        scale = sidecar.get("scale") or {}
        scale_name = scale.get("name")
        if scale_name and scale_name != "Major":  # "Major" is the default; skip it
            tags.append(scale_name.lower())
        if scale.get("source") == "filename-fallback":
            tags.append("filename-key")

        # The five most frequent device classes across tracks and nested chains.
        counts: dict[str, int] = {}
        for device_class in _walk_device_classes(sidecar.get("tracks") or []):
            counts[device_class] = counts.get(device_class, 0) + 1
        top = sorted(counts.items(), key=lambda item: item[1], reverse=True)[:5]
        tags.extend(f"has-{_slug(device_class)}" for device_class, _ in top)

        # Bucket by track count so similarly sized sessions cluster together.
        n_tracks = len(sidecar.get("tracks") or [])
        if n_tracks > 0:
            if n_tracks < 5:
                tags.append("small-session")
            elif n_tracks < 16:
                tags.append("medium-session")
            else:
                tags.append("large-session")
        return tags

    def derive_description(self, sidecar: dict) -> str:
        """One-liner: BPM, scale, track count."""
        bpm = sidecar.get("bpm")
        bpm_str = f"{int(round(bpm))} BPM" if isinstance(bpm, (int, float)) else "unknown BPM"
        scale = sidecar.get("scale") or {}
        scale_str = _format_scale(scale.get("root_note", ""), scale.get("name", ""))
        n_tracks = len(sidecar.get("tracks") or [])
        return f"{bpm_str}, {scale_str}, {n_tracks} tracks"
106
+
107
+
108
+ # ─── Helpers ─────────────────────────────────────────────────────────────────
109
+
110
+
111
+ def _slug(s: str) -> str:
112
+ return re.sub(r"[^a-z0-9]+", "-", s.lower()).strip("-")
113
+
114
+
115
+ def _format_scale(root: Any, name: str) -> str:
116
+ root_names = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
117
+ try:
118
+ i = int(str(root))
119
+ if 0 <= i < 12:
120
+ return f"{root_names[i]} {name}"
121
+ except (ValueError, TypeError):
122
+ pass
123
+ return name or "unknown scale"
124
+
125
+
126
def _walk_device_classes(tracks: list[dict]) -> list[str]:
    """Collect every device class across all tracks, including nested rack chains."""
    classes: list[str] = []
    for track in tracks:
        classes += _walk_devices(track.get("devices") or [])
    return classes
132
+
133
+
134
+ def _walk_devices(devices: list[dict], depth: int = 0) -> list[str]:
135
+ if depth > 6: # defensive cap
136
+ return []
137
+ out: list[str] = []
138
+ for d in devices:
139
+ cls = d.get("class") or ""
140
+ if cls:
141
+ out.append(cls)
142
+ for chain in d.get("chains") or []:
143
+ out.extend(_walk_devices(chain.get("devices") or [], depth + 1))
144
+ return out