livepilot 1.10.8 → 1.10.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -107,16 +107,137 @@ def preview_semantic_move(
107
107
  return result
108
108
 
109
109
 
110
+ def _build_taste_context(ctx: Context) -> dict:
111
+ """Pull the active taste graph for ranking, with defensive fallbacks.
112
+
113
+ Returns a dict with ``dimension_weights``, ``dimension_avoidances``,
114
+ ``move_family_scores`` (family → score), and ``evidence_count``.
115
+ Empty dicts when no taste has been recorded yet — the ranker then
116
+ collapses to pure keyword matching, which is the correct behavior for
117
+ a cold-start user with no history.
118
+ """
119
+ try:
120
+ from ..memory.taste_graph import build_taste_graph
121
+ from ..memory.taste_memory import TasteMemoryStore
122
+ from ..memory.anti_memory import AntiMemoryStore
123
+
124
+ taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
125
+ anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
126
+ graph = build_taste_graph(taste_store=taste_store, anti_store=anti_store)
127
+
128
+ move_family_scores: dict[str, float] = {}
129
+ for family, entry in getattr(graph, "move_family_scores", {}).items():
130
+ score = getattr(entry, "score", None)
131
+ if isinstance(score, (int, float)):
132
+ move_family_scores[family] = float(score)
133
+
134
+ return {
135
+ "dimension_weights": dict(getattr(graph, "dimension_weights", {}) or {}),
136
+ "dimension_avoidances": dict(getattr(graph, "dimension_avoidances", {}) or {}),
137
+ "move_family_scores": move_family_scores,
138
+ "evidence_count": int(getattr(graph, "evidence_count", 0) or 0),
139
+ }
140
+ except Exception as exc:
141
+ logger.debug("_build_taste_context failed: %s", exc)
142
+ return {
143
+ "dimension_weights": {},
144
+ "dimension_avoidances": {},
145
+ "move_family_scores": {},
146
+ "evidence_count": 0,
147
+ }
148
+
149
+
150
+ def _score_move_for_request(move, request_lower: str, request_words: set, taste: dict) -> tuple[float, dict]:
151
+ """Compute the composite score for a single move.
152
+
153
+ Composition:
154
+ 0.55 × keyword overlap (intent + move_id + targets)
155
+ 0.30 × taste alignment (from taste_graph.dimension_weights on move.targets)
156
+ 0.15 × (1 - anti avoidance penalty) (from dimension_avoidances)
157
+
158
+ ± up to 0.10 family bonus/penalty from move_family_scores[family].
159
+
160
+ When the user has no recorded taste (evidence_count == 0), the taste
161
+ and anti-penalty components collapse to neutral 0.5 so cold-start
162
+ behavior stays identical to the old keyword-only ranker.
163
+ """
164
+ # ── Keyword overlap component (0..1) ──────────────────────────────
165
+ intent_lower = move.intent.lower()
166
+ move_words = set(move.move_id.replace("_", " ").split())
167
+ intent_words = set(intent_lower.split())
168
+
169
+ overlap = request_words & (move_words | intent_words)
170
+ keyword_score = min(1.0, len(overlap) * 0.3)
171
+
172
+ for dim in move.targets:
173
+ if dim.lower() in request_lower:
174
+ keyword_score = min(1.0, keyword_score + 0.2)
175
+
176
+ if move.move_id.replace("_", " ") in request_lower:
177
+ keyword_score = 1.0
178
+
179
+ # ── Taste alignment component (0..1) ──────────────────────────────
180
+ evidence_count = taste["evidence_count"]
181
+ dim_weights = taste["dimension_weights"]
182
+ dim_avoid = taste["dimension_avoidances"]
183
+
184
+ if evidence_count > 0 and move.targets:
185
+ # Average dimension_weights for this move's targets; weights are
186
+ # -1..1 with 0 meaning unknown. Remap to 0..1 so "neutral" is 0.5.
187
+ raw_taste = [
188
+ dim_weights.get(dim, 0.0) for dim in move.targets
189
+ ]
190
+ taste_alignment = sum((w + 1.0) / 2.0 for w in raw_taste) / len(raw_taste)
191
+ avoidance = sum(
192
+ dim_avoid.get(dim, 0.0) for dim in move.targets
193
+ ) / len(move.targets)
194
+ avoidance = max(0.0, min(1.0, avoidance))
195
+ else:
196
+ taste_alignment = 0.5
197
+ avoidance = 0.0
198
+
199
+ composite = (
200
+ 0.55 * keyword_score
201
+ + 0.30 * taste_alignment
202
+ + 0.15 * (1.0 - avoidance)
203
+ )
204
+
205
+ # ── Family bonus/penalty (±0.1) ────────────────────────────────────
206
+ family_bonus = 0.0
207
+ family_score = taste["move_family_scores"].get(move.family)
208
+ if family_score is not None:
209
+ # family score is 0..1 with 0.5 neutral; remap to -0.1..+0.1
210
+ family_bonus = (family_score - 0.5) * 0.2
211
+ composite += family_bonus
212
+
213
+ composite = max(0.0, min(1.0, composite))
214
+
215
+ breakdown = {
216
+ "keyword_score": round(keyword_score, 3),
217
+ "taste_alignment": round(taste_alignment, 3),
218
+ "avoidance_penalty": round(avoidance, 3),
219
+ "family_bonus": round(family_bonus, 3),
220
+ "evidence_count": evidence_count,
221
+ }
222
+ return composite, breakdown
223
+
224
+
110
225
  @mcp.tool()
111
226
  def propose_next_best_move(
112
227
  ctx: Context,
113
228
  request_text: str,
114
229
  limit: int = 3,
115
230
  ) -> dict:
116
- """Propose the best semantic moves for a natural language request.
231
+ """Propose the best semantic moves for a natural language request, ranked
232
+ by keyword fit AND the active taste graph.
117
233
 
118
- Analyzes the request text and ranks available semantic moves by
119
- relevance. Returns up to `limit` suggestions with confidence scores.
234
+ Shipped in v1.10.9: ranking is no longer pure keyword overlap — it now
235
+ blends keyword match with taste alignment (``dimension_weights`` on each
236
+ move's targets), an anti-preference penalty (``dimension_avoidances``),
237
+ and a small family bonus from ``move_family_scores``. Cold-start users
238
+ with zero recorded evidence get the same ranking as before; users with
239
+ history see recommendations pulled toward dimensions they've kept and
240
+ away from ones they've undone.
120
241
 
121
242
  request_text: what the user wants (e.g., "make this punchier",
122
243
  "tighten the low end", "reduce repetition")
@@ -125,50 +246,37 @@ def propose_next_best_move(
125
246
  if not request_text.strip():
126
247
  return {"error": "request_text cannot be empty"}
127
248
 
128
- # Simple keyword matching for now — will be replaced by conductor
129
- # routing + taste ranking in V2 Step 7
130
249
  request_lower = request_text.lower()
250
+ request_words = set(request_lower.split())
251
+ taste = _build_taste_context(ctx)
131
252
  all_moves = list(registry._REGISTRY.values())
132
253
 
133
- scored = []
254
+ scored: list[tuple[object, float, dict]] = []
134
255
  for move in all_moves:
135
- score = 0.0
136
- # Match keywords from intent and move_id
137
- intent_lower = move.intent.lower()
138
- move_words = set(move.move_id.replace("_", " ").split())
139
- intent_words = set(intent_lower.split())
140
- request_words = set(request_lower.split())
141
-
142
- # Word overlap scoring
143
- overlap = request_words & (move_words | intent_words)
144
- score += len(overlap) * 0.3
145
-
146
- # Dimension matching
147
- for dim in move.targets:
148
- if dim in request_lower:
149
- score += 0.2
150
-
151
- # Boost exact intent matches
152
- if move.move_id.replace("_", " ") in request_lower:
153
- score += 1.0
154
-
155
- if score > 0:
156
- scored.append((move, min(score, 1.0)))
157
-
158
- # Sort by score descending
256
+ score, breakdown = _score_move_for_request(
257
+ move, request_lower, request_words, taste,
258
+ )
259
+ # Keep only moves that had any keyword signal or strong taste pull —
260
+ # a move with zero keyword overlap AND neutral taste would be noise.
261
+ if breakdown["keyword_score"] > 0 or taste["evidence_count"] >= 5:
262
+ scored.append((move, score, breakdown))
263
+
159
264
  scored.sort(key=lambda x: -x[1])
160
265
  top = scored[:limit]
161
266
 
162
267
  suggestions = []
163
- for move, score in top:
268
+ for move, score, breakdown in top:
164
269
  d = move.to_dict()
165
270
  d["match_score"] = round(score, 3)
271
+ d["score_breakdown"] = breakdown
166
272
  suggestions.append(d)
167
273
 
168
274
  return {
169
275
  "request": request_text,
170
276
  "suggestions": suggestions,
171
277
  "count": len(suggestions),
278
+ "taste_active": taste["evidence_count"] > 0,
279
+ "taste_evidence_count": taste["evidence_count"],
172
280
  }
173
281
 
174
282
 
@@ -144,6 +144,28 @@ async def _warm_analyzer_bridge(
144
144
  await asyncio.sleep(0.05)
145
145
 
146
146
 
147
+ def _bind_session_continuity(ableton: AbletonConnection) -> None:
148
+ """Hydrate the session-continuity tracker from persistent per-project state.
149
+
150
+ Fetches a minimal session fingerprint (tempo, signature, track/scene
151
+ layout) from the Remote Script, computes a project hash, and asks the
152
+ tracker to bind the matching ProjectStore + restore any previously-saved
153
+ creative threads and turn resolutions from disk.
154
+
155
+ Never raises: startup must succeed even if Ableton isn't reachable. In
156
+ that case, the tracker stays in-memory and the first ``record_turn_*`` /
157
+ ``open_thread`` call will lazy-bind via ``ensure_project_store_bound()``.
158
+ """
159
+ try:
160
+ from .session_continuity.tracker import bind_project_store_from_session
161
+
162
+ info = ableton.send_command("get_session_info")
163
+ if isinstance(info, dict) and not info.get("error"):
164
+ bind_project_store_from_session(info)
165
+ except Exception as exc:
166
+ logger.debug("_bind_session_continuity: lazy-bind (reason: %s)", exc)
167
+
168
+
147
169
  @asynccontextmanager
148
170
  async def lifespan(server):
149
171
  """Create and yield the shared AbletonConnection + M4L bridge + registries."""
@@ -203,6 +225,12 @@ async def lifespan(server):
203
225
  _check_remote_script_version(ableton)
204
226
  if bridge_state["transport"] is not None:
205
227
  await _warm_analyzer_bridge(ableton, spectral)
228
+ # Bind per-project persistent store so creative threads and turn
229
+ # history survive server restarts. Until v1.10.9 this was plumbed
230
+ # through the tracker but never called — threads/turns were effectively
231
+ # in-memory only. If Ableton isn't reachable yet, tools will lazy-bind
232
+ # on first write via ensure_project_store_bound().
233
+ _bind_session_continuity(ableton)
206
234
  yield {
207
235
  "ableton": ableton,
208
236
  "spectral": spectral,
@@ -312,28 +340,125 @@ def _coerce_schema_property(prop: dict) -> None:
312
340
 
313
341
 
314
342
  def _get_all_tools():
315
- """Get all registered tools, compatible with FastMCP 0.x and 3.x.
316
-
317
- WARNING: Accesses FastMCP private internals (_tool_manager, _local_provider).
318
- Pinned to fastmcp>=3.0.0,<3.3.0 in requirements.txt. If upgrading FastMCP,
319
- verify these attributes still exist or update this function.
343
+ """Get all registered tools; defends against FastMCP internal drift.
344
+
345
+ FastMCP's public API doesn't expose the registry as of 3.2.x (see
346
+ docs/FASTMCP_UPSTREAM_FR.md). Until it does, we probe known internal
347
+ attribute paths. Each probe fires in try/except so a structural
348
+ rearrangement (e.g. ``_components`` renamed under 3.3+) falls through
349
+ to the next path rather than exploding.
350
+
351
+ WARNING: Accesses FastMCP private internals. Pinned to
352
+ fastmcp>=3.0.0,<3.3.0 in requirements.txt. The startup self-test
353
+ (_assert_tool_registry_accessible) will fail loudly if every probe
354
+ returns empty — better than silently returning [] and disabling
355
+ schema coercion.
320
356
  """
321
- # FastMCP 0.x: mcp._tool_manager._tools (dict of name -> Tool)
322
- if hasattr(mcp, "_tool_manager"):
323
- return list(mcp._tool_manager._tools.values())
324
- # FastMCP 3.x: mcp._local_provider._components (dict of key -> Tool)
325
- if hasattr(mcp, "_local_provider") and hasattr(mcp._local_provider, "_components"):
326
- return list(mcp._local_provider._components.values())
357
+ probes = [
358
+ # FastMCP 0.x: mcp._tool_manager._tools (dict of name -> Tool)
359
+ ("_tool_manager._tools", lambda: list(mcp._tool_manager._tools.values())),
360
+ # FastMCP 3.0–3.2: mcp._local_provider._components
361
+ (
362
+ "_local_provider._components",
363
+ lambda: list(mcp._local_provider._components.values()),
364
+ ),
365
+ # FastMCP 3.3+ speculative: mcp._local_provider._tools (anticipated
366
+ # rename based on naming conventions in other providers). Kept here
367
+ # so a future bump surfaces a partial match rather than a full miss.
368
+ (
369
+ "_local_provider._tools",
370
+ lambda: list(mcp._local_provider._tools.values()),
371
+ ),
372
+ # Public-API future path (what we're asking for in the upstream FR);
373
+ # harmless to probe now so that once it ships we can lift the ceiling
374
+ # without touching this function again.
375
+ ("list_tools", lambda: list(mcp.list_tools())),
376
+ ]
377
+ for label, fn in probes:
378
+ try:
379
+ tools = fn()
380
+ except (AttributeError, TypeError):
381
+ continue
382
+ except Exception: # noqa: BLE001 — any error from an internal probe means "skip"
383
+ continue
384
+ if tools:
385
+ return tools
386
+
387
+ # All probes empty. Surface fastmcp version + attempted paths so the
388
+ # breakage is diagnosable without re-reading the code.
327
389
  import sys
328
-
390
+ try:
391
+ import fastmcp as _fm
392
+ fm_version = getattr(_fm, "__version__", "unknown")
393
+ except Exception: # noqa: BLE001
394
+ fm_version = "unknown"
329
395
  print(
330
- "LivePilot: WARNING — could not access FastMCP tool registry, "
331
- "string-to-number schema coercion will not work",
396
+ "LivePilot: ERROR — could not access FastMCP tool registry "
397
+ f"(fastmcp=={fm_version}). Tried: "
398
+ + ", ".join(label for label, _ in probes)
399
+ + ". Schema coercion and tool-catalog generation will be broken. "
400
+ "If FastMCP updated its internals, see docs/FASTMCP_UPSTREAM_FR.md.",
332
401
  file=sys.stderr,
333
402
  )
334
403
  return []
335
404
 
336
405
 
406
+ def _assert_tool_registry_accessible() -> None:
407
+ """Loudly fail startup if the FastMCP registry probe returns nothing.
408
+
409
+ Called once at module import, just before schema patching. The schema
410
+ patch silently no-ops on an empty registry, so without this assertion
411
+ a FastMCP-internals rename would degrade silently and produce a server
412
+ with 324 tools but no string-to-number coercion — a subtle, hard-to-
413
+ diagnose class of failure we've paid for once already.
414
+
415
+ Reads the expected count from ``tests/test_tools_contract.py`` (same
416
+ source of truth sync_metadata.py uses), so no second magic number.
417
+ """
418
+ import re
419
+ import sys
420
+
421
+ try:
422
+ contract_src = (
423
+ (__file__.rsplit("/", 2)[0] + "/tests/test_tools_contract.py")
424
+ if "__file__" in globals() else None
425
+ )
426
+ # Prefer an absolute path via Path for reliability:
427
+ from pathlib import Path
428
+ contract_path = Path(__file__).resolve().parents[1] / "tests" / "test_tools_contract.py"
429
+ expected = None
430
+ if contract_path.exists():
431
+ match = re.search(
432
+ r"assert len\(tools\) == (\d+)",
433
+ contract_path.read_text(encoding="utf-8"),
434
+ )
435
+ if match:
436
+ expected = int(match.group(1))
437
+ except Exception: # noqa: BLE001 — self-test must not block startup
438
+ expected = None
439
+
440
+ actual = len(_get_all_tools())
441
+ if actual == 0:
442
+ # Registry probe returned empty — this is the regression the test guards.
443
+ # Don't sys.exit (some test harnesses import server.py without a live
444
+ # FastMCP); print a loud diagnostic and let downstream code react.
445
+ print(
446
+ "LivePilot: STARTUP SELF-TEST FAILED — _get_all_tools() returned 0. "
447
+ "FastMCP internals likely changed. Verify requirements.txt pin "
448
+ "(fastmcp>=3.0.0,<3.3.0) matches the installed version.",
449
+ file=sys.stderr,
450
+ )
451
+ return
452
+ if expected is not None and actual != expected:
453
+ print(
454
+ f"LivePilot: STARTUP SELF-TEST WARNING — _get_all_tools() "
455
+ f"returned {actual} tools, tests/test_tools_contract.py expects "
456
+ f"{expected}. If you've added/removed tools, update the contract "
457
+ "and run scripts/sync_metadata.py --fix.",
458
+ file=sys.stderr,
459
+ )
460
+
461
+
337
462
  def _patch_tool_schemas() -> None:
338
463
  """Post-process all registered tool schemas for string coercion."""
339
464
  for tool in _get_all_tools():
@@ -346,6 +471,7 @@ def _patch_tool_schemas() -> None:
346
471
  if isinstance(definition, dict):
347
472
  _coerce_schema_property(definition)
348
473
 
474
+ _assert_tool_registry_accessible()
349
475
  _patch_tool_schemas()
350
476
 
351
477
 
@@ -22,6 +22,13 @@ class CreativeThread:
22
22
  def to_dict(self) -> dict:
23
23
  return asdict(self)
24
24
 
25
+ @classmethod
26
+ def from_dict(cls, data: dict) -> "CreativeThread":
27
+ """Rehydrate from persisted dict; unknown keys are ignored so a future
28
+ schema bump won't break load on older on-disk state."""
29
+ allowed = {f for f in cls.__dataclass_fields__}
30
+ return cls(**{k: v for k, v in data.items() if k in allowed})
31
+
25
32
  @property
26
33
  def is_stale(self) -> bool:
27
34
  """A thread is stale if untouched for >30 minutes."""
@@ -44,6 +51,12 @@ class TurnResolution:
44
51
  def to_dict(self) -> dict:
45
52
  return asdict(self)
46
53
 
54
+ @classmethod
55
+ def from_dict(cls, data: dict) -> "TurnResolution":
56
+ """Rehydrate from persisted dict; unknown keys are ignored."""
57
+ allowed = {f for f in cls.__dataclass_fields__}
58
+ return cls(**{k: v for k, v in data.items() if k in allowed})
59
+
47
60
 
48
61
  @dataclass
49
62
  class SessionStory:
@@ -65,6 +65,7 @@ def record_turn_resolution(
65
65
  identity_effect: "preserves", "evolves", "contrasts", or "resets"
66
66
  user_sentiment: "loved", "liked", "neutral", "disliked", or "hated"
67
67
  """
68
+ tracker.ensure_project_store_bound(ctx)
68
69
  turn = tracker.record_turn_resolution(
69
70
  request_text=request_text,
70
71
  outcome=outcome,
@@ -130,6 +131,7 @@ def open_creative_thread(
130
131
  if not description.strip():
131
132
  return {"error": "description cannot be empty"}
132
133
 
134
+ tracker.ensure_project_store_bound(ctx)
133
135
  thread = tracker.open_thread(description, domain=domain, priority=priority)
134
136
  return thread.to_dict()
135
137
 
@@ -44,6 +44,99 @@ def reset_story() -> None:
44
44
  _project_store = None
45
45
 
46
46
 
47
+ def bind_project_store_from_session(session_info: dict) -> Optional[str]:
48
+ """Bind a per-project persistent store and hydrate in-memory state.
49
+
50
+ Computes a project fingerprint from ``session_info`` (tempo, time sig,
51
+ song length, track/scene/return layout), opens the matching
52
+ ``ProjectStore`` under ``~/.livepilot/projects/<hash>/``, and rehydrates
53
+ the in-memory ``_threads`` and ``_turns`` from disk so that restarting
54
+ the MCP server preserves the user's creative threads and turn history.
55
+
56
+ Returns the project_id (12-char hash) on success, ``None`` on failure
57
+ (so callers can log without aborting startup). If the hash hasn't
58
+ changed since the last bind, this is a no-op — hot path is safe to
59
+ call on every turn.
60
+
61
+ Without this function, ``set_project_store()`` existed but nobody
62
+ called it, meaning README's "return to a project with prior creative
63
+ threads intact" was literally false — threads/turns were in-memory
64
+ only and reset on every server restart.
65
+ """
66
+ global _threads, _turns, _project_store
67
+
68
+ try:
69
+ from ..persistence.project_store import ProjectStore, project_hash
70
+ except Exception as exc:
71
+ logger.debug("bind_project_store_from_session: import failed: %s", exc)
72
+ return None
73
+
74
+ try:
75
+ new_id = project_hash(session_info or {})
76
+ except Exception as exc:
77
+ logger.debug("bind_project_store_from_session: hash failed: %s", exc)
78
+ return None
79
+
80
+ # Already bound to this project? Nothing to do.
81
+ if _project_store is not None and getattr(_project_store, "project_id", None) == new_id:
82
+ return new_id
83
+
84
+ try:
85
+ store = ProjectStore(new_id)
86
+ except Exception as exc:
87
+ logger.debug("bind_project_store_from_session: store open failed: %s", exc)
88
+ return None
89
+
90
+ # Hydrate in-memory threads + turns from the persisted store. We only
91
+ # rebuild what the tracker keeps live — SessionStory is recomputed on
92
+ # each get_session_story() call, so it doesn't need a direct restore.
93
+ try:
94
+ raw_threads = store.get_threads()
95
+ raw_turns = store.get_turns()
96
+ except Exception as exc:
97
+ logger.debug("bind_project_store_from_session: read failed: %s", exc)
98
+ raw_threads, raw_turns = [], []
99
+
100
+ _threads = {
101
+ t["thread_id"]: CreativeThread.from_dict(t)
102
+ for t in raw_threads
103
+ if isinstance(t, dict) and "thread_id" in t
104
+ }
105
+ _turns = [
106
+ TurnResolution.from_dict(t)
107
+ for t in raw_turns
108
+ if isinstance(t, dict)
109
+ ]
110
+ _project_store = store
111
+ logger.info(
112
+ "session_continuity: bound project %s (%d threads, %d turns restored)",
113
+ new_id, len(_threads), len(_turns),
114
+ )
115
+ return new_id
116
+
117
+
118
+ def ensure_project_store_bound(ctx) -> Optional[str]:
119
+ """Lazy bind on first use — for tools called before lifespan could reach Ableton.
120
+
121
+ ``ctx`` is a FastMCP Context; reads the ``ableton`` connection from
122
+ ``ctx.lifespan_context`` and fetches session info to compute the project
123
+ hash. Safe to call on every turn — if already bound to this project, it's
124
+ a no-op. Returns the project_id or ``None`` on failure.
125
+ """
126
+ if _project_store is not None:
127
+ return getattr(_project_store, "project_id", None)
128
+ try:
129
+ ableton = ctx.lifespan_context.get("ableton")
130
+ if ableton is None:
131
+ return None
132
+ info = ableton.send_command("get_session_info")
133
+ if isinstance(info, dict) and not info.get("error"):
134
+ return bind_project_store_from_session(info)
135
+ except Exception as exc:
136
+ logger.debug("ensure_project_store_bound: %s", exc)
137
+ return None
138
+
139
+
47
140
  # ── Session story ─────────────────────────────────────────────────
48
141
 
49
142
 
@@ -0,0 +1,39 @@
1
+ """Analyzer helpers — pure-computation + context accessors split out from analyzer.py.
2
+
3
+ The public tool surface (32 ``@mcp.tool()`` functions) still lives in
4
+ ``mcp_server/tools/analyzer.py`` — moving decorators across files risks
5
+ reordering FastMCP's tool registration. This package only holds the
6
+ supporting code that ``analyzer.py`` used to carry inline:
7
+
8
+ - ``context`` — SpectralCache + M4LBridge accessors and the analyzer
9
+ health check that formats user-facing error messages.
10
+ - ``sample`` — Simpler post-load hygiene (Snap=0, warped-loop defaults,
11
+ sample-name verification) + filename helpers.
12
+ - ``flucoma`` — FluCoMa-specific hint formatting + pitch-name tables.
13
+
14
+ Re-exports the public-ish helpers (``_`` prefix is intentional — these
15
+ are implementation details of ``analyzer.py``, not tools in their own
16
+ right) so existing ``from .tools.analyzer import _foo`` imports in tests
17
+ continue to resolve via the thin analyzer module.
18
+ """
19
+
20
+ from .context import _get_spectral, _get_m4l, _require_analyzer
21
+ from .sample import (
22
+ _BPM_IN_FILENAME_RE,
23
+ _filename_stem,
24
+ _is_warped_loop,
25
+ _simpler_post_load_hygiene,
26
+ )
27
+ from .flucoma import PITCH_NAMES, _flucoma_hint
28
+
29
+ __all__ = [
30
+ "_get_spectral",
31
+ "_get_m4l",
32
+ "_require_analyzer",
33
+ "_BPM_IN_FILENAME_RE",
34
+ "_filename_stem",
35
+ "_is_warped_loop",
36
+ "_simpler_post_load_hygiene",
37
+ "PITCH_NAMES",
38
+ "_flucoma_hint",
39
+ ]