livepilot 1.10.7 → 1.10.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +254 -0
- package/README.md +19 -17
- package/bin/livepilot.js +146 -28
- package/installer/install.js +117 -11
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/atlas/__init__.py +39 -7
- package/mcp_server/atlas/tools.py +56 -15
- package/mcp_server/composer/layer_planner.py +27 -0
- package/mcp_server/composer/prompt_parser.py +15 -6
- package/mcp_server/connection.py +11 -3
- package/mcp_server/corpus/__init__.py +14 -4
- package/mcp_server/evaluation/fabric.py +62 -1
- package/mcp_server/m4l_bridge.py +63 -12
- package/mcp_server/project_brain/automation_graph.py +23 -1
- package/mcp_server/project_brain/builder.py +2 -0
- package/mcp_server/project_brain/models.py +20 -1
- package/mcp_server/project_brain/tools.py +10 -3
- package/mcp_server/runtime/execution_router.py +16 -2
- package/mcp_server/runtime/remote_commands.py +6 -0
- package/mcp_server/sample_engine/models.py +22 -3
- package/mcp_server/semantic_moves/__init__.py +1 -0
- package/mcp_server/semantic_moves/compiler.py +9 -1
- package/mcp_server/semantic_moves/device_creation_compilers.py +47 -0
- package/mcp_server/semantic_moves/mix_compilers.py +170 -0
- package/mcp_server/semantic_moves/mix_moves.py +1 -1
- package/mcp_server/semantic_moves/models.py +5 -0
- package/mcp_server/semantic_moves/tools.py +154 -35
- package/mcp_server/server.py +147 -17
- package/mcp_server/services/singletons.py +68 -0
- package/mcp_server/session_continuity/models.py +13 -0
- package/mcp_server/session_continuity/tools.py +2 -0
- package/mcp_server/session_continuity/tracker.py +93 -0
- package/mcp_server/splice_client/client.py +29 -8
- package/mcp_server/tools/_analyzer_engine/__init__.py +39 -0
- package/mcp_server/tools/_analyzer_engine/context.py +103 -0
- package/mcp_server/tools/_analyzer_engine/flucoma.py +23 -0
- package/mcp_server/tools/_analyzer_engine/sample.py +122 -0
- package/mcp_server/tools/_motif_engine.py +19 -4
- package/mcp_server/tools/analyzer.py +25 -180
- package/mcp_server/tools/clips.py +240 -2
- package/mcp_server/tools/midi_io.py +10 -0
- package/mcp_server/tools/tracks.py +1 -1
- package/mcp_server/tools/transport.py +59 -4
- package/mcp_server/translation_engine/tools.py +8 -4
- package/package.json +25 -3
- package/remote_script/LivePilot/__init__.py +36 -9
- package/remote_script/LivePilot/arrangement.py +12 -2
- package/remote_script/LivePilot/browser.py +16 -6
- package/remote_script/LivePilot/devices.py +10 -5
- package/remote_script/LivePilot/notes.py +13 -2
- package/remote_script/LivePilot/server.py +51 -13
- package/remote_script/LivePilot/version_detect.py +7 -4
- package/server.json +20 -0
- package/.claude-plugin/marketplace.json +0 -21
- package/.mcp.json.disabled +0 -9
- package/.mcpbignore +0 -60
- package/AGENTS.md +0 -46
- package/BUGS.md +0 -1570
- package/CODE_OF_CONDUCT.md +0 -27
- package/CONTRIBUTING.md +0 -131
- package/SECURITY.md +0 -48
- package/livepilot/.Codex-plugin/plugin.json +0 -8
- package/livepilot/.claude-plugin/plugin.json +0 -8
- package/livepilot/agents/livepilot-producer/AGENT.md +0 -313
- package/livepilot/commands/arrange.md +0 -47
- package/livepilot/commands/beat.md +0 -77
- package/livepilot/commands/evaluate.md +0 -49
- package/livepilot/commands/memory.md +0 -22
- package/livepilot/commands/mix.md +0 -44
- package/livepilot/commands/perform.md +0 -42
- package/livepilot/commands/session.md +0 -13
- package/livepilot/commands/sounddesign.md +0 -43
- package/livepilot/skills/livepilot-arrangement/SKILL.md +0 -155
- package/livepilot/skills/livepilot-composition-engine/SKILL.md +0 -107
- package/livepilot/skills/livepilot-composition-engine/references/form-patterns.md +0 -97
- package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +0 -102
- package/livepilot/skills/livepilot-core/SKILL.md +0 -184
- package/livepilot/skills/livepilot-core/references/ableton-workflow-patterns.md +0 -831
- package/livepilot/skills/livepilot-core/references/automation-atlas.md +0 -272
- package/livepilot/skills/livepilot-core/references/device-atlas/00-index.md +0 -110
- package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +0 -687
- package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +0 -753
- package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +0 -525
- package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +0 -402
- package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +0 -963
- package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +0 -874
- package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +0 -571
- package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +0 -714
- package/livepilot/skills/livepilot-core/references/device-atlas/synths-native.md +0 -953
- package/livepilot/skills/livepilot-core/references/device-knowledge/00-index.md +0 -34
- package/livepilot/skills/livepilot-core/references/device-knowledge/automation-as-music.md +0 -204
- package/livepilot/skills/livepilot-core/references/device-knowledge/chains-genre.md +0 -173
- package/livepilot/skills/livepilot-core/references/device-knowledge/creative-thinking.md +0 -211
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-distortion.md +0 -188
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-space.md +0 -162
- package/livepilot/skills/livepilot-core/references/device-knowledge/effects-spectral.md +0 -229
- package/livepilot/skills/livepilot-core/references/device-knowledge/instruments-synths.md +0 -243
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +0 -352
- package/livepilot/skills/livepilot-core/references/memory-guide.md +0 -107
- package/livepilot/skills/livepilot-core/references/midi-recipes.md +0 -402
- package/livepilot/skills/livepilot-core/references/mixing-patterns.md +0 -578
- package/livepilot/skills/livepilot-core/references/overview.md +0 -290
- package/livepilot/skills/livepilot-core/references/sample-manipulation.md +0 -724
- package/livepilot/skills/livepilot-core/references/sound-design-deep.md +0 -140
- package/livepilot/skills/livepilot-core/references/sound-design.md +0 -393
- package/livepilot/skills/livepilot-devices/SKILL.md +0 -169
- package/livepilot/skills/livepilot-evaluation/SKILL.md +0 -156
- package/livepilot/skills/livepilot-evaluation/references/capability-modes.md +0 -118
- package/livepilot/skills/livepilot-evaluation/references/evaluation-contracts.md +0 -121
- package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +0 -110
- package/livepilot/skills/livepilot-mix-engine/SKILL.md +0 -123
- package/livepilot/skills/livepilot-mix-engine/references/mix-critics.md +0 -143
- package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +0 -105
- package/livepilot/skills/livepilot-mixing/SKILL.md +0 -157
- package/livepilot/skills/livepilot-notes/SKILL.md +0 -130
- package/livepilot/skills/livepilot-performance-engine/SKILL.md +0 -122
- package/livepilot/skills/livepilot-performance-engine/references/performance-safety.md +0 -98
- package/livepilot/skills/livepilot-release/SKILL.md +0 -130
- package/livepilot/skills/livepilot-sample-engine/SKILL.md +0 -105
- package/livepilot/skills/livepilot-sample-engine/references/sample-critics.md +0 -87
- package/livepilot/skills/livepilot-sample-engine/references/sample-philosophy.md +0 -51
- package/livepilot/skills/livepilot-sample-engine/references/sample-techniques.md +0 -131
- package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +0 -168
- package/livepilot/skills/livepilot-sound-design-engine/references/patch-model.md +0 -119
- package/livepilot/skills/livepilot-sound-design-engine/references/sound-design-critics.md +0 -118
- package/livepilot/skills/livepilot-wonder/SKILL.md +0 -79
- package/m4l_device/LivePilot_Analyzer.amxd.pre-presentation-backup +0 -0
- package/m4l_device/LivePilot_Analyzer.maxpat +0 -2705
- package/m4l_device/LivePilot_Analyzer.maxproj +0 -53
- package/manifest.json +0 -91
- package/mcp_server/splice_client/protos/app_pb2.pyi +0 -1153
- package/scripts/generate_tool_catalog.py +0 -106
- package/scripts/sync_metadata.py +0 -349
|
@@ -107,16 +107,137 @@ def preview_semantic_move(
|
|
|
107
107
|
return result
|
|
108
108
|
|
|
109
109
|
|
|
110
|
+
def _build_taste_context(ctx: Context) -> dict:
|
|
111
|
+
"""Pull the active taste graph for ranking, with defensive fallbacks.
|
|
112
|
+
|
|
113
|
+
Returns a dict with ``dimension_weights``, ``dimension_avoidances``,
|
|
114
|
+
``move_family_scores`` (family → score), and ``evidence_count``.
|
|
115
|
+
Empty dicts when no taste has been recorded yet — the ranker then
|
|
116
|
+
collapses to pure keyword matching, which is the correct behavior for
|
|
117
|
+
a cold-start user with no history.
|
|
118
|
+
"""
|
|
119
|
+
try:
|
|
120
|
+
from ..memory.taste_graph import build_taste_graph
|
|
121
|
+
from ..memory.taste_memory import TasteMemoryStore
|
|
122
|
+
from ..memory.anti_memory import AntiMemoryStore
|
|
123
|
+
|
|
124
|
+
taste_store = ctx.lifespan_context.setdefault("taste_memory", TasteMemoryStore())
|
|
125
|
+
anti_store = ctx.lifespan_context.setdefault("anti_memory", AntiMemoryStore())
|
|
126
|
+
graph = build_taste_graph(taste_store=taste_store, anti_store=anti_store)
|
|
127
|
+
|
|
128
|
+
move_family_scores: dict[str, float] = {}
|
|
129
|
+
for family, entry in getattr(graph, "move_family_scores", {}).items():
|
|
130
|
+
score = getattr(entry, "score", None)
|
|
131
|
+
if isinstance(score, (int, float)):
|
|
132
|
+
move_family_scores[family] = float(score)
|
|
133
|
+
|
|
134
|
+
return {
|
|
135
|
+
"dimension_weights": dict(getattr(graph, "dimension_weights", {}) or {}),
|
|
136
|
+
"dimension_avoidances": dict(getattr(graph, "dimension_avoidances", {}) or {}),
|
|
137
|
+
"move_family_scores": move_family_scores,
|
|
138
|
+
"evidence_count": int(getattr(graph, "evidence_count", 0) or 0),
|
|
139
|
+
}
|
|
140
|
+
except Exception as exc:
|
|
141
|
+
logger.debug("_build_taste_context failed: %s", exc)
|
|
142
|
+
return {
|
|
143
|
+
"dimension_weights": {},
|
|
144
|
+
"dimension_avoidances": {},
|
|
145
|
+
"move_family_scores": {},
|
|
146
|
+
"evidence_count": 0,
|
|
147
|
+
}
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def _score_move_for_request(move, request_lower: str, request_words: set, taste: dict) -> tuple[float, dict]:
|
|
151
|
+
"""Compute the composite score for a single move.
|
|
152
|
+
|
|
153
|
+
Composition:
|
|
154
|
+
0.55 × keyword overlap (intent + move_id + targets)
|
|
155
|
+
0.30 × taste alignment (from taste_graph.dimension_weights on move.targets)
|
|
156
|
+
0.15 × (1 - anti avoidance penalty) (from dimension_avoidances)
|
|
157
|
+
|
|
158
|
+
± up to 0.10 family bonus/penalty from move_family_scores[family].
|
|
159
|
+
|
|
160
|
+
When the user has no recorded taste (evidence_count == 0), the taste
|
|
161
|
+
and anti-penalty components collapse to neutral 0.5 so cold-start
|
|
162
|
+
behavior stays identical to the old keyword-only ranker.
|
|
163
|
+
"""
|
|
164
|
+
# ── Keyword overlap component (0..1) ──────────────────────────────
|
|
165
|
+
intent_lower = move.intent.lower()
|
|
166
|
+
move_words = set(move.move_id.replace("_", " ").split())
|
|
167
|
+
intent_words = set(intent_lower.split())
|
|
168
|
+
|
|
169
|
+
overlap = request_words & (move_words | intent_words)
|
|
170
|
+
keyword_score = min(1.0, len(overlap) * 0.3)
|
|
171
|
+
|
|
172
|
+
for dim in move.targets:
|
|
173
|
+
if dim.lower() in request_lower:
|
|
174
|
+
keyword_score = min(1.0, keyword_score + 0.2)
|
|
175
|
+
|
|
176
|
+
if move.move_id.replace("_", " ") in request_lower:
|
|
177
|
+
keyword_score = 1.0
|
|
178
|
+
|
|
179
|
+
# ── Taste alignment component (0..1) ──────────────────────────────
|
|
180
|
+
evidence_count = taste["evidence_count"]
|
|
181
|
+
dim_weights = taste["dimension_weights"]
|
|
182
|
+
dim_avoid = taste["dimension_avoidances"]
|
|
183
|
+
|
|
184
|
+
if evidence_count > 0 and move.targets:
|
|
185
|
+
# Average dimension_weights for this move's targets; weights are
|
|
186
|
+
# -1..1 with 0 meaning unknown. Remap to 0..1 so "neutral" is 0.5.
|
|
187
|
+
raw_taste = [
|
|
188
|
+
dim_weights.get(dim, 0.0) for dim in move.targets
|
|
189
|
+
]
|
|
190
|
+
taste_alignment = sum((w + 1.0) / 2.0 for w in raw_taste) / len(raw_taste)
|
|
191
|
+
avoidance = sum(
|
|
192
|
+
dim_avoid.get(dim, 0.0) for dim in move.targets
|
|
193
|
+
) / len(move.targets)
|
|
194
|
+
avoidance = max(0.0, min(1.0, avoidance))
|
|
195
|
+
else:
|
|
196
|
+
taste_alignment = 0.5
|
|
197
|
+
avoidance = 0.0
|
|
198
|
+
|
|
199
|
+
composite = (
|
|
200
|
+
0.55 * keyword_score
|
|
201
|
+
+ 0.30 * taste_alignment
|
|
202
|
+
+ 0.15 * (1.0 - avoidance)
|
|
203
|
+
)
|
|
204
|
+
|
|
205
|
+
# ── Family bonus/penalty (±0.1) ────────────────────────────────────
|
|
206
|
+
family_bonus = 0.0
|
|
207
|
+
family_score = taste["move_family_scores"].get(move.family)
|
|
208
|
+
if family_score is not None:
|
|
209
|
+
# family score is 0..1 with 0.5 neutral; remap to -0.1..+0.1
|
|
210
|
+
family_bonus = (family_score - 0.5) * 0.2
|
|
211
|
+
composite += family_bonus
|
|
212
|
+
|
|
213
|
+
composite = max(0.0, min(1.0, composite))
|
|
214
|
+
|
|
215
|
+
breakdown = {
|
|
216
|
+
"keyword_score": round(keyword_score, 3),
|
|
217
|
+
"taste_alignment": round(taste_alignment, 3),
|
|
218
|
+
"avoidance_penalty": round(avoidance, 3),
|
|
219
|
+
"family_bonus": round(family_bonus, 3),
|
|
220
|
+
"evidence_count": evidence_count,
|
|
221
|
+
}
|
|
222
|
+
return composite, breakdown
|
|
223
|
+
|
|
224
|
+
|
|
110
225
|
@mcp.tool()
|
|
111
226
|
def propose_next_best_move(
|
|
112
227
|
ctx: Context,
|
|
113
228
|
request_text: str,
|
|
114
229
|
limit: int = 3,
|
|
115
230
|
) -> dict:
|
|
116
|
-
"""Propose the best semantic moves for a natural language request
|
|
231
|
+
"""Propose the best semantic moves for a natural language request, ranked
|
|
232
|
+
by keyword fit AND the active taste graph.
|
|
117
233
|
|
|
118
|
-
|
|
119
|
-
|
|
234
|
+
Shipped in v1.10.9: ranking is no longer pure keyword overlap — it now
|
|
235
|
+
blends keyword match with taste alignment (``dimension_weights`` on each
|
|
236
|
+
move's targets), an anti-preference penalty (``dimension_avoidances``),
|
|
237
|
+
and a small family bonus from ``move_family_scores``. Cold-start users
|
|
238
|
+
with zero recorded evidence get the same ranking as before; users with
|
|
239
|
+
history see recommendations pulled toward dimensions they've kept and
|
|
240
|
+
away from ones they've undone.
|
|
120
241
|
|
|
121
242
|
request_text: what the user wants (e.g., "make this punchier",
|
|
122
243
|
"tighten the low end", "reduce repetition")
|
|
@@ -125,50 +246,37 @@ def propose_next_best_move(
|
|
|
125
246
|
if not request_text.strip():
|
|
126
247
|
return {"error": "request_text cannot be empty"}
|
|
127
248
|
|
|
128
|
-
# Simple keyword matching for now — will be replaced by conductor
|
|
129
|
-
# routing + taste ranking in V2 Step 7
|
|
130
249
|
request_lower = request_text.lower()
|
|
250
|
+
request_words = set(request_lower.split())
|
|
251
|
+
taste = _build_taste_context(ctx)
|
|
131
252
|
all_moves = list(registry._REGISTRY.values())
|
|
132
253
|
|
|
133
|
-
scored = []
|
|
254
|
+
scored: list[tuple[object, float, dict]] = []
|
|
134
255
|
for move in all_moves:
|
|
135
|
-
score =
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
overlap = request_words & (move_words | intent_words)
|
|
144
|
-
score += len(overlap) * 0.3
|
|
145
|
-
|
|
146
|
-
# Dimension matching
|
|
147
|
-
for dim in move.targets:
|
|
148
|
-
if dim in request_lower:
|
|
149
|
-
score += 0.2
|
|
150
|
-
|
|
151
|
-
# Boost exact intent matches
|
|
152
|
-
if move.move_id.replace("_", " ") in request_lower:
|
|
153
|
-
score += 1.0
|
|
154
|
-
|
|
155
|
-
if score > 0:
|
|
156
|
-
scored.append((move, min(score, 1.0)))
|
|
157
|
-
|
|
158
|
-
# Sort by score descending
|
|
256
|
+
score, breakdown = _score_move_for_request(
|
|
257
|
+
move, request_lower, request_words, taste,
|
|
258
|
+
)
|
|
259
|
+
# Keep only moves that had any keyword signal or strong taste pull —
|
|
260
|
+
# a move with zero keyword overlap AND neutral taste would be noise.
|
|
261
|
+
if breakdown["keyword_score"] > 0 or taste["evidence_count"] >= 5:
|
|
262
|
+
scored.append((move, score, breakdown))
|
|
263
|
+
|
|
159
264
|
scored.sort(key=lambda x: -x[1])
|
|
160
265
|
top = scored[:limit]
|
|
161
266
|
|
|
162
267
|
suggestions = []
|
|
163
|
-
for move, score in top:
|
|
268
|
+
for move, score, breakdown in top:
|
|
164
269
|
d = move.to_dict()
|
|
165
270
|
d["match_score"] = round(score, 3)
|
|
271
|
+
d["score_breakdown"] = breakdown
|
|
166
272
|
suggestions.append(d)
|
|
167
273
|
|
|
168
274
|
return {
|
|
169
275
|
"request": request_text,
|
|
170
276
|
"suggestions": suggestions,
|
|
171
277
|
"count": len(suggestions),
|
|
278
|
+
"taste_active": taste["evidence_count"] > 0,
|
|
279
|
+
"taste_evidence_count": taste["evidence_count"],
|
|
172
280
|
}
|
|
173
281
|
|
|
174
282
|
|
|
@@ -229,10 +337,21 @@ async def apply_semantic_move(
|
|
|
229
337
|
# explore mode — execute through the async router
|
|
230
338
|
from ..runtime.execution_router import execute_plan_steps_async
|
|
231
339
|
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
340
|
+
# Propagate the optional backend annotation through to the router so a
|
|
341
|
+
# compiler that's certain about a step's backend (e.g. bridge_command for
|
|
342
|
+
# capture_audio) can short-circuit classify_step(). Steps without backend
|
|
343
|
+
# fall back to the classifier as before.
|
|
344
|
+
def _step_to_dict(step):
|
|
345
|
+
d = {
|
|
346
|
+
"tool": step.tool,
|
|
347
|
+
"params": step.params,
|
|
348
|
+
"description": step.description,
|
|
349
|
+
}
|
|
350
|
+
if getattr(step, "backend", None):
|
|
351
|
+
d["backend"] = step.backend
|
|
352
|
+
return d
|
|
353
|
+
|
|
354
|
+
step_dicts = [_step_to_dict(step) for step in plan.steps]
|
|
236
355
|
bridge = ctx.lifespan_context.get("m4l")
|
|
237
356
|
mcp_registry = ctx.lifespan_context.get("mcp_dispatch", {})
|
|
238
357
|
exec_results = await execute_plan_steps_async(
|
package/mcp_server/server.py
CHANGED
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
|
|
3
3
|
from contextlib import asynccontextmanager
|
|
4
4
|
import asyncio
|
|
5
|
+
import logging
|
|
5
6
|
import os
|
|
6
7
|
import subprocess
|
|
7
8
|
|
|
@@ -10,6 +11,12 @@ from fastmcp import FastMCP, Context # noqa: F401
|
|
|
10
11
|
from .connection import AbletonConnection
|
|
11
12
|
from .m4l_bridge import SpectralCache, SpectralReceiver, M4LBridge
|
|
12
13
|
|
|
14
|
+
# Logger must be defined before any function uses it — several module-level
|
|
15
|
+
# helpers below (e.g. _master_has_livepilot_analyzer) call logger.debug on
|
|
16
|
+
# the import-time code path, so defining logger later raised NameError when
|
|
17
|
+
# those helpers fired from a tool module's module-level init.
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
13
20
|
|
|
14
21
|
def _identify_port_holder(port: int) -> str | None:
|
|
15
22
|
"""Identify which process holds the given UDP port (for logging only).
|
|
@@ -137,6 +144,28 @@ async def _warm_analyzer_bridge(
|
|
|
137
144
|
await asyncio.sleep(0.05)
|
|
138
145
|
|
|
139
146
|
|
|
147
|
+
def _bind_session_continuity(ableton: AbletonConnection) -> None:
|
|
148
|
+
"""Hydrate the session-continuity tracker from persistent per-project state.
|
|
149
|
+
|
|
150
|
+
Fetches a minimal session fingerprint (tempo, signature, track/scene
|
|
151
|
+
layout) from the Remote Script, computes a project hash, and asks the
|
|
152
|
+
tracker to bind the matching ProjectStore + restore any previously-saved
|
|
153
|
+
creative threads and turn resolutions from disk.
|
|
154
|
+
|
|
155
|
+
Never raises: startup must succeed even if Ableton isn't reachable. In
|
|
156
|
+
that case, the tracker stays in-memory and the first ``record_turn_*`` /
|
|
157
|
+
``open_thread`` call will lazy-bind via ``ensure_project_store_bound()``.
|
|
158
|
+
"""
|
|
159
|
+
try:
|
|
160
|
+
from .session_continuity.tracker import bind_project_store_from_session
|
|
161
|
+
|
|
162
|
+
info = ableton.send_command("get_session_info")
|
|
163
|
+
if isinstance(info, dict) and not info.get("error"):
|
|
164
|
+
bind_project_store_from_session(info)
|
|
165
|
+
except Exception as exc:
|
|
166
|
+
logger.debug("_bind_session_continuity: lazy-bind (reason: %s)", exc)
|
|
167
|
+
|
|
168
|
+
|
|
140
169
|
@asynccontextmanager
|
|
141
170
|
async def lifespan(server):
|
|
142
171
|
"""Create and yield the shared AbletonConnection + M4L bridge + registries."""
|
|
@@ -196,6 +225,12 @@ async def lifespan(server):
|
|
|
196
225
|
_check_remote_script_version(ableton)
|
|
197
226
|
if bridge_state["transport"] is not None:
|
|
198
227
|
await _warm_analyzer_bridge(ableton, spectral)
|
|
228
|
+
# Bind per-project persistent store so creative threads and turn
|
|
229
|
+
# history survive server restarts. Until v1.10.9 this was plumbed
|
|
230
|
+
# through the tracker but never called — threads/turns were effectively
|
|
231
|
+
# in-memory only. If Ableton isn't reachable yet, tools will lazy-bind
|
|
232
|
+
# on first write via ensure_project_store_bound().
|
|
233
|
+
_bind_session_continuity(ableton)
|
|
199
234
|
yield {
|
|
200
235
|
"ableton": ableton,
|
|
201
236
|
"spectral": spectral,
|
|
@@ -264,9 +299,6 @@ from .device_forge import tools as device_forge_tools # noqa: F401, E40
|
|
|
264
299
|
from .sample_engine import tools as sample_engine_tools # noqa: F401, E402
|
|
265
300
|
from .atlas import tools as atlas_tools # noqa: F401, E402
|
|
266
301
|
from .composer import tools as composer_tools # noqa: F401, E402
|
|
267
|
-
import logging
|
|
268
|
-
|
|
269
|
-
logger = logging.getLogger(__name__)
|
|
270
302
|
|
|
271
303
|
# ---------------------------------------------------------------------------
|
|
272
304
|
# Schema coercion patch — accept strings for numeric parameters
|
|
@@ -308,28 +340,125 @@ def _coerce_schema_property(prop: dict) -> None:
|
|
|
308
340
|
|
|
309
341
|
|
|
310
342
|
def _get_all_tools():
|
|
311
|
-
"""Get all registered tools
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
343
|
+
"""Get all registered tools — defends against FastMCP internal drift.
|
|
344
|
+
|
|
345
|
+
FastMCP's public API doesn't expose the registry as of 3.2.x (see
|
|
346
|
+
docs/FASTMCP_UPSTREAM_FR.md). Until it does, we probe known internal
|
|
347
|
+
attribute paths. Each probe fires in try/except so a structural
|
|
348
|
+
rearrangement (e.g. ``_components`` renamed under 3.3+) falls through
|
|
349
|
+
to the next path rather than exploding.
|
|
350
|
+
|
|
351
|
+
WARNING: Accesses FastMCP private internals. Pinned to
|
|
352
|
+
fastmcp>=3.0.0,<3.3.0 in requirements.txt. The startup self-test
|
|
353
|
+
(_assert_tool_registry_accessible) will fail loudly if every probe
|
|
354
|
+
returns empty — better than silently returning [] and disabling
|
|
355
|
+
schema coercion.
|
|
316
356
|
"""
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
322
|
-
|
|
357
|
+
probes = [
|
|
358
|
+
# FastMCP 0.x: mcp._tool_manager._tools (dict of name -> Tool)
|
|
359
|
+
("_tool_manager._tools", lambda: list(mcp._tool_manager._tools.values())),
|
|
360
|
+
# FastMCP 3.0–3.2: mcp._local_provider._components
|
|
361
|
+
(
|
|
362
|
+
"_local_provider._components",
|
|
363
|
+
lambda: list(mcp._local_provider._components.values()),
|
|
364
|
+
),
|
|
365
|
+
# FastMCP 3.3+ speculative: mcp._local_provider._tools (anticipated
|
|
366
|
+
# rename based on naming conventions in other providers). Kept here
|
|
367
|
+
# so a future bump surfaces a partial match rather than a full miss.
|
|
368
|
+
(
|
|
369
|
+
"_local_provider._tools",
|
|
370
|
+
lambda: list(mcp._local_provider._tools.values()),
|
|
371
|
+
),
|
|
372
|
+
# Public-API future path (what we're asking for in the upstream FR);
|
|
373
|
+
# harmless to probe now so that once it ships we can lift the ceiling
|
|
374
|
+
# without touching this function again.
|
|
375
|
+
("list_tools", lambda: list(mcp.list_tools())),
|
|
376
|
+
]
|
|
377
|
+
for label, fn in probes:
|
|
378
|
+
try:
|
|
379
|
+
tools = fn()
|
|
380
|
+
except (AttributeError, TypeError):
|
|
381
|
+
continue
|
|
382
|
+
except Exception: # noqa: BLE001 — any error from an internal probe means "skip"
|
|
383
|
+
continue
|
|
384
|
+
if tools:
|
|
385
|
+
return tools
|
|
386
|
+
|
|
387
|
+
# All probes empty. Surface fastmcp version + attempted paths so the
|
|
388
|
+
# breakage is diagnosable without re-reading the code.
|
|
323
389
|
import sys
|
|
324
|
-
|
|
390
|
+
try:
|
|
391
|
+
import fastmcp as _fm
|
|
392
|
+
fm_version = getattr(_fm, "__version__", "unknown")
|
|
393
|
+
except Exception: # noqa: BLE001
|
|
394
|
+
fm_version = "unknown"
|
|
325
395
|
print(
|
|
326
|
-
"LivePilot:
|
|
327
|
-
"
|
|
396
|
+
"LivePilot: ERROR — could not access FastMCP tool registry "
|
|
397
|
+
f"(fastmcp=={fm_version}). Tried: "
|
|
398
|
+
+ ", ".join(label for label, _ in probes)
|
|
399
|
+
+ ". Schema coercion and tool-catalog generation will be broken. "
|
|
400
|
+
"If FastMCP updated its internals, see docs/FASTMCP_UPSTREAM_FR.md.",
|
|
328
401
|
file=sys.stderr,
|
|
329
402
|
)
|
|
330
403
|
return []
|
|
331
404
|
|
|
332
405
|
|
|
406
|
+
def _assert_tool_registry_accessible() -> None:
|
|
407
|
+
"""Loudly fail startup if the FastMCP registry probe returns nothing.
|
|
408
|
+
|
|
409
|
+
Called once at module import, just before schema patching. The schema
|
|
410
|
+
patch silently no-ops on an empty registry, so without this assertion
|
|
411
|
+
a FastMCP-internals rename would degrade silently and produce a server
|
|
412
|
+
with 324 tools but no string-to-number coercion — a subtle, hard-to-
|
|
413
|
+
diagnose class of failure we've paid for once already.
|
|
414
|
+
|
|
415
|
+
Reads the expected count from ``tests/test_tools_contract.py`` (same
|
|
416
|
+
source of truth sync_metadata.py uses), so no second magic number.
|
|
417
|
+
"""
|
|
418
|
+
import re
|
|
419
|
+
import sys
|
|
420
|
+
|
|
421
|
+
try:
|
|
422
|
+
contract_src = (
|
|
423
|
+
(__file__.rsplit("/", 2)[0] + "/tests/test_tools_contract.py")
|
|
424
|
+
if "__file__" in globals() else None
|
|
425
|
+
)
|
|
426
|
+
# Prefer an absolute path via Path for reliability:
|
|
427
|
+
from pathlib import Path
|
|
428
|
+
contract_path = Path(__file__).resolve().parents[1] / "tests" / "test_tools_contract.py"
|
|
429
|
+
expected = None
|
|
430
|
+
if contract_path.exists():
|
|
431
|
+
match = re.search(
|
|
432
|
+
r"assert len\(tools\) == (\d+)",
|
|
433
|
+
contract_path.read_text(encoding="utf-8"),
|
|
434
|
+
)
|
|
435
|
+
if match:
|
|
436
|
+
expected = int(match.group(1))
|
|
437
|
+
except Exception: # noqa: BLE001 — self-test must not block startup
|
|
438
|
+
expected = None
|
|
439
|
+
|
|
440
|
+
actual = len(_get_all_tools())
|
|
441
|
+
if actual == 0:
|
|
442
|
+
# Registry probe returned empty — this is the regression the test guards.
|
|
443
|
+
# Don't sys.exit (some test harnesses import server.py without a live
|
|
444
|
+
# FastMCP); print a loud diagnostic and let downstream code react.
|
|
445
|
+
print(
|
|
446
|
+
"LivePilot: STARTUP SELF-TEST FAILED — _get_all_tools() returned 0. "
|
|
447
|
+
"FastMCP internals likely changed. Verify requirements.txt pin "
|
|
448
|
+
"(fastmcp>=3.0.0,<3.3.0) matches the installed version.",
|
|
449
|
+
file=sys.stderr,
|
|
450
|
+
)
|
|
451
|
+
return
|
|
452
|
+
if expected is not None and actual != expected:
|
|
453
|
+
print(
|
|
454
|
+
f"LivePilot: STARTUP SELF-TEST WARNING — _get_all_tools() "
|
|
455
|
+
f"returned {actual} tools, tests/test_tools_contract.py expects "
|
|
456
|
+
f"{expected}. If you've added/removed tools, update the contract "
|
|
457
|
+
"and run scripts/sync_metadata.py --fix.",
|
|
458
|
+
file=sys.stderr,
|
|
459
|
+
)
|
|
460
|
+
|
|
461
|
+
|
|
333
462
|
def _patch_tool_schemas() -> None:
|
|
334
463
|
"""Post-process all registered tool schemas for string coercion."""
|
|
335
464
|
for tool in _get_all_tools():
|
|
@@ -342,6 +471,7 @@ def _patch_tool_schemas() -> None:
|
|
|
342
471
|
if isinstance(definition, dict):
|
|
343
472
|
_coerce_schema_property(definition)
|
|
344
473
|
|
|
474
|
+
_assert_tool_registry_accessible()
|
|
345
475
|
_patch_tool_schemas()
|
|
346
476
|
|
|
347
477
|
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"""Thread-safe singleton helpers.
|
|
2
|
+
|
|
3
|
+
The server has several subsystems (atlas, corpus, sample-engine indexes)
|
|
4
|
+
that are loaded lazily into module-level globals via a check-then-set
|
|
5
|
+
pattern. Under FastMCP's async concurrency that pattern races: two
|
|
6
|
+
handlers can both observe ``None`` and both construct the (expensive)
|
|
7
|
+
object. Most of the time the GIL hides the race, but when it doesn't you
|
|
8
|
+
get redundant I/O and, worse, one thread's half-parsed state overwriting
|
|
9
|
+
the other's completed state.
|
|
10
|
+
|
|
11
|
+
This module provides a small helper that wraps a factory in a lock and
|
|
12
|
+
optionally tracks an on-disk mtime for cache invalidation. Use it in
|
|
13
|
+
place of hand-rolled ``_instance = None`` patterns.
|
|
14
|
+
"""
|
|
15
|
+
from __future__ import annotations
|
|
16
|
+
|
|
17
|
+
from pathlib import Path
|
|
18
|
+
from threading import Lock
|
|
19
|
+
from typing import Callable, TypeVar
|
|
20
|
+
|
|
21
|
+
T = TypeVar("T")
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class Singleton:
|
|
25
|
+
"""Lazy, thread-safe singleton with optional mtime-based reload.
|
|
26
|
+
|
|
27
|
+
Example:
|
|
28
|
+
atlas_holder = Singleton(_load_atlas)
|
|
29
|
+
|
|
30
|
+
def get_atlas():
|
|
31
|
+
return atlas_holder.get(reload_if_newer=atlas_path)
|
|
32
|
+
|
|
33
|
+
def on_atlas_rebuild():
|
|
34
|
+
atlas_holder.invalidate()
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
def __init__(self, factory: Callable[[], T]):
|
|
38
|
+
self._factory = factory
|
|
39
|
+
self._instance: T | None = None
|
|
40
|
+
self._mtime: float | None = None
|
|
41
|
+
self._lock = Lock()
|
|
42
|
+
|
|
43
|
+
def get(self, *, reload_if_newer: Path | None = None) -> T:
|
|
44
|
+
with self._lock:
|
|
45
|
+
if self._instance is None:
|
|
46
|
+
self._instance = self._factory()
|
|
47
|
+
if reload_if_newer is not None:
|
|
48
|
+
try:
|
|
49
|
+
self._mtime = reload_if_newer.stat().st_mtime
|
|
50
|
+
except OSError:
|
|
51
|
+
self._mtime = None
|
|
52
|
+
return self._instance
|
|
53
|
+
|
|
54
|
+
if reload_if_newer is not None:
|
|
55
|
+
try:
|
|
56
|
+
current = reload_if_newer.stat().st_mtime
|
|
57
|
+
except OSError:
|
|
58
|
+
return self._instance
|
|
59
|
+
if self._mtime is None or current > self._mtime:
|
|
60
|
+
self._instance = self._factory()
|
|
61
|
+
self._mtime = current
|
|
62
|
+
return self._instance
|
|
63
|
+
|
|
64
|
+
def invalidate(self) -> None:
|
|
65
|
+
"""Discard the cached instance. Next .get() will re-run the factory."""
|
|
66
|
+
with self._lock:
|
|
67
|
+
self._instance = None
|
|
68
|
+
self._mtime = None
|
|
@@ -22,6 +22,13 @@ class CreativeThread:
|
|
|
22
22
|
def to_dict(self) -> dict:
|
|
23
23
|
return asdict(self)
|
|
24
24
|
|
|
25
|
+
@classmethod
|
|
26
|
+
def from_dict(cls, data: dict) -> "CreativeThread":
|
|
27
|
+
"""Rehydrate from persisted dict; unknown keys are ignored so a future
|
|
28
|
+
schema bump won't break load on older on-disk state."""
|
|
29
|
+
allowed = {f for f in cls.__dataclass_fields__}
|
|
30
|
+
return cls(**{k: v for k, v in data.items() if k in allowed})
|
|
31
|
+
|
|
25
32
|
@property
|
|
26
33
|
def is_stale(self) -> bool:
|
|
27
34
|
"""A thread is stale if untouched for >30 minutes."""
|
|
@@ -44,6 +51,12 @@ class TurnResolution:
|
|
|
44
51
|
def to_dict(self) -> dict:
|
|
45
52
|
return asdict(self)
|
|
46
53
|
|
|
54
|
+
@classmethod
|
|
55
|
+
def from_dict(cls, data: dict) -> "TurnResolution":
|
|
56
|
+
"""Rehydrate from persisted dict; unknown keys are ignored."""
|
|
57
|
+
allowed = {f for f in cls.__dataclass_fields__}
|
|
58
|
+
return cls(**{k: v for k, v in data.items() if k in allowed})
|
|
59
|
+
|
|
47
60
|
|
|
48
61
|
@dataclass
|
|
49
62
|
class SessionStory:
|
|
@@ -65,6 +65,7 @@ def record_turn_resolution(
|
|
|
65
65
|
identity_effect: "preserves", "evolves", "contrasts", or "resets"
|
|
66
66
|
user_sentiment: "loved", "liked", "neutral", "disliked", or "hated"
|
|
67
67
|
"""
|
|
68
|
+
tracker.ensure_project_store_bound(ctx)
|
|
68
69
|
turn = tracker.record_turn_resolution(
|
|
69
70
|
request_text=request_text,
|
|
70
71
|
outcome=outcome,
|
|
@@ -130,6 +131,7 @@ def open_creative_thread(
|
|
|
130
131
|
if not description.strip():
|
|
131
132
|
return {"error": "description cannot be empty"}
|
|
132
133
|
|
|
134
|
+
tracker.ensure_project_store_bound(ctx)
|
|
133
135
|
thread = tracker.open_thread(description, domain=domain, priority=priority)
|
|
134
136
|
return thread.to_dict()
|
|
135
137
|
|
|
@@ -44,6 +44,99 @@ def reset_story() -> None:
|
|
|
44
44
|
_project_store = None
|
|
45
45
|
|
|
46
46
|
|
|
47
|
+
def bind_project_store_from_session(session_info: dict) -> Optional[str]:
    """Bind a per-project persistent store and hydrate in-memory state.

    Computes a project fingerprint from ``session_info`` (tempo, time sig,
    song length, track/scene/return layout), opens the matching
    ``ProjectStore`` under ``~/.livepilot/projects/<hash>/``, and rehydrates
    the in-memory ``_threads`` and ``_turns`` from disk so that restarting
    the MCP server preserves the user's creative threads and turn history.

    Returns the project_id (12-char hash) on success, ``None`` on failure
    (so callers can log without aborting startup). If the hash hasn't
    changed since the last bind, this is a no-op — hot path is safe to
    call on every turn.

    Without this function, ``set_project_store()`` existed but nobody
    called it, meaning README's "return to a project with prior creative
    threads intact" was literally false — threads/turns were in-memory
    only and reset on every server restart.
    """
    global _threads, _turns, _project_store

    try:
        from ..persistence.project_store import ProjectStore, project_hash
    except Exception as exc:
        logger.debug("bind_project_store_from_session: import failed: %s", exc)
        return None

    try:
        new_id = project_hash(session_info or {})
    except Exception as exc:
        logger.debug("bind_project_store_from_session: hash failed: %s", exc)
        return None

    # Already bound to this project? Nothing to do.
    if _project_store is not None and getattr(_project_store, "project_id", None) == new_id:
        return new_id

    try:
        store = ProjectStore(new_id)
    except Exception as exc:
        logger.debug("bind_project_store_from_session: store open failed: %s", exc)
        return None

    # Hydrate in-memory threads + turns from the persisted store. We only
    # rebuild what the tracker keeps live — SessionStory is recomputed on
    # each get_session_story() call, so it doesn't need a direct restore.
    try:
        raw_threads = store.get_threads()
        raw_turns = store.get_turns()
    except Exception as exc:
        logger.debug("bind_project_store_from_session: read failed: %s", exc)
        raw_threads, raw_turns = [], []

    # Tolerate individually malformed records: this function's contract is
    # "never abort startup", but from_dict raises TypeError when a required
    # dataclass field is missing from an old/corrupt record — guard each
    # record so one bad entry can't sink the whole bind.
    threads: dict = {}
    for item in raw_threads:
        if isinstance(item, dict) and "thread_id" in item:
            try:
                threads[item["thread_id"]] = CreativeThread.from_dict(item)
            except Exception as exc:
                logger.debug("bind_project_store_from_session: bad thread record skipped: %s", exc)
    turns: list = []
    for item in raw_turns:
        if isinstance(item, dict):
            try:
                turns.append(TurnResolution.from_dict(item))
            except Exception as exc:
                logger.debug("bind_project_store_from_session: bad turn record skipped: %s", exc)

    _threads = threads
    _turns = turns
    _project_store = store
    logger.info(
        "session_continuity: bound project %s (%d threads, %d turns restored)",
        new_id, len(_threads), len(_turns),
    )
    return new_id
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def ensure_project_store_bound(ctx) -> Optional[str]:
    """Bind lazily on first use — for tools that run before lifespan reached Ableton.

    ``ctx`` is a FastMCP Context. Reads the ``ableton`` connection from
    ``ctx.lifespan_context``, fetches session info, and binds the matching
    project store via :func:`bind_project_store_from_session`. Cheap to call
    on every turn: once a store is bound this is an immediate no-op.
    Returns the project_id, or ``None`` when binding isn't possible.
    """
    # Fast path: already bound — report the current project without touching Live.
    if _project_store is not None:
        return getattr(_project_store, "project_id", None)
    try:
        connection = ctx.lifespan_context.get("ableton")
        if connection is not None:
            session = connection.send_command("get_session_info")
            if isinstance(session, dict) and not session.get("error"):
                return bind_project_store_from_session(session)
    except Exception as exc:
        logger.debug("ensure_project_store_bound: %s", exc)
    return None
|
|
138
|
+
|
|
139
|
+
|
|
47
140
|
# ── Session story ─────────────────────────────────────────────────
|
|
48
141
|
|
|
49
142
|
|