livepilot 1.9.21 → 1.9.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/.mcpbignore +40 -0
- package/AGENTS.md +2 -2
- package/CHANGELOG.md +47 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +47 -72
- package/bin/livepilot.js +135 -0
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
- package/livepilot/commands/arrange.md +42 -14
- package/livepilot/commands/beat.md +68 -21
- package/livepilot/commands/evaluate.md +23 -13
- package/livepilot/commands/mix.md +35 -11
- package/livepilot/commands/perform.md +31 -19
- package/livepilot/commands/sounddesign.md +38 -17
- package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
- package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
- package/livepilot/skills/livepilot-core/SKILL.md +60 -4
- package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
- package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
- package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
- package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
- package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
- package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
- package/livepilot/skills/livepilot-core/references/overview.md +4 -4
- package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
- package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
- package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
- package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
- package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
- package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
- package/livepilot/skills/livepilot-release/SKILL.md +15 -15
- package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
- package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
- package/livepilot.mcpb +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/manifest.json +91 -0
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/creative_constraints/__init__.py +6 -0
- package/mcp_server/creative_constraints/engine.py +277 -0
- package/mcp_server/creative_constraints/models.py +75 -0
- package/mcp_server/creative_constraints/tools.py +341 -0
- package/mcp_server/experiment/__init__.py +6 -0
- package/mcp_server/experiment/engine.py +213 -0
- package/mcp_server/experiment/models.py +120 -0
- package/mcp_server/experiment/tools.py +263 -0
- package/mcp_server/hook_hunter/__init__.py +5 -0
- package/mcp_server/hook_hunter/analyzer.py +342 -0
- package/mcp_server/hook_hunter/models.py +57 -0
- package/mcp_server/hook_hunter/tools.py +586 -0
- package/mcp_server/memory/taste_graph.py +261 -0
- package/mcp_server/memory/tools.py +88 -0
- package/mcp_server/mix_engine/critics.py +2 -2
- package/mcp_server/mix_engine/models.py +1 -1
- package/mcp_server/mix_engine/state_builder.py +2 -2
- package/mcp_server/musical_intelligence/__init__.py +8 -0
- package/mcp_server/musical_intelligence/detectors.py +421 -0
- package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
- package/mcp_server/musical_intelligence/tools.py +221 -0
- package/mcp_server/preview_studio/__init__.py +5 -0
- package/mcp_server/preview_studio/engine.py +280 -0
- package/mcp_server/preview_studio/models.py +73 -0
- package/mcp_server/preview_studio/tools.py +423 -0
- package/mcp_server/runtime/session_kernel.py +96 -0
- package/mcp_server/runtime/tools.py +90 -1
- package/mcp_server/semantic_moves/__init__.py +13 -0
- package/mcp_server/semantic_moves/compiler.py +116 -0
- package/mcp_server/semantic_moves/mix_compilers.py +291 -0
- package/mcp_server/semantic_moves/mix_moves.py +157 -0
- package/mcp_server/semantic_moves/models.py +46 -0
- package/mcp_server/semantic_moves/performance_compilers.py +208 -0
- package/mcp_server/semantic_moves/performance_moves.py +81 -0
- package/mcp_server/semantic_moves/registry.py +32 -0
- package/mcp_server/semantic_moves/resolvers.py +126 -0
- package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
- package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
- package/mcp_server/semantic_moves/tools.py +204 -0
- package/mcp_server/semantic_moves/transition_compilers.py +222 -0
- package/mcp_server/semantic_moves/transition_moves.py +76 -0
- package/mcp_server/server.py +10 -0
- package/mcp_server/session_continuity/__init__.py +6 -0
- package/mcp_server/session_continuity/models.py +86 -0
- package/mcp_server/session_continuity/tools.py +230 -0
- package/mcp_server/session_continuity/tracker.py +235 -0
- package/mcp_server/song_brain/__init__.py +6 -0
- package/mcp_server/song_brain/builder.py +477 -0
- package/mcp_server/song_brain/models.py +132 -0
- package/mcp_server/song_brain/tools.py +294 -0
- package/mcp_server/stuckness_detector/__init__.py +5 -0
- package/mcp_server/stuckness_detector/detector.py +400 -0
- package/mcp_server/stuckness_detector/models.py +66 -0
- package/mcp_server/stuckness_detector/tools.py +195 -0
- package/mcp_server/tools/_conductor.py +104 -6
- package/mcp_server/tools/analyzer.py +1 -1
- package/mcp_server/tools/devices.py +34 -0
- package/mcp_server/wonder_mode/__init__.py +6 -0
- package/mcp_server/wonder_mode/diagnosis.py +84 -0
- package/mcp_server/wonder_mode/engine.py +493 -0
- package/mcp_server/wonder_mode/session.py +114 -0
- package/mcp_server/wonder_mode/tools.py +285 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +4 -1
- package/remote_script/LivePilot/devices.py +29 -0
- package/remote_script/LivePilot/tracks.py +11 -4
- package/scripts/generate_tool_catalog.py +131 -0
|
@@ -0,0 +1,66 @@
|
|
|
1
|
+
"""Stuckness Detector data models — pure dataclasses, zero I/O."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import asdict, dataclass, field
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Closed vocabulary of rescue types the detector can diagnose and the
# workflow tools accept.  Order is stable and part of the public surface.
RESCUE_TYPES = [
    "contrast_needed", "section_missing",
    "hook_underdeveloped", "transition_not_earned",
    "overpolished_loop", "identity_unclear",
    "too_dense_to_progress", "too_safe_to_progress",
]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
@dataclass
class StucknessSignal:
    """A single signal contributing to stuckness detection.

    Signals are aggregated into a StucknessReport; ``strength`` weights
    how much this signal contributes to the overall confidence.
    """

    signal_type: str = ""  # category: "repeated_undo", "local_tweaking", "long_loop", etc.
    strength: float = 0.0  # contribution weight in [0, 1]
    evidence: str = ""  # human-readable justification for the signal

    def to_dict(self) -> dict:
        """Serialize to a plain dict (field name -> value)."""
        return asdict(self)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@dataclass
class StucknessReport:
    """Full stuckness analysis for a session.

    Produced by the detector; ``confidence`` (0-1) says how stuck the
    session looks and ``level`` buckets it into one of
    "flowing", "slowing", "stuck", "deeply_stuck".
    """

    confidence: float = 0.0  # 0-1 how stuck the session is
    level: str = "flowing"  # "flowing", "slowing", "stuck", "deeply_stuck"
    signals: list[StucknessSignal] = field(default_factory=list)
    diagnosis: str = ""
    primary_rescue_type: str = ""
    secondary_rescue_types: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Serialize to a plain dict; confidence is rounded to 3 decimals."""
        payload = {"confidence": round(self.confidence, 3)}
        payload["level"] = self.level
        payload["signals"] = [sig.to_dict() for sig in self.signals]
        payload["diagnosis"] = self.diagnosis
        payload["primary_rescue_type"] = self.primary_rescue_type
        payload["secondary_rescue_types"] = self.secondary_rescue_types
        return payload
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass
class RescueSuggestion:
    """A momentum rescue suggestion produced for a StucknessReport."""

    rescue_type: str = ""  # one of RESCUE_TYPES
    title: str = ""  # short human-facing headline
    description: str = ""  # longer explanation of the rescue
    urgency: str = "medium"  # "low", "medium", "high"
    strategies: list[str] = field(default_factory=list)  # ordered concrete actions
    identity_effect: str = "preserves"  # effect on song identity ("preserves" by default)

    def to_dict(self) -> dict:
        """Serialize to a plain dict (field name -> value)."""
        return asdict(self)
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
"""Stuckness Detector MCP tools — 3 tools for momentum rescue.
|
|
2
|
+
|
|
3
|
+
detect_stuckness — identify whether the session is losing momentum
|
|
4
|
+
suggest_momentum_rescue — get strategic rescue suggestions
|
|
5
|
+
start_rescue_workflow — structured step-by-step rescue for a stuckness type
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from fastmcp import Context
|
|
11
|
+
|
|
12
|
+
from ..server import mcp
|
|
13
|
+
from . import detector
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _get_ableton(ctx: Context):
|
|
17
|
+
return ctx.lifespan_context["ableton"]
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _get_action_history(ctx: Context) -> list[dict]:
|
|
21
|
+
"""Get recent action history from the session-scoped action ledger.
|
|
22
|
+
|
|
23
|
+
Returns move entries as dicts for stuckness pattern analysis:
|
|
24
|
+
repeated undos, local-tweaking, loop-without-structure detection.
|
|
25
|
+
Falls back to empty list when no ledger data exists (graceful degradation).
|
|
26
|
+
"""
|
|
27
|
+
try:
|
|
28
|
+
from ..runtime.action_ledger import SessionLedger
|
|
29
|
+
ledger = ctx.lifespan_context.get("action_ledger")
|
|
30
|
+
if isinstance(ledger, SessionLedger):
|
|
31
|
+
recent = ledger.get_recent_moves(limit=20)
|
|
32
|
+
return [e.to_dict() for e in recent]
|
|
33
|
+
except Exception:
|
|
34
|
+
pass
|
|
35
|
+
return []
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _get_session_and_brain(ctx: Context) -> tuple[dict, dict, int]:
    """Collect session info, the current SongBrain snapshot, and scene count.

    Every lookup is best-effort: a failure leaves the corresponding value
    at its empty default so callers never crash on missing state.
    """
    session_info: dict = {}
    song_brain: dict = {}
    section_count = 0
    ableton = _get_ableton(ctx)

    # Session info + scene count come from the live Ableton bridge.
    try:
        session_info = ableton.send_command("get_session_info", {})
        section_count = session_info.get("scene_count", 0)
    except Exception:
        pass  # no live session — defaults stand

    # SongBrain is optional shared state; log (debug builds only) when absent.
    try:
        from ..song_brain.tools import _current_brain

        if _current_brain is not None:
            song_brain = _current_brain.to_dict()
    except Exception as _e:
        if __debug__:
            import sys

            print(f"LivePilot: SongBrain unavailable in stuckness_detector: {_e}", file=sys.stderr)

    return session_info, song_brain, section_count
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@mcp.tool()
def detect_stuckness(ctx: Context) -> dict:
    """Detect whether the session is losing momentum.

    Analyzes action history for stuckness signals:
    - repeated undos
    - many low-impact parameter changes in one area
    - long loop time with no structural edits
    - repeated requests without acceptance
    - too many decorative layers without role clarity
    - unclear song identity

    Returns confidence level, diagnosis, and recommended rescue type.
    Use this proactively when the user seems to be going in circles.
    """
    # Gather inputs in the same order as the other tools: ledger first,
    # then live session state.
    history = _get_action_history(ctx)
    session_info, song_brain, section_count = _get_session_and_brain(ctx)

    return detector.detect_stuckness(
        action_history=history,
        session_info=session_info,
        song_brain=song_brain,
        section_count=section_count,
    ).to_dict()
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@mcp.tool()
def suggest_momentum_rescue(
    ctx: Context,
    mode: str = "gentle",
) -> dict:
    """Suggest strategic moves to restore session momentum.

    First detects stuckness, then generates rescue suggestions.
    In "gentle" mode, provides the top suggestion. In "direct" mode,
    provides up to 3 rescue strategies.

    mode: "gentle" (one suggestion) or "direct" (up to 3 suggestions)

    Returns rescue suggestions with strategies and identity effects.
    """
    # Unknown modes silently fall back to the conservative default.
    mode = mode if mode in ("gentle", "direct") else "gentle"

    history = _get_action_history(ctx)
    session_info, song_brain, section_count = _get_session_and_brain(ctx)
    report = detector.detect_stuckness(
        action_history=history,
        session_info=session_info,
        song_brain=song_brain,
        section_count=section_count,
    )

    result: dict = {"stuckness": report.to_dict()}
    if report.level == "flowing":
        result["note"] = "Session is flowing well — no rescue needed"
        result["suggestions"] = []
        return result

    suggestions = detector.suggest_rescue(report, mode)
    result["suggestions"] = [s.to_dict() for s in suggestions]
    result["suggestion_count"] = len(suggestions)
    return result
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
@mcp.tool()
def start_rescue_workflow(
    ctx: Context,
    rescue_type: str = "",
    kernel_id: str = "",
) -> dict:
    """Start a structured rescue workflow for a specific stuckness type.

    Provides a step-by-step action plan to restore session momentum.
    Each rescue type has targeted strategies with identity-preserving defaults.

    rescue_type: one of "contrast_needed", "section_missing",
                 "hook_underdeveloped", "transition_not_earned",
                 "overpolished_loop", "identity_unclear",
                 "too_dense_to_progress", "too_safe_to_progress"
    kernel_id: optional session kernel reference
    """
    from .models import RESCUE_TYPES, StucknessReport

    # Validate the requested type, surfacing the full catalog on error.
    if not rescue_type:
        return {
            "error": "rescue_type is required",
            "available_types": RESCUE_TYPES,
        }
    if rescue_type not in RESCUE_TYPES:
        return {
            "error": f"Unknown rescue type: {rescue_type}",
            "available_types": RESCUE_TYPES,
        }

    # Synthesize a mid-confidence "stuck" report so the detector yields
    # strategies for exactly this rescue type.
    synthetic = StucknessReport(
        confidence=0.6,
        level="stuck",
        primary_rescue_type=rescue_type,
        secondary_rescue_types=[],
    )
    suggestions = detector.suggest_rescue(synthetic, mode="direct")
    if not suggestions:
        return {"error": f"No rescue strategies available for {rescue_type}"}

    rescue = suggestions[0]

    # One workflow step per strategy, in order.
    steps = [
        {"step": number, "action": strategy, "done": False}
        for number, strategy in enumerate(rescue.strategies, start=1)
    ]

    return {
        "rescue_type": rescue_type,
        "title": rescue.title,
        "description": rescue.description,
        "steps": steps,
        "identity_effect": rescue.identity_effect,
        "urgency": rescue.urgency,
        "note": "Complete steps in order. Each step should be followed by evaluation.",
    }
|
|
@@ -39,6 +39,12 @@ class ConductorPlan:
|
|
|
39
39
|
notes: list[str] = field(default_factory=list)
|
|
40
40
|
budget: Optional[dict] = None
|
|
41
41
|
|
|
42
|
+
# V2 additions
|
|
43
|
+
semantic_moves: list[dict] = field(default_factory=list)
|
|
44
|
+
workflow_mode: str = "guided_workflow" # quick_fix | guided_workflow | agentic_loop | creative_search | performance_safe
|
|
45
|
+
use_session_kernel: bool = True
|
|
46
|
+
experiment_recommended: bool = False
|
|
47
|
+
|
|
42
48
|
def to_dict(self) -> dict:
|
|
43
49
|
result = {
|
|
44
50
|
"request": self.request,
|
|
@@ -48,6 +54,10 @@ class ConductorPlan:
|
|
|
48
54
|
"primary_engine": self.routes[0].engine if self.routes else None,
|
|
49
55
|
"capability_requirements": self.capability_requirements,
|
|
50
56
|
"notes": self.notes,
|
|
57
|
+
"semantic_moves": self.semantic_moves,
|
|
58
|
+
"workflow_mode": self.workflow_mode,
|
|
59
|
+
"use_session_kernel": self.use_session_kernel,
|
|
60
|
+
"experiment_recommended": self.experiment_recommended,
|
|
51
61
|
}
|
|
52
62
|
if self.budget is not None:
|
|
53
63
|
result["budget"] = self.budget
|
|
@@ -103,6 +113,65 @@ _ROUTING_PATTERNS: list[tuple[str, str, str, str, list[str]]] = [
|
|
|
103
113
|
]
|
|
104
114
|
|
|
105
115
|
|
|
116
|
+
def _find_matching_semantic_moves(request_lower: str) -> list[dict]:
|
|
117
|
+
"""Search the semantic move registry for moves matching the request."""
|
|
118
|
+
try:
|
|
119
|
+
from ..semantic_moves.registry import _REGISTRY
|
|
120
|
+
except ImportError:
|
|
121
|
+
return []
|
|
122
|
+
|
|
123
|
+
matches = []
|
|
124
|
+
request_words = set(request_lower.split())
|
|
125
|
+
|
|
126
|
+
for move in _REGISTRY.values():
|
|
127
|
+
score = 0.0
|
|
128
|
+
move_words = set(move.move_id.replace("_", " ").split())
|
|
129
|
+
intent_words = set(move.intent.lower().split())
|
|
130
|
+
|
|
131
|
+
# Word overlap
|
|
132
|
+
overlap = request_words & (move_words | intent_words)
|
|
133
|
+
score += len(overlap) * 0.3
|
|
134
|
+
|
|
135
|
+
# Dimension keyword matching
|
|
136
|
+
for dim in move.targets:
|
|
137
|
+
if dim in request_lower:
|
|
138
|
+
score += 0.2
|
|
139
|
+
|
|
140
|
+
# Direct ID match
|
|
141
|
+
if move.move_id.replace("_", " ") in request_lower:
|
|
142
|
+
score += 1.0
|
|
143
|
+
|
|
144
|
+
if score > 0.1:
|
|
145
|
+
d = move.to_dict()
|
|
146
|
+
d["match_score"] = round(score, 3)
|
|
147
|
+
matches.append(d)
|
|
148
|
+
|
|
149
|
+
matches.sort(key=lambda x: -x["match_score"])
|
|
150
|
+
return matches[:3]
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def _infer_workflow_mode(request_lower: str) -> str:
|
|
154
|
+
"""Infer the appropriate workflow mode from request language."""
|
|
155
|
+
# Performance-safe keywords
|
|
156
|
+
if re.search(r"live|perform|safe|set\b|show\b|gig", request_lower):
|
|
157
|
+
return "performance_safe"
|
|
158
|
+
|
|
159
|
+
# Creative search keywords
|
|
160
|
+
if re.search(r"try|experiment|explore|surprise|option|variant|idea|branch", request_lower):
|
|
161
|
+
return "creative_search"
|
|
162
|
+
|
|
163
|
+
# Quick fix keywords
|
|
164
|
+
if re.search(r"fix|quick|just|only|undo|revert|simple", request_lower):
|
|
165
|
+
return "quick_fix"
|
|
166
|
+
|
|
167
|
+
# Agentic loop keywords (full autonomous)
|
|
168
|
+
if re.search(r"autonomous|auto|full|everything|deep|polish|finish", request_lower):
|
|
169
|
+
return "agentic_loop"
|
|
170
|
+
|
|
171
|
+
# Default
|
|
172
|
+
return "guided_workflow"
|
|
173
|
+
|
|
174
|
+
|
|
106
175
|
def classify_request(request: str) -> ConductorPlan:
|
|
107
176
|
"""Analyze a production request and route to the right engines.
|
|
108
177
|
|
|
@@ -126,7 +195,15 @@ def classify_request(request: str) -> ConductorPlan:
|
|
|
126
195
|
engine_scores[engine]["score"] += 1
|
|
127
196
|
|
|
128
197
|
if not engine_scores:
|
|
129
|
-
#
|
|
198
|
+
# No engine matched — but semantic moves might still apply
|
|
199
|
+
semantic_moves = _find_matching_semantic_moves(lower)
|
|
200
|
+
workflow_mode = _infer_workflow_mode(lower)
|
|
201
|
+
notes = ["General request — Agent OS core loop with goal vector"]
|
|
202
|
+
if semantic_moves:
|
|
203
|
+
notes.append(
|
|
204
|
+
f"Semantic moves available: {', '.join(m['move_id'] for m in semantic_moves[:3])}. "
|
|
205
|
+
"Use apply_semantic_move for intent-level execution."
|
|
206
|
+
)
|
|
130
207
|
return ConductorPlan(
|
|
131
208
|
request=request,
|
|
132
209
|
request_type="general",
|
|
@@ -134,11 +211,14 @@ def classify_request(request: str) -> ConductorPlan:
|
|
|
134
211
|
engine="agent_os",
|
|
135
212
|
priority=1,
|
|
136
213
|
reason="No specific engine matched — using core Agent OS loop",
|
|
137
|
-
entry_tool="
|
|
138
|
-
follow_up_tools=["evaluate_move"],
|
|
214
|
+
entry_tool="get_session_kernel",
|
|
215
|
+
follow_up_tools=["propose_next_best_move", "evaluate_move"],
|
|
139
216
|
)],
|
|
140
217
|
capability_requirements=["session_access"],
|
|
141
|
-
notes=
|
|
218
|
+
notes=notes,
|
|
219
|
+
semantic_moves=semantic_moves,
|
|
220
|
+
workflow_mode=workflow_mode,
|
|
221
|
+
experiment_recommended=(workflow_mode == "creative_search"),
|
|
142
222
|
)
|
|
143
223
|
|
|
144
224
|
# Sort engines by score (most matches = primary)
|
|
@@ -165,19 +245,37 @@ def classify_request(request: str) -> ConductorPlan:
|
|
|
165
245
|
if any(r.engine == "performance_engine" for r in routes):
|
|
166
246
|
caps.append("live_performance_safe")
|
|
167
247
|
|
|
168
|
-
#
|
|
248
|
+
# Notes and guidance
|
|
169
249
|
notes = []
|
|
170
250
|
if len(routes) > 1:
|
|
171
|
-
notes.append("Multi-engine task —
|
|
251
|
+
notes.append("Multi-engine task — start with get_session_kernel for shared state")
|
|
172
252
|
if any(r.engine == "mix_engine" for r in routes):
|
|
173
253
|
notes.append("Mix engine works best with analyzer data — check get_capability_state")
|
|
174
254
|
|
|
255
|
+
# V2: Search semantic moves for matching intents
|
|
256
|
+
semantic_moves = _find_matching_semantic_moves(lower)
|
|
257
|
+
|
|
258
|
+
# V2: Infer workflow mode from request language
|
|
259
|
+
workflow_mode = _infer_workflow_mode(lower)
|
|
260
|
+
|
|
261
|
+
# V2: Recommend experiments for exploratory/creative requests
|
|
262
|
+
experiment_recommended = workflow_mode == "creative_search"
|
|
263
|
+
|
|
264
|
+
if semantic_moves:
|
|
265
|
+
notes.append(
|
|
266
|
+
f"Semantic moves available: {', '.join(m['move_id'] for m in semantic_moves[:3])}. "
|
|
267
|
+
"Use apply_semantic_move for intent-level execution."
|
|
268
|
+
)
|
|
269
|
+
|
|
175
270
|
return ConductorPlan(
|
|
176
271
|
request=request,
|
|
177
272
|
request_type=primary_type,
|
|
178
273
|
routes=routes,
|
|
179
274
|
capability_requirements=caps,
|
|
180
275
|
notes=notes,
|
|
276
|
+
semantic_moves=semantic_moves,
|
|
277
|
+
workflow_mode=workflow_mode,
|
|
278
|
+
experiment_recommended=experiment_recommended,
|
|
181
279
|
)
|
|
182
280
|
|
|
183
281
|
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
"""Analyzer MCP tools — real-time spectral analysis and deep LOM access.
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
30 tools requiring the LivePilot Analyzer M4L device on the master track.
|
|
4
4
|
These tools are optional — all core tools work without the device.
|
|
5
5
|
"""
|
|
6
6
|
|
|
@@ -328,6 +328,29 @@ def load_device_by_uri(ctx: Context, track_index: int, uri: str) -> dict:
|
|
|
328
328
|
return _postflight_loaded_device(ctx, result)
|
|
329
329
|
|
|
330
330
|
|
|
331
|
+
@mcp.tool()
def move_device(
    ctx: Context,
    track_index: int,
    device_index: int,
    target_index: int,
    target_track_index: Optional[int] = None,
) -> dict:
    """Move a device to a new position on the same or different track.
    track_index: 0+ for regular tracks, -1/-2/... for return tracks, -1000 for master."""
    _validate_track_index(track_index)
    _validate_device_index(device_index)

    payload: dict = {
        "track_index": track_index,
        "device_index": device_index,
        "target_index": target_index,
    }
    # A cross-track move also needs a validated destination track.
    if target_track_index is not None:
        _validate_track_index(target_track_index)
        payload["target_track_index"] = target_track_index

    return _get_ableton(ctx).send_command("move_device", payload)
|
|
352
|
+
|
|
353
|
+
|
|
331
354
|
@mcp.tool()
|
|
332
355
|
def find_and_load_device(ctx: Context, track_index: int, device_name: str) -> dict:
|
|
333
356
|
"""Search the browser for a device by name and load it onto a track.
|
|
@@ -335,6 +358,17 @@ def find_and_load_device(ctx: Context, track_index: int, device_name: str) -> di
|
|
|
335
358
|
_validate_track_index(track_index)
|
|
336
359
|
if not device_name.strip():
|
|
337
360
|
raise ValueError("device_name cannot be empty")
|
|
361
|
+
|
|
362
|
+
# Guardrail: bare Drum Rack produces silence (no samples loaded)
|
|
363
|
+
if device_name.strip().lower() == "drum rack":
|
|
364
|
+
raise ValueError(
|
|
365
|
+
"Loading a bare 'Drum Rack' creates an empty rack that produces silence. "
|
|
366
|
+
"Instead, use search_browser(path='drums') to find a kit preset "
|
|
367
|
+
"(e.g., '808 Core Kit'), then load it with load_browser_item(). "
|
|
368
|
+
"Or use DS drum synths (DS Kick, DS Snare, DS HH, DS Tom, DS Clap, "
|
|
369
|
+
"DS Cymbal) which are self-contained."
|
|
370
|
+
)
|
|
371
|
+
|
|
338
372
|
result = _get_ableton(ctx).send_command("find_and_load_device", {
|
|
339
373
|
"track_index": track_index,
|
|
340
374
|
"device_name": device_name,
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
"""Wonder Mode diagnosis builder — pure computation, zero I/O.
|
|
2
|
+
|
|
3
|
+
Builds a WonderDiagnosis from stuckness report, SongBrain, action
|
|
4
|
+
ledger, and open creative threads. Each input is optional — the
|
|
5
|
+
builder degrades gracefully.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
from typing import Optional
|
|
11
|
+
|
|
12
|
+
from .session import WonderDiagnosis
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# ── Problem class -> candidate domains mapping ────────────────────
|
|
16
|
+
|
|
17
|
+
# Maps each stuckness problem class to the creative domains most likely to
# resolve it, in priority order (searched first-to-last).  Keys mirror the
# rescue types produced by the stuckness detector.
_DOMAIN_MAP: dict[str, list[str]] = {
    "overpolished_loop": ["arrangement", "transition"],
    "identity_unclear": ["sound_design", "mix"],
    "contrast_needed": ["transition", "arrangement", "sound_design"],
    "hook_underdeveloped": ["sound_design", "mix"],
    "too_dense_to_progress": ["mix", "arrangement"],
    "too_safe_to_progress": ["sound_design", "transition"],
    "section_missing": ["arrangement", "transition"],
    "transition_not_earned": ["transition", "arrangement"],
}

# Minimum stuckness-report confidence before we treat the trigger as a
# detected stuckness; below this, treat as user_request.
_STUCKNESS_THRESHOLD = 0.2  # Below this, treat as user_request
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def build_diagnosis(
    stuckness_report: Optional[dict] = None,
    song_brain: Optional[dict] = None,
    action_ledger: Optional[list[dict]] = None,
) -> WonderDiagnosis:
    """Build a WonderDiagnosis from available session state.

    All inputs are optional; missing ones leave their fields at empty
    defaults (missing SongBrain is also recorded in degraded_capabilities).

    Note: open_threads domain prioritization is deferred — not yet implemented.
    """
    degraded: list[str] = []

    # Defaults: an explicit user request, exploring freely.
    trigger_reason = "user_request"
    problem_class = "exploration"
    confidence = 0.0

    # Repeated discarded moves in the ledger are a soft stuckness trigger.
    if action_ledger:
        discarded = sum(1 for entry in action_ledger if entry.get("kept") is False)
        if discarded >= 3:
            trigger_reason = "repeated_undos"

    # A sufficiently confident stuckness report overrides the ledger heuristic.
    if stuckness_report and stuckness_report.get("confidence", 0) >= _STUCKNESS_THRESHOLD:
        trigger_reason = "stuckness_detected"
        problem_class = stuckness_report.get("primary_rescue_type", "exploration") or "exploration"
        confidence = stuckness_report.get("confidence", 0.0)

    # Undo-triggered sessions without a concrete diagnosis get a floor confidence.
    if trigger_reason == "repeated_undos" and problem_class == "exploration":
        confidence = max(confidence, 0.3)

    # SongBrain supplies identity context; its absence degrades the diagnosis.
    current_identity = ""
    sacred_elements: list[dict] = []
    if song_brain:
        current_identity = song_brain.get("identity_core", "")
        sacred_elements = song_brain.get("sacred_elements", [])
    else:
        degraded.append("song_brain")

    return WonderDiagnosis(
        trigger_reason=trigger_reason,
        problem_class=problem_class,
        current_identity=current_identity,
        sacred_elements=sacred_elements,
        blocked_dimensions=[],
        # Copy so callers can mutate their domain list freely.
        candidate_domains=list(_DOMAIN_MAP.get(problem_class, [])),
        confidence=confidence,
        degraded_capabilities=degraded,
    )
|