livepilot 1.9.21 → 1.9.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110)
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcpbignore +40 -0
  3. package/AGENTS.md +2 -2
  4. package/CHANGELOG.md +47 -0
  5. package/CONTRIBUTING.md +1 -1
  6. package/README.md +47 -72
  7. package/bin/livepilot.js +135 -0
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
  11. package/livepilot/commands/arrange.md +42 -14
  12. package/livepilot/commands/beat.md +68 -21
  13. package/livepilot/commands/evaluate.md +23 -13
  14. package/livepilot/commands/mix.md +35 -11
  15. package/livepilot/commands/perform.md +31 -19
  16. package/livepilot/commands/sounddesign.md +38 -17
  17. package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
  18. package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
  19. package/livepilot/skills/livepilot-core/SKILL.md +60 -4
  20. package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
  21. package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
  22. package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
  23. package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
  24. package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
  25. package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
  26. package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
  27. package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
  28. package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
  29. package/livepilot/skills/livepilot-core/references/overview.md +4 -4
  30. package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
  31. package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
  32. package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
  33. package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
  34. package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
  35. package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
  36. package/livepilot/skills/livepilot-release/SKILL.md +15 -15
  37. package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
  38. package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
  39. package/livepilot.mcpb +0 -0
  40. package/m4l_device/livepilot_bridge.js +1 -1
  41. package/manifest.json +91 -0
  42. package/mcp_server/__init__.py +1 -1
  43. package/mcp_server/creative_constraints/__init__.py +6 -0
  44. package/mcp_server/creative_constraints/engine.py +277 -0
  45. package/mcp_server/creative_constraints/models.py +75 -0
  46. package/mcp_server/creative_constraints/tools.py +341 -0
  47. package/mcp_server/experiment/__init__.py +6 -0
  48. package/mcp_server/experiment/engine.py +213 -0
  49. package/mcp_server/experiment/models.py +120 -0
  50. package/mcp_server/experiment/tools.py +263 -0
  51. package/mcp_server/hook_hunter/__init__.py +5 -0
  52. package/mcp_server/hook_hunter/analyzer.py +342 -0
  53. package/mcp_server/hook_hunter/models.py +57 -0
  54. package/mcp_server/hook_hunter/tools.py +586 -0
  55. package/mcp_server/memory/taste_graph.py +261 -0
  56. package/mcp_server/memory/tools.py +88 -0
  57. package/mcp_server/mix_engine/critics.py +2 -2
  58. package/mcp_server/mix_engine/models.py +1 -1
  59. package/mcp_server/mix_engine/state_builder.py +2 -2
  60. package/mcp_server/musical_intelligence/__init__.py +8 -0
  61. package/mcp_server/musical_intelligence/detectors.py +421 -0
  62. package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
  63. package/mcp_server/musical_intelligence/tools.py +221 -0
  64. package/mcp_server/preview_studio/__init__.py +5 -0
  65. package/mcp_server/preview_studio/engine.py +280 -0
  66. package/mcp_server/preview_studio/models.py +73 -0
  67. package/mcp_server/preview_studio/tools.py +423 -0
  68. package/mcp_server/runtime/session_kernel.py +96 -0
  69. package/mcp_server/runtime/tools.py +90 -1
  70. package/mcp_server/semantic_moves/__init__.py +13 -0
  71. package/mcp_server/semantic_moves/compiler.py +116 -0
  72. package/mcp_server/semantic_moves/mix_compilers.py +291 -0
  73. package/mcp_server/semantic_moves/mix_moves.py +157 -0
  74. package/mcp_server/semantic_moves/models.py +46 -0
  75. package/mcp_server/semantic_moves/performance_compilers.py +208 -0
  76. package/mcp_server/semantic_moves/performance_moves.py +81 -0
  77. package/mcp_server/semantic_moves/registry.py +32 -0
  78. package/mcp_server/semantic_moves/resolvers.py +126 -0
  79. package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
  80. package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
  81. package/mcp_server/semantic_moves/tools.py +204 -0
  82. package/mcp_server/semantic_moves/transition_compilers.py +222 -0
  83. package/mcp_server/semantic_moves/transition_moves.py +76 -0
  84. package/mcp_server/server.py +10 -0
  85. package/mcp_server/session_continuity/__init__.py +6 -0
  86. package/mcp_server/session_continuity/models.py +86 -0
  87. package/mcp_server/session_continuity/tools.py +230 -0
  88. package/mcp_server/session_continuity/tracker.py +235 -0
  89. package/mcp_server/song_brain/__init__.py +6 -0
  90. package/mcp_server/song_brain/builder.py +477 -0
  91. package/mcp_server/song_brain/models.py +132 -0
  92. package/mcp_server/song_brain/tools.py +294 -0
  93. package/mcp_server/stuckness_detector/__init__.py +5 -0
  94. package/mcp_server/stuckness_detector/detector.py +400 -0
  95. package/mcp_server/stuckness_detector/models.py +66 -0
  96. package/mcp_server/stuckness_detector/tools.py +195 -0
  97. package/mcp_server/tools/_conductor.py +104 -6
  98. package/mcp_server/tools/analyzer.py +1 -1
  99. package/mcp_server/tools/devices.py +34 -0
  100. package/mcp_server/wonder_mode/__init__.py +6 -0
  101. package/mcp_server/wonder_mode/diagnosis.py +84 -0
  102. package/mcp_server/wonder_mode/engine.py +493 -0
  103. package/mcp_server/wonder_mode/session.py +114 -0
  104. package/mcp_server/wonder_mode/tools.py +285 -0
  105. package/package.json +2 -2
  106. package/remote_script/LivePilot/__init__.py +1 -1
  107. package/remote_script/LivePilot/browser.py +4 -1
  108. package/remote_script/LivePilot/devices.py +29 -0
  109. package/remote_script/LivePilot/tracks.py +11 -4
  110. package/scripts/generate_tool_catalog.py +131 -0
@@ -0,0 +1,294 @@
1
+ """SongBrain MCP tools — 3 tools for song identity modeling.
2
+
3
+ build_song_brain — construct the musical identity of the current piece
4
+ explain_song_identity — human-readable summary of what the song is about
5
+ detect_identity_drift — compare before/after to detect identity damage
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from fastmcp import Context
11
+
12
+ from ..server import mcp
13
+ from . import builder
14
+ from .models import SongBrain
15
+
16
+
17
# Module-level fallback for consumers without ctx.
# Prefer ctx.lifespan_context["current_brain"] when ctx is available.
_current_brain: SongBrain | None = None

# Snapshot store: brain_id -> SongBrain, max 10 snapshots.
# _set_brain inserts here and evicts in insertion order (oldest first),
# so detect_identity_drift can only look up the last _MAX_SNAPSHOTS builds.
_brain_snapshots: dict[str, SongBrain] = {}
_MAX_SNAPSHOTS = 10
24
+
25
+
26
def _set_brain(ctx: Context, brain: SongBrain) -> None:
    """Record *brain* everywhere it is consumed: the request's
    lifespan_context, the module-level fallback, and the snapshot store
    used for later drift comparison.
    """
    global _current_brain
    _current_brain = brain
    ctx.lifespan_context["current_brain"] = brain

    # Keep a snapshot keyed by brain_id so detect_identity_drift can look
    # this build up later; bound the store by dropping the oldest entries
    # (dicts preserve insertion order).
    _brain_snapshots[brain.brain_id] = brain
    while len(_brain_snapshots) > _MAX_SNAPSHOTS:
        del _brain_snapshots[next(iter(_brain_snapshots))]
37
+
38
+
39
def _get_snapshot(brain_id: str) -> SongBrain | None:
    """Look up a previously stored brain snapshot; None when unknown."""
    try:
        return _brain_snapshots[brain_id]
    except KeyError:
        return None
42
+
43
+
44
def _get_ableton(ctx: Context):
    """Pull the Ableton bridge object out of the request's lifespan context.

    Raises KeyError if the server was started without an "ableton" entry.
    """
    lifespan = ctx.lifespan_context
    return lifespan["ableton"]
46
+
47
+
48
def _fetch_session_data(ctx: Context) -> dict:
    """Fetch all available session data for brain building.

    Every lookup is best-effort: each source is wrapped in its own
    try/except so a failure in one feed never blocks the others, and the
    pre-seeded empty defaults below are what the caller receives for any
    feed that failed.

    Populates real data from Ableton and pure-computation modules:
    - motif_data: from get_motif_graph (motif engine)
    - composition_analysis: from musical intelligence section inference
    - role_graph: from semantic move resolvers (track role inference)
    - recent_moves: from session-scoped action ledger
    """
    ableton = _get_ableton(ctx)
    # Defaults double as the fallback shape when a fetch below fails.
    data: dict = {
        "session_info": {},
        "scenes": [],
        "tracks": [],
        "motif_data": {},
        "composition_analysis": {},
        "role_graph": {},
        "recent_moves": [],
    }

    try:
        data["session_info"] = ableton.send_command("get_session_info", {})
    except Exception:
        # Minimal stand-in so downstream .get() calls still work.
        data["session_info"] = {"tempo": 120.0, "track_count": 0}

    try:
        # Pair each scene entry with its row of clips from the matrix;
        # zip truncates to the shorter of the two lists.
        matrix = ableton.send_command("get_scene_matrix")
        data["scenes"] = [
            {"name": s.get("name", f"Scene {i}"), "clips": row}
            for i, (s, row) in enumerate(
                zip(matrix.get("scenes", []), matrix.get("matrix", []))
            )
        ]
    except Exception:
        pass

    try:
        # Track list rides along inside session_info; guard its type so a
        # malformed payload degrades to an empty list rather than crashing.
        info = data["session_info"]
        tracks_list = info.get("tracks", [])
        data["tracks"] = tracks_list if isinstance(tracks_list, list) else []
    except Exception:
        pass

    # Motif data — from the motif engine if notes exist
    try:
        data["motif_data"] = ableton.send_command("get_motif_graph")
    except Exception:
        pass  # Motif graph requires notes in clips; empty is valid

    # Composition analysis — from musical intelligence detectors (pure computation)
    try:
        from ..musical_intelligence import detectors
        total_tracks = data["session_info"].get("track_count", 6)
        purposes = detectors.infer_section_purposes(data["scenes"], total_tracks)
        arc = detectors.score_emotional_arc(purposes)
        data["composition_analysis"] = {
            "sections": [p.to_dict() for p in purposes],
            "emotional_arc": arc.to_dict(),
        }
    except Exception:
        pass

    # Role graph — from semantic move resolvers (pure computation, no I/O)
    try:
        from ..semantic_moves.resolvers import infer_role
        roles = {}
        for track in data["tracks"]:
            name = track.get("name", "")
            role = infer_role(name)
            # NOTE(review): keyed by track name — tracks sharing a name
            # overwrite each other here; confirm names are unique in practice.
            roles[name] = {"index": track.get("index", 0), "role": role}
        data["role_graph"] = roles
    except Exception:
        pass

    # Recent moves — from session-scoped action ledger
    try:
        from ..runtime.action_ledger import SessionLedger
        ledger = ctx.lifespan_context.get("action_ledger")
        if isinstance(ledger, SessionLedger):
            recent = ledger.get_recent_moves(limit=10)
            data["recent_moves"] = [e.to_dict() for e in recent]
    except Exception:
        pass

    return data
133
+
134
+
135
@mcp.tool()
def build_song_brain(ctx: Context) -> dict:
    """Build the musical identity model for the current song.

    Analyzes the session to identify:
    - identity_core: the strongest defining idea
    - sacred_elements: motifs/textures/grooves that must be preserved
    - section_purposes: what each section is trying to do emotionally
    - energy_arc: rise/fall shape across sections
    - open_questions: what the song has not resolved yet

    Call this at the start of complex creative workflows.
    Returns the full SongBrain as a dict.
    """
    session = _fetch_session_data(ctx)
    brain = builder.build_song_brain(
        session_info=session["session_info"],
        scenes=session["scenes"],
        tracks=session["tracks"],
        motif_data=session["motif_data"],
        composition_analysis=session["composition_analysis"],
        role_graph=session["role_graph"],
        recent_moves=session["recent_moves"],
    )
    _set_brain(ctx, brain)

    # Flatten the brain plus its one-line summary into a single payload.
    payload = dict(brain.to_dict())
    payload["summary"] = brain.summary
    return payload
165
+
166
+
167
@mcp.tool()
def explain_song_identity(ctx: Context) -> dict:
    """Explain the current song's identity in human musical language.

    If no SongBrain exists yet, builds one first. Returns a structured
    explanation suitable for the agent to talk about the song naturally.
    """
    # Prefer the request-scoped brain (matches the module note above the
    # _current_brain fallback), then the module-level cache; only build
    # from scratch when neither exists.
    brain = ctx.lifespan_context.get("current_brain")
    if brain is None:
        brain = _current_brain
    if brain is None:
        data = _fetch_session_data(ctx)
        brain = builder.build_song_brain(
            session_info=data["session_info"],
            scenes=data["scenes"],
            tracks=data["tracks"],
            motif_data=data["motif_data"],
            composition_analysis=data["composition_analysis"],
            role_graph=data["role_graph"],
            recent_moves=data["recent_moves"],
        )
        _set_brain(ctx, brain)

    explanation: dict = {
        "identity": brain.identity_core,
        "confidence": brain.identity_confidence,
    }

    # Sacred elements in natural language
    if brain.sacred_elements:
        explanation["protect"] = [
            f"{e.element_type}: {e.description}" for e in brain.sacred_elements
        ]
    else:
        explanation["protect"] = ["No clearly sacred elements detected yet"]

    # What each section does
    if brain.section_purposes:
        explanation["sections"] = [
            f"{s.label} — {s.emotional_intent} (energy {s.energy_level:.0%})"
            for s in brain.section_purposes
        ]

    # Energy shape: classify by where the arc peaks (early/late/middle).
    if brain.energy_arc:
        arc = brain.energy_arc
        if len(arc) >= 3:
            peak_idx = arc.index(max(arc))
            peak_pct = peak_idx / max(len(arc) - 1, 1)
            if peak_pct < 0.3:
                explanation["energy_shape"] = "front-loaded — peaks early"
            elif peak_pct > 0.7:
                explanation["energy_shape"] = "slow burn — builds to late peak"
            else:
                explanation["energy_shape"] = "centered arc — peaks in the middle"
        else:
            explanation["energy_shape"] = "short form — limited arc data"

    # Open questions
    if brain.open_questions:
        explanation["open_questions"] = [q.question for q in brain.open_questions]

    # Drift warning
    if brain.identity_drift_risk > 0.3:
        explanation["warning"] = (
            f"Identity drift risk is {brain.identity_drift_risk:.0%} — "
            "recent edits may be moving the song away from itself"
        )

    explanation["summary"] = brain.summary
    return explanation
236
+
237
+
238
@mcp.tool()
def detect_identity_drift(
    ctx: Context,
    before_brain_id: str = "",
) -> dict:
    """Detect whether recent changes have damaged the song's identity.

    Compares the current state against a previous SongBrain snapshot.
    If before_brain_id is provided, looks up that specific snapshot.
    If empty, uses the last cached brain.
    If no previous brain exists, builds baseline and reports no drift.

    before_brain_id: optional brain_id from a previous build_song_brain call.

    Returns drift score, changed elements, sacred damage, and recommendation.
    """
    # Resolve the baseline: an explicit snapshot ID wins, else the last cache.
    if before_brain_id:
        baseline = _get_snapshot(before_brain_id)
        if baseline is None:
            return {
                "error": f"No snapshot found for brain_id '{before_brain_id}'",
                "available_snapshots": list(_brain_snapshots.keys()),
            }
    else:
        baseline = _current_brain

    # Rebuild a brain from the session as it stands right now.
    session = _fetch_session_data(ctx)
    current = builder.build_song_brain(
        session_info=session["session_info"],
        scenes=session["scenes"],
        tracks=session["tracks"],
        motif_data=session["motif_data"],
        composition_analysis=session["composition_analysis"],
        role_graph=session["role_graph"],
        recent_moves=session["recent_moves"],
    )

    # First-ever build: nothing to compare, so this becomes the baseline.
    if baseline is None:
        _set_brain(ctx, current)
        return {
            "drift_score": 0.0,
            "note": "No previous brain to compare — this is the baseline",
            "brain_id": current.brain_id,
            "recommendation": "safe",
        }

    report = builder.detect_identity_drift(baseline, current)
    _set_brain(ctx, current)

    result = dict(report.to_dict())
    result["before_brain_id"] = baseline.brain_id
    result["after_brain_id"] = current.brain_id
    return result
@@ -0,0 +1,5 @@
1
+ """Stuckness Detector — momentum rescue for Stage 2.
2
+
3
+ Identifies when the session is losing momentum and shifts the agent
4
+ from micro-fixing into directional help.
5
+ """
@@ -0,0 +1,400 @@
1
+ """Stuckness detection engine — pure computation, zero I/O.
2
+
3
+ Analyzes action history, session state, and patterns to detect
4
+ when the user is stuck and suggest rescue strategies.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from collections import Counter
10
+ from typing import Optional
11
+
12
+ from .models import RescueSuggestion, StucknessReport, StucknessSignal
13
+
14
+
15
+ # ── Main detection ────────────────────────────────────────────────
16
+
17
+
18
def detect_stuckness(
    action_history: list[dict],
    session_info: Optional[dict] = None,
    song_brain: Optional[dict] = None,
    section_count: int = 0,
) -> StucknessReport:
    """Detect whether the session is stuck.

    Analyzes action history for repeated undos, local tweaking,
    long loops without structural edits, and other stuckness signals.
    """
    session_info = session_info or {}
    song_brain = song_brain or {}

    # Run every checker and keep only the ones that fired.
    candidates = [
        _check_repeated_undos(action_history),
        _check_local_tweaking(action_history),
        _check_loop_without_structure(action_history, section_count),
        _check_repeated_requests(action_history),
        _check_decoration_overload(session_info),
        _check_identity_unclear(song_brain),
    ]
    signals = [signal for signal in candidates if signal is not None]

    if not signals:
        return StucknessReport(confidence=0.0, level="flowing")

    # Compound: strongest signal + 0.15 per additional signal (don't average)
    strengths = sorted((s.strength for s in signals), reverse=True)
    confidence = strengths[0]
    for extra in strengths[1:]:
        confidence += extra * 0.15
    confidence = min(1.0, round(confidence, 3))

    # Map confidence onto a level; first threshold exceeded wins.
    thresholds = (
        (0.7, "deeply_stuck"),
        (0.45, "stuck"),
        (0.2, "slowing"),
    )
    level = next(
        (name for bound, name in thresholds if confidence > bound), "flowing"
    )

    # Determine rescue types and a human-readable diagnosis.
    primary, secondary = _classify_rescue_type(signals, song_brain, session_info)
    diagnosis = _build_diagnosis(signals, level)

    return StucknessReport(
        confidence=confidence,
        level=level,
        signals=signals,
        diagnosis=diagnosis,
        primary_rescue_type=primary,
        secondary_rescue_types=secondary,
    )
96
+
97
+
98
+ # ── Signal checkers ───────────────────────────────────────────────
99
+
100
+
101
def _check_repeated_undos(history: list[dict]) -> Optional[StucknessSignal]:
    """Check for repeated undone moves (kept=False in ledger entries)."""
    window = history[-20:]
    undone = sum(1 for entry in window if entry.get("kept") is False)
    if undone < 4:
        return None
    return StucknessSignal(
        signal_type="repeated_undo",
        strength=min(0.8, undone * 0.15),
        evidence=f"{undone} undone moves in last {len(window)} entries",
    )
113
+
114
+
115
def _check_local_tweaking(history: list[dict]) -> Optional[StucknessSignal]:
    """Check for many small parameter changes in one local area."""
    param_tools = {"set_device_parameter", "set_track_volume", "set_track_pan",
                   "set_send_level", "set_clip_loop", "batch_set_parameters"}

    def _touches_params(entry: dict) -> bool:
        # An entry counts if any of its actions used a parameter tool.
        return any(
            action.get("tool", "") in param_tools
            for action in entry.get("actions", [])
        )

    tweaks = [entry for entry in history[-15:] if _touches_params(entry)]
    if len(tweaks) < 6:
        return None

    # Group the tweaks by where they landed (track scope, falling back to
    # the intent text) and check whether a single area dominates.
    areas = Counter(
        entry.get("scope", {}).get("track", entry.get("intent", ""))
        for entry in tweaks
    )
    top = areas.most_common(1)
    if not top or top[0][1] < 4:
        return None

    return StucknessSignal(
        signal_type="local_tweaking",
        strength=min(0.7, len(tweaks) * 0.1),
        evidence=f"{len(tweaks)} parameter tweaks, mostly on {top[0][0]}",
    )
139
+
140
+
141
def _check_loop_without_structure(
    history: list[dict], section_count: int
) -> Optional[StucknessSignal]:
    """Check for long work without structural changes."""
    window = history[-30:]
    structural_tools = {"create_clip", "delete_clip", "create_midi_track",
                        "create_audio_track", "delete_track", "duplicate_clip"}

    # Count entries whose actions touched any structural tool.
    structural_moves = sum(
        1
        for entry in window
        if {action.get("tool", "") for action in entry.get("actions", [])}
        & structural_tools
    )

    if len(window) >= 15 and structural_moves == 0:
        return StucknessSignal(
            signal_type="long_loop_no_structure",
            strength=0.5,
            evidence=f"{len(window)} moves without any structural changes",
        )

    if section_count <= 1 and len(window) > 20:
        return StucknessSignal(
            signal_type="single_loop",
            strength=0.4,
            evidence="Working in a single loop/scene for extended period",
        )

    return None
169
+
170
+
171
def _check_repeated_requests(history: list[dict]) -> Optional[StucknessSignal]:
    """Check for repeated similar intents without acceptance.

    A keyword (any word longer than 3 chars) is counted at most once per
    intent, so the signal fires only when the same request keeps coming
    back across entries — not when one intent happens to repeat a word
    (e.g. "bass bass bass" must not count as three repeats).
    """
    recent = history[-10:]
    intents = [a.get("intent", "").lower() for a in recent if a.get("intent")]
    if len(intents) < 3:
        return None

    words: Counter = Counter()
    for intent in intents:
        # dict.fromkeys dedups within one intent while preserving the
        # first-seen word order (keeps the evidence string deterministic).
        for word in dict.fromkeys(intent.split()):
            if len(word) > 3:
                words[word] += 1

    repeated = [w for w, c in words.items() if c >= 3]
    if not repeated:
        return None
    return StucknessSignal(
        signal_type="repeated_requests",
        strength=0.5,
        evidence=f"Repeated intent keywords: {', '.join(repeated)}",
    )
191
+
192
+
193
def _check_decoration_overload(session_info: dict) -> Optional[StucknessSignal]:
    """Check for too many decorative layers without role clarity."""
    n_tracks = session_info.get("track_count", 0)
    if n_tracks <= 16:
        return None
    # Strength grows 0.05 per track past 16, capped at 0.6.
    return StucknessSignal(
        signal_type="high_density",
        strength=min(0.6, (n_tracks - 16) * 0.05),
        evidence=f"{n_tracks} tracks — may be too dense to progress",
    )
203
+
204
+
205
def _check_identity_unclear(song_brain: dict) -> Optional[StucknessSignal]:
    """Check if song identity is unclear."""
    # Missing confidence defaults to 0.5, which counts as "clear enough".
    clarity = song_brain.get("identity_confidence", 0.5)
    if clarity < 0.3:
        return StucknessSignal(
            signal_type="identity_unclear",
            strength=0.5,
            evidence="Song identity is not clearly established",
        )
    return None
215
+
216
+
217
+ # ── Rescue classification ─────────────────────────────────────────
218
+
219
+
220
def _classify_rescue_type(
    signals: list[StucknessSignal],
    song_brain: dict,
    session_info: dict,
) -> tuple[str, list[str]]:
    """Determine the best rescue type from signals."""
    present = {s.signal_type for s in signals}

    # Priority-ordered rules: the first signal type found wins.
    rules = (
        ("identity_unclear", "identity_unclear",
         ["hook_underdeveloped", "too_safe_to_progress"]),
        ("single_loop", "overpolished_loop",
         ["section_missing", "contrast_needed"]),
        ("high_density", "too_dense_to_progress",
         ["contrast_needed", "identity_unclear"]),
        ("local_tweaking", "overpolished_loop",
         ["contrast_needed", "section_missing"]),
        ("repeated_undo", "contrast_needed",
         ["hook_underdeveloped", "too_safe_to_progress"]),
        ("long_loop_no_structure", "section_missing",
         ["contrast_needed", "transition_not_earned"]),
    )
    for trigger, primary, secondary in rules:
        if trigger in present:
            return primary, secondary

    # Default when no specific rule matched.
    return "contrast_needed", []
251
+
252
+
253
+ # ── Rescue suggestions ────────────────────────────────────────────
254
+
255
+
256
def suggest_rescue(
    report: StucknessReport,
    mode: str = "gentle",
) -> list[RescueSuggestion]:
    """Generate rescue suggestions based on stuckness analysis.

    report: the StucknessReport produced by detect_stuckness; its
        primary/secondary rescue types select from the catalog below.
    mode: "gentle" returns only the primary suggestion; "direct" also
        appends up to two secondary suggestions.

    Returns suggestions ordered primary-first. Urgency is derived from
    the report's confidence (> 0.6 -> "high" for the primary).
    """
    suggestions: list[RescueSuggestion] = []

    # Catalog of rescue playbooks, keyed by rescue type. Built fresh on
    # every call, so mutating .urgency on these objects below is safe.
    rescue_strategies = {
        "contrast_needed": RescueSuggestion(
            rescue_type="contrast_needed",
            title="Add contrast to break the plateau",
            description="The session needs a moment that feels different from what's been happening.",
            strategies=[
                "Strip everything except the hook for 4-8 bars, then re-enter",
                "Introduce a new timbral element that wasn't there before",
                "Change the harmonic context (try a relative minor/major shift)",
                "Create a rhythmic break — half-time or double-time feel",
            ],
        ),
        "section_missing": RescueSuggestion(
            rescue_type="section_missing",
            title="Add a new section for structural progress",
            description="The track needs more form — a new section would create momentum.",
            strategies=[
                "Create a B section that contrasts the current loop",
                "Add an intro that sets up the main idea",
                "Build a breakdown section that strips to essentials",
                "Design a transition that earns the next section",
            ],
        ),
        "hook_underdeveloped": RescueSuggestion(
            rescue_type="hook_underdeveloped",
            title="Develop the hook before adding more layers",
            description="The most memorable idea needs more attention before the arrangement grows.",
            strategies=[
                "Write a variation of the hook for a different section",
                "Add a countermelody that complements the hook",
                "Create a stripped version of the hook for contrast sections",
                "Make the hook hit harder — better sound design or arrangement support",
            ],
        ),
        "transition_not_earned": RescueSuggestion(
            rescue_type="transition_not_earned",
            title="Build better transitions between sections",
            description="Sections jump abruptly — earn the transitions.",
            strategies=[
                "Add a 2-4 bar transition between sections",
                "Use filter sweeps or risers to build anticipation",
                "Create drum fills or melodic ornaments at section boundaries",
                "Use silence or space before the next section arrives",
            ],
        ),
        "overpolished_loop": RescueSuggestion(
            rescue_type="overpolished_loop",
            title="Stop polishing — move forward structurally",
            description="This loop is getting over-refined. Time to build form.",
            strategies=[
                "Duplicate the scene and subtract elements for a contrasting section",
                "Record a live take over the loop to find new directions",
                "Commit to the current state and start the arrangement",
                "Create a completely different section from scratch",
            ],
        ),
        "identity_unclear": RescueSuggestion(
            rescue_type="identity_unclear",
            title="Define the track's identity before adding more",
            description="It's hard to progress when the track doesn't know what it is.",
            strategies=[
                "Identify or create one defining melodic/rhythmic idea",
                "Choose a reference track and distill its key principles",
                "Remove tracks that don't serve a clear purpose",
                "Write a one-sentence description of what this track should feel like",
            ],
        ),
        "too_dense_to_progress": RescueSuggestion(
            rescue_type="too_dense_to_progress",
            title="Subtract before adding more",
            description="Too many elements fighting for attention. Simplify first.",
            strategies=[
                "Mute all tracks, then bring back only the essential ones",
                "Delete or freeze tracks with no clear role",
                "Create a stripped version as a new starting point",
                "Focus on making 3-4 elements work perfectly instead of 12 elements existing",
            ],
        ),
        "too_safe_to_progress": RescueSuggestion(
            rescue_type="too_safe_to_progress",
            title="Take a risk — the safe path isn't working",
            description="Everything is technically correct but uninspired. Time for a bold move.",
            strategies=[
                "Try a dramatic sound design change on a key element",
                "Add an unexpected harmonic or rhythmic element",
                "Radically change the arrangement structure",
                "Experiment with an extreme processing chain (distortion, granular, etc.)",
            ],
        ),
    }

    # Primary suggestion
    primary_strat = rescue_strategies.get(report.primary_rescue_type)
    if primary_strat:
        primary_strat.urgency = "high" if report.confidence > 0.6 else "medium"
        suggestions.append(primary_strat)

    # Secondary suggestions (in gentle mode, only show primary)
    if mode == "direct":
        for rt in report.secondary_rescue_types[:2]:
            sec = rescue_strategies.get(rt)
            if sec:
                sec.urgency = "medium"
                suggestions.append(sec)

    return suggestions
369
+
370
+
371
+ # ── Diagnosis builder ─────────────────────────────────────────────
372
+
373
+
374
def _build_diagnosis(signals: list[StucknessSignal], level: str) -> str:
    """Build a human-readable diagnosis from signals."""
    if level == "flowing":
        return "Session is flowing well — no intervention needed"

    signal_descriptions = {
        "repeated_undo": "frequent undos suggest dissatisfaction with results",
        "local_tweaking": "lots of small parameter changes in one area",
        "long_loop_no_structure": "no structural changes for a while",
        "single_loop": "working in a single loop without expanding",
        "repeated_requests": "similar requests being repeated",
        "high_density": "track count is very high",
        "identity_unclear": "the song's identity isn't clear yet",
    }

    # Unknown signal types fall back to their raw type name.
    details = "; ".join(
        signal_descriptions.get(s.signal_type, s.signal_type) for s in signals
    )

    prefix = {
        "slowing": "The session is slowing down",
        "stuck": "The session appears stuck",
        "deeply_stuck": "The session is deeply stuck",
    }.get(level, "Momentum issue detected")

    return f"{prefix}: {details}"