livepilot 1.9.21 → 1.9.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +3 -3
- package/.mcpbignore +40 -0
- package/AGENTS.md +2 -2
- package/CHANGELOG.md +47 -0
- package/CONTRIBUTING.md +1 -1
- package/README.md +47 -72
- package/bin/livepilot.js +135 -0
- package/livepilot/.Codex-plugin/plugin.json +2 -2
- package/livepilot/.claude-plugin/plugin.json +2 -2
- package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
- package/livepilot/commands/arrange.md +42 -14
- package/livepilot/commands/beat.md +68 -21
- package/livepilot/commands/evaluate.md +23 -13
- package/livepilot/commands/mix.md +35 -11
- package/livepilot/commands/perform.md +31 -19
- package/livepilot/commands/sounddesign.md +38 -17
- package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
- package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
- package/livepilot/skills/livepilot-core/SKILL.md +60 -4
- package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
- package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
- package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
- package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
- package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
- package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
- package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
- package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
- package/livepilot/skills/livepilot-core/references/overview.md +4 -4
- package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
- package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
- package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
- package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
- package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
- package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
- package/livepilot/skills/livepilot-release/SKILL.md +15 -15
- package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
- package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
- package/livepilot.mcpb +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/manifest.json +91 -0
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/creative_constraints/__init__.py +6 -0
- package/mcp_server/creative_constraints/engine.py +277 -0
- package/mcp_server/creative_constraints/models.py +75 -0
- package/mcp_server/creative_constraints/tools.py +341 -0
- package/mcp_server/experiment/__init__.py +6 -0
- package/mcp_server/experiment/engine.py +213 -0
- package/mcp_server/experiment/models.py +120 -0
- package/mcp_server/experiment/tools.py +263 -0
- package/mcp_server/hook_hunter/__init__.py +5 -0
- package/mcp_server/hook_hunter/analyzer.py +342 -0
- package/mcp_server/hook_hunter/models.py +57 -0
- package/mcp_server/hook_hunter/tools.py +586 -0
- package/mcp_server/memory/taste_graph.py +261 -0
- package/mcp_server/memory/tools.py +88 -0
- package/mcp_server/mix_engine/critics.py +2 -2
- package/mcp_server/mix_engine/models.py +1 -1
- package/mcp_server/mix_engine/state_builder.py +2 -2
- package/mcp_server/musical_intelligence/__init__.py +8 -0
- package/mcp_server/musical_intelligence/detectors.py +421 -0
- package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
- package/mcp_server/musical_intelligence/tools.py +221 -0
- package/mcp_server/preview_studio/__init__.py +5 -0
- package/mcp_server/preview_studio/engine.py +280 -0
- package/mcp_server/preview_studio/models.py +73 -0
- package/mcp_server/preview_studio/tools.py +423 -0
- package/mcp_server/runtime/session_kernel.py +96 -0
- package/mcp_server/runtime/tools.py +90 -1
- package/mcp_server/semantic_moves/__init__.py +13 -0
- package/mcp_server/semantic_moves/compiler.py +116 -0
- package/mcp_server/semantic_moves/mix_compilers.py +291 -0
- package/mcp_server/semantic_moves/mix_moves.py +157 -0
- package/mcp_server/semantic_moves/models.py +46 -0
- package/mcp_server/semantic_moves/performance_compilers.py +208 -0
- package/mcp_server/semantic_moves/performance_moves.py +81 -0
- package/mcp_server/semantic_moves/registry.py +32 -0
- package/mcp_server/semantic_moves/resolvers.py +126 -0
- package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
- package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
- package/mcp_server/semantic_moves/tools.py +204 -0
- package/mcp_server/semantic_moves/transition_compilers.py +222 -0
- package/mcp_server/semantic_moves/transition_moves.py +76 -0
- package/mcp_server/server.py +10 -0
- package/mcp_server/session_continuity/__init__.py +6 -0
- package/mcp_server/session_continuity/models.py +86 -0
- package/mcp_server/session_continuity/tools.py +230 -0
- package/mcp_server/session_continuity/tracker.py +235 -0
- package/mcp_server/song_brain/__init__.py +6 -0
- package/mcp_server/song_brain/builder.py +477 -0
- package/mcp_server/song_brain/models.py +132 -0
- package/mcp_server/song_brain/tools.py +294 -0
- package/mcp_server/stuckness_detector/__init__.py +5 -0
- package/mcp_server/stuckness_detector/detector.py +400 -0
- package/mcp_server/stuckness_detector/models.py +66 -0
- package/mcp_server/stuckness_detector/tools.py +195 -0
- package/mcp_server/tools/_conductor.py +104 -6
- package/mcp_server/tools/analyzer.py +1 -1
- package/mcp_server/tools/devices.py +34 -0
- package/mcp_server/wonder_mode/__init__.py +6 -0
- package/mcp_server/wonder_mode/diagnosis.py +84 -0
- package/mcp_server/wonder_mode/engine.py +493 -0
- package/mcp_server/wonder_mode/session.py +114 -0
- package/mcp_server/wonder_mode/tools.py +285 -0
- package/package.json +2 -2
- package/remote_script/LivePilot/__init__.py +1 -1
- package/remote_script/LivePilot/browser.py +4 -1
- package/remote_script/LivePilot/devices.py +29 -0
- package/remote_script/LivePilot/tracks.py +11 -4
- package/scripts/generate_tool_catalog.py +131 -0
|
@@ -0,0 +1,493 @@
|
|
|
1
|
+
"""Wonder Mode engine — pure computation, zero I/O.
|
|
2
|
+
|
|
3
|
+
Generates contextually different creative variants ranked by
|
|
4
|
+
taste, identity, and coherence. Each variant is built from a
|
|
5
|
+
real semantic move matched to the request.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import hashlib
|
|
11
|
+
import json
|
|
12
|
+
import math
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ── Move discovery ───────────────────────────────────────────────
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def discover_moves(
    request_text: str,
    taste_graph: object = None,
    active_constraints: object = None,
    candidate_domains: list[str] | None = None,
) -> list[dict]:
    """Find semantic moves relevant to the request.

    Uses keyword scoring + optional taste reranking + constraint filtering.
    Returns full move dicts including compile_plan (via registry.get_move).

    Args:
        request_text: free-text creative request; matched word-by-word.
        taste_graph: optional object exposing rank_moves() and
            evidence_count; used for reranking once evidence exists.
        active_constraints: optional object exposing a truthy .constraints
            attribute; moves whose plans violate constraints are dropped.
        candidate_domains: optional move-family whitelist; ignored if it
            would filter out every scored move.

    Returns:
        Enriched move dicts (to_full_dict() + "relevance_score"), sorted by
        descending relevance. Empty list when nothing scores above 0.1.
    """
    # Lazy relative import — presumably avoids a module-import cycle with
    # semantic_moves; confirm before hoisting to module level.
    from ..semantic_moves import registry

    all_moves = registry.list_moves()  # returns to_dict() — no compile_plan
    if not all_moves:
        return []

    request_lower = request_text.lower()
    request_words = set(request_lower.split())

    # Keyword relevance: 0.3 per word shared with move_id/intent, plus 0.2
    # per target dimension named as a substring of the request.
    scored: list[tuple[dict, float]] = []
    for move in all_moves:
        score = 0.0
        move_words = set(move["move_id"].replace("_", " ").split())
        intent_words = set(move.get("intent", "").lower().split())
        overlap = request_words & (move_words | intent_words)
        score += len(overlap) * 0.3

        for dim in move.get("targets", {}):
            if dim in request_lower:
                score += 0.2

        if score > 0.1:
            scored.append((move, score))

    if not scored:
        return []

    # Domain filtering if provided (fall back to full list if filtering removes all)
    if candidate_domains:
        domain_filtered = [(m, s) for m, s in scored if m.get("family") in candidate_domains]
        if domain_filtered:
            scored = domain_filtered

    # Taste-based reranking if available: blend keyword score (60%) with the
    # graph's taste_score (40%, neutral 0.5 when a move wasn't ranked).
    if (
        taste_graph is not None
        and hasattr(taste_graph, "rank_moves")
        and hasattr(taste_graph, "evidence_count")
        and taste_graph.evidence_count > 0
    ):
        move_dicts = [m for m, _ in scored]
        ranked = taste_graph.rank_moves(move_dicts)
        taste_by_id = {m["move_id"]: m.get("taste_score", 0.5) for m in ranked}
        scored = [
            (m, kw_score * 0.6 + taste_by_id.get(m["move_id"], 0.5) * 0.4)
            for m, kw_score in scored
        ]

    scored.sort(key=lambda x: -x[1])

    # Enrich with full compile_plan via get_move()
    result = []
    for move_dict, score in scored:
        full_move = registry.get_move(move_dict["move_id"])
        if full_move:
            enriched = full_move.to_full_dict()
            enriched["relevance_score"] = round(score, 3)
            result.append(enriched)

    # Filter by active constraints if any: drop moves whose compiled plan
    # fails validation. Best-effort by design — any error keeps the
    # unfiltered result.
    if (
        active_constraints is not None
        and hasattr(active_constraints, "constraints")
        and active_constraints.constraints
    ):
        try:
            from ..creative_constraints.engine import validate_plan_against_constraints
            filtered = []
            for move in result:
                # The validator expects {"steps": [{"action": ...}]}; map
                # each compile step's "tool" key onto "action".
                plan = {"steps": [
                    {"action": step.get("tool", ""), **step}
                    for step in (move.get("compile_plan") or [])
                ]}
                validation = validate_plan_against_constraints(plan, active_constraints)
                if validation["valid"]:
                    filtered.append(move)
            result = filtered
        except Exception:
            pass  # constraint filtering is optional

    return result
|
|
111
|
+
|
|
112
|
+
|
|
113
|
+
# ── Tier assignment ──────────────────────────────────────────────
|
|
114
|
+
|
|
115
|
+
_RISK_NUMERIC = {"low": 0.2, "medium": 0.5, "high": 0.8}
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def _with_envelope(move: dict, tier: str) -> dict:
|
|
120
|
+
"""Apply novelty envelope to a move's targets and protect."""
|
|
121
|
+
result = dict(move)
|
|
122
|
+
targets = dict(move.get("targets", {}))
|
|
123
|
+
protect = dict(move.get("protect", {}))
|
|
124
|
+
|
|
125
|
+
if tier == "safe":
|
|
126
|
+
targets = {k: round(v * 0.7, 3) for k, v in targets.items()}
|
|
127
|
+
elif tier == "unexpected":
|
|
128
|
+
targets = {k: round(v * 1.4, 3) for k, v in targets.items()}
|
|
129
|
+
protect = {k: round(v * 0.8, 3) for k, v in protect.items()}
|
|
130
|
+
# "strong" keeps targets and protect as-is
|
|
131
|
+
|
|
132
|
+
result["targets"] = targets
|
|
133
|
+
result["protect"] = protect
|
|
134
|
+
return result
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
# ── Distinctness selection ───────────────────────────────────────
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def _compile_plan_shape(move: dict) -> frozenset[str]:
|
|
141
|
+
"""Extract the set of tool names from a move's compile_plan."""
|
|
142
|
+
plan = move.get("compile_plan") or []
|
|
143
|
+
return frozenset(step.get("tool", "") for step in plan if step.get("tool"))
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def select_distinct_variants(scored_moves: list[dict]) -> list[dict]:
    """Pick up to three genuinely different moves for variant generation.

    A candidate is rejected when its move_id has already been taken, or
    when an already-selected move shares both its family and its
    compile-plan tool set. Returns 0-3 moves, preserving input order.
    """
    picked: list[dict] = []
    seen_ids: set[str] = set()
    seen_signatures: list[tuple[str, frozenset]] = []  # (family, tool set)

    for candidate in scored_moves:
        if len(picked) == 3:
            break

        move_id = candidate.get("move_id", "")
        if move_id in seen_ids:
            continue

        family = candidate.get("family", "")
        plan = candidate.get("compile_plan") or []
        shape = frozenset(s.get("tool", "") for s in plan if s.get("tool"))

        if any(family == fam and shape == shp for fam, shp in seen_signatures):
            continue

        picked.append(candidate)
        seen_ids.add(move_id)
        seen_signatures.append((family, shape))

    return picked
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
# ── Variant building ─────────────────────────────────────────────
|
|
188
|
+
|
|
189
|
+
_NOVELTY_LEVELS = {"safe": 0.25, "strong": 0.55, "unexpected": 0.85}
|
|
190
|
+
_RISK_TO_EFFECT = {"low": "preserves", "medium": "evolves", "high": "contrasts"}
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def build_variant(
    label: str,
    move_dict: dict,
    song_brain: Optional[dict] = None,
    novelty_level: float = 0.5,
    variant_id: str = "",
) -> dict:
    """Assemble a presentable variant dict from a move plus SongBrain context.

    Fills the human-facing fields (what_changed / what_preserved /
    why_it_matters) from the move's targets, protect thresholds, risk level
    and the song's sacred elements. Scoring fields (taste_fit, score, rank,
    score_breakdown) start at neutral defaults — downstream ranking fills
    them in.
    """
    brain = song_brain or {}
    targets = move_dict.get("targets", {})
    protect = move_dict.get("protect", {})
    risk = move_dict.get("risk_level", "low")
    sacred = brain.get("sacred_elements", [])

    # Describe the change via target dimensions with signed one-decimal deltas.
    if targets:
        described = ", ".join(f"{dim} ({val:+.1f})" for dim, val in targets.items())
        what_changed = f"Targets {described}"
    else:
        what_changed = "Analytical suggestion"

    # Describe what stays safe: protect thresholds, then up to 3 sacred elements.
    preserved: list[str] = []
    for dim, thresh in protect.items():
        preserved.append(f"{dim} (threshold {thresh})")
    if sacred:
        descs = [e.get("description", e.get("element_type", "element")) for e in sacred[:3]]
        preserved.append(f"Sacred: {', '.join(descs)}")
    what_preserved = " | ".join(preserved) if preserved else "core elements"

    # Risk level determines the declared identity effect.
    identity_effect = _RISK_TO_EFFECT.get(risk, "preserves")

    # One-line rationale: risk label plus the move's stated intent.
    risk_label = {"low": "Low", "medium": "Moderate", "high": "High"}.get(risk, "Unknown")
    why = f"{risk_label} risk — {move_dict.get('intent', 'creative suggestion')}"
    if sacred and identity_effect == "preserves":
        why += f". Preserves {sacred[0].get('description', 'sacred elements')}"

    return {
        "variant_id": variant_id,
        "label": label,
        "move_id": move_dict.get("move_id", ""),
        "family": move_dict.get("family", ""),
        "intent": move_dict.get("intent", ""),
        "what_changed": what_changed,
        "what_preserved": what_preserved,
        "why_it_matters": why,
        "identity_effect": identity_effect,
        "novelty_level": novelty_level,
        "taste_fit": 0.5,
        "targets_snapshot": dict(targets),
        "compiled_plan": move_dict.get("compile_plan"),
        "score": 0.0,
        "rank": 0,
        "score_breakdown": {},
        "analytical_only": False,
        "distinctness_reason": "",
    }
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def build_analytical_variant(label: str, request_text: str, novelty_level: float, variant_id: str = "") -> dict:
    """Build a non-executable placeholder variant when no move matched.

    Mirrors the shape of build_variant()'s result, but carries no move,
    no compiled plan, and is flagged analytical_only.
    """
    variant = {
        "variant_id": variant_id,
        "label": label,
        "move_id": "",
        "family": "",
        "intent": f"Analytical suggestion for: {request_text}",
        "what_changed": "No specific move matched — consider rephrasing the request",
        "what_preserved": "core elements",
        "why_it_matters": "No matching moves found — this is a directional suggestion only",
        "identity_effect": "preserves",
        "novelty_level": novelty_level,
        "taste_fit": 0.5,
        "targets_snapshot": {},
        "compiled_plan": None,
        "score": 0.0,
        "rank": 0,
        "score_breakdown": {},
        "analytical_only": True,
        "distinctness_reason": "No matching executable move — directional suggestion only",
    }
    return variant
|
|
273
|
+
|
|
274
|
+
|
|
275
|
+
# ── Taste fit scoring ────────────────────────────────────────────
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def compute_taste_fit(move_dict: dict, taste_graph: object = None) -> float:
    """Score how well a move fits learned user taste.

    Falls back to a neutral 0.5 whenever no usable taste signal exists:
    no graph, a graph without rank_moves/evidence_count, zero evidence,
    or an empty ranking result.
    """
    neutral = 0.5
    usable = (
        taste_graph is not None
        and hasattr(taste_graph, "rank_moves")
        and getattr(taste_graph, "evidence_count", 0) != 0
    )
    if not usable:
        return neutral

    ranked = taste_graph.rank_moves([move_dict])
    return ranked[0].get("taste_score", neutral) if ranked else neutral
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
# ── Ranking ──────────────────────────────────────────────────────
|
|
294
|
+
|
|
295
|
+
_IDENTITY_BASE = {"preserves": 0.9, "evolves": 0.7, "contrasts": 0.4, "resets": 0.15}
|
|
296
|
+
|
|
297
|
+
|
|
298
|
+
def rank_variants(
    variant_dicts: list[dict],
    song_brain: Optional[dict] = None,
    novelty_band: float = 0.5,
    taste_evidence: int = -1,
) -> list[dict]:
    """Rank variants by taste + identity + novelty + coherence.

    Mutates each variant dict in place — fills "score", "score_breakdown"
    and "rank" — then sorts the list by descending score and returns the
    same list object.

    Args:
        variant_dicts: dicts from build_variant()/build_analytical_variant().
        song_brain: optional snapshot supplying sacred_elements and
            identity_confidence.
        novelty_band: preferred novelty; novelty scoring is a Gaussian
            centered here (sigma 0.15).
        taste_evidence: taste-graph evidence count; 0 zeroes the taste
            weight via _select_weights(). -1 means "unknown".
    """
    song_brain = song_brain or {}
    sacred = song_brain.get("sacred_elements", [])
    identity_confidence = song_brain.get("identity_confidence", 0.5)

    # Context-dependent weight profile (see _select_weights for the rules).
    weights = _select_weights(
        identity_confidence=identity_confidence,
        taste_evidence=taste_evidence,
        all_same_family=_all_same_family(variant_dicts),
    )

    # Precomputed per-variant lookups used by the coherence penalties below.
    move_ids = [v.get("move_id", "") for v in variant_dicts]
    all_target_dims = [set(v.get("targets_snapshot", {}).keys()) for v in variant_dicts]

    for i, v in enumerate(variant_dicts):
        taste_score = v.get("taste_fit", 0.5)

        # Identity component: base value from the declared effect, reduced
        # when a non-preserving variant targets a sacred element's
        # dimension (penalty scaled by that element's salience).
        effect = v.get("identity_effect", "preserves")
        base = _IDENTITY_BASE.get(effect, 0.5)
        targets = v.get("targets_snapshot", {})
        sacred_penalty = sum(
            s.get("salience", 0.5) * 0.15
            for s in sacred
            if s.get("element_type") in targets and effect != "preserves"
        )
        identity_score = max(0.0, base - sacred_penalty)

        # Novelty — bell curve centered on user's novelty_band
        nov = v.get("novelty_level", 0.5)
        novelty_score = math.exp(-((nov - novelty_band) ** 2) / (2 * 0.15 ** 2))

        # Coherence — penalize same move_id (-0.15) and identical non-empty
        # target-dimension sets (-0.1, applied at most once per variant).
        coherence_score = 1.0
        mid = move_ids[i]
        if mid and move_ids.count(mid) > 1:
            coherence_score -= 0.15
        if i < len(all_target_dims):
            for j, other_dims in enumerate(all_target_dims):
                if j != i and all_target_dims[i] == other_dims and all_target_dims[i]:
                    coherence_score -= 0.1
                    break
        coherence_score = max(0.0, coherence_score)

        # Weighted composite, clamped to [0, 1].
        composite = (
            taste_score * weights["taste"]
            + identity_score * weights["identity"]
            + novelty_score * weights["novelty"]
            + coherence_score * weights["coherence"]
        )

        v["score"] = round(max(0.0, min(1.0, composite)), 3)
        v["score_breakdown"] = {
            "taste": round(taste_score, 3),
            "identity": round(identity_score, 3),
            "novelty": round(novelty_score, 3),
            "coherence": round(coherence_score, 3),
            "weights": dict(weights),
        }

    # Sort best-first and assign 1-based ranks.
    variant_dicts.sort(key=lambda v: -v["score"])
    for i, v in enumerate(variant_dicts):
        v["rank"] = i + 1

    return variant_dicts
|
|
369
|
+
|
|
370
|
+
|
|
371
|
+
def _select_weights(
|
|
372
|
+
identity_confidence: float,
|
|
373
|
+
taste_evidence: int,
|
|
374
|
+
all_same_family: bool,
|
|
375
|
+
) -> dict[str, float]:
|
|
376
|
+
"""Select ranking weights based on context."""
|
|
377
|
+
if taste_evidence == 0:
|
|
378
|
+
return {"taste": 0.00, "identity": 0.40, "novelty": 0.25, "coherence": 0.35}
|
|
379
|
+
if identity_confidence > 0.7:
|
|
380
|
+
return {"taste": 0.20, "identity": 0.40, "novelty": 0.10, "coherence": 0.30}
|
|
381
|
+
if all_same_family:
|
|
382
|
+
return {"taste": 0.25, "identity": 0.25, "novelty": 0.15, "coherence": 0.35}
|
|
383
|
+
return {"taste": 0.25, "identity": 0.30, "novelty": 0.20, "coherence": 0.25}
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
def _all_same_family(variants: list[dict]) -> bool:
|
|
387
|
+
"""Check if all variants are from the same move family."""
|
|
388
|
+
families = {v.get("family", "") for v in variants}
|
|
389
|
+
families.discard("")
|
|
390
|
+
return len(families) <= 1 and len(variants) > 1
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
# ── Pipeline orchestrator ────────────────────────────────────────
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
def generate_wonder_variants(
    request_text: str,
    diagnosis: dict | None = None,
    kernel_id: str = "",
    song_brain: dict | None = None,
    taste_graph: object = None,
    active_constraints: object = None,
) -> dict:
    """Full wonder mode pipeline: discover -> select distinct -> build -> taste -> rank.

    Always produces exactly three variants labeled "safe" / "strong" /
    "unexpected"; when fewer distinct executable moves are found, the
    remainder are analytical-only placeholders and degraded_reason is set.

    Args:
        request_text: the user's creative request.
        diagnosis: optional WonderDiagnosis dict; only "candidate_domains"
            is read here, to narrow move discovery.
        kernel_id: session kernel ID, folded into the deterministic
            variant-ID prefix.
        song_brain: optional SongBrain snapshot (sacred elements,
            identity_confidence) passed through to building and ranking.
        taste_graph: optional taste model used for fit scoring and for the
            novelty band / evidence count.
        active_constraints: forwarded to discover_moves for plan filtering.

    Returns:
        Result dict with "variants" (ranked), "recommended" (top variant's
        ID), counts, and a possibly-empty "degraded_reason".
    """
    song_brain = song_brain or {}
    diagnosis = diagnosis or {}
    # Deterministic prefix: same request + kernel always yields the same IDs.
    set_prefix = _wonder_id(request_text, kernel_id)

    candidate_domains = diagnosis.get("candidate_domains") or None
    moves = discover_moves(request_text, taste_graph, active_constraints, candidate_domains)
    distinct = select_distinct_variants(moves)  # at most 3 moves

    labels = ["safe", "strong", "unexpected"]
    variants = []

    # Build executable variants from distinct moves
    for i, move in enumerate(distinct):
        label = labels[i]
        # Scale the move's targets/protect to the tier before building.
        move_with_envelope = _with_envelope(move, label)
        v = build_variant(
            label=label,
            move_dict=move_with_envelope,
            song_brain=song_brain,
            novelty_level=_NOVELTY_LEVELS.get(label, 0.5),
            variant_id=f"{set_prefix}_{label}",
        )
        if taste_graph is not None:
            # Score taste on envelope-adjusted move for consistency with targets_snapshot
            v["taste_fit"] = compute_taste_fit(move_with_envelope, taste_graph)
        v["distinctness_reason"] = _explain_distinctness(move, distinct, i)
        variants.append(v)

    executable_count = len(variants)

    # Pad with analytical variants
    while len(variants) < 3:
        idx = len(variants)
        v = build_analytical_variant(
            label=labels[idx],
            request_text=request_text,
            novelty_level=_NOVELTY_LEVELS.get(labels[idx], 0.5),
            variant_id=f"{set_prefix}_{labels[idx]}",
        )
        variants.append(v)

    # Pull ranking context from the taste graph when it exposes one.
    novelty_band = 0.5
    taste_evidence = 0
    if taste_graph is not None and hasattr(taste_graph, "novelty_band"):
        novelty_band = taste_graph.novelty_band
        taste_evidence = getattr(taste_graph, "evidence_count", 0)

    ranked = rank_variants(
        variants,
        song_brain=song_brain,
        novelty_band=novelty_band,
        taste_evidence=taste_evidence,
    )

    degraded_reason = ""
    if executable_count == 0:
        degraded_reason = "No matching executable moves found"
    elif executable_count == 1:
        degraded_reason = "Only 1 distinct executable move found"

    return {
        "mode": "wonder",
        "request": request_text,
        "variants": ranked,
        "recommended": ranked[0]["variant_id"] if ranked else "",
        "taste_evidence": taste_evidence,
        "identity_confidence": song_brain.get("identity_confidence", 0.0),
        "move_count_matched": len(moves),
        "variant_count_actual": executable_count,
        "degraded_reason": degraded_reason,
    }
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
def _explain_distinctness(move: dict, all_moves: list[dict], index: int) -> str:
    """One-line reason why the move at *index* differs from its siblings.

    Cites the family when no sibling shares it; otherwise cites the move's
    compile-plan tool set.
    """
    family = move.get("family", "")
    sibling_families = {
        m.get("family", "") for pos, m in enumerate(all_moves) if pos != index
    }
    if family not in sibling_families:
        return f"Different family: {family}"

    plan = move.get("compile_plan") or []
    tools = sorted({step.get("tool", "") for step in plan if step.get("tool")})
    return f"Different approach: {', '.join(tools)}"
|
|
488
|
+
|
|
489
|
+
|
|
490
|
+
def _wonder_id(request_text: str, kernel_id: str) -> str:
|
|
491
|
+
"""Deterministic variant ID prefix — no timestamp."""
|
|
492
|
+
seed = json.dumps({"r": request_text, "k": kernel_id}, sort_keys=True)
|
|
493
|
+
return "wm_" + hashlib.sha256(seed.encode()).hexdigest()[:10]
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
"""WonderSession and WonderDiagnosis — thin lifecycle coordinator.
|
|
2
|
+
|
|
3
|
+
WonderSession ties the Wonder lifecycle together: diagnosis, variant
|
|
4
|
+
generation, preview, commit/discard, and outcome recording.
|
|
5
|
+
|
|
6
|
+
WonderDiagnosis is a structured diagnosis built from stuckness,
|
|
7
|
+
SongBrain, action ledger, and creative threads.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from __future__ import annotations

from dataclasses import asdict, dataclass, field
from typing import ClassVar, Optional
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
_MAX_WONDER_SESSIONS = 10
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class WonderDiagnosis:
    """Structured diagnosis driving Wonder variant generation.

    Produced before variants are generated; generate_wonder_variants reads
    candidate_domains from its dict form to narrow move discovery.
    """

    trigger_reason: str  # "user_request", "stuckness_detected", "repeated_undos"
    problem_class: str  # from RESCUE_TYPES + "exploration"
    current_identity: str  # from SongBrain.identity_core
    # SongBrain sacred-element dicts — downstream code reads their
    # description/element_type/salience keys.
    sacred_elements: list[dict] = field(default_factory=list)
    # Dimensions flagged as blocked — not read in this module; presumably
    # consumed by the tools layer (confirm against wonder_mode/tools.py).
    blocked_dimensions: list[str] = field(default_factory=list)
    # Move families worth exploring; feeds discover_moves() domain filtering.
    candidate_domains: list[str] = field(default_factory=list)
    # Requested variant count (the pipeline produces at most 3).
    variant_budget: int = 3
    # Diagnosis confidence, 0.0-1.0.
    confidence: float = 0.0
    # Names of subsystems unavailable during diagnosis.
    degraded_capabilities: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Serialize to a plain dict via dataclasses.asdict."""
        return asdict(self)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
@dataclass
class WonderSession:
    """Thin lifecycle coordinator for a Wonder Mode session.

    Tracks the diagnosis, generated variants, preview linkage, final
    outcome, and a small state machine (``status``) moving from
    "diagnosing" through "variants_ready"/"previewing" to "resolved".
    """

    session_id: str
    request_text: str
    kernel_id: str = ""

    # Diagnosis (None until computed).
    diagnosis: Optional[WonderDiagnosis] = None

    # Lifecycle references
    creative_thread_id: str = ""
    preview_set_id: str = ""

    # Variants
    variants: list[dict] = field(default_factory=list)
    recommended: str = ""
    variant_count_actual: int = 0

    # Outcome
    selected_variant_id: str = ""
    outcome: str = "pending"  # pending, committed, rejected_all, abandoned

    # Degradation
    degraded_reason: str = ""

    status: str = "diagnosing"  # diagnosing, variants_ready, previewing, resolved

    # Valid state transitions. Declared as a ClassVar (shared, not an
    # __init__ parameter) — previously this was a dataclass field, which
    # leaked the table into asdict()/to_dict() output, including
    # non-JSON-serializable set values.
    _VALID_TRANSITIONS: ClassVar[dict[str, set[str]]] = {
        "diagnosing": {"variants_ready"},
        "variants_ready": {"previewing", "resolved"},
        "previewing": {"resolved"},
        "resolved": set(),  # terminal
    }

    def transition_to(self, new_status: str) -> bool:
        """Attempt a state transition. Returns False if invalid."""
        valid = self._VALID_TRANSITIONS.get(self.status, set())
        if new_status not in valid:
            return False
        self.status = new_status
        return True

    def to_dict(self) -> dict:
        """Serialize to a plain dict; diagnosis is expanded via its own to_dict()."""
        d = asdict(self)
        if self.diagnosis:
            d["diagnosis"] = self.diagnosis.to_dict()
        return d
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
# ── In-memory store ───────────────────────────────────────────────
|
|
90
|
+
|
|
91
|
+
_wonder_sessions: dict[str, WonderSession] = {}
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def store_wonder_session(ws: WonderSession) -> None:
    """Insert *ws* into the module store, evicting oldest entries (FIFO).

    Eviction happens whenever capacity (_MAX_WONDER_SESSIONS) is exceeded;
    an evicted session still marked "pending" is flipped to "abandoned".
    """
    _wonder_sessions[ws.session_id] = ws
    while len(_wonder_sessions) > _MAX_WONDER_SESSIONS:
        oldest_id = next(iter(_wonder_sessions))
        dropped = _wonder_sessions.pop(oldest_id)
        if dropped.outcome == "pending":
            dropped.outcome = "abandoned"
|
|
102
|
+
|
|
103
|
+
|
|
104
|
+
def get_wonder_session(session_id: str) -> Optional[WonderSession]:
    """Look up a stored WonderSession; None when the ID is unknown."""
    try:
        return _wonder_sessions[session_id]
    except KeyError:
        return None
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def find_session_by_preview_set(set_id: str) -> Optional[WonderSession]:
    """Return the first stored WonderSession linked to *set_id*, else None."""
    return next(
        (ws for ws in _wonder_sessions.values() if ws.preview_set_id == set_id),
        None,
    )
|