livepilot 1.9.14 → 1.9.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +82 -0
  4. package/CONTRIBUTING.md +1 -1
  5. package/README.md +8 -8
  6. package/livepilot/.Codex-plugin/plugin.json +2 -2
  7. package/livepilot/.claude-plugin/plugin.json +2 -2
  8. package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
  9. package/livepilot/skills/livepilot-core/SKILL.md +81 -6
  10. package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
  11. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  12. package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
  13. package/livepilot/skills/livepilot-release/SKILL.md +13 -13
  14. package/m4l_device/livepilot_bridge.js +32 -15
  15. package/mcp_server/__init__.py +1 -1
  16. package/mcp_server/connection.py +24 -2
  17. package/mcp_server/curves.py +14 -6
  18. package/mcp_server/evaluation/__init__.py +1 -0
  19. package/mcp_server/evaluation/fabric.py +575 -0
  20. package/mcp_server/evaluation/feature_extractors.py +84 -0
  21. package/mcp_server/evaluation/policy.py +67 -0
  22. package/mcp_server/evaluation/tools.py +53 -0
  23. package/mcp_server/m4l_bridge.py +9 -1
  24. package/mcp_server/memory/__init__.py +11 -2
  25. package/mcp_server/memory/anti_memory.py +78 -0
  26. package/mcp_server/memory/promotion.py +94 -0
  27. package/mcp_server/memory/session_memory.py +108 -0
  28. package/mcp_server/memory/taste_memory.py +158 -0
  29. package/mcp_server/memory/technique_store.py +27 -18
  30. package/mcp_server/memory/tools.py +112 -0
  31. package/mcp_server/mix_engine/__init__.py +1 -0
  32. package/mcp_server/mix_engine/critics.py +299 -0
  33. package/mcp_server/mix_engine/models.py +152 -0
  34. package/mcp_server/mix_engine/planner.py +103 -0
  35. package/mcp_server/mix_engine/state_builder.py +316 -0
  36. package/mcp_server/mix_engine/tools.py +220 -0
  37. package/mcp_server/performance_engine/__init__.py +1 -0
  38. package/mcp_server/performance_engine/models.py +148 -0
  39. package/mcp_server/performance_engine/planner.py +267 -0
  40. package/mcp_server/performance_engine/safety.py +165 -0
  41. package/mcp_server/performance_engine/tools.py +183 -0
  42. package/mcp_server/project_brain/__init__.py +6 -0
  43. package/mcp_server/project_brain/arrangement_graph.py +64 -0
  44. package/mcp_server/project_brain/automation_graph.py +72 -0
  45. package/mcp_server/project_brain/builder.py +123 -0
  46. package/mcp_server/project_brain/capability_graph.py +64 -0
  47. package/mcp_server/project_brain/models.py +282 -0
  48. package/mcp_server/project_brain/refresh.py +86 -0
  49. package/mcp_server/project_brain/role_graph.py +103 -0
  50. package/mcp_server/project_brain/session_graph.py +51 -0
  51. package/mcp_server/project_brain/tools.py +144 -0
  52. package/mcp_server/reference_engine/__init__.py +1 -0
  53. package/mcp_server/reference_engine/gap_analyzer.py +239 -0
  54. package/mcp_server/reference_engine/models.py +105 -0
  55. package/mcp_server/reference_engine/profile_builder.py +149 -0
  56. package/mcp_server/reference_engine/tactic_router.py +117 -0
  57. package/mcp_server/reference_engine/tools.py +236 -0
  58. package/mcp_server/runtime/__init__.py +1 -0
  59. package/mcp_server/runtime/action_ledger.py +117 -0
  60. package/mcp_server/runtime/action_ledger_models.py +91 -0
  61. package/mcp_server/runtime/action_tools.py +57 -0
  62. package/mcp_server/runtime/capability_state.py +219 -0
  63. package/mcp_server/runtime/safety_kernel.py +339 -0
  64. package/mcp_server/runtime/safety_tools.py +42 -0
  65. package/mcp_server/runtime/tools.py +67 -0
  66. package/mcp_server/server.py +17 -0
  67. package/mcp_server/sound_design/__init__.py +1 -0
  68. package/mcp_server/sound_design/critics.py +297 -0
  69. package/mcp_server/sound_design/models.py +147 -0
  70. package/mcp_server/sound_design/planner.py +104 -0
  71. package/mcp_server/sound_design/tools.py +297 -0
  72. package/mcp_server/tools/_agent_os_engine.py +947 -0
  73. package/mcp_server/tools/_composition_engine.py +1530 -0
  74. package/mcp_server/tools/_conductor.py +199 -0
  75. package/mcp_server/tools/_conductor_budgets.py +222 -0
  76. package/mcp_server/tools/_evaluation_contracts.py +91 -0
  77. package/mcp_server/tools/_form_engine.py +416 -0
  78. package/mcp_server/tools/_motif_engine.py +351 -0
  79. package/mcp_server/tools/_planner_engine.py +516 -0
  80. package/mcp_server/tools/_research_engine.py +542 -0
  81. package/mcp_server/tools/_research_provider.py +185 -0
  82. package/mcp_server/tools/_snapshot_normalizer.py +49 -0
  83. package/mcp_server/tools/agent_os.py +448 -0
  84. package/mcp_server/tools/analyzer.py +18 -0
  85. package/mcp_server/tools/automation.py +25 -10
  86. package/mcp_server/tools/composition.py +645 -0
  87. package/mcp_server/tools/devices.py +15 -1
  88. package/mcp_server/tools/midi_io.py +3 -1
  89. package/mcp_server/tools/motif.py +104 -0
  90. package/mcp_server/tools/planner.py +144 -0
  91. package/mcp_server/tools/research.py +223 -0
  92. package/mcp_server/tools/tracks.py +21 -6
  93. package/mcp_server/tools/transport.py +10 -2
  94. package/mcp_server/transition_engine/__init__.py +6 -0
  95. package/mcp_server/transition_engine/archetypes.py +167 -0
  96. package/mcp_server/transition_engine/critics.py +340 -0
  97. package/mcp_server/transition_engine/models.py +90 -0
  98. package/mcp_server/transition_engine/tools.py +291 -0
  99. package/mcp_server/translation_engine/__init__.py +5 -0
  100. package/mcp_server/translation_engine/critics.py +297 -0
  101. package/mcp_server/translation_engine/models.py +27 -0
  102. package/mcp_server/translation_engine/tools.py +108 -0
  103. package/package.json +2 -2
  104. package/remote_script/LivePilot/__init__.py +1 -1
  105. package/remote_script/LivePilot/arrangement.py +21 -3
  106. package/remote_script/LivePilot/clips.py +22 -6
  107. package/remote_script/LivePilot/notes.py +9 -1
  108. package/remote_script/LivePilot/server.py +6 -6
@@ -0,0 +1,67 @@
1
+ """Policy — hard rule enforcement for all evaluators.
2
+
3
+ Consistent keep/undo semantics shared across sonic, composition,
4
+ and all future evaluators.
5
+
6
+ Design: EVALUATION_FABRIC_V1.md, section 8
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+
12
+ def apply_hard_rules(
13
+ goal_progress: float,
14
+ collateral_damage: float,
15
+ protection_violated: bool,
16
+ measurable_count: int,
17
+ score: float,
18
+ target_count: int,
19
+ ) -> tuple[bool, list[str]]:
20
+ """Enforce hard rules and return (keep_change, failure_reasons).
21
+
22
+ Rules (evaluated in order):
23
+ 1. All targets unmeasurable + no protection violation -> defer to agent
24
+ 2. Protection violated -> force undo
25
+ 3. Measurable delta <= 0 when measurable targets exist -> force undo
26
+ 4. Score < 0.40 -> force undo
27
+
28
+ Args:
29
+ goal_progress: weighted sum of dimension deltas
30
+ collateral_damage: max drop across protected dimensions
31
+ protection_violated: any protected dimension below threshold
32
+ measurable_count: how many target dimensions were measurable
33
+ score: composite quality score (0-1)
34
+ target_count: total number of target dimensions
35
+
36
+ Returns:
37
+ (keep_change, list_of_rule_failure_reasons)
38
+ """
39
+ failures: list[str] = []
40
+
41
+ # Rule 1: all unmeasurable + no protection violation -> defer
42
+ if measurable_count == 0 and not protection_violated:
43
+ return True, [
44
+ "No measurable target dimensions — deferring keep/undo "
45
+ "to agent musical judgment"
46
+ ]
47
+
48
+ # Rule 2: protection violated -> force undo
49
+ if protection_violated:
50
+ failures.append("HARD RULE: protected dimension violated")
51
+
52
+ # Rule 3: no measurable improvement -> force undo
53
+ if measurable_count > 0:
54
+ measurable_delta = goal_progress / max(measurable_count, 1)
55
+ if measurable_delta <= 0:
56
+ failures.append(
57
+ "HARD RULE: measurable delta <= 0 — no measurable improvement"
58
+ )
59
+
60
+ # Rule 4: score threshold -> force undo
61
+ if score < 0.40:
62
+ failures.append(
63
+ f"HARD RULE: total score {score:.3f} < 0.40 threshold"
64
+ )
65
+
66
+ keep_change = len(failures) == 0
67
+ return keep_change, failures
@@ -0,0 +1,53 @@
1
+ """Evaluation Fabric MCP tools — unified evaluation entry points.
2
+
3
+ Provides evaluate_with_fabric as a generic evaluation tool that routes
4
+ to the appropriate engine-specific evaluator via fabric.evaluate().
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from mcp.server.fastmcp import Context
10
+
11
+ from ..server import mcp
12
+ from ..tools._evaluation_contracts import EvaluationRequest, EvaluationResult
13
+ from ..tools._snapshot_normalizer import normalize_sonic_snapshot
14
+ from . import fabric
15
+
16
+
17
@mcp.tool()
def evaluate_with_fabric(
    ctx: Context,
    engine: str,
    before_snapshot: dict,
    after_snapshot: dict,
    targets: dict | None = None,
    protect: dict | None = None,
) -> dict:
    """Evaluate a move using the unified Evaluation Fabric.

    Routes to the appropriate engine-specific evaluator.

    Args:
        engine: "sonic", "composition", "mix", "transition", or "translation"
        before_snapshot: State before the move (format depends on engine)
        after_snapshot: State after the move (format depends on engine)
        targets: Goal targets — for sonic: {dimension: weight}, ignored for others
        protect: Protected dimensions — for sonic: {dimension: threshold}

    Returns:
        EvaluationResult as dict with score, keep_change, goal_progress,
        collateral_damage, dimension_changes, notes, etc.
    """
    # Normalize optional mappings so the request never carries None.
    goal_targets = {} if not targets else targets
    protected = {} if not protect else protect

    request = EvaluationRequest(
        # Empty/None engine falls back to the sonic evaluator.
        engine=engine if engine else "sonic",
        goal={"targets": goal_targets},
        before=before_snapshot,
        after=after_snapshot,
        protect=protected,
    )

    return fabric.evaluate(request).to_dict()
@@ -338,13 +338,15 @@ class M4LBridge:
338
338
  self.receiver = receiver
339
339
  self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
340
340
  self._m4l_addr = ("127.0.0.1", 9881)
341
- self._cmd_lock = asyncio.Lock()
341
+ self._cmd_lock: Optional[asyncio.Lock] = None
342
342
 
343
343
  async def send_command(self, command: str, *args: Any, timeout: float = 5.0) -> dict:
344
344
  """Send an OSC command to the M4L device and wait for the response."""
345
345
  if not self.cache.is_connected:
346
346
  return {"error": "LivePilot Analyzer not connected. Drop it on the master track."}
347
347
 
348
+ if self._cmd_lock is None:
349
+ self._cmd_lock = asyncio.Lock()
348
350
  async with self._cmd_lock:
349
351
  # Create a future for the response
350
352
  loop = asyncio.get_running_loop()
@@ -362,6 +364,10 @@ class M4LBridge:
362
364
  result = await asyncio.wait_for(future, timeout=timeout)
363
365
  return result
364
366
  except asyncio.TimeoutError:
367
+ # Clear the stale future so a delayed response doesn't resolve
368
+ # a future that no caller is waiting on
369
+ if self.receiver:
370
+ self.receiver.set_response_future(None)
365
371
  return {"error": "M4L bridge timeout — device may be busy or removed"}
366
372
 
367
373
  async def send_capture(self, command: str, *args: Any, timeout: float = 35.0) -> dict:
@@ -369,6 +375,8 @@ class M4LBridge:
369
375
  if not self.cache.is_connected:
370
376
  return {"error": "LivePilot Analyzer not connected. Drop it on the master track."}
371
377
 
378
+ if self._cmd_lock is None:
379
+ self._cmd_lock = asyncio.Lock()
372
380
  async with self._cmd_lock:
373
381
  # Cancel any stale capture future before creating a new one
374
382
  if self.receiver and self.receiver._capture_future and not self.receiver._capture_future.done():
@@ -1,5 +1,14 @@
1
- """LivePilot technique memorypersistent storage for learned patterns."""
1
+ """Memory Fabric V2extended memory with anti-memory, promotion, session memory."""
2
2
 
3
3
  from .technique_store import TechniqueStore
4
+ from .anti_memory import AntiMemoryStore, AntiPreference
5
+ from .promotion import PromotionCandidate, evaluate_promotion, batch_evaluate_promotions
4
6
 
5
- __all__ = ["TechniqueStore"]
7
+ __all__ = [
8
+ "TechniqueStore",
9
+ "AntiMemoryStore",
10
+ "AntiPreference",
11
+ "PromotionCandidate",
12
+ "evaluate_promotion",
13
+ "batch_evaluate_promotions",
14
+ ]
@@ -0,0 +1,78 @@
1
+ """AntiMemory — tracks user dislikes and anti-preferences.
2
+
3
+ Pure Python, zero I/O. Records dimensions the user repeatedly rejects
4
+ so that planners and critics can caution against repeating them.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import time
10
+ from dataclasses import dataclass, field
11
+
12
+
13
+ @dataclass
14
+ class AntiPreference:
15
+ """A single anti-preference: something the user dislikes."""
16
+
17
+ dimension: str # e.g. "brightness", "width", "density"
18
+ direction: str # "increase" or "decrease"
19
+ strength: float = 0.0 # 0-1, how strongly disliked
20
+ evidence_count: int = 0 # how many times undone/rejected
21
+ last_seen_ms: int = 0
22
+
23
+ def to_dict(self) -> dict:
24
+ return {
25
+ "dimension": self.dimension,
26
+ "direction": self.direction,
27
+ "strength": self.strength,
28
+ "evidence_count": self.evidence_count,
29
+ "last_seen_ms": self.last_seen_ms,
30
+ }
31
+
32
+
33
+ class AntiMemoryStore:
34
+ """In-memory store for anti-preferences."""
35
+
36
+ def __init__(self) -> None:
37
+ self._prefs: dict[tuple[str, str], AntiPreference] = {}
38
+
39
+ def record_dislike(self, dimension: str, direction: str) -> AntiPreference:
40
+ """Record or increment an anti-preference.
41
+
42
+ Strength grows with evidence but caps at 1.0.
43
+ """
44
+ key = (dimension, direction)
45
+ pref = self._prefs.get(key)
46
+ if pref is None:
47
+ pref = AntiPreference(dimension=dimension, direction=direction)
48
+ self._prefs[key] = pref
49
+
50
+ pref.evidence_count += 1
51
+ # Strength: asymptotic growth toward 1.0
52
+ pref.strength = min(1.0, pref.evidence_count * 0.2)
53
+ pref.last_seen_ms = int(time.time() * 1000)
54
+ return pref
55
+
56
+ def get_anti_preferences(self) -> list[AntiPreference]:
57
+ """Return all active anti-preferences."""
58
+ return list(self._prefs.values())
59
+
60
+ def get_anti_preference(
61
+ self, dimension: str, direction: str
62
+ ) -> AntiPreference | None:
63
+ """Return a specific anti-preference, or None."""
64
+ return self._prefs.get((dimension, direction))
65
+
66
+ def should_caution(self, dimension: str, direction: str) -> bool:
67
+ """True if evidence_count >= 2 for the given dimension+direction."""
68
+ pref = self._prefs.get((dimension, direction))
69
+ if pref is None:
70
+ return False
71
+ return pref.evidence_count >= 2
72
+
73
+ def to_dict(self) -> dict:
74
+ """Serialize the full store."""
75
+ return {
76
+ "anti_preferences": [p.to_dict() for p in self._prefs.values()],
77
+ "count": len(self._prefs),
78
+ }
@@ -0,0 +1,94 @@
1
+ """Promotion rules — decide which ledger entries deserve long-term memory.
2
+
3
+ Pure Python, zero I/O. Evaluates LedgerEntry dicts against promotion
4
+ criteria and returns structured candidates.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from dataclasses import dataclass, field
10
+
11
+
12
@dataclass
class PromotionCandidate:
    """A ledger entry evaluated for memory promotion."""

    ledger_entry_id: str
    engine: str
    intent: str
    score: float
    dimension_improvements: dict = field(default_factory=dict)
    eligible: bool = False
    reason: str = ""

    def to_dict(self) -> dict:
        """Serialize to a plain dict (improvements are copied)."""
        return {
            "ledger_entry_id": self.ledger_entry_id,
            "engine": self.engine,
            "intent": self.intent,
            "score": self.score,
            "dimension_improvements": dict(self.dimension_improvements),
            "eligible": self.eligible,
            "reason": self.reason,
        }


def evaluate_promotion(entry_dict: dict) -> PromotionCandidate:
    """Evaluate a single ledger entry dict for memory promotion.

    Rules:
    - must be kept (kept=True)
    - score >= 0.6
    - at least one dimension improvement > 0.05
    - non-empty intent
    """
    score = entry_dict.get("score", 0.0)
    # Dimension improvements live under the nested "evaluation" sub-dict.
    improvements = entry_dict.get("evaluation", {}).get("dimension_improvements", {})

    candidate = PromotionCandidate(
        ledger_entry_id=entry_dict.get("id", "unknown"),
        engine=entry_dict.get("engine", ""),
        intent=entry_dict.get("intent", ""),
        score=score,
        dimension_improvements=improvements,
    )

    # First failing rule wins; only an entry passing all four becomes eligible.
    if not entry_dict.get("kept", False):
        candidate.reason = "not kept — entry was undone or rejected"
    elif score < 0.6:
        candidate.reason = f"score too low ({score:.2f} < 0.60)"
    elif not candidate.intent or not candidate.intent.strip():
        candidate.reason = "empty intent — no semantic goal recorded"
    elif not any(delta > 0.05 for delta in improvements.values()):
        candidate.reason = "no dimension improvement > 0.05"
    else:
        candidate.eligible = True
        candidate.reason = "meets all promotion criteria"

    return candidate


def batch_evaluate_promotions(entries: list[dict]) -> list[PromotionCandidate]:
    """Evaluate multiple entries, return only eligible ones."""
    eligible: list[PromotionCandidate] = []
    for entry in entries:
        candidate = evaluate_promotion(entry)
        if candidate.eligible:
            eligible.append(candidate)
    return eligible
@@ -0,0 +1,108 @@
1
+ """SessionMemory — ephemeral per-session observations, hypotheses, decisions.
2
+
3
+ Pure Python, zero I/O. Tracks what happened *this* session so that engines
4
+ can reference recent context without polluting long-term memory.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import time
10
+ import uuid
11
+ from dataclasses import dataclass, field
12
+ from typing import Optional
13
+
14
_VALID_CATEGORIES = {"observation", "hypothesis", "decision", "issue"}


@dataclass
class SessionMemoryEntry:
    """Ephemeral per-session memory — what happened this session."""

    id: str
    timestamp_ms: int
    category: str  # one of _VALID_CATEGORIES
    content: str
    engine: str  # which engine created this
    confidence: float
    related_tracks: list[int] = field(default_factory=list)
    expires_with_session: bool = True

    def to_dict(self) -> dict:
        """Serialize to a plain dict (track list is copied)."""
        return {
            "id": self.id,
            "timestamp_ms": self.timestamp_ms,
            "category": self.category,
            "content": self.content,
            "engine": self.engine,
            "confidence": self.confidence,
            "related_tracks": list(self.related_tracks),
            "expires_with_session": self.expires_with_session,
        }


class SessionMemoryStore:
    """In-memory store for session-scoped observations and decisions."""

    def __init__(self) -> None:
        # Entries are kept in insertion order; get_recent reverses.
        self._entries: list[SessionMemoryEntry] = []

    def add(
        self,
        category: str,
        content: str,
        engine: str,
        confidence: float = 0.5,
        tracks: Optional[list[int]] = None,
    ) -> str:
        """Add a session memory entry. Returns the new entry id.

        Raises:
            ValueError: if category is not one of _VALID_CATEGORIES.
        """
        if category not in _VALID_CATEGORIES:
            raise ValueError(
                f"category must be one of {_VALID_CATEGORIES}, got {category!r}"
            )
        # Clamp confidence into [0, 1].
        clamped = min(1.0, max(0.0, confidence))

        entry = SessionMemoryEntry(
            id=f"smem_{uuid.uuid4().hex[:8]}",
            timestamp_ms=int(time.time() * 1000),
            category=category,
            content=content,
            engine=engine,
            confidence=clamped,
            related_tracks=[] if not tracks else list(tracks),
        )
        self._entries.append(entry)
        return entry.id

    def get_recent(
        self,
        limit: int = 10,
        category: Optional[str] = None,
        engine: Optional[str] = None,
    ) -> list[SessionMemoryEntry]:
        """Return the most recent entries (newest first), optionally filtered."""
        selected = self._entries
        if category:
            selected = [e for e in selected if e.category == category]
        if engine:
            selected = [e for e in selected if e.engine == engine]
        newest_first = selected[::-1]
        return newest_first[:limit]

    def get_by_tracks(self, track_indices: list[int]) -> list[SessionMemoryEntry]:
        """Return entries related to any of the given track indices."""
        wanted = set(track_indices)
        matches: list[SessionMemoryEntry] = []
        for entry in self._entries:
            if any(t in wanted for t in entry.related_tracks):
                matches.append(entry)
        return matches

    def clear(self) -> None:
        """Wipe all session memory."""
        del self._entries[:]

    def to_dict(self) -> dict:
        """Serialize the full store."""
        return {
            "entries": [entry.to_dict() for entry in self._entries],
            "count": len(self._entries),
        }
@@ -0,0 +1,158 @@
1
+ """TasteMemory — extended taste tracking beyond quality dimensions.
2
+
3
+ Pure Python, zero I/O. Infers user taste from kept/undone outcomes
4
+ across 8 production dimensions so that planners can bias toward preferences.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import time
10
+ from dataclasses import dataclass
11
+ from typing import Optional
12
+
13
+
14
EXTENDED_TASTE_DIMENSIONS = [
    "transition_boldness",
    "automation_density",
    "dryness_preference",
    "harmonic_boldness",
    "width_preference",
    "native_vs_plugin",
    "density_tolerance",
    "fx_intensity",
]

# Maps outcome signals to taste dimension adjustments.
# Each key is a dimension name; values map (outcome_signal -> adjustment).
_OUTCOME_SIGNALS: dict[str, dict[str, float]] = {
    "transition_boldness": {
        "bold_transition_kept": 0.15,
        "bold_transition_undone": -0.15,
        "subtle_transition_kept": -0.10,
        "subtle_transition_undone": 0.10,
    },
    "automation_density": {
        "dense_automation_kept": 0.12,
        "dense_automation_undone": -0.12,
        "sparse_automation_kept": -0.08,
    },
    "dryness_preference": {
        "dry_mix_kept": 0.15,
        "dry_mix_undone": -0.15,
        "wet_mix_kept": -0.12,
        "wet_mix_undone": 0.12,
    },
    "harmonic_boldness": {
        "bold_harmony_kept": 0.15,
        "bold_harmony_undone": -0.15,
        "safe_harmony_kept": -0.10,
    },
    "width_preference": {
        "wide_mix_kept": 0.12,
        "wide_mix_undone": -0.12,
        "narrow_mix_kept": -0.10,
    },
    "native_vs_plugin": {
        "native_device_kept": 0.10,
        "plugin_kept": -0.10,
    },
    "density_tolerance": {
        "dense_arrangement_kept": 0.12,
        "dense_arrangement_undone": -0.12,
        "sparse_arrangement_kept": -0.08,
    },
    "fx_intensity": {
        "heavy_fx_kept": 0.15,
        "heavy_fx_undone": -0.15,
        "light_fx_kept": -0.10,
        "light_fx_undone": 0.10,
    },
}


@dataclass
class TasteDimension:
    """Extended taste tracking beyond quality dimensions."""

    name: str  # e.g. "transition_boldness", "automation_density"
    value: float  # -1 to 1 (negative=prefers less, positive=prefers more)
    evidence_count: int
    last_updated_ms: int

    def to_dict(self) -> dict:
        """Serialize, rounding value to 3 decimal places."""
        return {
            "name": self.name,
            "value": round(self.value, 3),
            "evidence_count": self.evidence_count,
            "last_updated_ms": self.last_updated_ms,
        }


class TasteMemoryStore:
    """In-memory store for taste dimensions inferred from outcomes."""

    def __init__(self) -> None:
        # Every known dimension starts at neutral (0.0) with no evidence.
        self._dims: dict[str, TasteDimension] = {
            name: TasteDimension(
                name=name, value=0.0, evidence_count=0, last_updated_ms=0
            )
            for name in EXTENDED_TASTE_DIMENSIONS
        }

    def update_from_outcome(self, outcome: dict) -> None:
        """Adjust taste dimensions from an outcome's signal list.

        Only the "signals" key of the outcome dict is consulted, e.g.
        {"signals": ["bold_transition_kept", "wide_mix_kept"]} — the
        kept/undone polarity is already encoded in the signal names.
        """
        signals = outcome.get("signals", [])
        if not signals:
            return

        now_ms = int(time.time() * 1000)

        for signal in signals:
            for dim_name, adjustments in _OUTCOME_SIGNALS.items():
                if signal not in adjustments:
                    continue
                dim = self._dims.get(dim_name)
                if dim is None:
                    # Defensive: re-create a dimension that was somehow removed.
                    dim = TasteDimension(
                        name=dim_name, value=0.0,
                        evidence_count=0, last_updated_ms=0,
                    )
                    self._dims[dim_name] = dim
                # Clamp the running value into [-1, 1].
                dim.value = min(1.0, max(-1.0, dim.value + adjustments[signal]))
                dim.evidence_count += 1
                dim.last_updated_ms = now_ms

    def get_taste_dimensions(self) -> list[TasteDimension]:
        """Return all taste dimensions."""
        return list(self._dims.values())

    def get_dimension(self, name: str) -> Optional[TasteDimension]:
        """Return a specific taste dimension, or None."""
        return self._dims.get(name)

    def should_prefer(self, dimension: str, direction: str) -> bool:
        """True if evidence suggests the user prefers this direction.

        direction: "more" or "less"
        Returns True only if evidence_count >= 2 and value agrees.
        """
        dim = self._dims.get(dimension)
        if dim is None or dim.evidence_count < 2:
            return False
        if direction == "more":
            return dim.value > 0.1
        if direction == "less":
            return dim.value < -0.1
        return False

    def to_dict(self) -> dict:
        """Serialize the full store."""
        dims = self.get_taste_dimensions()
        return {
            "dimensions": [d.to_dict() for d in dims],
            "count": len(dims),
        }
@@ -11,7 +11,8 @@ from typing import Any, Optional
11
11
 
12
12
 
13
13
  VALID_TYPES = frozenset(
14
- ["beat_pattern", "device_chain", "mix_template", "browser_pin", "preference"]
14
+ ["beat_pattern", "device_chain", "mix_template", "browser_pin", "preference",
15
+ "outcome", "composition_outcome", "technique_card"]
15
16
  )
16
17
 
17
18
  VALID_SORT_FIELDS = frozenset(
@@ -36,28 +37,35 @@ class TechniqueStore:
36
37
 
37
38
  Deferred so that a read-only HOME doesn't crash the entire MCP
38
39
  server at import time — memory tools just return errors instead.
40
+ Thread-safe: uses double-checked locking to prevent concurrent
41
+ callers from racing on initialization.
39
42
  """
40
43
  if self._initialized:
41
44
  return
42
- try:
43
- self._base_dir.mkdir(parents=True, exist_ok=True)
44
- except OSError as exc:
45
- raise RuntimeError(
46
- f"Cannot create memory directory {self._base_dir}: {exc}. "
47
- "Memory tools are unavailable."
48
- ) from exc
49
- if self._file.exists():
45
+ with self._lock:
46
+ # Double-check after acquiring lock — another thread may have
47
+ # initialized while we were waiting.
48
+ if self._initialized:
49
+ return
50
50
  try:
51
- with open(self._file, "r") as f:
52
- self._data = json.load(f)
53
- except (json.JSONDecodeError, ValueError):
54
- corrupt = self._file.with_suffix(".json.corrupt")
55
- self._file.rename(corrupt)
51
+ self._base_dir.mkdir(parents=True, exist_ok=True)
52
+ except OSError as exc:
53
+ raise RuntimeError(
54
+ f"Cannot create memory directory {self._base_dir}: {exc}. "
55
+ "Memory tools are unavailable."
56
+ ) from exc
57
+ if self._file.exists():
58
+ try:
59
+ with open(self._file, "r") as f:
60
+ self._data = json.load(f)
61
+ except (json.JSONDecodeError, ValueError):
62
+ corrupt = self._file.with_suffix(".json.corrupt")
63
+ self._file.rename(corrupt)
64
+ self._data = {"version": 1, "techniques": []}
65
+ else:
56
66
  self._data = {"version": 1, "techniques": []}
57
- else:
58
- self._data = {"version": 1, "techniques": []}
59
- self._flush()
60
- self._initialized = True
67
+ self._flush()
68
+ self._initialized = True
61
69
 
62
70
  # ── persistence ──────────────────────────────────────────────
63
71
 
@@ -262,6 +270,7 @@ class TechniqueStore:
262
270
 
263
271
  def increment_replay(self, technique_id: str) -> None:
264
272
  """Increment replay_count and set last_replayed_at."""
273
+ self._ensure_initialized()
265
274
  with self._lock:
266
275
  t = self._find(technique_id)
267
276
  t["replay_count"] = t.get("replay_count", 0) + 1