livepilot 1.9.13 → 1.9.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/AGENTS.md +3 -3
  3. package/CHANGELOG.md +51 -0
  4. package/CONTRIBUTING.md +1 -1
  5. package/README.md +7 -7
  6. package/bin/livepilot.js +32 -8
  7. package/installer/install.js +21 -2
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +243 -49
  11. package/livepilot/skills/livepilot-core/SKILL.md +81 -6
  12. package/livepilot/skills/livepilot-core/references/m4l-devices.md +2 -2
  13. package/livepilot/skills/livepilot-core/references/overview.md +3 -3
  14. package/livepilot/skills/livepilot-core/references/sound-design.md +3 -2
  15. package/livepilot/skills/livepilot-release/SKILL.md +13 -13
  16. package/m4l_device/LivePilot_Analyzer.amxd +0 -0
  17. package/m4l_device/livepilot_bridge.js +6 -3
  18. package/mcp_server/__init__.py +1 -1
  19. package/mcp_server/curves.py +11 -3
  20. package/mcp_server/evaluation/__init__.py +1 -0
  21. package/mcp_server/evaluation/fabric.py +575 -0
  22. package/mcp_server/evaluation/feature_extractors.py +84 -0
  23. package/mcp_server/evaluation/policy.py +67 -0
  24. package/mcp_server/evaluation/tools.py +53 -0
  25. package/mcp_server/memory/__init__.py +11 -2
  26. package/mcp_server/memory/anti_memory.py +78 -0
  27. package/mcp_server/memory/promotion.py +94 -0
  28. package/mcp_server/memory/session_memory.py +108 -0
  29. package/mcp_server/memory/taste_memory.py +158 -0
  30. package/mcp_server/memory/technique_store.py +2 -1
  31. package/mcp_server/memory/tools.py +112 -0
  32. package/mcp_server/mix_engine/__init__.py +1 -0
  33. package/mcp_server/mix_engine/critics.py +299 -0
  34. package/mcp_server/mix_engine/models.py +152 -0
  35. package/mcp_server/mix_engine/planner.py +103 -0
  36. package/mcp_server/mix_engine/state_builder.py +316 -0
  37. package/mcp_server/mix_engine/tools.py +214 -0
  38. package/mcp_server/performance_engine/__init__.py +1 -0
  39. package/mcp_server/performance_engine/models.py +148 -0
  40. package/mcp_server/performance_engine/planner.py +267 -0
  41. package/mcp_server/performance_engine/safety.py +162 -0
  42. package/mcp_server/performance_engine/tools.py +183 -0
  43. package/mcp_server/project_brain/__init__.py +6 -0
  44. package/mcp_server/project_brain/arrangement_graph.py +64 -0
  45. package/mcp_server/project_brain/automation_graph.py +72 -0
  46. package/mcp_server/project_brain/builder.py +123 -0
  47. package/mcp_server/project_brain/capability_graph.py +64 -0
  48. package/mcp_server/project_brain/models.py +282 -0
  49. package/mcp_server/project_brain/refresh.py +80 -0
  50. package/mcp_server/project_brain/role_graph.py +103 -0
  51. package/mcp_server/project_brain/session_graph.py +51 -0
  52. package/mcp_server/project_brain/tools.py +144 -0
  53. package/mcp_server/reference_engine/__init__.py +1 -0
  54. package/mcp_server/reference_engine/gap_analyzer.py +239 -0
  55. package/mcp_server/reference_engine/models.py +105 -0
  56. package/mcp_server/reference_engine/profile_builder.py +149 -0
  57. package/mcp_server/reference_engine/tactic_router.py +117 -0
  58. package/mcp_server/reference_engine/tools.py +235 -0
  59. package/mcp_server/runtime/__init__.py +1 -0
  60. package/mcp_server/runtime/action_ledger.py +117 -0
  61. package/mcp_server/runtime/action_ledger_models.py +84 -0
  62. package/mcp_server/runtime/action_tools.py +57 -0
  63. package/mcp_server/runtime/capability_state.py +218 -0
  64. package/mcp_server/runtime/safety_kernel.py +339 -0
  65. package/mcp_server/runtime/safety_tools.py +42 -0
  66. package/mcp_server/runtime/tools.py +64 -0
  67. package/mcp_server/server.py +23 -1
  68. package/mcp_server/sound_design/__init__.py +1 -0
  69. package/mcp_server/sound_design/critics.py +297 -0
  70. package/mcp_server/sound_design/models.py +147 -0
  71. package/mcp_server/sound_design/planner.py +104 -0
  72. package/mcp_server/sound_design/tools.py +297 -0
  73. package/mcp_server/tools/_agent_os_engine.py +947 -0
  74. package/mcp_server/tools/_composition_engine.py +1530 -0
  75. package/mcp_server/tools/_conductor.py +199 -0
  76. package/mcp_server/tools/_conductor_budgets.py +222 -0
  77. package/mcp_server/tools/_evaluation_contracts.py +91 -0
  78. package/mcp_server/tools/_form_engine.py +416 -0
  79. package/mcp_server/tools/_motif_engine.py +351 -0
  80. package/mcp_server/tools/_planner_engine.py +516 -0
  81. package/mcp_server/tools/_research_engine.py +542 -0
  82. package/mcp_server/tools/_research_provider.py +185 -0
  83. package/mcp_server/tools/_snapshot_normalizer.py +49 -0
  84. package/mcp_server/tools/agent_os.py +440 -0
  85. package/mcp_server/tools/analyzer.py +18 -0
  86. package/mcp_server/tools/automation.py +25 -10
  87. package/mcp_server/tools/composition.py +563 -0
  88. package/mcp_server/tools/motif.py +104 -0
  89. package/mcp_server/tools/planner.py +144 -0
  90. package/mcp_server/tools/research.py +223 -0
  91. package/mcp_server/tools/tracks.py +18 -3
  92. package/mcp_server/tools/transport.py +10 -2
  93. package/mcp_server/transition_engine/__init__.py +6 -0
  94. package/mcp_server/transition_engine/archetypes.py +167 -0
  95. package/mcp_server/transition_engine/critics.py +340 -0
  96. package/mcp_server/transition_engine/models.py +90 -0
  97. package/mcp_server/transition_engine/tools.py +291 -0
  98. package/mcp_server/translation_engine/__init__.py +5 -0
  99. package/mcp_server/translation_engine/critics.py +297 -0
  100. package/mcp_server/translation_engine/models.py +27 -0
  101. package/mcp_server/translation_engine/tools.py +74 -0
  102. package/package.json +2 -2
  103. package/remote_script/LivePilot/__init__.py +1 -1
  104. package/remote_script/LivePilot/arrangement.py +12 -2
  105. package/requirements.txt +1 -1
@@ -0,0 +1,542 @@
1
+ """Research Engine — targeted and deep research synthesis for production techniques.
2
+
3
+ Searches the device atlas, technique memory, and optionally web sources to answer
4
+ production questions. Synthesizes findings into structured TechniqueCards.
5
+
6
+ Zero external dependencies beyond stdlib. The MCP tool wrappers in research.py
7
+ handle data fetching; this module handles synthesis and ranking.
8
+
9
+ Design: spec at docs/specs/2026-04-08-phase2-4-roadmap.md, Round 3 (3.1, 3.2).
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import re
15
+ from dataclasses import asdict, dataclass, field
16
+ from typing import Any, Optional
17
+
18
+ from ._agent_os_engine import TechniqueCard
19
+
20
+
21
+ # ── Research Results ─────────────────────────────────────────────────
22
+
23
@dataclass
class ResearchFinding:
    """A single finding from a research source.

    Produced by targeted_research()/deep_research(), then ordered and
    deduplicated by rank_findings() based on `relevance` and `content`.
    """
    source_type: str  # origin: "device_atlas", "memory", "web", "corpus"
    source_id: str    # source-specific id, e.g. "Wavetable", "mem_042", "url:..."
    relevance: float  # 0-1, how relevant to the query
    content: str      # the actual finding text
    metadata: dict = field(default_factory=dict)  # source-specific extras (e.g. device_name, url)

    def to_dict(self) -> dict:
        """Return a plain-dict representation (asdict deep-copies nested fields)."""
        return asdict(self)
34
+
35
+
36
@dataclass
class ResearchResult:
    """Aggregated research output for one query."""
    query: str
    scope: str  # "targeted" or "deep"
    findings: list[ResearchFinding] = field(default_factory=list)  # ranked, deduplicated findings
    technique_card: Optional[TechniqueCard] = None  # synthesized card; None when no findings
    confidence: float = 0.0  # 0-1, overall confidence in findings
    sources_searched: list[str] = field(default_factory=list)  # e.g. ["device_atlas", "memory", "web"]

    def to_dict(self) -> dict:
        """Serialize to a plain dict, delegating the card to its own serializer."""
        d = asdict(self)
        # asdict() recursed into the card generically; replace with
        # TechniqueCard.to_dict() for cleaner output
        if self.technique_card:
            d["technique_card"] = self.technique_card.to_dict()
        d["finding_count"] = len(self.findings)
        return d
53
+
54
+
55
+ # ── Query Analysis ───────────────────────────────────────────────────
56
+
57
# Common production technique keywords → likely device families.
# NOTE: keys are matched as substrings of the query, so stems like
# "compress" deliberately catch "compression" / "compressor".
_TECHNIQUE_KEYWORDS: dict[str, list[str]] = {
    "sidechain": ["Compressor", "Glue Compressor", "Auto Filter"],
    "reverb": ["Reverb", "Convolution Reverb", "Hybrid Reverb"],
    "delay": ["Delay", "Echo", "Filter Delay", "Grain Delay"],
    "distortion": ["Saturator", "Overdrive", "Pedal", "Amp"],
    "eq": ["EQ Eight", "EQ Three", "Channel EQ"],
    "compress": ["Compressor", "Glue Compressor", "Multiband Dynamics"],
    "filter": ["Auto Filter", "EQ Eight"],
    "modulation": ["LFO", "Chorus-Ensemble", "Phaser-Flanger"],
    "granular": ["Granulator II", "Grain Delay"],
    "sampling": ["Simpler", "Sampler", "Drum Rack"],
    "synthesis": ["Wavetable", "Operator", "Analog", "Drift"],
    "bass": ["Operator", "Analog", "Wavetable", "Saturator"],
    "pad": ["Wavetable", "Drift", "Reverb", "Chorus-Ensemble"],
    "drums": ["Drum Rack", "Simpler", "Compressor", "Saturator"],
    "vocals": ["Compressor", "EQ Eight", "Reverb", "Delay"],
    "mastering": ["Multiband Dynamics", "Limiter", "EQ Eight", "Utility"],
    "width": ["Utility", "Chorus-Ensemble", "Haas", "Mid/Side"],
    "warmth": ["Saturator", "Amp", "Analog"],
    "space": ["Reverb", "Convolution Reverb", "Delay"],
    "glue": ["Glue Compressor", "Saturator", "Bus compression"],
}


def analyze_query(query: str) -> dict:
    """Analyze a research query to extract intent, keywords, and likely devices.

    Returns: {keywords, likely_devices, technique_categories, specificity}
    """
    normalized = query.lower().strip()
    tokens = re.findall(r'\b[a-z]+\b', normalized)

    # Categories whose trigger keyword appears (as a substring) in the query,
    # in library order; devices are collected in that same order, de-duplicated
    # on insertion.
    category_hits = [kw for kw in _TECHNIQUE_KEYWORDS if kw in normalized]
    predicted: list[str] = []
    for category in category_hits:
        for device in _TECHNIQUE_KEYWORDS[category]:
            if device not in predicted:
                predicted.append(device)

    # Specificity heuristic: longer queries are more specific (saturating at
    # 10 words), with a small bonus for containing a known technique keyword.
    specificity = min(1.0, len(tokens) / 10.0)
    if category_hits:
        specificity = min(1.0, specificity + 0.2)

    return {
        "keywords": tokens,
        "likely_devices": predicted[:5],
        "technique_categories": category_hits,
        "specificity": round(specificity, 2),
    }
113
+
114
+
115
+ # ── Finding Ranking ──────────────────────────────────────────────────
116
+
117
+ def _score_finding_relevance(finding_text: str, query_words: list[str]) -> float:
118
+ """Score how relevant a finding is to the query keywords."""
119
+ if not query_words or not finding_text:
120
+ return 0.0
121
+
122
+ lower_text = finding_text.lower()
123
+ matches = sum(1 for w in query_words if w in lower_text)
124
+ return min(1.0, matches / max(len(query_words), 1))
125
+
126
+
127
def rank_findings(findings: list[ResearchFinding]) -> list[ResearchFinding]:
    """Sort findings by relevance (descending), dropping near-duplicates.

    Two findings are considered duplicates when their first 50 characters
    match after lowercasing/stripping; the higher-ranked one wins. At most
    10 findings are returned.
    """
    # sorted() is stable, so ties keep their original order.
    ordered = sorted(findings, key=lambda f: f.relevance, reverse=True)

    kept: list[ResearchFinding] = []
    seen_signatures: set[str] = set()
    for finding in ordered:
        # Cheap near-duplicate signature: first 50 chars, normalized.
        signature = finding.content[:50].lower().strip()
        if signature in seen_signatures:
            continue
        seen_signatures.add(signature)
        kept.append(finding)

    return kept[:10]  # cap at 10 findings
143
+
144
+
145
+ # ── Targeted Research ────────────────────────────────────────────────
146
+
147
def targeted_research(
    query: str,
    device_atlas_results: list[dict],
    memory_results: list[dict],
    corpus_results: Optional[list[dict]] = None,
) -> ResearchResult:
    """Synthesize targeted research from device atlas + memory.

    device_atlas_results: device reference entries (from get_device_reference)
    memory_results: technique memories (from memory_list)
    corpus_results: optional additional reference entries

    Returns: ResearchResult with ranked findings and a synthesized technique card.
    """
    info = analyze_query(query)
    keywords = info["keywords"]
    collected: list[ResearchFinding] = []
    searched: list[str] = []

    # 1. Device atlas findings
    if device_atlas_results:
        searched.append("device_atlas")
        for entry in device_atlas_results:
            device_name = entry.get("name", "")
            summary = _format_device_finding(entry)
            score = _score_finding_relevance(summary, keywords)

            # Boost devices the query analysis already predicted.
            if device_name in info["likely_devices"]:
                score = min(1.0, score + 0.3)

            collected.append(ResearchFinding(
                source_type="device_atlas",
                source_id=device_name,
                relevance=round(score, 3),
                content=summary,
                metadata={"device_name": device_name, "category": entry.get("category", "")},
            ))

    # 2. Memory findings (technique cards, outcomes, research notes)
    if memory_results:
        searched.append("memory")
        for mem in memory_results:
            if not isinstance(mem.get("payload", {}), dict):
                continue
            summary = _format_memory_finding(mem)
            score = _score_finding_relevance(summary, keywords)

            # Technique cards are structured, hence more actionable: boost.
            mem_type = mem.get("type", "")
            if mem_type == "technique_card":
                score = min(1.0, score + 0.2)

            collected.append(ResearchFinding(
                source_type="memory",
                source_id=mem.get("id", "unknown"),
                relevance=round(score, 3),
                content=summary,
                metadata={"memory_type": mem_type},
            ))

    # 3. Corpus findings (additional references)
    if corpus_results:
        searched.append("corpus")
        for entry in corpus_results:
            text = entry.get("content", entry.get("text", str(entry)))
            collected.append(ResearchFinding(
                source_type="corpus",
                source_id=entry.get("id", "corpus"),
                relevance=round(_score_finding_relevance(text, keywords), 3),
                content=text[:500],  # cap length
            ))

    # Rank/deduplicate, then synthesize a card from the survivors.
    ranked = rank_findings(collected)
    card = _synthesize_technique_card(query, ranked, info)

    # Overall confidence = mean relevance of the top three ranked findings.
    if ranked:
        top = [f.relevance for f in ranked[:3]]
        confidence = sum(top) / len(top)
    else:
        confidence = 0.0

    return ResearchResult(
        query=query,
        scope="targeted",
        findings=ranked,
        technique_card=card,
        confidence=round(confidence, 3),
        sources_searched=searched,
    )
241
+
242
+
243
+ def _format_device_finding(entry: dict) -> str:
244
+ """Format a device atlas entry into a readable finding."""
245
+ name = entry.get("name", "Unknown")
246
+ category = entry.get("category", "")
247
+ description = entry.get("description", "")
248
+ params = entry.get("key_parameters", entry.get("parameters", []))
249
+
250
+ parts = [f"Device: {name}"]
251
+ if category:
252
+ parts.append(f"Category: {category}")
253
+ if description:
254
+ parts.append(description[:200])
255
+ if params and isinstance(params, list):
256
+ param_names = [p.get("name", p) if isinstance(p, dict) else str(p) for p in params[:5]]
257
+ parts.append(f"Key params: {', '.join(param_names)}")
258
+
259
+ return " | ".join(parts)
260
+
261
+
262
+ def _format_memory_finding(mem: dict) -> str:
263
+ """Format a memory entry into a readable finding."""
264
+ mem_type = mem.get("type", "unknown")
265
+ payload = mem.get("payload", {})
266
+
267
+ if mem_type == "technique_card":
268
+ problem = payload.get("problem", "")
269
+ method = payload.get("method", "")
270
+ devices = payload.get("devices", [])
271
+ return f"Technique: {problem} | Method: {method} | Devices: {', '.join(devices)}"
272
+ elif mem_type == "outcome":
273
+ move = payload.get("move", {})
274
+ score = payload.get("score", 0)
275
+ move_name = move.get("name", "unknown") if isinstance(move, dict) else str(move)
276
+ return f"Outcome: {move_name} (score: {score:.2f})"
277
+ else:
278
+ # Research or note
279
+ content = payload.get("content", payload.get("text", str(payload)))
280
+ if isinstance(content, str):
281
+ return content[:300]
282
+ return str(content)[:300]
283
+
284
+
285
def _synthesize_technique_card(
    query: str,
    findings: list[ResearchFinding],
    query_info: dict,
) -> Optional[TechniqueCard]:
    """Synthesize a technique card from research findings.

    Returns None when there are no findings; otherwise a TechniqueCard whose
    devices come from atlas findings plus query-predicted devices, and whose
    method chains the strongest finding snippets.
    """
    if not findings:
        return None

    # Devices: atlas findings first, de-duplicated in encounter order.
    devices: list[str] = []
    for finding in findings:
        if finding.source_type == "device_atlas":
            device = finding.metadata.get("device_name", "")
            if device and device not in devices:
                devices.append(device)
        elif finding.source_type == "memory" and finding.metadata.get("memory_type") == "technique_card":
            # Devices from technique-card memories are already embedded in
            # the finding content; nothing extra to collect here.
            pass

    # Then append query-predicted devices not already present.
    for predicted in query_info.get("likely_devices", []):
        if predicted not in devices:
            devices.append(predicted)

    # Method: chain snippets of the top findings that cleared the
    # relevance floor; fall back to a generic description.
    snippets = [f.content[:150] for f in findings[:3] if f.relevance >= 0.3]
    method = " → ".join(snippets) if snippets else f"Research findings for: {query}"

    # Verification steps derived from matched technique categories,
    # with generic listening checks as the fallback.
    verification = [
        f"Check {category} results with analyzer"
        for category in query_info.get("technique_categories", [])
    ]
    if not verification:
        verification = ["Listen for intended effect", "Compare before/after with analyzer"]

    return TechniqueCard(
        problem=query,
        context=query_info.get("technique_categories", []),
        devices=devices[:5],
        method=method,
        verification=verification,
        evidence={"scope": "targeted", "finding_count": len(findings)},
    )
334
+
335
+
336
+ # ── Deep Research ────────────────────────────────────────────────────
337
+
338
def deep_research(
    query: str,
    web_results: list[dict],
    device_atlas_results: list[dict],
    memory_results: list[dict],
    corpus_results: Optional[list[dict]] = None,
) -> ResearchResult:
    """Multi-source synthesis: targeted sources + web search results.

    web_results: list of {url, title, snippet} from web search
    Other params same as targeted_research.

    Returns: ResearchResult (scope="deep") merging local and web findings,
    with a confidence bonus for covering more sources.
    """
    # Reuse the targeted pipeline for the local sources.
    local = targeted_research(query, device_atlas_results, memory_results, corpus_results)

    info = analyze_query(query)
    searched = list(local.sources_searched)

    web_findings: list[ResearchFinding] = []
    if web_results:
        searched.append("web")
        for hit in web_results:
            title = hit.get("title", "")
            snippet = hit.get("snippet", hit.get("text", ""))
            url = hit.get("url", "")

            text = f"{title}: {snippet}"
            score = _score_finding_relevance(text, info["keywords"])

            # Boost results from known production sources.
            trusted_domains = ["ableton.com", "soundonsound.com", "musicradar.com",
                               "attackmagazine.com", "producerhive.com"]
            if any(domain in url.lower() for domain in trusted_domains):
                score = min(1.0, score + 0.15)

            web_findings.append(ResearchFinding(
                source_type="web",
                source_id=url or title,
                relevance=round(score, 3),
                content=text[:500],
                metadata={"url": url, "title": title},
            ))

    # Merge local + web findings and re-rank the combined pool.
    ranked = rank_findings(local.findings + web_findings)

    # Synthesize the card from the richer merged data.
    card = _synthesize_technique_card(query, ranked, info)

    # Confidence: mean of top-3 relevances plus a per-source bonus
    # (0.05 each, capped at 0.15), clamped to 1.0.
    if ranked:
        top = [f.relevance for f in ranked[:3]]
        base = sum(top) / len(top)
        confidence = min(1.0, base + min(0.15, len(searched) * 0.05))
    else:
        confidence = 0.0

    return ResearchResult(
        query=query,
        scope="deep",
        findings=ranked,
        technique_card=card,
        confidence=round(confidence, 3),
        sources_searched=searched,
    )
408
+
409
+
410
+ # ── Style Tactics (Round 4) ──────────────────────────────────────────
411
+
412
@dataclass
class StyleTactic:
    """Artist/genre reference study as a reusable composition tactic.

    Instances live in STYLE_TACTIC_LIBRARY or are rehydrated from user
    memory payloads by get_style_tactics().
    """
    artist_or_genre: str  # lookup key, e.g. "burial", "techno"
    tactic_name: str      # short snake_case identifier for the tactic
    arrangement_patterns: list[str] = field(default_factory=list)  # structural moves, e.g. "sparse_intro"
    device_chain: list[dict] = field(default_factory=list)         # ordered {"name", "params"} device specs
    automation_gestures: list[str] = field(default_factory=list)   # gesture names, e.g. "drift", "lift"
    verification: list[str] = field(default_factory=list)          # listening checks after applying the tactic

    def to_dict(self) -> dict:
        """Return a plain-dict representation (via dataclasses.asdict)."""
        return asdict(self)
424
+
425
+
426
# Built-in style tactic library — common production approaches by genre/artist.
# Matched by substring in get_style_tactics(); list order is the tie-break
# order of results, so keep entries stable.
STYLE_TACTIC_LIBRARY: list[StyleTactic] = [
    StyleTactic(
        artist_or_genre="burial",
        tactic_name="ghostly_reverb_treatment",
        arrangement_patterns=["sparse_intro", "gradual_buildup", "sudden_strip_back"],
        device_chain=[
            {"name": "Reverb", "params": {"Decay Time": 4.5, "Dry/Wet": 0.6}},
            {"name": "Auto Filter", "params": {"Frequency": 800, "Resonance": 0.4}},
            {"name": "Utility", "params": {"Width": 0.7}},
        ],
        automation_gestures=["conceal", "drift", "punctuate"],
        verification=["Check reverb tail doesn't mud the low end", "Verify width feels intimate"],
    ),
    StyleTactic(
        artist_or_genre="daft punk",
        tactic_name="filter_disco_sweep",
        arrangement_patterns=["4_bar_filter_open", "loop_with_variation", "energy_plateau"],
        device_chain=[
            {"name": "Auto Filter", "params": {"Frequency": 200, "Resonance": 0.6}},
            {"name": "Saturator", "params": {"Drive": 8, "Dry/Wet": 0.4}},
            {"name": "Compressor", "params": {"Ratio": 4, "Attack": 10}},
        ],
        automation_gestures=["reveal", "lift", "release"],
        verification=["Filter sweep should feel musical, not mechanical", "Check groove isn't crushed"],
    ),
    StyleTactic(
        artist_or_genre="techno",
        tactic_name="rolling_hypnotic_groove",
        arrangement_patterns=["long_intro_16bars", "minimal_variation", "subtle_addition_per_8bars"],
        device_chain=[
            {"name": "Compressor", "params": {"Attack": 0.1, "Release": 100, "Ratio": 6}},
            {"name": "Delay", "params": {"Dry/Wet": 0.15, "Feedback": 0.3}},
            {"name": "EQ Eight", "params": {}},
        ],
        automation_gestures=["drift", "inhale", "release"],
        verification=["Groove should be hypnotic not boring", "Check low-end stays clean"],
    ),
    StyleTactic(
        artist_or_genre="ambient",
        tactic_name="evolving_texture_bed",
        arrangement_patterns=["very_slow_reveal", "32bar_sections", "layered_textures"],
        device_chain=[
            {"name": "Reverb", "params": {"Decay Time": 8.0, "Dry/Wet": 0.8}},
            {"name": "Chorus-Ensemble", "params": {"Rate 1": 0.3}},
            {"name": "Delay", "params": {"Dry/Wet": 0.3, "Feedback": 0.5}},
        ],
        automation_gestures=["drift", "reveal", "sink"],
        verification=["Texture should feel alive, not static", "Check nothing competes for attention"],
    ),
    StyleTactic(
        artist_or_genre="trap",
        tactic_name="808_bounce_pattern",
        arrangement_patterns=["8bar_loop_base", "hihat_triplet_fills", "vocal_chop_hooks"],
        device_chain=[
            {"name": "Operator", "params": {}},
            {"name": "Saturator", "params": {"Drive": 12}},
            {"name": "Glue Compressor", "params": {"Ratio": 4, "Attack": 0.1}},
        ],
        automation_gestures=["punctuate", "release", "lift"],
        verification=["808 should hit hard but not clip", "Hi-hats should groove not machine-gun"],
    ),
    StyleTactic(
        artist_or_genre="lo-fi",
        tactic_name="dusty_warmth",
        arrangement_patterns=["simple_loop_structure", "minimal_sections", "fade_endings"],
        device_chain=[
            {"name": "Saturator", "params": {"Drive": 5, "Dry/Wet": 0.5}},
            {"name": "EQ Eight", "params": {}},
            {"name": "Auto Filter", "params": {"Frequency": 3000}},
        ],
        automation_gestures=["conceal", "drift", "sink"],
        verification=["Should feel warm not muddy", "High-end roll-off should sound natural"],
    ),
]
501
+
502
+
503
def get_style_tactics(
    artist_or_genre: str,
    memory_tactics: Optional[list[dict]] = None,
) -> list[StyleTactic]:
    """Find style tactics matching an artist or genre query.

    Searches the built-in library and optionally user-saved tactics from memory.
    Matching is case-insensitive substring matching.
    """
    needle = artist_or_genre.lower().strip()
    matches: list[StyleTactic] = []

    # Pass 1: match on artist/genre or tactic name in the built-in library.
    for tactic in STYLE_TACTIC_LIBRARY:
        if needle in tactic.artist_or_genre.lower() or needle in tactic.tactic_name.lower():
            matches.append(tactic)

    # Pass 2 (fallback, only when pass 1 found nothing): match on
    # arrangement pattern names.
    if not matches:
        matches = [
            tactic for tactic in STYLE_TACTIC_LIBRARY
            if any(needle in pattern.lower() for pattern in tactic.arrangement_patterns)
        ]

    # Pass 3: rehydrate user-saved tactics from memory payloads.
    for mem in memory_tactics or []:
        payload = mem.get("payload", {})
        if not isinstance(payload, dict):
            continue
        saved_genre = payload.get("artist_or_genre", "").lower()
        saved_name = payload.get("tactic_name", "").lower()
        if needle in saved_genre or needle in saved_name:
            matches.append(StyleTactic(
                artist_or_genre=payload.get("artist_or_genre", ""),
                tactic_name=payload.get("tactic_name", ""),
                arrangement_patterns=payload.get("arrangement_patterns", []),
                device_chain=payload.get("device_chain", []),
                automation_gestures=payload.get("automation_gestures", []),
                verification=payload.get("verification", []),
            ))

    return matches