livepilot 1.9.21 → 1.9.23

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (110) hide show
  1. package/.claude-plugin/marketplace.json +3 -3
  2. package/.mcpbignore +40 -0
  3. package/AGENTS.md +2 -2
  4. package/CHANGELOG.md +47 -0
  5. package/CONTRIBUTING.md +1 -1
  6. package/README.md +47 -72
  7. package/bin/livepilot.js +135 -0
  8. package/livepilot/.Codex-plugin/plugin.json +2 -2
  9. package/livepilot/.claude-plugin/plugin.json +2 -2
  10. package/livepilot/agents/livepilot-producer/AGENT.md +13 -0
  11. package/livepilot/commands/arrange.md +42 -14
  12. package/livepilot/commands/beat.md +68 -21
  13. package/livepilot/commands/evaluate.md +23 -13
  14. package/livepilot/commands/mix.md +35 -11
  15. package/livepilot/commands/perform.md +31 -19
  16. package/livepilot/commands/sounddesign.md +38 -17
  17. package/livepilot/skills/livepilot-arrangement/SKILL.md +2 -1
  18. package/livepilot/skills/livepilot-composition-engine/references/transition-archetypes.md +2 -2
  19. package/livepilot/skills/livepilot-core/SKILL.md +60 -4
  20. package/livepilot/skills/livepilot-core/references/device-atlas/distortion-and-character.md +11 -11
  21. package/livepilot/skills/livepilot-core/references/device-atlas/drums-and-percussion.md +25 -25
  22. package/livepilot/skills/livepilot-core/references/device-atlas/dynamics-and-punch.md +21 -21
  23. package/livepilot/skills/livepilot-core/references/device-atlas/eq-and-filtering.md +13 -13
  24. package/livepilot/skills/livepilot-core/references/device-atlas/midi-tools.md +13 -13
  25. package/livepilot/skills/livepilot-core/references/device-atlas/movement-and-modulation.md +5 -5
  26. package/livepilot/skills/livepilot-core/references/device-atlas/space-and-depth.md +16 -16
  27. package/livepilot/skills/livepilot-core/references/device-atlas/spectral-and-weird.md +40 -40
  28. package/livepilot/skills/livepilot-core/references/m4l-devices.md +3 -3
  29. package/livepilot/skills/livepilot-core/references/overview.md +4 -4
  30. package/livepilot/skills/livepilot-evaluation/SKILL.md +12 -8
  31. package/livepilot/skills/livepilot-evaluation/references/memory-promotion.md +2 -2
  32. package/livepilot/skills/livepilot-mix-engine/SKILL.md +1 -1
  33. package/livepilot/skills/livepilot-mix-engine/references/mix-moves.md +2 -2
  34. package/livepilot/skills/livepilot-mixing/SKILL.md +3 -1
  35. package/livepilot/skills/livepilot-notes/SKILL.md +2 -1
  36. package/livepilot/skills/livepilot-release/SKILL.md +15 -15
  37. package/livepilot/skills/livepilot-sound-design-engine/SKILL.md +2 -2
  38. package/livepilot/skills/livepilot-wonder/SKILL.md +62 -0
  39. package/livepilot.mcpb +0 -0
  40. package/m4l_device/livepilot_bridge.js +1 -1
  41. package/manifest.json +91 -0
  42. package/mcp_server/__init__.py +1 -1
  43. package/mcp_server/creative_constraints/__init__.py +6 -0
  44. package/mcp_server/creative_constraints/engine.py +277 -0
  45. package/mcp_server/creative_constraints/models.py +75 -0
  46. package/mcp_server/creative_constraints/tools.py +341 -0
  47. package/mcp_server/experiment/__init__.py +6 -0
  48. package/mcp_server/experiment/engine.py +213 -0
  49. package/mcp_server/experiment/models.py +120 -0
  50. package/mcp_server/experiment/tools.py +263 -0
  51. package/mcp_server/hook_hunter/__init__.py +5 -0
  52. package/mcp_server/hook_hunter/analyzer.py +342 -0
  53. package/mcp_server/hook_hunter/models.py +57 -0
  54. package/mcp_server/hook_hunter/tools.py +586 -0
  55. package/mcp_server/memory/taste_graph.py +261 -0
  56. package/mcp_server/memory/tools.py +88 -0
  57. package/mcp_server/mix_engine/critics.py +2 -2
  58. package/mcp_server/mix_engine/models.py +1 -1
  59. package/mcp_server/mix_engine/state_builder.py +2 -2
  60. package/mcp_server/musical_intelligence/__init__.py +8 -0
  61. package/mcp_server/musical_intelligence/detectors.py +421 -0
  62. package/mcp_server/musical_intelligence/phrase_critic.py +163 -0
  63. package/mcp_server/musical_intelligence/tools.py +221 -0
  64. package/mcp_server/preview_studio/__init__.py +5 -0
  65. package/mcp_server/preview_studio/engine.py +280 -0
  66. package/mcp_server/preview_studio/models.py +73 -0
  67. package/mcp_server/preview_studio/tools.py +423 -0
  68. package/mcp_server/runtime/session_kernel.py +96 -0
  69. package/mcp_server/runtime/tools.py +90 -1
  70. package/mcp_server/semantic_moves/__init__.py +13 -0
  71. package/mcp_server/semantic_moves/compiler.py +116 -0
  72. package/mcp_server/semantic_moves/mix_compilers.py +291 -0
  73. package/mcp_server/semantic_moves/mix_moves.py +157 -0
  74. package/mcp_server/semantic_moves/models.py +46 -0
  75. package/mcp_server/semantic_moves/performance_compilers.py +208 -0
  76. package/mcp_server/semantic_moves/performance_moves.py +81 -0
  77. package/mcp_server/semantic_moves/registry.py +32 -0
  78. package/mcp_server/semantic_moves/resolvers.py +126 -0
  79. package/mcp_server/semantic_moves/sound_design_compilers.py +266 -0
  80. package/mcp_server/semantic_moves/sound_design_moves.py +78 -0
  81. package/mcp_server/semantic_moves/tools.py +204 -0
  82. package/mcp_server/semantic_moves/transition_compilers.py +222 -0
  83. package/mcp_server/semantic_moves/transition_moves.py +76 -0
  84. package/mcp_server/server.py +10 -0
  85. package/mcp_server/session_continuity/__init__.py +6 -0
  86. package/mcp_server/session_continuity/models.py +86 -0
  87. package/mcp_server/session_continuity/tools.py +230 -0
  88. package/mcp_server/session_continuity/tracker.py +235 -0
  89. package/mcp_server/song_brain/__init__.py +6 -0
  90. package/mcp_server/song_brain/builder.py +477 -0
  91. package/mcp_server/song_brain/models.py +132 -0
  92. package/mcp_server/song_brain/tools.py +294 -0
  93. package/mcp_server/stuckness_detector/__init__.py +5 -0
  94. package/mcp_server/stuckness_detector/detector.py +400 -0
  95. package/mcp_server/stuckness_detector/models.py +66 -0
  96. package/mcp_server/stuckness_detector/tools.py +195 -0
  97. package/mcp_server/tools/_conductor.py +104 -6
  98. package/mcp_server/tools/analyzer.py +1 -1
  99. package/mcp_server/tools/devices.py +34 -0
  100. package/mcp_server/wonder_mode/__init__.py +6 -0
  101. package/mcp_server/wonder_mode/diagnosis.py +84 -0
  102. package/mcp_server/wonder_mode/engine.py +493 -0
  103. package/mcp_server/wonder_mode/session.py +114 -0
  104. package/mcp_server/wonder_mode/tools.py +285 -0
  105. package/package.json +2 -2
  106. package/remote_script/LivePilot/__init__.py +1 -1
  107. package/remote_script/LivePilot/browser.py +4 -1
  108. package/remote_script/LivePilot/devices.py +29 -0
  109. package/remote_script/LivePilot/tracks.py +11 -4
  110. package/scripts/generate_tool_catalog.py +131 -0
@@ -7,7 +7,7 @@ Memory promotion saves successful production moves to persistent storage for rec
7
7
  1. A move scores > 0.7 in evaluation
8
8
  2. Call `get_promotion_candidates` to list all eligible moves from this session
9
9
  3. Present the candidate to the user with score and description
10
- 4. If the user confirms, call `memory_learn(type, data)` to save
10
+ 4. If the user confirms, call `memory_learn(name, type, qualities, payload)` to save
11
11
  5. The technique is now available via `memory_recall` in future sessions
12
12
 
13
13
  ## Promotion Candidates
@@ -58,7 +58,7 @@ Anti-preferences are the inverse of promotion — they record moves the user exp
58
58
 
59
59
  ### Recording
60
60
 
61
- Call `record_anti_preference(description)` when:
61
+ Call `record_anti_preference(dimension, direction)` when:
62
62
  - The user says "I hate that", "never do that again", "that's wrong"
63
63
  - A move is undone and the user expresses displeasure (not just neutral undo)
64
64
  - The user explicitly states a preference against a technique
@@ -77,7 +77,7 @@ If `keep_change` is `true`, report the improvement to the user with the score an
77
77
 
78
78
  ### Step 8 — Learn (Optional)
79
79
 
80
- If the move scored above 0.7 and the user confirms satisfaction, call `memory_learn(type="mix_template")` to save the technique for future recall.
80
+ If the move scored above 0.7 and the user confirms satisfaction, call `memory_learn(name="...", type="mix_template", qualities={"summary": "..."}, payload={...})` to save the technique for future recall.
81
81
 
82
82
  ### Step 9 — Repeat
83
83
 
@@ -35,7 +35,7 @@ Apply or adjust glue compression on groups, return tracks, or master bus.
35
35
  - Always capture before/after RMS to verify loudness is maintained
36
36
  - On master bus, prefer ratio <= 2:1 and attack >= 10 ms
37
37
 
38
- **Tools:** `set_device_parameter`, `find_and_load_device("Compressor")`
38
+ **Tools:** `set_device_parameter`, `find_and_load_device(track_index, "Compressor")`
39
39
 
40
40
  ## transient_shaping
41
41
 
@@ -68,7 +68,7 @@ Subtractive EQ to clear masking, remove resonances, or fix spectral balance.
68
68
  - Cut on the less important track in a masking pair
69
69
  - Verify after cut that the track still sounds full — over-cutting kills body
70
70
 
71
- **Tools:** `set_device_parameter`, `find_and_load_device("EQ Eight")`
71
+ **Tools:** `set_device_parameter`, `find_and_load_device(track_index, "EQ Eight")`
72
72
 
73
73
  ## eq_boost
74
74
 
@@ -152,4 +152,6 @@ Never skip levels. Start at the lowest appropriate level and offer to go deeper.
152
152
 
153
153
  ## Reference
154
154
 
155
- Consult `references/mixing-patterns.md` in the livepilot-core skill for gain staging recipes, parallel compression setups, sidechain configurations, EQ by instrument type, bus processing chains, and stereo width techniques. Consult `references/automation-atlas.md` for curve theory, genre-specific automation recipes, diagnostic filter technique, and cross-track spectral mapping.
155
+ Supporting references live in the `livepilot-core` skill's `references/` directory:
156
+ - `livepilot-core/references/mixing-patterns.md` — gain staging, parallel compression, sidechain, EQ by instrument, bus processing, stereo width
157
+ - `livepilot-core/references/automation-atlas.md` — curve theory, genre-specific recipes, diagnostic filter, cross-track spectral mapping
@@ -126,4 +126,5 @@ Before playing any clip with melodic or harmonic content:
126
126
 
127
127
  ## Reference
128
128
 
129
- Consult `references/midi-recipes.md` in the livepilot-core skill for drum patterns by genre, chord voicings by style, scale lookup tables, hi-hat articulation techniques, humanization recipes, and polymetric layering patterns.
129
+ Supporting references live in the `livepilot-core` skill's `references/` directory:
130
+ - `livepilot-core/references/midi-recipes.md` — drum patterns by genre, chord voicings, scale tables, hi-hat articulations, humanization, polymetric layering
@@ -28,26 +28,26 @@ Run this checklist EVERY time the user says "update everything", "push", "releas
28
28
 
29
29
  ## 2. Tool Count (must ALL match)
30
30
 
31
- Current: **236 tools across 31 domains**.
32
- Core (no M4L): **149**. Analyzer (M4L): **29**. Perception (offline): **4**.
31
+ Current: **293 tools across 39 domains**.
32
+ Core (no M4L): **149**. Analyzer (M4L): **29**. Perception (offline): **4**. V2 engines: **86+**.
33
33
 
34
34
  Verify: `grep -rc "@mcp.tool" mcp_server/tools/ | grep -v ":0" | awk -F: '{sum+=$2} END{print sum}'`
35
35
 
36
36
  Files that reference tool count:
37
- - [ ] `README.md` — header, PERCEPTION section ("207 core...29 analyzer"), Analyzer table header "(29)", Perception table header "(4)"
38
- - [ ] `package.json` → `"description"` (236 tools, 31 domains)
37
+ - [ ] `README.md` — header, PERCEPTION section ("207 core...30 analyzer"), Analyzer table header "(30)", Perception table header "(4)"
38
+ - [ ] `package.json` → `"description"` (293 tools, 39 domains)
39
39
  - [ ] `server.json` → `"description"`
40
40
  - [ ] `livepilot/.Codex-plugin/plugin.json` → `"description"` (primary Codex manifest)
41
41
  - [ ] `livepilot/.claude-plugin/plugin.json` → `"description"` (must match Codex plugin)
42
42
  - [ ] `.claude-plugin/marketplace.json` → `"description"`
43
- - [ ] `CLAUDE.md` → "236 tools across 31 domains"
44
- - [ ] `livepilot/skills/livepilot-core/SKILL.md` — "236 tools across 31 domains", Analyzer (29), Perception (4)
45
- - [ ] `livepilot/skills/livepilot-core/references/overview.md` — "236 tools across 31 domains"
46
- - [ ] `docs/manual/index.md` — domain table: Analyzer (29), Perception (4)
47
- - [ ] `docs/manual/getting-started.md` — "207 core tools...29 analyzer"
43
+ - [ ] `CLAUDE.md` → "293 tools across 39 domains"
44
+ - [ ] `livepilot/skills/livepilot-core/SKILL.md` — "293 tools across 39 domains", Analyzer (30), Perception (4)
45
+ - [ ] `livepilot/skills/livepilot-core/references/overview.md` — "293 tools across 39 domains"
46
+ - [ ] `docs/manual/index.md` — domain table: Analyzer (30), Perception (4)
47
+ - [ ] `docs/manual/getting-started.md` — "207 core tools...30 analyzer"
48
48
  - [ ] `docs/manual/tool-reference.md` — all domains present with correct counts
49
49
  - [ ] `docs/TOOL_REFERENCE.md` — all domains present
50
- - [ ] `docs/M4L_BRIDGE.md` — "207 core tools...29 analyzer"
50
+ - [ ] `docs/M4L_BRIDGE.md` — "207 core tools...30 analyzer"
51
51
  - [ ] `docs/social-banner.html`
52
52
  - [ ] `mcp_server/tools/analyzer.py` → module docstring
53
53
  - [ ] `tests/test_tools_contract.py` → expected total count
@@ -56,10 +56,10 @@ Files that reference tool count:
56
56
 
57
57
  ## 3. Domain Count
58
58
 
59
- Current: **31 domains**: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, motif, research, planner, project_brain, runtime, evaluation, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine.
59
+ Current: **39 domains**: transport, tracks, clips, notes, devices, scenes, mixing, browser, arrangement, memory, analyzer, automation, theory, generative, harmony, midi_io, perception, agent_os, composition, motif, research, planner, project_brain, runtime, evaluation, mix_engine, sound_design, transition_engine, reference_engine, translation_engine, performance_engine, song_brain, preview_studio, hook_hunter, stuckness_detector, wonder_mode, session_continuity, creative_constraints.
60
60
 
61
- - [ ] All files that mention domain count say "31 domains"
62
- - [ ] Domain lists include ALL 31 (especially newer domains — they're the most often omitted)
61
+ - [ ] All files that mention domain count say "39 domains"
62
+ - [ ] Domain lists include ALL 39 (especially newer domains — they're the most often omitted)
63
63
 
64
64
  ## 4. npm Registry
65
65
 
@@ -89,8 +89,8 @@ Current: **31 domains**: transport, tracks, clips, notes, devices, scenes, mixin
89
89
 
90
90
  - [ ] `README.md` — features match current capabilities, "Coming" section is accurate
91
91
  - [ ] `docs/manual/getting-started.md` — install instructions current
92
- - [ ] `docs/manual/tool-reference.md` — all 31 domains listed, all 236 tools present
93
- - [ ] `docs/TOOL_REFERENCE.md` — all 31 domains present
92
+ - [ ] `docs/manual/tool-reference.md` — all 39 domains listed, all 293 tools present
93
+ - [ ] `docs/TOOL_REFERENCE.md` — all 39 domains present
94
94
  - [ ] `docs/M4L_BRIDGE.md` — architecture accurate, core tool count correct
95
95
 
96
96
  ## 9. Derived Artifacts
@@ -76,7 +76,7 @@ Repeat the same measurements:
76
76
 
77
77
  ### Step 7 — Evaluate
78
78
 
79
- Call `evaluate_move(engine="sound_design")` with the before and after snapshots. Read:
79
+ Call `evaluate_move(goal_vector, before_snapshot, after_snapshot)` where `goal_vector` is the compiled goal from Step 1 and snapshots contain `{spectrum: {...}, rms: float, peak: float}`. Read:
80
80
 
81
81
  - `keep_change` (bool): whether the change improved the sound
82
82
  - `score` (0.0-1.0): quality improvement magnitude
@@ -87,7 +87,7 @@ Call `evaluate_move(engine="sound_design")` with the before and after snapshots.
87
87
 
88
88
  If `keep_change` is `false`, call `undo()`. Explain what was tried and why it did not improve the sound.
89
89
 
90
- If `keep_change` is `true`, report the improvement. If score > 0.7, consider calling `memory_learn(type="sound_design")` to save the technique.
90
+ If `keep_change` is `true`, report the improvement. If score > 0.7, consider calling `memory_learn(name="...", type="device_chain", qualities={"summary": "..."}, payload={...})` to save the technique.
91
91
 
92
92
  ### Step 9 — Repeat
93
93
 
@@ -0,0 +1,62 @@
1
+ ---
2
+ name: livepilot-wonder
3
+ description: >
4
+ This skill should be used when the user asks to "surprise me",
5
+ "make it magical", "I'm stuck", "give me options", "take it somewhere",
6
+ or when stuckness confidence is high. Provides the Wonder Mode
7
+ stuck-rescue workflow with honest variant labeling and preview-first UX.
8
+ ---
9
+
10
+ # Wonder Mode — Stuck-Rescue Workflow
11
+
12
+ Wonder Mode is a **preview-first stuck-rescue workflow**. It diagnoses
13
+ why a session is stuck, generates genuinely distinct executable options,
14
+ and lets the user preview, compare, and commit.
15
+
16
+ ## When to Trigger
17
+
18
+ - User says "I'm stuck", "surprise me", "make it magical", "give me options"
19
+ - `detect_stuckness` confidence > 0.5
20
+ - 3+ consecutive undos in action ledger
21
+ - Multiple plausible next moves with no clear winner
22
+
23
+ ## When NOT to Trigger
24
+
25
+ - Exact operational requests ("set track 3 volume to -6dB")
26
+ - Narrow deterministic edits ("quantize this clip")
27
+ - Performance-safe-only context (unless explicitly requested)
28
+
29
+ ## The Workflow
30
+
31
+ 1. `enter_wonder_mode` — get diagnosis + 1-3 variants
32
+ 2. Explain the diagnosis in **musical language**, not tool language
33
+ 3. Present variants honestly:
34
+ - Executable variants: can be previewed and committed
35
+ - Analytical variants (`analytical_only: true`): directional ideas only
36
+ 4. `create_preview_set` with `wonder_session_id` for executable variants
37
+ 5. `render_preview_variant` for each executable variant
38
+ 6. `compare_preview_variants` — present recommendation with reasons
39
+ 7. User chooses:
40
+ - `commit_preview_variant` — applies the variant, records outcome
41
+ - `discard_wonder_session` — rejects all, keeps creative thread open
42
+
43
+ ## Honesty Rules
44
+
45
+ - **Never describe an analytical variant as previewable**
46
+ - **Never fabricate distinctness** by relabeling the same move
47
+ - **Fewer than 3 variants is correct** when fewer distinct moves exist
48
+ - 1 executable + 2 analytical is an honest, useful result
49
+ - The `variant_count_actual` field tells you how many are real
50
+
51
+ ## Presenting Results
52
+
53
+ For each variant, explain:
54
+ - What it changes (in musical terms)
55
+ - What it preserves (sacred elements)
56
+ - Why it matters for this specific session
57
+ - Whether it's executable or analytical-only
58
+
59
+ For the recommendation, explain:
60
+ - Why this one over the others
61
+ - What risk it introduces
62
+ - What sacred elements it preserves
package/livepilot.mcpb ADDED
Binary file
@@ -84,7 +84,7 @@ function anything() {
84
84
  function dispatch(cmd, args) {
85
85
  switch(cmd) {
86
86
  case "ping":
87
- send_response({"ok": true, "version": "1.9.21"});
87
+ send_response({"ok": true, "version": "1.9.22"});
88
88
  break;
89
89
  case "get_params":
90
90
  cmd_get_params(args);
package/manifest.json ADDED
@@ -0,0 +1,91 @@
1
+ {
2
+ "manifest_version": "0.3",
3
+ "name": "livepilot",
4
+ "display_name": "LivePilot — AI for Ableton Live",
5
+ "version": "1.9.23",
6
+ "description": "Agentic production system for Ableton Live 12. Make beats, mix tracks, design sounds, and arrange songs with 293 AI-powered tools.",
7
+ "long_description": "LivePilot is an AI production assistant that connects directly to Ableton Live 12. It can create drum patterns, program basslines, write chord progressions, design sounds, mix your tracks, analyze your audio, and arrange full songs — all through natural language.\n\n**What it does:**\n- Creates MIDI clips with notes, chords, and rhythms\n- Loads instruments and effects from Ableton's browser\n- Shapes sounds by adjusting device parameters\n- Mixes with volume, panning, sends, and automation\n- Analyzes your mix with real-time spectral data\n- Remembers your production style across sessions\n\n**How it works:**\nLivePilot installs a Remote Script in Ableton that communicates with the AI over a local TCP connection. Everything runs on your machine — no audio leaves your computer.",
8
+ "author": {
9
+ "name": "Pilot Studio",
10
+ "url": "https://github.com/dreamrec/LivePilot"
11
+ },
12
+ "license": "MIT",
13
+ "repository": {
14
+ "type": "git",
15
+ "url": "https://github.com/dreamrec/LivePilot"
16
+ },
17
+ "homepage": "https://github.com/dreamrec/LivePilot",
18
+ "documentation": "https://github.com/dreamrec/LivePilot#readme",
19
+ "support": "https://github.com/dreamrec/LivePilot/issues",
20
+
21
+ "server": {
22
+ "type": "node",
23
+ "entry_point": "bin/livepilot.js",
24
+ "mcp_config": {
25
+ "command": "node",
26
+ "args": ["${__dirname}/bin/livepilot.js"],
27
+ "env": {
28
+ "LIVEPILOT_AUTO_INSTALL": "${user_config.auto_install_remote_script}",
29
+ "LIVEPILOT_TCP_PORT": "${user_config.ableton_port}"
30
+ }
31
+ }
32
+ },
33
+
34
+ "user_config": {
35
+ "auto_install_remote_script": {
36
+ "type": "boolean",
37
+ "title": "Auto-install Remote Script",
38
+ "description": "Automatically install the LivePilot Remote Script into Ableton Live's Remote Scripts folder on first launch. You'll still need to select 'LivePilot' in Ableton's Preferences > Link, Tempo & MIDI > Control Surface.",
39
+ "default": true
40
+ },
41
+ "ableton_port": {
42
+ "type": "number",
43
+ "title": "Ableton TCP Port",
44
+ "description": "TCP port for communication with Ableton Live. Only change this if you have a port conflict. Default: 9878.",
45
+ "default": 9878,
46
+ "min": 1024,
47
+ "max": 65535
48
+ }
49
+ },
50
+
51
+ "tools_generated": true,
52
+
53
+ "tools": [
54
+ {"name": "get_session_info", "description": "Get comprehensive session state: tempo, tracks, scenes, transport"},
55
+ {"name": "set_tempo", "description": "Set the song tempo (20-999 BPM)"},
56
+ {"name": "create_midi_track", "description": "Create a new MIDI track with optional name and color"},
57
+ {"name": "add_notes", "description": "Add MIDI notes to a clip with pitch, timing, velocity, probability"},
58
+ {"name": "search_browser", "description": "Search Ableton's browser for instruments, effects, drums, samples"},
59
+ {"name": "find_and_load_device", "description": "Find a device by name and load it onto a track"},
60
+ {"name": "set_device_parameter", "description": "Set any device parameter by name with value verification"},
61
+ {"name": "fire_scene", "description": "Launch a scene, triggering all its clips"},
62
+ {"name": "analyze_harmony", "description": "Analyze chord progression with Roman numeral analysis"},
63
+ {"name": "get_master_spectrum", "description": "Real-time 8-band frequency analysis of the master bus"},
64
+ {"name": "memory_learn", "description": "Save a production technique for future recall"},
65
+ {"name": "plan_arrangement", "description": "Generate a full arrangement blueprint with sections and transitions"}
66
+ ],
67
+
68
+ "keywords": [
69
+ "ableton",
70
+ "ableton-live",
71
+ "music-production",
72
+ "daw",
73
+ "midi",
74
+ "audio",
75
+ "mixing",
76
+ "mastering",
77
+ "beat-making",
78
+ "sound-design",
79
+ "music-theory",
80
+ "ai-music"
81
+ ],
82
+
83
+ "compatibility": {
84
+ "platforms": ["darwin", "win32"],
85
+ "runtimes": {
86
+ "python": ">=3.9"
87
+ }
88
+ },
89
+
90
+ "screenshots": []
91
+ }
@@ -1,2 +1,2 @@
1
1
  """LivePilot MCP Server — bridges MCP protocol to Ableton Live."""
2
- __version__ = "1.9.21"
2
+ __version__ = "1.9.23"
@@ -0,0 +1,6 @@
1
+ """Creative Constraints — constraint-driven creativity for Stage 2.
2
+
3
+ High-level creative constraints that make suggestions feel intentional
4
+ and original. Also handles reference distillation — learning principles
5
+ from a reference and applying them to the current session.
6
+ """
@@ -0,0 +1,277 @@
1
+ """Creative Constraints engine — pure computation, zero I/O.
2
+
3
+ Handles constraint application, reference distillation, and
4
+ constrained variant generation.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import hashlib
10
+ from typing import Optional
11
+
12
+ from .models import (
13
+ CONSTRAINT_MODES,
14
+ ConstraintSet,
15
+ ReferenceDistillation,
16
+ ReferencePrinciple,
17
+ )
18
+
19
+
20
+ # ── Constraint application ────────────────────────────────────────
21
+
22
+
23
def build_constraint_set(
    constraints: list[str],
    session_info: Optional[dict] = None,
) -> ConstraintSet:
    """Validate and build a constraint set.

    Unknown constraint names are silently dropped so a partially valid
    request still yields a usable set.

    Args:
        constraints: Candidate constraint-mode names; only names present
            in ``CONSTRAINT_MODES`` are kept, in their original order.
        session_info: Currently unused; accepted so callers can pass
            session context without a signature change later.

    Returns:
        A ``ConstraintSet`` holding the valid names plus human-readable
        ``description`` and ``reason`` strings (``"; "``-joined).
    """
    valid = [c for c in constraints if c in CONSTRAINT_MODES]
    # NOTE(review): invalid names are discarded without feedback; callers
    # that need to surface them should diff the input against CONSTRAINT_MODES.

    descriptions = {
        "use_loaded_devices_only": "Only use devices already loaded in the session",
        "no_new_tracks": "Work within existing tracks — no new tracks",
        "subtraction_only": "Only remove or reduce — no additions",
        "arrangement_only": "Only arrangement changes — no sound design or mixing",
        "mood_shift_without_new_fx": "Shift the mood using only existing tools",
        "make_it_stranger_but_keep_the_hook": "Push novelty while preserving the hook",
        "club_translation_safe": "Keep changes club/DJ-friendly",
        "performance_safe_creative": "Only changes safe for live performance",
    }

    reasons = {
        "use_loaded_devices_only": "Forces creative use of existing palette",
        "no_new_tracks": "Keeps complexity manageable",
        "subtraction_only": "Sometimes less is more — helps find the essence",
        "arrangement_only": "Separates arrangement thinking from production details",
        "mood_shift_without_new_fx": "Tests whether the composition carries the mood",
        "make_it_stranger_but_keep_the_hook": "Pushes boundaries safely",
        "club_translation_safe": "Ensures dancefloor viability",
        "performance_safe_creative": "Ensures live-safe changes",
    }

    desc_parts = [descriptions.get(c, c) for c in valid]
    reason_parts = [reasons.get(c, "") for c in valid if c in reasons]

    return ConstraintSet(
        constraints=valid,
        description="; ".join(desc_parts),
        reason="; ".join(reason_parts),
    )
64
+
65
+
66
def validate_plan_against_constraints(
    plan: dict,
    constraint_set: ConstraintSet,
    session_info: Optional[dict] = None,
) -> dict:
    """Check whether a plan respects the active constraints.

    Only the structurally checkable constraints (``no_new_tracks``,
    ``subtraction_only``, ``arrangement_only``) are enforced here;
    the remaining modes need human judgement.

    Returns a dict with ``valid``, ``violations``, ``warnings``, and
    ``constraint_count`` keys.
    """
    session_info = session_info or {}
    violations: list[str] = []
    warnings: list[str] = []

    steps = plan.get("steps", [])

    # Action groups each structural constraint forbids.
    track_creators = ("create_midi_track", "create_audio_track", "create_return_track")
    additive_actions = {"create_clip", "create_midi_track", "create_audio_track",
                        "duplicate_clip", "duplicate_track"}
    mix_actions = {"set_device_parameter", "set_track_volume", "set_track_pan",
                   "set_send_level"}

    for constraint in constraint_set.constraints:
        if constraint == "no_new_tracks":
            for step in steps:
                action = step.get("action", "")
                if action in track_creators:
                    violations.append(
                        f"Step creates a new track ({action}) — violates no_new_tracks"
                    )
        elif constraint == "subtraction_only":
            for step in steps:
                action = step.get("action", "")
                if action in additive_actions:
                    violations.append(
                        f"Step adds content ({action}) — violates subtraction_only"
                    )
        elif constraint == "arrangement_only":
            for step in steps:
                action = step.get("action", "")
                if action in mix_actions:
                    violations.append(
                        f"Step modifies mix ({action}) — violates arrangement_only"
                    )

    return {
        "valid": not violations,
        "violations": violations,
        "warnings": warnings,
        "constraint_count": len(constraint_set.constraints),
    }
105
+
106
+
107
+ # ── Reference distillation ────────────────────────────────────────
108
+
109
+
110
def distill_reference_principles(
    reference_profile: dict,
    reference_description: str = "",
) -> ReferenceDistillation:
    """Distill musical principles from a reference profile.

    Extracts emotional posture, density motion, arrangement patience,
    texture treatment, and payoff architecture — never surface traits.

    Args:
        reference_profile: Analysis dict; recognized keys are
            ``emotional_stance``, ``density_arc``, ``section_pacing``,
            ``width_depth``, ``spectral_contour``, ``groove_posture``,
            and ``harmonic_character``. Missing/empty keys simply yield
            no principle for that domain.
        reference_description: Free-text label stored on the result.

    Returns:
        A ``ReferenceDistillation`` with zero or more domain principles.
    """
    # NOTE(review): str() of a dict depends on key insertion order, so the
    # same profile with reordered keys gets a different id — confirm this
    # is acceptable before switching to a canonical serialization.
    ref_id = hashlib.sha256(str(reference_profile).encode()).hexdigest()[:10]

    principles: list[ReferencePrinciple] = []

    # Emotional posture
    emotional = reference_profile.get("emotional_stance", "")
    if emotional:
        principles.append(ReferencePrinciple(
            domain="emotional",
            principle=f"Emotional posture: {emotional}",
            value=0.0,
            applicability=0.7,
            note="Apply the feeling, not the specific sounds",
        ))

    # Density motion — computed once and reused in the summary fields below.
    density_arc = reference_profile.get("density_arc", [])
    density_motion = _describe_density_motion(density_arc) if density_arc else ""
    if density_arc:
        principles.append(ReferencePrinciple(
            domain="density",
            principle=f"Density motion: {density_motion}",
            # density_arc is non-empty here, so a plain mean is safe.
            value=sum(density_arc) / len(density_arc),
            applicability=0.6,
        ))

    # Arrangement patience — avg bars per section, computed once.
    pacing = reference_profile.get("section_pacing", [])
    avg_bars = 0.0
    if pacing:
        avg_bars = sum(s.get("bars", 8) for s in pacing) / len(pacing)
        patience = "patient" if avg_bars > 16 else "moderate" if avg_bars > 8 else "rapid"
        principles.append(ReferencePrinciple(
            domain="arrangement",
            principle=f"Arrangement patience: {patience} (avg {avg_bars:.0f} bars/section)",
            value=avg_bars,
            applicability=0.7,
        ))

    # Width/space strategy
    width = reference_profile.get("width_depth", {})
    if width:
        w_val = width.get("stereo_width", 0.5)
        strategy = "wide" if w_val > 0.7 else "focused" if w_val < 0.3 else "balanced"
        principles.append(ReferencePrinciple(
            domain="width",
            principle=f"Width strategy: {strategy} stereo field",
            value=w_val,
            applicability=0.5,
        ))

    # Spectral character — dominant band wins; ties go to the first key.
    spectral = reference_profile.get("spectral_contour", {})
    if spectral:
        balance = spectral.get("band_balance", {})
        if balance:
            dominant = max(balance.items(), key=lambda kv: kv[1], default=("mid", 0.5))
            principles.append(ReferencePrinciple(
                domain="spectral",
                principle=f"Spectral emphasis: {dominant[0]}-forward",
                value=dominant[1],
                applicability=0.5,
            ))

    # Groove posture
    groove = reference_profile.get("groove_posture", {})
    if groove:
        swing = groove.get("swing", 0)
        groove_desc = "swung" if swing > 20 else "straight" if swing < 5 else "lightly swung"
        principles.append(ReferencePrinciple(
            domain="groove",
            principle=f"Groove feel: {groove_desc}",
            value=swing,
            applicability=0.6,
        ))

    return ReferenceDistillation(
        reference_id=ref_id,
        reference_description=reference_description,
        principles=principles,
        emotional_posture=emotional,
        density_motion=density_motion,
        arrangement_patience=f"{avg_bars:.0f} bars avg" if pacing else "",
        texture_treatment=reference_profile.get("harmonic_character", ""),
        foreground_background="",
        width_strategy=width.get("description", "") if isinstance(width, dict) else "",
        payoff_architecture="",
    )
206
+
207
+
208
def map_principles_to_song(
    song_brain: dict,
    distillation: ReferenceDistillation,
) -> list[dict]:
    """Map reference principles to the current song's context.

    Rephrases each distilled principle through the song's identity,
    loaded tools, and hook identity. Never produces a plan that simply
    copies surface traits from the reference.
    """
    identity = song_brain.get("identity_core", "")
    sacred = [
        element.get("description", "")
        for element in song_brain.get("sacred_elements", [])
    ]

    return [
        {
            "principle": principle.principle,
            "domain": principle.domain,
            "applicability": principle.applicability,
            "in_your_song": _translate_principle(principle, identity, sacred),
            "preserves": "Adapts the principle while maintaining your song's identity",
        }
        for principle in distillation.principles
    ]
232
+
233
+
234
+ # ── Helpers ───────────────────────────────────────────────────────
235
+
236
+
237
+ def _describe_density_motion(arc: list[float]) -> str:
238
+ """Describe the density arc pattern."""
239
+ if len(arc) < 2:
240
+ return "static"
241
+
242
+ # Check for patterns
243
+ increasing = all(arc[i] <= arc[i + 1] for i in range(len(arc) - 1))
244
+ decreasing = all(arc[i] >= arc[i + 1] for i in range(len(arc) - 1))
245
+
246
+ if increasing:
247
+ return "steadily building"
248
+ if decreasing:
249
+ return "gradually thinning"
250
+
251
+ # Find peak position
252
+ peak_idx = arc.index(max(arc))
253
+ peak_pct = peak_idx / max(len(arc) - 1, 1)
254
+
255
+ if peak_pct < 0.3:
256
+ return "front-loaded density"
257
+ elif peak_pct > 0.7:
258
+ return "late-peak density"
259
+ else:
260
+ return "centered density arc"
261
+
262
+
263
+ def _translate_principle(
264
+ principle: ReferencePrinciple,
265
+ identity: str,
266
+ sacred: list[str],
267
+ ) -> str:
268
+ """Translate a reference principle into current-song language."""
269
+ translations = {
270
+ "emotional": f"Channel the {principle.principle.split(': ')[-1]} feeling through your existing palette",
271
+ "density": f"Apply {principle.principle.split(': ')[-1]} while keeping your identity ({identity})",
272
+ "arrangement": f"Use {principle.principle.split(': ')[-1]} pacing to develop your song's structure",
273
+ "width": f"Apply this stereo approach while preserving your groove",
274
+ "spectral": f"Lean into this spectral emphasis using your existing sounds",
275
+ "groove": f"Adapt this groove feel to your rhythm section",
276
+ }
277
+ return translations.get(principle.domain, f"Apply: {principle.principle}")