livepilot 1.6.5 → 1.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +29 -0
- package/README.md +20 -5
- package/bin/livepilot.js +2 -2
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/server.py +3 -0
- package/mcp_server/tools/_generative_engine.py +271 -0
- package/mcp_server/tools/_harmony_engine.py +207 -0
- package/mcp_server/tools/generative.py +273 -0
- package/mcp_server/tools/harmony.py +253 -0
- package/mcp_server/tools/midi_io.py +305 -0
- package/package.json +2 -2
- package/plugin/plugin.json +2 -2
- package/plugin/skills/livepilot-core/SKILL.md +44 -6
- package/plugin/skills/livepilot-core/references/overview.md +3 -3
- package/remote_script/LivePilot/__init__.py +2 -2
- package/requirements.txt +3 -0
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,34 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## 1.7.0 — Creative Engine (March 2026)
|
|
4
|
+
|
|
5
|
+
**13 new tools (142 → 155), 3 new domains, MIDI file I/O, neo-Riemannian harmony, generative algorithms.**
|
|
6
|
+
|
|
7
|
+
### MIDI I/O Domain (4 tools)
|
|
8
|
+
- `export_clip_midi` — export session clip to .mid file
|
|
9
|
+
- `import_midi_to_clip` — load .mid file into session clip
|
|
10
|
+
- `analyze_midi_file` — offline analysis of any .mid file
|
|
11
|
+
- `extract_piano_roll` — 2D velocity matrix from .mid file
|
|
12
|
+
|
|
13
|
+
### Generative Domain (5 tools)
|
|
14
|
+
- `generate_euclidean_rhythm` — Bjorklund algorithm, identifies known rhythms
|
|
15
|
+
- `layer_euclidean_rhythms` — stack patterns for polyrhythmic textures
|
|
16
|
+
- `generate_tintinnabuli` — Arvo Pärt technique: triad voice from melody
|
|
17
|
+
- `generate_phase_shift` — Steve Reich technique: drifting canon
|
|
18
|
+
- `generate_additive_process` — Philip Glass technique: expanding melody
|
|
19
|
+
|
|
20
|
+
### Harmony Domain (4 tools)
|
|
21
|
+
- `navigate_tonnetz` — PRL neighbors in harmonic space
|
|
22
|
+
- `find_voice_leading_path` — shortest path between two chords through Tonnetz
|
|
23
|
+
- `classify_progression` — identify neo-Riemannian transform pattern
|
|
24
|
+
- `suggest_chromatic_mediants` — all chromatic mediant relations with film score picks
|
|
25
|
+
|
|
26
|
+
### Architecture
|
|
27
|
+
- Two new pure Python engines (`_generative_engine.py`, `_harmony_engine.py`)
|
|
28
|
+
- New dependencies: midiutil, pretty-midi, opycleid (~5 MB total, lazy-loaded)
|
|
29
|
+
- Opycleid fallback: harmony tools work without the package via pure Python PRL
|
|
30
|
+
- All generative tools return note arrays — LLM orchestrates placement
|
|
31
|
+
|
|
3
32
|
## 1.6.5 — Drop music21 (March 2026)
|
|
4
33
|
|
|
5
34
|
**Theory tools rewritten with zero-dependency pure Python engine.**
|
package/README.md
CHANGED
|
@@ -12,7 +12,7 @@
|
|
|
12
12
|
[](https://github.com/dreamrec/LivePilot/stargazers)
|
|
13
13
|
[](https://www.npmjs.com/package/livepilot)
|
|
14
14
|
|
|
15
|
-
**AI copilot for Ableton Live 12** —
|
|
15
|
+
**AI copilot for Ableton Live 12** — 155 MCP tools, a deep device knowledge corpus, real-time audio analysis, generative algorithms, neo-Riemannian harmony, MIDI file I/O, and persistent technique memory.
|
|
16
16
|
|
|
17
17
|
Most Ableton MCP servers give the AI tools to push buttons. LivePilot gives it three things on top of that:
|
|
18
18
|
|
|
@@ -20,7 +20,7 @@ Most Ableton MCP servers give the AI tools to push buttons. LivePilot gives it t
|
|
|
20
20
|
- **Perception** — An M4L analyzer that reads the master bus in real-time: 8-band spectrum, RMS/peak metering, pitch tracking, key detection. The AI makes decisions based on what it hears, not just what's configured.
|
|
21
21
|
- **Memory** — A technique library that persists across sessions. The AI remembers how you built that bass sound, what swing you like on hi-hats, which reverb chain worked on vocals. It learns your taste over time.
|
|
22
22
|
|
|
23
|
-
These three layers sit on top of
|
|
23
|
+
These three layers sit on top of 155 deterministic MCP tools that cover transport, tracks, clips, MIDI, devices, scenes, mixing, browser, arrangement, sample manipulation, generative algorithms, neo-Riemannian harmony, and MIDI file I/O. Every command goes through Ableton's official Live Object Model API — the same interface Ableton's own control surfaces use. Everything is reversible with undo.
|
|
24
24
|
|
|
25
25
|
---
|
|
26
26
|
|
|
@@ -296,7 +296,7 @@ npx -y github:dreamrec/LivePilot --status
|
|
|
296
296
|
|
|
297
297
|
---
|
|
298
298
|
|
|
299
|
-
##
|
|
299
|
+
## 155 Tools Across 16 Domains
|
|
300
300
|
|
|
301
301
|
| Domain | Tools | What you can do |
|
|
302
302
|
|--------|:-----:|-----------------|
|
|
@@ -313,6 +313,9 @@ npx -y github:dreamrec/LivePilot --status
|
|
|
313
313
|
| **Memory** | 8 | Save, recall, replay, and manage production techniques |
|
|
314
314
|
| **Analyzer** | 20 | Real-time spectral analysis, key detection, sample manipulation, warp markers, device introspection (requires M4L device) |
|
|
315
315
|
| **Theory** | 7 | Harmony analysis, Roman numerals, scale identification, chord suggestions, countermelody, SATB harmonization, smart transposition |
|
|
316
|
+
| **Generative** | 5 | Euclidean rhythms (Bjorklund), polyrhythmic layering, Pärt tintinnabuli, Reich phase shift, Glass additive process |
|
|
317
|
+
| **Harmony** | 4 | Tonnetz navigation, voice leading paths, neo-Riemannian classification, chromatic mediants |
|
|
318
|
+
| **MIDI I/O** | 4 | Export clips to .mid, import .mid files, offline MIDI analysis, piano roll extraction |
|
|
316
319
|
|
|
317
320
|
<details>
|
|
318
321
|
<summary><strong>Full tool list</strong></summary>
|
|
@@ -353,6 +356,18 @@ npx -y github:dreamrec/LivePilot --status
|
|
|
353
356
|
### Analyzer (20) — requires LivePilot Analyzer M4L device on master track
|
|
354
357
|
`get_master_spectrum` · `get_master_rms` · `get_detected_key` · `get_hidden_parameters` · `get_automation_state` · `walk_device_tree` · `get_clip_file_path` · `replace_simpler_sample` · `load_sample_to_simpler` · `get_simpler_slices` · `crop_simpler` · `reverse_simpler` · `warp_simpler` · `get_warp_markers` · `add_warp_marker` · `move_warp_marker` · `remove_warp_marker` · `scrub_clip` · `stop_scrub` · `get_display_values`
|
|
355
358
|
|
|
359
|
+
### Theory (7)
|
|
360
|
+
`analyze_harmony` · `suggest_next_chord` · `detect_theory_issues` · `identify_scale` · `harmonize_melody` · `generate_countermelody` · `transpose_smart`
|
|
361
|
+
|
|
362
|
+
### Generative (5)
|
|
363
|
+
`generate_euclidean_rhythm` · `layer_euclidean_rhythms` · `generate_tintinnabuli` · `generate_phase_shift` · `generate_additive_process`
|
|
364
|
+
|
|
365
|
+
### Harmony (4)
|
|
366
|
+
`navigate_tonnetz` · `find_voice_leading_path` · `classify_progression` · `suggest_chromatic_mediants`
|
|
367
|
+
|
|
368
|
+
### MIDI I/O (4)
|
|
369
|
+
`export_clip_midi` · `import_midi_to_clip` · `analyze_midi_file` · `extract_piano_roll`
|
|
370
|
+
|
|
356
371
|
</details>
|
|
357
372
|
|
|
358
373
|
---
|
|
@@ -403,7 +418,7 @@ There are **15+ MCP servers for Ableton Live** as of March 2026. Here's how the
|
|
|
403
418
|
|
|
404
419
|
| | [LivePilot](https://github.com/dreamrec/LivePilot) | [AbletonMCP](https://github.com/ahujasid/ableton-mcp) | [MCP Extended](https://github.com/uisato/ableton-mcp-extended) | [Ableton Copilot](https://github.com/xiaolaa2/ableton-copilot-mcp) | [AbletonBridge](https://github.com/hidingwill/AbletonBridge) | [Producer Pal](https://github.com/adamjmurray/producer-pal) |
|
|
405
420
|
|---|:-:|:-:|:-:|:-:|:-:|:-:|
|
|
406
|
-
| **Tools** |
|
|
421
|
+
| **Tools** | 155 | ~20 | ~35 | ~45 | 322 | ~25 |
|
|
407
422
|
| **Device knowledge** | 280+ devices | -- | -- | -- | -- | -- |
|
|
408
423
|
| **Audio analysis** | Spectrum/RMS/key | -- | -- | -- | Metering | -- |
|
|
409
424
|
| **Technique memory** | Persistent | -- | -- | -- | -- | -- |
|
|
@@ -460,7 +475,7 @@ Every server on this list gives the AI tools to control Ableton. LivePilot is th
|
|
|
460
475
|
|
|
461
476
|
The practical difference: other servers let the AI set a parameter. LivePilot lets the AI choose the right parameter based on what device is loaded (atlas), verify the result by reading the audio output (analyzer), and remember the technique for next time (memory).
|
|
462
477
|
|
|
463
|
-
AbletonBridge has more raw tools (322 vs
|
|
478
|
+
AbletonBridge has more raw tools (322 vs 155). Producer Pal has the easiest install (drag a .amxd). The original AbletonMCP has the community (2.3k stars). LivePilot has the deepest integration — tools that execute, knowledge that informs, perception that verifies, and memory that accumulates.
|
|
464
479
|
|
|
465
480
|
---
|
|
466
481
|
|
package/bin/livepilot.js
CHANGED
|
@@ -70,12 +70,12 @@ function ensureVenv(systemPython, prefixArgs) {
|
|
|
70
70
|
// Check if venv already exists and has our deps
|
|
71
71
|
if (fs.existsSync(venvPy)) {
|
|
72
72
|
try {
|
|
73
|
-
execFileSync(venvPy, ["-c", "import fastmcp"], {
|
|
73
|
+
execFileSync(venvPy, ["-c", "import fastmcp; import midiutil; import pretty_midi"], {
|
|
74
74
|
encoding: "utf-8",
|
|
75
75
|
timeout: 10000,
|
|
76
76
|
stdio: "pipe",
|
|
77
77
|
});
|
|
78
|
-
return venvPy; // venv exists and
|
|
78
|
+
return venvPy; // venv exists and all deps importable
|
|
79
79
|
} catch {
|
|
80
80
|
// venv exists but deps missing — reinstall
|
|
81
81
|
console.error("LivePilot: reinstalling Python dependencies...");
|
|
Binary file
|
package/mcp_server/__init__.py
CHANGED
|
@@ -1,2 +1,2 @@
|
|
|
1
1
|
"""LivePilot MCP Server — bridges MCP protocol to Ableton Live."""
|
|
2
|
-
__version__ = "1.
|
|
2
|
+
__version__ = "1.7.0"
|
package/mcp_server/server.py
CHANGED
|
@@ -57,6 +57,9 @@ from .tools import memory # noqa: F401, E402
|
|
|
57
57
|
from .tools import analyzer # noqa: F401, E402
|
|
58
58
|
from .tools import automation # noqa: F401, E402
|
|
59
59
|
from .tools import theory # noqa: F401, E402
|
|
60
|
+
from .tools import generative # noqa: F401, E402
|
|
61
|
+
from .tools import harmony # noqa: F401, E402
|
|
62
|
+
from .tools import midi_io # noqa: F401, E402
|
|
60
63
|
|
|
61
64
|
|
|
62
65
|
# ---------------------------------------------------------------------------
|
|
@@ -0,0 +1,271 @@
|
|
|
1
|
+
"""Pure Python generative music engine — zero dependencies.
|
|
2
|
+
|
|
3
|
+
Implements: Bjorklund/Euclidean rhythms, tintinnabuli, phase shifting,
|
|
4
|
+
additive process. All functions are pure — no state, no I/O.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import math
|
|
10
|
+
from collections import defaultdict
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# ---------------------------------------------------------------------------
|
|
14
|
+
# Known Euclidean rhythms
|
|
15
|
+
# ---------------------------------------------------------------------------
|
|
16
|
+
|
|
17
|
+
# Named (pulses, steps) combinations whose Euclidean distribution corresponds
# to a traditional rhythm — cf. Toussaint, "The Euclidean Algorithm Generates
# Traditional Musical Rhythms". Consumed by identify_rhythm().
KNOWN_RHYTHMS: dict[tuple[int, int], str] = {
    (2, 3): "shuffle",
    (2, 5): "khafif-e-ramal",
    (3, 4): "cumbia",
    (3, 7): "ruchenitza",
    (3, 8): "tresillo",
    (4, 7): "yoruba bell",
    (5, 8): "cinquillo",
    (5, 16): "bossa nova",
    (7, 12): "west african bell",
    (7, 16): "brazilian necklace",
}
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
# ---------------------------------------------------------------------------
|
|
32
|
+
# Bjorklund / Euclidean rhythm
|
|
33
|
+
# ---------------------------------------------------------------------------
|
|
34
|
+
|
|
35
|
+
def bjorklund(pulses: int, steps: int) -> list[int]:
    """Bjorklund/Euclidean rhythm: distribute pulses evenly across steps.

    Runs Bjorklund's recursive subdivision (structurally the Euclidean GCD
    algorithm on ``pulses`` and ``steps - pulses``), then rotates the result
    into a canonical form that starts with a hit.

    Args:
        pulses: Number of onsets (1s) to place.
        steps: Total number of grid positions.

    Returns:
        List of 0s and 1s with length == steps. Degenerate inputs yield
        [] (steps <= 0), all rests (pulses <= 0), or all hits (pulses >= steps).
    """
    if steps <= 0:
        return []
    if pulses <= 0:
        return [0] * steps
    if pulses >= steps:
        return [1] * steps

    # Euclidean-algorithm bookkeeping: counts[l]/remainders[l] hold the
    # quotient and remainder produced at subdivision level l.
    counts = [0] * steps
    remainders = [0] * steps
    divisor = steps - pulses
    remainders[0] = pulses
    level = 0

    while True:
        counts[level] = divisor // remainders[level]
        remainders[level + 1] = divisor % remainders[level]
        divisor = remainders[level]
        level += 1
        if remainders[level] <= 1:
            break

    counts[level] = divisor

    def _build(lv: int) -> list[int]:
        # Unfold the recursion; level -1 emits a rest, level -2 a hit.
        if lv == -1:
            return [0]
        if lv == -2:
            return [1]
        seq: list[int] = []
        for _ in range(counts[lv]):
            seq.extend(_build(lv - 1))
        if remainders[lv] != 0:
            seq.extend(_build(lv - 2))
        return seq

    pattern = _build(level)

    # Rotate to canonical form: first hit followed by a rest (1 then 0).
    # If every position after a hit is also a hit (e.g. pulses == steps - 1
    # leaves a single rest), fall back to rotating so the pattern starts
    # with 1.
    if not pattern or 1 not in pattern or 0 not in pattern:
        return pattern
    n = len(pattern)
    for rot in range(n):
        rotated = pattern[rot:] + pattern[:rot]
        if rotated[0] == 1 and rotated[1] == 0:
            return rotated
    idx = pattern.index(1)  # fallback: rotate to first 1
    return pattern[idx:] + pattern[:idx]
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def rotate_pattern(pattern: list[int], rotation: int) -> list[int]:
    """Return *pattern* rotated left by *rotation* steps.

    Negative rotations shift right; an empty pattern is returned unchanged.
    """
    size = len(pattern)
    if size == 0:
        return pattern
    pivot = rotation % size
    return pattern[pivot:] + pattern[:pivot]
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def identify_rhythm(pulses: int, steps: int) -> str | None:
    """Look up the traditional name of the (pulses, steps) Euclidean rhythm.

    Returns None when the combination has no entry in KNOWN_RHYTHMS.
    """
    key = (pulses, steps)
    return KNOWN_RHYTHMS.get(key)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
# ---------------------------------------------------------------------------
|
|
108
|
+
# Tintinnabuli (Arvo Pärt)
|
|
109
|
+
# ---------------------------------------------------------------------------
|
|
110
|
+
|
|
111
|
+
def tintinnabuli_voice(
    melody_pitches: list[int],
    triad_pcs: list[int],
    position: str = "nearest",
) -> list[int]:
    """Derive the tintinnabuli (T) voice for a melody.

    For each melody pitch, the closest tone of the given triad is selected,
    constrained to sit above, below, or simply nearest to the melody note.

    Args:
        melody_pitches: MIDI pitch numbers of the melody.
        triad_pcs: Pitch classes (0-11) of the triad (e.g. [0,4,7] for C major).
        position: "above", "below", or "nearest".

    Returns:
        List of MIDI pitches for the tintinnabuli voice, one per melody note.
    """
    return [_find_triad_tone(p, triad_pcs, position) for p in melody_pitches]
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _find_triad_tone(pitch: int, triad_pcs: list[int], position: str) -> int:
|
|
134
|
+
"""Find the nearest triad tone relative to a given pitch."""
|
|
135
|
+
candidates = []
|
|
136
|
+
for octave_offset in range(-2, 3):
|
|
137
|
+
for pc in triad_pcs:
|
|
138
|
+
candidate = (pitch // 12 + octave_offset) * 12 + pc
|
|
139
|
+
if 0 <= candidate <= 127:
|
|
140
|
+
candidates.append(candidate)
|
|
141
|
+
|
|
142
|
+
if position == "above":
|
|
143
|
+
above = [c for c in candidates if c > pitch]
|
|
144
|
+
return min(above) if above else max(candidates)
|
|
145
|
+
elif position == "below":
|
|
146
|
+
below = [c for c in candidates if c < pitch]
|
|
147
|
+
return max(below) if below else min(candidates)
|
|
148
|
+
else: # nearest
|
|
149
|
+
others = [c for c in candidates if c != pitch]
|
|
150
|
+
if not others:
|
|
151
|
+
return pitch
|
|
152
|
+
return min(others, key=lambda c: abs(c - pitch))
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
# ---------------------------------------------------------------------------
|
|
156
|
+
# Phase shifting (Steve Reich)
|
|
157
|
+
# ---------------------------------------------------------------------------
|
|
158
|
+
|
|
159
|
+
def phase_shift(
    pattern_notes: list[dict],
    voices: int = 2,
    shift_amount: float = 0.125,
    total_length: float = 16.0,
) -> list[dict]:
    """Build a Reich-style phase canon from a looping pattern.

    Voice 0 repeats the pattern verbatim; each later voice drifts ahead by
    an extra *shift_amount* on every repetition, so the voices slowly slide
    out of phase. Each voice is rendered at a distinct velocity
    (100, 90, 80, ..., floored at 30) so the layers are distinguishable.

    Args:
        pattern_notes: Notes with "pitch", "start_time", "duration" keys.
        voices: Number of simultaneous voices.
        shift_amount: Extra offset (beats) accumulated per repetition per voice.
        total_length: Clip length in beats; notes starting past this are dropped.

    Returns:
        Combined note list sorted by start time; [] for an empty or
        zero-length pattern.
    """
    if not pattern_notes:
        return []

    ordered = sorted(pattern_notes, key=lambda n: n["start_time"])
    loop_len = max(n["start_time"] + n["duration"] for n in ordered)
    if loop_len <= 0:
        return []

    combined: list[dict] = []
    for voice in range(voices):
        vel = max(100 - voice * 10, 30)
        # offset(rep) = rep * loop_len + voice * shift_amount * (rep + 1):
        # base loop position plus this voice's accumulated drift.
        rep = 0
        while True:
            offset = rep * loop_len + voice * shift_amount * (rep + 1)
            if offset >= total_length:
                break
            for note in ordered:
                start = offset + note["start_time"]
                if start >= total_length:
                    continue
                combined.append({
                    "pitch": note["pitch"],
                    "start_time": round(start, 4),
                    "duration": note["duration"],
                    "velocity": vel,
                })
            rep += 1

    combined.sort(key=lambda n: n["start_time"])
    return combined
|
|
208
|
+
|
|
209
|
+
|
|
210
|
+
# ---------------------------------------------------------------------------
|
|
211
|
+
# Additive process (Philip Glass)
|
|
212
|
+
# ---------------------------------------------------------------------------
|
|
213
|
+
|
|
214
|
+
def additive_process(
    melody_notes: list[dict],
    direction: str = "forward",
    repetitions_per_stage: int = 2,
) -> list[dict]:
    """Glass-style additive process over a melody.

    forward  : stage k plays notes 0..k; each stage is repeated N times.
    backward : stage k plays notes k..end (melody shrinks from the front).
    both     : all forward stages, then the backward stages starting from
               the second note so the full melody is not played twice.

    Start times are re-based so the first melody note sits at time 0, and
    stages are laid end to end (each stage lasts until its latest note-off).

    Args:
        melody_notes: Notes with "pitch", "start_time", "duration" and an
            optional "velocity" (defaults to 100).
        direction: "forward", "backward", or anything else for "both".
        repetitions_per_stage: Times each stage is repeated before growing.

    Returns:
        Flat note list for the whole process; [] for an empty melody.
    """
    if not melody_notes:
        return []

    ordered = sorted(melody_notes, key=lambda n: n["start_time"])
    origin = ordered[0]["start_time"]
    # Re-base the melody to start at 0 and fill in default velocities.
    melody = [
        {
            "pitch": n["pitch"],
            "start_time": round(n["start_time"] - origin, 4),
            "duration": n["duration"],
            "velocity": n.get("velocity", 100),
        }
        for n in ordered
    ]

    def _length(stage: list[dict]) -> float:
        # A stage lasts until its latest note-off.
        return max((n["start_time"] + n["duration"] for n in stage), default=0.0)

    def _render(stages: list[list[dict]]) -> list[dict]:
        out: list[dict] = []
        clock = 0.0
        for stage in stages:
            for _ in range(repetitions_per_stage):
                out.extend(
                    {
                        "pitch": n["pitch"],
                        "start_time": round(clock + n["start_time"], 4),
                        "duration": n["duration"],
                        "velocity": n["velocity"],
                    }
                    for n in stage
                )
                clock = round(clock + _length(stage), 4)
        return out

    total = len(melody)
    if direction == "forward":
        return _render([melody[:k + 1] for k in range(total)])
    if direction == "backward":
        return _render([melody[k:] for k in range(total)])
    # "both": forward growth, then shrink — skip the full-melody stage
    # at the turnaround to avoid playing it twice.
    grown = [melody[:k + 1] for k in range(total)]
    shrunk = [melody[k:] for k in range(1, total)]
    return _render(grown + shrunk)
|
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
"""Neo-Riemannian harmony engine — pure Python with optional opycleid.
|
|
2
|
+
|
|
3
|
+
Implements P/L/R transforms, Tonnetz navigation, BFS path finding,
|
|
4
|
+
progression classification, and chromatic mediant computation.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from collections import deque
|
|
10
|
+
from typing import Callable
|
|
11
|
+
|
|
12
|
+
from . import _theory_engine as engine
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# ---------------------------------------------------------------------------
|
|
16
|
+
# Note name display
|
|
17
|
+
# ---------------------------------------------------------------------------
|
|
18
|
+
|
|
19
|
+
# Fixed display spellings per pitch class (index 0 = C). The "major" table
# leans flat (Eb/Ab/Bb), the "minor" table leans sharp (D#/G#) but keeps Bb.
# NOTE(review): these spellings are heuristic, not key-aware — pc 3 always
# prints as "Eb" in major context even where "D#" would be conventional.
_PC_NAMES_MAJOR = ['C', 'C#', 'D', 'Eb', 'E', 'F', 'F#', 'G', 'Ab', 'A', 'Bb', 'B']
_PC_NAMES_MINOR = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'Bb', 'B']
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def chord_to_str(root_pc: int, quality: str) -> str:
    """Render (root_pc, quality) as a display string like 'Ab major'.

    'major' uses the flat-leaning spelling table; any other quality uses
    the sharp-leaning one.
    """
    if quality == "major":
        table = _PC_NAMES_MAJOR
    else:
        table = _PC_NAMES_MINOR
    return f"{table[root_pc % 12]} {quality}"
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def parse_chord(chord_str: str) -> tuple[int, str]:
    """Parse a chord label into (root_pc, quality).

    Examples: 'C major' -> (0, 'major'); 'F# minor' -> (6, 'minor').
    Delegates parsing to _theory_engine.parse_key() and accepts only plain
    major/minor results.

    Raises:
        ValueError: if the parsed mode is anything but "major" or "minor".
    """
    info = engine.parse_key(chord_str)
    quality = info["mode"]
    if quality not in ("major", "minor"):
        raise ValueError(f"Only major/minor chords supported, got: {quality}")
    return (info["tonic"], quality)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
# ---------------------------------------------------------------------------
|
|
42
|
+
# PRL transforms
|
|
43
|
+
# ---------------------------------------------------------------------------
|
|
44
|
+
|
|
45
|
+
def parallel(root_pc: int, quality: str) -> tuple[int, str]:
    """P transform: swap major/minor over the same root (C major <-> C minor)."""
    if quality == "major":
        return (root_pc, "minor")
    return (root_pc, "major")
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def leading_tone(root_pc: int, quality: str) -> tuple[int, str]:
    """L transform (Leittonwechsel): C major -> E minor; E minor -> C major.

    A major triad's root drops a semitone, leaving the minor triad a major
    third above; a minor triad's fifth rises a semitone, leaving the major
    triad a major third below.
    """
    if quality == "major":
        step, new_quality = 4, "minor"
    else:
        step, new_quality = -4, "major"
    return ((root_pc + step) % 12, new_quality)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def relative(root_pc: int, quality: str) -> tuple[int, str]:
    """R transform: C major -> A minor; A minor -> C major.

    A major triad's fifth rises a whole tone (relative minor); a minor
    triad's root drops a whole tone... equivalently the relative major
    sits a minor third above the minor root.
    """
    offset, new_quality = (9, "minor") if quality == "major" else (3, "major")
    return ((root_pc + offset) % 12, new_quality)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
# Dispatch table from transform letter to its implementation. Referenced by
# apply_transforms(), _explore(), find_shortest_path(), and
# classify_transform_sequence(); dict order fixes the neighbor-expansion
# order (P, then L, then R).
TRANSFORMS: dict[str, Callable] = {
    "P": parallel,
    "L": leading_tone,
    "R": relative,
}
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def apply_transforms(root_pc: int, quality: str, transforms: str) -> tuple[int, str]:
    """Apply a sequence of PRL transforms left to right ('PRL' = P, then R, then L).

    Raises:
        ValueError: on any character outside {P, L, R}.
    """
    chord = (root_pc, quality)
    for letter in transforms:
        step = TRANSFORMS.get(letter)
        if step is None:
            raise ValueError(f"Unknown transform: {letter}. Use P, L, or R.")
        chord = step(*chord)
    return chord
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def chord_to_midi(root_pc: int, quality: str, octave: int = 4) -> list[int]:
    """Spell the triad as MIDI pitches with the root in *octave* (C4 = 60).

    Major triads stack 4 + 3 semitones above the root; minor triads 3 + 4.
    """
    root = (octave + 1) * 12 + root_pc
    third = 4 if quality == "major" else 3
    return [root, root + third, root + 7]
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
# ---------------------------------------------------------------------------
|
|
95
|
+
# Tonnetz navigation
|
|
96
|
+
# ---------------------------------------------------------------------------
|
|
97
|
+
|
|
98
|
+
def get_neighbors(root_pc: int, quality: str, depth: int = 1) -> dict:
    """Map every PRL transform path of length <= depth to the chord it reaches.

    Keys are transform strings like "P" or "PL" (depth-first expansion
    order); values are (root_pc, quality) tuples.
    """
    reachable: dict = {}
    _explore(root_pc, quality, "", depth, reachable)
    return reachable
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _explore(root_pc: int, quality: str, path: str, remaining: int,
             result: dict) -> None:
    """Depth-first expansion of the PRL neighborhood into *result* (path -> chord)."""
    if remaining <= 0:
        return
    for letter, step in TRANSFORMS.items():
        reached = step(root_pc, quality)
        branch = path + letter
        if branch not in result:
            result[branch] = reached
            _explore(*reached, branch, remaining - 1, result)
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
# ---------------------------------------------------------------------------
|
|
119
|
+
# BFS path finding
|
|
120
|
+
# ---------------------------------------------------------------------------
|
|
121
|
+
|
|
122
|
+
def find_shortest_path(
    from_chord: tuple[int, str],
    to_chord: tuple[int, str],
    max_depth: int = 4,
) -> dict:
    """BFS through PRL space to find shortest path between two chords.

    Returns a dict:
        found: True when a path of at most max_depth transforms exists.
        steps: number of transforms applied (0 for identical chords,
               -1 when no path was found within max_depth).
        path: the chords visited, start through goal inclusive.
        transforms: one transform letter per step.
    """
    # Trivial case: start equals goal — zero transforms.
    if from_chord == to_chord:
        return {
            "found": True,
            "steps": 0,
            "path": [from_chord],
            "transforms": [],
        }

    queue: deque = deque()
    visited: set = {from_chord}

    # Seed the frontier with the three one-step neighbors, checking each
    # for an immediate hit before enqueueing it.
    for label, fn in TRANSFORMS.items():
        next_chord = fn(*from_chord)
        if next_chord == to_chord:
            return {
                "found": True,
                "steps": 1,
                "path": [from_chord, to_chord],
                "transforms": [label],
            }
        if next_chord not in visited:
            visited.add(next_chord)
            queue.append((next_chord, [from_chord, next_chord], [label]))

    # Expand one full BFS level per outer iteration so `depth` always equals
    # the number of transforms in the paths produced at that level.
    depth = 1
    while queue and depth < max_depth:
        depth += 1
        level_size = len(queue)
        for _ in range(level_size):
            current, path, transforms = queue.popleft()
            for label, fn in TRANSFORMS.items():
                next_chord = fn(*current)
                if next_chord == to_chord:
                    return {
                        "found": True,
                        "steps": depth,
                        "path": path + [to_chord],
                        "transforms": transforms + [label],
                    }
                if next_chord not in visited:
                    visited.add(next_chord)
                    queue.append((next_chord, path + [next_chord],
                                  transforms + [label]))

    # Frontier exhausted or depth limit hit without reaching the goal.
    return {"found": False, "steps": -1, "path": [], "transforms": []}
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
# ---------------------------------------------------------------------------
|
|
176
|
+
# Progression classification
|
|
177
|
+
# ---------------------------------------------------------------------------
|
|
178
|
+
|
|
179
|
+
def classify_transform_sequence(chords: list[tuple[int, str]]) -> list[str]:
    """Label each consecutive chord pair with its PRL transform letter.

    A pair related by none of P/L/R is labelled "?".
    """
    labels = []
    for current, following in zip(chords, chords[1:]):
        match = next(
            (letter for letter, step in TRANSFORMS.items()
             if step(*current) == following),
            "?",
        )
        labels.append(match)
    return labels
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
# ---------------------------------------------------------------------------
|
|
193
|
+
# Chromatic mediants
|
|
194
|
+
# ---------------------------------------------------------------------------
|
|
195
|
+
|
|
196
|
+
def get_chromatic_mediants(root_pc: int, quality: str) -> dict:
    """Return the six mediant-related chords offered as chromatic mediants.

    Four keep the original quality (root a major or minor third up/down);
    the two *_flip entries move a major third away and swap the quality.
    """
    flipped = "minor" if quality == "major" else "major"
    up_major = (root_pc + 4) % 12
    down_major = (root_pc - 4) % 12
    return {
        "upper_major_third": (up_major, quality),
        "lower_major_third": (down_major, quality),
        "upper_minor_third": ((root_pc + 3) % 12, quality),
        "lower_minor_third": ((root_pc - 3) % 12, quality),
        "upper_major_third_flip": (up_major, flipped),
        "lower_major_third_flip": (down_major, flipped),
    }
|