livepilot 1.6.4 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +43 -7
- package/README.md +21 -6
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/server.py +3 -0
- package/mcp_server/tools/_generative_engine.py +271 -0
- package/mcp_server/tools/_harmony_engine.py +207 -0
- package/mcp_server/tools/_theory_engine.py +366 -0
- package/mcp_server/tools/generative.py +273 -0
- package/mcp_server/tools/harmony.py +253 -0
- package/mcp_server/tools/midi_io.py +305 -0
- package/mcp_server/tools/theory.py +160 -320
- package/package.json +2 -2
- package/plugin/plugin.json +2 -2
- package/plugin/skills/livepilot-core/SKILL.md +46 -8
- package/plugin/skills/livepilot-core/references/overview.md +5 -5
- package/remote_script/LivePilot/__init__.py +2 -2
- package/requirements.txt +3 -3
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
"""Neo-Riemannian harmony tools — Tonnetz navigation, voice-leading paths,
|
|
2
|
+
progression classification, chromatic mediant suggestions.
|
|
3
|
+
|
|
4
|
+
4 tools for advanced harmonic analysis and exploration.
|
|
5
|
+
Pure computation — no Ableton connection needed.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from fastmcp import Context
|
|
14
|
+
|
|
15
|
+
from ..server import mcp
|
|
16
|
+
from . import _harmony_engine as harmony
|
|
17
|
+
from . import _theory_engine as theory
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def _ensure_list(value: Any) -> list:
|
|
21
|
+
if isinstance(value, str):
|
|
22
|
+
return json.loads(value)
|
|
23
|
+
return value
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
# -- Tool 1: navigate_tonnetz ------------------------------------------------
|
|
27
|
+
|
|
28
|
+
@mcp.tool()
def navigate_tonnetz(
    ctx: Context,
    chord: str,
    depth: int = 1,
) -> dict:
    """Show neo-Riemannian neighbors of a chord on the Tonnetz.

    P (Parallel) flips the third: C major → C minor.
    L (Leading-tone) shifts by semitone: C major → E minor.
    R (Relative) shifts by whole tone: C major → A minor.

    Use depth 2-3 to see compound transforms (PL, PR, PRL, etc.).
    """
    if depth < 1 or depth > 3:
        return {"error": "depth must be 1-3"}
    try:
        root_pc, quality = harmony.parse_chord(chord)
    except ValueError as e:
        return {"error": str(e)}

    neighbors = harmony.get_neighbors(root_pc, quality, depth)

    # Human-readable blurb for each primitive transform.
    transform_notes = {
        "P": "flip third (Parallel)",
        "L": "shift by semitone (Leading-tone)",
        "R": "shift by whole tone (Relative)",
    }

    # Depth-1 neighbors: the three primitive P/L/R moves.
    primary = {
        label: {
            "chord": harmony.chord_to_str(*neighbors[label]),
            "transform": label,
            "description": transform_notes[label],
        }
        for label in ("P", "L", "R")
        if label in neighbors
    }

    result: dict = {"chord": chord, "neighbors": primary}

    # Compound transforms are keyed by their letter sequence ("PL", "PRL", ...)
    # so the sequence length tells us which depth bucket they belong to.
    for level in (2, 3):
        if depth >= level:
            result[f"depth_{level}"] = {
                seq: {
                    "chord": harmony.chord_to_str(*target),
                    "transforms": seq,
                }
                for seq, target in neighbors.items()
                if len(seq) == level
            }

    return result
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
# -- Tool 2: find_voice_leading_path -----------------------------------------
|
|
93
|
+
|
|
94
|
+
@mcp.tool()
def find_voice_leading_path(
    ctx: Context,
    from_chord: str,
    to_chord: str,
    max_steps: int = 4,
) -> dict:
    """Find the shortest neo-Riemannian path between two chords.

    Returns each intermediate chord and the specific voice movements.
    This is the 'film score progression finder' — chromatic mediants,
    hexatonic poles, and other cinematic chord moves.
    """
    if max_steps < 1 or max_steps > 6:
        return {"error": "max_steps must be 1-6"}
    try:
        start = harmony.parse_chord(from_chord)
        goal = harmony.parse_chord(to_chord)
    except ValueError as e:
        return {"error": str(e)}

    search = harmony.find_shortest_path(start, goal, max_steps)

    if not search["found"]:
        # No path within max_steps: return an empty-but-well-formed result.
        return {
            "from": from_chord,
            "to": to_chord,
            "found": False,
            "steps": -1,
            "path": [],
            "transforms": [],
            "voice_leading": [],
        }

    chords_on_path = search["path"]

    # Describe the per-step voice motion between consecutive chords.
    voice_leading = []
    for prev, nxt in zip(chords_on_path, chords_on_path[1:]):
        prev_midi = harmony.chord_to_midi(*prev)
        next_midi = harmony.chord_to_midi(*nxt)
        moved = [
            f"{theory.pitch_name(a)}→{theory.pitch_name(b)}"
            for a, b in zip(prev_midi, next_midi)
            if a != b
        ]
        voice_leading.append({
            "from": prev_midi,
            "to": next_midi,
            "movement": ", ".join(moved) if moved else "no movement",
        })

    return {
        "from": from_chord,
        "to": to_chord,
        "found": True,
        "steps": search["steps"],
        "path": [harmony.chord_to_str(*c) for c in chords_on_path],
        "transforms": search["transforms"],
        "voice_leading": voice_leading,
    }
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
# -- Tool 3: classify_progression --------------------------------------------
|
|
155
|
+
|
|
156
|
+
@mcp.tool()
def classify_progression(
    ctx: Context,
    chords: Any,
) -> dict:
    """Classify a chord progression by its neo-Riemannian transform pattern.

    Identifies hexatonic cycles (PL), octatonic cycles (PR), diatonic
    cycles (LR), and other known patterns. Pairs with analyze_harmony
    to understand why a progression sounds 'cinematic' or 'otherworldly'.

    Args:
        chords: A list of chord symbols, or a JSON string encoding one.

    Returns:
        dict with chords, transforms, pattern, classification, notable_usage;
        or {"error": ...} on bad input.
    """
    # _ensure_list json-decodes string input; malformed JSON or a non-list
    # payload should surface as a tool error dict (consistent with the other
    # tools in this module), not an unhandled exception.
    try:
        chords = _ensure_list(chords)
    except (ValueError, TypeError) as e:
        return {"error": f"Invalid chords argument: {e}"}
    if not isinstance(chords, list) or len(chords) < 2:
        return {"error": "Need at least 2 chords to classify"}

    try:
        parsed = [harmony.parse_chord(c) for c in chords]
    except ValueError as e:
        return {"error": str(e)}

    transforms = harmony.classify_transform_sequence(parsed)
    pattern = "".join(transforms)

    classification = "free neo-Riemannian progression"
    notable_usage = None
    # "?" presumably marks moves the engine could not express as a single
    # P/L/R transform — strip them before matching cycle families.
    # TODO(review): confirm against _harmony_engine.classify_transform_sequence.
    clean = pattern.replace("?", "")

    if len(clean) >= 2:
        # A cycle fragment alternates exactly two transform letters; check the
        # leading pair and that no other letters appear.
        pair = clean[:2]
        if pair in ("PL", "LP") and all(c in "PL" for c in clean):
            classification = "hexatonic cycle fragment"
            notable_usage = "Radiohead, film scores (Zimmer, Howard)"
        elif pair in ("PR", "RP") and all(c in "PR" for c in clean):
            classification = "octatonic cycle fragment"
            notable_usage = "late Romantic (Wagner, Strauss), horror film scores"
        elif pair in ("LR", "RL") and all(c in "LR" for c in clean):
            classification = "diatonic cycle fragment"
            notable_usage = "functional harmony, common in classical and pop"

    if len(clean) == 1:
        # Single-move progressions are named after the transform itself.
        names = {"P": "parallel transform", "L": "leading-tone transform",
                 "R": "relative transform"}
        classification = names.get(clean, classification)

    return {
        "chords": chords,
        "transforms": transforms,
        "pattern": pattern,
        "classification": classification,
        "notable_usage": notable_usage,
    }
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
# -- Tool 4: suggest_chromatic_mediants --------------------------------------
|
|
210
|
+
|
|
211
|
+
@mcp.tool()
def suggest_chromatic_mediants(
    ctx: Context,
    chord: str,
) -> dict:
    """Suggest all chromatic mediant relations for a chord.

    Chromatic mediants are chords a major/minor third away — they share
    0-1 common tones, creating maximum color shift with minimal voice movement.
    Includes 'cinematic picks' highlighting the most film-score-friendly options.
    """
    try:
        root_pc, quality = harmony.parse_chord(chord)
    except ValueError as e:
        return {"error": str(e)}

    mediants = harmony.get_chromatic_mediants(root_pc, quality)

    # Count tones shared between the source chord and each mediant.
    source_tones = set(harmony.chord_to_midi(root_pc, quality))
    formatted = {}
    for relation, target in mediants.items():
        shared = source_tones & set(harmony.chord_to_midi(*target))
        formatted[relation] = {
            "chord": harmony.chord_to_str(*target),
            "common_tones": len(shared),
            "relation": relation.replace("_", " "),
        }

    # Major-third mediants up and down are the classic 'film score' moves.
    cinematic = [
        harmony.chord_to_str(*mediants[name])
        for name in ("lower_major_third", "upper_major_third")
    ]

    return {
        "chord": chord,
        "chromatic_mediants": formatted,
        "cinematic_picks": cinematic,
        "explanation": (
            "Chromatic mediants share 0-1 common tones with the original chord. "
            "Maximum color shift with minimal voice movement — the 'epic' sound."
        ),
    }
|
|
@@ -0,0 +1,305 @@
|
|
|
1
|
+
"""MIDI file I/O tools — export, import, analyze, piano roll.
|
|
2
|
+
|
|
3
|
+
4 tools bridging LivePilot's session clips with .mid files.
|
|
4
|
+
Tools 1-2 require Ableton connection. Tools 3-4 are offline-capable.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import os
|
|
11
|
+
import statistics
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Any, Optional
|
|
14
|
+
|
|
15
|
+
from fastmcp import Context
|
|
16
|
+
|
|
17
|
+
from ..server import mcp
|
|
18
|
+
from . import _theory_engine as theory
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def _get_ableton(ctx: Context):
|
|
22
|
+
return ctx.lifespan_context["ableton"]
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def _require_midiutil():
|
|
26
|
+
try:
|
|
27
|
+
from midiutil import MIDIFile
|
|
28
|
+
return MIDIFile
|
|
29
|
+
except ImportError:
|
|
30
|
+
raise ImportError(
|
|
31
|
+
"midiutil is required for MIDI export. "
|
|
32
|
+
"Install with: pip install midiutil"
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _require_pretty_midi():
|
|
37
|
+
try:
|
|
38
|
+
import pretty_midi
|
|
39
|
+
return pretty_midi
|
|
40
|
+
except ImportError:
|
|
41
|
+
raise ImportError(
|
|
42
|
+
"pretty-midi is required for this tool. "
|
|
43
|
+
"Install with: pip install pretty-midi"
|
|
44
|
+
)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _output_dir() -> Path:
|
|
48
|
+
d = Path.home() / "Documents" / "LivePilot" / "outputs" / "midi"
|
|
49
|
+
d.mkdir(parents=True, exist_ok=True)
|
|
50
|
+
return d
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _validate_midi_path(file_path: str) -> Path:
|
|
54
|
+
p = Path(file_path)
|
|
55
|
+
if not p.exists():
|
|
56
|
+
raise FileNotFoundError(f"File not found: {file_path}")
|
|
57
|
+
if p.suffix.lower() not in (".mid", ".midi"):
|
|
58
|
+
raise ValueError(f"Not a MIDI file: {file_path}")
|
|
59
|
+
return p
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
# -- Tool 1: export_clip_midi ------------------------------------------------
|
|
63
|
+
|
|
64
|
+
@mcp.tool()
def export_clip_midi(
    ctx: Context,
    track_index: int,
    clip_index: int,
    filename: Optional[str] = None,
) -> dict:
    """Export a session clip's notes to a .mid file.

    Fetches notes from the clip and writes them to a standard MIDI file.
    Auto-generates filename from track/clip if not provided.

    Args:
        track_index: Session-view track holding the clip.
        clip_index: Clip slot within that track.
        filename: Output file name; ".mid" is appended if no MIDI suffix.

    Returns:
        dict with file_path, note_count, duration_beats, and tempo.
    """
    MIDIFile = _require_midiutil()  # deferred import; raises if midiutil absent
    ableton = _get_ableton(ctx)

    # Pull the clip's notes from Live via the bridge.
    notes_result = ableton.send_command("get_notes", {
        "track_index": track_index,
        "clip_index": clip_index,
    })
    notes = notes_result.get("notes", [])

    # The session tempo becomes the MIDI file's tempo event.
    session = ableton.send_command("get_session_info", {})
    tempo = float(session.get("tempo", 120.0))

    if not filename:
        filename = f"track{track_index}_clip{clip_index}.mid"
    if not filename.endswith((".mid", ".midi")):
        filename += ".mid"

    out_path = _output_dir() / filename

    # Single-track file: track 0, channel 0, tempo event at beat 0.
    midi = MIDIFile(1)
    midi.addTempo(0, 0, tempo)

    # Clip note times/durations are already in beats — presumably the bridge
    # returns Live's native beat units; confirm against livepilot_bridge.js.
    duration_beats = 0.0
    for n in notes:
        start = float(n["start_time"])
        dur = float(n["duration"])
        pitch = int(n["pitch"])
        vel = int(n.get("velocity", 100))
        midi.addNote(0, 0, pitch, start, dur, vel)
        # Track the end of the latest-ending note as the clip duration.
        duration_beats = max(duration_beats, start + dur)

    with open(out_path, "wb") as f:
        midi.writeFile(f)

    return {
        "file_path": str(out_path),
        "note_count": len(notes),
        "duration_beats": round(duration_beats, 4),
        "tempo": tempo,
    }
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
# -- Tool 2: import_midi_to_clip ---------------------------------------------
|
|
119
|
+
|
|
120
|
+
@mcp.tool()
def import_midi_to_clip(
    ctx: Context,
    file_path: str,
    track_index: int,
    clip_index: int,
    create_clip: bool = True,
) -> dict:
    """Load a .mid file into a session clip.

    Reads MIDI, converts timing to beats using session tempo, and writes
    notes into the target clip slot. Creates the clip if needed.

    Args:
        file_path: Path to an existing .mid/.midi file.
        track_index: Target session-view track.
        clip_index: Target clip slot on that track.
        create_clip: When True, create the clip (sized to the notes) first.

    Returns:
        dict with note_count, duration_beats, and tempo_source.
    """
    pretty_midi = _require_pretty_midi()
    ableton = _get_ableton(ctx)

    path = _validate_midi_path(file_path)
    pm = pretty_midi.PrettyMIDI(str(path))

    # NOTE(review): seconds→beats conversion uses the *session* tempo, not the
    # file's own tempo map — if they differ, musical timing is rescaled.
    session = ableton.send_command("get_session_info", {})
    tempo = float(session.get("tempo", 120.0))

    # Flatten every instrument's notes into bridge-format dicts.
    notes_raw = []
    for inst in pm.instruments:
        for n in inst.notes:
            # beats = seconds * (BPM / 60)
            start_beat = round(n.start * (tempo / 60.0), 3)
            dur_beat = round((n.end - n.start) * (tempo / 60.0), 3)
            notes_raw.append({
                "pitch": n.pitch,
                "start_time": start_beat,
                # Floor at 0.001 beats — presumably zero-length notes would be
                # rejected downstream; confirm against the bridge.
                "duration": max(dur_beat, 0.001),
                "velocity": n.velocity,
            })

    # De-duplicate notes that are identical after rounding (e.g. the same
    # part doubled across instruments).
    seen = set()
    notes = []
    for n in notes_raw:
        key = (n["pitch"], round(n["start_time"], 3), round(n["duration"], 3))
        if key not in seen:
            seen.add(key)
            notes.append(n)

    # Cap the payload sent over the bridge.
    notes = notes[:2000]

    # Clip length = end of the latest note; 4 beats for an empty file.
    duration_beats = max((n["start_time"] + n["duration"] for n in notes),
                         default=4.0)

    # Order matters: the clip must exist before add_notes targets it.
    if create_clip:
        ableton.send_command("create_clip", {
            "track_index": track_index,
            "clip_index": clip_index,
            "length": round(duration_beats, 2),
        })

    if notes:
        ableton.send_command("add_notes", {
            "track_index": track_index,
            "clip_index": clip_index,
            "notes": notes,
        })

    return {
        "note_count": len(notes),
        "duration_beats": round(duration_beats, 4),
        "tempo_source": tempo,
    }
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
# -- Tool 3: analyze_midi_file -----------------------------------------------
|
|
189
|
+
|
|
190
|
+
@mcp.tool()
def analyze_midi_file(
    ctx: Context,
    file_path: str,
) -> dict:
    """Analyze a .mid file — works offline, no Ableton needed.

    Returns note count, duration, tempo, pitch range, instruments,
    velocity stats, density curve, and estimated key.
    """
    pretty_midi = _require_pretty_midi()
    path = _validate_midi_path(file_path)
    pm = pretty_midi.PrettyMIDI(str(path))

    # Flatten every instrument's notes into one list.
    all_notes = [n for inst in pm.instruments for n in inst.notes]

    if not all_notes:
        # Empty file: return a well-formed result with zeroed stats.
        return {
            "note_count": 0,
            "duration_seconds": round(pm.get_end_time(), 2),
            "tempo_estimates": list(pm.get_tempo_changes()[1]),
            "pitch_range": [0, 0],
            "instruments": [i.name for i in pm.instruments],
            "velocity_stats": {},
            "density_curve": [],
            "key_estimate": "unknown",
        }

    pitches = [n.pitch for n in all_notes]
    velocities = [n.velocity for n in all_notes]
    duration = pm.get_end_time()

    # Note-onset density over fixed one-second windows.
    window = 1.0
    density_curve = []
    t = 0.0
    while t < duration:
        onsets = sum(1 for n in all_notes if t <= n.start < t + window)
        density_curve.append({
            "time": round(t, 1),
            "density": onsets / window,
        })
        t += window

    # Key estimation uses pitch + duration weighting from the theory engine.
    key_result = theory.detect_key(
        [{"pitch": n.pitch, "duration": n.end - n.start} for n in all_notes]
    )
    key_str = f"{key_result['tonic_name']} {key_result['mode']}"

    vel_stats = {
        "mean": round(statistics.mean(velocities), 1),
        "min": min(velocities),
        "max": max(velocities),
        "std": round(statistics.stdev(velocities), 1) if len(velocities) > 1 else 0.0,
    }

    return {
        "note_count": len(all_notes),
        "duration_seconds": round(duration, 2),
        "tempo_estimates": [round(bpm, 1) for bpm in pm.get_tempo_changes()[1]],
        "pitch_range": [min(pitches), max(pitches)],
        "instruments": [i.name for i in pm.instruments],
        "velocity_stats": vel_stats,
        "density_curve": density_curve,
        "key_estimate": key_str,
    }
|
|
260
|
+
|
|
261
|
+
|
|
262
|
+
# -- Tool 4: extract_piano_roll ----------------------------------------------
|
|
263
|
+
|
|
264
|
+
@mcp.tool()
def extract_piano_roll(
    ctx: Context,
    file_path: str,
    resolution: float = 0.125,
) -> dict:
    """Extract a 2D piano roll matrix from a .mid file. Offline-capable.

    Returns a velocity matrix [pitch_index][time_step] trimmed to
    the actual pitch range. Resolution is in beats (0.125 = 32nd note).

    Args:
        file_path: Path to an existing .mid/.midi file.
        resolution: Beats per time step; must be positive.

    Returns:
        dict with piano_roll, pitch_min, pitch_max, time_steps, resolution;
        or {"error": ...} for an invalid resolution.
    """
    # Guard: a zero or negative resolution would divide by zero below.
    if resolution <= 0:
        return {"error": "resolution must be > 0"}

    pretty_midi = _require_pretty_midi()
    path = _validate_midi_path(file_path)
    pm = pretty_midi.PrettyMIDI(str(path))

    # Convert beats-per-step into pretty_midi's sampling frequency (steps/sec)
    # using the file's first tempo; fall back to 120 BPM if none is reported.
    tempo_changes = pm.get_tempo_changes()
    tempo = float(tempo_changes[1][0]) if len(tempo_changes[1]) > 0 else 120.0
    fs = (tempo / 60.0) / resolution

    roll = pm.get_piano_roll(fs=fs)  # shape (128, T)

    # Indices of pitches that sound at least once.
    active_pitches = roll.sum(axis=1).nonzero()[0]
    if len(active_pitches) == 0:
        return {
            "piano_roll": [],
            "pitch_min": 0,
            "pitch_max": 0,
            "time_steps": 0,
            "resolution": resolution,
        }

    # Trim silent rows above and below the used pitch range.
    pitch_min = int(active_pitches[0])
    pitch_max = int(active_pitches[-1])
    trimmed = roll[pitch_min:pitch_max + 1, :]

    return {
        "piano_roll": trimmed.astype(int).tolist(),
        "pitch_min": pitch_min,
        "pitch_max": pitch_max,
        "time_steps": int(trimmed.shape[1]),
        "resolution": resolution,
    }
|