livepilot 1.6.4 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +43 -7
- package/README.md +21 -6
- package/m4l_device/LivePilot_Analyzer.amxd +0 -0
- package/m4l_device/livepilot_bridge.js +1 -1
- package/mcp_server/__init__.py +1 -1
- package/mcp_server/server.py +3 -0
- package/mcp_server/tools/_generative_engine.py +271 -0
- package/mcp_server/tools/_harmony_engine.py +207 -0
- package/mcp_server/tools/_theory_engine.py +366 -0
- package/mcp_server/tools/generative.py +273 -0
- package/mcp_server/tools/harmony.py +253 -0
- package/mcp_server/tools/midi_io.py +305 -0
- package/mcp_server/tools/theory.py +160 -320
- package/package.json +2 -2
- package/plugin/plugin.json +2 -2
- package/plugin/skills/livepilot-core/SKILL.md +46 -8
- package/plugin/skills/livepilot-core/references/overview.md +5 -5
- package/remote_script/LivePilot/__init__.py +2 -2
- package/requirements.txt +3 -3
|
@@ -0,0 +1,366 @@
|
|
|
1
|
+
"""Pure Python music theory engine — zero dependencies.
|
|
2
|
+
|
|
3
|
+
Replaces music21 for LivePilot's 7 theory tools. Implements:
|
|
4
|
+
Krumhansl-Schmuckler key detection, Roman numeral analysis,
|
|
5
|
+
voice leading checks, chord naming, and scale construction.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from __future__ import annotations
|
|
9
|
+
|
|
10
|
+
import math
|
|
11
|
+
import re
|
|
12
|
+
from collections import defaultdict
|
|
13
|
+
|
|
14
|
+
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

# Chromatic pitch-class names, sharp spelling only; index == pitch class 0-11.
NOTE_NAMES = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']

# Fold flat, double-flat, and double-sharp spellings onto the sharp names
# above so parse_key() accepts any common enharmonic input.
ENHARMONIC = {
    'Cb': 'B', 'Db': 'C#', 'Eb': 'D#', 'Fb': 'E', 'Gb': 'F#',
    'Ab': 'G#', 'Bb': 'A#',
    'B#': 'C', 'E#': 'F',
    'Cbb': 'A#', 'Dbb': 'C', 'Ebb': 'D', 'Fbb': 'D#', 'Gbb': 'F',
    'Abb': 'G', 'Bbb': 'A',
    'C##': 'D', 'D##': 'E', 'E##': 'F#', 'F##': 'G', 'G##': 'A',
    'A##': 'B', 'B##': 'C#',
}

# Krumhansl-Schmuckler key profiles: perceived fit of each chromatic degree
# (index 0 == tonic) for major and minor keys, used by detect_key().
MAJOR_PROFILE = [6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88]
MINOR_PROFILE = [6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17]

# Modal profiles are rotations of the major profile so each mode's tonic
# lands at index 0 (dorian tonic sits 2 semitones above the relative major,
# phrygian 4, lydian 5, mixolydian 7, locrian 11).
DORIAN_PROFILE = MAJOR_PROFILE[2:] + MAJOR_PROFILE[:2]
PHRYGIAN_PROFILE = MAJOR_PROFILE[4:] + MAJOR_PROFILE[:4]
LYDIAN_PROFILE = MAJOR_PROFILE[5:] + MAJOR_PROFILE[:5]
MIXOLYDIAN_PROFILE = MAJOR_PROFILE[7:] + MAJOR_PROFILE[:7]
LOCRIAN_PROFILE = MAJOR_PROFILE[11:] + MAJOR_PROFILE[:11]

# All candidate profiles for full mode detection in detect_key().
MODE_PROFILES = {
    'major': MAJOR_PROFILE, 'minor': MINOR_PROFILE,
    'dorian': DORIAN_PROFILE, 'phrygian': PHRYGIAN_PROFILE,
    'lydian': LYDIAN_PROFILE, 'mixolydian': MIXOLYDIAN_PROFILE,
    'locrian': LOCRIAN_PROFILE,
}

# Scale interval patterns in semitones above the tonic.
SCALES = {
    'major': [0, 2, 4, 5, 7, 9, 11], 'minor': [0, 2, 3, 5, 7, 8, 10],
    'dorian': [0, 2, 3, 5, 7, 9, 10], 'phrygian': [0, 1, 3, 5, 7, 8, 10],
    'lydian': [0, 2, 4, 6, 7, 9, 11], 'mixolydian': [0, 2, 4, 5, 7, 9, 10],
    'locrian': [0, 1, 3, 5, 6, 8, 10],
}

# Diatonic triad quality on each of the 7 scale degrees, per mode.
TRIAD_QUALITIES = {
    'major': ['major', 'minor', 'minor', 'major', 'major', 'minor', 'diminished'],
    'minor': ['minor', 'diminished', 'major', 'minor', 'minor', 'major', 'major'],
    'dorian': ['minor', 'minor', 'major', 'major', 'minor', 'diminished', 'major'],
    'phrygian': ['minor', 'major', 'major', 'minor', 'diminished', 'major', 'minor'],
    'lydian': ['major', 'major', 'minor', 'diminished', 'major', 'minor', 'minor'],
    'mixolydian': ['major', 'minor', 'diminished', 'major', 'minor', 'minor', 'major'],
    'locrian': ['diminished', 'major', 'minor', 'minor', 'major', 'major', 'minor'],
}

# Interval-from-root shapes -> chord names, matched by chord_name().
CHORD_PATTERNS = {
    (0, 4, 7): 'major triad', (0, 3, 7): 'minor triad',
    (0, 3, 6): 'diminished triad', (0, 4, 8): 'augmented triad',
    (0, 2, 7): 'sus2', (0, 5, 7): 'sus4',
    (0, 4, 7, 11): 'major seventh', (0, 3, 7, 10): 'minor seventh',
    (0, 4, 7, 10): 'dominant seventh', (0, 3, 6, 9): 'diminished seventh',
    (0, 3, 6, 10): 'half-diminished seventh',
}

# Upper-case Roman numerals for degrees 1-7; case is adjusted per quality
# in roman_numeral().
ROMAN_LABELS = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII']
|
|
73
|
+
|
|
74
|
+
# ---------------------------------------------------------------------------
|
|
75
|
+
# Functions
|
|
76
|
+
# ---------------------------------------------------------------------------
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def pitch_name(midi: int) -> str:
    """Convert a MIDI note number to a name like 'C4' (sharp spelling only)."""
    octave = midi // 12 - 1
    return f"{NOTE_NAMES[midi % 12]}{octave}"
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def parse_key(key_str: str) -> dict:
    """Parse a key string like 'C major', 'f# minor', or 'Bb'.

    Args:
        key_str: Tonic name optionally followed by a mode word
            (e.g. 'Eb dorian'). The mode defaults to 'major'.

    Returns:
        {"tonic": pitch class 0-11, "mode": lowercased mode string}

    Raises:
        ValueError: if the string is empty/blank or the tonic is unknown.
    """
    parts = key_str.strip().split()
    if not parts:
        # Previously an empty string fell through to a bare IndexError.
        raise ValueError(f"Empty key string: {key_str!r}")
    raw_tonic = parts[0]
    mode = parts[1].lower() if len(parts) > 1 else 'major'

    # Normalize tonic: capitalize the letter, keep accidentals as typed.
    tonic_name = raw_tonic[0].upper() + raw_tonic[1:]

    # Fold flats / double accidentals onto the canonical sharp spelling.
    if tonic_name in ENHARMONIC:
        tonic_name = ENHARMONIC[tonic_name]

    if tonic_name not in NOTE_NAMES:
        raise ValueError(f"Unknown tonic: {tonic_name} (from '{key_str}')")

    return {"tonic": NOTE_NAMES.index(tonic_name), "mode": mode}
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def get_scale_pitches(tonic: int, mode: str) -> list[int]:
    """Pitch classes (0-11) of *mode* built on *tonic*.

    Unknown mode names fall back to the major scale.
    """
    steps = SCALES.get(mode, SCALES['major'])
    return [(tonic + step) % 12 for step in steps]
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def build_chord(degree: int, tonic: int, mode: str) -> dict:
    """Stack the diatonic triad on a scale degree (0-indexed).

    Returns root pitch class, the three triad pitch classes
    (root/third/fifth), the mode-derived quality, and the root's name.
    """
    scale = get_scale_pitches(tonic, mode)
    idx = degree % 7
    # Triad = every other scale step starting at the degree.
    members = [scale[idx], scale[(idx + 2) % 7], scale[(idx + 4) % 7]]
    qualities = TRIAD_QUALITIES.get(mode, TRIAD_QUALITIES['major'])
    return {
        "root_pc": members[0],
        "pitch_classes": members,
        "quality": qualities[idx],
        "root_name": NOTE_NAMES[members[0]],
    }
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _pearson(x: list[float], y: list[float]) -> float:
|
|
125
|
+
"""Pearson correlation coefficient."""
|
|
126
|
+
n = len(x)
|
|
127
|
+
mx = sum(x) / n
|
|
128
|
+
my = sum(y) / n
|
|
129
|
+
num = sum((xi - mx) * (yi - my) for xi, yi in zip(x, y))
|
|
130
|
+
dx = math.sqrt(sum((xi - mx) ** 2 for xi in x))
|
|
131
|
+
dy = math.sqrt(sum((yi - my) ** 2 for yi in y))
|
|
132
|
+
if dx == 0 or dy == 0:
|
|
133
|
+
return 0.0
|
|
134
|
+
return num / (dx * dy)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
def detect_key(notes: list[dict], mode_detection: bool = True) -> dict:
    """Krumhansl-Schmuckler key detection over a list of note dicts.

    Builds a duration-weighted pitch-class histogram (muted notes are
    skipped), correlates it against each candidate tonic/mode profile,
    and returns the best match plus the next eight runners-up.
    """
    histogram = [0.0] * 12
    for note in notes:
        if note.get("mute", False):
            continue
        histogram[note["pitch"] % 12] += note.get("duration", 1.0)

    if mode_detection:
        profiles = MODE_PROFILES
    else:
        # Classic two-profile detection only.
        profiles = {'major': MAJOR_PROFILE, 'minor': MINOR_PROFILE}

    candidates = [
        {
            "tonic": tonic,
            "tonic_name": NOTE_NAMES[tonic],
            "mode": mode_name,
            "confidence": round(
                _pearson([histogram[(tonic + i) % 12] for i in range(12)],
                         profile),
                3),
        }
        for mode_name, profile in profiles.items()
        for tonic in range(12)
    ]
    candidates.sort(key=lambda c: c["confidence"], reverse=True)

    best = candidates[0]
    return {
        "tonic": best["tonic"],
        "tonic_name": best["tonic_name"],
        "mode": best["mode"],
        "confidence": best["confidence"],
        "alternatives": candidates[1:9],
    }
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def chord_name(midi_pitches: list[int]) -> str:
    """Name a chord from MIDI pitches, e.g. 'C-major triad'.

    Tries every present pitch class as the root and matches the
    resulting interval shape against CHORD_PATTERNS; unmatched sets
    fall back to '<lowest pitch class> chord'.
    """
    pcs = sorted({p % 12 for p in midi_pitches})
    if not pcs:
        return "unknown"
    for candidate_root in pcs:
        shape = tuple(sorted((pc - candidate_root) % 12 for pc in pcs))
        label = CHORD_PATTERNS.get(shape)
        if label is not None:
            return f"{NOTE_NAMES[candidate_root]}-{label}"
    return f"{NOTE_NAMES[pcs[0]]} chord"
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def roman_numeral(chord_pcs: list[int], tonic: int, mode: str) -> dict:
    """Match chord pitch classes to a diatonic Roman numeral figure.

    The first scale-degree triad whose pitch-class set contains the
    chord wins. Minor/diminished qualities get a lower-case numeral,
    diminished additionally a degree sign. Inversion is read off the
    bass (first element of chord_pcs); unmatched chords return a '?'
    figure.
    """
    pcs_set = {pc % 12 for pc in chord_pcs}
    bass_pc = chord_pcs[0] % 12 if chord_pcs else 0

    for degree in range(7):
        triad = build_chord(degree, tonic, mode)
        triad_set = set(triad["pitch_classes"])
        if pcs_set != triad_set and not pcs_set.issubset(triad_set):
            continue
        quality = triad["quality"]
        label = ROMAN_LABELS[degree]
        if quality in ("minor", "diminished"):
            label = label.lower()
        if quality == "diminished":
            label += "\u00b0"
        # Inversion: which triad member sits in the bass?
        if bass_pc == triad["pitch_classes"][1]:
            inversion = 1
        elif bass_pc == triad["pitch_classes"][2]:
            inversion = 2
        else:
            inversion = 0
        return {"figure": label, "quality": quality, "degree": degree,
                "inversion": inversion, "root_name": triad["root_name"]}

    return {"figure": "?", "quality": "unknown", "degree": 0,
            "inversion": 0, "root_name": NOTE_NAMES[tonic]}
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def roman_figure_to_pitches(figure: str, tonic: int, mode: str) -> dict:
    """Parse a Roman numeral string into concrete MIDI pitches.

    Handles: 'IV', 'bVII7', '#ivo7', 'ii7', 'vii\u00b0', etc.

    Args:
        figure: Roman numeral, optionally with leading 'b'/'#'
            accidentals and a trailing '7', 'o7', '\u00b07', or '\u00b0' suffix.
        tonic: tonic pitch class 0-11.
        mode: scale mode name (see SCALES).

    Returns:
        dict with figure, root_pc, note-name pitches, MIDI pitches in
        octave 4, and quality — or {"figure", "error"} if unparsable.
    """
    remaining = figure
    chromatic_shift = 0

    # Leading accidentals: 'b' lowers the root a semitone, '#' raises it.
    while remaining and remaining[0] in ('b', '#'):
        chromatic_shift += -1 if remaining[0] == 'b' else 1
        remaining = remaining[1:]

    # Longest numeral first so 'VII' is not consumed as 'V' + leftovers.
    upper_remaining = remaining.upper()
    numeral = ""
    for rn in ['VII', 'VI', 'IV', 'III', 'II', 'V', 'I']:
        if upper_remaining.startswith(rn):
            numeral = rn
            break

    if not numeral:
        return {"figure": figure, "error": f"Cannot parse: {figure}"}

    # Lower-case numeral in the original figure requests minor quality.
    numeral_in_orig = remaining[:len(numeral)]
    is_minor_quality = numeral_in_orig == numeral_in_orig.lower()
    remaining = remaining[len(numeral):]

    degree = ['I', 'II', 'III', 'IV', 'V', 'VI', 'VII'].index(numeral)

    # Base triad from the scale, root shifted by the accidentals.
    chord = build_chord(degree, tonic, mode)
    root_pc = (chord["root_pc"] + chromatic_shift) % 12

    if is_minor_quality:
        pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 7) % 12]
        quality = "minor"
    else:
        # Upper-case numeral: keep the scale-derived quality.
        quality = chord["quality"]
        if quality == "minor":
            pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 7) % 12]
        elif quality == "diminished":
            pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 6) % 12]
        elif quality == "augmented":
            pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 8) % 12]
        else:
            pcs = [root_pc, (root_pc + 4) % 12, (root_pc + 7) % 12]

    # Suffix: seventh / diminished markers.
    suffix = remaining.lower()
    if suffix == "7":
        pcs.append((root_pc + 10) % 12)  # dominant/minor 7th
        quality = "minor seventh" if quality == "minor" else "dominant seventh"
    elif suffix in ("o7", "\u00b07"):
        # Fully diminished seventh: rebuild the whole chord so the fifth
        # is flattened too. (Bug fix: figures like '#ivo7' previously kept
        # the minor triad's perfect fifth and produced [0,3,7,9] instead
        # of [0,3,6,9].)
        pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 6) % 12,
               (root_pc + 9) % 12]
        quality = "diminished seventh"
    elif suffix == "\u00b0":
        quality = "diminished"
        pcs = [root_pc, (root_pc + 3) % 12, (root_pc + 6) % 12]

    # Voice in octave 4: root at 60 + root_pc, other tones stacked above.
    base_midi = 60 + root_pc
    midi = [base_midi + ((pc - root_pc) % 12) for pc in pcs]

    return {
        "figure": figure,
        "root_pc": root_pc,
        "pitches": [pitch_name(m) for m in midi],
        "midi_pitches": midi,
        "quality": quality,
    }
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
def check_voice_leading(prev_pitches: list[int], curr_pitches: list[int]) -> list[dict]:
    """Flag voice-leading problems between two chords (sorted low-to-high).

    Only the outer voices (first and last pitch of each chord) are
    examined. Detects parallel fifths, parallel octaves/unisons, voice
    crossing, and hidden (direct) fifths.
    """
    problems: list[dict] = []

    # Degenerate chords: only a crossing check on the current chord.
    if len(prev_pitches) < 2 or len(curr_pitches) < 2:
        if len(curr_pitches) >= 2 and curr_pitches[-1] < curr_pitches[0]:
            problems.append({"type": "voice_crossing"})
        return problems

    prev_bass, prev_sop = prev_pitches[0], prev_pitches[-1]
    curr_bass, curr_sop = curr_pitches[0], curr_pitches[-1]

    prev_interval = (prev_sop - prev_bass) % 12
    curr_interval = (curr_sop - curr_bass) % 12

    bass_motion = curr_bass - prev_bass
    sop_motion = curr_sop - prev_sop
    both_moved = bass_motion != 0 and sop_motion != 0

    if both_moved and prev_interval == 7 and curr_interval == 7:
        problems.append({"type": "parallel_fifths"})

    if both_moved and prev_interval == 0 and curr_interval == 0:
        problems.append({"type": "parallel_octaves"})

    if curr_sop < curr_bass:
        problems.append({"type": "voice_crossing"})

    # Hidden fifth: similar motion landing on a perfect fifth.
    if both_moved and curr_interval == 7:
        if (bass_motion > 0) == (sop_motion > 0):
            problems.append({"type": "hidden_fifth"})

    return problems
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
def chordify(notes: list[dict], quant: float = 0.125) -> list[dict]:
    """Collapse notes into chords by quantized onset position.

    Muted notes are dropped. Each chord's duration is the longest note
    in its group, floored at the quantization step.
    """
    buckets: dict[float, list[dict]] = defaultdict(list)
    for note in notes:
        if note.get("mute", False):
            continue
        slot = round(note["start_time"] / quant) * quant
        buckets[slot].append(note)

    chords = []
    for slot in sorted(buckets):
        members = buckets[slot]
        pitches = sorted(m["pitch"] for m in members)
        longest = max(m["duration"] for m in members)
        chords.append({
            "beat": round(slot, 4),
            "duration": round(max(longest, quant), 4),
            "pitches": pitches,
            "pitch_classes": sorted({p % 12 for p in pitches}),
        })
    return chords
|
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
"""Generative music tools — Euclidean rhythms, tintinnabuli, phase shift, additive process.
|
|
2
|
+
|
|
3
|
+
5 tools returning note arrays. Pure computation — no Ableton connection needed.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from typing import Any, Optional
|
|
10
|
+
|
|
11
|
+
from fastmcp import Context
|
|
12
|
+
|
|
13
|
+
from ..server import mcp
|
|
14
|
+
from . import _generative_engine as gen
|
|
15
|
+
from . import _theory_engine as theory
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _ensure_list(value: Any) -> list:
|
|
19
|
+
if isinstance(value, str):
|
|
20
|
+
return json.loads(value)
|
|
21
|
+
return value
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# -- Tool 1: generate_euclidean_rhythm --------------------------------------
|
|
25
|
+
|
|
26
|
+
@mcp.tool()
def generate_euclidean_rhythm(
    ctx: Context,
    pulses: int,
    steps: int,
    rotation: int = 0,
    pitch: int = 36,
    velocity: int = 100,
    step_duration: float = 0.25,
) -> dict:
    """Generate a Euclidean rhythm using the Bjorklund algorithm.

    Distributes pulses as evenly as possible across steps. Identifies
    known rhythms (tresillo, cinquillo, bossa nova, etc.) when matched.
    Returns note array — use add_notes to place in a clip.
    """
    if not 0 <= pulses <= 64:
        return {"error": "pulses must be 0-64"}
    if not 1 <= steps <= 64:
        return {"error": "steps must be 1-64"}
    if pulses > steps:
        return {"error": "pulses must be <= steps"}
    if step_duration <= 0:
        # Previously unvalidated: zero/negative grid would produce
        # zero-length or backwards-in-time notes.
        return {"error": "step_duration must be > 0"}

    pattern = gen.bjorklund(pulses, steps)
    if rotation:
        pattern = gen.rotate_pattern(pattern, rotation)

    # One note per hit, laid on the fixed step grid.
    notes = [
        {
            "pitch": pitch,
            "start_time": round(i * step_duration, 4),
            "duration": step_duration,
            "velocity": velocity,
        }
        for i, hit in enumerate(pattern)
        if hit
    ]

    return {
        "notes": notes,
        "pattern": pattern,
        "name": gen.identify_rhythm(pulses, steps),
        "total_duration": round(steps * step_duration, 4),
    }
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
# -- Tool 2: layer_euclidean_rhythms ----------------------------------------
|
|
72
|
+
|
|
73
|
+
@mcp.tool()
def layer_euclidean_rhythms(
    ctx: Context,
    layers: Any,
) -> dict:
    """Stack multiple Euclidean rhythms for polyrhythmic textures.

    Each layer specifies pulses, steps, pitch, and optional velocity,
    rotation, and step_duration. Returns combined note array ready for
    add_notes, plus per-layer pattern info.
    """
    layers = _ensure_list(layers)
    if not layers:
        return {"error": "At least one layer required"}

    all_notes: list[dict] = []
    layer_info: list[dict] = []
    max_duration = 0.0

    for idx, layer in enumerate(layers):
        # Required keys -> error dict instead of a raw KeyError, keeping
        # the tool's error-reporting convention.
        try:
            p = int(layer["pulses"])
            s = int(layer["steps"])
            pitch = int(layer["pitch"])
        except (KeyError, TypeError, ValueError):
            return {"error": f"layer {idx}: requires integer pulses, steps, pitch"}
        rot = int(layer.get("rotation", 0))
        vel = int(layer.get("velocity", 100))
        dur = float(layer.get("step_duration", 0.25))

        # Mirror generate_euclidean_rhythm's bounds so bjorklund always
        # gets sane arguments (previously unvalidated).
        if not 0 <= p <= 64 or not 1 <= s <= 64 or p > s:
            return {"error": f"layer {idx}: invalid pulses/steps ({p}/{s})"}
        if dur <= 0:
            return {"error": f"layer {idx}: step_duration must be > 0"}

        pattern = gen.bjorklund(p, s)
        if rot:
            pattern = gen.rotate_pattern(pattern, rot)

        layer_notes = [
            {
                "pitch": pitch,
                "start_time": round(i * dur, 4),
                "duration": dur,
                "velocity": vel,
            }
            for i, hit in enumerate(pattern)
            if hit
        ]

        all_notes.extend(layer_notes)
        max_duration = max(max_duration, round(s * dur, 4))
        layer_info.append({
            "pattern": pattern,
            "note_count": len(layer_notes),
            "name": gen.identify_rhythm(p, s),
        })

    return {
        "notes": sorted(all_notes, key=lambda n: n["start_time"]),
        "layers": layer_info,
        "total_duration": max_duration,
    }
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
# -- Tool 3: generate_tintinnabuli ------------------------------------------
|
|
130
|
+
|
|
131
|
+
@mcp.tool()
def generate_tintinnabuli(
    ctx: Context,
    melody_notes: Any,
    triad: str,
    position: str = "nearest",
    velocity: int = 80,
) -> dict:
    """Generate a tintinnabuli voice (Arvo Pärt technique).

    For each melody note, finds the nearest note of the specified triad.
    Returns the tintinnabuli voice as a separate note array — combine
    with the original melody via add_notes for the full Pärt effect.
    Only major and minor triads are supported.
    """
    melody_notes = _ensure_list(melody_notes)
    if not melody_notes:
        return {"error": "melody_notes cannot be empty"}
    if position not in ("above", "below", "nearest"):
        return {"error": "position must be 'above', 'below', or 'nearest'"}

    try:
        parsed = theory.parse_key(triad)
    except ValueError:
        return {"error": f"Cannot parse triad: {triad}"}
    if parsed["mode"] not in ("major", "minor"):
        return {"error": "Only major and minor triads are supported"}

    # Triad pitch classes: root, third (4 semitones major / 3 minor), fifth.
    root = parsed["tonic"]
    third_interval = 4 if parsed["mode"] == "major" else 3
    triad_pcs = [root, (root + third_interval) % 12, (root + 7) % 12]

    melody_pitches = [int(n["pitch"]) for n in melody_notes]
    t_pitches = gen.tintinnabuli_voice(melody_pitches, triad_pcs, position)

    # T-voice notes share the melody's timing, with a uniform velocity.
    notes = [
        {
            "pitch": t_pitch,
            "start_time": float(src["start_time"]),
            "duration": float(src["duration"]),
            "velocity": velocity,
        }
        for src, t_pitch in zip(melody_notes, t_pitches)
    ]

    triad_name = f"{theory.NOTE_NAMES[root]} {parsed['mode']}"
    return {
        "notes": notes,
        "technique": "tintinnabuli",
        "triad_used": triad_name,
        "description": f"T-voice moves to {position} {triad_name} triad tone for each melody note",
    }
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
# -- Tool 4: generate_phase_shift -------------------------------------------
|
|
187
|
+
|
|
188
|
+
@mcp.tool()
def generate_phase_shift(
    ctx: Context,
    pattern_notes: Any,
    voices: int = 2,
    shift_amount: float = 0.125,
    total_length: float = 16.0,
) -> dict:
    """Generate a phase-shifted canon (Steve Reich technique).

    Voice 0 loops the pattern normally. Each subsequent voice drifts
    by shift_amount beats per repetition, creating gradual phase
    displacement. Returns combined note array with velocity-encoded
    voices.
    """
    pattern_notes = _ensure_list(pattern_notes)
    if not pattern_notes:
        return {"error": "pattern_notes cannot be empty"}
    if not 1 <= voices <= 8:
        return {"error": "voices must be 1-8"}
    if shift_amount <= 0:
        return {"error": "shift_amount must be > 0"}

    result_notes = gen.phase_shift(pattern_notes, voices, shift_amount, total_length)

    pattern_length = max(n["start_time"] + n["duration"] for n in pattern_notes)

    # Classic two-voice case: report when the voices realign (the drift
    # accumulates back to one full pattern length) — only if that moment
    # falls within the requested span.
    alignment = None
    if voices == 2 and shift_amount > 0 and pattern_length > 0:
        candidate = round((pattern_length / shift_amount) * pattern_length, 4)
        if candidate <= total_length:
            alignment = candidate

    return {
        "notes": result_notes,
        "voices": voices,
        "shift_per_repeat": shift_amount,
        "pattern_length": round(pattern_length, 4),
        "full_alignment_at": alignment,
    }
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
# -- Tool 5: generate_additive_process --------------------------------------
|
|
230
|
+
|
|
231
|
+
@mcp.tool()
def generate_additive_process(
    ctx: Context,
    melody_notes: Any,
    direction: str = "forward",
    repetitions_per_stage: int = 2,
) -> dict:
    """Generate an additive process (Philip Glass technique).

    Forward: builds melody note by note (1, then 1-2, then 1-2-3...).
    Backward: full melody, then removes from front.
    Both: forward then backward.
    Returns note array — use add_notes to place in a clip.
    """
    melody_notes = _ensure_list(melody_notes)
    if not melody_notes:
        return {"error": "melody_notes cannot be empty"}
    if direction not in ("forward", "backward", "both"):
        return {"error": "direction must be 'forward', 'backward', or 'both'"}
    if repetitions_per_stage < 1:
        return {"error": "repetitions_per_stage must be >= 1"}

    result_notes = gen.additive_process(melody_notes, direction,
                                        repetitions_per_stage)

    # One stage per melody note in a single direction; 'both' shares the
    # full-melody stage between the two passes.
    note_count = len(melody_notes)
    if direction in ("forward", "backward"):
        stage_count = note_count
    else:
        stage_count = 2 * note_count - 1

    end_time = max(
        (item["start_time"] + item["duration"] for item in result_notes),
        default=0.0,
    )

    return {
        "notes": result_notes,
        "stages": stage_count,
        "total_duration": round(end_time, 4),
        "direction": direction,
    }
|