midi-cli 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
midi_cli/__init__.py ADDED
File without changes
midi_cli/app.py ADDED
@@ -0,0 +1,179 @@
1
+ import os
2
+ import select
3
+ import sys
4
+ import termios
5
+ import threading
6
+ import tty
7
+
8
+ import sounddevice as sd
9
+
10
+ import midi_cli.state as st
11
+ from midi_cli.audio import audio_callback
12
+ from midi_cli.sampler import load_samples, load_pitch_offsets, save_sample, adjust_pitch, compute_chromatic_samples_async, execute_copy, stop_recording
13
+ from midi_cli.midi import open_midi_input, trigger_keyboard_note, midi_callback
14
+ from midi_cli.ui import draw_ui, reset_trim_state, reset_copy_state
15
+ from midi_cli.loop import reset_loop_state, cycle_loop_state
16
+
17
+
18
def main():
    """Entry point: run the setup wizard, start the audio stream, and drive
    the raw-terminal UI/event loop until the user quits with 'q' or Ctrl+C.
    """
    # Imported lazily so the wizard's interactive prompts run only when
    # main() actually starts.
    from midi_cli.config import run_setup_wizard
    cfg = run_setup_wizard()
    st.input_device = cfg.get("input_device")
    st.output_device = cfg.get("output_device")
    st.midi_port = cfg.get("midi_port")
    # ANSI escapes: clear screen, move cursor to home.
    sys.stdout.write("\033[2J\033[H")
    sys.stdout.flush()

    # Fall back to computer-keyboard "mock" note input when no MIDI port opens.
    midi_in, port_name = open_midi_input(st.midi_port)
    if midi_in is None:
        st.keyboard_mock_mode = True
    else:
        st.ui_midi_port = port_name

    load_samples()
    load_pitch_offsets()

    # Mono output stream; audio_callback reads/writes shared state in `st`.
    stream = sd.OutputStream(
        samplerate=st.SAMPLE_RATE,
        blocksize=st.BLOCK_SIZE,
        channels=1,
        callback=audio_callback,
        device=st.output_device,
    )
    stream.start()

    # Remember the terminal settings so the finally block can restore them.
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    sys.stdout.write(st.HIDE_CURSOR)
    sys.stdout.flush()

    try:
        tty.setraw(fd)
        initialized = False
        while True:
            draw_ui(initialized)
            initialized = True

            # Poll stdin with a 50 ms timeout so the UI keeps refreshing
            # even when no key is pressed.
            ready, _, _ = select.select([fd], [], [], 0.05)
            if ready:
                ch = os.read(fd, 1).decode("utf-8", errors="replace")
                if ch in ("q", "\x03"):  # q or Ctrl+C
                    break
                elif ch == "\r" and st.mode == "trim" and st.trim_selected_note is not None:
                    if st.trim_step == 0:
                        # Confirm start, advance to end adjustment
                        st.trim_step = 1
                        st.trim_loop_pos = st.trim_start
                    else:
                        # Confirm end, slice and save
                        audio = st.samples[st.trim_selected_note][st.trim_start:st.trim_end]
                        note = st.trim_selected_note
                        save_sample(note, audio)
                        # Recompute chromatic if this was the root
                        if st.chromatic_root_note == note:
                            compute_chromatic_samples_async(note)
                        reset_trim_state()
                elif ch == "\r" and st.mode == "copy" and st.copy_confirm_pending:
                    execute_copy()
                elif ch == "\t":  # Tab: next mode
                    # Leaving a mode tears down that mode's transient state first.
                    if st.mode == "record":
                        st.keyboard_recording_notes.clear()
                        if st.recording_note is not None:
                            # Run stop_recording off-thread so the UI loop is not blocked.
                            threading.Thread(target=stop_recording, args=(st.recording_note,), daemon=True).start()
                    if st.mode == "trim":
                        reset_trim_state()
                    if st.mode == "copy":
                        reset_copy_state()
                    if st.mode == "loop":
                        with st.voices_lock:
                            reset_loop_state()
                    st.mode = st.MODES[(st.MODES.index(st.mode) + 1) % len(st.MODES)]
                    if st.mode == "chromatic":
                        # Force root selection when no chromatic root exists yet.
                        st.chromatic_selecting = st.chromatic_root_note is None
                elif ch == "r" and st.mode == "chromatic" and not st.chromatic_selecting:
                    st.chromatic_selecting = True
                elif ch == "\x1b":  # escape sequence
                    # Use select on fd with timeout to distinguish bare Escape from sequences
                    esc_ready, _, _ = select.select([fd], [], [], 0.02)
                    if not esc_ready:
                        # Bare Escape: cancel the current mode's pending action.
                        if st.mode == "trim" and st.trim_selected_note is not None:
                            reset_trim_state()
                        elif st.mode == "copy" and (st.copy_source_note is not None or st.copy_confirm_pending):
                            reset_copy_state()
                        elif st.mode == "loop":
                            with st.voices_lock:
                                reset_loop_state()
                    else:
                        # Read the 2-byte remainder of a CSI sequence, e.g. "[A" for Up.
                        seq = os.read(fd, 2).decode("utf-8", errors="replace")
                        if seq == "[Z":  # Shift+Tab: previous mode
                            # Mirrors the Tab handler above, cycling backwards.
                            if st.mode == "record":
                                st.keyboard_recording_notes.clear()
                                if st.recording_note is not None:
                                    threading.Thread(target=stop_recording, args=(st.recording_note,), daemon=True).start()
                            if st.mode == "trim":
                                reset_trim_state()
                            if st.mode == "copy":
                                reset_copy_state()
                            if st.mode == "loop":
                                with st.voices_lock:
                                    reset_loop_state()
                            st.mode = st.MODES[(st.MODES.index(st.mode) - 1) % len(st.MODES)]
                            if st.mode == "chromatic":
                                st.chromatic_selecting = st.chromatic_root_note is None
                        elif st.mode == "trim" and st.trim_selected_note is not None:
                            # Arrow keys nudge the trim boundary: Left/Right fine, Up/Down coarse.
                            sample_len = len(st.samples[st.trim_selected_note])
                            fine = max(1, sample_len // 1000)  # 0.1%
                            coarse = max(1, sample_len // 100)  # 1%
                            if st.trim_step == 0:
                                if seq == "[D":  # Left: decrease start
                                    st.trim_start = max(0, st.trim_start - fine)
                                elif seq == "[C":  # Right: increase start
                                    st.trim_start = min(st.trim_end - st.MIN_TRIM_SAMPLES, st.trim_start + fine)
                                elif seq == "[B":  # Down: decrease start (coarse)
                                    st.trim_start = max(0, st.trim_start - coarse)
                                elif seq == "[A":  # Up: increase start (coarse)
                                    st.trim_start = min(st.trim_end - st.MIN_TRIM_SAMPLES, st.trim_start + coarse)
                            else:
                                if seq == "[D":  # Left: decrease end
                                    st.trim_end = max(st.trim_start + st.MIN_TRIM_SAMPLES, st.trim_end - fine)
                                elif seq == "[C":  # Right: increase end
                                    st.trim_end = min(sample_len, st.trim_end + fine)
                                elif seq == "[B":  # Down: decrease end (coarse)
                                    st.trim_end = max(st.trim_start + st.MIN_TRIM_SAMPLES, st.trim_end - coarse)
                                elif seq == "[A":  # Up: increase end (coarse)
                                    st.trim_end = min(sample_len, st.trim_end + coarse)
                            # Re-clamp: start >= 0 and the region keeps its minimum length.
                            st.trim_start = max(0, st.trim_start)
                            st.trim_end = max(st.trim_start + st.MIN_TRIM_SAMPLES, st.trim_end)
                        elif st.mode == "pitch" and st.pitch_selected_note is not None:
                            if seq == "[A":  # Up: +1 semitone
                                adjust_pitch(st.pitch_selected_note, 1, 0)
                            elif seq == "[B":  # Down: -1 semitone
                                adjust_pitch(st.pitch_selected_note, -1, 0)
                            elif seq == "[C":  # Right: +10 cents
                                adjust_pitch(st.pitch_selected_note, 0, st.CENTS_STEP)
                            elif seq == "[D":  # Left: -10 cents
                                adjust_pitch(st.pitch_selected_note, 0, -st.CENTS_STEP)
                elif ch == 'l' and st.mode == "loop":
                    with st.voices_lock:
                        cycle_loop_state()
                elif st.keyboard_mock_mode and ch in st.KEYBOARD_NOTE_MAP:
                    trigger_keyboard_note(st.KEYBOARD_NOTE_MAP[ch])
                elif st.keyboard_mock_mode and ch == '`':
                    # Backtick toggles sustain by synthesizing a MIDI CC64
                    # (sustain pedal) message: 0 = off, 127 = on.
                    new_val = 0 if st.sustain else 127
                    midi_callback(([0xB0, 64, new_val], 0), None)
    except (KeyboardInterrupt, EOFError):
        pass
    finally:
        # Always restore the terminal, cursor, audio stream, and MIDI port.
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        sys.stdout.write(st.SHOW_CURSOR)
        sys.stdout.flush()
        stream.stop()
        stream.close()
        if midi_in is not None:
            midi_in.close_port()
            midi_in.delete()


if __name__ == "__main__":
    main()
midi_cli/audio.py ADDED
@@ -0,0 +1,278 @@
1
+ import numpy as np
2
+
3
+ import midi_cli.state as st
4
+
5
+
6
def build_synth_envelope(gain, releasing, faded_in, age, frames):
    """Vectorized envelope: returns (gains_array, new_gain, new_age, new_faded_in).

    Computes the per-sample gain curve for one audio block of a synth voice:
      * releasing voices fade linearly toward 0 by FADE_OUT_STEP per sample;
      * not-yet-faded-in voices rise by FADE_IN_STEP per sample while being
        simultaneously multiplied by STAGE1_DECAY, until the gain clamps at 1.0;
      * settled voices decay exponentially — STAGE1_DECAY for the first
        STAGE1_SAMPLES samples of the voice's life, STAGE2_DECAY afterwards.

    `age` counts samples since the voice started (used to locate the
    stage-1 -> stage-2 boundary); `frames` is the block length.
    """
    idx = np.arange(frames)

    if releasing:
        # Linear fade-out clamped at silence; age/faded_in pass through unchanged.
        gains = np.maximum(0.0, gain - st.FADE_OUT_STEP * (idx + 1))
        new_gain = max(0.0, gain - st.FADE_OUT_STEP * frames)
        return gains, new_gain, age, faded_in

    if not faded_in:
        # Fade-in with concurrent stage1 decay:
        # Recurrence: g' = (g + FADE_IN_STEP) * STAGE1_DECAY
        # We compute iteratively to find the clamp point, then switch to pure decay.
        gains = np.empty(frames)
        g = gain
        clamp_idx = frames  # assume no clamp within block
        for i in range(frames):
            g = (g + st.FADE_IN_STEP) * st.STAGE1_DECAY
            if g >= 1.0:
                g = 1.0
                clamp_idx = i
                break
            gains[i] = g

        if clamp_idx < frames:
            # Faded in at clamp_idx; fill the tail of the block with decay.
            gains[clamp_idx] = g
            remaining = frames - clamp_idx - 1
            if remaining > 0:
                post_age = age + clamp_idx + 1
                if post_age < st.STAGE1_SAMPLES:
                    s1_left = st.STAGE1_SAMPLES - post_age
                    if remaining <= s1_left:
                        # Entire tail stays within stage 1.
                        gains[clamp_idx + 1:] = g * st.STAGE1_DECAY ** (idx[:remaining] + 1)
                    else:
                        # Stage-1 run, then continue in stage 2 from the boundary gain.
                        gains[clamp_idx + 1:clamp_idx + 1 + s1_left] = g * st.STAGE1_DECAY ** (idx[:s1_left] + 1)
                        g_at_boundary = g * st.STAGE1_DECAY ** s1_left
                        rest = remaining - s1_left
                        gains[clamp_idx + 1 + s1_left:] = g_at_boundary * st.STAGE2_DECAY ** (idx[:rest] + 1)
                else:
                    # Voice was already past stage 1 when the clamp happened.
                    gains[clamp_idx + 1:] = g * st.STAGE2_DECAY ** (idx[:remaining] + 1)
            new_gain = gains[-1]
            new_age = age + frames
            return gains, new_gain, new_age, True
        else:
            # Still fading in at end of block
            new_gain = g
            new_age = age + frames
            return gains, new_gain, new_age, False

    # Already faded in — pure decay
    if age >= st.STAGE1_SAMPLES:
        # Pure stage2
        gains = gain * st.STAGE2_DECAY ** (idx + 1)
    elif age + frames <= st.STAGE1_SAMPLES:
        # Pure stage1
        gains = gain * st.STAGE1_DECAY ** (idx + 1)
    else:
        # Stage1 → stage2 boundary mid-block
        s1_left = st.STAGE1_SAMPLES - age
        gains = np.empty(frames)
        gains[:s1_left] = gain * st.STAGE1_DECAY ** (idx[:s1_left] + 1)
        g_at_boundary = gain * st.STAGE1_DECAY ** s1_left
        s2_count = frames - s1_left
        gains[s1_left:] = g_at_boundary * st.STAGE2_DECAY ** (idx[:s2_count] + 1)

    new_gain = gains[-1]
    new_age = age + frames
    return gains, new_gain, new_age, faded_in
75
+
76
+
77
def audio_callback(outdata, frames, time_info, status):
    """sounddevice OutputStream callback: fill `outdata` (frames x 1, mono)
    with the mix for the current UI mode.

    All shared state lives in `st` and is guarded by st.voices_lock for the
    whole callback. Four paths, each returning after writing outdata:
      1. trim-preview loop, 2. loop mode (voices + loop buffer),
      3. sample-playback modes, 4. additive synth mode (fallthrough).
    """
    with st.voices_lock:
        if st.mode == "trim" and st.trim_looping and st.trim_selected_note is not None:
            sample_data = st.samples.get(st.trim_selected_note)
            if sample_data is None or st.trim_end <= st.trim_start:
                outdata[:] = 0
                return
            # Bounds-check loop pos
            region_len = st.trim_end - st.trim_start
            # Cycle = trimmed region padded with silence up to MIN_LOOP_PERIOD.
            cycle_len = max(region_len, st.MIN_LOOP_PERIOD)
            if st.trim_loop_pos < 0 or st.trim_loop_pos >= cycle_len:
                st.trim_loop_pos = 0
            mix = np.zeros(frames)
            written = 0
            while written < frames:
                if st.trim_loop_pos < region_len:
                    # Playing audio portion
                    src_pos = st.trim_start + st.trim_loop_pos
                    avail = region_len - st.trim_loop_pos
                    n = min(frames - written, avail)
                    mix[written:written + n] = sample_data[src_pos:src_pos + n]
                    st.trim_loop_pos += n
                    written += n
                else:
                    # Silence padding portion
                    avail = cycle_len - st.trim_loop_pos
                    n = min(frames - written, avail)
                    # mix is already zeros
                    st.trim_loop_pos += n
                    written += n
                if st.trim_loop_pos >= cycle_len:
                    st.trim_loop_pos = 0
            mix *= 0.25  # fixed headroom, same factor as the other paths
            np.clip(mix, -1.0, 1.0, out=mix)
            outdata[:, 0] = mix
            return

        if st.mode == "loop":
            mix = np.zeros(frames)

            # Sample voice mixing (same logic as sample mode)
            finished = []
            for note, sv in st.sample_voices.items():
                if note not in st.samples:
                    finished.append(note)
                    continue
                pos = sv["pos"]
                gain = sv["gain"]
                releasing = sv["releasing"]
                sample_data = st.samples[note]
                remaining = len(sample_data) - pos
                n = min(frames, remaining)
                if n <= 0:
                    finished.append(note)
                    continue
                buf = np.zeros(frames)
                buf[:n] = sample_data[pos:pos + n] * gain
                if releasing:
                    # Per-sample linear fade-out on top of the current gain.
                    fade_gains = np.maximum(0.0, gain - st.FADE_OUT_STEP * (np.arange(n) + 1))
                    buf[:n] *= fade_gains
                    gain = max(0.0, gain - st.FADE_OUT_STEP * n)
                else:
                    gain = min(1.0, gain + st.FADE_IN_STEP * n)
                mix += buf
                sv["pos"] = pos + n
                sv["gain"] = gain
                if (releasing and gain <= 0.0) or pos + n >= len(sample_data):
                    finished.append(note)
            for note in finished:
                del st.sample_voices[note]

            # Loop capture/playback
            if st.loop_state == "recording":
                # Capture the pre-headroom voice mix; copy() because mix is
                # scaled/clipped in place below.
                st.loop_record_chunks.append(mix.copy())
            elif st.loop_state in ("playing", "overdub") and st.loop_buffer is not None:
                buf = st.loop_buffer
                buf_len = len(buf)
                loop_chunk = np.zeros(frames)
                written = 0
                pos = st.loop_pos
                while written < frames:
                    avail = buf_len - pos
                    take = min(avail, frames - written)
                    loop_chunk[written:written + take] = buf[pos:pos + take]
                    if st.loop_state == "overdub":
                        # Overdub adds the live voice mix into the loop buffer in place.
                        buf[pos:pos + take] += mix[written:written + take]
                        np.clip(buf[pos:pos + take], -1.0, 1.0, out=buf[pos:pos + take])
                    written += take
                    pos = (pos + take) % buf_len
                mix += loop_chunk
                st.loop_pos = (st.loop_pos + frames) % buf_len

            mix *= 0.25
            np.clip(mix, -1.0, 1.0, out=mix)
            outdata[:, 0] = mix
            return

        if st.mode in ("sample", "pitch", "chromatic", "trim"):
            if not st.sample_voices:
                outdata[:] = 0
                return

            mix = np.zeros(frames)
            finished = []

            for note, sv in st.sample_voices.items():
                if st.mode == "chromatic":
                    # Chromatic mode plays pre-shifted copies of the root sample.
                    sample_data = st.chromatic_samples.get(note)
                    if sample_data is None:
                        finished.append(note)
                        continue
                else:
                    if note not in st.samples:
                        finished.append(note)
                        continue
                    # Prefer the pitch-adjusted rendering when one exists.
                    ps = st.pitched_samples.get(note)
                    sample_data = ps if ps is not None else st.samples.get(note)
                pos = sv["pos"]
                gain = sv["gain"]
                releasing = sv["releasing"]

                remaining = len(sample_data) - pos
                n = min(frames, remaining)
                if n <= 0:
                    finished.append(note)
                    continue

                buf = np.zeros(frames)
                buf[:n] = sample_data[pos:pos + n] * gain

                if releasing:
                    fade_gains = np.maximum(0.0, gain - st.FADE_OUT_STEP * (np.arange(n) + 1))
                    buf[:n] *= fade_gains
                    gain = max(0.0, gain - st.FADE_OUT_STEP * n)
                else:
                    gain = min(1.0, gain + st.FADE_IN_STEP * n)

                mix += buf
                sv["pos"] = pos + n
                sv["gain"] = gain
                if (releasing and gain <= 0.0) or pos + n >= len(sample_data):
                    finished.append(note)

            for note in finished:
                del st.sample_voices[note]

            mix *= 0.25
            np.clip(mix, -1.0, 1.0, out=mix)
            outdata[:, 0] = mix
            return

        # Synth mode
        if not st.voices:
            outdata[:] = 0
            return

        mix = np.zeros(frames)
        finished = []

        for note, v in st.voices.items():
            freq = st.note_to_freq(note)
            phase = v["phase"]
            gain = v["gain"]
            releasing = v["releasing"]
            age = v["age"]
            faded_in = v["faded_in"]

            # Additive synthesis: per-sample phases x harmonic multipliers,
            # summed with HARMONICS as the per-harmonic amplitude weights.
            phase_step = 2.0 * np.pi * freq / st.SAMPLE_RATE
            phases = phase + np.arange(frames) * phase_step
            phase_matrix = phases[:, None] * st.H_MULTIPLIERS[None, :]
            raw_signal = np.sin(phase_matrix) @ st.HARMONICS
            gains, gain, age, faded_in = build_synth_envelope(gain, releasing, faded_in, age, frames)
            buf = raw_signal * gains
            phase = phases[-1] + phase_step

            mix += buf
            v["phase"] = phase % (2.0 * np.pi)  # wrap to keep phase numerically small
            v["gain"] = gain
            v["age"] = age
            v["faded_in"] = faded_in
            # NOTE(review): the second clause also matches a brand-new voice whose
            # fade-in gain is still below SILENCE_THRESHOLD at the end of its first
            # block — presumably FADE_IN_STEP is large enough that this never
            # triggers in practice; confirm against the constants in state.py.
            if (releasing and gain == 0.0) or gain < st.SILENCE_THRESHOLD:
                finished.append(note)

        for note in finished:
            del st.voices[note]

        mix *= 0.25  # fixed headroom (up to ~4 voices at full level)
        np.clip(mix, -1.0, 1.0, out=mix)
        outdata[:, 0] = mix
266
+
267
+
268
def trim_silence(audio, threshold_db=-20):
    """Cut silent regions off both ends of *audio*, keeping a 50 ms pad.

    A sample counts as "loud" when its absolute value exceeds the linear
    equivalent of *threshold_db*. When nothing in the array is loud, the
    input is returned unchanged.
    """
    linear_floor = 10.0 ** (threshold_db / 20.0)  # dB -> linear amplitude
    loud = np.flatnonzero(np.abs(audio) > linear_floor)
    if loud.size == 0:
        # Entirely below the floor — nothing sensible to trim.
        return audio
    margin = int(0.05 * st.SAMPLE_RATE)  # 50 ms of context on each side
    first = max(int(loud[0]) - margin, 0)
    last = min(int(loud[-1]) + 1 + margin, len(audio))
    return audio[first:last]
midi_cli/config.py ADDED
@@ -0,0 +1,133 @@
1
+ import json
2
+ import os
3
+
4
+ import rtmidi
5
+ import sounddevice as sd
6
+
7
+
8
+ CONFIG_PATH = "config.json"
9
+
10
+
11
def load_config():
    """Return the persisted configuration dict, or {} when config.json is absent."""
    if os.path.isfile(CONFIG_PATH):
        with open(CONFIG_PATH, "r") as fh:
            return json.load(fh)
    return {}
17
+
18
+
19
def save_config(cfg):
    """Serialize *cfg* to config.json as 2-space-indented JSON."""
    payload = json.dumps(cfg, indent=2)
    with open(CONFIG_PATH, "w") as fh:
        fh.write(payload)
23
+
24
+
25
def _ordered_device_names(io_index, channel_key):
    """Return names of devices with the given channel capability, default first.

    io_index: index into sd.default.device — 0 for input, 1 for output.
    channel_key: "max_input_channels" or "max_output_channels".

    Device discovery failures while resolving the default are swallowed on
    purpose: the unordered list is still usable.
    """
    devs = sd.query_devices()
    names = [d["name"] for d in devs if d[channel_key] > 0]
    try:
        idx = sd.default.device[io_index]
        if idx >= 0:
            default_name = devs[idx]["name"]
            # Move the system default to the front so prompts offer it first.
            if default_name in names:
                names.remove(default_name)
                names.insert(0, default_name)
    except Exception:
        pass
    return names


def _get_input_devices():
    """Return input device names with the system default first."""
    return _ordered_device_names(0, "max_input_channels")


def _get_output_devices():
    """Return output device names with the system default first."""
    return _ordered_device_names(1, "max_output_channels")
55
+
56
+
57
def _prompt_device(label, names):
    """Show a numbered menu of *names* and return the chosen entry.

    Pressing Enter (or reaching EOF on stdin) accepts the first entry,
    which is labelled as the default.
    """
    print(f"\n{label}:")
    for ordinal, name in enumerate(names, start=1):
        tag = " (default)" if ordinal == 1 else ""
        print(f" {ordinal}. {name}{tag}")
    while True:
        try:
            answer = input("Select [1]: ").strip()
        except EOFError:
            return names[0]
        if not answer:
            return names[0]
        try:
            choice = int(answer) - 1
        except ValueError:
            choice = -1  # non-numeric input falls through to the retry message
        if 0 <= choice < len(names):
            return names[choice]
        print(f" Please enter 1\u2013{len(names)}.")
77
+
78
+
79
def run_setup_wizard():
    """
    Detect audio/MIDI devices, prompt only when there is ambiguity (>1 option)
    or when a saved device has disappeared. Returns config dict.

    A stored value of None means "use the system default". The config is
    re-saved only when something actually changed.
    """
    cfg = load_config()
    changed = False

    # --- Audio input ---
    input_names = _get_input_devices()
    saved = cfg.get("input_device")
    # Saved device vanished since last run: forget it and fall through to re-prompt.
    if "input_device" in cfg and saved is not None and saved not in input_names:
        print(f"Warning: saved input device '{saved}' not found.")
        del cfg["input_device"]
        changed = True
    # Re-prompt if previously defaulted but new devices have appeared
    if "input_device" in cfg and cfg["input_device"] is None and len(input_names) > 1:
        del cfg["input_device"]
        changed = True
    if "input_device" not in cfg:
        changed = True
        # Only one candidate -> no prompt, record None ("use default").
        cfg["input_device"] = _prompt_device("Input device", input_names) if len(input_names) > 1 else None

    # --- Audio output ---
    output_names = _get_output_devices()
    saved = cfg.get("output_device")
    if "output_device" in cfg and saved is not None and saved not in output_names:
        print(f"Warning: saved output device '{saved}' not found.")
        del cfg["output_device"]
        changed = True
    # Re-prompt if previously defaulted but new devices have appeared
    if "output_device" in cfg and cfg["output_device"] is None and len(output_names) > 1:
        del cfg["output_device"]
        changed = True
    if "output_device" not in cfg:
        changed = True
        cfg["output_device"] = _prompt_device("Output device", output_names) if len(output_names) > 1 else None

    # --- MIDI ---
    # Temporary MidiIn instance used only to enumerate ports.
    # NOTE(review): unlike the audio sections, there is no re-prompt here when
    # new MIDI ports appear after a defaulted (None) choice — confirm whether
    # that asymmetry is intentional.
    _tmp = rtmidi.MidiIn()
    midi_ports = _tmp.get_ports()
    _tmp.delete()
    saved = cfg.get("midi_port")
    if "midi_port" in cfg and saved is not None and saved not in midi_ports:
        print(f"Warning: saved MIDI port '{saved}' not found.")
        del cfg["midi_port"]
        changed = True
    if "midi_port" not in cfg:
        changed = True
        cfg["midi_port"] = _prompt_device("MIDI input port", midi_ports) if len(midi_ports) > 1 else None

    if changed:
        save_config(cfg)

    return cfg
midi_cli/loop.py ADDED
@@ -0,0 +1,30 @@
1
+ import numpy as np
2
+
3
+ import midi_cli.state as st
4
+
5
+
6
def reset_loop_state():
    """Discard any recorded loop audio and return the looper to idle.

    Must be called with st.voices_lock held.
    """
    st.loop_state = "idle"
    st.loop_record_chunks = []
    st.loop_buffer = None
    st.loop_pos = 0
12
+
13
+
14
def cycle_loop_state():
    """Step the looper state machine: idle -> recording -> playing <-> overdub.

    Must be called with st.voices_lock held.
    """
    state = st.loop_state
    if state == "idle":
        # Start capturing mixed output into fresh chunks.
        st.loop_record_chunks = []
        st.loop_state = "recording"
    elif state == "recording":
        # Freeze the captured chunks into a single playback buffer
        # (None when nothing was recorded), then start playing from 0.
        chunks = st.loop_record_chunks
        st.loop_buffer = np.concatenate(chunks) if chunks else None
        st.loop_record_chunks = []
        st.loop_pos = 0
        st.loop_state = "playing"
    elif state == "playing":
        st.loop_state = "overdub"
    elif state == "overdub":
        st.loop_state = "playing"