audio2midi 0.9.0__tar.gz → 0.11.0__tar.gz
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the registry.
- {audio2midi-0.9.0 → audio2midi-0.11.0}/PKG-INFO +1 -1
- {audio2midi-0.9.0 → audio2midi-0.11.0}/pyproject.toml +1 -1
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/basic_pitch_pitch_detector.py +1 -2
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/crepe_pitch_detector.py +2 -1
- {audio2midi-0.9.0 → audio2midi-0.11.0}/.gitignore +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/.python-version +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/README.md +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/__init__.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/crepe_pitch_detector_tf.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/librosa_pitch_detector.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/magenta_music_transcription.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/melodia_pitch_detector.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/mt3_music_transcription.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/pop2piano.py +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/py.typed +0 -0
- {audio2midi-0.9.0 → audio2midi-0.11.0}/src/audio2midi/violin_pitch_detector.py +0 -0
src/audio2midi/basic_pitch_pitch_detector.py

@@ -778,7 +778,6 @@ class BasicPitch():
         }
         return unwrapped_output
 
-    def predict(self,audio,onset_thresh=0.5,frame_thresh=0.3,min_note_len=11,midi_tempo=120,infer_onsets=True,include_pitch_bends=True,multiple_pitch_bends=
-        int(np.round(min_note_len / 1000 * (AUDIO_SAMPLE_RATE / FFT_HOP)))
+    def predict(self,audio,onset_thresh=0.5,frame_thresh=0.3,min_note_len=11,midi_tempo=120,infer_onsets=True,include_pitch_bends=True,multiple_pitch_bends=False,melodia_trick=True,progress_callback: Callable[[int, int], None] = None,min_freqat=None,max_freqat=None,output_file="output.mid"):
         model_output_to_notes(self.run_inference(audio,progress_callback),onset_thresh = onset_thresh,frame_thresh = frame_thresh,infer_onsets = infer_onsets,min_note_len = min_note_len,min_freq = min_freqat,max_freq = max_freqat,include_pitch_bends = include_pitch_bends,multiple_pitch_bends = multiple_pitch_bends,melodia_trick = melodia_trick,midi_tempo = midi_tempo).write(output_file)
         return output_file
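For context, a minimal usage sketch of the widened predict signature. Assumptions: BasicPitch() constructs with defaults, audio accepts a NumPy array, and the input file, sample rate, and callback body below are illustrative rather than part of the package's documented API:

    import librosa
    from audio2midi.basic_pitch_pitch_detector import BasicPitch

    def on_progress(done: int, total: int) -> None:
        # Hypothetical callback body; only the (int, int) -> None shape
        # is fixed by the new annotation.
        print(f"inference: {done}/{total}")

    # "input.wav" and sr=22050 are placeholder choices, not package requirements.
    audio, _ = librosa.load("input.wav", sr=22050)
    midi_path = BasicPitch().predict(audio, progress_callback=on_progress, output_file="output.mid")
    print(f"wrote {midi_path}")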
src/audio2midi/crepe_pitch_detector.py

@@ -913,7 +913,8 @@ class Crepe():
                 inputs = batch[0].to(device)
                 outputs = self.model(inputs)
                 all_outputs.append(outputs.cpu())
-                progress_callback
+                if progress_callback:
+                    progress_callback(i,total_batch)
         return torch.cat(all_outputs, dim=0)
 
     def model_predict(self,audio:np.ndarray,viterbi, center, step_size,progress_callback,batch_size):
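The removed line was a bare progress_callback expression, which evaluates the name and silently does nothing; the replacement invokes the callback only when one was supplied. A self-contained sketch of that guard pattern, with run_batches, batches, and model as illustrative stand-ins for the surrounding method's locals:

    from typing import Callable, Optional

    def run_batches(batches, model, progress_callback: Optional[Callable[[int, int], None]] = None):
        # Collect per-batch outputs, reporting progress only if a callback was given.
        outputs = []
        total_batch = len(batches)
        for i, batch in enumerate(batches):
            outputs.append(model(batch))
            if progress_callback:  # guard: skip when the caller passed None
                progress_callback(i, total_batch)
        return outputs

    # Example: double each item and print progress per batch.
    results = run_batches([1, 2, 3], lambda x: x * 2, lambda done, total: print(f"{done + 1}/{total}"))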