audio2midi 0.2.0__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
.gitignore
@@ -1,10 +1,10 @@
- # Python-generated files
- __pycache__/
- *.py[oc]
- build/
- dist/
- wheels/
- *.egg-info
-
- # Virtual environments
- .venv
+ # Python-generated files
+ __pycache__/
+ *.py[oc]
+ build/
+ dist/
+ wheels/
+ *.egg-info
+
+ # Virtual environments
+ .venv
.python-version
@@ -1 +1 @@
- 3.11
+ 3.11
PKG-INFO
@@ -0,0 +1,207 @@
+ Metadata-Version: 2.4
+ Name: audio2midi
+ Version: 0.4.0
+ Summary: Audio To Midi
+ Author-email: dummyjenil <dummyjenil@gmail.com>
+ Provides-Extra: all
+ Requires-Dist: essentia; extra == 'all'
+ Requires-Dist: hmmlearn; extra == 'all'
+ Requires-Dist: huggingface-hub; extra == 'all'
+ Requires-Dist: keras; extra == 'all'
+ Requires-Dist: librosa; extra == 'all'
+ Requires-Dist: mir-eval; extra == 'all'
+ Requires-Dist: nnaudio; extra == 'all'
+ Requires-Dist: numpy==1.26.4; extra == 'all'
+ Requires-Dist: pretty-midi; extra == 'all'
+ Requires-Dist: pretty-midi-fix; extra == 'all'
+ Requires-Dist: resampy; extra == 'all'
+ Requires-Dist: scipy; extra == 'all'
+ Requires-Dist: tensorflow; extra == 'all'
+ Requires-Dist: torch; extra == 'all'
+ Requires-Dist: torchaudio; extra == 'all'
+ Requires-Dist: transformers; extra == 'all'
+ Requires-Dist: vamp; extra == 'all'
+ Provides-Extra: basic-pitch-pitch-detector
+ Requires-Dist: huggingface-hub; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: librosa; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: nnaudio; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: numpy; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: pretty-midi-fix; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: scipy; extra == 'basic-pitch-pitch-detector'
+ Requires-Dist: torch; extra == 'basic-pitch-pitch-detector'
+ Provides-Extra: crepe-pitch-detector
+ Requires-Dist: hmmlearn; extra == 'crepe-pitch-detector'
+ Requires-Dist: huggingface-hub; extra == 'crepe-pitch-detector'
+ Requires-Dist: librosa; extra == 'crepe-pitch-detector'
+ Requires-Dist: numpy; extra == 'crepe-pitch-detector'
+ Requires-Dist: pretty-midi-fix; extra == 'crepe-pitch-detector'
+ Requires-Dist: tensorflow; extra == 'crepe-pitch-detector'
+ Requires-Dist: torch; extra == 'crepe-pitch-detector'
+ Requires-Dist: tqdm; extra == 'crepe-pitch-detector'
+ Provides-Extra: crepe-pitch-detector-tf
+ Requires-Dist: hmmlearn; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: huggingface-hub; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: keras; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: librosa; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: numpy; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: pretty-midi-fix; extra == 'crepe-pitch-detector-tf'
+ Requires-Dist: tensorflow; extra == 'crepe-pitch-detector-tf'
+ Provides-Extra: librosa-pitch-detector
+ Requires-Dist: librosa; extra == 'librosa-pitch-detector'
+ Requires-Dist: numpy; extra == 'librosa-pitch-detector'
+ Requires-Dist: pretty-midi-fix; extra == 'librosa-pitch-detector'
+ Provides-Extra: melodia-pitch-detector
+ Requires-Dist: huggingface-hub; extra == 'melodia-pitch-detector'
+ Requires-Dist: librosa; extra == 'melodia-pitch-detector'
+ Requires-Dist: numpy; extra == 'melodia-pitch-detector'
+ Requires-Dist: pretty-midi-fix; extra == 'melodia-pitch-detector'
+ Requires-Dist: scipy; extra == 'melodia-pitch-detector'
+ Requires-Dist: vamp; extra == 'melodia-pitch-detector'
+ Provides-Extra: pop2piano
+ Requires-Dist: essentia; extra == 'pop2piano'
+ Requires-Dist: huggingface-hub; extra == 'pop2piano'
+ Requires-Dist: librosa; extra == 'pop2piano'
+ Requires-Dist: numpy==1.26.4; extra == 'pop2piano'
+ Requires-Dist: pretty-midi; extra == 'pop2piano'
+ Requires-Dist: pretty-midi-fix; extra == 'pop2piano'
+ Requires-Dist: resampy; extra == 'pop2piano'
+ Requires-Dist: scipy; extra == 'pop2piano'
+ Requires-Dist: torch; extra == 'pop2piano'
+ Requires-Dist: transformers; extra == 'pop2piano'
+ Provides-Extra: violin-pitch-detector
+ Requires-Dist: huggingface-hub; extra == 'violin-pitch-detector'
+ Requires-Dist: librosa; extra == 'violin-pitch-detector'
+ Requires-Dist: mir-eval; extra == 'violin-pitch-detector'
+ Requires-Dist: numpy; extra == 'violin-pitch-detector'
+ Requires-Dist: pretty-midi-fix; extra == 'violin-pitch-detector'
+ Requires-Dist: scipy; extra == 'violin-pitch-detector'
+ Requires-Dist: torch; extra == 'violin-pitch-detector'
+ Requires-Dist: torchaudio; extra == 'violin-pitch-detector'
+ Description-Content-Type: text/markdown
+
+ [Audio2Midi Demo](https://huggingface.co/spaces/shethjenil/Audio2Midi)
+ ---
+
+ [Github](https://github.com/dummyjenil/audio2midi)
+ ---
+
+ ```bash
+ pip install audio2midi[all] audio2midi[pop2piano] audio2midi[violin_pitch_detector] audio2midi[crepe_pitch_detector] audio2midi[crepe_pitch_detector_tf] audio2midi[melodia_pitch_detector] audio2midi[basic_pitch_pitch_detector] audio2midi[librosa_pitch_detector]
+ ```
+ ---
+
+ violin_model_capacity crepe_model_capacity
+
+ * tiny
+ * small
+ * medium
+ * large
+ * full
+ ---
+
+ ``` python
+ from audio2midi.librosa_pitch_detector import Normal_Pitch_Det , Guitar_Pitch_Det
+
+ audio_path = "audio.mp3"
+ Normal_Pitch_Det().predict(audio_path)
+ Guitar_Pitch_Det().predict(audio_path)
+ ```
+
+ ---
+
+ ``` python
+ from os import environ
+ from huggingface_hub import hf_hub_download
+ from shutil import unpack_archive
+ from pathlib import Path
+ from audio2midi.melodia_pitch_detector import Melodia
+ from platform import system as platform_system , architecture as platform_architecture
+
+ unpack_archive(hf_hub_download("shethjenil/Audio2Midi_Models",f"melodia_vamp_plugin_{'win' if (system := platform_system()) == 'Windows' else 'mac' if system == 'Darwin' else 'linux64' if (arch := platform_architecture()[0]) == '64bit' else 'linux32' if arch == '32bit' else None}.zip"),"vamp_melodia",format="zip")
+ environ['VAMP_PATH'] = str(Path("vamp_melodia").absolute())
+ Melodia().predict(audio_path)
+ ```
+
+ ---
+
+ ```python
+ from audio2midi.basic_pitch_pitch_detector import BasicPitch
+ from audio2midi.crepe_pitch_detector import Crepe
+ from audio2midi.violin_pitch_detector import Violin_Pitch_Det
+ from audio2midi.pop2piano import Pop2Piano
+ from torch import device as Device
+ from torch.cuda import is_available as cuda_is_available
+ device = Device("cuda" if cuda_is_available() else "cpu")
+ Crepe().predict(audio_path)
+ Pop2Piano(device=device).predict(audio_path)
+ Violin_Pitch_Det(device=device).predict(audio_path)
+ BasicPitch(device=device).predict(audio_path)
+ ```
+
+ ---
+
+ ```python
+ from audio2midi.basic_pitch_pitch_detector import BasicPitch
+ from audio2midi.crepe_pitch_detector_tf import CrepeTF
+ from audio2midi.crepe_pitch_detector import Crepe
+ from audio2midi.librosa_pitch_detector import Normal_Pitch_Det , Guitar_Pitch_Det
+ from audio2midi.melodia_pitch_detector import Melodia
+ from audio2midi.pop2piano import Pop2Piano
+ from audio2midi.violin_pitch_detector import Violin_Pitch_Det
+
+ from os import environ
+ from huggingface_hub import hf_hub_download
+ from shutil import unpack_archive
+ from pathlib import Path
+ from platform import system as platform_system , architecture as platform_architecture
+ unpack_archive(hf_hub_download("shethjenil/Audio2Midi_Models",f"melodia_vamp_plugin_{'win' if (system := platform_system()) == 'Windows' else 'mac' if system == 'Darwin' else 'linux64' if (arch := platform_architecture()[0]) == '64bit' else 'linux32' if arch == '32bit' else None}.zip"),"vamp_melodia",format="zip")
+ environ['VAMP_PATH'] = str(Path("vamp_melodia").absolute())
+
+ from os import getenv
+ from torch import device as Device
+ from torch.cuda import is_available as cuda_is_available
+ device = Device("cuda" if cuda_is_available() else "cpu")
+
+ import gradio as gr
+ with gr.Blocks() as midi_viz_ui:
+     midi = gr.File(label="Upload MIDI")
+     sf = gr.File(label="Upload SoundFont")
+     output_html = gr.HTML(f'''
+     <div style="display: flex; justify-content: center; align-items: center;">
+     <iframe style="width: 100%; height: 500px;" src="https://shethjenil-midivizsf2.static.hf.space/index_single_file.html" id="midiviz"></iframe>
+     </div>''')
+     midi.upload(None, inputs=midi, js="""
+     async (file) => {
+     if (!file || !file.url || !file.orig_name) return;
+     const iframe = document.getElementById("midiviz");
+     iframe.contentWindow.postMessage({
+     type: "load-midi",
+     url: file.url,
+     name: file.orig_name
+     }, "*");
+     }
+     """)
+     sf.upload(None, inputs=sf, js="""
+     async (file) => {
+     if (!file || !file.url || !file.orig_name) return;
+     const iframe = document.getElementById("midiviz");
+     iframe.contentWindow.postMessage({
+     type: "load-sf",
+     url: file.url,
+     name: file.orig_name
+     }, "*");
+     }
+     """)
+
+ gr.TabbedInterface([
+ gr.Interface(Normal_Pitch_Det().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(120,label="BPM"),gr.Number(512,label="HOP Len"),gr.Number(2,label="minimum note length"),gr.Number(0.1,label="threshold")],gr.File(label="Midi File")),
+ gr.Interface(Guitar_Pitch_Det().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(4,label="mag_exp"),gr.Number(-61,label="Threshold"),gr.Number(6,label="Pre_post_max"),gr.Checkbox(False,label="backtrack"),gr.Checkbox(False,label="round_to_sixteenth"),gr.Number(1024,label="hop_length"),gr.Number(72,label="n_bins"),gr.Number(12,label="bins_per_octave")],gr.File(label="Midi File")),
+ gr.Interface(Melodia().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(120,label="BPM",step=30),gr.Number(0.25,label="smoothness",step=0.05,info="Smooth the pitch sequence with a median filter of the provided duration (in seconds)."),gr.Number(0.1,label="minimum duration",step=0.1,info="Minimum allowed duration for note (in seconds). Shorter notes will be removed."),gr.Number(128,label="HOP")],gr.File(label="Midi File")),
+ gr.Interface(BasicPitch(device=device).predict,[gr.Audio(type="filepath", label="Upload Audio"),gr.Number(0.5,label="onset_thresh",info="Minimum amplitude of an onset activation to be considered an onset."),gr.Number(0.3,label="frame_thresh",info="Minimum energy requirement for a frame to be considered present."),gr.Number(127.70,label="min_note_len",info="The minimum allowed note length in milliseconds."),gr.Number(120,label="midi_tempo"),gr.Checkbox(True,label="infer_onsets",info="add additional onsets when there are large differences in frame amplitudes."),gr.Checkbox(True,label="include_pitch_bends",info="include pitch bends."),gr.Checkbox(False,label="multiple_pitch_bends",info="allow overlapping notes in midi file to have pitch bends."),gr.Checkbox(True,label="melodia_trick",info="Use the melodia post-processing step.")],gr.File(label="Download Midi File")),
+ gr.Interface(Violin_Pitch_Det(device=device,model_capacity=getenv("violin_model_capacity","full")).predict, [gr.Audio(label="Upload your Audio file",type="filepath"),gr.Number(32,label="Batch size"),gr.Radio(["spotify","tiktok"],value="spotify",label="Post Processing"),gr.Checkbox(True,label="include_pitch_bends")],gr.File(label="Download MIDI file")),
+ gr.Interface(Crepe(getenv("crepe_model_capacity","full")).predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Checkbox(False,label="viterbi",info="Apply viterbi smoothing to the estimated pitch curve"),gr.Checkbox(True,label="center"),gr.Number(10,label="step size",info="The step size in milliseconds for running pitch estimation."),gr.Number(0.8,label="minimum confidence"),gr.Number(32,label="batch size")],gr.File(label="Midi File")),
+ gr.Interface(CrepeTF(getenv("crepe_model_capacity","full")).predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Checkbox(False,label="viterbi",info="Apply viterbi smoothing to the estimated pitch curve"),gr.Checkbox(True,label="center"),gr.Number(10,label="step size",info="The step size in milliseconds for running pitch estimation."),gr.Number(0.8,label="minimum confidence"),gr.Number(32,label="batch size")],gr.File(label="Midi File")),
+ gr.Interface(Pop2Piano(device).predict,[gr.Audio(label="Input Audio",type="filepath"),gr.Number(1, minimum=1, maximum=21, label="Composer"),gr.Number(2,label="Details in Piano"),gr.Number(1,label="Efficiency of Piano"),gr.Radio([1,2,4],label="steps per beat",value=2)],gr.File(label="MIDI File")),
+ midi_viz_ui
+ ],["Normal Pitch Detection","Guitar Based Pitch Detection","Melodia","Spotify Pitch Detection","Violin Based Pitch Detection","Crepe Pitch Detection","Crepe Pitch Detection TF","Pop2Piano","Midi Vizulizer"]).launch()
+ ```
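As a quick orientation to the extras declared above, the following is an editorial sketch (not part of the package) that probes which optional backends are importable. It assumes each extra maps to the audio2midi submodule of the same name, which is the mapping shown by the README imports in this diff.

```python
# Hypothetical helper, not shipped with audio2midi: report which optional
# backends are importable in the current environment. The extra -> module
# mapping is taken from the README snippets shown in this diff.
from importlib import import_module

EXTRA_TO_MODULE = {
    "librosa_pitch_detector": "audio2midi.librosa_pitch_detector",
    "basic_pitch_pitch_detector": "audio2midi.basic_pitch_pitch_detector",
    "melodia_pitch_detector": "audio2midi.melodia_pitch_detector",
    "crepe_pitch_detector": "audio2midi.crepe_pitch_detector",
    "crepe_pitch_detector_tf": "audio2midi.crepe_pitch_detector_tf",
    "violin_pitch_detector": "audio2midi.violin_pitch_detector",
    "pop2piano": "audio2midi.pop2piano",
}

for extra, module in EXTRA_TO_MODULE.items():
    try:
        import_module(module)
        print(f"{extra}: available")
    except ImportError as exc:  # the corresponding extra is not installed
        print(f"{extra}: missing ({exc})")
```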
README.md
@@ -1,5 +1,68 @@
+ [Audio2Midi Demo](https://huggingface.co/spaces/shethjenil/Audio2Midi)
+ ---
+
+ [Github](https://github.com/dummyjenil/audio2midi)
+ ---
+
+ ```bash
+ pip install audio2midi[all] audio2midi[pop2piano] audio2midi[violin_pitch_detector] audio2midi[crepe_pitch_detector] audio2midi[crepe_pitch_detector_tf] audio2midi[melodia_pitch_detector] audio2midi[basic_pitch_pitch_detector] audio2midi[librosa_pitch_detector]
+ ```
+ ---
+
+ violin_model_capacity crepe_model_capacity
+
+ * tiny
+ * small
+ * medium
+ * large
+ * full
+ ---
+
+ ``` python
+ from audio2midi.librosa_pitch_detector import Normal_Pitch_Det , Guitar_Pitch_Det
+
+ audio_path = "audio.mp3"
+ Normal_Pitch_Det().predict(audio_path)
+ Guitar_Pitch_Det().predict(audio_path)
+ ```
+
+ ---
+
+ ``` python
+ from os import environ
+ from huggingface_hub import hf_hub_download
+ from shutil import unpack_archive
+ from pathlib import Path
+ from audio2midi.melodia_pitch_detector import Melodia
+ from platform import system as platform_system , architecture as platform_architecture
+
+ unpack_archive(hf_hub_download("shethjenil/Audio2Midi_Models",f"melodia_vamp_plugin_{'win' if (system := platform_system()) == 'Windows' else 'mac' if system == 'Darwin' else 'linux64' if (arch := platform_architecture()[0]) == '64bit' else 'linux32' if arch == '32bit' else None}.zip"),"vamp_melodia",format="zip")
+ environ['VAMP_PATH'] = str(Path("vamp_melodia").absolute())
+ Melodia().predict(audio_path)
+ ```
+
+ ---
+
+ ```python
  from audio2midi.basic_pitch_pitch_detector import BasicPitch
  from audio2midi.crepe_pitch_detector import Crepe
+ from audio2midi.violin_pitch_detector import Violin_Pitch_Det
+ from audio2midi.pop2piano import Pop2Piano
+ from torch import device as Device
+ from torch.cuda import is_available as cuda_is_available
+ device = Device("cuda" if cuda_is_available() else "cpu")
+ Crepe().predict(audio_path)
+ Pop2Piano(device=device).predict(audio_path)
+ Violin_Pitch_Det(device=device).predict(audio_path)
+ BasicPitch(device=device).predict(audio_path)
+ ```
+
+ ---
+
+ ```python
+ from audio2midi.basic_pitch_pitch_detector import BasicPitch
+ from audio2midi.crepe_pitch_detector_tf import CrepeTF
+ from audio2midi.crepe_pitch_detector import Crepe
  from audio2midi.librosa_pitch_detector import Normal_Pitch_Det , Guitar_Pitch_Det
  from audio2midi.melodia_pitch_detector import Melodia
  from audio2midi.pop2piano import Pop2Piano
@@ -50,13 +113,14 @@ with gr.Blocks() as midi_viz_ui:
      """)
 
  gr.TabbedInterface([
- gr.Interface(Normal_Pitch_Det().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(120,label="BPM"),gr.Number(512,label="HOP Len"),gr.Number(512,label="minimum note length"),gr.Number(0.1,label="threshold")],gr.File(label="Midi File")),
+ gr.Interface(Normal_Pitch_Det().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(120,label="BPM"),gr.Number(512,label="HOP Len"),gr.Number(2,label="minimum note length"),gr.Number(0.1,label="threshold")],gr.File(label="Midi File")),
  gr.Interface(Guitar_Pitch_Det().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(4,label="mag_exp"),gr.Number(-61,label="Threshold"),gr.Number(6,label="Pre_post_max"),gr.Checkbox(False,label="backtrack"),gr.Checkbox(False,label="round_to_sixteenth"),gr.Number(1024,label="hop_length"),gr.Number(72,label="n_bins"),gr.Number(12,label="bins_per_octave")],gr.File(label="Midi File")),
  gr.Interface(Melodia().predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Number(120,label="BPM",step=30),gr.Number(0.25,label="smoothness",step=0.05,info="Smooth the pitch sequence with a median filter of the provided duration (in seconds)."),gr.Number(0.1,label="minimum duration",step=0.1,info="Minimum allowed duration for note (in seconds). Shorter notes will be removed."),gr.Number(128,label="HOP")],gr.File(label="Midi File")),
  gr.Interface(BasicPitch(device=device).predict,[gr.Audio(type="filepath", label="Upload Audio"),gr.Number(0.5,label="onset_thresh",info="Minimum amplitude of an onset activation to be considered an onset."),gr.Number(0.3,label="frame_thresh",info="Minimum energy requirement for a frame to be considered present."),gr.Number(127.70,label="min_note_len",info="The minimum allowed note length in milliseconds."),gr.Number(120,label="midi_tempo"),gr.Checkbox(True,label="infer_onsets",info="add additional onsets when there are large differences in frame amplitudes."),gr.Checkbox(True,label="include_pitch_bends",info="include pitch bends."),gr.Checkbox(False,label="multiple_pitch_bends",info="allow overlapping notes in midi file to have pitch bends."),gr.Checkbox(True,label="melodia_trick",info="Use the melodia post-processing step.")],gr.File(label="Download Midi File")),
  gr.Interface(Violin_Pitch_Det(device=device,model_capacity=getenv("violin_model_capacity","full")).predict, [gr.Audio(label="Upload your Audio file",type="filepath"),gr.Number(32,label="Batch size"),gr.Radio(["spotify","tiktok"],value="spotify",label="Post Processing"),gr.Checkbox(True,label="include_pitch_bends")],gr.File(label="Download MIDI file")),
  gr.Interface(Crepe(getenv("crepe_model_capacity","full")).predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Checkbox(False,label="viterbi",info="Apply viterbi smoothing to the estimated pitch curve"),gr.Checkbox(True,label="center"),gr.Number(10,label="step size",info="The step size in milliseconds for running pitch estimation."),gr.Number(0.8,label="minimum confidence"),gr.Number(32,label="batch size")],gr.File(label="Midi File")),
+ gr.Interface(CrepeTF(getenv("crepe_model_capacity","full")).predict,[gr.Audio(type="filepath",label="Input Audio"),gr.Checkbox(False,label="viterbi",info="Apply viterbi smoothing to the estimated pitch curve"),gr.Checkbox(True,label="center"),gr.Number(10,label="step size",info="The step size in milliseconds for running pitch estimation."),gr.Number(0.8,label="minimum confidence"),gr.Number(32,label="batch size")],gr.File(label="Midi File")),
  gr.Interface(Pop2Piano(device).predict,[gr.Audio(label="Input Audio",type="filepath"),gr.Number(1, minimum=1, maximum=21, label="Composer"),gr.Number(2,label="Details in Piano"),gr.Number(1,label="Efficiency of Piano"),gr.Radio([1,2,4],label="steps per beat",value=2)],gr.File(label="MIDI File")),
  midi_viz_ui
- ],["Normal Pitch Detection","Guitar Based Pitch Detection","Melodia","Spotify Pitch Detection","Violin Based Pitch Detection","Crepe Pitch Detection","Pop2Piano","Midi Vizulizer"]).launch()
-
+ ],["Normal Pitch Detection","Guitar Based Pitch Detection","Melodia","Spotify Pitch Detection","Violin Based Pitch Detection","Crepe Pitch Detection","Crepe Pitch Detection TF","Pop2Piano","Midi Vizulizer"]).launch()
+ ```
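The Gradio demo added to the README reads two environment variables, violin_model_capacity and crepe_model_capacity, via os.getenv, using the tiny/small/medium/large/full capacity list near the top of the README. A brief editorial sketch of setting them before launching the demo, assuming nothing beyond what the README code itself shows:

```python
# Sketch only: choose model capacities for the README's Gradio demo through
# the environment variables it reads with os.getenv(); values come from the
# tiny/small/medium/large/full list in the README above.
import os

os.environ["violin_model_capacity"] = "medium"
os.environ["crepe_model_capacity"] = "small"
# ...then run the Gradio demo code from the README above in the same process.
```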
pyproject.toml
@@ -1,23 +1,24 @@
- [project]
- name = "audio2midi"
- version = "0.2.0"
- description = "Audio To Midi"
- readme = "README.md"
- authors = [
- { name = "dummyjenil", email = "dummyjenil@gmail.com" }
- ]
-
- dependencies = []
-
- [project.optional-dependencies]
- librosa_pitch_detector = ["librosa", "numpy","pretty_midi_fix"]
- basic_pitch_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","torch","nnAudio","huggingface_hub"]
- melodia_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","vamp","huggingface_hub"]
- crepe_pitch_detector = ["librosa", "numpy","pretty_midi_fix","hmmlearn","tensorflow","keras","huggingface_hub"]
- violin_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","torchaudio","torch","mir_eval","huggingface_hub"]
- pop2piano = ["librosa", "numpy==1.26.4","pretty_midi_fix","transformers","essentia","torch","scipy","resampy","pretty_midi","huggingface_hub"]
- all = ["librosa", "numpy==1.26.4","pretty_midi_fix","transformers","essentia","torch","scipy","torchaudio","torch","mir_eval","hmmlearn","tensorflow","keras","vamp","nnAudio","resampy","pretty_midi","huggingface_hub"]
-
- [build-system]
- requires = ["hatchling"]
- build-backend = "hatchling.build"
+ [project]
+ name = "audio2midi"
+ version = "0.4.0"
+ description = "Audio To Midi"
+ readme = "README.md"
+ authors = [
+ { name = "dummyjenil", email = "dummyjenil@gmail.com" }
+ ]
+
+ dependencies = []
+
+ [project.optional-dependencies]
+ librosa_pitch_detector = ["librosa", "numpy","pretty_midi_fix"]
+ basic_pitch_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","torch","nnAudio","huggingface_hub"]
+ melodia_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","vamp","huggingface_hub"]
+ crepe_pitch_detector_tf = ["librosa", "numpy","pretty_midi_fix","hmmlearn","tensorflow","keras","huggingface_hub"]
+ crepe_pitch_detector = ["librosa", "numpy","pretty_midi_fix","hmmlearn","tensorflow","torch","huggingface_hub","tqdm"]
+ violin_pitch_detector = ["librosa", "numpy","pretty_midi_fix","scipy","torchaudio","torch","mir_eval","huggingface_hub"]
+ pop2piano = ["librosa", "numpy==1.26.4","pretty_midi_fix","transformers","essentia","torch","scipy","resampy","pretty_midi","huggingface_hub"]
+ all = ["librosa", "numpy==1.26.4","pretty_midi_fix","transformers","essentia","torch","scipy","torchaudio","torch","mir_eval","hmmlearn","tensorflow","keras","vamp","nnAudio","resampy","pretty_midi","huggingface_hub"]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
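The substantive change behind the version bump is visible in the optional-dependency table above: the single CREPE extra from 0.2.0 is split in 0.4.0 into a torch-backed crepe_pitch_detector and a TensorFlow/Keras-backed crepe_pitch_detector_tf, with a matching CrepeTF class in the README. A hedged usage sketch follows; it assumes CrepeTF mirrors Crepe's constructor and predict() signature, which is what the Gradio wiring in the README diff suggests but which this diff does not state outright.

```python
# Sketch of the 0.4.0 split: torch-backed Crepe vs. TensorFlow-backed CrepeTF.
# Assumption: CrepeTF takes the same model-capacity argument and predict()
# call as Crepe, as implied by the README's Gradio interfaces above.
from audio2midi.crepe_pitch_detector import Crepe        # extra: crepe_pitch_detector
from audio2midi.crepe_pitch_detector_tf import CrepeTF   # extra: crepe_pitch_detector_tf

audio_path = "audio.mp3"

# Model capacity is one of: tiny, small, medium, large, full.
Crepe("full").predict(audio_path)    # torch backend
CrepeTF("full").predict(audio_path)  # tensorflow/keras backend
```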