analyzeAudio 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
@@ -1,4 +1,5 @@
- from .pythonator import pythonizeFFprobe
+ """Analyzers that use the filename of an audio file to analyze its audio data."""
+ from analyzeAudio.pythonator import pythonizeFFprobe
  from analyzeAudio import registrationAudioAspect, cacheAudioAnalyzers
  from os import PathLike
  from statistics import mean
@@ -1,30 +1,30 @@
+ """Analyzers that use the spectrogram to analyze audio data."""
  from analyzeAudio import registrationAudioAspect, audioAspects, cacheAudioAnalyzers
  from typing import Any
  import cachetools
  import librosa
  import numpy
- from optype.numpy import AnyFloatingDType, ToArray3D, ToFloat3D
  from numpy import dtype, floating

  @registrationAudioAspect('Chromagram')
- def analyzeChromagram(spectrogramPower: numpy.ndarray[Any, dtype[floating[Any]]], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray:
-     return librosa.feature.chroma_stft(S=spectrogramPower, sr=sampleRate, **keywordArguments)
+ def analyzeChromagram(spectrogramPower: numpy.ndarray[Any, dtype[floating[Any]]], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     return librosa.feature.chroma_stft(S=spectrogramPower, sr=sampleRate, **keywordArguments) # type: ignore

  @registrationAudioAspect('Spectral Contrast')
- def analyzeSpectralContrast(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray:
-     return librosa.feature.spectral_contrast(S=spectrogramMagnitude, **keywordArguments)
+ def analyzeSpectralContrast(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     return librosa.feature.spectral_contrast(S=spectrogramMagnitude, **keywordArguments) # type: ignore

  @registrationAudioAspect('Spectral Bandwidth')
- def analyzeSpectralBandwidth(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray:
+ def analyzeSpectralBandwidth(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
      centroid = audioAspects['Spectral Centroid']['analyzer'](spectrogramMagnitude)
-     return librosa.feature.spectral_bandwidth(S=spectrogramMagnitude, centroid=centroid, **keywordArguments)
+     return librosa.feature.spectral_bandwidth(S=spectrogramMagnitude, centroid=centroid, **keywordArguments) # type: ignore

  @cachetools.cached(cache=cacheAudioAnalyzers)
  @registrationAudioAspect('Spectral Centroid')
- def analyzeSpectralCentroid(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray:
-     return librosa.feature.spectral_centroid(S=spectrogramMagnitude, **keywordArguments)
+ def analyzeSpectralCentroid(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     return librosa.feature.spectral_centroid(S=spectrogramMagnitude, **keywordArguments) # type: ignore

  @registrationAudioAspect('Spectral Flatness')
- def analyzeSpectralFlatness(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray:
-     spectralFlatness = librosa.feature.spectral_flatness(S=spectrogramMagnitude, **keywordArguments)
+ def analyzeSpectralFlatness(spectrogramMagnitude: numpy.ndarray[Any, dtype[floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     spectralFlatness: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]] = librosa.feature.spectral_flatness(S=spectrogramMagnitude, **keywordArguments) # type: ignore
      return 20 * numpy.log10(spectralFlatness, where=(spectralFlatness != 0)) # dB
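Note on the dB conversion used above (and again in the waveform analyzers below): `20 * numpy.log10(x, where=(x != 0))` masks out zero bins so they do not become `-inf`. A minimal sketch of that masking pattern; the helper name and the `out=` argument are illustrative additions rather than analyzeAudio code (without `out=`, NumPy leaves the masked-out entries of the result uninitialized):

```python
import numpy

def toDecibels(arrayValues: numpy.ndarray) -> numpy.ndarray:
    # Entries skipped by `where=` keep the 0.0 written by zeros_like instead of becoming -inf.
    decibels = numpy.zeros_like(arrayValues, dtype=float)
    numpy.log10(arrayValues, out=decibels, where=(arrayValues != 0))
    return 20 * decibels

print(toDecibels(numpy.array([1.0, 0.1, 0.0])))  # [  0. -20.   0.]
```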
@@ -1,3 +1,4 @@
+ """Analyzers that use the tensor to analyze audio data."""
  from analyzeAudio import registrationAudioAspect
  from torchmetrics.functional.audio.srmr import speech_reverberation_modulation_energy_ratio
  from typing import Any
@@ -5,6 +6,6 @@ import numpy
  import torch

  @registrationAudioAspect('SRMR')
- def analyzeSRMR(tensorAudio: torch.Tensor, sampleRate: int, pytorchOnCPU: bool | None, **keywordArguments: Any) -> numpy.ndarray:
+ def analyzeSRMR(tensorAudio: torch.Tensor, sampleRate: int, pytorchOnCPU: bool | None, **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
      keywordArguments['fast'] = keywordArguments.get('fast') or pytorchOnCPU or None
-     return torch.Tensor.numpy(speech_reverberation_modulation_energy_ratio(tensorAudio, sampleRate, **keywordArguments))
+     return torch.Tensor.numpy(speech_reverberation_modulation_energy_ratio(tensorAudio, sampleRate, **keywordArguments)) # type: ignore
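The unchanged line `keywordArguments['fast'] = keywordArguments.get('fast') or pytorchOnCPU or None` decides whether torchmetrics' fast SRMR approximation is requested. A small sketch of how that `or` chain resolves (the helper is illustrative; note that a falsy caller value such as `fast=False` would also be overridden by the chain):

```python
from typing import Any

def resolveFast(keywordArguments: dict[str, Any], pytorchOnCPU: bool | None) -> bool | None:
    # Same expression as in analyzeSRMR: a truthy caller value wins; otherwise CPU-only
    # torch enables the fast path; otherwise None defers to torchmetrics' own default.
    return keywordArguments.get('fast') or pytorchOnCPU or None

assert resolveFast({}, pytorchOnCPU=True) is True               # no GPU: use the fast approximation
assert resolveFast({}, pytorchOnCPU=False) is None              # GPU available: defer to the default
assert resolveFast({'fast': True}, pytorchOnCPU=False) is True  # explicit request survives
```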
@@ -1,26 +1,26 @@
+ """Analyzers that use the waveform of audio data."""
  from analyzeAudio import registrationAudioAspect, audioAspects, cacheAudioAnalyzers
  from typing import Any
  import librosa
  import numpy
- from optype.numpy import ToArray2D, AnyFloatingDType
  import cachetools

  @cachetools.cached(cache=cacheAudioAnalyzers)
  @registrationAudioAspect('Tempogram')
- def analyzeTempogram(waveform: ToArray2D[AnyFloatingDType], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray:
-     return librosa.feature.tempogram(y=waveform, sr=sampleRate, **keywordArguments)
+ def analyzeTempogram(waveform: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     return librosa.feature.tempogram(y=waveform, sr=sampleRate, **keywordArguments) # type: ignore

  # "RMS value from audio samples is faster ... However, ... spectrogram ... more accurate ... because ... windowed"
  @registrationAudioAspect('RMS from waveform')
- def analyzeRMS(waveform: ToArray2D[AnyFloatingDType], **keywordArguments: Any) -> numpy.ndarray:
-     arrayRMS = librosa.feature.rms(y=waveform, **keywordArguments)
+ def analyzeRMS(waveform: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     arrayRMS: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]] = librosa.feature.rms(y=waveform, **keywordArguments) # type: ignore
      return 20 * numpy.log10(arrayRMS, where=(arrayRMS != 0)) # dB

  @registrationAudioAspect('Tempo')
- def analyzeTempo(waveform: ToArray2D[AnyFloatingDType], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray:
+ def analyzeTempo(waveform: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]], sampleRate: int, **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
      tempogram = audioAspects['Tempogram']['analyzer'](waveform, sampleRate)
-     return librosa.feature.tempo(y=waveform, sr=sampleRate, tg=tempogram, **keywordArguments)
+     return librosa.feature.tempo(y=waveform, sr=sampleRate, tg=tempogram, **keywordArguments) # type: ignore

  @registrationAudioAspect('Zero-crossing rate') # This is distinct from 'Zero-crossings rate'
- def analyzeZeroCrossingRate(waveform: ToArray2D[AnyFloatingDType], **keywordArguments: Any) -> numpy.ndarray:
-     return librosa.feature.zero_crossing_rate(y=waveform, **keywordArguments)
+ def analyzeZeroCrossingRate(waveform: numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.floating[Any]]], **keywordArguments: Any) -> numpy.ndarray: # pyright: ignore [reportMissingTypeArgument, reportUnknownParameterType]
+     return librosa.feature.zero_crossing_rate(y=waveform, **keywordArguments) # type: ignore
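`analyzeTempo` above does not call the tempogram code directly; it looks up the registered 'Tempogram' analyzer via `audioAspects['Tempogram']['analyzer']`. A simplified, hypothetical sketch of that decorator-plus-registry pattern; the real `registrationAudioAspect` in the package does more bookkeeping than this:

```python
from typing import Any, Callable

audioAspects: dict[str, dict[str, Callable[..., Any]]] = {}

def registrationAudioAspect(aspectName: str) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
    def registrar(analyzer: Callable[..., Any]) -> Callable[..., Any]:
        audioAspects[aspectName] = {'analyzer': analyzer}  # store the function under its aspect name
        return analyzer
    return registrar

@registrationAudioAspect('Tempogram')
def analyzeTempogram(waveform: list[float], sampleRate: int) -> str:
    return f"tempogram of {len(waveform)} samples at {sampleRate} Hz"  # placeholder body

# Another analyzer can then depend on it by name, exactly as analyzeTempo does:
print(audioAspects['Tempogram']['analyzer']([0.0] * 4, 22050))
```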
@@ -3,7 +3,7 @@ from concurrent.futures import ProcessPoolExecutor, as_completed
  from numpy.typing import NDArray
  from os import PathLike
  from typing import Any, cast, ParamSpec, TypeAlias, TYPE_CHECKING, TypeVar
- from Z0Z_tools import defineConcurrencyLimit, oopsieKwargsie, stft
+ from Z0Z_tools import defineConcurrencyLimit, oopsieKwargsie, stft, Spectrogram
  import cachetools
  import inspect
  import numpy
@@ -102,7 +102,7 @@ def analyzeAudioFile(pathFilename: str | PathLike[Any], listAspectNames: list[st

      # I need "lazy" loading
      tryAgain = True
-     while tryAgain: # `tenacity`?
+     while tryAgain:
          try:
              tensorAudio = torch.from_numpy(waveform) # memory-sharing
              tryAgain = False
@@ -113,9 +113,9 @@ def analyzeAudioFile(pathFilename: str | PathLike[Any], listAspectNames: list[st
              else:
                  raise ERRORmessage

-     # spectrogram = stft(waveform, sampleRate=sampleRate)
-     # spectrogramMagnitude = numpy.absolute(spectrogram)
-     # spectrogramPower = spectrogramMagnitude ** 2
+     spectrogram = stft(waveform, sampleRate=sampleRate)
+     spectrogramMagnitude = numpy.absolute(spectrogram)
+     spectrogramPower = spectrogramMagnitude ** 2

      pytorchOnCPU = not torch.cuda.is_available() # False if GPU available, True if not

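Version 0.0.16 re-enables the eager spectrogram computation inside `analyzeAudioFile`: the complex STFT from `Z0Z_tools.stft`, its magnitude, and its power are computed once and handed to the spectrogram-based analyzers above. A rough sketch of the same derivation, using `librosa.stft` as a stand-in because `Z0Z_tools.stft`'s full signature is not shown in this diff:

```python
import librosa
import numpy

sampleRate = 22050
# One second of a 440 Hz sine as a stand-in waveform.
waveform = numpy.sin(2 * numpy.pi * 440.0 * numpy.arange(sampleRate) / sampleRate).astype(numpy.float32)

spectrogram = librosa.stft(waveform)                # complex STFT (stand-in for Z0Z_tools.stft)
spectrogramMagnitude = numpy.absolute(spectrogram)  # consumed by the magnitude-based analyzers
spectrogramPower = spectrogramMagnitude ** 2        # consumed by, e.g., analyzeChromagram

chromagram = librosa.feature.chroma_stft(S=spectrogramPower, sr=sampleRate)
print(spectrogramMagnitude.shape, chromagram.shape)  # (frequency bins, frames) and (12, frames)
```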
@@ -1,3 +1,4 @@
+ """Convert FFprobe output to a standardized Python object."""
  from collections import defaultdict
  from typing import Any, cast, NamedTuple
  import json
@@ -89,7 +90,7 @@ def pythonizeFFprobe(FFprobeJSON_utf8: str):
              Z0Z_dictionaries[registrant] = {}
          elif statistic not in Z0Z_dictionaries[registrant]:
              # NOTE (as of this writing) `registrar` can only understand the generic class `numpy.ndarray` and not more specific typing
-             valueSherpa = cast(numpy.ndarray, numpy.zeros((channel, len(FFroot['frames']))))
+             valueSherpa = cast(numpy.ndarray, numpy.zeros((channel, len(FFroot['frames'])))) # type: ignore
              Z0Z_dictionaries[registrant][statistic] = valueSherpa
          else:
              raise # Re-raise the exception
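The `valueSherpa` line above preallocates a `(channel, number of frames)` array so per-frame, per-channel FFprobe statistics can be written in place. A tiny, hypothetical illustration of that shape; the loop and values are placeholders, not pythonizeFFprobe's logic:

```python
import numpy

channel = 2      # audio channels reported by FFprobe (hypothetical)
countFrames = 5  # stands in for len(FFroot['frames'])

valueSherpa = numpy.zeros((channel, countFrames))  # one row per channel, one column per frame
for indexFrame in range(countFrames):
    for indexChannel in range(channel):
        valueSherpa[indexChannel, indexFrame] = indexFrame * 0.1  # placeholder statistic

print(valueSherpa.shape)  # (2, 5)
```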
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: analyzeAudio
- Version: 0.0.15
+ Version: 0.0.16
  Summary: Measure one or more aspects of one or more audio files.
  Author-email: Hunter Hogan <HunterHogan@pm.me>
  License: CC-BY-NC-4.0
@@ -35,7 +35,6 @@ License-File: LICENSE
  Requires-Dist: cachetools
  Requires-Dist: librosa
  Requires-Dist: numpy
- Requires-Dist: optype[numpy]
  Requires-Dist: standard-aifc; python_version >= "3.13"
  Requires-Dist: standard-sunau; python_version >= "3.13"
  Requires-Dist: torch
@@ -213,4 +212,13 @@ pip install analyzeAudio
  [![Static Badge](https://img.shields.io/badge/2011_August-Homeless_since-blue?style=flat)](https://HunterThinks.com/support)
  [![YouTube Channel Subscribers](https://img.shields.io/youtube/channel/subscribers/UC3Gx7kz61009NbhpRtPP7tw)](https://www.youtube.com/@HunterHogan)

+ ## How to code
+
+ Coding One Step at a Time:
+
+ 0. WRITE CODE.
+ 1. Don't write stupid code that's hard to revise.
+ 2. Write good code.
+ 3. When revising, write better code.
+
  [![CC-BY-NC-4.0](https://github.com/hunterhogan/analyzeAudio/blob/main/CC-BY-NC-4.0.svg)](https://creativecommons.org/licenses/by-nc/4.0/)
@@ -0,0 +1,17 @@
+ analyzeAudio/__init__.py,sha256=2D5JMeZfLGnwyQvt4Q2-HCsShHulcNXBM_Wu9vZPCiI,437
+ analyzeAudio/analyzersUseFilename.py,sha256=AOA_Ab6-QU4q4i7AOy9Z68yPiGBi-Zjpo4zYmqvOwuM,11021
+ analyzeAudio/analyzersUseSpectrogram.py,sha256=RjKW9it_9EDgKwkx9sB99z7qbLrVq5xM7ALTcwo4xE8,2346
+ analyzeAudio/analyzersUseTensor.py,sha256=oRxKw42Q4bdBhuSlwdyVo2MNgI0AbnjEjpnRLQVaqco,701
+ analyzeAudio/analyzersUseWaveform.py,sha256=2nGhwdpDpamAAgeZ4XAbtX1aGAn5R-VIwHO1qnyxz0U,2042
+ analyzeAudio/audioAspectsRegistry.py,sha256=NoRUflTxls4palGtc7iQsfMLc9nMGITCVRPhDehNFJ4,8564
+ analyzeAudio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ analyzeAudio/pythonator.py,sha256=hL2KzeO7cgboSCL9uOl1uyuWMrnR0xhDt9o78GkM2_0,5680
+ analyzeaudio-0.0.16.dist-info/licenses/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
+ tests/conftest.py,sha256=BhZswOjkl_u-qiS4Zy38d2fETdWAtZiigeuXYBK8l0k,397
+ tests/test_audioAspectsRegistry.py,sha256=-TWTLMdAn6IFv7ZdFWrBm1KxpLBa3Mz1sCygAwxV6gE,27
+ tests/test_other.py,sha256=sd20ms4StQ13_3-gmwZwtAuoIAwfxWC5IXM_Cp5GQXM,428
+ analyzeaudio-0.0.16.dist-info/METADATA,sha256=Nn-a1bKLTdVvwmayCHEA8SdelzcSDhmbCJyMrdIv5XU,9178
+ analyzeaudio-0.0.16.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ analyzeaudio-0.0.16.dist-info/entry_points.txt,sha256=FHgSx7fndtZ6SnQ-nWVXf0NB59exaHQ2DtatTK9KrLg,100
+ analyzeaudio-0.0.16.dist-info/top_level.txt,sha256=QV8LQ0r_1LIQuewxDcEzODpykv5qRYG3I70piOUSVRg,19
+ analyzeaudio-0.0.16.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (80.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

@@ -1,17 +0,0 @@
- analyzeAudio/__init__.py,sha256=2D5JMeZfLGnwyQvt4Q2-HCsShHulcNXBM_Wu9vZPCiI,437
- analyzeAudio/analyzersUseFilename.py,sha256=XMRmbARX7UzbTdPXOmQOQDDPennF2Vd1UaIIV8k6NrQ,10927
- analyzeAudio/analyzersUseSpectrogram.py,sha256=j-e69ICVHOC_W3ev8HOTem4KGGpE9QyKnYZEvFseMkE,1835
- analyzeAudio/analyzersUseTensor.py,sha256=-wD_QAd41lB65ceT5UedEoxWL078tZ0E-kC9ssDSghc,553
- analyzeAudio/analyzersUseWaveform.py,sha256=AwDyagz9bLLAq8ji4by51x7d2YWRdUQaY3iaw1k0yD4,1472
- analyzeAudio/audioAspectsRegistry.py,sha256=p8GoVbmfGwZvj0YtSC3qbENaf-gKfAfUbNwpvC2R9V8,8571
- analyzeAudio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- analyzeAudio/pythonator.py,sha256=St6KmIxvHwoW6IuHcEJjjvUUrEkG0sPv2S3Th9rjJtE,5603
- analyzeaudio-0.0.15.dist-info/licenses/LICENSE,sha256=NxH5Y8BdC-gNU-WSMwim3uMbID2iNDXJz7fHtuTdXhk,19346
- tests/conftest.py,sha256=BhZswOjkl_u-qiS4Zy38d2fETdWAtZiigeuXYBK8l0k,397
- tests/test_audioAspectsRegistry.py,sha256=-TWTLMdAn6IFv7ZdFWrBm1KxpLBa3Mz1sCygAwxV6gE,27
- tests/test_other.py,sha256=sd20ms4StQ13_3-gmwZwtAuoIAwfxWC5IXM_Cp5GQXM,428
- analyzeaudio-0.0.15.dist-info/METADATA,sha256=EGGk2mw9BlVFMauAW9cjwo2uSUpxnNBw8CLcb9iA0sQ,9040
- analyzeaudio-0.0.15.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
- analyzeaudio-0.0.15.dist-info/entry_points.txt,sha256=FHgSx7fndtZ6SnQ-nWVXf0NB59exaHQ2DtatTK9KrLg,100
- analyzeaudio-0.0.15.dist-info/top_level.txt,sha256=QV8LQ0r_1LIQuewxDcEzODpykv5qRYG3I70piOUSVRg,19
- analyzeaudio-0.0.15.dist-info/RECORD,,