sonusai 0.18.9__py3-none-any.whl → 0.19.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. sonusai/__init__.py +20 -29
  2. sonusai/aawscd_probwrite.py +18 -18
  3. sonusai/audiofe.py +93 -80
  4. sonusai/calc_metric_spenh.py +395 -321
  5. sonusai/data/genmixdb.yml +5 -11
  6. sonusai/{gentcst.py → deprecated/gentcst.py} +146 -149
  7. sonusai/{plot.py → deprecated/plot.py} +177 -131
  8. sonusai/{tplot.py → deprecated/tplot.py} +124 -102
  9. sonusai/doc/__init__.py +1 -1
  10. sonusai/doc/doc.py +112 -177
  11. sonusai/doc.py +10 -10
  12. sonusai/genft.py +93 -77
  13. sonusai/genmetrics.py +59 -46
  14. sonusai/genmix.py +116 -104
  15. sonusai/genmixdb.py +194 -153
  16. sonusai/lsdb.py +56 -66
  17. sonusai/main.py +23 -20
  18. sonusai/metrics/__init__.py +2 -0
  19. sonusai/metrics/calc_audio_stats.py +29 -24
  20. sonusai/metrics/calc_class_weights.py +7 -7
  21. sonusai/metrics/calc_optimal_thresholds.py +5 -7
  22. sonusai/metrics/calc_pcm.py +3 -3
  23. sonusai/metrics/calc_pesq.py +10 -7
  24. sonusai/metrics/calc_phase_distance.py +3 -3
  25. sonusai/metrics/calc_sa_sdr.py +10 -8
  26. sonusai/metrics/calc_segsnr_f.py +15 -17
  27. sonusai/metrics/calc_speech.py +105 -47
  28. sonusai/metrics/calc_wer.py +35 -32
  29. sonusai/metrics/calc_wsdr.py +10 -7
  30. sonusai/metrics/class_summary.py +30 -27
  31. sonusai/metrics/confusion_matrix_summary.py +25 -22
  32. sonusai/metrics/one_hot.py +91 -57
  33. sonusai/metrics/snr_summary.py +53 -46
  34. sonusai/mixture/__init__.py +19 -14
  35. sonusai/mixture/audio.py +4 -6
  36. sonusai/mixture/augmentation.py +37 -43
  37. sonusai/mixture/class_count.py +5 -14
  38. sonusai/mixture/config.py +292 -225
  39. sonusai/mixture/constants.py +41 -30
  40. sonusai/mixture/data_io.py +155 -0
  41. sonusai/mixture/datatypes.py +111 -108
  42. sonusai/mixture/db_datatypes.py +54 -70
  43. sonusai/mixture/eq_rule_is_valid.py +6 -9
  44. sonusai/mixture/feature.py +40 -38
  45. sonusai/mixture/generation.py +522 -389
  46. sonusai/mixture/helpers.py +217 -272
  47. sonusai/mixture/log_duration_and_sizes.py +16 -13
  48. sonusai/mixture/mixdb.py +669 -477
  49. sonusai/mixture/soundfile_audio.py +12 -17
  50. sonusai/mixture/sox_audio.py +91 -112
  51. sonusai/mixture/sox_augmentation.py +8 -9
  52. sonusai/mixture/spectral_mask.py +4 -6
  53. sonusai/mixture/target_class_balancing.py +41 -36
  54. sonusai/mixture/targets.py +69 -67
  55. sonusai/mixture/tokenized_shell_vars.py +23 -23
  56. sonusai/mixture/torchaudio_audio.py +14 -15
  57. sonusai/mixture/torchaudio_augmentation.py +23 -27
  58. sonusai/mixture/truth.py +48 -26
  59. sonusai/mixture/truth_functions/__init__.py +26 -0
  60. sonusai/mixture/truth_functions/crm.py +56 -38
  61. sonusai/mixture/truth_functions/datatypes.py +37 -0
  62. sonusai/mixture/truth_functions/energy.py +85 -59
  63. sonusai/mixture/truth_functions/file.py +30 -30
  64. sonusai/mixture/truth_functions/phoneme.py +14 -7
  65. sonusai/mixture/truth_functions/sed.py +71 -45
  66. sonusai/mixture/truth_functions/target.py +69 -106
  67. sonusai/mkwav.py +52 -85
  68. sonusai/onnx_predict.py +46 -43
  69. sonusai/queries/__init__.py +3 -1
  70. sonusai/queries/queries.py +100 -59
  71. sonusai/speech/__init__.py +2 -0
  72. sonusai/speech/l2arctic.py +24 -23
  73. sonusai/speech/librispeech.py +16 -17
  74. sonusai/speech/mcgill.py +22 -21
  75. sonusai/speech/textgrid.py +32 -25
  76. sonusai/speech/timit.py +45 -42
  77. sonusai/speech/vctk.py +14 -13
  78. sonusai/speech/voxceleb.py +26 -20
  79. sonusai/summarize_metric_spenh.py +11 -10
  80. sonusai/utils/__init__.py +4 -3
  81. sonusai/utils/asl_p56.py +1 -1
  82. sonusai/utils/asr.py +37 -17
  83. sonusai/utils/asr_functions/__init__.py +2 -0
  84. sonusai/utils/asr_functions/aaware_whisper.py +18 -12
  85. sonusai/utils/audio_devices.py +12 -12
  86. sonusai/utils/braced_glob.py +6 -8
  87. sonusai/utils/calculate_input_shape.py +1 -4
  88. sonusai/utils/compress.py +2 -2
  89. sonusai/utils/convert_string_to_number.py +1 -3
  90. sonusai/utils/create_timestamp.py +1 -1
  91. sonusai/utils/create_ts_name.py +2 -2
  92. sonusai/utils/dataclass_from_dict.py +1 -1
  93. sonusai/utils/docstring.py +6 -6
  94. sonusai/utils/energy_f.py +9 -7
  95. sonusai/utils/engineering_number.py +56 -54
  96. sonusai/utils/get_label_names.py +8 -10
  97. sonusai/utils/human_readable_size.py +2 -2
  98. sonusai/utils/model_utils.py +3 -5
  99. sonusai/utils/numeric_conversion.py +2 -4
  100. sonusai/utils/onnx_utils.py +43 -32
  101. sonusai/utils/parallel.py +40 -27
  102. sonusai/utils/print_mixture_details.py +25 -22
  103. sonusai/utils/ranges.py +12 -12
  104. sonusai/utils/read_predict_data.py +11 -9
  105. sonusai/utils/reshape.py +19 -26
  106. sonusai/utils/seconds_to_hms.py +1 -1
  107. sonusai/utils/stacked_complex.py +8 -16
  108. sonusai/utils/stratified_shuffle_split.py +29 -27
  109. sonusai/utils/write_audio.py +2 -2
  110. sonusai/utils/yes_or_no.py +3 -3
  111. sonusai/vars.py +14 -14
  112. {sonusai-0.18.9.dist-info → sonusai-0.19.5.dist-info}/METADATA +20 -21
  113. sonusai-0.19.5.dist-info/RECORD +125 -0
  114. {sonusai-0.18.9.dist-info → sonusai-0.19.5.dist-info}/WHEEL +1 -1
  115. sonusai/mixture/truth_functions/data.py +0 -58
  116. sonusai/utils/read_mixture_data.py +0 -14
  117. sonusai-0.18.9.dist-info/RECORD +0 -125
  118. {sonusai-0.18.9.dist-info → sonusai-0.19.5.dist-info}/entry_points.txt +0 -0
sonusai/utils/read_predict_data.py CHANGED
@@ -7,13 +7,12 @@ def read_predict_data(filename: str) -> Predict:
  """Read predict data from given HDF5 file and return it."""
  import h5py

- from sonusai import SonusAIError
  from sonusai import logger

- logger.debug(f'Reading prediction data from {filename}')
- with h5py.File(filename, 'r') as f:
+ logger.debug(f"Reading prediction data from {filename}")
+ with h5py.File(filename, "r") as f:
  # prediction data is either [frames, num_classes], or [frames, timesteps, num_classes]
- predict = np.array(f['predict'])
+ predict = np.array(f["predict"])

  if predict.ndim == 2:
  return predict
@@ -21,10 +20,13 @@ def read_predict_data(filename: str) -> Predict:
  if predict.ndim == 3:
  frames, timesteps, num_classes = predict.shape

- logger.debug(f'Reshaping prediction data in {filename} 'f''
- f'from [{frames}, {timesteps}, {num_classes}] '
- f'to [{frames * timesteps}, {num_classes}]')
- predict = np.reshape(predict, [frames * timesteps, num_classes], order='F')
+ logger.debug(
+ f"Reshaping prediction data in {filename} "
+ f""
+ f"from [{frames}, {timesteps}, {num_classes}] "
+ f"to [{frames * timesteps}, {num_classes}]"
+ )
+ predict = np.reshape(predict, [frames * timesteps, num_classes], order="F")

  return predict

- raise SonusAIError(f'Invalid prediction data dimensions in {filename}')
+ raise RuntimeError(f"Invalid prediction data dimensions in {filename}")
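Note: a minimal sketch (not part of the package) of what the 3D-to-2D reshape above does: a [frames, timesteps, num_classes] prediction is flattened to [frames * timesteps, num_classes] in Fortran (column-major) order, matching the np.reshape(..., order="F") call; the sizes below are arbitrary example values.

import numpy as np

frames, timesteps, num_classes = 4, 3, 2
predict = np.zeros((frames, timesteps, num_classes), dtype=np.float32)

# Collapse the frames and timesteps axes the same way read_predict_data does
flattened = np.reshape(predict, [frames * timesteps, num_classes], order="F")
print(predict.shape, "->", flattened.shape)  # (4, 3, 2) -> (12, 2)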
sonusai/utils/reshape.py CHANGED
@@ -1,5 +1,3 @@
- from typing import Optional
-
  import numpy as np

  from sonusai.mixture.datatypes import Feature
@@ -11,12 +9,14 @@ def get_input_shape(feature: Feature) -> tuple[int, ...]:
  return feature.shape[1:]


- def reshape_inputs(feature: Feature,
- batch_size: int,
- truth: Optional[Truth] = None,
- timesteps: int = 0,
- flatten: bool = False,
- add1ch: bool = False) -> tuple[Feature, Optional[Truth]]:
+ def reshape_inputs(
+ feature: Feature,
+ batch_size: int,
+ truth: Truth | None = None,
+ timesteps: int = 0,
+ flatten: bool = False,
+ add1ch: bool = False,
+ ) -> tuple[Feature, Truth | None]:
  """Check SonusAI feature and truth data and reshape feature of size [frames, strides, feature_parameters] into
  one of several options:

@@ -38,16 +38,14 @@ def reshape_inputs(feature: Feature,
  feature reshaped feature
  truth reshaped truth
  """
- from sonusai import SonusAIError
-
  frames, strides, feature_parameters = feature.shape
  if truth is not None:
  truth_frames, num_classes = truth.shape
  # Double-check correctness of inputs
  if frames != truth_frames:
- raise SonusAIError('Frames in feature and truth do not match')
+ raise ValueError("Frames in feature and truth do not match")
  else:
- num_classes = None
+ num_classes = 0

  if flatten:
  feature = np.reshape(feature, (frames, strides * feature_parameters))
@@ -64,12 +62,12 @@ def reshape_inputs(feature: Feature,
  fr2drop = frames_rem + bf_rem
  if fr2drop:
  if feature.ndim == 2:
- feature = feature[0:-fr2drop, ] # flattened input
+ feature = feature[0:-fr2drop,] # flattened input
  elif feature.ndim == 3:
- feature = feature[0:-fr2drop, ] # un-flattened input
+ feature = feature[0:-fr2drop,] # un-flattened input

  if truth is not None:
- truth = truth[0:-fr2drop, ]
+ truth = truth[0:-fr2drop,]

  # Reshape
  if feature.ndim == 2: # flattened input
@@ -88,9 +86,9 @@ def reshape_inputs(feature: Feature,
  # Drop frames if remainder exists (not fitting into a multiple of new number of sequences)
  fr2drop = feature.shape[0] % batch_size
  if fr2drop > 0:
- feature = feature[0:-fr2drop, ]
+ feature = feature[0:-fr2drop,]
  if truth is not None:
- truth = truth[0:-fr2drop, ]
+ truth = truth[0:-fr2drop,]

  # Add channel dimension if required for input to model (i.e. for cnn type input)
  if add1ch:
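Note: a rough, self-contained sketch (example values, not the package function itself) of the frame-dropping idea in reshape_inputs: with timesteps > 0, the frame count must divide evenly into batches of batch_size sequences of timesteps frames, so any remainder is dropped before reshaping.

import numpy as np

frames, strides, feature_parameters = 50, 6, 8
batch_size, timesteps = 4, 3
feature = np.zeros((frames, strides, feature_parameters), dtype=np.float32)

# Drop the remainder that does not fit a whole batch of sequences
frames_per_batch = batch_size * timesteps
fr2drop = frames % frames_per_batch  # 50 % 12 -> 2 frames dropped
if fr2drop:
    feature = feature[:-fr2drop]

# Regroup the remaining frames into [sequences, timesteps, strides, feature_parameters]
sequences = feature.shape[0] // timesteps
feature = np.reshape(feature, (sequences, timesteps, strides, feature_parameters))
print(feature.shape)  # (16, 3, 6, 8)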
@@ -119,25 +117,20 @@ def get_num_classes_from_predict(predict: Predict, timesteps: int = 0) -> int:
  return dims[1]


- def reshape_outputs(predict: Predict,
- truth: Optional[Truth] = None,
- timesteps: int = 0) -> tuple[Predict, Optional[Truth]]:
+ def reshape_outputs(predict: Predict, truth: Truth | None = None, timesteps: int = 0) -> tuple[Predict, Truth | None]:
  """Reshape model output data.

  truth and predict can be either [frames, num_classes], or [frames, timesteps, num_classes]
  In binary case, num_classes dim may not exist; detect this and set num_classes to 1.
  """
- from sonusai import SonusAIError
-
- if truth is not None:
- if predict.shape != truth.shape:
- raise SonusAIError('predict and truth shapes do not match')
+ if truth is not None and predict.shape != truth.shape:
+ raise ValueError("predict and truth shapes do not match")

  ndim = predict.ndim
  shape = predict.shape

  if not (0 < ndim <= 3):
- raise SonusAIError(f'do not know how to reshape data with {ndim} dimensions')
+ raise ValueError(f"do not know how to reshape data with {ndim} dimensions")

  if ndim == 3 or (ndim == 2 and timesteps > 0):
  if ndim == 2:
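Note: a sketch of one way to handle the binary case mentioned in the docstring above (a hypothetical illustration, not necessarily the exact implementation): when the output has no class axis, a trailing dimension of 1 is added before flattening.

import numpy as np

timesteps = 8
predict = np.zeros((100, timesteps), dtype=np.float32)  # [frames, timesteps], no class axis
if predict.ndim == 2 and timesteps > 0:
    predict = predict[..., np.newaxis]  # treat as a single class -> [frames, timesteps, 1]

frames, timesteps, num_classes = predict.shape
predict = np.reshape(predict, [frames * timesteps, num_classes], order="F")
print(predict.shape)  # (800, 1)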
sonusai/utils/seconds_to_hms.py CHANGED
@@ -4,4 +4,4 @@ def seconds_to_hms(seconds: float) -> str:
  s = seconds - h * 3600
  m = int(s / 60)
  s = int(seconds - h * 3600 - m * 60)
- return f'{h:d}:{m:02d}:{s:02d} (H:MM:SS)'
+ return f"{h:d}:{m:02d}:{s:02d} (H:MM:SS)"
sonusai/utils/stacked_complex.py CHANGED
@@ -12,14 +12,12 @@ def stack_complex(unstacked: np.ndarray) -> np.ndarray:
  :return: A stacked array
  :raises TypeError:
  """
- from sonusai import SonusAIError
-
  if not unstacked.ndim > 1:
- raise SonusAIError('unstacked must have more than 1 dimension')
+ raise ValueError("unstacked must have more than 1 dimension")

  shape = list(unstacked.shape)
  shape[-1] = shape[-1] * 2
- stacked = np.empty(shape, dtype=np.complex64)
+ stacked = np.empty(shape, dtype=np.float32)
  half = unstacked.shape[-1]
  stacked[..., :half] = np.real(unstacked)
  stacked[..., half:] = np.imag(unstacked)
@@ -35,13 +33,11 @@ def unstack_complex(stacked: np.ndarray) -> np.ndarray:
  :return: An unstacked complex array
  :raises TypeError:
  """
- from sonusai import SonusAIError
-
  if not stacked.ndim > 1:
- raise SonusAIError('stacked must have more than 1 dimension')
+ raise ValueError("stacked must have more than 1 dimension")

  if stacked.shape[-1] % 2 != 0:
- raise SonusAIError('last dimension of stacked must be a multiple of 2')
+ raise ValueError("last dimension of stacked must be a multiple of 2")

  half = stacked.shape[-1] // 2
  unstacked = 1j * stacked[..., half:]
@@ -58,13 +54,11 @@ def stacked_complex_real(stacked: np.ndarray) -> np.ndarray:
  :return: The real elements
  :raises TypeError:
  """
- from sonusai import SonusAIError
-
  if not stacked.ndim > 1:
- raise SonusAIError('stacked must have more than 1 dimension')
+ raise ValueError("stacked must have more than 1 dimension")

  if stacked.shape[-1] % 2 != 0:
- raise SonusAIError('last dimension of stacked must be a multiple of 2')
+ raise ValueError("last dimension of stacked must be a multiple of 2")

  half = stacked.shape[-1] // 2
  return stacked[..., :half]
@@ -78,13 +72,11 @@ def stacked_complex_imag(stacked: np.ndarray) -> np.ndarray:
  :return: The imaginary elements
  :raises TypeError:
  """
- from sonusai import SonusAIError
-
  if not stacked.ndim > 1:
- raise SonusAIError('stacked must have more than 1 dimension')
+ raise ValueError("stacked must have more than 1 dimension")

  if stacked.shape[-1] % 2 != 0:
- raise SonusAIError('last dimension of stacked must be a multiple of 2')
+ raise ValueError("last dimension of stacked must be a multiple of 2")

  half = stacked.shape[-1] // 2
  return stacked[..., half:]
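Note: a minimal round-trip sketch of the stack/unstack convention above: the real parts fill the first half of the last axis and the imaginary parts the second half. The stacked container holds real values, which is what the dtype change from complex64 to float32 makes explicit.

import numpy as np

unstacked = np.array([[1 + 2j, 3 + 4j]], dtype=np.complex64)  # shape (1, 2)

half = unstacked.shape[-1]
stacked = np.empty((1, half * 2), dtype=np.float32)
stacked[..., :half] = np.real(unstacked)
stacked[..., half:] = np.imag(unstacked)
# stacked -> [[1., 3., 2., 4.]]

restored = stacked[..., :half] + 1j * stacked[..., half:]
assert np.array_equal(restored, unstacked)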
sonusai/utils/stratified_shuffle_split.py CHANGED
@@ -1,14 +1,14 @@
- from typing import Optional
-
  import numpy as np

  from sonusai.mixture import MixtureDatabase


- def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
- vsplit: float = 0.2,
- nsplit: int = 0,
- rnd_seed: Optional[int] = 0) -> tuple[list[int], list[int], np.ndarray, np.ndarray]:
+ def stratified_shuffle_split_mixid(
+ mixdb: MixtureDatabase,
+ vsplit: float = 0.2,
+ nsplit: int = 0,
+ rnd_seed: int | None = 0,
+ ) -> tuple[list[int], list[int], np.ndarray, np.ndarray]:
  """
  Create a training and test/validation list of mixture IDs from all mixtures in a mixture database.
  The test/validation split is specified by vsplit (0.0 to 1.0), default 0.2.
@@ -35,20 +35,18 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  import random
  from copy import deepcopy

- from sonusai import SonusAIError
  from sonusai import logger
  from sonusai.mixture import get_class_count_from_mixids
- from sonusai.mixture import get_truth_indices_for_target

  if vsplit < 0 or vsplit > 1:
- raise SonusAIError('vsplit must be between 0 and 1')
+ raise ValueError("vsplit must be between 0 and 1")

  a_class_mixid: dict[int, list[int]] = {i + 1: [] for i in range(mixdb.num_classes)}
  for mixid, mixture in enumerate(mixdb.mixtures):
  class_count = get_class_count_from_mixids(mixdb, mixid)
- if any(class_count) or mixdb.truth_mutex == 0:
- for truth_index in get_truth_indices_for_target(mixdb.target_files[mixture.targets[0].file_id]):
- a_class_mixid[truth_index].append(mixid)
+ if any(class_count):
+ for class_index in mixdb.target_files[mixture.targets[0].file_id].class_indices:
+ a_class_mixid[class_index].append(mixid)
  else:
  # no counts and mutex mode means this is all 'other' class
  a_class_mixid[mixdb.num_classes].append(mixid)
@@ -80,11 +78,11 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  # randomize order
  random.shuffle(indices)

- t_class_mixid[ci] = [a_class_mixid[ci + 1][ii] for ii in indices[0:t_num_mixid[ci]]]
- v_class_mixid[ci] = [a_class_mixid[ci + 1][ii] for ii in indices[t_num_mixid[ci]:]]
+ t_class_mixid[ci] = [a_class_mixid[ci + 1][ii] for ii in indices[0 : t_num_mixid[ci]]]
+ v_class_mixid[ci] = [a_class_mixid[ci + 1][ii] for ii in indices[t_num_mixid[ci] :]]

  if np.any(~(t_num_mixid > 0)):
- logger.warning(f'Some classes have zero coverage: {np.where(~(t_num_mixid > 0))[0]}')
+ logger.warning(f"Some classes have zero coverage: {np.where(~(t_num_mixid > 0))[0]}")

  # Stratify over non-zero classes
  nz_indices = np.where(t_num_mixid > 0)[0]
@@ -97,8 +95,10 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  # 2nd stage stratify by class_count/min(class_count-n3) n2 times
  n2 = int(max(min_class - n0 - n3, 0))

- logger.info(f'Stratifying training, x1 cnt {n0}: x(class_count/{n2}): x1 cnt {n3} x1, '
- f'for {len(nz_indices)} populated classes')
+ logger.info(
+ f"Stratifying training, x1 cnt {n0}: x(class_count/{n2}): x1 cnt {n3} x1, "
+ f"for {len(nz_indices)} populated classes"
+ )

  # initialize source list
  tt = deepcopy(t_class_mixid)
@@ -116,13 +116,13 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  # which will leave approx n3 remaining
  if n2 > 0:
  # should always be non-zero
- min_class = np.min(t_num_mixid2 - n3)
+ min_class = int(np.min(t_num_mixid2 - n3))
  class_count = np.floor((t_num_mixid2 - n3) / min_class)
  # class_count = np.maximum(np.floor((t_num_mixid2 - n3) / n2),0) # Counts per class
  for _ in range(min_class):
  for ci in range(mixdb.num_classes):
  if class_count[ci] > 0:
- for cc in range(int(class_count[ci])):
+ for _ in range(int(class_count[ci])):
  # append first
  t_mixid.append(tt[ci][0])
  del tt[ci][0]
@@ -133,10 +133,10 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  t_mixid = _extract_remaining_mixids(mixdb, t_mixid, t_num_mixid2, tt)

  if len(t_mixid) != sum(t_num_mixid):
- logger.warning('Final stratified training list length does not match starting list length.')
+ logger.warning("Final stratified training list length does not match starting list length.")

  if any(t_num_mixid2) or any(tt):
- logger.warning('Remaining training mixid list not empty.')
+ logger.warning("Remaining training mixid list not empty.")

  # Now stratify the validation list, which is probably not as important, so use simple method
  # initialize source list
@@ -145,18 +145,20 @@ def stratified_shuffle_split_mixid(mixdb: MixtureDatabase,
  v_mixid = _extract_remaining_mixids(mixdb, [], v_num_mixid2, vv)

  if len(v_mixid) != sum(v_num_mixid):
- logger.warning('Final stratified validation list length does not match starting lists length.')
+ logger.warning("Final stratified validation list length does not match starting lists length.")

  if any(v_num_mixid2) or any(vv):
- logger.warning('Remaining validation mixid list not empty.')
+ logger.warning("Remaining validation mixid list not empty.")

  return t_mixid, v_mixid, t_num_mixid, v_num_mixid


- def _extract_remaining_mixids(mixdb: MixtureDatabase,
- mixid: list[int],
- num_mixid: np.ndarray,
- class_mixid: list[list[int]]) -> list[int]:
+ def _extract_remaining_mixids(
+ mixdb: MixtureDatabase,
+ mixid: list[int],
+ num_mixid: np.ndarray,
+ class_mixid: list[list[int]],
+ ) -> list[int]:
  for _ in range(max(num_mixid)):
  for ci in range(mixdb.num_classes):
  if num_mixid[ci] > 0:
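Note: a generic illustration of the stratification idea (not the SonusAI algorithm itself, which also balances counts in multiple stages): split item IDs per class, shuffle each class independently, and carve off vsplit of each class for validation so every class appears in both lists.

import random

def simple_stratified_split(
    class_to_ids: dict[int, list[int]], vsplit: float = 0.2, seed: int = 0
) -> tuple[list[int], list[int]]:
    rng = random.Random(seed)
    train: list[int] = []
    val: list[int] = []
    for ids in class_to_ids.values():
        ids = ids[:]  # copy so the caller's lists are untouched
        rng.shuffle(ids)
        n_val = int(len(ids) * vsplit)
        val += ids[:n_val]
        train += ids[n_val:]
    return train, val

t_ids, v_ids = simple_stratified_split({1: list(range(10)), 2: list(range(10, 16))})
print(len(t_ids), len(v_ids))  # 13 3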
sonusai/utils/write_audio.py CHANGED
@@ -3,7 +3,7 @@ from sonusai.mixture.datatypes import AudioT


  def write_audio(name: str, audio: AudioT, sample_rate: int = SAMPLE_RATE) -> None:
- """ Write an audio file.
+ """Write an audio file.

  To write multiple channels, use a 2D array of shape [channels, samples].
  The bits per sample and PCM/float are determined by the data type.
@@ -17,7 +17,7 @@ def write_audio(name: str, audio: AudioT, sample_rate: int = SAMPLE_RATE) -> Non
  if data.dim() == 1:
  data = torch.reshape(data, (1, data.shape[0]))
  if data.dim() != 2:
- raise ValueError(f'audio must be a 1D or 2D array')
+ raise ValueError("audio must be a 1D or 2D array")

  # Assuming data has more samples than channels, check if array needs to be transposed
  if data.shape[1] < data.shape[0]:
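Note: a small sketch (example values only) of the shape handling described above: mono 1D audio becomes [1, samples], and an array that arrives as [samples, channels] would be transposed so channels come first.

import torch

data = torch.zeros(16000)  # 1 second of mono audio at 16 kHz
if data.dim() == 1:
    data = torch.reshape(data, (1, data.shape[0]))  # -> [channels, samples]

# If there are more "channels" than samples, the array is assumed to be transposed
if data.shape[1] < data.shape[0]:
    data = data.t()

print(data.shape)  # torch.Size([1, 16000])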
sonusai/utils/yes_or_no.py CHANGED
@@ -1,8 +1,8 @@
  def yes_or_no(question: str) -> bool:
  """Wait for yes or no input"""
  while True:
- reply = str(input(question + ' (y/n)?: ')).lower().strip()
- if reply[:1] == 'y':
+ reply = str(input(question + " (y/n)?: ")).lower().strip()
+ if reply[:1] == "y":
  return True
- if reply[:1] == 'n':
+ if reply[:1] == "n":
  return False
sonusai/vars.py CHANGED
@@ -23,18 +23,18 @@ def main() -> None:

  from sonusai.mixture import DEFAULT_NOISE

- print('Custom SonusAI variables:')
- print('')
- print(f'${{default_noise}}: {DEFAULT_NOISE}')
- print('')
- print('SonusAI recognized environment variables:')
- print('')
- print(f'DEEPGRAM_API_KEY {getenv("DEEPGRAM_API_KEY")}')
- print(f'GOOGLE_SPEECH_API_KEY {getenv("GOOGLE_SPEECH_API_KEY")}')
- print('')
- items = ['DEEPGRAM_API_KEY', 'GOOGLE_SPEECH_API_KEY']
- items += [item for item in environ.keys() if item.upper().startswith("AIXP_WHISPER_")]
-
-
- if __name__ == '__main__':
+ print("Custom SonusAI variables:")
+ print("")
+ print(f"${{default_noise}}: {DEFAULT_NOISE}")
+ print("")
+ print("SonusAI recognized environment variables:")
+ print("")
+ print(f"DEEPGRAM_API_KEY {getenv('DEEPGRAM_API_KEY')}")
+ print(f"GOOGLE_SPEECH_API_KEY {getenv('GOOGLE_SPEECH_API_KEY')}")
+ print("")
+ items = ["DEEPGRAM_API_KEY", "GOOGLE_SPEECH_API_KEY"]
+ items += [item for item in environ if item.upper().startswith("AIXP_WHISPER_")]
+
+
+ if __name__ == "__main__":
  main()
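Note: a standalone sketch of the environment-variable listing that vars.py performs; the names and formatting here are illustrative, not the exact output.

import os

recognized = ["DEEPGRAM_API_KEY", "GOOGLE_SPEECH_API_KEY"]
recognized += [name for name in os.environ if name.upper().startswith("AIXP_WHISPER_")]
for name in recognized:
    print(f"{name:30} {os.getenv(name)}")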
{sonusai-0.18.9.dist-info → sonusai-0.19.5.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sonusai
- Version: 0.18.9
+ Version: 0.19.5
  Summary: Framework for building deep neural network models for sound, speech, and voice AI
  Home-page: https://aaware.com
  License: GPL-3.0-only
@@ -8,39 +8,38 @@ Author: Chris Eddington
  Author-email: chris@aaware.com
  Maintainer: Chris Eddington
  Maintainer-email: chris@aaware.com
- Requires-Python: >=3.9,<3.12
+ Requires-Python: >=3.11,<3.12
  Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
- Requires-Dist: PyYAML (>=6.0.1,<7.0.0)
- Requires-Dist: dataclasses-json (>=0.6.1,<0.7.0)
+ Requires-Dist: dataclasses-json (>=0.6.7,<0.7.0)
  Requires-Dist: docopt (>=0.6.2,<0.7.0)
- Requires-Dist: h5py (>=3.11.0,<4.0.0)
- Requires-Dist: jiwer (>=3.0.3,<4.0.0)
- Requires-Dist: librosa (>=0.10.1,<0.11.0)
- Requires-Dist: matplotlib (>=3.8.0,<4.0.0)
+ Requires-Dist: h5py (>=3.12.1,<4.0.0)
+ Requires-Dist: jiwer (>=3.0.4,<4.0.0)
+ Requires-Dist: librosa (>=0.10.2.post1,<0.11.0)
+ Requires-Dist: matplotlib (>=3.9.2,<4.0.0)
  Requires-Dist: mgzip (>=0.2.1,<0.3.0)
- Requires-Dist: numpy (>=1.26.4,<2.0.0)
- Requires-Dist: onnx (>=1.14.1,<2.0.0)
- Requires-Dist: onnxruntime (>=1.16.1,<2.0.0)
- Requires-Dist: paho-mqtt (>=2.0.0,<3.0.0)
- Requires-Dist: pandas (>=2.1.1,<3.0.0)
+ Requires-Dist: numpy (>=1,<2)
+ Requires-Dist: onnx (>=1.17.0,<2.0.0)
+ Requires-Dist: onnxruntime (>=1.19.2,<2.0.0)
+ Requires-Dist: paho-mqtt (>=2.1.0,<3.0.0)
+ Requires-Dist: pandas (>=2.2.3,<3.0.0)
  Requires-Dist: pesq (>=0.0.4,<0.0.5)
  Requires-Dist: praatio (>=6.2.0,<7.0.0)
- Requires-Dist: psutil (>=5,<6)
- Requires-Dist: pyaaware (>=1.5.7,<2.0.0)
+ Requires-Dist: psutil (>=6.0.0,<7.0.0)
+ Requires-Dist: pyaaware (>=1.5.18,<2.0.0)
  Requires-Dist: pyaudio (>=0.2.14,<0.3.0)
  Requires-Dist: pydub (>=0.25.1,<0.26.0)
- Requires-Dist: pystoi (>=0.4.0,<0.5.0)
- Requires-Dist: requests (>=2.31.0,<3.0.0)
+ Requires-Dist: pystoi (>=0.4.1,<0.5.0)
+ Requires-Dist: pyyaml (>=6.0.2,<7.0.0)
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
+ Requires-Dist: rich (>=13.9.4,<14.0.0)
  Requires-Dist: samplerate (>=0.2.1,<0.3.0)
  Requires-Dist: soundfile (>=0.12.1,<0.13.0)
- Requires-Dist: sox (>=1.4.1,<2.0.0)
+ Requires-Dist: sox (>=1.5.0,<2.0.0)
  Requires-Dist: torch (>=2.2,<2.3)
  Requires-Dist: torchaudio (>=2.2,<2.3)
- Requires-Dist: tqdm (>=4.66.1,<5.0.0)
+ Requires-Dist: tqdm (>=4.66.5,<5.0.0)
  Description-Content-Type: text/x-rst

  SonusAI: Framework for simplified creation of deep NN models for sound, speech, and voice AI
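Note: a quick way (using the third-party packaging library, not a sonusai dependency) to check an interpreter version against the tightened Requires-Python constraint above.

from packaging.specifiers import SpecifierSet

requires_python = SpecifierSet(">=3.11,<3.12")
print(requires_python.contains("3.11.9"))   # True
print(requires_python.contains("3.10.14"))  # False: 0.19.5 drops Python 3.9/3.10 support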
sonusai-0.19.5.dist-info/RECORD ADDED
@@ -0,0 +1,125 @@
+ sonusai/__init__.py,sha256=AzY-ysnmnzg0dDsephK12AuPoClcf8ggphjk4i9Qe-I,2762
+ sonusai/aawscd_probwrite.py,sha256=DZwvkrNy-v4meCShSNAEtlwLnd-3OcpbHkHYvJt2jFI,3652
+ sonusai/audiofe.py,sha256=iFdthh4UrOvziT8urjrjD7dACWZPQz9orM5bVAW3WSQ,11269
+ sonusai/calc_metric_spenh.py,sha256=w7lCuAI01B_E_pWQnGlGQQcmzopi7q3QeO5ByMXnzos,46712
+ sonusai/data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ sonusai/data/genmixdb.yml,sha256=U_kLbE7gZ5rA7yNSB2NW7eK5dnYP5grJVMR321VMLt8,940
+ sonusai/data/speech_ma01_01.wav,sha256=PK0vMKg-NR6rPE3KouxHGF6PKXnJCr7AwjMqfu98LUA,76644
+ sonusai/data/whitenoise.wav,sha256=I2umov0m34y56F9IsIBi1XtE76ZeZaSKDf70cJRe3pI,1920044
+ sonusai/deprecated/gentcst.py,sha256=nKbHy3aHreHqA-XnLQOzOApS8RuTNUFqnx52a8I5zLQ,19921
+ sonusai/deprecated/plot.py,sha256=xL0w8Dtjdns2KX8tbTrdBGXviy_aoV3WUJSVKPZkQng,17423
+ sonusai/deprecated/tplot.py,sha256=0p238DvTaP4oU9y-dp0JdLaTV4TKrooAwbx7zdz_QAc,14641
+ sonusai/doc/__init__.py,sha256=KyQ26Um0RM8A3GYsb_tbFH64RwpoAw6lja2f_moUWas,33
+ sonusai/doc/doc.py,sha256=VZXauwbOb-VIufWw-lu0yfrd6jMRPeFeVPaaEjZNvn4,18881
+ sonusai/doc.py,sha256=zSmXpioB0YS_5-7kqfS5cr--veSaXkxRKzldId9Hyoc,878
+ sonusai/genft.py,sha256=nMpmn1rhbWWUaFBS8AEb4v2_iP-sPxPwQEb2etULJzY,5610
+ sonusai/genmetrics.py,sha256=rOzkQT6ihUBE-nDTzRilJTBeSgVQiDFveVlt75ROIBk,5225
+ sonusai/genmix.py,sha256=eYq4B9aZ8PXjgv7_Y2oHJH4QUPAMw5Kk8_iN0spsIng,6579
+ sonusai/genmixdb.py,sha256=oby7eigG-JhJGZIBKs9WIO-X5a5JMyqUAG4krfIUhos,19526
+ sonusai/lsdb.py,sha256=0HOGDDndB3LT9cz9AaxKIpt9vslAoSP4F239gply4Xg,5149
+ sonusai/main.py,sha256=HbnEia1B1-Z-mlHkLfojH8aj9GIpL1Btw3oH60T_CCQ,2590
+ sonusai/metrics/__init__.py,sha256=ssV6JEK_oklRSocsp6HMcG-GtJvV8IkRQtdKhHHmwU8,878
+ sonusai/metrics/calc_audio_stats.py,sha256=IHgYEPaob_Nw35SaH3tyHp7Wwju4f-2-BJZ99JyeLmc,1572
+ sonusai/metrics/calc_class_weights.py,sha256=DP36f0W5Vr8v8LhzmOWFxb1cgXFV-pzN6i4lbG7xMhw,3796
+ sonusai/metrics/calc_optimal_thresholds.py,sha256=y2bTF409iJBOK0WblJ62XcF83BT3QvPuu_nb7xH4Vt8,3433
+ sonusai/metrics/calc_pcm.py,sha256=yBQV9UJ1GK5f4c_8TNABMtZR-xyStKJCsSTT0FQGa50,1886
+ sonusai/metrics/calc_pesq.py,sha256=dCztUTaPyyjkUI2DpejqhiPzQv4FOtigzffFnDXDs-M,949
+ sonusai/metrics/calc_phase_distance.py,sha256=-aNZEkfqxv3UjQsYcGqJ-Ips-ZAktBcykGJ6O2seHjA,1863
+ sonusai/metrics/calc_sa_sdr.py,sha256=IdCzlQ_w94A3eK42t-gy_DrFN_tziwfDjTU6-WKuCFs,2531
+ sonusai/metrics/calc_sample_weights.py,sha256=0O2EH1-FKlCa0HFgKPUF1BJiknR1hCH7zLbXnoXH7Ag,945
+ sonusai/metrics/calc_segsnr_f.py,sha256=paMwdPlaqKaIo6nYOV5qYoJ2GmisuHObrZ7desMLhPQ,2896
+ sonusai/metrics/calc_speech.py,sha256=8QhOpupbQOte9l3a4RsUrmSsG0qXtAeMcGfT2tMDzhs,14776
+ sonusai/metrics/calc_wer.py,sha256=1MQYMx8ldHeodtJEtGibvDKhvSaGe6DBmZV4L8qOMgg,2362
+ sonusai/metrics/calc_wsdr.py,sha256=vcALY-zuhyThRa1QMz2qW8L9kSBc2v32gV9u8bV7VaM,2556
+ sonusai/metrics/class_summary.py,sha256=qBWxtQ9YKJUzk5LsjDuwnYEKrpfwubF-vZZL0HbcSH8,3193
+ sonusai/metrics/confusion_matrix_summary.py,sha256=eZFmt7k5Eanx7FYPwcfnzVq-AMYNSP03gfj9GtsfHWI,3841
+ sonusai/metrics/one_hot.py,sha256=jo-YsiDVr3PYoZTHhA0t24-VpSXEsZ4lukrP1xFbXAQ,13810
+ sonusai/metrics/snr_summary.py,sha256=sRUE6XI5PFFhaSpag537WUnukbMizOeMCwVQ2ww34Cw,5617
+ sonusai/mixture/__init__.py,sha256=vSpEjVuHP-OH5RIY6O_1GeA-311VVaNIjaZh_ddYnLM,5617
+ sonusai/mixture/audio.py,sha256=Tg0-HzSgZDttKwWwONqgK6qK4maTSmWcfiL1atZcIXQ,2390
+ sonusai/mixture/augmentation.py,sha256=QqAuM2mN5S4SkQtCg1eDSJEtGsxsa6MBYZ9IOGVBvxg,10372
+ sonusai/mixture/class_count.py,sha256=U_FiWBU_yH6_ZAmZRdeDKUQyS0fnTtyfJ2V1lgqkFls,605
+ sonusai/mixture/config.py,sha256=b7fyacZm2FVjcTYC1vOy5FLrxkuJRYR0KrrATcGwQK4,24658
+ sonusai/mixture/constants.py,sha256=fXcWuSI4YZOAuncLGEUeEW9WWNZeN-6mI8LFNILwyTc,1494
+ sonusai/mixture/data_io.py,sha256=s8DNUIwdgcgrpqP_JplgGB_zVBczgGVTtAZe2qqrtiI,4632
+ sonusai/mixture/datatypes.py,sha256=D20qe93HjXZ80R_lNgN3Pu4pZH2qTxf2ZmntknDGNrs,10355
+ sonusai/mixture/db_datatypes.py,sha256=kQG_-G3zOqixoHxcmTrZlmxdrWNr1VhUzYF5IKYi19c,1542
+ sonusai/mixture/eq_rule_is_valid.py,sha256=O3gCAs_0hpxENK5b7kxxpDmOpKHlXGBWuLGT_97ARSM,1210
+ sonusai/mixture/feature.py,sha256=q9QZRe1qfKvHOuDxLAbpdrQF1gIqL8NRvk4WYWZX2IU,2996
+ sonusai/mixture/generation.py,sha256=SMRYhTYmuUHejR894HDGdYSDOZvEdFobdfQPCTopB-E,40218
+ sonusai/mixture/helpers.py,sha256=1La31bTdXA5gFCHRqQWDk6vfNlpKG1AX2jqlhOnObqU,20834
+ sonusai/mixture/log_duration_and_sizes.py,sha256=qhgl87C2KbjxLdKEpjYOoqNL6rc-8-PB4R7Gx_7UG8g,1240
+ sonusai/mixture/mixdb.py,sha256=Qj_12vZ4_4CdKrkWdBa0ysBt9LSCgs6YdJoiH3L9YQ0,65790
+ sonusai/mixture/soundfile_audio.py,sha256=fAbNIW8Xi7tfglmrbOzMSLQIG0dZEqTjLxhPGUeFyFc,4073
+ sonusai/mixture/sox_audio.py,sha256=QnHToWYy1l9kKGIefciUf6CdcAphDiaWcHo4imukUgY,16522
+ sonusai/mixture/sox_augmentation.py,sha256=txaoSdbLdizrzBbdV0LYleM7lxitV3h27_C68Fij1GY,4506
+ sonusai/mixture/spectral_mask.py,sha256=U9XJ_SAoI9b67K_3SE7bNw6U8cPGFOBttaZAxMjA_Jc,2042
+ sonusai/mixture/target_class_balancing.py,sha256=B8h9fqdvzZgcpsi-Z91GBEi4YSZJj0uMGzNCeGQk7RU,4313
+ sonusai/mixture/targets.py,sha256=QmhidE9wWU4oeumtUTHrndlcl0cG8_TiIzltnPwfBOg,6397
+ sonusai/mixture/tokenized_shell_vars.py,sha256=lXTzUDutuBWGV1zIsqeIxWmy-eKm0Vx1y8-iLdsL1gQ,4921
+ sonusai/mixture/torchaudio_audio.py,sha256=169VXKEoOHc5nyiHJwaihkcQ_a1ZH_O-nnk9Gq4JtaQ,2887
+ sonusai/mixture/torchaudio_augmentation.py,sha256=68QpJ4JcAH2fsL4qVck7VvBv615UbCBPEnNe4HMvA_8,3844
+ sonusai/mixture/truth.py,sha256=IJ9BHrBOMlFS8UGlxuw0-Uao0kHX4mDCmcKXxDs0Lzs,2342
+ sonusai/mixture/truth_functions/__init__.py,sha256=MEjNPOCd-cXLHpN09GuMv_DPIVWLLI6uhxsiKoaj6JM,1285
+ sonusai/mixture/truth_functions/crm.py,sha256=mE0q7ax37ZUMVFRi5EW3uqFB8v50WkLB4tgPYdCPjAc,3212
+ sonusai/mixture/truth_functions/datatypes.py,sha256=fbh16Q4Du4uCpQE_VGp8r3rhZTaRGK3glkkDmJ9BqPY,1285
+ sonusai/mixture/truth_functions/energy.py,sha256=4uYBEaU-lPnmzJGROa77qWizDMNW2M-c7imOqApmXJE,5451
+ sonusai/mixture/truth_functions/file.py,sha256=1jw7C3d45NXh4AZYfycyZm1aipX4xgEkpS-GZGgdJ6Y,1339
+ sonusai/mixture/truth_functions/phoneme.py,sha256=5DbELzq6CDKmjvVndQzq_RzqOLCL7oBqiMEsAuak_UU,855
+ sonusai/mixture/truth_functions/sed.py,sha256=2Aq_q5POrGbfpBCPFbuXY7LTjiVi5-gNHHr6-mezZqs,3471
+ sonusai/mixture/truth_functions/target.py,sha256=FaIlsQUqEpP7pokCVQ_x19SLKzRT0Ncw4rgbv0e4_dw,3664
+ sonusai/mkwav.py,sha256=FOq2PC1SXzIE2v4g3fHifpGCwcghZehbldRhO4V5-_c,4145
+ sonusai/onnx_predict.py,sha256=Y1VUN0wuvloEW46uxg7X4ywaec_Xx92djCU3BP0KAx0,8699
+ sonusai/queries/__init__.py,sha256=bhoeOFfu9GA5DOUuxRrIev7MYdXaGN8xdKJ6BXyNNtQ,277
+ sonusai/queries/queries.py,sha256=u8tZCJVnw9wJlwc03ue63FFqD5CHAKe4PWLJk0z4rto,7589
+ sonusai/speech/__init__.py,sha256=vqAymCBPjMUSM4OZKHTai6BYwXsOBlf_G_vOhELVf8I,133
+ sonusai/speech/l2arctic.py,sha256=Gon9OPAtcI0kItQ87wuyrvNqDpo_Lad3N2lOXyLSwkY,3631
+ sonusai/speech/librispeech.py,sha256=ugP3NVOenSsBF1cUG4Nyl7dumGHQmE4Ugk1yYjtOyj4,3070
+ sonusai/speech/mcgill.py,sha256=sgPHEZTPHlFXF8GVYFfKXMUEyBikfKha2RWOPfpNy_U,1981
+ sonusai/speech/textgrid.py,sha256=WvsUeamKoYyXBNnNnZgug-xfTiq2Z6RnFc1u0swmqNw,2813
+ sonusai/speech/timit.py,sha256=B1DZCS5Crt9Y54drqVqurhEiOMUZGxVOW7gxzZA5ErY,4099
+ sonusai/speech/types.py,sha256=4eKVPAktpkIrZ2qoVp2iT45zxTVNocQEGT6O_Zlub_w,214
+ sonusai/speech/vctk.py,sha256=WInvRRRkZCW6t_NcZAJffJzgCbyetal-j2w0kKX5SDw,1527
+ sonusai/speech/voxceleb.py,sha256=Uu1kB1krf8hess1yuvGbYfV_VgYhklEyoz4I7KfrVpw,2658
+ sonusai/summarize_metric_spenh.py,sha256=2HBFfVlezIoz18DTBi5rLyUqWy4QL87OXahMaJ6NDUQ,1818
+ sonusai/utils/__init__.py,sha256=BAo0J8ZqDHiTb6dpfBu0KrueCtsSlrtOZ9Kstst3kBk,2342
+ sonusai/utils/asl_p56.py,sha256=cPUVwXawF7vLJgs4zUtoRGk7Wdbe5KKti_-v_8xIU10,3862
+ sonusai/utils/asr.py,sha256=20eKkatNqGrbxFQoyeCRzqXaSH3lcA-9ZXGneCXjLvs,2791
+ sonusai/utils/asr_functions/__init__.py,sha256=HKGRm_c48tcxlfwqH63m-MvhAoK_pCcw76lxmFmiP_U,63
+ sonusai/utils/asr_functions/aaware_whisper.py,sha256=MHH238wj6arkQUMDHqHjc4DDVY2sXzu6iGi3ISswTeM,2129
+ sonusai/utils/audio_devices.py,sha256=Z_xstSy8KeJ2xtzHJGmrIHyCiNMgpdpJd5T-19u8dGU,1421
+ sonusai/utils/braced_glob.py,sha256=uvxo7USbxH6cWuVdNeGrz1SbZISFR1gPGbpy0EWm3m8,1645
+ sonusai/utils/calculate_input_shape.py,sha256=TIa_rHW3VIvOhlv5Wa3orcWFPMT-a3EPrIFHstbrmo4,906
+ sonusai/utils/compress.py,sha256=tT983XlgRf9bzutCegtHzkfKHK5LZD6a_mMFP47FGMI,605
+ sonusai/utils/convert_string_to_number.py,sha256=cktFu6Jp1jifi3F6j1WIlVhEoeiCzaw4JlI43dBg9WI,213
+ sonusai/utils/create_timestamp.py,sha256=s7ALOX3MAyK3EOX2BVOiYTIcspsKlIM6zXJk2cbFkz0,148
+ sonusai/utils/create_ts_name.py,sha256=3xu10hbZkV18sDqK4oR1JYvXeYE53ufzddmvGYx83Vg,405
+ sonusai/utils/dataclass_from_dict.py,sha256=iUagjF7CzbDIBKTX4ktd7EXn8q1jxmiRClMH3fu2_oA,389
+ sonusai/utils/db.py,sha256=lI77MJJLs4CTYxhjFUvBom2Kk2imAP34okOeO4irbDc,371
+ sonusai/utils/docstring.py,sha256=nJEwaVlOuSEZiSH34zwNdfGNxrY0NLCTZ7VoLzc9-cU,1447
+ sonusai/utils/energy_f.py,sha256=4gN5mNGuHgCgUbnOQ9ReYkDqNvE2pueoYBYWE-GBQPU,1457
+ sonusai/utils/engineering_number.py,sha256=SToFf6S0Xu0NtAJ1SjsVH4wxylH7qK8S9TBkPa15opY,5510
+ sonusai/utils/get_frames_per_batch.py,sha256=xnq4tV7MT74N0H6b5ZsiAezqdXucboCLQw1Np9XpZbs,134
+ sonusai/utils/get_label_names.py,sha256=df4jZVaQ3WnYQqNj21iUV4aYWyQEZUNmgs93qKW-_rA,820
+ sonusai/utils/grouper.py,sha256=qyZ0nj84yOrC-RZsXHC-KJvcUliGktnV8S6-P3PD6_w,203
+ sonusai/utils/human_readable_size.py,sha256=CmLxq9y5f-OPkwTEu4Gm0Io-H6p4SRhD2ciJRuYXfWk,293
+ sonusai/utils/max_text_width.py,sha256=pxiJMwb_zlkNntexgo7S6lAuF7NLLZvFdOCkxdsQJVY,315
+ sonusai/utils/model_utils.py,sha256=OIJBhOjxR0wpxsd7A2r6J2AjqfdYgZzi6UEThw4S1lI,828
+ sonusai/utils/numeric_conversion.py,sha256=iFPXFU8C_1mW5tmDqHq8-xP1tL8nVaSmhQRakdCqy30,328
+ sonusai/utils/onnx_utils.py,sha256=mbat4xvLhKMSvMC2kTEc4ukc7PI_gjsG4IVGvzpcOd4,5764
+ sonusai/utils/parallel.py,sha256=eT8PNtELUekjk8EXWgJEhaMwXuQe4T8CNSklCj4ejo8,2205
+ sonusai/utils/path_info.py,sha256=QY7iQ0nYpeEDnPN9RyPh4DsgYmVYsLrrlAzKuzkqX1o,118
+ sonusai/utils/print_mixture_details.py,sha256=Wh5M0aTy3NzxvR3P0HDlSq_S3G9L7LoQx-FnDCrHODs,2911
+ sonusai/utils/ranges.py,sha256=-TtAR0Vg_j4kYtJOvEOYQllBZEat_KfUKsfRxr5oj-o,1235
+ sonusai/utils/read_predict_data.py,sha256=PUSroxmWQGtr6_EcdSHmIFQoRGou8CKKqcggWylfTqQ,1056
+ sonusai/utils/reshape.py,sha256=Ozuh3UlmAS5NCeOK7NR8KgcQacHvgq10pys0VfCnOPU,5746
+ sonusai/utils/seconds_to_hms.py,sha256=9Ya9O97txFtTIXZUQw1K8g7b7Xx-ptvUtMUlzsIduTo,260
+ sonusai/utils/stacked_complex.py,sha256=JW6iAa1C-4Tuh4dD5c-D-O-yo-OY5Xm0AKVU0YsqsJU,2782
+ sonusai/utils/stratified_shuffle_split.py,sha256=d7WLUirywSvgZWkt_5a0F8YvTnJjuXlyPB5CRmHCN0U,6704
+ sonusai/utils/write_audio.py,sha256=0lKdaX57N6H-UWdioqmXCJMjwT1eBz5B-bSGqDvloAc,838
+ sonusai/utils/yes_or_no.py,sha256=0h1okjXmDNbJp7rZJFR2V-HFU1GJDm3YFTUVmYExkOU,263
+ sonusai/vars.py,sha256=kBBzuvC8szmdIZEEDA7XXmD765addZKdM2aFipeGO1w,933
+ sonusai-0.19.5.dist-info/METADATA,sha256=dSLUrr68uSSXvEz2NHkQM9vtfDqWm9yprz12fRLG6zY,2536
+ sonusai-0.19.5.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+ sonusai-0.19.5.dist-info/entry_points.txt,sha256=zMNjEphEPO6B3cD1GNpit7z-yA9tUU5-j3W2v-UWstU,92
+ sonusai-0.19.5.dist-info/RECORD,,
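Note: for reference, a sketch of how the sha256 entries in a wheel RECORD are formed (PEP 376/427 style): the digest is urlsafe-base64 encoded with the trailing padding stripped.

import base64
import hashlib

def record_hash(payload: bytes) -> str:
    digest = hashlib.sha256(payload).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(record_hash(b"example file contents\n"))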
{sonusai-0.18.9.dist-info → sonusai-0.19.5.dist-info}/WHEEL CHANGED
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 1.9.0
+ Generator: poetry-core 1.9.1
  Root-Is-Purelib: true
  Tag: py3-none-any