nkululeko 0.93.0__py3-none-any.whl → 0.93.2__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
nkululeko/constants.py CHANGED
@@ -1,2 +1,2 @@
- VERSION="0.93.0"
+ VERSION="0.93.2"
  SAMPLING_RATE = 16000
nkululeko/experiment.py CHANGED
@@ -197,6 +197,8 @@ class Experiment:
  )
  self.df_test = self._import_csv(storage_test)
  self.df_train = self._import_csv(storage_train)
+ self.train_empty = True if self.df_train.shape[0] == 0 else False
+ self.test_empty = True if self.df_test.shape[0] == 0 else False
  else:
  self.df_train, self.df_test = pd.DataFrame(), pd.DataFrame()
  for d in self.datasets.values():
@@ -212,6 +214,8 @@ class Experiment:
  self.util.debug(f"warn: {d.name} test empty")
  self.df_test = pd.concat([self.df_test, d.df_test])
  self.util.copy_flags(d, self.df_test)
+ self.train_empty = True if self.df_train.shape[0] == 0 else False
+ self.test_empty = True if self.df_test.shape[0] == 0 else False
  store = self.util.get_path("store")
  storage_test = f"{store}testdf.csv"
  storage_train = f"{store}traindf.csv"
@@ -253,50 +257,49 @@ class Experiment:
  if self.util.exp_is_classification():
  datatype = self.util.config_val("DATA", "type", "dummy")
  if datatype == "continuous":
- # if self.df_test.is_labeled:
- # # remember the target in case they get labelencoded later
- # self.df_test["class_label"] = self.df_test[self.target]
- test_cats = self.df_test["class_label"].unique()
- # else:
- # # if there is no target, copy a dummy label
- # self.df_test = self._add_random_target(self.df_test)
- # if self.df_train.is_labeled:
- # # remember the target in case they get labelencoded later
- # self.df_train["class_label"] = self.df_train[self.target]
- train_cats = self.df_train["class_label"].unique()
-
- else:
- if self.df_test.is_labeled:
- test_cats = self.df_test[self.target].unique()
- else:
- # if there is no target, copy a dummy label
- self.df_test = self._add_random_target(self.df_test).astype("str")
- train_cats = self.df_train[self.target].unique()
- # print(f"df_train: {pd.DataFrame(self.df_train[self.target])}")
- # print(f"train_cats with target {self.target}: {train_cats}")
- if self.df_test.is_labeled:
- if type(test_cats) == np.ndarray:
- self.util.debug(f"Categories test (nd.array): {test_cats}")
- else:
- self.util.debug(f"Categories test (list): {list(test_cats)}")
- if type(train_cats) == np.ndarray:
- self.util.debug(f"Categories train (nd.array): {train_cats}")
+ if not self.test_empty:
+ test_cats = self.df_test["class_label"].unique()
+ if not self.train_empty:
+ train_cats = self.df_train["class_label"].unique()
  else:
- self.util.debug(f"Categories train (list): {list(train_cats)}")
-
+ if not self.test_empty:
+ if self.df_test.is_labeled:
+ test_cats = self.df_test[self.target].unique()
+ else:
+ # if there is no target, copy a dummy label
+ self.df_test = self._add_random_target(self.df_test).astype(
+ "str"
+ )
+ if not self.train_empty:
+ train_cats = self.df_train[self.target].unique()
  # encode the labels as numbers
  self.label_encoder = LabelEncoder()
- self.df_train[self.target] = self.label_encoder.fit_transform(
- self.df_train[self.target]
- )
- self.df_test[self.target] = self.label_encoder.transform(
- self.df_test[self.target]
- )
  glob_conf.set_label_encoder(self.label_encoder)
+ if not self.train_empty:
+ if type(train_cats) == np.ndarray:
+ self.util.debug(f"Categories train (nd.array): {train_cats}")
+ else:
+ self.util.debug(f"Categories train (list): {list(train_cats)}")
+
+ self.df_train[self.target] = self.label_encoder.fit_transform(
+ self.df_train[self.target]
+ )
+ if not self.test_empty:
+ if self.df_test.is_labeled:
+ if type(test_cats) == np.ndarray:
+ self.util.debug(f"Categories test (nd.array): {test_cats}")
+ else:
+ self.util.debug(f"Categories test (list): {list(test_cats)}")
+ if not self.train_empty:
+ self.df_test[self.target] = self.label_encoder.transform(
+ self.df_test[self.target]
+ )
  if self.got_speaker:
+ speakers_train = 0 if self.train_empty else self.df_train.speaker.nunique()
+ speakers_test = 0 if self.test_empty else self.df_test.speaker.nunique()
  self.util.debug(
- f"{self.df_test.speaker.nunique()} speakers in test and"
- f" {self.df_train.speaker.nunique()} speakers in train"
+ f"{speakers_test} speakers in test and"
+ f" {speakers_train} speakers in train"
  )

  target_factor = self.util.config_val("DATA", "target_divide_by", False)
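
The hunk above makes label encoding conditional on the new `train_empty`/`test_empty` flags. A minimal, self-contained sketch of that guard pattern, using toy data and a hypothetical `emotion` column rather than nkululeko's `Experiment` class:

```python
import pandas as pd
from sklearn.preprocessing import LabelEncoder

# Toy stand-ins for df_train / df_test; "emotion" is a hypothetical target column.
df_train = pd.DataFrame({"emotion": ["angry", "happy", "neutral"]})
df_test = pd.DataFrame({"emotion": []})  # e.g. a train-only experiment

train_empty = df_train.shape[0] == 0
test_empty = df_test.shape[0] == 0

label_encoder = LabelEncoder()
if not train_empty:
    # fit the encoder on the training labels only
    df_train["emotion"] = label_encoder.fit_transform(df_train["emotion"])
if not train_empty and not test_empty:
    # transform() needs a fitted encoder and a non-empty, labeled test split
    df_test["emotion"] = label_encoder.transform(df_test["emotion"])
print(df_train["emotion"].tolist())  # [0, 1, 2]
```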
@@ -363,14 +366,16 @@ class Experiment:
  self.util.debug("no feature extractor specified.")
  self.feats_train, self.feats_test = pd.DataFrame(), pd.DataFrame()
  return
- self.feature_extractor = FeatureExtractor(
- df_train, feats_types, feats_name, "train"
- )
- self.feats_train = self.feature_extractor.extract()
- self.feature_extractor = FeatureExtractor(
- df_test, feats_types, feats_name, "test"
- )
- self.feats_test = self.feature_extractor.extract()
+ if not self.train_empty:
+ self.feature_extractor = FeatureExtractor(
+ df_train, feats_types, feats_name, "train"
+ )
+ self.feats_train = self.feature_extractor.extract()
+ if not self.test_empty:
+ self.feature_extractor = FeatureExtractor(
+ df_test, feats_types, feats_name, "test"
+ )
+ self.feats_test = self.feature_extractor.extract()
  self.util.debug(
  f"All features: train shape : {self.feats_train.shape}, test"
  f" shape:{self.feats_test.shape}"
@@ -393,12 +398,6 @@ class Experiment:
  self.util.warn(f"new test labels shape: {self.df_test.shape[0]}")

  self._check_scale()
- # store = self.util.get_path("store")
- # store_format = self.util.config_val("FEATS", "store_format", "pkl")
- # storage = f"{store}test_feats.{store_format}"
- # self.util.write_store(self.feats_test, storage, store_format)
- # storage = f"{store}train_feats.{store_format}"
- # self.util.write_store(self.feats_train, storage, store_format)

  def augment(self):
  """Augment the selected samples."""
@@ -435,7 +434,9 @@ class Experiment:
  f"unknown augmentation selection specifier {sample_selection},"
  " should be [all | train | test]"
  )
- targets = self.util.config_val_list("PREDICT", "targets", ["gender"])
+ targets = self.util.config_val_list("PREDICT", "targets", None)
+ if targets is None:
+ self.util.error("no prediction target specified")
  for target in targets:
  if target == "speaker":
  from nkululeko.autopredict.ap_sid import SIDPredictor
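
Since the `["gender"]` fallback is gone, a predict run now has to name its targets in the configuration. A hedged sketch of the relevant INI section, assuming the usual nkululeko config layout (the target names are just examples):

```ini
[PREDICT]
# read via config_val_list; "gender" and "snr" are example targets
targets = ['gender', 'snr']
```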
nkululeko/explore.py CHANGED
@@ -1,5 +1,4 @@
- """
- Explore the feature sets of a machine learning experiment.
+ """Explore the feature sets of a machine learning experiment.

  This script is the entry point for the 'explore' module of the nkululeko framework.
  It handles loading the experiment configuration, setting up the experiment, and
@@ -77,7 +76,6 @@ def main():
  plot_feats = eval(util.config_val("EXPL", "feature_distributions", "False"))
  tsne = eval(util.config_val("EXPL", "tsne", "False"))
  scatter = eval(util.config_val("EXPL", "scatter", "False"))
- spotlight = eval(util.config_val("EXPL", "spotlight", "False"))
  shap = eval(util.config_val("EXPL", "shap", "False"))
  model_type = util.config_val("EXPL", "model", False)
  plot_tree = eval(util.config_val("EXPL", "plot_tree", "False"))
@@ -87,8 +85,8 @@ def main():
  expr.extract_feats()
  needs_feats = True
  # explore
- expr.init_runmanager()
- expr.runmgr.do_runs()
+ # expr.init_runmanager()
+ # expr.runmgr.do_runs()
  expr.analyse_features(needs_feats)
  expr.store_report()
  print("DONE")
nkululeko/plots.py CHANGED
@@ -8,6 +8,8 @@ from scipy import stats
  import seaborn as sns
  from sklearn.manifold import TSNE

+ from audmetric import concordance_cc as ccc
+
  import nkululeko.glob_conf as glob_conf
  from nkululeko.reporting.defines import Header
  from nkululeko.reporting.report_item import ReportItem
@@ -239,28 +241,54 @@ class Plots:

  def _plot2cont_cat(self, df, cont1, cont2, cat, ylab):
  """Plot relation of two continuous distributions with one categorical."""
+ if cont2 == "class_label":
+ df.rename(columns={cont2: self.target})
+ cont2 = self.target
+ if cont1 == "class_label":
+ df.rename(columns={cont1: self.target})
+ cont1 = self.target
+ if cat == "class_label":
+ df.rename(columns={cat: self.target})
+ cat = self.target
  pearson = stats.pearsonr(df[cont1], df[cont2])
  # trunc to three digits
  pearson = int(pearson[0] * 1000) / 1000
  pearson_string = f"PCC: {pearson}"
+ ccc_val = ccc(df[cont1], df[cont2])
+ ccc_val = int(ccc_val * 1000) / 1000
+ ccc_string = f"CCC: {ccc_val}"
  ax = sns.lmplot(data=df, x=cont1, y=cont2, hue=cat)
- caption = f"{ylab} {df.shape[0]}. {pearson_string}"
+ caption = f"{ylab} {df.shape[0]}. {pearson_string} {ccc_string}"
  ax.figure.suptitle(caption)
  return ax, caption

  def _plot2cont(self, df, col1, col2, ylab):
  """Plot relation of two continuous distributions."""
+ # rename "class_label" to the original target
+ if col2 == "class_label":
+ df.rename(columns={col2: self.target})
+ col2 = self.target
+ if col1 == "class_label":
+ df.rename(columns={col1: self.target})
+ col1 = self.target
  pearson = stats.pearsonr(df[col1], df[col2])
  # trunc to three digits
  pearson = int(pearson[0] * 1000) / 1000
  pearson_string = f"PCC: {pearson}"
+ ccc_val = ccc(df[cont1], df[cont2])
+ ccc_val = int(ccc_val * 1000) / 1000
+ ccc_string = f"CCC: {ccc_val}"
  ax = sns.lmplot(data=df, x=col1, y=col2)
- caption = f"{ylab} {df.shape[0]}. {pearson_string}"
+ caption = f"{ylab} {df.shape[0]}. {pearson_string} {ccc_string}"
  ax.figure.suptitle(caption)
  return ax, caption

  def plotcatcont(self, df, cat_col, cont_col, xlab, ylab):
  """Plot relation of categorical distribution with continuous."""
+ # rename "class_label" to the original target
+ if cat_col == "class_label":
+ df.rename(columns={cat_col: self.target})
+ cat_col = self.target
  dist_type = self.util.config_val("EXPL", "dist_type", "kde")
  cats, cat_str, es = su.get_effect_size(df, cat_col, cont_col)
  model_type = self.util.get_model_type()
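
The new caption logic pairs Pearson's correlation with the concordance correlation coefficient from `audmetric`. A minimal, self-contained sketch of that computation on a toy DataFrame (inside `_plot2cont` the columns would be its own `col1`/`col2` parameters):

```python
import pandas as pd
from scipy import stats
from audmetric import concordance_cc as ccc

# toy data standing in for two continuous columns of the plotted DataFrame
df = pd.DataFrame({"truth": [0.1, 0.4, 0.5, 0.9], "pred": [0.2, 0.3, 0.6, 0.8]})
col1, col2 = "truth", "pred"

pearson = int(stats.pearsonr(df[col1], df[col2])[0] * 1000) / 1000  # truncate to 3 digits
ccc_val = int(ccc(df[col1], df[col2]) * 1000) / 1000
caption = f"PCC: {pearson} CCC: {ccc_val}"
print(caption)
```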
nkululeko/utils/util.py CHANGED
@@ -226,11 +226,18 @@ class Util:
  return self.config["DATA"]["target"]

  def get_model_type(self):
- return self.config["MODEL"]["type"]
+ try:
+ return self.config["MODEL"]["type"]
+ except KeyError:
+ return ""

  def get_model_description(self):
  mt = ""
- mt = f'{self.config["MODEL"]["type"]}'
+ try:
+ mt = f'{self.config["MODEL"]["type"]}'
+ except KeyError:
+ # no model type given
+ pass
  # ft = "_".join(ast.literal_eval(self.config["FEATS"]["type"]))
  ft_value = self.config["FEATS"]["type"]
  if (
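
The `KeyError` fallback above lets `get_model_type` degrade gracefully when no `[MODEL]` section is configured (e.g. in explore-only runs). A minimal sketch of the underlying behaviour with a toy `configparser` config, not nkululeko's `Util` class:

```python
import configparser

config = configparser.ConfigParser()
config.read_string("[EXP]\nname = demo\n")  # toy config without a [MODEL] section

try:
    model_type = config["MODEL"]["type"]  # missing section raises KeyError
except KeyError:
    model_type = ""  # fall back to an empty model type
print(repr(model_type))  # ''
```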
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: nkululeko
- Version: 0.93.0
+ Version: 0.93.2
  Summary: Machine learning audio prediction experiments based on templates
  Home-page: https://github.com/felixbur/nkululeko
  Author: Felix Burkhardt
@@ -100,22 +100,22 @@ And can show the distribution of specific features per category:
  <img src="meta/images/feat_dist.png" width="500px"/>

  ### t-SNE plots
- A t-SNE plot can give you an estimate wether your acoustic features are useful at all:
+ A t-SNE plot can give you an estimate of whether your acoustic features are useful at all:

  <img src="meta/images/tsne.png" width="500px"/>

  ### Data distribution
- Sometimes you only want to take a look at your data:
+ Sometimes, you only want to take a look at your data:

  <img src="meta/images/data_plot.png" width="500px"/>

  ### Bias checking
- In cases you might wonder if there's bias in your data. You can try to detect this with automatically estimated speech properties, by visualizing the correlation of target label and predicted labels.
+ In some cases, you might wonder if there's bias in your data. You can try to detect this with automatically estimated speech properties by visualizing the correlation of target labels and predicted labels.

  <img src="meta/images/emotion-pesq.png" width="500px"/>

  ### Uncertainty
- Nkululeko estimates uncertainty of model decision (only for classifiers) with entropy over the class-probabilities or logits per sample.
+ Nkululeko estimates the uncertainty of model decisions (only for classifiers) with entropy over the class probabilities or logits per sample.

  <img src="meta/images/uncertainty.png" width="500px"/>

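The uncertainty description in the hunk above boils down to a per-sample entropy over the predicted class probabilities. A minimal sketch using plain Shannon entropy via SciPy (not necessarily nkululeko's exact implementation):

```python
import numpy as np
from scipy.stats import entropy

probs = np.array([0.7, 0.2, 0.1])     # class probabilities for one sample
uncertainty = entropy(probs, base=2)  # 0 = fully certain, log2(n_classes) = maximally uncertain
print(round(float(uncertainty), 3))
```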
@@ -138,7 +138,7 @@ appears, please try
  ```
  pip install x
  ```
- For many packages you will need the missing torch package.
+ For many packages, you will need the missing torch package.
  If you don't have a GPU (which is probably true if you don't know what that is), please use
  ```
  pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
@@ -219,7 +219,7 @@ All of them take *--config <my_config.ini>* as an argument.
  * **nkululeko.predict**: [predict features](http://blog.syntheticspeech.de/2023/08/16/nkululeko-how-to-predict-labels-for-your-data-from-existing-models-and-check-them/) like SNR, MOS, arousal/valence, age/gender, with DNN models
  * **nkululeko.segment**: [segment a database](http://blog.syntheticspeech.de/2023/07/14/nkululeko-segmenting-a-database/) based on VAD (voice activity detection)
  * **nkululeko.resample**: check on all [sampling rates and change](http://blog.syntheticspeech.de/2023/08/31/how-to-fix-different-sampling-rates-in-a-dataset-with-nkululeko/) to 16kHz
- * **nkululeko.nkuluflag**: a convenient module to specify configuration parameters on the command-line. Usage:
+ * **nkululeko.nkuluflag**: a convenient module to specify configuration parameters on the command line. Usage:

  ```bash
  $ python -m nkululeko.nkuluflag.py [-h] [--config CONFIG] [--data [DATA ...]] [--label [LABEL ...]] [--tuning_params [TUNING_PARAMS ...]] [--layers [LAYERS ...]] [--model MODEL] [--feat FEAT] [--set SET] [--with_os WITH_OS] [--target TARGET] [--epochs EPOCHS] [--runs RUNS] [--learning_rate LEARNING_RATE] [--drop DROP]
@@ -236,7 +236,7 @@ There's my [blog](http://blog.syntheticspeech.de/?s=nkululeko) with tutorials:
  * [Combine feature sets](http://blog.syntheticspeech.de/2022/06/30/how-to-combine-feature-sets-with-nkululeko/)
  * [Classifying continuous variables](http://blog.syntheticspeech.de/2022/01/26/nkululeko-classifying-continuous-variables/)
  * [Try out / demo a trained model](http://blog.syntheticspeech.de/2022/01/24/nkululeko-try-out-demo-a-trained-model/)
- * [Perform cross database experiments](http://blog.syntheticspeech.de/2021/10/05/nkululeko-perform-cross-database-experiments/)
+ * [Perform cross-database experiments](http://blog.syntheticspeech.de/2021/10/05/nkululeko-perform-cross-database-experiments/)
  * [Meta parameter optimization](http://blog.syntheticspeech.de/2021/09/03/perform-optimization-with-nkululeko/)
  * [How to set up wav2vec embedding](http://blog.syntheticspeech.de/2021/12/03/how-to-set-up-wav2vec-embedding-for-nkululeko/)
  * [How to soft-label a database](http://blog.syntheticspeech.de/2022/01/24/how-to-soft-label-a-database-with-nkululeko/)
@@ -261,7 +261,7 @@ There's my [blog](http://blog.syntheticspeech.de/?s=nkululeko) with tutorials:
  * [Predict new labels for your data from public models and check bias](http://blog.syntheticspeech.de/2023/08/16/nkululeko-how-to-predict-labels-for-your-data-from-existing-models-and-check-them/)
  * [Resample](http://blog.syntheticspeech.de/2023/08/31/how-to-fix-different-sampling-rates-in-a-dataset-with-nkululeko/)
  * [Get some statistics on correlation and effect-size](http://blog.syntheticspeech.de/2023/09/05/nkululeko-get-some-statistics-on-correlation-and-effect-size/)
- * [Automatic generation of a latex / pdf report](http://blog.syntheticspeech.de/2023/09/26/nkululeko-generate-a-latex-pdf-report/)
+ * [Automatic generation of a latex/pdf report](http://blog.syntheticspeech.de/2023/09/26/nkululeko-generate-a-latex-pdf-report/)
  * [Inspect your data with Spotlight](http://blog.syntheticspeech.de/2023/10/31/nkululeko-inspect-your-data-with-spotlight/)
  * [Automatically stratify your split sets](http://blog.syntheticspeech.de/2023/11/07/nkululeko-automatically-stratify-your-split-sets/)
  * [re-name data column names](http://blog.syntheticspeech.de/2023/11/16/nkululeko-re-name-data-column-names/)
@@ -277,7 +277,7 @@ There's my [blog](http://blog.syntheticspeech.de/?s=nkululeko) with tutorials:
  * NEW: [Here's a Google colab that runs this example out-of-the-box](https://colab.research.google.com/drive/1Up7t5Nn7VwDPCCEpTg2U7cpZ_PdoEgj-?usp=sharing), and here is the same [with Kaggle](https://www.kaggle.com/felixburk/nkululeko-hello-world-example)
  * [I made a video to show you how to do this on Windows](https://www.youtube.com/playlist?list=PLRceVavtxLg0y2jiLmpnUfiMtfvkK912D)
  * Set up Python on your computer, version >= 3.8
- * Open a terminal/commandline/console window
+ * Open a terminal/command line/console window
  * Test python by typing ```python```, python should start with version >3 (NOT 2!). You can leave the Python Interpreter by typing *exit()*
  * Create a folder on your computer for this example, let's call it `nkulu_work`
  * Get a copy of the [Berlin emodb in audformat](https://zenodo.org/records/7447302/files/emodb.zip?download=1) and unpack inside the folder you just created (`nkulu_work`)
@@ -293,7 +293,7 @@ There's my [blog](http://blog.syntheticspeech.de/?s=nkululeko) with tutorials:
  * if that worked, you should see a ```(venv)``` in front of your prompt
  * Install the required packages in your environment
  * ```pip install nkululeko```
- * Repeat until all error messages vanished (or fix them, or try to ignore them)...
+ * Repeat until all error messages vanish (or fix them, or try to ignore them)...
  * Now you should have two folders in your *nkulu_work* folder:
  * *emodb* and *venv*
  * Download a copy of the file [exp_emodb.ini](meta/demos/exp_emodb.ini) to the current working directory (```nkulu_work```)
@@ -301,9 +301,9 @@ There's my [blog](http://blog.syntheticspeech.de/?s=nkululeko) with tutorials:
  * ```python -m nkululeko.nkululeko --config exp_emodb.ini```
  * Find the results in the newly created folder exp_emodb
  * Inspect ```exp_emodb/images/run_0/emodb_xgb_os_0_000_cnf.png```
- * This is the main result of you experiment: a confusion matrix for the emodb emotional categories
+ * This is the main result of your experiment: a confusion matrix for the emodb emotional categories
  * Inspect and play around with the [demo configuration file](meta/demos/exp_emodb.ini) that defined your experiment, then re-run.
- * There are many ways to experiment with different classifiers and acoustic features sets, [all described here](https://github.com/felixbur/nkululeko/blob/main/ini_file.md)
+ * There are many ways to experiment with different classifiers and acoustic feature sets, [all described here](https://github.com/felixbur/nkululeko/blob/main/ini_file.md)

  ### Features
  The framework is targeted at the speech domain and supports experiments where different classifiers are combined with different feature extractors.
@@ -327,16 +327,16 @@ Here's [an animation that shows the progress of classification done with nkulule


  ## License
- Nkululeko can be used under the [MIT license](https://choosealicense.com/licenses/mit/)
+ Nkululeko can be used under the [MIT license](https://choosealicense.com/licenses/mit/).


  ## Contributing
- Contributions are welcome and encouraged. To learn more about how to contribute to nkululeko please refer to the [Contributing guidelines](./CONTRIBUTING.md)
+ Contributions are welcome and encouraged. To learn more about how to contribute to nkululeko, please refer to the [Contributing guidelines](./CONTRIBUTING.md).

  ## Citing
- If you use it, please mention the Nkululeko paper
+ If you use it, please mention the Nkululeko paper:

- F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schuller: Nkululeko: A Tool For Rapid Speaker Characteristics Detection, Proc. Proc. LREC, 2022
+ > F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schuller: Nkululeko: A Tool For Rapid Speaker Characteristics Detection, Proc. Proc. LREC, 2022


  ```
@@ -355,6 +355,14 @@ F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schulle
  Changelog
  =========

+ Version 0.93.2
+ --------------
+ * changed class_label in plots to actual target
+
+ Version 0.93.1
+ --------------
+ * made explore module more robust
+

  Version 0.93.0
  --------------
  * integrated pyannote for speaker prediction for predict module
@@ -2,14 +2,14 @@ nkululeko/__init__.py,sha256=62f8HiEzJ8rG2QlTFJXUCMpvuH3fKI33DoJSj33mscc,63
  nkululeko/aug_train.py,sha256=FoMbBrfyOZd4QAw7oIHl3X6-UpsqAKWVDIolCA7qOWs,3196
  nkululeko/augment.py,sha256=3RzaxB3gRxovgJVjHXi0glprW01J7RaHhUkqotW2T3U,2955
  nkululeko/cacheddataset.py,sha256=XFpWZmbJRg0pvhnIgYf0TkclxllD-Fctu-Ol0PF_00c,969
- nkululeko/constants.py,sha256=YWEAJOJi8kWTdeRVHLVLQH66lH0czOfFjt-jbpnGmwY,39
+ nkululeko/constants.py,sha256=BbVRqe9PVLlKhJNVX9kRT-tO_jL8AU7O9uhFeSzq9sw,39
  nkululeko/demo-ft.py,sha256=iD9Pzp9QjyAv31q1cDZ75vPez7Ve8A4Cfukv5yfZdrQ,770
  nkululeko/demo.py,sha256=4Yzhg6pCPBYPGJrP7JX2TysVosl_R1llpVDKc2P_gUA,4955
  nkululeko/demo_feats.py,sha256=BvZjeNFTlERIRlq34OHM4Z96jdDQAhB01BGQAUcX9dM,2026
  nkululeko/demo_predictor.py,sha256=lDF-xOxRdEAclOmbepAYg-BQXQdGkHfq2n74PTIoop8,4872
  nkululeko/ensemble.py,sha256=71V-rre61H3J4sh7lu-OTo4I2_g7mm_rQxwW1ARDHgY,12782
- nkululeko/experiment.py,sha256=uU_8WR8JuUD50lgcl_K_BBQYmHMbuwAniWft8bGHuDU,31842
- nkululeko/explore.py,sha256=Y5lPPychnI-7fyP8zvwVb9P09fvprbUPOofOppuABYQ,3658
+ nkululeko/experiment.py,sha256=0xe_mrGtO6q8HF6dZ7slXca7BvSoyIh6j61U9mtcS_o,31785
+ nkululeko/explore.py,sha256=FPM2CS-LKgcDV-LnjYlD6pEv7HuCQpH_C3KyyiOCdk4,3589
  nkululeko/export.py,sha256=U-V4acxtuL6qKt6oAsVcM5TTeWogYUJ3GU-lA6rq6d4,4336
  nkululeko/feature_extractor.py,sha256=UnspIWz3XrNhKnBBhWZkH2bHvD-sROtrQVqB1JvkUyw,4088
  nkululeko/file_checker.py,sha256=xJY0Q6w47pnmgJVK5rcAKPYBrCpV7eBT4_3YBzTx-H8,3454
@@ -20,7 +20,7 @@ nkululeko/modelrunner.py,sha256=lJy-xM4QfDDWeL0dLTE_VIb4sYrnd_Z_yJRK3wwohQA,1119
  nkululeko/multidb.py,sha256=sO6OwJn8sn1-C-ig3thsIL8QMWHdV9SnJhDodKjeKrI,6876
  nkululeko/nkuluflag.py,sha256=PGWSmZz-PiiHLgcZJAoGOI_Y-sZDVI1ksB8p5r7riWM,3725
  nkululeko/nkululeko.py,sha256=M7baIq2nAoi6dEoBL4ATEuqAs5U1fvl_hyqAl5DybAQ,2040
- nkululeko/plots.py,sha256=dK3jVwsZufqXgHwAvDYt6uDg_KYk5cfxlP1Fo8kb9HA,23935
+ nkululeko/plots.py,sha256=5V64_dPusl6t4p2yO6mAa253ydB1T8qtDx2eERKfGqI,25078
  nkululeko/predict.py,sha256=MLnHEyFmSiHLLs-HDczag8Vu3zKF5T1rXLKdZZJ6py8,2083
  nkululeko/resample.py,sha256=akSAjJ3qn-O5NAyLJHVHdsK7MUZPGaZUvM2TwMSmj2M,5194
  nkululeko/runmanager.py,sha256=AswmORVUkCIH0gTx6zEyufvFATQBS8C5TXo2erSNdVg,7611
@@ -111,10 +111,10 @@ nkululeko/segmenting/seg_silero.py,sha256=ulodnvtRq5MLHDxy_RmAK4tJg6h1d-mPq-uCPF
  nkululeko/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nkululeko/utils/files.py,sha256=SrrYaU7AB80MZHiV1jcB0h_zigvYLYgSVNTXV4ao38g,4593
  nkululeko/utils/stats.py,sha256=vCRzhCR0Gx5SiJyAGbj1TIto8ocGz58CM5Pr3LltagA,2948
- nkululeko/utils/util.py,sha256=XFZdhCc_LM4EmoZ5tKKaBCQLXclcNmvHwhfT_CXB98c,16723
- nkululeko-0.93.0.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
- nkululeko-0.93.0.dist-info/METADATA,sha256=3q74htqBYa_dfgtZzah5SPDwjG3o2c9sfGBlJK9sfI4,41933
- nkululeko-0.93.0.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
- nkululeko-0.93.0.dist-info/entry_points.txt,sha256=lNTkFEdh6Kjo5o95ZAWf_0Lq-4ztGoAoMVSDuPtuyS0,442
- nkululeko-0.93.0.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
- nkululeko-0.93.0.dist-info/RECORD,,
+ nkululeko/utils/util.py,sha256=wFDslqxpCVDwi6LBakIFDDy1kYsxt5G7ykE38CocmtA,16880
+ nkululeko-0.93.2.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
+ nkululeko-0.93.2.dist-info/METADATA,sha256=Dyhm_lOM-nr_GSqThzGnOLscolalOux9O4j6Dwoqa_c,42097
+ nkululeko-0.93.2.dist-info/WHEEL,sha256=R06PA3UVYHThwHvxuRWMqaGcr-PuniXahwjmQRFMEkY,91
+ nkululeko-0.93.2.dist-info/entry_points.txt,sha256=lNTkFEdh6Kjo5o95ZAWf_0Lq-4ztGoAoMVSDuPtuyS0,442
+ nkululeko-0.93.2.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
+ nkululeko-0.93.2.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.3.0)
+ Generator: setuptools (75.5.0)
  Root-Is-Purelib: true
  Tag: py3-none-any