nkululeko 0.86.1__py3-none-any.whl → 0.86.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nkululeko/constants.py CHANGED
@@ -1,2 +1,2 @@
- VERSION="0.86.1"
+ VERSION="0.86.3"
  SAMPLING_RATE = 16000
nkululeko/feat_extract/feats_import.py CHANGED
@@ -35,6 +35,7 @@ class ImportSet(Featureset):
  if not os.path.isfile(feat_import_file):
  self.util.error(f"no import file: {feat_import_file}")
  df = audformat.utils.read_csv(feat_import_file)
+ df = self.util.make_segmented_index(df)
  df = df[df.index.isin(self.data_df.index)]
  feat_df = pd.concat([feat_df, df])
  if feat_df.shape[0] == 0:
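With the new make_segmented_index call, imported feature tables no longer need to ship a segmented (file, start, end) MultiIndex themselves. The following is a minimal sketch of the conversion the helper presumably performs (see the util.py hunk below), using audformat directly and a made-up filewise feature table; allow_nat=False, as used in the package, additionally reads end times from the audio files, so they must exist on disk, which is why this sketch uses allow_nat=True:

import audformat
import pandas as pd

# Hypothetical filewise feature table, as a user might export it.
feats = pd.DataFrame(
    {"feat_1": [0.1, 0.2]},
    index=audformat.filewise_index(["a.wav", "b.wav"]),
)

# Convert the filewise index to the segmented (file, start, end) MultiIndex
# that nkululeko uses internally.
feats.index = audformat.utils.to_segmented_index(feats.index, allow_nat=True)
print(feats.index.names)  # ['file', 'start', 'end']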
nkululeko/models/model_mlp.py CHANGED
@@ -14,12 +14,12 @@ from nkululeko.losses.loss_softf1loss import SoftF1Loss


  class MLP_model(Model):
- """MLP = multi layer perceptron"""
+ """MLP = multi layer perceptron."""

  is_classifier = True

  def __init__(self, df_train, df_test, feats_train, feats_test):
- """Constructor taking the configuration and all dataframes"""
+ """Constructor taking the configuration and all dataframes."""
  super().__init__(df_train, df_test, feats_train, feats_test)
  super().set_model_type("ann")
  self.name = "mlp"
nkululeko/models/model_tuned.py CHANGED
@@ -1,5 +1,6 @@
  """Code based on @jwagner."""

+ import ast
  import dataclasses
  import json
  import os
@@ -51,8 +52,9 @@ class TunedModel(BaseModel):
  self.batch_size = int(self.util.config_val("MODEL", "batch_size", "8"))
  self.util.debug(f"batch size: {self.batch_size}")
  self.learning_rate = float(
- self.util.config_val("MODEL", "learning_rate", 0.0001)
+ self.util.config_val("MODEL", "learning_rate", "0.0001")
  )
+ self.max_duration = float(self.util.config_val("MODEL", "max_duration", "8.0"))
  self.df_train, self.df_test = df_train, df_test
  self.epoch_num = int(self.util.config_val("EXP", "epochs", 1))
  drop = self.util.config_val("MODEL", "drop", False)
@@ -60,6 +62,7 @@ class TunedModel(BaseModel):
  if drop:
  self.drop = float(drop)
  self.util.debug(f"init: training with dropout: {self.drop}")
+ self.push = eval(self.util.config_val("MODEL", "push_to_hub", "False"))
  self._init_model()

  def _init_model(self):
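The finetuning model now reads three [MODEL] options during init: learning_rate, max_duration (maximum clip length in seconds, previously hard-coded to 8.0) and push_to_hub. A minimal sketch of an INI excerpt exercising them; the section and key names are taken from the diff, the values are illustrative, and configparser's getboolean stands in here for the eval() call the package itself uses on push_to_hub:

import configparser

# Hypothetical excerpt of an experiment INI file; only keys added or
# changed in this diff are shown.
ini = """
[MODEL]
learning_rate = 0.0001
max_duration = 8.0
push_to_hub = False
"""

config = configparser.ConfigParser()
config.read_string(ini)
model = config["MODEL"]

# Mirrors the casts in TunedModel.__init__: strings in, typed values out.
learning_rate = float(model.get("learning_rate", "0.0001"))
max_duration = float(model.get("max_duration", "8.0"))
push = model.getboolean("push_to_hub", fallback=False)
print(learning_rate, max_duration, push)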
@@ -68,7 +71,7 @@ class TunedModel(BaseModel):
  "MODEL", "pretrained_model", model_path)
  self.num_layers = None
  self.sampling_rate = 16000
- self.max_duration_sec = 8.0
+ self.max_duration_sec = self.max_duration
  self.accumulation_steps = 4

  # print finetuning information via debug
@@ -130,6 +133,11 @@ class TunedModel(BaseModel):
  tokenizer = transformers.Wav2Vec2CTCTokenizer("./vocab.json")
  tokenizer.save_pretrained(".")

+ # upload tokenizer to hub if true
+ if self.push:
+ tokenizer.push_to_hub(self.util.get_name())
+
+
  feature_extractor = transformers.Wav2Vec2FeatureExtractor(
  feature_size=1,
  sampling_rate=16000,
@@ -235,8 +243,8 @@ class TunedModel(BaseModel):
  def train(self):
  """Train the model."""
  model_root = self.util.get_path("model_dir")
- log_root = os.path.join(self.util.get_exp_dir(), "log")
- audeer.mkdir(log_root)
+ self.log_root = os.path.join(self.util.get_exp_dir(), "log")
+ audeer.mkdir(self.log_root)
  self.torch_root = audeer.path(model_root, "torch")
  conf_file = os.path.join(self.torch_root, "config.json")
  if os.path.isfile(conf_file):
@@ -244,20 +252,35 @@ class TunedModel(BaseModel):
  self.load(self.run, self.epoch_num)
  return
  targets = pd.DataFrame(self.dataset["train"]["targets"])
- counts = targets[0].value_counts().sort_index()

  if self.is_classifier:
- train_weights = 1 / counts
- train_weights /= train_weights.sum()
- self.util.debug(f"train weights: {train_weights}")
- criterion = torch.nn.CrossEntropyLoss(
- weight=torch.Tensor(train_weights).to("cuda"),
- )
+ criterion = self.util.config_val("MODEL", "loss", "cross")
+ if criterion == "cross":
+ if self.util.config_val("MODEL", "class_weight", False):
+ counts = targets[0].value_counts().sort_index()
+ train_weights = 1 / counts
+ train_weights /= train_weights.sum()
+ self.util.debug(f"train weights: {train_weights}")
+ criterion = torch.nn.CrossEntropyLoss(
+ weight=torch.Tensor(train_weights).to("cuda"),
+ )
+ else:
+ criterion = torch.nn.CrossEntropyLoss()
+ else:
+ self.util.error(f"criterion {criterion} not supported for classifier")
  else:
- criterion = ConcordanceCorCoeff()
+ self.criterion = self.util.config_val("MODEL", "loss", "ccc")
+ if criterion == "1-ccc":
+ criterion = ConcordanceCorCoeff()
+ elif criterion == "mse":
+ criterion = torch.nn.MSELoss()
+ elif criterion == "mae":
+ criterion = torch.nn.L1Loss()
+ else:
+ self.util.error(f"criterion {criterion} not supported for regressor")

  # set push_to_hub value, default false
- push = self.util.config_val("MODEL", "push_to_hub", False)
+ # push = eval(self.util.config_val("MODEL", "push_to_hub", "False"))

  class Trainer(transformers.Trainer):
  def compute_loss(
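The training loss is now configurable via [MODEL] loss, and for classification the cross-entropy loss is only weighted by inverse class frequency when [MODEL] class_weight is set. A standalone sketch of the same selection logic, assuming torch and pandas are available; the "1-ccc" branch is omitted because nkululeko's ConcordanceCorCoeff loss is not reproduced here, and the weight tensor is kept on CPU for the sketch:

import pandas as pd
import torch


def select_criterion(loss_name, targets=None, class_weight=False, is_classifier=True):
    """Sketch mirroring the loss selection added in TunedModel.train."""
    if is_classifier:
        if loss_name != "cross":
            raise ValueError(f"criterion {loss_name} not supported for classifier")
        if class_weight and targets is not None:
            # weight each class by its inverse frequency, normalized to sum to 1
            counts = targets.value_counts().sort_index()
            weights = 1 / counts
            weights /= weights.sum()
            return torch.nn.CrossEntropyLoss(weight=torch.Tensor(weights.values))
        return torch.nn.CrossEntropyLoss()
    if loss_name == "mse":
        return torch.nn.MSELoss()
    if loss_name == "mae":
        return torch.nn.L1Loss()
    raise ValueError(f"criterion {loss_name} not supported for regressor")


# Example: weighted cross entropy for an imbalanced 3-class target.
targets = pd.Series([0, 0, 0, 1, 1, 2])
criterion = select_criterion("cross", targets, class_weight=True)
print(criterion.weight)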
@@ -284,10 +307,20 @@ class TunedModel(BaseModel):
  num_steps = max(1, num_steps)

  metrics_for_best_model = self.measure.upper()
+ if metrics_for_best_model == "UAR":
+ greater_is_better = True
+ elif metrics_for_best_model == "CCC":
+ greater_is_better = True
+ elif metrics_for_best_model == "MSE":
+ greater_is_better = False
+ elif metrics_for_best_model == "MAE":
+ greater_is_better = False
+ else:
+ self.util.error(f"unknown metric/measure: {metrics_for_best_model}")

  training_args = transformers.TrainingArguments(
- output_dir=model_root,
- logging_dir=log_root,
+ output_dir=self.torch_root,
+ logging_dir=self.log_root,
  per_device_train_batch_size=self.batch_size,
  per_device_eval_batch_size=self.batch_size,
  gradient_accumulation_steps=self.accumulation_steps,
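Because the model-selection metric can now be an error measure, greater_is_better has to follow the metric instead of being hard-coded to True. A compact sketch of the same mapping, using the four metric names from the diff:

# True for scores that should increase, False for errors that should decrease.
GREATER_IS_BETTER = {"UAR": True, "CCC": True, "MSE": False, "MAE": False}


def greater_is_better_for(measure: str) -> bool:
    try:
        return GREATER_IS_BETTER[measure.upper()]
    except KeyError:
        raise ValueError(f"unknown metric/measure: {measure}")


print(greater_is_better_for("mse"))  # False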
@@ -301,11 +334,11 @@ class TunedModel(BaseModel):
  learning_rate=self.learning_rate,
  save_total_limit=2,
  metric_for_best_model=metrics_for_best_model,
- greater_is_better=True,
+ greater_is_better=greater_is_better,
  load_best_model_at_end=True,
  remove_unused_columns=False,
  report_to="none",
- push_to_hub=push,
+ push_to_hub=self.push,
  hub_model_id=f"{self.util.get_name()}",
  )

@@ -319,8 +352,15 @@ class TunedModel(BaseModel):
  tokenizer=self.processor.feature_extractor,
  callbacks=[transformers.integrations.TensorBoardCallback()],
  )
+
  trainer.train()
- trainer.save_model(self.torch_root)
+ # trainer.save_model(self.torch_root)
+ log_file = os.path.join(
+ self.log_root,
+ "log.txt",
+ )
+ with open(log_file, "w") as text_file:
+ print(trainer.state.log_history, file=text_file)
  self.util.debug(f"saved best model to {self.torch_root}")
  self.load(self.run, self.epoch)

@@ -351,8 +391,30 @@ class TunedModel(BaseModel):
  self.run,
  self.epoch_num,
  )
+ self._plot_epoch_progression(report)
  return report

+ def _plot_epoch_progression(self, report):
+ log_file = os.path.join(
+ self.log_root,
+ "log.txt",
+ )
+ with open(log_file, "r") as file:
+ data = file.read()
+ list = ast.literal_eval(data)
+ epochs, vals, loss = [], [], []
+ for index, tp in enumerate(list):
+ try:
+ epochs.append(tp["epoch"])
+ measure = self.measure.upper()
+ vals.append(tp[f"eval_{measure}"])
+ loss.append(tp["eval_loss"])
+ except KeyError:
+ del epochs[-1]
+ # print(f'no value at {index}')
+ df = pd.DataFrame({"results": vals, "losses": loss}, index=epochs)
+ report.plot_epoch_progression_finetuned(df)
+
  def predict_sample(self, signal):
  """Predict one sample"""
  prediction = {}
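Because train() writes the log with print(trainer.state.log_history, ...), the file contains the repr of a list of dicts, which is why ast.literal_eval can read it back. A small sketch of that round trip with made-up history entries (real keys follow transformers' eval_<metric> naming, e.g. eval_UAR and eval_loss; the values here are invented):

import ast
import pandas as pd

# Fake log_history in the shape transformers' Trainer collects it.
log_history = [
    {"epoch": 1.0, "eval_UAR": 0.52, "eval_loss": 1.10},
    {"epoch": 2.0, "eval_UAR": 0.61, "eval_loss": 0.95},
    {"epoch": 2.0, "train_loss": 0.90},  # entries without eval keys get skipped
]

# Write it like TunedModel.train: print() the list into a text file.
with open("log.txt", "w") as f:
    print(log_history, file=f)

# Read it back the way _plot_epoch_progression does.
with open("log.txt") as f:
    entries = ast.literal_eval(f.read())

epochs, vals, losses = [], [], []
for entry in entries:
    try:
        epochs.append(entry["epoch"])
        vals.append(entry["eval_UAR"])
        losses.append(entry["eval_loss"])
    except KeyError:
        del epochs[-1]  # drop entries that carry no evaluation results

df = pd.DataFrame({"results": vals, "losses": losses}, index=epochs)
print(df)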
@@ -462,7 +524,10 @@ class Model(Wav2Vec2PreTrainedModel):
  )
  outputs = torch.sum(hidden_states, dim=1)
  attention_sum = torch.sum(attention_mask, dim=1)
- outputs = outputs / torch.reshape(attention_sum, (-1, 1))
+
+ epsilon = 1e-6  # to avoid division by zero and numerical instability
+ outputs = outputs / (torch.reshape(attention_sum, (-1, 1)) +
+ epsilon)

  return outputs

nkululeko/plots.py CHANGED
@@ -1,21 +1,23 @@
  # plots.py
- import pandas as pd
+ import ast
+
  import matplotlib.pyplot as plt
- from sklearn.manifold import TSNE
- import seaborn as sns
  import numpy as np
- import ast
+ import pandas as pd
  from scipy import stats
- from nkululeko.utils.util import Util
- import nkululeko.utils.stats as su
+ import seaborn as sns
+ from sklearn.manifold import TSNE
+
  import nkululeko.glob_conf as glob_conf
- from nkululeko.reporting.report_item import ReportItem
  from nkululeko.reporting.defines import Header
+ from nkululeko.reporting.report_item import ReportItem
+ import nkululeko.utils.stats as su
+ from nkululeko.utils.util import Util


  class Plots:
  def __init__(self):
- """Initializing the util system"""
+ """Initializing the util system."""
  self.util = Util("plots")
  self.format = self.util.config_val("PLOT", "format", "png")
  self.target = self.util.config_val("DATA", "target", "emotion")
@@ -138,8 +140,7 @@ class Plots:
  df, att1, class_label, att1, type_s
  )
  else:
- ax, caption = self._plot2cont(
- df, class_label, att1, type_s)
+ ax, caption = self._plot2cont(df, class_label, att1, type_s)
  self._save_plot(
  ax,
  caption,
@@ -152,8 +153,7 @@ class Plots:
  att1 = att[0]
  att2 = att[1]
  if att1 == self.target or att2 == self.target:
- self.util.debug(
- f"no need to correlate {self.target} with itself")
+ self.util.debug(f"no need to correlate {self.target} with itself")
  return
  if att1 not in df:
  self.util.error(f"unknown feature: {att1}")
@@ -168,8 +168,7 @@ class Plots:
  if self.util.is_categorical(df[att1]):
  if self.util.is_categorical(df[att2]):
  # class_label = cat, att1 = cat, att2 = cat
- ax, caption = self._plot2cat(
- df, att1, att2, att1, type_s)
+ ax, caption = self._plot2cat(df, att1, att2, att1, type_s)
  else:
  # class_label = cat, att1 = cat, att2 = cont
  ax, caption = self._plotcatcont(
@@ -190,8 +189,7 @@ class Plots:
  if self.util.is_categorical(df[att1]):
  if self.util.is_categorical(df[att2]):
  # class_label = cont, att1 = cat, att2 = cat
- ax, caption = self._plot2cat(
- df, att1, att2, att1, type_s)
+ ax, caption = self._plot2cat(df, att1, att2, att1, type_s)
  else:
  # class_label = cont, att1 = cat, att2 = cont
  ax, caption = self._plot2cont_cat(
@@ -205,8 +203,7 @@ class Plots:
  )
  else:
  # class_label = cont, att1 = cont, att2 = cont
- ax, caption = self._plot2cont(
- df, att1, att2, type_s)
+ ax, caption = self._plot2cont(df, att1, att2, type_s)

  self._save_plot(
  ax, caption, f"Correlation of {att1} and {att2}", filename, type_s
@@ -238,8 +235,7 @@ class Plots:
  )

  def _check_binning(self, att, df):
- bin_reals_att = eval(self.util.config_val(
- "EXPL", f"{att}.bin_reals", "False"))
+ bin_reals_att = eval(self.util.config_val("EXPL", f"{att}.bin_reals", "False"))
  if bin_reals_att:
  self.util.debug(f"binning continuous variable {att} to categories")
  att_new = f"{att}_binned"
@@ -342,8 +338,7 @@ class Plots:

  def describe_df(self, name, df, target, filename):
  """Make a stacked barplot of samples and speakers per sex and target values. speaker, gender and target columns must be present"""
- fig_dir = self.util.get_path(
- "fig_dir") + "../"  # one up because of the runs
+ fig_dir = self.util.get_path("fig_dir") + "../"  # one up because of the runs
  sampl_num = df.shape[0]
  sex_col = "gender"
  if target == "gender":
@@ -392,8 +387,7 @@ class Plots:
  dim_num = int(self.util.config_val("EXPL", "scatter.dim", 2))
  # one up because of the runs
  fig_dir = self.util.get_path("fig_dir") + "../"
- sample_selection = self.util.config_val(
- "EXPL", "sample_selection", "all")
+ sample_selection = self.util.config_val("EXPL", "sample_selection", "all")
  filename = f"{label}_{self.util.get_feattype_name()}_{sample_selection}_{dimred_type}_{str(dim_num)}d"
  filename = f"{fig_dir}{filename}.{self.format}"
  self.util.debug(f"computing {dimred_type}, this might take a while...")
@@ -435,8 +429,7 @@ class Plots:

  if dim_num == 2:
  plot_data = np.vstack((data.T, labels)).T
- plot_df = pd.DataFrame(
- data=plot_data, columns=("Dim_1", "Dim_2", "label"))
+ plot_df = pd.DataFrame(data=plot_data, columns=("Dim_1", "Dim_2", "label"))
  # plt.tight_layout()
  ax = (
  sns.FacetGrid(plot_df, hue="label", height=6)
nkululeko/reporting/reporter.py CHANGED
@@ -300,6 +300,23 @@ class Reporter:
  def get_result(self):
  return self.result

+ def plot_epoch_progression_finetuned(self, df):
+ plot_name_suggest = self.util.get_exp_name()
+ fig_dir = self.util.get_path("fig_dir")
+ plot_name = (
+ self.util.config_val("PLOT", "name", plot_name_suggest)
+ + "_epoch_progression"
+ )
+ ax = df.plot()
+ fig = ax.figure
+ plt.xlabel("epochs")
+ plt.ylabel(f"{self.MEASURE}")
+ plot_path = f"{fig_dir}{plot_name}.{self.format}"
+ plt.savefig(plot_path)
+ self.util.debug(f"plotted epoch progression to {plot_path}")
+ plt.close(fig)
+ fig.clear()
+
  def plot_epoch_progression(self, reports, out_name):
  fig_dir = self.util.get_path("fig_dir")
  results, losses, train_results, losses_eval = [], [], [], []
nkululeko/utils/util.py CHANGED
@@ -35,9 +35,9 @@ class Util:
  if has_config:
  try:
  import nkululeko.glob_conf as glob_conf
+
  self.config = glob_conf.config
- self.got_data_roots = self.config_val(
- "DATA", "root_folders", False)
+ self.got_data_roots = self.config_val("DATA", "root_folders", False)
  if self.got_data_roots:
  # if there is a global data rootfolder file, read from there
  if not os.path.isfile(self.got_data_roots):
@@ -116,8 +116,7 @@ class Util:
  )
  return default
  if not default in self.stopvals:
- self.debug(
- f"value for {key} not found, using default: {default}")
+ self.debug(f"value for {key} not found, using default: {default}")
  return default

  def set_config(self, config):
@@ -160,8 +159,8 @@ class Util:
  if len(df) == 0:
  return df
  if not isinstance(df.index, pd.MultiIndex):
- df.index = audformat.utils.to_segmented_index(
- df.index, allow_nat=False)
+ self.debug("converting to segmented index, this might take a while...")
+ df.index = audformat.utils.to_segmented_index(df.index, allow_nat=False)
  return df

  def _get_value_descript(self, section, name):
@@ -272,8 +271,7 @@ class Util:
  return self.config[section][key]
  except KeyError:
  if default not in self.stopvals:
- self.debug(
- f"value for {key} not found, using default: {default}")
+ self.debug(f"value for {key} not found, using default: {default}")
  return default

  def config_val_list(self, section, key, default):
@@ -281,8 +279,7 @@ class Util:
  return ast.literal_eval(self.config[section][key])
  except KeyError:
  if not default in self.stopvals:
- self.debug(
- f"value for {key} not found, using default: {default}")
+ self.debug(f"value for {key} not found, using default: {default}")
  return default

  def continuous_to_categorical(self, series):
nkululeko-0.86.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: nkululeko
- Version: 0.86.1
+ Version: 0.86.3
  Summary: Machine learning audio prediction experiments based on templates
  Home-page: https://github.com/felixbur/nkululeko
  Author: Felix Burkhardt
@@ -334,6 +334,15 @@ F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schulle
  Changelog
  =========

+ Version 0.86.3
+ --------------
+ * bugfixed: nan in finetuned model and double saving
+ * import features now get multiindex automatically
+
+ Version 0.86.2
+ --------------
+ * plots epoch progression for finetuned models now
+
  Version 0.86.1
  --------------
  * functionality to push to hub
nkululeko-0.86.3.dist-info/RECORD CHANGED
@@ -2,7 +2,7 @@ nkululeko/__init__.py,sha256=62f8HiEzJ8rG2QlTFJXUCMpvuH3fKI33DoJSj33mscc,63
  nkululeko/aug_train.py,sha256=YhuZnS_WVWnun9G-M6g5n6rbRxoVREz6Zh7k6qprFNQ,3194
  nkululeko/augment.py,sha256=4MG0apTAG5RgkuJrYEjGgDdbodZWi_HweSPNI1JJ5QA,3051
  nkululeko/cacheddataset.py,sha256=lIJ6hUo5LoxSrzXtWV8mzwO7wRtUETWnOQ4ws2XfL1E,969
- nkululeko/constants.py,sha256=pZ3DZYgXdEpxfaj-mnI6q21TyYMa2QQG_sKa6CBxCCA,39
+ nkululeko/constants.py,sha256=2ysebEFzu3zwO0-FXWf2pBOs8XRLPmy718GFrZ2O9pU,39
  nkululeko/demo.py,sha256=8bl15Kitoesnz8oa8yrs52T6YCSOhWbbq9PnZ8Hj6D0,3232
  nkululeko/demo_feats.py,sha256=sAeGFojhEj9WEDFtG3SzPBmyYJWLF2rkbpp65m8Ujo4,2025
  nkululeko/demo_predictor.py,sha256=es56xbT8ifkS_vnrlb5NTZT54gNmeUtNlA4zVA_gnN8,4757
@@ -17,7 +17,7 @@ nkululeko/modelrunner.py,sha256=iCmfJxsS2UafcikjRdUqPQuqQMOYA-Ctr3et3HeNR3c,1045
  nkululeko/multidb.py,sha256=fG3VukEWP1vreVN4gB1IRXxwwg4jLftsSEYtu0o1f78,5634
  nkululeko/nkuluflag.py,sha256=PGWSmZz-PiiHLgcZJAoGOI_Y-sZDVI1ksB8p5r7riWM,3725
  nkululeko/nkululeko.py,sha256=Kn3s2E3yyH8cJ7z6lkMxrnqtCxTu7-qfe9Zr_ONTD5g,1968
- nkululeko/plots.py,sha256=nd9tF_61DyAx7oGZF8gTrHXazkgFjFe4eClxu1nQ_XU,23276
+ nkululeko/plots.py,sha256=C2mwQFK0Vxfl5ZM7CO87tULDoEf7G16ek0nU77bhOc4,23070
  nkululeko/predict.py,sha256=sF091sSSLnEWcISx9ZcULLie3tY5XeFsQJd6b3vrxFg,2409
  nkululeko/resample.py,sha256=2d9eao_0sLrGZ_KSl8OVKsPor3BkFrlmMhrpB9WelIs,4267
  nkululeko/runmanager.py,sha256=eTM1DNQKt1lxYhzt4vZyZluPXW9sWlIJHNQzex4lkJU,7624
@@ -55,7 +55,7 @@ nkululeko/feat_extract/feats_auddim.py,sha256=VlzKKXTXa5kjLgQBWyEFy-daIyU1SkOwCC
  nkululeko/feat_extract/feats_audmodel.py,sha256=VjBNgAoxsHJhwr6Kwt9CxX6SaCM4RK_OV-GU2W5-bhU,3187
  nkululeko/feat_extract/feats_clap.py,sha256=nR6eEIRdsMHcfmD1bNtt5WfDvkxKjvEbukSSrXHm-HU,3489
  nkululeko/feat_extract/feats_hubert.py,sha256=cLoUzSLjSYBkQnftjacSL7ES3O7Ysh_KrPYvZtLX_TU,5196
- nkululeko/feat_extract/feats_import.py,sha256=rj1p8lz19tCAC8hLzzZAwZ0M6gzwH3BzfabFUgal0yw,1622
+ nkululeko/feat_extract/feats_import.py,sha256=WiU5lCkJsmFNTDyPV0qIh8mJssa6bpgP7AYw_ClKfWM,1674
  nkululeko/feat_extract/feats_mld.py,sha256=Vvu7GZOkn7Vda8eIOXqHjg78zegkFe3vTUaCXyVM0eA,2021
  nkululeko/feat_extract/feats_mos.py,sha256=KXNt7QYEfxkvr6UyVhig2aWQBaIvovlrR4gPuP03gmo,4174
  nkululeko/feat_extract/feats_opensmile.py,sha256=g6ZsAxjjGGvGfrr5fngWC-NJ8E7CP1kYZwrlodZJzzU,4028
@@ -82,13 +82,13 @@ nkululeko/models/model_gmm.py,sha256=hZ9UO36KNf48qa3J-xkWIicIj9-TApmt21zNES2vEOs
  nkululeko/models/model_knn.py,sha256=KlnrJfwiVnmXZrAaYGFrKA2f5sznvTzSJQ8-5etOP0k,599
  nkululeko/models/model_knn_reg.py,sha256=j7YFfVm6xOR2d9yBYdQiwwqYfqkX0JynX_qLCvkr1fk,610
  nkululeko/models/model_lin_reg.py,sha256=0D7mSnSwK82lNWDMwHYRyq3FmGa6y-DHDGg4qUe85q4,422
- nkululeko/models/model_mlp.py,sha256=JtC83GYKtqCTW00rUm_xKSKjAsdMUAsqtnBfEFZBCwA,9854
+ nkululeko/models/model_mlp.py,sha256=xMirtYax3bLBz_0kkC0M4Rc6-KQY05NNKHQGw7rbum8,9856
  nkululeko/models/model_mlp_regression.py,sha256=PO5qyfjgAJH8hawhmeXDaUThyXDYdM642dQHkO0NY7c,10204
  nkululeko/models/model_svm.py,sha256=rsME3KvKvNG7bdE5lbvYUu85WZhaASZxxmdNDIVJRZ4,940
  nkululeko/models/model_svr.py,sha256=_YZeksqB3eBENGlg3g9RwYFlk9rQQ-XCeNBKLlGGVoE,725
  nkululeko/models/model_tree.py,sha256=rf16faUm4o2LJgkoYpeY998b8DQIvXZ73_m1IS3TnnE,417
  nkululeko/models/model_tree_reg.py,sha256=IgQcPTE-304HQLYSKPF8Z4ot_Ur9dH01fZjS0nXke_M,428
- nkululeko/models/model_tuned.py,sha256=eiSKFmObn9_VNTqF1lZvWbyyWxvhy1PVjOiIcs3YiGA,18379
+ nkululeko/models/model_tuned.py,sha256=RDcvcejBQNGY_uW00r22i7EDT6oKchS5uqFFnj0Gtzg,21146
  nkululeko/models/model_xgb.py,sha256=Thgx5ESdIok4v72mKh4plxpo4smGcKALWNCJTDScY0M,447
  nkululeko/models/model_xgr.py,sha256=aGBtNGLWjOE_2rICGYGFxmT8DtnHYsIl1lIpMtghHsY,418
  nkululeko/reporting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -96,7 +96,7 @@ nkululeko/reporting/defines.py,sha256=IsY1YgKRMaABpylVKjBJgJ5bNCEbGCVA_E6pivraqS
  nkululeko/reporting/latex_writer.py,sha256=qiCRSmB4KOD_za4oHu5x-PhwjZohzfo8wecMOwlXZwc,1886
  nkululeko/reporting/report.py,sha256=W0rcigDdjBvxZQ3pZja_gvToILYvaZ1BFtnN2qFRfYI,1060
  nkululeko/reporting/report_item.py,sha256=siWeGNgo4bAE46YBMNcsdf3jTMTy76BO9Fi6DTvDig4,533
- nkululeko/reporting/reporter.py,sha256=eLqwKEUTQ7v5CedzhZP2617qmXGcvi0rjyyFLOBdxtQ,12841
+ nkululeko/reporting/reporter.py,sha256=NugmGmS3iwuBJ59jqyuTCKPRpiPLGhnz12z_nlVh69Y,13445
  nkululeko/reporting/result.py,sha256=nSN5or-Py2GPRWHkWpGRh7UCi1W0er7WLEHz8fYLk-A,742
  nkululeko/segmenting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nkululeko/segmenting/seg_inaspeechsegmenter.py,sha256=pmLHuXsaqvcdYxB4PSW9l1mbQWZZBJFhi_CGabqydas,1947
@@ -104,9 +104,9 @@ nkululeko/segmenting/seg_silero.py,sha256=lLytS38KzARS17omwv8VBw-zz60RVSXGSvZ5Ev
  nkululeko/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nkululeko/utils/files.py,sha256=UiGAtZRWYjHSvlmPaTMtzyNNGE6qaLaxQkybctS7iRM,4021
  nkululeko/utils/stats.py,sha256=1yUq0FTOyqkU8TwUocJRYdJaqMU5SlOBBRUun9STo2M,2829
- nkululeko/utils/util.py,sha256=mK1MgO14NinrPhavJw72eR_2WN_kBKjVKiEJnzvdO1Q,13946
- nkululeko-0.86.1.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
- nkululeko-0.86.1.dist-info/METADATA,sha256=LXoMlzo5QBzABv0fpIDvf4nYDjCJkRCZL1XmffikrRc,37088
- nkululeko-0.86.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- nkululeko-0.86.1.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
- nkululeko-0.86.1.dist-info/RECORD,,
+ nkululeko/utils/util.py,sha256=PcyAuCGgGuxjlv-e4JrVbpewiRTiAXWk47w5X0dVgx8,13930
+ nkululeko-0.86.3.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
+ nkululeko-0.86.3.dist-info/METADATA,sha256=Nnb3gRWEI1DSqf8KpaD8CDqdkHyiKdv-j9HpN4jjeks,37305
+ nkululeko-0.86.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ nkululeko-0.86.3.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
+ nkululeko-0.86.3.dist-info/RECORD,,