nkululeko 0.86.1__py3-none-any.whl → 0.86.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
nkululeko/constants.py CHANGED
@@ -1,2 +1,2 @@
- VERSION="0.86.1"
+ VERSION="0.86.2"
  SAMPLING_RATE = 16000
nkululeko/models/model_mlp.py CHANGED
@@ -14,12 +14,12 @@ from nkululeko.losses.loss_softf1loss import SoftF1Loss


  class MLP_model(Model):
- """MLP = multi layer perceptron"""
+ """MLP = multi layer perceptron."""

  is_classifier = True

  def __init__(self, df_train, df_test, feats_train, feats_test):
- """Constructor taking the configuration and all dataframes"""
+ """Constructor taking the configuration and all dataframes."""
  super().__init__(df_train, df_test, feats_train, feats_test)
  super().set_model_type("ann")
  self.name = "mlp"
nkululeko/models/model_tuned.py CHANGED
@@ -1,5 +1,6 @@
  """Code based on @jwagner."""

+ import ast
  import dataclasses
  import json
  import os
@@ -51,8 +52,9 @@ class TunedModel(BaseModel):
  self.batch_size = int(self.util.config_val("MODEL", "batch_size", "8"))
  self.util.debug(f"batch size: {self.batch_size}")
  self.learning_rate = float(
- self.util.config_val("MODEL", "learning_rate", 0.0001)
+ self.util.config_val("MODEL", "learning_rate", "0.0001")
  )
+ self.max_duration = float(self.util.config_val("MODEL", "max_duration", "8.0"))
  self.df_train, self.df_test = df_train, df_test
  self.epoch_num = int(self.util.config_val("EXP", "epochs", 1))
  drop = self.util.config_val("MODEL", "drop", False)
@@ -64,11 +66,10 @@ class TunedModel(BaseModel):

  def _init_model(self):
  model_path = "facebook/wav2vec2-large-robust-ft-swbd-300h"
- pretrained_model = self.util.config_val(
- "MODEL", "pretrained_model", model_path)
+ pretrained_model = self.util.config_val("MODEL", "pretrained_model", model_path)
  self.num_layers = None
  self.sampling_rate = 16000
- self.max_duration_sec = 8.0
+ self.max_duration_sec = self.max_duration
  self.accumulation_steps = 4

  # print finetuning information via debug
@@ -235,8 +236,8 @@ class TunedModel(BaseModel):
  def train(self):
  """Train the model."""
  model_root = self.util.get_path("model_dir")
- log_root = os.path.join(self.util.get_exp_dir(), "log")
- audeer.mkdir(log_root)
+ self.log_root = os.path.join(self.util.get_exp_dir(), "log")
+ audeer.mkdir(self.log_root)
  self.torch_root = audeer.path(model_root, "torch")
  conf_file = os.path.join(self.torch_root, "config.json")
  if os.path.isfile(conf_file):
@@ -244,20 +245,35 @@ class TunedModel(BaseModel):
  self.load(self.run, self.epoch_num)
  return
  targets = pd.DataFrame(self.dataset["train"]["targets"])
- counts = targets[0].value_counts().sort_index()

  if self.is_classifier:
- train_weights = 1 / counts
- train_weights /= train_weights.sum()
- self.util.debug(f"train weights: {train_weights}")
- criterion = torch.nn.CrossEntropyLoss(
- weight=torch.Tensor(train_weights).to("cuda"),
- )
+ criterion = self.util.config_val("MODEL", "loss", "cross")
+ if criterion == "cross":
+ if self.util.config_val("MODEL", "class_weight", False):
+ counts = targets[0].value_counts().sort_index()
+ train_weights = 1 / counts
+ train_weights /= train_weights.sum()
+ self.util.debug(f"train weights: {train_weights}")
+ criterion = torch.nn.CrossEntropyLoss(
+ weight=torch.Tensor(train_weights).to("cuda"),
+ )
+ else:
+ criterion = torch.nn.CrossEntropyLoss()
+ else:
+ self.util.error(f"criterion {criterion} not supported for classifier")
  else:
- criterion = ConcordanceCorCoeff()
+ self.criterion = self.util.config_val("MODEL", "loss", "ccc")
+ if criterion == "1-ccc":
+ criterion = ConcordanceCorCoeff()
+ elif criterion == "mse":
+ criterion = torch.nn.MSELoss()
+ elif criterion == "mae":
+ criterion = torch.nn.L1Loss()
+ else:
+ self.util.error(f"criterion {criterion} not supported for regressor")

  # set push_to_hub value, default false
- push = self.util.config_val("MODEL", "push_to_hub", False)
+ push = eval(self.util.config_val("MODEL", "push_to_hub", "False"))

  class Trainer(transformers.Trainer):
  def compute_loss(
@@ -284,10 +300,20 @@ class TunedModel(BaseModel):
  num_steps = max(1, num_steps)

  metrics_for_best_model = self.measure.upper()
+ if metrics_for_best_model == "UAR":
+ greater_is_better = True
+ elif metrics_for_best_model == "CCC":
+ greater_is_better = True
+ elif metrics_for_best_model == "MSE":
+ greater_is_better = False
+ elif metrics_for_best_model == "MAE":
+ greater_is_better = False
+ else:
+ self.util.error(f"unknown metric/measure: {metrics_for_best_model}")

  training_args = transformers.TrainingArguments(
  output_dir=model_root,
- logging_dir=log_root,
+ logging_dir=self.log_root,
  per_device_train_batch_size=self.batch_size,
  per_device_eval_batch_size=self.batch_size,
  gradient_accumulation_steps=self.accumulation_steps,
@@ -301,7 +327,7 @@ class TunedModel(BaseModel):
  learning_rate=self.learning_rate,
  save_total_limit=2,
  metric_for_best_model=metrics_for_best_model,
- greater_is_better=True,
+ greater_is_better=greater_is_better,
  load_best_model_at_end=True,
  remove_unused_columns=False,
  report_to="none",
@@ -319,8 +345,15 @@ class TunedModel(BaseModel):
  tokenizer=self.processor.feature_extractor,
  callbacks=[transformers.integrations.TensorBoardCallback()],
  )
+
  trainer.train()
  trainer.save_model(self.torch_root)
+ log_file = os.path.join(
+ self.log_root,
+ "log.txt",
+ )
+ with open(log_file, "w") as text_file:
+ print(trainer.state.log_history, file=text_file)
  self.util.debug(f"saved best model to {self.torch_root}")
  self.load(self.run, self.epoch)
@@ -351,8 +384,30 @@ class TunedModel(BaseModel):
  self.run,
  self.epoch_num,
  )
+ self._plot_epoch_progression(report)
  return report

+ def _plot_epoch_progression(self, report):
+ log_file = os.path.join(
+ self.log_root,
+ "log.txt",
+ )
+ with open(log_file, "r") as file:
+ data = file.read()
+ list = ast.literal_eval(data)
+ epochs, vals, loss = [], [], []
+ for index, tp in enumerate(list):
+ try:
+ epochs.append(tp["epoch"])
+ measure = self.measure.upper()
+ vals.append(tp[f"eval_{measure}"])
+ loss.append(tp["eval_loss"])
+ except KeyError:
+ del epochs[-1]
+ # print(f'no value at {index}')
+ df = pd.DataFrame({"results": vals, "losses": loss}, index=epochs)
+ report.plot_epoch_progression_finetuned(df)
+
  def predict_sample(self, signal):
  """Predict one sample"""
  prediction = {}
nkululeko/plots.py CHANGED
@@ -1,21 +1,23 @@
  # plots.py
- import pandas as pd
+ import ast
+
  import matplotlib.pyplot as plt
- from sklearn.manifold import TSNE
- import seaborn as sns
  import numpy as np
- import ast
+ import pandas as pd
  from scipy import stats
- from nkululeko.utils.util import Util
- import nkululeko.utils.stats as su
+ import seaborn as sns
+ from sklearn.manifold import TSNE
+
  import nkululeko.glob_conf as glob_conf
- from nkululeko.reporting.report_item import ReportItem
  from nkululeko.reporting.defines import Header
+ from nkululeko.reporting.report_item import ReportItem
+ import nkululeko.utils.stats as su
+ from nkululeko.utils.util import Util


  class Plots:
  def __init__(self):
- """Initializing the util system"""
+ """Initializing the util system."""
  self.util = Util("plots")
  self.format = self.util.config_val("PLOT", "format", "png")
  self.target = self.util.config_val("DATA", "target", "emotion")
@@ -138,8 +140,7 @@ class Plots:
  df, att1, class_label, att1, type_s
  )
  else:
- ax, caption = self._plot2cont(
- df, class_label, att1, type_s)
+ ax, caption = self._plot2cont(df, class_label, att1, type_s)
  self._save_plot(
  ax,
  caption,
@@ -152,8 +153,7 @@ class Plots:
  att1 = att[0]
  att2 = att[1]
  if att1 == self.target or att2 == self.target:
- self.util.debug(
- f"no need to correlate {self.target} with itself")
+ self.util.debug(f"no need to correlate {self.target} with itself")
  return
  if att1 not in df:
  self.util.error(f"unknown feature: {att1}")
@@ -168,8 +168,7 @@ class Plots:
  if self.util.is_categorical(df[att1]):
  if self.util.is_categorical(df[att2]):
  # class_label = cat, att1 = cat, att2 = cat
- ax, caption = self._plot2cat(
- df, att1, att2, att1, type_s)
+ ax, caption = self._plot2cat(df, att1, att2, att1, type_s)
  else:
  # class_label = cat, att1 = cat, att2 = cont
  ax, caption = self._plotcatcont(
@@ -190,8 +189,7 @@ class Plots:
  if self.util.is_categorical(df[att1]):
  if self.util.is_categorical(df[att2]):
  # class_label = cont, att1 = cat, att2 = cat
- ax, caption = self._plot2cat(
- df, att1, att2, att1, type_s)
+ ax, caption = self._plot2cat(df, att1, att2, att1, type_s)
  else:
  # class_label = cont, att1 = cat, att2 = cont
  ax, caption = self._plot2cont_cat(
@@ -205,8 +203,7 @@ class Plots:
  )
  else:
  # class_label = cont, att1 = cont, att2 = cont
- ax, caption = self._plot2cont(
- df, att1, att2, type_s)
+ ax, caption = self._plot2cont(df, att1, att2, type_s)

  self._save_plot(
  ax, caption, f"Correlation of {att1} and {att2}", filename, type_s
@@ -238,8 +235,7 @@ class Plots:
  )

  def _check_binning(self, att, df):
- bin_reals_att = eval(self.util.config_val(
- "EXPL", f"{att}.bin_reals", "False"))
+ bin_reals_att = eval(self.util.config_val("EXPL", f"{att}.bin_reals", "False"))
  if bin_reals_att:
  self.util.debug(f"binning continuous variable {att} to categories")
  att_new = f"{att}_binned"
@@ -342,8 +338,7 @@ class Plots:


  def describe_df(self, name, df, target, filename):
  """Make a stacked barplot of samples and speakers per sex and target values. speaker, gender and target columns must be present"""
- fig_dir = self.util.get_path(
- "fig_dir") + "../" # one up because of the runs
+ fig_dir = self.util.get_path("fig_dir") + "../" # one up because of the runs
  sampl_num = df.shape[0]
  sex_col = "gender"
  if target == "gender":
@@ -392,8 +387,7 @@ class Plots:
  dim_num = int(self.util.config_val("EXPL", "scatter.dim", 2))
  # one up because of the runs
  fig_dir = self.util.get_path("fig_dir") + "../"
- sample_selection = self.util.config_val(
- "EXPL", "sample_selection", "all")
+ sample_selection = self.util.config_val("EXPL", "sample_selection", "all")
  filename = f"{label}_{self.util.get_feattype_name()}_{sample_selection}_{dimred_type}_{str(dim_num)}d"
  filename = f"{fig_dir}{filename}.{self.format}"
  self.util.debug(f"computing {dimred_type}, this might take a while...")
@@ -435,8 +429,7 @@ class Plots:

  if dim_num == 2:
  plot_data = np.vstack((data.T, labels)).T
- plot_df = pd.DataFrame(
- data=plot_data, columns=("Dim_1", "Dim_2", "label"))
+ plot_df = pd.DataFrame(data=plot_data, columns=("Dim_1", "Dim_2", "label"))
  # plt.tight_layout()
  ax = (
  sns.FacetGrid(plot_df, hue="label", height=6)
nkululeko/reporting/reporter.py CHANGED
@@ -300,6 +300,23 @@ class Reporter:
  def get_result(self):
  return self.result

+ def plot_epoch_progression_finetuned(self, df):
+ plot_name_suggest = self.util.get_exp_name()
+ fig_dir = self.util.get_path("fig_dir")
+ plot_name = (
+ self.util.config_val("PLOT", "name", plot_name_suggest)
+ + "_epoch_progression"
+ )
+ ax = df.plot()
+ fig = ax.figure
+ plt.xlabel("epochs")
+ plt.ylabel(f"{self.MEASURE}")
+ plot_path = f"{fig_dir}{plot_name}.{self.format}"
+ plt.savefig(plot_path)
+ self.util.debug(f"plotted epoch progression to {plot_path}")
+ plt.close(fig)
+ fig.clear()
+
  def plot_epoch_progression(self, reports, out_name):
  fig_dir = self.util.get_path("fig_dir")
  results, losses, train_results, losses_eval = [], [], [], []
nkululeko-0.86.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: nkululeko
- Version: 0.86.1
+ Version: 0.86.2
  Summary: Machine learning audio prediction experiments based on templates
  Home-page: https://github.com/felixbur/nkululeko
  Author: Felix Burkhardt
@@ -334,6 +334,10 @@ F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schulle
  Changelog
  =========

+ Version 0.86.2
+ --------------
+ * plots epoch progression for finetuned models now
+
  Version 0.86.1
  --------------
  * functionality to push to hub
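The finetuning changes in nkululeko/models/model_tuned.py above read several new [MODEL] settings through util.config_val(): loss (with class_weight for weighted cross entropy), max_duration and push_to_hub, alongside the existing batch_size, learning_rate and pretrained_model keys. As a minimal sketch only, assuming nkululeko's INI-style experiment files and a hypothetical type = finetune entry (neither the overall file layout nor that key appears in this diff), such a configuration could be generated with Python's configparser:

import configparser

# Hypothetical experiment config: only the [EXP]/[MODEL] keys and their defaults
# are taken from the config_val() calls in the diff above; everything else is assumed.
config = configparser.ConfigParser()
config["EXP"] = {"epochs": "10"}  # read via config_val("EXP", "epochs", 1)
config["MODEL"] = {
    "type": "finetune",      # assumed model type selecting TunedModel
    "loss": "cross",         # classifiers: "cross"; regressors: "1-ccc", "mse" or "mae"
    "class_weight": "True",  # enables the inverse-frequency CrossEntropyLoss weights
    "max_duration": "8.0",   # seconds per sample, feeds self.max_duration_sec
    "push_to_hub": "False",  # parsed with eval() in train()
}
with open("exp.ini", "w") as ini_file:
    config.write(ini_file)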
nkululeko-0.86.2.dist-info/RECORD CHANGED
@@ -2,7 +2,7 @@ nkululeko/__init__.py,sha256=62f8HiEzJ8rG2QlTFJXUCMpvuH3fKI33DoJSj33mscc,63
  nkululeko/aug_train.py,sha256=YhuZnS_WVWnun9G-M6g5n6rbRxoVREz6Zh7k6qprFNQ,3194
  nkululeko/augment.py,sha256=4MG0apTAG5RgkuJrYEjGgDdbodZWi_HweSPNI1JJ5QA,3051
  nkululeko/cacheddataset.py,sha256=lIJ6hUo5LoxSrzXtWV8mzwO7wRtUETWnOQ4ws2XfL1E,969
- nkululeko/constants.py,sha256=pZ3DZYgXdEpxfaj-mnI6q21TyYMa2QQG_sKa6CBxCCA,39
+ nkululeko/constants.py,sha256=E9mXpvAI5IDamRRXgBlBH8XGTw1xEjEBzNibjhFPEFc,39
  nkululeko/demo.py,sha256=8bl15Kitoesnz8oa8yrs52T6YCSOhWbbq9PnZ8Hj6D0,3232
  nkululeko/demo_feats.py,sha256=sAeGFojhEj9WEDFtG3SzPBmyYJWLF2rkbpp65m8Ujo4,2025
  nkululeko/demo_predictor.py,sha256=es56xbT8ifkS_vnrlb5NTZT54gNmeUtNlA4zVA_gnN8,4757
@@ -17,7 +17,7 @@ nkululeko/modelrunner.py,sha256=iCmfJxsS2UafcikjRdUqPQuqQMOYA-Ctr3et3HeNR3c,1045
  nkululeko/multidb.py,sha256=fG3VukEWP1vreVN4gB1IRXxwwg4jLftsSEYtu0o1f78,5634
  nkululeko/nkuluflag.py,sha256=PGWSmZz-PiiHLgcZJAoGOI_Y-sZDVI1ksB8p5r7riWM,3725
  nkululeko/nkululeko.py,sha256=Kn3s2E3yyH8cJ7z6lkMxrnqtCxTu7-qfe9Zr_ONTD5g,1968
- nkululeko/plots.py,sha256=nd9tF_61DyAx7oGZF8gTrHXazkgFjFe4eClxu1nQ_XU,23276
+ nkululeko/plots.py,sha256=C2mwQFK0Vxfl5ZM7CO87tULDoEf7G16ek0nU77bhOc4,23070
  nkululeko/predict.py,sha256=sF091sSSLnEWcISx9ZcULLie3tY5XeFsQJd6b3vrxFg,2409
  nkululeko/resample.py,sha256=2d9eao_0sLrGZ_KSl8OVKsPor3BkFrlmMhrpB9WelIs,4267
  nkululeko/runmanager.py,sha256=eTM1DNQKt1lxYhzt4vZyZluPXW9sWlIJHNQzex4lkJU,7624
@@ -82,13 +82,13 @@ nkululeko/models/model_gmm.py,sha256=hZ9UO36KNf48qa3J-xkWIicIj9-TApmt21zNES2vEOs
  nkululeko/models/model_knn.py,sha256=KlnrJfwiVnmXZrAaYGFrKA2f5sznvTzSJQ8-5etOP0k,599
  nkululeko/models/model_knn_reg.py,sha256=j7YFfVm6xOR2d9yBYdQiwwqYfqkX0JynX_qLCvkr1fk,610
  nkululeko/models/model_lin_reg.py,sha256=0D7mSnSwK82lNWDMwHYRyq3FmGa6y-DHDGg4qUe85q4,422
- nkululeko/models/model_mlp.py,sha256=JtC83GYKtqCTW00rUm_xKSKjAsdMUAsqtnBfEFZBCwA,9854
+ nkululeko/models/model_mlp.py,sha256=xMirtYax3bLBz_0kkC0M4Rc6-KQY05NNKHQGw7rbum8,9856
  nkululeko/models/model_mlp_regression.py,sha256=PO5qyfjgAJH8hawhmeXDaUThyXDYdM642dQHkO0NY7c,10204
  nkululeko/models/model_svm.py,sha256=rsME3KvKvNG7bdE5lbvYUu85WZhaASZxxmdNDIVJRZ4,940
  nkululeko/models/model_svr.py,sha256=_YZeksqB3eBENGlg3g9RwYFlk9rQQ-XCeNBKLlGGVoE,725
  nkululeko/models/model_tree.py,sha256=rf16faUm4o2LJgkoYpeY998b8DQIvXZ73_m1IS3TnnE,417
  nkululeko/models/model_tree_reg.py,sha256=IgQcPTE-304HQLYSKPF8Z4ot_Ur9dH01fZjS0nXke_M,428
- nkululeko/models/model_tuned.py,sha256=eiSKFmObn9_VNTqF1lZvWbyyWxvhy1PVjOiIcs3YiGA,18379
+ nkululeko/models/model_tuned.py,sha256=xOoY5TROzzTVu3sDtlmEle3V1MAgpf8S3WxO9o4MzV4,20777
  nkululeko/models/model_xgb.py,sha256=Thgx5ESdIok4v72mKh4plxpo4smGcKALWNCJTDScY0M,447
  nkululeko/models/model_xgr.py,sha256=aGBtNGLWjOE_2rICGYGFxmT8DtnHYsIl1lIpMtghHsY,418
  nkululeko/reporting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -96,7 +96,7 @@ nkululeko/reporting/defines.py,sha256=IsY1YgKRMaABpylVKjBJgJ5bNCEbGCVA_E6pivraqS
  nkululeko/reporting/latex_writer.py,sha256=qiCRSmB4KOD_za4oHu5x-PhwjZohzfo8wecMOwlXZwc,1886
  nkululeko/reporting/report.py,sha256=W0rcigDdjBvxZQ3pZja_gvToILYvaZ1BFtnN2qFRfYI,1060
  nkululeko/reporting/report_item.py,sha256=siWeGNgo4bAE46YBMNcsdf3jTMTy76BO9Fi6DTvDig4,533
- nkululeko/reporting/reporter.py,sha256=eLqwKEUTQ7v5CedzhZP2617qmXGcvi0rjyyFLOBdxtQ,12841
+ nkululeko/reporting/reporter.py,sha256=NugmGmS3iwuBJ59jqyuTCKPRpiPLGhnz12z_nlVh69Y,13445
  nkululeko/reporting/result.py,sha256=nSN5or-Py2GPRWHkWpGRh7UCi1W0er7WLEHz8fYLk-A,742
  nkululeko/segmenting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nkululeko/segmenting/seg_inaspeechsegmenter.py,sha256=pmLHuXsaqvcdYxB4PSW9l1mbQWZZBJFhi_CGabqydas,1947
@@ -105,8 +105,8 @@ nkululeko/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  nkululeko/utils/files.py,sha256=UiGAtZRWYjHSvlmPaTMtzyNNGE6qaLaxQkybctS7iRM,4021
  nkululeko/utils/stats.py,sha256=1yUq0FTOyqkU8TwUocJRYdJaqMU5SlOBBRUun9STo2M,2829
  nkululeko/utils/util.py,sha256=mK1MgO14NinrPhavJw72eR_2WN_kBKjVKiEJnzvdO1Q,13946
- nkululeko-0.86.1.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
- nkululeko-0.86.1.dist-info/METADATA,sha256=LXoMlzo5QBzABv0fpIDvf4nYDjCJkRCZL1XmffikrRc,37088
- nkululeko-0.86.1.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- nkululeko-0.86.1.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
- nkululeko-0.86.1.dist-info/RECORD,,
+ nkululeko-0.86.2.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
+ nkululeko-0.86.2.dist-info/METADATA,sha256=DmmpMrftBptpWqx7h9US7_4mvMIQbZ5ugzv_4kyBjkM,37170
+ nkululeko-0.86.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ nkululeko-0.86.2.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
+ nkululeko-0.86.2.dist-info/RECORD,,