nkululeko 0.74.2__py3-none-any.whl → 0.74.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nkululeko/augment.py +7 -3
- nkululeko/augmenting/augmenter.py +1 -2
- nkululeko/constants.py +1 -1
- nkululeko/demo_predictor.py +7 -0
- nkululeko/experiment.py +21 -3
- nkululeko/feat_extract/feats_praat.py +24 -38
- nkululeko/feat_extract/feats_wav2vec2.py +1 -1
- nkululeko/feat_extract/feinberg_praat.py +14 -12
- nkululeko/models/model_mlp.py +3 -0
- {nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/METADATA +18 -2
- {nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/RECORD +14 -14
- {nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/LICENSE +0 -0
- {nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/WHEEL +0 -0
- {nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/top_level.txt +0 -0
nkululeko/augment.py
CHANGED
@@ -53,17 +53,21 @@ def main(src_dir):
     got_one = False
     if augmentings:
         augmentings = ast.literal_eval(augmentings)
-
+        results = []
         if "traditional" in augmentings:
-
+            df1 = expr.augment()
+            results.append(df1)
             got_one = True
         if "random_splice" in augmentings:
-
+            df2 = expr.random_splice()
+            results.append(df2)
             got_one = True
     if not augmentings:
         util.error("no augmentation selected")
     if not got_one:
         util.error(f"invalid augmentation(s): {augmentings}")
+    df_ret = pd.DataFrame()
+    df_ret = pd.concat(results)
     # remove encoded labels
     target = util.config_val("DATA", "target", "emotion")
     if "class_label" in df_ret.columns:
nkululeko/augmenting/augmenter.py
CHANGED
@@ -30,10 +30,9 @@ class Augmenter:
         self.audioment = Compose(
             [
                 AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
-                # AddGaussianSNR(min_snr_db=10, max_snr_db=40, p=0.5),
                 TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
                 PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
-                Shift(
+                Shift(p=0.5),
             ]
         )
 
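The Shift transform above now takes only a probability, in line with the changed audiomentations interface noted in the 0.74.3 changelog entry further down; the shift range falls back to the library defaults. A minimal sketch of how such a pipeline is applied to a raw signal (the dummy one-second signal and the 16 kHz rate are illustrative, not taken from the package):

    import numpy as np
    from audiomentations import AddGaussianNoise, Compose, PitchShift, Shift, TimeStretch

    # same transform chain as in Augmenter, built outside the class for illustration
    augment = Compose(
        [
            AddGaussianNoise(min_amplitude=0.001, max_amplitude=0.015, p=0.5),
            TimeStretch(min_rate=0.8, max_rate=1.25, p=0.5),
            PitchShift(min_semitones=-4, max_semitones=4, p=0.5),
            Shift(p=0.5),
        ]
    )

    signal = np.random.uniform(-1, 1, 16000).astype(np.float32)  # one second of noise
    augmented = augment(samples=signal, sample_rate=16000)       # returns a numpy array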
nkululeko/constants.py
CHANGED
@@ -1,2 +1,2 @@
-VERSION="0.74.2"
+VERSION="0.74.6"
 SAMPLING_RATE = 16000
nkululeko/demo_predictor.py
CHANGED
@@ -42,6 +42,13 @@ class Demo_predictor:
 
     def predict_signal(self, signal, sr):
         features = self.feature_extractor.extract_sample(signal, sr)
+        scale_feats = self.util.config_val("FEATS", "scale", False)
+        if scale_feats:
+            from sklearn.preprocessing import StandardScaler
+
+            scaler = StandardScaler()
+            features = scaler.fit_transform(features)
+        features = np.nan_to_num(features)
         result_dict = self.model.predict_sample(features)
         keys = result_dict.keys()
         if self.label_encoder is not None:
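One caveat about the new scaling branch: the StandardScaler is fitted on the single feature vector being predicted, so every column is centred on its own value. A small self-contained illustration of that behaviour (the feature values are made up):

    import numpy as np
    from sklearn.preprocessing import StandardScaler

    features = np.array([[0.8, 12.3, -4.1]])  # one extracted sample, shape (1, n_features)
    scaled = StandardScaler().fit_transform(features)
    print(scaled)  # [[0. 0. 0.]] -- each column minus its own mean; zero-variance scale is set to 1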
nkululeko/experiment.py
CHANGED
@@ -100,12 +100,14 @@ class Experiment:
         dbs = ",".join(list(self.datasets.keys()))
         labels = self.util.config_val("DATA", "labels", False)
         if labels:
-            labels = ast.literal_eval(labels)
+            self.labels = ast.literal_eval(labels)
             self.util.debug(f"Target labels (from config): {labels}")
         else:
-            labels = list(
+            self.labels = list(
+                next(iter(self.datasets.values())).df[self.target].unique()
+            )
             self.util.debug(f"Target labels (from database): {labels}")
-        glob_conf.set_labels(labels)
+        glob_conf.set_labels(self.labels)
         self.util.debug(f"loaded databases {dbs}")
 
     def _import_csv(self, storage):
@@ -589,6 +591,7 @@ class Experiment:
         if save:
             # save the experiment for future use
             self.save(self.util.get_save_name())
+            # self.save_onnx(self.util.get_save_name())
 
         # self.__collect_reports()
         self.util.print_best_results(self.reports)
@@ -667,6 +670,7 @@ class Experiment:
         tmp_dict = pickle.load(f)
         f.close()
         self.__dict__.update(tmp_dict)
+        glob_conf.set_labels(self.labels)
 
     def save(self, filename):
         try:
@@ -675,3 +679,17 @@ class Experiment:
             f.close()
         except (AttributeError, TypeError, RuntimeError) as error:
             self.util.warn(f"Save experiment: Can't pickle local object: {error}")
+
+    def save_onnx(self, filename):
+        # export the model to onnx
+        model = self.runmgr.get_best_model()
+        if model.is_ANN():
+            print("converting to onnx from torch")
+        else:
+            from skl2onnx import to_onnx
+
+            print("converting to onnx from sklearn")
+        # save the rest
+        f = open(filename, "wb")
+        pickle.dump(self.__dict__, f)
+        f.close()
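The new save_onnx stub imports skl2onnx.to_onnx for the sklearn branch but does not yet write an ONNX file. As a rough sketch of what that conversion could look like, assuming the model is a fitted sklearn estimator and a batch of training features is available (the helper name and arguments are illustrative, not part of nkululeko):

    import numpy as np
    from skl2onnx import to_onnx

    def export_sklearn_to_onnx(clf, features_sample, out_path):
        # to_onnx infers the input signature from a representative batch of features
        onx = to_onnx(clf, features_sample[:1].astype(np.float32))
        with open(out_path, "wb") as f:
            f.write(onx.SerializeToString())  # ONNX models are serialized protobuf messages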
nkululeko/feat_extract/feats_praat.py
CHANGED
@@ -2,6 +2,7 @@
 from nkululeko.feat_extract.featureset import Featureset
 import os
 import pandas as pd
+import numpy as np
 import nkululeko.glob_conf as glob_conf
 from nkululeko.feat_extract import feinberg_praat
 import ast
@@ -23,14 +24,10 @@ class Praatset(Featureset):
         store = self.util.get_path("store")
         store_format = self.util.config_val("FEATS", "store_format", "pkl")
         storage = f"{store}{self.name}.{store_format}"
-        extract = self.util.config_val(
-            "FEATS", "needs_feature_extraction", False
-        )
+        extract = self.util.config_val("FEATS", "needs_feature_extraction", False)
         no_reuse = eval(self.util.config_val("FEATS", "no_reuse", "False"))
         if extract or no_reuse or not os.path.isfile(storage):
-            self.util.debug(
-                "extracting Praat features, this might take a while..."
-            )
+            self.util.debug("extracting Praat features, this might take a while...")
             self.df = feinberg_praat.compute_features(self.data_df.index)
             self.df = self.df.set_index(self.data_df.index)
             for i, col in enumerate(self.df.columns):
@@ -53,36 +50,25 @@
         self.df = self.df.astype(float)
 
     def extract_sample(self, signal, sr):
-
-            "feats_praat: extracting single samples not implemented yet"
-        )
-        feats = None
-        return feats
+        import audiofile, audformat
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        # self.df = sel_feats_df
-        # self.util.debug(
-        #     "new feats shape after selecting Praat features:"
-        #     f" {self.df.shape}"
-        # )
-        # except KeyError:
-        #     pass
+        tmp_audio_names = ["praat_audio_tmp.wav"]
+        audiofile.write(tmp_audio_names[0], signal, sr)
+        df = pd.DataFrame(index=tmp_audio_names)
+        index = audformat.utils.to_segmented_index(df.index, allow_nat=False)
+        df = feinberg_praat.compute_features(index)
+        df.set_index(index)
+        for i, col in enumerate(df.columns):
+            if df[col].isnull().values.any():
+                self.util.debug(
+                    f"{col} includes {df[col].isnull().sum()} nan,"
+                    " inserting mean values"
+                )
+                mean_val = df[col].mean()
+                if not np.isnan(mean_val):
+                    df[col] = df[col].fillna(mean_val)
+                else:
+                    df[col] = df[col].fillna(0)
+        df = df.astype(float)
+        feats = df.to_numpy()
+        return feats
nkululeko/feat_extract/feats_wav2vec2.py
CHANGED
@@ -19,7 +19,7 @@ class Wav2vec2(Featureset):
         cuda = "cuda" if torch.cuda.is_available() else "cpu"
         self.device = self.util.config_val("MODEL", "device", cuda)
         self.model_initialized = False
-        if feat_type == "wav2vec":
+        if feat_type == "wav2vec" or feat_type == "wav2vec2":
             self.feat_type = "wav2vec2-large-robust-ft-swbd-300h"
         else:
             self.feat_type = feat_type
nkululeko/feat_extract/feinberg_praat.py
CHANGED
@@ -199,28 +199,30 @@ def runPCA(df):
     # pickle.dump(x, f)
     # f.close()
 
-    x = StandardScaler().fit_transform(x)
-    if np.any(np.isnan(x)):
+    # x = StandardScaler().fit_transform(x)
+    if np.any(np.isnan(x[0])):
         print(
             f"Warning: {np.count_nonzero(np.isnan(x))} Nans in x, replacing" " with 0"
         )
         x[np.isnan(x)] = 0
-    if np.any(np.isfinite(x)):
-
+    # if np.any(np.isfinite(x[0])):
+    #     print(f"Warning: {np.count_nonzero(np.isfinite(x))} finite in x")
 
     # PCA
     pca = PCA(n_components=2)
-
-
-
-
-
-
-
+    try:
+        principalComponents = pca.fit_transform(x)
+        if np.any(np.isnan(principalComponents)):
+            print("pc is nan")
+            print(f"count: {np.count_nonzero(np.isnan(principalComponents))}")
+            print(principalComponents)
+            principalComponents = np.nan_to_num(principalComponents)
+    except ValueError:
+        print("need more than one file for pca")
+        principalComponents = [[0, 0]]
     principalDf = pd.DataFrame(
         data=principalComponents, columns=["JitterPCA", "ShimmerPCA"]
    )
-
     return principalDf
 
 
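The try/except added to runPCA guards the case where only a single file has been analysed: sklearn's PCA cannot fit two components from one sample and raises a ValueError. A small sketch of that failure mode, with made-up jitter/shimmer values for a single file:

    import pandas as pd
    from sklearn.decomposition import PCA

    measures = pd.DataFrame({"localJitter": [0.02], "localShimmer": [0.09]})  # one file only
    pca = PCA(n_components=2)
    try:
        pcs = pca.fit_transform(measures.to_numpy())
    except ValueError:
        # one sample cannot yield two principal components, so fall back to zeros
        pcs = [[0, 0]]
    print(pd.DataFrame(pcs, columns=["JitterPCA", "ShimmerPCA"]))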
nkululeko/models/model_mlp.py
CHANGED
{nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nkululeko
-Version: 0.74.2
+Version: 0.74.6
 Summary: Machine learning audio prediction experiments based on templates
 Home-page: https://github.com/felixbur/nkululeko
 Author: Felix Burkhardt
@@ -61,7 +61,7 @@ Requires-Dist: xgboost
 ## Overview
 A project to detect speaker characteristics by machine learning experiments with a high-level interface.
 
-The idea is to have a framework (based on e.g. sklearn and torch) that can be used to rapidly and automatically analyse and
+The idea is to have a framework (based on e.g. sklearn and torch) that can be used to rapidly and automatically analyse audio data and explore machine learning models based on that data.
 
 * NEW: Nkululeko now automatically generates PDF reports [sample for EmoDB](meta/images/emodb_report.pdf)
 * The latest features can be seen in [the ini-file](./ini_file.md) options that are used to control Nkululeko
@@ -308,6 +308,22 @@ F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schuller
 Changelog
 =========
 
+Version 0.74.6
+--------------
+* added standard Wav2vec2 model
+
+Version 0.74.5
+--------------
+* added praat feature extractor for one sample
+
+Version 0.74.4
+--------------
+* fixed bug combining augmentations
+
+Version 0.74.3
+--------------
+* audiomentations interface changed
+
 Version 0.74.2
 --------------
 * combined augmentation methods
{nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/RECORD
CHANGED
@@ -1,11 +1,11 @@
 nkululeko/__init__.py,sha256=62f8HiEzJ8rG2QlTFJXUCMpvuH3fKI33DoJSj33mscc,63
-nkululeko/augment.py,sha256=
+nkululeko/augment.py,sha256=1kzUjscTPDrFxkR_HwnhPoB3SQQaKs5zQdWN9hRE6p4,2680
 nkululeko/balancer.py,sha256=WslJxQwMNnVYgZXF1y0ueS5zilRPQJZDhUG72Csb4Gw,11
 nkululeko/cacheddataset.py,sha256=lIJ6hUo5LoxSrzXtWV8mzwO7wRtUETWnOQ4ws2XfL1E,969
-nkululeko/constants.py,sha256=
+nkululeko/constants.py,sha256=qEXgFJllipG3vjFOgo9g4RbcwzKbKqCzu42-olPPiT0,39
 nkululeko/demo.py,sha256=6CmLxH_0QJIMazPPg7IZur7ciNdqby6yOlh-6zu6YE0,1951
-nkululeko/demo_predictor.py,sha256=
-nkululeko/experiment.py,sha256=
+nkululeko/demo_predictor.py,sha256=z4t8IlwRsc-MrE83JlZ9KupOsW-Xalziu89nQD1FbCA,2623
+nkululeko/experiment.py,sha256=5vah4roe5PNc4jLSOGtpT5foVsO0n1y8GKUD371rHcE,28483
 nkululeko/explore.py,sha256=1OdBEYU5LYsuLTaW6WpDTciiGVpZQmZ-PYYiBd1HJtI,2251
 nkululeko/export.py,sha256=XqY7nFnta_hRFWeoqEwfCDz6BpCtPNNIs8r76o5g9rQ,4690
 nkululeko/feature_extractor.py,sha256=US5zFJ_DqReF9Q7Ynqo1qtamFPMYrgfs_I4VwIYSY8A,7275
@@ -27,7 +27,7 @@ nkululeko/test.py,sha256=Z00CQrJ6Pp9zycKSLrCFjzew-_AXll3pud2o0xur_KY,1457
 nkululeko/test_predictor.py,sha256=mO-jm1ViTtZY8QfWhJLyEboU1nn_CfKQ9c7-dgUxMp0,2403
 nkululeko/util.py,sha256=CY7vfFFa2XnWexq0HoIUIMxxwJ_JkATa2eQplyfqOX4,11293
 nkululeko/augmenting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nkululeko/augmenting/augmenter.py,sha256=
+nkululeko/augmenting/augmenter.py,sha256=oodHgKO4DzHIGSryMLGQvDc9DYcQ6_XNajXSW813wNo,2300
 nkululeko/augmenting/randomsplicer.py,sha256=pgrTdwnd-I1CBbMx-do7QC5eJwx4z88bkqKzagl45OI,2389
 nkululeko/augmenting/randomsplicing.py,sha256=ldym9vZNsZIU5BAAaJVaOmAgmVHNs4a5i5K3bW-WAQU,1791
 nkululeko/augmenting/resampler.py,sha256=Bz-QMrcmH8eUgT_klIpQAgueVesPx72Erqr9fTFN5Ls,3413
@@ -59,16 +59,16 @@ nkululeko/feat_extract/feats_mld.py,sha256=RbRAaTTTfdIQeoDrGRsVUr5O-GVG443zbjdCy
 nkululeko/feat_extract/feats_mos.py,sha256=Bly7p6B0Guj4MQBdX_0G994lO5VUcmy5LLbXTSKi29Q,4247
 nkululeko/feat_extract/feats_opensmile.py,sha256=yDRGSiUQV3K3oLxVqq8Cxj5bkc-RiLzDYbAGKC9I5vc,4140
 nkululeko/feat_extract/feats_oxbow.py,sha256=N7uThvewVlH8HqSda-s_7UAtgXZkMwCOwUrsqeVHyLk,4830
-nkululeko/feat_extract/feats_praat.py,sha256=
+nkululeko/feat_extract/feats_praat.py,sha256=dqPhAUceze_6LN6vivXJFT2PPDXZKjmzYx0XnYKYGE8,3039
 nkululeko/feat_extract/feats_snr.py,sha256=zsxwRAzt3C-0B3EqT4p4zDaDVueMyyk8uxcaaa_ITso,2817
 nkululeko/feat_extract/feats_spectra.py,sha256=PLKoc_S3v3wibodUCiOnFFdF87U2rk2sfndRo2mmG64,3656
 nkululeko/feat_extract/feats_spkrec.py,sha256=VK4ma3uWzM0YZStsgRTirfkbzjWIfRWSgsYI038QlRY,4803
 nkululeko/feat_extract/feats_squim.py,sha256=8MaQ5lKfRqTJAub5VqEO9VziEVgMVTVe36CHkIQhGt4,4423
 nkululeko/feat_extract/feats_trill.py,sha256=vP7OKQCU8miz-NM9xEwP7kt-RL73uODcyqtUAkXM5Es,2994
-nkululeko/feat_extract/feats_wav2vec2.py,sha256=
+nkululeko/feat_extract/feats_wav2vec2.py,sha256=r-HQ-oV6x9Ioe00gNMCTEI5iZuMyHvSUyKFDK_iXrdA,4728
 nkululeko/feat_extract/feats_wavlm.py,sha256=QoLQNYLFJ8BgEyx0lVgb48HHH9LYUOX7pJtKbgNLk1I,4509
 nkululeko/feat_extract/featureset.py,sha256=Xed_qbXFFasyKEyRpgCyZM3vYLqX0-O9RXnjN7hpUbY,1399
-nkululeko/feat_extract/feinberg_praat.py,sha256=
+nkululeko/feat_extract/feinberg_praat.py,sha256=od8dV1ZRHytww70OwWK9Wm-M6nccOkT6CfaN3FJwRCY,21247
 nkululeko/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/losses/loss_ccc.py,sha256=NOK0y0fxKUnU161B5geap6Fmn8QzoPl2MqtPiV8IuJE,976
 nkululeko/losses/loss_softf1loss.py,sha256=5gW-PuiqeAZcRgfwjueIOQtMokOjZWgQnVIv59HKTCo,1309
@@ -79,7 +79,7 @@ nkululeko/models/model_cnn.py,sha256=Omu7xPPeft3GwqZMySd2xF4IAm7WrzTAVjAnI_DsD6A
 nkululeko/models/model_gmm.py,sha256=onovzGBeguwZ-upXtuDLaBw9sd6fDDQslVBOrz1Z8TE,645
 nkululeko/models/model_knn.py,sha256=5tGqiPo2JTw9VLmD-MXNZKFJ5RTLA6uv_blJDJ9lScA,573
 nkululeko/models/model_knn_reg.py,sha256=Fbuk6Ku6eyrbbMEk7rB5dwfhvQOMsdZk6HI_0T0gYPw,580
-nkululeko/models/model_mlp.py,sha256=
+nkululeko/models/model_mlp.py,sha256=YLbaC-4fDUeaozoUC4hT4oFlxyXfgkE3GtoR3wpp4Ho,8488
 nkululeko/models/model_mlp_regression.py,sha256=UXkMCHmLT-wl2aed8QmgJcvAebwRduUvdTLSzFsT1v4,9451
 nkululeko/models/model_svm.py,sha256=J1d8mf5T4QHtilkUTBkhegVB_0D2kRY0BiBGz-LUJmw,554
 nkululeko/models/model_svr.py,sha256=au5AtzjEpaY9_7Fz6CQoIZ3s6OAvLUxjAXuqHF9dRbk,514
@@ -103,8 +103,8 @@ nkululeko/split/split_utils.py,sha256=gdlXBwssgEogDBIbdVboeiYRWyJKYwuQm_-MkF1YCl
 nkululeko/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/utils/files.py,sha256=UiGAtZRWYjHSvlmPaTMtzyNNGE6qaLaxQkybctS7iRM,4021
 nkululeko/utils/stats.py,sha256=zoZkrbELuukf9eKWh-EmzxKGjzJWQuCM18-2f_aIBz4,2554
-nkululeko-0.74.
-nkululeko-0.74.
-nkululeko-0.74.
-nkululeko-0.74.
-nkululeko-0.74.
+nkululeko-0.74.6.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
+nkululeko-0.74.6.dist-info/METADATA,sha256=YbvU_3aAKddP86mjv373jJKNEOYKMTJl-AlfMD7E1xI,29686
+nkululeko-0.74.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+nkululeko-0.74.6.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
+nkululeko-0.74.6.dist-info/RECORD,,
{nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/LICENSE
File without changes
{nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/WHEEL
File without changes
{nkululeko-0.74.2.dist-info → nkululeko-0.74.6.dist-info}/top_level.txt
File without changes