nkululeko 0.84.0__py3-none-any.whl → 0.85.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package versions as they appear in their public registries.
- nkululeko/augmenting/resampler.py +9 -4
- nkululeko/constants.py +1 -1
- nkululeko/experiment.py +6 -1
- nkululeko/feat_extract/feats_whisper.py +3 -6
- nkululeko/modelrunner.py +56 -33
- nkululeko/models/finetune_model.py +190 -0
- nkululeko/models/model.py +1 -1
- nkululeko/models/model_tuned.py +506 -0
- nkululeko/resample.py +76 -54
- nkululeko/test_pretrain.py +200 -11
- nkululeko/utils/util.py +53 -32
- {nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/METADATA +9 -1
- {nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/RECORD +16 -14
- {nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/LICENSE +0 -0
- {nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/WHEEL +0 -0
- {nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/top_level.txt +0 -0
nkululeko/test_pretrain.py
CHANGED
@@ -11,11 +11,14 @@ import transformers
 
 import audeer
 import audiofile
+import audmetric
 
 from nkululeko.constants import VERSION
 import nkululeko.experiment as exp
+import nkululeko.models.finetune_model as fm
 import nkululeko.glob_conf as glob_conf
 from nkululeko.utils.util import Util
+import json
 
 
 def doit(config_file):
@@ -50,28 +53,42 @@ def doit(config_file):
     expr.fill_train_and_tests()
     util.debug(f"train shape : {expr.df_train.shape}, test shape:{expr.df_test.shape}")
 
+    model_root = util.get_path("model_dir")
+    log_root = audeer.mkdir("log")
+    torch_root = audeer.path(model_root, "torch")
+
+    metrics_gender = {
+        "UAR": audmetric.unweighted_average_recall,
+        "ACC": audmetric.accuracy,
+    }
+
     sampling_rate = 16000
     max_duration_sec = 8.0
 
     model_path = "facebook/wav2vec2-large-robust-ft-swbd-300h"
     num_layers = None
 
+    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
+    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+
     batch_size = 16
     accumulation_steps = 4
-
     # create dataset
 
     dataset = {}
+    target_name = glob_conf.target
     data_sources = {
-        "train": pd.DataFrame(expr.df_train[
-        "dev": pd.DataFrame(expr.df_test[
+        "train": pd.DataFrame(expr.df_train[target_name]),
+        "dev": pd.DataFrame(expr.df_test[target_name]),
     }
 
     for split in ["train", "dev"]:
+        df = data_sources[split]
+        df[target_name] = df[target_name].astype("float")
 
         y = pd.Series(
-            data=
-            index=
+            data=df.itertuples(index=False, name=None),
+            index=df.index,
             dtype=object,
            name="labels",
        )
@@ -80,23 +97,195 @@ def doit(config_file):
         df = y.reset_index()
         df.start = df.start.dt.total_seconds()
         df.end = df.end.dt.total_seconds()
+
         print(f"{split}: {len(df)}")
+
         ds = datasets.Dataset.from_pandas(df)
         dataset[split] = ds
 
-
+    dataset = datasets.DatasetDict(dataset)
+
+    # load pre-trained model
+    le = glob_conf.label_encoder
+    mapping = dict(zip(le.classes_, range(len(le.classes_))))
+    target_mapping = {k: int(v) for k, v in mapping.items()}
+    target_mapping_reverse = {value: key for key, value in target_mapping.items()}
 
     config = transformers.AutoConfig.from_pretrained(
         model_path,
-        num_labels=len(
-        label2id=
-        id2label=
-        finetuning_task=
+        num_labels=len(target_mapping),
+        label2id=target_mapping,
+        id2label=target_mapping_reverse,
+        finetuning_task=target_name,
     )
     if num_layers is not None:
         config.num_hidden_layers = num_layers
     setattr(config, "sampling_rate", sampling_rate)
-    setattr(config, "data",
+    setattr(config, "data", util.get_data_name())
+
+    vocab_dict = {}
+    with open("vocab.json", "w") as vocab_file:
+        json.dump(vocab_dict, vocab_file)
+    tokenizer = transformers.Wav2Vec2CTCTokenizer("./vocab.json")
+    tokenizer.save_pretrained(".")
+
+    feature_extractor = transformers.Wav2Vec2FeatureExtractor(
+        feature_size=1,
+        sampling_rate=16000,
+        padding_value=0.0,
+        do_normalize=True,
+        return_attention_mask=True,
+    )
+    processor = transformers.Wav2Vec2Processor(
+        feature_extractor=feature_extractor,
+        tokenizer=tokenizer,
+    )
+    assert processor.feature_extractor.sampling_rate == sampling_rate
+
+    model = fm.Model.from_pretrained(
+        model_path,
+        config=config,
+    )
+    model.freeze_feature_extractor()
+    model.train()
+
+    # training
+
+    def data_collator(data):
+
+        files = [d["file"] for d in data]
+        starts = [d["start"] for d in data]
+        ends = [d["end"] for d in data]
+        targets = [d["targets"] for d in data]
+
+        signals = []
+        for file, start, end in zip(
+            files,
+            starts,
+            ends,
+        ):
+            offset = start
+            duration = end - offset
+            if max_duration_sec is not None:
+                duration = min(duration, max_duration_sec)
+            signal, _ = audiofile.read(
+                file,
+                offset=offset,
+                duration=duration,
+            )
+            signals.append(signal.squeeze())
+
+        input_values = processor(
+            signals,
+            sampling_rate=sampling_rate,
+            padding=True,
+        )
+        batch = processor.pad(
+            input_values,
+            padding=True,
+            return_tensors="pt",
+        )
+
+        batch["labels"] = torch.tensor(targets)
+
+        return batch
+
+    def compute_metrics(p: transformers.EvalPrediction):
+
+        truth_gender = p.label_ids[:, 0].astype(int)
+        preds = p.predictions
+        preds_gender = np.argmax(preds, axis=1)
+
+        scores = {}
+
+        for name, metric in metrics_gender.items():
+            scores[f"gender-{name}"] = metric(truth_gender, preds_gender)
+
+        scores["combined"] = scores["gender-UAR"]
+
+        return scores
+
+    targets = pd.DataFrame(dataset["train"]["targets"])
+    counts = targets[0].value_counts().sort_index()
+    train_weights = 1 / counts
+    train_weights /= train_weights.sum()
+
+    print(train_weights)
+
+    criterion_gender = torch.nn.CrossEntropyLoss(
+        weight=torch.Tensor(train_weights).to("cuda"),
+    )
+
+    class Trainer(transformers.Trainer):
+
+        def compute_loss(
+            self,
+            model,
+            inputs,
+            return_outputs=False,
+        ):
+
+            targets = inputs.pop("labels").squeeze()
+            targets_gender = targets.type(torch.long)
+
+            outputs = model(**inputs)
+            logits_gender = outputs[0].squeeze()
+
+            loss_gender = criterion_gender(logits_gender, targets_gender)
+
+            loss = loss_gender
+
+            return (loss, outputs) if return_outputs else loss
+
+    num_steps = len(dataset["train"]) // (batch_size * accumulation_steps) // 5
+    num_steps = max(1, num_steps)
+    print(num_steps)
+
+    training_args = transformers.TrainingArguments(
+        output_dir=model_root,
+        logging_dir=log_root,
+        per_device_train_batch_size=batch_size,
+        per_device_eval_batch_size=batch_size,
+        gradient_accumulation_steps=accumulation_steps,
+        evaluation_strategy="steps",
+        num_train_epochs=5.0,
+        fp16=True,
+        save_steps=num_steps,
+        eval_steps=num_steps,
+        logging_steps=num_steps,
+        learning_rate=1e-4,
+        save_total_limit=2,
+        metric_for_best_model="combined",
+        greater_is_better=True,
+        load_best_model_at_end=True,
+        remove_unused_columns=False,
+        report_to="none",
+    )
+
+    trainer = Trainer(
+        model=model,
+        data_collator=data_collator,
+        args=training_args,
+        compute_metrics=compute_metrics,
+        train_dataset=dataset["train"],
+        eval_dataset=dataset["dev"],
+        tokenizer=processor.feature_extractor,
+        callbacks=[transformers.integrations.TensorBoardCallback()],
+    )
+    if False:
+        trainer.train()
+        trainer.save_model(torch_root)
+
+    modelnew = fm.Model.from_pretrained(
+        torch_root,
+        config=config,
+    )
+    print(f"loaded new model type{type(modelnew)}")
+    import audiofile
+
+    signal, _ = audiofile.read("./test.wav", always_2d=True)
+    result = modelnew.predict(signal)
+    print(result)
 
     print("DONE")
 
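The new training code above balances classes by weighting the cross-entropy loss with normalized inverse class frequencies (see train_weights and criterion_gender in the diff). A minimal standalone sketch of that scheme, using toy labels in place of the experiment's targets:

import pandas as pd
import torch

# toy labels standing in for dataset["train"]["targets"]
targets = pd.Series([0, 0, 0, 1, 1, 2])
counts = targets.value_counts().sort_index()  # samples per class: [3, 2, 1]
train_weights = 1 / counts                    # rarer classes get larger weights
train_weights /= train_weights.sum()          # normalize so the weights sum to 1

criterion = torch.nn.CrossEntropyLoss(
    weight=torch.tensor(train_weights.to_numpy(), dtype=torch.float32),
)
logits = torch.randn(6, 3)                    # (batch, num_classes)
loss = criterion(logits, torch.tensor(targets.to_numpy()))
print(loss.item())

Misclassifying a sample from the rarest class then costs roughly three times as much as one from the most frequent class, which is what the custom Trainer.compute_loss in the diff relies on.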
nkululeko/utils/util.py
CHANGED
@@ -33,43 +33,58 @@ class Util:
         else:
             self.caller = ""
         if has_config:
-
-
-
-
-
-
-
-
-
-
-
+            try:
+                import nkululeko.glob_conf as glob_conf
+                self.config = glob_conf.config
+                self.got_data_roots = self.config_val(
+                    "DATA", "root_folders", False)
+                if self.got_data_roots:
+                    # if there is a global data rootfolder file, read from there
+                    if not os.path.isfile(self.got_data_roots):
+                        self.error(f"no such file: {self.got_data_roots}")
+                    self.data_roots = configparser.ConfigParser()
+                    self.data_roots.read(self.got_data_roots)
+            except (ModuleNotFoundError, AttributeError):
+                self.config = None
+                self.got_data_roots = False
 
     def get_path(self, entry):
         """
         This method allows the user to get the directory path for the given argument.
         """
-
-
-        try:
-            entryn = self.config["EXP"][entry]
-        except KeyError:
-            # some default values
+        if self.config is None:
+            # If no configuration file is provided, use default paths
             if entry == "fig_dir":
-
+                dir_name = "./images/"
             elif entry == "res_dir":
-
+                dir_name = "./results/"
             elif entry == "model_dir":
-
+                dir_name = "./models/"
             else:
-
-
-
-
-
-
+                dir_name = "./store/"
+        else:
+            root = os.path.join(self.config["EXP"]["root"], "")
+            name = self.config["EXP"]["name"]
+            try:
+                entryn = self.config["EXP"][entry]
+            except KeyError:
+                # some default values
+                if entry == "fig_dir":
+                    entryn = "./images/"
+                elif entry == "res_dir":
+                    entryn = "./results/"
+                elif entry == "model_dir":
+                    entryn = "./models/"
+                else:
+                    entryn = "./store/"
+
+            # Expand image, model and result directories with run index
+            if entry == "fig_dir" or entry == "res_dir" or entry == "model_dir":
+                run = self.config_val("EXP", "run", 0)
+                entryn = entryn + f"run_{run}/"
+
+            dir_name = f"{root}{name}/{entryn}"
 
-        dir_name = f"{root}{name}/{entryn}"
         audeer.mkdir(dir_name)
         return dir_name
 
@@ -101,7 +116,8 @@ class Util:
             )
             return default
         if not default in self.stopvals:
-            self.debug(
+            self.debug(
+                f"value for {key} not found, using default: {default}")
         return default
 
     def set_config(self, config):
@@ -138,7 +154,8 @@ class Util:
         if len(df) == 0:
             return df
         if not isinstance(df.index, pd.MultiIndex):
-            df.index = audformat.utils.to_segmented_index(
+            df.index = audformat.utils.to_segmented_index(
+                df.index, allow_nat=False)
         return df
 
     def _get_value_descript(self, section, name):
@@ -243,11 +260,14 @@ class Util:
         print(df.head(1))
 
     def config_val(self, section, key, default):
+        if self.config is None:
+            return default
         try:
             return self.config[section][key]
         except KeyError:
-            if not
-            self.debug(
+            if default not in self.stopvals:
+                self.debug(
+                    f"value for {key} not found, using default: {default}")
             return default
 
     def config_val_list(self, section, key, default):
@@ -255,7 +275,8 @@ class Util:
             return ast.literal_eval(self.config[section][key])
         except KeyError:
             if not default in self.stopvals:
-                self.debug(
+                self.debug(
+                    f"value for {key} not found, using default: {default}")
             return default
 
     def continuous_to_categorical(self, series):
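The net effect of these util.py changes is that Util can now be constructed before any experiment configuration exists: the constructor catches the missing glob_conf.config, get_path() falls back to hard-coded defaults, and config_val() returns the caller's default immediately. A rough sketch of the resulting behavior (the constructor signature is inferred from the diff, so treat it as an assumption):

from nkululeko.utils.util import Util

# Without an initialized experiment, glob_conf has no "config" attribute,
# so the new try/except sets util.config to None instead of raising.
util = Util("resample")
print(util.get_path("model_dir"))        # falls back to "./models/" (directory is created)
print(util.config_val("EXP", "run", 0))  # config is None -> returns the default, 0

This is what the 0.84.1 changelog entry "made resample independent of config file" builds on.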
{nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nkululeko
-Version: 0.84.0
+Version: 0.85.0
 Summary: Machine learning audio prediction experiments based on templates
 Home-page: https://github.com/felixbur/nkululeko
 Author: Felix Burkhardt
@@ -333,6 +333,14 @@ F. Burkhardt, Johannes Wagner, Hagen Wierstorf, Florian Eyben and Björn Schulle
 Changelog
 =========
 
+Version 0.85.0
+--------------
+* first version with finetuning wav2vec2 layers
+
+Version 0.84.1
+--------------
+* made resample independent of config file
+
 Version 0.84.0
 --------------
 * added SHAP analysis
{nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/RECORD
CHANGED
@@ -2,36 +2,36 @@ nkululeko/__init__.py,sha256=62f8HiEzJ8rG2QlTFJXUCMpvuH3fKI33DoJSj33mscc,63
 nkululeko/aug_train.py,sha256=YhuZnS_WVWnun9G-M6g5n6rbRxoVREz6Zh7k6qprFNQ,3194
 nkululeko/augment.py,sha256=4MG0apTAG5RgkuJrYEjGgDdbodZWi_HweSPNI1JJ5QA,3051
 nkululeko/cacheddataset.py,sha256=lIJ6hUo5LoxSrzXtWV8mzwO7wRtUETWnOQ4ws2XfL1E,969
-nkululeko/constants.py,sha256=
+nkululeko/constants.py,sha256=flWSUNQs4r0X0SgoR1I72Mk49cRUdpBN8Zng8sySFBE,39
 nkululeko/demo.py,sha256=8bl15Kitoesnz8oa8yrs52T6YCSOhWbbq9PnZ8Hj6D0,3232
 nkululeko/demo_feats.py,sha256=sAeGFojhEj9WEDFtG3SzPBmyYJWLF2rkbpp65m8Ujo4,2025
 nkululeko/demo_predictor.py,sha256=es56xbT8ifkS_vnrlb5NTZT54gNmeUtNlA4zVA_gnN8,4757
-nkululeko/experiment.py,sha256=
+nkululeko/experiment.py,sha256=9Nw23b7sVOciH8IaOuAAKbY7otXYSsPrj_rQCA_U9cc,30465
 nkululeko/explore.py,sha256=lDzRoW_Taa5u4BBABZLD89BcQWnYlrftJR4jgt1yyj0,2609
 nkululeko/export.py,sha256=mHeEAAmtZuxdyebLlbSzPrHSi9OMgJHbk35d3DTxRBc,4632
 nkululeko/feature_extractor.py,sha256=8mssYKmo4LclVI-hiLmJEDZ0ZPyDavFG2YwtXcrGzwM,3976
 nkululeko/file_checker.py,sha256=LoLnL8aHpW-axMQ46qbqrManTs5otG9ShpEZuz9iRSk,3474
 nkululeko/filter_data.py,sha256=w-X2mhKdYr5DxDIz50E5yzO6Jmzk4jjDBoXsgOOVtcA,7222
 nkululeko/glob_conf.py,sha256=KL9YJQTHvTztxo1vr25qRRgaPnx4NTg0XrdbovKGMmw,525
-nkululeko/modelrunner.py,sha256=
+nkululeko/modelrunner.py,sha256=pPhvTh1rIrFQg5Ox9T1KoFJ4wRcLCmJl7LFud2DA41w,10464
 nkululeko/multidb.py,sha256=fG3VukEWP1vreVN4gB1IRXxwwg4jLftsSEYtu0o1f78,5634
 nkululeko/nkuluflag.py,sha256=PGWSmZz-PiiHLgcZJAoGOI_Y-sZDVI1ksB8p5r7riWM,3725
 nkululeko/nkululeko.py,sha256=Kn3s2E3yyH8cJ7z6lkMxrnqtCxTu7-qfe9Zr_ONTD5g,1968
 nkululeko/plots.py,sha256=nd9tF_61DyAx7oGZF8gTrHXazkgFjFe4eClxu1nQ_XU,23276
 nkululeko/predict.py,sha256=sF091sSSLnEWcISx9ZcULLie3tY5XeFsQJd6b3vrxFg,2409
-nkululeko/resample.py,sha256=
+nkululeko/resample.py,sha256=IPtYqU0nhZ-CqO_O1jJN0EvpfjxHZdFRwdTpEJOVuaQ,3354
 nkululeko/runmanager.py,sha256=eTM1DNQKt1lxYhzt4vZyZluPXW9sWlIJHNQzex4lkJU,7624
 nkululeko/scaler.py,sha256=4nkIqoajkIkuTPK0Z02ifMN_awl6fP_i-GBYdoGYgGM,4101
 nkululeko/segment.py,sha256=YLKckX44tbvTb3LrdgYw9X4guzuF27sutl92z9DkpZU,4835
 nkululeko/syllable_nuclei.py,sha256=Sky-C__MeUDaxqHnDl2TGLLYOYvsahD35TUjWGeG31k,10047
 nkululeko/test.py,sha256=1w624vo5KTzmFC8BUStGlLDmIEAFuJUz7J0W-gp7AxI,1677
 nkululeko/test_predictor.py,sha256=_w5J8CxH6hmW3mLTKbdfmywl5QpdNAnW1Y8TE5GtlfE,3237
-nkululeko/test_pretrain.py,sha256=
+nkululeko/test_pretrain.py,sha256=ZWl-bR6nmeSmXkGAIE6zyfQEjN8Zg0rIxfaS-O6Zbas,8465
 nkululeko/augmenting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/augmenting/augmenter.py,sha256=XAt0dpmlnKxqyysqCgV3rcz-pRIvOz7rU7dmGDCVAzs,2905
 nkululeko/augmenting/randomsplicer.py,sha256=Z5rxdKKUpuncLWuTS6xVfVKUeVbeiYU_dLRHQ5fcg4Y,2669
 nkululeko/augmenting/randomsplicing.py,sha256=ldym9vZNsZIU5BAAaJVaOmAgmVHNs4a5i5K3bW-WAQU,1791
-nkululeko/augmenting/resampler.py,sha256=
+nkululeko/augmenting/resampler.py,sha256=nOBsiQpX6p4jXsP7x6wak78F3B5YYYRmC_iHX8iuOXs,3542
 nkululeko/autopredict/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/autopredict/ap_age.py,sha256=2Wn5E-Jd49sTn40WqaMcYtUEl4zEq3OY75XmjOpdxsA,1095
 nkululeko/autopredict/ap_arousal.py,sha256=ymt0diu4v1osw3VxJbSglsVKDAJYRzebQ2TTfFMKKxk,1024
@@ -68,14 +68,15 @@ nkululeko/feat_extract/feats_squim.py,sha256=Y31YmDmscuG0YozvxyBZIutO3id8t7IZJWC
 nkululeko/feat_extract/feats_trill.py,sha256=K2ahhdpwpjgg3WZS1POg3UMP2U44i8cLZZvn5Rq7fUI,3228
 nkululeko/feat_extract/feats_wav2vec2.py,sha256=9WUMfyddB_3nx79g7mZoQrRynhM1uEBWuOotRq8bxoU,5268
 nkululeko/feat_extract/feats_wavlm.py,sha256=ulxpGjifUFx2ZgGmY32SmBJGIuvkYHoLb2n1LZ8KMwA,4703
-nkululeko/feat_extract/feats_whisper.py,sha256=
+nkululeko/feat_extract/feats_whisper.py,sha256=0N7Vj65OVi2PNoB_NrDjWT5lP6xZNKxFOZZIoxkJvcA,4533
 nkululeko/feat_extract/featureset.py,sha256=HtgW2389rmlRAgFP3F1sSFzq2_iUVr2NhOfIXG9omt0,1448
 nkululeko/feat_extract/feinberg_praat.py,sha256=EP9pMALjlKdiYInLQdrZ7MmE499Mq-ISRCgqbqL3Rxc,21304
 nkululeko/losses/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/losses/loss_ccc.py,sha256=NOK0y0fxKUnU161B5geap6Fmn8QzoPl2MqtPiV8IuJE,976
 nkululeko/losses/loss_softf1loss.py,sha256=5gW-PuiqeAZcRgfwjueIOQtMokOjZWgQnVIv59HKTCo,1309
 nkululeko/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nkululeko/models/
+nkululeko/models/finetune_model.py,sha256=OMlzDyUFNXZ2xSiqqH8tbzey_KzPJ4jsoYT-4KrWFKM,5091
+nkululeko/models/model.py,sha256=PUCqF2r_dEfmFsZn6Cgr1UIzYvxziLH6nSqZ5-vuN1o,11639
 nkululeko/models/model_bayes.py,sha256=WJFZ8wFKwWATz6MhmjeZIi1Pal1viU549WL_PjXDSy8,406
 nkululeko/models/model_cnn.py,sha256=bJxqwe6FnVR2hFeqN6EXexYGgvKYFED1VOhBXVlLWaE,9954
 nkululeko/models/model_gmm.py,sha256=hZ9UO36KNf48qa3J-xkWIicIj9-TApmt21zNES2vEOs,649
@@ -88,6 +89,7 @@ nkululeko/models/model_svm.py,sha256=rsME3KvKvNG7bdE5lbvYUu85WZhaASZxxmdNDIVJRZ4
 nkululeko/models/model_svr.py,sha256=_YZeksqB3eBENGlg3g9RwYFlk9rQQ-XCeNBKLlGGVoE,725
 nkululeko/models/model_tree.py,sha256=rf16faUm4o2LJgkoYpeY998b8DQIvXZ73_m1IS3TnnE,417
 nkululeko/models/model_tree_reg.py,sha256=IgQcPTE-304HQLYSKPF8Z4ot_Ur9dH01fZjS0nXke_M,428
+nkululeko/models/model_tuned.py,sha256=zmagIE3QHP67_XJCx5r7ZXBojsp6SC8IS-L3XRWmCEk,15650
 nkululeko/models/model_xgb.py,sha256=Thgx5ESdIok4v72mKh4plxpo4smGcKALWNCJTDScY0M,447
 nkululeko/models/model_xgr.py,sha256=aGBtNGLWjOE_2rICGYGFxmT8DtnHYsIl1lIpMtghHsY,418
 nkululeko/reporting/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -103,9 +105,9 @@ nkululeko/segmenting/seg_silero.py,sha256=lLytS38KzARS17omwv8VBw-zz60RVSXGSvZ5Ev
 nkululeko/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nkululeko/utils/files.py,sha256=UiGAtZRWYjHSvlmPaTMtzyNNGE6qaLaxQkybctS7iRM,4021
 nkululeko/utils/stats.py,sha256=1yUq0FTOyqkU8TwUocJRYdJaqMU5SlOBBRUun9STo2M,2829
-nkululeko/utils/util.py,sha256=
-nkululeko-0.
-nkululeko-0.
-nkululeko-0.
-nkululeko-0.
-nkululeko-0.
+nkululeko/utils/util.py,sha256=b1IHFucRNuF9Iyv5IJeK4AEg0Rga0xKG80UM5GWWdHA,13816
+nkululeko-0.85.0.dist-info/LICENSE,sha256=0zGP5B_W35yAcGfHPS18Q2B8UhvLRY3dQq1MhpsJU_U,1076
+nkululeko-0.85.0.dist-info/METADATA,sha256=Zt3H0FmIXOJvzyLOI0aC8VfvjrdIkd4uNvb937luo_k,36499
+nkululeko-0.85.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+nkululeko-0.85.0.dist-info/top_level.txt,sha256=DPFNNSHPjUeVKj44dVANAjuVGRCC3MusJ08lc2a8xFA,10
+nkululeko-0.85.0.dist-info/RECORD,,
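A note on the RECORD entries above: per the wheel spec, each line is "path,sha256=<digest>,<size>", where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing "=" padding stripped. A small sketch that reproduces such an entry for any file on disk (the path used here is illustrative):

import base64
import hashlib
import os

def record_entry(path):
    # RECORD line format: path,sha256=<urlsafe-b64 digest without padding>,<size in bytes>
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    b64 = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
    return f"{path},sha256={b64},{os.path.getsize(path)}"

print(record_entry("nkululeko/constants.py"))  # illustrative path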
{nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/LICENSE
File without changes
{nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/WHEEL
File without changes
{nkululeko-0.84.0.dist-info → nkululeko-0.85.0.dist-info}/top_level.txt
File without changes