uniovi-simur-wearablepermed-ml 1.1.0 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of uniovi-simur-wearablepermed-ml might be problematic.
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/METADATA +411 -0
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/RECORD +19 -0
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/WHEEL +5 -0
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/entry_points.txt +3 -0
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/licenses/LICENSE.txt +21 -0
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/top_level.txt +1 -0
- wearablepermed_ml/__init__.py +16 -0
- wearablepermed_ml/basic_functions/__init__.py +0 -0
- wearablepermed_ml/basic_functions/address.py +17 -0
- wearablepermed_ml/data/DataReader.py +388 -0
- wearablepermed_ml/data/__init__.py +1 -0
- wearablepermed_ml/models/SiMuR_Model.py +671 -0
- wearablepermed_ml/models/__init__.py +1 -0
- wearablepermed_ml/models/model_generator.py +63 -0
- wearablepermed_ml/run_trainer_and_tester_30_times.py +130 -0
- wearablepermed_ml/tester.py +156 -0
- wearablepermed_ml/testing/__init__.py +0 -0
- wearablepermed_ml/testing/testing.py +203 -0
- wearablepermed_ml/trainer.py +782 -0
wearablepermed_ml/data/DataReader.py
@@ -0,0 +1,388 @@
# This script preprocesses the data.

# It normalizes, cleans, filters, etc.

# The result can be a class or a dictionary containing:

# data.X_train
# data.y_train
# data.X_test
# data.y_test


from enum import Enum
import os
import random
import numpy as np
from sklearn.preprocessing import LabelEncoder
import joblib
from collections import defaultdict

_DEF_WINDOWS_REBALANCED_MEAN = 50  # for all tasks (training + test)

class ML_Model(Enum):
    ESANN = 'ESANN'
    CAPTURE24 = 'CAPTURE24'
    RANDOM_FOREST = 'RandomForest'
    XGBOOST = 'XGBoost'

WINDOW_CONCATENATED_DATA = "arr_0"
WINDOW_ALL_LABELS = "arr_1"
WINDOW_ALL_METADATA = "arr_2"

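For orientation, the three constants above are simply the default names NumPy assigns to unnamed arrays inside an .npz archive. A small sketch of how such a windowed stack could be written and read back; the file name, shapes, and IDs below are assumptions for illustration, not values shipped with the package:

import numpy as np

# Hypothetical stack: 10 windows x 6 channels x 250 samples (shapes assumed)
windows = np.random.randn(10, 6, 250)
labels = np.array(['CAMINAR USUAL SPEED'] * 10)
participants = np.array(['PMP_001'] * 10)

# np.savez stores positional arrays under arr_0, arr_1, arr_2 - the keys used above
np.savez('stack_example.npz', windows, labels, participants)

stack = np.load('stack_example.npz')
print(stack[WINDOW_CONCATENATED_DATA].shape)                  # (10, 6, 250)
print(stack[WINDOW_ALL_LABELS][0], stack[WINDOW_ALL_METADATA][0])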
# Jittering
def jitter(X, sigma=0.5):
    # Add Gaussian noise to the data
    return X + np.random.normal(loc=0, scale=sigma, size=X.shape)


# Magnitude Warping
def magnitude_warp(X, sigma=0.2):
    """
    Applies a magnitude distortion to a 1D vector or 2D matrix.

    Parameters:
    - X: 1D np.array (shape (n,)) or 2D np.array (shape (n_samples, n_features))
    - sigma: Standard deviation of the applied Gaussian noise.

    Returns:
    - X with the distortion applied.
    """
    factor = np.random.normal(1, sigma, X.shape)  # Generate a random scale factor for each element
    return X * factor


def shift(X, shift_max=2):
    """
    Applies a random shift to a 1D vector.

    Parameters:
    - X: 1D np.array (shape (n,))
    - shift_max: Maximum number of positions to shift (positive or negative).

    Returns:
    - np.array with the values randomly shifted.
    """
    shift = np.random.randint(-shift_max, shift_max + 1)  # Generate a random shift
    return np.roll(X, shift)  # Apply the shift


def time_warp(X, sigma=0.2):
    """
    Applies time warping to a 1D vector, distorting its time axis.

    Parameters:
    - X: 1D np.array (shape (n,))
    - sigma: Standard deviation of the Gaussian noise applied to the distortions.

    Returns:
    - np.array with the distorted time series.
    """
    n = len(X)
    # Create a displacement for each index, following a normal distribution.
    time_warp = np.cumsum(np.random.normal(1, sigma, n))  # Cumsum gives a smooth curve

    # Normalize so that the total duration does not change (the vector neither expands nor contracts)
    time_warp -= time_warp[0]
    time_warp /= time_warp[-1]

    # Interpolate the original vector according to the distortion
    new_indices = np.interp(np.linspace(0, 1, n), time_warp, np.linspace(0, 1, n))
    X_new = np.interp(new_indices, np.linspace(0, 1, n), X)

    return X_new

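A usage sketch of the four augmentation helpers, for illustration only; the signal length and parameter values are assumptions, and inside DataReader these helpers are applied channel by channel to each training window:

t = np.linspace(0, 2 * np.pi, 250)                # one channel of a 250-sample window (length assumed)
serie = np.sin(t)

serie_jitter = jitter(serie, sigma=0.01)          # additive Gaussian noise
serie_mag = magnitude_warp(serie, sigma=0.03)     # random amplitude scaling
serie_shift = shift(serie, shift_max=2)           # circular shift of up to 2 samples
serie_warp = time_warp(serie, sigma=0.03)         # smooth distortion of the time axis

print(serie_jitter.shape, serie_mag.shape, serie_shift.shape, serie_warp.shape)  # all (250,)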
def config_participants(config_path, metadata_keys_train, metadata_keys_validation, metadata_keys_test):
    with open(config_path, "r") as f:
        lines = f.readlines()

    # Keep the first five lines of the config file and replace everything after them
    new_lines = lines[:5]
    new_lines += [
        "\nTraining participants: " + ",".join(metadata_keys_train) + "\n\n",
        "Validation participants: " + ",".join(metadata_keys_validation) + "\n\n",
        "Testing participants: " + ",".join(metadata_keys_test) + "\n\n"
    ]

    with open(config_path, "w") as f:
        f.writelines(new_lines)

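For illustration, a hypothetical call; the path and participant IDs are made up, and the config file keeps its first five lines with the participant split appended after them:

with open("training_config.txt", "w") as f:       # hypothetical config file with a five-line header
    f.write("line 1\nline 2\nline 3\nline 4\nline 5\n")

config_participants(
    "training_config.txt",
    ["PMP_001", "PMP_002", "PMP_003"],  # training participants (assumed IDs)
    ["PMP_004"],                        # validation participants
    ["PMP_005"],                        # test participants
)
# training_config.txt now keeps its original five lines, followed by:
# Training participants: PMP_001,PMP_002,PMP_003
# Validation participants: PMP_004
# Testing participants: PMP_005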
def aggregate_superclasses(etiquetas_output):
    etiquetas_superclase_1 = ['CAMINAR CON LA COMPRA', 'CAMINAR CON MÓVIL O LIBRO', 'CAMINAR USUAL SPEED', 'CAMINAR ZIGZAG']
    etiquetas_superclase_2 = ['DE PIE BARRIENDO', 'DE PIE DOBLANDO TOALLAS', 'DE PIE MOVIENDO LIBROS', 'DE PIE USANDO PC', 'YOGA', 'SUBIR Y BAJAR ESCALERAS']
    etiquetas_superclase_3 = ['FASE REPOSO CON K5', 'SENTADO LEYENDO', 'SENTADO USANDO PC', 'SENTADO VIENDO LA TV']
    etiquetas_superclase_4 = ['TAPIZ RODANTE', 'TROTAR', 'INCREMENTAL CICLOERGOMETRO']

    for i in range(len(etiquetas_output)):
        if etiquetas_output[i] in etiquetas_superclase_1:
            etiquetas_output[i] = 'CAMINAR'
        elif etiquetas_output[i] in etiquetas_superclase_2:
            etiquetas_output[i] = 'DE PIE + ACTIVIDAD'
        elif etiquetas_output[i] in etiquetas_superclase_3:
            etiquetas_output[i] = 'SENTADO/REPOSO'
        elif etiquetas_output[i] in etiquetas_superclase_4:
            etiquetas_output[i] = 'CORRER/PEDALEAR'

    return etiquetas_output

def aggregate_superclasses_CPA_METs(etiquetas_output):
    etiquetas_superclase_1 = ['FASE REPOSO CON K5', 'SENTADO LEYENDO', 'SENTADO USANDO PC', 'SENTADO VIENDO LA TV']
    etiquetas_superclase_2 = ['YOGA', 'DE PIE DOBLANDO TOALLAS', 'DE PIE USANDO PC', 'CAMINAR CON MÓVIL O LIBRO', 'CAMINAR ZIGZAG']
    etiquetas_superclase_3 = ['DE PIE BARRIENDO', 'DE PIE MOVIENDO LIBROS', 'CAMINAR CON LA COMPRA', 'CAMINAR USUAL SPEED', 'SUBIR Y BAJAR ESCALERAS']
    etiquetas_superclase_4 = ['INCREMENTAL CICLOERGOMETRO', 'TAPIZ RODANTE', 'TROTAR']

    for i in range(len(etiquetas_output)):
        if etiquetas_output[i] in etiquetas_superclase_1:
            etiquetas_output[i] = 'SEDENTARY'
        elif etiquetas_output[i] in etiquetas_superclase_2:
            etiquetas_output[i] = 'LIGHT-INTENSITY'
        elif etiquetas_output[i] in etiquetas_superclase_3:
            etiquetas_output[i] = 'MODERATE-INTENSITY'
        elif etiquetas_output[i] in etiquetas_superclase_4:
            etiquetas_output[i] = 'VIGOROUS-INTENSITY'

    return etiquetas_output

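A short sketch of how the two aggregation functions collapse the raw activity labels; the input labels are taken from the lists above, while the calls themselves are illustrative. Both functions modify the sequence they receive in place, hence the copies:

labels = ['CAMINAR ZIGZAG', 'YOGA', 'SENTADO LEYENDO', 'TROTAR']

print(aggregate_superclasses(list(labels)))
# ['CAMINAR', 'DE PIE + ACTIVIDAD', 'SENTADO/REPOSO', 'CORRER/PEDALEAR']

print(aggregate_superclasses_CPA_METs(list(labels)))
# ['LIGHT-INTENSITY', 'LIGHT-INTENSITY', 'SEDENTARY', 'VIGOROUS-INTENSITY']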
def rebalanced(data, labels, metadata):
    # Flatten the three datasets into one dictionary
    grouped = defaultdict(lambda: defaultdict(list))

    for xi, yi, mi in zip(data, labels, metadata):
        grouped[mi][yi].append(xi)

    participants = {mi: dict(classes) for mi, classes in grouped.items()}

    # Rebalance
    for participant_key in participants:
        for activity_key in participants[str(participant_key)]:
            try:
                random_windows = random.sample(participants[str(participant_key)][str(activity_key)], _DEF_WINDOWS_REBALANCED_MEAN)
                participants[str(participant_key)][str(activity_key)] = random_windows
            except ValueError:
                print("This activity can't be balanced (in a downsampling way)")

    # Rebuild the three datasets from the dictionary
    data_reconstructed = []
    labels_reconstructed = []
    metadata_reconstructed = []

    for metadata, class_participant in participants.items():
        for label, windows in class_participant.items():
            for window in windows:
                data_reconstructed.append(window)
                labels_reconstructed.append(label)
                metadata_reconstructed.append(metadata)

    data_reconstructed_stack = np.stack(data_reconstructed, axis=0)

    return data_reconstructed_stack, labels_reconstructed, metadata_reconstructed

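A toy example of the rebalancing step: every (participant, activity) group is down-sampled to _DEF_WINDOWS_REBALANCED_MEAN windows when it contains enough of them. The shapes and IDs below are assumptions for illustration:

# 120 windows of one activity from one participant, 6 channels x 250 samples each (assumed shapes)
data = np.random.randn(120, 6, 250)
labels = ['CAMINAR'] * 120
metadata = ['PMP_001'] * 120

data_bal, labels_bal, metadata_bal = rebalanced(data, labels, metadata)
print(data_bal.shape)  # (50, 6, 250): down-sampled to 50 windows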
class DataReader(object):
    def __init__(self, modelID, create_superclasses, create_superclasses_CPA_METs, p_train, p_validation, file_path, label_encoder_path, config_path=None, add_sintetic_data=False):
        self.p_train = p_train / 100

        if p_validation is not None:
            self.p_validation = p_validation / 100
            self.p_test = 1 - (self.p_train + self.p_validation)
        else:
            self.p_test = 1 - self.p_train

        stack_de_datos_y_etiquetas_PMP_tot = np.load(file_path)
        datos_input = stack_de_datos_y_etiquetas_PMP_tot[WINDOW_CONCATENATED_DATA]
        etiquetas_output = stack_de_datos_y_etiquetas_PMP_tot[WINDOW_ALL_LABELS]
        metadata_output = stack_de_datos_y_etiquetas_PMP_tot[WINDOW_ALL_METADATA]

        # X data
        # X = datos_input

        # Creation of activity superclasses
        if create_superclasses == True:
            etiquetas_output = aggregate_superclasses(etiquetas_output)
            datos_input, etiquetas_output, metadata_output = rebalanced(datos_input, etiquetas_output, metadata_output)

        if create_superclasses_CPA_METs == True:
            etiquetas_output = aggregate_superclasses_CPA_METs(etiquetas_output)
            datos_input, etiquetas_output, metadata_output = rebalanced(datos_input, etiquetas_output, metadata_output)

        # y data
        # Numeric encoding of the labels for each data sample
        # Create the label encoder
        label_encoder = LabelEncoder()
        y_encoded = label_encoder.fit_transform(etiquetas_output)

        # Split train and test datasets
        grouped = defaultdict(list)
        for s in metadata_output:
            grouped[s].append(s)
        metadata_grouped = dict(grouped)

        metadata_keys = list(metadata_grouped.keys())
        metadata_keys_len = len(metadata_keys)

        number_of_keys_train = round(metadata_keys_len * self.p_train)
        metadata_keys_train = metadata_keys[0:number_of_keys_train]

        number_of_keys_validation = round(metadata_keys_len * self.p_validation)
        metadata_keys_validation = metadata_keys[number_of_keys_train:(number_of_keys_train + number_of_keys_validation)]

        number_of_keys_test = round(metadata_keys_len * self.p_test)
        metadata_keys_test = metadata_keys[(number_of_keys_train + number_of_keys_validation):(number_of_keys_train + number_of_keys_validation + number_of_keys_test)]

        if modelID == ML_Model.RANDOM_FOREST.value or modelID == ML_Model.XGBOOST.value:
            X_train = np.empty((0, datos_input.shape[1]))       # Initialize empty with n columns
            X_validation = np.empty((0, datos_input.shape[1]))  # Initialize empty with n columns
            X_test = np.empty((0, datos_input.shape[1]))
        elif modelID == ML_Model.ESANN.value or modelID == ML_Model.CAPTURE24.value:
            X_train_list = []
            X_validation_list = []
            X_test_list = []

        y_train = np.empty((0, 1))
        y_validation = np.empty((0, 1))
        y_test = np.empty((0, 1))


        # Save training, validation and test participants in the config file only during the training step
        if config_path is not None:
            config_participants(config_path, metadata_keys_train, metadata_keys_validation, metadata_keys_test)

        # Split train, validation and test datasets by participant
        for i in range(datos_input.shape[0]):
            participant_id_i = metadata_output[i]
            if participant_id_i in metadata_keys_train:
                if modelID == ML_Model.RANDOM_FOREST.value or modelID == ML_Model.XGBOOST.value:
                    fila_data = datos_input[i, :].reshape(1, -1)  # Ensure shape (1, n)
                    X_train = np.vstack([X_train, fila_data])
                elif modelID == ML_Model.ESANN.value or modelID == ML_Model.CAPTURE24.value:
                    window_data = datos_input[i, :, :]
                    X_train_list.append(window_data)

                label_i = y_encoded[i]
                label_i = np.array([[label_i]])
                y_train = np.vstack([y_train, label_i])

            if participant_id_i in metadata_keys_validation:
                if modelID == ML_Model.RANDOM_FOREST.value or modelID == ML_Model.XGBOOST.value:
                    fila_data = datos_input[i, :].reshape(1, -1)  # Ensure shape (1, n)
                    X_validation = np.vstack([X_validation, fila_data])
                elif modelID == ML_Model.ESANN.value or modelID == ML_Model.CAPTURE24.value:
                    window_data = datos_input[i, :, :]
                    X_validation_list.append(window_data)

                label_i = y_encoded[i]
                label_i = np.array([[label_i]])
                y_validation = np.vstack([y_validation, label_i])

            if participant_id_i in metadata_keys_test:
                if modelID == ML_Model.RANDOM_FOREST.value or modelID == ML_Model.XGBOOST.value:
                    fila_data = datos_input[i, :].reshape(1, -1)  # Ensure shape (1, n)
                    X_test = np.vstack([X_test, fila_data])
                elif modelID == ML_Model.ESANN.value or modelID == ML_Model.CAPTURE24.value:
                    window_data = datos_input[i, :, :]
                    X_test_list.append(window_data)

                label_i = y_encoded[i]
                label_i = np.array([[label_i]])
                y_test = np.vstack([y_test, label_i])

        try:
            if X_train_list:
                X_train = np.stack(X_train_list)
            if X_validation_list:
                X_validation = np.stack(X_validation_list)
            if X_test_list:
                X_test = np.stack(X_test_list)
        except NameError:
            print("Training a non-convolutional model.")

        # --------------------------------------------------------------------------------------------------
        # We apply data augmentation to the training set; the test set keeps the original data:
        num_filas = X_train.shape[0]
        num_columnas = X_train.shape[1]

        if (modelID == ML_Model.ESANN.value or modelID == ML_Model.CAPTURE24.value) and add_sintetic_data == True:
            profundidad = X_train.shape[2]

            # 1.- Jittering
            # ---------------------------
            # Generate new series with jitter (one per original series)
            datos_aumentados_jittering = np.zeros((num_filas, num_columnas, profundidad))
            etiquetas_aumentadas_jittering = np.zeros((num_filas,))

            for i in range(num_filas):
                for j in range(num_columnas):
                    # Extract the time series of length 250
                    serie = X_train[i, j, :]
                    nueva_serie = jitter(serie, 0.01)  # Add Gaussian noise to the time series
                    datos_aumentados_jittering[i, j, :] = nueva_serie
                etiquetas_aumentadas_jittering[i] = y_train[i]  # Keep the same label

            # X_train = np.concatenate((X_train, datos_aumentados_jittering), axis=0)      # original X_train + augmented X_train
            # y_train = np.concatenate((y_train, etiquetas_aumentadas_jittering), axis=0)  # original y_train + augmented y_train


            # 2.- Magnitude Warping
            # ---------------------------
            # Generate new series with magnitude warping (one per original series)
            datos_aumentados_magnitude_warping = np.zeros((num_filas, num_columnas, profundidad))
            etiquetas_aumentadas_magnitude_warping = np.zeros((num_filas,))
            for i in range(num_filas):
                for j in range(num_columnas):
                    # Extract the time series of length 250
                    serie = X_train[i, j, :]
                    nueva_serie = magnitude_warp(serie, 0.03)
                    datos_aumentados_magnitude_warping[i, j, :] = nueva_serie
                etiquetas_aumentadas_magnitude_warping[i] = y_train[i]  # Keep the same label

            # X_train = np.concatenate((X_train, datos_aumentados_jittering, datos_aumentados_magnitude_warping), axis=0)
            # y_train = np.concatenate((y_train, etiquetas_aumentadas_jittering, etiquetas_aumentadas_magnitude_warping), axis=0)


            # 3.- Shifting
            # ---------------------------
            # Generate new series with shifting (one per original series)
            datos_aumentados_shifting = np.zeros((num_filas, num_columnas, profundidad))
            etiquetas_aumentadas_shifting = np.zeros((num_filas,))
            for i in range(num_filas):
                for j in range(num_columnas):
                    # Extract the time series of length 250
                    serie = X_train[i, j, :]
                    nueva_serie = shift(serie, 0.03)
                    datos_aumentados_shifting[i, j, :] = nueva_serie
                etiquetas_aumentadas_shifting[i] = y_train[i]  # Keep the same label

            # X_train = np.concatenate((X_train, datos_aumentados_jittering, datos_aumentados_magnitude_warping, datos_aumentados_shifting), axis=0)
            # y_train = np.concatenate((y_train, etiquetas_aumentadas_jittering, etiquetas_aumentadas_magnitude_warping, etiquetas_aumentadas_shifting), axis=0)


            # 4.- Time Warping
            # ---------------------------
            # Generate new series with time warping (one per original series)
            datos_aumentados_time_warping = np.zeros((num_filas, num_columnas, profundidad))
            etiquetas_aumentadas_time_warping = np.zeros((num_filas,))
            for i in range(num_filas):
                for j in range(num_columnas):
                    # Extract the time series of length 250
                    serie = X_train[i, j, :]
                    nueva_serie = time_warp(serie, 0.03)
                    datos_aumentados_time_warping[i, j, :] = nueva_serie
                etiquetas_aumentadas_time_warping[i] = y_train[i]  # Keep the same label

            X_train = np.concatenate((X_train, datos_aumentados_jittering, datos_aumentados_magnitude_warping, datos_aumentados_shifting, datos_aumentados_time_warping), axis=0)  # original X_train + augmented X_train
            y_train = np.concatenate((y_train, etiquetas_aumentadas_jittering.reshape(-1, 1), etiquetas_aumentadas_magnitude_warping.reshape(-1, 1), etiquetas_aumentadas_shifting.reshape(-1, 1), etiquetas_aumentadas_time_warping.reshape(-1, 1)), axis=0)  # original y_train + augmented y_train

        self.X_train = X_train
        self.y_train = y_train
        try:
            self.X_validation = X_validation
            self.y_validation = y_validation
            self.X_test = X_test
            self.y_test = y_test
        except NameError:
            print("Not enough data for validation and/or test.")

        # Save the LabelEncoder after fitting it
        joblib.dump(label_encoder, label_encoder_path)

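Finally, a usage sketch of the DataReader class above. The argument values, file names, and split percentages are assumptions chosen for illustration, not defaults shipped with the package:

data = DataReader(
    modelID=ML_Model.ESANN.value,            # 'ESANN' selects the windowed (convolutional) branch
    create_superclasses=False,
    create_superclasses_CPA_METs=False,
    p_train=70,                              # percentages; divided by 100 internally
    p_validation=15,
    file_path='stack_windows.npz',           # hypothetical .npz stack produced upstream
    label_encoder_path='label_encoder.pkl',  # where the fitted LabelEncoder is saved
    config_path=None,                        # skip writing the participant split
    add_sintetic_data=False,
)

print(data.X_train.shape, data.y_train.shape)
print(data.X_test.shape, data.y_test.shape)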
wearablepermed_ml/data/__init__.py
@@ -0,0 +1 @@
from .DataReader import DataReader