uniovi-simur-wearablepermed-ml 1.1.0__py3-none-any.whl → 1.2.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their public registries.

Potentially problematic release: this version of uniovi-simur-wearablepermed-ml might be problematic.

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: uniovi-simur-wearablepermed-ml
- Version: 1.1.0
+ Version: 1.2.0
  Summary: Uniovi Simur WearablePerMed Machine Learning.
  Home-page: https://github.com/Simur-project/uniovi-simur-wearablepermed-ml.git
  Author: Miguel Salinas Gancedo
@@ -0,0 +1,19 @@
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/licenses/LICENSE.txt,sha256=MJSf2LY2uh50n0Y7vPzbMcIfTXiY_IvEp7dQMhSHBWo,1089
+ wearablepermed_ml/__init__.py,sha256=Jl6IIVd6LQLVAQG-uKC7nnY2204vk_YrBMz3bC4JxvU,601
+ wearablepermed_ml/run_trainer_and_tester_30_times.py,sha256=rYa9vyOF-cqjs61_FqqszimBVwU8WDg-ydTHpAponFQ,8788
+ wearablepermed_ml/tester.py,sha256=rh_ku_upI9jtHjr9GtEpj-q5G6fYN-CFv7MloYqcLUY,4364
+ wearablepermed_ml/trainer.py,sha256=CUzRPCK6NEujS6mFLKspY6HA7I-V0DXVtRucFrO4V1Q,43745
+ wearablepermed_ml/basic_functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ wearablepermed_ml/basic_functions/address.py,sha256=VYAvZLqyd4DvX2bYUJI5fnepKPMaI4CY7uiaeCsNauM,664
+ wearablepermed_ml/data/DataReader.py,sha256=Bd1AqSytx0AXx1S_C9osk9I205vXdoNYP6IZctDSOmw,18476
+ wearablepermed_ml/data/__init__.py,sha256=ce3QTcR18knSsPJguwJkVqHJnTnxYv4WWBKngmTL_BI,58
+ wearablepermed_ml/models/SiMuR_Model.py,sha256=WXAkvFhOhLkzgCjiwkmT4X9693-zlolnhsBepNCXarc,30346
+ wearablepermed_ml/models/__init__.py,sha256=HkTS7w1Mpj0TP7ACtgcKv2gZEVYDNqAectiwGBI_Mmo,108
+ wearablepermed_ml/models/model_generator.py,sha256=8RX5HZWcz1aZV5hl5RNRchihrlNYmXV8LgAD2UrPsxw,1906
+ wearablepermed_ml/testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ wearablepermed_ml/testing/testing.py,sha256=93ki6WocLcY5MpKHOscYOqm0J1A3FGXjEKCSDwOmr00,10068
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/METADATA,sha256=HJS303mn7jlsDyuaI_pK7N9HVOkpS2r3Q8Jdw8yCgoo,15626
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/entry_points.txt,sha256=BW3Dw-fW_hquNHbK1q7pUNgIGwRDMPHqs9znQE6SECM,96
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/top_level.txt,sha256=PK7Cm_vvudFpRS-vGYiQDsZLkFZ6mdaocJqSPXHMP4c,18
+ uniovi_simur_wearablepermed_ml-1.2.0.dist-info/RECORD,,
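Each RECORD entry is a comma-separated triple: file path, sha256 digest encoded with URL-safe base64 and the trailing '=' padding stripped, and file size in bytes. A minimal sketch of recomputing such a hash to verify an extracted wheel file (the path argument is illustrative):

import base64
import hashlib

def record_hash(path):
    # sha256 of the file contents, URL-safe base64, '=' padding removed (wheel RECORD convention)
    digest = hashlib.sha256(open(path, "rb").read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(record_hash("wearablepermed_ml/tester.py"))  # should print sha256=rh_ku_up... for the 1.2.0 file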
@@ -1 +1,2 @@
+ # export python modules
  from .DataReader import DataReader
@@ -1,7 +1,7 @@
- from models import *
- from basic_functions.address import *
+ from wearablepermed_ml.models import *
+ from wearablepermed_ml.basic_functions.address import *
  import pandas as pd
- from data import DataReader
+ from wearablepermed_ml.data import DataReader
  import ast

  # Model factory pattern
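The recurring change in 1.2.0 is the switch from bare top-level imports (from models import *) to package-qualified ones (from wearablepermed_ml.models import *). The bare form only resolves when the package's inner directories happen to sit on sys.path, e.g. when the modules are run from the source tree; once the wheel is installed, only wearablepermed_ml itself is importable. A minimal sketch of the failure mode, assuming a clean environment with the wheel installed:

import importlib

try:
    importlib.import_module("models")  # shipped only as the subpackage wearablepermed_ml.models
except ModuleNotFoundError as err:
    print(err)  # No module named 'models'

models = importlib.import_module("wearablepermed_ml.models")  # the qualified import resolves
print(models.__name__)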
@@ -8,10 +8,10 @@ N_RUNS = 30 # Número de ejecuc
  # Ruta Windows
  # case_id_folder = "D:\\DATA_PMP_File_Server\\output" # Carpeta base de los datos
  # Ruta Linux
- # case_id_folder = "/mnt/nvme1n2/git/uniovi-simur-wearablepermed-data/output"
- case_id_folder = "/mnt/simur-fileserver/data/wearablepermed/output"
+ case_id_folder = "/mnt/nvme1n2/git/uniovi-simur-wearablepermed-data/output"
+ # case_id_folder = "/mnt/simur-fileserver/data/wearablepermed/output"

- case_id = "cases_dataset_PI_M/case_PI_M_BRF_superclasses" # Identificador del caso
+ case_id = "cases_dataset_PI_M/case_PI_M_BRF_superclasses_activity_intensity/" # Identificador del caso

  # Argumentos para el script de entrenamiento
  train_args = [
@@ -24,7 +24,8 @@ train_args = [
  "--ml-models", "RandomForest", # Modelo ML a usar
  "--training-percent", "70", # Porcentaje de datos para entrenamiento
  # "--validation-percent", "20", # Porcentaje de datos para validación
- "--create-superclasses" # Flag opcional para crear superclases
+ # "--create-superclasses" # Flag opcional para crear superclases
+ "--create-superclasses-CPA-METs"
  ]

  # Argumentos para el script de test
@@ -38,7 +39,8 @@ test_args = [
  "--model-id", "RandomForest", # Modelo ML usado para test
  "--training-percent", "70", # Porcentaje usado en entrenamiento
  # "--validation-percent", "20", # Porcentaje de datos para validaciones
- "--create-superclasses" # Flag opcional
+ # "--create-superclasses" # Flag opcional
+ "--create-superclasses-CPA-METs"
  ]

  # Ruta del ejecutable de Python del entorno virtual (Windows)
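Both argument lists now comment out --create-superclasses and pass --create-superclasses-CPA-METs instead, and case_id points at a new *_activity_intensity case folder. Judging from the file name and the virtual-environment interpreter path referenced above, the runner presumably shells out to the trainer and tester once per run; a hedged sketch of that pattern (the subprocess call and module path are assumptions, not the package's exact code):

import subprocess
import sys

N_RUNS = 30  # number of independent train/test repetitions

train_args = [
    "--ml-models", "RandomForest",
    "--training-percent", "70",
    "--create-superclasses-CPA-METs",  # flag introduced in 1.2.0
]

for run_index in range(N_RUNS):
    # sys.executable keeps the virtual environment's interpreter for the child process
    subprocess.run([sys.executable, "-m", "wearablepermed_ml.trainer", *train_args], check=True)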
@@ -4,7 +4,7 @@ import sys
  import argparse
  import logging

- from testing import testing
+ from wearablepermed_ml.testing import testing

  __author__ = "Miguel Salinas <uo34525@uniovi.es>, Alejandro <uo265351@uniovi.es>"
  __copyright__ = "Uniovi"
@@ -1,8 +1,8 @@
  from enum import Enum
  import json
- from data import DataReader
- from models.model_generator import modelGenerator
- from basic_functions.address import *
+ from wearablepermed_ml.data import DataReader
+ from wearablepermed_ml.models.model_generator import modelGenerator
+ from wearablepermed_ml.basic_functions.address import *
  import numpy as np
  import matplotlib.pyplot as plt
  import seaborn as sns
@@ -104,18 +104,22 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP

  # testing the model
  y_predicted_train = model.predict(model.X_train)
- y_predicted_validation = model.predict(model.X_validation)
  y_predicted_test = model.predict(model.X_test)

  # get the class with the highest probability
  if (model_id == ML_Model.ESANN.value or model_id == ML_Model.CAPTURE24.value):
+ y_predicted_validation = model.predict(model.X_validation)
  y_final_prediction_train = np.argmax(y_predicted_train, axis=1)
  y_final_prediction_validation = np.argmax(y_predicted_validation, axis=1)
  y_final_prediction_test = np.argmax(y_predicted_test, axis=1) # Trabajamos con clasificación multicategoría, no necesario para los bosques aleatorios
+
+ acc_score_validation = accuracy_score(model.y_validation, y_final_prediction_validation)
+ print("Global accuracy score (validation) = "+str(round(acc_score_validation*100,2))+" [%]")
+ F1_score_validation = f1_score(model.y_validation, y_final_prediction_validation, average='macro') # revisar las opciones de average
+ print("Global F1 score (validation) = "+str(round(F1_score_validation*100,2))+" [%]")

- else:
+ else: # random forest, xgboost
  y_final_prediction_train = y_predicted_train
- y_final_prediction_validation = y_predicted_validation
  y_final_prediction_test = y_predicted_test # esta línea solo es necesaria para los bosques aleatorios y XGBoost

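With this restructuring, validation predictions and validation metrics are computed only inside the ESANN/CAPTURE24 branch, whose predict() output (judging from the np.argmax calls) is a matrix of per-class probabilities, whereas the RandomForest/XGBoost branch already gets class labels back. A standalone sketch of that distinction on synthetic data (illustrative only):

import numpy as np
from sklearn.metrics import accuracy_score

y_true = np.array([0, 2, 1, 2])

# Probabilistic classifier (e.g. a softmax network): one row of class probabilities per sample.
y_proba = np.array([[0.7, 0.2, 0.1],
                    [0.1, 0.2, 0.7],
                    [0.2, 0.6, 0.2],
                    [0.3, 0.3, 0.4]])
y_pred_net = np.argmax(y_proba, axis=1)   # collapse probabilities to class indices

# Label classifier (e.g. RandomForest.predict): already class indices, no argmax needed.
y_pred_rf = np.array([0, 2, 1, 1])

print(accuracy_score(y_true, y_pred_net))  # 1.0
print(accuracy_score(y_true, y_pred_rf))   # 0.75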
@@ -134,7 +138,7 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP
  cm = confusion_matrix(model.y_test, y_final_prediction_test, labels=all_classes)

  # Graficar la matriz de confusión
- confusion_matrix_test_path = os.path.join(case_id_folder, "confusion_matrix_test_"+run_index+".png")
+ confusion_matrix_test_path = os.path.join(case_id_folder, "confusion_matrix_test_"+str(run_index)+".png")

  plt.figure(figsize=(10,7))
  sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=class_names_total, yticklabels=class_names_total)
@@ -149,10 +153,7 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP

  # Accuracy
  acc_score_train = accuracy_score(model.y_train, y_final_prediction_train)
- print("Global accuracy score (train) = "+str(round(acc_score_train*100,2))+" [%]")
-
- acc_score_validation = accuracy_score(model.y_validation, y_final_prediction_validation)
- print("Global accuracy score (validation) = "+str(round(acc_score_validation*100,2))+" [%]")
+ print("Global accuracy score (train) = "+str(round(acc_score_train*100,2))+" [%]")

  acc_score_test = accuracy_score(model.y_test, y_final_prediction_test)
  print("Global accuracy score (test) = "+str(round(acc_score_test*100,2))+" [%]")
@@ -161,9 +162,6 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP
  F1_score_train = f1_score(model.y_train, y_final_prediction_train, average='macro') # revisar las opciones de average
  print("Global F1 score (train) = "+str(round(F1_score_train*100,2))+" [%]")

- F1_score_validation = f1_score(model.y_validation, y_final_prediction_validation, average='macro') # revisar las opciones de average
- print("Global F1 score (validation) = "+str(round(F1_score_validation*100,2))+" [%]")
-
  F1_score_test = f1_score(model.y_test, y_final_prediction_test, average='macro') # revisar las opciones de average
  print("Global F1 score (test) = "+str(round(F1_score_test*100,2))+" [%]")

@@ -172,12 +170,13 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP
  # print("Global recall score = "+str(round(recall_score_global*100,2))+" [%]")

  # Save to a file
- clasification_global_report_path = os.path.join(case_id_folder, "clasification_global_report_"+run_index+".txt")
+ clasification_global_report_path = os.path.join(case_id_folder, "clasification_global_report_"+str(run_index)+".txt")
  with open(clasification_global_report_path, "w") as f:
  f.write(f"Global F1 Score (train): {F1_score_train:.4f}\n")
  f.write(f"Global accuracy score (train): {acc_score_train:.4f}\n")
- f.write(f"Global F1 Score (validation): {F1_score_validation:.4f}\n")
- f.write(f"Global accuracy score (validation): {acc_score_validation:.4f}\n")
+ if model_id in [ML_Model.ESANN, ML_Model.CAPTURE24, ML_Model.XGBOOST]:
+ f.write(f"Global F1 Score (validation): {F1_score_validation:.4f}\n")
+ f.write(f"Global accuracy score (validation): {acc_score_validation:.4f}\n")
  f.write(f"Global F1 Score (test): {F1_score_test:.4f}\n")
  f.write(f"Global accuracy score (test): {acc_score_test:.4f}\n")
  # f.write(f"Global recall score: {recall_score_global:.4f}\n")
@@ -198,6 +197,6 @@ def tester(case_id_folder, model_id, create_superclasses, create_superclasses_CP
  print(classification_per_class_report)

  # Save per-class report to a file
- clasification_per_class_report_path = os.path.join(case_id_folder, "clasification_per_class_report_"+run_index+".txt")
+ clasification_per_class_report_path = os.path.join(case_id_folder, "clasification_per_class_report_"+str(run_index)+".txt")
  with open(clasification_per_class_report_path, "w") as f:
  f.write(classification_per_class_report)
@@ -5,9 +5,9 @@ import logging
  from enum import Enum

  import numpy as np
- from data import DataReader
- from models.model_generator import modelGenerator
- from basic_functions.address import *
+ from wearablepermed_ml.data import DataReader
+ from wearablepermed_ml.models.model_generator import modelGenerator
+ from wearablepermed_ml.basic_functions.address import *

  import tensorflow as tf

@@ -22,9 +22,11 @@ from ray.air import session

  from ray.tune.tuner import TuneConfig

- from models import SiMuRModel_ESANN, SiMuRModel_CAPTURE24, SiMuRModel_RandomForest, SiMuRModel_XGBoost
+ from wearablepermed_ml.models import SiMuRModel_ESANN, SiMuRModel_CAPTURE24, SiMuRModel_RandomForest, SiMuRModel_XGBoost
  from sklearn.metrics import accuracy_score

+ from sklearn.model_selection import cross_val_score
+

  # Configuration of GPU
  gpus = tf.config.list_physical_devices('GPU')
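1.2.0 also imports cross_val_score in the trainer. A minimal sketch of the k-fold evaluation that import enables; the estimator and the synthetic data below are placeholders, not the package's own pipeline:

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

# Placeholder data standing in for the windowed accelerometer features.
X, y = make_classification(n_samples=300, n_features=20, n_classes=3, n_informative=6, random_state=0)

clf = RandomForestClassifier(n_estimators=200, random_state=0)
scores = cross_val_score(clf, X, y, cv=5, scoring="accuracy")  # one accuracy value per fold
print(scores.mean(), scores.std())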
@@ -574,11 +576,11 @@ def main(args):
  # ------------------------------------------------------------------------------------------------------
  # Espacio de búsqueda
  search_space = {
- "n_estimators": tune.randint(50, 301), # Número de árboles entre 50 y 300
- "max_depth": tune.choice([5, 10, 15, 20, None]), # Profundidad máxima del árbol
- "min_samples_split": tune.randint(2, 11), # Muestras mínimas para dividir un nodo
- "min_samples_leaf": tune.randint(1, 11), # Muestras mínimas por hoja
- "max_features": tune.choice([None, "sqrt", "log2"]), # Número de características por división
+ "n_estimators": tune.randint(200, 301), # Número de árboles entre 50 y 300
+ "max_depth": tune.choice([5, 6, 8]), # Profundidad máxima del árbol
+ "min_samples_split": tune.randint(8, 15), # Muestras mínimas para dividir un nodo
+ "min_samples_leaf": tune.randint(6, 10), # Muestras mínimas por hoja
+ "max_features": tune.choice(["sqrt", "log2"]), # Número de características por división
  "random_state": tune.randint(0, 10000)
  }

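The RandomForest search space is now much narrower and more strongly regularised: at least 200 trees, shallow depths, larger minimum split and leaf sizes, and no unrestricted max_features. A self-contained sketch of how a search space like this is consumed by Ray Tune, using the same session.report style the trainer imports; the trainable and the synthetic data are placeholders, not the package's own code:

from ray import tune
from ray.air import session
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=400, n_features=20, n_classes=3, n_informative=6, random_state=0)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.3, random_state=0)

def train_rf(config):
    # One trial: fit a forest with the sampled hyperparameters and report validation accuracy.
    clf = RandomForestClassifier(**config).fit(X_tr, y_tr)
    session.report({"accuracy": accuracy_score(y_val, clf.predict(X_val))})

search_space = {
    "n_estimators": tune.randint(200, 301),
    "max_depth": tune.choice([5, 6, 8]),
    "min_samples_split": tune.randint(8, 15),
    "min_samples_leaf": tune.randint(6, 10),
    "max_features": tune.choice(["sqrt", "log2"]),
    "random_state": tune.randint(0, 10000),
}

tuner = tune.Tuner(
    train_rf,
    param_space=search_space,
    tune_config=tune.TuneConfig(num_samples=10, metric="accuracy", mode="max"),
)
best = tuner.fit().get_best_result()
print(best.config)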
@@ -694,15 +696,15 @@ def main(args):
  # ------------------------------------------------------------------------------------------------------
  # Espacio de búsqueda para XGBoost
  search_space_xgb = {
- "num_boost_round": tune.randint(50, 3001), # Árboles (rondas) de boosting
- "max_depth": tune.randint(3, 11), # Profundidad máxima
- "learning_rate": tune.uniform(0.01, 0.3), # Tasa de aprendizaje
- "subsample": tune.uniform(0.5, 1.0), # Fracción de muestras por árbol
- "colsample_bytree": tune.uniform(0.5, 1.0), # Fracción de columnas por árbol
- "gamma": tune.uniform(0, 5), # Regularización mínima de pérdida
- "min_child_weight": tune.randint(1, 10), # Peso mínimo de hijos
- "reg_alpha": tune.uniform(0, 1), # L1 regularization
- "reg_lambda": tune.uniform(0, 1), # L2 regularization
+ "num_boost_round": tune.randint(200, 1000), # Árboles (rondas) de boosting
+ "max_depth": tune.randint(2, 5), # Profundidad máxima
+ "learning_rate": tune.uniform(0.01, 0.07), # Tasa de aprendizaje
+ "subsample": tune.uniform(0.4, 0.8), # Fracción de muestras por árbol
+ "colsample_bytree": tune.uniform(0.4, 0.8), # Fracción de columnas por árbol
+ "gamma": tune.uniform(0, 10), # Regularización mínima de pérdida
+ "min_child_weight": tune.randint(5, 50), # Peso mínimo de hijos
+ "reg_alpha": tune.loguniform(0.1, 100), # L1 regularization
+ "reg_lambda": tune.loguniform(0.1, 100), # L2 regularization
  "random_state": tune.randint(0, 10000)
  }

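The XGBoost space shifts in the same direction: fewer and shallower boosting rounds, lower learning rates, stronger subsampling, and log-uniform L1/L2 penalties spanning 0.1-100 instead of a flat 0-1 range. One practical detail when feeding a sampled config into xgboost's native API is that num_boost_round is an argument of xgb.train, not a booster parameter, so it has to be split off the dict; a hedged sketch of that mapping (function and variable names are illustrative, not the trainer's own):

import xgboost as xgb

def train_xgb(config, dtrain, dval, n_classes):
    params = dict(config)
    # num_boost_round is passed to xgb.train itself, not inside the params dict.
    num_boost_round = params.pop("num_boost_round")
    # Ray Tune's "random_state" maps onto xgboost's native "seed" parameter.
    params["seed"] = params.pop("random_state")
    params.update({"objective": "multi:softprob", "num_class": n_classes})
    return xgb.train(params, dtrain, num_boost_round=num_boost_round,
                     evals=[(dval, "validation")], verbose_eval=False)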
@@ -1,19 +0,0 @@
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/licenses/LICENSE.txt,sha256=MJSf2LY2uh50n0Y7vPzbMcIfTXiY_IvEp7dQMhSHBWo,1089
- wearablepermed_ml/__init__.py,sha256=Jl6IIVd6LQLVAQG-uKC7nnY2204vk_YrBMz3bC4JxvU,601
- wearablepermed_ml/run_trainer_and_tester_30_times.py,sha256=6k7Sl6FAFhdYTQgM1MigUNXo5kAtYGAtZlTvXFd7oT8,8690
- wearablepermed_ml/tester.py,sha256=1Ac2l8MypZJTf1xl866G4dllH7r60iDWTBNMHieW40I,4346
- wearablepermed_ml/trainer.py,sha256=zoglrCPToTyQBEDPZ39rbmglnvjQcHghH2qX1lrEFRc,43621
- wearablepermed_ml/basic_functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- wearablepermed_ml/basic_functions/address.py,sha256=VYAvZLqyd4DvX2bYUJI5fnepKPMaI4CY7uiaeCsNauM,664
- wearablepermed_ml/data/DataReader.py,sha256=Bd1AqSytx0AXx1S_C9osk9I205vXdoNYP6IZctDSOmw,18476
- wearablepermed_ml/data/__init__.py,sha256=V0piawbJbkBykxziMrDr0W8KloG6TUyWhj_QDhlZDqw,34
- wearablepermed_ml/models/SiMuR_Model.py,sha256=WXAkvFhOhLkzgCjiwkmT4X9693-zlolnhsBepNCXarc,30346
- wearablepermed_ml/models/__init__.py,sha256=HkTS7w1Mpj0TP7ACtgcKv2gZEVYDNqAectiwGBI_Mmo,108
- wearablepermed_ml/models/model_generator.py,sha256=r23ahAE-otf0OGt_keD77dIb2-x9KN2ZP_e9ZhGEukk,1852
- wearablepermed_ml/testing/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- wearablepermed_ml/testing/testing.py,sha256=Uiq8AolTgbR2wcV12pUygSv3T8Y38hdpVC2yKoz04Ks,9927
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/METADATA,sha256=-g_EosjeCgXKo6h132EKu177_cDmv7q1d-cnwvNJVS0,15626
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/entry_points.txt,sha256=BW3Dw-fW_hquNHbK1q7pUNgIGwRDMPHqs9znQE6SECM,96
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/top_level.txt,sha256=PK7Cm_vvudFpRS-vGYiQDsZLkFZ6mdaocJqSPXHMP4c,18
- uniovi_simur_wearablepermed_ml-1.1.0.dist-info/RECORD,,