gpbench 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188)
  1. gp_agent_tool/compute_dataset_feature.py +67 -0
  2. gp_agent_tool/config.py +65 -0
  3. gp_agent_tool/experience/create_masked_dataset_summary.py +97 -0
  4. gp_agent_tool/experience/dataset_summary_info.py +13 -0
  5. gp_agent_tool/experience/experience_info.py +12 -0
  6. gp_agent_tool/experience/get_matched_experience.py +111 -0
  7. gp_agent_tool/llm_client.py +119 -0
  8. gp_agent_tool/logging_utils.py +24 -0
  9. gp_agent_tool/main.py +347 -0
  10. gp_agent_tool/read_agent/__init__.py +46 -0
  11. gp_agent_tool/read_agent/nodes.py +674 -0
  12. gp_agent_tool/read_agent/prompts.py +547 -0
  13. gp_agent_tool/read_agent/python_repl_tool.py +165 -0
  14. gp_agent_tool/read_agent/state.py +101 -0
  15. gp_agent_tool/read_agent/workflow.py +54 -0
  16. gpbench/__init__.py +25 -0
  17. gpbench/_selftest.py +104 -0
  18. gpbench/method_class/BayesA/BayesA_class.py +141 -0
  19. gpbench/method_class/BayesA/__init__.py +5 -0
  20. gpbench/method_class/BayesA/_bayesfromR.py +96 -0
  21. gpbench/method_class/BayesA/_param_free_base_model.py +84 -0
  22. gpbench/method_class/BayesA/bayesAfromR.py +16 -0
  23. gpbench/method_class/BayesB/BayesB_class.py +140 -0
  24. gpbench/method_class/BayesB/__init__.py +5 -0
  25. gpbench/method_class/BayesB/_bayesfromR.py +96 -0
  26. gpbench/method_class/BayesB/_param_free_base_model.py +84 -0
  27. gpbench/method_class/BayesB/bayesBfromR.py +16 -0
  28. gpbench/method_class/BayesC/BayesC_class.py +141 -0
  29. gpbench/method_class/BayesC/__init__.py +4 -0
  30. gpbench/method_class/BayesC/_bayesfromR.py +96 -0
  31. gpbench/method_class/BayesC/_param_free_base_model.py +84 -0
  32. gpbench/method_class/BayesC/bayesCfromR.py +16 -0
  33. gpbench/method_class/CropARNet/CropARNet_class.py +186 -0
  34. gpbench/method_class/CropARNet/CropARNet_he_class.py +154 -0
  35. gpbench/method_class/CropARNet/__init__.py +5 -0
  36. gpbench/method_class/CropARNet/base_CropARNet_class.py +178 -0
  37. gpbench/method_class/Cropformer/Cropformer_class.py +308 -0
  38. gpbench/method_class/Cropformer/__init__.py +5 -0
  39. gpbench/method_class/Cropformer/cropformer_he_class.py +221 -0
  40. gpbench/method_class/DL_GWAS/DL_GWAS_class.py +250 -0
  41. gpbench/method_class/DL_GWAS/DL_GWAS_he_class.py +169 -0
  42. gpbench/method_class/DL_GWAS/__init__.py +5 -0
  43. gpbench/method_class/DNNGP/DNNGP_class.py +163 -0
  44. gpbench/method_class/DNNGP/DNNGP_he_class.py +138 -0
  45. gpbench/method_class/DNNGP/__init__.py +5 -0
  46. gpbench/method_class/DNNGP/base_dnngp_class.py +116 -0
  47. gpbench/method_class/DeepCCR/DeepCCR_class.py +172 -0
  48. gpbench/method_class/DeepCCR/DeepCCR_he_class.py +161 -0
  49. gpbench/method_class/DeepCCR/__init__.py +5 -0
  50. gpbench/method_class/DeepCCR/base_DeepCCR_class.py +209 -0
  51. gpbench/method_class/DeepGS/DeepGS_class.py +184 -0
  52. gpbench/method_class/DeepGS/DeepGS_he_class.py +150 -0
  53. gpbench/method_class/DeepGS/__init__.py +5 -0
  54. gpbench/method_class/DeepGS/base_deepgs_class.py +153 -0
  55. gpbench/method_class/EIR/EIR_class.py +276 -0
  56. gpbench/method_class/EIR/EIR_he_class.py +184 -0
  57. gpbench/method_class/EIR/__init__.py +5 -0
  58. gpbench/method_class/EIR/utils/__init__.py +0 -0
  59. gpbench/method_class/EIR/utils/array_output_modules.py +97 -0
  60. gpbench/method_class/EIR/utils/common.py +65 -0
  61. gpbench/method_class/EIR/utils/lcl_layers.py +235 -0
  62. gpbench/method_class/EIR/utils/logging.py +59 -0
  63. gpbench/method_class/EIR/utils/mlp_layers.py +92 -0
  64. gpbench/method_class/EIR/utils/models_locally_connected.py +642 -0
  65. gpbench/method_class/EIR/utils/transformer_models.py +546 -0
  66. gpbench/method_class/ElasticNet/ElasticNet_class.py +133 -0
  67. gpbench/method_class/ElasticNet/ElasticNet_he_class.py +91 -0
  68. gpbench/method_class/ElasticNet/__init__.py +5 -0
  69. gpbench/method_class/G2PDeep/G2PDeep_he_class.py +217 -0
  70. gpbench/method_class/G2PDeep/G2Pdeep_class.py +205 -0
  71. gpbench/method_class/G2PDeep/__init__.py +5 -0
  72. gpbench/method_class/G2PDeep/base_G2PDeep_class.py +209 -0
  73. gpbench/method_class/GBLUP/GBLUP_class.py +183 -0
  74. gpbench/method_class/GBLUP/__init__.py +5 -0
  75. gpbench/method_class/GEFormer/GEFormer_class.py +169 -0
  76. gpbench/method_class/GEFormer/GEFormer_he_class.py +137 -0
  77. gpbench/method_class/GEFormer/__init__.py +5 -0
  78. gpbench/method_class/GEFormer/gMLP_class.py +357 -0
  79. gpbench/method_class/LightGBM/LightGBM_class.py +224 -0
  80. gpbench/method_class/LightGBM/LightGBM_he_class.py +121 -0
  81. gpbench/method_class/LightGBM/__init__.py +5 -0
  82. gpbench/method_class/RF/RF_GPU_class.py +165 -0
  83. gpbench/method_class/RF/RF_GPU_he_class.py +124 -0
  84. gpbench/method_class/RF/__init__.py +5 -0
  85. gpbench/method_class/SVC/SVC_GPU.py +181 -0
  86. gpbench/method_class/SVC/SVC_GPU_he.py +106 -0
  87. gpbench/method_class/SVC/__init__.py +5 -0
  88. gpbench/method_class/SoyDNGP/AlexNet_206_class.py +179 -0
  89. gpbench/method_class/SoyDNGP/SoyDNGP_class.py +189 -0
  90. gpbench/method_class/SoyDNGP/SoyDNGP_he_class.py +112 -0
  91. gpbench/method_class/SoyDNGP/__init__.py +5 -0
  92. gpbench/method_class/XGBoost/XGboost_GPU_class.py +198 -0
  93. gpbench/method_class/XGBoost/XGboost_GPU_he_class.py +178 -0
  94. gpbench/method_class/XGBoost/__init__.py +5 -0
  95. gpbench/method_class/__init__.py +52 -0
  96. gpbench/method_class/rrBLUP/__init__.py +5 -0
  97. gpbench/method_class/rrBLUP/rrBLUP_class.py +140 -0
  98. gpbench/method_reg/BayesA/BayesA.py +116 -0
  99. gpbench/method_reg/BayesA/__init__.py +5 -0
  100. gpbench/method_reg/BayesA/_bayesfromR.py +96 -0
  101. gpbench/method_reg/BayesA/_param_free_base_model.py +84 -0
  102. gpbench/method_reg/BayesA/bayesAfromR.py +16 -0
  103. gpbench/method_reg/BayesB/BayesB.py +117 -0
  104. gpbench/method_reg/BayesB/__init__.py +5 -0
  105. gpbench/method_reg/BayesB/_bayesfromR.py +96 -0
  106. gpbench/method_reg/BayesB/_param_free_base_model.py +84 -0
  107. gpbench/method_reg/BayesB/bayesBfromR.py +16 -0
  108. gpbench/method_reg/BayesC/BayesC.py +115 -0
  109. gpbench/method_reg/BayesC/__init__.py +5 -0
  110. gpbench/method_reg/BayesC/_bayesfromR.py +96 -0
  111. gpbench/method_reg/BayesC/_param_free_base_model.py +84 -0
  112. gpbench/method_reg/BayesC/bayesCfromR.py +16 -0
  113. gpbench/method_reg/CropARNet/CropARNet.py +159 -0
  114. gpbench/method_reg/CropARNet/CropARNet_Hyperparameters.py +109 -0
  115. gpbench/method_reg/CropARNet/__init__.py +5 -0
  116. gpbench/method_reg/CropARNet/base_CropARNet.py +137 -0
  117. gpbench/method_reg/Cropformer/Cropformer.py +313 -0
  118. gpbench/method_reg/Cropformer/Cropformer_Hyperparameters.py +250 -0
  119. gpbench/method_reg/Cropformer/__init__.py +5 -0
  120. gpbench/method_reg/DL_GWAS/DL_GWAS.py +186 -0
  121. gpbench/method_reg/DL_GWAS/DL_GWAS_Hyperparameters.py +125 -0
  122. gpbench/method_reg/DL_GWAS/__init__.py +5 -0
  123. gpbench/method_reg/DNNGP/DNNGP.py +157 -0
  124. gpbench/method_reg/DNNGP/DNNGP_Hyperparameters.py +118 -0
  125. gpbench/method_reg/DNNGP/__init__.py +5 -0
  126. gpbench/method_reg/DNNGP/base_dnngp.py +101 -0
  127. gpbench/method_reg/DeepCCR/DeepCCR.py +149 -0
  128. gpbench/method_reg/DeepCCR/DeepCCR_Hyperparameters.py +110 -0
  129. gpbench/method_reg/DeepCCR/__init__.py +5 -0
  130. gpbench/method_reg/DeepCCR/base_DeepCCR.py +171 -0
  131. gpbench/method_reg/DeepGS/DeepGS.py +165 -0
  132. gpbench/method_reg/DeepGS/DeepGS_Hyperparameters.py +114 -0
  133. gpbench/method_reg/DeepGS/__init__.py +5 -0
  134. gpbench/method_reg/DeepGS/base_deepgs.py +98 -0
  135. gpbench/method_reg/EIR/EIR.py +258 -0
  136. gpbench/method_reg/EIR/EIR_Hyperparameters.py +178 -0
  137. gpbench/method_reg/EIR/__init__.py +5 -0
  138. gpbench/method_reg/EIR/utils/__init__.py +0 -0
  139. gpbench/method_reg/EIR/utils/array_output_modules.py +97 -0
  140. gpbench/method_reg/EIR/utils/common.py +65 -0
  141. gpbench/method_reg/EIR/utils/lcl_layers.py +235 -0
  142. gpbench/method_reg/EIR/utils/logging.py +59 -0
  143. gpbench/method_reg/EIR/utils/mlp_layers.py +92 -0
  144. gpbench/method_reg/EIR/utils/models_locally_connected.py +642 -0
  145. gpbench/method_reg/EIR/utils/transformer_models.py +546 -0
  146. gpbench/method_reg/ElasticNet/ElasticNet.py +123 -0
  147. gpbench/method_reg/ElasticNet/ElasticNet_he.py +83 -0
  148. gpbench/method_reg/ElasticNet/__init__.py +5 -0
  149. gpbench/method_reg/G2PDeep/G2PDeep_Hyperparameters.py +107 -0
  150. gpbench/method_reg/G2PDeep/G2Pdeep.py +166 -0
  151. gpbench/method_reg/G2PDeep/__init__.py +5 -0
  152. gpbench/method_reg/G2PDeep/base_G2PDeep.py +209 -0
  153. gpbench/method_reg/GBLUP/GBLUP_R.py +182 -0
  154. gpbench/method_reg/GBLUP/__init__.py +5 -0
  155. gpbench/method_reg/GEFormer/GEFormer.py +164 -0
  156. gpbench/method_reg/GEFormer/GEFormer_Hyperparameters.py +106 -0
  157. gpbench/method_reg/GEFormer/__init__.py +5 -0
  158. gpbench/method_reg/GEFormer/gMLP.py +341 -0
  159. gpbench/method_reg/LightGBM/LightGBM.py +237 -0
  160. gpbench/method_reg/LightGBM/LightGBM_Hyperparameters.py +77 -0
  161. gpbench/method_reg/LightGBM/__init__.py +5 -0
  162. gpbench/method_reg/MVP/MVP.py +182 -0
  163. gpbench/method_reg/MVP/MVP_Hyperparameters.py +126 -0
  164. gpbench/method_reg/MVP/__init__.py +5 -0
  165. gpbench/method_reg/MVP/base_MVP.py +113 -0
  166. gpbench/method_reg/RF/RF_GPU.py +174 -0
  167. gpbench/method_reg/RF/RF_Hyperparameters.py +163 -0
  168. gpbench/method_reg/RF/__init__.py +5 -0
  169. gpbench/method_reg/SVC/SVC_GPU.py +194 -0
  170. gpbench/method_reg/SVC/SVC_Hyperparameters.py +107 -0
  171. gpbench/method_reg/SVC/__init__.py +5 -0
  172. gpbench/method_reg/SoyDNGP/AlexNet_206.py +185 -0
  173. gpbench/method_reg/SoyDNGP/SoyDNGP.py +179 -0
  174. gpbench/method_reg/SoyDNGP/SoyDNGP_Hyperparameters.py +105 -0
  175. gpbench/method_reg/SoyDNGP/__init__.py +5 -0
  176. gpbench/method_reg/XGBoost/XGboost_GPU.py +188 -0
  177. gpbench/method_reg/XGBoost/XGboost_Hyperparameters.py +167 -0
  178. gpbench/method_reg/XGBoost/__init__.py +5 -0
  179. gpbench/method_reg/__init__.py +55 -0
  180. gpbench/method_reg/rrBLUP/__init__.py +5 -0
  181. gpbench/method_reg/rrBLUP/rrBLUP.py +123 -0
  182. gpbench-1.0.0.dist-info/METADATA +379 -0
  183. gpbench-1.0.0.dist-info/RECORD +188 -0
  184. gpbench-1.0.0.dist-info/WHEEL +5 -0
  185. gpbench-1.0.0.dist-info/entry_points.txt +2 -0
  186. gpbench-1.0.0.dist-info/top_level.txt +3 -0
  187. tests/test_import.py +80 -0
  188. tests/test_method.py +232 -0
@@ -0,0 +1,84 @@
1
+ import abc
2
+ import joblib
3
+ import numpy as np
4
+ import pathlib
5
+
6
+
7
class ParamFreeBaseModel(abc.ABC):
    """
    BaseModel parent class for all models that do not have hyperparameters, e.g. BLUP.

    Every model must be based on :obj:`~easypheno.model.param_free_base_model.ParamFreeBaseModel` directly or ParamFreeBaseModel's child classes.

    Please add ``super().__init__(PARAMS)`` to the constructor in case you override it in a child class

    **Attributes**

    *Class attributes*

    - standard_encoding (*str*): the standard encoding for this model
    - possible_encodings (*List<str>*): a list of all encodings that are possible according to the model definition

    *Instance attributes*

    - task (*str*): ML task ('regression' or 'classification') depending on target variable
    - encoding (*str*): the encoding to use (standard encoding or user-defined)


    :param task: ML task (regression or classification) depending on target variable
    :param encoding: the encoding to use (standard encoding or user-defined)

    """

    # Class attributes #
    # BUG FIX: the original stacked @property + @classmethod + @abc.abstractmethod.
    # Chaining classmethod with other descriptors was only supported in Python
    # 3.9-3.10 and was removed in 3.11, and the resulting attribute could not be
    # read through the class anyway. Plain abstract properties keep the ABC
    # enforcement (subclasses must still define the attribute, typically as a
    # plain class attribute) while instance access via self.standard_encoding
    # keeps working unchanged.
    @property
    @abc.abstractmethod
    def standard_encoding(self):
        """the standard encoding for this model"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def possible_encodings(self):
        """a list of all encodings that are possible according to the model definition"""
        raise NotImplementedError

    # Constructor super class #
    def __init__(self, task: str, encoding: str = None):
        self.task = task
        # Fall back to the model's standard encoding when the caller does not
        # request a specific one.
        self.encoding = self.standard_encoding if encoding is None else encoding

    # Methods required by each child class #

    @abc.abstractmethod
    def fit(self, X: np.array, y: np.array) -> np.array:
        """
        Method that fits the model based on features X and targets y

        :param X: feature matrix for retraining
        :param y: target vector

        :return: numpy array with values predicted for X
        """

    @abc.abstractmethod
    def predict(self, X_in: np.array) -> np.array:
        """
        Method that predicts target values based on the input X_in

        :param X_in: feature matrix as input

        :return: numpy array with the predicted values
        """

    def save_model(self, path: pathlib.Path, filename: str):
        """
        Persist the whole model object on a hard drive
        (can be loaded with :obj:`~easypheno.model._model_functions.load_model`)

        :param path: path where the model will be saved
        :param filename: filename of the model
        """
        # compress=3 trades a little CPU for a much smaller file on disk.
        joblib.dump(self, path.joinpath(filename), compress=3)
@@ -0,0 +1,16 @@
1
from . import _bayesfromR


class BayesA(_bayesfromR.Bayes_R):
    """Bayes A model.

    Thin wrapper that fixes the BGLR model name to ``'BayesA'``; every other
    attribute and method is inherited unchanged from
    :obj:`~easypheno.model._bayesfromR.Bayes_R`.
    """

    def __init__(self, task: str, encoding: str = None):
        # Delegate everything to the shared Bayesian-alphabet wrapper.
        super().__init__(task=task, model_name='BayesA', encoding=encoding)
@@ -0,0 +1,140 @@
1
+ import os
2
+ import time
3
+ import psutil
4
+ import swanlab
5
+ import argparse
6
+ import random
7
+ import torch
8
+ import numpy as np
9
+ import pandas as pd
10
+ import sys
11
+ from .bayesBfromR import BayesB
12
+ from sklearn.model_selection import StratifiedKFold
13
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
14
+ from sklearn.preprocessing import LabelEncoder
15
+
16
+
17
def parse_args():
    """Build and evaluate the command-line interface for the BayesB benchmark."""
    cli = argparse.ArgumentParser(description="Argument parser")
    cli.add_argument('--methods', type=str, default='BayesB/', help='Model name')
    cli.add_argument('--species', type=str, default='Human/', help='Species name')
    cli.add_argument('--phe', type=str, default='', help='Phenotype name')
    cli.add_argument('--task', type=str, default='classification',
                     choices=['regression', 'classification'],
                     help='Task: regression or classification')
    cli.add_argument('--data_dir', type=str, default='../../data/', help='Path to data directory')
    cli.add_argument('--result_dir', type=str, default='result/', help='Path to result directory')
    return cli.parse_args()
26
+
27
+
28
def load_data(args):
    """Load genotype and phenotype arrays for the configured species.

    Reads ``genotype.npz`` (samples x SNPs) and ``phenotype.npz`` (values plus
    phenotype names) from ``args.data_dir/args.species``.

    :return: tuple of (genotypes, phenotypes, n_samples, n_snps, phenotype_names)
    """
    species_dir = os.path.join(args.data_dir, args.species)
    xData = np.load(os.path.join(species_dir, 'genotype.npz'))["arr_0"]
    phenotypes = np.load(os.path.join(species_dir, 'phenotype.npz'))
    yData = phenotypes["arr_0"]
    names = phenotypes["arr_1"]

    nsample, nsnp = xData.shape[0], xData.shape[1]
    print("Number of samples: ", nsample)
    print("Number of SNPs: ", nsnp)
    return xData, yData, nsample, nsnp, names
38
+
39
+
40
def set_seed(seed=42):
    """Seed python, numpy and torch RNGs and force deterministic cuDNN kernels."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
47
+
48
+
49
def run_nested_cv(args, data, label):
    """Run stratified 10-fold CV using one-vs-rest BayesB regressions.

    For every fold, each class is scored by a separate BayesB regression fit
    on a binary indicator target; the arg-max over per-class scores gives the
    predicted class. The label mapping and per-fold predictions are written
    into ``args.result_dir/args.methods + args.species``.
    """
    result_dir = os.path.join(args.result_dir, args.methods + args.species)
    os.makedirs(result_dir, exist_ok=True)
    print("Starting 10-fold cross-validation...")

    splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    encoder = LabelEncoder()
    encoded_labels = encoder.fit_transform(label)
    # Persist the label <-> class mapping so predictions can be decoded later.
    np.save(os.path.join(result_dir, 'label_mapping.npy'), encoder.classes_)

    accs, precs, recs, f1s = [], [], [], []
    t0 = time.time()
    proc = psutil.Process(os.getpid())

    for fold, (tr_idx, te_idx) in enumerate(splitter.split(data, encoded_labels)):
        t_fold = time.time()
        print(f"\n===== Fold {fold} =====")
        X_tr, X_te = data[tr_idx], data[te_idx]
        y_tr, y_te = encoded_labels[tr_idx], encoded_labels[te_idx]

        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()

        # One-vs-rest: one BayesB regression per class; scores stacked row-wise.
        class_ids = np.unique(y_tr)
        score_mat = np.zeros((len(class_ids), X_te.shape[0]))
        for row, cls in enumerate(class_ids):
            indicator = (y_tr == cls).astype(float)
            ovr_model = BayesB(task="regression")
            ovr_model.fit(X_tr, indicator)
            score_mat[row, :] = ovr_model.predict(X_te)

        y_pred = np.argmax(score_mat, axis=0)

        acc = accuracy_score(y_te, y_pred)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_te, y_pred, average='macro', zero_division=0)
        cm = confusion_matrix(y_te, y_pred)

        accs.append(acc)
        precs.append(prec)
        recs.append(rec)
        f1s.append(f1)

        fold_time = time.time() - t_fold
        fold_gpu_mem = torch.cuda.max_memory_allocated() / 1024**2 if torch.cuda.is_available() else 0
        fold_cpu_mem = proc.memory_info().rss / 1024**2
        print(f'Fold {fold}: ACC={acc:.4f}, PREC={prec:.4f}, REC={rec:.4f}, F1={f1:.4f}, Time={fold_time:.2f}s, '
              f'GPU={fold_gpu_mem:.2f}MB, CPU={fold_cpu_mem:.2f}MB')

        # Store decoded (original-label) predictions for this fold.
        results_df = pd.DataFrame({
            'Y_test': encoder.inverse_transform(y_te),
            'Y_pred': encoder.inverse_transform(y_pred),
        })
        results_df.to_csv(os.path.join(result_dir, f"fold{fold}.csv"), index=False)

    # ========== Summary ==========
    print("\n===== Cross-validation summary =====")
    print(f"Average ACC: {np.mean(accs):.4f} ± {np.std(accs):.4f}")
    print(f"Average PREC: {np.mean(precs):.4f} ± {np.std(precs):.4f}")
    print(f"Average REC: {np.mean(recs):.4f} ± {np.std(recs):.4f}")
    print(f"Average F1 : {np.mean(f1s):.4f} ± {np.std(f1s):.4f}")
    print(f"Total time : {time.time() - t0:.2f}s")
109
+
110
+
111
def BayesB_class():
    """Entry point: run the BayesB one-vs-rest classification benchmark.

    Seeds all RNGs, then for every configured species loads genotype/phenotype
    data, cleans the label column and launches the 10-fold cross-validation.
    """
    set_seed(42)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    args = parse_args()
    all_species = ["Human/Sim/"]
    # Iterate species directly instead of over indices.
    for species in all_species:
        args.species = species
        X, Y, nsamples, nsnp, names = load_data(args)
        args.phe = names
        print("Starting run " + args.methods + args.species)
        label = Y[:, 0]

        if args.task == 'classification':
            # Replace NaN labels with the most frequent label; fall back to 0
            # when the whole column is NaN.
            s = pd.Series(label)
            fill_val = s.mode().iloc[0] if not s.dropna().empty else 0
            label = np.nan_to_num(label, nan=fill_val)

        start_time = time.time()
        # BUG FIX: reset_peak_memory_stats raises on CPU-only hosts; guard it
        # the same way run_nested_cv does.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        process = psutil.Process(os.getpid())

        run_nested_cv(args, data=X, label=label)

        elapsed_time = time.time() - start_time
        print(f"Total running time: {elapsed_time:.2f} s")
        print("Successfully finished!")


if __name__ == "__main__":
    BayesB_class()
@@ -0,0 +1,5 @@
1
from .BayesB_class import BayesB_class

# Expose the benchmark entry point under the short package name as well.
BayesB = BayesB_class

__all__ = ["BayesB", "BayesB_class"]
@@ -0,0 +1,96 @@
1
+ import numpy as np
2
+ import rpy2
3
+ from rpy2.robjects import numpy2ri
4
+ rpy2.robjects.numpy2ri.activate()
5
+ import rpy2.robjects as robjects
6
+ from rpy2.robjects.packages import importr
7
+ from . import _param_free_base_model
8
+ from joblib import Parallel, delayed
9
+
10
class Bayes_R(_param_free_base_model.ParamFreeBaseModel):
    """
    Implementation of a class for the Bayesian alphabet (via R's BGLR package).

    *Attributes*

    *Inherited attributes*

    See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information on the attributes.

    *Additional attributes*

    - mu (*np.array*): intercept
    - beta (*np.array*): effect size
    - model_name (*str*): model to use (BayesA, BayesB or BayesC)
    - n_iter (*int*): iterations for sampling
    - burn_in (*int*): warmup/burnin for sampling
    """
    standard_encoding = '012'
    possible_encodings = ['101']

    def __init__(self, task: str, model_name: str, encoding: str = None, n_iter: int = 1000, burn_in: int = 200):
        super().__init__(task=task, encoding=encoding)
        self.model_name = model_name
        self.n_iter = n_iter
        self.burn_in = burn_in
        # Number of MCMC chains run (in parallel when > 1); single chain by default.
        self.n_jobs = 1
        self.mu = None
        self.beta = None

    def _run_chain(self, chain_num: int, R_X, R_y):
        """
        Run one MCMC chain via BGLR and return its posterior means.

        :param chain_num: index of the chain (used only for parallel dispatch)
        :param R_X: genotype matrix already converted to an R matrix
        :param R_y: target vector already converted to an R FloatVector

        :return: tuple (beta_chain, mu_chain) as numpy arrays
        """
        BGLR = importr('BGLR')

        # Fit the configured Bayesian model on this single chain.
        ETA = robjects.r['list'](robjects.r['list'](X=R_X, model=self.model_name))
        fmBB = BGLR.BGLR(y=R_y, ETA=ETA, verbose=False, nIter=self.n_iter, burnIn=self.burn_in)

        # Extract the results for this chain.
        beta_chain = np.asarray(fmBB.rx2('ETA').rx2(1).rx2('b'))
        mu_chain = np.asarray(fmBB.rx2('mu'))  # intercept for this chain
        return beta_chain, mu_chain

    def fit(self, X: np.array, y: np.array) -> np.array:
        """
        Implementation of fit function for Bayesian alphabet imported from R.

        Runs ``self.n_jobs`` chains and averages the per-chain posterior means
        of beta and mu, then predicts on the training data.

        See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information.
        """
        # Convert inputs to R objects once; they are shared by all chains.
        # (Removed the unused importr('base')/importr('BGLR') bindings — BGLR
        # is imported where it is actually used, inside _run_chain.)
        R_X = robjects.r['matrix'](X, nrow=X.shape[0], ncol=X.shape[1])
        R_y = robjects.FloatVector(y)

        results = Parallel(n_jobs=self.n_jobs)(
            delayed(self._run_chain)(chain_num, R_X, R_y) for chain_num in range(self.n_jobs)
        )

        # Average beta and mu over all chains.
        self.beta = np.mean([beta for beta, _ in results], axis=0)
        self.mu = np.mean([mu for _, mu in results], axis=0)
        return self.predict(X_in=X)

    def predict(self, X_in: np.array) -> np.array:
        """
        Implementation of predict function: ``mu + X_in @ beta``.

        See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information.
        """
        return self.mu + np.matmul(X_in, self.beta)
@@ -0,0 +1,84 @@
1
+ import abc
2
+ import joblib
3
+ import numpy as np
4
+ import pathlib
5
+
6
+
7
class ParamFreeBaseModel(abc.ABC):
    """
    BaseModel parent class for all models that do not have hyperparameters, e.g. BLUP.

    Every model must be based on :obj:`~easypheno.model.param_free_base_model.ParamFreeBaseModel` directly or ParamFreeBaseModel's child classes.

    Please add ``super().__init__(PARAMS)`` to the constructor in case you override it in a child class

    **Attributes**

    *Class attributes*

    - standard_encoding (*str*): the standard encoding for this model
    - possible_encodings (*List<str>*): a list of all encodings that are possible according to the model definition

    *Instance attributes*

    - task (*str*): ML task ('regression' or 'classification') depending on target variable
    - encoding (*str*): the encoding to use (standard encoding or user-defined)


    :param task: ML task (regression or classification) depending on target variable
    :param encoding: the encoding to use (standard encoding or user-defined)

    """

    # Class attributes #
    # BUG FIX: the original stacked @property + @classmethod + @abc.abstractmethod.
    # Chaining classmethod with other descriptors was only supported in Python
    # 3.9-3.10 and was removed in 3.11, and the resulting attribute could not be
    # read through the class anyway. Plain abstract properties keep the ABC
    # enforcement (subclasses must still define the attribute, typically as a
    # plain class attribute) while instance access via self.standard_encoding
    # keeps working unchanged.
    @property
    @abc.abstractmethod
    def standard_encoding(self):
        """the standard encoding for this model"""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def possible_encodings(self):
        """a list of all encodings that are possible according to the model definition"""
        raise NotImplementedError

    # Constructor super class #
    def __init__(self, task: str, encoding: str = None):
        self.task = task
        # Fall back to the model's standard encoding when the caller does not
        # request a specific one.
        self.encoding = self.standard_encoding if encoding is None else encoding

    # Methods required by each child class #

    @abc.abstractmethod
    def fit(self, X: np.array, y: np.array) -> np.array:
        """
        Method that fits the model based on features X and targets y

        :param X: feature matrix for retraining
        :param y: target vector

        :return: numpy array with values predicted for X
        """

    @abc.abstractmethod
    def predict(self, X_in: np.array) -> np.array:
        """
        Method that predicts target values based on the input X_in

        :param X_in: feature matrix as input

        :return: numpy array with the predicted values
        """

    def save_model(self, path: pathlib.Path, filename: str):
        """
        Persist the whole model object on a hard drive
        (can be loaded with :obj:`~easypheno.model._model_functions.load_model`)

        :param path: path where the model will be saved
        :param filename: filename of the model
        """
        # compress=3 trades a little CPU for a much smaller file on disk.
        joblib.dump(self, path.joinpath(filename), compress=3)
@@ -0,0 +1,16 @@
1
from . import _bayesfromR


class BayesB(_bayesfromR.Bayes_R):
    """Bayes B model.

    Thin wrapper that fixes the BGLR model name to ``'BayesB'``; every other
    attribute and method is inherited unchanged from
    :obj:`~easypheno.model._bayesfromR.Bayes_R`.
    """

    def __init__(self, task: str, encoding: str = None):
        # Delegate everything to the shared Bayesian-alphabet wrapper.
        super().__init__(task=task, model_name='BayesB', encoding=encoding)
@@ -0,0 +1,141 @@
1
+ import os
2
+ import time
3
+ import psutil
4
+ import swanlab
5
+ import argparse
6
+ import random
7
+ import torch
8
+ import numpy as np
9
+ import pandas as pd
10
+ import sys
11
+ from .bayesCfromR import BayesC
12
+ from sklearn.model_selection import StratifiedKFold
13
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
14
+ from sklearn.preprocessing import LabelEncoder
15
+
16
+
17
def parse_args():
    """Build and evaluate the command-line interface for the BayesC benchmark."""
    cli = argparse.ArgumentParser(description="Argument parser")
    cli.add_argument('--methods', type=str, default='BayesC/', help='Model name')
    cli.add_argument('--species', type=str, default='Human/', help='Species name')
    cli.add_argument('--phe', type=str, default='', help='Phenotype name')
    cli.add_argument('--task', type=str, default='classification',
                     choices=['regression', 'classification'],
                     help='Task: regression or classification')
    cli.add_argument('--data_dir', type=str, default='../../data/', help='Path to data directory')
    cli.add_argument('--result_dir', type=str, default='result/', help='Path to result directory')
    return cli.parse_args()
26
+
27
+
28
def load_data(args):
    """Load genotype and phenotype arrays for the configured species.

    Reads ``genotype.npz`` (samples x SNPs) and ``phenotype.npz`` (values plus
    phenotype names) from ``args.data_dir/args.species``.

    :return: tuple of (genotypes, phenotypes, n_samples, n_snps, phenotype_names)
    """
    species_dir = os.path.join(args.data_dir, args.species)
    xData = np.load(os.path.join(species_dir, 'genotype.npz'))["arr_0"]
    phenotypes = np.load(os.path.join(species_dir, 'phenotype.npz'))
    yData = phenotypes["arr_0"]
    names = phenotypes["arr_1"]

    nsample, nsnp = xData.shape[0], xData.shape[1]
    print("Number of samples: ", nsample)
    print("Number of SNPs: ", nsnp)
    return xData, yData, nsample, nsnp, names
38
+
39
+
40
def set_seed(seed=42):
    """Seed python, numpy and torch RNGs and force deterministic cuDNN kernels."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
47
+
48
+
49
def run_nested_cv(args, data, label):
    """Run stratified 10-fold CV using one-vs-rest BayesC regressions.

    For every fold, each class is scored by a separate BayesC regression fit
    on a binary indicator target; the arg-max over per-class scores gives the
    predicted class. The label mapping and per-fold predictions are written
    into ``args.result_dir/args.methods + args.species``.
    """
    result_dir = os.path.join(args.result_dir, args.methods + args.species)
    os.makedirs(result_dir, exist_ok=True)
    print("Starting 10-fold cross-validation...")

    splitter = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
    encoder = LabelEncoder()
    encoded_labels = encoder.fit_transform(label)

    # Persist the label <-> class mapping so predictions can be decoded later.
    np.save(os.path.join(result_dir, 'label_mapping.npy'), encoder.classes_)

    accs, precs, recs, f1s = [], [], [], []
    start_time = time.time()
    proc = psutil.Process(os.getpid())

    for fold, (tr_idx, te_idx) in enumerate(splitter.split(data, encoded_labels)):
        t_fold = time.time()
        print(f"\n===== Fold {fold} =====")
        X_tr, X_te = data[tr_idx], data[te_idx]
        y_tr, y_te = encoded_labels[tr_idx], encoded_labels[te_idx]

        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()

        # One-vs-rest: one BayesC regression per class; scores stacked row-wise.
        class_ids = np.unique(y_tr)
        score_mat = np.zeros((len(class_ids), X_te.shape[0]))
        for row, cls in enumerate(class_ids):
            indicator = (y_tr == cls).astype(float)
            ovr_model = BayesC(task="regression")
            ovr_model.fit(X_tr, indicator)
            score_mat[row, :] = ovr_model.predict(X_te)

        y_pred = np.argmax(score_mat, axis=0)

        acc = accuracy_score(y_te, y_pred)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_te, y_pred, average='macro', zero_division=0)
        cm = confusion_matrix(y_te, y_pred)

        accs.append(acc)
        precs.append(prec)
        recs.append(rec)
        f1s.append(f1)

        fold_time = time.time() - t_fold
        fold_gpu_mem = torch.cuda.max_memory_allocated() / 1024**2 if torch.cuda.is_available() else 0
        fold_cpu_mem = proc.memory_info().rss / 1024**2
        print(f'Fold {fold}: ACC={acc:.4f}, PREC={prec:.4f}, REC={rec:.4f}, F1={f1:.4f}, Time={fold_time:.2f}s, '
              f'GPU={fold_gpu_mem:.2f}MB, CPU={fold_cpu_mem:.2f}MB')

        # Store decoded (original-label) predictions for this fold.
        results_df = pd.DataFrame({
            'Y_test': encoder.inverse_transform(y_te),
            'Y_pred': encoder.inverse_transform(y_pred),
        })
        results_df.to_csv(os.path.join(result_dir, f"fold{fold}.csv"), index=False)

    # ========== Summary ==========
    print("\n===== Cross-validation summary =====")
    print(f"Average ACC: {np.mean(accs):.4f} ± {np.std(accs):.4f}")
    print(f"Average PREC: {np.mean(precs):.4f} ± {np.std(precs):.4f}")
    print(f"Average REC: {np.mean(recs):.4f} ± {np.std(recs):.4f}")
    print(f"Average F1 : {np.mean(f1s):.4f} ± {np.std(f1s):.4f}")
    print(f"Total time : {time.time() - start_time:.2f}s")
110
+
111
+
112
+
113
def BayesC_class():
    """Entry point: run the BayesC one-vs-rest classification benchmark.

    Seeds all RNGs, then for every configured species loads genotype/phenotype
    data, cleans the label column and launches the 10-fold cross-validation.
    """
    set_seed(42)
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    args = parse_args()
    all_species = ["Human/Sim/"]
    # Iterate species directly instead of over indices.
    for species in all_species:
        args.species = species
        X, Y, nsamples, nsnp, names = load_data(args)
        args.phe = names
        print("Starting run " + args.methods + args.species)
        label = Y[:, 0]

        if args.task == 'classification':
            # Replace NaN labels with the most frequent label; fall back to 0
            # when the whole column is NaN.
            s = pd.Series(label)
            fill_val = s.mode().iloc[0] if not s.dropna().empty else 0
            label = np.nan_to_num(label, nan=fill_val)

        start_time = time.time()
        # BUG FIX: reset_peak_memory_stats raises on CPU-only hosts; guard it
        # the same way run_nested_cv does.
        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        process = psutil.Process(os.getpid())
        run_nested_cv(args, data=X, label=label)

        elapsed_time = time.time() - start_time
        print(f"Total running time: {elapsed_time:.2f} s")
        print("Successfully finished!")


if __name__ == "__main__":
    BayesC_class()
@@ -0,0 +1,4 @@
1
from .BayesC_class import BayesC_class

# Expose the benchmark entry point under the short package name as well.
BayesC = BayesC_class
__all__ = ["BayesC", "BayesC_class"]
@@ -0,0 +1,96 @@
1
+ import numpy as np
2
+ import rpy2
3
+ from rpy2.robjects import numpy2ri
4
+ rpy2.robjects.numpy2ri.activate()
5
+ import rpy2.robjects as robjects
6
+ from rpy2.robjects.packages import importr
7
+ from . import _param_free_base_model
8
+ from joblib import Parallel, delayed
9
+
10
class Bayes_R(_param_free_base_model.ParamFreeBaseModel):
    """
    Implementation of a class for the Bayesian alphabet (via R's BGLR package).

    *Attributes*

    *Inherited attributes*

    See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information on the attributes.

    *Additional attributes*

    - mu (*np.array*): intercept
    - beta (*np.array*): effect size
    - model_name (*str*): model to use (BayesA, BayesB or BayesC)
    - n_iter (*int*): iterations for sampling
    - burn_in (*int*): warmup/burnin for sampling
    """
    standard_encoding = '012'
    possible_encodings = ['101']

    def __init__(self, task: str, model_name: str, encoding: str = None, n_iter: int = 1000, burn_in: int = 200):
        super().__init__(task=task, encoding=encoding)
        self.model_name = model_name
        self.n_iter = n_iter
        self.burn_in = burn_in
        # Number of MCMC chains run (in parallel when > 1); single chain by default.
        self.n_jobs = 1
        self.mu = None
        self.beta = None

    def _run_chain(self, chain_num: int, R_X, R_y):
        """
        Run one MCMC chain via BGLR and return its posterior means.

        :param chain_num: index of the chain (used only for parallel dispatch)
        :param R_X: genotype matrix already converted to an R matrix
        :param R_y: target vector already converted to an R FloatVector

        :return: tuple (beta_chain, mu_chain) as numpy arrays
        """
        BGLR = importr('BGLR')

        # Fit the configured Bayesian model on this single chain.
        ETA = robjects.r['list'](robjects.r['list'](X=R_X, model=self.model_name))
        fmBB = BGLR.BGLR(y=R_y, ETA=ETA, verbose=False, nIter=self.n_iter, burnIn=self.burn_in)

        # Extract the results for this chain.
        beta_chain = np.asarray(fmBB.rx2('ETA').rx2(1).rx2('b'))
        mu_chain = np.asarray(fmBB.rx2('mu'))  # intercept for this chain
        return beta_chain, mu_chain

    def fit(self, X: np.array, y: np.array) -> np.array:
        """
        Implementation of fit function for Bayesian alphabet imported from R.

        Runs ``self.n_jobs`` chains and averages the per-chain posterior means
        of beta and mu, then predicts on the training data.

        See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information.
        """
        # Convert inputs to R objects once; they are shared by all chains.
        # (Removed the unused importr('base')/importr('BGLR') bindings — BGLR
        # is imported where it is actually used, inside _run_chain.)
        R_X = robjects.r['matrix'](X, nrow=X.shape[0], ncol=X.shape[1])
        R_y = robjects.FloatVector(y)

        results = Parallel(n_jobs=self.n_jobs)(
            delayed(self._run_chain)(chain_num, R_X, R_y) for chain_num in range(self.n_jobs)
        )

        # Average beta and mu over all chains.
        self.beta = np.mean([beta for beta, _ in results], axis=0)
        self.mu = np.mean([mu for _, mu in results], axis=0)
        return self.predict(X_in=X)

    def predict(self, X_in: np.array) -> np.array:
        """
        Implementation of predict function: ``mu + X_in @ beta``.

        See :obj:`~easypheno.model._param_free_base_model.ParamFreeBaseModel` for more information.
        """
        return self.mu + np.matmul(X_in, self.beta)