gpbench 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (188) hide show
  1. gp_agent_tool/compute_dataset_feature.py +67 -0
  2. gp_agent_tool/config.py +65 -0
  3. gp_agent_tool/experience/create_masked_dataset_summary.py +97 -0
  4. gp_agent_tool/experience/dataset_summary_info.py +13 -0
  5. gp_agent_tool/experience/experience_info.py +12 -0
  6. gp_agent_tool/experience/get_matched_experience.py +111 -0
  7. gp_agent_tool/llm_client.py +119 -0
  8. gp_agent_tool/logging_utils.py +24 -0
  9. gp_agent_tool/main.py +347 -0
  10. gp_agent_tool/read_agent/__init__.py +46 -0
  11. gp_agent_tool/read_agent/nodes.py +674 -0
  12. gp_agent_tool/read_agent/prompts.py +547 -0
  13. gp_agent_tool/read_agent/python_repl_tool.py +165 -0
  14. gp_agent_tool/read_agent/state.py +101 -0
  15. gp_agent_tool/read_agent/workflow.py +54 -0
  16. gpbench/__init__.py +25 -0
  17. gpbench/_selftest.py +104 -0
  18. gpbench/method_class/BayesA/BayesA_class.py +141 -0
  19. gpbench/method_class/BayesA/__init__.py +5 -0
  20. gpbench/method_class/BayesA/_bayesfromR.py +96 -0
  21. gpbench/method_class/BayesA/_param_free_base_model.py +84 -0
  22. gpbench/method_class/BayesA/bayesAfromR.py +16 -0
  23. gpbench/method_class/BayesB/BayesB_class.py +140 -0
  24. gpbench/method_class/BayesB/__init__.py +5 -0
  25. gpbench/method_class/BayesB/_bayesfromR.py +96 -0
  26. gpbench/method_class/BayesB/_param_free_base_model.py +84 -0
  27. gpbench/method_class/BayesB/bayesBfromR.py +16 -0
  28. gpbench/method_class/BayesC/BayesC_class.py +141 -0
  29. gpbench/method_class/BayesC/__init__.py +4 -0
  30. gpbench/method_class/BayesC/_bayesfromR.py +96 -0
  31. gpbench/method_class/BayesC/_param_free_base_model.py +84 -0
  32. gpbench/method_class/BayesC/bayesCfromR.py +16 -0
  33. gpbench/method_class/CropARNet/CropARNet_class.py +186 -0
  34. gpbench/method_class/CropARNet/CropARNet_he_class.py +154 -0
  35. gpbench/method_class/CropARNet/__init__.py +5 -0
  36. gpbench/method_class/CropARNet/base_CropARNet_class.py +178 -0
  37. gpbench/method_class/Cropformer/Cropformer_class.py +308 -0
  38. gpbench/method_class/Cropformer/__init__.py +5 -0
  39. gpbench/method_class/Cropformer/cropformer_he_class.py +221 -0
  40. gpbench/method_class/DL_GWAS/DL_GWAS_class.py +250 -0
  41. gpbench/method_class/DL_GWAS/DL_GWAS_he_class.py +169 -0
  42. gpbench/method_class/DL_GWAS/__init__.py +5 -0
  43. gpbench/method_class/DNNGP/DNNGP_class.py +163 -0
  44. gpbench/method_class/DNNGP/DNNGP_he_class.py +138 -0
  45. gpbench/method_class/DNNGP/__init__.py +5 -0
  46. gpbench/method_class/DNNGP/base_dnngp_class.py +116 -0
  47. gpbench/method_class/DeepCCR/DeepCCR_class.py +172 -0
  48. gpbench/method_class/DeepCCR/DeepCCR_he_class.py +161 -0
  49. gpbench/method_class/DeepCCR/__init__.py +5 -0
  50. gpbench/method_class/DeepCCR/base_DeepCCR_class.py +209 -0
  51. gpbench/method_class/DeepGS/DeepGS_class.py +184 -0
  52. gpbench/method_class/DeepGS/DeepGS_he_class.py +150 -0
  53. gpbench/method_class/DeepGS/__init__.py +5 -0
  54. gpbench/method_class/DeepGS/base_deepgs_class.py +153 -0
  55. gpbench/method_class/EIR/EIR_class.py +276 -0
  56. gpbench/method_class/EIR/EIR_he_class.py +184 -0
  57. gpbench/method_class/EIR/__init__.py +5 -0
  58. gpbench/method_class/EIR/utils/__init__.py +0 -0
  59. gpbench/method_class/EIR/utils/array_output_modules.py +97 -0
  60. gpbench/method_class/EIR/utils/common.py +65 -0
  61. gpbench/method_class/EIR/utils/lcl_layers.py +235 -0
  62. gpbench/method_class/EIR/utils/logging.py +59 -0
  63. gpbench/method_class/EIR/utils/mlp_layers.py +92 -0
  64. gpbench/method_class/EIR/utils/models_locally_connected.py +642 -0
  65. gpbench/method_class/EIR/utils/transformer_models.py +546 -0
  66. gpbench/method_class/ElasticNet/ElasticNet_class.py +133 -0
  67. gpbench/method_class/ElasticNet/ElasticNet_he_class.py +91 -0
  68. gpbench/method_class/ElasticNet/__init__.py +5 -0
  69. gpbench/method_class/G2PDeep/G2PDeep_he_class.py +217 -0
  70. gpbench/method_class/G2PDeep/G2Pdeep_class.py +205 -0
  71. gpbench/method_class/G2PDeep/__init__.py +5 -0
  72. gpbench/method_class/G2PDeep/base_G2PDeep_class.py +209 -0
  73. gpbench/method_class/GBLUP/GBLUP_class.py +183 -0
  74. gpbench/method_class/GBLUP/__init__.py +5 -0
  75. gpbench/method_class/GEFormer/GEFormer_class.py +169 -0
  76. gpbench/method_class/GEFormer/GEFormer_he_class.py +137 -0
  77. gpbench/method_class/GEFormer/__init__.py +5 -0
  78. gpbench/method_class/GEFormer/gMLP_class.py +357 -0
  79. gpbench/method_class/LightGBM/LightGBM_class.py +224 -0
  80. gpbench/method_class/LightGBM/LightGBM_he_class.py +121 -0
  81. gpbench/method_class/LightGBM/__init__.py +5 -0
  82. gpbench/method_class/RF/RF_GPU_class.py +165 -0
  83. gpbench/method_class/RF/RF_GPU_he_class.py +124 -0
  84. gpbench/method_class/RF/__init__.py +5 -0
  85. gpbench/method_class/SVC/SVC_GPU.py +181 -0
  86. gpbench/method_class/SVC/SVC_GPU_he.py +106 -0
  87. gpbench/method_class/SVC/__init__.py +5 -0
  88. gpbench/method_class/SoyDNGP/AlexNet_206_class.py +179 -0
  89. gpbench/method_class/SoyDNGP/SoyDNGP_class.py +189 -0
  90. gpbench/method_class/SoyDNGP/SoyDNGP_he_class.py +112 -0
  91. gpbench/method_class/SoyDNGP/__init__.py +5 -0
  92. gpbench/method_class/XGBoost/XGboost_GPU_class.py +198 -0
  93. gpbench/method_class/XGBoost/XGboost_GPU_he_class.py +178 -0
  94. gpbench/method_class/XGBoost/__init__.py +5 -0
  95. gpbench/method_class/__init__.py +52 -0
  96. gpbench/method_class/rrBLUP/__init__.py +5 -0
  97. gpbench/method_class/rrBLUP/rrBLUP_class.py +140 -0
  98. gpbench/method_reg/BayesA/BayesA.py +116 -0
  99. gpbench/method_reg/BayesA/__init__.py +5 -0
  100. gpbench/method_reg/BayesA/_bayesfromR.py +96 -0
  101. gpbench/method_reg/BayesA/_param_free_base_model.py +84 -0
  102. gpbench/method_reg/BayesA/bayesAfromR.py +16 -0
  103. gpbench/method_reg/BayesB/BayesB.py +117 -0
  104. gpbench/method_reg/BayesB/__init__.py +5 -0
  105. gpbench/method_reg/BayesB/_bayesfromR.py +96 -0
  106. gpbench/method_reg/BayesB/_param_free_base_model.py +84 -0
  107. gpbench/method_reg/BayesB/bayesBfromR.py +16 -0
  108. gpbench/method_reg/BayesC/BayesC.py +115 -0
  109. gpbench/method_reg/BayesC/__init__.py +5 -0
  110. gpbench/method_reg/BayesC/_bayesfromR.py +96 -0
  111. gpbench/method_reg/BayesC/_param_free_base_model.py +84 -0
  112. gpbench/method_reg/BayesC/bayesCfromR.py +16 -0
  113. gpbench/method_reg/CropARNet/CropARNet.py +159 -0
  114. gpbench/method_reg/CropARNet/CropARNet_Hyperparameters.py +109 -0
  115. gpbench/method_reg/CropARNet/__init__.py +5 -0
  116. gpbench/method_reg/CropARNet/base_CropARNet.py +137 -0
  117. gpbench/method_reg/Cropformer/Cropformer.py +313 -0
  118. gpbench/method_reg/Cropformer/Cropformer_Hyperparameters.py +250 -0
  119. gpbench/method_reg/Cropformer/__init__.py +5 -0
  120. gpbench/method_reg/DL_GWAS/DL_GWAS.py +186 -0
  121. gpbench/method_reg/DL_GWAS/DL_GWAS_Hyperparameters.py +125 -0
  122. gpbench/method_reg/DL_GWAS/__init__.py +5 -0
  123. gpbench/method_reg/DNNGP/DNNGP.py +157 -0
  124. gpbench/method_reg/DNNGP/DNNGP_Hyperparameters.py +118 -0
  125. gpbench/method_reg/DNNGP/__init__.py +5 -0
  126. gpbench/method_reg/DNNGP/base_dnngp.py +101 -0
  127. gpbench/method_reg/DeepCCR/DeepCCR.py +149 -0
  128. gpbench/method_reg/DeepCCR/DeepCCR_Hyperparameters.py +110 -0
  129. gpbench/method_reg/DeepCCR/__init__.py +5 -0
  130. gpbench/method_reg/DeepCCR/base_DeepCCR.py +171 -0
  131. gpbench/method_reg/DeepGS/DeepGS.py +165 -0
  132. gpbench/method_reg/DeepGS/DeepGS_Hyperparameters.py +114 -0
  133. gpbench/method_reg/DeepGS/__init__.py +5 -0
  134. gpbench/method_reg/DeepGS/base_deepgs.py +98 -0
  135. gpbench/method_reg/EIR/EIR.py +258 -0
  136. gpbench/method_reg/EIR/EIR_Hyperparameters.py +178 -0
  137. gpbench/method_reg/EIR/__init__.py +5 -0
  138. gpbench/method_reg/EIR/utils/__init__.py +0 -0
  139. gpbench/method_reg/EIR/utils/array_output_modules.py +97 -0
  140. gpbench/method_reg/EIR/utils/common.py +65 -0
  141. gpbench/method_reg/EIR/utils/lcl_layers.py +235 -0
  142. gpbench/method_reg/EIR/utils/logging.py +59 -0
  143. gpbench/method_reg/EIR/utils/mlp_layers.py +92 -0
  144. gpbench/method_reg/EIR/utils/models_locally_connected.py +642 -0
  145. gpbench/method_reg/EIR/utils/transformer_models.py +546 -0
  146. gpbench/method_reg/ElasticNet/ElasticNet.py +123 -0
  147. gpbench/method_reg/ElasticNet/ElasticNet_he.py +83 -0
  148. gpbench/method_reg/ElasticNet/__init__.py +5 -0
  149. gpbench/method_reg/G2PDeep/G2PDeep_Hyperparameters.py +107 -0
  150. gpbench/method_reg/G2PDeep/G2Pdeep.py +166 -0
  151. gpbench/method_reg/G2PDeep/__init__.py +5 -0
  152. gpbench/method_reg/G2PDeep/base_G2PDeep.py +209 -0
  153. gpbench/method_reg/GBLUP/GBLUP_R.py +182 -0
  154. gpbench/method_reg/GBLUP/__init__.py +5 -0
  155. gpbench/method_reg/GEFormer/GEFormer.py +164 -0
  156. gpbench/method_reg/GEFormer/GEFormer_Hyperparameters.py +106 -0
  157. gpbench/method_reg/GEFormer/__init__.py +5 -0
  158. gpbench/method_reg/GEFormer/gMLP.py +341 -0
  159. gpbench/method_reg/LightGBM/LightGBM.py +237 -0
  160. gpbench/method_reg/LightGBM/LightGBM_Hyperparameters.py +77 -0
  161. gpbench/method_reg/LightGBM/__init__.py +5 -0
  162. gpbench/method_reg/MVP/MVP.py +182 -0
  163. gpbench/method_reg/MVP/MVP_Hyperparameters.py +126 -0
  164. gpbench/method_reg/MVP/__init__.py +5 -0
  165. gpbench/method_reg/MVP/base_MVP.py +113 -0
  166. gpbench/method_reg/RF/RF_GPU.py +174 -0
  167. gpbench/method_reg/RF/RF_Hyperparameters.py +163 -0
  168. gpbench/method_reg/RF/__init__.py +5 -0
  169. gpbench/method_reg/SVC/SVC_GPU.py +194 -0
  170. gpbench/method_reg/SVC/SVC_Hyperparameters.py +107 -0
  171. gpbench/method_reg/SVC/__init__.py +5 -0
  172. gpbench/method_reg/SoyDNGP/AlexNet_206.py +185 -0
  173. gpbench/method_reg/SoyDNGP/SoyDNGP.py +179 -0
  174. gpbench/method_reg/SoyDNGP/SoyDNGP_Hyperparameters.py +105 -0
  175. gpbench/method_reg/SoyDNGP/__init__.py +5 -0
  176. gpbench/method_reg/XGBoost/XGboost_GPU.py +188 -0
  177. gpbench/method_reg/XGBoost/XGboost_Hyperparameters.py +167 -0
  178. gpbench/method_reg/XGBoost/__init__.py +5 -0
  179. gpbench/method_reg/__init__.py +55 -0
  180. gpbench/method_reg/rrBLUP/__init__.py +5 -0
  181. gpbench/method_reg/rrBLUP/rrBLUP.py +123 -0
  182. gpbench-1.0.0.dist-info/METADATA +379 -0
  183. gpbench-1.0.0.dist-info/RECORD +188 -0
  184. gpbench-1.0.0.dist-info/WHEEL +5 -0
  185. gpbench-1.0.0.dist-info/entry_points.txt +2 -0
  186. gpbench-1.0.0.dist-info/top_level.txt +3 -0
  187. tests/test_import.py +80 -0
  188. tests/test_method.py +232 -0
@@ -0,0 +1,224 @@
1
+ import os
2
+ import time
3
+ import psutil
4
+ import swanlab
5
+ import argparse
6
+ import random
7
+ import torch
8
+ import pandas as pd
9
+ import numpy as np
10
+ import lightgbm as lgb
11
+ import subprocess
12
+ import threading
13
+ import queue
14
+
15
+ from sklearn.model_selection import StratifiedKFold
16
+ from sklearn.preprocessing import LabelEncoder
17
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
18
+
19
+ from . import LightGBM_he_class
20
+
21
class GPUMonitor:
    """Sample this process's GPU memory usage (via nvidia-smi) on a background thread.

    Tracks the peak value seen between start() and stop(). Values are MiB as
    reported by nvidia-smi; on machines without nvidia-smi every sample is 0.
    """

    def __init__(self, gpu_id=0, interval=0.2):
        # gpu_id is kept for interface compatibility; the nvidia-smi query
        # matches by PID, not by device index.
        self.gpu_id = gpu_id
        self.interval = interval
        self.max_memory = 0
        self.current_memory = 0
        self.monitoring = False
        self.thread = None  # fix: stop() before start() used to raise AttributeError
        self.pid = os.getpid()

    def _get_gpu_memory_by_pid(self):
        """Return MiB of GPU memory used by this PID, or 0 on any failure."""
        try:
            result = subprocess.check_output([
                'nvidia-smi',
                '--query-compute-apps=pid,used_memory',
                '--format=csv,nounits,noheader'
            ])
            lines = result.decode().strip().split('\n')
            for line in lines:
                pid, mem = line.split(',')
                if int(pid.strip()) == self.pid:
                    return int(mem.strip())
            return 0
        except Exception:
            # nvidia-smi missing, no GPU, or unparsable output -> treat as 0.
            return 0

    def _monitor_loop(self):
        # Poll until stop() flips the flag; daemon thread, so it never blocks exit.
        while self.monitoring:
            mem = self._get_gpu_memory_by_pid()
            self.current_memory = mem
            self.max_memory = max(self.max_memory, mem)
            time.sleep(self.interval)

    def start(self):
        """Reset the peak and begin sampling on a daemon thread."""
        self.max_memory = 0
        self.monitoring = True
        self.thread = threading.Thread(target=self._monitor_loop, daemon=True)
        self.thread.start()

    def stop(self):
        """Stop sampling and return the peak memory (MiB) seen since start().

        Safe to call even if start() was never invoked (returns 0).
        """
        self.monitoring = False
        if self.thread is not None:  # fix: guard against stop-without-start
            self.thread.join(timeout=1)
            self.thread = None
        return self.max_memory


gpu_monitor = GPUMonitor()
64
+
65
+
66
def parse_args():
    """Build the CLI for the LightGBM classification benchmark and parse sys.argv."""
    parser = argparse.ArgumentParser()

    # (flag, type, default) — registration order matches the original CLI,
    # so --help output and parsing behaviour are unchanged.
    specs = (
        ('--methods', str, 'LightGBM/'),
        ('--species', str, ''),
        ('--phe', str, ''),
        ('--data_dir', str, '../../data/'),
        ('--result_dir', str, 'result/'),
        ('--learning_rate', float, 0.01),
        ('--num_leaves', int, 31),
        ('--min_data_in_leaf', int, 20),
        ('--max_depth', int, -1),
        ('--lambda_l1', float, 0.0),
        ('--lambda_l2', float, 0.0),
        ('--min_gain_to_split', float, 0.0),
        ('--feature_fraction', float, 0.9),
        ('--bagging_fraction', float, 0.9),
        ('--bagging_freq', int, 1),
        ('--num_boost_round', int, 200),
    )
    for flag, flag_type, default in specs:
        parser.add_argument(flag, type=flag_type, default=default)

    return parser.parse_args()
87
+
88
def set_seed(seed=42):
    """Seed python, numpy and torch RNGs and force deterministic cuDNN."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Trade cuDNN autotuning for run-to-run reproducibility.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
95
+
96
+
97
def load_data(args):
    """Load the genotype matrix X and phenotype matrix Y for ``args.species``.

    Both files are .npz archives under ``<data_dir>/<species>/`` holding a
    single positional array stored under the default key "arr_0".
    """
    species_dir = os.path.join(args.data_dir, args.species)
    genotype = np.load(os.path.join(species_dir, 'genotype.npz'))["arr_0"]
    phenotype = np.load(os.path.join(species_dir, 'phenotype.npz'))["arr_0"]
    return genotype, phenotype
101
+
102
def run_nested_cv(args, data, label):
    """Run stratified 10-fold CV with LightGBM and report per-fold metrics.

    Despite the name, this is a single (non-nested) stratified 10-fold CV:
    hyper-parameters arrive pre-tuned via ``args``. Per-fold predictions are
    written to ``<result_dir>/<methods><species><phe>/fold<k>.csv``.

    Args:
        args: parsed CLI namespace (paths + LightGBM hyper-parameters).
        data: 2-D feature matrix, one row per sample.
        label: 1-D class labels (any dtype; label-encoded here).
    """
    result_dir = os.path.join(args.result_dir, args.methods + args.species + args.phe)
    os.makedirs(result_dir, exist_ok=True)

    # Encode labels to 0..n_classes-1 as required by LightGBM.
    le = LabelEncoder()
    y_all = le.fit_transform(label)
    n_classes = len(np.unique(y_all))

    kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)

    all_acc, all_prec, all_rec, all_f1 = [], [], [], []
    process = psutil.Process(os.getpid())  # for per-fold RSS reporting

    params = {
        # Objective/metric chosen from the label cardinality.
        'objective': 'binary' if n_classes == 2 else 'multiclass',
        'metric': 'binary_logloss' if n_classes == 2 else 'multi_logloss',
        # NOTE(review): relies on LightGBM ignoring params whose value is
        # None for the binary case — confirm against the installed version.
        'num_class': n_classes if n_classes > 2 else None,
        'learning_rate': args.learning_rate,
        'num_leaves': args.num_leaves,
        'min_data_in_leaf': args.min_data_in_leaf,
        'max_depth': args.max_depth,
        'lambda_l1': args.lambda_l1,
        'lambda_l2': args.lambda_l2,
        'min_gain_to_split': args.min_gain_to_split,
        'feature_fraction': args.feature_fraction,
        'bagging_fraction': args.bagging_fraction,
        'bagging_freq': args.bagging_freq,
        'device_type': 'gpu',
        'gpu_device_id': 0,
        'num_threads': 8,
        'verbose': -1
    }

    for fold, (train_idx, test_idx) in enumerate(kf.split(data, y_all)):
        print(f"\n===== Fold {fold} =====")
        start_time = time.time()

        # Module-level monitor (see GPUMonitor above) samples peak GPU RAM.
        gpu_monitor.start()
        cpu_mem_before = process.memory_info().rss / 1024**2  # NOTE(review): unused

        X_train, X_test = data[train_idx], data[test_idx]
        y_train, y_test = y_all[train_idx], y_all[test_idx]

        train_set = lgb.Dataset(X_train, label=y_train)
        test_set = lgb.Dataset(X_test, label=y_test)

        # NOTE(review): the held-out test fold doubles as the validation set.
        # With no early-stopping callback this only affects logging, but
        # confirm no callbacks are added later, or this leaks the test fold.
        model = lgb.train(
            params,
            train_set,
            num_boost_round=args.num_boost_round,
            valid_sets=[test_set]
        )

        y_prob = model.predict(X_test)
        # Binary: single probability column; multiclass: (n, n_classes) matrix.
        if n_classes == 2:
            y_pred = (y_prob > 0.5).astype(int)
        else:
            y_pred = np.argmax(y_prob, axis=1)

        acc = accuracy_score(y_test, y_pred)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_test, y_pred, average="macro", zero_division=0
        )

        all_acc.append(acc)
        all_prec.append(prec)
        all_rec.append(rec)
        all_f1.append(f1)

        fold_time = time.time() - start_time
        gpu_mem = gpu_monitor.stop()
        cpu_mem = process.memory_info().rss / 1024**2

        print(
            f"ACC={acc:.4f}, PREC={prec:.4f}, REC={rec:.4f}, "
            f"F1={f1:.4f}, Time={fold_time:.2f}s, "
            f"GPU={gpu_mem:.2f}MB, CPU={cpu_mem:.2f}MB"
        )

        # Persist per-fold predictions in the original label space.
        pd.DataFrame({
            "Y_test": le.inverse_transform(y_test),
            "Y_pred": le.inverse_transform(y_pred)
        }).to_csv(os.path.join(result_dir, f"fold{fold}.csv"), index=False)

    print("\n===== CV Summary =====")
    print(f"ACC : {np.mean(all_acc):.4f} ± {np.std(all_acc):.4f}")
    print(f"PREC: {np.mean(all_prec):.4f} ± {np.std(all_prec):.4f}")
    print(f"REC : {np.mean(all_rec):.4f} ± {np.std(all_rec):.4f}")
    print(f"F1  : {np.mean(all_f1):.4f} ± {np.std(all_f1):.4f}")
191
+
192
+
193
def LightGBM_class():
    """Entry point: tune LightGBM per species, then run the final stratified CV.

    For each species the flow is: load data -> Optuna tuning via
    ``LightGBM_he_class.Hyperparameter`` -> copy the best hyper-parameters
    into ``args`` -> ``run_nested_cv``. Reads data layout from the CLI
    defaults in ``parse_args``.
    """
    set_seed(42)
    args = parse_args()

    # Hard-coded run set; overrides any --species passed on the CLI.
    all_species = ["Human/Sim/"]

    for species in all_species:
        args.species = species
        X, Y = load_data(args)
        label = Y[:, 0]  # first phenotype column is the class label

        best_params = LightGBM_he_class.Hyperparameter(X, label)
        args.learning_rate = best_params['learning_rate']
        args.num_leaves = best_params['num_leaves']
        args.min_data_in_leaf = best_params['min_data_in_leaf']
        args.max_depth = best_params['max_depth']
        args.lambda_l1 = best_params['lambda_l1']
        args.lambda_l2 = best_params['lambda_l2']
        args.min_gain_to_split = best_params['min_gain_to_split']
        args.feature_fraction = best_params['feature_fraction']
        args.bagging_fraction = best_params['bagging_fraction']
        args.bagging_freq = best_params['bagging_freq']
        # NOTE(review): the tuner also suggests 'num_boost_round' but it is
        # not copied here, so the final CV uses the CLI default (200).
        # Confirm whether that is intentional.

        start_time = time.time()
        run_nested_cv(args, X, label)
        elapsed_time = time.time() - start_time
        print(f"Running time: {elapsed_time:.2f}s")
        print("successfully")
221
+
222
+
223
+ if __name__ == "__main__":
224
+ LightGBM_class()
@@ -0,0 +1,121 @@
1
+ import random
2
+ import torch
3
+ import numpy as np
4
+ import lightgbm as lgb
5
+ import optuna
6
+
7
+ from sklearn.model_selection import StratifiedKFold
8
+ from sklearn.preprocessing import LabelEncoder
9
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
10
+
11
def set_seed(seed=42):
    """Seed every RNG used here (python, numpy, torch CPU+CUDA); pin cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Disable autotuning so repeated runs pick identical kernels.
    cudnn = torch.backends.cudnn
    cudnn.deterministic = True
    cudnn.benchmark = False
18
+
19
def run_cv_eval(data, label, params):
    """Score one LightGBM parameter set with stratified 10-fold CV.

    Used by the Optuna objective. Returns the fold means of
    (accuracy, macro-precision, macro-recall, macro-F1).

    Args:
        data: 2-D feature matrix.
        label: 1-D class labels (label-encoded internally).
        params: LightGBM parameter dict for ``lgb.train``.
    """
    kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)

    le = LabelEncoder()
    y_all = le.fit_transform(label)
    n_classes = len(np.unique(y_all))

    accs, precs, recs, f1s = [], [], [], []

    for fold, (train_idx, test_idx) in enumerate(kf.split(data, y_all)):
        print(f"===== Fold {fold+1} =====")

        X_train, X_test = data[train_idx], data[test_idx]
        y_train, y_test = y_all[train_idx], y_all[test_idx]

        train_set = lgb.Dataset(X_train, label=y_train)
        valid_set = lgb.Dataset(X_test, label=y_test)

        # NOTE(review): `params` (from the tuner) also carries a
        # 'num_boost_round' alias; recent LightGBM warns and prefers the
        # params value over the kwarg below — verify against the pinned
        # version which value actually wins.
        model = lgb.train(
            params,
            train_set,
            valid_sets=[valid_set],
            num_boost_round=100,
        )

        y_prob = model.predict(X_test)

        # ===== binary / multiclass safe =====
        if n_classes == 2:
            y_pred = (y_prob > 0.5).astype(int)
        else:
            y_pred = np.argmax(y_prob, axis=1)

        acc = accuracy_score(y_test, y_pred)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_test,
            y_pred,
            average="macro",
            zero_division=0
        )

        accs.append(acc)
        precs.append(prec)
        recs.append(rec)
        f1s.append(f1)

        print(
            f"Fold {fold+1}: "
            f"ACC={acc:.4f}, "
            f"PREC={prec:.4f}, "
            f"REC={rec:.4f}, "
            f"F1={f1:.4f}"
        )

    return (
        np.mean(accs),
        np.mean(precs),
        np.mean(recs),
        np.mean(f1s)
    )
80
+
81
def Hyperparameter(X, label):
    """Tune LightGBM hyper-parameters with Optuna (20 trials, maximize macro-F1).

    Each trial runs a full stratified 10-fold CV via ``run_cv_eval``.
    Returns ``study.best_params`` (the suggested values, including
    'num_boost_round').
    """
    set_seed(42)
    torch.cuda.empty_cache()  # no-op on CPU-only machines

    n_classes = len(np.unique(label))

    def objective(trial):
        params = {
            'objective': 'binary' if n_classes == 2 else 'multiclass',
            'metric': 'multi_logloss' if n_classes > 2 else 'binary_logloss',
            # NOTE(review): relies on LightGBM ignoring None-valued params
            # in the binary case — confirm for the pinned version.
            'num_class': n_classes if n_classes > 2 else None,

            'learning_rate': trial.suggest_float('learning_rate', 1e-3, 0.2, log=True),
            'num_leaves': trial.suggest_int('num_leaves', 15, 255),
            'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 10, 100),
            'max_depth': trial.suggest_int('max_depth', 3, 10),
            'lambda_l1': trial.suggest_float('lambda_l1', 0.0, 5.0),
            'lambda_l2': trial.suggest_float('lambda_l2', 0.0, 5.0),
            'min_gain_to_split': trial.suggest_float('min_gain_to_split', 0.0, 5.0),
            'feature_fraction': trial.suggest_float('feature_fraction', 0.6, 1.0),
            'bagging_fraction': trial.suggest_float('bagging_fraction', 0.6, 1.0),
            'bagging_freq': trial.suggest_int('bagging_freq', 0, 10),

            'num_boost_round': trial.suggest_int('num_boost_round', 100, 1000),

            'device_type': 'gpu',
            # NOTE(review): tuning runs on GPU 1 while the final CV uses
            # GPU 0 ('gpu_device_id': 0 in run_nested_cv) — confirm this
            # split is intentional.
            'gpu_device_id': 1,
            'num_threads': 8,
            # NOTE(review): 'verbosity' here vs 'verbose' in run_nested_cv;
            # both are accepted aliases, flagged for consistency only.
            'verbosity': -1
        }

        acc, prec, rec, f1 = run_cv_eval(X, label, params)

        # ===== optimize macro-F1 =====
        return f1

    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=20)
    print("Best macro-F1:", study.best_value)

    return study.best_params
@@ -0,0 +1,5 @@
1
# Public entry point for the LightGBM classification benchmark.
from .LightGBM_class import LightGBM_class

# Ergonomic alias: gpbench.method_class.LightGBM.LightGBM
LightGBM = LightGBM_class

__all__ = ["LightGBM","LightGBM_class"]
@@ -0,0 +1,165 @@
1
+ import os
2
+ import time
3
+ import psutil
4
+ import pynvml
5
+ import argparse
6
+ import random
7
+ import torch
8
+ import pandas as pd
9
+ import numpy as np
10
+ import swanlab
11
+ import cupy as cp
12
+
13
+ from sklearn.preprocessing import LabelEncoder
14
+ from sklearn.model_selection import StratifiedKFold
15
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
16
+ from . import RF_GPU_he_class
17
+
18
+ try:
19
+ from cuml.ensemble import RandomForestClassifier as cuRFClassifier
20
+ GPU_AVAILABLE = True
21
+ except ImportError:
22
+ from sklearn.ensemble import RandomForestClassifier
23
+ GPU_AVAILABLE = False
24
+
25
def _str2bool(value):
    """Parse common truthy/falsy CLI spellings into a real bool.

    argparse's ``type=bool`` is a trap: ``bool('False')`` is True because any
    non-empty string is truthy, so ``--use_gpu False`` silently enabled GPU.
    """
    if isinstance(value, bool):
        return value
    text = value.strip().lower()
    if text in ('true', '1', 'yes', 'y', 't'):
        return True
    if text in ('false', '0', 'no', 'n', 'f'):
        return False
    raise argparse.ArgumentTypeError(f"expected a boolean, got {value!r}")


def parse_args():
    """CLI for the random-forest benchmark (paths + RF hyper-parameters)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--methods', type=str, default='RF/')
    parser.add_argument('--species', type=str, default='')
    parser.add_argument('--phe', type=str, default='')
    parser.add_argument('--data_dir', type=str, default='../../data/')
    parser.add_argument('--result_dir', type=str, default='result/')

    parser.add_argument('--n_estimators', type=int, default=200)
    parser.add_argument('--max_depth', type=int, default=10)
    # fix: was type=bool, which made '--use_gpu False' evaluate to True.
    parser.add_argument('--use_gpu', type=_str2bool, default=True)
    return parser.parse_args()
37
+
38
def set_seed(seed=42):
    """Seed the python, numpy and torch RNGs (CUDA too, when present)."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
44
+
45
+
46
def load_data(args):
    """Return the (genotype, phenotype) arrays stored as 'arr_0' in npz files."""
    def _load(filename):
        return np.load(os.path.join(args.data_dir, args.species, filename))['arr_0']

    return _load('genotype.npz'), _load('phenotype.npz')
50
+
51
+
52
def get_gpu_mem_by_pid(pid):
    """Return the GPU memory (MiB) used by ``pid`` on device 0, or 0.0.

    Fix: the original read a module-global ``handle`` that was never defined
    at module scope (RF_class only created it as a *local*), so every call
    raised NameError. The NVML handle is now acquired here, and any failure
    (no driver, no GPU, pid not on the device) degrades to 0.0.
    """
    try:
        import pynvml
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
        for p in procs:
            if p.pid == pid:
                return p.usedGpuMemory / 1024**2
        return 0.0
    except Exception:
        return 0.0
58
+
59
def run_cv(args, X, label):
    """Stratified 10-fold CV with a random forest (cuML on GPU, sklearn otherwise).

    Writes a per-fold predictions CSV plus the label mapping into
    ``<result_dir>/<methods><species><phe>/`` and prints per-fold and summary
    accuracy / macro precision / recall / F1.

    Fix: the original used ``cuRFClassifier`` and ``cp`` unconditionally, so
    whenever the cuml import guard at module top fell back to sklearn this
    crashed with NameError; the guard is now honoured (and --use_gpu=False is
    respected for training, not just for memory reporting).
    """
    result_dir = os.path.join(args.result_dir, args.methods + args.species + args.phe)
    os.makedirs(result_dir, exist_ok=True)

    le = LabelEncoder()
    y_all = le.fit_transform(label)
    num_classes = len(np.unique(y_all))
    # Persist the encoder classes so predictions can be decoded later.
    np.save(os.path.join(result_dir, "label_mapping.npy"), le.classes_)

    print(f"Classes: {le.classes_} (n={num_classes})")

    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)

    all_acc, all_prec, all_rec, all_f1 = [], [], [], []
    process = psutil.Process(os.getpid())

    use_gpu = GPU_AVAILABLE and args.use_gpu

    for fold, (train_idx, test_idx) in enumerate(skf.split(X, y_all)):
        print(f"\n===== Fold {fold} =====")
        fold_start = time.time()

        X_train, X_test = X[train_idx], X[test_idx]
        y_train, y_test = y_all[train_idx], y_all[test_idx]

        if use_gpu:
            # cuML wants device arrays; y_test stays on host for sklearn metrics.
            X_train = cp.asarray(X_train, dtype=cp.float32)
            X_test = cp.asarray(X_test, dtype=cp.float32)
            y_train = cp.asarray(y_train, dtype=cp.int32)
            model = cuRFClassifier(
                n_estimators=args.n_estimators,
                max_depth=args.max_depth,
                random_state=42
            )
        else:
            model = RandomForestClassifier(
                n_estimators=args.n_estimators,
                max_depth=args.max_depth,
                random_state=42,
                n_jobs=-1
            )

        model.fit(X_train, y_train)

        y_pred = model.predict(X_test)
        if use_gpu:
            y_pred = cp.asnumpy(y_pred)

        acc = accuracy_score(y_test, y_pred)
        prec, rec, f1, _ = precision_recall_fscore_support(
            y_test, y_pred, average="macro", zero_division=0
        )

        all_acc.append(acc)
        all_prec.append(prec)
        all_rec.append(rec)
        all_f1.append(f1)

        fold_time = time.time() - fold_start
        gpu_mem = get_gpu_mem_by_pid(os.getpid()) if use_gpu else 0.0
        cpu_mem = process.memory_info().rss / 1024**2

        print(
            f"ACC={acc:.4f}, PREC={prec:.4f}, REC={rec:.4f}, "
            f"F1={f1:.4f}, Time={fold_time:.2f}s, "
            f"GPU={gpu_mem:.2f}MB, CPU={cpu_mem:.2f}MB"
        )

        pd.DataFrame({
            "y_true": le.inverse_transform(y_test),
            "y_pred": le.inverse_transform(y_pred)
        }).to_csv(os.path.join(result_dir, f"fold{fold}.csv"), index=False)

    # ===== Summary =====
    print("\n===== CV Summary =====")
    print(f"ACC : {np.mean(all_acc):.4f} ± {np.std(all_acc):.4f}")
    print(f"PREC: {np.mean(all_prec):.4f} ± {np.std(all_prec):.4f}")
    print(f"REC : {np.mean(all_rec):.4f} ± {np.std(all_rec):.4f}")
    print(f"F1  : {np.mean(all_f1):.4f} ± {np.std(all_f1):.4f}")
129
+
130
+
131
def RF_class():
    """Entry point: tune the RF per species with Optuna, then run the final CV.

    Flow per species: load data -> ``RF_GPU_he_class.Hyperparameter`` ->
    copy best n_estimators/max_depth into ``args`` -> ``run_cv``.
    """
    set_seed(42)
    pynvml.nvmlInit()
    # NOTE(review): ``handle`` is a *local* here, but get_gpu_mem_by_pid
    # reads a module-global ``handle`` — as written that helper raises
    # NameError whenever it is called. Needs wiring up.
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)

    args = parse_args()

    # Hard-coded run set; overrides any --species passed on the CLI.
    all_species = ["Human/Sim/"]
    for species in all_species:
        args.species = species
        X, Y = load_data(args)
        print(f"\n▶ Running {args.methods}{args.species}")
        label = Y[:, 0]  # first phenotype column is the class label
        # NOTE(review): imputing *class labels* with their nanmedian is
        # unusual — confirm labels can actually contain NaN and that median
        # imputation (possibly a non-existent class value) is intended.
        label = np.nan_to_num(label, nan=np.nanmedian(label))

        best_params = RF_GPU_he_class.Hyperparameter(X, label)
        args.n_estimators = best_params['n_estimators']
        args.max_depth = best_params['max_depth']

        if torch.cuda.is_available():
            torch.cuda.reset_peak_memory_stats()
        process = psutil.Process(os.getpid())  # NOTE(review): unused

        start_time = time.time()

        run_cv(args, X, label)
        elapsed_time = time.time() - start_time
        print(f"Running time: {elapsed_time:.2f}s")

        # Release cupy's pooled device memory between species.
        if GPU_AVAILABLE:
            cp.get_default_memory_pool().free_all_blocks()
162
+
163
+
164
+ if __name__ == "__main__":
165
+ RF_class()
@@ -0,0 +1,124 @@
1
+ import os
2
+ import random
3
+ import torch
4
+ import numpy as np
5
+ import optuna
6
+ from sklearn.model_selection import KFold
7
+ from sklearn.preprocessing import LabelEncoder
8
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
9
+
10
+ try:
11
+ import cupy as cp
12
+ from cuml.ensemble import RandomForestClassifier as cuRandomForestClassifier
13
+ CUML_AVAILABLE = True
14
+ except ImportError:
15
+ from sklearn.ensemble import RandomForestClassifier
16
+ CUML_AVAILABLE = False
17
+
18
+ def run_nested_cv_with_early_stopping(
19
+ data, label, outer_cv,
20
+ n_estimators, max_depth, use_gpu=True):
21
+
22
+ all_acc, all_prec, all_rec, all_f1 = [], [], [], []
23
+ le = LabelEncoder()
24
+ y_all = le.fit_transform(label)
25
+ num_classes = len(np.unique(y_all))
26
+ print(f"Classes: {le.classes_} (n={num_classes})")
27
+
28
+ gpu_available = use_gpu and CUML_AVAILABLE and torch.cuda.is_available()
29
+
30
+ import time
31
+ time_start = time.time()
32
+
33
+ for fold, (train_idx, test_idx) in enumerate(outer_cv.split(data, y_all)):
34
+ X_train, X_test = data[train_idx], data[test_idx]
35
+ y_train, y_test = y_all[train_idx], y_all[test_idx]
36
+
37
+ X_train = X_train.astype(np.float32)
38
+ X_test = X_test.astype(np.float32)
39
+
40
+ if gpu_available:
41
+ X_train = cp.asarray(X_train)
42
+ X_test = cp.asarray(X_test)
43
+ y_train = cp.asarray(y_train)
44
+
45
+ model = cuRandomForestClassifier(
46
+ n_estimators=n_estimators,
47
+ max_depth=max_depth,
48
+ random_state=42,
49
+ n_streams=1
50
+ )
51
+ else:
52
+ model = RandomForestClassifier(
53
+ n_estimators=n_estimators,
54
+ max_depth=max_depth,
55
+ random_state=42,
56
+ n_jobs=-1
57
+ )
58
+
59
+ model.fit(X_train, y_train)
60
+
61
+ y_pred = model.predict(X_test)
62
+
63
+ if gpu_available:
64
+ y_pred = cp.asnumpy(y_pred)
65
+
66
+ acc = accuracy_score(y_test, y_pred)
67
+ prec, rec, f1, _ = precision_recall_fscore_support(
68
+ y_test, y_pred, average="macro", zero_division=0
69
+ )
70
+
71
+ all_acc.append(acc)
72
+ all_prec.append(prec)
73
+ all_rec.append(rec)
74
+ all_f1.append(f1)
75
+
76
+ device = "GPU" if gpu_available else "CPU"
77
+ print(
78
+ f"Fold {fold + 1}[{device}]: "
79
+ f"ACC={acc:.4f}, PREC={prec:.4f}, "
80
+ f"REC={rec:.4f}, F1={f1:.4f}"
81
+ )
82
+
83
+ if gpu_available:
84
+ cp.get_default_memory_pool().free_all_blocks()
85
+
86
+ print("\n==== Final Results ====")
87
+ print(f"ACC : {np.mean(all_acc):.4f} ± {np.std(all_acc):.4f}")
88
+ print(f"PREC: {np.mean(all_prec):.4f} ± {np.std(all_prec):.4f}")
89
+ print(f"REC : {np.mean(all_rec):.4f} ± {np.std(all_rec):.4f}")
90
+ print(f"F1 : {np.mean(all_f1):.4f} ± {np.std(all_f1):.4f}")
91
+ print(f"Time: {time.time() - time_start:.2f}s")
92
+ return np.mean(all_f1)
93
+
94
+ def set_seed(seed=42):
95
+ random.seed(seed)
96
+ np.random.seed(seed)
97
+ torch.manual_seed(seed)
98
+ if torch.cuda.is_available():
99
+ torch.cuda.manual_seed_all(seed)
100
+
101
def Hyperparameter(data, label, use_gpu=True):
    """Tune RF ``n_estimators`` / ``max_depth`` with Optuna, maximizing mean macro-F1.

    Each of the 20 trials scores a full 10-fold CV via
    ``run_nested_cv_with_early_stopping``. Returns ``study.best_params``.
    """
    # Consistency fix: every other CV in this package stratifies its folds
    # (see run_cv and the LightGBM tuner); plain KFold can leave a fold
    # without some class, which skews macro-F1 (or crashes) during tuning
    # of a classification model.
    from sklearn.model_selection import StratifiedKFold

    set_seed(42)

    def objective(trial):
        n_estimators = trial.suggest_int("n_estimators", 100, 1000)
        max_depth = trial.suggest_int("max_depth", 3, 10)

        outer_cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)

        score = run_nested_cv_with_early_stopping(
            data=data,
            label=label,
            outer_cv=outer_cv,
            n_estimators=n_estimators,
            max_depth=max_depth,
            use_gpu=use_gpu
        )
        return score

    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=20)

    print("best params:", study.best_params)
    return study.best_params
@@ -0,0 +1,5 @@
1
# Public entry point for the random-forest classification benchmark.
from .RF_GPU_class import RF_class

# Ergonomic alias: gpbench.method_class.RF.RF
RF = RF_class

__all__ = ["RF","RF_class"]