junshan-kit 2.8.5__py2.py3-none-any.whl → 2.8.6__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
junshan_kit/Check_Info.py CHANGED
@@ -7,22 +7,23 @@
 
 from junshan_kit import ModelsHub
 
-def check_args(self, args, parser, allowed_models, allowed_optimizers, allowed_datasets):
+def check_args(args, parser, allowed_models, allowed_optimizers, allowed_datasets):
     # Parse and validate each train_group
     for cfg in args.train:
-        try:
-            model, dataset, optimizer = cfg.split("-")
+        model, dataset, optimizer = cfg.split("-")
+
+        if model not in allowed_models:
+            parser.error(f"Invalid model '{model}'. Choose from {allowed_models}")
+
+        if optimizer not in allowed_optimizers:
+            parser.error(f"Invalid optimizer '{optimizer}'. Choose from {allowed_optimizers}")
 
-            if model not in allowed_models:
-                parser.error(f"Invalid model '{model}'. Choose from {allowed_models}")
-            if optimizer not in allowed_optimizers:
-                parser.error(f"Invalid optimizer '{optimizer}'. Choose from {allowed_optimizers}")
-            if dataset not in allowed_datasets:
-                parser.error(f"Invalid dataset '{dataset}'. Choose from {allowed_datasets}")
+        if dataset not in allowed_datasets:
+            print(type(allowed_datasets), allowed_datasets)
+            parser.error(f"Invalid dataset '{dataset}'. Choose from {allowed_datasets}")
 
-        except ValueError:
-            parser.error(f"Invalid format '{cfg}'. Use model-dataset-optimizer")
 
+    # Check if the model-dataset-optimizer combination exists
    for cfg in args.train:
        model_name, dataset_name, optimizer_name = cfg.split("-")
        try:
@@ -31,8 +32,15 @@ def check_args(self, args, parser, allowed_models, allowed_optimizers, allowed_d
        except:
            print(getattr(ModelsHub, f"Build_{args.model_name_mapping[model_name]}_{args.data_name_mapping[dataset_name]}"))
            assert False
+
+    # Check epochs or iterations
+    if args.e is None and args.iter is None:
+        parser.error("one of --e or --iter must be specified")
+
+    if args.e is not None and args.iter is not None:
+        parser.error("only one of --e or --iter may be specified")
 
-def check_subset_info(self, args, parser):
+def check_subset_info(args, parser):
    total = sum(args.subset)
    if args.subset[0]>1:
        # CHECK
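
The new checks make --e and --iter mutually exclusive while leaving both flags optional in argparse. For reference, the same exactly-one-of constraint can also be expressed with argparse's built-in mutually exclusive groups; this is a minimal sketch of that alternative, not the package's actual wiring:

    import argparse

    # Minimal sketch: enforce exactly one of --e / --iter at parse time.
    # junshan_kit instead keeps both flags optional and validates them in
    # Check_Info.check_args; this only illustrates the constraint itself.
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--e", type=int, help="Number of training epochs")
    group.add_argument("--iter", type=int, help="Number of iterations")

    args = parser.parse_args(["--iter", "200"])
    print(args.e, args.iter)  # -> None 200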
junshan_kit/FiguresHub.py CHANGED
@@ -89,6 +89,10 @@ def colors_schedule(colors_schedule=None):
 def Search_Paras(Paras, args, model_name, data_name, optimizer_name, metric_key = "training_loss"):
 
     param_dict = Paras["Results_dict"][model_name][data_name][optimizer_name]
+    if Paras["epochs"] is not None:
+        xlabel = "epochs"
+    else:
+        xlabel = "iterations"
 
     num_polts = len(param_dict)
     cols = 3
@@ -104,7 +108,7 @@ def Search_Paras(Paras, args, model_name, data_name, optimizer_name, metric_key
         ax.plot(metric_list)
         # ax.set_title(f"time:{duration:.8f}s - seed: {Paras['seed']}, ID: {Paras['time_str']} \n params = {param_str}", fontsize=10)
         ax.set_title(f'time = {info["train_time"]:.2f}, seed: {Paras["seed"]}, ID: {Paras["time_str"]} \n params = {param_str}', fontsize=10)
-        ax.set_xlabel("epochs")
+        ax.set_xlabel(xlabel)
         ax.set_ylabel(ParametersHub.fig_ylabel(metric_key))
         ax.grid(True)
         if Paras.get('use_log_scale', False) and any(k in metric_key for k in ['loss', 'grad']):
@@ -127,24 +131,28 @@ def Search_Paras(Paras, args, model_name, data_name, optimizer_name, metric_key
 
 def Read_Results_from_pkl(info_dict, Exp_name, model_name):
     draw_data = defaultdict(dict)
+    xlabels = {}
     for data_name, info in info_dict.items():
         for optimizer_name, info_opt in info["optimizer"].items():
 
-            pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/epoch_{info["epochs"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+            if info.get("epochs") is not None:
+                pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/epoch_{info["epochs"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+                xlabels[data_name] = "epochs"
+
+            else:
+                pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/iter_{info["iter"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+                xlabels[data_name] = "iterations"
 
             data_ = kit.read_pkl_data(pkl_path)
 
             param_str = ParametersHub.opt_paras_str(info["optimizer"][optimizer_name])
 
-            # draw_data[data_name][optimizer_name] = data_[param_str][info["metric_key"]]
-            # draw_data[data_name][optimizer_name][param_str] = param_str
-            # Store both metric list and parameter string
             draw_data[data_name][optimizer_name] = {
                 "metrics": data_[param_str][info["metric_key"]],
                 "param_str": param_str
             }
 
-    return draw_data
+    return draw_data, xlabels
 
 
 
@@ -155,13 +163,20 @@ def Mul_Plot(model_name, info_dict, Exp_name = "SPBM", cols = 3, save_path = Non
     mpl.rcParams["axes.unicode_minus"] = False
     mpl.rcParams["font.size"] = 12
     mpl.rcParams["font.family"] = "serif"
+    xlabels = {}
 
     # Read data
     draw_data = defaultdict(dict)
     for data_name, info in info_dict.items():
         for optimizer_name, info_opt in info["optimizer"].items():
 
-            pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/epoch_{info["epochs"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+            if info.get("epochs") is not None:
+                pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/epoch_{info["epochs"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+                xlabels[data_name] = "epochs"
+
+            else:
+                pkl_path = f'{Exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{info["train_test"][0]}_test_{info["train_test"][1]}/Batch_size_{info["batch_size"]}/iter_{info["iter"]}/{info_opt["ID"]}/Results_{ParametersHub.model_abbr(model_name)}_{data_name}_{optimizer_name}.pkl'
+                xlabels[data_name] = "iterations"
 
             data_ = kit.read_pkl_data(pkl_path)
 
@@ -192,7 +207,7 @@ def Mul_Plot(model_name, info_dict, Exp_name = "SPBM", cols = 3, save_path = Non
         ax.scatter(x, metric_list_arr[x], marker=marker_schedule("SPBM")[optimizer_name], color = colors_schedule("SPBM")[optimizer_name])
 
     ax.set_title(f'{data_name}', fontsize=12)
-    ax.set_xlabel("epochs", fontsize=12)
+    ax.set_xlabel(xlabels[data_name], fontsize=12)
     ax.set_ylabel(ParametersHub.fig_ylabel(info_dict[data_name]["metric_key"]), fontsize=12)
     if any(k in info_dict[data_name]["metric_key"] for k in ['loss', 'grad']):
         ax.set_yscale("log")
@@ -245,7 +260,7 @@ def Opt_Paras_Plot(model_name, info_dict, Exp_name = "SPBM", save_path = None, s
     mpl.rcParams["font.family"] = "serif"
 
     # Read data
-    draw_data = Read_Results_from_pkl(info_dict, Exp_name, model_name)
+    draw_data, xlabels = Read_Results_from_pkl(info_dict, Exp_name, model_name)
 
     if len(draw_data) > 1:
         print('*' * 40)
@@ -273,7 +288,7 @@ def Opt_Paras_Plot(model_name, info_dict, Exp_name = "SPBM", save_path = None, s
         plt.yscale("log")
 
     plt.tight_layout()  # Adjust layout so the legend fits
-    plt.xlabel("epochs")  # Or whatever your x-axis represents
+    plt.xlabel(xlabels[data_name])  # Or whatever your x-axis represents
     plt.ylabel(f'{ParametersHub.fig_ylabel(info_dict[data_name]["metric_key"])}')
     if save_path is None:
         save_path_ = f'{model_name}.pdf'
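
The epochs-versus-iterations branch above duplicates the long results-path f-string in Read_Results_from_pkl, Mul_Plot, and (below) ParametersHub.hyperparas_and_path. A hypothetical helper, shown here only as a refactoring sketch and not part of the package, could centralize that branch:

    # Hypothetical helper (not in junshan_kit): build the results path,
    # switching the budget segment between epoch_{n} and iter_{n}.
    def results_path(exp_name, info, model_name, data_name, optimizer_name, run_id, model_abbr):
        if info.get("epochs") is not None:
            budget = f'epoch_{info["epochs"]}'
        else:
            budget = f'iter_{info["iter"]}'
        return (
            f'{exp_name}/seed_{info["seed"]}/{model_name}/{data_name}/{optimizer_name}'
            f'/train_{info["train_test"][0]}_test_{info["train_test"][1]}'
            f'/Batch_size_{info["batch_size"]}/{budget}/{run_id}'
            f'/Results_{model_abbr}_{data_name}_{optimizer_name}.pkl'
        )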
junshan_kit/ParametersHub.py CHANGED
@@ -49,7 +49,7 @@ class args:
         "HL",
         "HQC",
         "TN_Weather",
-    ],
+    ]
     # <allowed_datasets>
     data_name_mapping = {
         "MNIST": "MNIST",
@@ -103,10 +103,15 @@ class args:
     parser.add_argument(
         "--e",
         type=int,
-        required=True,
         help="Number of training epochs. Example: --e 50"
     )
 
+    parser.add_argument(
+        "--iter",
+        type=int,
+        help="Number of iterations. Example: --iter 50"
+    )
+
     parser.add_argument(
         "--seed",
         type=int,
@@ -182,6 +187,8 @@ class args:
     args.data_name_mapping = data_name_mapping
     args.optimizers_name_mapping = optimizers_mapping
 
+    # <Check_Info>
+    Check_Info.check_args(args, parser, allowed_models, allowed_optimizers, allowed_datasets)
     return args
     # <args>
 
@@ -248,9 +255,6 @@ def set_paras(args, OtherParas):
        # batch-size
        "batch_size": args.bs,
 
-       # epochs
-       "epochs": args.e,
-
        # split_train_data
        "split_train_data": args.s,
 
@@ -263,7 +267,8 @@ def set_paras(args, OtherParas):
        # type: bool
        "user_search_grid": OtherParas["user_search_grid"],
    }
-
+    Paras["iter"] = args.iter
+    Paras["epochs"] = args.e
    Paras = model_list(Paras)
    Paras = model_type(Paras)
    Paras = data_list(Paras)
@@ -500,7 +505,15 @@ def hyperparas_and_path(Paras, model_name, data_name, optimizer_name, params_gir
 
    keys, values = list(params_gird.keys()), list(params_gird.values())
 
-   Paras["Results_folder"] = f'./{Paras["results_folder_name"]}/seed_{Paras["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{Paras["train_data_num"]}_test_{Paras["test_data_num"]}/Batch_size_{Paras["batch_size"]}/epoch_{Paras["epochs"]}/{Paras["time_str"]}'
+   if Paras["epochs"] is not None:
+       Paras["Results_folder"] = f'./{Paras["results_folder_name"]}/seed_{Paras["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{Paras["train_data_num"]}_test_{Paras["test_data_num"]}/Batch_size_{Paras["batch_size"]}/epoch_{Paras["epochs"]}/{Paras["time_str"]}'
+
+   elif Paras["iter"] is not None:
+       Paras["Results_folder"] = f'./{Paras["results_folder_name"]}/seed_{Paras["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{Paras["train_data_num"]}_test_{Paras["test_data_num"]}/Batch_size_{Paras["batch_size"]}/iter_{Paras["iter"]}/{Paras["time_str"]}'
+
+   else:
+       raise ValueError("one of --e or --iter must be specified")
+
    os.makedirs(Paras["Results_folder"], exist_ok=True)
 
    return keys, values, Paras
@@ -603,6 +616,7 @@ def opt_paras_str(opt_paras_dict):
 def set_marker_point(epoch_num: int) -> list:
     marker_point = {
         1: [0],
+        3: [0, 2],
         4: [0, 2, 4],
         6: [0, 2, 4, 6],
         8: [0, 2, 4, 6, 8],
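
Taken together, these changes make the training budget part of the on-disk layout. With placeholder values for the path components, the two modes produce folders such as:

    # epochs mode (--e 50):
    ./<results_folder_name>/seed_42/<model>/<data>/<optimizer>/train_<n>_test_<m>/Batch_size_128/epoch_50/<time_str>
    # iterations mode (--iter 200):
    ./<results_folder_name>/seed_42/<model>/<data>/<optimizer>/train_<n>_test_<m>/Batch_size_128/iter_200/<time_str>

The seed and batch-size values here are illustrative; every component comes from the Paras keys used in hyperparas_and_path.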
junshan_kit/Print_Info.py CHANGED
@@ -9,51 +9,75 @@ def training_group(training_group):
     print(f"-------------------------------------------------------")
 
 
-def training_info(args, data_name, optimizer_name, hyperparams, Paras, model_name):
-    if Paras["use_color"]:
-        print("\033[90m" + "-" * 115 + "\033[0m")
+def training_info(data_name, optimizer_name, hyperparams, Paras, model_name):
+    if Paras['use_color']:
+        print('\033[90m' + '-' * 115 + '\033[0m')
+        print(
+            f'\033[32m✅ \033[34mDataset:\033[32m {data_name}, \t'
+            f'\033[34mBatch-size:\033[32m {Paras["batch_size"]}, \t'
+            f'\033[34m(training, test) = \033[32m '
+            f'({Paras["train_data_num"]}/{Paras["train_data_all_num"]}, '
+            f'{Paras["test_data_num"]}/{Paras["test_data_all_num"]}), \t'
+            f'\033[34mdevice:\033[32m {Paras["device"]}'
+        )
         print(
-            f"\033[32m✅ \033[34mDataset:\033[32m {data_name}, \t\033[34mBatch-size:\033[32m {args.bs}, \t\033[34m(training, test) = \033[32m ({Paras['train_data_num']}/{Paras['train_data_all_num']}, {Paras['test_data_num']}/{Paras['test_data_all_num']}), \t\033[34m device:\033[32m {Paras['device']}"
+            f'\033[32m✅ \033[34mOptimizer:\033[32m {optimizer_name}, \t'
+            f'\033[34mParams:\033[32m {hyperparams}'
         )
         print(
-            f"\033[32m✅ \033[34mOptimizer:\033[32m {optimizer_name}, \t\033[34mParams:\033[32m {hyperparams}"
+            f'\033[32m✅ \033[34mmodel:\033[32m {model_name}, \t'
+            f'\033[34mmodel type:\033[32m {Paras["model_type"][model_name]}, \t'
+            f'\033[34mloss_fn:\033[32m {Paras["loss_fn"]}'
         )
         print(
-            f'\033[32m✅ \033[34mmodel:\033[32m {model_name}, \t\033[34mmodel type:\033[32m {Paras["model_type"][model_name]},\t\033[34m loss_fn:\033[32m {Paras["loss_fn"]}'
+            f'\033[32m✅ \033[34mResults_folder:\033[32m {Paras["Results_folder"]}'
         )
-        print(f'\033[32m✅ \033[34mResults_folder:\033[32m {Paras["Results_folder"]}')
-        print("\033[90m" + "-" * 115 + "\033[0m")
+        print('\033[90m' + '-' * 115 + '\033[0m')
 
     else:
-        print("-" * 115)
+        print('-' * 115)
         print(
-            f"✅ Dataset: {data_name}, \tBatch-size: {args.bs}, \t(training, test) = ({Paras['train_data_num']}/{Paras['train_data_all_num']}, {Paras['test_data_num']}/{Paras['test_data_all_num']}), \tdevice: {Paras['device']}"
+            f'✅ Dataset: {data_name}, \t'
+            f'Batch-size: {Paras["batch_size"]}, \t'
+            f'(training, test) = '
+            f'({Paras["train_data_num"]}/{Paras["train_data_all_num"]}, '
+            f'{Paras["test_data_num"]}/{Paras["test_data_all_num"]}), \t'
+            f'device: {Paras["device"]}'
         )
-        print(f"✅ Optimizer: {optimizer_name}, \tParams: {hyperparams}")
+        print(f'✅ Optimizer: {optimizer_name}, \tParams: {hyperparams}')
         print(
-            f"✅ model: {model_name}, \tmodel type: {Paras['model_type'][model_name]}, \tloss_fn: {Paras['loss_fn']}"
+            f'✅ model: {model_name}, \t'
+            f'model type: {Paras["model_type"][model_name]}, \t'
+            f'loss_fn: {Paras["loss_fn"]}'
         )
-        print(f"✅ Results_folder: {Paras['Results_folder']}")
-        print("-" * 115)
-
+        print(f'✅ Results_folder: {Paras["Results_folder"]}')
+        print('-' * 115)
     # <Step_7_2>
 
-def per_epoch_info(Paras, epoch, metrics, time):
-    if Paras["use_color"]:
-        print(
-            f'\033[34m epoch = \033[32m{epoch+1}/{Paras["epochs"]}\033[0m,\t\b'
-            f'\033[34m training_loss = \033[32m{metrics["training_loss"][epoch+1]:.4e}\033[0m,\t\b'
-            f'\033[34m training_acc = \033[32m{100 * metrics["training_acc"][epoch+1]:.2f}\033[0m,\t\b'
-            f'\033[34m time = \033[32m{time:.2f}\033[0m,\t\b')
+def per_epoch_info(Paras, epoch, metrics, epoch_time):
+    if Paras.get('epochs') is not None:
+        progress = f'{epoch + 1}/{Paras["epochs"]}'
+        progress_label = 'epoch'
+    else:
+        progress = f'{epoch + 1}/{Paras["iter"]}'
+        progress_label = 'iter'
 
+    if Paras['use_color']:
+        print(
+            f'\033[34m {progress_label} = \033[32m{progress}\033[0m,\t'
+            f'\033[34m training_loss = \033[32m{metrics["training_loss"][epoch + 1]:.4e}\033[0m,\t'
+            f'\033[34m training_acc = \033[32m{100 * metrics["training_acc"][epoch + 1]:.2f}%\033[0m,\t'
+            f'\033[34m time = \033[32m{epoch_time:.2f}s\033[0m'
+        )
     else:
         print(
-            f"epoch = {epoch+1}/{Paras['epochs']},\t"
-            f"training_loss = {metrics['training_loss'][epoch+1]:.4e},\t"
-            f"training_acc = {100 * metrics['training_acc'][epoch+1]:.2f}%,\t"
-            f"time = {time:.2f}"
+            f'{progress_label} = {progress},\t'
+            f'training_loss = {metrics["training_loss"][epoch + 1]:.4e},\t'
+            f'training_acc = {100 * metrics["training_acc"][epoch + 1]:.2f}%,\t'
+            f'time = {epoch_time:.2f}s'
        )
 
+
 def print_per_epoch_info(epoch, Paras, epoch_loss, training_loss, training_acc, test_loss, test_acc, run_time):
     epochs = Paras["epochs"][Paras["data_name"]]
     # result = [(k, f"{v:.4f}") for k, v in run_time.items()]
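
The colored branches rely on standard ANSI SGR escape sequences; a quick self-contained illustration of the codes used above (these are terminal conventions, not package-specific):

    # ANSI SGR codes as used in Print_Info:
    #   \033[90m  grey (bright black) - separator rules
    #   \033[34m  blue                - field labels
    #   \033[32m  green               - field values
    #   \033[0m   reset               - restore the default color
    print('\033[34m training_loss = \033[32m' + f'{1.2345e-3:.4e}' + '\033[0m')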
junshan_kit/TrainingHub.py CHANGED
@@ -238,7 +238,6 @@ def train(train_loader, optimizer_name, optimizer, model, loss_fn, Paras):
                loss = 0
                raise NotImplementedError(f"{optimizer_name} is not supported.")
 
-
        # Evaluation
        training_loss, training_acc = Evaluate_Metrics.get_loss_acc(train_loader, model, loss_fn, Paras)
 
@@ -254,6 +253,67 @@ def train(train_loader, optimizer_name, optimizer, model, loss_fn, Paras):
    return metrics
    # <training>
 
+# <training_iteration>
+def train_iteration(train_loader, optimizer_name, optimizer, model, loss_fn, Paras):
+    train_time = time.time()
+    metrics = ParametersHub.metrics()
+    for iter in range(Paras["iter"]):
+        iter_time = time.time()
+        for index, (X, Y) in enumerate(train_loader):
+            X, Y = X.to(Paras["device"]), Y.to(Paras["device"])
+
+            if iter == 0 and index == 0:
+                initial_time = time.time()
+                initial_loss, initial_correct = Evaluate_Metrics.get_loss_acc(train_loader, model, loss_fn, Paras)
+                metrics["training_loss"].append(initial_loss)
+                metrics["training_acc"].append(initial_correct)
+
+                Print_Info.per_epoch_info(Paras, -1, metrics, time.time() - initial_time)
+
+            # Update the model
+            if optimizer_name in ["SGD", "ADAM"]:
+                optimizer.zero_grad()
+                loss = Evaluate_Metrics.loss(X, Y, model, loss_fn, Paras)
+                loss.backward()
+                optimizer.step()
+
+            elif optimizer_name in [
+                "Bundle",
+                "SPBM-TR",
+                "SPBM-PF",
+                "ALR-SMAG",
+                "SPSmax",
+                "SPBM-TR-NoneSpecial",
+                "SPBM-TR-NoneLower",
+                "SPBM-TR-NoneCut",
+                "SPBM-PF-NoneCut",
+            ]:
+                def closure():
+                    optimizer.zero_grad()
+                    loss = Evaluate_Metrics.loss(X, Y, model, loss_fn, Paras)
+                    loss.backward()
+                    return loss
+
+                loss = optimizer.step(closure)
+
+            else:
+                loss = 0
+                raise NotImplementedError(f"{optimizer_name} is not supported.")
+
+        # Evaluation
+        training_loss, training_acc = Evaluate_Metrics.get_loss_acc(train_loader, model, loss_fn, Paras)
+
+        metrics["training_loss"].append(training_loss)
+        metrics["training_acc"].append(training_acc)
+
+        Print_Info.per_epoch_info(Paras, iter, metrics, time.time() - iter_time)
+
+    time_cost = time.time() - train_time
+    metrics["train_time"] = time_cost
+
+    return metrics
+# <training_iteration>
+
 
 def Record_Results(hyperparams,data_name, model_name, optimizer_name, metrics, Paras):
 
     keys = list(hyperparams.keys())
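
The call site that chooses between train and train_iteration is not part of this diff; given the Paras["epochs"] and Paras["iter"] keys set in ParametersHub.set_paras, a plausible dispatch would look like the following sketch (illustrative only):

    # Illustrative dispatch only - the actual call site is outside this diff.
    if Paras["epochs"] is not None:
        metrics = TrainingHub.train(train_loader, optimizer_name, optimizer, model, loss_fn, Paras)
    else:
        metrics = TrainingHub.train_iteration(train_loader, optimizer_name, optimizer, model, loss_fn, Paras)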
junshan_kit-2.8.5.dist-info/METADATA → junshan_kit-2.8.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: junshan_kit
-Version: 2.8.5
+Version: 2.8.6
 Summary: This is an optimization tool.
 Author-email: Junshan Yin <junshanyin@163.com>
 Requires-Dist: cvxpy==1.6.5
junshan_kit-2.8.5.dist-info/RECORD → junshan_kit-2.8.6.dist-info/RECORD CHANGED
@@ -1,20 +1,20 @@
 junshan_kit/BenchmarkFunctions.py,sha256=tXgZGg-CjTNz78nMyVEQflVFIJDgmmePytXjY_RT9BM,120
-junshan_kit/Check_Info.py,sha256=Z6Ls2S7Fl4h8S9s0NB8jP_YpSLZInvQAeyjIXzq5Bpc,1872
+junshan_kit/Check_Info.py,sha256=CTJgzyTYA_kPbBnm3x5oH148SzbULYQnePyY8S_bpsI,2106
 junshan_kit/DataHub.py,sha256=6RCNr8dBTqK-8ey4m-baMU1qOsJP6swOFkaraGdk0fM,6801
 junshan_kit/DataProcessor.py,sha256=Uc9ixhnVmGf5PoGIe3vvhobH_ADtDAosG9MTjnB1KDQ,15677
 junshan_kit/DataSets.py,sha256=DcpwWRm1_B29hIDjOhvaeKAYYeBknEW2QqsS_qm8Hxs,13367
 junshan_kit/Evaluate_Metrics.py,sha256=PQBGU8fETIvDon1VMdouZ1dhG2n7XHYGbzs2EQUA9FM,3392
-junshan_kit/FiguresHub.py,sha256=TVbo9ioEECrH_iJjpt0HgkCoiAdFEcTdtiUtzDNYrJY,10455
+junshan_kit/FiguresHub.py,sha256=Z9mVN3Pnklt1JhkFPd7LO-46kZnf8JRrjfJEy38H3IE,11407
 junshan_kit/ModelsHub.py,sha256=xM6cwLecq9vukrt1c9l7l9dy7mQn3yq0ZwrRg5f_CfM,7995
-junshan_kit/ParametersHub.py,sha256=_LvkdV95vKSU4h2LtF7W63EaF5mRBg85ZHf0ymb28tA,20248
-junshan_kit/Print_Info.py,sha256=7pfd_mGEuQdQGyz6kcSSvjVRCrPgi5RafQgi7ZSS9VU,4890
-junshan_kit/TrainingHub.py,sha256=unoI8zzm0oekUxz-3retHCFhxwx6j8e6Tp9VQDywTPg,11565
+junshan_kit/ParametersHub.py,sha256=pDtVEL9nx3UBtNfKfZyvXf9KXjC-OJkBwursOlSigvs,20935
+junshan_kit/Print_Info.py,sha256=xhxcq1XayMZIbNjrG_74sAa8VCNvw5osPjXQhxkgN_M,5393
+junshan_kit/TrainingHub.py,sha256=eK06q_xtcj4E6qkV-EHT2-UKv-yDDXUwx9n1EivdetQ,13775
 junshan_kit/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 junshan_kit/kit.py,sha256=tQGoJJQZW9BeadX2cuwhvOxX2riHBZG0iFExelS4MIY,11487
 junshan_kit/OptimizerHup/OptimizerFactory.py,sha256=x1_cE5ZSkKffdY0uCIirocBNj2X-u_R-V5jNawJ1EfA,4607
 junshan_kit/OptimizerHup/SPBM.py,sha256=h449QddeN0MvUIQeKcNxFsdxdBuhN354sGc_sN2LZR8,13816
 junshan_kit/OptimizerHup/SPBM_func.py,sha256=5Fz6eHYIVGMoR_CBDA_Xk_1dnPRq3K16DUNoNaWQ2Ag,17301
 junshan_kit/OptimizerHup/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-junshan_kit-2.8.5.dist-info/METADATA,sha256=vxUXFvPqaGc2liYh0A3B4CmlbdlrK1CVdRNx0i7RoRA,455
-junshan_kit-2.8.5.dist-info/WHEEL,sha256=aha0VrrYvgDJ3Xxl3db_g_MDIW-ZexDdrc_m-Hk8YY4,105
-junshan_kit-2.8.5.dist-info/RECORD,,
+junshan_kit-2.8.6.dist-info/METADATA,sha256=PzbDtkti_zBXqS4nZyt3o770Ocx5kBlriB3_LdytvIs,455
+junshan_kit-2.8.6.dist-info/WHEEL,sha256=aha0VrrYvgDJ3Xxl3db_g_MDIW-ZexDdrc_m-Hk8YY4,105
+junshan_kit-2.8.6.dist-info/RECORD,,