junshan-kit 2.3.9__py2.py3-none-any.whl → 2.4.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,328 @@
1
+ import sys, os, torch, random
2
+ import numpy as np
3
+ import torch.nn as nn
4
+ import torch.utils.data as Data
5
+ from torch.utils.data import Subset, random_split
6
+ from junshan_kit import ComOptimizers, datahub, Models, TrainingParas, SPBM
7
+
8
+ # -------------------------------------
9
+ def set_seed(seed=42):
10
+ torch.manual_seed(seed)
11
+ torch.cuda.manual_seed_all(seed)
12
+ np.random.seed(seed)
13
+ random.seed(seed)
14
+ torch.backends.cudnn.deterministic = True
15
+ torch.backends.cudnn.benchmark = False
16
+
17
+ def device(Paras):
18
+ device = torch.device(f"{Paras['cuda']}" if torch.cuda.is_available() else "cpu")
19
+ Paras["device"] = device
20
+ use_color = sys.stdout.isatty()
21
+ Paras["use_color"] = use_color
22
+
23
+ return Paras
24
+
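
A minimal usage sketch for the two helpers above, assuming a `Paras` dict that already carries the `cuda` key normally filled in by `set_paras` (the literal values below are placeholders):

    set_seed(42)                       # fix torch / cuda / numpy / random RNGs
    Paras = {"cuda": "cuda:0"}         # placeholder; normally built by set_paras()
    Paras = device(Paras)              # adds Paras["device"] and Paras["use_color"]
    print(Paras["device"])             # cuda:0 if a GPU is visible, otherwise cpu
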
25
+ # -------------------------------------
26
+ class Train_Steps:
27
+ def __init__(self, args) -> None:
28
+ self.args = args
29
+
30
+ def _model_map(self, model_name):
31
+ model_mapping = self.args.model_mapping
32
+
33
+ return model_mapping[model_name]
34
+
35
+ def get_train_group(self):
36
+ training_group = []
37
+ for cfg in self.args.train_group:
38
+ model, dataset, optimizer = cfg.split("-")
39
+ training_group.append((self._model_map(model), dataset, optimizer))
40
+
41
+ return training_group
42
+
43
+ def set_paras(self, results_folder_name, py_name, time_str, OtherParas):
44
+ Paras = {
45
+ # Name of the folder where results will be saved.
46
+ "results_folder_name": results_folder_name,
47
+ # Whether to draw loss/accuracy figures.
48
+ "DrawFigs": "ON",
49
+ # Whether to use log scale when drawing plots.
50
+ "use_log_scale": "ON",
51
+ # Print loss every N epochs.
52
+ "epoch_log_interval": 1,
53
+ # Timestamp string for result saving.
54
+ "time_str": time_str,
55
+ # Random seed
56
+ "seed": OtherParas['seed'],
57
+ # Device used for training.
58
+ "cuda": f"cuda:{self.args.cuda}",
59
+
60
+ # batch-size
61
+ "batch_size": self.args.bs,
62
+
63
+ # epochs
64
+ "epochs": self.args.e,
65
+
66
+ # split_train_data
67
+ "split_train_data": self.args.s,
68
+
69
+ # select_subset
70
+ "select_subset": self.args.subset,
71
+
72
+ # subset_number_dict
73
+ "subset_number_dict": TrainingParas.subset_number_dict(OtherParas),
74
+
75
+ # validation
76
+ "validation": TrainingParas.validation(),
77
+
78
+ # validation_rate
79
+ "validation_rate": TrainingParas.validation_rate(),
80
+
81
+ # model list
82
+ "model_list" : TrainingParas.model_list(),
83
+
84
+ # model_type
85
+ "model_type": TrainingParas.model_type(),
86
+
87
+ # data_list
88
+ "data_list": TrainingParas.data_list(),
89
+
90
+ # optimizer_dict
91
+ "optimizer_dict": TrainingParas.optimizer_dict(OtherParas)
92
+ }
93
+ Paras["py_name"] = py_name
94
+
95
+ return Paras
96
+
97
+ # <Step_3> : Chosen_loss
98
+ def chosen_loss(self, model_name, Paras):
99
+ # ---------------------------------------------------
100
+ # This model takes one additional parameter: the L2 regularization weight
101
+ if model_name == "LogRegressionBinaryL2":
102
+ Paras["lambda"] = 1e-3
103
+ # ---------------------------------------------------
104
+
105
+ if model_name in ["LeastSquares"]:
106
+ loss_fn = nn.MSELoss()
107
+
108
+ else:
109
+ if Paras["model_type"][model_name] == "binary":
110
+ loss_fn = nn.BCEWithLogitsLoss()
111
+
112
+ elif Paras["model_type"][model_name] == "multi":
113
+ loss_fn = nn.CrossEntropyLoss()
114
+
115
+ else:
116
+ loss_fn = nn.MSELoss()
117
+ print("\033[91m The loss function is error!\033[0m")
118
+ assert False
119
+ Paras["loss_fn"] = loss_fn
120
+
121
+ return loss_fn, Paras
122
+
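
The binary and multi-class branches above expect differently shaped targets; a small, self-contained sketch with dummy tensors (not tied to this package) shows the convention each loss assumes:

    import torch, torch.nn as nn

    bin_logits = torch.randn(8, 1)                     # binary head: one logit per sample
    bin_targets = torch.randint(0, 2, (8, 1)).float()  # BCEWithLogitsLoss wants float targets
    print(nn.BCEWithLogitsLoss()(bin_logits, bin_targets))

    mc_logits = torch.randn(8, 10)                     # multi-class head: one logit per class
    mc_targets = torch.randint(0, 10, (8,))            # CrossEntropyLoss wants class indices
    print(nn.CrossEntropyLoss()(mc_logits, mc_targets))
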
123
+ # <Step_4> : import data --> step.py
124
+ def load_data(self, model_name, data_name, Paras):
125
+ # load data
126
+ train_path = f"./exp_data/{data_name}/training_data"
127
+ test_path = f"./exp_data/{data_name}/test_data"
128
+ # Paras["train_ratio"] = 1.0
129
+ # Paras["select_subset"].setdefault(data_name, False)
130
+ # Paras["validation"].setdefault(data_name, False)
131
+
132
+ if data_name == "MNIST":
133
+ train_dataset, test_dataset, transform = datahub.MNIST(Paras, model_name)
134
+
135
+ elif data_name == "CIFAR100":
136
+ train_dataset, test_dataset, transform = datahub.CIFAR100(Paras, model_name)
137
+
138
+ elif data_name == "CALTECH101_Resize_32":
139
+ Paras["train_ratio"] = 0.7
140
+ train_dataset, test_dataset, transform = datahub.caltech101_Resize_32(
141
+ Paras["seed"], Paras["train_ratio"], split=True
142
+ )
143
+
144
+ elif data_name in ["Vowel", "Letter", "Shuttle", "w8a"]:
145
+ Paras["train_ratio"] = Paras["split_train_data"][data_name]
146
+ train_dataset, test_dataset, transform = datahub.get_libsvm_data(
147
+ train_path + ".txt", test_path + ".txt", data_name
148
+ )
149
+
150
+ elif data_name in ["RCV1", "Duke", "Ijcnn"]:
151
+ Paras["train_ratio"] = Paras["split_train_data"][data_name]
152
+ train_dataset, test_dataset, transform = datahub.get_libsvm_bz2_data(
153
+ train_path + ".bz2", test_path + ".bz2", data_name, Paras
154
+ )
155
+
156
+ else:
157
+ transform = None
158
+ print(f"The data_name is error!")
159
+ assert False
160
+
161
+ return train_dataset, test_dataset, transform
162
+ # <Step_4>
163
+
164
+ # <subset> : Step 5.1 -->step.py
165
+ def set_subset(self, data_name, Paras, train_dataset, test_dataset):
166
+ if self.args.subset[0]>1:
167
+ train_num = self.args.subset[0]
168
+ test_num = self.args.subset[1]
169
+ train_subset_num = min(train_num, len(train_dataset))
170
+ test_subset_num = min(test_num, len(test_dataset))
171
+
172
+ train_subset_indices = list(range(int(train_subset_num)))
173
+ train_dataset = Subset(train_dataset, train_subset_indices)
174
+
175
+ test_subset_indices = list(range(int(test_subset_num)))
176
+ test_dataset = Subset(test_dataset, test_subset_indices)
177
+
178
+ else:
179
+ train_ratios = self.args.subset[0]
180
+ test_ratios = self.args.subset[1]
181
+
182
+ train_subset_indices = list(range(int(train_ratios * len(train_dataset))))
183
+ train_dataset = Subset(train_dataset, train_subset_indices)
184
+
185
+ test_subset_indices = list(range(int(test_ratios * len(test_dataset))))
186
+ test_dataset = Subset(test_dataset, test_subset_indices)
187
+
188
+ return train_dataset, test_dataset
189
+
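
The `subset` argument is read as absolute sample counts when its first entry is greater than 1 and as fractions otherwise; a standalone sketch with a toy dataset (not one of the loaders above) illustrates both branches:

    import torch
    from torch.utils.data import Subset, TensorDataset

    full = TensorDataset(torch.arange(100.0).unsqueeze(1), torch.zeros(100))
    by_count = Subset(full, list(range(min(30, len(full)))))      # subset = [30, ...]
    by_ratio = Subset(full, list(range(int(0.25 * len(full)))))   # subset = [0.25, ...]
    print(len(by_count), len(by_ratio))                           # 30 25
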
190
+ # <validation> : Step 5.2 --> step.py
191
+ def set_val_set(self, data_name, train_dataset, Paras):
192
+ if Paras["validation"][data_name]:
193
+ size_ = len(train_dataset)
194
+ val_size = int(size_ * Paras["validation_rate"][data_name])
195
+ train_size = size_ - val_size
196
+
197
+ train_dataset, val_dataset = random_split(
198
+ train_dataset,
199
+ [train_size, val_size],
200
+ generator=torch.Generator().manual_seed(Paras["seed"]),
201
+ )
202
+
203
+ else:
204
+ val_dataset = Subset(train_dataset, [])
205
+
206
+ return train_dataset, val_dataset
207
+ # <validation>
208
+
209
+
210
+ # <get_dataloader> Step 5.3 -->step.py
211
+ def get_dataloader(self, data_name, train_dataset, test_dataset, Paras):
212
+ set_seed(Paras["seed"])
213
+ g = torch.Generator()
214
+ g.manual_seed(Paras["seed"])
215
+
216
+ # Create training DataLoader
217
+
218
+ train_loader = Data.DataLoader(
219
+ dataset=train_dataset,
220
+ shuffle=True,
221
+ batch_size=self.args.bs,
222
+ generator=g,
223
+ num_workers=4,
224
+ )
225
+
226
+ # test loader
227
+ test_loader = Data.DataLoader(
228
+ dataset=test_dataset,
229
+ shuffle=False,
230
+ batch_size=self.args.bs,
231
+ generator=g,
232
+ num_workers=4,
233
+ )
234
+
235
+ return train_loader, test_loader
236
+ # <get_dataloader>
237
+
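
With `num_workers=4`, exact reproducibility of shuffling and any worker-side augmentation usually also needs a per-worker seed; a hedged sketch of the standard PyTorch pattern (the `seed_worker` helper is not part of this file):

    import random, numpy as np, torch

    def seed_worker(worker_id):
        worker_seed = torch.initial_seed() % 2**32   # derive from the loader's base seed
        np.random.seed(worker_seed)
        random.seed(worker_seed)

    # loader = Data.DataLoader(dataset, batch_size=64, shuffle=True, num_workers=4,
    #                          generator=g, worker_init_fn=seed_worker)
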
238
+ def hyperparas_and_path(
239
+ self,
240
+ model_name,
241
+ data_name,
242
+ optimizer_name,
243
+ Paras,
244
+ ):
245
+ params_grid = Paras["optimizer_dict"][optimizer_name]["params"]
246
+ keys, values = list(params_grid.keys()), list(params_grid.values())
247
+
248
+ # Set the path for saving results
249
+ folder_path = f'./{Paras["results_folder_name"]}/seed_{Paras["seed"]}/{model_name}/{data_name}/{optimizer_name}/train_{Paras["train_data_num"]}_test_{Paras["test_data_num"]}/Batch_size_{self.args.bs}/epoch_{self.args.e}/{Paras["time_str"]}'
250
+ os.makedirs(folder_path, exist_ok=True)
251
+
252
+ return keys, values, folder_path
253
+
254
+
255
+ # <Reloading> Step 7.3 --> step.py
256
+ def reloading_model_dataloader(self,
257
+ base_model_fn,
258
+ initial_state_dict,
259
+ data_name,
260
+ train_dataset,
261
+ test_dataset,
262
+ Paras,
263
+ ):
264
+ set_seed(Paras["seed"])
265
+ model = base_model_fn()
266
+ model.load_state_dict(initial_state_dict)
267
+ model.to(Paras["device"])
268
+ train_loader, test_loader = self.get_dataloader(
269
+ data_name, train_dataset, test_dataset, Paras
270
+ )
271
+
272
+ return model, train_loader, test_loader
273
+ # <Reloading>
274
+
275
+ def chosen_optimizer(self, optimizer_name, model, hyperparams, Paras):
276
+ if optimizer_name == "SGD":
277
+ optimizer = torch.optim.SGD(model.parameters(), lr=hyperparams["alpha"])
278
+
279
+ elif optimizer_name == "ADAM":
280
+ optimizer = torch.optim.Adam(
281
+ model.parameters(),
282
+ lr=hyperparams["alpha"],
283
+ betas=(hyperparams["beta1"], hyperparams["beta2"]),
284
+ eps=hyperparams["epsilon"],
285
+ )
286
+
287
+ elif optimizer_name in ["SPBM-TR"]:
288
+ optimizer = SPBM.TR(model.parameters(), model, hyperparams, Paras)
289
+
290
+ elif optimizer_name in ["SPBM-TR-NoneLower"]:
291
+ optimizer = SPBM.TR_NoneLower(model.parameters(), model, hyperparams, Paras)
292
+
293
+ elif optimizer_name in ["SPBM-TR-NoneSpecial"]:
294
+ optimizer = SPBM.TR_NoneSpecial(model.parameters(), model, hyperparams, Paras)
295
+
296
+ elif optimizer_name in ["SPBM-TR-NoneCut"]:
297
+ optimizer = SPBM.TR_NoneCut(model.parameters(), model, hyperparams, Paras)
298
+
299
+ elif optimizer_name in ["SPBM-PF-NoneLower"]:
300
+ optimizer = SPBM.PF_NoneLower(model.parameters(), model, hyperparams, Paras)
301
+
302
+ elif optimizer_name in ["SPBM-PF"]:
303
+ optimizer = SPBM.PF(model.parameters(), model, hyperparams, Paras)
304
+
305
+ elif optimizer_name in ["SPBM-PF-NoneCut"]:
306
+ optimizer = SPBM.PF_NoneCut(model.parameters(), model, hyperparams, Paras)
307
+
308
+ elif optimizer_name in ["SPSmax"]:
309
+ optimizer = ComOptimizers.SPSmax(
310
+ model.parameters(), model, hyperparams, Paras
311
+ )
312
+
313
+ elif optimizer_name in ["ALR-SMAG"]:
314
+ optimizer = ComOptimizers.ALR_SMAG(
315
+ model.parameters(), model, hyperparams, Paras
316
+ )
317
+
318
+ elif optimizer_name in ["Bundle"]:
319
+ optimizer = ComOptimizers.Bundle(
320
+ model.parameters(), model, hyperparams, Paras
321
+ )
322
+
323
+ else:
324
+ raise NotImplementedError(f"{optimizer_name} is not supported.")
325
+
326
+ return optimizer
327
+
328
+
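
A hedged, end-to-end sketch of how one (model, dataset, optimizer) triple might flow through the steps defined above; the `args` fields, the `OtherParas` contents, and the SGD hyperparameter dict are assumptions inferred from the attribute names used in this file:

    steps = Train_Steps(args)                          # args from argparse (assumed)
    OtherParas = {"seed": 42}                          # plus whatever TrainingParas expects
    Paras = steps.set_paras("results", "train.py", "2025-01-01_00-00-00", OtherParas)
    Paras = device(Paras)

    for model_name, data_name, optimizer_name in steps.get_train_group():
        loss_fn, Paras = steps.chosen_loss(model_name, Paras)
        train_ds, test_ds, _ = steps.load_data(model_name, data_name, Paras)
        train_ds, test_ds = steps.set_subset(data_name, Paras, train_ds, test_ds)
        train_ds, val_ds = steps.set_val_set(data_name, train_ds, Paras)
        train_loader, test_loader = steps.get_dataloader(data_name, train_ds, test_ds, Paras)
        # model construction is handled elsewhere (e.g. junshan_kit.Models), then:
        # optimizer = steps.chosen_optimizer(optimizer_name, model, {"alpha": 0.1}, Paras)
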
junshan_kit/Models.py ADDED
@@ -0,0 +1,207 @@
1
+ import torchvision, torch, random
2
+ import numpy as np
3
+ from torchvision.models import resnet18,resnet34, ResNet18_Weights, ResNet34_Weights
4
+ import torch.nn as nn
5
+
6
+
7
+ # ---------------- Build ResNet18 - Caltech101 -----------------------
8
+ def Build_ResNet18_CALTECH101_Resize_32():
9
+
10
+ """
11
+ 1. Modify the first convolutional layer for smaller input (e.g., 32x32 instead of 224x224)
12
+ Original: kernel_size=7, stride=2, padding=3 → changed to 3x3 kernel, stride=1, padding=1
13
+
14
+ 2. Adjust the final fully connected layer to match the number of Caltech101 classes (101)
15
+ """
16
+ model = resnet18(weights=None)
17
+ model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # 1
18
+ model.fc = nn.Linear(model.fc.in_features, 101) # 2
19
+
20
+ return model
21
+
22
+
23
+ # ---------------- Build ResNet18 - CIFAR100 -----------------------
24
+ def Build_ResNet18_CIFAR100():
25
+ """
26
+ 1. Modify the first convolutional layer for smaller input (e.g., 32x32 instead of 224x224)
27
+ Original: kernel_size=7, stride=2, padding=3 → changed to 3x3 kernel, stride=1, padding=1
28
+
29
+ 2. Adjust the final fully connected layer to match the number of CIFAR-100 classes (100)
30
+ """
31
+
32
+ model = resnet18(weights=None)
33
+ # model = resnet18(weights=ResNet18_Weights.DEFAULT)
34
+ model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # 1
35
+ model.fc = nn.Linear(model.fc.in_features, 100) # 2
36
+
37
+ return model
38
+
39
+
40
+ # ---------------- Build ResNet18 - MNIST ----------------------------
41
+ def Build_ResNet18_MNIST():
42
+ """
43
+ 1. Modify the first convolutional layer to accept grayscale input (1 channel instead of 3)
44
+ Original: in_channels=3 → changed to in_channels=1
45
+
46
+ 2. Adjust the final fully connected layer to match the number of MNIST classes (10)
47
+ """
48
+
49
+ model = resnet18(weights=None)
50
+ model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False) # 1
51
+ model.fc = nn.Linear(model.fc.in_features, 10) # 2
52
+
53
+ return model
54
+
55
+
56
+ # ---------------- Build ResNet34 - CIFAR100 -----------------------
57
+ def Build_ResNet34_CIFAR100():
58
+
59
+ model = resnet34(weights=None)
60
+ model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
61
+ model.fc = nn.Linear(model.fc.in_features, 100)
62
+ return model
63
+
64
+ # ---------------- Build ResNet34 - MNIST ----------------------------
65
+ def Build_ResNet34_MNIST():
66
+ # Do not load the pre-trained weights
67
+ model = resnet34(weights=None)
68
+
69
+ model.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
70
+ model.fc = nn.Linear(model.fc.in_features, 10)
71
+
72
+ return model
73
+
74
+ # ---------------- Build ResNet34 - Caltech101 -----------------------
75
+ def Build_ResNet34_CALTECH101_Resize_32():
76
+
77
+ model = resnet34(weights=None)
78
+ model.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
79
+ model.fc = nn.Linear(model.fc.in_features, 101)
80
+ return model
81
+
82
+
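
Because the stem is changed to a 3x3, stride-1 convolution, the 32x32 inputs keep enough spatial resolution for the four ResNet stages; a quick shape check on a dummy batch (illustrative only):

    import torch

    model = Build_ResNet18_CIFAR100()
    x = torch.randn(4, 3, 32, 32)        # dummy CIFAR-100-sized batch
    with torch.no_grad():
        out = model(x)
    print(out.shape)                     # torch.Size([4, 100])
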
83
+ #**************************************************************
84
+ # ---------------------- LeastSquares -------------------------
85
+ #**************************************************************
86
+ # ---------------- LeastSquares - MNIST -----------------------
87
+ def Build_LeastSquares_MNIST():
88
+ """
89
+ 1. Flatten MNIST images (1 x 28 x 28 → 784)
90
+ 2. Use a linear layer for multi-class classification
91
+ """
92
+ return nn.Sequential(
93
+ nn.Flatten(),
94
+ nn.Linear(28 * 28, 10))
95
+
96
+ # ---------------- LeastSquares - CIFAR100 --------------------
97
+ def Build_LeastSquares_CIFAR100():
98
+ """
99
+ 1. Flatten CIFAR-100 images (3 x 32 x 32 → 3072)
100
+ 2. Use a linear layer for multi-class classification
101
+ """
102
+ return nn.Sequential(
103
+ nn.Flatten(),
104
+ nn.Linear(3 * 32 * 32, 100))
105
+
106
+ # ---------------- LeastSquares - Caltech101 ------------------
107
+ def Build_LeastSquares_CALTECH101_Resize_32():
108
+ return nn.Sequential(
109
+ nn.Flatten(),
110
+ nn.Linear(3*32*32, 101)
111
+ )
112
+
113
+
114
+ #*************************************************************
115
+ # --------------- LogRegressionBinary ------------------------
116
+ #*************************************************************
117
+ # -------------- LogRegressionBinary - MNIST ------------------
118
+ def Build_LogRegressionBinary_MNIST():
119
+ """
120
+ 1. flatten MNIST images (1x28x28 → 784)
121
+ 2. Use a linear layer for binary classification
122
+ """
123
+ return nn.Sequential(
124
+ nn.Flatten(),
125
+ nn.Linear(28 * 28, 1))
126
+
127
+
128
+ # --------------- LogRegressionBinary - CIFAR100 --------------
129
+ def Build_LogRegressionBinary_CIFAR100():
130
+ """
131
+ 1. flatten CIFAR100 images
132
+ 2. Use a linear layer for binary classification
133
+ """
134
+ return nn.Sequential(
135
+ nn.Flatten(),
136
+ nn.Linear(3* 32 * 32, 1))
137
+
138
+ # -------------- LogRegressionBinary - RCV1 ------------------
139
+ def Build_LogRegressionBinary_RCV1():
140
+ """
141
+ 1. Use a linear layer for binary classification
142
+ """
143
+ return nn.Sequential(
144
+ nn.Linear(47236, 1))
145
+
146
+ # <LogRegressionBinaryL2>
147
+ #**************************************************************
148
+ # ------------- LogRegressionBinaryL2 -------------------------
149
+ #**************************************************************
150
+ def Build_LogRegressionBinaryL2_RCV1():
151
+ """
152
+ 1. Use a linear layer for binary classification
153
+ """
154
+ return nn.Sequential(
155
+ nn.Linear(47236, 1))
156
+ # <LogRegressionBinaryL2>
157
+
158
+ # ---------------------------------------------------------
159
+ def Build_LogRegressionBinaryL2_MNIST():
160
+ """
161
+ 1. flatten MNIST images (1x28x28 -> 784)
162
+ 2. Use a linear layer for binary classification
163
+ """
164
+ return nn.Sequential(
165
+ nn.Flatten(),
166
+ nn.Linear(28 * 28, 1))
167
+
168
+ # ---------------------------------------------------------
169
+ def Build_LogRegressionBinaryL2_CIFAR100():
170
+ """
171
+ 1. flatten CIFAR100 images
172
+ 2. Use a linear layer for binary classification
173
+ """
174
+ return nn.Sequential(
175
+ nn.Flatten(),
176
+ nn.Linear(3* 32 * 32, 1))
177
+
178
+ # ---------------------------------------------------------
179
+ def Build_LogRegressionBinaryL2_Duke():
180
+ """
181
+ Use a linear layer for binary classification
182
+ """
183
+ return nn.Sequential(
184
+ nn.Flatten(),
185
+ nn.Linear(7129, 1))
186
+
187
+ # ---------------------------------------------------------
188
+ def Build_LogRegressionBinaryL2_Ijcnn():
189
+ """
190
+ Use a linear layer for binary classification
191
+ """
192
+ return nn.Sequential(
193
+ nn.Flatten(),
194
+ nn.Linear(22, 1))
195
+
196
+ # ---------------------------------------------------------
197
+ def Build_LogRegressionBinaryL2_w8a():
198
+ """
199
+ Use a linear layer for binary classification
200
+ """
201
+ return nn.Sequential(
202
+ nn.Flatten(),
203
+ nn.Linear(300, 1))
204
+
205
+
206
+
207
+
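
All of the binary heads above return raw logits, so they pair with `nn.BCEWithLogitsLoss` as chosen in the loss-selection step; a hedged sketch with random data (the L2 term implied by the *L2 model names is assumed to be added elsewhere, e.g. in the training loop):

    import torch, torch.nn as nn

    model = Build_LogRegressionBinaryL2_w8a()        # Flatten + Linear(300 -> 1), raw logits
    x = torch.randn(16, 300)
    y = torch.randint(0, 2, (16, 1)).float()
    loss = nn.BCEWithLogitsLoss()(model(x), y)       # L2 penalty would be added separately
    loss.backward()
    print(loss.item())
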
@@ -0,0 +1,59 @@
1
+
2
+
3
+
4
+ # -------------------------------------------------------------
5
+ def training_group(training_group):
6
+ print(f"--------------------- training_group ------------------")
7
+ for g in training_group:
8
+ print(g)
9
+ print(f"-------------------------------------------------------")
10
+
11
+
12
+ def training_info(args, use_color, data_name, optimizer_name, folder_path, hyperparams, Paras, model_name):
13
+ if use_color:
14
+ print("\033[90m" + "-" * 115 + "\033[0m")
15
+ print(
16
+ f"\033[32m✅ \033[34mDataset:\033[32m {data_name}, \t\033[34mBatch-size:\033[32m {args.bs}, \t\033[34m(training, test) = \033[32m ({Paras['train_data_num']}, {Paras['test_data_num']}), \t\033[34m device:\033[32m {Paras['device']}"
17
+ )
18
+ print(
19
+ f"\033[32m✅ \033[34mOptimizer:\033[32m {optimizer_name}, \t\033[34mParams:\033[32m {hyperparams}"
20
+ )
21
+ print(
22
+ f'\033[32m✅ \033[34mmodel:\033[32m {model_name}, \t\033[34mmodel type:\033[32m {Paras["model_type"][model_name]},\t\033[34m loss_fn:\033[32m {Paras["loss_fn"]}'
23
+ )
24
+ print(f"\033[32m✅ \033[34mfolder_path:\033[32m {folder_path}")
25
+ print("\033[90m" + "-" * 115 + "\033[0m")
26
+
27
+ else:
28
+ print("-" * 115)
29
+ print(
30
+ f"✅ Dataset: {data_name}, \tBatch-size: {Paras['batch_size'][data_name]}, \t(training, val, test) = ({Paras['training_samples']}, {Paras['val_samples']}, {Paras['test_samples']}), \tdevice: {Paras['device']}"
31
+ )
32
+ print(f"✅ Optimizer: {optimizer_name}, \tParams: {hyperparams}")
33
+ print(
34
+ f'✅ model: {model_name}, \t model type: {Paras["model_type"][model_name]}, loss_fn: {Paras["loss_fn"]}'
35
+ )
36
+ print(f"✅ folder_path: {folder_path}")
37
+ print("-" * 115)
38
+
39
+ # <Step_7_2>
40
+
41
+ def print_per_epoch_info(use_color, epoch, Paras, epoch_loss, training_loss, training_acc, test_loss, test_acc, run_time):
42
+ epochs = Paras["epochs"][Paras["data_name"]]
43
+ # result = [(k, f"{v:.4f}") for k, v in run_time.items()]
44
+ if use_color:
45
+ print(
46
+ f'\033[34m epoch = \033[32m{epoch+1}/{epochs}\033[0m,\t\b'
47
+ f'\033[34m epoch_loss = \033[32m{epoch_loss[epoch+1]:.4e}\033[0m,\t\b'
48
+ f'\033[34m train_loss = \033[32m{training_loss[epoch+1]:.4e}\033[0m,\t\b'
49
+ f'\033[34m train_acc = \033[32m{100 * training_acc[epoch+1]:.2f}%\033[0m,\t\b'
50
+ f'\033[34m test_acc = \033[32m{100 * test_acc[epoch+1]:.2f}%\033[0m,\t\b'
51
+ f'\033[34m time (ep, tr, te) = \033[32m({run_time["epoch"]:.2f}, {run_time["train"]:.2f}, {run_time["test"]:.2f})\033[0m')
52
+ else:
53
+ print(
54
+ f'epoch = {epoch+1}/{epochs},\t'
55
+ f'epoch_loss = {epoch_loss[epoch+1]:.4e},\t'
56
+ f'train_loss = {training_loss[epoch+1]:.4e},\t'
57
+ f'train_acc = {100 * training_acc[epoch+1]:.2f}%,\t'
58
+ f'test_acc = {100 * test_acc[epoch+1]:.2f}%,\t'
59
+ f'time (ep, tr, te) = ({run_time["epoch"]:.2f}, {run_time["train"]:.2f}, {run_time["test"]:.2f})')
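
The printer above indexes every metric container by `epoch + 1` and expects a `run_time` dict with "epoch", "train" and "test" keys; a hedged sketch of a call with placeholder values (the dict-of-floats layout is an assumption inferred from the indexing):

    Paras = {"epochs": {"MNIST": 10}, "data_name": "MNIST"}
    epoch = 0
    epoch_loss = {1: 0.93}
    train_loss = {1: 0.88}
    train_acc = {1: 0.71}
    test_loss = {1: 0.90}
    test_acc = {1: 0.69}
    run_time = {"epoch": 12.3, "train": 10.1, "test": 2.2}
    print_per_epoch_info(False, epoch, Paras, epoch_loss, train_loss,
                         train_acc, test_loss, test_acc, run_time)
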