deepliif 1.2.1__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepliif/train.py DELETED
@@ -1,280 +0,0 @@
1
- """
2
- Keep this train.py up-to-date with train() in cli.py!
3
- They are EXACTLY THE SAME.
4
- """
5
-
6
- import os
7
- import json
8
- import time
9
- import random
10
-
11
- import click
12
- import cv2
13
- import torch
14
- import numpy as np
15
- from PIL import Image
16
-
17
- from deepliif.data import create_dataset, AlignedDataset, transform
18
- from deepliif.models import inference, postprocess, compute_overlap, init_nets, DeepLIIFModel
19
- from deepliif.util import allowed_file, Visualizer
20
-
21
- import torch.distributed as dist
22
- import os
23
- import torch
24
-
25
- import numpy as np
26
- import random
27
- import torch
28
-
29
def set_seed(seed=0, rank=None):
    """Seed every RNG source so training can run deterministically.

    Parameters:
        seed (int or None) -- base seed; None disables deterministic training
        rank (int or None) -- rank of the current process; when given, it is
                              added to the base seed so each process gets a
                              unique (but reproducible) seed

    Returns:
        bool -- True when deterministic training was enabled, False otherwise
    """
    # record the requested base seed for downstream consumers
    os.environ['DEEPLIIF_SEED'] = str(seed)

    if seed is None:
        print(f'not using deterministic training')
        return False

    # derive a per-process seed when a rank is supplied
    seed_final = seed if rank is None else seed + int(rank)

    os.environ['PYTHONHASHSEED'] = str(seed_final)
    random.seed(seed_final)
    np.random.seed(seed_final)
    torch.manual_seed(seed_final)
    torch.cuda.manual_seed(seed_final)
    torch.cuda.manual_seed_all(seed_final)
    # cudnn autotuning is non-deterministic; force the deterministic path
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.use_deterministic_algorithms(True)
    print(f'deterministic training, seed set to {seed_final}')
    return True
58
-
59
-
60
-
61
-
62
@click.command()
@click.option('--dataroot', required=True, type=str,
              help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
@click.option('--name', default='experiment_name',
              help='name of the experiment. It decides where to store samples and models')
@click.option('--gpu-ids', type=int, multiple=True, help='gpu-ids 0 gpu-ids 1 or gpu-ids -1 for CPU')
@click.option('--checkpoints-dir', default='./checkpoints', help='models are saved here')
@click.option('--targets-no', default=5, help='number of targets')
# model parameters
@click.option('--input-nc', default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
@click.option('--output-nc', default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
@click.option('--ngf', default=64, help='# of gen filters in the last conv layer')
@click.option('--ndf', default=64, help='# of discrim filters in the first conv layer')
@click.option('--net-d', default='n_layers',
              help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 '
                   'PatchGAN. n_layers allows you to specify the layers in the discriminator')
@click.option('--net-g', default='resnet_9blocks',
              help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_512 | unet_256 | unet_128]')
@click.option('--n-layers-d', default=4, help='only used if netD==n_layers')
@click.option('--norm', default='batch',
              help='instance normalization or batch normalization [instance | batch | none]')
@click.option('--init-type', default='normal',
              help='network initialization [normal | xavier | kaiming | orthogonal]')
@click.option('--init-gain', default=0.02, help='scaling factor for normal, xavier and orthogonal.')
@click.option('--padding-type', default='reflect', help='network padding type.')
@click.option('--no-dropout', is_flag=True, help='no dropout for the generator')
# dataset parameters
@click.option('--direction', default='AtoB', help='AtoB or BtoA')
@click.option('--serial-batches', is_flag=True,
              help='if true, takes images in order to make batches, otherwise takes them randomly')
@click.option('--num-threads', default=4, help='# threads for loading data')
@click.option('--batch-size', default=1, help='input batch size')
@click.option('--load-size', default=512, help='scale images to this size')
@click.option('--crop-size', default=512, help='then crop to this size')
@click.option('--max-dataset-size', type=int,
              help='Maximum number of samples allowed per dataset. If the dataset directory contains more than '
                   'max_dataset_size, only a subset is loaded.')
@click.option('--preprocess', type=str,
              help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | '
                   'scale_width_and_crop | none]')
@click.option('--no-flip', is_flag=True,
              help='if specified, do not flip the images for data augmentation')
@click.option('--display-winsize', default=512, help='display window size for both visdom and HTML')
# additional parameters
@click.option('--epoch', default='latest',
              help='which epoch to load? set to latest to use latest cached model')
@click.option('--load-iter', default=0,
              help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; '
                   'otherwise, the code will load models by [epoch]')
@click.option('--verbose', is_flag=True, help='if specified, print more debugging information')
@click.option('--lambda-L1', default=100.0, help='weight for L1 loss')
@click.option('--is-train', is_flag=True, default=True)
@click.option('--continue-train', is_flag=True, help='continue training: load the latest model')
@click.option('--epoch-count', type=int, default=0,
              help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>')
@click.option('--phase', default='train', help='train, val, test, etc')
# training parameters
@click.option('--n-epochs', type=int, default=100,
              help='number of epochs with the initial learning rate')
@click.option('--n-epochs-decay', type=int, default=100,
              help='number of epochs to linearly decay learning rate to zero')
@click.option('--beta1', default=0.5, help='momentum term of adam')
@click.option('--lr', default=0.0002, help='initial learning rate for adam')
@click.option('--lr-policy', default='linear',
              help='learning rate policy. [linear | step | plateau | cosine]')
@click.option('--lr-decay-iters', type=int, default=50,
              help='multiply by a gamma every lr_decay_iters iterations')
# visdom and HTML visualization parameters
@click.option('--display-freq', default=400, help='frequency of showing training results on screen')
@click.option('--display-ncols', default=4,
              help='if positive, display all images in a single visdom web panel with certain number of images per row.')
@click.option('--display-id', default=1, help='window id of the web display')
@click.option('--display-server', default="http://localhost", help='visdom server of the web display')
@click.option('--display-env', default='main',
              help='visdom display environment name (default is "main")')
@click.option('--display-port', default=8097, help='visdom port of the web display')
@click.option('--update-html-freq', default=1000, help='frequency of saving training results to html')
@click.option('--print-freq', default=100, help='frequency of showing training results on console')
@click.option('--no-html', is_flag=True,
              help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
@click.option('--save-latest-freq', default=500, help='frequency of saving the latest results')
@click.option('--save-epoch-freq', default=100,
              help='frequency of saving checkpoints at the end of epochs')
@click.option('--save-by-iter', is_flag=True, help='whether saves model by iteration')
@click.option('--remote', type=bool, default=False, help='whether isolate visdom checkpoints or not; if False, you can run a separate visdom server anywhere that consumes the checkpoints')
@click.option('--remote-transfer-cmd', type=str, default=None, help='module and function to be used to transfer remote files to target storage location, for example mymodule.myfunction')
@click.option('--local-rank', type=int, default=None, help='placeholder argument for torchrun, no need for manual setup')
@click.option('--seed', type=int, default=None, help='basic seed to be used for deterministic training, default to None (non-deterministic)')
def train(dataroot, name, gpu_ids, checkpoints_dir, targets_no, input_nc, output_nc, ngf, ndf, net_d, net_g,
          n_layers_d, norm, init_type, init_gain, padding_type, no_dropout, direction, serial_batches, num_threads,
          batch_size, load_size, crop_size, max_dataset_size, preprocess, no_flip, display_winsize, epoch, load_iter,
          verbose, lambda_l1, is_train, display_freq, display_ncols, display_id, display_server, display_env,
          display_port, update_html_freq, print_freq, no_html, save_latest_freq, save_epoch_freq, save_by_iter,
          continue_train, epoch_count, phase, lr_policy, n_epochs, n_epochs_decay, beta1, lr, lr_decay_iters,
          remote, local_rank, remote_transfer_cmd, seed):
    """General-purpose training script for multi-task image-to-image translation.

    This script works for various models (with option '--model': e.g., DeepLIIF) and
    different datasets (with option '--dataset_mode': e.g., aligned, unaligned, single, colorization).
    You need to specify the dataset ('--dataroot'), experiment name ('--name'), and model ('--model').

    It first creates model, dataset, and visualizer given the option.
    It then does standard network training. During the training, it also visualize/save the images, print/save the loss
    plot, and save models.The script supports continue/resume training.
    Use '--continue_train' to resume your previous training.
    """
    # NOTE(review): the --local-rank CLI option is immediately shadowed by the
    # env var below; it exists only so torchrun's injected argument is accepted.
    local_rank = os.getenv('LOCAL_RANK')  # DDP single node training triggered by torchrun has LOCAL_RANK
    rank = os.getenv('RANK')  # if using DDP with multiple nodes, please provide global rank in env var RANK

    if len(gpu_ids) > 0:
        if local_rank is not None:
            # pin each DDP process to the GPU selected by its local rank
            local_rank = int(local_rank)
            torch.cuda.set_device(gpu_ids[local_rank])
            gpu_ids = [gpu_ids[local_rank]]
        else:
            torch.cuda.set_device(gpu_ids[0])

    if local_rank is not None:  # LOCAL_RANK will be assigned a rank number if torchrun ddp is used
        dist.init_process_group(backend='nccl')
        print('local rank:', local_rank)
        flag_deterministic = set_seed(seed, local_rank)
    elif rank is not None:
        flag_deterministic = set_seed(seed, rank)
    else:
        flag_deterministic = set_seed(seed)

    if flag_deterministic:
        # reflection/replication padding have no deterministic CUDA kernels
        padding_type = 'zero'
        print('padding type is forced to zero padding, because neither refection pad2d or replication pad2d has a deterministic implementation')

    # create a dataset given dataset_mode and other options
    dataset = AlignedDataset(dataroot, load_size, crop_size, input_nc, output_nc, direction, targets_no, preprocess,
                             no_flip, phase, max_dataset_size)

    dataset = create_dataset(dataset, batch_size, serial_batches, num_threads, max_dataset_size, gpu_ids)
    # get the number of images in the dataset.
    click.echo('The number of training images = %d' % len(dataset))

    # create a model given model and other options
    model = DeepLIIFModel(gpu_ids, is_train, checkpoints_dir, name, preprocess, targets_no, input_nc, output_nc, ngf,
                          net_g, norm, no_dropout, init_type, init_gain, padding_type, ndf, net_d, n_layers_d, lr,
                          beta1, lambda_l1, lr_policy, remote_transfer_cmd)
    # regular setup: load and print networks; create schedulers
    model.setup(lr_policy, epoch_count, n_epochs, n_epochs_decay, lr_decay_iters, continue_train, load_iter, epoch,
                verbose)

    # create a visualizer that display/save images and plots
    visualizer = Visualizer(display_id, is_train, no_html, display_winsize, name, display_port, display_ncols,
                            display_server, display_env, checkpoints_dir, remote, remote_transfer_cmd)
    # the total number of training iterations
    total_iters = 0

    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
    for epoch in range(epoch_count, n_epochs + n_epochs_decay + 1):
        # timer for entire epoch
        epoch_start_time = time.time()
        # timer for data loading per iteration
        iter_data_time = time.time()
        # the number of training iterations in current epoch, reset to 0 every epoch
        epoch_iter = 0
        # reset the visualizer: make sure it saves the results to HTML at least once every epoch
        visualizer.reset()

        # https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler
        if local_rank is not None or os.getenv('RANK') is not None:  # if DDP is used, either on one node or multi nodes
            if not serial_batches:  # if we want randome order in mini batches
                dataset.sampler.set_epoch(epoch)

        # inner loop within one epoch
        for i, data in enumerate(dataset):
            # timer for computation per iteration
            iter_start_time = time.time()
            # t_data is only read below under the same print_freq condition,
            # so it is always bound before use
            if total_iters % print_freq == 0:
                t_data = iter_start_time - iter_data_time

            total_iters += batch_size
            epoch_iter += batch_size
            # unpack data from dataset and apply preprocessing
            model.set_input(data)
            # calculate loss functions, get gradients, update network weights
            model.optimize_parameters()

            # display images on visdom and save images to a HTML file
            if total_iters % display_freq == 0:
                save_result = total_iters % update_html_freq == 0
                model.compute_visuals()
                visualizer.display_current_results(model.get_current_visuals(), epoch, save_result)

            # print training losses and save logging information to the disk
            if total_iters % print_freq == 0:
                losses = model.get_current_losses()
                t_comp = (time.time() - iter_start_time) / batch_size
                visualizer.print_current_losses(epoch, epoch_iter, losses, t_comp, t_data)
                if display_id > 0:
                    visualizer.plot_current_losses(epoch, float(epoch_iter) / len(dataset), losses)

            # cache our latest model every <save_latest_freq> iterations
            if total_iters % save_latest_freq == 0:
                print('saving the latest model (epoch %d, total_iters %d)' % (epoch, total_iters))
                save_suffix = 'iter_%d' % total_iters if save_by_iter else 'latest'
                model.save_networks(save_suffix)

            iter_data_time = time.time()

        # cache our model every <save_epoch_freq> epochs
        if epoch % save_epoch_freq == 0:
            print('saving the model at the end of epoch %d, iters %d' % (epoch, total_iters))
            model.save_networks('latest')
            model.save_networks(epoch)

        print('End of epoch %d / %d \t Time Taken: %d sec' % (
            epoch, n_epochs + n_epochs_decay, time.time() - epoch_start_time))
        # update learning rates at the end of every epoch.
        model.update_learning_rate()


if __name__ == '__main__':
    train()
@@ -1,255 +0,0 @@
1
- """This module contains simple helper functions """
2
- import os
3
- from time import time
4
- from functools import wraps
5
-
6
- import torch
7
- import numpy as np
8
- from PIL import Image
9
- import cv2
10
- from skimage.metrics import structural_similarity as ssim
11
-
12
-
13
def timeit(f):
    """Decorator that prints the wall-clock duration of every call to *f*.

    The wrapped function's return value and metadata (via functools.wraps)
    are preserved unchanged.
    """
    @wraps(f)
    def timed_call(*args, **kwargs):
        ts = time()
        result = f(*args, **kwargs)
        print(f'{f.__name__} {time() - ts}')
        return result

    return timed_call
23
-
24
-
25
def diagnose_network(net, name='network'):
    """Calculate and print the mean of average absolute(gradients)

    Parameters:
        net (torch network) -- Torch network
        name (str) -- the name of the network
    """
    mean = 0.0
    count = 0
    # accumulate the per-parameter mean |grad|, skipping params without gradients
    for param in net.parameters():
        if param.grad is None:
            continue
        mean += torch.mean(torch.abs(param.grad.data))
        count += 1
    # avoid division by zero when no parameter has a gradient yet
    if count:
        mean /= count
    print(name)
    print(mean)
42
-
43
-
44
def save_image(image_numpy, image_path, aspect_ratio=1.0):
    """Save a numpy image to the disk

    Parameters:
        image_numpy (numpy array) -- input numpy array, shape (H, W, C)
        image_path (str)          -- the path of the image
        aspect_ratio (float)      -- when != 1.0, one axis is rescaled

    Arrays with more than 3 channels are treated as several images stacked
    along the channel axis and are written side by side in one image.
    """
    nc = image_numpy.shape[2]

    if nc > 3:
        # infer the per-image channel count: prefer 3 (RGB), then 2, else 1
        if nc % 3 == 0:
            nc_img = 3
        elif nc % 2 == 0:
            nc_img = 2
        else:
            nc_img = 1
        no_img = nc // nc_img
        print(f'image (numpy) has {nc}>3 channels, inferred to have {no_img} images each with {nc_img} channel(s)')
        split_points = [nc_img * i for i in range(1, no_img)]
        image_numpy = np.concatenate(np.dsplit(image_numpy, split_points), axis=1)  # stack horizontally

    image_pil = Image.fromarray(image_numpy)
    h, w, _ = image_numpy.shape

    # NOTE(review): PIL's resize expects (width, height); passing (h, ...) /
    # (..., w) here mirrors the original code and is only shape-correct for
    # square images — confirm intent before changing.
    if aspect_ratio > 1.0:
        image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
    if aspect_ratio < 1.0:
        image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
    image_pil.save(image_path)
76
-
77
-
78
def print_numpy(x, val=True, shp=False):
    """Print the mean, min, max, median, std, and size of a numpy array

    Parameters:
        val (bool) -- if print the values of the numpy array
        shp (bool) -- if print the shape of the numpy array
    """
    # work on float64 so the statistics are computed at full precision
    x = x.astype(np.float64)
    if shp:
        print('shape,', x.shape)
    if val:
        flat = x.flatten()
        stats = (np.mean(flat), np.min(flat), np.max(flat), np.median(flat), np.std(flat))
        print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % stats)
92
-
93
-
94
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths, or a single path
    """
    # normalize to a list: a plain string (or any non-list) is a single path
    is_path_list = isinstance(paths, list) and not isinstance(paths, str)
    targets = paths if is_path_list else [paths]
    for target in targets:
        mkdir(target)
105
-
106
-
107
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # nothing to do when the path already exists (directory or file)
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True)
115
-
116
-
117
import time

# accumulated wall-clock time (seconds) spent in each stage of
# tensor2im / tensor_to_pil; updated in place by those functions
time_tensor = 0
time_convert = 0
time_transpose = 0
time_astype = 0
time_topil = 0
time_scale = 0


def print_times():
    """Print the accumulated per-stage timings collected by tensor2im/tensor_to_pil."""
    print('Time to get tensor:', round(time_tensor, 1), flush=True)
    print('Time to convert:', round(time_convert, 1), flush=True)
    print('Time to transpose:', round(time_transpose, 1), flush=True)
    print('Time to scale:', round(time_scale, 1), flush=True)
    # BUG FIX: this line previously printed time_transpose instead of time_astype
    print('Time for astype:', round(time_astype, 1), flush=True)
    print('Time to pil:', round(time_topil, 1), flush=True)
131
-
132
def tensor2im(input_image, imtype=np.uint8):
    """Converts a Tensor array into a numpy image array.

    Parameters:
        input_image (tensor) -- the input image tensor array
        imtype (type) -- the desired type of the converted numpy array

    A torch.Tensor is converted to a (256, 256, 3) numpy array scaled from
    [-1, 1] to [0, 255]; a numpy input is returned as-is (only cast to
    imtype); any other type is returned unchanged. Each stage's duration is
    accumulated into the module-level time_* counters for print_times().
    """
    if not isinstance(input_image, np.ndarray):
        if isinstance(input_image, torch.Tensor):  # get the data from a variable
            ts = time.time()
            image_tensor = input_image.data
            te = time.time()
            global time_tensor
            time_tensor += (te - ts)
        else:
            # neither numpy nor torch: pass through untouched
            return input_image
        ts = time.time()
        # take the first image of the batch; move to CPU as float32
        image_numpy = image_tensor[0].cpu().float().numpy()  # convert it into a numpy array
        te = time.time()
        global time_convert
        time_convert += (te - ts)
        if image_numpy.shape[0] == 1:  # grayscale to RGB
            image_numpy = np.tile(image_numpy, (3, 1, 1))
        ts = time.time()
        # image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0  # post-processing: tranpose and scaling
        image_numpy = np.transpose(image_numpy, (1, 2, 0))  # CHW -> HWC
        te = time.time()
        global time_transpose
        time_transpose += (te - ts)
        ts = time.time()
        # NOTE(review): output size is hard-coded to 256x256 here — presumably a
        # deliberate downscale for visualization; confirm against callers.
        image_numpy = cv2.resize(image_numpy, (256, 256), interpolation=cv2.INTER_AREA)
        # rescale from tanh range [-1, 1] to pixel range [0, 255]
        image_numpy = (image_numpy + 1) / 2.0 * 255.0
        te = time.time()
        global time_scale
        time_scale += (te - ts)
    else:  # if it is a numpy array, do nothing
        image_numpy = input_image
        # NOTE(review): in the published diff this early return's indentation is
        # ambiguous; it is placed inside the numpy branch so the timed astype
        # below stays reachable for the tensor path.
        return image_numpy.astype(imtype)
    ts = time.time()
    image_numpy = image_numpy.astype(imtype)
    te = time.time()
    global time_astype
    time_astype += (te - ts)
    return image_numpy
176
-
177
-
178
def tensor_to_pil(t):
    """Convert tensor *t* to a PIL Image via tensor2im, timing the PIL step.

    The time spent inside Image.fromarray is accumulated into the
    module-level time_topil counter for print_times().
    """
    global time_topil
    array = tensor2im(t)
    start = time.time()
    pil_image = Image.fromarray(array)
    time_topil += time.time() - start
    return pil_image
188
-
189
-
190
def calculate_ssim(img1, img2):
    """Structural similarity between two images, using img2's value range."""
    value_range = img2.max() - img2.min()
    return ssim(img1, img2, data_range=value_range)
192
-
193
-
194
def check_multi_scale(img1, img2):
    """Find the tile size at which img2's tiles best match the reference img1.

    For each square tile size in 100..900 (step 100), every complete tile of
    img2 is resized to img1's size, its SSIM against img1 is computed, and
    the per-size SSIMs are averaged. Returns the tile size with the highest
    mean SSIM; defaults to 512 when no tiling scores above 0.

    Parameters:
        img1 -- reference image (PIL image or numpy array)
        img2 -- image to scan (PIL image or numpy array)
    """
    img1 = np.array(img1)
    img2 = np.array(img2)
    max_ssim = (512, 0)
    for tile_size in range(100, 1000, 100):
        image_ssim = 0
        tile_no = 0
        for i in range(0, img2.shape[0], tile_size):
            for j in range(0, img2.shape[1], tile_size):
                # only score tiles that fit entirely inside img2
                if i + tile_size <= img2.shape[0] and j + tile_size <= img2.shape[1]:
                    tile = img2[i: i + tile_size, j: j + tile_size]
                    # BUG FIX: cv2.resize takes (width, height) = (cols, rows);
                    # the previous (shape[0], shape[1]) order produced a
                    # transposed size for non-square img1, crashing the SSIM.
                    tile = cv2.resize(tile, (img1.shape[1], img1.shape[0]))
                    tile_ssim = calculate_ssim(img1, tile)
                    image_ssim += tile_ssim
                    tile_no += 1
        if tile_no > 0:
            image_ssim /= tile_no
            if max_ssim[1] < image_ssim:
                max_ssim = (tile_size, image_ssim)
    return max_ssim[0]
214
-
215
-
216
- import subprocess
217
- import os
218
- from threading import Thread , Timer
219
- import sched, time
220
-
221
# modified from https://stackoverflow.com/questions/67707828/how-to-get-every-seconds-gpu-usage-in-python
def get_gpu_memory(gpu_id=0):
    """
    Currently collects gpu memory info for a given gpu id.

    Returns the used memory (MiB) reported by nvidia-smi for *gpu_id*.
    Raises RuntimeError when the nvidia-smi invocation fails.
    """
    COMMAND = "nvidia-smi --query-gpu=memory.used --format=csv"
    try:
        raw = subprocess.check_output(COMMAND.split(), stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    # drop the trailing empty line, then the csv header; each remaining line is "NNN MiB"
    memory_use_info = raw.decode('ascii').split('\n')[:-1][1:]
    memory_use_values = [int(line.split()[0]) for line in memory_use_info]

    return memory_use_values[gpu_id]
237
-
238
class HardwareStatus():
    """Polls GPU memory usage once per second on a background Timer thread."""

    def __init__(self):
        self.gpu_mem = []  # collected memory.used samples (MiB), one per poll
        self.timer = None  # handle of the pending threading.Timer, if any

    def get_status_every_sec(self, gpu_id=0):
        """
        This function calls itself every 1 sec and appends the gpu_memory.
        """
        # BUG FIX: forward gpu_id to the rescheduled call; previously every
        # recursive invocation silently fell back to the default gpu_id=0.
        self.timer = Timer(1.0, self.get_status_every_sec, args=(gpu_id,))
        self.timer.start()
        self.gpu_mem.append(get_gpu_memory(gpu_id))

    def stop_timer(self):
        """Cancel the pending timer; safe to call even if polling never started."""
        if self.timer is not None:
            self.timer.cancel()
254
-
255
-