deepliif 1.1.9__py3-none-any.whl → 1.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cli.py CHANGED
@@ -14,7 +14,7 @@ from deepliif.models import init_nets, infer_modalities, infer_results_for_wsi,
  from deepliif.util import allowed_file, Visualizer, get_information, test_diff_original_serialized, disable_batchnorm_tracking_stats
  from deepliif.util.util import mkdirs, check_multi_scale
  # from deepliif.util import infer_results_for_wsi
- from deepliif.options import Options
+ from deepliif.options import Options, print_options
 
  import torch.distributed as dist
 
@@ -59,29 +59,6 @@ def set_seed(seed=0,rank=None):
  def ensure_exists(d):
      if not os.path.exists(d):
          os.makedirs(d)
-
- def print_options(opt):
-     """Print and save options
-
-     It will print both current options and default values (if different).
-     It will save options into a text file / [checkpoints_dir] / opt.txt
-     """
-     message = ''
-     message += '----------------- Options ---------------\n'
-     for k, v in sorted(vars(opt).items()):
-         comment = ''
-         message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
-     message += '----------------- End -------------------'
-     print(message)
-
-     # save to the disk
-     if opt.phase == 'train':
-         expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
-         mkdirs(expr_dir)
-         file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
-         with open(file_name, 'wt') as opt_file:
-             opt_file.write(message)
-             opt_file.write('\n')
 
 
  @click.group()
@@ -212,6 +189,18 @@ def train(dataroot, name, gpu_ids, checkpoints_dir, input_nc, output_nc, ngf, nd
      plot, and save models. The script supports continue/resume training.
      Use '--continue_train' to resume your previous training.
      """
+     assert model in ['DeepLIIF','DeepLIIFExt','SDG'], f'model class {model} is not implemented'
+     if model == 'DeepLIIF':
+         seg_no = 1
+     elif model == 'DeepLIIFExt':
+         if seg_gen:
+             seg_no = modalities_no
+         else:
+             seg_no = 0
+     else:  # SDG
+         seg_no = 0
+         seg_gen = False
+
      d_params = locals()
 
      if gpu_ids and gpu_ids[0] == -1:
@@ -241,11 +230,26 @@ def train(dataroot, name, gpu_ids, checkpoints_dir, input_nc, output_nc, ngf, nd
          d_params['padding'] = 'zero'
          print('padding type is forced to zero padding, because neither reflection pad2d nor replication pad2d has a deterministic implementation')
 
+     # infer number of input images
+     dir_data_train = dataroot + '/train'
+     fns = os.listdir(dir_data_train)
+     fns = [x for x in fns if x.endswith('.png')]
+     img = Image.open(f"{dir_data_train}/{fns[0]}")
+
+     num_img = img.size[0] / img.size[1]
+     assert int(num_img) == num_img, f'img size {img.size[0]} / {img.size[1]} = {num_img} is not an integer'
+     num_img = int(num_img)
+
+     input_no = num_img - modalities_no - seg_no
+     assert input_no > 0, f'inferred number of input images is {input_no}; should be greater than 0'
+     d_params['input_no'] = input_no
+     d_params['scale_size'] = img.size[1]
+
      # create a dataset given dataset_mode and other options
      # dataset = AlignedDataset(opt)
 
      opt = Options(d_params=d_params)
-     print_options(opt)
+     print_options(opt, save=True)
 
      dataset = create_dataset(opt)
      # get the number of images in the dataset.
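To make the input-count inference above concrete: training samples are square tiles concatenated horizontally into one PNG, so the tile count is the width divided by the height, and whatever is left after the target modalities and segmentation tiles must be input images. A sketch of the same arithmetic, with hypothetical sizes (not from the package):

```python
# Hypothetical example of the input-count inference used by `deepliif train`.
from PIL import Image

img = Image.new('RGB', (2560, 512))        # stand-in for the first training PNG
num_img = img.size[0] / img.size[1]        # 2560 / 512 = 5.0 tiles
assert int(num_img) == num_img             # width must be an exact multiple of the height
modalities_no, seg_no = 4, 0               # e.g. an SDG run: no segmentation tile
input_no = int(num_img) - modalities_no - seg_no   # 5 - 4 - 0 = 1 input image
scale_size = img.size[1]                   # 512
print(input_no, scale_size)                # -> 1 512
```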
@@ -468,28 +472,30 @@ def trainlaunch(**kwargs):
 
 
  @cli.command()
- @click.option('--models-dir', default='./model-server/DeepLIIF_Latest_Model', help='reads models from here')
+ @click.option('--model-dir', default='./model-server/DeepLIIF_Latest_Model', help='reads models from here')
  @click.option('--output-dir', help='saves results here.')
- @click.option('--tile-size', type=int, default=None, help='tile size')
- @click.option('--device', default='cpu', type=str, help='device to load model, either cpu or gpu')
+ #@click.option('--tile-size', type=int, default=None, help='tile size')
+ @click.option('--device', default='cpu', type=str, help='device to load model for the similarity test, either cpu or gpu')
  @click.option('--verbose', default=0, type=int,help='saves results here.')
- def serialize(models_dir, output_dir, tile_size, device, verbose):
+ def serialize(model_dir, output_dir, device, verbose):
      """Serialize DeepLIIF models using Torchscript
      """
-     if tile_size is None:
-         tile_size = 512
-     output_dir = output_dir or models_dir
+     #if tile_size is None:
+     #    tile_size = 512
+     output_dir = output_dir or model_dir
      ensure_exists(output_dir)
 
      # copy train_opt.txt to the target location
      import shutil
-     if models_dir != output_dir:
-         shutil.copy(f'{models_dir}/train_opt.txt',f'{output_dir}/train_opt.txt')
+     if model_dir != output_dir:
+         shutil.copy(f'{model_dir}/train_opt.txt',f'{output_dir}/train_opt.txt')
 
-     sample = transform(Image.new('RGB', (tile_size, tile_size)))
+     opt = Options(path_file=os.path.join(model_dir,'train_opt.txt'), mode='test')
+     sample = transform(Image.new('RGB', (opt.scale_size, opt.scale_size)))
+     sample = torch.cat([sample]*opt.input_no, 1)
 
      with click.progressbar(
-         init_nets(models_dir, eager_mode=True, phase='test').items(),
+         init_nets(model_dir, eager_mode=True, phase='test').items(),
          label='Tracing nets',
          item_show_func=lambda n: n[0] if n else n
      ) as bar:
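The tracing sample built above no longer uses a fixed 512×512 dummy input; `scale_size` and `input_no` now come from `train_opt.txt`, and one 3-channel block is stacked per input image. A sketch of the resulting tensor shape, assuming `transform()` returns a 1×3×H×W tensor as elsewhere in deepliif (values hypothetical):

```python
# Editor's sketch: shape of the dummy input traced by `deepliif serialize`.
import torch

scale_size, input_no = 512, 2                       # hypothetical values from train_opt.txt
sample = torch.zeros(1, 3, scale_size, scale_size)  # stand-in for transform(Image.new('RGB', ...))
sample = torch.cat([sample] * input_no, 1)          # one 3-channel block per input modality
print(sample.shape)                                 # -> torch.Size([1, 6, 512, 512])
```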
@@ -508,7 +514,7 @@ def serialize(models_dir, output_dir, tile_size, device, verbose):
 
      # test: whether the original and the serialized models produce highly similar predictions
      print('testing similarity between prediction from original vs serialized models...')
-     models_original = init_nets(models_dir,eager_mode=True,phase='test')
+     models_original = init_nets(model_dir,eager_mode=True,phase='test')
      models_serialized = init_nets(output_dir,eager_mode=False,phase='test')
      if device == 'gpu':
          sample = sample.cuda()
@@ -590,11 +596,12 @@ def test(input_dir, output_dir, tile_size, model_dir, gpu_ids, region_size, eage
                  filename.replace('.' + filename.split('.')[-1], f'_{name}.png')
              ))
 
-             with open(os.path.join(
-                 output_dir,
-                 filename.replace('.' + filename.split('.')[-1], f'.json')
-             ), 'w') as f:
-                 json.dump(scoring, f, indent=2)
+             if scoring is not None:
+                 with open(os.path.join(
+                     output_dir,
+                     filename.replace('.' + filename.split('.')[-1], f'.json')
+                 ), 'w') as f:
+                     json.dump(scoring, f, indent=2)
 
  @cli.command()
  @click.option('--input-dir', type=str, required=True, help='Path to input images')
deepliif/data/aligned_dataset.py CHANGED
@@ -26,6 +26,8 @@ class AlignedDataset(BaseDataset):
          self.output_nc = opt.input_nc if opt.direction == 'BtoA' else opt.output_nc
          self.no_flip = opt.no_flip
          self.modalities_no = opt.modalities_no
+         self.seg_no = opt.seg_no
+         self.input_no = opt.input_no
          self.seg_gen = opt.seg_gen
          self.load_size = opt.load_size
          self.crop_size = opt.crop_size
@@ -52,6 +54,8 @@ class AlignedDataset(BaseDataset):
              num_img = self.modalities_no + 1 + 1  # +1 for segmentation channel, +1 for input image
          elif self.model == 'DeepLIIFExt':
              num_img = self.modalities_no * 2 + 1 if self.seg_gen else self.modalities_no + 1  # +1 for segmentation channel
+         elif self.model == 'SDG':
+             num_img = self.modalities_no + self.seg_no + self.input_no
          else:
              raise Exception(f'model class {self.model} does not have corresponding implementation in deepliif/data/aligned_dataset.py')
          w2 = int(w / num_img)
@@ -85,6 +89,19 @@ class AlignedDataset(BaseDataset):
                  BS_Array.append(BS)
 
              return {'A': A, 'B': B_Array, 'BS': BS_Array, 'A_paths': AB_path, 'B_paths': AB_path}
+         elif self.model == 'SDG':
+             A_Array = []
+             for i in range(self.input_no):
+                 A = AB.crop((w2 * i, 0, w2 * (i+1), h))
+                 A = A_transform(A)
+                 A_Array.append(A)
+
+             for i in range(self.input_no, self.input_no + self.modalities_no + 1):
+                 B = AB.crop((w2 * i, 0, w2 * (i + 1), h))
+                 B = B_transform(B)
+                 B_Array.append(B)
+
+             return {'A': A_Array, 'B': B_Array, 'A_paths': AB_path, 'B_paths': AB_path}
          else:
              raise Exception(f'model class {self.model} does not have corresponding implementation in deepliif/data/aligned_dataset.py')
 
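The SDG crop geometry above is easiest to see with concrete numbers: each sample PNG holds `input_no` input tiles followed by the target tiles, all side by side, with `w2` the width of one tile. A sketch with hypothetical counts:

```python
# Hypothetical layout for an SDG sample with 2 inputs and 3 target modalities.
input_no, modalities_no, seg_no = 2, 3, 0
num_img = input_no + modalities_no + seg_no     # tiles concatenated horizontally
w, h = 512 * num_img, 512
w2 = int(w / num_img)                           # 512: width of a single tile

a_boxes = [(w2 * i, 0, w2 * (i + 1), h) for i in range(input_no)]
b_boxes = [(w2 * i, 0, w2 * (i + 1), h) for i in range(input_no, num_img)]
print(a_boxes)  # [(0, 0, 512, 512), (512, 0, 1024, 512)]
print(b_boxes)  # [(1024, 0, 1536, 512), (1536, 0, 2048, 512), (2048, 0, 2560, 512)]
```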
deepliif/models/SDG_model.py ADDED
@@ -0,0 +1,189 @@
+ import torch
+ from .base_model import BaseModel
+ from . import networks
+
+
+ class SDGModel(BaseModel):
+     """This class implements the Synthetic Data Generation model (based on DeepLIIFExt), for learning a mapping from input images to modalities given paired data."""
+
+     def __init__(self, opt):
+         """Initialize the SDGModel class.
+
+         Parameters:
+             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
+         """
+         BaseModel.__init__(self, opt)
+
+         self.mod_gen_no = self.opt.modalities_no
+
+         # weights of the modalities in generating the segmentation mask
+         self.seg_weights = [0, 0, 0]
+         if opt.seg_gen:
+             self.seg_weights = [0.3] * self.mod_gen_no
+             self.seg_weights[1] = 0.4
+
+         # self.seg_weights = opt.seg_weights
+         # assert len(self.seg_weights) == self.seg_gen_no, 'The number of the segmentation weights (seg_weights) is not equal to the number of target images (modalities_no)!'
+         # print(self.seg_weights)
+         # loss weights in calculating the final loss
+         self.loss_G_weights = [1 / self.mod_gen_no] * self.mod_gen_no
+         self.loss_GS_weights = [1 / self.mod_gen_no] * self.mod_gen_no
+
+         self.loss_D_weights = [1 / self.mod_gen_no] * self.mod_gen_no
+         self.loss_DS_weights = [1 / self.mod_gen_no] * self.mod_gen_no
+
+         self.loss_names = []
+         self.visual_names = ['real_A']
+         # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
+         for i in range(1, self.mod_gen_no + 1):
+             self.loss_names.extend(['G_GAN_' + str(i), 'G_L1_' + str(i), 'D_real_' + str(i), 'D_fake_' + str(i)])
+             self.visual_names.extend(['fake_B_' + str(i), 'real_B_' + str(i)])
+
+         # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
+         # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
+         if self.is_train:
+             self.model_names = []
+             for i in range(1, self.mod_gen_no + 1):
+                 self.model_names.extend(['G_' + str(i), 'D_' + str(i)])
+
+         else:  # during test time, only load G
+             self.model_names = []
+             for i in range(1, self.mod_gen_no + 1):
+                 self.model_names.extend(['G_' + str(i)])
+
+         # define networks (both generator and discriminator)
+         self.netG = [None for _ in range(self.mod_gen_no)]
+         for i in range(self.mod_gen_no):
+             self.netG[i] = networks.define_G(self.opt.input_nc * self.opt.input_no, self.opt.output_nc, self.opt.ngf, self.opt.net_g, self.opt.norm,
+                                              not self.opt.no_dropout, self.opt.init_type, self.opt.init_gain, self.opt.gpu_ids, self.opt.padding)
+         print('***************************************')
+         print(self.opt.input_nc, self.opt.output_nc, self.opt.ngf, self.opt.net_g, self.opt.norm,
+               not self.opt.no_dropout, self.opt.init_type, self.opt.init_gain, self.opt.gpu_ids, self.opt.padding)
+         print('***************************************')
+
+         if self.is_train:  # define a discriminator; conditional GANs need to take both input and output images; therefore, #channels for D is input_nc + output_nc
+             self.netD = [None for _ in range(self.mod_gen_no)]
+             for i in range(self.mod_gen_no):
+                 self.netD[i] = networks.define_D(self.opt.input_nc * self.opt.input_no + self.opt.output_nc, self.opt.ndf, self.opt.net_d,
+                                                  self.opt.n_layers_D, self.opt.norm, self.opt.init_type, self.opt.init_gain,
+                                                  self.opt.gpu_ids)
+
+         if self.is_train:
+             # define loss functions
+             self.criterionGAN_mod = networks.GANLoss(self.opt.gan_mode).to(self.device)
+             self.criterionGAN_seg = networks.GANLoss(self.opt.gan_mode_s).to(self.device)
+
+             self.criterionSmoothL1 = torch.nn.SmoothL1Loss()
+
+             self.criterionVGG = networks.VGGLoss().to(self.device)
+
+             # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
+             params = []
+             for i in range(len(self.netG)):
+                 params += list(self.netG[i].parameters())
+             self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
+
+             params = []
+             for i in range(len(self.netD)):
+                 params += list(self.netD[i].parameters())
+             self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
+
+             self.optimizers.append(self.optimizer_G)
+             self.optimizers.append(self.optimizer_D)
+
+     def set_input(self, input):
+         """
+         Unpack input data from the dataloader and perform necessary pre-processing steps.
+
+         :param input (dict): include the input image and the output modalities
+         """
+         self.real_A_array = input['A']
+         As = [A.to(self.device) for A in self.real_A_array]
+         self.real_A = torch.cat(As, dim=1)  # shape: 1, (3 x input_no), 512, 512
+
+         self.real_B_array = input['B']
+         self.real_B = []
+         for i in range(len(self.real_B_array)):
+             self.real_B.append(self.real_B_array[i].to(self.device))
+
+         self.real_concatenated = []
+         self.image_paths = input['A_paths']
+
+     def forward(self):
+         """Run forward pass; called by both functions <optimize_parameters> and <test>."""
+         self.fake_B = []
+         for i in range(self.mod_gen_no):
+             self.fake_B.append(self.netG[i](self.real_A))
+
+     def backward_D(self):
+         """Calculate GAN loss for the discriminators"""
+
+         pred_fake = []
+         for i in range(self.mod_gen_no):
+             pred_fake.append(self.netD[i](torch.cat((self.real_A, self.fake_B[i]), 1).detach()))
+
+         self.loss_D_fake = []
+         for i in range(self.mod_gen_no):
+             self.loss_D_fake.append(self.criterionGAN_mod(pred_fake[i], False))
+
+         pred_real = []
+         for i in range(self.mod_gen_no):
+             pred_real.append(self.netD[i](torch.cat((self.real_A, self.real_B[i]), 1)))
+
+         self.loss_D_real = []
+         for i in range(self.mod_gen_no):
+             self.loss_D_real.append(self.criterionGAN_mod(pred_real[i], True))
+
+         # combine losses and calculate gradients
+         # self.loss_D = (self.loss_D_fake[0] + self.loss_D_real[0]) * 0.5 * self.loss_D_weights[0]
+         self.loss_D = torch.tensor(0., device=self.device)
+         for i in range(0, self.mod_gen_no):
+             self.loss_D += (self.loss_D_fake[i] + self.loss_D_real[i]) * 0.5 * self.loss_D_weights[i]
+         self.loss_D.backward()
+
+     def backward_G(self):
+         """Calculate GAN and L1 loss for the generator"""
+         pred_fake = []
+         for i in range(self.mod_gen_no):
+             pred_fake.append(self.netD[i](torch.cat((self.real_A, self.fake_B[i]), 1)))
+
+         self.loss_G_GAN = []
+         self.loss_GS_GAN = []
+         for i in range(self.mod_gen_no):
+             self.loss_G_GAN.append(self.criterionGAN_mod(pred_fake[i], True))
+
+         # Second, G(A) = B
+         self.loss_G_L1 = []
+         self.loss_GS_L1 = []
+         for i in range(self.mod_gen_no):
+             self.loss_G_L1.append(self.criterionSmoothL1(self.fake_B[i], self.real_B[i]) * self.opt.lambda_L1)
+
+         #self.loss_G_VGG = []
+         #for i in range(self.mod_gen_no):
+         #    self.loss_G_VGG.append(self.criterionVGG(self.fake_B[i], self.real_B[i]) * self.opt.lambda_feat)
+
+         # self.loss_G = (self.loss_G_GAN[0] + self.loss_G_L1[0]) * self.loss_G_weights[0]
+         self.loss_G = torch.tensor(0., device=self.device)
+         for i in range(0, self.mod_gen_no):
+             self.loss_G += (self.loss_G_GAN[i] + self.loss_G_L1[i]) * self.loss_G_weights[i]
+             # self.loss_G += (self.loss_G_GAN[i] + self.loss_G_L1[i] + self.loss_G_VGG[i]) * self.loss_G_weights[i]
+         self.loss_G.backward()
+
+     def optimize_parameters(self):
+         self.forward()                   # compute fake images: G(A)
+         # update D
+         for i in range(self.mod_gen_no):
+             self.set_requires_grad(self.netD[i], True)   # enable backprop for D
+
+         self.optimizer_D.zero_grad()     # set D's gradients to zero
+         self.backward_D()                # calculate gradients for D
+         self.optimizer_D.step()          # update D's weights
+
+         # update G
+         for i in range(self.mod_gen_no):
+             self.set_requires_grad(self.netD[i], False)  # D requires no gradients when optimizing G
+
+         self.optimizer_G.zero_grad()     # set G's gradients to zero
+         self.backward_G()                # calculate gradients for G
+         self.optimizer_G.step()          # update G's weights
deepliif/models/__init__.py CHANGED
@@ -133,6 +133,10 @@ def load_eager_models(opt, devices):
          net.eval()
          net = disable_batchnorm_tracking_stats(net)
 
+         # SDG models are still wrapped in DataParallel when loaded (reason unclear); unwrap to the bare module
+         if isinstance(net, torch.nn.DataParallel):
+             net = net.module
+
          nets[name] = net
          nets[name].to(devices[name])
 
@@ -161,7 +165,7 @@ def init_nets(model_dir, eager_mode=False, opt=None, phase='test'):
              ('G4', 'G55'),
              ('G51',)
          ]
-     elif opt.model == 'DeepLIIFExt':
+     elif opt.model in ['DeepLIIFExt','SDG']:
          if opt.seg_gen:
              net_groups = [(f'G_{i+1}',f'GS_{i+1}') for i in range(opt.modalities_no)]
          else:
@@ -172,6 +176,7 @@ def init_nets(model_dir, eager_mode=False, opt=None, phase='test'):
      number_of_gpus_all = torch.cuda.device_count()
      number_of_gpus = len(opt.gpu_ids)
      #print(number_of_gpus)
+
      if number_of_gpus > 0:
          mapping_gpu_ids = {i:idx for i,idx in enumerate(opt.gpu_ids)}
          chunks = [itertools.chain.from_iterable(c) for c in chunker(net_groups, number_of_gpus)]
@@ -206,10 +211,7 @@ def run_torchserve(img, model_path=None, eager_mode=False, opt=None):
          opt: same as eager_mode
      """
      buffer = BytesIO()
-     if opt.model == 'DeepLIIFExt':
-         torch.save(transform(img.resize((1024, 1024))), buffer)
-     else:
-         torch.save(transform(img.resize((512, 512))), buffer)
+     torch.save(transform(img.resize((opt.scale_size, opt.scale_size))), buffer)
 
      torchserve_host = os.getenv('TORCHSERVE_HOST', 'http://localhost')
      res = requests.post(
@@ -229,10 +231,12 @@ def run_dask(img, model_path, eager_mode=False, opt=None):
      model_dir = os.getenv('DEEPLIIF_MODEL_DIR', model_path)
      nets = init_nets(model_dir, eager_mode, opt)
 
-     if opt.model == 'DeepLIIFExt':
-         ts = transform(img.resize((1024, 1024)))
+     if opt.input_no > 1 or opt.model == 'SDG':
+         l_ts = [transform(img_i.resize((opt.scale_size,opt.scale_size))) for img_i in img]
+         ts = torch.cat(l_ts, dim=1)
      else:
-         ts = transform(img.resize((512, 512)))
+         ts = transform(img.resize((opt.scale_size, opt.scale_size)))
+
 
      @delayed
      def forward(input, model):
@@ -249,14 +253,20 @@ def run_dask(img, model_path, eager_mode=False, opt=None):
          lazy_segs['G51'] = forward(ts, nets['G51']).to(torch.device('cpu'))
          segs = compute(lazy_segs)[0]
 
-         seg_weights = [0.25, 0.25, 0.25, 0, 0.25]
-         seg = torch.stack([torch.mul(n, w) for n, w in zip(segs.values(), seg_weights)]).sum(dim=0)
+         weights = {
+             'G51': 0.25,  # IHC
+             'G52': 0.25,  # Hema
+             'G53': 0.25,  # DAPI
+             'G54': 0.00,  # Lap2
+             'G55': 0.25,  # Marker
+         }
+         seg = torch.stack([torch.mul(segs[k], weights[k]) for k in segs.keys()]).sum(dim=0)
 
          res = {k: tensor_to_pil(v) for k, v in gens.items()}
          res['G5'] = tensor_to_pil(seg)
 
          return res
-     elif opt.model == 'DeepLIIFExt':
+     elif opt.model in ['DeepLIIFExt','SDG']:
          seg_map = {'G_' + str(i): 'GS_' + str(i) for i in range(1, opt.modalities_no + 1)}
 
          lazy_gens = {k: forward(ts, nets[k]) for k in seg_map}
@@ -271,14 +281,22 @@ def run_dask(img, model_path, eager_mode=False, opt=None):
 
          return res
      else:
-         raise Exception(f'run_dask() not implemented for {opt.model}')
+         raise Exception(f'run_dask() not fully implemented for {opt.model}')
 
-
+
+ def is_empty_old(tile):
+     # return True if np.mean(np.array(tile) - np.array(mean_background_val)) < 40 else False
+     if isinstance(tile, list):  # for a pair of tiles, only mark as empty (no need for prediction) if ALL tiles are empty
+         return all([True if calculate_background_area(t) > 98 else False for t in tile])
+     else:
+         return True if calculate_background_area(tile) > 98 else False
 
 
  def is_empty(tile):
-     # return True if np.mean(np.array(tile) - np.array(mean_background_val)) < 40 else False
-     return True if calculate_background_area(tile) > 98 else False
+     if isinstance(tile, list):  # for a pair of tiles, only mark as empty (no need for prediction) if ALL tiles are empty
+         return all([True if np.max(image_variance_rgb(t)) < 15 else False for t in tile])
+     else:
+         return True if np.max(image_variance_rgb(tile)) < 15 else False
 
 
  def run_wrapper(tile, run_fn, model_path, eager_mode=False, opt=None):
@@ -293,7 +311,7 @@ def run_wrapper(tile, run_fn, model_path, eager_mode=False, opt=None):
              }
          else:
              return run_fn(tile, model_path, eager_mode, opt)
-     elif opt.model == 'DeepLIIFExt':
+     elif opt.model in ['DeepLIIFExt', 'SDG']:
          if is_empty(tile):
              res = {'G_' + str(i): Image.new(mode='RGB', size=(512, 512)) for i in range(1, opt.modalities_no + 1)}
              res.update({'GS_' + str(i): Image.new(mode='RGB', size=(512, 512)) for i in range(1, opt.modalities_no + 1)})
@@ -362,28 +380,25 @@ def inference(img, tile_size, overlap_size, model_path, use_torchserve=False, ea
          run_fn = run_torchserve if use_torchserve else run_dask
 
          images = {}
-         images['Hema'] = create_image_for_stitching(tile_size, rows, cols)
-         images['DAPI'] = create_image_for_stitching(tile_size, rows, cols)
-         images['Lap2'] = create_image_for_stitching(tile_size, rows, cols)
-         images['Marker'] = create_image_for_stitching(tile_size, rows, cols)
-         images['Seg'] = create_image_for_stitching(tile_size, rows, cols)
+         d_modality2net = {'Hema': 'G1',
+                           'DAPI': 'G2',
+                           'Lap2': 'G3',
+                           'Marker': 'G4',
+                           'Seg': 'G5'}
+
+         for k in d_modality2net.keys():
+             images[k] = create_image_for_stitching(tile_size, rows, cols)
 
          for i in range(cols):
              for j in range(rows):
                  tile = extract_tile(rescaled, tile_size, overlap_size, i, j)
                  res = run_wrapper(tile, run_fn, model_path, eager_mode, opt)
-
-                 stitch_tile(images['Hema'], res['G1'], tile_size, overlap_size, i, j)
-                 stitch_tile(images['DAPI'], res['G2'], tile_size, overlap_size, i, j)
-                 stitch_tile(images['Lap2'], res['G3'], tile_size, overlap_size, i, j)
-                 stitch_tile(images['Marker'], res['G4'], tile_size, overlap_size, i, j)
-                 stitch_tile(images['Seg'], res['G5'], tile_size, overlap_size, i, j)
-
-         images['Hema'] = images['Hema'].resize(img.size)
-         images['DAPI'] = images['DAPI'].resize(img.size)
-         images['Lap2'] = images['Lap2'].resize(img.size)
-         images['Marker'] = images['Marker'].resize(img.size)
-         images['Seg'] = images['Seg'].resize(img.size)
+
+                 for modality_name, net_name in d_modality2net.items():
+                     stitch_tile(images[modality_name], res[net_name], tile_size, overlap_size, i, j)
+
+         for modality_name, output_img in images.items():
+             images[modality_name] = output_img.resize(img.size)
 
          if color_dapi:
              matrix = ( 0, 0, 0, 0,
@@ -403,24 +418,70 @@ def inference(img, tile_size, overlap_size, model_path, use_torchserve=False, ea
          #param_dict = read_train_options(model_path)
          #modalities_no = int(param_dict['modalities_no']) if param_dict else 4
          #seg_gen = (param_dict['seg_gen'] == 'True') if param_dict else True
-
-         tiles = list(generate_tiles(img, tile_size, overlap_size))
-
+
+         rescaled, rows, cols = format_image_for_tiling(img, tile_size, overlap_size)
          run_fn = run_torchserve if use_torchserve else run_dask
-         res = [Tile(t.i, t.j, run_wrapper(t.img, run_fn, model_path, eager_mode, opt)) for t in tiles]
 
          def get_net_tiles(n):
              return [Tile(t.i, t.j, t.img[n]) for t in res]
 
          images = {}
-
-         for i in range(1, opt.modalities_no + 1):
-             images['mod' + str(i)] = stitch(get_net_tiles('G_' + str(i)), tile_size, overlap_size).resize(img.size)
-
+         d_modality2net = {f'mod{i}': f'G_{i}' for i in range(1, opt.modalities_no + 1)}
          if opt.seg_gen:
-             for i in range(1, opt.modalities_no + 1):
-                 images['Seg' + str(i)] = stitch(get_net_tiles('GS_' + str(i)), tile_size, overlap_size).resize(img.size)
+             d_modality2net.update({f'Seg{i}': f'GS_{i}' for i in range(1, opt.modalities_no + 1)})
+
+         for k in d_modality2net.keys():
+             images[k] = create_image_for_stitching(tile_size, rows, cols)
 
+         for i in range(cols):
+             for j in range(rows):
+                 tile = extract_tile(rescaled, tile_size, overlap_size, i, j)
+                 res = run_wrapper(tile, run_fn, model_path, eager_mode, opt)
+
+                 for modality_name, net_name in d_modality2net.items():
+                     stitch_tile(images[modality_name], res[net_name], tile_size, overlap_size, i, j)
+
+         for modality_name, output_img in images.items():
+             images[modality_name] = output_img.resize(img.size)
+
+         return images
+
+     elif opt.model == 'SDG':
+         # SDG can take multiple input images / modalities, so the combined input may be a wide rectangle;
+         # split it to recover each input modality image one by one, then tile each modality image.
+         # At each grid location, tile_pair holds the tile from every input modality image,
+         # and each tile_pair is run through the model to get the output tiles for that location.
+         w, h = img.size
+         w2 = int(w / opt.input_no)
+
+         l_img = []
+         for i in range(opt.input_no):
+             img_i = img.crop((w2 * i, 0, w2 * (i+1), h))
+             rescaled_img_i, rows, cols = format_image_for_tiling(img_i, tile_size, overlap_size)
+             l_img.append(rescaled_img_i)
+
+         run_fn = run_torchserve if use_torchserve else run_dask
+
+         images = {}
+         d_modality2net = {f'mod{i}': f'G_{i}' for i in range(1, opt.modalities_no + 1)}
+         for k in d_modality2net.keys():
+             images[k] = create_image_for_stitching(tile_size, rows, cols)
+
+         for i in range(cols):
+             for j in range(rows):
+                 tile_pair = [extract_tile(rescaled, tile_size, overlap_size, i, j) for rescaled in l_img]
+                 res = run_wrapper(tile_pair, run_fn, model_path, eager_mode, opt)
+
+                 for modality_name, net_name in d_modality2net.items():
+                     stitch_tile(images[modality_name], res[net_name], tile_size, overlap_size, i, j)
+
+         for modality_name, output_img in images.items():
+             images[modality_name] = output_img.resize((w2, w2))
+
          return images
 
      else:
@@ -476,11 +537,15 @@ def infer_modalities(img, tile_size, model_dir, eager_mode=False,
          tile_size = check_multi_scale(Image.open('./images/target.png').convert('L'),
                                        img.convert('L'))
          tile_size = int(tile_size)
+
+     # for those with multiple input modalities, find the correct size to calculate overlap_size
+     input_no = opt.input_no if hasattr(opt, 'input_no') else 1
+     img_size = (img.size[0] / input_no, img.size[1])  # (width, height)
 
      images = inference(
          img,
          tile_size=tile_size,
-         overlap_size=compute_overlap(img.size, tile_size),
+         overlap_size=compute_overlap(img_size, tile_size),
          model_path=model_dir,
          eager_mode=eager_mode,
          color_dapi=color_dapi,
deepliif/options/__init__.py CHANGED
@@ -13,7 +13,6 @@ def read_model_params(file_addr):
          key = line.split(':')[0].strip()
          val = line.split(':')[1].split('[')[0].strip()
          param_dict[key] = val
-     print(param_dict)
      return param_dict
 
  class Options:
@@ -32,19 +31,7 @@ class Options:
                  else:
                      setattr(self,k,v)
              except:
-                 setattr(self,k,v)
-
-         if mode != 'train':
-             # to account for old settings where gpu_ids value is an integer, not a tuple
-             if isinstance(self.gpu_ids,int):
-                 self.gpu_ids = (self.gpu_ids,)
-
-             # to account for old settings before modalities_no was introduced
-             if not hasattr(self,'modalities_no') and hasattr(self,'targets_no'):
-                 self.modalities_no = self.targets_no - 1
-                 del self.targets_no
-
+                 setattr(self,k,v)
 
          if mode == 'train':
              self.is_train = True
@@ -71,32 +58,78 @@ class Options:
              self.checkpoints_dir = str(model_dir.parent)
              self.name = str(model_dir.name)
 
-             self.gpu_ids = []  # gpu_ids is only used by eager mode, set to empty / cpu to be the same as the old settings; non-eager mode will use all gpus
+             #self.gpu_ids = []  # gpu_ids is only used by eager mode, set to empty / cpu to be the same as the old settings; non-eager mode will use all gpus
+
+             # to account for old settings where gpu_ids value is an integer, not a tuple
+             if isinstance(self.gpu_ids,int):
+                 self.gpu_ids = (self.gpu_ids,)
+
+             # to account for old settings before modalities_no was introduced
+             if not hasattr(self,'modalities_no') and hasattr(self,'targets_no'):
+                 self.modalities_no = self.targets_no - 1
+                 del self.targets_no
+
+             # to account for old settings: same as in cli.py train
+             if not hasattr(self,'seg_no'):
+                 if self.model == 'DeepLIIF':
+                     self.seg_no = 1
+                 elif self.model == 'DeepLIIFExt':
+                     if self.seg_gen:
+                         self.seg_no = self.modalities_no
+                     else:
+                         self.seg_no = 0
+                 elif self.model == 'SDG':
+                     self.seg_no = 0
+                     self.seg_gen = False
+                 else:
+                     raise Exception(f'seg_no cannot be automatically determined for {self.model}')
+
+             # to account for old settings: prior to SDG, our models only have 1 input image
+             if not hasattr(self,'input_no'):
+                 self.input_no = 1
+
+             # to account for old settings: before adding scale_size
+             if not hasattr(self, 'scale_size'):
+                 if self.model in ['DeepLIIF','SDG']:
+                     self.scale_size = 512
+                 elif self.model == 'DeepLIIFExt':
+                     self.scale_size = 1024
+                 else:
+                     raise Exception(f'scale_size cannot be automatically determined for {self.model}')
 
      def _get_kwargs(self):
          common_attr = ['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__']
          l_args = [x for x in dir(self) if x not in common_attr]
          return {k:getattr(self,k) for k in l_args}
 
- def print_options(opt):
-     """Print and save options
 
-     It will print both current options and default values (if different).
-     It will save options into a text file / [checkpoints_dir] / opt.txt
-     """
+ def format_options(opt):
      message = ''
      message += '----------------- Options ---------------\n'
      for k, v in sorted(vars(opt).items()):
          comment = ''
          message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
      message += '----------------- End -------------------'
-     print(message)
+     return message
+
+ def print_options(opt, save=False):
+     """Print (and save) options
 
+     It will print both current options and default values (if different).
+     If save=True, it will save options into a text file / [checkpoints_dir] / opt.txt
+     """
+     message = format_options(opt)
+     print(message)
+
      # save to the disk
-     if opt.phase == 'train':
-         expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
-         mkdirs(expr_dir)
-         file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
-         with open(file_name, 'wt') as opt_file:
-             opt_file.write(message)
-             opt_file.write('\n')
+     if save:
+         save_options(opt)
+
+ def save_options(opt):
+     message = format_options(opt)
+     expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
+     mkdirs(expr_dir)
+     file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
+     with open(file_name, 'wt') as opt_file:
+         opt_file.write(message+'\n')
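The refactor above splits the old `print_options` into `format_options` (build the text), `print_options` (print, optionally save) and `save_options` (write `{phase}_opt.txt`). A usage sketch matching the calls that appear elsewhere in this diff (the model path is hypothetical):

```python
# Editor's sketch, assuming a model directory containing train_opt.txt.
import os
from deepliif.options import Options, print_options

model_dir = './model-server/DeepLIIF_Latest_Model'   # hypothetical path
opt = Options(path_file=os.path.join(model_dir, 'train_opt.txt'), mode='test')
print_options(opt)              # print only
# print_options(opt, save=True) # also writes <checkpoints_dir>/<name>/<phase>_opt.txt
```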
deepliif/util/__init__.py CHANGED
@@ -164,6 +164,28 @@ def adjust_background_tile(img):
      return image
 
 
+ def image_variance_gray(img):
+     px = np.asarray(img) if img.mode == 'L' else np.asarray(img.convert('L'))
+     idx = np.logical_and(px != 255, px != 0)
+     val = px[idx]
+     if val.shape[0] == 0:
+         return 0
+     var = np.var(val)
+     return var
+
+
+ def image_variance_rgb(img):
+     px = np.asarray(img) if img.mode == 'RGB' else np.asarray(img.convert('RGB'))
+     nonwhite = np.any(px != [255, 255, 255], axis=-1)
+     nonblack = np.any(px != [0, 0, 0], axis=-1)
+     idx = np.logical_and(nonwhite, nonblack)
+     val = px[idx]
+     if val.shape[0] == 0:
+         return [0, 0, 0]
+     var = np.var(val, axis=0)
+     return var
+
+
  def read_bioformats_image_with_reader(path, channel=0, region=(0, 0, 0, 0)):
      """
      Using this function, you can read a specific region of a large image by giving the region bounding box (XYWH format)
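`image_variance_rgb` is what the new `is_empty` in `deepliif/models/__init__.py` builds on: a tile counts as background when the largest per-channel variance over its non-white, non-black pixels falls below 15. A sketch of that test on synthetic tiles (the threshold 15 is taken from `is_empty`):

```python
# Editor's sketch of the emptiness test built on image_variance_rgb().
import numpy as np
from PIL import Image

flat = Image.new('RGB', (64, 64), (200, 180, 190))   # uniform tile -> variance [0, 0, 0]
noisy = Image.fromarray(np.random.randint(1, 255, (64, 64, 3), dtype=np.uint8))

for tile in (flat, noisy):
    px = np.asarray(tile)
    idx = np.logical_and(np.any(px != [255, 255, 255], axis=-1),
                         np.any(px != [0, 0, 0], axis=-1))
    var = np.var(px[idx], axis=0)
    print(np.max(var) < 15)   # True for flat (treated as empty); False for noisy
```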
deepliif/util/util.py CHANGED
@@ -48,7 +48,23 @@ def save_image(image_numpy, image_path, aspect_ratio=1.0):
          image_numpy (numpy array) -- input numpy array
          image_path (str) -- the path of the image
      """
-
+     x, y, nc = image_numpy.shape
+
+     if nc > 3:
+         if nc % 3 == 0:
+             nc_img = 3
+             no_img = nc // nc_img
+
+         elif nc % 2 == 0:
+             nc_img = 2
+             no_img = nc // nc_img
+         else:
+             nc_img = 1
+             no_img = nc // nc_img
+         print(f'image (numpy) has {nc}>3 channels, inferred to have {no_img} images each with {nc_img} channel(s)')
+         l_image_numpy = np.dsplit(image_numpy,[nc_img*i for i in range(1,no_img)])
+         image_numpy = np.concatenate(l_image_numpy, axis=1)  # stack horizontally
+
      image_pil = Image.fromarray(image_numpy)
      h, w, _ = image_numpy.shape
 
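The multi-image branch above is easiest to see with a concrete array: any image with more than three channels is split into per-image channel blocks and laid out side by side before saving. A sketch with hypothetical sizes:

```python
# Editor's sketch: a (H, W, 6) array becomes two RGB panels stacked horizontally.
import numpy as np

image_numpy = np.zeros((512, 512, 6), dtype=np.uint8)
nc = image_numpy.shape[2]
nc_img = 3 if nc % 3 == 0 else (2 if nc % 2 == 0 else 1)   # channels per image
no_img = nc // nc_img                                      # number of images
panels = np.dsplit(image_numpy, [nc_img * i for i in range(1, no_img)])
side_by_side = np.concatenate(panels, axis=1)
print(side_by_side.shape)   # -> (512, 1024, 3)
```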
deepliif-1.1.10.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepliif
- Version: 1.1.9
+ Version: 1.1.10
  Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
  Home-page: https://github.com/nadeemlab/DeepLIIF
  Author: Parmida93
@@ -61,6 +61,9 @@ segmentation.*
 
  © This code is made available for non-commercial academic purposes.
 
+ ![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+ [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)
+
  ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
  brown/DAB markers -- BCL2, BCL6, CD10, CD3/CD8, Ki67) with corresponding DeepLIIF-generated hematoxylin/mpIF modalities
  and classified (positive (red) and negative (blue) cell) segmentation masks. (a) Overview of DeepLIIF. Given an IHC
@@ -132,7 +135,7 @@ deepliif prepare-training-data --input-dir /path/to/input/images
  To train a model:
  ```
  deepliif train --dataroot /path/to/input/images
-                --name Model_Name 
+                --name Model_Name
  ```
  or
  ```
@@ -178,7 +181,7 @@ The installed `deepliif` uses Dask to perform inference on the input IHC images.
  Before running the `test` command, the model files must be serialized using Torchscript.
  To serialize the model files:
  ```
- deepliif serialize --models-dir /path/to/input/model/files
+ deepliif serialize --model-dir /path/to/input/model/files
                     --output-dir /path/to/output/model/files
  ```
  * By default, the model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
@@ -187,15 +190,17 @@ deepliif serialize --models-dir /path/to/input/model/files
  ## Testing
  To test the model:
  ```
- deepliif test --input-dir /path/to/input/images
-               --output-dir /path/to/output/images
-               --model-dir path/to/the/serialized/model
+ deepliif test --input-dir /path/to/input/images
+               --output-dir /path/to/output/images
+               --model-dir /path/to/the/serialized/model
                --tile-size 512
  ```
  or
  ```
- python test.py --dataroot /path/to/input/images
-                --name Model_Name
+ python test.py --dataroot /path/to/input/images
+                --results_dir /path/to/output/images
+                --checkpoints_dir /path/to/model/files
+                --name Model_Name
  ```
  * The latest version of the pretrained models can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).
  * Before running test on images, the model files must be serialized as described above.
@@ -216,7 +221,7 @@ Based on the available GPU resources, the region-size can be changed.
  ```
  deepliif test --input-dir /path/to/input/images
                --output-dir /path/to/output/images
-               --model-dir path/to/the/serialized/model
+               --model-dir /path/to/the/serialized/model
                --tile-size 512
                --region-size 20000
  ```
@@ -257,25 +262,61 @@ If you don't have access to GPU or appropriate hardware and don't want to instal
 
  ![DeepLIIF Website Demo](images/deepliif-website-demo-03.gif)
 
- DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request
- containing the original image file:
+ ## Cloud API Endpoints
+
+ DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
 
  ```
  POST /api/infer
 
- Parameters
+ File Parameter:
+
+ img (required)
+     Image on which to run DeepLIIF.
+
+ Query String Parameters:
+
+ resolution
+     Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
 
- img (required)
-     file: image to run the models on
+ pil
+     If present, use Pillow to load the image instead of Bio-Formats. Pillow is
+     faster, but works only on common image types (png, jpeg, etc.).
 
- resolution
-     string: resolution used to scan the slide (10x, 20x, 40x), defaults to 40x
+ slim
+     If present, return only the refined segmentation result image.
 
- pil
-     boolean: if true, use PIL.Image.open() to load the image, instead of python-bioformats
+ nopost
+     If present, do not perform postprocessing (returns only inferred images).
 
- slim
-     boolean: if true, return only the segmentation result image
+ prob_thresh
+     Probability threshold used in postprocessing the inferred segmentation map
+     image. The segmentation map value must be above this value in order for a
+     pixel to be included in the final cell segmentation. Valid values are an
+     integer in the range 0-254. Default is 150.
+
+ size_thresh
+     Lower threshold for size gating the cells in postprocessing. Segmented
+     cells must have more pixels than this value in order to be included in the
+     final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+     'Auto' will try to automatically determine this lower bound for size gating
+     based on the distribution of detected cell sizes. Default is 'auto'.
+
+ size_thresh_upper
+     Upper threshold for size gating the cells in postprocessing. Segmented
+     cells must have fewer pixels than this value in order to be included in the
+     final cell segmentation. Valid values are a positive integer or 'none'.
+     'None' will use no upper threshold in size gating. Default is 'none'.
+
+ marker_thresh
+     Threshold for the effect that the inferred marker image will have on the
+     postprocessing classification of cells as positive. If any corresponding
+     pixel in the marker image for a cell is above this threshold, the cell will
+     be classified as being positive regardless of the values from the inferred
+     segmentation image. Valid values are an integer in the range 0-255, 'none',
+     or 'auto'. 'None' will not use the marker image during classification.
+     'Auto' will automatically determine a threshold from the marker image.
+     Default is 'auto'.
  ```
 
  For example, in Python:
@@ -293,15 +334,118 @@ from PIL import Image
  images_dir = './Sample_Large_Tissues'
  filename = 'ROI_1.png'
 
+ root = os.path.splitext(filename)[0]
+
  res = requests.post(
      url='https://deepliif.org/api/infer',
      files={
-         'img': open(f'{images_dir}/{filename}', 'rb')
+         'img': open(f'{images_dir}/{filename}', 'rb'),
      },
-     # optional param that can be 10x, 20x, or 40x (default)
      params={
-         'resolution': '40x'
-     }
+         'resolution': '40x',
+     },
+ )
+
+ data = res.json()
+
+ def b64_to_pil(b):
+     return Image.open(BytesIO(base64.b64decode(b.encode())))
+
+ for name, img in data['images'].items():
+     with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
+         b64_to_pil(img).save(f, format='PNG')
+
+ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+     json.dump(data['scoring'], f, indent=2)
+ print(json.dumps(data['scoring'], indent=2))
+ ```
+
+ If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:
+
+ ```
+ POST /api/postprocess
+
+ File Parameters:
+
+ img (required)
+     Image on which DeepLIIF was run.
+
+ seg_img (required)
+     Inferred segmentation image previously generated by DeepLIIF.
+
+ marker_img (optional)
+     Inferred marker image previously generated by DeepLIIF. If this is
+     omitted, then the marker image will not be used in classification.
+
+ Query String Parameters:
+
+ resolution
+     Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
+
+ pil
+     If present, use Pillow to load the original image instead of Bio-Formats.
+     Pillow is faster, but works only on common image types (png, jpeg, etc.).
+     Pillow is always used to open the seg_img and marker_img files.
+
+ prob_thresh
+     Probability threshold used in postprocessing the inferred segmentation map
+     image. The segmentation map value must be above this value in order for a
+     pixel to be included in the final cell segmentation. Valid values are an
+     integer in the range 0-254. Default is 150.
+
+ size_thresh
+     Lower threshold for size gating the cells in postprocessing. Segmented
+     cells must have more pixels than this value in order to be included in the
+     final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+     'Auto' will try to automatically determine this lower bound for size gating
+     based on the distribution of detected cell sizes. Default is 'auto'.
+
+ size_thresh_upper
+     Upper threshold for size gating the cells in postprocessing. Segmented
+     cells must have fewer pixels than this value in order to be included in the
+     final cell segmentation. Valid values are a positive integer or 'none'.
+     'None' will use no upper threshold in size gating. Default is 'none'.
+
+ marker_thresh
+     Threshold for the effect that the inferred marker image will have on the
+     postprocessing classification of cells as positive. If any corresponding
+     pixel in the marker image for a cell is above this threshold, the cell will
+     be classified as being positive regardless of the values from the inferred
+     segmentation image. Valid values are an integer in the range 0-255, 'none',
+     or 'auto'. 'None' will not use the marker image during classification.
+     'Auto' will automatically determine a threshold from the marker image.
+     Default is 'auto'. (If marker_img is not supplied, this has no effect.)
+ ```
+
+ For example, in Python:
+
+ ```python
+ import os
+ import json
+ import base64
+ from io import BytesIO
+
+ import requests
+ from PIL import Image
+
+ # Use the sample images from the main DeepLIIF repo
+ images_dir = './Sample_Large_Tissues'
+ filename = 'ROI_1.png'
+
+ root = os.path.splitext(filename)[0]
+
+ res = requests.post(
+     url='https://deepliif.org/api/postprocess',
+     files={
+         'img': open(f'{images_dir}/{filename}', 'rb'),
+         'seg_img': open(f'{images_dir}/{root}_Seg.png', 'rb'),
+         'marker_img': open(f'{images_dir}/{root}_Marker.png', 'rb'),
+     },
+     params={
+         'resolution': '40x',
+         'pil': True,
+         'size_thresh': 250,
+     },
+ )
 
  data = res.json()
@@ -310,10 +454,11 @@ def b64_to_pil(b):
      return Image.open(BytesIO(base64.b64decode(b.encode())))
 
  for name, img in data['images'].items():
-     output_filepath = f'{images_dir}/{os.path.splitext(filename)[0]}_{name}.png'
-     with open(output_filepath, 'wb') as f:
+     with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
          b64_to_pil(img).save(f, format='PNG')
 
+ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+     json.dump(data['scoring'], f, indent=2)
  print(json.dumps(data['scoring'], indent=2))
  ```
 
deepliif-1.1.10.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
- cli.py,sha256=elGu-9di_LcUXPsWWknWPevbaTv2r2rpORVPH5lsnAU,39723
+ cli.py,sha256=iU9YxO65T1rxX2Mx9f9LsEPC4o_ZXO-wH_-FUjIA1so,40088
  deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  deepliif/postprocessing.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
  deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
  deepliif/data/__init__.py,sha256=euf9eUboK4RYR0jvdiyZDgPGozC1Nv7WRqRbTxSZD6A,5281
- deepliif/data/aligned_dataset.py,sha256=bAofVfgMwtb8Exe4EtQ3aP2ZYewBT4N_X8BDWSeFFj0,4405
+ deepliif/data/aligned_dataset.py,sha256=6qNFLXXW1phuIfNhCJSaLfOc-KN2kl7EuUdmyAPPU4I,5148
  deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
  deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
  deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
@@ -12,23 +12,24 @@ deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN
  deepliif/data/unaligned_dataset.py,sha256=m7j-CX-hkXbhg96NSEcaCagNVhTuXKkMsBADdMEJDBA,3393
  deepliif/models/DeepLIIFExt_model.py,sha256=Sc60rOfDJuoGrJ1CYe4beAg6as6F0o864AO6ZB7paBY,14527
  deepliif/models/DeepLIIF_model.py,sha256=ECZyM9jzoJAWSgB1ProBoarVuGcbScQMaSkRjSMgt0k,20872
- deepliif/models/__init__.py,sha256=E2udWyU4eScFnvDO2qtwMeHRz4ihw8hhCoP666QBK4o,22674
+ deepliif/models/SDG_model.py,sha256=xcZCTMNyJbcB78I1c8KtYVIB6OWL7WSMKdCxNemIzxs,9074
+ deepliif/models/__init__.py,sha256=Bjya0xOHjoJa0Wnfiwby-gzJaUzfVsVDS4S_37Uid-g,25597
  deepliif/models/base_model.py,sha256=HKcUOBHtL-zLs5ZcmeXT-ZV_ubqsSUo4wMCQ0W27YHU,15583
  deepliif/models/networks.py,sha256=bN4yjRdE413efUESq8pvhzPDgFCTwFKXyQOrRqHckWY,32177
- deepliif/options/__init__.py,sha256=nm231wh_hQw1mX0AfXZu7-cx6WZdxURsE-jvnjlHQzE,4581
+ deepliif/options/__init__.py,sha256=-syiyTK_oAeTLCBDm0bz1f_1jI3VK3LCwo2UNwOz6eM,5949
  deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
  deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
  deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
  deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
- deepliif/util/__init__.py,sha256=dPkYGAy8s8JL7srZIkIhDuKdpQwVyf2Nsy5ABWlLFtg,16924
+ deepliif/util/__init__.py,sha256=5lkf-6R03VPnLXABKec_nx3BmXM-ZGQd3SZ1ft-koHA,17573
  deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
  deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
  deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
- deepliif/util/util.py,sha256=bTArzuhIMGgGweH0v5rkiHrqBxc24BDv12rssOE9OoI,4636
+ deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
  deepliif/util/visualizer.py,sha256=5V1lWidHqssJX21jn1P5-bOVgtrEXKVaQgnMWAsMfqg,15636
- deepliif-1.1.9.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
- deepliif-1.1.9.dist-info/METADATA,sha256=CBgGs3wxeg1Hewt7lbAFUEH4Sbm5jV6jt4F3q59gvzM,25076
- deepliif-1.1.9.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
- deepliif-1.1.9.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
- deepliif-1.1.9.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
- deepliif-1.1.9.dist-info/RECORD,,
+ deepliif-1.1.10.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+ deepliif-1.1.10.dist-info/METADATA,sha256=iApT5xf7jFaGZoqda3fQ68c3WCnE6e_nUxjFcxVInHk,31173
+ deepliif-1.1.10.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ deepliif-1.1.10.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+ deepliif-1.1.10.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+ deepliif-1.1.10.dist-info/RECORD,,