deepliif 1.1.13__py3-none-any.whl → 1.1.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
deepliif/models/__init__.py CHANGED
@@ -25,6 +25,8 @@ from functools import lru_cache
  from io import BytesIO
  import json
  import math
+ import importlib.metadata
+ import pathlib

  import requests
  import torch
@@ -37,7 +39,7 @@ from dask import delayed, compute
  from deepliif.util import *
  from deepliif.util.util import tensor_to_pil
  from deepliif.data import transform
- from deepliif.postprocessing import compute_final_results, compute_cell_results
+ from deepliif.postprocessing import compute_final_results, compute_cell_results, to_array
  from deepliif.postprocessing import encode_cell_data_v4, decode_cell_data_v4
  from deepliif.options import Options, print_options

@@ -167,7 +169,7 @@ def init_nets(model_dir, eager_mode=False, opt=None, phase='test'):
  opt = get_opt(model_dir, mode=phase)
  opt.use_dp = False

- if opt.model == 'DeepLIIF':
+ if opt.model in ['DeepLIIF','DeepLIIFKD']:
  net_groups = [
  ('G1', 'G52'),
  ('G2', 'G53'),
@@ -217,13 +219,14 @@ def compute_overlap(img_size, tile_size):
  return tile_size // 4


- def run_torchserve(img, model_path=None, eager_mode=False, opt=None, seg_only=False):
+ def run_torchserve(img, model_path=None, nets=None, eager_mode=False, opt=None, seg_only=False, use_dask=True, output_tensor=False):
  """
  eager_mode: not used in this function; put in place to be consistent with run_dask
  so that run_wrapper() could call either this function or run_dask with
  same syntax
  opt: same as eager_mode
  seg_only: same as eager_mode
+ nets: same as eager_mode
  """
  buffer = BytesIO()
  torch.save(transform(img.resize((opt.scale_size, opt.scale_size))), buffer)
@@ -242,16 +245,28 @@ def run_torchserve(img, model_path=None, eager_mode=False, opt=None, seg_only=Fa
  return {k: tensor_to_pil(deserialize_tensor(v)) for k, v in res.json().items()}


- def run_dask(img, model_path, eager_mode=False, opt=None, seg_only=False):
- model_dir = os.getenv('DEEPLIIF_MODEL_DIR', model_path)
- nets = init_nets(model_dir, eager_mode, opt)
- use_dask = True if opt.norm != 'spectral' else False
+ def run_dask(img, model_path=None, nets=None, eager_mode=False, opt=None, seg_only=False, use_dask=True, output_tensor=False):
+ """
+ Provide either the model path or the networks object.
+
+ `eager_mode` is only applicable if model_path is provided.
+ """
+ assert model_path is not None or nets is not None, 'Provide either the model path or the networks object.'
+ if nets is None:
+ model_dir = os.getenv('DEEPLIIF_MODEL_DIR', model_path)
+ nets = init_nets(model_dir, eager_mode, opt)
+
+ if use_dask: # check if use_dask should be overwritten
+ use_dask = True if opt.norm != 'spectral' else False

- if opt.input_no > 1 or opt.model == 'SDG':
- l_ts = [transform(img_i.resize((opt.scale_size,opt.scale_size))) for img_i in img]
- ts = torch.cat(l_ts, dim=1)
+ if isinstance(img,torch.Tensor): # if img input is already a tensor, pass
+ ts = img
  else:
- ts = transform(img.resize((opt.scale_size, opt.scale_size)))
+ if opt.input_no > 1 or opt.model == 'SDG':
+ l_ts = [transform(img_i.resize((opt.scale_size,opt.scale_size))) for img_i in img]
+ ts = torch.cat(l_ts, dim=1)
+ else:
+ ts = transform(img.resize((opt.scale_size, opt.scale_size)))


  if use_dask:
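The new `run_dask` signature above accepts either a model path or an already-initialized `nets` dictionary, which lets callers reuse the networks across many tiles instead of reloading them per call. A minimal usage sketch, assuming the `deepliif.models` functions shown in this diff and hypothetical file paths:

```python
from PIL import Image
from deepliif.models import get_opt, init_nets, run_dask

model_dir = './DeepLIIF_Latest_Model'  # hypothetical model directory
opt = get_opt(model_dir)

# Old-style call: run_dask resolves and initializes the networks itself.
res = run_dask(Image.open('tile.png'), model_path=model_dir, opt=opt)

# New-style call: initialize the networks once, then reuse them per tile.
nets = init_nets(model_dir, eager_mode=False, opt=opt)
for path in ('tile_0.png', 'tile_1.png'):
    res = run_dask(Image.open(path), nets=nets, opt=opt)
```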
@@ -264,13 +279,20 @@ def run_dask(img, model_path, eager_mode=False, opt=None, seg_only=False):
  with torch.no_grad():
  return model(input.to(next(model.parameters()).device))

- if opt.model == 'DeepLIIF':
+ if opt.model in ['DeepLIIF','DeepLIIFKD']:
+ #weights = {
+ # 'G51': 0.25, # IHC
+ # 'G52': 0.25, # Hema
+ # 'G53': 0.25, # DAPI
+ # 'G54': 0.00, # Lap2
+ # 'G55': 0.25, # Marker
+ #}
  weights = {
- 'G51': 0.25, # IHC
- 'G52': 0.25, # Hema
- 'G53': 0.25, # DAPI
- 'G54': 0.00, # Lap2
- 'G55': 0.25, # Marker
+ 'G51': 0.5, # IHC
+ 'G52': 0.0, # Hema
+ 'G53': 0.0, # DAPI
+ 'G54': 0.0, # Lap2
+ 'G55': 0.5, # Marker
  }

  seg_map = {'G1': 'G52', 'G2': 'G53', 'G3': 'G54', 'G4': 'G55'}
@@ -282,19 +304,27 @@ def run_dask(img, model_path, eager_mode=False, opt=None, seg_only=False):
  lazy_gens['G4'] = forward(ts, nets['G4'])
  gens = compute(lazy_gens)[0]

- lazy_segs = {v: forward(gens[k], nets[v]).to(torch.device('cpu')) for k, v in seg_map.items()}
+ lazy_segs = {v: forward(gens[k], nets[v]) for k, v in seg_map.items()}
  if not seg_only or weights['G51'] != 0:
- lazy_segs['G51'] = forward(ts, nets['G51']).to(torch.device('cpu'))
+ lazy_segs['G51'] = forward(ts, nets['G51'])
  segs = compute(lazy_segs)[0]
-
- seg = torch.stack([torch.mul(segs[k], weights[k]) for k in segs.keys()]).sum(dim=0)
-
- if seg_only:
- res = {'G4': tensor_to_pil(gens['G4'])} if 'G4' in gens else {}
+
+ device = next(nets['G1'].parameters()).device # take the device of the first net and move all outputs there for seg aggregation
+ seg = torch.stack([torch.mul(segs[k].to(device), weights[k]) for k in segs.keys()]).sum(dim=0)
+
+ if output_tensor:
+ if seg_only:
+ res = {'G4': gens['G4']} if 'G4' in gens else {}
+ else:
+ res = {**gens, **segs}
+ res['G5'] = seg
  else:
- res = {k: tensor_to_pil(v) for k, v in gens.items()}
- res.update({k: tensor_to_pil(v) for k, v in segs.items()})
- res['G5'] = tensor_to_pil(seg)
+ if seg_only:
+ res = {'G4': tensor_to_pil(gens['G4'].to(torch.device('cpu')))} if 'G4' in gens else {}
+ else:
+ res = {k: tensor_to_pil(v.to(torch.device('cpu'))) for k, v in gens.items()}
+ res.update({k: tensor_to_pil(v.to(torch.device('cpu'))) for k, v in segs.items()})
+ res['G5'] = tensor_to_pil(seg.to(torch.device('cpu')))

  return res
  elif opt.model in ['DeepLIIFExt','SDG','CycleGAN']:
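For reference, the aggregated segmentation above is just a per-generator weighted sum of the individual seg outputs. A standalone sketch of that aggregation with dummy tensors (shapes and random values are illustrative only):

```python
import torch

# Weights mirror the new defaults above: only the IHC (G51) and Marker (G55)
# branches contribute to the aggregated segmentation.
weights = {'G51': 0.5, 'G52': 0.0, 'G53': 0.0, 'G54': 0.0, 'G55': 0.5}
segs = {k: torch.rand(1, 3, 512, 512) for k in weights}  # stand-ins for network outputs

seg = torch.stack([segs[k] * weights[k] for k in segs]).sum(dim=0)
print(seg.shape)  # torch.Size([1, 3, 512, 512])
```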
@@ -332,8 +362,8 @@ def is_empty(tile):
  return True if np.max(image_variance_rgb(tile)) < thresh else False


- def run_wrapper(tile, run_fn, model_path, eager_mode=False, opt=None, seg_only=False):
- if opt.model == 'DeepLIIF':
+ def run_wrapper(tile, run_fn, model_path=None, nets=None, eager_mode=False, opt=None, seg_only=False, use_dask=True, output_tensor=False):
+ if opt.model in ['DeepLIIF','DeepLIIFKD']:
  if is_empty(tile):
  if seg_only:
  return {
@@ -354,31 +384,38 @@ def run_wrapper(tile, run_fn, model_path, eager_mode=False, opt=None, seg_only=F
  'G55': Image.new(mode='RGB', size=(512, 512), color=(0, 0, 0)),
  }
  else:
- return run_fn(tile, model_path, eager_mode, opt, seg_only)
+ return run_fn(tile, model_path, None, eager_mode, opt, seg_only)
  elif opt.model in ['DeepLIIFExt', 'SDG']:
  if is_empty(tile):
  res = {'G_' + str(i): Image.new(mode='RGB', size=(512, 512)) for i in range(1, opt.modalities_no + 1)}
  res.update({'GS_' + str(i): Image.new(mode='RGB', size=(512, 512)) for i in range(1, opt.modalities_no + 1)})
  return res
  else:
- return run_fn(tile, model_path, eager_mode, opt)
+ return run_fn(tile, model_path, None, eager_mode, opt)
  elif opt.model in ['CycleGAN']:
  if is_empty(tile):
  net_names = ['GB_{i+1}' for i in range(opt.modalities_no)] if opt.BtoA else [f'GA_{i+1}' for i in range(opt.modalities_no)]
  res = {net_name: Image.new(mode='RGB', size=(512, 512)) for net_name in net_names}
  return res
  else:
- return run_fn(tile, model_path, eager_mode, opt)
+ return run_fn(tile, model_path, None, eager_mode, opt)
  else:
  raise Exception(f'run_wrapper() not implemented for model {opt.model}')


  def inference(img, tile_size, overlap_size, model_path, use_torchserve=False,
  eager_mode=False, color_dapi=False, color_marker=False, opt=None,
- return_seg_intermediate=False, seg_only=False):
+ return_seg_intermediate=False, seg_only=False, opt_args={}):
+ """
+ opt_args: a dictionary of key and values to add/overwrite to opt
+ """
  if not opt:
  opt = get_opt(model_path)
  #print_options(opt)
+
+ for k,v in opt_args.items():
+ setattr(opt,k,v)
+ #print_options(opt)

  run_fn = run_torchserve if use_torchserve else run_dask

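The new `opt_args` hook simply applies `setattr` for each key on `opt` before inference runs. A hypothetical call (the paths are illustrative, and `scale_size` is just one example of an option attribute used elsewhere in this file):

```python
from PIL import Image
from deepliif.models import inference

images = inference(
    Image.open('ihc_region.png'),           # hypothetical input image
    tile_size=512,
    overlap_size=80,
    model_path='./DeepLIIF_Latest_Model',   # hypothetical model directory
    seg_only=True,
    opt_args={'scale_size': 512},           # added/overwritten on opt via setattr
)
```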
@@ -393,10 +430,11 @@ def inference(img, tile_size, overlap_size, model_path, use_torchserve=False,
  tiler = InferenceTiler(orig, tile_size, overlap_size)
  for tile in tiler:
- tiler.stitch(run_wrapper(tile, run_fn, model_path, eager_mode, opt, seg_only))
+ tiler.stitch(run_wrapper(tile, run_fn, model_path, None, eager_mode, opt, seg_only))
+
  results = tiler.results()

- if opt.model == 'DeepLIIF':
+ if opt.model in ['DeepLIIF','DeepLIIFKD']:
  if seg_only:
  images = {'Seg': results['G5']}
  if 'G4' in results:
@@ -445,7 +483,7 @@ def inference(img, tile_size, overlap_size, model_path, use_torchserve=False,


  def postprocess(orig, images, tile_size, model, seg_thresh=150, size_thresh='default', marker_thresh=None, size_thresh_upper=None):
- if model == 'DeepLIIF':
+ if model in ['DeepLIIF','DeepLIIFKD']:
  resolution = '40x' if tile_size > 384 else ('20x' if tile_size > 192 else '10x')
  overlay, refined, scoring = compute_final_results(
  orig, images['Seg'], images.get('Marker'), resolution,
@@ -706,6 +744,7 @@ def infer_cells_for_wsi(filename, model_dir, tile_size, region_size=20000, versi
  print_info(region.shape, region.dtype)
  img = Image.fromarray((region * 255).astype(np.uint8)) if rescale else Image.fromarray(region)
  print_info(img.size, img.mode)
+ del region

  images = inference(
  img,
@@ -719,7 +758,15 @@ def infer_cells_for_wsi(filename, model_dir, tile_size, region_size=20000, versi
  return_seg_intermediate=False,
  seg_only=True,
  )
- region_data = compute_cell_results(images['Seg'], images.get('Marker'), resolution, version=version)
+ del img
+
+ seg = to_array(images['Seg'])
+ del images['Seg']
+ marker = to_array(images['Marker'], True) if 'Marker' in images else None
+ del images
+ region_data = compute_cell_results(seg, marker, resolution, version=version)
+ del seg
+ del marker

  if start_x != 0 or start_y != 0:
  for i in range(len(region_data['cells'])):
@@ -757,4 +804,14 @@ def infer_cells_for_wsi(filename, model_dir, tile_size, region_size=20000, versi
  data['settings']['default_marker_thresh'] = round(default_marker_thresh / count_marker_thresh)
  data['settings']['default_size_thresh'] = round(default_size_thresh / count_size_thresh)

+ try:
+ data['deepliifVersion'] = importlib.metadata.version('deepliif')
+ except Exception as e:
+ data['deepliifVersion'] = 'unknown'
+
+ try:
+ data['modelVersion'] = pathlib.PurePath(model_dir).name
+ except Exception as e:
+ data['modelVersion'] = 'unknown'
+
  return data
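The two lookups added above are plain standard-library calls; a self-contained sketch of what they return (the example path is illustrative):

```python
import importlib.metadata
import pathlib

try:
    pkg_version = importlib.metadata.version('deepliif')   # e.g. '1.1.14' when installed
except importlib.metadata.PackageNotFoundError:
    pkg_version = 'unknown'

# PurePath(...).name keeps only the last path component, i.e. the model folder name.
model_version = pathlib.PurePath('/models/DeepLIIF_Latest_Model').name
print(pkg_version, model_version)  # -> 1.1.14 DeepLIIF_Latest_Model (when installed)
```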
deepliif/models/base_model.py CHANGED
@@ -147,7 +147,7 @@ class BaseModel(ABC):
  if isinstance(name, str):
  if not hasattr(self, name):
  if len(name.split('_')) != 2:
- if self.opt.model == 'DeepLIIF':
+ if self.opt.model in ['DeepLIIF','DeepLIIFKD']:
  img_name = name[:-1] + '_' + name[-1]
  visual_ret[name] = getattr(self, img_name)
  else:
deepliif/models/networks.py CHANGED
@@ -172,12 +172,14 @@ def define_G(
  norm_layer = get_norm_layer(norm_type=norm)
  use_spectral_norm = norm == 'spectral'

- if netG == 'resnet_9blocks':
- net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9,
- padding_type=padding_type, upsample=upsample, use_spectral_norm=use_spectral_norm)
- elif netG == 'resnet_6blocks':
- net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6,
+ if netG.startswith('resnet_'):
+ n_blocks = int(netG.split('_')[1].replace('blocks',''))
+ net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=n_blocks,
  padding_type=padding_type, upsample=upsample, use_spectral_norm=use_spectral_norm)
+ elif netG == 'unet_32':
+ net = UnetGenerator(input_nc, output_nc, 5, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
+ elif netG == 'unet_64':
+ net = UnetGenerator(input_nc, output_nc, 6, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
  elif netG == 'unet_128':
  net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
  elif netG == 'unet_256':
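With the change above, any `resnet_<N>blocks` name selects a ResNet generator with N blocks instead of only the hard-coded 6/9 variants. A tiny sketch of the parsing rule in isolation (function name and inputs are illustrative):

```python
def parse_resnet_blocks(netG: str) -> int:
    # Same rule as in define_G above: 'resnet_9blocks' -> 9, 'resnet_4blocks' -> 4.
    return int(netG.split('_')[1].replace('blocks', ''))

print(parse_resnet_blocks('resnet_9blocks'))  # 9
print(parse_resnet_blocks('resnet_4blocks'))  # 4
```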
deepliif/postprocessing.py CHANGED
@@ -184,32 +184,38 @@ def mark_background(mask):
  After the function executes, the pixels will be labeled as background, positive, negative, or unknown.
  """

- seeds = []
  for i in range(mask.shape[0]):
  if mask[i, 0] == LABEL_UNKNOWN:
- seeds.append((i, 0))
+ mask[i, 0] = LABEL_BACKGROUND
  if mask[i, mask.shape[1]-1] == LABEL_UNKNOWN:
- seeds.append((i, mask.shape[1]-1))
+ mask[i, mask.shape[1]-1] = LABEL_BACKGROUND
  for j in range(mask.shape[1]):
  if mask[0, j] == LABEL_UNKNOWN:
- seeds.append((0, j))
+ mask[0, j] = LABEL_BACKGROUND
  if mask[mask.shape[0]-1, j] == LABEL_UNKNOWN:
- seeds.append((mask.shape[0]-1, j))
-
- neighbors = [(-1, 0), (1, 0), (0, -1), (0, 1)]
-
- while len(seeds) > 0:
- seed = seeds.pop()
- if mask[seed] == LABEL_UNKNOWN:
- mask[seed] = LABEL_BACKGROUND
- for n in neighbors:
- idx = (seed[0] + n[0], seed[1] + n[1])
- if in_bounds(mask, idx) and mask[idx] == LABEL_UNKNOWN:
- seeds.append(idx)
+ mask[mask.shape[0]-1, j] = LABEL_BACKGROUND
+
+ count = 1
+ while count > 0:
+ count = 0
+ for i in range(mask.shape[0]):
+ for j in range(mask.shape[1]):
+ if mask[i, j] == LABEL_UNKNOWN:
+ if (mask[i-1, j] == LABEL_BACKGROUND or mask[i+1, j] == LABEL_BACKGROUND or
+ mask[i, j-1] == LABEL_BACKGROUND or mask[i, j+1] == LABEL_BACKGROUND):
+ mask[i, j] = LABEL_BACKGROUND
+ count += 1
+ if count > 0:
+ for i in range(mask.shape[0]-1, -1, -1):
+ for j in range(mask.shape[1]-1, -1, -1):
+ if mask[i, j] == LABEL_UNKNOWN:
+ if (mask[i-1, j] == LABEL_BACKGROUND or mask[i+1, j] == LABEL_BACKGROUND or
+ mask[i, j-1] == LABEL_BACKGROUND or mask[i, j+1] == LABEL_BACKGROUND):
+ mask[i, j] = LABEL_BACKGROUND


  @jit(nopython=True)
- def compute_cell_mapping(mask, marker, noise_thresh):
+ def compute_cell_mapping(mask, marker, noise_thresh, large_noise_thresh):
  """
  Compute the mapping from mask to positive and negative cells.

@@ -264,7 +270,7 @@ def compute_cell_mapping(mask, marker, noise_thresh):
  center_x += idx[1]
  count += 1

- if count > noise_thresh:
+ if count > noise_thresh and (large_noise_thresh is None or count < large_noise_thresh):
  center_y = int(round(center_y / count))
  center_x = int(round(center_x / count))
  positive = True if count_positive >= count_negative else False
@@ -273,7 +279,7 @@ def compute_cell_mapping(mask, marker, noise_thresh):
  return cells


- def get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh):
+ def get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh, large_noise_thresh):
  """
  Find all cells in the segmentation image that are larger than the noise threshold.

@@ -289,6 +295,8 @@ def get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh):
  Threshold for tiny noise to ignore (include only cells larger than this value).
  seg_thresh : int
  Threshold to use in determining if a pixel should be labeled as positive/negative.
+ large_noise_thresh : int | None
+ Threshold for large noise to ignore (include only cells smaller than this value).

  Returns
  -------
@@ -303,9 +311,10 @@ def get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh):
  seg = to_array(seg)
  if marker is not None:
  marker = to_array(marker, True)
+
  mask = create_posneg_mask(seg, seg_thresh)
  mark_background(mask)
- cellsinfo = compute_cell_mapping(mask, marker, noise_thresh)
+ cellsinfo = compute_cell_mapping(mask, marker, noise_thresh, large_noise_thresh)

  defaults = {}
  sizes = np.zeros(len(cellsinfo), dtype=np.int64)
@@ -1040,9 +1049,21 @@ def fill_cells(mask):
  mask[y, x] = LABEL_NEGATIVE


+ def calculate_large_noise_thresh(large_noise_thresh, resolution):
+ if large_noise_thresh != 'default':
+ return large_noise_thresh
+ if resolution == '10x':
+ return 250
+ elif resolution == '20x':
+ return 1000
+ else: # 40x
+ return 4000
+
+
  def compute_cell_results(seg, marker, resolution, version=3,
  seg_thresh=DEFAULT_SEG_THRESH,
- noise_thresh=DEFAULT_NOISE_THRESH):
+ noise_thresh=DEFAULT_NOISE_THRESH,
+ large_noise_thresh='default'):
  """
  Perform postprocessing to compute individual cell results.

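Since `calculate_large_noise_thresh` is a pure function, the resolution-dependent defaults it introduces can be exercised directly; a short sketch (the import path is assumed from this diff):

```python
from deepliif.postprocessing import calculate_large_noise_thresh

assert calculate_large_noise_thresh('default', '10x') == 250
assert calculate_large_noise_thresh('default', '20x') == 1000
assert calculate_large_noise_thresh('default', '40x') == 4000
assert calculate_large_noise_thresh(1234, '40x') == 1234    # explicit values pass through
assert calculate_large_noise_thresh(None, '40x') is None    # None disables the upper cutoff
```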
@@ -1060,6 +1081,9 @@ def compute_cell_results(seg, marker, resolution, version=3,
  Threshold to use in determining if a pixel should be labeled as positive/negative.
  noise_thresh : int
  Threshold for tiny noise to ignore (include only cells larger than this value).
+ large_noise_thresh : int | string | None
+ Threshold for large noise to ignore (include only cells smaller than this value).
+ Valid arguments can be an integer value, the string value 'default', or None.

  Returns
  -------
@@ -1071,7 +1095,8 @@ def compute_cell_results(seg, marker, resolution, version=3,
  warnings.warn('Invalid cell data version provided, defaulting to version 3.')
  version = 3

- mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh)
+ large_noise_thresh = calculate_large_noise_thresh(large_noise_thresh, resolution)
+ mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh, large_noise_thresh)

  cells = []
  for cell in cellsinfo:
@@ -1094,6 +1119,7 @@ def compute_cell_results(seg, marker, resolution, version=3,
  'default_marker_thresh': defaults['marker_thresh'] if 'marker_thresh' in defaults else None,
  'default_size_thresh': defaults['size_thresh'],
  'noise_thresh': noise_thresh,
+ 'large_noise_thresh': large_noise_thresh,
  'seg_thresh': seg_thresh,
  },
  'dataVersion': version,
@@ -1107,7 +1133,8 @@ def compute_final_results(orig, seg, marker, resolution,
  marker_thresh=None,
  size_thresh_upper=None,
  seg_thresh=DEFAULT_SEG_THRESH,
- noise_thresh=DEFAULT_NOISE_THRESH):
+ noise_thresh=DEFAULT_NOISE_THRESH,
+ large_noise_thresh='default'):
  """
  Perform postprocessing to compute final count and image results.

@@ -1131,6 +1158,9 @@ def compute_final_results(orig, seg, marker, resolution,
  Threshold to use in determining if a pixel should be labeled as positive/negative.
  noise_thresh : int
  Threshold for tiny noise to ignore (include only cells larger than this value).
+ large_noise_thresh : int | string | None
+ Threshold for large noise to ignore (include only cells smaller than this value).
+ Valid arguments can be an integer value, the string value 'default', or None.

  Returns
  -------
@@ -1142,7 +1172,8 @@ def compute_final_results(orig, seg, marker, resolution,
  Dictionary with scoring and settings information.
  """

- mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh)
+ large_noise_thresh = calculate_large_noise_thresh(large_noise_thresh, resolution)
+ mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh, large_noise_thresh)

  if size_thresh is None:
  size_thresh = 0
deepliif/util/checks.py ADDED
@@ -0,0 +1,17 @@
+
+
+ def check_weights(model, modalities_no, seg_weights, loss_weights_g, loss_weights_d):
+ assert sum(seg_weights) == 1, 'seg weights should add up to 1'
+ assert sum(loss_weights_g) == 1, 'loss weights g should add up to 1'
+ assert sum(loss_weights_d) == 1, 'loss weights d should add up to 1'
+
+ if model in ['DeepLIIF','DeepLIIFKD']:
+ # +1 because input becomes an additional modality used in generating the final segmentation
+ assert len(seg_weights) == modalities_no+1, 'seg weights should have the same number of elements as number of modalities to be generated'
+ assert len(loss_weights_g) == modalities_no+1, 'loss weights g should have the same number of elements as number of modalities to be generated'
+ assert len(loss_weights_d) == modalities_no+1, 'loss weights d should have the same number of elements as number of modalities to be generated'
+
+ else:
+ assert len(seg_weights) == modalities_no, 'seg weights should have the same number of elements as number of modalities to be generated'
+ assert len(loss_weights_g) == modalities_no, 'loss weights g should have the same number of elements as number of modalities to be generated'
+ assert len(loss_weights_d) == modalities_no, 'loss weights d should have the same number of elements as number of modalities to be generated'
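A hypothetical call to the new `check_weights` helper; the weight lists below are illustrative values chosen to satisfy its assertions (for DeepLIIF/DeepLIIFKD the input image counts as an extra modality, hence five entries for four generated modalities):

```python
from deepliif.util.checks import check_weights

check_weights(
    model='DeepLIIF',
    modalities_no=4,
    seg_weights=[0.25, 0.25, 0.25, 0.0, 0.25],
    loss_weights_g=[0.25, 0.25, 0.25, 0.125, 0.125],
    loss_weights_d=[0.25, 0.25, 0.25, 0.125, 0.125],
)  # raises AssertionError if the weights do not sum to 1 or have the wrong length
```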
deepliif/util/util.py CHANGED
@@ -163,3 +163,45 @@ def check_multi_scale(img1, img2):
  if max_ssim[1] < image_ssim:
  max_ssim = (tile_size, image_ssim)
  return max_ssim[0]
+
+
+ import subprocess
+ import os
+ from threading import Thread , Timer
+ import sched, time
+
+ # modified from https://stackoverflow.com/questions/67707828/how-to-get-every-seconds-gpu-usage-in-python
+ def get_gpu_memory(gpu_id=0):
+ """
+ Currently collects gpu memory info for a given gpu id.
+ """
+ output_to_list = lambda x: x.decode('ascii').split('\n')[:-1]
+ ACCEPTABLE_AVAILABLE_MEMORY = 1024
+ COMMAND = "nvidia-smi --query-gpu=memory.used --format=csv"
+ try:
+ memory_use_info = output_to_list(subprocess.check_output(COMMAND.split(),stderr=subprocess.STDOUT))[1:]
+ except subprocess.CalledProcessError as e:
+ raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
+ memory_use_values = [int(x.split()[0]) for i, x in enumerate(memory_use_info)]
+
+ #assert len(memory_use_values)==1, f"get_gpu_memory::memory_use_values should have only 1 value, now has {len(memory_use_values)} (memory_use_values)"
+ return memory_use_values[gpu_id]
+
+ class HardwareStatus():
+ def __init__(self):
+ self.gpu_mem = []
+ self.timer = None
+
+ def get_status_every_sec(self, gpu_id=0):
+ """
+ This function calls itself every 1 sec and appends the gpu_memory.
+ """
+ self.timer = Timer(1.0, self.get_status_every_sec)
+ self.timer.start()
+ self.gpu_mem.append(get_gpu_memory(gpu_id))
+ # print('self.gpu_mem',self.gpu_mem)
+
+ def stop_timer(self):
+ self.timer.cancel()
+
+
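A hypothetical use of the new `HardwareStatus` helper to sample GPU memory once per second around a block of work (requires `nvidia-smi` on the PATH; the sleep stands in for a real workload):

```python
import time
from deepliif.util.util import HardwareStatus

status = HardwareStatus()
status.get_status_every_sec(gpu_id=0)   # starts a self-rescheduling 1-second timer
try:
    time.sleep(5)                       # placeholder for the workload being profiled
finally:
    status.stop_timer()                 # cancel the pending timer

if status.gpu_mem:
    print('peak GPU memory used (MiB):', max(status.gpu_mem))
```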
deepliif-1.1.14.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepliif
- Version: 1.1.13
+ Version: 1.1.14
  Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
  Home-page: https://github.com/nadeemlab/DeepLIIF
  Author: Parmida93
@@ -64,7 +64,7 @@ segmentation.*

  © This code is made available for non-commercial academic purposes.

- ![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.13&color=darkgreen)
+ ![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.14&color=darkgreen)
  [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)

  ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
deepliif-1.1.14.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
- cli.py,sha256=IQIO_V9ubmeCOAniW9A5c8r9ETs7ehz4eJp_hrpuKo8,59625
+ cli.py,sha256=luwzcSrRZMePfJ-h4Hgz2X7lYC70TUVxGfEo3LyPJgQ,58938
  deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- deepliif/postprocessing.py,sha256=naq4Lt7WHg6wfOhksTASiCmZAx2P_wZSqozCkKvXNV0,40686
+ deepliif/postprocessing.py,sha256=QkeyTpR5zMvsv5mj59hRK7HXpIb2fZ_81ezfMSGAhTA,42610
  deepliif/postprocessing__OLD__DELETE.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
  deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
  deepliif/data/__init__.py,sha256=IfqVFnFSPQJZnORdRq4sNkJiylr1TaKNmhvWP_aLHdg,5492
- deepliif/data/aligned_dataset.py,sha256=Tuvll1dpnNAgwReeZ6NleKLQP__yhKxZRWcvb3IOSGY,5145
+ deepliif/data/aligned_dataset.py,sha256=CkKXj94ANSi8RdhpRQjVETFYlRMER2XErIf-87BStTE,5175
  deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
  deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
  deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
@@ -13,30 +13,33 @@ deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN
  deepliif/data/unaligned_dataset.py,sha256=D69SxV087jKTd990UQIR4F3TahJHiSiw8i9Uz_xybt0,4697
  deepliif/models/CycleGAN_model.py,sha256=WDEa-Zgz57mVc9HbcVDXL5vfHvUDWdWXNLyz8ReH3rg,15196
  deepliif/models/DeepLIIFExt_model.py,sha256=HZaX9Z2ue0HQCFFN3guLkBcByCP70i8JvmPY02oOMyU,15022
+ deepliif/models/DeepLIIFKD_model.py,sha256=edq9fxrDspGivuFlAYZp9B0Opp3BRIosA9e1TI_gxpc,27152
  deepliif/models/DeepLIIF_model.py,sha256=6vmsXcBcoALrhJLa7XGhDmLamO_WCzTDYEyVUBE482o,23857
  deepliif/models/SDG_model.py,sha256=3opz7uEyhvVJ8fF4_Jw4ho1MBcc9OVye-ByZD_KF2j0,10142
  deepliif/models/__init__ - different weighted.py,sha256=Oe6ichU-Qia2mODGUtQTh1OBZZnv5N-93AzOfzQiHlw,32227
  deepliif/models/__init__ - run_dask_multi dev.py,sha256=vt8X8qeiJr2aPhFi6muZEJLUSsr8XChfI45NSwL8Rfg,39449
  deepliif/models/__init__ - time gens.py,sha256=mRUtxNaGDZuhlQtKdA-OvGWTQwl7z2yMWc-9l0QrgaY,32922
  deepliif/models/__init__ - timings.py,sha256=S_wFImwxzGKx8STqbpcYCPOlbb_84WLMRDSnaWC8qFg,31750
- deepliif/models/__init__.py,sha256=EZkZu28f5ju_YiEz4yAMHQ5GAzl1-Mi6AK4kfWe20UA,31934
+ "deepliif/models/__init__ - weights, empty, zarr, tile count.py",sha256=JsU9ui0Kv8AzlP3_1LeiNrQLHg9X_3r8WwYy3W4JgfA,33315
+ deepliif/models/__init__.py,sha256=sNIj6fr0HRfC2kqQ4MtwyKfHDi1sf-cdmi6UgvK4lNQ,34197
  deepliif/models/att_unet.py,sha256=tqaFMNbGQUjXObOG309P76c7sIPxEvFR38EyuyHY40o,7116
- deepliif/models/base_model.py,sha256=ezWkmbuuNLGDMjyXe3VzJroj7QR1h0M9ByouzpfCrQg,16843
- deepliif/models/networks.py,sha256=Ijeb7nGf-YFgc_sBR-sIsk--0rTeiUqKZd01k4DMsuM,36614
+ deepliif/models/base_model.py,sha256=ZQBI-wVfDdu296HzB_YzQraE9oUwfyRlAolNlrMi-4g,16858
+ deepliif/models/networks.py,sha256=SclYeZ78U9ZSOhNrky764ZXB6muIqnHa6K0h_LDSCi0,36690
  deepliif/options/__init__.py,sha256=p2IWckf3-K-wclDWfSq5ZmynKk2lNov2Tn7WPYIO11A,8329
  deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
  deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
  deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
  deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
  deepliif/util/__init__.py,sha256=_b7-t5Z54CJJIy-moeKPPLFHg5BRCKgWo5V18WqRZVo,29146
+ deepliif/util/checks.py,sha256=xQirKbZxZErsAXc27M5miQUUyxptoIEJSDaUKv1nY7c,1371
  deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
  deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
  deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
- deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
+ deepliif/util/util.py,sha256=WV2a1Rt-McmZm3BKW7TqC2oAKkvxWhZvjofGSfY6y7s,6810
  deepliif/util/visualizer.py,sha256=6E1sPbXdgLFB9mnPwtfEjm9O40viG4dfv5MyTpOQQpo,20210
- deepliif-1.1.13.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
- deepliif-1.1.13.dist-info/METADATA,sha256=Ff0QjUBwpZGJcU1YgHfaixVAW6IYyHjV0FUi7QiGaRo,35195
- deepliif-1.1.13.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
- deepliif-1.1.13.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
- deepliif-1.1.13.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
- deepliif-1.1.13.dist-info/RECORD,,
+ deepliif-1.1.14.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+ deepliif-1.1.14.dist-info/METADATA,sha256=k4sMVqcfZ0B26XhwrueC903-Om3c5v1I99ynb873vEg,35195
+ deepliif-1.1.14.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+ deepliif-1.1.14.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+ deepliif-1.1.14.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+ deepliif-1.1.14.dist-info/RECORD,,