deepliif-1.2.3-py3-none-any.whl → deepliif-1.2.5-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- cli.py +79 -24
- deepliif/data/base_dataset.py +2 -0
- deepliif/models/DeepLIIFKD_model.py +243 -255
- deepliif/models/DeepLIIF_model.py +344 -235
- deepliif/models/__init__.py +194 -103
- deepliif/models/base_model.py +7 -2
- deepliif/options/__init__.py +40 -8
- deepliif/postprocessing.py +1 -1
- deepliif/util/__init__.py +168 -8
- deepliif/util/util.py +85 -0
- deepliif/util/visualizer.py +2 -2
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/METADATA +2 -2
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/RECORD +17 -17
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/LICENSE.md +0 -0
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/WHEEL +0 -0
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/entry_points.txt +0 -0
- {deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/top_level.txt +0 -0
deepliif/util/__init__.py
CHANGED
@@ -377,6 +377,104 @@ def adjust_background_tile(img):
     return image
 
 
+def infer_background_colors(dir_data, sample_size=5, input_no=1, modalities_no=4,
+                            seg_no=1, tile_size=32, return_list=False):
+    fns = [x for x in os.listdir(dir_data) if x.endswith('.png')]
+    sample_size = min(sample_size, len(fns))
+    w, h, num_img = None, None, None
+
+    background_colors = {}
+
+    count = 0
+    while count < sample_size and len(fns) > 0:
+        fn = fns.pop(0)
+        img = Image.open(f"{dir_data}/{fn}")
+
+        if w is None:
+            num_img = img.size[0] / img.size[1]
+            num_img = int(num_img)
+            w, h = img.size
+
+        background_colors_img = infer_background_colors_for_img(img, input_no=input_no, modalities_no=modalities_no, seg_no=seg_no, tile_size=tile_size, w=w, h=h, num_img=num_img)
+
+        if background_colors_img is not None:
+            count += 1
+            for mod_id, rgb_avg in background_colors_img.items():
+                try:
+                    background_colors[mod_id].append(rgb_avg)
+                except:
+                    background_colors[mod_id] = [rgb_avg]
+
+    if count > 0:
+        print(f'Calculating average color for empty tiles from {count} images..')
+        background_colors = {k: np.mean(v, axis=0).astype(np.uint8) for k, v in background_colors.items()}
+
+        if return_list:
+            return [tuple(e) for e in background_colors.values()]
+        else:
+            return background_colors
+    else:
+        print('None of the images have empty tiles for estimating average background color. Try with a proper tile size.')
+        return None
+
+
+def infer_background_colors_for_img(img, input_no=1, modalities_no=4, seg_no=1, tile_size=32,
+                                    w=None, h=None, num_img=None):
+    """
+    Estimate background colors for a given RGB image.
+    The empty tiles are determined by applying the is_empty() function on segmentation modalities.
+    If multiple segmentation modalities are present, only common empty tiles are used for background
+    color calculation.
+    """
+    from ..models import is_empty
+
+    if w is None:
+        num_img = img.size[0] / img.size[1]
+        num_img = int(num_img)
+        w, h = img.size
+
+    empty_tiles = {}
+    l_box = []
+    background_colors = {}
+
+    for i in range(num_img - seg_no, num_img):
+        img_mod = img.crop((h*i, 0, h*(i+1), h))
+        l_box_mod = []
+        for x in range(0, h, tile_size):
+            for y in range(0, h, tile_size):
+                box = (x, y, x+tile_size, y+tile_size)
+                tile = img_mod.crop(box)
+                if is_empty(tile):
+                    l_box_mod.append(box)
+        l_box.append(l_box_mod)
+
+    l_box_final = set()
+    if len(l_box) > 1:
+        # only keep overlapped boxes
+        for l in l_box:
+            l_box_final = l_box_final & set(l)
+        l_box_final = list(l_box_final)
+    else:
+        l_box_final = l_box[0]
+    # print(f'{len(l_box_final)} tiles are considered empty using segmentation modalities')
+
+    if len(l_box_final) == 0:  # this can happen for images with dense cells
+        return None
+
+    for i in range(input_no, modalities_no + input_no):
+        empty_tiles[i] = []
+        img_mod = img.crop((h*i, 0, h*(i+1), h))
+        for box in l_box_final:
+            tile = img_mod.crop(box)
+            empty_tiles[i].append(tile)
+
+        img_avg = np.mean(np.stack(empty_tiles[i], axis=0), axis=0)  # take an average across all empty tiles
+        rgb_avg = np.mean(img_avg, axis=(0, 1)).astype(np.uint8)
+        background_colors[i] = rgb_avg
+
+    return background_colors
+
+
 def image_variance_gray(img):
     px = np.asarray(img) if img.mode == 'L' else np.asarray(img.convert('L'))
     idx = np.logical_and(px != 255, px != 0)
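For orientation, here is a hedged usage sketch of the new helper. The directory path, panel layout, and parameter values are illustrative assumptions, not values shipped with the package; the sketch only assumes deepliif 1.2.5 is installed and that the folder contains the horizontally concatenated training PNGs that DeepLIIF expects.

```python
# Sketch only (assumed setup): deepliif >= 1.2.5 and a folder of training PNGs
# laid out as horizontally concatenated panels (input | modalities | segmentation).
from deepliif.util import infer_background_colors

dir_data = "./Datasets/sample_training_set/train"  # hypothetical path

# Average background RGB per modality, keyed by panel index.
colors_by_modality = infer_background_colors(
    dir_data,
    sample_size=5,    # number of images to sample
    input_no=1,       # number of input panels on the left
    modalities_no=4,  # number of modality panels to estimate colors for
    seg_no=1,         # number of segmentation panels used to find empty tiles
    tile_size=32,     # tile size passed to is_empty()
)
print(colors_by_modality)

# Or get a plain list of (R, G, B) tuples instead of a dict.
color_list = infer_background_colors(dir_data, return_list=True)
```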
@@ -460,8 +558,8 @@ def get_information(filename):
 
 class WSIReader:
     """
-    Assumes the file is a single image (e.g., not a stacked
-
+    Assumes the file is a single RGB image (e.g., not a stacked OME TIFF).
+    This reader will always return the data with pixel type of uint8.
     """
 
     def __init__(self, path):
@@ -470,6 +568,7 @@ class WSIReader:
         omexml = bioformats.OMEXML(metadata)
 
         self._path = path
+        self._metadata = metadata
         self._width = omexml.image().Pixels.SizeX
         self._height = omexml.image().Pixels.SizeY
         self._pixel_type = omexml.image().Pixels.PixelType
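Keeping the raw OME-XML string around matters for the tile-wise fallback added in the next hunk, which looks up `TileWidth`/`TileLength` from Bio-Formats `OriginalMetadata` annotations. The standalone sketch below reproduces that lookup against a fabricated XML fragment; the fragment itself is made up for illustration, while the namespace and element names match what the new `_read_region_by_tiles` parses.

```python
# Standalone sketch of the OriginalMetadata lookup used by the new tile fallback.
# The XML below is fabricated for illustration; in WSIReader the string is the
# OME-XML metadata obtained via Bio-Formats (e.g., bioformats.get_omexml_metadata).
import xml.etree.ElementTree as et

metadata = """<OME xmlns="http://www.openmicroscopy.org/Schemas/OME/2016-06">
  <StructuredAnnotations>
    <XMLAnnotation><Value><OriginalMetadata>
      <Key>TileWidth</Key><Value>240</Value>
    </OriginalMetadata></Value></XMLAnnotation>
    <XMLAnnotation><Value><OriginalMetadata>
      <Key>TileLength</Key><Value>240</Value>
    </OriginalMetadata></Value></XMLAnnotation>
  </StructuredAnnotations>
</OME>"""

tile_width, tile_height = None, None
root = et.fromstring(metadata.encode('utf-8'))
for origmeta in root.findall('.//{http://www.openmicroscopy.org/Schemas/OME/2016-06}OriginalMetadata'):
    key, value = None, None
    for om in origmeta:
        if 'Key' in om.tag:
            key = om.text
        elif 'Value' in om.tag:
            value = om.text
    if key == 'TileWidth' and value is not None:
        tile_width = int(value)
    elif key == 'TileLength' and value is not None:
        tile_height = int(value)

print(tile_width, tile_height)  # 240 240
```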
@@ -516,19 +615,80 @@ class WSIReader:
     def height(self):
         return self._height
 
-    def read(self, xywh):
+    def read(self, xywh, zeros_on_error=False):
+        x, y, w, h = xywh
+
         if self._tif is not None:
-            x, y, w, h = xywh
             try:
                 return self._zarr[y:y+h, x:x+w]
             except Exception as e:
                 pass
 
-
-
-
-
+        try:
+            px = self._bfreader.read(XYWH=xywh, rescale=self._rescale)
+            if self._rescale:
+                px = (px * 255).astype(np.uint8)
+            return px
+        except Exception as e:
+            if not zeros_on_error:
+                return self._read_region_by_tiles(xywh)
+
+            return np.zeros((h, w, 3), dtype=np.uint8)
+
+    def _read_region_by_tiles(self, xywh):
+        tile_width, tile_height = None, None
+
+        try:
+            if self._tif is not None and self._tif.pages[0].is_tiled:
+                tile_width = self._tif.pages[0].tilewidth
+                tile_height = self._tif.pages[0].tilelength
+            else:
+                root = et.fromstring(self._metadata.encode('utf-8'))
+                for origmeta in root.findall('.//{http://www.openmicroscopy.org/Schemas/OME/2016-06}OriginalMetadata'):
+                    key, value = None, None
+                    for om in origmeta:
+                        if 'Key' in om.tag:
+                            key = om.text
+                        elif 'Value' in om.tag:
+                            value = om.text
+                    if key == 'TileWidth' and value is not None:
+                        tile_width = int(value)
+                    elif key == 'TileLength' and value is not None:
+                        tile_height = int(value)
+        except Exception as e:
+            pass
+
+        if tile_width is None or tile_height is None:
+            tile_width, tile_height = 512, 512
+
+        x0, y0, w, h = xywh
+        w0 = x0 % tile_width
+        h0 = y0 % tile_height
+        x1 = x0 + w0
+        y1 = y0 + h0
+
+        px = np.zeros((h, w, 3), dtype=np.uint8)
+        tile = self.read((x0, y0, w0, h0), True)
+        px[0:h0, 0:w0] = tile
+
+        for x in range(x1, x0+w, tile_width):
+            tw = min(tile_width, x0+w-x)
+            tile = self.read((x, y0, tw, h0), True)
+            px[0:h0, x-x0:x-x0+tw] = tile
+
+        for y in range(y1, y0+h, tile_height):
+            th = min(tile_height, y0+h-y)
+            tile = self.read((x0, y, w0, th), True)
+            px[y-y0:y-y0+th, 0:w0] = tile
+
+        for y in range(y1, y0+h, tile_height):
+            for x in range(x1, x0+w, tile_width):
+                tw = min(tile_width, x0+w-x)
+                th = min(tile_height, y0+h-y)
+                tile = self.read((x, y, tw, th), True)
+                px[y-y0:y-y0+th, x-x0:x-x0+tw] = tile
 
+        return px
 
 
 
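Taken together, `read()` now tries the tifffile/zarr path first, then a direct Bio-Formats read, and finally `_read_region_by_tiles()`, which stitches the requested region from tile-aligned sub-reads (failing sub-tiles come back zero-filled because of `zeros_on_error=True`). A hedged usage sketch follows; the slide path is a placeholder and the snippet assumes python-bioformats/javabridge are installed and initialized as deepliif normally requires.

```python
# Sketch only: reading a region from a whole-slide image with deepliif >= 1.2.5.
# Assumes python-bioformats/javabridge are installed and the JVM is already
# running (deepliif's WSI utilities rely on Bio-Formats under the hood).
from deepliif.util import WSIReader

reader = WSIReader("/path/to/slide.svs")  # placeholder path

# Request a 2048x2048 region at (x=10000, y=8000). If both the zarr and the
# direct Bio-Formats reads fail, the region is assembled tile by tile via
# _read_region_by_tiles(); any sub-tile that still fails is returned as zeros.
region = reader.read((10000, 8000, 2048, 2048))
print(region.shape, region.dtype)  # expected: (2048, 2048, 3) uint8 for RGB slides
```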
deepliif/util/util.py
CHANGED
@@ -205,3 +205,88 @@ class HardwareStatus():
         self.timer.cancel()
 
 
+def get_mod_id_seg(dir_model):
+    # assume we already know there are seg models - this check is intended to be done prior to calling this function
+    fns = [fn for fn in os.listdir(dir_model) if fn.endswith('.pth') and 'net_G' in fn]
+
+    if len(fns) == 0:  # typically this means the directory only contains serialized models
+        fns = [fn for fn in os.listdir(dir_model) if fn.endswith('.pt') and fn.startswith('G')]
+        model_names = [fn[1:-3] for fn in fns]  # [1:-3] drops the leading "G" and the ".pt" suffix
+    else:
+        model_names = [fn[:-4].split('_')[2][1:] for fn in fns]  # [1:] drops "G"
+
+    if len(fns) == 0:
+        raise Exception('Cannot find any model file ending with .pt or .pth in directory', dir_model)
+
+    model_name_seg = max(model_names, key=len)
+    return model_name_seg[0]
+
+def get_input_id(dir_model):
+    # assume we already know there are seg models - this check is intended to be done prior to calling this function
+    fns = [fn for fn in os.listdir(dir_model) if fn.endswith('.pth') and 'net_G' in fn]
+
+    if len(fns) == 0:  # typically this means the directory only contains serialized models
+        fns = [fn for fn in os.listdir(dir_model) if fn.endswith('.pt') and fn.startswith('G')]
+        model_names_seg = [fn[2:-3] for fn in fns]  # [2:] drops "GS"/"G5"
+    else:
+        model_names_seg = [fn[:-4].split('_')[2][2:] for fn in fns]  # [2:] drops "GS"/"G5"
+
+    if len(fns) == 0:
+        raise Exception('Cannot find any model file ending with .pt or .pth in directory', dir_model)
+
+    if '0' in model_names_seg:
+        return '0'
+    else:
+        return '1'
+
+def init_input_and_mod_id(opt, dir_model=None):
+    """
+    Used by model classes to initialize input id and mod id under different situations.
+    """
+    mod_id_seg = None
+    input_id = None
+
+    if not opt.continue_train and opt.is_train:
+        if hasattr(opt, 'mod_id_seg'):  # use mod id seg from train opt file if available
+            mod_id_seg = opt.mod_id_seg
+        elif not hasattr(opt, 'modalities_names'):  # backward compatible with models trained before this param was introduced
+            mod_id_seg = opt.modalities_no + 1  # for original DeepLIIF, modalities_no is 4 and the seg mod id is 5
+        else:
+            mod_id_seg = 'S'
+
+        if opt.model in ['DeepLIIF', 'DeepLIIFKD']:
+            input_id = '0'
+    else:  # for both continue train and test, we load existing models, so need to obtain seg mod id and input id from filenames
+        if hasattr(opt, 'mod_id_seg'):
+            mod_id_seg = opt.mod_id_seg
+        else:
+            # for continue-training, extract mod id seg from existing files if not available
+            mod_id_seg = get_mod_id_seg(dir_model if dir_model is not None else os.path.join(opt.checkpoints_dir, opt.name))
+
+        if opt.model in ['DeepLIIF', 'DeepLIIFKD']:
+            input_id = get_input_id(dir_model if dir_model is not None else os.path.join(opt.checkpoints_dir, opt.name))
+
+    return mod_id_seg, input_id
+
+
+import copy
+def map_model_names(model_names, mod_id_seg_source, input_id_source, mod_id_seg_target, input_id_target):
+    """
+    Used for DeepLIIFKD to map model/image names from the teacher model to the student model,
+    when mod_id_seg and/or input_id is different.
+    """
+    d_res = {}
+    for model_name in model_names:
+        model_name_new = copy.deepcopy(model_name)
+        if len(model_name) > 2 and model_name[1] == mod_id_seg_source:
+            model_name_new = model_name[0] + mod_id_seg_target + model_name[2:]  # replace mod id seg
+        if input_id_source != input_id_target:  # currently only support 0 or 1 as input_id
+            if int(input_id_target) == 0:
+                model_name_new = model_name_new[:2] + str(int(model_name_new[2:]) - 1)
+            else:
+                model_name_new = model_name_new[:2] + str(int(model_name_new[2:]) + 1)
+        d_res[model_name] = model_name_new
+    # this is not a model name but the final aggregated segmentation image name
+    # so it cannot be obtained from model names
+    d_res['G' + mod_id_seg_source] = 'G' + mod_id_seg_target
+    return d_res
deepliif/util/visualizer.py
CHANGED
@@ -138,7 +138,7 @@ class Visualizer():
         print('Command: %s' % cmd)
         Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
 
-    def display_current_results(self, visuals, epoch, save_result, **kwargs):
+    def display_current_results(self, visuals, epoch, save_result, filename=None, **kwargs):
         """Display current results on visdom; save current results to an HTML file.
 
         Parameters:
@@ -171,7 +171,7 @@ class Visualizer():
                 table td {width: % dpx; height: % dpx; padding: 4px; outline: 4px solid black}
                 </style>""" % (w, h)  # create a table css
             # create a table of images.
-            title = self.name
+            title = self.name if filename is None else filename
             label_html = ''
             label_html_row = ''
             images = []
{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.2.3
+Version: 1.2.5
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -66,7 +66,7 @@ segmentation.*
 
 © This code is made available for non-commercial academic purposes.
 
-
 [](https://pepy.tech/project/deepliif?&left_text=totalusers)
 
 *Overview of DeepLIIF pipeline and sample input IHCs (different
{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/RECORD
CHANGED

@@ -1,9 +1,9 @@
-cli.py,sha256=
+cli.py,sha256=hu-_-yO-ytr5gy3_Xd-kdZ7GdSUR_mISci7IE7nDKg0,63092
 deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepliif/postprocessing.py,sha256=
+deepliif/postprocessing.py,sha256=zMYlUeoz1tU7Cyd9VIxFez126CxBEcc1eBf6nFx-4Rg,43512
 deepliif/data/__init__.py,sha256=IfqVFnFSPQJZnORdRq4sNkJiylr1TaKNmhvWP_aLHdg,5492
 deepliif/data/aligned_dataset.py,sha256=CkKXj94ANSi8RdhpRQjVETFYlRMER2XErIf-87BStTE,5175
-deepliif/data/base_dataset.py,sha256=
+deepliif/data/base_dataset.py,sha256=6ZEjIHDJ7f2JdjxZjbxu9QWnQDoIDmUQw4yPvLRmB2Y,5661
 deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
 deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
 deepliif/data/single_dataset.py,sha256=hWjqTkRESEMppZj_r8bi3G0hAZ5EfvXYgE_qRbpiEz4,1553
@@ -11,28 +11,28 @@ deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN
 deepliif/data/unaligned_dataset.py,sha256=D69SxV087jKTd990UQIR4F3TahJHiSiw8i9Uz_xybt0,4697
 deepliif/models/CycleGAN_model.py,sha256=WDEa-Zgz57mVc9HbcVDXL5vfHvUDWdWXNLyz8ReH3rg,15196
 deepliif/models/DeepLIIFExt_model.py,sha256=HZaX9Z2ue0HQCFFN3guLkBcByCP70i8JvmPY02oOMyU,15022
-deepliif/models/DeepLIIFKD_model.py,sha256=
-deepliif/models/DeepLIIF_model.py,sha256=
+deepliif/models/DeepLIIFKD_model.py,sha256=YMJrun6cGLyukRj4u3yvgm7Z1IOG1juxT11eyX25BRM,25985
+deepliif/models/DeepLIIF_model.py,sha256=hMXE_6NuZvE8UxMrDSNGMFi_zBlW5VqBGj05qzSc5ec,30296
 deepliif/models/SDG_model.py,sha256=3opz7uEyhvVJ8fF4_Jw4ho1MBcc9OVye-ByZD_KF2j0,10142
-deepliif/models/__init__.py,sha256=
+deepliif/models/__init__.py,sha256=l6z3aZneEA6ZwrmwDb4sS-FtKSntYNQJMVX0UNF0qTE,40409
 deepliif/models/att_unet.py,sha256=tqaFMNbGQUjXObOG309P76c7sIPxEvFR38EyuyHY40o,7116
-deepliif/models/base_model.py,sha256=
+deepliif/models/base_model.py,sha256=PNv_JwFQ1_u4MUfqTWd8IXr1JHf5_vLkCnIU3ePJftg,17116
 deepliif/models/networks.py,sha256=SclYeZ78U9ZSOhNrky764ZXB6muIqnHa6K0h_LDSCi0,36690
-deepliif/options/__init__.py,sha256=
+deepliif/options/__init__.py,sha256=TIPw7LuwvZ8yUTMBZNf3aLKM_jOiD9_REUx102oWjiQ,10336
 deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
 deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
 deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
 deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
-deepliif/util/__init__.py,sha256=
+deepliif/util/__init__.py,sha256=6grD2DvrLXSeNpPkfs3BOtxq-4CEvNWkQWX_VdWpMBY,37728
 deepliif/util/checks.py,sha256=xQirKbZxZErsAXc27M5miQUUyxptoIEJSDaUKv1nY7c,1371
 deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
 deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
 deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
-deepliif/util/util.py,sha256=
-deepliif/util/visualizer.py,sha256=
-deepliif-1.2.
-deepliif-1.2.
-deepliif-1.2.
-deepliif-1.2.
-deepliif-1.2.
-deepliif-1.2.
+deepliif/util/util.py,sha256=l7QsUL8gSMJSGH5-Nin6IsjPl9HTyaKk8Z5cd-yuPjs,11055
+deepliif/util/visualizer.py,sha256=BeRIEzbpx0YCn1_zFCtV1JHLMzyWed_nw65wiyKRlcc,20259
+deepliif-1.2.5.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+deepliif-1.2.5.dist-info/METADATA,sha256=1DS0A1oAIuZA2wFSz4hA4d7bM8LOqMLC6n_KVXm02Jo,35266
+deepliif-1.2.5.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+deepliif-1.2.5.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+deepliif-1.2.5.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+deepliif-1.2.5.dist-info/RECORD,,
{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/LICENSE.md
File without changes

{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/WHEEL
File without changes

{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/entry_points.txt
File without changes

{deepliif-1.2.3.dist-info → deepliif-1.2.5.dist-info}/top_level.txt
File without changes