deepliif 1.1.11__py3-none-any.whl → 1.1.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/deepliif/util/visualizer.py
+++ b/deepliif/util/visualizer.py
@@ -165,6 +165,7 @@ class Visualizer():
             if ncols > 0:  # show all the images in one visdom panel
                 ncols = min(ncols, len(visuals))
                 h, w = next(iter(visuals.values())).shape[:2]
+
                 table_css = """<style>
                         table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
                         table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
@@ -176,13 +177,16 @@ class Visualizer():
                 images = []
                 idx = 0
                 for label, image in visuals.items():
-                    image_numpy = util.tensor2im(image)
-                    label_html_row += '<td>%s</td>' % label
-                    images.append(image_numpy.transpose([2, 0, 1]))
-                    idx += 1
-                    if idx % ncols == 0:
-                        label_html += '<tr>%s</tr>' % label_html_row
-                        label_html_row = ''
+                    if image.shape[1] != 3:
+                        pass
+                    else:
+                        image_numpy = util.tensor2im(image)
+                        label_html_row += '<td>%s</td>' % label
+                        images.append(image_numpy.transpose([2, 0, 1]))
+                        idx += 1
+                        if idx % ncols == 0:
+                            label_html += '<tr>%s</tr>' % label_html_row
+                            label_html_row = ''
 
                 white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
                 while idx % ncols != 0:
@@ -191,6 +195,7 @@ class Visualizer():
                     idx += 1
                 if label_html_row != '':
                     label_html += '<tr>%s</tr>' % label_html_row
+
 
                 try:
                     self.vis.images(images, nrow=ncols, win=self.display_id + 1,
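The guard added above skips any visual whose tensor does not have 3 channels, so only RGB-displayable images reach the visdom panel. A minimal illustration of the same check (tensor names and shapes are hypothetical):

```
import torch

# Mirrors the `image.shape[1] != 3` guard: only (N, 3, H, W) tensors
# are converted with tensor2im and displayed; others are skipped.
visuals = {'fake_B': torch.rand(1, 3, 64, 64), 'seg_logits': torch.rand(1, 5, 64, 64)}
displayable = {k: v for k, v in visuals.items() if v.shape[1] == 3}
print(list(displayable))  # ['fake_B']
```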
@@ -248,6 +253,10 @@ class Visualizer():
         # if having 2 processes, each process obtains 50% of the data (effective dataset_size divided by half), the effective counter ratio shall multiply by 2 to compensate that
         n_proc = int(os.getenv('WORLD_SIZE',1))
         counter_ratio = counter_ratio * n_proc
+
+        self.plot_data_update_train = False
+        self.plot_data_update_val = False
+        self.plot_data_update_metrics = False
 
         if self.remote:
             fn = 'plot_current_losses.pickle'
@@ -263,20 +272,98 @@ class Visualizer():
             exec(f'{self.remote_transfer_cmd_function}("{path_source}")')
         else:
             if not hasattr(self, 'plot_data'):
-                self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
-            self.plot_data['X'].append(epoch + counter_ratio)
-            self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
+                self.plot_data = {'X': [], 'X_val':[], 'X_metrics':[],
+                                  'Y': [], 'Y_val':[], 'Y_metrics':[],
+                                  'legend': [], 'legend_val': [], 'legend_metrics':[]}
+                for k in list(losses.keys()):
+                    if k.endswith('_val'):
+                        self.plot_data['legend_val'].append(k)
+                    elif k.startswith(('G_','D_')):
+                        self.plot_data['legend'].append(k)
+                    else:
+                        self.plot_data['legend_metrics'].append(k)
+
+            # check if all names in losses dict have been seen
+            # currently we assume the three types of metrics (train loss, val loss, other metrics) can come into the losses dict
+            # at any step, but each type will join or leave the dict as a whole (i.e., train loss metrics will either all appear or all be missing)
+            for k in list(losses.keys()):
+                if k.endswith('_val'):
+                    if k not in self.plot_data['legend_val']:
+                        self.plot_data['legend_val'].append(k)
+                elif k.startswith(('G_','D_')):
+                    if k not in self.plot_data['legend']:
+                        self.plot_data['legend'].append(k)
+                else:
+                    if k not in self.plot_data['legend_metrics']:
+                        self.plot_data['legend_metrics'].append(k)
+
+            # update training loss
+            print('update training loss')
+            if len(self.plot_data['legend']) > 0:
+                if self.plot_data['legend'][0] in losses:
+                    self.plot_data['X'].append(epoch + counter_ratio)
+                    self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
+                    self.plot_data_update_train = True
+
+            # update validation loss
+            print('update validation loss')
+            if len(self.plot_data['legend_val']) > 0:
+                if self.plot_data['legend_val'][0] in losses:
+                    self.plot_data['X_val'].append(epoch + counter_ratio)
+                    self.plot_data['Y_val'].append([losses[k] for k in self.plot_data['legend_val']])
+                    self.plot_data_update_val = True
+
+            # update other calculated metrics
+            print('update other metrics')
+            if len(self.plot_data['legend_metrics']) > 0:
+                if self.plot_data['legend_metrics'][0] in losses:
+                    self.plot_data['X_metrics'].append(epoch + counter_ratio)
+                    self.plot_data['Y_metrics'].append([losses[k] for k in self.plot_data['legend_metrics']])
+                    self.plot_data_update_metrics = True
 
             try:
-                self.vis.line(
-                    X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
-                    Y=np.array(self.plot_data['Y']),
-                    opts={
-                        'title': self.name + ' loss over time',
-                        'legend': self.plot_data['legend'],
-                        'xlabel': 'epoch',
-                        'ylabel': 'loss'},
-                    win=self.display_id)
+                if self.plot_data_update_train:
+                    print('plotting train loss')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
+                        Y=np.array(self.plot_data['Y']),
+                        opts={
+                            'title': self.name + ' train loss over time',
+                            'legend': self.plot_data['legend'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'loss'},
+                        win = 'train',
+                        #env=self.display_id
+                    )
+
+                if self.plot_data_update_val:
+                    print('plotting val loss')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X_val'])] * len(self.plot_data['legend_val']), 1),
+                        Y=np.array(self.plot_data['Y_val']),
+                        opts={
+                            'title': self.name + ' val loss over time',
+                            'legend': self.plot_data['legend_val'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'loss'},
+                        win = 'val',
+                        #env=self.display_id
+                    )
+
+                if self.plot_data_update_metrics:
+                    print('plotting other metrics')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X_metrics'])] * len(self.plot_data['legend_metrics']), 1),
+                        Y=np.array(self.plot_data['Y_metrics']),
+                        opts={
+                            'title': self.name + ' metrics over time',
+                            'legend': self.plot_data['legend_metrics'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'metrics'},
+                        win = 'metrics',
+                        #env=self.display_id
+                    )
+
             except VisdomExceptionBase:
                 self.create_visdom_connections()
 
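The hunk above replaces the single combined loss plot with three visdom windows ('train', 'val', 'metrics'), routing each entry of the `losses` dict by its key. A minimal sketch of that routing rule (the example keys are hypothetical):

```
# Keys ending in '_val' go to the validation window, keys starting with
# 'G_' or 'D_' to the train window, and everything else to the metrics window.
losses = {'G_GAN': 0.7, 'D_real': 0.3, 'G_GAN_val': 0.9, 'cell_count': 120.0}

legend, legend_val, legend_metrics = [], [], []
for k in losses:
    if k.endswith('_val'):
        legend_val.append(k)
    elif k.startswith(('G_', 'D_')):
        legend.append(k)
    else:
        legend_metrics.append(k)

print(legend, legend_val, legend_metrics)
# ['G_GAN', 'D_real'] ['G_GAN_val'] ['cell_count']
```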
--- a/deepliif-1.1.11.dist-info/METADATA
+++ b/deepliif-1.1.13.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.11
+Version: 1.1.13
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -8,13 +8,14 @@ Author-email: ghahremani.parmida@gmail.com
 Keywords: DeepLIIF,IHC,Segmentation,Classification
 Description-Content-Type: text/markdown
 License-File: LICENSE.md
-Requires-Dist: opencv-python (==4.5.3.56)
-Requires-Dist: torchvision (==0.10.0)
+Requires-Dist: opencv-python (==4.8.1.78)
+Requires-Dist: torch (==1.13.1)
+Requires-Dist: torchvision (==0.14.1)
 Requires-Dist: scikit-image (==0.18.3)
 Requires-Dist: dominate (==2.6.0)
-Requires-Dist: numba (==0.53.1)
+Requires-Dist: numba (==0.57.1)
 Requires-Dist: Click (==8.0.3)
-Requires-Dist: requests (==2.26.0)
+Requires-Dist: requests (==2.32.2)
 Requires-Dist: dask (==2021.11.2)
 Requires-Dist: visdom (>=0.1.8.3)
 Requires-Dist: python-bioformats (>=4.0.6)
@@ -63,7 +64,7 @@ segmentation.*
 
 © This code is made available for non-commercial academic purposes.
 
-![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.13&color=darkgreen)
 [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)
 
 ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
@@ -111,6 +112,16 @@ Commands:
   train  General-purpose training script for multi-task...
 ```
 
+**Note:** You might need to install a version of PyTorch that is compatible with your CUDA version.
+Otherwise, only the CPU will be used.
+Visit the [PyTorch website](https://pytorch.org/) for details.
+You can confirm if your installation will run on the GPU by checking if the following returns `True`:
+
+```
+import torch
+torch.cuda.is_available()
+```
+
 ## Training Dataset
 For training, all image sets must be 512x512 and combined together in 3072x512 images (six images of size 512x512 stitched
 together horizontally).
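For the 3072x512 format described above, a combined training sample is simply six 512x512 images stitched side by side. A minimal sketch with numpy (placeholder arrays stand in for the real modality images):

```
import numpy as np

# Six 512x512 RGB tiles stitched horizontally into one training image
# that is 512 pixels high and 3072 pixels wide (6 * 512 = 3072).
tiles = [np.zeros((512, 512, 3), dtype=np.uint8) for _ in range(6)]
combined = np.concatenate(tiles, axis=1)
print(combined.shape)  # (512, 3072, 3)
```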
@@ -208,26 +219,49 @@ python test.py --dataroot /path/to/input/images
 * Before running test on images, the model files must be serialized as described above.
 * The serialized model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
 * The test results will be saved to the specified output directory, which defaults to the input directory.
-* The default tile size is 512.
+* The tile size must be specified and is used to split the image into tiles for processing. The tile size is based on the resolution (scan magnification) of the input image; the recommended values are 512 for 40x images, 256 for 20x, and 128 for 10x. Note that the smaller the tile size, the longer inference will take.
 * Testing datasets can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).
 
+**Test Command Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test`:
+* `--eager-mode` Run the original model files (instead of serialized model files).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
 **Whole Slide Image (WSI) Inference:**
 For translation and segmentation of whole slide images,
-you can simply use the same test command
-giving path to the directory containing your whole slide images as the input-dir.
+you can simply use the `test-wsi` command,
+giving the path to the directory containing your WSI as the input-dir
+and specifying the filename of the WSI.
 DeepLIIF automatically reads the WSI region by region,
 translates and segments each region separately, and stitches the regions
 to create the translation and segmentation for the whole slide image,
 then saves all masks in the format of ome.tiff in the given output-dir.
-Based on the available GPU resources, the region-size can be changed.
+Based on the available resources, the region-size can be changed.
 ```
-deepliif test --input-dir /path/to/input/images
-              --output-dir /path/to/output/images
-              --model-dir /path/to/the/serialized/model
-              --tile-size 512
-              --region-size 20000
+deepliif test-wsi --input-dir /path/to/input/image
+                  --filename wsiFile.svs
+                  --output-dir /path/to/output/images
+                  --model-dir /path/to/the/serialized/model
+                  --tile-size 512
 ```
 
+**WSI Inference Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test-wsi`:
+* `--region-size` Set the size of each region to read from the WSI (default is 20000).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
+**Reducing Run Time:**
+If you need only the final segmentation and not the inferred multiplex images,
+it is recommended to run `deepliif test` or `deepliif test-wsi` with the `--seg-only`
+option. This will generate only the necessary images, thus reducing the overall run time.
+
+**Torchserve:**
 If you prefer, it is possible to run the models using Torchserve.
 Please see [the documentation](https://nadeemlab.github.io/DeepLIIF/deployment/#deploying-deepliif-with-torchserve)
 on how to deploy the model with Torchserve and for an example of how to run the inference.
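Putting the options above together, for example, a 20x image set could be segmented without inferring the unneeded modality images via `deepliif test --input-dir /path/to/input/images --output-dir /path/to/output/images --model-dir /path/to/the/serialized/model --tile-size 256 --seg-only` (the tile size follows the magnification guidance above; paths are placeholders).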
@@ -264,9 +298,16 @@ If you don't have access to GPU or appropriate hardware and don't want to instal
 
 ![DeepLIIF Website Demo](images/deepliif-website-demo-04.gif)
 
+Our deployment at [deepliif.org](https://deepliif.org) also provides virtual slide digitization to generate a single stitched image from a 10x video acquired with a microscope and camera. The video should be captured with the following guidelines to achieve the best results:
+* Brief but complete pauses at every section of the sample to avoid motion artifacts.
+* Significant overlap between pauses so that there is sufficient context for stitching frames together.
+* Methodical and consistent movement over the sample. For example, start at the top left corner, then go all the way to the right, then down one step, then all the way to the left, down one step, etc., until the end of the sample is reached. Again, brief overlapping pauses throughout will allow the best quality images to be generated.
+
+![DeepLIIF Website Demo](images/deepliif-stitch-demo-01.gif)
+
 ## Cloud API Endpoints
 
-DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
+For small images, DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
 
 ```
 POST /api/infer
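A minimal client sketch for this endpoint (the base URL follows the deepliif.org deployment mentioned above; the form-field name `img` and the omission of the optional threshold parameters are illustrative assumptions):

```
import requests

# Post an image to the inference endpoint; the JSON response includes
# a 'scoring' section, as used in the examples that follow.
with open('example_ihc.png', 'rb') as f:
    res = requests.post('https://deepliif.org/api/infer', files={'img': f})

data = res.json()
print(data['scoring'])
```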
@@ -362,6 +403,8 @@ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
     print(json.dumps(data['scoring'], indent=2))
 ```
 
+Note that since this is a single request to send the image and receive the results, processing must complete within the timeout period (typically about one minute). If your request receives a 504 status code, please try a smaller image or install the `deepliif` package as detailed above to run the process locally.
+
 If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:
 
 ```
@@ -514,16 +557,17 @@ DeepLIIF model and release back to the community with full credit to the contrib
 - [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)
 
 ## Support
-Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
-
-Bugs can be reported in the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab.
+Please use the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab for discussion, questions, or to report bugs related to DeepLIIF.
 
 ## License
 © [Nadeem Lab](https://nadeemlab.org/) - DeepLIIF code is distributed under **Apache 2.0 with Commons Clause** license,
 and is available for non-commercial academic purposes.
 
 ## Acknowledgments
-* This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+## Funding
+This work is funded by the 7-year NIH/NCI R37 MERIT Award ([R37CA295658](https://reporter.nih.gov/search/5dgSOlHosEKepkZEAS5_kQ/project-details/11018883#description)).
 
 ## Reference
 If you find our work useful in your research or if you use parts of this code or our released dataset, please cite the following papers:
@@ -551,6 +595,8 @@ If you find our work useful in your research or if you use parts of this code or
   title={An AI-Ready Multiplex Staining Dataset for Reproducible and Accurate Characterization of Tumor Immune Microenvironment},
   author={Ghahremani, Parmida and Marino, Joseph and Hernandez-Prera, Juan and V. de la Iglesia, Janis and JC Slebos, Robbert and H. Chung, Christine and Nadeem, Saad},
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume={14225},
+  pages={704--713},
   year={2023}
 }
 
@@ -558,14 +604,19 @@ If you find our work useful in your research or if you use parts of this code or
   author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
   title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
   journal = {Histopathology},
+  volume = {83},
+  number = {6},
+  pages = {981--988},
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }
 
 @article{zehra2024deepliifstitch,
-  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
-  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
-  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
-  year = {2024}
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume = {15004},
+  pages = {427--436},
+  year = {2024}
 }
 ```
--- /dev/null
+++ b/deepliif-1.1.13.dist-info/RECORD
@@ -0,0 +1,42 @@
+cli.py,sha256=IQIO_V9ubmeCOAniW9A5c8r9ETs7ehz4eJp_hrpuKo8,59625
+deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+deepliif/postprocessing.py,sha256=naq4Lt7WHg6wfOhksTASiCmZAx2P_wZSqozCkKvXNV0,40686
+deepliif/postprocessing__OLD__DELETE.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
+deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
+deepliif/data/__init__.py,sha256=IfqVFnFSPQJZnORdRq4sNkJiylr1TaKNmhvWP_aLHdg,5492
+deepliif/data/aligned_dataset.py,sha256=Tuvll1dpnNAgwReeZ6NleKLQP__yhKxZRWcvb3IOSGY,5145
+deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
+deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
+deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
+deepliif/data/single_dataset.py,sha256=hWjqTkRESEMppZj_r8bi3G0hAZ5EfvXYgE_qRbpiEz4,1553
+deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN2KMs,3592
+deepliif/data/unaligned_dataset.py,sha256=D69SxV087jKTd990UQIR4F3TahJHiSiw8i9Uz_xybt0,4697
+deepliif/models/CycleGAN_model.py,sha256=WDEa-Zgz57mVc9HbcVDXL5vfHvUDWdWXNLyz8ReH3rg,15196
+deepliif/models/DeepLIIFExt_model.py,sha256=HZaX9Z2ue0HQCFFN3guLkBcByCP70i8JvmPY02oOMyU,15022
+deepliif/models/DeepLIIF_model.py,sha256=6vmsXcBcoALrhJLa7XGhDmLamO_WCzTDYEyVUBE482o,23857
+deepliif/models/SDG_model.py,sha256=3opz7uEyhvVJ8fF4_Jw4ho1MBcc9OVye-ByZD_KF2j0,10142
+deepliif/models/__init__ - different weighted.py,sha256=Oe6ichU-Qia2mODGUtQTh1OBZZnv5N-93AzOfzQiHlw,32227
+deepliif/models/__init__ - run_dask_multi dev.py,sha256=vt8X8qeiJr2aPhFi6muZEJLUSsr8XChfI45NSwL8Rfg,39449
+deepliif/models/__init__ - time gens.py,sha256=mRUtxNaGDZuhlQtKdA-OvGWTQwl7z2yMWc-9l0QrgaY,32922
+deepliif/models/__init__ - timings.py,sha256=S_wFImwxzGKx8STqbpcYCPOlbb_84WLMRDSnaWC8qFg,31750
+deepliif/models/__init__.py,sha256=EZkZu28f5ju_YiEz4yAMHQ5GAzl1-Mi6AK4kfWe20UA,31934
+deepliif/models/att_unet.py,sha256=tqaFMNbGQUjXObOG309P76c7sIPxEvFR38EyuyHY40o,7116
+deepliif/models/base_model.py,sha256=ezWkmbuuNLGDMjyXe3VzJroj7QR1h0M9ByouzpfCrQg,16843
+deepliif/models/networks.py,sha256=Ijeb7nGf-YFgc_sBR-sIsk--0rTeiUqKZd01k4DMsuM,36614
+deepliif/options/__init__.py,sha256=p2IWckf3-K-wclDWfSq5ZmynKk2lNov2Tn7WPYIO11A,8329
+deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
+deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
+deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
+deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
+deepliif/util/__init__.py,sha256=_b7-t5Z54CJJIy-moeKPPLFHg5BRCKgWo5V18WqRZVo,29146
+deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
+deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
+deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
+deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
+deepliif/util/visualizer.py,sha256=6E1sPbXdgLFB9mnPwtfEjm9O40viG4dfv5MyTpOQQpo,20210
+deepliif-1.1.13.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+deepliif-1.1.13.dist-info/METADATA,sha256=Ff0QjUBwpZGJcU1YgHfaixVAW6IYyHjV0FUi7QiGaRo,35195
+deepliif-1.1.13.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+deepliif-1.1.13.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+deepliif-1.1.13.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+deepliif-1.1.13.dist-info/RECORD,,
--- a/deepliif-1.1.11.dist-info/RECORD
+++ /dev/null
@@ -1,35 +0,0 @@
-cli.py,sha256=iU9YxO65T1rxX2Mx9f9LsEPC4o_ZXO-wH_-FUjIA1so,40088
-deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepliif/postprocessing.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
-deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
-deepliif/data/__init__.py,sha256=euf9eUboK4RYR0jvdiyZDgPGozC1Nv7WRqRbTxSZD6A,5281
-deepliif/data/aligned_dataset.py,sha256=6qNFLXXW1phuIfNhCJSaLfOc-KN2kl7EuUdmyAPPU4I,5148
-deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
-deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
-deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
-deepliif/data/single_dataset.py,sha256=hWjqTkRESEMppZj_r8bi3G0hAZ5EfvXYgE_qRbpiEz4,1553
-deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN2KMs,3592
-deepliif/data/unaligned_dataset.py,sha256=m7j-CX-hkXbhg96NSEcaCagNVhTuXKkMsBADdMEJDBA,3393
-deepliif/models/DeepLIIFExt_model.py,sha256=Sc60rOfDJuoGrJ1CYe4beAg6as6F0o864AO6ZB7paBY,14527
-deepliif/models/DeepLIIF_model.py,sha256=ECZyM9jzoJAWSgB1ProBoarVuGcbScQMaSkRjSMgt0k,20872
-deepliif/models/SDG_model.py,sha256=xcZCTMNyJbcB78I1c8KtYVIB6OWL7WSMKdCxNemIzxs,9074
-deepliif/models/__init__.py,sha256=LWEyM7YwSoQ1TgRnsFk8O96xTf-QIxw5o_RexnECl_Q,28049
-deepliif/models/base_model.py,sha256=HKcUOBHtL-zLs5ZcmeXT-ZV_ubqsSUo4wMCQ0W27YHU,15583
-deepliif/models/networks.py,sha256=bN4yjRdE413efUESq8pvhzPDgFCTwFKXyQOrRqHckWY,32177
-deepliif/options/__init__.py,sha256=-syiyTK_oAeTLCBDm0bz1f_1jI3VK3LCwo2UNwOz6eM,5949
-deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
-deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
-deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
-deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
-deepliif/util/__init__.py,sha256=bIa1Y1YQynvAoXux8ENAk_8Ykpfu9xxGByg-pgfGkK0,28090
-deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
-deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
-deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
-deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
-deepliif/util/visualizer.py,sha256=5V1lWidHqssJX21jn1P5-bOVgtrEXKVaQgnMWAsMfqg,15636
-deepliif-1.1.11.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
-deepliif-1.1.11.dist-info/METADATA,sha256=3UtsAvCQz1osg0cBCnnLZVT1pvPdRxFbsaJ6B_yn_SI,31598
-deepliif-1.1.11.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-deepliif-1.1.11.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
-deepliif-1.1.11.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
-deepliif-1.1.11.dist-info/RECORD,,