deepliif-1.1.7.tar.gz → deepliif-1.1.8.tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- {deepliif-1.1.7/deepliif.egg-info → deepliif-1.1.8}/PKG-INFO +17 -8
- {deepliif-1.1.7 → deepliif-1.1.8}/README.md +16 -7
- {deepliif-1.1.7 → deepliif-1.1.8}/cli.py +76 -102
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/aligned_dataset.py +33 -7
- deepliif-1.1.8/deepliif/models/DeepLIIFExt_model.py +297 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/models/DeepLIIF_model.py +10 -5
- deepliif-1.1.8/deepliif/models/__init__.py +539 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/models/base_model.py +54 -8
- deepliif-1.1.8/deepliif/options/__init__.py +102 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/options/base_options.py +7 -6
- deepliif-1.1.8/deepliif/postprocessing.py +433 -0
- {deepliif-1.1.7 → deepliif-1.1.8/deepliif.egg-info}/PKG-INFO +17 -8
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif.egg-info/SOURCES.txt +6 -1
- {deepliif-1.1.7 → deepliif-1.1.8}/setup.cfg +1 -1
- {deepliif-1.1.7 → deepliif-1.1.8}/setup.py +1 -1
- deepliif-1.1.8/tests/test_args.py +5 -0
- deepliif-1.1.8/tests/test_cli_inference.py +389 -0
- deepliif-1.1.8/tests/test_cli_serialize.py +18 -0
- deepliif-1.1.8/tests/test_cli_train.py +63 -0
- deepliif-1.1.7/deepliif/models/__init__.py +0 -445
- deepliif-1.1.7/deepliif/options/__init__.py +0 -1
- deepliif-1.1.7/deepliif/postprocessing.py +0 -394
- {deepliif-1.1.7 → deepliif-1.1.8}/LICENSE.md +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/__init__.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/__init__.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/base_dataset.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/colorization_dataset.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/image_folder.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/single_dataset.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/template_dataset.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/unaligned_dataset.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/models/networks.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/options/processing_options.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/options/test_options.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/options/train_options.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/train.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/__init__.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/get_data.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/html.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/image_pool.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/util.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif/util/visualizer.py +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif.egg-info/dependency_links.txt +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif.egg-info/entry_points.txt +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif.egg-info/requires.txt +0 -0
- {deepliif-1.1.7 → deepliif-1.1.8}/deepliif.egg-info/top_level.txt +0 -0
{deepliif-1.1.7/deepliif.egg-info → deepliif-1.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.7
+Version: 1.1.8
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -16,11 +16,13 @@ License-File: LICENSE.md
 <img src="./images/DeepLIIF_logo.png" width="50%">
 <h3 align="center"><strong>Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification</strong></h3>
 <p align="center">
-  <a href="https://rdcu.be/cKSBz">Nature MI'22
+  <a href="https://rdcu.be/cKSBz">Nature MI'22</a>
   |
-  <a href="https://openaccess.thecvf.com/content/CVPR2022/html/Ghahremani_DeepLIIF_An_Online_Platform_for_Quantification_of_Clinical_Pathology_Slides_CVPR_2022_paper.html">CVPR'22
+  <a href="https://openaccess.thecvf.com/content/CVPR2022/html/Ghahremani_DeepLIIF_An_Online_Platform_for_Quantification_of_Clinical_Pathology_Slides_CVPR_2022_paper.html">CVPR'22</a>
   |
-  <a href="https://arxiv.org/abs/2305.16465">MICCAI'23
+  <a href="https://arxiv.org/abs/2305.16465">MICCAI'23</a>
+  |
+  <a href="https://onlinelibrary.wiley.com/share/author/4AEBAGEHSZE9GDP3H8MN?target=10.1111/his.15048">Histopathology'23</a>
   |
   <a href="https://deepliif.org/">Cloud Deployment</a>
   |
@@ -217,8 +219,7 @@ on how to deploy the model with Torchserve and for an example of how to run the
 We provide a Dockerfile that can be used to run the DeepLIIF models inside a container.
 First, you need to install the [Docker Engine](https://docs.docker.com/engine/install/ubuntu/).
 After installing the Docker, you need to follow these steps:
-* Download the pretrained model and place them in DeepLIIF/
-* Change XXX of the **WORKDIR** line in the **DockerFile** to the directory containing the DeepLIIF project.
+* Download the pretrained model [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4) and place them in DeepLIIF/model-server/DeepLIIF_Latest_Model.
 * To create a docker image from the docker file:
 ```
 docker build -t cuda/deepliif .
@@ -227,7 +228,7 @@ The image is then used as a base. You can copy and use it to run an application.
 environment in which to run, referred to as a container.
 * To create and run a container:
 ```
-docker run -it -v `pwd`:`pwd` -w `pwd` cuda/deepliif deepliif test --input-dir Sample_Large_Tissues
+docker run -it -v `pwd`:`pwd` -w `pwd` cuda/deepliif deepliif test --input-dir Sample_Large_Tissues --tile-size 512
 ```
 When you run a container from the image, the `deepliif` CLI will be available.
 You can easily run any CLI command in the activated environment and copy the results from the docker container to the host.
@@ -353,7 +354,7 @@ co-registration, whole-cell multiplex segmentation via [ImPartial](https://githu
 DeepLIIF model and release back to the community with full credit to the contributors.
 
 - [x] **Memorial Sloan Kettering Cancer Center** [AI-ready immunohistochemistry and multiplex immunofluorescence dataset](https://zenodo.org/record/4751737#.YKRTS0NKhH4) for breast, lung, and bladder cancers (**Nature Machine Intelligence'22**)
-- [x] **Moffitt Cancer Center** AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset for head-and-neck squamous cell carcinoma (**MICCAI'23**)
+- [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)
 
 ## Support
 Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
@@ -395,4 +396,12 @@ If you find our work useful in your research or if you use parts of this code or
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
   year={2023}
 }
+
+@article{nadeem2023ki67validationMTC,
+  author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
+  title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
+  journal = {Histopathology},
+  year = {2023},
+  doi = {https://doi.org/10.1111/his.15048}
+}
 ```
{deepliif-1.1.7 → deepliif-1.1.8}/README.md

@@ -5,11 +5,13 @@
 <img src="./images/DeepLIIF_logo.png" width="50%">
 <h3 align="center"><strong>Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification</strong></h3>
 <p align="center">
-  <a href="https://rdcu.be/cKSBz">Nature MI'22
+  <a href="https://rdcu.be/cKSBz">Nature MI'22</a>
   |
-  <a href="https://openaccess.thecvf.com/content/CVPR2022/html/Ghahremani_DeepLIIF_An_Online_Platform_for_Quantification_of_Clinical_Pathology_Slides_CVPR_2022_paper.html">CVPR'22
+  <a href="https://openaccess.thecvf.com/content/CVPR2022/html/Ghahremani_DeepLIIF_An_Online_Platform_for_Quantification_of_Clinical_Pathology_Slides_CVPR_2022_paper.html">CVPR'22</a>
   |
-  <a href="https://arxiv.org/abs/2305.16465">MICCAI'23
+  <a href="https://arxiv.org/abs/2305.16465">MICCAI'23</a>
+  |
+  <a href="https://onlinelibrary.wiley.com/share/author/4AEBAGEHSZE9GDP3H8MN?target=10.1111/his.15048">Histopathology'23</a>
   |
   <a href="https://deepliif.org/">Cloud Deployment</a>
   |
@@ -206,8 +208,7 @@ on how to deploy the model with Torchserve and for an example of how to run the
 We provide a Dockerfile that can be used to run the DeepLIIF models inside a container.
 First, you need to install the [Docker Engine](https://docs.docker.com/engine/install/ubuntu/).
 After installing the Docker, you need to follow these steps:
-* Download the pretrained model and place them in DeepLIIF/
-* Change XXX of the **WORKDIR** line in the **DockerFile** to the directory containing the DeepLIIF project.
+* Download the pretrained model [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4) and place them in DeepLIIF/model-server/DeepLIIF_Latest_Model.
 * To create a docker image from the docker file:
 ```
 docker build -t cuda/deepliif .
@@ -216,7 +217,7 @@ The image is then used as a base. You can copy and use it to run an application.
 environment in which to run, referred to as a container.
 * To create and run a container:
 ```
-docker run -it -v `pwd`:`pwd` -w `pwd` cuda/deepliif deepliif test --input-dir Sample_Large_Tissues
+docker run -it -v `pwd`:`pwd` -w `pwd` cuda/deepliif deepliif test --input-dir Sample_Large_Tissues --tile-size 512
 ```
 When you run a container from the image, the `deepliif` CLI will be available.
 You can easily run any CLI command in the activated environment and copy the results from the docker container to the host.
@@ -342,7 +343,7 @@ co-registration, whole-cell multiplex segmentation via [ImPartial](https://githu
 DeepLIIF model and release back to the community with full credit to the contributors.
 
 - [x] **Memorial Sloan Kettering Cancer Center** [AI-ready immunohistochemistry and multiplex immunofluorescence dataset](https://zenodo.org/record/4751737#.YKRTS0NKhH4) for breast, lung, and bladder cancers (**Nature Machine Intelligence'22**)
-- [x] **Moffitt Cancer Center** AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset for head-and-neck squamous cell carcinoma (**MICCAI'23**)
+- [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)
 
 ## Support
 Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
@@ -384,4 +385,12 @@ If you find our work useful in your research or if you use parts of this code or
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
   year={2023}
 }
+
+@article{nadeem2023ki67validationMTC,
+  author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
+  title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
+  journal = {Histopathology},
+  year = {2023},
+  doi = {https://doi.org/10.1111/his.15048}
+}
 ```
{deepliif-1.1.7 → deepliif-1.1.8}/cli.py

@@ -10,10 +10,11 @@ import numpy as np
 from PIL import Image
 
 from deepliif.data import create_dataset, transform
-from deepliif.models import
+from deepliif.models import init_nets, infer_modalities, infer_results_for_wsi, create_model
 from deepliif.util import allowed_file, Visualizer, get_information, test_diff_original_serialized, disable_batchnorm_tracking_stats
 from deepliif.util.util import mkdirs, check_multi_scale
 # from deepliif.util import infer_results_for_wsi
+from deepliif.options import Options
 
 import torch.distributed as dist
 
@@ -74,12 +75,13 @@ def print_options(opt):
     print(message)
 
     # save to the disk
-
-
-
-
-
-
+    if opt.phase == 'train':
+        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
+        mkdirs(expr_dir)
+        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
+        with open(file_name, 'wt') as opt_file:
+            opt_file.write(message)
+            opt_file.write('\n')
 
 
 @click.group()
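This is the write half of a round trip that the rest of this release depends on: `train` now persists the rendered options to `<checkpoints-dir>/<name>/train_opt.txt`, and the reworked `test` and `serialize` commands (below) read that file back via the new `Options` class. The parser itself lives in the new `deepliif/options/__init__.py` (+102 lines, not included in this diff); the sketch below is only a plausible reading of it, assuming the conventional pix2pix-style `key: value [default: ...]` dump, and `load_opt_file` is a hypothetical helper name.

```python
# Hypothetical sketch only: the real loader is Options(path_file=...) in
# deepliif/options/__init__.py, whose source is not part of this diff.
from argparse import Namespace

def load_opt_file(path):
    """Parse a 'key: value [default: ...]' options dump back into a namespace."""
    opt = Namespace()
    with open(path) as f:
        for line in f:
            line = line.strip()
            if ':' not in line or line.startswith('-'):
                continue  # skip blank lines and the '----- Options -----' banners
            key, _, value = line.partition(':')
            setattr(opt, key.strip(), value.split('[')[0].strip())
    return opt

opt = load_opt_file('./checkpoints/experiment_name/train_opt.txt')
```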
@@ -95,8 +97,9 @@ def cli():
               help='name of the experiment. It decides where to store samples and models')
 @click.option('--gpu-ids', type=int, multiple=True, help='gpu-ids 0 gpu-ids 1 or gpu-ids -1 for CPU')
 @click.option('--checkpoints-dir', default='./checkpoints', help='models are saved here')
-@click.option('--
+@click.option('--modalities-no', default=4, type=int, help='number of targets')
 # model parameters
+@click.option('--model', default='DeepLIIF', help='name of model class')
 @click.option('--input-nc', default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
 @click.option('--output-nc', default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
 @click.option('--ngf', default=64, help='# of gen filters in the last conv layer')
@@ -112,7 +115,6 @@ def cli():
 @click.option('--init-type', default='normal',
               help='network initialization [normal | xavier | kaiming | orthogonal]')
 @click.option('--init-gain', default=0.02, help='scaling factor for normal, xavier and orthogonal.')
-@click.option('--padding-type', default='reflect', help='network padding type.')
 @click.option('--no-dropout', is_flag=True, help='no dropout for the generator')
 # dataset parameters
 @click.option('--direction', default='AtoB', help='AtoB or BtoA')
@@ -175,19 +177,30 @@ def cli():
 @click.option('--save-by-iter', is_flag=True, help='whether saves model by iteration')
 @click.option('--remote', type=bool, default=False, help='whether isolate visdom checkpoints or not; if False, you can run a separate visdom server anywhere that consumes the checkpoints')
 @click.option('--remote-transfer-cmd', type=str, default=None, help='module and function to be used to transfer remote files to target storage location, for example mymodule.myfunction')
-@click.option('--
+@click.option('--dataset-mode', type=str, default='aligned',
               help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
 @click.option('--padding', type=str, default='zero',
               help='chooses the type of padding used by resnet generator. [reflect | zero]')
 @click.option('--local-rank', type=int, default=None, help='placeholder argument for torchrun, no need for manual setup')
 @click.option('--seed', type=int, default=None, help='basic seed to be used for deterministic training, default to None (non-deterministic)')
-
-
+# DeepLIIFExt params
+@click.option('--seg-gen', type=bool, default=True, help='True (Translation and Segmentation), False (Only Translation).')
+@click.option('--net-ds', type=str, default='n_layers',
+              help='specify discriminator architecture for segmentation task [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
+@click.option('--net-gs', type=str, default='unet_512',
+              help='specify generator architecture for segmentation task [resnet_9blocks | resnet_6blocks | unet_512 | unet_256 | unet_128]')
+@click.option('--gan-mode', type=str, default='vanilla',
+              help='the type of GAN objective for translation task. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
+@click.option('--gan-mode-s', type=str, default='lsgan',
+              help='the type of GAN objective for segmentation task. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
+def train(dataroot, name, gpu_ids, checkpoints_dir, input_nc, output_nc, ngf, ndf, net_d, net_g,
+          n_layers_d, norm, init_type, init_gain, no_dropout, direction, serial_batches, num_threads,
           batch_size, load_size, crop_size, max_dataset_size, preprocess, no_flip, display_winsize, epoch, load_iter,
           verbose, lambda_l1, is_train, display_freq, display_ncols, display_id, display_server, display_env,
           display_port, update_html_freq, print_freq, no_html, save_latest_freq, save_epoch_freq, save_by_iter,
           continue_train, epoch_count, phase, lr_policy, n_epochs, n_epochs_decay, beta1, lr, lr_decay_iters,
-          remote, local_rank, remote_transfer_cmd, seed, dataset_mode, padding
+          remote, local_rank, remote_transfer_cmd, seed, dataset_mode, padding, model,
+          modalities_no, seg_gen, net_ds, net_gs, gan_mode, gan_mode_s):
     """General-purpose training script for multi-task image-to-image translation.
 
     This script works for various models (with option '--model': e.g., DeepLIIF) and
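The extended decorator stack and signature can be smoke-tested without a shell, in the spirit of the new tests/test_cli_train.py (whose contents are not included in this diff). A sketch, where the dataroot and experiment name are placeholders and only flags visible in this diff are used:

```python
from click.testing import CliRunner
from cli import cli  # the click group defined in cli.py

runner = CliRunner()
result = runner.invoke(cli, [
    'train',
    '--dataroot', './Sample_Dataset',  # placeholder path
    '--name', 'ext_smoke_test',
    '--model', 'DeepLIIFExt',          # new in 1.1.8: selects the model class
    '--modalities-no', '3',            # new in 1.1.8: number of targets
    '--seg-gen', 'True',               # DeepLIIFExt: also train segmentation generators
    '--gpu-ids', '-1',                 # CPU
])
print(result.exit_code)
print(result.output)
```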
@@ -199,6 +212,7 @@ def train(dataroot, name, gpu_ids, checkpoints_dir, targets_no, input_nc, output
     plot, and save models.The script supports continue/resume training.
     Use '--continue_train' to resume your previous training.
     """
+    d_params = locals()
 
     if gpu_ids and gpu_ids[0] == -1:
         gpu_ids = []
@@ -224,26 +238,21 @@ def train(dataroot, name, gpu_ids, checkpoints_dir, targets_no, input_nc, output
     flag_deterministic = set_seed(seed)
 
     if flag_deterministic:
-
+        d_params['padding'] = 'zero'
         print('padding type is forced to zero padding, because neither refection pad2d or replication pad2d has a deterministic implementation')
 
     # create a dataset given dataset_mode and other options
     # dataset = AlignedDataset(opt)
 
-    opt = Options(
-        n_layers_d, norm, init_type, init_gain, no_dropout, direction, serial_batches, num_threads,
-        batch_size, load_size, crop_size, max_dataset_size, preprocess, no_flip, display_winsize, epoch,
-        load_iter, verbose, lambda_l1, is_train, display_freq, display_ncols, display_id, display_server,
-        display_env, display_port, update_html_freq, print_freq, no_html, save_latest_freq, save_epoch_freq,
-        save_by_iter, continue_train, epoch_count, phase, lr_policy, n_epochs, n_epochs_decay, beta1,
-        lr, lr_decay_iters, remote, remote_transfer_cmd, dataset_mode, padding)
+    opt = Options(d_params=d_params)
     print_options(opt)
+
     dataset = create_dataset(opt)
     # get the number of images in the dataset.
     click.echo('The number of training images = %d' % len(dataset))
 
     # create a model given model and other options
-    model =
+    model = create_model(opt)
     # regular setup: load and print networks; create schedulers
     model.setup(opt)
 
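The `d_params = locals()` idiom only works because it is the first statement in `train`: at that point the function's local namespace contains exactly the parsed CLI parameters, so the whole dict can be handed to `Options` instead of the long positional argument list removed above. A minimal, hypothetical illustration of the pattern (the real `Options` lives in the new `deepliif/options/__init__.py`, not shown here):

```python
class Options:
    """Toy stand-in: one attribute per CLI argument, set in bulk."""
    def __init__(self, d_params):
        self.__dict__.update(d_params)

def train(dataroot='.', name='exp', batch_size=1):
    d_params = locals()  # {'dataroot': '.', 'name': 'exp', 'batch_size': 1}
    return Options(d_params=d_params)

assert train().batch_size == 1
```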
@@ -461,18 +470,26 @@ def trainlaunch(**kwargs):
 @cli.command()
 @click.option('--models-dir', default='./model-server/DeepLIIF_Latest_Model', help='reads models from here')
 @click.option('--output-dir', help='saves results here.')
+@click.option('--tile-size', type=int, default=None, help='tile size')
 @click.option('--device', default='cpu', type=str, help='device to load model, either cpu or gpu')
 @click.option('--verbose', default=0, type=int,help='saves results here.')
-def serialize(models_dir, output_dir, device, verbose):
+def serialize(models_dir, output_dir, tile_size, device, verbose):
     """Serialize DeepLIIF models using Torchscript
     """
+    if tile_size is None:
+        tile_size = 512
     output_dir = output_dir or models_dir
     ensure_exists(output_dir)
-
-
+
+    # copy train_opt.txt to the target location
+    import shutil
+    if models_dir != output_dir:
+        shutil.copy(f'{models_dir}/train_opt.txt',f'{output_dir}/train_opt.txt')
+
+    sample = transform(Image.new('RGB', (tile_size, tile_size)))
 
     with click.progressbar(
-            init_nets(models_dir, eager_mode=True).items(),
+            init_nets(models_dir, eager_mode=True, phase='test').items(),
             label='Tracing nets',
             item_show_func=lambda n: n[0] if n else n
     ) as bar:
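`serialize` now builds its dummy input from a configurable `--tile-size` (defaulting to 512) rather than a hard-coded tile, because TorchScript tracing specializes the exported graph to the example input, so the dummy tile should match the tile size used at inference. What happens to each net inside the progress bar is not shown in this diff; a sketch of the likely core, where `trace_and_save` is a hypothetical name:

```python
import torch

def trace_and_save(name, net, sample, output_dir):
    """Trace one eager-mode net with the dummy tile and save it as TorchScript."""
    net.eval()
    with torch.no_grad():
        traced = torch.jit.trace(net, sample)  # sample shape should match inference tiles
    traced.save(f'{output_dir}/{name}.pt')
```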
@@ -491,8 +508,8 @@ def serialize(models_dir, output_dir, device, verbose):
 
     # test: whether the original and the serialized model produces highly similar predictions
     print('testing similarity between prediction from original vs serialized models...')
-    models_original = init_nets(models_dir,eager_mode=True)
-    models_serialized = init_nets(output_dir,eager_mode=False)
+    models_original = init_nets(models_dir,eager_mode=True,phase='test')
+    models_serialized = init_nets(output_dir,eager_mode=False,phase='test')
     if device == 'gpu':
         sample = sample.cuda()
     else:
@@ -513,13 +530,14 @@ def serialize(models_dir, output_dir, device, verbose):
 @click.option('--output-dir', help='saves results here.')
 @click.option('--tile-size', default=None, help='tile size')
 @click.option('--model-dir', default='./model-server/DeepLIIF_Latest_Model/', help='load models from here.')
+@click.option('--gpu-ids', type=int, multiple=True, help='gpu-ids 0 gpu-ids 1 or gpu-ids -1 for CPU')
 @click.option('--region-size', default=20000, help='Due to limits in the resources, the whole slide image cannot be processed in whole.'
                                                    'So the WSI image is read region by region. '
                                                    'This parameter specifies the size each region to be read into GPU for inferrence.')
 @click.option('--eager-mode', is_flag=True, help='use eager mode (loading original models, otherwise serialized ones)')
 @click.option('--color-dapi', is_flag=True, help='color dapi image to produce the same coloring as in the paper')
 @click.option('--color-marker', is_flag=True, help='color marker image to produce the same coloring as in the paper')
-def test(input_dir, output_dir, tile_size, model_dir, region_size, eager_mode,
+def test(input_dir, output_dir, tile_size, model_dir, gpu_ids, region_size, eager_mode,
          color_dapi, color_marker):
 
     """Test trained models
@@ -528,6 +546,29 @@ def test(input_dir, output_dir, tile_size, model_dir, region_size, eager_mode,
     ensure_exists(output_dir)
 
     image_files = [fn for fn in os.listdir(input_dir) if allowed_file(fn)]
+    files = os.listdir(model_dir)
+    assert 'train_opt.txt' in files, f'file train_opt.txt is missing from model directory {model_dir}'
+    opt = Options(path_file=os.path.join(model_dir,'train_opt.txt'), mode='test')
+    opt.use_dp = False
+
+    number_of_gpus_all = torch.cuda.device_count()
+    if number_of_gpus_all < len(gpu_ids) and -1 not in gpu_ids:
+        number_of_gpus = 0
+        gpu_ids = [-1]
+        print(f'Specified to use GPU {opt.gpu_ids} for inference, but there are only {number_of_gpus_all} GPU devices. Switched to CPU inference.')
+
+    if len(gpu_ids) > 0 and gpu_ids[0] == -1:
+        gpu_ids = []
+    elif len(gpu_ids) == 0:
+        gpu_ids = list(range(number_of_gpus_all))
+
+    opt.gpu_ids = gpu_ids # overwrite gpu_ids; for test command, default gpu_ids at first is [] which will be translated to a list of all gpus
+
+    # fix opt from old settings
+    if not hasattr(opt,'modalities_no') and hasattr(opt,'targets_no'):
+        opt.modalities_no = opt.targets_no - 1
+        del opt.targets_no
+    print_options(opt)
 
     with click.progressbar(
         image_files,
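The GPU-resolution rules above are worth restating: an explicit `-1` means CPU, an empty option means "all visible GPUs", and asking for more GPUs than exist falls back to CPU with a warning. Distilled into a standalone sketch (`resolve_gpu_ids` is a hypothetical name; `torch.cuda.device_count` is the real call used above):

```python
import torch

def resolve_gpu_ids(requested):
    available = torch.cuda.device_count()
    if len(requested) > available and -1 not in requested:
        return []                      # over-subscription: fall back to CPU
    if requested and requested[0] == -1:
        return []                      # explicit CPU request
    if not requested:
        return list(range(available))  # default: every visible GPU
    return list(requested)

assert resolve_gpu_ids((-1,)) == []    # holds with or without CUDA
```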
@@ -541,7 +582,7 @@ def test(input_dir, output_dir, tile_size, model_dir, region_size, eager_mode,
             print(time.time() - start_time)
         else:
             img = Image.open(os.path.join(input_dir, filename)).convert('RGB')
-            images, scoring = infer_modalities(img, tile_size, model_dir, eager_mode, color_dapi, color_marker)
+            images, scoring = infer_modalities(img, tile_size, model_dir, eager_mode, color_dapi, color_marker, opt)
 
             for name, i in images.items():
                 i.save(os.path.join(
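Taken together, the `test` changes mean inference is now driven by the persisted training options rather than by hard-coded assumptions. A sketch of the resulting call path for a single image, using only signatures visible in this diff; the model directory and file names are placeholders:

```python
import os
from PIL import Image
from deepliif.models import infer_modalities
from deepliif.options import Options

model_dir = './model-server/DeepLIIF_Latest_Model'
opt = Options(path_file=os.path.join(model_dir, 'train_opt.txt'), mode='test')
opt.use_dp = False

img = Image.open('./Sample_Large_Tissues/ROI_1.png').convert('RGB')  # placeholder
# positional args as in the diff: tile_size, model_dir, eager_mode, color_dapi, color_marker, opt
images, scoring = infer_modalities(img, 512, model_dir, False, False, False, opt)
for name, modality_img in images.items():
    modality_img.save(f'./output/ROI_1_{name}.png')
```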
@@ -634,7 +675,8 @@ class CPU_Unpickler(pickle.Unpickler):
 
 @cli.command()
 @click.option('--pickle-dir', required=True, help='directory where the pickled snapshots are stored')
-def visualize(pickle_dir):
+@click.option('--display-env', default = None, help='window name; overwrite the display-env opt from the saved pickle file')
+def visualize(pickle_dir, display_env):
 
     path_init = os.path.join(pickle_dir,'opt.pickle')
     print(f'waiting for initialization signal from {path_init}')
@@ -643,6 +685,8 @@ def visualize(pickle_dir):
 
     params_opt = pickle.load(open(path_init,'rb'))
     params_opt.remote = False
+    if display_env is not None:
+        params_opt.display_env = display_env
     visualizer = Visualizer(params_opt) # create a visualizer that display/save images and plots
 
     paths_plot = {'display_current_results':os.path.join(pickle_dir,'display_current_results.pickle'),
@@ -666,77 +710,7 @@ def visualize(pickle_dir):
         time.sleep(10)
 
 
-
-    def __init__(self, dataroot, name, gpu_ids, checkpoints_dir, targets_no, input_nc, output_nc, ngf, ndf, net_d,
-                 net_g, n_layers_d, norm, init_type, init_gain, no_dropout, direction, serial_batches, num_threads,
-                 batch_size, load_size, crop_size, max_dataset_size, preprocess, no_flip, display_winsize, epoch,
-                 load_iter, verbose, lambda_l1, is_train, display_freq, display_ncols, display_id, display_server, display_env,
-                 display_port, update_html_freq, print_freq, no_html, save_latest_freq, save_epoch_freq, save_by_iter,
-                 continue_train, epoch_count, phase, lr_policy, n_epochs, n_epochs_decay, beta1, lr, lr_decay_iters,
-                 remote, remote_transfer_cmd, dataset_mode, padding):
-        self.dataroot = dataroot
-        self.name = name
-        self.gpu_ids = gpu_ids
-        self.checkpoints_dir = checkpoints_dir
-        self.targets_no = targets_no
-        self.input_nc = input_nc
-        self.output_nc = output_nc
-        self.ngf = ngf
-        self.ndf = ndf
-        self.net_d = net_d
-        self.net_g = net_g
-        self.n_layers_d = n_layers_d
-        self.norm = norm
-        self.init_type = init_type
-        self.init_gain = init_gain
-        self.no_dropout = no_dropout
-        self.direction = direction
-        self.serial_batches = serial_batches
-        self.num_threads = num_threads
-        self.batch_size = batch_size
-        self.load_size = load_size
-        self.crop_size = crop_size
-        self.max_dataset_size = max_dataset_size
-        self.preprocess = preprocess
-        self.no_flip = no_flip
-        self.display_winsize = display_winsize
-        self.epoch = epoch
-        self.load_iter = load_iter
-        self.verbose = verbose
-        self.lambda_l1 = lambda_l1
-        self.is_train = is_train
-        self.display_freq = display_freq
-        self.display_ncols = display_ncols
-        self.display_id = display_id
-        self.display_server = display_server
-        self.display_env = display_env
-        self.display_port = display_port
-        self.update_html_freq = update_html_freq
-        self.print_freq = print_freq
-        self.no_html = no_html
-        self.save_latest_freq = save_latest_freq
-        self.save_epoch_freq = save_epoch_freq
-        self.save_by_iter = save_by_iter
-        self.continue_train = continue_train
-        self.epoch_count = epoch_count
-        self.phase = phase
-        self.lr_policy = lr_policy
-        self.n_epochs = n_epochs
-        self.n_epochs_decay = n_epochs_decay
-        self.beta1 = beta1
-        self.lr = lr
-        self.lr_decay_iters = lr_decay_iters
-        self.dataset_mode = dataset_mode
-        self.padding = padding
-        self.remote_transfer_cmd = remote_transfer_cmd
-
-        self.isTrain = True
-        self.netG = 'resnet_9blocks'
-        self.netD = 'n_layers'
-        self.n_layers_D = 4
-        self.lambda_L1 = 100
-        self.lambda_feat = 100
-        self.remote = remote
+
 
 
 if __name__ == '__main__':
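The class deleted above mixed pass-through CLI arguments with hard-coded training defaults (`isTrain`, `netG`, `netD`, `n_layers_D`, `lambda_L1`, `lambda_feat`). A hedged sketch of how the replacement in `deepliif/options/__init__.py` might fold those defaults under the new dual-mode constructor; the actual implementation (+102 lines) is not part of this diff:

```python
_TRAIN_DEFAULTS = {
    'isTrain': True,             # values carried over from the deleted class
    'netG': 'resnet_9blocks',
    'netD': 'n_layers',
    'n_layers_D': 4,
    'lambda_L1': 100,
    'lambda_feat': 100,
}

class Options:
    def __init__(self, d_params=None, path_file=None, mode='train'):
        self.__dict__.update(_TRAIN_DEFAULTS)
        if d_params:                     # train: arguments captured via locals()
            self.__dict__.update(d_params)
        elif path_file:                  # test/serialize: re-read train_opt.txt
            pass  # parse the persisted options file here (implementation not shown)
```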
{deepliif-1.1.7 → deepliif-1.1.8}/deepliif/data/aligned_dataset.py

@@ -25,9 +25,11 @@ class AlignedDataset(BaseDataset):
         self.input_nc = opt.output_nc if opt.direction == 'BtoA' else opt.input_nc
         self.output_nc = opt.input_nc if opt.direction == 'BtoA' else opt.output_nc
         self.no_flip = opt.no_flip
-        self.
+        self.modalities_no = opt.modalities_no
+        self.seg_gen = opt.seg_gen
         self.load_size = opt.load_size
         self.crop_size = opt.crop_size
+        self.model = opt.model
 
     def __getitem__(self, index):
         """Return a data point and its metadata information.
@@ -46,7 +48,13 @@ class AlignedDataset(BaseDataset):
         AB = Image.open(AB_path).convert('RGB')
         # split AB image into A and B
         w, h = AB.size
-
+        if self.model == 'DeepLIIF':
+            num_img = self.modalities_no + 1 + 1  # +1 for segmentation channel, +1 for input image
+        elif self.model == 'DeepLIIFExt':
+            num_img = self.modalities_no * 2 + 1 if self.seg_gen else self.modalities_no + 1  # +1 for segmentation channel
+        else:
+            raise Exception(f'model class {self.model} does not have corresponding implementation in deepliif/data/aligned_dataset.py')
+        w2 = int(w / num_img)
 A = AB.crop((0, 0, w2, h))
 
         # apply the same transform to both A and B
@@ -56,12 +64,30 @@ class AlignedDataset(BaseDataset):
 
         A = A_transform(A)
         B_Array = []
-
-
-
-
+        if self.model == 'DeepLIIF':
+            for i in range(1, num_img):
+                B = AB.crop((w2 * i, 0, w2 * (i + 1), h))
+                B = B_transform(B)
+                B_Array.append(B)
 
-
+            return {'A': A, 'B': B_Array, 'A_paths': AB_path, 'B_paths': AB_path}
+        elif self.model == 'DeepLIIFExt':
+            for i in range(1, self.modalities_no + 1):
+                B = AB.crop((w2 * i, 0, w2 * (i + 1), h))
+                B = B_transform(B)
+                B_Array.append(B)
+
+            BS_Array = []
+            if self.seg_gen:
+                for i in range(self.modalities_no + 1, self.modalities_no * 2 + 1):
+                    BS = AB.crop((w2 * i, 0, w2 * (i + 1), h))
+                    BS = B_transform(BS)
+                    BS_Array.append(BS)
+
+            return {'A': A, 'B': B_Array, 'BS': BS_Array,'A_paths': AB_path, 'B_paths': AB_path}
+        else:
+            raise Exception(f'model class {self.model} does not have corresponding implementation in deepliif/data/aligned_dataset.py')
+
 
     def __len__(self):
         """Return the total number of images in the dataset."""