deepliif 1.1.10.tar.gz → 1.1.12.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. {deepliif-1.1.10/deepliif.egg-info → deepliif-1.1.12}/PKG-INFO +76 -17
  2. {deepliif-1.1.10 → deepliif-1.1.12}/README.md +75 -16
  3. {deepliif-1.1.10 → deepliif-1.1.12}/cli.py +354 -67
  4. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/__init__.py +7 -7
  5. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/aligned_dataset.py +2 -3
  6. deepliif-1.1.12/deepliif/data/unaligned_dataset.py +90 -0
  7. deepliif-1.1.12/deepliif/models/CycleGAN_model.py +282 -0
  8. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/models/DeepLIIFExt_model.py +47 -25
  9. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/models/DeepLIIF_model.py +69 -19
  10. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/models/SDG_model.py +57 -26
  11. deepliif-1.1.12/deepliif/models/__init__ - run_dask_multi dev.py +943 -0
  12. deepliif-1.1.12/deepliif/models/__init__ - timings.py +764 -0
  13. deepliif-1.1.12/deepliif/models/__init__.py +729 -0
  14. deepliif-1.1.12/deepliif/models/att_unet.py +199 -0
  15. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/models/base_model.py +32 -8
  16. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/models/networks.py +108 -34
  17. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/options/__init__.py +49 -5
  18. deepliif-1.1.12/deepliif/postprocessing.py +1247 -0
  19. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/__init__.py +290 -64
  20. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/visualizer.py +106 -19
  21. {deepliif-1.1.10 → deepliif-1.1.12/deepliif.egg-info}/PKG-INFO +76 -17
  22. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif.egg-info/SOURCES.txt +7 -2
  23. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif.egg-info/requires.txt +5 -3
  24. {deepliif-1.1.10 → deepliif-1.1.12}/setup.cfg +1 -1
  25. {deepliif-1.1.10 → deepliif-1.1.12}/setup.py +7 -5
  26. {deepliif-1.1.10 → deepliif-1.1.12}/tests/test_cli_inference.py +8 -5
  27. deepliif-1.1.12/tests/test_cli_train.py +213 -0
  28. deepliif-1.1.12/tests/test_cli_trainlaunch.py +204 -0
  29. deepliif-1.1.10/deepliif/data/unaligned_dataset.py +0 -71
  30. deepliif-1.1.10/deepliif/models/__init__.py +0 -607
  31. deepliif-1.1.10/deepliif/train.py +0 -280
  32. deepliif-1.1.10/tests/test_cli_train.py +0 -63
  33. {deepliif-1.1.10 → deepliif-1.1.12}/LICENSE.md +0 -0
  34. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/__init__.py +0 -0
  35. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/base_dataset.py +0 -0
  36. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/colorization_dataset.py +0 -0
  37. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/image_folder.py +0 -0
  38. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/single_dataset.py +0 -0
  39. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/data/template_dataset.py +0 -0
  40. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/options/base_options.py +0 -0
  41. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/options/processing_options.py +0 -0
  42. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/options/test_options.py +0 -0
  43. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/options/train_options.py +0 -0
  44. /deepliif-1.1.10/deepliif/postprocessing.py → /deepliif-1.1.12/deepliif/postprocessing__OLD__DELETE.py +0 -0
  45. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/get_data.py +0 -0
  46. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/html.py +0 -0
  47. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/image_pool.py +0 -0
  48. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif/util/util.py +0 -0
  49. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif.egg-info/dependency_links.txt +0 -0
  50. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif.egg-info/entry_points.txt +0 -0
  51. {deepliif-1.1.10 → deepliif-1.1.12}/deepliif.egg-info/top_level.txt +0 -0
  52. {deepliif-1.1.10 → deepliif-1.1.12}/tests/test_args.py +0 -0
  53. {deepliif-1.1.10 → deepliif-1.1.12}/tests/test_cli_serialize.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: deepliif
- Version: 1.1.10
+ Version: 1.1.12
  Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
  Home-page: https://github.com/nadeemlab/DeepLIIF
  Author: Parmida93
@@ -24,6 +24,8 @@ License-File: LICENSE.md
  |
  <a href="https://onlinelibrary.wiley.com/share/author/4AEBAGEHSZE9GDP3H8MN?target=10.1111/his.15048">Histopathology'23</a>
  |
+ <a href="https://arxiv.org/abs/2405.08169">MICCAI'24</a>
+ |
  <a href="https://deepliif.org/">Cloud Deployment</a>
  |
  <a href="https://nadeemlab.github.io/DeepLIIF/">Documentation</a>
@@ -51,7 +53,7 @@ segmentation.*

  © This code is made available for non-commercial academic purposes.

- ![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+ ![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.12&color=darkgreen)
  [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)

  ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
@@ -99,6 +101,16 @@ Commands:
    train  General-purpose training script for multi-task...
  ```

+ **Note:** You might need to install a version of PyTorch that is compatible with your CUDA version.
+ Otherwise, only the CPU will be used.
+ Visit the [PyTorch website](https://pytorch.org/) for details.
+ You can confirm whether your installation will run on the GPU by checking that the following returns `True`:
+
+ ```
+ import torch
+ torch.cuda.is_available()
+ ```
+
  ## Training Dataset
  For training, all image sets must be 512x512 and combined together in 3072x512 images (six images of size 512x512 stitched
  together horizontally).
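The 3072x512 layout described in that hunk is straightforward to produce with Pillow. Below is a minimal sketch, assuming six hypothetical 512x512 modality tiles already exist on disk; the filenames and their order are illustrative only, so consult the dataset preparation instructions for the required modality order:

```
# Sketch only: stitch six 512x512 tiles into one 3072x512 training image.
# The tile filenames and their order here are illustrative assumptions.
from PIL import Image

tiles = ['ihc.png', 'hematoxylin.png', 'dapi.png', 'lap2.png', 'marker.png', 'seg.png']
combined = Image.new('RGB', (512 * len(tiles), 512))   # 3072x512 canvas
for i, name in enumerate(tiles):
    tile = Image.open(name).convert('RGB')
    assert tile.size == (512, 512), f'{name} is not 512x512'
    combined.paste(tile, (i * 512, 0))                 # place side by side
combined.save('combined_training_image.png')
```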
@@ -196,26 +208,49 @@ python test.py --dataroot /path/to/input/images
  * Before running test on images, the model files must be serialized as described above.
  * The serialized model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
  * The test results will be saved to the specified output directory, which defaults to the input directory.
- * The default tile size is 512.
+ * The tile size must be specified and is used to split the image into tiles for processing. The tile size depends on the resolution (scan magnification) of the input image; the recommended values are 512 for 40x images, 256 for 20x, and 128 for 10x. Note that the smaller the tile size, the longer inference will take.
  * Testing datasets can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).

+ **Test Command Options:**
+ In addition to the required parameters given above, the following optional parameters are available for `deepliif test`:
+ * `--eager-mode` Run the original model files (instead of serialized model files).
+ * `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+ * `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+ * `--color-dapi` Color the inferred DAPI image.
+ * `--color-marker` Color the inferred marker image.
+
  **Whole Slide Image (WSI) Inference:**
  For translation and segmentation of whole slide images,
- you can simply use the same test command
- giving path to the directory containing your whole slide images as the input-dir.
+ you can simply use the `test-wsi` command,
+ giving the path to the directory containing your WSI as the input-dir
+ and specifying the filename of the WSI.
  DeepLIIF automatically reads the WSI region by region,
  translates and segments each region separately, and stitches the regions
  to create the translation and segmentation for the whole slide image,
  then saves all masks in ome.tiff format in the given output-dir.
- Based on the available GPU resources, the region-size can be changed.
+ Based on the available resources, the region-size can be changed.
  ```
- deepliif test --input-dir /path/to/input/images
-   --output-dir /path/to/output/images
-   --model-dir /path/to/the/serialized/model
-   --tile-size 512
-   --region-size 20000
+ deepliif test-wsi --input-dir /path/to/input/image
+   --filename wsiFile.svs
+   --output-dir /path/to/output/images
+   --model-dir /path/to/the/serialized/model
+   --tile-size 512
  ```

+ **WSI Inference Options:**
+ In addition to the required parameters given above, the following optional parameters are available for `deepliif test-wsi`:
+ * `--region-size` Set the size of each region to read from the WSI (default is 20000).
+ * `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+ * `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+ * `--color-dapi` Color the inferred DAPI image.
+ * `--color-marker` Color the inferred marker image.
+
+ **Reducing Run Time:**
+ If you need only the final segmentation and not the inferred multiplex images,
+ it is recommended to run `deepliif test` or `deepliif test-wsi` with the `--seg-only`
+ option. This will generate only the necessary images, thus reducing the overall run time.
+
+ **Torchserve:**
  If you prefer, it is possible to run the models using Torchserve.
  Please see [the documentation](https://nadeemlab.github.io/DeepLIIF/deployment/#deploying-deepliif-with-torchserve)
  on how to deploy the model with Torchserve and for an example of how to run the inference.
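As an illustration of how the flags listed in the hunk above combine, two example invocations follow; the directories, model path, and WSI filename are placeholders, and the flags are those documented in the option lists above:

```
# 20x image, with colored DAPI and marker outputs alongside the segmentation:
deepliif test --input-dir ./input --output-dir ./output
    --model-dir ./DeepLIIF_Latest_Model --tile-size 256
    --color-dapi --color-marker

# WSI, final segmentation only, with a smaller region size for limited memory:
deepliif test-wsi --input-dir ./slides --filename wsiFile.svs
    --output-dir ./output --model-dir ./DeepLIIF_Latest_Model
    --tile-size 512 --region-size 10000 --seg-only
```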
@@ -250,11 +285,18 @@ The plugin also supports submitting multiple ROIs at once:
  ## Cloud Deployment
  If you don't have access to a GPU or appropriate hardware and don't want to install ImageJ, we have also created a [cloud-native DeepLIIF deployment](https://deepliif.org) with a user-friendly interface to upload images, visualize, interact, and download the final results.

- ![DeepLIIF Website Demo](images/deepliif-website-demo-03.gif)
+ ![DeepLIIF Website Demo](images/deepliif-website-demo-04.gif)
+
+ Our deployment at [deepliif.org](https://deepliif.org) also provides virtual slide digitization to generate a single stitched image from a 10x video acquired with a microscope and camera. The video should be captured with the following guidelines to achieve the best results:
+ * Brief but complete pauses at every section of the sample to avoid motion artifacts.
+ * Significant overlap between pauses so that there is sufficient context for stitching frames together.
+ * Methodical and consistent movement over the sample. For example, start at the top left corner, then go all the way to the right, then down one step, then all the way to the left, down one step, etc., until the end of the sample is reached. Again, brief overlapping pauses throughout will allow the best quality images to be generated.
+
+ ![DeepLIIF Website Demo](images/deepliif-stitch-demo-01.gif)

  ## Cloud API Endpoints

- DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
+ For small images, DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:

  ```
  POST /api/infer
@@ -350,6 +392,8 @@ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
      print(json.dumps(data['scoring'], indent=2))
  ```

+ Note that since this is a single request to send the image and receive the results, processing must complete within the timeout period (typically about one minute). If your request receives a 504 status code, please try a smaller image or install the `deepliif` package as detailed above to run the process locally.
+
  If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:

  ```
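The timeout behavior added in that hunk can be handled defensively in client code. A minimal sketch follows, assuming the `https://deepliif.org/api/infer` endpoint shown above and a hypothetical multipart field name `img`; verify both, along with the response schema, against the full API example in the README:

```
# Sketch only: post an image, and on a 504 retry with a downscaled copy.
# The endpoint URL, the 'img' field name, and the fallback size are
# assumptions to check against the README's full API example.
import requests
from PIL import Image

url = 'https://deepliif.org/api/infer'
path = 'ihc_tile.png'  # hypothetical input image

with open(path, 'rb') as f:
    res = requests.post(url, files={'img': f})

if res.status_code == 504:
    # Gateway timeout: the image likely could not be processed in ~1 minute.
    small = Image.open(path)
    small.thumbnail((1024, 1024))  # illustrative size cap
    small.save('ihc_tile_small.png')
    with open('ihc_tile_small.png', 'rb') as f:
        res = requests.post(url, files={'img': f})

res.raise_for_status()
data = res.json()
print(data['scoring'])  # the 'scoring' key appears in the README example above
```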
@@ -502,16 +546,17 @@ DeepLIIF model and release back to the community with full credit to the contrib
  - [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)

  ## Support
- Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
-
- Bugs can be reported in the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab.
+ Please use the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab for discussion, questions, or to report bugs related to DeepLIIF.

  ## License
  © [Nadeem Lab](https://nadeemlab.org/) - DeepLIIF code is distributed under the **Apache 2.0 with Commons Clause** license,
  and is available for non-commercial academic purposes.

  ## Acknowledgments
- * This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+ This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+ ## Funding
+ This work is funded by the 7-year NIH/NCI R37 MERIT Award ([R37CA295658](https://reporter.nih.gov/search/5dgSOlHosEKepkZEAS5_kQ/project-details/11018883#description)).

  ## Reference
  If you find our work useful in your research or if you use parts of this code or our released dataset, please cite the following papers:
@@ -539,6 +584,8 @@ If you find our work useful in your research or if you use parts of this code or
    title={An AI-Ready Multiplex Staining Dataset for Reproducible and Accurate Characterization of Tumor Immune Microenvironment},
    author={Ghahremani, Parmida and Marino, Joseph and Hernandez-Prera, Juan and V. de la Iglesia, Janis and JC Slebos, Robbert and H. Chung, Christine and Nadeem, Saad},
    journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+   volume={14225},
+   pages={704--713},
    year={2023}
  }

@@ -546,7 +593,19 @@ If you find our work useful in your research or if you use parts of this code or
    author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
    title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
    journal = {Histopathology},
+   volume = {83},
+   number = {6},
+   pages = {981--988},
    year = {2023},
    doi = {https://doi.org/10.1111/his.15048}
  }
+
+ @article{zehra2024deepliifstitch,
+   author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+   title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+   journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+   volume = {15004},
+   pages = {427--436},
+   year = {2024}
+ }
  ```