deepliif 1.1.11.tar.gz → 1.1.13.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {deepliif-1.1.11/deepliif.egg-info → deepliif-1.1.13}/PKG-INFO +70 -20
  2. {deepliif-1.1.11 → deepliif-1.1.13}/README.md +69 -19
  3. {deepliif-1.1.11 → deepliif-1.1.13}/cli.py +354 -67
  4. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/__init__.py +7 -7
  5. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/aligned_dataset.py +2 -3
  6. deepliif-1.1.13/deepliif/data/unaligned_dataset.py +90 -0
  7. deepliif-1.1.13/deepliif/models/CycleGAN_model.py +282 -0
  8. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/models/DeepLIIFExt_model.py +47 -25
  9. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/models/DeepLIIF_model.py +69 -19
  10. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/models/SDG_model.py +57 -26
  11. deepliif-1.1.13/deepliif/models/__init__ - different weighted.py +762 -0
  12. deepliif-1.1.13/deepliif/models/__init__ - run_dask_multi dev.py +943 -0
  13. deepliif-1.1.13/deepliif/models/__init__ - time gens.py +792 -0
  14. deepliif-1.1.11/deepliif/models/__init__.py → deepliif-1.1.13/deepliif/models/__init__ - timings.py +363 -265
  15. deepliif-1.1.13/deepliif/models/__init__.py +760 -0
  16. deepliif-1.1.13/deepliif/models/att_unet.py +199 -0
  17. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/models/base_model.py +32 -8
  18. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/models/networks.py +108 -34
  19. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/options/__init__.py +49 -5
  20. deepliif-1.1.13/deepliif/postprocessing.py +1247 -0
  21. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/__init__.py +86 -65
  22. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/visualizer.py +106 -19
  23. {deepliif-1.1.11 → deepliif-1.1.13/deepliif.egg-info}/PKG-INFO +70 -20
  24. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif.egg-info/SOURCES.txt +9 -2
  25. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif.egg-info/requires.txt +5 -4
  26. {deepliif-1.1.11 → deepliif-1.1.13}/setup.cfg +1 -1
  27. {deepliif-1.1.11 → deepliif-1.1.13}/setup.py +7 -6
  28. {deepliif-1.1.11 → deepliif-1.1.13}/tests/test_cli_inference.py +8 -5
  29. deepliif-1.1.13/tests/test_cli_train.py +213 -0
  30. deepliif-1.1.13/tests/test_cli_trainlaunch.py +204 -0
  31. deepliif-1.1.11/deepliif/data/unaligned_dataset.py +0 -71
  32. deepliif-1.1.11/deepliif/train.py +0 -280
  33. deepliif-1.1.11/tests/test_cli_train.py +0 -63
  34. {deepliif-1.1.11 → deepliif-1.1.13}/LICENSE.md +0 -0
  35. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/__init__.py +0 -0
  36. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/base_dataset.py +0 -0
  37. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/colorization_dataset.py +0 -0
  38. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/image_folder.py +0 -0
  39. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/single_dataset.py +0 -0
  40. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/data/template_dataset.py +0 -0
  41. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/options/base_options.py +0 -0
  42. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/options/processing_options.py +0 -0
  43. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/options/test_options.py +0 -0
  44. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/options/train_options.py +0 -0
  45. deepliif-1.1.11/deepliif/postprocessing.py → deepliif-1.1.13/deepliif/postprocessing__OLD__DELETE.py +0 -0
  46. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/get_data.py +0 -0
  47. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/html.py +0 -0
  48. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/image_pool.py +0 -0
  49. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif/util/util.py +0 -0
  50. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif.egg-info/dependency_links.txt +0 -0
  51. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif.egg-info/entry_points.txt +0 -0
  52. {deepliif-1.1.11 → deepliif-1.1.13}/deepliif.egg-info/top_level.txt +0 -0
  53. {deepliif-1.1.11 → deepliif-1.1.13}/tests/test_args.py +0 -0
  54. {deepliif-1.1.11 → deepliif-1.1.13}/tests/test_cli_serialize.py +0 -0
--- deepliif-1.1.11/deepliif.egg-info/PKG-INFO
+++ deepliif-1.1.13/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.11
+Version: 1.1.13
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -53,7 +53,7 @@ segmentation.*

 © This code is made available for non-commercial academic purposes.

-![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.13&color=darkgreen)
 [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)

 ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
@@ -101,6 +101,16 @@ Commands:
   train  General-purpose training script for multi-task...
 ```

+**Note:** You might need to install a version of PyTorch that is compatible with your CUDA version.
+Otherwise, only the CPU will be used.
+Visit the [PyTorch website](https://pytorch.org/) for details.
+You can confirm if your installation will run on the GPU by checking if the following returns `True`:
+
+```
+import torch
+torch.cuda.is_available()
+```
+
 ## Training Dataset
 For training, all image sets must be 512x512 and combined together in 3072x512 images (six images of size 512x512 stitched
 together horizontally).
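A slightly fuller check than the snippet added above, using only standard `torch` attributes, also reports which CUDA version the installed build was compiled against (a minimal sketch; the printed values are illustrative, not output from this package):

```
import torch

print(torch.__version__)          # e.g. "2.1.0+cu121" marks a CUDA 12.1 build
print(torch.version.cuda)         # CUDA version the wheel targets; None for CPU-only builds
print(torch.cuda.is_available())  # True means DeepLIIF can run on the GPU
```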
@@ -198,26 +208,49 @@ python test.py --dataroot /path/to/input/images
 * Before running test on images, the model files must be serialized as described above.
 * The serialized model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
 * The test results will be saved to the specified output directory, which defaults to the input directory.
-* The default tile size is 512.
+* The tile size must be specified and is used to split the image into tiles for processing. The tile size depends on the resolution (scan magnification) of the input image; the recommended values are 512 for 40x images, 256 for 20x, and 128 for 10x. Note that the smaller the tile size, the longer inference will take.
 * Testing datasets can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).

+**Test Command Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test`:
+* `--eager-mode` Run the original model files (instead of serialized model files).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
 **Whole Slide Image (WSI) Inference:**
 For translation and segmentation of whole slide images,
-you can simply use the same test command
-giving path to the directory containing your whole slide images as the input-dir.
+you can simply use the `test-wsi` command,
+giving the path to the directory containing your WSI as the input-dir
+and specifying the filename of the WSI.
 DeepLIIF automatically reads the WSI region by region,
 translates and segments each region separately, and stitches the regions
 to create the translation and segmentation for the whole slide image,
 then saves all masks in ome.tiff format in the given output-dir.
-Based on the available GPU resources, the region-size can be changed.
+Based on the available resources, the region-size can be changed.
 ```
-deepliif test --input-dir /path/to/input/images
-              --output-dir /path/to/output/images
-              --model-dir /path/to/the/serialized/model
-              --tile-size 512
-              --region-size 20000
+deepliif test-wsi --input-dir /path/to/input/image
+                  --filename wsiFile.svs
+                  --output-dir /path/to/output/images
+                  --model-dir /path/to/the/serialized/model
+                  --tile-size 512
 ```

+**WSI Inference Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test-wsi`:
+* `--region-size` Set the size of each region to read from the WSI (default is 20000).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
+**Reducing Run Time:**
+If you need only the final segmentation and not the inferred multiplex images,
+it is recommended to run `deepliif test` or `deepliif test-wsi` with the `--seg-only`
+option. This will generate only the necessary images, thus reducing the overall run time.
+
+**Torchserve:**
 If you prefer, it is possible to run the models using Torchserve.
 Please see [the documentation](https://nadeemlab.github.io/DeepLIIF/deployment/#deploying-deepliif-with-torchserve)
 on how to deploy the model with Torchserve and for an example of how to run the inference.
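The tile-size recommendation in the hunk above (512 for 40x, 256 for 20x, 128 for 10x) scales linearly with magnification. A small illustration of that rule; `suggested_tile_size` is a hypothetical helper written for this page, not a function in the deepliif package:

```
# Hypothetical helper (not part of deepliif) encoding the README's guidance.
TILE_SIZE_BY_MAGNIFICATION = {40: 512, 20: 256, 10: 128}

def suggested_tile_size(magnification: int) -> int:
    """Return the recommended --tile-size for a given scan magnification."""
    try:
        return TILE_SIZE_BY_MAGNIFICATION[magnification]
    except KeyError:
        raise ValueError(f'no recommendation for {magnification}x; expected 10, 20, or 40')

print(suggested_tile_size(20))  # 256
```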
@@ -254,9 +287,16 @@ If you don't have access to GPU or appropriate hardware and don't want to instal

 ![DeepLIIF Website Demo](images/deepliif-website-demo-04.gif)

+Our deployment at [deepliif.org](https://deepliif.org) also provides virtual slide digitization to generate a single stitched image from a 10x video acquired with a microscope and camera. The video should be captured with the following guidelines to achieve the best results:
+* Brief but complete pauses at every section of the sample to avoid motion artifacts.
+* Significant overlap between pauses so that there is sufficient context for stitching frames together.
+* Methodical and consistent movement over the sample. For example, start at the top left corner, then go all the way to the right, then down one step, then all the way to the left, down one step, etc., until the end of the sample is reached. Again, brief overlapping pauses throughout will allow the best quality images to be generated.
+
+![DeepLIIF Website Demo](images/deepliif-stitch-demo-01.gif)
+
 ## Cloud API Endpoints

-DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
+For small images, DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:

 ```
 POST /api/infer
@@ -352,6 +392,8 @@ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
 print(json.dumps(data['scoring'], indent=2))
 ```

+Note that since this is a single request to send the image and receive the results, processing must complete within the timeout period (typically about one minute). If your request receives a 504 status code, please try a smaller image or install the `deepliif` package as detailed above to run the process locally.
+
 If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:

 ```
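As an end-to-end illustration of the request and the timeout advice added above, here is a minimal sketch using `requests`; the multipart field name (`img`) and the `resolution` parameter are assumptions drawn from the project's documented Python example, not from this diff:

```
import json
import requests

# Sketch only: the field name 'img' and the 'resolution' parameter are assumptions.
with open('ihc_tile.png', 'rb') as f:
    res = requests.post('https://deepliif.org/api/infer',
                        files={'img': f},
                        params={'resolution': '40x'})

if res.status_code == 504:
    # Gateway timeout: processing exceeded the ~1 minute limit.
    raise SystemExit('Try a smaller image or run deepliif locally.')

data = res.json()
print(json.dumps(data['scoring'], indent=2))
```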
@@ -504,16 +546,17 @@ DeepLIIF model and release back to the community with full credit to the contrib
 - [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)

 ## Support
-Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
-
-Bugs can be reported in the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab.
+Please use the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab for discussion, questions, or to report bugs related to DeepLIIF.

 ## License
 © [Nadeem Lab](https://nadeemlab.org/) - DeepLIIF code is distributed under **Apache 2.0 with Commons Clause** license,
 and is available for non-commercial academic purposes.

 ## Acknowledgments
-* This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+## Funding
+This work is funded by the 7-year NIH/NCI R37 MERIT Award ([R37CA295658](https://reporter.nih.gov/search/5dgSOlHosEKepkZEAS5_kQ/project-details/11018883#description)).

 ## Reference
 If you find our work useful in your research or if you use parts of this code or our released dataset, please cite the following papers:
@@ -541,6 +584,8 @@ If you find our work useful in your research or if you use parts of this code or
   title={An AI-Ready Multiplex Staining Dataset for Reproducible and Accurate Characterization of Tumor Immune Microenvironment},
   author={Ghahremani, Parmida and Marino, Joseph and Hernandez-Prera, Juan and V. de la Iglesia, Janis and JC Slebos, Robbert and H. Chung, Christine and Nadeem, Saad},
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume={14225},
+  pages={704--713},
   year={2023}
 }

@@ -548,14 +593,19 @@ If you find our work useful in your research or if you use parts of this code or
   author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
   title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
   journal = {Histopathology},
+  volume = {83},
+  number = {6},
+  pages = {981--988},
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }

 @article{zehra2024deepliifstitch,
-  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
-  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
-  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
-  year = {2024}
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume = {15004},
+  pages = {427--436},
+  year = {2024}
 }
 ```
--- deepliif-1.1.11/README.md
+++ deepliif-1.1.13/README.md
@@ -42,7 +42,7 @@ segmentation.*

 © This code is made available for non-commercial academic purposes.

-![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.13&color=darkgreen)
 [![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)

 ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
@@ -90,6 +90,16 @@ Commands:
   train  General-purpose training script for multi-task...
 ```

+**Note:** You might need to install a version of PyTorch that is compatible with your CUDA version.
+Otherwise, only the CPU will be used.
+Visit the [PyTorch website](https://pytorch.org/) for details.
+You can confirm if your installation will run on the GPU by checking if the following returns `True`:
+
+```
+import torch
+torch.cuda.is_available()
+```
+
 ## Training Dataset
 For training, all image sets must be 512x512 and combined together in 3072x512 images (six images of size 512x512 stitched
 together horizontally).
@@ -187,26 +197,49 @@ python test.py --dataroot /path/to/input/images
 * Before running test on images, the model files must be serialized as described above.
 * The serialized model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
 * The test results will be saved to the specified output directory, which defaults to the input directory.
-* The default tile size is 512.
+* The tile size must be specified and is used to split the image into tiles for processing. The tile size depends on the resolution (scan magnification) of the input image; the recommended values are 512 for 40x images, 256 for 20x, and 128 for 10x. Note that the smaller the tile size, the longer inference will take.
 * Testing datasets can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).

+**Test Command Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test`:
+* `--eager-mode` Run the original model files (instead of serialized model files).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
 **Whole Slide Image (WSI) Inference:**
 For translation and segmentation of whole slide images,
-you can simply use the same test command
-giving path to the directory containing your whole slide images as the input-dir.
+you can simply use the `test-wsi` command,
+giving the path to the directory containing your WSI as the input-dir
+and specifying the filename of the WSI.
 DeepLIIF automatically reads the WSI region by region,
 translates and segments each region separately, and stitches the regions
 to create the translation and segmentation for the whole slide image,
 then saves all masks in ome.tiff format in the given output-dir.
-Based on the available GPU resources, the region-size can be changed.
+Based on the available resources, the region-size can be changed.
 ```
-deepliif test --input-dir /path/to/input/images
-              --output-dir /path/to/output/images
-              --model-dir /path/to/the/serialized/model
-              --tile-size 512
-              --region-size 20000
+deepliif test-wsi --input-dir /path/to/input/image
+                  --filename wsiFile.svs
+                  --output-dir /path/to/output/images
+                  --model-dir /path/to/the/serialized/model
+                  --tile-size 512
 ```

+**WSI Inference Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test-wsi`:
+* `--region-size` Set the size of each region to read from the WSI (default is 20000).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
+**Reducing Run Time:**
+If you need only the final segmentation and not the inferred multiplex images,
+it is recommended to run `deepliif test` or `deepliif test-wsi` with the `--seg-only`
+option. This will generate only the necessary images, thus reducing the overall run time.
+
+**Torchserve:**
 If you prefer, it is possible to run the models using Torchserve.
 Please see [the documentation](https://nadeemlab.github.io/DeepLIIF/deployment/#deploying-deepliif-with-torchserve)
 on how to deploy the model with Torchserve and for an example of how to run the inference.
@@ -243,9 +276,16 @@ If you don't have access to GPU or appropriate hardware and don't want to instal

 ![DeepLIIF Website Demo](images/deepliif-website-demo-04.gif)

+Our deployment at [deepliif.org](https://deepliif.org) also provides virtual slide digitization to generate a single stitched image from a 10x video acquired with a microscope and camera. The video should be captured with the following guidelines to achieve the best results:
+* Brief but complete pauses at every section of the sample to avoid motion artifacts.
+* Significant overlap between pauses so that there is sufficient context for stitching frames together.
+* Methodical and consistent movement over the sample. For example, start at the top left corner, then go all the way to the right, then down one step, then all the way to the left, down one step, etc., until the end of the sample is reached. Again, brief overlapping pauses throughout will allow the best quality images to be generated.
+
+![DeepLIIF Website Demo](images/deepliif-stitch-demo-01.gif)
+
 ## Cloud API Endpoints

-DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
+For small images, DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:

 ```
 POST /api/infer
@@ -341,6 +381,8 @@ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
 print(json.dumps(data['scoring'], indent=2))
 ```

+Note that since this is a single request to send the image and receive the results, processing must complete within the timeout period (typically about one minute). If your request receives a 504 status code, please try a smaller image or install the `deepliif` package as detailed above to run the process locally.
+
 If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:

 ```
@@ -493,16 +535,17 @@ DeepLIIF model and release back to the community with full credit to the contrib
 - [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)

 ## Support
-Please use the [Image.sc Forum](https://forum.image.sc/tag/deepliif) for discussion and questions related to DeepLIIF.
-
-Bugs can be reported in the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab.
+Please use the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab for discussion, questions, or to report bugs related to DeepLIIF.

 ## License
 © [Nadeem Lab](https://nadeemlab.org/) - DeepLIIF code is distributed under **Apache 2.0 with Commons Clause** license,
 and is available for non-commercial academic purposes.

 ## Acknowledgments
-* This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+## Funding
+This work is funded by the 7-year NIH/NCI R37 MERIT Award ([R37CA295658](https://reporter.nih.gov/search/5dgSOlHosEKepkZEAS5_kQ/project-details/11018883#description)).

 ## Reference
 If you find our work useful in your research or if you use parts of this code or our released dataset, please cite the following papers:
@@ -530,6 +573,8 @@ If you find our work useful in your research or if you use parts of this code or
   title={An AI-Ready Multiplex Staining Dataset for Reproducible and Accurate Characterization of Tumor Immune Microenvironment},
   author={Ghahremani, Parmida and Marino, Joseph and Hernandez-Prera, Juan and V. de la Iglesia, Janis and JC Slebos, Robbert and H. Chung, Christine and Nadeem, Saad},
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume={14225},
+  pages={704--713},
   year={2023}
 }

@@ -537,14 +582,19 @@ If you find our work useful in your research or if you use parts of this code or
   author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
   title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
   journal = {Histopathology},
+  volume = {83},
+  number = {6},
+  pages = {981--988},
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }

 @article{zehra2024deepliifstitch,
-  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
-  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
-  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
-  year = {2024}
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume = {15004},
+  pages = {427--436},
+  year = {2024}
 }
 ```