deepliif 1.1.9.tar.gz → 1.1.11.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {deepliif-1.1.9/deepliif.egg-info → deepliif-1.1.11}/PKG-INFO +181 -27
- {deepliif-1.1.9 → deepliif-1.1.11}/README.md +180 -26
- {deepliif-1.1.9 → deepliif-1.1.11}/cli.py +49 -42
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/aligned_dataset.py +17 -0
- deepliif-1.1.11/deepliif/models/SDG_model.py +189 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/models/__init__.py +170 -46
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/options/__init__.py +62 -29
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/__init__.py +227 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/util.py +17 -1
- {deepliif-1.1.9 → deepliif-1.1.11/deepliif.egg-info}/PKG-INFO +181 -27
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif.egg-info/SOURCES.txt +1 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/setup.cfg +1 -1
- {deepliif-1.1.9 → deepliif-1.1.11}/setup.py +1 -1
- {deepliif-1.1.9 → deepliif-1.1.11}/tests/test_cli_inference.py +170 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/tests/test_cli_serialize.py +1 -1
- {deepliif-1.1.9 → deepliif-1.1.11}/LICENSE.md +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/__init__.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/__init__.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/base_dataset.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/colorization_dataset.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/image_folder.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/single_dataset.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/template_dataset.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/data/unaligned_dataset.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/models/DeepLIIFExt_model.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/models/DeepLIIF_model.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/models/base_model.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/models/networks.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/options/base_options.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/options/processing_options.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/options/test_options.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/options/train_options.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/postprocessing.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/train.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/get_data.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/html.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/image_pool.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif/util/visualizer.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif.egg-info/dependency_links.txt +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif.egg-info/entry_points.txt +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif.egg-info/requires.txt +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/deepliif.egg-info/top_level.txt +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/tests/test_args.py +0 -0
- {deepliif-1.1.9 → deepliif-1.1.11}/tests/test_cli_train.py +0 -0
````diff
--- deepliif-1.1.9/deepliif.egg-info/PKG-INFO
+++ deepliif-1.1.11/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.9
+Version: 1.1.11
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -24,6 +24,8 @@ License-File: LICENSE.md
 
 <a href="https://onlinelibrary.wiley.com/share/author/4AEBAGEHSZE9GDP3H8MN?target=10.1111/his.15048">Histopathology'23</a>
 
+<a href="https://arxiv.org/abs/2405.08169">MICCAI'24</a>
+
 <a href="https://deepliif.org/">Cloud Deployment</a>
 
 <a href="https://nadeemlab.github.io/DeepLIIF/">Documentation</a>
@@ -51,6 +53,9 @@ segmentation.*
 
 © This code is made available for non-commercial academic purposes.
 
+
+[](https://pepy.tech/project/deepliif?&left_text=totalusers)
+
 *Overview of DeepLIIF pipeline and sample input IHCs (different
 brown/DAB markers -- BCL2, BCL6, CD10, CD3/CD8, Ki67) with corresponding DeepLIIF-generated hematoxylin/mpIF modalities
 and classified (positive (red) and negative (blue) cell) segmentation masks. (a) Overview of DeepLIIF. Given an IHC
@@ -122,7 +127,7 @@ deepliif prepare-training-data --input-dir /path/to/input/images
 To train a model:
 ```
 deepliif train --dataroot /path/to/input/images
-
+               --name Model_Name
 ```
 or
 ```
@@ -168,7 +173,7 @@ The installed `deepliif` uses Dask to perform inference on the input IHC images.
 Before running the `test` command, the model files must be serialized using Torchscript.
 To serialize the model files:
 ```
-deepliif serialize --
+deepliif serialize --model-dir /path/to/input/model/files
                    --output-dir /path/to/output/model/files
 ```
 * By default, the model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
````
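The corrected `serialize` hunk connects to the context line above it: inference runs through Dask on TorchScript archives rather than on live Python modules. For readers unfamiliar with TorchScript, here is a minimal sketch of the mechanism; the one-layer stand-in network and file name are hypothetical, not DeepLIIF internals:

```python
# Minimal TorchScript sketch (stand-in network, not a DeepLIIF model):
# tracing records the forward pass into a self-contained archive that
# can later be loaded without the original Python class definition.
import torch

net = torch.nn.Sequential(torch.nn.Conv2d(3, 3, kernel_size=1)).eval()
example = torch.rand(1, 3, 512, 512)    # dummy 512x512 RGB tile
traced = torch.jit.trace(net, example)  # record the forward pass
traced.save('generator.pt')             # self-contained archive

restored = torch.jit.load('generator.pt')  # no class definition needed
print(restored(example).shape)          # torch.Size([1, 3, 512, 512])
```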
````diff
@@ -177,15 +182,17 @@ deepliif serialize --models-dir /path/to/input/model/files
 ## Testing
 To test the model:
 ```
-deepliif test --input-dir /path/to/input/images
-              --output-dir /path/to/output/images
-              --model-dir path/to/the/serialized/model
+deepliif test --input-dir /path/to/input/images
+              --output-dir /path/to/output/images
+              --model-dir /path/to/the/serialized/model
               --tile-size 512
 ```
 or
 ```
-python test.py --dataroot /path/to/input/images
-               --
+python test.py --dataroot /path/to/input/images
+               --results_dir /path/to/output/images
+               --checkpoints_dir /path/to/model/files
+               --name Model_Name
 ```
 * The latest version of the pretrained models can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).
 * Before running test on images, the model files must be serialized as described above.
@@ -206,7 +213,7 @@ Based on the available GPU resources, the region-size can be changed.
 ```
 deepliif test --input-dir /path/to/input/images
               --output-dir /path/to/output/images
-              --model-dir path/to/the/serialized/model
+              --model-dir /path/to/the/serialized/model
               --tile-size 512
               --region-size 20000
 ```
````
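The hunk above fixes the model path in the region-based invocation; since its context notes that `--region-size` should be chosen against available GPU resources, a rough sense of how many tiles a region implies is useful. A back-of-the-envelope helper (hypothetical, not part of the deepliif CLI):

```python
# Rough tile budgeting for --region-size vs. --tile-size (hypothetical
# helper, not part of deepliif): a 20000 px square region cut into
# 512 px tiles is roughly a 40x40 grid.
import math

def tiles_per_region(region_size: int, tile_size: int) -> int:
    per_side = math.ceil(region_size / tile_size)  # tiles along one side
    return per_side * per_side

print(tiles_per_region(20000, 512))  # -> 1600
```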
````diff
@@ -245,27 +252,161 @@ The plugin also supports submitting multiple ROIs at once:
 ## Cloud Deployment
 If you don't have access to GPU or appropriate hardware and don't want to install ImageJ, we have also created a [cloud-native DeepLIIF deployment](https://deepliif.org) with a user-friendly interface to upload images, visualize, interact, and download the final results.
 
-
 
-
-
+## Cloud API Endpoints
+
+DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
 
 ```
 POST /api/infer
 
-
+File Parameter:
+
+  img (required)
+      Image on which to run DeepLIIF.
+
+Query String Parameters:
+
+  resolution
+      Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
+
+  pil
+      If present, use Pillow to load the image instead of Bio-Formats. Pillow is
+      faster, but works only on common image types (png, jpeg, etc.).
+
+  slim
+      If present, return only the refined segmentation result image.
+
+  nopost
+      If present, do not perform postprocessing (returns only inferred images).
+
+  prob_thresh
+      Probability threshold used in postprocessing the inferred segmentation map
+      image. The segmentation map value must be above this value in order for a
+      pixel to be included in the final cell segmentation. Valid values are an
+      integer in the range 0-254. Default is 150.
+
+  size_thresh
+      Lower threshold for size gating the cells in postprocessing. Segmented
+      cells must have more pixels than this value in order to be included in the
+      final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+      'Auto' will try to automatically determine this lower bound for size gating
+      based on the distribution of detected cell sizes. Default is 'auto'.
+
+  size_thresh_upper
+      Upper threshold for size gating the cells in postprocessing. Segmented
+      cells must have fewer pixels than this value in order to be included in the
+      final cell segmentation. Valid values are a positive integer or 'none'.
+      'None' will use no upper threshold in size gating. Default is 'none'.
+
+  marker_thresh
+      Threshold for the effect that the inferred marker image will have on the
+      postprocessing classification of cells as positive. If any corresponding
+      pixel in the marker image for a cell is above this threshold, the cell will
+      be classified as being positive regardless of the values from the inferred
+      segmentation image. Valid values are an integer in the range 0-255, 'none',
+      or 'auto'. 'None' will not use the marker image during classification.
+      'Auto' will automatically determine a threshold from the marker image.
+      Default is 'auto'.
+```
+
+For example, in Python:
+
+```python
+import os
+import json
+import base64
+from io import BytesIO
+
+import requests
+from PIL import Image
+
+# Use the sample images from the main DeepLIIF repo
+images_dir = './Sample_Large_Tissues'
+filename = 'ROI_1.png'
+
+root = os.path.splitext(filename)[0]
 
-
-
+res = requests.post(
+    url='https://deepliif.org/api/infer',
+    files={
+        'img': open(f'{images_dir}/{filename}', 'rb'),
+    },
+    params={
+        'resolution': '40x',
+    },
+)
+
+data = res.json()
 
-
-
+def b64_to_pil(b):
+    return Image.open(BytesIO(base64.b64decode(b.encode())))
+
+for name, img in data['images'].items():
+    with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
+        b64_to_pil(img).save(f, format='PNG')
+
+with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+    json.dump(data['scoring'], f, indent=2)
+print(json.dumps(data['scoring'], indent=2))
+```
 
-
-boolean: if true, use PIL.Image.open() to load the image, instead of python-bioformats
+If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:
 
-
-
+```
+POST /api/postprocess
+
+File Parameters:
+
+  img (required)
+      Image on which DeepLIIF was run.
+
+  seg_img (required)
+      Inferred segmentation image previously generated by DeepLIIF.
+
+  marker_img (optional)
+      Inferred marker image previously generated by DeepLIIF. If this is
+      omitted, then the marker image will not be used in classification.
+
+Query String Parameters:
+
+  resolution
+      Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
+
+  pil
+      If present, use Pillow to load the original image instead of Bio-Formats.
+      Pillow is faster, but works only on common image types (png, jpeg, etc.).
+      Pillow is always used to open the seg_img and marker_img files.
+
+  prob_thresh
+      Probability threshold used in postprocessing the inferred segmentation map
+      image. The segmentation map value must be above this value in order for a
+      pixel to be included in the final cell segmentation. Valid values are an
+      integer in the range 0-254. Default is 150.
+
+  size_thresh
+      Lower threshold for size gating the cells in postprocessing. Segmented
+      cells must have more pixels than this value in order to be included in the
+      final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+      'Auto' will try to automatically determine this lower bound for size gating
+      based on the distribution of detected cell sizes. Default is 'auto'.
+
+  size_thresh_upper
+      Upper threshold for size gating the cells in postprocessing. Segmented
+      cells must have fewer pixels than this value in order to be included in the
+      final cell segmentation. Valid values are a positive integer or 'none'.
+      'None' will use no upper threshold in size gating. Default is 'none'.
+
+  marker_thresh
+      Threshold for the effect that the inferred marker image will have on the
+      postprocessing classification of cells as positive. If any corresponding
+      pixel in the marker image for a cell is above this threshold, the cell will
+      be classified as being positive regardless of the values from the inferred
+      segmentation image. Valid values are an integer in the range 0-255, 'none',
+      or 'auto'. 'None' will not use the marker image during classification.
+      'Auto' will automatically determine a threshold from the marker image.
+      Default is 'auto'. (If marker_img is not supplied, this has no effect.)
 ```
 
 For example, in Python:
````
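Before the second example continues in the next hunk, it may help to see how the documented `prob_thresh`, `size_thresh`, and `size_thresh_upper` parameters compose. A minimal sketch of that gating logic (illustrative only, built on `scipy` connected components, not DeepLIIF's actual postprocessing code):

```python
# Illustrative probability + size gating (not deepliif's postprocessing
# source): pixels above prob_thresh form the mask, then connected
# components outside (size_thresh, size_thresh_upper) are discarded.
import numpy as np
from scipy import ndimage

def size_gate(seg_map, prob_thresh=150, size_thresh=0, size_thresh_upper=None):
    mask = seg_map > prob_thresh          # probability gating, values 0-255
    labels, n = ndimage.label(mask)       # label connected components
    keep = np.zeros_like(mask)
    for i in range(1, n + 1):
        size = int((labels == i).sum())   # cell area in pixels
        if size > size_thresh and (size_thresh_upper is None
                                   or size < size_thresh_upper):
            keep |= labels == i           # keep cells inside the size band
    return keep

cells = size_gate(np.random.randint(0, 256, (64, 64)), size_thresh=250)
```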
````diff
@@ -283,15 +424,20 @@ from PIL import Image
 images_dir = './Sample_Large_Tissues'
 filename = 'ROI_1.png'
 
+root = os.path.splitext(filename)[0]
+
 res = requests.post(
     url='https://deepliif.org/api/infer',
     files={
-        'img': open(f'{images_dir}/{filename}', 'rb')
+        'img': open(f'{images_dir}/{filename}', 'rb'),
+        'seg_img': open(f'{images_dir}/{root}_Seg.png', 'rb'),
+        'marker_img': open(f'{images_dir}/{root}_Marker.png', 'rb'),
     },
-    # optional param that can be 10x, 20x, or 40x (default)
     params={
-        'resolution': '40x'
-
+        'resolution': '40x',
+        'pil': True,
+        'size_thresh': 250,
+    },
 )
 
 data = res.json()
@@ -300,10 +446,11 @@ def b64_to_pil(b):
     return Image.open(BytesIO(base64.b64decode(b.encode())))
 
 for name, img in data['images'].items():
-
-    with open(output_filepath, 'wb') as f:
+    with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
         b64_to_pil(img).save(f, format='PNG')
 
+with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+    json.dump(data['scoring'], f, indent=2)
 print(json.dumps(data['scoring'], indent=2))
 ```
 
````
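Note that this second example, as shipped, still posts to `/api/infer` even though the surrounding text introduces `/api/postprocess`. A variant targeting the documented postprocessing endpoint might look like the sketch below, assuming it accepts the same multipart layout and returns the same `scoring` payload as `/api/infer`:

```python
# Hedged sketch: call the documented /api/postprocess endpoint with
# previously inferred results. The response shape ('scoring') is assumed
# to mirror /api/infer above.
import requests

images_dir = './Sample_Large_Tissues'
root = 'ROI_1'

res = requests.post(
    url='https://deepliif.org/api/postprocess',
    files={
        'img': open(f'{images_dir}/{root}.png', 'rb'),
        'seg_img': open(f'{images_dir}/{root}_Seg.png', 'rb'),
        'marker_img': open(f'{images_dir}/{root}_Marker.png', 'rb'),
    },
    params={'resolution': '40x', 'prob_thresh': 120, 'size_thresh': 'auto'},
)
print(res.json().get('scoring'))
```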
````diff
@@ -404,4 +551,11 @@ If you find our work useful in your research or if you use parts of this code or
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }
+
+@article{zehra2024deepliifstitch,
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  year = {2024}
+}
 ```
````
The second diff in this comparison, for `{deepliif-1.1.9 → deepliif-1.1.11}/README.md`, repeats the hunks above verbatim: the README body is identical to the PKG-INFO body minus the 11-line packaging metadata header, so each hunk recurs with its line numbers shifted down by 11 (`@@ -13,6 +13,8 @@` for the MICCAI'24 link, `@@ -393,4 +540,11 @@` for the new citation, and so on), and only the `Version:` hunk is unique to PKG-INFO.