deepliif-1.1.9-py3-none-any.whl → deepliif-1.1.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
deepliif/util/__init__.py CHANGED
@@ -118,6 +118,211 @@ def stitch_tile(img, tile, tile_size, overlap_size, i, j):
     img.paste(tile, (i * tile_size, j * tile_size))


+class InferenceTiler:
+    """
+    Iterable class to tile image(s) and stitch result tiles together.
+
+    To perform inference on a large image, that image will need to be
+    tiled into smaller tiles that can be run individually and then
+    stitched back together. This class wraps the functionality as an
+    iterable object that can accept a single image or list of images
+    if multiple images are taken as input for inference.
+
+    An overlap size can be specified so that neighboring tiles will
+    overlap at the edges, helping to reduce seams or other artifacts
+    near the edge of a tile. Padding of a solid color around the
+    perimeter of the tile is also possible, if needed. The specified
+    tile size includes this overlap and pad sizes, so a tile size of
+    512 with an overlap size of 32 and pad size of 16 would have a
+    central area of 416 pixels that are stitched into the result image.
+
+    Example Usage
+    -------------
+    tiler = InferenceTiler(img, 512, 32)
+    for tile in tiler:
+        result_tiles = infer(tile)
+        tiler.stitch(result_tiles)
+    images = tiler.results()
+    """
+
+    def __init__(self, orig, tile_size, overlap_size=0, pad_size=0, pad_color=(255, 255, 255)):
+        """
+        Initialize for tiling an image or list of images.
+
+        Parameters
+        ----------
+        orig : Image | list(Image)
+            Original image or list of images to be tiled.
+        tile_size: int
+            Size (width and height) of the tiles to be generated.
+        overlap_size: int [default: 0]
+            Amount of overlap on each side of the tile.
+        pad_size: int [default: 0]
+            Amount of solid color padding around perimeter of tile.
+        pad_color: tuple(int, int, int) [default: (255,255,255)]
+            RGB color to use for padding.
+        """
+
+        if tile_size <= 0:
+            raise ValueError('InferenceTiler input tile_size must be positive and non-zero')
+        if overlap_size < 0:
+            raise ValueError('InferenceTiler input overlap_size must be positive or zero')
+        if pad_size < 0:
+            raise ValueError('InferenceTiler input pad_size must be positive or zero')
+
+        self.single_orig = not type(orig) is list
+        if self.single_orig:
+            orig = [orig]
+
+        for i in range(1, len(orig)):
+            if orig[i].size != orig[0].size:
+                raise ValueError('InferenceTiler input images do not have the same size.')
+        self.orig_width = orig[0].width
+        self.orig_height = orig[0].height
+
+        # patch size to extract from input image, which is then padded to tile size
+        patch_size = tile_size - (2 * pad_size)
+
+        # make sure width and height are both at least patch_size
+        if orig[0].width < patch_size:
+            for i in range(len(orig)):
+                while orig[i].width < patch_size:
+                    mirrored = ImageOps.mirror(orig[i])
+                    orig[i] = ImageOps.expand(orig[i], (0, 0, orig[i].width, 0))
+                    orig[i].paste(mirrored, (mirrored.width, 0))
+                orig[i] = orig[i].crop((0, 0, patch_size, orig[i].height))
+        if orig[0].height < patch_size:
+            for i in range(len(orig)):
+                while orig[i].height < patch_size:
+                    flipped = ImageOps.flip(orig[i])
+                    orig[i] = ImageOps.expand(orig[i], (0, 0, 0, orig[i].height))
+                    orig[i].paste(flipped, (0, flipped.height))
+                orig[i] = orig[i].crop((0, 0, orig[i].width, patch_size))
+        self.image_width = orig[0].width
+        self.image_height = orig[0].height
+
+        overlap_width = 0 if patch_size >= self.image_width else overlap_size
+        overlap_height = 0 if patch_size >= self.image_height else overlap_size
+        center_width = patch_size - (2 * overlap_width)
+        center_height = patch_size - (2 * overlap_height)
+        if center_width <= 0 or center_height <= 0:
+            raise ValueError('InferenceTiler combined overlap_size and pad_size are too large')
+
+        self.c0x = pad_size  # crop offset for left of non-pad content in result tile
+        self.c0y = pad_size  # crop offset for top of non-pad content in result tile
+        self.c1x = overlap_width + pad_size  # crop offset for left of center region in result tile
+        self.c1y = overlap_height + pad_size  # crop offset for top of center region in result tile
+        self.c2x = patch_size - overlap_width + pad_size  # crop offset for right of center region in result tile
+        self.c2y = patch_size - overlap_height + pad_size  # crop offset for bottom of center region in result tile
+        self.c3x = patch_size + pad_size  # crop offset for right of non-pad content in result tile
+        self.c3y = patch_size + pad_size  # crop offset for bottom of non-pad content in result tile
+        self.p1x = overlap_width  # paste offset for left of center region w.r.t (x,y) coord
+        self.p1y = overlap_height  # paste offset for top of center region w.r.t (x,y) coord
+        self.p2x = patch_size - overlap_width  # paste offset for right of center region w.r.t (x,y) coord
+        self.p2y = patch_size - overlap_height  # paste offset for bottom of center region w.r.t (x,y) coord
+
+        self.overlap_width = overlap_width
+        self.overlap_height = overlap_height
+        self.patch_size = patch_size
+        self.center_width = center_width
+        self.center_height = center_height
+
+        self.orig = orig
+        self.tile_size = tile_size
+        self.pad_size = pad_size
+        self.pad_color = pad_color
+        self.res = {}
+
+    def __iter__(self):
+        """
+        Generate the tiles as an iterable.
+
+        Tiles are created and iterated over from top left to bottom
+        right, going across the rows. The yielded tile(s) match the
+        type of the original input when initialized (either a single
+        image or a list of images in the same order as initialized).
+        The (x, y) coordinate of the current tile is maintained
+        internally for use in the stitch function.
+        """
+
+        for y in range(0, self.image_height, self.center_height):
+            for x in range(0, self.image_width, self.center_width):
+                if x + self.patch_size > self.image_width:
+                    x = self.image_width - self.patch_size
+                if y + self.patch_size > self.image_height:
+                    y = self.image_height - self.patch_size
+                self.x = x
+                self.y = y
+                tiles = [im.crop((x, y, x + self.patch_size, y + self.patch_size)) for im in self.orig]
+                if self.pad_size != 0:
+                    tiles = [ImageOps.expand(t, self.pad_size, self.pad_color) for t in tiles]
+                yield tiles[0] if self.single_orig else tiles
+
+    def stitch(self, result_tiles):
+        """
+        Stitch result tiles into the result images.
+
+        The key names for the dictionary of result tiles are used to
+        stitch each tile into its corresponding final image in the
+        results attribute. If a result image does not exist for a
+        result tile key name, then it will be created. The result tiles
+        are stitched at the location from which the most recently
+        iterated tile was extracted.
+
+        Parameters
+        ----------
+        result_tiles : dict(str: Image)
+            Dictionary of result tiles from the inference.
+        """
+
+        for k, tile in result_tiles.items():
+            if k not in self.res:
+                self.res[k] = Image.new('RGB', (self.image_width, self.image_height))
+            if tile.size != (self.tile_size, self.tile_size):
+                tile = tile.resize((self.tile_size, self.tile_size))
+            self.res[k].paste(tile.crop((self.c1x, self.c1y, self.c2x, self.c2y)), (self.x + self.p1x, self.y + self.p1y))
+
+            # top left corner
+            if self.x == 0 and self.y == 0:
+                self.res[k].paste(tile.crop((self.c0x, self.c0y, self.c1x, self.c1y)), (self.x, self.y))
+            # top row
+            if self.y == 0:
+                self.res[k].paste(tile.crop((self.c1x, self.c0y, self.c2x, self.c1y)), (self.x + self.p1x, self.y))
+            # top right corner
+            if self.x == self.image_width - self.patch_size and self.y == 0:
+                self.res[k].paste(tile.crop((self.c2x, self.c0y, self.c3x, self.c1y)), (self.x + self.p2x, self.y))
+            # left column
+            if self.x == 0:
+                self.res[k].paste(tile.crop((self.c0x, self.c1y, self.c1x, self.c2y)), (self.x, self.y + self.p1y))
+            # right column
+            if self.x == self.image_width - self.patch_size:
+                self.res[k].paste(tile.crop((self.c2x, self.c1y, self.c3x, self.c2y)), (self.x + self.p2x, self.y + self.p1y))
+            # bottom left corner
+            if self.x == 0 and self.y == self.image_height - self.patch_size:
+                self.res[k].paste(tile.crop((self.c0x, self.c2y, self.c1x, self.c3y)), (self.x, self.y + self.p2y))
+            # bottom row
+            if self.y == self.image_height - self.patch_size:
+                self.res[k].paste(tile.crop((self.c1x, self.c2y, self.c2x, self.c3y)), (self.x + self.p1x, self.y + self.p2y))
+            # bottom right corner
+            if self.x == self.image_width - self.patch_size and self.y == self.image_height - self.patch_size:
+                self.res[k].paste(tile.crop((self.c2x, self.c2y, self.c3x, self.c3y)), (self.x + self.p2x, self.y + self.p2y))
+
+    def results(self):
+        """
+        Return a dictionary of result images.
+
+        The keys for the result images are the same as those used for
+        the result tiles in the stitch function. This function should
+        only be called once, since the stitched images will be cropped
+        if the original image size was less than the patch size.
+        """
+
+        if self.orig_width != self.image_width or self.orig_height != self.image_height:
+            return {k: im.crop((0, 0, self.orig_width, self.orig_height)) for k, im in self.res.items()}
+        else:
+            return {k: im for k, im in self.res.items()}
+
+
 def calculate_background_mean_value(img):
     img = cv2.fastNlMeansDenoisingColored(np.array(img), None, 10, 10, 7, 21)
     img = np.array(img, dtype=float)
@@ -164,6 +369,28 @@ def adjust_background_tile(img):
     return image


+def image_variance_gray(img):
+    px = np.asarray(img) if img.mode == 'L' else np.asarray(img.convert('L'))
+    idx = np.logical_and(px != 255, px != 0)
+    val = px[idx]
+    if val.shape[0] == 0:
+        return 0
+    var = np.var(val)
+    return var
+
+
+def image_variance_rgb(img):
+    px = np.asarray(img) if img.mode == 'RGB' else np.asarray(img.convert('RGB'))
+    nonwhite = np.any(px != [255, 255, 255], axis=-1)
+    nonblack = np.any(px != [0, 0, 0], axis=-1)
+    idx = np.logical_and(nonwhite, nonblack)
+    val = px[idx]
+    if val.shape[0] == 0:
+        return [0, 0, 0]
+    var = np.var(val, axis=0)
+    return var
+
+
 def read_bioformats_image_with_reader(path, channel=0, region=(0, 0, 0, 0)):
     """
     Using this function, you can read a specific region of a large image by giving the region bounding box (XYWH format)
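For context, a minimal usage sketch of the new `InferenceTiler` class, following its own docstring example. The inference callable and sample file name below are hypothetical placeholders, assuming deepliif 1.1.11 is installed:

```python
from PIL import Image
from deepliif.util import InferenceTiler

def run_model(tile):
    # hypothetical stand-in for a real inference call; it must return a
    # dict mapping result names to PIL Images of the same tile size
    return {'Seg': tile.copy()}

img = Image.open('ROI_1.png').convert('RGB')              # assumed sample input
tiler = InferenceTiler(img, tile_size=512, overlap_size=32)
for tile in tiler:                                         # tiles yielded row by row, top-left to bottom-right
    tiler.stitch(run_model(tile))                          # paste each result tile at the current (x, y) location
for name, result in tiler.results().items():               # dict keyed by the names used in stitch()
    result.save(f'ROI_1_{name}.png')
```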
deepliif/util/util.py CHANGED
@@ -48,7 +48,23 @@ def save_image(image_numpy, image_path, aspect_ratio=1.0):
         image_numpy (numpy array) -- input numpy array
         image_path (str) -- the path of the image
     """
-
+    x, y, nc = image_numpy.shape
+
+    if nc > 3:
+        if nc % 3 == 0:
+            nc_img = 3
+            no_img = nc // nc_img
+
+        elif nc % 2 == 0:
+            nc_img = 2
+            no_img = nc // nc_img
+        else:
+            nc_img = 1
+            no_img = nc // nc_img
+        print(f'image (numpy) has {nc}>3 channels, inferred to have {no_img} images each with {nc_img} channel(s)')
+        l_image_numpy = np.dsplit(image_numpy, [nc_img*i for i in range(1, no_img)])
+        image_numpy = np.concatenate(l_image_numpy, axis=1)  # stack horizontally
+
     image_pil = Image.fromarray(image_numpy)
     h, w, _ = image_numpy.shape

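The block added to `save_image` reshapes an array with more than three channels into several images placed side by side before converting to a PIL image. A minimal sketch of the same `np.dsplit`/`np.concatenate` rearrangement, using a synthetic 6-channel array (not data from the package):

```python
import numpy as np

# synthetic (H, W, 6) array standing in for a >3-channel result
arr = np.random.randint(0, 255, (64, 64, 6), dtype=np.uint8)

nc_img, no_img = 3, 2                                     # 3 channels per image, 2 images
parts = np.dsplit(arr, [nc_img * i for i in range(1, no_img)])
side_by_side = np.concatenate(parts, axis=1)              # stack horizontally, as in save_image

print(side_by_side.shape)                                 # (64, 128, 3)
```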
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.9
+Version: 1.1.11
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -34,6 +34,8 @@ Requires-Dist: python-bioformats (>=4.0.6)
 |
 <a href="https://onlinelibrary.wiley.com/share/author/4AEBAGEHSZE9GDP3H8MN?target=10.1111/his.15048">Histopathology'23</a>
 |
+<a href="https://arxiv.org/abs/2405.08169">MICCAI'24</a>
+|
 <a href="https://deepliif.org/">Cloud Deployment</a>
 |
 <a href="https://nadeemlab.github.io/DeepLIIF/">Documentation</a>
@@ -61,6 +63,9 @@ segmentation.*
 
 © This code is made available for non-commercial academic purposes.
 
+![Version](https://img.shields.io/static/v1?label=latest&message=v1.1.9&color=darkgreen)
+[![Total Downloads](https://static.pepy.tech/personalized-badge/deepliif?period=total&units=international_system&left_color=grey&right_color=blue&left_text=total%20downloads)](https://pepy.tech/project/deepliif?&left_text=totalusers)
+
 ![overview_image](./images/overview.png)*Overview of DeepLIIF pipeline and sample input IHCs (different
 brown/DAB markers -- BCL2, BCL6, CD10, CD3/CD8, Ki67) with corresponding DeepLIIF-generated hematoxylin/mpIF modalities
 and classified (positive (red) and negative (blue) cell) segmentation masks. (a) Overview of DeepLIIF. Given an IHC
@@ -132,7 +137,7 @@ deepliif prepare-training-data --input-dir /path/to/input/images
 To train a model:
 ```
 deepliif train --dataroot /path/to/input/images
-               --name Model_Name
+               --name Model_Name
 ```
 or
 ```
@@ -178,7 +183,7 @@ The installed `deepliif` uses Dask to perform inference on the input IHC images.
 Before running the `test` command, the model files must be serialized using Torchscript.
 To serialize the model files:
 ```
-deepliif serialize --models-dir /path/to/input/model/files
+deepliif serialize --model-dir /path/to/input/model/files
                    --output-dir /path/to/output/model/files
 ```
 * By default, the model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
@@ -187,15 +192,17 @@ deepliif serialize --models-dir /path/to/input/model/files
 ## Testing
 To test the model:
 ```
-deepliif test --input-dir /path/to/input/images
-              --output-dir /path/to/output/images
-              --model-dir path/to/the/serialized/model
+deepliif test --input-dir /path/to/input/images
+              --output-dir /path/to/output/images
+              --model-dir /path/to/the/serialized/model
               --tile-size 512
 ```
 or
 ```
-python test.py --dataroot /path/to/input/images
-               --name Model_Name
+python test.py --dataroot /path/to/input/images
+               --results_dir /path/to/output/images
+               --checkpoints_dir /path/to/model/files
+               --name Model_Name
 ```
 * The latest version of the pretrained models can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).
 * Before running test on images, the model files must be serialized as described above.
@@ -216,7 +223,7 @@ Based on the available GPU resources, the region-size can be changed.
 ```
 deepliif test --input-dir /path/to/input/images
               --output-dir /path/to/output/images
-              --model-dir path/to/the/serialized/model
+              --model-dir /path/to/the/serialized/model
               --tile-size 512
               --region-size 20000
 ```
@@ -255,27 +262,161 @@ The plugin also supports submitting multiple ROIs at once:
 ## Cloud Deployment
 If you don't have access to GPU or appropriate hardware and don't want to install ImageJ, we have also created a [cloud-native DeepLIIF deployment](https://deepliif.org) with a user-friendly interface to upload images, visualize, interact, and download the final results.
 
-![DeepLIIF Website Demo](images/deepliif-website-demo-03.gif)
+![DeepLIIF Website Demo](images/deepliif-website-demo-04.gif)
 
-DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request
-containing the original image file:
+## Cloud API Endpoints
+
+DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
 
 ```
 POST /api/infer
 
-Parameters
+File Parameter:
+
+  img (required)
+    Image on which to run DeepLIIF.
+
+Query String Parameters:
+
+  resolution
+    Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
+
+  pil
+    If present, use Pillow to load the image instead of Bio-Formats. Pillow is
+    faster, but works only on common image types (png, jpeg, etc.).
+
+  slim
+    If present, return only the refined segmentation result image.
+
+  nopost
+    If present, do not perform postprocessing (returns only inferred images).
+
+  prob_thresh
+    Probability threshold used in postprocessing the inferred segmentation map
+    image. The segmentation map value must be above this value in order for a
+    pixel to be included in the final cell segmentation. Valid values are an
+    integer in the range 0-254. Default is 150.
+
+  size_thresh
+    Lower threshold for size gating the cells in postprocessing. Segmented
+    cells must have more pixels than this value in order to be included in the
+    final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+    'Auto' will try to automatically determine this lower bound for size gating
+    based on the distribution of detected cell sizes. Default is 'auto'.
+
+  size_thresh_upper
+    Upper threshold for size gating the cells in postprocessing. Segmented
+    cells must have fewer pixels than this value in order to be included in the
+    final cell segmentation. Valid values are a positive integer or 'none'.
+    'None' will use no upper threshold in size gating. Default is 'none'.
+
+  marker_thresh
+    Threshold for the effect that the inferred marker image will have on the
+    postprocessing classification of cells as positive. If any corresponding
+    pixel in the marker image for a cell is above this threshold, the cell will
+    be classified as being positive regardless of the values from the inferred
+    segmentation image. Valid values are an integer in the range 0-255, 'none',
+    or 'auto'. 'None' will not use the marker image during classification.
+    'Auto' will automatically determine a threshold from the marker image.
+    Default is 'auto'.
+```
+
+For example, in Python:
+
+```python
+import os
+import json
+import base64
+from io import BytesIO
+
+import requests
+from PIL import Image
+
+# Use the sample images from the main DeepLIIF repo
+images_dir = './Sample_Large_Tissues'
+filename = 'ROI_1.png'
+
+root = os.path.splitext(filename)[0]
 
-img (required)
-  file: image to run the models on
+res = requests.post(
+    url='https://deepliif.org/api/infer',
+    files={
+        'img': open(f'{images_dir}/{filename}', 'rb'),
+    },
+    params={
+        'resolution': '40x',
+    },
+)
+
+data = res.json()
 
-resolution
-  string: resolution used to scan the slide (10x, 20x, 40x), defaults to 40x
+def b64_to_pil(b):
+    return Image.open(BytesIO(base64.b64decode(b.encode())))
+
+for name, img in data['images'].items():
+    with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
+        b64_to_pil(img).save(f, format='PNG')
+
+with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+    json.dump(data['scoring'], f, indent=2)
+print(json.dumps(data['scoring'], indent=2))
+```
 
-pil
-  boolean: if true, use PIL.Image.open() to load the image, instead of python-bioformats
+If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:
 
-slim
-  boolean: if true, return only the segmentation result image
+```
+POST /api/postprocess
+
+File Parameters:
+
+  img (required)
+    Image on which DeepLIIF was run.
+
+  seg_img (required)
+    Inferred segmentation image previously generated by DeepLIIF.
+
+  marker_img (optional)
+    Inferred marker image previously generated by DeepLIIF. If this is
+    omitted, then the marker image will not be used in classification.
+
+Query String Parameters:
+
+  resolution
+    Resolution used to scan the slide (10x, 20x, 40x). Default is 40x.
+
+  pil
+    If present, use Pillow to load the original image instead of Bio-Formats.
+    Pillow is faster, but works only on common image types (png, jpeg, etc.).
+    Pillow is always used to open the seg_img and marker_img files.
+
+  prob_thresh
+    Probability threshold used in postprocessing the inferred segmentation map
+    image. The segmentation map value must be above this value in order for a
+    pixel to be included in the final cell segmentation. Valid values are an
+    integer in the range 0-254. Default is 150.
+
+  size_thresh
+    Lower threshold for size gating the cells in postprocessing. Segmented
+    cells must have more pixels than this value in order to be included in the
+    final cell segmentation. Valid values are 0, a positive integer, or 'auto'.
+    'Auto' will try to automatically determine this lower bound for size gating
+    based on the distribution of detected cell sizes. Default is 'auto'.
+
+  size_thresh_upper
+    Upper threshold for size gating the cells in postprocessing. Segmented
+    cells must have fewer pixels than this value in order to be included in the
+    final cell segmentation. Valid values are a positive integer or 'none'.
+    'None' will use no upper threshold in size gating. Default is 'none'.
+
+  marker_thresh
+    Threshold for the effect that the inferred marker image will have on the
+    postprocessing classification of cells as positive. If any corresponding
+    pixel in the marker image for a cell is above this threshold, the cell will
+    be classified as being positive regardless of the values from the inferred
+    segmentation image. Valid values are an integer in the range 0-255, 'none',
+    or 'auto'. 'None' will not use the marker image during classification.
+    'Auto' will automatically determine a threshold from the marker image.
+    Default is 'auto'. (If marker_img is not supplied, this has no effect.)
 ```
 
 For example, in Python:
@@ -293,15 +434,20 @@ from PIL import Image
 images_dir = './Sample_Large_Tissues'
 filename = 'ROI_1.png'
 
+root = os.path.splitext(filename)[0]
+
 res = requests.post(
     url='https://deepliif.org/api/infer',
     files={
-        'img': open(f'{images_dir}/{filename}', 'rb')
+        'img': open(f'{images_dir}/{filename}', 'rb'),
+        'seg_img': open(f'{images_dir}/{root}_Seg.png', 'rb'),
+        'marker_img': open(f'{images_dir}/{root}_Marker.png', 'rb'),
     },
-    # optional param that can be 10x, 20x, or 40x (default)
     params={
-        'resolution': '40x'
-    }
+        'resolution': '40x',
+        'pil': True,
+        'size_thresh': 250,
+    },
 )
 
 data = res.json()
@@ -310,10 +456,11 @@ def b64_to_pil(b):
     return Image.open(BytesIO(base64.b64decode(b.encode())))
 
 for name, img in data['images'].items():
-    output_filepath = f'{images_dir}/{os.path.splitext(filename)[0]}_{name}.png'
-    with open(output_filepath, 'wb') as f:
+    with open(f'{images_dir}/{root}_{name}.png', 'wb') as f:
         b64_to_pil(img).save(f, format='PNG')
 
+with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
+    json.dump(data['scoring'], f, indent=2)
 print(json.dumps(data['scoring'], indent=2))
 ```
 
@@ -414,4 +561,11 @@ If you find our work useful in your research or if you use parts of this code or
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }
+
+@article{zehra2024deepliifstitch,
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  year = {2024}
+}
 ```
@@ -1,9 +1,9 @@
-cli.py,sha256=elGu-9di_LcUXPsWWknWPevbaTv2r2rpORVPH5lsnAU,39723
+cli.py,sha256=iU9YxO65T1rxX2Mx9f9LsEPC4o_ZXO-wH_-FUjIA1so,40088
 deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 deepliif/postprocessing.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
 deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
 deepliif/data/__init__.py,sha256=euf9eUboK4RYR0jvdiyZDgPGozC1Nv7WRqRbTxSZD6A,5281
-deepliif/data/aligned_dataset.py,sha256=bAofVfgMwtb8Exe4EtQ3aP2ZYewBT4N_X8BDWSeFFj0,4405
+deepliif/data/aligned_dataset.py,sha256=6qNFLXXW1phuIfNhCJSaLfOc-KN2kl7EuUdmyAPPU4I,5148
 deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
 deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
 deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
@@ -12,23 +12,24 @@ deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN
 deepliif/data/unaligned_dataset.py,sha256=m7j-CX-hkXbhg96NSEcaCagNVhTuXKkMsBADdMEJDBA,3393
 deepliif/models/DeepLIIFExt_model.py,sha256=Sc60rOfDJuoGrJ1CYe4beAg6as6F0o864AO6ZB7paBY,14527
 deepliif/models/DeepLIIF_model.py,sha256=ECZyM9jzoJAWSgB1ProBoarVuGcbScQMaSkRjSMgt0k,20872
-deepliif/models/__init__.py,sha256=E2udWyU4eScFnvDO2qtwMeHRz4ihw8hhCoP666QBK4o,22674
+deepliif/models/SDG_model.py,sha256=xcZCTMNyJbcB78I1c8KtYVIB6OWL7WSMKdCxNemIzxs,9074
+deepliif/models/__init__.py,sha256=LWEyM7YwSoQ1TgRnsFk8O96xTf-QIxw5o_RexnECl_Q,28049
 deepliif/models/base_model.py,sha256=HKcUOBHtL-zLs5ZcmeXT-ZV_ubqsSUo4wMCQ0W27YHU,15583
 deepliif/models/networks.py,sha256=bN4yjRdE413efUESq8pvhzPDgFCTwFKXyQOrRqHckWY,32177
-deepliif/options/__init__.py,sha256=nm231wh_hQw1mX0AfXZu7-cx6WZdxURsE-jvnjlHQzE,4581
+deepliif/options/__init__.py,sha256=-syiyTK_oAeTLCBDm0bz1f_1jI3VK3LCwo2UNwOz6eM,5949
 deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
 deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
 deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
 deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
-deepliif/util/__init__.py,sha256=dPkYGAy8s8JL7srZIkIhDuKdpQwVyf2Nsy5ABWlLFtg,16924
+deepliif/util/__init__.py,sha256=bIa1Y1YQynvAoXux8ENAk_8Ykpfu9xxGByg-pgfGkK0,28090
 deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
 deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
 deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
-deepliif/util/util.py,sha256=bTArzuhIMGgGweH0v5rkiHrqBxc24BDv12rssOE9OoI,4636
+deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
 deepliif/util/visualizer.py,sha256=5V1lWidHqssJX21jn1P5-bOVgtrEXKVaQgnMWAsMfqg,15636
-deepliif-1.1.9.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
-deepliif-1.1.9.dist-info/METADATA,sha256=CBgGs3wxeg1Hewt7lbAFUEH4Sbm5jV6jt4F3q59gvzM,25076
-deepliif-1.1.9.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-deepliif-1.1.9.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
-deepliif-1.1.9.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
-deepliif-1.1.9.dist-info/RECORD,,
+deepliif-1.1.11.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+deepliif-1.1.11.dist-info/METADATA,sha256=3UtsAvCQz1osg0cBCnnLZVT1pvPdRxFbsaJ6B_yn_SI,31598
+deepliif-1.1.11.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+deepliif-1.1.11.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+deepliif-1.1.11.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+deepliif-1.1.11.dist-info/RECORD,,