deepliif-1.1.11-py3-none-any.whl → deepliif-1.1.12-py3-none-any.whl
- cli.py +354 -67
- deepliif/data/__init__.py +7 -7
- deepliif/data/aligned_dataset.py +2 -3
- deepliif/data/unaligned_dataset.py +38 -19
- deepliif/models/CycleGAN_model.py +282 -0
- deepliif/models/DeepLIIFExt_model.py +47 -25
- deepliif/models/DeepLIIF_model.py +69 -19
- deepliif/models/SDG_model.py +57 -26
- deepliif/models/__init__ - run_dask_multi dev.py +943 -0
- deepliif/models/__init__ - timings.py +764 -0
- deepliif/models/__init__.py +328 -265
- deepliif/models/att_unet.py +199 -0
- deepliif/models/base_model.py +32 -8
- deepliif/models/networks.py +108 -34
- deepliif/options/__init__.py +49 -5
- deepliif/postprocessing.py +1034 -227
- deepliif/postprocessing__OLD__DELETE.py +440 -0
- deepliif/util/__init__.py +85 -64
- deepliif/util/visualizer.py +106 -19
- {deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/METADATA +75 -23
- deepliif-1.1.12.dist-info/RECORD +40 -0
- deepliif-1.1.11.dist-info/RECORD +0 -35
- {deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/LICENSE.md +0 -0
- {deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/WHEEL +0 -0
- {deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/entry_points.txt +0 -0
- {deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/top_level.txt +0 -0
deepliif/util/visualizer.py
CHANGED
@@ -165,6 +165,7 @@ class Visualizer():
             if ncols > 0:        # show all the images in one visdom panel
                 ncols = min(ncols, len(visuals))
                 h, w = next(iter(visuals.values())).shape[:2]
+
                 table_css = """<style>
                         table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
                         table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
@@ -176,13 +177,16 @@ class Visualizer():
                 images = []
                 idx = 0
                 for label, image in visuals.items():
-                    image_numpy = util.tensor2im(image)
-                    label_html_row += '<td>%s</td>' % label
-                    images.append(image_numpy.transpose([2, 0, 1]))
-                    idx += 1
-                    if idx % ncols == 0:
-                        label_html += '<tr>%s</tr>' % label_html_row
-                        label_html_row = ''
+                    if image.shape[1] != 3:
+                        pass
+                    else:
+                        image_numpy = util.tensor2im(image)
+                        label_html_row += '<td>%s</td>' % label
+                        images.append(image_numpy.transpose([2, 0, 1]))
+                        idx += 1
+                        if idx % ncols == 0:
+                            label_html += '<tr>%s</tr>' % label_html_row
+                            label_html_row = ''
 
                 white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
                 while idx % ncols != 0:
@@ -191,6 +195,7 @@ class Visualizer():
                     idx += 1
                 if label_html_row != '':
                     label_html += '<tr>%s</tr>' % label_html_row
+
 
                 try:
                     self.vis.images(images, nrow=ncols, win=self.display_id + 1,
@@ -248,6 +253,10 @@ class Visualizer():
         # if having 2 processes, each process obtains 50% of the data (effective dataset_size divided by half), the effective counter ratio shall multiply by 2 to compensate that
         n_proc = int(os.getenv('WORLD_SIZE',1))
         counter_ratio = counter_ratio * n_proc
+
+        self.plot_data_update_train = False
+        self.plot_data_update_val = False
+        self.plot_data_update_metrics = False
 
         if self.remote:
             fn = 'plot_current_losses.pickle'
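The WORLD_SIZE compensation above generalizes the 2-process example in the comment: each of N distributed workers sees roughly 1/N of the dataset, so its local progress counter must be scaled by N to recover the true epoch fraction. A minimal standalone sketch of that arithmetic, with hypothetical values outside the Visualizer class:

```
import os

local_counter_ratio = 0.25                    # this worker is 25% through its shard
n_proc = int(os.getenv('WORLD_SIZE', 1))      # number of distributed processes (1 if unset)
counter_ratio = local_counter_ratio * n_proc  # compensated fraction of the full epoch

epoch = 3
print(epoch + counter_ratio)                  # x-axis value used when plotting losses
```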
@@ -263,20 +272,98 @@ class Visualizer():
             exec(f'{self.remote_transfer_cmd_function}("{path_source}")')
         else:
             if not hasattr(self, 'plot_data'):
-                self.plot_data = {'X': [], '… [three removed lines truncated in this diff view]
+                self.plot_data = {'X': [], 'X_val':[], 'X_metrics':[],
+                                  'Y': [], 'Y_val':[], 'Y_metrics':[],
+                                  'legend': [], 'legend_val': [], 'legend_metrics':[]}
+                for k in list(losses.keys()):
+                    if k.endswith('_val'):
+                        self.plot_data['legend_val'].append(k)
+                    elif k.startswith(('G_','D_')):
+                        self.plot_data['legend'].append(k)
+                    else:
+                        self.plot_data['legend_metrics'].append(k)
+
+            # check if all names in losses dict have been seen
+            # currently we assume the three types of metrics (train loss, val loss, other metrics) can come into the losses dict
+            # at any step, but each type will join or leave the dict as a whole (i.e., train loss metrics will either all appear or all be missing)
+            for k in list(losses.keys()):
+                if k.endswith('_val'):
+                    if k not in self.plot_data['legend_val']:
+                        self.plot_data['legend_val'].append(k)
+                elif k.startswith(('G_','D_')):
+                    if k not in self.plot_data['legend']:
+                        self.plot_data['legend'].append(k)
+                else:
+                    if k not in self.plot_data['legend_metrics']:
+                        self.plot_data['legend_metrics'].append(k)
+
+            # update training loss
+            print('update training loss')
+            if len(self.plot_data['legend']) > 0:
+                if self.plot_data['legend'][0] in losses:
+                    self.plot_data['X'].append(epoch + counter_ratio)
+                    self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])
+                    self.plot_data_update_train = True
+
+            # update validation loss
+            print('update validation loss')
+            if len(self.plot_data['legend_val']) > 0:
+                if self.plot_data['legend_val'][0] in losses:
+                    self.plot_data['X_val'].append(epoch + counter_ratio)
+                    self.plot_data['Y_val'].append([losses[k] for k in self.plot_data['legend_val']])
+                    self.plot_data_update_val = True
+
+            # update other calculated metrics
+            print('update other metrics')
+            if len(self.plot_data['legend_metrics']) > 0:
+                if self.plot_data['legend_metrics'][0] in losses:
+                    self.plot_data['X_metrics'].append(epoch + counter_ratio)
+                    self.plot_data['Y_metrics'].append([losses[k] for k in self.plot_data['legend_metrics']])
+                    self.plot_data_update_metrics = True
 
             try:
-                self.… [nine removed lines truncated in this diff view]
+                if self.plot_data_update_train:
+                    print('plotting train loss')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
+                        Y=np.array(self.plot_data['Y']),
+                        opts={
+                            'title': self.name + ' train loss over time',
+                            'legend': self.plot_data['legend'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'loss'},
+                        win = 'train',
+                        #env=self.display_id
+                    )
+
+                if self.plot_data_update_val:
+                    print('plotting val loss')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X_val'])] * len(self.plot_data['legend_val']), 1),
+                        Y=np.array(self.plot_data['Y_val']),
+                        opts={
+                            'title': self.name + ' val loss over time',
+                            'legend': self.plot_data['legend_val'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'loss'},
+                        win = 'val',
+                        #env=self.display_id
+                    )
+
+                if self.plot_data_update_metrics:
+                    print('plotting other metrics')
+                    self.vis.line(
+                        X=np.stack([np.array(self.plot_data['X_metrics'])] * len(self.plot_data['legend_metrics']), 1),
+                        Y=np.array(self.plot_data['Y_metrics']),
+                        opts={
+                            'title': self.name + ' metrics over time',
+                            'legend': self.plot_data['legend_metrics'],
+                            'xlabel': 'epoch',
+                            'ylabel': 'metrics'},
+                        win = 'metrics',
+                        #env=self.display_id
+                    )
+
             except VisdomExceptionBase:
                 self.create_visdom_connections()
 
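Taken together, the rewritten plotting code routes each entry of the `losses` dict to one of three visdom windows ('train', 'val', 'metrics') purely by key name. A minimal sketch of that grouping rule in isolation, with hypothetical loss names (this helper is ours for illustration, not part of the DeepLIIF API):

```
def classify_loss_key(key):
    # Mirrors the dispatch above: '*_val' keys feed the validation window,
    # 'G_*'/'D_*' (generator/discriminator) keys feed the training window,
    # and anything else feeds the metrics window.
    if key.endswith('_val'):
        return 'val'
    elif key.startswith(('G_', 'D_')):
        return 'train'
    return 'metrics'

losses = {'G_GAN': 0.8, 'D_real': 0.3, 'G_GAN_val': 0.9, 'precision': 0.95}
for k in losses:
    print(k, '->', classify_loss_key(k))
# G_GAN -> train, D_real -> train, G_GAN_val -> val, precision -> metrics
```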
{deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: deepliif
-Version: 1.1.11
+Version: 1.1.12
 Summary: DeepLIIF: Deep-Learning Inferred Multiplex Immunofluorescence for Immunohistochemical Image Quantification
 Home-page: https://github.com/nadeemlab/DeepLIIF
 Author: Parmida93
@@ -8,16 +8,18 @@ Author-email: ghahremani.parmida@gmail.com
 Keywords: DeepLIIF,IHC,Segmentation,Classification
 Description-Content-Type: text/markdown
 License-File: LICENSE.md
-Requires-Dist: opencv-python (==4.… [old pin truncated in this diff view]
+Requires-Dist: opencv-python (==4.8.1.78)
 Requires-Dist: torchvision (==0.10.0)
 Requires-Dist: scikit-image (==0.18.3)
 Requires-Dist: dominate (==2.6.0)
-Requires-Dist: numba (==0.… [old pin truncated in this diff view]
+Requires-Dist: numba (==0.57.1)
 Requires-Dist: Click (==8.0.3)
-Requires-Dist: requests (==2.… [old pin truncated in this diff view]
+Requires-Dist: requests (==2.32.2)
 Requires-Dist: dask (==2021.11.2)
 Requires-Dist: visdom (>=0.1.8.3)
 Requires-Dist: python-bioformats (>=4.0.6)
+Requires-Dist: openslide-bin (==4.0.0.6)
+Requires-Dist: openslide-python (==1.4.1)
 
 
 <!-- PROJECT LOGO -->
@@ -63,7 +65,7 @@ segmentation.*
 
 © This code is made available for non-commercial academic purposes.
 
-[…badge image markdown truncated in this diff view]
+[…badge image markdown truncated in this diff view]
 [![…](…)](https://pepy.tech/project/deepliif?&left_text=totalusers)
 
 *Overview of DeepLIIF pipeline and sample input IHCs (different
@@ -111,6 +113,16 @@ Commands:
   train    General-purpose training script for multi-task...
 ```
 
+**Note:** You might need to install a version of PyTorch that is compatible with your CUDA version.
+Otherwise, only the CPU will be used.
+Visit the [PyTorch website](https://pytorch.org/) for details.
+You can confirm if your installation will run on the GPU by checking if the following returns `True`:
+
+```
+import torch
+torch.cuda.is_available()
+```
+
 ## Training Dataset
 For training, all image sets must be 512x512 and combined together in 3072x512 images (six images of size 512x512 stitched
 together horizontally).
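As a follow-up to the CUDA note added in this hunk, a common pattern is to select the device explicitly and fall back to the CPU. A short sketch using only standard PyTorch calls (not a DeepLIIF-specific API):

```
import torch

# Use the GPU when the installed PyTorch build can reach one, else the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Running on:', device)
if device.type == 'cuda':
    print(torch.cuda.get_device_name(0))  # verify which GPU was picked up
```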
@@ -208,26 +220,49 @@ python test.py --dataroot /path/to/input/images
 * Before running test on images, the model files must be serialized as described above.
 * The serialized model files are expected to be located in `DeepLIIF/model-server/DeepLIIF_Latest_Model`.
 * The test results will be saved to the specified output directory, which defaults to the input directory.
-* The … [removed line truncated in this diff view]
+* The tile size must be specified and is used to split the image into tiles for processing. The tile size is based on the resolution (scan magnification) of the input image, and the recommended values are a tile size of 512 for 40x images, 256 for 20x, and 128 for 10x. Note that the smaller the tile size, the longer inference will take.
 * Testing datasets can be downloaded [here](https://zenodo.org/record/4751737#.YKRTS0NKhH4).
 
+**Test Command Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test`:
+* `--eager-mode` Run the original model files (instead of serialized model files).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
 **Whole Slide Image (WSI) Inference:**
 For translation and segmentation of whole slide images,
-you can simply use the … [removed line truncated in this diff view]
-giving path to the directory containing your … [removed line truncated in this diff view]
+you can simply use the `test-wsi` command
+giving path to the directory containing your WSI as the input-dir
+and specifying the filename of the WSI.
 DeepLIIF automatically reads the WSI region by region,
 and translate and segment each region separately and stitches the regions
 to create the translation and segmentation for whole slide image,
 then saves all masks in the format of ome.tiff in the given output-dir.
-Based on the available … [removed line truncated in this diff view]
+Based on the available resources, the region-size can be changed.
 ```
-deepliif test --input-dir /path/to/input/… [four further removed lines truncated in this diff view]
+deepliif test-wsi --input-dir /path/to/input/image
+    --filename wsiFile.svs
+    --output-dir /path/to/output/images
+    --model-dir /path/to/the/serialized/model
+    --tile-size 512
 ```
 
+**WSI Inference Options:**
+In addition to the required parameters given above, the following optional parameters are available for `deepliif test-wsi`:
+* `--region-size` Set the size of each region to read from the WSI (default is 20000).
+* `--seg-intermediate` Save the intermediate segmentation maps created for each modality.
+* `--seg-only` Save only the segmentation files, and do not infer images that are not needed.
+* `--color-dapi` Color the inferred DAPI image.
+* `--color-marker` Color the inferred marker image.
+
+**Reducing Run Time**
+If you need only the final segmentation and not the inferred multiplex images,
+it is recommended to run `deepliif test` or `deepliif test-wsi` with the `--seg-only`
+option. This will generate only the necessary images, thus reducing the overall run time.
+
+**Torchserve**
 If you prefer, it is possible to run the models using Torchserve.
 Please see [the documentation](https://nadeemlab.github.io/DeepLIIF/deployment/#deploying-deepliif-with-torchserve)
 on how to deploy the model with Torchserve and for an example of how to run the inference.
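The tile-size guidance added in this hunk (512 for 40x, 256 for 20x, 128 for 10x) is easy to capture as a lookup. A hedged sketch of that rule; the helper and its name are ours for illustration, not a deepliif function:

```
# README-recommended --tile-size per scan magnification.
RECOMMENDED_TILE_SIZE = {40: 512, 20: 256, 10: 128}

def recommended_tile_size(magnification):
    # Smaller tiles (lower magnification) mean longer inference times.
    try:
        return RECOMMENDED_TILE_SIZE[magnification]
    except KeyError:
        raise ValueError('No recommendation for %sx; use 10, 20, or 40.' % magnification)

print(recommended_tile_size(20))  # 256
```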
@@ -264,9 +299,16 @@ If you don't have access to GPU or appropriate hardware and don't want to instal
 
 […image markdown truncated in this diff view]
 
+Our deployment at [deepliif.org](https://deepliif.org) also provides virtual slide digitization to generate a single stitched image from a 10x video acquired with a microscope and camera. The video should be captured with the following guidelines to achieve the best results:
+* Brief but complete pauses at every section of the sample to avoid motion artifacts.
+* Significant overlap between pauses so that there is sufficient context for stitching frames together.
+* Methodical and consistent movement over the sample. For example, start at the top left corner, then go all the way to the right, then down one step, then all the way to the left, down one step, etc., until the end of the sample is reached. Again, brief overlapping pauses throughout will allow the best quality images to be generated.
+
+[…image markdown truncated in this diff view]
+
 ## Cloud API Endpoints
 
-DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
+For small images, DeepLIIF can also be accessed programmatically through an endpoint by posting a multipart-encoded request containing the original image file, along with optional parameters including postprocessing thresholds:
 
 ```
 POST /api/infer
@@ -362,6 +404,8 @@ with open(f'{images_dir}/{root}_scoring.json', 'w') as f:
     print(json.dumps(data['scoring'], indent=2))
 ```
 
+Note that since this is a single request to send the image and receive the results, processing must complete within the timeout period (typically about one minute). If your request is receiving a 504 status code, please try a smaller image or install the `deepliif` package as detailed above to run the process locally.
+
 If you have previously run DeepLIIF on an image and want to postprocess it with different thresholds, the postprocessing routine can be called directly using the previously inferred results:
 
 ```
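The timeout note added above suggests handling 504 responses on the client side. A minimal sketch against the documented `POST /api/infer` endpoint; the base URL and the `img` multipart field name are assumptions for illustration, not confirmed by this diff:

```
import requests

url = 'https://deepliif.org/api/infer'  # assumed deployment URL

with open('example_ihc.png', 'rb') as f:
    try:
        res = requests.post(url, files={'img': f}, timeout=90)
        if res.status_code == 504:
            # Gateway timeout: the image took too long to process.
            print('Timed out; try a smaller image or run deepliif locally.')
        else:
            res.raise_for_status()
            data = res.json()  # inferred results, including scoring
    except requests.exceptions.Timeout:
        print('Client-side timeout; consider running the deepliif package locally.')
```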
@@ -514,16 +558,17 @@ DeepLIIF model and release back to the community with full credit to the contrib
 - [x] **Moffitt Cancer Center** [AI-ready multiplex immunofluorescence and multiplex immunohistochemistry dataset](https://wiki.cancerimagingarchive.net/pages/viewpage.action?pageId=70226184) for head-and-neck squamous cell carcinoma (**MICCAI'23**)
 
 ## Support
-Please use the [… [removed line truncated in this diff view]
-… [removed line truncated in this diff view]
-Bugs can be reported in the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab.
+Please use the [GitHub Issues](https://github.com/nadeemlab/DeepLIIF/issues) tab for discussion, questions, or to report bugs related to DeepLIIF.
 
 ## License
 © [Nadeem Lab](https://nadeemlab.org/) - DeepLIIF code is distributed under **Apache 2.0 with Commons Clause** license,
 and is available for non-commercial academic purposes.
 
 ## Acknowledgments
-… [removed line truncated in this diff view]
+This code is inspired by [CycleGAN and pix2pix in PyTorch](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix).
+
+## Funding
+This work is funded by the 7-year NIH/NCI R37 MERIT Award ([R37CA295658](https://reporter.nih.gov/search/5dgSOlHosEKepkZEAS5_kQ/project-details/11018883#description)).
 
 ## Reference
 If you find our work useful in your research or if you use parts of this code or our released dataset, please cite the following papers:
@@ -551,6 +596,8 @@ If you find our work useful in your research or if you use parts of this code or
   title={An AI-Ready Multiplex Staining Dataset for Reproducible and Accurate Characterization of Tumor Immune Microenvironment},
   author={Ghahremani, Parmida and Marino, Joseph and Hernandez-Prera, Juan and V. de la Iglesia, Janis and JC Slebos, Robbert and H. Chung, Christine and Nadeem, Saad},
   journal={International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume={14225},
+  pages={704--713},
   year={2023}
 }
 
@@ -558,14 +605,19 @@ If you find our work useful in your research or if you use parts of this code or
   author = {Nadeem, Saad and Hanna, Matthew G and Viswanathan, Kartik and Marino, Joseph and Ahadi, Mahsa and Alzumaili, Bayan and Bani, Mohamed-Amine and Chiarucci, Federico and Chou, Angela and De Leo, Antonio and Fuchs, Talia L and Lubin, Daniel J and Luxford, Catherine and Magliocca, Kelly and Martinez, Germán and Shi, Qiuying and Sidhu, Stan and Al Ghuzlan, Abir and Gill, Anthony J and Tallini, Giovanni and Ghossein, Ronald and Xu, Bin},
   title = {Ki67 proliferation index in medullary thyroid carcinoma: a comparative study of multiple counting methods and validation of image analysis and deep learning platforms},
   journal = {Histopathology},
+  volume = {83},
+  number = {6},
+  pages = {981--988},
   year = {2023},
   doi = {https://doi.org/10.1111/his.15048}
 }
 
 @article{zehra2024deepliifstitch,
-  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
-  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
-  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
-  … [removed line truncated in this diff view]
+  author = {Zehra, Talat and Marino, Joseph and Wang, Wendy and Frantsuzov, Grigoriy and Nadeem, Saad},
+  title = {Rethinking Histology Slide Digitization Workflows for Low-Resource Settings},
+  journal = {International Conference on Medical Image Computing and Computer-Assisted Intervention (MICCAI)},
+  volume = {15004},
+  pages = {427--436},
+  year = {2024}
 }
 ```
deepliif-1.1.12.dist-info/RECORD
ADDED

@@ -0,0 +1,40 @@
+cli.py,sha256=IQIO_V9ubmeCOAniW9A5c8r9ETs7ehz4eJp_hrpuKo8,59625
+deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+deepliif/postprocessing.py,sha256=naq4Lt7WHg6wfOhksTASiCmZAx2P_wZSqozCkKvXNV0,40686
+deepliif/postprocessing__OLD__DELETE.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
+deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
+deepliif/data/__init__.py,sha256=IfqVFnFSPQJZnORdRq4sNkJiylr1TaKNmhvWP_aLHdg,5492
+deepliif/data/aligned_dataset.py,sha256=Tuvll1dpnNAgwReeZ6NleKLQP__yhKxZRWcvb3IOSGY,5145
+deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
+deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
+deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
+deepliif/data/single_dataset.py,sha256=hWjqTkRESEMppZj_r8bi3G0hAZ5EfvXYgE_qRbpiEz4,1553
+deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN2KMs,3592
+deepliif/data/unaligned_dataset.py,sha256=D69SxV087jKTd990UQIR4F3TahJHiSiw8i9Uz_xybt0,4697
+deepliif/models/CycleGAN_model.py,sha256=WDEa-Zgz57mVc9HbcVDXL5vfHvUDWdWXNLyz8ReH3rg,15196
+deepliif/models/DeepLIIFExt_model.py,sha256=HZaX9Z2ue0HQCFFN3guLkBcByCP70i8JvmPY02oOMyU,15022
+deepliif/models/DeepLIIF_model.py,sha256=6vmsXcBcoALrhJLa7XGhDmLamO_WCzTDYEyVUBE482o,23857
+deepliif/models/SDG_model.py,sha256=3opz7uEyhvVJ8fF4_Jw4ho1MBcc9OVye-ByZD_KF2j0,10142
+deepliif/models/__init__ - run_dask_multi dev.py,sha256=vt8X8qeiJr2aPhFi6muZEJLUSsr8XChfI45NSwL8Rfg,39449
+deepliif/models/__init__ - timings.py,sha256=S_wFImwxzGKx8STqbpcYCPOlbb_84WLMRDSnaWC8qFg,31750
+deepliif/models/__init__.py,sha256=-R9Em7TVGl36nKzlEI894T0WEONGVMV60aTucqLab5k,30846
+deepliif/models/att_unet.py,sha256=tqaFMNbGQUjXObOG309P76c7sIPxEvFR38EyuyHY40o,7116
+deepliif/models/base_model.py,sha256=ezWkmbuuNLGDMjyXe3VzJroj7QR1h0M9ByouzpfCrQg,16843
+deepliif/models/networks.py,sha256=Ijeb7nGf-YFgc_sBR-sIsk--0rTeiUqKZd01k4DMsuM,36614
+deepliif/options/__init__.py,sha256=p2IWckf3-K-wclDWfSq5ZmynKk2lNov2Tn7WPYIO11A,8329
+deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
+deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
+deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
+deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
+deepliif/util/__init__.py,sha256=-3t8kNolblI33XwpMaRbOPwVkKg1jjtVE8s0DvA2DNs,29145
+deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
+deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
+deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
+deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
+deepliif/util/visualizer.py,sha256=6E1sPbXdgLFB9mnPwtfEjm9O40viG4dfv5MyTpOQQpo,20210
+deepliif-1.1.12.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
+deepliif-1.1.12.dist-info/METADATA,sha256=VX20mfKaSJkgH_OdGJOCbKSp3iahnE3_dnn_MUAMR5Y,35247
+deepliif-1.1.12.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+deepliif-1.1.12.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
+deepliif-1.1.12.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
+deepliif-1.1.12.dist-info/RECORD,,
deepliif-1.1.11.dist-info/RECORD
DELETED

@@ -1,35 +0,0 @@
-cli.py,sha256=iU9YxO65T1rxX2Mx9f9LsEPC4o_ZXO-wH_-FUjIA1so,40088
-deepliif/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-deepliif/postprocessing.py,sha256=cM-cYVidY691Sjb1-B8a1jkLq5UR_hTCbuKzuF4765o,17589
-deepliif/train.py,sha256=-ZORL5vQrD0_Jq2Adgr3w8vJ7L1QcAgNTqMnBgtixgk,15757
-deepliif/data/__init__.py,sha256=euf9eUboK4RYR0jvdiyZDgPGozC1Nv7WRqRbTxSZD6A,5281
-deepliif/data/aligned_dataset.py,sha256=6qNFLXXW1phuIfNhCJSaLfOc-KN2kl7EuUdmyAPPU4I,5148
-deepliif/data/base_dataset.py,sha256=bQlxfY7bGSE9WPj31ZHkCxv5CAEJovjakGDCcK-aYdc,5564
-deepliif/data/colorization_dataset.py,sha256=uDYWciSxwqZkStQ_Vte27D9x5FNhv6eR9wSPn39K3RY,2808
-deepliif/data/image_folder.py,sha256=eesP9vn__YQ-dw1KJG9J-yVUHMmJjLcIEQI552Iv2vE,2006
-deepliif/data/single_dataset.py,sha256=hWjqTkRESEMppZj_r8bi3G0hAZ5EfvXYgE_qRbpiEz4,1553
-deepliif/data/template_dataset.py,sha256=PCDBnFRzRKReaeWgKUZmW0LrzRByI9adrKDJ6SN2KMs,3592
-deepliif/data/unaligned_dataset.py,sha256=m7j-CX-hkXbhg96NSEcaCagNVhTuXKkMsBADdMEJDBA,3393
-deepliif/models/DeepLIIFExt_model.py,sha256=Sc60rOfDJuoGrJ1CYe4beAg6as6F0o864AO6ZB7paBY,14527
-deepliif/models/DeepLIIF_model.py,sha256=ECZyM9jzoJAWSgB1ProBoarVuGcbScQMaSkRjSMgt0k,20872
-deepliif/models/SDG_model.py,sha256=xcZCTMNyJbcB78I1c8KtYVIB6OWL7WSMKdCxNemIzxs,9074
-deepliif/models/__init__.py,sha256=LWEyM7YwSoQ1TgRnsFk8O96xTf-QIxw5o_RexnECl_Q,28049
-deepliif/models/base_model.py,sha256=HKcUOBHtL-zLs5ZcmeXT-ZV_ubqsSUo4wMCQ0W27YHU,15583
-deepliif/models/networks.py,sha256=bN4yjRdE413efUESq8pvhzPDgFCTwFKXyQOrRqHckWY,32177
-deepliif/options/__init__.py,sha256=-syiyTK_oAeTLCBDm0bz1f_1jI3VK3LCwo2UNwOz6eM,5949
-deepliif/options/base_options.py,sha256=m5UXY8MvjNcDisUWuiP228yoT27SsCh1bXS_Td6SwTc,9852
-deepliif/options/processing_options.py,sha256=OnNT-ytoTQzetFiMEKrWvrsrhZlupRK4smcnIk0MbqY,2947
-deepliif/options/test_options.py,sha256=4ZbQC5U-nTbUz8jvdDIbse5TK_mjw4D5yNjpVevWD5M,1114
-deepliif/options/train_options.py,sha256=5eA_oxpRj2-HiuMMvC5-HLapxNFG_JXOQ3K132JjpR8,3580
-deepliif/util/__init__.py,sha256=bIa1Y1YQynvAoXux8ENAk_8Ykpfu9xxGByg-pgfGkK0,28090
-deepliif/util/get_data.py,sha256=HaRoQYb2u0LUgLT7ES-w35AmJ4BrlBEJWU4Cok29pxI,3749
-deepliif/util/html.py,sha256=RNAONZ4opP-bViahgmpSbHwOc6jXKQRnWRAVIaeIvac,3309
-deepliif/util/image_pool.py,sha256=M89Hc7DblRWroNP71S9mAdRn7h3DrhPFPjqFxxZYSgw,2280
-deepliif/util/util.py,sha256=9MNgqthJZYjl5-TJm5-sjWvMfPBz8F4P5K0RHXRQhfY,5241
-deepliif/util/visualizer.py,sha256=5V1lWidHqssJX21jn1P5-bOVgtrEXKVaQgnMWAsMfqg,15636
-deepliif-1.1.11.dist-info/LICENSE.md,sha256=HlZw_UPS6EtJimJ_Ci7xKh-S5Iubs0Z8y8E6EZ3ZNyE,956
-deepliif-1.1.11.dist-info/METADATA,sha256=3UtsAvCQz1osg0cBCnnLZVT1pvPdRxFbsaJ6B_yn_SI,31598
-deepliif-1.1.11.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
-deepliif-1.1.11.dist-info/entry_points.txt,sha256=f70-10j2q68o_rDlsE3hspnv4ejlDnXwwGZ9JJ-3yF4,37
-deepliif-1.1.11.dist-info/top_level.txt,sha256=vLDK5YKmDz08E7PywuvEjAo7dM5rnIpsjR4c0ubQCnc,13
-deepliif-1.1.11.dist-info/RECORD,,

{deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/LICENSE.md
File without changes

{deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/WHEEL
File without changes

{deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/entry_points.txt
File without changes

{deepliif-1.1.11.dist-info → deepliif-1.1.12.dist-info}/top_level.txt
File without changes