spacr 0.4.60__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. spacr/__init__.py +2 -4
  2. spacr/__main__.py +3 -3
  3. spacr/core.py +13 -107
  4. spacr/gui.py +0 -1
  5. spacr/gui_core.py +2 -2
  6. spacr/gui_utils.py +5 -14
  7. spacr/io.py +189 -200
  8. spacr/mediar.py +12 -8
  9. spacr/plot.py +50 -13
  10. spacr/settings.py +71 -14
  11. spacr/submodules.py +21 -14
  12. spacr/timelapse.py +192 -6
  13. spacr/utils.py +180 -56
  14. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/METADATA +64 -62
  15. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/RECORD +20 -72
  16. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/WHEEL +1 -1
  17. spacr/resources/MEDIAR/.gitignore +0 -18
  18. spacr/resources/MEDIAR/LICENSE +0 -21
  19. spacr/resources/MEDIAR/README.md +0 -189
  20. spacr/resources/MEDIAR/SetupDict.py +0 -39
  21. spacr/resources/MEDIAR/config/baseline.json +0 -60
  22. spacr/resources/MEDIAR/config/mediar_example.json +0 -72
  23. spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
  24. spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
  25. spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
  26. spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
  27. spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
  28. spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
  29. spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
  30. spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
  31. spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
  32. spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
  33. spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
  34. spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
  35. spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
  36. spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
  37. spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
  38. spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
  39. spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
  40. spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
  41. spacr/resources/MEDIAR/core/__init__.py +0 -2
  42. spacr/resources/MEDIAR/core/utils.py +0 -40
  43. spacr/resources/MEDIAR/evaluate.py +0 -71
  44. spacr/resources/MEDIAR/generate_mapping.py +0 -121
  45. spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
  46. spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
  47. spacr/resources/MEDIAR/image/failure_cases.png +0 -0
  48. spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
  49. spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
  50. spacr/resources/MEDIAR/image/mediar_results.png +0 -0
  51. spacr/resources/MEDIAR/main.py +0 -125
  52. spacr/resources/MEDIAR/predict.py +0 -70
  53. spacr/resources/MEDIAR/requirements.txt +0 -14
  54. spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
  55. spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
  56. spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
  57. spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
  58. spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
  59. spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
  60. spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
  61. spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
  62. spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
  63. spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
  64. spacr/resources/MEDIAR/train_tools/measures.py +0 -200
  65. spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
  66. spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
  67. spacr/resources/MEDIAR/train_tools/utils.py +0 -70
  68. spacr/stats.py +0 -221
  69. /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
  70. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/LICENSE +0 -0
  71. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/entry_points.txt +0 -0
  72. {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/top_level.txt +0 -0
spacr/resources/MEDIAR/core/MEDIAR/utils.py
@@ -1,429 +0,0 @@
- """
- Copyright © 2022 Howard Hughes Medical Institute,
- Authored by Carsen Stringer and Marius Pachitariu.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- 1. Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
- 3. Neither the name of HHMI nor the names of its contributors may be used to
- endorse or promote products derived from this software without specific
- prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- POSSIBILITY OF SUCH DAMAGE.
-
- --------------------------------------------------------------------------
- MEDIAR Prediction uses CellPose's Gradient Flow Tracking.
-
- This code is adapted from the following codes:
- [1] https://github.com/MouseLand/cellpose/blob/main/cellpose/utils.py
- [2] https://github.com/MouseLand/cellpose/blob/main/cellpose/dynamics.py
- [3] https://github.com/MouseLand/cellpose/blob/main/cellpose/metrics.py
- """
-
- import torch
- from torch.nn.functional import grid_sample
- import numpy as np
- import fastremap
-
- from skimage import morphology
- from scipy.ndimage import mean, find_objects
- from scipy.ndimage.filters import maximum_filter1d
-
- torch_GPU = torch.device("cuda")
- torch_CPU = torch.device("cpu")
-
-
- def labels_to_flows(labels, use_gpu=False, device=None, redo_flows=False):
-     """
-     Convert labels (list of masks or flows) to flows for training model
-     """
-
-     # Labels b x 1 x h x w
-     labels = labels.cpu().numpy().astype(np.int16)
-     nimg = len(labels)
-
-     if labels[0].ndim < 3:
-         labels = [labels[n][np.newaxis, :, :] for n in range(nimg)]
-
-     # Flows need to be recomputed
-     if labels[0].shape[0] == 1 or labels[0].ndim < 3 or redo_flows:
-         # compute flows; labels are fixed here to be unique, so they need to be passed back
-         # make sure labels are unique!
-         labels = [fastremap.renumber(label, in_place=True)[0] for label in labels]
-         veci = [
-             masks_to_flows(labels[n][0], use_gpu=use_gpu, device=device)
-             for n in range(nimg)
-         ]
-
-         # concatenate labels, distance transform, vector flows, heat (boundary and mask are computed in augmentations)
-         flows = [
-             np.concatenate((labels[n], labels[n] > 0.5, veci[n]), axis=0).astype(
-                 np.float32
-             )
-             for n in range(nimg)
-         ]
-
-     return np.array(flows)
-
-
- def compute_masks(
-     dP,
-     cellprob,
-     p=None,
-     niter=200,
-     cellprob_threshold=0.4,
-     flow_threshold=0.4,
-     interp=True,
-     resize=None,
-     use_gpu=False,
-     device=None,
- ):
-     """compute masks using dynamics from dP, cellprob, and boundary"""
-
-     cp_mask = cellprob > cellprob_threshold
-     cp_mask = morphology.remove_small_holes(cp_mask, area_threshold=16)
-     cp_mask = morphology.remove_small_objects(cp_mask, min_size=16)
-
-     if np.any(cp_mask):  # mask at this point is a cell cluster binary map, not labels
-         # follow flows
-         if p is None:
-             p, inds = follow_flows(
-                 dP * cp_mask / 5.0,
-                 niter=niter,
-                 interp=interp,
-                 use_gpu=use_gpu,
-                 device=device,
-             )
-             if inds is None:
-                 shape = resize if resize is not None else cellprob.shape
-                 mask = np.zeros(shape, np.uint16)
-                 p = np.zeros((len(shape), *shape), np.uint16)
-                 return mask, p
-
-         # calculate masks
-         mask = get_masks(p, iscell=cp_mask)
-
-         # flow thresholding factored out of get_masks
-         shape0 = p.shape[1:]
-         if mask.max() > 0 and flow_threshold is not None and flow_threshold > 0:
-             # make sure labels are unique at output of get_masks
-             mask = remove_bad_flow_masks(
-                 mask, dP, threshold=flow_threshold, use_gpu=use_gpu, device=device
-             )
-     else:  # nothing to compute, just make it compatible
-         shape = resize if resize is not None else cellprob.shape
-         mask = np.zeros(shape, np.uint16)
-         p = np.zeros((len(shape), *shape), np.uint16)
-
-     return mask, p
-
-
- def _extend_centers_gpu(
-     neighbors, centers, isneighbor, Ly, Lx, n_iter=200, device=torch.device("cuda")
- ):
-     if device is not None:
-         device = device
-     nimg = neighbors.shape[0] // 9
-     pt = torch.from_numpy(neighbors).to(device)
-
-     T = torch.zeros((nimg, Ly, Lx), dtype=torch.double, device=device)
-     meds = torch.from_numpy(centers.astype(int)).to(device).long()
-     isneigh = torch.from_numpy(isneighbor).to(device)
-     for i in range(n_iter):
-         T[:, meds[:, 0], meds[:, 1]] += 1
-         Tneigh = T[:, pt[:, :, 0], pt[:, :, 1]]
-         Tneigh *= isneigh
-         T[:, pt[0, :, 0], pt[0, :, 1]] = Tneigh.mean(axis=1)
-     del meds, isneigh, Tneigh
-     T = torch.log(1.0 + T)
-     # gradient positions
-     grads = T[:, pt[[2, 1, 4, 3], :, 0], pt[[2, 1, 4, 3], :, 1]]
-     del pt
-     dy = grads[:, 0] - grads[:, 1]
-     dx = grads[:, 2] - grads[:, 3]
-     del grads
-     mu_torch = np.stack((dy.cpu().squeeze(), dx.cpu().squeeze()), axis=-2)
-     return mu_torch
-
-
- def diameters(masks):
-     _, counts = np.unique(np.int32(masks), return_counts=True)
-     counts = counts[1:]
-     md = np.median(counts ** 0.5)
-     if np.isnan(md):
-         md = 0
-     md /= (np.pi ** 0.5) / 2
-     return md, counts ** 0.5
-
-
- def masks_to_flows_gpu(masks, device=None):
-     if device is None:
-         device = torch.device("cuda")
-
-     Ly0, Lx0 = masks.shape
-     Ly, Lx = Ly0 + 2, Lx0 + 2
-
-     masks_padded = np.zeros((Ly, Lx), np.int64)
-     masks_padded[1:-1, 1:-1] = masks
-
-     # get mask pixel neighbors
-     y, x = np.nonzero(masks_padded)
-     neighborsY = np.stack((y, y - 1, y + 1, y, y, y - 1, y - 1, y + 1, y + 1), axis=0)
-     neighborsX = np.stack((x, x, x, x - 1, x + 1, x - 1, x + 1, x - 1, x + 1), axis=0)
-     neighbors = np.stack((neighborsY, neighborsX), axis=-1)
-
-     # get mask centers
-     slices = find_objects(masks)
-
-     centers = np.zeros((masks.max(), 2), "int")
-     for i, si in enumerate(slices):
-         if si is not None:
-             sr, sc = si
-
-             ly, lx = sr.stop - sr.start + 1, sc.stop - sc.start + 1
-             yi, xi = np.nonzero(masks[sr, sc] == (i + 1))
-             yi = yi.astype(np.int32) + 1  # add padding
-             xi = xi.astype(np.int32) + 1  # add padding
-             ymed = np.median(yi)
-             xmed = np.median(xi)
-             imin = np.argmin((xi - xmed) ** 2 + (yi - ymed) ** 2)
-             xmed = xi[imin]
-             ymed = yi[imin]
-             centers[i, 0] = ymed + sr.start
-             centers[i, 1] = xmed + sc.start
-
-     # get neighbor validator (not all neighbors are in same mask)
-     neighbor_masks = masks_padded[neighbors[:, :, 0], neighbors[:, :, 1]]
-     isneighbor = neighbor_masks == neighbor_masks[0]
-     ext = np.array(
-         [[sr.stop - sr.start + 1, sc.stop - sc.start + 1] for sr, sc in slices]
-     )
-     n_iter = 2 * (ext.sum(axis=1)).max()
-     # run diffusion
-     mu = _extend_centers_gpu(
-         neighbors, centers, isneighbor, Ly, Lx, n_iter=n_iter, device=device
-     )
-
-     # normalize
-     mu /= 1e-20 + (mu ** 2).sum(axis=0) ** 0.5
-
-     # put into original image
-     mu0 = np.zeros((2, Ly0, Lx0))
-     mu0[:, y - 1, x - 1] = mu
-     mu_c = np.zeros_like(mu0)
-     return mu0, mu_c
-
-
- def masks_to_flows(masks, use_gpu=False, device=None):
-     if masks.max() == 0 or (masks != 0).sum() == 1:
-         # dynamics_logger.warning('empty masks!')
-         return np.zeros((2, *masks.shape), "float32")
-
-     if use_gpu:
-         if use_gpu and device is None:
-             device = torch_GPU
-         elif device is None:
-             device = torch_CPU
-         masks_to_flows_device = masks_to_flows_gpu
-
-     if masks.ndim == 3:
-         Lz, Ly, Lx = masks.shape
-         mu = np.zeros((3, Lz, Ly, Lx), np.float32)
-         for z in range(Lz):
-             mu0 = masks_to_flows_device(masks[z], device=device)[0]
-             mu[[1, 2], z] += mu0
-         for y in range(Ly):
-             mu0 = masks_to_flows_device(masks[:, y], device=device)[0]
-             mu[[0, 2], :, y] += mu0
-         for x in range(Lx):
-             mu0 = masks_to_flows_device(masks[:, :, x], device=device)[0]
-             mu[[0, 1], :, :, x] += mu0
-         return mu
-     elif masks.ndim == 2:
-         mu, mu_c = masks_to_flows_device(masks, device=device)
-         return mu
-
-     else:
-         raise ValueError("masks_to_flows only takes 2D or 3D arrays")
-
-
- def steps2D_interp(p, dP, niter, use_gpu=False, device=None):
-     shape = dP.shape[1:]
-     if use_gpu:
-         if device is None:
-             device = torch_GPU
-         shape = (
-             np.array(shape)[[1, 0]].astype("float") - 1
-         )  # Y and X dimensions (dP is 2.Ly.Lx), flipped X-1, Y-1
-         pt = (
-             torch.from_numpy(p[[1, 0]].T).float().to(device).unsqueeze(0).unsqueeze(0)
-         )  # p is n_points by 2, so pt is [1 1 2 n_points]
-         im = (
-             torch.from_numpy(dP[[1, 0]]).float().to(device).unsqueeze(0)
-         )  # covert flow numpy array to tensor on GPU, add dimension
-         # normalize pt between 0 and 1, normalize the flow
-         for k in range(2):
-             im[:, k, :, :] *= 2.0 / shape[k]
-             pt[:, :, :, k] /= shape[k]
-
-         # normalize to between -1 and 1
-         pt = pt * 2 - 1
-
-         # here is where the stepping happens
-         for t in range(niter):
-             # align_corners default is False, just added to suppress warning
-             dPt = grid_sample(im, pt, align_corners=False)
-
-             for k in range(2):  # clamp the final pixel locations
-                 pt[:, :, :, k] = torch.clamp(
-                     pt[:, :, :, k] + dPt[:, k, :, :], -1.0, 1.0
-                 )
-
-         # undo the normalization from before, reverse order of operations
-         pt = (pt + 1) * 0.5
-         for k in range(2):
-             pt[:, :, :, k] *= shape[k]
-
-         p = pt[:, :, :, [1, 0]].cpu().numpy().squeeze().T
-         return p
-
-     else:
-         assert print("ho")
-
-
- def follow_flows(dP, mask=None, niter=200, interp=True, use_gpu=True, device=None):
-     shape = np.array(dP.shape[1:]).astype(np.int32)
-     niter = np.uint32(niter)
-
-     p = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
-     p = np.array(p).astype(np.float32)
-
-     inds = np.array(np.nonzero(np.abs(dP[0]) > 1e-3)).astype(np.int32).T
-
-     if inds.ndim < 2 or inds.shape[0] < 5:
-         return p, None
-
-     if not interp:
-         assert print("woo")
-
-     else:
-         p_interp = steps2D_interp(
-             p[:, inds[:, 0], inds[:, 1]], dP, niter, use_gpu=use_gpu, device=device
-         )
-         p[:, inds[:, 0], inds[:, 1]] = p_interp
-
-     return p, inds
-
-
- def flow_error(maski, dP_net, use_gpu=False, device=None):
-     if dP_net.shape[1:] != maski.shape:
-         print("ERROR: net flow is not same size as predicted masks")
-         return
-
-     # flows predicted from estimated masks
-     dP_masks = masks_to_flows(maski, use_gpu=use_gpu, device=device)
-     # difference between predicted flows vs mask flows
-     flow_errors = np.zeros(maski.max())
-     for i in range(dP_masks.shape[0]):
-         flow_errors += mean(
-             (dP_masks[i] - dP_net[i] / 5.0) ** 2,
-             maski,
-             index=np.arange(1, maski.max() + 1),
-         )
-
-     return flow_errors, dP_masks
-
-
- def remove_bad_flow_masks(masks, flows, threshold=0.4, use_gpu=False, device=None):
-     merrors, _ = flow_error(masks, flows, use_gpu, device)
-     badi = 1 + (merrors > threshold).nonzero()[0]
-     masks[np.isin(masks, badi)] = 0
-     return masks
-
-
- def get_masks(p, iscell=None, rpad=20):
-     pflows = []
-     edges = []
-     shape0 = p.shape[1:]
-     dims = len(p)
-
-     for i in range(dims):
-         pflows.append(p[i].flatten().astype("int32"))
-         edges.append(np.arange(-0.5 - rpad, shape0[i] + 0.5 + rpad, 1))
-
-     h, _ = np.histogramdd(tuple(pflows), bins=edges)
-     hmax = h.copy()
-     for i in range(dims):
-         hmax = maximum_filter1d(hmax, 5, axis=i)
-
-     seeds = np.nonzero(np.logical_and(h - hmax > -1e-6, h > 10))
-     Nmax = h[seeds]
-     isort = np.argsort(Nmax)[::-1]
-     for s in seeds:
-         s = s[isort]
-
-     pix = list(np.array(seeds).T)
-
-     shape = h.shape
-     if dims == 3:
-         expand = np.nonzero(np.ones((3, 3, 3)))
-     else:
-         expand = np.nonzero(np.ones((3, 3)))
-     for e in expand:
-         e = np.expand_dims(e, 1)
-
-     for iter in range(5):
-         for k in range(len(pix)):
-             if iter == 0:
-                 pix[k] = list(pix[k])
-             newpix = []
-             iin = []
-             for i, e in enumerate(expand):
-                 epix = e[:, np.newaxis] + np.expand_dims(pix[k][i], 0) - 1
-                 epix = epix.flatten()
-                 iin.append(np.logical_and(epix >= 0, epix < shape[i]))
-                 newpix.append(epix)
-             iin = np.all(tuple(iin), axis=0)
-             for p in newpix:
-                 p = p[iin]
-             newpix = tuple(newpix)
-             igood = h[newpix] > 2
-             for i in range(dims):
-                 pix[k][i] = newpix[i][igood]
-             if iter == 4:
-                 pix[k] = tuple(pix[k])
-
-     M = np.zeros(h.shape, np.uint32)
-     for k in range(len(pix)):
-         M[pix[k]] = 1 + k
-
-     for i in range(dims):
-         pflows[i] = pflows[i] + rpad
-     M0 = M[tuple(pflows)]
-
-     # remove big masks
-     uniq, counts = fastremap.unique(M0, return_counts=True)
-     big = np.prod(shape0) * 0.9
-     bigc = uniq[counts > big]
-     if len(bigc) > 0 and (len(bigc) > 1 or bigc[0] != 0):
-         M0 = fastremap.mask(M0, bigc)
-     fastremap.renumber(M0, in_place=True)  # convenient to guarantee non-skipped labels
-     M0 = np.reshape(M0, shape0)
-     return M0
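For orientation only (this sketch is not part of the package or the diff): a minimal example of how the flow-tracking utilities removed above are typically chained, assuming the functions from the removed module are in scope, `dP` is a 2 x H x W flow field, and `cellprob` an H x W probability map produced by a segmentation network; the zero-filled arrays are placeholders.

import numpy as np

# Placeholder network outputs: predicted y/x flows and cell probability.
H, W = 256, 256
dP = np.zeros((2, H, W), dtype=np.float32)
cellprob = np.zeros((H, W), dtype=np.float32)

# Pixels above cellprob_threshold are stepped along the flows (follow_flows),
# clustered into instance labels (get_masks), and instances whose recomputed
# flows deviate from the network flows beyond flow_threshold are dropped
# (remove_bad_flow_masks).
mask, p = compute_masks(
    dP,
    cellprob,
    niter=200,
    cellprob_threshold=0.4,
    flow_threshold=0.4,
    interp=True,
    use_gpu=False,
)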
spacr/resources/MEDIAR/core/__init__.py
@@ -1,2 +0,0 @@
- from .Baseline import *
- from .MEDIAR import *
spacr/resources/MEDIAR/core/utils.py
@@ -1,40 +0,0 @@
- import torch
- import wandb
- import pprint
-
- __all__ = ["print_learning_device", "print_with_logging"]
-
-
- def print_learning_device(device):
-     """Get and print the learning device information."""
-     if device == "cpu":
-         device_name = device
-
-     else:
-         if isinstance(device, str):
-             device_idx = int(device[-1])
-         elif isinstance(device, torch._device):
-             device_idx = device.index
-
-         device_name = torch.cuda.get_device_name(device_idx)
-
-     print("")
-     print("=" * 50)
-     print("Train start on device: {}".format(device_name))
-     print("=" * 50)
-
-
- def print_with_logging(results, step):
-     """Print and log on the W&B server.
-
-     Args:
-         results (dict): results dictionary
-         step (int): epoch index
-     """
-     # Print the results dictionary
-     pp = pprint.PrettyPrinter(compact=True)
-     pp.pprint(results)
-     print()
-
-     # Log on the w&b server
-     wandb.log(results, step=step)
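Illustrative only (not part of the diff): the helpers above assume an active Weights & Biases run; a minimal usage sketch with a hypothetical project name and placeholder metric names.

import wandb

wandb.init(project="mediar-example")  # hypothetical project name
print_learning_device("cuda:0")       # resolves device index 0 and prints the GPU name
print_with_logging({"train/loss": 0.12, "valid/F1": 0.87}, step=1)  # placeholder metrics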
spacr/resources/MEDIAR/evaluate.py
@@ -1,71 +0,0 @@
- import numpy as np
- import pandas as pd
- import tifffile as tif
- import argparse
- import os
- from collections import OrderedDict
- from tqdm import tqdm
-
- from train_tools.measures import evaluate_f1_score_cellseg
-
-
- def main():
-     ### Directory path arguments ###
-     parser = argparse.ArgumentParser("Compute F1 score for cell segmentation results")
-     parser.add_argument(
-         "--gt_path",
-         type=str,
-         help="path to ground truth; file names end with _label.tiff",
-         required=True,
-     )
-     parser.add_argument(
-         "--pred_path", type=str, help="path to segmentation results", required=True
-     )
-     parser.add_argument("--save_path", default=None, help="path where to save metrics")
-
-     args = parser.parse_args()
-
-     # Get files from the paths
-     gt_path, pred_path = args.gt_path, args.pred_path
-     names = sorted(os.listdir(pred_path))
-
-     names_total = []
-     precisions_total, recalls_total, f1_scores_total = [], [], []
-
-     for name in tqdm(names):
-         assert name.endswith(
-             "_label.tiff"
-         ), "The suffix of label name should be _label.tiff"
-
-         # Load the images
-         gt = tif.imread(os.path.join(gt_path, name))
-         pred = tif.imread(os.path.join(pred_path, name))
-
-         # Evaluate metrics
-         precision, recall, f1_score = evaluate_f1_score_cellseg(gt, pred, threshold=0.5)
-
-         names_total.append(name)
-         precisions_total.append(np.round(precision, 4))
-         recalls_total.append(np.round(recall, 4))
-         f1_scores_total.append(np.round(f1_score, 4))
-
-     # Refine data as dataframe
-     cellseg_metric = OrderedDict()
-     cellseg_metric["Names"] = names_total
-     cellseg_metric["Precision"] = precisions_total
-     cellseg_metric["Recall"] = recalls_total
-     cellseg_metric["F1_Score"] = f1_scores_total
-
-     cellseg_metric = pd.DataFrame(cellseg_metric)
-     print("mean F1 Score:", np.mean(cellseg_metric["F1_Score"]))
-
-     # Save results
-     if args.save_path is not None:
-         os.makedirs(args.save_path, exist_ok=True)
-         cellseg_metric.to_csv(
-             os.path.join(args.save_path, "seg_metric.csv"), index=False
-         )
-
-
- if __name__ == "__main__":
-     main()
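Illustrative only (not part of the diff): the removed evaluate.py wraps a per-image metric; the same computation for a single ground-truth/prediction pair looks roughly as follows, assuming the MEDIAR train_tools package is importable and using placeholder file paths.

import tifffile as tif
from train_tools.measures import evaluate_f1_score_cellseg

# Instance-labeled ground truth and prediction for one image (placeholder paths).
gt = tif.imread("ground_truth/img1_label.tiff")
pred = tif.imread("predictions/img1_label.tiff")

precision, recall, f1_score = evaluate_f1_score_cellseg(gt, pred, threshold=0.5)
print(f"precision={precision:.4f} recall={recall:.4f} F1={f1_score:.4f}")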
spacr/resources/MEDIAR/generate_mapping.py
@@ -1,121 +0,0 @@
- import os, glob
- import json
- import argparse
-
-
- def public_paths_labeled(root):
-     """Map paths for public datasets as dictionary list"""
-
-     images_raw = sorted(glob.glob(os.path.join(root, "Public/images/*")))
-     labels_raw = sorted(glob.glob(os.path.join(root, "Public/labels/*")))
-
-     data_dicts = []
-
-     for image_path, label_path in zip(images_raw, labels_raw):
-         name1 = image_path.split("/")[-1].split(".")[0]
-         name2 = label_path.split("/")[-1].split("_label")[0]
-         assert name1 == name2
-
-         data_item = {
-             "img": image_path.split("MEDIAR/")[-1],
-             "label": label_path.split("MEDIAR/")[-1],
-         }
-
-         data_dicts.append(data_item)
-
-     map_dict = {"public": data_dicts}
-
-     return map_dict
-
-
- def official_paths_labeled(root):
-     """Map paths for official labeled datasets as dictionary list"""
-
-     image_path = os.path.join(root, "Official/Training/images/*")
-     label_path = os.path.join(root, "Official/Training/labels/*")
-
-     images_raw = sorted(glob.glob(image_path))
-     labels_raw = sorted(glob.glob(label_path))
-     data_dicts = []
-
-     for image_path, label_path in zip(images_raw, labels_raw):
-         name1 = image_path.split("/")[-1].split(".")[0]
-         name2 = label_path.split("/")[-1].split("_label")[0]
-         assert name1 == name2
-
-         data_item = {
-             "img": image_path.split("MEDIAR/")[-1],
-             "label": label_path.split("MEDIAR/")[-1],
-         }
-
-         data_dicts.append(data_item)
-
-     map_dict = {"official": data_dicts}
-
-     return map_dict
-
-
- def official_paths_tuning(root):
-     """Map paths for official tuning datasets as dictionary list"""
-
-     image_path = os.path.join(root, "Official/Tuning/images/*")
-     images_raw = sorted(glob.glob(image_path))
-
-     data_dicts = []
-
-     for image_path in images_raw:
-         data_item = {"img": image_path.split("MEDIAR/")[-1]}
-         data_dicts.append(data_item)
-
-     map_dict = {"official": data_dicts}
-
-     return map_dict
-
-
- def add_mapping_to_json(json_file, map_dict):
-     """Save mapped dictionary as a json file"""
-
-     if not os.path.exists(json_file):
-         with open(json_file, "w") as file:
-             json.dump({}, file)
-
-     with open(json_file, "r") as file:
-         data = json.load(file)
-
-     for map_key, map_item in map_dict.items():
-         if map_key not in data.keys():
-             data[map_key] = map_item
-         else:
-             print('>>> "{}" already exists in path map keys...'.format(map_key))
-
-     with open(json_file, "w") as file:
-         json.dump(data, file)
-
-
- if __name__ == "__main__":
-     # [!Caution] The paths should be overrided for the local environment!
-     parser = argparse.ArgumentParser(description="Mapping files and paths")
-     parser.add_argument("--root", default=".", type=str)
-     args = parser.parse_args()
-
-     MAP_DIR = "./train_tools/data_utils/"
-
-     print("\n----------- Path Mapping for Labeled Data is Started... -----------\n")
-
-     map_labeled = os.path.join(MAP_DIR, "mapping_labeled.json")
-     map_dict = official_paths_labeled(args.root)
-     add_mapping_to_json(map_labeled, map_dict)
-
-     print("\n----------- Path Mapping for Tuning Data is Started... -----------\n")
-
-     map_labeled = os.path.join(MAP_DIR, "mapping_tuning.json")
-     map_dict = official_paths_tuning(args.root)
-     add_mapping_to_json(map_labeled, map_dict)
-
-     print("\n----------- Path Mapping for Public Data is Started... -----------\n")
-
-     map_public = os.path.join(MAP_DIR, "mapping_public.json")
-     map_dict = public_paths_labeled(args.root)
-     add_mapping_to_json(map_public, map_dict)
-
-     print("\n-------------- Path Mapping is Ended !!! ---------------------------\n")