deepliif 1.1.10__py3-none-any.whl → 1.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cli.py +354 -67
- deepliif/data/__init__.py +7 -7
- deepliif/data/aligned_dataset.py +2 -3
- deepliif/data/unaligned_dataset.py +38 -19
- deepliif/models/CycleGAN_model.py +282 -0
- deepliif/models/DeepLIIFExt_model.py +47 -25
- deepliif/models/DeepLIIF_model.py +69 -19
- deepliif/models/SDG_model.py +57 -26
- deepliif/models/__init__ - run_dask_multi dev.py +943 -0
- deepliif/models/__init__ - timings.py +764 -0
- deepliif/models/__init__.py +354 -232
- deepliif/models/att_unet.py +199 -0
- deepliif/models/base_model.py +32 -8
- deepliif/models/networks.py +108 -34
- deepliif/options/__init__.py +49 -5
- deepliif/postprocessing.py +1034 -227
- deepliif/postprocessing__OLD__DELETE.py +440 -0
- deepliif/util/__init__.py +290 -64
- deepliif/util/visualizer.py +106 -19
- {deepliif-1.1.10.dist-info → deepliif-1.1.12.dist-info}/METADATA +81 -20
- deepliif-1.1.12.dist-info/RECORD +40 -0
- deepliif-1.1.10.dist-info/RECORD +0 -35
- {deepliif-1.1.10.dist-info → deepliif-1.1.12.dist-info}/LICENSE.md +0 -0
- {deepliif-1.1.10.dist-info → deepliif-1.1.12.dist-info}/WHEEL +0 -0
- {deepliif-1.1.10.dist-info → deepliif-1.1.12.dist-info}/entry_points.txt +0 -0
- {deepliif-1.1.10.dist-info → deepliif-1.1.12.dist-info}/top_level.txt +0 -0
deepliif/postprocessing.py
CHANGED
@@ -1,73 +1,10 @@
 import math
-import
-from PIL import Image
-import skimage.measure
-from skimage import feature
-from skimage.morphology import remove_small_objects
-import numpy as np
-import scipy.ndimage as ndi
-from numba import jit
-
-
-def remove_small_objects_from_image(img, min_size=100):
-    image_copy = img.copy()
-    image_copy[img > 0] = 1
-    image_copy = image_copy.astype(bool)
-    removed_red_channel = remove_small_objects(image_copy, min_size=min_size).astype(np.uint8)
-    img[removed_red_channel == 0] = 0
-
-    return img
-
-
-def remove_background_noise(mask, mask_boundary):
-    labeled = skimage.measure.label(mask, background=0)
-    padding = 5
-    for i in range(1, len(np.unique(labeled))):
-        component = np.zeros_like(mask)
-        component[labeled == i] = mask[labeled == i]
-        component_bound = np.zeros_like(mask_boundary)
-        component_bound[max(0, min(np.nonzero(component)[0]) - padding): min(mask_boundary.shape[1],
-                        max(np.nonzero(component)[0]) + padding),
-                        max(0, min(np.nonzero(component)[1]) - padding): min(mask_boundary.shape[1],
-                        max(np.nonzero(component)[1]) + padding)] \
-            = mask_boundary[max(0, min(np.nonzero(component)[0]) - padding): min(mask_boundary.shape[1], max(
-                np.nonzero(component)[0]) + padding),
-                max(0, min(np.nonzero(component)[1]) - padding): min(mask_boundary.shape[1],
-                max(np.nonzero(component)[1]) + padding)]
-        if len(np.nonzero(component_bound)[0]) < len(np.nonzero(component)[0]) / 3:
-            mask[labeled == i] = 0
-    return mask
+import warnings
 
-
-
-
-
-    for i in range(1, len(np.unique(labeled))):
-        component = np.zeros_like(mask1)
-        component[labeled == i] = mask1[labeled == i]
-        component_bound = np.zeros_like(mask2)
-        component_bound[
-            max(0, min(np.nonzero(component)[0]) - padding): min(mask2.shape[1], max(np.nonzero(component)[0]) + padding),
-            max(0, min(np.nonzero(component)[1]) - padding): min(mask2.shape[1], max(np.nonzero(component)[1]) + padding)] \
-            = mask2[max(0, min(np.nonzero(component)[0]) - padding): min(mask2.shape[1],
-                    max(np.nonzero(component)[0]) + padding),
-                    max(0, min(np.nonzero(component)[1]) - padding): min(mask2.shape[1],
-                    max(np.nonzero(component)[1]) + padding)]
-        if len(np.nonzero(component_bound)[0]) > len(np.nonzero(component)[0]) / 3:
-            mask1[labeled == i] = 0
-            mask2[labeled == i] = 255
-    return mask1, mask2
-
-
-def create_basic_segmentation_mask(img, seg_img, thresh=80, noise_objects_size=20, small_object_size=50):
-    positive_mask, negative_mask = positive_negative_masks_basic(img, seg_img, thresh, noise_objects_size, small_object_size)
-
-    mask = np.zeros_like(img)
-
-    mask[positive_mask > 0] = (255, 0, 0)
-    mask[negative_mask > 0] = (0, 0, 255)
-
-    return mask
+import numpy as np
+from numba import jit, typed
+from PIL import Image
+Image.MAX_IMAGE_PIXELS = None
 
 
 def imadjust(x, gamma=0.7, c=0, d=1):
@@ -142,205 +79,267 @@ def adjust_marker(inferred_tile, orig_tile):
     return Image.fromarray(processed_tile)
 
 
-#
-
-
-
-
-
-
-
-
-
+# Default postprocessing values
+DEFAULT_SEG_THRESH = 150
+DEFAULT_NOISE_THRESH = 4
+
+# Values for uint8 label masks
+LABEL_UNKNOWN = 50
+LABEL_POSITIVE = 200
+LABEL_NEGATIVE = 150
+LABEL_BACKGROUND = 0
+LABEL_CELL = 100
+LABEL_BORDER_POS = 220
+LABEL_BORDER_NEG = 170
+LABEL_BORDER_POS2 = 221
+LABEL_BORDER_NEG2 = 171
+
+
+def to_array(img, grayscale=False):
+    """
+    Convert a color image to an array of pixels.
+
+    Parameters
+    ----------
+    img : Image | ndarray
+        Image to convert. If an array is provided instead, it is used directly.
+    grayscale : bool
+        Whether the input image should be converted to grayscale
+        by taking the maximum channel value for each pixel.
+
+    Returns
+    -------
+    ndarray
+        A 2D (grayscale) or 3D array with the pixels of the converted image.
+    """
+
+    if isinstance(img, Image.Image):
+        img = np.asarray(img) if img.mode == 'RGB' else np.asarray(img.convert('RGB'))
+    if grayscale and len(img.shape) == 3:
+        img = img.max(axis=-1)
+    return img
 
 
 @jit(nopython=True)
 def in_bounds(array, index):
+    """
+    Check if an index is valid for an array.
+
+    Parameters
+    ----------
+    array : ndarray
+        2D array.
+    index : tuple
+        2-element tuple with index values matching the array shape (e.g., for a pixel array where
+        array.shape[0] is height and array.shape[1] is width, then index will be in (y, x) order).
+
+    Returns
+    -------
+    bool
+        Whether or not the index is within the bounds of the array.
+    """
+
     return index[0] >= 0 and index[0] < array.shape[0] and index[1] >= 0 and index[1] < array.shape[1]
 
 
+@jit(nopython=True)
 def create_posneg_mask(seg, thresh):
-    """
+    """
+    Create a mask of positive and negative pixels from the segmentation image.
 
-
-
-
+    Parameters
+    ----------
+    seg : ndarray
+        3D uint8 array (2D image w/ 3 channels) with segmentation probabilities.
+    thresh : int
+        Threshold to use in determining if a pixel should be labeled as positive/negative.
 
-
-
-
+    Returns
+    -------
+    ndarray
+        2D uint8 array of mask values with every pixel labeled as unknown, positive, or negative.
+    """
+
+    mask = np.full(seg.shape[0:2], LABEL_UNKNOWN, dtype=np.uint8)
+    for y in range(mask.shape[0]):
+        for x in range(mask.shape[1]):
+            if seg[y, x, 0] + seg[y, x, 2] > thresh and seg[y, x, 1] <= 80:
+                if seg[y, x, 0] >= seg[y, x, 2]:
+                    mask[y, x] = LABEL_POSITIVE
+                else:
+                    mask[y, x] = LABEL_NEGATIVE
 
     return mask
 
 
 @jit(nopython=True)
 def mark_background(mask):
-    """
+    """
+    Mask all background pixels in-place by 4-connected region growing unknown boundary pixels.
+
+    Parameters
+    ----------
+    mask: ndarray
+        2D uint8 array with pixels labeled as positive, negative, or unknown.
+        After the function executes, the pixels will be labeled as background, positive, negative, or unknown.
+    """
 
     seeds = []
     for i in range(mask.shape[0]):
-        if mask[i, 0] ==
+        if mask[i, 0] == LABEL_UNKNOWN:
             seeds.append((i, 0))
-        if mask[i, mask.shape[1]-1] ==
+        if mask[i, mask.shape[1]-1] == LABEL_UNKNOWN:
             seeds.append((i, mask.shape[1]-1))
     for j in range(mask.shape[1]):
-        if mask[0, j] ==
+        if mask[0, j] == LABEL_UNKNOWN:
             seeds.append((0, j))
-        if mask[mask.shape[0]-1, j] ==
+        if mask[mask.shape[0]-1, j] == LABEL_UNKNOWN:
            seeds.append((mask.shape[0]-1, j))
 
     neighbors = [(-1, 0), (1, 0), (0, -1), (0, 1)]
 
     while len(seeds) > 0:
         seed = seeds.pop()
-        if mask[seed] ==
-            mask[seed] =
+        if mask[seed] == LABEL_UNKNOWN:
+            mask[seed] = LABEL_BACKGROUND
             for n in neighbors:
                 idx = (seed[0] + n[0], seed[1] + n[1])
-                if in_bounds(mask, idx) and mask[idx] ==
+                if in_bounds(mask, idx) and mask[idx] == LABEL_UNKNOWN:
                     seeds.append(idx)
 
 
 @jit(nopython=True)
-def
+def compute_cell_mapping(mask, marker, noise_thresh):
     """
-    Compute the mapping
+    Compute the mapping from mask to positive and negative cells.
 
     Parameters
-
-    mask:
-
-
-
-
-    marker_thresh: Classify cell as positive if any marker value within the cell is above this threshold.
+    ----------
+    mask : ndarray
+        2D uint8 array with pixels labeled as positive, negative, background, or unknown.
+        After the function executes, the pixels will be labeled as background or cell.
+    marker : ndarray
+        2D uint8 array with the inferred marker values.
 
     Returns
-
-
-
-
-
-
+    -------
+    typed.List[tuple] :
+        Cell data as a list of 7-element tuples with the following:
+        [0] - number of pixels in the cell
+        [1] - whether the cell is positive (True) or negative (False)
+        [2] - marker value for the cell
+        [3-4] - first pixel coordinates (x, y) of the cell cluster
+        [5-6] - centroid of the cell (x, y)
     """
 
     neighbors = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
-
-    positive_cell_count, negative_cell_count = 0, 0
+    cells = typed.List()
 
     for y in range(mask.shape[0]):
         for x in range(mask.shape[1]):
-            if mask[y, x]
+            if mask[y, x] != LABEL_BACKGROUND and mask[y, x] != LABEL_CELL:
                 seeds = [(y, x)]
-                cell_coords = []
                 count = 1
-
-
+                count_positive = 1 if mask[y, x] == LABEL_POSITIVE else 0
+                count_negative = 1 if mask[y, x] == LABEL_NEGATIVE else 0
                 max_marker = marker[y, x] if marker is not None else 0
-                mask[y, x] =
-
+                mask[y, x] = LABEL_CELL
+                center_y = y
+                center_x = x
 
                 while len(seeds) > 0:
                     seed = seeds.pop()
                     for n in neighbors:
                         idx = (seed[0] + n[0], seed[1] + n[1])
-                        if in_bounds(mask, idx) and
+                        if in_bounds(mask, idx) and mask[idx] != LABEL_BACKGROUND and mask[idx] != LABEL_CELL:
                             seeds.append(idx)
-                            if mask[idx] ==
+                            if mask[idx] == LABEL_POSITIVE:
                                 count_positive += 1
-
-
+                            elif mask[idx] == LABEL_NEGATIVE:
+                                count_negative += 1
                             if marker is not None and marker[idx] > max_marker:
                                 max_marker = marker[idx]
-                            mask[idx] =
-
+                            mask[idx] = LABEL_CELL
+                            center_y += idx[0]
+                            center_x += idx[1]
                             count += 1
 
-                if count >
-
-
-
-
-                    else:
-                        fill_value = MASK_CELL_NEGATIVE
-                        border_value = MASK_BOUNDARY_NEGATIVE
-                        negative_cell_count += 1
-                else:
-                    fill_value = MASK_BACKGROUND
-                    border_value = MASK_BACKGROUND
-
-                for coord in cell_coords:
-                    is_boundary = False
-                    for n in border_neighbors:
-                        idx = (coord[0] + n[0], coord[1] + n[1])
-                        if in_bounds(mask, idx) and mask[idx] == MASK_BACKGROUND:
-                            is_boundary = True
-                            break
-                    if is_boundary:
-                        mask[coord] = border_value
-                    else:
-                        mask[coord] = fill_value
-
-    counts = {
-        'num_total': positive_cell_count + negative_cell_count,
-        'num_pos': positive_cell_count,
-        'num_neg': negative_cell_count,
-    }
-    return counts
+                if count > noise_thresh:
+                    center_y = int(round(center_y / count))
+                    center_x = int(round(center_x / count))
+                    positive = True if count_positive >= count_negative else False
+                    cells.append((count, positive, max_marker, x, y, center_x, center_y))
 
+    return cells
 
-@jit(nopython=True)
-def enlarge_cell_boundaries(mask):
-    neighbors = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
-    for y in range(mask.shape[0]):
-        for x in range(mask.shape[1]):
-            if mask[y, x] == MASK_BOUNDARY_POSITIVE or mask[y, x] == MASK_BOUNDARY_NEGATIVE:
-                value = MASK_POSITIVE if mask[y, x] == MASK_BOUNDARY_POSITIVE else MASK_NEGATIVE
-                for n in neighbors:
-                    idx = (y + n[0], x + n[1])
-                    if in_bounds(mask, idx) and mask[idx] != MASK_BOUNDARY_POSITIVE and mask[idx] != MASK_BOUNDARY_NEGATIVE:
-                        mask[idx] = value
-    for y in range(mask.shape[0]):
-        for x in range(mask.shape[1]):
-            if mask[y, x] == MASK_POSITIVE:
-                mask[y, x] = MASK_BOUNDARY_POSITIVE
-            elif mask[y, x] == MASK_NEGATIVE:
-                mask[y, x] = MASK_BOUNDARY_NEGATIVE
 
+def get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh):
+    """
+    Find all cells in the segmentation image that are larger than the noise threshold.
 
-
-
-
-
+    Parameters
+    ----------
+    seg : Image | ndarray
+        Inferred segmentation map image.
+    marker : Image | ndarray
+        Inferred marker image.
+    resolution: string
+        The resolution/magnification of the original image. Valid values are '10x', '20x', or '40x'.
+    noise_thresh : int
+        Threshold for tiny noise to ignore (include only cells larger than this value).
+    seg_thresh : int
+        Threshold to use in determining if a pixel should be labeled as positive/negative.
 
-
-
-
-
-
-
+    Returns
+    -------
+    ndarray :
+        Label mask.
+    typed.List[tuple] :
+        Cell data as a list of 7-element tuples.
+    dict :
+        Calculated default values.
+    """
 
-
-
-
-
-
-
-            if mask[idx] == MASK_POSITIVE:
-                mask[idx] = MASK_CELL_POSITIVE
-            elif mask[idx] == MASK_NEGATIVE:
-                mask[idx] = MASK_CELL_NEGATIVE
-            else:
-                mask[idx] = MASK_CELL
-            count += 1
+    seg = to_array(seg)
+    if marker is not None:
+        marker = to_array(marker, True)
+    mask = create_posneg_mask(seg, seg_thresh)
+    mark_background(mask)
+    cellsinfo = compute_cell_mapping(mask, marker, noise_thresh)
 
-
+    defaults = {}
+    sizes = np.zeros(len(cellsinfo), dtype=np.int64)
+    for i in range(len(cellsinfo)):
+        sizes[i] = cellsinfo[i][0]
+    defaults['size_thresh'] = calculate_default_size_threshold(sizes, resolution)
+    if marker is not None:
+        defaults['marker_thresh'] = calculate_default_marker_threshold(marker)
 
-    return
+    return mask, cellsinfo, defaults
 
 
 @jit(nopython=True)
 def create_kde(values, count, bandwidth = 1.0):
+    """
+    Create Gaussian kernel density estimate (KDE) for values with count number of bins.
+
+    Parameters
+    ----------
+    values : list
+        Input values.
+    count : int
+        Number of bins for KDE.
+    bandwidth: float
+        Bandwidth (smoothing parameter) for KDE.
+
+    Returns
+    -------
+    ndarray :
+        Kernel density estimate.
+    float :
+        Step size.
+    """
+
     gaussian_denom_inv = 1 / math.sqrt(2 * math.pi);
     max_value = max(values) + 1;
     step = max_value / count;
@@ -360,14 +359,26 @@ def create_kde(values, count, bandwidth = 1.0):
     return kde, step
 
 
-
-
-
-
-
+@jit(nopython=True)
+def calculate_default_size_threshold(cell_sizes, resolution='40x'):
+    """
+    Calculate a default size threshold to exclude small cells.
+
+    Parameters
+    ----------
+    cell_sizes : ndarray
+        1D array of cell sizes.
+    resolution : string
+        The resolution/magnification of the original image. Valid values are '10x', '20x', or '40x'.
 
-
-
+    Returns
+    -------
+    int :
+        Default size threshold.
+    """
+
+    if cell_sizes.shape[0] > 1:
+        kde, step = create_kde(np.sqrt(cell_sizes), 500)
     idx = 1
     for i in range(1, kde.shape[0]-1):
         if kde[i] < kde[i-1] and kde[i] < kde[i+1]:
@@ -392,49 +403,845 @@ def calc_default_size_thresh(mask, resolution):
     return 0
 
 
-def
-
-
-
-
+def calculate_stain_range(stain):
+    """
+    Calculate the range of the 99.9 percentile of non-zero pixels in the stain image.
+
+    Parameters
+    ----------
+    stain : ndarray
+        2D uint8 array (image).
+
+    Returns
+    -------
+    tuple[int] :
+        2-element tuple with 0.1 and 99.9 percentile values.
+    """
+
+    nonzero = stain[stain != 0]
+    if nonzero.shape[0] > 0:
+        return (round(np.percentile(nonzero, 0.1)), round(np.percentile(nonzero, 99.9)))
     else:
-        return 0
+        return (0, 0)
 
 
-def
-
-
+def calculate_default_marker_threshold(marker):
+    """
+    Calculate a default threshold for a marker image as 90% of the 99.9 percentile range.
+
+    Parameters
+    ----------
+    marker : ndarray
+        2D uint8 array (image).
+
+    Results
+    -------
+    int :
+        Default marker threshold.
+    """
+
+    marker_range = calculate_stain_range(marker)
+    return round((marker_range[1] - marker_range[0]) * 0.9) + marker_range[0]
+
+
+@jit(nopython=True)
+def get_cell_boundary(mask, x, y):
+    """
+    Get the boundary contour pixels for a cell, and also the bounding box.
+    The provided starting (x, y) pixel must be the first pixel encountered
+    from the top left, whether found by searching via rows or columns.
+
+    Parameters
+    ----------
+    mask : ndarray
+        2D uint8 ndarray of background and cell labels
+    x : int
+        x-coordinate of the first pixel of the cell
+    y : int
+        y-coordinate of the first pixel of the cell
+
+    Returns
+    -------
+    list :
+        Bounding box of the cell as a list of two 2-element tuples.
+    list :
+        All boundary pixels (x, y) going clockwise from first point.
+    """
+
+    w = mask.shape[1]
+    h = mask.shape[0]
+
+    if not in_bounds(mask, (y, x)) or mask[y, x] == LABEL_BACKGROUND:
+        return None, None
+
+    '''
+    In normal xy coordinates, check neighbors clockwise in the following order:
+    0 1 2
+    7 - 3
+    6 5 4
+    List neighbors in xy coordinates, but xy are switched to yx for numpy array access.
+    '''
+    neighbors = [(-1, -1), (0, -1), (1, -1), (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0)]  # (dx, dy)
+    neighbors *= 2
+
+    boundary = [(x, y)]
+    min_x = x
+    min_y = y
+    max_x = x
+    max_y = y
+
+    # Go counter-clockwise to find previous pixel
+    idx = 6
+    while idx >= 0:
+        nx = x + neighbors[idx][0]
+        ny = y + neighbors[idx][1]
+        if in_bounds(mask, (ny, nx)) and mask[ny, nx] != LABEL_BACKGROUND:
+            break
+        idx -= 1
+    if idx < 0:
+        return [(x, y), (x, y)], [(x, y)]
+
+    px = x + neighbors[idx][0]
+    py = y + neighbors[idx][1]
+    boundary = [(px, py), (x, y)]
+
+    # Go clockwise to get border pixels in order
+    while True:
+        dx = px - x
+        dy = py - y
+        idx = neighbors.index((dx, dy)) + 1
+        while True:
+            nx = x + neighbors[idx][0]
+            ny = y + neighbors[idx][1]
+            if in_bounds(mask, (ny, nx)) and mask[ny, nx] != LABEL_BACKGROUND:
+                break
+            idx += 1
+        px = x
+        py = y
+        x = nx
+        y = ny
+        boundary.append((x, y))
 
-
-
+        if x < min_x:
+            min_x = x
+        elif x > max_x:
+            max_x = x
+        if y < min_y:
+            min_y = y
+        elif y > max_y:
+            max_y = y
+
+        if px == boundary[0][0] and py == boundary[0][1] and x == boundary[1][0] and y == boundary[1][1]:
+            break
+
+    return [(min_x, min_y), (max_x, max_y)], boundary[1:-1]
+
+
+def make_simple_contour(points):
+    """
+    Make a simplified version of a contour by removing redundant points within
+    straight lines (i.e., each straight line segment will be reduced to contain
+    only the first and last point for that segment). It is assumed that the
+    vectors between points are all one of the eight pixel neighbor directions.
+    This means that either one of the x- or y-direction must be zero, or if
+    both values are non-zero then they must be the same (i.e., [1,0], [0,2],
+    [2,2], and [3,3] are vall valid direction vectors, but [1,2] is not).
+    The input parameter of contour points must contain at least one point.
+
+    Parameters
+    ----------
+    points : list
+        Contour of boundary points (x, y).
+
+    Returns
+    -------
+    list :
+        Simplified contour of boundary points (x, y).
+    """
+
+    # always keep first point
+    simple = [(points[0][0], points[0][1])]
+
+    # if only one point in contour, then done
+    if len(points) == 1:
+        return simple
+
+    # for all middle points (exclude first and last)
+    for i in range(1, len(points) - 1):
+        dx0 = points[i][0] - points[i-1][0]
+        dy0 = points[i][1] - points[i-1][1]
+        dx1 = points[i+1][0] - points[i][0]
+        dy1 = points[i+1][1] - points[i][1]
+        same_dx = (dx0 == dx1) or (dx0 > 0 and dx1 > 0) or (dx0 < 0 and dx1 < 0)
+        same_dy = (dy0 == dy1) or (dy0 > 0 and dy1 > 0) or (dy0 < 0 and dy1 < 0)
+        if not same_dx or not same_dy:
+            simple.append((points[i][0], points[i][1]))
+
+    # for last point (calculate p[n]-p[n-1] and p[0]-p[n])
+    dx0 = points[-1][0] - points[-2][0]
+    dy0 = points[-1][1] - points[-2][1]
+    dx1 = points[0][0] - points[-1][0]
+    dy1 = points[0][1] - points[-1][1]
+    same_dx = (dx0 == dx1) or (dx0 > 0 and dx1 > 0) or (dx0 < 0 and dx1 < 0)
+    same_dy = (dy0 == dy1) or (dy0 > 0 and dy1 > 0) or (dy0 < 0 and dy1 < 0)
+    if not same_dx or not same_dy:
+        simple.append((points[-1][0], points[-1][1]))
+
+    return simple
+
+
+def make_full_contour(points):
+    """
+    Convert a simplified contour to a complete pixel-by-pixel contour
+    (i.e., every point is an 8-neighbor of the previous point). It is
+    assumed that vectors between points are all one of the eight pixel
+    neighbor directions. The input parameter of contour points must
+    contain at least one point.
+
+    Parameters
+    ----------
+    points : list
+        Contour of boundary points (x, y).
+
+    Returns
+    -------
+    list :
+        Full contour of boundary points (x, y).
+    """
+
+    # start with first point
+    full = [(points[0][0], points[0][1])]
+
+    # for all remaining points:
+    for i in range(1, len(points)):
+
+        # calculate direction from last full point to current input point
+        dx = points[i][0] - full[-1][0]
+        dy = points[i][1] - full[-1][1]
+        dx = 1 if dx > 0 else (-1 if dx < 0 else 0)
+        dy = 1 if dy > 0 else (-1 if dy < 0 else 0)
+
+        # add direction to last full point until reach current input point
+        while full[-1][0] != points[i][0] or full[-1][1] != points[i][1]:
+            full.append((full[-1][0] + dx, full[-1][1] + dy))
+
+    # calculate direction from last full point until first point
+    dx = full[0][0] - full[-1][0]
+    dy = full[0][1] - full[-1][1]
+    dx = 1 if dx > 0 else (-1 if dx < 0 else 0)
+    dy = 1 if dy > 0 else (-1 if dy < 0 else 0)
+
+    # add direction to last full point until reach first point (avoid duplicate)
+    while full[-1][0] + dx != full[0][0] or full[-1][1] + dy != full[0][1]:
+        full.append((full[-1][0] + dx, full[-1][1] + dy))
+
+    return full
+
+
+def to_base92(values, min_len=1):
+    """
+    Convert integer values to base92, offset by 35 for printable ASCII values
+    (i.e., output characters for [0-91] are in the range [35-126]).
+    All encodings will have the same number of characters, of at least min_len
+    (i.e., smaller in length encodings will be padded with 35).
+
+    Parameters
+    ----------
+    values : int | list[int] | tuple[int]
+        The integer value(s) to a base92 ASCII encoding.
+    min_len : int
+        The minimum number of characters for the base92 ASCII encoding of each value.
+
+    Returns
+    -------
+    string | list[string] :
+        The converted value(s).
+    """
+
+    multi = type(values) is list or type(values) is tuple
+    if not multi:
+        values = [values]
+
+    results = []
+    for val in values:
+        res = ''
+        while val > 0:
+            res += chr((val % 92) + 35)
+            val //= 92
+        results.append(res)
+
+    max_len = max(len(r) for r in results)
+    fixed_len = max_len if max_len > min_len else min_len
+
+    for i in range(len(results)):
+        while len(results[i]) < fixed_len:
+            results[i] += chr(35)
+        results[i] = results[i][::-1]
+
+    if not multi:
+        results = results[0]
+    return results
+
+
+def from_base92(val):
+    """
+    Convert from base92 ASCII encoding to integer value.
+
+    Parameters
+    ----------
+    val : string
+        The base92 ASCII encoded value.
+
+    Returns
+    -------
+    int :
+        The converted value.
+    """
+
+    res = 0
+    for v in val:
+        res *= 92
+        res += (ord(v) - 35)
+    return res
+
+
+def encode_cell_data_v4(data):
+    """
+    Encode as v4 the provided cell data to string.
+
+    Parameters
+    ----------
+    data : dict
+        Dictionary of cell data.
+
+    Returns
+    -------
+    string :
+        Encoded cell data as a single ASCII string.
+    """
+
+    cell = ''  # encoded cell data as string
+
+    # encode cell size (in pixels)
+    size = to_base92(data['size'])
+    size_len = len(size)
+    cell += size
+
+    # encode cell classification (pos/neg) and marker value
+    positive = int(data['positive'])
+    marker = data['marker']
+    classification = (marker * 2) + positive
+    cell += to_base92(classification, 2)
+
+    # encode anchor point (bbox top left) and extent (bbox bottom right)
+    topleft = to_base92(data['bbox'][0])
+    topleft_len = len(topleft[0])
+    cell += topleft[0]
+    cell += topleft[1]
+
+    # encode extent (bbox bottom right), centroid, and first boundary contour point
+    # as offsets from the previously encoded anchor point (bbox top left)
+    x = data['bbox'][0][0]
+    y = data['bbox'][0][1]
+    offsets = [*data['bbox'][1], *data['centroid'], *data['boundary'][0]]
+    for j in range(0, len(offsets), 2):
+        offsets[j] -= x
+        offsets[j+1] -= y
+    offsets = to_base92(offsets)
+    offsets_len = len(offsets[0])
+    cell += offsets[0]
+    cell += offsets[1]
+    cell += offsets[2]
+    cell += offsets[3]
+    cell += offsets[4]
+    cell += offsets[5]
+
+    # encode number of chars for variable length encodations and prepend to cell string
+    encoded_lens = ((size_len - 1) * 16) + ((topleft_len - 1) * 4) + (offsets_len - 1)
+    encoded_lens = chr(encoded_lens + 35)
+    cell = encoded_lens + cell
+
+    # encode remaining boundary contour points using Freeman chain code
+    # Freeman chain code:
+    # 3 2 1
+    # \ | /
+    # 4-- --0
+    # / | \
+    # 5 6 7
+    boundary = ''
+    for j in range(1, len(data['boundary'])):
+        dx = data['boundary'][j][0] - data['boundary'][j-1][0]
+        dy = data['boundary'][j][1] - data['boundary'][j-1][1]
+        if dx >= 1 and dy == 0:
+            direction = 0
+        elif dx >= 1 and dy <= -1:
+            direction = 1
+        elif dx == 0 and dy <= -1:
+            direction = 2
+        elif dx <= -1 and dy <= -1:
+            direction = 3
+        elif dx <= -1 and dy == 0:
+            direction = 4
+        elif dx <= -1 and dy >= 1:
+            direction = 5
+        elif dx == 0 and dy >= 1:
+            direction = 6
+        elif dx >= 1 and dy >= 1:
+            direction = 7
+        else:  # this should not (cannot) happen, so if it does, then exit
+            exit()
+        distance = max(abs(dx), abs(dy))
+        if distance == 0:  # this should not (cannot) happen, but if duplicate point, then skip
+            continue
+        while distance > 10:
+            encoded = (10 * 8) + direction
+            boundary += chr(encoded + 35)
+            distance -= 10
+        encoded = (distance * 8) + direction
+        boundary += chr(encoded + 35)
+    cell += boundary
+
+    return cell
+
+
+def decode_cell_data_v4(cell):
+    """
+    Decode v4 encoded cell string and return dictionary of cell data.
+
+    Parameters
+    ----------
+    cell : string
+        Encoded cell data as a single ASCII string.
+
+    Returns
+    -------
+    dict :
+        Dictionary with the decoded cell data.
+    """
+
+    data = {}  # decoded cell data
+
+    # decode number of chars for variable length encodations
+    n = ord(cell[0]) - 35
+    ns = (n // 16) + 1  # num chars for cell size
+    na = ((n // 4) % 4) + 1  # num chars for anchor coordinates
+    no = (n % 4) + 1  # num chars for offset coordinates
+
+    # decode cell size (in pixels)
+    data['size'] = from_base92(cell[1:1+ns])
+
+    # decode cell classification (pos/neg) and marker value
+    classification = from_base92(cell[1+ns:3+ns])
+    data['positive'] = bool(classification % 2)
+    data['marker'] = classification // 2
+
+    # decode anchor point (bbox top left) and extent (bbox bottom right)
+    x = from_base92(cell[3+ns:3+ns+na])
+    y = from_base92(cell[3+ns+na:3+ns+2*na])
+    ex = x + from_base92(cell[3+ns+2*na:3+ns+2*na+no])
+    ey = y + from_base92(cell[3+ns+2*na+no:3+ns+2*na+2*no])
+    data['bbox'] = [(x, y), (ex, ey)]
+
+    # decode centroid point
+    cx = x + from_base92(cell[3+ns+2*na+2*no:3+ns+2*na+3*no])
+    cy = y + from_base92(cell[3+ns+2*na+3*no:3+ns+2*na+4*no])
+    data['centroid'] = (cx, cy)
+
+    # decode first boundary contour points
+    bx = x + from_base92(cell[3+ns+2*na+4*no:3+ns+2*na+5*no])
+    by = y + from_base92(cell[3+ns+2*na+5*no:3+ns+2*na+6*no])
+    data['boundary'] = [(bx, by)]
+
+    # directions using Freeman chain code
+    freeman = [(1,0), (1,-1), (0,-1), (-1,-1), (-1,0), (-1,1), (0,1), (1,1)]
+
+    # decode remaining boundary contour points
+    prev_direction = None
+    for c in cell[3+ns+2*na+6*no:]:
+        point = ord(c) - 35
+        distance = point // 8
+        direction = freeman[point % 8]
+        movement = (direction[0]*distance, direction[1]*distance)
+        px = data['boundary'][-1][0] + movement[0]
+        py = data['boundary'][-1][1] + movement[1]
+        if direction == prev_direction:
+            data['boundary'].pop()
+        data['boundary'].append((px, py))
+        prev_direction = direction
+
+    return data
+
+
+@jit(nopython=True)
+def create_cell_classification(mask, cellsinfo,
+                               size_thresh=0,
+                               marker_thresh=None,
+                               size_thresh_upper=None):
+    """
+    Create final cell classification in-place for the mask and
+    calculate counts for positive and negative cell counts.
+
+    Parameters
+    ----------
+    mask : ndarray
+        2D uint8 array label map.
+    cellsinfo : list
+        Information about each cell found from the segmentation.
+    size_thresh : int
+        Include only cells larger than this size.
+    marker_thresh : int
+        Make cell positive if marker value is above this threshold (override original classification).
+    size_thresh_upper : int
+        Include only cells smaller than this size.
+
+    Results
+    -------
+    dict :
+        Dictionary with the counts of positive, negative, and total cells.
+    """
+
+    neighbors = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
+    border_neighbors = [(0, -1), (-1, 0), (1, 0), (0, 1)]
+    num_pos, num_neg = 0, 0
     if marker_thresh is None:
-        marker_thresh =
-
-
-
+        marker_thresh = 255
+
+    for cell in cellsinfo:
+        if cell[0] > size_thresh and (size_thresh_upper is None or cell[0] < size_thresh_upper):
+            is_pos = True if cell[1] or cell[2] > marker_thresh else False
+            if is_pos:
+                label = LABEL_POSITIVE
+                label_border = LABEL_BORDER_POS
+                num_pos += 1
+            else:
+                label = LABEL_NEGATIVE
+                label_border = LABEL_BORDER_NEG
+                num_neg += 1
+
+            x = cell[3]
+            y = cell[4]
+            mask[y,x] = label_border
+            seeds = [(y, x)]
+
+            while len(seeds) > 0:
+                seed = seeds.pop()
+                for n in neighbors:
+                    idx = (seed[0] + n[0], seed[1] + n[1])
+                    if in_bounds(mask, idx) and mask[idx] == LABEL_CELL:
+                        seeds.append(idx)
+                        is_boundary = False
+                        for n in border_neighbors:
+                            idx2 = (idx[0] + n[0], idx[1] + n[1])
+                            if in_bounds(mask, idx2) and mask[idx2] == LABEL_BACKGROUND:
+                                is_boundary = True
+                                break
+                        if is_boundary:
+                            mask[idx] = label_border
+                        else:
+                            mask[idx] = label
+
+    num_total = num_pos + num_neg
+    return {
+        'num_total': num_total,
+        'num_pos': num_pos,
+        'num_neg': num_neg,
+    }
+
+
+@jit(nopython=True)
+def enlarge_cell_boundaries(mask):
+    """
+    Enlarge cell boundaries in-place in mask by one pixel in each direction.
 
-
+    Parameters
+    ----------
+    mask : ndarray
+        2D uint8 label map.
+    """
+
+    neighbors = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]
+
+    for y in range(mask.shape[0]):
+        for x in range(mask.shape[1]):
+            if mask[y, x] == LABEL_BORDER_POS or mask[y, x] == LABEL_BORDER_NEG:
+                value = LABEL_BORDER_POS2 if mask[y, x] == LABEL_BORDER_POS else LABEL_BORDER_NEG2
+                for n in neighbors:
+                    idx = (y + n[0], x + n[1])
+                    if in_bounds(mask, idx) and mask[idx] != LABEL_BORDER_POS and mask[idx] != LABEL_BORDER_NEG:
+                        mask[idx] = value
+
+    for y in range(mask.shape[0]):
+        for x in range(mask.shape[1]):
+            if mask[y, x] == LABEL_BORDER_POS2:
+                mask[y, x] = LABEL_BORDER_POS
+            elif mask[y, x] == LABEL_BORDER_NEG2:
+                mask[y, x] = LABEL_BORDER_NEG
+
+
+@jit(nopython=True)
+def create_final_images(overlay, mask):
+    """
+    Create the final overlay (in-place) and refined images from the mask.
+    The 'overlay' parameter is the image on which to create the overlay,
+    which will be done in-place.
+
+    Parameters
+    ----------
+    overlay : ndarray
+        3D uint8 array (2D image of 3 channels).
+        Generally, a copy of the original input image.
+    mask : ndarray
+        2D uint8 label map.
+
+    Returns
+    -------
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the overlaid image.
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the refined segmentation image.
+    """
+
+    refined = np.zeros_like(overlay)
+
+    for y in range(mask.shape[0]):
+        for x in range(mask.shape[1]):
+            if mask[y, x] == LABEL_BORDER_POS:
+                overlay[y, x] = (255, 0, 0)
+                refined[y, x, 1] = 255
+            elif mask[y, x] == LABEL_BORDER_NEG:
+                overlay[y, x] = (0, 0, 255)
+                refined[y, x, 1] = 255
+            elif mask[y, x] == LABEL_POSITIVE:
+                refined[y, x, 0] = 255
+            elif mask[y, x] == LABEL_NEGATIVE:
+                refined[y, x, 2] = 255
+
+    return overlay, refined
+
+
+@jit(nopython=True)
+def fill_cells(mask):
+    """
+    For a mask with cell outlines, fill in the center of the cells in-place.
+    The cell outlines must surround cell entirely, including image border pixels.
+
+    Parameters
+    ----------
+    mask : ndarray
+        2D uint8 label map.
+    """
+
+    for y in range(mask.shape[0]):
+        for x in range(1, mask.shape[1]):
+            if mask[y, x] == LABEL_UNKNOWN:
+                if mask[y, x-1] == LABEL_BORDER_POS or mask[y, x-1] == LABEL_POSITIVE:
+                    mask[y, x] = LABEL_POSITIVE
+                else:
+                    mask[y, x] = LABEL_NEGATIVE
+
+
+def compute_cell_results(seg, marker, resolution, version=3,
+                         seg_thresh=DEFAULT_SEG_THRESH,
+                         noise_thresh=DEFAULT_NOISE_THRESH):
+    """
+    Perform postprocessing to compute individual cell results.
+
+    Parameters
+    ----------
+    seg : Image | ndarray
+        Inferred segmentation map image.
+    marker : Image | ndarray
+        Inferred marker image.
+    resolution : string
+        The resolution/magnification of the original image. Valid values are '10x', '20x', or '40x'.
+    version : int
+        Version of the cell data (valid values are 3 and 4).
+    seg_thresh : int
+        Threshold to use in determining if a pixel should be labeled as positive/negative.
+    noise_thresh : int
+        Threshold for tiny noise to ignore (include only cells larger than this value).
+
+    Returns
+    -------
+    dict :
+        Individual cell data and other associated values.
+    """
+
+    if version not in [3, 4]:
+        warnings.warn('Invalid cell data version provided, defaulting to version 3.')
+        version = 3
+
+    mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh)
+
+    cells = []
+    for cell in cellsinfo:
+        bbox, boundary = get_cell_boundary(mask, cell[3], cell[4])
+        data = {
+            'size': cell[0],
+            'positive': cell[1],
+            'marker': cell[2],
+            'bbox': bbox,
+            'centroid': (cell[5], cell[6]),
+            'boundary': make_simple_contour(boundary),
+        }
+        if version == 4:
+            data = encode_cell_data_v4(data)
+        cells.append(data)
+
+    results = {
+        'cells': cells,
+        'settings': {
+            'default_marker_thresh': defaults['marker_thresh'] if 'marker_thresh' in defaults else None,
+            'default_size_thresh': defaults['size_thresh'],
+            'noise_thresh': noise_thresh,
+            'seg_thresh': seg_thresh,
+        },
+        'dataVersion': version,
+    }
+
+    return results
+
+
+def compute_final_results(orig, seg, marker, resolution,
+                          size_thresh='default',
+                          marker_thresh=None,
+                          size_thresh_upper=None,
+                          seg_thresh=DEFAULT_SEG_THRESH,
+                          noise_thresh=DEFAULT_NOISE_THRESH):
+    """
+    Perform postprocessing to compute final count and image results.
+
+    Parameters
+    ----------
+    orig : Image | ndarray
+        Original input image.
+    seg : Image | ndarray
+        Inferred segmentation map image.
+    marker : Image | ndarray
+        Inferred marker image.
+    resolution : string
+        The resolution/magnification of the original image. Valid values are '10x', '20x', or '40x'.
+    size_thresh : int
+        Include only cells larger than this size.
+    marker_thresh : int
+        Make cell positive if marker value is above this threshold (override original classification).
+    size_thresh_upper : int
+        Include only cells smaller than this size.
+    seg_thresh : int
+        Threshold to use in determining if a pixel should be labeled as positive/negative.
+    noise_thresh : int
+        Threshold for tiny noise to ignore (include only cells larger than this value).
+
+    Returns
+    -------
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the overlaid image.
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the refined segmentation image.
+    dict :
+        Dictionary with scoring and settings information.
+    """
+
+    mask, cellsinfo, defaults = get_cells_info(seg, marker, resolution, noise_thresh, seg_thresh)
+
+    if size_thresh is None:
+        size_thresh = 0
+    elif size_thresh == 'default':
+        size_thresh = defaults['size_thresh']
+    if marker_thresh == 'default':
+        marker_thresh = defaults['marker_thresh']
+
+    counts = create_cell_classification(mask, cellsinfo, size_thresh, marker_thresh, size_thresh_upper)
     enlarge_cell_boundaries(mask)
+    overlay, refined = create_final_images(np.array(orig), mask)
 
     scoring = {
         'num_total': counts['num_total'],
         'num_pos': counts['num_pos'],
         'num_neg': counts['num_neg'],
         'percent_pos': round(counts['num_pos'] / counts['num_total'] * 100, 1) if counts['num_pos'] > 0 else 0,
-        '
+        'seg_thresh': seg_thresh,
         'size_thresh': size_thresh,
         'size_thresh_upper': size_thresh_upper,
         'marker_thresh': marker_thresh if marker is not None else None,
     }
 
-    overlay
-
-
+    return overlay, refined, scoring
+
+
+def cells_to_final_results(data, orig,
+                           size_thresh='default',
+                           marker_thresh=None,
+                           size_thresh_upper=None):
+    """
+    Compute final count and image results from previously postprocessed cell results.
+
+    Parameters
+    ----------
+    data : dict
+        Individual cell data and associated values generated by the 'compute_cell_results' function.
+    orig : Image | ndarray
+        Original input image.
+    size_thresh : int
+        Include only cells larger than this size.
+    marker_thresh : int
+        Make cell positive if marker value is above this threshold (override original classification).
+    size_thresh_upper : int
+        Include only cells smaller than this size.
+
+    Returns
+    -------
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the overlaid image.
+    ndarray :
+        3D uint8 array (2D image of 3 channels) containing the refined segmentation image.
+    dict :
+        Dictionary with scoring and settings information.
+    """
+
+    orig = np.array(orig)
+    mask = np.full(orig.shape[0:2], LABEL_UNKNOWN, dtype=np.uint8)
+    num_pos, num_neg = 0, 0
+
+    if size_thresh is None:
+        size_thresh = 0
+    elif size_thresh == 'default':
+        size_thresh = data['settings']['default_size_thresh']
+    if marker_thresh == 'default':
+        marker_thresh = data['settings']['default_marker_thresh']
+
+    for cell in data['cells']:
+        if data['dataVersion'] == 4:
+            c = decode_cell_data_v4(cell)
+        else:
+            c = cell
+        if c['size'] > size_thresh and (size_thresh_upper is None or c['size'] < size_thresh_upper):
+            if c['positive'] or (marker_thresh is not None and c['marker'] > marker_thresh):
+                num_pos += 1
+                label = LABEL_BORDER_POS
+            else:
+                num_neg += 1
+                label = LABEL_BORDER_NEG
+            border = make_full_contour(c['boundary'])
+            for b in border:
+                mask[b[1], b[0]] = label
 
-
-
-
-
-    refined
+    mark_background(mask)
+    fill_cells(mask)
+
+    enlarge_cell_boundaries(mask)
+    overlay, refined = create_final_images(np.array(orig), mask)
+
+    num_total = num_pos + num_neg
+    scoring = {
+        'num_total': num_total,
+        'num_pos': num_pos,
+        'num_neg': num_neg,
+        'percent_pos': round(num_pos / num_total * 100, 1) if num_pos > 0 else 0,
+        'seg_thresh': data['settings']['seg_thresh'],
+        'size_thresh': size_thresh,
+        'size_thresh_upper': size_thresh_upper,
+        'marker_thresh': marker_thresh,
+    }
 
     return overlay, refined, scoring
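
For context, below is a minimal usage sketch (not part of the package diff) of how the reworked postprocessing entry points in 1.1.12 appear to fit together, based solely on the function signatures and docstrings shown above. The image paths and the '40x' resolution are placeholder assumptions.

# Hypothetical usage sketch based on the signatures in the diff above;
# file names and the '40x' resolution are assumptions, not part of the diff.
from PIL import Image
from deepliif.postprocessing import (compute_cell_results, cells_to_final_results,
                                     compute_final_results)

orig = Image.open('original_tile.png')      # original input image (assumed path)
seg = Image.open('inferred_seg.png')        # inferred segmentation map (assumed path)
marker = Image.open('inferred_marker.png')  # inferred marker image (assumed path)

# One-step scoring: overlay image, refined segmentation, and cell counts.
overlay, refined, scoring = compute_final_results(orig, seg, marker, resolution='40x')
print(scoring['num_pos'], scoring['num_neg'], scoring['percent_pos'])

# Two-step flow: compute per-cell data once (optionally v4-encoded), then
# re-score later with different thresholds without re-running segmentation.
cell_data = compute_cell_results(seg, marker, resolution='40x', version=4)
overlay2, refined2, scoring2 = cells_to_final_results(cell_data, orig, size_thresh='default')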