arcn 0.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- arcn-0.0.0/MANIFEST.in +1 -0
- arcn-0.0.0/PKG-INFO +19 -0
- arcn-0.0.0/README.md +2 -0
- arcn-0.0.0/pyproject.toml +31 -0
- arcn-0.0.0/setup.cfg +4 -0
- arcn-0.0.0/src/arcn/VERSION +1 -0
- arcn-0.0.0/src/arcn/__init__.py +0 -0
- arcn-0.0.0/src/arcn/io.py +364 -0
- arcn-0.0.0/src/arcn/label.py +64 -0
- arcn-0.0.0/src/arcn/manifold.py +179 -0
- arcn-0.0.0/src/arcn/node.py +143 -0
- arcn-0.0.0/src/arcn/relabeling.py +65 -0
- arcn-0.0.0/src/arcn/render_synced_grid.py +151 -0
- arcn-0.0.0/src/arcn/resample.py +165 -0
- arcn-0.0.0/src/arcn/resample_image.py +97 -0
- arcn-0.0.0/src/arcn/segment.py +60 -0
- arcn-0.0.0/src/arcn/semantics.py +590 -0
- arcn-0.0.0/src/arcn/version.py +6 -0
- arcn-0.0.0/src/arcn.egg-info/PKG-INFO +19 -0
- arcn-0.0.0/src/arcn.egg-info/SOURCES.txt +21 -0
- arcn-0.0.0/src/arcn.egg-info/dependency_links.txt +1 -0
- arcn-0.0.0/src/arcn.egg-info/requires.txt +5 -0
- arcn-0.0.0/src/arcn.egg-info/top_level.txt +1 -0
arcn-0.0.0/MANIFEST.in
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
include src/arcn/VERSION
|
arcn-0.0.0/PKG-INFO
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: arcn
|
|
3
|
+
Version: 0.0.0
|
|
4
|
+
Summary: ARCANE - Anatomical ReConstruction along AligNEd paths
|
|
5
|
+
Author-email: "J. Hoffman" <contact@jmh.lol>
|
|
6
|
+
Project-URL: Homepage, https://gitlab.com/hoffman-lab/arcane
|
|
7
|
+
Project-URL: Bug Tracker, https://gitlab.com/hoffman-lab/arcane/issues
|
|
8
|
+
Classifier: Programming Language :: Python :: 3
|
|
9
|
+
Classifier: Operating System :: OS Independent
|
|
10
|
+
Requires-Python: >=3.9
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
Requires-Dist: numpy
|
|
13
|
+
Requires-Dist: SimpleITK
|
|
14
|
+
Requires-Dist: scipy
|
|
15
|
+
Requires-Dist: nibabel
|
|
16
|
+
Requires-Dist: pillow
|
|
17
|
+
|
|
18
|
+
# ARCaNe - Anatomical ReConstruction along AligNEd paths
|
|
19
|
+
|
arcn-0.0.0/README.md
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "arcn"
|
|
3
|
+
authors = [
|
|
4
|
+
{ name = "J. Hoffman", email = "contact@jmh.lol" },
|
|
5
|
+
]
|
|
6
|
+
description = "ARCANE - Anatomical ReConstruction along AligNEd paths"
|
|
7
|
+
readme = "README.md"
|
|
8
|
+
requires-python = ">=3.9"
|
|
9
|
+
classifiers = [
|
|
10
|
+
"Programming Language :: Python :: 3",
|
|
11
|
+
"Operating System :: OS Independent",
|
|
12
|
+
]
|
|
13
|
+
# Eventually, psycopg2/psycopg
|
|
14
|
+
dependencies = ["numpy", "SimpleITK", "scipy", "nibabel", "pillow"]
|
|
15
|
+
dynamic = ["version"]
|
|
16
|
+
|
|
17
|
+
[build-system]
|
|
18
|
+
requires = ["setuptools"]
|
|
19
|
+
build-backend = "setuptools.build_meta:__legacy__"
|
|
20
|
+
# The difference between `__legacy__` and the regular `build_meta`
|
|
21
|
+
# is that `__legacy__` does the equivalent of
|
|
22
|
+
# `sys.path.insert(0, os.path.dirname(__file__))`.
|
|
23
|
+
# This allows you to `import` your modules from the `CWD`.
|
|
24
|
+
# If you don't like using `__legacy__` you can
|
|
25
|
+
# manually add `CWD` to `sys.path` inside your `setup.py`.
|
|
26
|
+
|
|
27
|
+
[project.urls]
|
|
28
|
+
"Homepage" = "https://gitlab.com/hoffman-lab/arcane"
|
|
29
|
+
"Bug Tracker" = "https://gitlab.com/hoffman-lab/arcane/issues"
|
|
30
|
+
|
|
31
|
+
|
arcn-0.0.0/setup.cfg
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
v0.0.1
|
|
File without changes
|
|
@@ -0,0 +1,364 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
from PIL import Image
|
|
4
|
+
import numpy as np
|
|
5
|
+
import SimpleITK as sitk
|
|
6
|
+
|
|
7
|
+
from .node import Node
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def load_series(dir_path: str) -> sitk.Image:
    """Read the DICOM series found in *dir_path* into one SimpleITK volume.

    Uses GDCM to discover and order the slice files before reading.
    """
    reader = sitk.ImageSeriesReader()
    series_files = reader.GetGDCMSeriesFileNames(dir_path)
    reader.SetFileNames(series_files)
    return reader.Execute()
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def load_mask(seg_path: str) -> sitk.Image:
    """Load a segmentation mask stored as a NIfTI file at *seg_path*."""
    reader = sitk.ImageFileReader()
    reader.SetImageIO("NiftiImageIO")
    reader.SetFileName(seg_path)
    return reader.Execute()
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def get_aligned_array_view(image: sitk.Image) -> np.ndarray:
    """
    Extracts a numpy array from a 2D SimpleITK image and corrects for memory
    flipping and transposition based on the Direction Cosine Matrix.

    Args:
        image: A 2D SimpleITK Image.

    Returns:
        A 2D numpy array geometrically aligned to the world axes.
        NOTE(review): this may be a (possibly read-only) view of the image
        buffer rather than a copy — callers should not mutate it.

    Raises:
        ValueError: If *image* is not 2-dimensional.
    """
    if image.GetDimension() != 2:
        raise ValueError("This function specifically handles 2D SimpleITK images.")

    # Extract the view. Remember: SimpleITK is (X, Y) but Numpy is (Y, X)
    array_view = sitk.GetArrayViewFromImage(image)

    # Get the 2x2 direction matrix as a flat 4-element tuple
    # direction = (d00, d01, d10, d11)
    direction = image.GetDirection()

    # Determine if the image axes are swapped (e.g., a 90-degree rotation).
    # We check if the off-diagonal X-projection is larger than the main diagonal.
    # NOTE(review): this assumes near-axis-aligned direction cosines; oblique
    # orientations are only approximated — confirm against the acquisition.
    is_transposed = abs(direction[1]) > abs(direction[0])

    if is_transposed:
        # Swap axes so Numpy's rows/cols align with World Y/X
        array_view = array_view.T

        # Because we transposed, the physical Y axis (direction[1]) now drives X
        # and the physical X axis (direction[2]) now drives Y.
        flip_x = direction[1] < 0
        flip_y = direction[2] < 0
    else:
        # Standard orientation
        # direction[0] represents the physical X axis on the World X axis
        # direction[3] represents the physical Y axis on the World Y axis
        flip_x = direction[0] < 0
        flip_y = direction[3] < 0

    # Apply spatial corrections (flipud/fliplr return views, so this stays cheap)
    if flip_y:
        array_view = np.flipud(array_view)  # Flip vertically (along rows)
    if flip_x:
        array_view = np.fliplr(array_view)  # Flip horizontally (along columns)

    return array_view
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def save_image(fpath: str, image: sitk.Image, wl: list[int], flipped=False):
    """Window/level a 2D SimpleITK image and write it as an 8-bit PNG.

    Args:
        fpath: Output file path (format inferred by Pillow from the extension).
        image: 2D SimpleITK image to export.
        wl: Two-element [lower, upper] intensity window; *lower* maps to 0 and
            *upper* maps to 255, values outside are clipped.
        flipped: When True, mirror the image horizontally before saving.
    """
    arr = get_aligned_array_view(image)

    if flipped:
        print("flipping")
        arr = np.fliplr(arr)

    lo = wl[0]
    hi = wl[1]

    # Linear rescale of the window to the displayable 0-255 range.
    scaled = 255 * (arr - lo) / (hi - lo)
    as_uint8 = np.clip(scaled, a_min=0, a_max=255).astype(np.uint8)

    Image.fromarray(as_uint8).save(fpath)
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def save_image_mask(
    fpath: str,
    image: "sitk.Image",
    wl: list[int],
    flipped: bool = False,
    mask: "sitk.Image | None" = None,
    mask_color: tuple[int, int, int] = (255, 0, 0),  # Default to Red
    alpha: float = 0.5,  # Transparency (0.0 to 1.0)
):
    """Save a windowed 2D image as PNG, optionally blending a colored mask overlay.

    Args:
        fpath: Output file path.
        image: 2D SimpleITK image to export.
        wl: Two-element [lower, upper] intensity window; lower maps to 0,
            upper maps to 255, values outside are clipped.
        flipped: When True, mirror both image and mask horizontally.
        mask: Optional 2D SimpleITK mask; voxels > 0 are treated as foreground.
        mask_color: RGB color used for the overlay.
        alpha: Overlay opacity in [0, 1]; converted to an 8-bit alpha channel.
    """
    # Process base image
    arr = get_aligned_array_view(image)

    if flipped:
        print("flipping")
        arr = np.fliplr(arr)

    wl_min = wl[0]
    wl_max = wl[1]

    # Linear window/level rescale into the displayable 0-255 range.
    img = 255 * (arr - wl_min) / (wl_max - wl_min)
    png_img = np.clip(img, a_min=0, a_max=255).astype(np.uint8)

    # Convert base image to RGBA for blending
    pil_img = Image.fromarray(png_img).convert("RGBA")

    # Process and composite mask if provided
    if mask is not None:
        mask_arr = get_aligned_array_view(mask)

        # The mask must receive the same horizontal flip as the base image
        # so the overlay stays registered.
        if flipped:
            mask_arr = np.fliplr(mask_arr)

        # Ensure mask is boolean (assuming >0 is the region of interest)
        mask_bool = mask_arr > 0

        # Create an empty RGBA array for the overlay
        # Shape will be (Height, Width, 4)
        overlay_arr = np.zeros((*png_img.shape, 4), dtype=np.uint8)

        # Fill the mask regions with the specified color and alpha
        alpha_val = int(255 * alpha)
        overlay_arr[mask_bool] = mask_color + (alpha_val,)

        # Convert overlay to PIL Image and blend
        overlay_img = Image.fromarray(overlay_arr, mode="RGBA")
        pil_img = Image.alpha_composite(pil_img, overlay_img)

    # Convert back to RGB to drop the alpha channel before saving as a standard PNG
    final_img = pil_img.convert("RGB")
    final_img.save(fpath)
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
import numpy as np
|
|
146
|
+
from PIL import Image, ImageDraw
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _iso_project(pt_3d, scale=3.0, offset=(75, 75)):
|
|
150
|
+
"""
|
|
151
|
+
Projects a 3D point (X, Y, Z) into 2D isometric space.
|
|
152
|
+
"""
|
|
153
|
+
x, y, z = pt_3d
|
|
154
|
+
# Standard isometric projection angles (30 degrees)
|
|
155
|
+
cos30 = 0.866025
|
|
156
|
+
sin30 = 0.5
|
|
157
|
+
|
|
158
|
+
# Calculate 2D coordinates
|
|
159
|
+
u = (x - y) * cos30
|
|
160
|
+
# Invert Z because PIL's Y-axis goes down
|
|
161
|
+
v = -z + (x + y) * sin30
|
|
162
|
+
|
|
163
|
+
return (u * scale + offset[0], v * scale + offset[1])
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def _generate_plane_corners(center, normal, size=20.0):
|
|
167
|
+
"""Generates 3D corners of a square plane given a center and normal."""
|
|
168
|
+
normal = np.array(normal, dtype=float)
|
|
169
|
+
normal /= np.linalg.norm(normal)
|
|
170
|
+
|
|
171
|
+
if abs(normal[0]) > 0.9:
|
|
172
|
+
arbitrary_dir = np.array([0.0, 1.0, 0.0])
|
|
173
|
+
else:
|
|
174
|
+
arbitrary_dir = np.array([1.0, 0.0, 0.0])
|
|
175
|
+
|
|
176
|
+
u = np.cross(normal, arbitrary_dir)
|
|
177
|
+
u /= np.linalg.norm(u)
|
|
178
|
+
v = np.cross(normal, u)
|
|
179
|
+
|
|
180
|
+
s = size / 2.0
|
|
181
|
+
return [
|
|
182
|
+
center + s * u + s * v,
|
|
183
|
+
center - s * u + s * v,
|
|
184
|
+
center - s * u - s * v,
|
|
185
|
+
center + s * u - s * v,
|
|
186
|
+
]
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def _render_fast_3d_inset(current_node, prev_nodes, inset_size=(150, 150)):
    """Renders 3D axes and planes directly to a transparent PIL Image.

    Draws the current node's cutting plane (red), up to len(prev_nodes) faded
    previous planes (blue), and the world axes, all in an isometric projection
    centered on the current node. Nodes are assumed to expose ``center`` and
    ``normal`` attributes (both 3-vectors).

    Args:
        current_node: Node whose plane is drawn at the origin in full opacity.
        prev_nodes: Earlier nodes, ordered newest-first; opacity fades with index.
        inset_size: (width, height) of the returned RGBA canvas in pixels.

    Returns:
        A transparent RGBA PIL Image with the rendered inset.
    """
    inset = Image.new("RGBA", inset_size, (0, 0, 0, 0))  # Fully transparent
    draw = ImageDraw.Draw(inset)

    center_offset = (inset_size[0] // 2, inset_size[1] // 2)
    c_curr = np.array(current_node.center)

    primitives = []

    # 1. Prepare previous planes
    for i, p_node in enumerate(prev_nodes):
        # Shift relative to current center
        rel_center = np.array(p_node.center) - c_curr
        corners = _generate_plane_corners(rel_center, p_node.normal, size=25.0)

        # Calculate depth for Painter's Algorithm (smaller sum = further away)
        depth = sum(rel_center)
        alpha = int(max(30, 150 - i * 30))  # Fades out older planes

        primitives.append(
            {
                "type": "poly",
                "pts": corners,
                "color": (30, 144, 255, alpha),  # Dodger Blue
                # Outline is brighter than the fill; alpha * 1.5 stays <= 255
                # because alpha is capped at 150 above.
                "outline": (255, 255, 255, int(alpha * 1.5)),
                "depth": depth,
            }
        )

    # 2. Prepare current plane (drawn at the origin of the inset's frame)
    curr_corners = _generate_plane_corners(
        np.array([0, 0, 0]), current_node.normal, size=25.0
    )
    primitives.append(
        {
            "type": "poly",
            "pts": curr_corners,
            "color": (220, 20, 60, 200),  # Crimson Red
            "outline": (255, 255, 255, 255),
            "depth": 0.0,
        }
    )

    # 3. Prepare Axes (Length = 20)
    axis_len = 20.0
    axes_data = [
        ([0, 0, 0], [axis_len, 0, 0], (255, 50, 50, 200)),  # X: Red
        ([0, 0, 0], [0, axis_len, 0], (50, 255, 50, 200)),  # Y: Green
        ([0, 0, 0], [0, 0, axis_len], (50, 100, 255, 200)),  # Z: Blue
    ]
    for start, end, color in axes_data:
        # NOTE(review): labelled "midpoint depth" but evaluates
        # sum(start) + sum(end)/2, not (sum(start)+sum(end))/2. Harmless here
        # because start is always the origin, but confirm intent if reused.
        depth = sum(start) + sum(end) / 2.0  # Midpoint depth
        primitives.append(
            {"type": "line", "pts": [start, end], "color": color, "depth": depth}
        )

    # 4. Sort all primitives by depth (draw furthest first)
    primitives.sort(key=lambda x: x["depth"])

    # 5. Draw to canvas
    for prim in primitives:
        pts_2d = [
            _iso_project(pt, scale=2.5, offset=center_offset) for pt in prim["pts"]
        ]

        if prim["type"] == "poly":
            draw.polygon(pts_2d, fill=prim["color"], outline=prim["outline"])
        elif prim["type"] == "line":
            draw.line(pts_2d, fill=prim["color"], width=2)

    return inset
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def save_node(
    n: Node,
    filename: str,
    wl: tuple[int, int] = (-500, 1500),
    mask_color: tuple[int, int, int] | None = None,
):
    """
    Saves the node's image as a PNG with a custom 3D spatial inset pasted in
    the top-right corner.

    Args:
        n: Node providing ``img`` and ``mask`` (2D SimpleITK images) and an
           optional ``prev`` link (node or list of nodes) used for the inset.
        filename: Output image path.
        wl: Intensity window as (lower, upper) bounds — lower maps to 0,
            upper to 255. (Despite the historical naming, this is NOT
            (level, width); the code unpacks it directly as bounds.)
        mask_color: When given, overlay ``n.mask > 0`` in this RGB color at a
            fixed alpha of 100/255; when None, no overlay is drawn.
    """
    lower, upper = wl
    width = upper - lower

    img = get_aligned_array_view(n.img)
    mask = get_aligned_array_view(n.mask)

    # 1. Apply Window/Level (explicitly maps lower -> 0, upper -> 255)
    img_float = img.astype(float)
    img_clipped = np.clip(img_float, lower, upper)
    img_norm = (img_clipped - lower) / width
    img_uint8 = (img_norm * 255.0).astype(np.uint8)

    # Convert to RGB by stacking the grayscale channels
    img_rgb = np.stack((img_uint8,) * 3, axis=-1)
    base_img = Image.fromarray(img_rgb).convert("RGBA")

    # 2. Composite the mask if requested
    if mask_color is not None:
        overlay = np.zeros((*mask.shape, 4), dtype=np.uint8)
        overlay[mask > 0] = (*mask_color[:3], 100)  # Ensure valid alpha

        mask_img = Image.fromarray(overlay, mode="RGBA")
        base_img = Image.alpha_composite(base_img, mask_img)

    # 3. Traverse linked list to get up to 5 previous nodes.
    # ``prev`` may be either a node or a list of nodes; only the first parent
    # is followed when it is a list.
    prev_nodes = []
    curr = n
    while getattr(curr, "prev", None) and len(prev_nodes) < 5:
        p_node = curr.prev[0] if isinstance(curr.prev, list) else curr.prev
        if p_node is None:
            break
        prev_nodes.append(p_node)
        curr = p_node

    # 4. Generate and paste the fast 3D inset
    inset_img = _render_fast_3d_inset(n, prev_nodes)

    # Paste in the top right corner. The inset acts as its own alpha channel.
    paste_x = base_img.width - inset_img.width - 10
    paste_y = 10
    base_img.paste(inset_img, (paste_x, paste_y), inset_img)

    # 5. Save to disk (drop alpha so the PNG is plain RGB)
    final_img = base_img.convert("RGB")
    final_img.save(filename)
|
|
322
|
+
|
|
323
|
+
|
|
324
|
+
def _dataset_from_labels(dataset_dir: str, file_ext: str) -> dict:
|
|
325
|
+
files = os.listdir(os.path.join(dataset_dir, "labelsTr"))
|
|
326
|
+
label_files = []
|
|
327
|
+
for f in files:
|
|
328
|
+
if f.endswith(file_ext):
|
|
329
|
+
label_files.append(os.path.join(dataset_dir, "labelsTr", f))
|
|
330
|
+
|
|
331
|
+
num_files = len(label_files)
|
|
332
|
+
|
|
333
|
+
dataset = {
|
|
334
|
+
"channel_names": {"0": "CT"},
|
|
335
|
+
"labels": {"background": 0, "mask": 1},
|
|
336
|
+
"numTraining": num_files,
|
|
337
|
+
"file_ending": file_ext,
|
|
338
|
+
# "overwrite_image_reader_writer": "SimpleITKIO"
|
|
339
|
+
}
|
|
340
|
+
return dataset
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
def _dataset_file(dataset_dir: str):
    """Write ``dataset.json`` for *dataset_dir*, assuming ``.nii.gz`` labels."""
    dataset = _dataset_from_labels(dataset_dir, ".nii.gz")
    with open(os.path.join(dataset_dir, "dataset.json"), "w") as f:
        json.dump(dataset, f)
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def save_node_nnunet(n: Node, dataset_dir: str, case_id: str):
    """Write a node's image/mask pair into an nnU-Net-style dataset layout.

    The image is written to ``imagesTr/<case_id>_0000.nii.gz`` and the mask
    to ``labelsTr/<case_id>.nii.gz``; both directories are created on demand.
    """
    img_dir = os.path.join(dataset_dir, "imagesTr")
    label_dir = os.path.join(dataset_dir, "labelsTr")
    os.makedirs(img_dir, exist_ok=True)
    os.makedirs(label_dir, exist_ok=True)

    sitk.WriteImage(n.img, os.path.join(img_dir, f"{case_id}_0000.nii.gz"))
    sitk.WriteImage(n.mask, os.path.join(label_dir, f"{case_id}.nii.gz"))
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import argparse
|
|
3
|
+
|
|
4
|
+
from . import io
|
|
5
|
+
from .manifold import Manifold
|
|
6
|
+
|
|
7
|
+
from .relabeling import manual_relabel
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("study_dir")
    ap.add_argument("--seg", help="seed segmentation to initialize with")
    ap.add_argument("--out", help="labeling output directory")
    ap.add_argument(
        "--debug-out", help="save preview images after relabeling", default=None
    )
    ap.add_argument("--fov", help="field of view in cm", default=5.0)
    ap.add_argument(
        "--nnunet-out",
        help="output updated labels to nnU-Net-structured dataset dir",
        # BUG FIX: default was 5.0 (copy-pasted from --fov). A non-None default
        # made the `is not None` guard below always fire, then crash in
        # os.path.join with a float path. None disables nnU-Net export unless
        # the flag is passed explicitly.
        default=None,
    )
    ap.add_argument(
        "--nnunet-prefix", help="case prefix for nnU-Net samples", default="arcn"
    )
    ap.add_argument(
        "--N",
        help="final number of slices to produce from the volume",
        default=128,
    )

    # Fixed display window (intensity bounds) used for the debug preview PNGs.
    wmin = -1000
    wmax = 350

    args = ap.parse_args()

    img = io.load_series(args.study_dir)
    mask = io.load_mask(args.seg)

    # Takes care of the initial resampling
    m = Manifold(img, mask, 32, fov=float(args.fov))

    # Take care of the relabeling
    manual_relabel(m.nodes)

    # Save PNGs to verify alignment
    if args.debug_out is not None:
        output_dir = args.debug_out
        for i, node in enumerate(m.nodes):
            path = os.path.join(output_dir, f"{str(i).zfill(3)}.png")
            io.save_node(node, path, wl=(int(wmin), int(wmax)), mask_color=(255, 0, 0))

    # Save nnU-Net output
    if args.nnunet_out is not None:
        output_dir = args.nnunet_out
        for i, node in enumerate(m.nodes):
            case_id = f"{args.nnunet_prefix}_{str(i).zfill(4)}"
            io.save_node_nnunet(node, output_dir, case_id)

        # Render metadata for the updated dataset. `io` is already imported at
        # module level, so the previous redundant late import was dropped.
        io._dataset_file(output_dir)
|
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import queue
|
|
3
|
+
from typing import Iterable
|
|
4
|
+
import SimpleITK as sitk
|
|
5
|
+
import numpy as np
|
|
6
|
+
|
|
7
|
+
from . import semantics
|
|
8
|
+
from .resample_image import _resample_image
|
|
9
|
+
|
|
10
|
+
from .node import Node
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Base class describes the general user-oriented interface
|
|
14
|
+
# Implicitly assumes incoming mask has a single centerline
|
|
15
|
+
class Manifold:
    """Chain of resampled cross-section nodes along a segmented path.

    On construction, seed points are sampled along the mask's centerline,
    normals/up-vectors/FOVs are estimated for each, and a linked ``Node``
    chain is built with image and mask slices resampled perpendicular to the
    path. The final seed node is placed on ``search_nodes`` (a FIFO queue of
    frontier nodes for subclasses to extend) instead of ``self.nodes``.
    """

    def __init__(
        self,
        img: sitk.Image,
        mask: sitk.Image,
        num_initial_nodes: int,
        fov: float | None = None,
    ):
        """Build the initial node chain.

        Args:
            img: Source 3D SimpleITK image volume.
            mask: Seed segmentation; assumed to contain a single centerline.
            num_initial_nodes: Number of seed points to sample along the path.
            fov: When given, overrides the per-node estimated field of view
                for every node (same units as ``semantics.estimate_fovs``).
        """

        self.nodes = []

        self.img = img
        self.seed_mask = mask
        # FIFO frontier: nodes still awaiting reconstruction/search.
        self.search_nodes = queue.Queue()

        n_centers = num_initial_nodes
        centers = semantics.path_seed_points(self.seed_mask, n_centers)
        normals = semantics.reconstruction_normals(centers)
        up_vectors = semantics.generate_up_vectors(normals)
        fovs = semantics.estimate_fovs(centers, normals, img, mask)

        prev_node = None
        for i, data in enumerate(zip(centers, normals, up_vectors, fovs)):

            c = list(data[0])
            n = list(data[1])
            up = list(data[2])
            f = data[3]

            curr_node = Node(c, n, prev_node)

            # A user-supplied FOV overrides the estimate for every node.
            if fov is not None:
                f = fov

            # 512x512 perpendicular resample of both image and seed mask.
            curr_node.img = _resample_image(self.img, c, n, up, 512, f)
            curr_node.mask = _resample_image(self.seed_mask, c, n, up, 512, f)

            # Add final center to search list.
            # NOTE(review): the last node is queued but intentionally (?) NOT
            # appended to self.nodes — confirm downstream code relies on this.
            if i == n_centers - 1:
                self.search_nodes.put(curr_node)
                break

            self.nodes.append(curr_node)
            prev_node = curr_node

    def reconstruct(self):
        # Subclasses (e.g. Airway) implement the actual search/reconstruction.
        raise NotImplementedError

    def visualize(self):
        raise NotImplementedError
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
class Airway(Manifold):
    """Manifold specialization that reconstructs an airway tree by breadth-
    first search from the seeded frontier, splitting at bifurcations."""

    def visualize(self):
        # Intentionally a no-op (placeholder implementation).
        pass

    def reconstruct(self, output_dir: str | None = None):
        """Grow the node graph outward until every branch hits a bifurcation.

        Args:
            output_dir: Directory passed to ``Node.save`` for already-added
                nodes. NOTE(review): ``n.save(output_dir)`` is called even
                when this is None — confirm Node.save tolerates that.
        """
        # WARN: this implementation is not intended to be parallel-friendly, just illustrative

        # Save out already added nodes
        for n in self.nodes:
            n.save(output_dir)

        # Begin the reconstruction search
        daughter_nodes = []
        while not self.search_nodes.empty():
            curr_node = self.search_nodes.get()

            # NOTE: curr_node will be returned with tube_nodes.
            # connectivity information will be set inside of _search_to_bifurcation
            daughter_nodes, tube_nodes = self._search_to_bifurcation(curr_node)

            # Add non-search nodes into the manifold structure
            for n in tube_nodes:
                self.nodes.append(n)

            # Add search nodes to search queue (note: current implementation is breadth-first search)
            [self.search_nodes.put(n) for n in daughter_nodes]

    def _search_to_bifurcation(self, n: Node) -> tuple[Iterable[Node], Iterable[Node]]:
        """Step along the airway from *n* until more than one lumen appears.

        Args:
            n: Frontier node; assumed to be a daughter of a previous tube, so
               ``n.prev[0]`` must exist and expose ``radius``.

        Returns:
            (daughter_nodes, tube_nodes): the new frontier nodes created at
            the detected bifurcation, and the single-lumen tube nodes walked
            through on the way there.
        """
        # TASK: given n, estimate the next tubular node.
        # n is passed as a *daughter_node* from a previous tube.
        # Search procedure is to incrementally step and detect airway
        # until a bifurcation/trifurcation is detected.

        prev_radius = n.prev[0].radius

        # airway should occupy roughly 1/5 of the FOV; could be a good
        # tuneable parameter. includes estimate of the *target* radius for which
        # we have the scaling law: r^{k}_{parent} = r^{k}_{d1} + r^{k}_{d2} If
        # we assume r_d1 == r_d2, then this results in r_d ~= 0.8 * r_parent
        fov = 10 * (0.8 * prev_radius)

        # Physical step length along the path normal per iteration.
        step_size = 0.05

        daughter_nodes = []
        tube_nodes = []

        node_centers = n.cluster_centroids()

        # The root of a tube must present exactly one lumen cluster.
        if len(node_centers) != 1:
            print("ERROR more nodes than expected for tube root")
            sys.exit(1)

        prev_node = n
        while len(node_centers) == 1:

            # Generate the next node based on previous node/graph information
            new_center = step_size * np.array(prev_node.normal) + np.array(
                prev_node.center
            )

            _, normal = prev_node.predict_perpendicular_plane(
                new_center.tolist(), u=1.00, smoothing=0.01
            )

            # NOTE(review): the predicted normal is negated here — presumably
            # to keep it pointing along the direction of travel; confirm
            # against predict_perpendicular_plane's sign convention.
            normal = -normal

            print(normal)

            new_node = Node(new_center.tolist(), normal.tolist(), prev_node)

            # Instantiate node's imaging data.
            # NOTE(review): `fov` computed above is unused; the resample uses a
            # fixed 5.0 FOV and [0, 1, 0] up-vector — confirm this is intended.
            new_node.img = _resample_image(
                self.img, new_center.tolist(), normal.tolist(), [0, 1, 0], 512, 5.0
            )
            new_node.generate_mask()

            # NOTE: All mask cleanup and postprocessing must be completed by this point

            # Evaluate for next/new centers
            node_centers = new_node.cluster_centroids()

            # Still a single lumen: keep walking the tube.
            if len(node_centers) == 1:
                tube_nodes.append(new_node)
                prev_node = new_node

        # Multiple centroids found: spawn one daughter node per lumen.
        for curr_center in node_centers:
            center, curr_normal = prev_node.predict_perpendicular_plane(curr_center)
            dn = Node(center, curr_normal, prev_node)
            daughter_nodes.append(dn)

        return (daughter_nodes, tube_nodes)
|