isoview 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- isoview/__init__.py +36 -0
- isoview/array.py +11 -0
- isoview/config.py +213 -0
- isoview/corrections.py +135 -0
- isoview/fusion.py +979 -0
- isoview/intensity.py +427 -0
- isoview/io.py +942 -0
- isoview/masks.py +421 -0
- isoview/pipeline.py +913 -0
- isoview/segmentation.py +173 -0
- isoview/temporal.py +373 -0
- isoview/transforms.py +1115 -0
- isoview/viz.py +723 -0
- isoview-0.1.0.dist-info/METADATA +370 -0
- isoview-0.1.0.dist-info/RECORD +17 -0
- isoview-0.1.0.dist-info/WHEEL +4 -0
- isoview-0.1.0.dist-info/entry_points.txt +2 -0
isoview/__init__.py
ADDED
@@ -0,0 +1,36 @@
+"""Isoview: multi-view light sheet microscopy processing pipeline."""
+
+from .config import ProcessingConfig
+from .pipeline import IsoviewProcessor, process_dataset, process_timepoint
+from .fusion import multi_fuse, fuse_views, blend_views
+from .transforms import (
+    estimate_channel_transform,
+    estimate_camera_transform,
+    apply_channel_transform,
+    apply_camera_transform,
+    gradient_descent_optimize,
+)
+from . import temporal
+from . import viz
+from .io import read_volume, detect_timepoints
+
+__version__ = "0.1.0"
+
+__all__ = [
+    "ProcessingConfig",
+    "IsoviewProcessor",
+    "process_dataset",
+    "process_timepoint",
+    "multi_fuse",
+    "fuse_views",
+    "blend_views",
+    "estimate_channel_transform",
+    "estimate_camera_transform",
+    "apply_channel_transform",
+    "apply_camera_transform",
+    "gradient_descent_optimize",
+    "temporal",
+    "viz",
+    "read_volume",
+    "detect_timepoints",
+]
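The package root re-exports the pipeline's public entry points, so typical use stays in the top-level namespace. A minimal sketch under stated assumptions: the dataset path is illustrative, and process_dataset is assumed to accept a ProcessingConfig (its real signature is defined in isoview/pipeline.py, which is not reproduced here).

    from pathlib import Path

    import isoview

    cfg = isoview.ProcessingConfig(input_dir=Path("/data/run01"), timepoints=[0, 1])  # illustrative path
    isoview.process_dataset(cfg)  # assumed call pattern; see isoview/pipeline.py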
isoview/array.py
ADDED
@@ -0,0 +1,11 @@
+"""IsoviewArray - re-exported from mbo_utilities for backwards compatibility."""
+
+try:
+    from mbo_utilities.arrays.isoview import IsoviewArray
+except ImportError:
+    raise ImportError(
+        "IsoviewArray requires mbo_utilities. Install with: "
+        "pip install mbo_utilities[isoview]"
+    )
+
+__all__ = ["IsoviewArray"]
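The module is a thin compatibility shim: IsoviewArray now lives in mbo_utilities, and the guarded import turns a missing optional dependency into an actionable error. Existing code can keep the old import path, assuming mbo_utilities is installed with its isoview extra:

    from isoview.array import IsoviewArray  # raises ImportError with an install hint if mbo_utilities is missing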
isoview/config.py
ADDED
@@ -0,0 +1,213 @@
+"""Configuration for isoview processing pipeline."""
+
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Optional
+
+import numpy as np
+
+
+@dataclass
+class ProcessingConfig:
+    """Configuration for IsoView image processing pipeline."""
+
+    # paths
+    input_dir: Path
+    output_dir: Optional[Path] = None
+    projection_dir: Optional[Path] = None
+
+    # specimen and timepoint selection
+    specimen: int = 0
+    timepoints: Optional[list[int]] = None  # None = auto-detect all
+    cameras: list[int] = field(default_factory=lambda: [0, 1, 2, 3])
+    channels: list[int] = field(default_factory=lambda: [0, 1])
+
+    # roi cropping (none means auto-detect from xml)
+    crop_left: Optional[list[int]] = None
+    crop_top: Optional[list[int]] = None
+    crop_width: Optional[list[int]] = None
+    crop_height: Optional[list[int]] = None
+    crop_front: Optional[list[int]] = None
+    crop_depth: Optional[list[int]] = None
+
+    # file formats
+    output_format: str = "zarr"  # klb, tiff, or zarr
+    zarr_compression: Optional[str] = "blosc-zstd"  # blosc-zstd, blosc-lz4, blosc-lz4hc, gzip, bzip2, or None (default: blosc-zstd for best compression)
+    zarr_compression_level: int = 9  # 1-9, default to maximum compression
+    zarr_chunks: Optional[tuple[int, int, int]] = None  # (z, y, x) chunk size for zarr, None = auto
+    zarr_shards: Optional[tuple[int, int, int]] = None  # (z, y, x) shard size for zarr, None = auto (10 frames/shard for z>=20)
+    zarr_consolidate: bool = True  # consolidate projections and masks into single OME-Zarr file (only for zarr output)
+
+    # pixel correction
+    median_kernel: Optional[tuple[int, int]] = (3, 3)
+
+    # background estimation
+    background_percentile: float = 5.0
+    mask_percentile: float = 1.0
+    subsample_factor: int = 100
+
+    # segmentation
+    # segment_mode: 0=no masking, 1=segment+mask, 2=masks only, 3=apply global mask
+    segment_mode: int = 1
+    gauss_kernel: int = 5
+    gauss_sigma: float = 2.0
+    segment_threshold: float = 0.4
+    splitting: int = 10  # for memory-efficient gaussian filtering
+    global_mask_dir: Optional[Path] = None  # for segment_mode=3
+
+    # transformations
+    rotation: int = 0  # 0: none, 1: 90° cw, -1: 90° ccw
+    flip_horizontal: bool = False
+    flip_vertical: bool = False
+
+    # microscope parameters
+    pixel_spacing_z: float = 0.40625
+    detection_objective_mag: float = 16.0
+    pixel_spacing_camera: float = 6.5
+
+    # channel dependencies (for multi-channel segmentation)
+    reference_channels: Optional[list[list[int]]] = None
+    dependent_channels: Optional[list[list[int]]] = None
+
+    # camera-channel mapping (isoview hardware: CM0,1→CHN01; CM2,3→CHN00)
+    camera_channel_map: Optional[dict[int, int]] = None
+
+    # camera pairs for flipping (second camera in each pair gets flipped)
+    camera_pairs: Optional[list[tuple[int, int]]] = None  # [(0, 1), (2, 3)]
+
+    # fusion parameters (for multiFuse pipeline)
+    fusion_enable: bool = False
+    fusion_type: str = "adaptive_blending"  # adaptive_blending, geometric, wavelet, average
+    fusion_blending_range: tuple[int, int] = (20, 20)  # (channel_range, camera_range)
+    fusion_camera_pairs: Optional[list[tuple[int, int]]] = None  # [(cam0, cam1), ...]
+    fusion_channel_pairs: Optional[list[tuple[int, int]]] = None  # [(ch0, ch1), ...]
+    fusion_4view_only: bool = False  # skip camera fusion, use existing camera-fused files for 4-view
+    fusion_flip_h: bool = False  # flip second camera horizontally (for camera fusion)
+    fusion_flip_v: bool = False  # flip second camera vertically (for camera fusion)
+    fusion_channel_flip_h: bool = True  # flip second channel horizontally (for 4-view/channel fusion)
+    fusion_channel_flip_v: bool = True  # flip second channel vertically (for 4-view/channel fusion)
+    fusion_left_flags: tuple[int, int] = (2, 1)  # light sheet direction per camera (1=ref, 2=transformed)
+    fusion_front_flag: int = 1  # which camera has quality in front (1=ref, 2=transformed)
+    fusion_search_offsets_x: tuple[int, int, int] = (-50, 50, 10)  # (start, stop, step)
+    fusion_search_offsets_y: tuple[int, int, int] = (-50, 50, 10)
+    fusion_optimizer: str = "gradient_descent"  # gradient_descent (MATLAB fminuncFA), nelder_mead, bfgs
+
+    # axial scaling for channel fusion (z_spacing / xy_spacing)
+    # used for anisotropic rotation in XZ plane
+    axial_scaling: float = 1.0
+
+    # temporal parameter processing
+    fusion_temporal_smoothing: bool = False  # enable rloess smoothing across timepoints
+    fusion_smoothing_window: int = 100  # rloess smoothing window size
+    fusion_temporal_interpolation: bool = False  # interpolate params for missing timepoints
+    fusion_offset_averaging_range: int = 10  # median/mean window for spatial offsets
+    fusion_angle_averaging_range: int = 10  # median/mean window for rotation angles
+    fusion_intensity_averaging_range: int = 5  # median/mean window for intensity correction
+    fusion_averaging: str = "median"  # median or mean for lookup table
+    fusion_static: bool = False  # use static (time-averaged) parameters instead of time-dependent
+    fusion_generate_lookup_table: bool = False  # generate and save lookup tables
+
+    # mask processing
+    fusion_mask_fusion_mode: int = 1  # 0: overlap only, 1: combine full masks
+    fusion_mask_padding: int = 0  # 0: disable, 1: center coord, 2: smooth padding
+    fusion_mask_padding_radius: int = 50  # disk radius for smooth padding mode
+    fusion_mask_min_object_size: float = 0  # bwareaopen fraction, 0 to disable
+    fusion_mask_percentile: float = 1.0  # percentile for minimum calculation
+    fusion_mask_subsample: int = 100  # subsampling for percentile computation
+
+    # processing modes
+    fusion_slab_size_channels: int = 5  # adaptive slab size for channel correlation, 0 for slice mode
+    fusion_slab_size_cameras: int = 3  # adaptive slab size for camera correlation, 0 for slice mode
+    fusion_intensity_slab_size_channels: int = 10  # slab size for channel intensity correction
+    fusion_intensity_slab_size_cameras: int = 5  # slab size for camera intensity correction
+
+    # filtering options
+    fusion_median_filter_range: int = 100  # median filter range, 0 to disable
+    fusion_precise_gauss: bool = True  # True for double precision, False for uint16 (faster)
+    fusion_gauss_kernel: int = 3  # gauss filter kernel size, 0 to disable
+    fusion_gauss_sigma: float = 1.0  # gauss filter sigma
+    fusion_subsample_slices: int = 1  # slice subsampling for percentile computation
+    fusion_subsample_stacks: int = 100  # stack subsampling for percentile computation
+
+    # diagnostic output
+    save_diagnostics: bool = True  # save diagnostic PNG images during processing
+    diagnostics_dir: Optional[Path] = None  # directory for diagnostic images, None = output_dir/diagnostics
+
+    def __post_init__(self):
+        """Initialize derived attributes."""
+        self.input_dir = Path(self.input_dir)
+
+        # auto-detect timepoints if not specified
+        if self.timepoints is None:
+            from .io import detect_timepoints
+            self.timepoints = detect_timepoints(self.input_dir, self.specimen)
+
+        if self.output_dir is None:
+            self.output_dir = self.input_dir.parent / f"{self.input_dir.name}.corrected"
+        else:
+            self.output_dir = Path(self.output_dir)
+
+        if self.projection_dir is None:
+            self.projection_dir = self.output_dir.parent / f"{self.input_dir.name}.projections"
+        else:
+            self.projection_dir = Path(self.projection_dir)
+
+        # pixel spacing calculations
+        pixel_xy = self.pixel_spacing_camera / self.detection_objective_mag
+        # Order: [z, y, x, t, c] for OME-Zarr compliance
+        self.pixel_spacing = np.array([self.pixel_spacing_z, pixel_xy, pixel_xy, 1.0, 1.0])
+        self.scaling = self.pixel_spacing_z / pixel_xy
+
+        # compute axial scaling for channel fusion if not explicitly set
+        if self.axial_scaling == 1.0:
+            self.axial_scaling = self.scaling
+
+        # global mask directory
+        if self.global_mask_dir is None:
+            self.global_mask_dir = self.input_dir.parent / f"{self.input_dir.name}.globalMask"
+        else:
+            self.global_mask_dir = Path(self.global_mask_dir)
+
+        # sync segment with segment_mode for backwards compat
+        if self.segment_mode == 0:
+            self.segment = False
+        elif self.segment_mode in (1, 2):
+            self.segment = True
+
+        # default camera-channel mapping for standard isoview hardware
+        if self.camera_channel_map is None:
+            self.camera_channel_map = {0: 1, 1: 1, 2: 0, 3: 0}
+
+        # default camera pairs for standard isoview hardware
+        if self.camera_pairs is None:
+            self.camera_pairs = [(0, 1), (2, 3)]
+
+        # validate camera pairs if flipping is enabled
+        if self.flip_horizontal or self.flip_vertical:
+            self._validate_camera_pairs()
+
+    def _validate_camera_pairs(self) -> None:
+        """Validate that all cameras belong to a pair when flipping is enabled."""
+        paired_cameras = set()
+        for cam0, cam1 in self.camera_pairs:
+            paired_cameras.add(cam0)
+            paired_cameras.add(cam1)
+
+        for camera in self.cameras:
+            if camera not in paired_cameras:
+                raise ValueError(
+                    f"camera {camera} is not in any pair but flipping is enabled. "
+                    f"camera_pairs={self.camera_pairs}, cameras={self.cameras}"
+                )
+
+    def is_second_in_pair(self, camera: int) -> bool:
+        """Check if camera is the second in its pair (should be flipped)."""
+        for cam0, cam1 in self.camera_pairs:
+            if camera == cam1:
+                return True
+        return False
+
+    @property
+    def extension(self) -> str:
+        """File extension for output format."""
+        return f".{self.output_format}"
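With the defaults above, __post_init__ derives the lateral pixel size as pixel_spacing_camera / detection_objective_mag = 6.5 / 16.0 = 0.40625, so scaling = pixel_spacing_z / pixel_xy = 1.0 (isotropic voxels), and the output, projection, and global-mask directories are placed next to the input directory. A small sketch of the derived attributes; the path is illustrative, and timepoints is passed explicitly so detect_timepoints is not called on a nonexistent directory.

    from pathlib import Path

    from isoview.config import ProcessingConfig

    cfg = ProcessingConfig(input_dir=Path("/data/run01"), timepoints=[0])
    cfg.output_dir          # /data/run01.corrected
    cfg.projection_dir      # /data/run01.projections
    cfg.global_mask_dir     # /data/run01.globalMask
    cfg.scaling             # 1.0 with the default spacings
    cfg.camera_channel_map  # {0: 1, 1: 1, 2: 0, 3: 0}
    cfg.extension           # ".zarr"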
isoview/corrections.py
ADDED
@@ -0,0 +1,135 @@
+"""Pixel and intensity corrections for microscopy data."""
+
+import numpy as np
+from scipy.ndimage import median_filter
+
+
+def percentile_interp(data: np.ndarray, percentile: float) -> float:
+    """Compute percentile using (i+0.5)/n interpolation.
+
+    Args:
+        data: 1d array of values (will be sorted internally)
+        percentile: percentile to compute (0-100)
+
+    Returns:
+        percentile value
+    """
+    if len(data) == 0:
+        return 0.0
+
+    sorted_data = np.sort(data)
+    n = len(sorted_data)
+
+    p_rank = 100.0 * (np.arange(n) + 0.5) / n
+
+    # interpolate, clamp to min/max outside range
+    return float(np.interp(percentile, p_rank, sorted_data,
+                           left=sorted_data[0], right=sorted_data[-1]))
+
+
+def correct_dead_pixels(
+    volume: np.ndarray,
+    background_value: float,
+    kernel: tuple[int, int],
+    verbose: bool = False,
+) -> np.ndarray:
+    """Detect and correct insensitive pixels using median filtering.
+
+    Args:
+        volume: 3d image stack (z, y, x)
+        background_value: camera background intensity
+        kernel: median filter kernel size (y, x)
+        verbose: print detection statistics
+
+    Returns:
+        corrected volume
+    """
+    # detect dead pixels via deviation projection
+    deviation_proj = np.std(volume.astype(np.float32), axis=0)
+    deviation_filtered = median_filter(deviation_proj, size=kernel, mode="reflect")
+    deviation_dist = np.abs(deviation_proj - deviation_filtered)
+    deviation_thresh = _determine_threshold(np.sort(deviation_dist.ravel()))
+    deviation_mask = deviation_dist > deviation_thresh
+
+    # detect via mean projection
+    mean_proj = np.mean(volume, axis=0) - background_value
+    mean_filtered = median_filter(mean_proj, size=kernel, mode="reflect")
+    mean_dist = np.abs((mean_proj - mean_filtered) / (mean_filtered + 1e-10))
+    mean_thresh = _determine_threshold(np.sort(mean_dist.ravel()))
+    mean_mask = mean_dist > mean_thresh
+
+    # combine masks
+    dead_pixel_mask = deviation_mask | mean_mask
+
+    if verbose:
+        n_dead = dead_pixel_mask.sum()
+        pct = 100 * n_dead / dead_pixel_mask.size
+        print(f"detected {n_dead} dead pixels ({pct:.1f}%)")
+
+    # correct each frame
+    corrected = volume.copy()
+    for z in range(volume.shape[0]):
+        frame = corrected[z]
+        filtered = median_filter(frame, size=kernel, mode="reflect")
+        frame[dead_pixel_mask] = filtered[dead_pixel_mask]
+        corrected[z] = frame
+
+    return corrected
+
+
+def _determine_threshold(sorted_array: np.ndarray, max_samples: int = 50000) -> float:
+    """Determine threshold using maximum distance from linear fit.
+
+    matches matlab's determineThreshold: uses round() for subsampling and
+    1-based indexing for the x coordinate.
+
+    Args:
+        sorted_array: sorted 1d array
+        max_samples: subsample if array is larger
+
+    Returns:
+        threshold value
+    """
+    n = len(sorted_array)
+
+    # subsample if too large (matlab uses round, not floor)
+    if n > max_samples:
+        step = round(n / max_samples)
+        sorted_array = sorted_array[::step]
+        n = len(sorted_array)
+
+    # matlab uses 1-based indexing: X = (1:elements)'
+    x = np.arange(1, n + 1)
+
+    # find maximum distance from line connecting first and last points
+    p1 = np.array([1, sorted_array[0]])
+    p2 = np.array([n, sorted_array[-1]])
+    vec = p2 - p1
+
+    points = np.column_stack([x, sorted_array])
+    h = np.dot(points - p1, vec) / (np.linalg.norm(vec) ** 2)
+    proj = p1 + h[:, np.newaxis] * vec
+    distances = np.linalg.norm(points - proj, axis=1)
+
+    max_idx = np.argmax(distances)
+    return sorted_array[max_idx]
+
+
+def estimate_background(
+    volume: np.ndarray,
+    percentile: float = 5.0,
+    subsample: int = 100,
+) -> float:
+    """Estimate background intensity from subsampled volume.
+
+    Args:
+        volume: 3d image stack
+        percentile: percentile for background estimation
+        subsample: subsampling factor
+
+    Returns:
+        background intensity value
+    """
+    subsampled = volume.ravel()[::subsample]
+    subsampled = subsampled[subsampled > 0]
+    return percentile_interp(subsampled, percentile)
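The helpers above form a small correction workflow: estimate_background takes a low percentile of a subsampled volume, and correct_dead_pixels flags a pixel when its temporal standard deviation or its background-subtracted mean deviates strongly from a median-filtered version of the same z-projection, with the cutoff picked by _determine_threshold as the point of maximum distance between the sorted deviation curve and the chord joining its endpoints (a knee-point heuristic). A minimal end-to-end sketch on synthetic data, wiring the helpers together:

    import numpy as np

    from isoview.corrections import correct_dead_pixels, estimate_background

    rng = np.random.default_rng(0)
    volume = rng.poisson(lam=120, size=(16, 64, 64)).astype(np.uint16)
    volume[:, 10, 20] = 0  # simulate an insensitive pixel

    background = estimate_background(volume, percentile=5.0, subsample=100)
    cleaned = correct_dead_pixels(volume, background_value=background, kernel=(3, 3), verbose=True)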