arcadia-microscopy-tools: 0.2.3-py3-none-any.whl → 0.2.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -7,7 +7,7 @@ from matplotlib.colors import LinearSegmentedColormap, Normalize
  from skimage.color import gray2rgb

  from .channels import Channel
- from .typing import FloatArray
+ from .typing import Float64Array


  @dataclass
@@ -23,7 +23,7 @@ class Layer:
  """

  channel: Channel
- intensities: FloatArray
+ intensities: Float64Array
  opacity: float = 1.0
  transparent: bool = True

@@ -34,11 +34,11 @@ class Layer:


  def overlay_channels(
- background: FloatArray,
- channel_intensities: dict[Channel, FloatArray],
+ background: Float64Array,
+ channel_intensities: dict[Channel, Float64Array],
  opacity: float = 1.0,
  transparent: bool = True,
- ) -> FloatArray:
+ ) -> Float64Array:
  """Create a fluorescence overlay.

  All channels are blended with the same opacity and transparency settings.
@@ -73,9 +73,9 @@ def overlay_channels(


  def create_sequential_overlay(
- background: FloatArray,
+ background: Float64Array,
  layers: list[Layer],
- ) -> FloatArray:
+ ) -> Float64Array:
  """Create an overlay by sequentially blending multiple channels onto a background.

  Args:
@@ -109,10 +109,10 @@ def create_sequential_overlay(


  def alpha_blend(
- background: FloatArray,
- foreground: FloatArray,
- alpha: FloatArray,
- ) -> FloatArray:
+ background: Float64Array,
+ foreground: Float64Array,
+ alpha: Float64Array,
+ ) -> Float64Array:
  """Alpha blend foreground onto background.

  Args:
@@ -127,9 +127,9 @@ def alpha_blend(


  def colorize(
- intensities: FloatArray,
+ intensities: Float64Array,
  colormap: LinearSegmentedColormap,
- ) -> FloatArray:
+ ) -> Float64Array:
  """Apply a colormap to a 2D intensity array.

  Args:
@@ -10,7 +10,7 @@ import skimage as ski
  from cellpose.utils import outlines_list

  from .channels import Channel
- from .typing import BoolArray, FloatArray, Int64Array, ScalarArray, UInt16Array
+ from .typing import BoolArray, Float64Array, Int64Array, ScalarArray, UInt16Array

  DEFAULT_CELL_PROPERTY_NAMES = [
  "label",
@@ -59,7 +59,7 @@ def _process_mask(
  return label_image


- def _extract_outlines_cellpose(label_image: Int64Array) -> list[FloatArray]:
+ def _extract_outlines_cellpose(label_image: Int64Array) -> list[Float64Array]:
  """Extract cell outlines using Cellpose's outlines_list function.

  Args:
@@ -71,7 +71,7 @@ def _extract_outlines_cellpose(label_image: Int64Array) -> list[FloatArray]:
  return outlines_list(label_image, multiprocessing=False)


- def _extract_outlines_skimage(label_image: Int64Array) -> list[FloatArray]:
+ def _extract_outlines_skimage(label_image: Int64Array) -> list[Float64Array]:
  """Extract cell outlines using scikit-image's find_contours.

  Args:
@@ -181,7 +181,7 @@ class SegmentationMask:
  return int(self.label_image.max())

  @cached_property
- def cell_outlines(self) -> list[FloatArray]:
+ def cell_outlines(self) -> list[Float64Array]:
  """Extract cell outlines using the configured outline extractor.

  Returns:
@@ -253,7 +253,7 @@ class SegmentationMask:
  return properties

  @cached_property
- def centroids_yx(self) -> FloatArray:
+ def centroids_yx(self) -> Float64Array:
  """Get cell centroids as (y, x) coordinates.

  Returns:
@@ -5,7 +5,7 @@ from enum import Flag, auto
  from typing import TYPE_CHECKING

  from .channels import Channel
- from .typing import FloatArray
+ from .typing import Float64Array

  if TYPE_CHECKING:
  from dataclasses import Field
@@ -86,8 +86,8 @@ class AcquisitionSettings(DimensionValidatorMixin):
  exposure_time_ms: float
  zoom: float | None = None
  binning: str | None = None
- frame_intervals_ms: FloatArray | None = dimension_field(DimensionFlags.TIMELAPSE)
- wavelengths_nm: FloatArray | None = dimension_field(DimensionFlags.SPECTRAL)
+ frame_intervals_ms: Float64Array | None = dimension_field(DimensionFlags.TIMELAPSE)
+ wavelengths_nm: Float64Array | None = dimension_field(DimensionFlags.SPECTRAL)


  @dataclass
@@ -7,7 +7,7 @@ import numpy as np
  import torch
  from cellpose.models import CellposeModel

- from .typing import FloatArray, Int64Array
+ from .typing import Float64Array, Int64Array

  logger = logging.getLogger(__name__)

@@ -167,7 +167,7 @@ class SegmentationModel:

  def segment(
  self,
- intensities: FloatArray,
+ intensities: Float64Array,
  cell_diameter_px: float | None = None,
  flow_threshold: float | None = None,
  cellprob_threshold: float | None = None,
@@ -213,7 +213,7 @@ class SegmentationModel:

  def batch_segment(
  self,
- intensities_batch: Sequence[FloatArray],
+ intensities_batch: Sequence[Float64Array],
  cell_diameter_px: float | None = None,
  flow_threshold: float | None = None,
  cellprob_threshold: float | None = None,
@@ -16,7 +16,7 @@ from .metadata_structures import (
  PhysicalDimensions,
  )
  from .microscopy import ImageMetadata
- from .typing import FloatArray
+ from .typing import Float64Array


  def create_image_metadata_from_nd2(
@@ -247,7 +247,7 @@ class _NikonMetadataParser:
  return time_s * 1000 # Convert to ms for AcquisitionSettings
  return None

- def _parse_frame_intervals(self) -> FloatArray | None:
+ def _parse_frame_intervals(self) -> Float64Array | None:
  """Parse frame intervals from events metadata."""
  if self._nd2f.events():
  acquisition_start_times_s = [event["Time [s]"] for event in self._nd2f.events()]
@@ -1,9 +1,10 @@
  from __future__ import annotations
+ from typing import Literal

  import numpy as np
  import skimage as ski

- from .typing import FloatArray, ScalarArray
+ from .typing import BoolArray, Float64Array, ScalarArray


  def rescale_by_percentile(
@@ -18,11 +19,9 @@ def rescale_by_percentile(

  Args:
  intensities: Input image array.
- percentile_range:
- Tuple of (min, max) percentiles to use for intensity scaling.
+ percentile_range: Tuple of (min, max) percentiles to use for intensity scaling.
  Default is (0, 100) which uses the full intensity range.
- out_range:
- Tuple of (min, max) values for the output intensity range.
+ out_range: Tuple of (min, max) values for the output intensity range.
  Default is (0, 1) for float normalization.

  Returns:
@@ -60,7 +59,7 @@ def subtract_background_dog(
  low_sigma: float = 0.6,
  high_sigma: float = 16.0,
  percentile: float = 0,
- ) -> FloatArray:
+ ) -> Float64Array:
  """Subtract background from image using difference of Gaussians and percentile thresholding.

  Applies difference of Gaussians filter to enhance features and then estimates and subtracts
@@ -68,18 +67,15 @@ def subtract_background_dog(

  Args:
  intensities: Input image array.
- low_sigma:
- Standard deviation for the smaller Gaussian kernel. Controls fine detail enhancement.
- Default is 0.6.
- high_sigma:
- Standard deviation for the larger Gaussian kernel. Controls background estimation
- extent. Default is 16.
- percentile:
- Percentile of filtered image to use as background level (0-100).
+ low_sigma: Standard deviation for the smaller Gaussian kernel.
+ Controls fine detail enhancement. Default is 0.6.
+ high_sigma: Standard deviation for the larger Gaussian kernel.
+ Controls background estimation extent. Default is 16.
+ percentile: Percentile of filtered image to use as background level (0-100).
  Default is 0 (minimum value).

  Returns:
- FloatArray: Background-subtracted image with negative values clipped to zero.
+ Float64Array: Background-subtracted image with negative values clipped to zero.

  Notes:
  - For best results, low_sigma should be smaller than the smallest feature of interest,
@@ -134,3 +130,87 @@ def crop_to_center(
  top = (height - crop_height) // 2

  return intensities[..., top : top + crop_height, left : left + crop_width]
+
+
+ def apply_threshold(
+ intensities: ScalarArray,
+ method: Literal[
+ "otsu",
+ "li",
+ "yen",
+ "isodata",
+ "mean",
+ "minimum",
+ "triangle",
+ "local",
+ "niblack",
+ "sauvola",
+ ] = "otsu",
+ **kwargs,
+ ) -> BoolArray:
+ """Apply thresholding to convert grayscale image to binary using various methods.
+
+ Uses threshold calculation methods from skimage.filters to determine an optimal
+ threshold value, then applies it to create a binary image.
+
+ Args:
+ intensities: Input grayscale image array.
+ method: Thresholding method to use. Supported methods include:
+ - 'otsu': Otsu's method (default)
+ - 'li': Li's minimum cross entropy method
+ - 'yen': Yen's method
+ - 'isodata': ISODATA method
+ - 'mean': Mean-based threshold
+ - 'minimum': Minimum method
+ - 'triangle': Triangle algorithm
+ - 'local': Adaptive local threshold
+ - 'niblack': Niblack local threshold
+ - 'sauvola': Sauvola local threshold
+ **kwargs: Additional keyword arguments passed to the thresholding function.
+ For local methods (niblack, sauvola, local), common kwargs include:
+ - window_size: Size of the local neighborhood
+ - k: Parameter controlling threshold adjustment
+
+ Returns:
+ BoolArray: Binary image where pixels above threshold are True.
+
+ Raises:
+ ValueError: If the specified method is not supported.
+
+ Examples:
+ >>> binary = apply_threshold(image, method='otsu')
+ >>> binary = apply_threshold(image, method='sauvola', window_size=25)
+ """
+ # Map method names to skimage.filters threshold functions
+ threshold_methods = {
+ "otsu": ski.filters.threshold_otsu,
+ "li": ski.filters.threshold_li,
+ "yen": ski.filters.threshold_yen,
+ "isodata": ski.filters.threshold_isodata,
+ "mean": ski.filters.threshold_mean,
+ "minimum": ski.filters.threshold_minimum,
+ "triangle": ski.filters.threshold_triangle,
+ "local": ski.filters.threshold_local,
+ "niblack": ski.filters.threshold_niblack,
+ "sauvola": ski.filters.threshold_sauvola,
+ }
+
+ # Handle empty or constant images
+ if intensities.size == 0:
+ return np.zeros_like(intensities, dtype=bool)
+ if np.min(intensities) == np.max(intensities):
+ return np.zeros_like(intensities, dtype=bool)
+
+ method_lower = method.lower()
+ if method_lower not in threshold_methods:
+ raise ValueError(
+ f"Unsupported thresholding method: '{method}'. "
+ f"Supported methods: {', '.join(threshold_methods.keys())}"
+ )
+
+ threshold_func = threshold_methods[method_lower]
+
+ # Local methods (niblack, sauvola) return threshold array, others return scalar
+ threshold_value = threshold_func(intensities, **kwargs)
+
+ return intensities > threshold_value
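
A minimal usage sketch of the apply_threshold helper added above, assuming only numpy and scikit-image; the random image and variable names are illustrative, not from the package. The point is that the final comparison intensities > threshold_value works for both kinds of methods: global ones return a scalar threshold, local ones return a per-pixel threshold array.

import numpy as np
import skimage as ski

rng = np.random.default_rng(0)
img = rng.random((64, 64))  # illustrative sample image

# Global methods (e.g. Otsu) return a single scalar threshold.
t_global = ski.filters.threshold_otsu(img)
mask_global = img > t_global  # boolean mask, same shape as img

# Local methods return a per-pixel threshold array; the same elementwise
# comparison still yields a boolean mask.
t_local = ski.filters.threshold_local(img, block_size=35)
mask_local = img > t_local
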
@@ -54,10 +54,14 @@ class Pipeline:
  operations: List of ImageOperation instances to apply in sequence.
  copy: If True, creates a copy of the input array before processing. If False,
  operations are applied directly to the input. Default is False for performance.
+ preserve_dtype: If True, forces output to have the same dtype as input. If False,
+ allows dtype to change based on operations (e.g., uint16 -> float64 for
+ normalization). Default is True.
  """

  operations: list[ImageOperation]
  copy: bool = False
+ preserve_dtype: bool = True

  def __post_init__(self):
  """Validate the pipeline configuration."""
@@ -80,7 +84,10 @@ class Pipeline:
  Returns:
  ScalarArray: The processed image intensity array after applying all operations.
  """
- return self._apply_operations(intensities)
+ result = self._apply_operations(intensities)
+ if self.preserve_dtype and result.dtype != intensities.dtype:
+ return result.astype(intensities.dtype) # type: ignore
+ return result

  def __len__(self) -> int:
  """Return the number of operations in the pipeline."""
@@ -89,8 +96,13 @@ class Pipeline:
  def __repr__(self) -> str:
  """Create a string representation of the pipeline."""
  operations_repr = ", ".join(repr(operation) for operation in self.operations)
- copy_str = ", copy=True" if self.copy else ""
- return f"Pipeline([{operations_repr}]{copy_str})"
+ params = []
+ if self.copy:
+ params.append("copy=True")
+ if not self.preserve_dtype:
+ params.append("preserve_dtype=False")
+ params_str = f", {', '.join(params)}" if params else ""
+ return f"Pipeline([{operations_repr}]{params_str})"


  @dataclass
@@ -106,10 +118,13 @@ class PipelineParallelized:

  Attributes:
  operations: List of ImageOperation instances to apply in sequence.
- max_workers: Maximum number of worker threads for parallel processing. If None,
- ThreadPoolExecutor will use its default (typically number of CPU cores).
  copy: If True, creates a copy of each frame before processing. If False,
  operations are applied directly to each frame. Default is False for performance.
+ preserve_dtype: If True, forces output to have the same dtype as input. If False,
+ allows dtype to change based on operations (e.g., uint16 -> float64 for
+ normalization). Default is True.
+ max_workers: Maximum number of worker threads for parallel processing. If None,
+ ThreadPoolExecutor will use its default (typically number of CPU cores).

  Note:
  Uses thread-based parallelism, which is most effective for operations that release
@@ -118,8 +133,9 @@ class PipelineParallelized:
  """

  operations: list[ImageOperation]
- max_workers: int | None = None
  copy: bool = False
+ preserve_dtype: bool = True
+ max_workers: int | None = None

  def __post_init__(self):
  """Validate the pipeline configuration."""
@@ -145,7 +161,10 @@ class PipelineParallelized:
  with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
  processed = list(executor.map(self._apply_operations, intensities))

- return np.array(processed, dtype=intensities.dtype) # type: ignore
+ if self.preserve_dtype:
+ return np.array(processed, dtype=intensities.dtype) # type: ignore
+ else:
+ return np.array(processed) # type: ignore

  def __len__(self) -> int:
  """Return the number of operations in the pipeline."""
@@ -155,9 +174,11 @@ class PipelineParallelized:
  """Create a string representation of the pipeline."""
  operations_repr = ", ".join(repr(operation) for operation in self.operations)
  params = []
- if self.max_workers is not None:
- params.append(f"max_workers={self.max_workers}")
  if self.copy:
  params.append("copy=True")
+ if not self.preserve_dtype:
+ params.append("preserve_dtype=False")
+ if self.max_workers is not None:
+ params.append(f"max_workers={self.max_workers}")
  params_str = f", {', '.join(params)}" if params else ""
  return f"PipelineParallelized([{operations_repr}]{params_str})"
@@ -0,0 +1,278 @@
+ import numpy as np
+ import pytest
+
+ from arcadia_microscopy_tools.pipeline import ImageOperation, Pipeline, PipelineParallelized
+
+
+ # Simple test operations for testing
+ def double_intensity(intensities):
+ """Double all intensity values."""
+ return intensities * 2
+
+
+ def add_ten(intensities):
+ """Add 10 to all intensity values."""
+ return intensities + 10
+
+
+ def to_float_normalized(intensities):
+ """Convert to float and normalize to [0, 1]."""
+ return intensities.astype(float) / intensities.max()
+
+
+ def square_values(intensities):
+ """Square all values."""
+ return intensities**2
+
+
+ class TestImageOperation:
+ def test_create_operation_no_args(self):
+ op = ImageOperation(double_intensity)
+ assert op.method == double_intensity
+ assert op.args == ()
+ assert op.kwargs == {}
+
+ def test_create_operation_with_args(self):
+ op = ImageOperation(np.add, 5)
+ assert op.method == np.add
+ assert op.args == (5,)
+
+ def test_create_operation_with_kwargs(self):
+ op = ImageOperation(np.clip, a_min=0, a_max=100)
+ assert op.kwargs == {"a_min": 0, "a_max": 100}
+
+ def test_call_operation(self):
+ op = ImageOperation(double_intensity)
+ image = np.array([1, 2, 3])
+ result = op(image)
+ np.testing.assert_array_equal(result, [2, 4, 6])
+
+ def test_call_operation_with_args(self):
+ op = ImageOperation(np.add, 10)
+ image = np.array([1, 2, 3])
+ result = op(image)
+ np.testing.assert_array_equal(result, [11, 12, 13])
+
+ def test_repr(self):
+ op = ImageOperation(double_intensity)
+ assert "double_intensity" in repr(op)
+
+
+ class TestPipeline:
+ def test_create_pipeline(self):
+ ops = [ImageOperation(double_intensity), ImageOperation(add_ten)]
+ pipeline = Pipeline(operations=ops)
+ assert len(pipeline) == 2
+ assert pipeline.copy is False
+ assert pipeline.preserve_dtype is True
+
+ def test_create_pipeline_with_copy(self):
+ ops = [ImageOperation(double_intensity)]
+ pipeline = Pipeline(operations=ops, copy=True)
+ assert pipeline.copy is True
+
+ def test_create_pipeline_with_preserve_dtype_false(self):
+ ops = [ImageOperation(to_float_normalized)]
+ pipeline = Pipeline(operations=ops, preserve_dtype=False)
+ assert pipeline.preserve_dtype is False
+
+ def test_pipeline_requires_operations(self):
+ with pytest.raises(ValueError, match="at least one operation"):
+ Pipeline(operations=[])
+
+ def test_pipeline_single_operation(self):
+ pipeline = Pipeline(operations=[ImageOperation(double_intensity)])
+ image = np.array([1, 2, 3], dtype=np.uint16)
+ result = pipeline(image)
+ np.testing.assert_array_equal(result, [2, 4, 6])
+ assert result.dtype == np.uint16
+
+ def test_pipeline_multiple_operations(self):
+ pipeline = Pipeline(operations=[ImageOperation(double_intensity), ImageOperation(add_ten)])
+ image = np.array([1, 2, 3], dtype=np.uint16)
+ result = pipeline(image)
+ # First double: [2, 4, 6], then add 10: [12, 14, 16]
+ np.testing.assert_array_equal(result, [12, 14, 16])
+ assert result.dtype == np.uint16
+
+ def test_pipeline_preserve_dtype_default(self):
+ """Test that dtype is preserved by default when it changes."""
+ pipeline = Pipeline(operations=[ImageOperation(to_float_normalized)])
+ image = np.array([10, 20, 30], dtype=np.uint16)
+ result = pipeline(image)
+ # to_float_normalized returns float, but preserve_dtype=True should cast back
+ assert result.dtype == np.uint16
+
+ def test_pipeline_preserve_dtype_false(self):
+ """Test that dtype can change when preserve_dtype=False."""
+ pipeline = Pipeline(operations=[ImageOperation(to_float_normalized)], preserve_dtype=False)
+ image = np.array([10, 20, 30], dtype=np.uint16)
+ result = pipeline(image)
+ # Should return float
+ assert result.dtype in (np.float32, np.float64)
+ np.testing.assert_allclose(result, [1 / 3, 2 / 3, 1.0])
+
+ def test_pipeline_with_2d_image(self):
+ """Test pipeline with 2D image arrays."""
+ pipeline = Pipeline(operations=[ImageOperation(double_intensity)])
+ image = np.array([[1, 2], [3, 4]], dtype=np.uint16)
+ result = pipeline(image)
+ expected = np.array([[2, 4], [6, 8]], dtype=np.uint16)
+ np.testing.assert_array_equal(result, expected)
+
+
+ class TestPipelineParallelized:
+ def test_create_pipeline_parallelized(self):
+ ops = [ImageOperation(double_intensity)]
+ pipeline = PipelineParallelized(operations=ops)
+ assert len(pipeline) == 1
+ assert pipeline.max_workers is None
+ assert pipeline.copy is False
+ assert pipeline.preserve_dtype is True
+
+ def test_create_pipeline_with_max_workers(self):
+ ops = [ImageOperation(double_intensity)]
+ pipeline = PipelineParallelized(operations=ops, max_workers=4)
+ assert pipeline.max_workers == 4
+
+ def test_pipeline_parallelized_requires_operations(self):
+ with pytest.raises(ValueError, match="at least one operation"):
+ PipelineParallelized(operations=[])
+
+ def test_pipeline_parallelized_3d_array(self):
+ """Test parallel processing of 3D array (e.g., time series)."""
+ pipeline = PipelineParallelized(operations=[ImageOperation(double_intensity)])
+ # Create 3D array: (time, height, width)
+ image = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]], [[9, 10], [11, 12]]], dtype=np.uint16)
+ result = pipeline(image)
+ expected = image * 2
+ np.testing.assert_array_equal(result, expected)
+ assert result.dtype == np.uint16
+
+ def test_pipeline_parallelized_preserve_dtype_default(self):
+ """Test that dtype is preserved by default."""
+ pipeline = PipelineParallelized(operations=[ImageOperation(to_float_normalized)])
+ image = np.array([[[10, 20], [30, 40]]], dtype=np.uint16)
+ result = pipeline(image)
+ # Should preserve uint16 dtype by default
+ assert result.dtype == np.uint16
+
+ def test_pipeline_parallelized_preserve_dtype_false(self):
+ """Test that dtype can change when preserve_dtype=False."""
+ pipeline = PipelineParallelized(
+ operations=[ImageOperation(to_float_normalized)], preserve_dtype=False
+ )
+ image = np.array([[[10, 20], [30, 40]]], dtype=np.uint16)
+ result = pipeline(image)
+ # Should return float
+ assert result.dtype in (np.float32, np.float64)
+
+ def test_pipeline_parallelized_multiple_operations(self):
+ """Test multiple operations in parallel pipeline."""
+ pipeline = PipelineParallelized(
+ operations=[ImageOperation(double_intensity), ImageOperation(add_ten)]
+ )
+ image = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.uint16)
+ result = pipeline(image)
+ # First double, then add 10
+ expected = (image * 2) + 10
+ np.testing.assert_array_equal(result, expected)
+
+ def test_pipeline_parallelized_single_frame(self):
+ """Test with single frame (edge case)."""
+ pipeline = PipelineParallelized(operations=[ImageOperation(double_intensity)])
+ image = np.array([[[1, 2], [3, 4]]], dtype=np.uint16)
+ result = pipeline(image)
+ expected = image * 2
+ np.testing.assert_array_equal(result, expected)
+
+ def test_pipeline_parallelized_many_frames(self):
+ """Test with many frames to ensure parallelization works."""
+ pipeline = PipelineParallelized(
+ operations=[ImageOperation(double_intensity)], max_workers=2
+ )
+ # Create 10 frames
+ image = np.random.randint(0, 100, size=(10, 32, 32), dtype=np.uint16)
+ result = pipeline(image)
+ expected = image * 2
+ np.testing.assert_array_equal(result, expected)
+
+
+ class TestPipelineIntegration:
+ """Integration tests for realistic use cases."""
+
+ def test_normalization_workflow_preserve_dtype_false(self):
+ """Test a realistic normalization workflow for ML preprocessing."""
+ from arcadia_microscopy_tools.operations import rescale_by_percentile
+
+ # Simulate 16-bit microscopy images (3 frames)
+ image = np.random.randint(0, 65535, size=(3, 128, 128), dtype=np.uint16)
+
+ pipeline = PipelineParallelized(
+ operations=[
+ ImageOperation(
+ rescale_by_percentile,
+ percentile_range=(2, 98),
+ out_range=(0, 1),
+ )
+ ],
+ preserve_dtype=False,
+ )
+
+ result = pipeline(image)
+
+ # Should be normalized to [0, 1] float range
+ assert result.dtype in (np.float32, np.float64)
+ assert result.min() >= 0
+ assert result.max() <= 1
+
+ def test_normalization_workflow_preserve_dtype_true(self):
+ """Test normalization with dtype preservation (legacy behavior)."""
+ from arcadia_microscopy_tools.operations import rescale_by_percentile
+
+ # Simulate 16-bit microscopy images
+ image = np.random.randint(0, 65535, size=(3, 128, 128), dtype=np.uint16)
+
+ pipeline = PipelineParallelized(
+ operations=[
+ ImageOperation(
+ rescale_by_percentile,
+ percentile_range=(2, 98),
+ out_range=(0, 65535),
+ )
+ ],
+ preserve_dtype=True,
+ )
+
+ result = pipeline(image)
+
+ # Should stay as uint16
+ assert result.dtype == np.uint16
+
+ def test_background_subtraction_and_normalization(self):
+ """Test combining background subtraction with normalization."""
+ from arcadia_microscopy_tools.operations import (
+ rescale_by_percentile,
+ subtract_background_dog,
+ )
+
+ # Create test image with background
+ image = np.random.randint(100, 200, size=(2, 64, 64), dtype=np.uint16)
+
+ pipeline = PipelineParallelized(
+ operations=[
+ ImageOperation(subtract_background_dog, low_sigma=1, high_sigma=10),
+ ImageOperation(
+ rescale_by_percentile,
+ percentile_range=(1, 99),
+ out_range=(0, 1),
+ ),
+ ],
+ preserve_dtype=False,
+ )
+
+ result = pipeline(image)
+
+ # Should be float after processing
+ assert result.dtype in (np.float32, np.float64)
+ assert result.shape == image.shape
@@ -2,8 +2,9 @@ import numpy as np
  from numpy.typing import NDArray

  BoolArray = NDArray[np.bool_]
- FloatArray = NDArray[np.float64]
- Int64Array = NDArray[np.int64]
+ UByteArray = NDArray[np.uint8]
  UInt16Array = NDArray[np.uint16]
- UByteArray = NDArray[np.ubyte]
- ScalarArray = BoolArray | FloatArray | Int64Array | UInt16Array
+ Int64Array = NDArray[np.int64]
+ Float64Array = NDArray[np.float64]
+
+ ScalarArray = BoolArray | Float64Array | Int64Array | UInt16Array | UByteArray
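
The reorganized typing module above also widens ScalarArray to admit uint8 arrays. A small, hypothetical annotation sketch (count_saturated is illustrative, not part of the package):

import numpy as np

from arcadia_microscopy_tools.typing import BoolArray, ScalarArray


def count_saturated(intensities: ScalarArray, saturation_value: int = 255) -> int:
    """Count pixels at or above a saturation value; uint8 input now satisfies ScalarArray."""
    mask: BoolArray = intensities >= saturation_value
    return int(mask.sum())


count_saturated(np.zeros((8, 8), dtype=np.uint8))
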
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: arcadia-microscopy-tools
- Version: 0.2.3
+ Version: 0.2.4
  Summary: Python package for processing large-scale microscopy datasets generated by Arcadia's imaging suite
  License: MIT License

@@ -1,15 +1,15 @@
  arcadia_microscopy_tools/__init__.py,sha256=e6hSuo_4-fBlPEt6M9dMvNPH-HcDWboWSy6Vhp5_WzI,440
- arcadia_microscopy_tools/blending.py,sha256=mZNveRf7oNMbRt-Uxas6xCWZH8_U2pKP218j2Zo3cKc,7086
+ arcadia_microscopy_tools/blending.py,sha256=Y5xuius1tHRKIEfueUjZ7qGlBK02arEYqrzRRhEdSTI,7112
  arcadia_microscopy_tools/channels.py,sha256=sE54mJoJnFIMowO_qRG4lx-s_LOaVO10tuxpuVadJg8,6854
- arcadia_microscopy_tools/masks.py,sha256=Qlgy24jQkx3j09vuObRdgLuYy18PMJcv8XqiAqvvi1Q,15262
- arcadia_microscopy_tools/metadata_structures.py,sha256=fRi0nFwsfavW3YB_1QrpkfH-klp7GC84tCgoogXmMes,3475
+ arcadia_microscopy_tools/masks.py,sha256=Dot0hhJAE2O4HDlad5o6-z4FW646S_WRHT-Ua9MDxec,15272
+ arcadia_microscopy_tools/metadata_structures.py,sha256=Bb4UXgiNuJcOITNGV_4hGR09HaN8Wt7heET4bXmNcw0,3481
  arcadia_microscopy_tools/microplate.py,sha256=df6HTeQdYQRD7rYKubx8_FWOZ1BbJVoyg7lYySHJQOU,8298
  arcadia_microscopy_tools/microscopy.py,sha256=gPvMVKukGkBY74Ajy71JOd7DfZOoCEfiE40qkBW-C-I,11313
- arcadia_microscopy_tools/model.py,sha256=k4VL3OY2Uw_Oij-9ZJOAHaIbaFtOdtWNQVsE7nHKh_U,12482
- arcadia_microscopy_tools/nikon.py,sha256=JzvPH6IdLMY_txUyVqMu_ENVA7xeznWiijE4KWy6De8,10807
- arcadia_microscopy_tools/operations.py,sha256=3haiordlZ-vBZ8CdOIo-xzgANoNMI-DThon3gudfB3k,4968
- arcadia_microscopy_tools/pipeline.py,sha256=EHn7GYgtFJBA_yKopRXzXBteXu117zXxL6YBOGw4Xu0,6304
- arcadia_microscopy_tools/typing.py,sha256=8y9AWMfRK83sjkxlx2z7DdZOLHhWQ7vEShIwcyOPeOQ,275
+ arcadia_microscopy_tools/model.py,sha256=IfDmTE7zUW2xa0O1fA0kQd_SL7Ank0_p-tIx-_UYdII,12488
+ arcadia_microscopy_tools/nikon.py,sha256=RdkpN2M77h7_2Bb28nEou6m2MAEIjvwPvxxCHmWzf98,10811
+ arcadia_microscopy_tools/operations.py,sha256=lI1SazqOGzYdQtMR2aF3aKwhglul5XJz8qZwYMQwAJE,7939
+ arcadia_microscopy_tools/pipeline.py,sha256=uq0O7dwFLiGWMNAZw1rdTbWFRML0Zvk2as9cAvjVQAo,7338
+ arcadia_microscopy_tools/typing.py,sha256=cGGvVo4cuW7xtL5MUCBPMW9_8H5yB8GFL_N1xdgBo78,293
  arcadia_microscopy_tools/utils.py,sha256=W2x4q7pbPrVvZ0xZiW4BSyvJZ5bHDZ16am520_zA6rc,2970
  arcadia_microscopy_tools/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  arcadia_microscopy_tools/tests/conftest.py,sha256=NJh9D_k_rrtHdnUfWtsc5CDbqbFIhZhXgRcFb7Dqc78,1461
@@ -17,6 +17,7 @@ arcadia_microscopy_tools/tests/test_channels.py,sha256=ztrnknb9cZuIR6hwTmgQ5uP51
  arcadia_microscopy_tools/tests/test_microplate.py,sha256=3xVdv958IwH52XTThuqWuj5ZGrJrCfJqAY2Vi93IPPo,1838
  arcadia_microscopy_tools/tests/test_microscopy.py,sha256=nLiCG3sQ1huZJu2W11k9UCmSBCGDCXsXGb_9G4h1jAg,2062
  arcadia_microscopy_tools/tests/test_model.py,sha256=aVqAalWoyB_Ay03Y5ErLXIgKBqnUBorxflvllzBO4eQ,19743
+ arcadia_microscopy_tools/tests/test_pipeline.py,sha256=rlMPiRwOKGnL5uIlgTu0_oMtt6bAGLcBSmPNi654SxQ,10643
  arcadia_microscopy_tools/tests/data/README.md,sha256=TLV01Qo_QYTqIk351Q_9hO9NrR5FARnezKDZbI5oREc,1683
  arcadia_microscopy_tools/tests/data/example-cerevisiae.nd2,sha256=ymhS7GciOyD6XJqJvYF3iZRZtnEf-6n_z_MHjufIGoE,503808
  arcadia_microscopy_tools/tests/data/example-multichannel.nd2,sha256=_Ol47B_PK5GcF8ruBBvBElOa17nfLjeQsiYp0UUpkx0,876544
@@ -24,7 +25,7 @@ arcadia_microscopy_tools/tests/data/example-pbmc.nd2,sha256=gqVP7cGePBJk45xRpyXa
  arcadia_microscopy_tools/tests/data/example-timelapse.nd2,sha256=KHCubkVWmkRRmJabhfINx_aTwNi5nVUl-IiYNKQsJ9Y,827392
  arcadia_microscopy_tools/tests/data/example-zstack.nd2,sha256=j70DrFhRTwRgzAAJivVM3mCho05YVgsqJwTmPBobRYo,606208
  arcadia_microscopy_tools/tests/data/known-metadata.yml,sha256=_ZIE04MnoLpZtG-6e8ZytYnmAkGh0Q7-2AwSP3v6rQk,1886
- arcadia_microscopy_tools-0.2.3.dist-info/METADATA,sha256=X9XmEfOs5OHYyRhhsfAdtNmYQ3CbOQcQj_seQODVe6o,5007
- arcadia_microscopy_tools-0.2.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- arcadia_microscopy_tools-0.2.3.dist-info/licenses/LICENSE,sha256=5pPae5U0NNXysjBv3vjoquhhoCqTTi1Zh0SehM_IXHI,1072
- arcadia_microscopy_tools-0.2.3.dist-info/RECORD,,
+ arcadia_microscopy_tools-0.2.4.dist-info/METADATA,sha256=xFEXOcRN4v4pDmp8gK-iIkWhEL0v6zkwDT6ZiO1x6oA,5007
+ arcadia_microscopy_tools-0.2.4.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ arcadia_microscopy_tools-0.2.4.dist-info/licenses/LICENSE,sha256=5pPae5U0NNXysjBv3vjoquhhoCqTTi1Zh0SehM_IXHI,1072
+ arcadia_microscopy_tools-0.2.4.dist-info/RECORD,,