pymmcore-plus 0.15.4__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (35)
  1. pymmcore_plus/__init__.py +20 -1
  2. pymmcore_plus/_accumulator.py +23 -5
  3. pymmcore_plus/_cli.py +44 -26
  4. pymmcore_plus/_discovery.py +344 -0
  5. pymmcore_plus/_ipy_completion.py +1 -1
  6. pymmcore_plus/_logger.py +3 -3
  7. pymmcore_plus/_util.py +9 -245
  8. pymmcore_plus/core/_device.py +57 -13
  9. pymmcore_plus/core/_mmcore_plus.py +20 -23
  10. pymmcore_plus/core/_property.py +35 -29
  11. pymmcore_plus/core/_sequencing.py +2 -0
  12. pymmcore_plus/core/events/_device_signal_view.py +8 -1
  13. pymmcore_plus/experimental/simulate/__init__.py +88 -0
  14. pymmcore_plus/experimental/simulate/_objects.py +670 -0
  15. pymmcore_plus/experimental/simulate/_render.py +510 -0
  16. pymmcore_plus/experimental/simulate/_sample.py +156 -0
  17. pymmcore_plus/experimental/unicore/__init__.py +2 -0
  18. pymmcore_plus/experimental/unicore/_device_manager.py +46 -13
  19. pymmcore_plus/experimental/unicore/core/_config.py +706 -0
  20. pymmcore_plus/experimental/unicore/core/_unicore.py +834 -18
  21. pymmcore_plus/experimental/unicore/devices/_device_base.py +13 -0
  22. pymmcore_plus/experimental/unicore/devices/_hub.py +50 -0
  23. pymmcore_plus/experimental/unicore/devices/_stage.py +46 -1
  24. pymmcore_plus/experimental/unicore/devices/_state.py +6 -0
  25. pymmcore_plus/install.py +149 -18
  26. pymmcore_plus/mda/_engine.py +268 -73
  27. pymmcore_plus/mda/handlers/_5d_writer_base.py +16 -5
  28. pymmcore_plus/mda/handlers/_tensorstore_handler.py +7 -1
  29. pymmcore_plus/metadata/_ome.py +553 -0
  30. pymmcore_plus/metadata/functions.py +2 -1
  31. {pymmcore_plus-0.15.4.dist-info → pymmcore_plus-0.17.0.dist-info}/METADATA +7 -4
  32. {pymmcore_plus-0.15.4.dist-info → pymmcore_plus-0.17.0.dist-info}/RECORD +35 -27
  33. {pymmcore_plus-0.15.4.dist-info → pymmcore_plus-0.17.0.dist-info}/WHEEL +1 -1
  34. {pymmcore_plus-0.15.4.dist-info → pymmcore_plus-0.17.0.dist-info}/entry_points.txt +0 -0
  35. {pymmcore_plus-0.15.4.dist-info → pymmcore_plus-0.17.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,510 @@
1
+ """Rendering engine for simulated microscope samples.
2
+
3
+ Performance Notes
4
+ -----------------
5
+ The renderer uses several optimizations for speed:
6
+
7
+ 1. **Intensity grouping** (~5x speedup): Objects are grouped by intensity value
8
+ and drawn together on shared layers, reducing PIL Image allocations from
9
+ O(n_objects) to O(n_unique_intensities).
10
+
11
+ 2. **Optional OpenCV** (~25% speedup): When opencv-python is installed:
12
+ - Drawing primitives use cv2 functions (~20% faster than PIL)
13
+ - Gaussian blur uses cv2.GaussianBlur (~8x faster than PIL)
14
+ Install with: `pip install opencv-python`
15
+
16
+ Typical performance (512x512, 630 objects):
17
+ - Without opencv-python: ~18ms
18
+ - With opencv-python: ~14ms
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ from collections import defaultdict
24
+ from dataclasses import dataclass, field
25
+ from typing import TYPE_CHECKING, no_type_check
26
+
27
+ import numpy as np
28
+ from PIL import Image, ImageDraw, ImageFilter
29
+
30
+ from pymmcore_plus.core._constants import Keyword
31
+
32
+ if TYPE_CHECKING:
33
+ from typing_extensions import Literal, TypeAlias
34
+
35
+ from pymmcore_plus.metadata.schema import DeviceInfo, SummaryMetaV1
36
+
37
+ from ._objects import Bounds, SampleObject, TransformFn
38
+
39
+ Backend: TypeAlias = Literal["auto", "pil", "cv2"]
40
+
41
+ # OpenCV provides ~25% overall speedup when available:
42
+ # - cv2 drawing primitives are ~20% faster than PIL
43
+ # - cv2.GaussianBlur is ~8x faster than PIL.ImageFilter.GaussianBlur
44
+
45
+
46
+ @dataclass
47
+ class RenderConfig:
48
+ """Configuration for the rendering engine.
49
+
50
+ Parameters
51
+ ----------
52
+ photon_flux : float
53
+ Base photon flux in photons/pixel/second for intensity=255 objects.
54
+ Default 1000. Combined with exposure time to determine photon count.
55
+ shot_noise : bool
56
+ Whether to apply Poisson (shot) noise. Default True.
57
+ defocus_scale : float
58
+ Blur radius per unit of Z distance from focus. Default 0.125.
59
+ blur_radius = base_blur + abs(z) * defocus_scale
60
+ base_blur : float
61
+ Minimum blur radius (at perfect focus). Default 1.5.
62
+ random_seed : int | None
63
+ Random seed for reproducible noise. Default None (random).
64
+ backend : Backend
65
+ Rendering backend: "auto" (default), "pil", or "cv2".
66
+ "auto" uses cv2 if available, otherwise PIL.
67
+ "cv2" raises ImportError if opencv-python is not installed.
68
+ """
69
+
70
+ shot_noise: bool = True
71
+ defocus_scale: float = 0.125
72
+ base_blur: float = 1.5
73
+ random_seed: int | None = None
74
+ backend: Backend = "auto"
75
+
76
+
77
+ @dataclass
78
+ class RenderEngine:
79
+ """Engine for rendering sample objects based on microscope state.
80
+
81
+ The render engine takes a list of sample objects and renders them into
82
+ an image based on the current microscope state (stage position, exposure,
83
+ pixel size, etc.).
84
+
85
+ Parameters
86
+ ----------
87
+ objects : Sequence[SampleObject]
88
+ List of sample objects to render.
89
+ config : RenderConfig | None
90
+ Rendering configuration. If None, uses default config.
91
+
92
+ Examples
93
+ --------
94
+ >>> from pymmcore_plus.experimental.simulate import RenderEngine, Point, Line
95
+ >>> engine = RenderEngine(
96
+ ... [
97
+ ... Point(0, 0, intensity=200),
98
+ ... Line((0, 0), (100, 100), intensity=100),
99
+ ... ]
100
+ ... )
101
+ >>> state = core.state()
102
+ >>> image = engine.render(state)
103
+ """
104
+
105
+ objects: list[SampleObject]
106
+ config: RenderConfig = field(default_factory=RenderConfig)
107
+ _rng: np.random.Generator = field(default=None, repr=False) # type: ignore
108
+
109
+ def __post_init__(self) -> None:
110
+ """Initialize random number generator."""
111
+ self._rng = np.random.default_rng(self.config.random_seed)
112
+
113
+ def _should_use_cv2(self) -> bool:
114
+ """Determine whether to use cv2 backend based on config and availability."""
115
+ backend = self.config.backend
116
+ if backend == "pil":
117
+ return False
118
+ if backend in {"cv2", "auto"}:
119
+ try:
120
+ import cv2 # noqa: F401
121
+ except ImportError:
122
+ if backend == "cv2":
123
+ raise ImportError(
124
+ "opencv-python is required for backend='cv2'. "
125
+ "Install with: pip install opencv-python"
126
+ ) from None
127
+ else:
128
+ return True
129
+ return False
130
+
131
+ def _render_ground_truth(
132
+ self, props: ImageProps, stage_x: float, stage_y: float
133
+ ) -> np.ndarray:
134
+ # Sample pixel size: how many µm in the sample each pixel represents
135
+ # This accounts for both the physical sensor pixel size and magnification
136
+ sample_pixel = props.sample_pixel_size # pixel_size / magnification
137
+
138
+ # Compute field of view (FOV) rectangle in sample/world coordinates
139
+ # The FOV is centered on the stage position
140
+ fov_width = props.img_width * sample_pixel # width in µm
141
+ fov_height = props.img_height * sample_pixel # height in µm
142
+ left = stage_x - fov_width / 2
143
+ top = stage_y - fov_height / 2
144
+ fov_rect: Bounds = (left, top, left + fov_width, top + fov_height)
145
+
146
+ # Scale factor: how many pixels per µm (for scaling object sizes)
147
+ scale = 1.0 / sample_pixel
148
+
149
+ # Transform function: converts world coordinates (µm) to pixel coordinates
150
+ def transform(x: float, y: float) -> tuple[int, int]:
151
+ pixel_x = (x - left) / sample_pixel
152
+ pixel_y = (y - top) / sample_pixel
153
+ return int(pixel_x), int(pixel_y)
154
+
155
+ # Draw all objects onto the image canvas
156
+ # Intensity values (0-255) represent relative fluorophore density
157
+ # This is the "ideal" sample without any optical or noise effects
158
+ if self._should_use_cv2():
159
+ density = self._render_objects_cv2(
160
+ props.img_width, props.img_height, transform, scale, fov_rect
161
+ )
162
+ else:
163
+ density = self._render_objects_pil(
164
+ props.img_width, props.img_height, transform, scale, fov_rect
165
+ )
166
+ # density is now a float32 array with values typically 0-255
167
+ # (can exceed 255 if objects overlap)
168
+ return density
169
+
170
+ def render(self, state: SummaryMetaV1) -> np.ndarray:
171
+ """Render sample objects with physically realistic camera simulation.
172
+
173
+ Parameters
174
+ ----------
175
+ state : SummaryMetaV1
176
+ Current microscope state from `core.state()`.
177
+
178
+ Returns
179
+ -------
180
+ np.ndarray
181
+ Rendered image as uint8 (bit_depth <= 8) or uint16 (bit_depth > 8).
182
+ """
183
+ # Extract camera and optical properties from microscope state
184
+ props = img_props(state)
185
+ # Get current stage position in world coordinates (µm)
186
+ stage_x, stage_y, stage_z = _stage_position(state)
187
+
188
+ density = self._render_ground_truth(props, stage_x, stage_y)
189
+
190
+ # Scale density to get photon emission rate for each pixel
191
+ photon_flux = density * (props.photon_flux / 255.0) # photons/second
192
+
193
+ # This convolution preserves total flux (sum is conserved)
194
+ photon_flux = self._apply_defocus(photon_flux, stage_z)
195
+
196
+ # convert the gain setting (typically -5 to 8) into an analog gain multiplier, where 1.0 is unity
197
+ analog_gain = 2.0**props.gain
198
+ gray_values = simulate_camera(
199
+ photons_per_second=photon_flux,
200
+ exposure_ms=props.exposure_ms,
201
+ read_noise=props.read_noise,
202
+ ccd_binning=props.binning,
203
+ bit_depth=props.bit_depth,
204
+ offset=int(props.offset),
205
+ rnd=self._rng,
206
+ analog_gain=analog_gain,
207
+ qe=props.qe,
208
+ full_well=props.full_well_capacity,
209
+ add_poisson=self.config.shot_noise,
210
+ )
211
+ return gray_values
212
+
213
+ def _render_objects_cv2(
214
+ self,
215
+ width: int,
216
+ height: int,
217
+ transform: TransformFn,
218
+ scale: float,
219
+ fov_rect: Bounds,
220
+ ) -> np.ndarray:
221
+ """Render objects using OpenCV (faster)."""
222
+ # Group objects by intensity for batch drawing
223
+ intensity_groups: dict[int, list[SampleObject]] = defaultdict(list)
224
+ for obj in self.objects:
225
+ if obj.should_draw(fov_rect):
226
+ intensity_groups[obj.intensity].append(obj)
227
+
228
+ accumulator = np.zeros((height, width), dtype=np.float32)
229
+
230
+ for _intensity, objs in intensity_groups.items():
231
+ # Draw all objects with same intensity on one layer
232
+ layer = np.zeros((height, width), dtype=np.uint8)
233
+ for obj in objs:
234
+ obj.draw_cv2(layer, transform, scale)
235
+ accumulator += layer.astype(np.float32)
236
+
237
+ return accumulator
238
+
239
+ def _render_objects_pil(
240
+ self,
241
+ width: int,
242
+ height: int,
243
+ transform: TransformFn,
244
+ scale: float,
245
+ fov_rect: Bounds,
246
+ ) -> np.ndarray:
247
+ """Render objects using PIL, grouped by intensity for efficiency."""
248
+ # Group objects by intensity for batch drawing
249
+ intensity_groups: dict[int, list[SampleObject]] = defaultdict(list)
250
+ for obj in self.objects:
251
+ if obj.should_draw(fov_rect):
252
+ intensity_groups[obj.intensity].append(obj)
253
+
254
+ accumulator = np.zeros((height, width), dtype=np.float32)
255
+
256
+ for _intensity, objs in intensity_groups.items():
257
+ # Draw all objects with same intensity on one layer
258
+ layer = Image.new("L", (width, height), 0)
259
+ draw = ImageDraw.Draw(layer)
260
+ for obj in objs:
261
+ obj.draw(draw, transform, scale)
262
+ accumulator += np.asarray(layer, dtype=np.float32)
263
+
264
+ return accumulator
265
+
266
+ def _apply_defocus(self, arr: np.ndarray, z: float) -> np.ndarray:
267
+ """Apply defocus blur based on Z position."""
268
+ blur_radius = self.config.base_blur + abs(z) * self.config.defocus_scale
269
+ if blur_radius <= 0:
270
+ return arr
271
+
272
+ if self._should_use_cv2():
273
+ import cv2
274
+
275
+ ksize = int(blur_radius * 6) | 1 # kernel size must be odd
276
+ return cv2.GaussianBlur(arr, (ksize, ksize), blur_radius) # type: ignore [no-any-return]
277
+ else:
278
+ max_val = arr.max()
279
+ if max_val > 0:
280
+ normalized = (arr * 255.0 / max_val).astype(np.uint8)
281
+ else:
282
+ normalized = np.zeros_like(arr, dtype=np.uint8)
283
+ img = Image.fromarray(normalized)
284
+ blurred = img.filter(ImageFilter.GaussianBlur(radius=blur_radius))
285
+ result = np.asarray(blurred, dtype=np.float32)
286
+ if max_val > 0:
287
+ result = result * max_val / 255.0
288
+ return result
289
+
290
+
291
+ # -----------------------------------------------------------------------------
292
+ # Helper functions to extract state from SummaryMetaV1
293
+ # -----------------------------------------------------------------------------
294
+
295
+
296
+ def _stage_position(state: SummaryMetaV1) -> tuple[float, float, float]:
297
+ """Get stage position (x, y, z) from state."""
298
+ pos = state.get("position", {})
299
+ return (pos.get("x", 0.0), pos.get("y", 0.0), pos.get("z", 0.0))
300
+
301
+
302
+ def _get_core(state: SummaryMetaV1) -> DeviceInfo:
303
+ """Get Core device info from state."""
304
+ return next(
305
+ dev
306
+ for dev in state.get("devices", ())
307
+ if dev.get("label") == Keyword.CoreDevice
308
+ )
309
+
310
+
311
+ def _get_camera(state: SummaryMetaV1) -> DeviceInfo | None:
312
+ """Get Camera device info from state."""
313
+ core = _get_core(state)
314
+ for prop in core["properties"]:
315
+ if prop["name"] == "Camera":
316
+ camera_label = prop["value"]
317
+ break
318
+ else:
319
+ return None
320
+ return next((dev for dev in state["devices"] if dev["label"] == camera_label), None)
321
+
322
+
323
+ @dataclass
324
+ class ImageProps:
325
+ """Camera and optical properties extracted from device state.
326
+
327
+ These properties describe both the camera sensor characteristics and
328
+ the optical system configuration needed to simulate realistic images.
329
+
330
+ Attributes
331
+ ----------
332
+ exposure_ms : float
333
+ Exposure time in milliseconds. Default 10.0.
334
+ offset : float
335
+ Digital offset (bias) in gray levels (ADU). Default 100.
336
+ gain : float
337
+ Camera gain setting (raw value, typically -5 to 8). Default 0.0.
338
+ At gain=0 (unity), full_well_capacity electrons map to max gray value.
339
+ Positive gain = earlier saturation (more amplification).
340
+ Negative gain = later saturation (less amplification).
341
+ bit_depth : int
342
+ Camera bit depth. Default 16.
343
+ read_noise : float
344
+ Read noise in electrons RMS. Default 2.5.
345
+ img_width : int
346
+ Image width in pixels. Default 512.
347
+ img_height : int
348
+ Image height in pixels. Default 512.
349
+ binning : int
350
+ Pixel binning factor. Default 1.
351
+ photon_flux : float
352
+ Peak photon emission rate in photons/pixel/second for intensity=255
353
+ fluorophores. Default 1000.
354
+ pixel_size : float
355
+ Physical pixel size on the camera sensor in micrometers. Default 6.5.
356
+ magnification : float
357
+ Objective magnification. Default 20.0.
358
+ full_well_capacity : float
359
+ Full well capacity in electrons. Default 18000.
360
+ qe : float
361
+ Quantum efficiency (fraction of photons converted to electrons). Default 0.8.
362
+ """
363
+
364
+ exposure_ms: float = 10.0
365
+ offset: float = 100.0 # small baseline to capture read noise symmetry
366
+ gain: float = 0.0 # raw gain setting, NOT 2^gain
367
+ bit_depth: int = 16
368
+ read_noise: float = 2.5
369
+ img_width: int = 512
370
+ img_height: int = 512
371
+ binning: int = 1
372
+ photon_flux: float = 1000.0
373
+ pixel_size: float = 6.5 # physical sensor pixel size (µm)
374
+ magnification: float = 20.0
375
+ full_well_capacity: float = 18000.0
376
+ qe: float = 0.8
377
+
378
+ @property
379
+ def sample_pixel_size(self) -> float:
380
+ """Effective pixel size in sample space (µm/pixel)."""
381
+ return self.pixel_size / self.magnification
382
+
383
+
384
+ @no_type_check
385
+ def img_props(state: SummaryMetaV1) -> ImageProps:
386
+ """Extract camera and optical properties from device state.
387
+
388
+ Reads camera properties from the state dict and returns an ImageProps
389
+ dataclass with all relevant parameters for image simulation.
390
+ """
391
+ props: dict[str, float | int] = {}
392
+
393
+ # Get camera properties
394
+ if camera := _get_camera(state):
395
+ for prop in camera["properties"]:
396
+ name = prop["name"]
397
+ value = prop["value"]
398
+ if name == "Exposure":
399
+ props["exposure_ms"] = float(value)
400
+ elif name == "Offset":
401
+ props["offset"] = float(value)
402
+ elif name == "Gain":
403
+ # Store raw gain setting (not 2^gain)
404
+ # Unity gain (0) maps FWC to max gray value
405
+ props["gain"] = float(value)
406
+ elif name == "BitDepth":
407
+ props["bit_depth"] = int(value)
408
+ elif name == "ReadNoise (electrons)":
409
+ props["read_noise"] = float(value)
410
+ elif name == "OnCameraCCDXSize":
411
+ props["img_width"] = int(value)
412
+ elif name == "OnCameraCCDYSize":
413
+ props["img_height"] = int(value)
414
+ elif name == "Binning":
415
+ props["binning"] = int(value)
416
+ elif name == "Photon Flux":
417
+ # DemoCamera's Photon Flux is too low for realistic simulation
418
+ # It appears to model flux at the camera rather than emission from the sample.
419
+ # Scale up by 100x to get reasonable signal at typical exposures
420
+ props["photon_flux"] = float(value) * 100
421
+ elif name == "Full Well Capacity":
422
+ props["full_well_capacity"] = float(value)
423
+ elif name == "Quantum Efficiency":
424
+ props["qe"] = float(value)
425
+
426
+ if props.get("bit_depth") and props["bit_depth"] < 10:
427
+ props["offset"] = 10.0 # lower offset for low bit depth cameras
428
+
429
+ return ImageProps(**props)
430
+
431
+
432
+ def simulate_camera(
433
+ photons_per_second: np.ndarray,
434
+ exposure_ms: float,
435
+ read_noise: float,
436
+ bit_depth: int,
437
+ offset: int,
438
+ rnd: np.random.Generator,
439
+ analog_gain: float = 1.0,
440
+ em_gain: float = 1.0,
441
+ ccd_binning: int = 1,
442
+ qe: float = 1,
443
+ full_well: float = 18000,
444
+ serial_reg_full_well: float | None = None,
445
+ dark_current: float = 0.0,
446
+ add_poisson: bool = True,
447
+ ) -> np.ndarray:
448
+ if analog_gain <= 0:
449
+ raise ValueError("analog_gain must be positive")
450
+
451
+ # clamp photon counts to non-negative values
452
+ exposure_s = exposure_ms / 1000
453
+ incident_photons = np.maximum(photons_per_second * exposure_s, 0)
454
+
455
+ # combine signal and dark current into single poisson sample
456
+ detected_photons = incident_photons * qe
457
+ avg_dark_e = dark_current * exposure_s
458
+
459
+ if add_poisson:
460
+ # Single Poisson sample combining both sources
461
+ total_electrons: np.ndarray = rnd.poisson(detected_photons + avg_dark_e)
462
+ else:
463
+ # Just the mean values
464
+ total_electrons = detected_photons + avg_dark_e
465
+
466
+ # cap total electrons to full-well-capacity
467
+ total_electrons = np.minimum(total_electrons, full_well)
468
+
469
+ if (b := ccd_binning) > 1:
470
+ # Hardware binning: sum electrons from NxN blocks
471
+ # Reshape to create blocks
472
+ new_h = total_electrons.shape[0] // b
473
+ new_w = total_electrons.shape[1] // b
474
+ cropped = total_electrons[: new_h * b, : new_w * b]
475
+ # Sum over binning blocks
476
+ binned_electrons = cropped.reshape(new_h, b, new_w, b).sum(axis=(1, 3))
477
+ else:
478
+ binned_electrons = total_electrons
479
+
480
+ if em_gain > 1.0:
481
+ # Gamma distribution models the stochastic multiplication
482
+ # Only apply to pixels with signal
483
+ amplified = np.zeros_like(binned_electrons, dtype=float)
484
+ mask = binned_electrons > 0
485
+ amplified[mask] = rnd.gamma(shape=binned_electrons[mask], scale=em_gain)
486
+ binned_electrons = amplified
487
+
488
+ # cap total electrons to serial register full-well-capacity
489
+ if serial_reg_full_well is not None:
490
+ binned_electrons = np.minimum(binned_electrons, serial_reg_full_well)
491
+ effective_full_well = serial_reg_full_well
492
+ else:
493
+ effective_full_well = full_well * (ccd_binning**2)
494
+
495
+ # Add read noise (Gaussian, in electrons)
496
+ if read_noise > 0:
497
+ binned_electrons += rnd.normal(0, read_noise, size=binned_electrons.shape)
498
+
499
+ # unity gain is gain at which full well maps to max gray value
500
+ unity_gain = effective_full_well / (2**bit_depth - 1)
501
+ # actual gain considering analog gain setting. Final e-/ADU
502
+ actual_gain = unity_gain / analog_gain
503
+ # Convert to ADU with offset
504
+ adu = (binned_electrons / actual_gain) + offset
505
+ # Quantize/clip to bit depth
506
+ adu = np.clip(adu, 0, 2**bit_depth - 1)
507
+
508
+ # Final integer image
509
+ gray_values = np.round(adu).astype(np.uint16 if bit_depth > 8 else np.uint8)
510
+ return gray_values # type: ignore [no-any-return]
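
For reference, a minimal sketch of driving the `simulate_camera` function added above with a synthetic photon-flux map. It assumes the module is importable as `pymmcore_plus.experimental.simulate._render` (the file added in this release); the parameter values are illustrative, not package defaults.

```
# Sketch: feed a uniform photon-flux field through the camera model above.
# Assumes pymmcore_plus.experimental.simulate._render is importable as added
# in this release; the numbers below are illustrative, not package defaults.
import numpy as np

from pymmcore_plus.experimental.simulate._render import simulate_camera

rng = np.random.default_rng(seed=0)

# Uniform field emitting 1000 photons/pixel/second, 128x128 pixels.
photon_flux = np.full((128, 128), 1000.0, dtype=np.float32)

img = simulate_camera(
    photons_per_second=photon_flux,
    exposure_ms=50.0,     # 50 ms exposure -> ~50 photons/pixel on average
    read_noise=2.5,       # electrons RMS
    bit_depth=16,
    offset=100,           # baseline ADU offset
    rnd=rng,
    analog_gain=1.0,      # unity gain: full well maps to the max gray value
    qe=0.8,               # 80% of photons converted to electrons
    full_well=18000.0,
    add_poisson=True,     # apply shot noise
)

print(img.dtype, img.min(), img.max())  # uint16, values near offset + signal/gain
```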
@@ -0,0 +1,156 @@
1
+ """Sample simulation that integrates with CMMCorePlus."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from contextlib import ExitStack, contextmanager
6
+ from typing import TYPE_CHECKING
7
+ from unittest.mock import patch
8
+
9
+ from ._render import RenderConfig, RenderEngine
10
+
11
+ if TYPE_CHECKING:
12
+ from collections.abc import Generator, Iterable, Iterator
13
+ from typing import Any
14
+
15
+ import numpy as np
16
+
17
+ from pymmcore_plus import CMMCorePlus
18
+ from pymmcore_plus.metadata.schema import SummaryMetaV1
19
+
20
+ from ._objects import SampleObject
21
+
22
+
23
+ class Sample:
24
+ """A simulated microscope sample that integrates with CMMCorePlus.
25
+
26
+ Use `sample.patch(core)` as a context manager to intercept image acquisition
27
+ calls and return rendered images based on microscope state.
28
+
29
+ Parameters
30
+ ----------
31
+ objects : Iterable[SampleObject]
32
+ Sample objects to render (points, lines, shapes, etc.).
33
+ config : RenderConfig | None
34
+ Rendering configuration. If None, uses default config.
35
+ """
36
+
37
+ def __init__(
38
+ self, objects: Iterable[SampleObject], config: RenderConfig | None = None
39
+ ) -> None:
40
+ self._engine = RenderEngine(list(objects), config or RenderConfig())
41
+
42
+ # ------------- Object Management -------------
43
+
44
+ @property
45
+ def objects(self) -> list[SampleObject]:
46
+ """List of sample objects."""
47
+ return self._engine.objects
48
+
49
+ @property
50
+ def config(self) -> RenderConfig:
51
+ """Rendering configuration."""
52
+ return self._engine.config
53
+
54
+ # ------------- Rendering -------------
55
+
56
+ def render(self, state: SummaryMetaV1) -> np.ndarray:
57
+ """Render the sample for the given microscope state."""
58
+ return self._engine.render(state)
59
+
60
+ def __repr__(self) -> str:
61
+ return f"Sample({len(self.objects)} objects, config={self.config!r})"
62
+
63
+ # ------------- Patching -------------
64
+
65
+ @contextmanager
66
+ def patch(self, core: CMMCorePlus) -> Generator[Sample, None, None]:
67
+ """Patch the core to use this sample for image generation.
68
+
69
+ Parameters
70
+ ----------
71
+ core : CMMCorePlus
72
+ The core instance to patch.
73
+
74
+ Yields
75
+ ------
76
+ Sample
77
+ This sample instance.
78
+ """
79
+ patcher = CoreSamplePatcher(core, self)
80
+ with patch_with_object(core, patcher):
81
+ yield self
82
+
83
+
84
+ class CoreSamplePatcher:
85
+ def __init__(self, core: CMMCorePlus, sample: Sample) -> None:
86
+ self._core = core
87
+ self._sample = sample
88
+ self._snapped_state: SummaryMetaV1 | None = None
89
+ self._original_snapImage = core.snapImage
90
+
91
+ def snapImage(self) -> None:
92
+ """Capture state before calling original snapImage."""
93
+ self._snapped_state = self._core.state()
94
+ self._original_snapImage() # emit signals, etc.
95
+
96
+ def getImage(self, *_: Any, **__: Any) -> np.ndarray:
97
+ if not self._snapped_state:
98
+ raise RuntimeError(
99
+ "No snapped state available. Call snapImage() before getImage()."
100
+ )
101
+ return self._sample.render(self._snapped_state)
102
+
103
+ def getLastImage(self, *_: Any, **__: Any) -> np.ndarray:
104
+ """Return rendered image based on current state (for live mode)."""
105
+ return self._sample.render(self._core.state())
106
+
107
+
108
+ @contextmanager
109
+ def patch_with_object(target: Any, patch_object: Any) -> Iterator[Any]:
110
+ """
111
+ Patch methods on target object with methods from patch_object.
112
+
113
+ Parameters
114
+ ----------
115
+ target : Any
116
+ object to be patched
117
+ patch_object : Any
118
+ object containing replacement methods
119
+
120
+ Examples
121
+ --------
122
+ ```
123
+ class MyClass:
124
+ def foo(self):
125
+ return "original"
126
+
127
+ def bar(self):
128
+ return "original"
129
+
130
+
131
+ class Patch:
132
+ def foo(self):
133
+ return "patched"
134
+
135
+
136
+ obj = MyClass()
137
+ with patch_with_object(obj, Patch()):
138
+ assert obj.foo() == "patched"
139
+ assert obj.bar() == "original"
140
+ ```
141
+ """
142
+ with ExitStack() as stack:
143
+ # Get all methods from patch_object
144
+ patch_methods = {
145
+ name: getattr(patch_object, name)
146
+ for name in dir(patch_object)
147
+ if (not name.startswith("_") and callable(getattr(patch_object, name)))
148
+ }
149
+
150
+ # Patch each public method that also exists on the target
151
+ for method_name, method in patch_methods.items():
152
+ if hasattr(target, method_name):
153
+ # Use patch.object to do the actual patching
154
+ stack.enter_context(patch.object(target, method_name, method))
155
+
156
+ yield target
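
A minimal sketch of wiring the new `Sample` class into a `CMMCorePlus` instance. It assumes `Sample`, `Point`, and `Line` are re-exported from `pymmcore_plus.experimental.simulate` (the package `__init__.py` is also added in this release, but its exports are not shown in this diff); the object constructors follow the `RenderEngine` docstring example above.

```
# Sketch: patch a demo-configured core so snapped images come from the
# simulated sample. Assumes Sample, Point, and Line are re-exported from
# pymmcore_plus.experimental.simulate.
from pymmcore_plus import CMMCorePlus
from pymmcore_plus.experimental.simulate import Line, Point, Sample

core = CMMCorePlus.instance()
core.loadSystemConfiguration()  # demo configuration

sample = Sample(
    [
        Point(0, 0, intensity=200),
        Line((0, 0), (100, 100), intensity=100),
    ]
)

with sample.patch(core):
    core.snapImage()       # records the current microscope state
    img = core.getImage()  # returns the rendered simulated frame

print(img.shape, img.dtype)
```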
@@ -2,6 +2,7 @@ from .core._unicore import UniMMCore
2
2
  from .devices._camera import CameraDevice
3
3
  from .devices._device_base import Device
4
4
  from .devices._generic_device import GenericDevice
5
+ from .devices._hub import HubDevice
5
6
  from .devices._properties import PropertyInfo, pymm_property
6
7
  from .devices._shutter import ShutterDevice
7
8
  from .devices._slm import SLMDevice
@@ -12,6 +13,7 @@ __all__ = [
12
13
  "CameraDevice",
13
14
  "Device",
14
15
  "GenericDevice",
16
+ "HubDevice",
15
17
  "PropertyInfo",
16
18
  "SLMDevice",
17
19
  "ShutterDevice",