ome-arrow 0.0.2 (ome_arrow-0.0.2-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ome_arrow/meta.py ADDED
@@ -0,0 +1,90 @@
+ """
+ Meta-definition for OME-Arrow format.
+ """
+
+ import pyarrow as pa
+
+ from ._version import version as ome_arrow_version
+
+ OME_ARROW_TAG_TYPE = "ome.arrow"
+ OME_ARROW_TAG_VERSION = ome_arrow_version
+
+ # OME_ARROW_STRUCT: ome-arrow record (describes one image/value).
+ # - type/version: quick identity & evolution.
+ # - id/name/acquisition_datetime: identity & provenance.
+ # - pixels_meta: pixels struct (sizes, units, channels).
+ # - planes: list of planes struct entries, one per (t,c,z).
+ # - masks: reserved for future labels/ROIs (placeholder).
+ OME_ARROW_STRUCT: pa.StructType = pa.struct(
+     [
+         pa.field("type", pa.string()),  # must be "ome.arrow"
+         pa.field("version", pa.string()),  # e.g., "1.0.0"
+         pa.field("id", pa.string()),  # stable image identifier
+         pa.field("name", pa.string()),  # human label
+         pa.field("acquisition_datetime", pa.timestamp("us")),
+         # PIXELS: OME-like "Pixels" header summarizing shape & scale.
+         # - dimension_order: hint like "XYZCT" (or "XYCT" when Z==1).
+         # - type: numeric storage type (e.g., "uint16").
+         # - size_*: axis lengths.
+         # - physical_size_* (+ *_unit): microscope scale in micrometers.
+         # - channels: list of channel struct entries (one per channel).
+         pa.field(
+             "pixels_meta",
+             pa.struct(
+                 [
+                     pa.field("dimension_order", pa.string()),  # "XYZCT" / "XYCT"
+                     pa.field("type", pa.string()),  # "uint8","uint16","float",...
+                     pa.field("size_x", pa.int32()),  # width (pixels)
+                     pa.field("size_y", pa.int32()),  # height (pixels)
+                     pa.field("size_z", pa.int32()),  # z-slices (1 for 2D)
+                     pa.field("size_c", pa.int16()),  # channels
+                     pa.field("size_t", pa.int32()),  # time points
+                     pa.field("physical_size_x", pa.float32()),  # µm per pixel (X)
+                     pa.field("physical_size_y", pa.float32()),  # µm per pixel (Y)
+                     pa.field("physical_size_z", pa.float32()),  # µm per z-step
+                     pa.field("physical_size_x_unit", pa.string()),  # usually "µm"
+                     pa.field("physical_size_y_unit", pa.string()),
+                     pa.field("physical_size_z_unit", pa.string()),
+                     pa.field(
+                         "channels",
+                         pa.list_(
+                             # CHANNELS: one entry per channel (e.g., DNA, Mito, ER).
+                             # - emission_um / excitation_um: wavelengths (micrometers).
+                             # - illumination: modality (e.g., "Epifluorescence").
+                             # - color_rgba: preferred display color
+                             #   (packed 0xRRGGBBAA).
+                             pa.struct(
+                                 [
+                                     pa.field("id", pa.string()),
+                                     pa.field("name", pa.string()),
+                                     pa.field("emission_um", pa.float32()),
+                                     pa.field("excitation_um", pa.float32()),
+                                     pa.field("illumination", pa.string()),
+                                     pa.field("color_rgba", pa.uint32()),
+                                 ]
+                             )
+                         ),
+                     ),
+                 ]
+             ),
+         ),
+         # PLANES: one 2D image plane for a specific (t, c, z).
+         # - pixels: flattened numeric list (Y*X) for analysis-ready computation.
+         pa.field(
+             "planes",
+             pa.list_(
+                 pa.struct(
+                     [
+                         pa.field("z", pa.int32()),
+                         pa.field("t", pa.int32()),
+                         pa.field("c", pa.int16()),
+                         pa.field(
+                             "pixels", pa.list_(pa.uint16())
+                         ),  # keep numeric (not PNG/JPEG)
+                     ]
+                 )
+             ),
+         ),
+         pa.field("masks", pa.null()),  # reserved for future annotations
+     ]
+ )
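
For orientation, the sketch below (illustrative only, not part of the wheel) builds a tiny 2x2, single-plane record and checks it against OME_ARROW_STRUCT. The field values are invented, and it assumes pyarrow's usual behavior of filling omitted struct fields (e.g., acquisition_datetime, masks) with null.

import pyarrow as pa
from ome_arrow.meta import OME_ARROW_STRUCT, OME_ARROW_TAG_TYPE, OME_ARROW_TAG_VERSION

# Minimal, made-up record: one channel, one 2x2 plane at (t=0, c=0, z=0).
record = {
    "type": OME_ARROW_TAG_TYPE,
    "version": OME_ARROW_TAG_VERSION,
    "id": "image-0",
    "name": "tiny example",
    "pixels_meta": {
        "dimension_order": "XYCT",
        "type": "uint16",
        "size_x": 2,
        "size_y": 2,
        "size_z": 1,
        "size_c": 1,
        "size_t": 1,
        "physical_size_x": 0.65,
        "physical_size_y": 0.65,
        "physical_size_z": 1.0,
        "physical_size_x_unit": "µm",
        "physical_size_y_unit": "µm",
        "physical_size_z_unit": "µm",
        "channels": [{"id": "ch-0", "name": "DNA", "color_rgba": 0xFFFFFFFF}],
    },
    "planes": [{"t": 0, "c": 0, "z": 0, "pixels": [0, 1, 2, 3]}],
}

# Raises if the dictionary layout drifts from the schema above.
scalar = pa.scalar(record, type=OME_ARROW_STRUCT)

This conversion is the same check that verify_ome_arrow in ome_arrow/utils.py relies on.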
ome_arrow/transform.py ADDED
@@ -0,0 +1,182 @@
+ """
+ Module for transforming OME-Arrow data
+ (e.g., slices, projections, or other changes).
+ """
+
+ from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+ import numpy as np
+ import pyarrow as pa
+
+ from ome_arrow.meta import OME_ARROW_STRUCT
+
+
+ def slice_ome_arrow(
+     data: Dict[str, Any] | pa.StructScalar,
+     x_min: int,
+     x_max: int,
+     y_min: int,
+     y_max: int,
+     t_indices: Optional[Iterable[int]] = None,
+     c_indices: Optional[Iterable[int]] = None,
+     z_indices: Optional[Iterable[int]] = None,
+     fill_missing: bool = True,
+ ) -> pa.StructScalar:
+     """
+     Create a cropped copy of an OME-Arrow record.
+
+     Crops spatially to [y_min:y_max, x_min:x_max] (half-open) and, if provided,
+     filters/reindexes T/C/Z to the given index sets.
+
+     Parameters
+     ----------
+     data : dict | pa.StructScalar
+         OME-Arrow record.
+     x_min, x_max, y_min, y_max : int
+         Half-open crop bounds in pixels (0-based).
+     t_indices, c_indices, z_indices : Iterable[int] | None
+         Optional explicit indices to keep for T, C, Z. If None, keep all.
+         Selected indices are reindexed to 0..len-1 in the output.
+     fill_missing : bool
+         If True, any missing (t,c,z) planes in the selection are zero-filled.
+
+     Returns
+     -------
+     pa.StructScalar
+         New OME-Arrow record with updated sizes and planes.
+     """
+     # Unwrap to dict
+     row = data.as_py() if isinstance(data, pa.StructScalar) else dict(data)
+     pm = dict(row.get("pixels_meta", {}))
+
+     sx = int(pm.get("size_x", 1))
+     sy = int(pm.get("size_y", 1))
+     sz = int(pm.get("size_z", 1))
+     sc = int(pm.get("size_c", 1))
+     st = int(pm.get("size_t", 1))
+     if not (0 <= x_min < x_max <= sx and 0 <= y_min < y_max <= sy):
+         raise ValueError(
+             f"Crop bounds out of range: x[{x_min},{x_max}) within [0,{sx}), "
+             f"y[{y_min},{y_max}) within [0,{sy})."
+         )
+
+     # Normalize T/C/Z selections (keep all if None)
+     def _norm(sel: Optional[Iterable[int]], size: int) -> List[int]:
+         return (
+             list(range(size))
+             if sel is None
+             else sorted({int(i) for i in sel if 0 <= int(i) < size})
+         )
+
+     keep_t = _norm(t_indices, st)
+     keep_c = _norm(c_indices, sc)
+     keep_z = _norm(z_indices, sz)
+     if len(keep_t) == 0 or len(keep_c) == 0 or len(keep_z) == 0:
+         raise ValueError("Selection must keep at least one index in each of T/C/Z.")
+
+     # Reindex maps (old -> new)
+     t_map = {t: i for i, t in enumerate(keep_t)}
+     c_map = {c: i for i, c in enumerate(keep_c)}
+     z_map = {z: i for i, z in enumerate(keep_z)}
+
+     new_sx = x_max - x_min
+     new_sy = y_max - y_min
+     new_st = len(keep_t)
+     new_sc = len(keep_c)
+     new_sz = len(keep_z)
+
+     # Fast access to incoming planes
+     planes_in: List[Dict[str, Any]] = list(row.get("planes", []))
+     if not planes_in:
+         raise ValueError("Record contains no planes to slice.")
+
+     # Group incoming planes by (t,c,z)
+     by_tcz: Dict[Tuple[int, int, int], Dict[str, Any]] = {}
+     for p in planes_in:
+         tt = int(p["t"])
+         cc = int(p["c"])
+         zz = int(p["z"])
+         by_tcz[(tt, cc, zz)] = p
+
+     # Helper to crop one plane
+     expected_len = sx * sy
+
+     def _crop_pixels(flat: Iterable[int]) -> List[int]:
+         arr = np.asarray(flat)
+         if arr.size != expected_len:
+             # be strict: malformed plane
+             raise ValueError(f"Plane has {arr.size} pixels; expected {expected_len}.")
+         arr = arr.reshape(sy, sx)
+         sub = arr[y_min:y_max, x_min:x_max]
+         return sub.ravel().astype(arr.dtype, copy=False).tolist()
+
+     # Build new plane list in dense (t,c,z) order using selections
+     planes_out: List[Dict[str, Any]] = []
+     for tt in keep_t:
+         for cc in keep_c:
+             for zz in keep_z:
+                 src = by_tcz.get((tt, cc, zz))
+                 if src is None:
+                     if not fill_missing:
+                         continue
+                     # zero-fill missing plane
+                     planes_out.append(
+                         {
+                             "t": t_map[tt],
+                             "c": c_map[cc],
+                             "z": z_map[zz],
+                             "pixels": [0] * (new_sx * new_sy),
+                         }
+                     )
+                 else:
+                     cropped = _crop_pixels(src["pixels"])
+                     planes_out.append(
+                         {
+                             "t": t_map[tt],
+                             "c": c_map[cc],
+                             "z": z_map[zz],
+                             "pixels": cropped,
+                         }
+                     )
+
+     # Filter channel metadata to kept channels and reindex
+     channels_in = list(pm.get("channels", []) or [])
+     channels_out: List[Dict[str, Any]] = []
+     # If the channel metadata length mismatches size_c, synthesize minimal entries
+     if len(channels_in) != sc:
+         channels_in = [
+             {"id": f"ch-{i}", "name": f"C{i}", "color_rgba": 0xFFFFFFFF}
+             for i in range(sc)
+         ]
+     for old_c in keep_c:
+         meta = dict(channels_in[old_c])
+         meta["id"] = f"ch-{c_map[old_c]}"
+         # ensure the name is a string
+         if "name" in meta:
+             meta["name"] = str(meta["name"])
+         else:
+             meta["name"] = f"C{c_map[old_c]}"
+         channels_out.append(meta)
+
+     # Update pixels_meta
+     pm_out = dict(pm)
+     pm_out.update(
+         {
+             "size_x": new_sx,
+             "size_y": new_sy,
+             "size_z": new_sz,
+             "size_c": new_sc,
+             "size_t": new_st,
+             "channels": channels_out,
+         }
+     )
+
+     # If the dimension order encodes XYCT/XYZCT etc., keep it as-is (no axis permutation).
+     # (Optional: you could normalize to XYCT if new_sz == 1, else XYZCT.)
+
+     # Assemble new record
+     rec_out = dict(row)
+     rec_out["pixels_meta"] = pm_out
+     rec_out["planes"] = planes_out
+
+     return pa.scalar(rec_out, type=OME_ARROW_STRUCT)
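
As a usage sketch (illustrative only), the snippet below crops the `record` dict built in the meta.py sketch above to its left column while keeping channel 0; the expected values are shown in the comments.

from ome_arrow.transform import slice_ome_arrow

# Keep x in [0, 1), y in [0, 2); keep only channel 0 (all T and Z by default).
cropped = slice_ome_arrow(record, x_min=0, x_max=1, y_min=0, y_max=2, c_indices=[0])

print(cropped.as_py()["pixels_meta"]["size_x"])        # 1 (width after the crop)
print(len(cropped.as_py()["planes"][0]["pixels"]))     # 2 (new_sx * new_sy)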
ome_arrow/utils.py ADDED
@@ -0,0 +1,83 @@
+ """
+ Utility functions for ome-arrow.
+ """
+
+ from typing import Any, Dict
+
+ import pyarrow as pa
+
+
+ def verify_ome_arrow(data: Any, struct: pa.StructType) -> bool:
+     """Return True if `data` conforms to the given Arrow StructType.
+
+     This tries to convert `data` into a pyarrow scalar using `struct`
+     as the declared type. If conversion fails, the data does not match.
+
+     Args:
+         data: A nested Python dict/list structure to test.
+         struct: The expected pyarrow.StructType schema.
+
+     Returns:
+         bool: True if conversion succeeds, False otherwise.
+     """
+     try:
+         pa.scalar(data, type=struct)
+         return True
+     except (TypeError, pa.ArrowInvalid, pa.ArrowTypeError):
+         return False
+
+
+ def describe_ome_arrow(data: pa.StructScalar | dict) -> Dict[str, Any]:
+     """
+     Describe the structure of an OME-Arrow image record.
+
+     Reads `pixels_meta` from the OME-Arrow struct to report TCZYX
+     dimensions and classify whether it's a 2D image, 3D z-stack,
+     movie/timelapse, or 4D timelapse-volume. Also flags whether it is
+     multi-channel (C > 1) or single-channel.
+
+     Args:
+         data: OME-Arrow row as a pa.StructScalar or plain dict.
+
+     Returns:
+         dict with keys:
+             - shape: (T, C, Z, Y, X)
+             - type: classification string
+             - channels: number of channels (C)
+             - is_multichannel: True if C > 1
+             - summary: human-readable text
+     """
+     # --- Unwrap StructScalar if needed ---
+     if isinstance(data, pa.StructScalar):
+         data = data.as_py()
+
+     pm = data.get("pixels_meta", {})
+     t = int(pm.get("size_t", 1))
+     c = int(pm.get("size_c", 1))
+     z = int(pm.get("size_z", 1))
+     y = int(pm.get("size_y", 1))
+     x = int(pm.get("size_x", 1))
+
+     # --- Basic dimensional classification ---
+     if t == 1 and z == 1:
+         kind = "2D image"
+     elif t == 1 and z > 1:
+         kind = "3D image (z-stack)"
+     elif t > 1 and z == 1:
+         kind = "movie / timelapse"
+     elif t > 1 and z > 1:
+         kind = "4D timelapse-volume"
+     else:
+         kind = "unknown"
+
+     # --- Channel classification ---
+     channel_info = f"multi-channel ({c} channels)" if c > 1 else "single-channel"
+
+     # --- Summary ---
+     summary = f"{kind}, {channel_info} - shape (T={t}, C={c}, Z={z}, Y={y}, X={x})"
+
+     return {
+         "shape": (t, c, z, y, x),
+         "type": kind,
+         "channels": c,
+         "is_multichannel": c > 1,
+         "summary": summary,
+     }
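
A short sketch of how these helpers fit together (illustrative only; `record` is the dict from the meta.py sketch above).

from ome_arrow.meta import OME_ARROW_STRUCT
from ome_arrow.utils import describe_ome_arrow, verify_ome_arrow

assert verify_ome_arrow(record, OME_ARROW_STRUCT)
info = describe_ome_arrow(record)
print(info["summary"])  # "2D image, single-channel - shape (T=1, C=1, Z=1, Y=2, X=2)"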
ome_arrow/view.py ADDED
@@ -0,0 +1,286 @@
+ """
+ Viewing utilities for OME-Arrow data.
+ """
+
+ import contextlib
+
+ import matplotlib.pyplot as plt
+ import numpy as np
+ import pyarrow as pa
+ import pyvista as pv
+ from matplotlib.axes import Axes
+ from matplotlib.figure import Figure
+ from matplotlib.image import AxesImage
+
+
+ def view_matplotlib(
+     data: dict[str, object] | pa.StructScalar,
+     tcz: tuple[int, int, int] = (0, 0, 0),
+     autoscale: bool = True,
+     vmin: int | None = None,
+     vmax: int | None = None,
+     cmap: str = "gray",
+     show: bool = True,
+ ) -> tuple[Figure, Axes, AxesImage]:
+     """
+     Display a single 2D plane, selected by ``tcz = (t, c, z)``, from an
+     OME-Arrow record and return the matplotlib figure, axes, and image.
+     """
+     if isinstance(data, pa.StructScalar):
+         data = data.as_py()
+
+     pm = data["pixels_meta"]
+     sx, sy = int(pm["size_x"]), int(pm["size_y"])
+     t, c, z = (int(x) for x in tcz)
+
+     plane = next(
+         (
+             p
+             for p in data["planes"]
+             if int(p["t"]) == t and int(p["c"]) == c and int(p["z"]) == z
+         ),
+         None,
+     )
+     if plane is None:
+         raise ValueError(f"plane (t={t}, c={c}, z={z}) not found")
+
+     pix = plane["pixels"]
+     if len(pix) != sx * sy:
+         raise ValueError(f"pixels len {len(pix)} != size_x*size_y ({sx * sy})")
+
+     img = np.asarray(pix, dtype=np.uint16).reshape(sy, sx).copy()
+
+     if (vmin is None or vmax is None) and autoscale:
+         lo, hi = int(img.min()), int(img.max())
+         if hi == lo:
+             hi = lo + 1
+         vmin = lo if vmin is None else vmin
+         vmax = hi if vmax is None else vmax
+
+     fig, ax = plt.subplots()
+     im: AxesImage = ax.imshow(img, cmap=cmap, vmin=vmin, vmax=vmax)
+     ax.axis("off")
+
+     if show:
+         plt.show()
+
+     return fig, ax, im
+
+
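
A quick usage sketch for view_matplotlib (illustrative, not part of view.py): render the (t=0, c=0, z=0) plane of a conforming record without blocking, then save it. The `record` variable is the dict from the meta.py sketch above, and the output filename is an assumption for the example.

from ome_arrow.view import view_matplotlib

fig, ax, im = view_matplotlib(record, tcz=(0, 0, 0), cmap="magma", show=False)
fig.savefig("plane_t0_c0_z0.png", dpi=150, bbox_inches="tight")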
+ def view_pyvista(
+     data: dict | pa.StructScalar,
+     c: int = 0,
+     downsample: int = 1,
+     scaling_values: tuple[float, float, float] | None = None,  # (Z, Y, X)
+     opacity: str | float = "sigmoid",
+     clim: tuple[float, float] | None = None,
+     show_axes: bool = True,
+     backend: str = "auto",  # "auto" | "trame" | "html" | "static"
+     interpolation: str = "nearest",  # "nearest" or "linear"
+     background: str = "black",
+     percentile_clim: tuple[float, float] = (1.0, 99.9),  # robust contrast
+     sampling_scale: float = 0.5,  # smaller = denser rays (sharper, slower)
+     show: bool = True,
+ ) -> pv.Plotter:
+     """
+     Jupyter-inline interactive volume view using PyVista backends.
+     Tries 'trame' → 'html' → 'static' when backend='auto'.
+
+     sampling_scale controls the ray step via the mapper after add_volume.
+     """
+     import warnings
+
+     # ---- unwrap OME-Arrow row
+     row = data.as_py() if isinstance(data, pa.StructScalar) else data
+     pm = row["pixels_meta"]
+     sx, sy, sz = int(pm["size_x"]), int(pm["size_y"]), int(pm["size_z"])
+     sc, _st = int(pm["size_c"]), int(pm["size_t"])
+     if not (0 <= c < sc):
+         raise ValueError(f"Channel out of range: 0..{sc - 1}")
+
+     # ---- spacing (dx, dy, dz) in world units
+     dx = float(pm.get("physical_size_x", 1.0) or 1.0)
+     dy = float(pm.get("physical_size_y", 1.0) or 1.0)
+     dz = float(pm.get("physical_size_z", 1.0) or 1.0)
+
+     # optional override from legacy scaling tuple (Z, Y, X)
+     if scaling_values is None and "scaling_values" in pm:
+         try:
+             sz_legacy, sy_legacy, sx_legacy = pm["scaling_values"]
+             dz, dy, dx = float(sz_legacy), float(sy_legacy), float(sx_legacy)
+         except Exception:
+             pass
+     elif scaling_values is not None:
+         sz_legacy, sy_legacy, sx_legacy = scaling_values
+         dz, dy, dx = float(sz_legacy), float(sy_legacy), float(sx_legacy)
+
+     # ---- rebuild (Z, Y, X) for T=0, channel c
+     vol_zyx = np.zeros((sz, sy, sx), dtype=np.uint16)
+     for p in row["planes"]:
+         if int(p["t"]) == 0 and int(p["c"]) == c:
+             z = int(p["z"])
+             vol_zyx[z] = np.asarray(p["pixels"], dtype=np.uint16).reshape(sy, sx)
+
+     # optional downsampling (keep spacing consistent)
+     if downsample > 1:
+         vol_zyx = vol_zyx[::downsample, ::downsample, ::downsample]
+         dz, dy, dx = dz * downsample, dy * downsample, dx * downsample
+
+     # VTK expects (X, Y, Z) memory order
+     vol_xyz = vol_zyx.transpose(2, 1, 0)  # (nx, ny, nz)
+     nx, ny, nz = map(int, vol_xyz.shape)
+
+     # ---- contrast limits (robust percentiles, like napari)
+     if clim is None:
+         lo, hi = np.percentile(vol_xyz, percentile_clim)
+         lo = float(lo)
+         hi = float(hi if hi > lo else lo + 1.0)
+         clim = (lo, hi)
+
+     # ---- backend selection (fall back to 'static' if nothing else works)
+     def _try_backend(name: str) -> bool:
+         with warnings.catch_warnings():
+             warnings.filterwarnings(
+                 "ignore", message=".*notebook backend.*", category=UserWarning
+             )
+             try:
+                 pv.set_jupyter_backend(name)
+                 return True
+             except Exception:
+                 return False
+
+     if backend == "auto":
+         if not (_try_backend("trame") or _try_backend("html")):
+             _try_backend("static")
+     elif not _try_backend(backend):
+         _try_backend("static")
+
+     pv.OFF_SCREEN = False
+
+     # ---- build dataset
+     img = pv.ImageData()
+     img.dimensions = (nx, ny, nz)
+     img.spacing = (dx, dy, dz)
+     img.origin = (0.0, 0.0, 0.0)
+     img.point_data.clear()
+     img.point_data["scalars"] = np.asfortranarray(vol_xyz).ravel(order="F")
+
+     # Make "scalars" active across PyVista versions
+     try:
+         img.point_data.set_active_scalars("scalars")
+     except AttributeError:
+         try:
+             img.point_data.active_scalars_name = "scalars"
+         except Exception:
+             img.set_active_scalars("scalars")
+
+     # ---- render
+     pl = pv.Plotter()
+     pl.set_background(background)
+
+     # sensible opacity behavior relative to spacing
+     base_sample = max(min(dx, dy, dz), 1e-6)  # avoid zero
+
+     vol_actor = pl.add_volume(
+         img,
+         cmap="gray",  # napari-like
+         opacity=opacity,
+         clim=clim,
+         shade=False,  # microscopy usually unshaded
+         scalar_bar_args={"title": "intensity"},
+         opacity_unit_distance=base_sample,  # keep opacity consistent
+         # no sampling_distance kwarg here (set via mapper below)
+     )
+
+     # -- crispness & interpolation (version-safe)
+     try:
+         prop = getattr(vol_actor, "prop", None) or vol_actor.GetProperty()
+         # nearest vs linear sampling
+         if interpolation.lower().startswith("near"):
+             prop.SetInterpolationTypeToNearest()
+         else:
+             prop.SetInterpolationTypeToLinear()
+         # stop pre-map smoothing if available (big win for microscopy)
+         if hasattr(prop, "SetInterpolateScalarsBeforeMapping"):
+             prop.SetInterpolateScalarsBeforeMapping(False)
+         # also set scalar opacity unit distance in case the kwarg is unsupported
+         if hasattr(prop, "SetScalarOpacityUnitDistance"):
+             prop.SetScalarOpacityUnitDistance(base_sample)
+     except Exception:
+         pass
+
+     # -- ray sampling density via mapper (works across many VTK versions)
+     try:
+         mapper = getattr(vol_actor, "mapper", None) or vol_actor.GetMapper()
+         # lock sample distance if the API allows
+         if hasattr(mapper, "SetAutoAdjustSampleDistances"):
+             mapper.SetAutoAdjustSampleDistances(False)
+         if hasattr(mapper, "SetUseJittering"):
+             mapper.SetUseJittering(False)
+         if hasattr(mapper, "SetSampleDistance"):
+             mapper.SetSampleDistance(float(base_sample * sampling_scale))
+     except Exception:
+         pass
+
+     if show_axes:
+         pl.add_axes()
+
+     pl.show_bounds(
+         color="white",
+         grid=None,
+         location="outer",
+         ticks="both",
+         xtitle="X (µm)",
+         ytitle="Y (µm)",
+         ztitle="Z (µm)",
+     )
+
+     def _force_white_bounds(*_args: object, **_kwargs: object) -> None:
+         try:
+             ren = pl.renderer
+
+             # Modern cube-axes path
+             if getattr(ren, "cube_axes_actor", None):
+                 ca = ren.cube_axes_actor
+                 # axis line colors
+                 for prop in (
+                     ca.GetXAxesLinesProperty(),
+                     ca.GetYAxesLinesProperty(),
+                     ca.GetZAxesLinesProperty(),
+                 ):
+                     prop.SetColor(1, 1, 1)
+                 # titles and tick labels
+                 for i in (0, 1, 2):  # 0:X, 1:Y, 2:Z
+                     ca.GetTitleTextProperty(i).SetColor(1, 1, 1)
+                     ca.GetLabelTextProperty(i).SetColor(1, 1, 1)
+                 ca.Modified()
+
+             # Older/internal bounds actors
+             if getattr(ren, "_bounds_actors", None):
+                 for actor in ren._bounds_actors.values():
+                     actor.GetProperty().SetColor(1, 1, 1)
+                     actor.Modified()
+
+         except Exception:
+             pass
+
+     # run BEFORE drawing the frame so it's visible immediately
+     pl.ren_win.AddObserver("StartEvent", _force_white_bounds)
+
+     # keep the old safety net as well (optional)
+     pl.iren.add_observer("RenderEvent", _force_white_bounds)
+
+     def _recolor_and_render() -> None:
+         _force_white_bounds()
+         with contextlib.suppress(Exception):
+             pl.render()  # immediate redraw so you see the white bounds now
+
+     pl.add_key_event("r", _recolor_and_render)
+
+     if show:
+         pl.show()
+
+     return pl
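
And a usage sketch for view_pyvista (illustrative only): render channel 0 of a z-stack record in a notebook with a 2x downsample and explicit contrast limits. Here `zstack_record` is a hypothetical OME-Arrow row with size_z > 1, and the clim values are made up.

from ome_arrow.view import view_pyvista

plotter = view_pyvista(
    zstack_record,           # hypothetical OME-Arrow row with size_z > 1
    c=0,
    downsample=2,
    clim=(100.0, 4000.0),    # omit to use the percentile_clim default
    backend="auto",
)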
ome_arrow-0.0.2.dist-info/METADATA ADDED
@@ -0,0 +1,34 @@
+ Metadata-Version: 2.4
+ Name: ome-arrow
+ Version: 0.0.2
+ Summary: Using OME specifications with Apache Arrow for fast, queryable, and language agnostic bioimage data.
+ Author: Dave Bunten
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Programming Language :: Python :: 3.14
+ Requires-Python: >=3.11
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: bioio>=3
+ Requires-Dist: bioio-ome-tiff>=1.4
+ Requires-Dist: bioio-ome-zarr>=3.0.3
+ Requires-Dist: bioio-tifffile>=1.3
+ Requires-Dist: fire>=0.7
+ Requires-Dist: ipywidgets>=8.1.8
+ Requires-Dist: jupyterlab-widgets>=3.0.16
+ Requires-Dist: matplotlib>=3.10.7
+ Requires-Dist: numpy>=2.2.6
+ Requires-Dist: pandas>=2.2.3
+ Requires-Dist: pillow>=12
+ Requires-Dist: pyarrow>=22
+ Requires-Dist: pyvista>=0.46.4
+ Requires-Dist: trame>=3.12
+ Requires-Dist: trame-vtk>=2.10
+ Requires-Dist: trame-vuetify>=3.1
+ Dynamic: license-file
+
+ # ome-arrow
+
+ Using OME specifications with Apache Arrow for fast, queryable, and language agnostic bioimage data.
ome_arrow-0.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,14 @@
+ ome_arrow/__init__.py,sha256=DfQsw8l0mx1Qt3YiiMv2SUljKETP3wS5hrD5eBbjMDM,583
+ ome_arrow/_version.py,sha256=huLsL1iGeXWQKZ8bjwDdIWC7JOkj3wnzBh-HFMZl1PY,704
+ ome_arrow/core.py,sha256=NUCV9KUH3yCOlpetRS5NNVG_phodutE1F2ujDBPhHgY,18351
+ ome_arrow/export.py,sha256=CCTnEdHko4Z0i5LEHuNGFLznWSsPyAFcS42H5nHU22Q,14875
+ ome_arrow/ingest.py,sha256=zZz94LaLOpmoxnryLeoPsaWV0EzkYkGFizYSVcbd5w8,33016
+ ome_arrow/meta.py,sha256=qeD0e_ItAQyZDT7ypkBU0rBh9oHIu2ziz9MCfPpPp9g,4199
+ ome_arrow/transform.py,sha256=0275_Mn1mlGXSWJ86llch8JoJyvqEOfvG-ub1dUWFNI,5997
+ ome_arrow/utils.py,sha256=XHovcqmjqoiBpKvXY47-_yUwf07f8zVE_F9BR_VKaPU,2383
+ ome_arrow/view.py,sha256=DT8i56uV8Rw22KkqwjPPPKWJWNtfgR9OkI8Qj1WD8Ds,9355
+ ome_arrow-0.0.2.dist-info/licenses/LICENSE,sha256=9-2Pyhu3vTt2RJU8DorHQtHeNO_e5RLeFJTyOU4hOi4,1508
+ ome_arrow-0.0.2.dist-info/METADATA,sha256=XrGmDrHe-QMpEeDzUP4hVEmfh1P4Z8qyGMmGuMPWsfo,1164
+ ome_arrow-0.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ome_arrow-0.0.2.dist-info/top_level.txt,sha256=aWOtkGXo_pfU-yy82guzGhz8Zh2h2nFl8Kc5qdzMGuE,10
+ ome_arrow-0.0.2.dist-info/RECORD,,
ome_arrow-0.0.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+