senoquant 1.0.0b1__py3-none-any.whl → 1.0.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. senoquant/__init__.py +6 -2
  2. senoquant/_reader.py +1 -1
  3. senoquant/reader/core.py +201 -18
  4. senoquant/tabs/batch/backend.py +18 -3
  5. senoquant/tabs/batch/frontend.py +8 -4
  6. senoquant/tabs/quantification/features/marker/dialog.py +26 -6
  7. senoquant/tabs/quantification/features/marker/export.py +97 -24
  8. senoquant/tabs/quantification/features/marker/rows.py +2 -2
  9. senoquant/tabs/quantification/features/spots/dialog.py +41 -11
  10. senoquant/tabs/quantification/features/spots/export.py +163 -10
  11. senoquant/tabs/quantification/frontend.py +2 -2
  12. senoquant/tabs/segmentation/frontend.py +46 -9
  13. senoquant/tabs/segmentation/models/cpsam/model.py +1 -1
  14. senoquant/tabs/segmentation/models/default_2d/model.py +22 -77
  15. senoquant/tabs/segmentation/models/default_3d/model.py +8 -74
  16. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tools/create_zip_contents.py +0 -0
  17. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/probe.py +13 -13
  18. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/stardist_libs.py +171 -0
  19. senoquant/tabs/spots/frontend.py +42 -5
  20. senoquant/tabs/spots/models/ufish/details.json +17 -0
  21. senoquant/tabs/spots/models/ufish/model.py +129 -0
  22. senoquant/tabs/spots/ufish_utils/__init__.py +13 -0
  23. senoquant/tabs/spots/ufish_utils/core.py +357 -0
  24. senoquant/utils.py +1 -1
  25. senoquant-1.0.0b3.dist-info/METADATA +161 -0
  26. {senoquant-1.0.0b1.dist-info → senoquant-1.0.0b3.dist-info}/RECORD +41 -28
  27. {senoquant-1.0.0b1.dist-info → senoquant-1.0.0b3.dist-info}/top_level.txt +1 -0
  28. ufish/__init__.py +1 -0
  29. ufish/api.py +778 -0
  30. ufish/model/__init__.py +0 -0
  31. ufish/model/loss.py +62 -0
  32. ufish/model/network/__init__.py +0 -0
  33. ufish/model/network/spot_learn.py +50 -0
  34. ufish/model/network/ufish_net.py +204 -0
  35. ufish/model/train.py +175 -0
  36. ufish/utils/__init__.py +0 -0
  37. ufish/utils/img.py +418 -0
  38. ufish/utils/log.py +8 -0
  39. ufish/utils/spot_calling.py +115 -0
  40. senoquant/tabs/spots/models/rmp/details.json +0 -61
  41. senoquant/tabs/spots/models/rmp/model.py +0 -499
  42. senoquant/tabs/spots/models/udwt/details.json +0 -103
  43. senoquant/tabs/spots/models/udwt/model.py +0 -482
  44. senoquant-1.0.0b1.dist-info/METADATA +0 -193
  45. {senoquant-1.0.0b1.dist-info → senoquant-1.0.0b3.dist-info}/WHEEL +0 -0
  46. {senoquant-1.0.0b1.dist-info → senoquant-1.0.0b3.dist-info}/entry_points.txt +0 -0
  47. {senoquant-1.0.0b1.dist-info → senoquant-1.0.0b3.dist-info}/licenses/LICENSE +0 -0
@@ -1,482 +0,0 @@
1
- """
2
- UDWT (a trous) B3-spline wavelet spot detector.
3
-
4
- Implements the Olivo-Marin (2002) workflow: build a trous smoothing scales,
5
- compute wavelet planes, WAT-threshold per scale, multiply planes, and label
6
- connected components. Supports 2D, 3D, and per-slice 2D for 3D stacks.
7
-
8
- Algorithm (high level)
9
- ----------------------
10
- 1) Build smoothing scales A1..AJ with the dilated B3-spline kernel.
11
- 2) Form wavelet planes Wi = A_{i-1} - Ai.
12
- 3) Apply WAT thresholding per scale (with per-scale sensitivity).
13
- 4) Multiply thresholded planes to form the multiscale product PJ.
14
- 5) Threshold |PJ| with ld and label connected components.
15
- """
16
-
17
- from __future__ import annotations
18
-
19
- from dataclasses import dataclass
20
- from functools import lru_cache
21
- import numpy as np
22
- from scipy import ndimage as ndi
23
- from skimage.measure import label
24
-
25
- from ..base import SenoQuantSpotDetector
26
- from senoquant.utils import layer_data_asarray
27
-
28
-
29
- BASE_KERNEL = np.array(
30
- [1.0 / 16.0, 1.0 / 4.0, 3.0 / 8.0, 1.0 / 4.0, 1.0 / 16.0],
31
- dtype=np.float32,
32
- )
33
- """numpy.ndarray
34
- Base 1D B3-spline smoothing kernel (separable).
35
-
36
- Shape
37
- -----
38
- (5,)
39
- """
40
-
41
- MAX_SCALES = 5
42
- """int
43
- Maximum number of a trous scales.
44
- """
45
-
46
- EPS = 1e-12
47
- """float
48
- Small epsilon to avoid zero thresholds.
49
- """
50
-
51
-
52
- @dataclass(frozen=True)
53
- class _Params:
54
- """Internal parameter bundle.
55
-
56
- Parameters
57
- ----------
58
- num_scales : int
59
- Number of a trous scales (J), derived from enabled scales.
60
- ld : float
61
- Threshold on the multiscale product magnitude. Higher values reduce
62
- detections (more conservative).
63
- force_2d : bool
64
- For 3D inputs, apply the 2D detector per slice (ignore Z correlations).
65
- scale_enabled : tuple[bool, ...]
66
- Per-scale enable flags for scales 1..MAX_SCALES (non-contiguous ok).
67
- scale_sensitivity : tuple[float, ...]
68
- Per-scale sensitivity for scales 1..MAX_SCALES (higher is more permissive).
69
- """
70
-
71
- num_scales: int = 3
72
- ld: float = 1.0
73
- force_2d: bool = False
74
- scale_enabled: tuple[bool, ...] = (True, True, True)
75
- scale_sensitivity: tuple[float, ...] = (100.0, 100.0, 100.0)
76
-
77
-
78
- def _min_size(num_scales: int) -> int:
79
- """Minimum size per dimension for the requested number of scales.
80
-
81
- Parameters
82
- ----------
83
- num_scales : int
84
- Number of a trous scales (J).
85
-
86
- Returns
87
- -------
88
- int
89
- Minimum required size along each dimension.
90
- """
91
- # 5 + 4*2^(J-1)
92
- return 5 + (2 ** (num_scales - 1)) * 4
93
-
94
-
95
- def _ensure_min_size(shape: tuple[int, ...], num_scales: int) -> None:
96
- """Raise if any dimension is too small for the requested scales.
97
-
98
- Parameters
99
- ----------
100
- shape : tuple[int, ...]
101
- Image shape (2D or 3D).
102
- num_scales : int
103
- Number of a trous scales (J).
104
- """
105
- min_size = _min_size(num_scales)
106
- if any(dim < min_size for dim in shape):
107
- raise ValueError(
108
- f"UDWT à trous requires each dimension >= {min_size} "
109
- f"for {num_scales} scales (got shape={shape})."
110
- )
111
-
112
-
113
- @lru_cache(maxsize=None)
114
- def _b3_kernel(step: int) -> np.ndarray:
115
- """Return the dilated 1D B3-spline kernel for the given step.
116
-
117
- Parameters
118
- ----------
119
- step : int
120
- Step between non-zero taps (2**(j-1) for scale j).
121
-
122
- Returns
123
- -------
124
- numpy.ndarray
125
- 1D kernel of length 1 + 4*step (float32).
126
- """
127
- if step <= 0:
128
- raise ValueError("UDWT step must be positive.")
129
- if step == 1:
130
- return BASE_KERNEL
131
- kernel = np.zeros(1 + 4 * step, dtype=np.float32)
132
- kernel[::step] = BASE_KERNEL
133
- return kernel
134
-
135
-
136
- def _atrous_smoothing_scales(image: np.ndarray, J: int) -> list[np.ndarray]:
137
- """Compute a trous smoothing scales A1..AJ for a 2D/3D array.
138
-
139
- Parameters
140
- ----------
141
- image : numpy.ndarray
142
- Input 2D or 3D array.
143
- J : int
144
- Number of scales to compute.
145
-
146
- Returns
147
- -------
148
- list[numpy.ndarray]
149
- Smoothed arrays A1..AJ (float32), same shape as image.
150
- """
151
- scales: list[np.ndarray] = []
152
- current = image.astype(np.float32, copy=False)
153
-
154
- for j in range(1, J + 1):
155
- step = 2 ** (j - 1)
156
- kernel = _b3_kernel(step)
157
-
158
- filtered = current
159
- for axis in range(current.ndim):
160
- filtered = ndi.convolve1d(filtered, kernel, axis=axis, mode="mirror")
161
-
162
- scales.append(filtered.astype(np.float32, copy=False))
163
- current = filtered
164
-
165
- return scales
166
-
167
-
168
- def _wavelet_planes(A0: np.ndarray, scales: list[np.ndarray]) -> list[np.ndarray]:
169
- """Compute detail planes Wi = A_{i-1} - Ai.
170
-
171
- Parameters
172
- ----------
173
- A0 : numpy.ndarray
174
- Original image.
175
- scales : list[numpy.ndarray]
176
- Smoothing scales A1..AJ.
177
-
178
- Returns
179
- -------
180
- list[numpy.ndarray]
181
- Detail planes W1..WJ (float32), same shape as A0.
182
- """
183
- W: list[np.ndarray] = []
184
- prev = A0.astype(np.float32, copy=False)
185
- for Ai in scales:
186
- W.append((prev - Ai).astype(np.float32, copy=False))
187
- prev = Ai
188
- return W
189
-
190
-
191
- def _lambda_coeffs(num_scales: int, width: int, height: int) -> np.ndarray:
192
- """Compute WAT scale-dependent lambda coefficients."""
193
- lambdac = np.empty(num_scales + 2, dtype=np.float32)
194
- for i in range(num_scales + 2):
195
- denom = 1 << (2 * i)
196
- val = (width * height) / denom
197
- lambdac[i] = np.sqrt(2 * np.log(val)) if val > 0 else 0.0
198
- return lambdac
199
-
200
-
201
- def _mean_abs_dev(data: np.ndarray, axis=None, keepdims: bool = False) -> np.ndarray:
202
- """Mean absolute deviation from the mean."""
203
- mean = data.mean(axis=axis, keepdims=True)
204
- return np.mean(np.abs(data - mean), axis=axis, keepdims=keepdims)
205
-
206
-
207
- def _wat_threshold_inplace(
208
- Wi: np.ndarray, scale_index: int, lambdac: np.ndarray, sensitivity: float
209
- ) -> None:
210
- """Apply WAT thresholding in-place for a single scale."""
211
- dcoeff = max(sensitivity / 100.0, EPS)
212
- if Wi.ndim == 2:
213
- mad = _mean_abs_dev(Wi, axis=None, keepdims=False)
214
- coeff_thr = (lambdac[scale_index + 1] * mad) / dcoeff
215
- if coeff_thr <= EPS:
216
- return
217
- Wi[Wi < coeff_thr] = 0.0
218
- return
219
-
220
- mad = _mean_abs_dev(Wi, axis=(1, 2), keepdims=True)
221
- coeff_thr = (lambdac[scale_index + 1] * mad) / dcoeff
222
- Wi[Wi < coeff_thr] = 0.0
223
-
224
-
225
- def _multiscale_product(W_planes: list[np.ndarray]) -> np.ndarray:
226
- """Multiply thresholded planes to form the multiscale product image.
227
-
228
- Parameters
229
- ----------
230
- W_planes : list[numpy.ndarray]
231
- Thresholded wavelet planes, all the same shape.
232
-
233
- Returns
234
- -------
235
- numpy.ndarray
236
- Multiscale product image (float32).
237
- """
238
- if not W_planes:
239
- raise ValueError("W_planes must be a non-empty list.")
240
- P = np.ones_like(W_planes[0], dtype=np.float32)
241
- for Wi in W_planes:
242
- P *= Wi.astype(np.float32, copy=False)
243
- return P
244
-
245
-
246
- def _detect_from_product(P: np.ndarray, ld: float) -> np.ndarray:
247
- """Binary mask where |P| > ld.
248
-
249
- Parameters
250
- ----------
251
- P : numpy.ndarray
252
- Multiscale product image.
253
- ld : float
254
- Detection threshold on |P|.
255
-
256
- Returns
257
- -------
258
- numpy.ndarray
259
- Boolean mask of detections.
260
- """
261
- return np.abs(P) > ld
262
-
263
-
264
- def _detect_2d(image: np.ndarray, params: _Params) -> np.ndarray:
265
- """Detect spots in a 2D image and return labeled instances.
266
-
267
- Parameters
268
- ----------
269
- image : numpy.ndarray
270
- Input 2D image (Y, X).
271
- params : _Params
272
- Detector parameters.
273
-
274
- Returns
275
- -------
276
- numpy.ndarray
277
- Labeled instance mask (int32), background 0.
278
- """
279
- if params.scale_enabled:
280
- num_scales = max(
281
- (i + 1 for i, enabled in enumerate(params.scale_enabled) if enabled),
282
- default=1,
283
- )
284
- else:
285
- num_scales = max(1, params.num_scales)
286
-
287
- _ensure_min_size(image.shape, num_scales)
288
-
289
- scales = _atrous_smoothing_scales(image, num_scales)
290
- W = _wavelet_planes(image, scales)
291
- lambdac = _lambda_coeffs(num_scales, image.shape[1], image.shape[0])
292
-
293
- enabled_indices = [
294
- i for i, enabled in enumerate(params.scale_enabled) if enabled and i < len(W)
295
- ]
296
- if not enabled_indices:
297
- return np.zeros(image.shape, dtype=np.int32)
298
-
299
- enabled_planes: list[np.ndarray] = []
300
- for i in enabled_indices:
301
- sensitivity = (
302
- params.scale_sensitivity[i]
303
- if i < len(params.scale_sensitivity)
304
- else 100.0
305
- )
306
- _wat_threshold_inplace(W[i], i, lambdac, sensitivity)
307
- enabled_planes.append(W[i])
308
-
309
- P = _multiscale_product(enabled_planes)
310
- binary = _detect_from_product(P, params.ld)
311
- return label(binary, connectivity=2).astype(np.int32, copy=False)
312
-
313
-
314
- def _detect_2d_stack(stack: np.ndarray, params: _Params) -> np.ndarray:
315
- """Apply the 2D detector per Z-slice and keep labels unique.
316
-
317
- Parameters
318
- ----------
319
- stack : numpy.ndarray
320
- Input 3D stack (Z, Y, X).
321
- params : _Params
322
- Detector parameters.
323
-
324
- Returns
325
- -------
326
- numpy.ndarray
327
- Labeled mask for each slice (int32).
328
- """
329
- labels = np.zeros(stack.shape, dtype=np.int32)
330
- next_label = 1
331
-
332
- for z in range(stack.shape[0]):
333
- slice_labels = _detect_2d(stack[z], params)
334
- m = int(slice_labels.max())
335
- if m > 0:
336
- slice_labels = slice_labels + (next_label - 1)
337
- next_label += m
338
- labels[z] = slice_labels
339
-
340
- return labels
341
-
342
-
343
- def _detect_3d(volume: np.ndarray, params: _Params) -> np.ndarray:
344
- """Detect spots in a 3D volume using the direct 3D extension.
345
-
346
- Parameters
347
- ----------
348
- volume : numpy.ndarray
349
- Input 3D volume (Z, Y, X).
350
- params : _Params
351
- Detector parameters.
352
-
353
- Returns
354
- -------
355
- numpy.ndarray
356
- Labeled instance mask (int32).
357
- """
358
- if params.scale_enabled:
359
- num_scales = max(
360
- (i + 1 for i, enabled in enumerate(params.scale_enabled) if enabled),
361
- default=1,
362
- )
363
- else:
364
- num_scales = max(1, params.num_scales)
365
-
366
- _ensure_min_size(volume.shape, num_scales)
367
-
368
- scales = _atrous_smoothing_scales(volume, num_scales)
369
- W = _wavelet_planes(volume, scales)
370
- lambdac = _lambda_coeffs(num_scales, volume.shape[2], volume.shape[1])
371
-
372
- enabled_indices = [
373
- i for i, enabled in enumerate(params.scale_enabled) if enabled and i < len(W)
374
- ]
375
- if not enabled_indices:
376
- return np.zeros(volume.shape, dtype=np.int32)
377
-
378
- enabled_planes: list[np.ndarray] = []
379
- for i in enabled_indices:
380
- sensitivity = (
381
- params.scale_sensitivity[i]
382
- if i < len(params.scale_sensitivity)
383
- else 100.0
384
- )
385
- _wat_threshold_inplace(W[i], i, lambdac, sensitivity)
386
- enabled_planes.append(W[i])
387
-
388
- P = _multiscale_product(enabled_planes)
389
- binary = _detect_from_product(P, params.ld)
390
- return label(binary, connectivity=3).astype(np.int32, copy=False)
391
-
392
-
393
- class UDWTDetector(SenoQuantSpotDetector):
394
- """Undecimated B3-spline wavelet spot detector.
395
-
396
- Settings: ld, force_2d, scale_*_enabled, scale_*_sensitivity.
397
-
398
- Higher ld yields fewer detections (stricter thresholds).
399
- Higher sensitivity yields more detections on that scale.
400
- """
401
-
402
- def __init__(self, models_root=None) -> None:
403
- """Initialize the detector.
404
-
405
- Parameters
406
- ----------
407
- models_root : pathlib.Path or None, optional
408
- Root folder for detector resources (unused).
409
- """
410
- super().__init__("udwt", models_root=models_root)
411
-
412
- def run(self, **kwargs) -> dict:
413
- """Run the detector on a napari Image layer.
414
-
415
- Parameters
416
- ----------
417
- **kwargs
418
- layer : napari.layers.Image or None
419
- Image layer (single-channel).
420
- settings : dict, optional
421
- Keys:
422
- - ld (float): higher is stricter final detection threshold
423
- - force_2d (bool): for 3D, detect per slice (ignores Z context)
424
- - scale_N_enabled (bool): include scale N in the product
425
- - scale_N_sensitivity (float): higher is more permissive
426
-
427
- Returns
428
- -------
429
- dict
430
- Output with keys "mask" (labeled int32 or None) and "points" (None).
431
- """
432
- layer = kwargs.get("layer")
433
- if layer is None:
434
- return {"mask": None, "points": None}
435
-
436
- if getattr(layer, "rgb", False):
437
- raise ValueError("UDWT requires single-channel images (rgb=False).")
438
-
439
- settings = kwargs.get("settings", {}) or {}
440
-
441
- ld = float(settings.get("ld", 1.0))
442
- force_2d = bool(settings.get("force_2d", False))
443
-
444
- default_enabled = (True, True, True, False, False)
445
- enabled_all: list[bool] = []
446
- sensitivity_all: list[float] = []
447
- for i in range(MAX_SCALES):
448
- enabled_key = f"scale_{i + 1}_enabled"
449
- sens_key = f"scale_{i + 1}_sensitivity"
450
- enabled_val = bool(settings.get(enabled_key, default_enabled[i]))
451
- sens_val = float(settings.get(sens_key, 100.0))
452
- sens_val = max(1.0, min(100.0, sens_val))
453
- enabled_all.append(enabled_val)
454
- sensitivity_all.append(sens_val)
455
-
456
- if any(enabled_all):
457
- num_scales = max(i + 1 for i, enabled in enumerate(enabled_all) if enabled)
458
- else:
459
- num_scales = 1
460
-
461
- params = _Params(
462
- num_scales=num_scales,
463
- ld=ld,
464
- force_2d=force_2d,
465
- scale_enabled=tuple(enabled_all),
466
- scale_sensitivity=tuple(sensitivity_all),
467
- )
468
-
469
- data = layer_data_asarray(layer)
470
- if data.ndim not in (2, 3):
471
- raise ValueError("UDWT expects 2D images or 3D stacks.")
472
-
473
- image = np.asarray(data, dtype=np.float32)
474
-
475
- if image.ndim == 2:
476
- return {"mask": _detect_2d(image, params), "points": None}
477
-
478
- # 3D input
479
- if params.force_2d:
480
- return {"mask": _detect_2d_stack(image, params), "points": None}
481
-
482
- return {"mask": _detect_3d(image, params), "points": None}
@@ -1,193 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: senoquant
3
- Version: 1.0.0b1
4
- Summary: Napari plugin for spatial quantification of senescence markers in tissue imaging
5
- Author: SenoQuant Contributors
6
- Maintainer: SenoQuant Contributors
7
- License: BSD-3-Clause
8
- Project-URL: Homepage, https://github.com/HaamsRee/senoquant-dev
9
- Project-URL: Documentation, https://haamsree.github.io/senoquant-dev/
10
- Project-URL: Repository, https://github.com/HaamsRee/senoquant-dev
11
- Project-URL: Bug Tracker, https://github.com/HaamsRee/senoquant-dev/issues
12
- Keywords: napari,plugin,senescence,quantification,microscopy,image analysis,segmentation,spot detection
13
- Classifier: Development Status :: 4 - Beta
14
- Classifier: Framework :: napari
15
- Classifier: Intended Audience :: Science/Research
16
- Classifier: License :: OSI Approved :: BSD License
17
- Classifier: Operating System :: OS Independent
18
- Classifier: Programming Language :: Python :: 3
19
- Classifier: Programming Language :: Python :: 3.11
20
- Classifier: Programming Language :: Python :: 3.12
21
- Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
22
- Classifier: Topic :: Scientific/Engineering :: Image Processing
23
- Requires-Python: >=3.11
24
- Description-Content-Type: text/markdown
25
- License-File: LICENSE
26
- Requires-Dist: bioio>=3.2.0
27
- Requires-Dist: bioio-czi>=2.4.2
28
- Requires-Dist: bioio-dv>=1.2.0
29
- Requires-Dist: bioio-imageio>=1.3.0
30
- Requires-Dist: bioio-lif>=1.4.0
31
- Requires-Dist: bioio-nd2>=1.6.0
32
- Requires-Dist: bioio-ome-tiff>=1.4.0
33
- Requires-Dist: bioio-ome-zarr>=3.2.1
34
- Requires-Dist: bioio-sldy>=1.4.0
35
- Requires-Dist: bioio-tifffile>=1.3.0
36
- Requires-Dist: bioio-tiff-glob>=1.2.0
37
- Requires-Dist: numpy<=1.26.4,>=1.23
38
- Requires-Dist: cellpose==4.0.8
39
- Requires-Dist: onnx>=1.16
40
- Requires-Dist: onnxruntime>=1.16; platform_system == "Darwin"
41
- Requires-Dist: onnxruntime-gpu>=1.16; platform_system != "Darwin"
42
- Requires-Dist: openpyxl>=3.1
43
- Requires-Dist: huggingface_hub>=0.23.0
44
- Requires-Dist: scikit-image<0.25,>=0.22
45
- Requires-Dist: scipy>=1.8
46
- Requires-Dist: senoquant-stardist-ext>=0.1.0
47
- Requires-Dist: dask[array]>=2024.4
48
- Requires-Dist: dask[distributed]>=2024.4
49
- Provides-Extra: gpu
50
- Requires-Dist: cupy-cuda12x>=12.0; extra == "gpu"
51
- Requires-Dist: cucim>=23.4; extra == "gpu"
52
- Provides-Extra: all
53
- Requires-Dist: napari[all]; extra == "all"
54
- Requires-Dist: bioio>=3.2.0; extra == "all"
55
- Requires-Dist: bioio-czi>=2.4.2; extra == "all"
56
- Requires-Dist: bioio-dv>=1.2.0; extra == "all"
57
- Requires-Dist: bioio-imageio>=1.3.0; extra == "all"
58
- Requires-Dist: bioio-lif>=1.4.0; extra == "all"
59
- Requires-Dist: bioio-nd2>=1.6.0; extra == "all"
60
- Requires-Dist: bioio-ome-tiff>=1.4.0; extra == "all"
61
- Requires-Dist: bioio-ome-zarr>=3.2.1; extra == "all"
62
- Requires-Dist: bioio-sldy>=1.4.0; extra == "all"
63
- Requires-Dist: bioio-tifffile>=1.3.0; extra == "all"
64
- Requires-Dist: bioio-tiff-glob>=1.2.0; extra == "all"
65
- Requires-Dist: numpy<=1.26.4,>=1.23; extra == "all"
66
- Requires-Dist: cellpose==4.0.8; extra == "all"
67
- Requires-Dist: onnx>=1.16; extra == "all"
68
- Requires-Dist: onnxruntime>=1.16; platform_system == "Darwin" and extra == "all"
69
- Requires-Dist: onnxruntime-gpu>=1.16; platform_system != "Darwin" and extra == "all"
70
- Requires-Dist: openpyxl>=3.1; extra == "all"
71
- Requires-Dist: huggingface_hub>=0.23.0; extra == "all"
72
- Requires-Dist: scikit-image<0.25,>=0.22; extra == "all"
73
- Requires-Dist: scipy>=1.8; extra == "all"
74
- Requires-Dist: senoquant-stardist-ext>=0.1.0; extra == "all"
75
- Requires-Dist: dask[array]>=2024.4; extra == "all"
76
- Requires-Dist: dask[distributed]>=2024.4; extra == "all"
77
- Requires-Dist: cupy-cuda12x>=12.0; extra == "all"
78
- Requires-Dist: cucim>=23.4; extra == "all"
79
- Dynamic: license-file
80
-
81
- # SenoQuant
82
-
83
- ![tests](https://github.com/HaamsRee/senoquant-dev/actions/workflows/tests.yml/badge.svg)
84
- [![PyPI version](https://badge.fury.io/py/senoquant.svg)](https://badge.fury.io/py/senoquant)
85
- [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
86
- [![License](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)
87
-
88
- SenoQuant is a versatile Napari plugin designed for comprehensive, accurate,
89
- and unbiased spatial quantification and prediction of senescence markers
90
- across diverse tissue contexts.
91
-
92
- ## Features
93
-
94
- - **Multi-Model Segmentation**: Nuclear and cytoplasmic segmentation with 5 built-in models
95
- - StarDist ONNX (2D/3D)
96
- - Cellpose SAM
97
- - Morphological operations (dilation, perinuclear rings)
98
- - **Spot Detection**: Detect and quantify punctate senescence markers
99
- - Undecimated B3-spline wavelet (UDWT)
100
- - Rotational morphological processing (RMP)
101
- - **Quantification**: Extract intensity, morphology, and spot metrics
102
- - Per-cell marker intensities
103
- - Morphological descriptors
104
- - Spot counting and colocalization
105
- - **Batch Processing**: Automated analysis of entire image folders
106
- - Profile save/load for reproducibility
107
- - Multi-scene file support
108
- - **File Format Support**: Microscopy formats via BioIO
109
- - OME-TIFF, ND2, LIF, CZI, Zarr, and more
110
-
111
- ## Installation
112
-
113
- ### Prerequisites
114
-
115
- SenoQuant requires Python 3.11+ and napari:
116
-
117
- ```bash
118
- conda create -n senoquant python=3.11
119
- conda activate senoquant
120
- pip install "napari[all]"
121
- ```
122
-
123
- Or using `uv` (faster installer):
124
-
125
- ```bash
126
- conda create -n senoquant python=3.11
127
- conda activate senoquant
128
- pip install uv
129
- uv pip install "napari[all]"
130
- ```
131
-
132
- ### Install SenoQuant
133
-
134
- ```bash
135
- pip install senoquant
136
- ```
137
-
138
- Or with `uv`:
139
-
140
- ```bash
141
- uv pip install senoquant
142
- ```
143
-
144
- Model files are downloaded automatically on first use from Hugging Face.
145
- To override the model repository, set `SENOQUANT_MODEL_REPO` environment variable.
146
-
147
- For GPU acceleration (Windows/Linux with CUDA):
148
-
149
- ```bash
150
- pip install senoquant[gpu]
151
- ```
152
-
153
- **Note:** The first launch of napari and the SenoQuant plugin will be slower as napari initializes and SenoQuant downloads model files (~1.3 GB) from Hugging Face. Subsequent launches will be faster as models are cached locally.
154
-
155
- ## Quick Start
156
-
157
- 1. **Launch napari and open your image:**
158
- ```bash
159
- napari
160
- ```
161
- File → Open File(s)... → Select your image
162
-
163
- 2. **Open SenoQuant plugin:**
164
- Plugins → SenoQuant
165
-
166
- 3. **Run segmentation:**
167
- Segmentation tab → Select nuclear channel → Choose model → Run
168
-
169
- 4. **Detect spots (optional):**
170
- Spots tab → Select channel → Choose detector → Run
171
-
172
- 5. **Export quantification:**
173
- Quantification tab → Configure features → Export
174
-
175
- 6. **Batch process (optional):**
176
- Batch tab → Configure settings → Run Batch
177
-
178
- ## Documentation
179
-
180
- Full documentation is available at [https://haamsree.github.io/senoquant-dev/](https://haamsree.github.io/senoquant-dev/)
181
-
182
- - [Installation Guide](https://haamsree.github.io/senoquant-dev/user/installation/)
183
- - [Quick Start Tutorial](https://haamsree.github.io/senoquant-dev/user/quickstart/)
184
- - [Segmentation Models](https://haamsree.github.io/senoquant-dev/user/segmentation/)
185
- - [Spot Detection](https://haamsree.github.io/senoquant-dev/user/spots/)
186
- - [Quantification Features](https://haamsree.github.io/senoquant-dev/user/quantification/)
187
- - [Batch Processing](https://haamsree.github.io/senoquant-dev/user/batch/)
188
- - [API Reference](https://haamsree.github.io/senoquant-dev/api/)
189
-
190
- ## Development
191
-
192
- See the [Contributing Guide](https://haamsree.github.io/senoquant-dev/developer/contributing/) for development setup instructions.
193
-