senoquant 1.0.0b2__py3-none-any.whl → 1.0.0b3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. senoquant/__init__.py +6 -2
  2. senoquant/_reader.py +1 -1
  3. senoquant/reader/core.py +201 -18
  4. senoquant/tabs/batch/backend.py +18 -3
  5. senoquant/tabs/batch/frontend.py +8 -4
  6. senoquant/tabs/quantification/features/marker/dialog.py +26 -6
  7. senoquant/tabs/quantification/features/marker/export.py +97 -24
  8. senoquant/tabs/quantification/features/marker/rows.py +2 -2
  9. senoquant/tabs/quantification/features/spots/dialog.py +41 -11
  10. senoquant/tabs/quantification/features/spots/export.py +163 -10
  11. senoquant/tabs/quantification/frontend.py +2 -2
  12. senoquant/tabs/segmentation/frontend.py +46 -9
  13. senoquant/tabs/segmentation/models/cpsam/model.py +1 -1
  14. senoquant/tabs/segmentation/models/default_2d/model.py +22 -77
  15. senoquant/tabs/segmentation/models/default_3d/model.py +8 -74
  16. senoquant/tabs/segmentation/stardist_onnx_utils/_csbdeep/tools/create_zip_contents.py +0 -0
  17. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/inspect/probe.py +13 -13
  18. senoquant/tabs/segmentation/stardist_onnx_utils/onnx_framework/stardist_libs.py +171 -0
  19. senoquant/tabs/spots/frontend.py +42 -5
  20. senoquant/tabs/spots/models/ufish/details.json +17 -0
  21. senoquant/tabs/spots/models/ufish/model.py +129 -0
  22. senoquant/tabs/spots/ufish_utils/__init__.py +13 -0
  23. senoquant/tabs/spots/ufish_utils/core.py +357 -0
  24. senoquant/utils.py +1 -1
  25. senoquant-1.0.0b3.dist-info/METADATA +161 -0
  26. {senoquant-1.0.0b2.dist-info → senoquant-1.0.0b3.dist-info}/RECORD +41 -28
  27. {senoquant-1.0.0b2.dist-info → senoquant-1.0.0b3.dist-info}/top_level.txt +1 -0
  28. ufish/__init__.py +1 -0
  29. ufish/api.py +778 -0
  30. ufish/model/__init__.py +0 -0
  31. ufish/model/loss.py +62 -0
  32. ufish/model/network/__init__.py +0 -0
  33. ufish/model/network/spot_learn.py +50 -0
  34. ufish/model/network/ufish_net.py +204 -0
  35. ufish/model/train.py +175 -0
  36. ufish/utils/__init__.py +0 -0
  37. ufish/utils/img.py +418 -0
  38. ufish/utils/log.py +8 -0
  39. ufish/utils/spot_calling.py +115 -0
  40. senoquant/tabs/spots/models/rmp/details.json +0 -61
  41. senoquant/tabs/spots/models/rmp/model.py +0 -499
  42. senoquant/tabs/spots/models/udwt/details.json +0 -103
  43. senoquant/tabs/spots/models/udwt/model.py +0 -482
  44. senoquant-1.0.0b2.dist-info/METADATA +0 -193
  45. {senoquant-1.0.0b2.dist-info → senoquant-1.0.0b3.dist-info}/WHEEL +0 -0
  46. {senoquant-1.0.0b2.dist-info → senoquant-1.0.0b3.dist-info}/entry_points.txt +0 -0
  47. {senoquant-1.0.0b2.dist-info → senoquant-1.0.0b3.dist-info}/licenses/LICENSE +0 -0
ufish/utils/img.py ADDED
@@ -0,0 +1,418 @@
+ import typing as T
+ import numpy as np
+ import pandas as pd
+ from itertools import product
+ from os.path import isdir
+
+ from skimage.exposure import rescale_intensity
+
+
+ def scale_image(
+         img: np.ndarray,
+         big_quantile: float = 0.9999,
+         warning: bool = False,
+         ) -> np.ndarray:
+     """Scale an image to 0-255.
+     If the image has outlier values,
+     the image will be scaled to 0-big_value.
+
+     Args:
+         img: Image to scale.
+         big_quantile: Quantile to calculate the big value.
+         warning: Whether to print a warning message.
+     """
+     dtype = img.dtype
+     img = img.astype(np.float32)
+     if dtype is not np.uint8:
+         big_value = np.quantile(img, big_quantile)
+         if img_has_outlier(img, big_value):
+             if warning:
+                 from .log import logger
+                 logger.warning(
+                     'Image has outlier values. ')
+             in_range = (0, big_value)
+         else:
+             in_range = 'image'
+         img = rescale_intensity(
+             img,
+             in_range=in_range,
+             out_range=(0, 255),
+         )
+     return img
+
+
+ def img_has_outlier(
+         img: np.ndarray,
+         big_value: float,
+         ) -> bool:
+     """Check if an image has outlier values.
+     If the difference between the maximum value
+     and the big value is greater than the big value,
+     then the image has outlier values.
+
+     Args:
+         img: Image to check.
+         big_value: Value to compare with the maximum value.
+     """
+     max_value = np.max(img)
+     diff = max_value - big_value
+     if diff > big_value:
+         return True
+     else:
+         return False
+
+
+ def infer_img_axes(shape: tuple) -> str:
+     """Infer the axes of an image.
+
+     Args:
+         shape: Shape of the image.
+     """
+     if len(shape) == 2:
+         return 'yx'
+     elif len(shape) == 3:
+         min_dim_idx = shape.index(min(shape))
+         low_dim_shape = list(shape)
+         low_dim_shape.pop(min_dim_idx)
+         low_dim_axes = infer_img_axes(tuple(low_dim_shape))
+         return low_dim_axes[:min_dim_idx] + 'z' + low_dim_axes[min_dim_idx:]
+     elif len(shape) == 4:
+         min_dim_idx = shape.index(min(shape))
+         low_dim_shape = list(shape)
+         low_dim_shape.pop(min_dim_idx)
+         low_dim_axes = infer_img_axes(tuple(low_dim_shape))
+         return low_dim_axes[:min_dim_idx] + 'c' + low_dim_axes[min_dim_idx:]
+     elif len(shape) == 5:
+         low_dim_shape = infer_img_axes(shape[1:])
+         return 't' + low_dim_shape
+     else:
+         raise ValueError(
+             f'Image shape {shape} is not supported. ')
+
+
+ def check_img_axes(img: np.ndarray, axes: str):
+     """Check if the axes of an image is valid.
+
+     Args:
+         img: Image to check.
+         axes: Axes of the image.
+     """
+     if len(img.shape) != len(axes):
+         raise ValueError(
+             f'Axes {axes} does not match image shape {img.shape}. ')
+     if len(axes) < 2 or len(axes) > 5:
+         raise ValueError(
+             f'Axes {axes} is not supported. ')
+     if len(axes) != len(set(axes)):
+         raise ValueError(
+             f'Axes {axes} must be unique. ')
+     if 'y' not in axes:
+         raise ValueError(
+             f'Axes {axes} must contain y. ')
+     if 'x' not in axes:
+         raise ValueError(
+             f'Axes {axes} must contain x. ')
+
+
+ def expand_df_axes(
+         df: pd.DataFrame, axes: str,
+         axes_vals: T.Sequence[int],
+         ) -> pd.DataFrame:
+     """Expand the axes of a DataFrame."""
+     # insert new columns
+     for i, vals in enumerate(axes_vals):
+         df.insert(i, axes[i], vals)
+     df.columns = list(axes)
+     return df
+
+
+ def transform_axes(img, axes: str, orig_axes: T.Optional[str] = None):
+     """Re-order the axes of an image,
+     axes in order of 'tczyx'."""
+     if orig_axes is None:
+         new_axes = ''.join(sorted(axes, key=lambda x: 'tczyx'.index(x)))
+         img = np.moveaxis(
+             img, [axes.index(c) for c in new_axes], range(len(axes)))
+         return img, new_axes
+     else:
+         # recover the original axes
+         img = np.moveaxis(
+             img, [axes.index(c) for c in orig_axes], range(len(axes)))
+         return img, orig_axes
+
+
+ def map_predfunc_to_img(
+         predfunc: T.Callable[
+             [np.ndarray],
+             T.Tuple[pd.DataFrame, np.ndarray]
+         ],
+         img: np.ndarray,
+         axes: str,
+         is_transform_axes: bool = True,
+         ):
+     """Map a prediction function to an multi-dimensional image."""
+     from .log import logger
+     if is_transform_axes:
+         img, new_axes = transform_axes(img, axes)
+     else:
+         new_axes = axes
+     dfs = []
+     if (len(img.shape) == 2) or (new_axes == 'zyx'):
+         df, e_img = predfunc(img, axes=axes)
+         df = expand_df_axes(df, new_axes, [])
+         dfs.append(df)
+     else:
+         e_img = np.zeros_like(img, dtype=np.float32)
+         for i, sub_img in enumerate(img):
+             logger.info(
+                 f'Processing multi-dimensional image on axis {new_axes[0]}'
+                 f': {i+1}/{len(img)}')
+             df, e_img[i] = map_predfunc_to_img(
+                 predfunc, sub_img, new_axes[1:], False)
+             df = expand_df_axes(df, new_axes, [i])
+             dfs.append(df)
+     if is_transform_axes:
+         e_img, _ = transform_axes(e_img, new_axes, axes)
+     res_df = pd.concat(dfs, ignore_index=True)
+     # re-order columns
+     res_df = res_df[list(axes)]
+     res_df.columns = [f'axis-{i}' for i in range(len(axes))]
+     return res_df, e_img
+
+
+ def get_default_chunk_size(
+         axes: str,
+         default_x: T.Union[int, str] = 512,
+         default_y: T.Union[int, str] = 512,
+         default_z: T.Union[int, str] = 'image',
+         default_c: T.Union[int, str] = 'image',
+         default_t: T.Union[int, str] = 'image',
+         ) -> tuple:
+     """Get the default chunk size of an image.
+
+     Args:
+         img_shape: Shape of the image.
+         axes: Axes of the image.
+         default_x: Default chunk size for x axis.
+             'image' means the whole image.
+         default_y: Default chunk size for y axis.
+             'image' means the whole image.
+         default_z: Default chunk size for z axis.
+             'image' means the whole image.
+         default_c: Default chunk size for c axis.
+             'image' means the whole image.
+         default_t: Default chunk size for t axis.
+             'image' means the whole image.
+     """
+     default_sizes = {
+         'y': default_y,
+         'x': default_x,
+         'z': default_z,
+         'c': default_c,
+         't': default_t,
+     }
+     chunk_size = []
+
+     for c in axes:
+         if c in default_sizes:
+             chunk_size.append(default_sizes[c])
+         else:
+             raise ValueError(
+                 f'Axis {c} is not supported. ')
+     return tuple(chunk_size)
+
+
+ def process_chunk_size(
+         chunk_size: T.Tuple[T.Union[int, str], ...],
+         img_shape: T.Tuple[int, ...],
+         ) -> T.Tuple[int, ...]:
+     """Process the chunk size of an image.
+     If the chunk size is 'image', then the chunk size
+     will be the same as the image shape.
+
+     Args:
+         chunk_size: Chunk size of the image.
+         img_shape: Shape of the image.
+     """
+     assert len(chunk_size) == len(img_shape), \
+         "chunk_size and img_shape must have the same length"
+     new_chunk_size = []
+     for i, size in enumerate(chunk_size):
+         if size == 'image':
+             new_chunk_size.append(img_shape[i])
+         else:
+             new_chunk_size.append(size)
+     return tuple(new_chunk_size)
+
+
+ def get_chunks_range(
+         img_shape: tuple,
+         chunk_size: tuple,
+         ) -> T.List[T.List[T.List[int]]]:
+     """Get the ranges of each chunk.
+     For example, if the image shape is (100, 100)
+     and the chunk size is (50, 50), then the ranges
+     of the chunks are: [
+         [[0, 50], [0, 50]],
+         [[0, 50], [50, 100]],
+         [[50, 100], [0, 50]],
+         [[50, 100], [50, 100]],
+     ]
+
+     Args:
+         img_shape: Shape of the image.
+         chunk_size: Chunk size of the image.
+     """
+     ranges_each_dim = []
+     for dim_size, chunk_dim_size in zip(img_shape, chunk_size):
+         num_chunks = int(np.ceil(dim_size / chunk_dim_size))
+         dim_ranges = []
+         for j in range(num_chunks):
+             start = j * chunk_dim_size
+             end = min((j + 1) * chunk_dim_size, dim_size)
+             dim_ranges.append([start, end])
+         ranges_each_dim.append(dim_ranges)
+     chunk_ranges = list(product(*ranges_each_dim))
+     return chunk_ranges
+
+
+ def chunks_iterator(
+         original_img: np.ndarray,
+         chunk_size: tuple,
+         padding: bool = True,
+         ) -> T.Iterator[
+             T.Tuple[T.List[T.List[int]],
+                     np.ndarray]]:
+     """Iterate over chunks of an image.
+
+     Args:
+         original_img: Image to iterate over.
+         chunk_size: Chunk size of the image.
+     """
+     chunk_ranges = get_chunks_range(
+         original_img.shape, chunk_size)
+     for chunk_range in chunk_ranges:
+         chunk = original_img[
+             tuple(slice(*r) for r in chunk_range)]
+         if padding:
+             chunk = np.pad(
+                 chunk,
+                 [(0, chunk_size[i] - (r[1] - r[0]))
+                  for i, r in enumerate(chunk_range)],
+                 mode='constant',
+                 constant_values=0,
+             )
+         yield chunk_range, chunk
+
+
+ def enhance_blend_3d(
+         img: np.ndarray,
+         enh_func: T.Callable[[np.ndarray, int], np.ndarray],
+         axes: str,
+         batch_size: int = 4,
+         ) -> np.ndarray:
+     """Run enhancement along 3 directions and blend the results.
+
+     Args:
+         enh_func: Enhancement function.
+         img: Image to enhance.
+         axes: Axes of the image.
+         batch_size: Batch size for enhancement.
+     """
+     if axes != 'zyx':
+         # move z to the first axis
+         z_idx = axes.index('z')
+         img = np.moveaxis(img, z_idx, 0)
+     enh_z = enh_func(img, batch_size)
+     zimg_size = np.array(img.shape[1:]).prod()
+
+     img_y = np.moveaxis(img, 1, 0)
+     yimg_size = np.array(img_y.shape[1:]).prod()
+     factor_y = int(zimg_size / yimg_size)
+     bz_y = max(batch_size * factor_y, 1)
+     enh_y = enh_func(img_y, bz_y)
+     enh_y = np.moveaxis(enh_y, 0, 1)
+
+     img_x = np.moveaxis(img, 2, 0)
+     ximg_size = np.array(img_x.shape[1:]).prod()
+     factor_x = int(zimg_size / ximg_size)
+     bz_x = max(batch_size * factor_x, 1)
+     enh_x = enh_func(img_x, bz_x)
+     enh_x = np.moveaxis(enh_x, 0, 2)
+     enh_img = enh_z * enh_y * enh_x
+     return enh_img
+
+
+ def open_for_read(path: str):
+     from .ngff import is_ngff_suffix
+     if is_ngff_suffix(path) or isdir(path):
+         from .ngff import read_ngff
+         img = read_ngff(path)
+     elif path.endswith('.zarr'):
+         import zarr
+         img = zarr.open(path, 'r')
+     elif path.endswith('.n5'):
+         import zarr
+         store = zarr.N5Store(path)
+         img = zarr.open(store, 'r')
+     else:
+         from skimage.io import imread
+         img = imread(path)
+     return img
+
+
+ def open_for_write(
+         path: str, shape: tuple,
+         dtype=np.float32):
+     img = None
+     if path is not None:
+         from .ngff import is_ngff_suffix
+         if path.endswith('.zarr') or is_ngff_suffix(path):
+             import zarr
+             img = zarr.open(
+                 path, 'w', shape=shape, dtype=dtype)
+         elif path.endswith('.n5'):
+             import zarr
+             store = zarr.N5Store(path)
+             img = zarr.zeros(
+                 shape, dtype=dtype,
+                 store=store, overwrite=True)
+     return img
+
+
+ def open_enhimg_storage(enh_path: str, shape: tuple):
+     from .ngff import is_ngff_suffix
+     tmp_enh_path = None
+     if (enh_path is not None) and is_ngff_suffix(enh_path):
+         if enh_path.endswith("/"):
+             enh_path = enh_path[:-1]
+         tmp_enh_path = enh_path + '.tmp.zarr'
+         enhanced = open_for_write(tmp_enh_path, shape)
+     else:
+         enhanced = open_for_write(enh_path, shape)
+     return enhanced, tmp_enh_path
+
+
+ def save_enhimg(
+         enhanced, tmp_enh_path,
+         enh_path: str, axes: str):
+     from .ngff import is_ngff_suffix
+     from .log import logger
+     if is_ngff_suffix(enh_path):
+         logger.info("Saving enhanced image to ngff file.")
+         from .ngff import create_ngff, generate_omero_info
+         omero_info = generate_omero_info(data=enhanced, axes=axes)
+         create_ngff(
+             data=enhanced, out_path=enh_path,
+             axes=axes, omero_info=omero_info)
+         if tmp_enh_path is not None:
+             import shutil
+             shutil.rmtree(tmp_enh_path)
+     elif enh_path.endswith('.zarr'):
+         logger.info("Saving enhanced image to zarr file.")
+     elif enh_path.endswith('.n5'):
+         logger.info("Saving enhanced image to n5 file.")
+     else:
+         from skimage.io import imsave
+         imsave(enh_path, enhanced, check_contrast=False)
+     logger.info(f'Saved enhanced image to {enh_path}')
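A minimal usage sketch for the chunking helpers above (get_default_chunk_size, process_chunk_size, chunks_iterator), assuming the vendored module imports as ufish.utils.img; the toy array and sizes are illustrative only:

import numpy as np
from ufish.utils.img import (
    chunks_iterator, get_default_chunk_size, process_chunk_size)

img = np.random.rand(1024, 768).astype(np.float32)      # toy 2D image, axes 'yx'
chunk_size = get_default_chunk_size('yx')                # (512, 512) by default
chunk_size = process_chunk_size(chunk_size, img.shape)   # resolve any 'image' entries
for chunk_range, chunk in chunks_iterator(img, chunk_size, padding=True):
    # chunk_range holds ([y0, y1], [x0, x1]); padded chunks are always (512, 512)
    assert chunk.shape == chunk_size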
ufish/utils/log.py ADDED
@@ -0,0 +1,8 @@
+ """Logger shim for vendored UFish."""
+
+ try: # pragma: no cover - optional dependency
+     from loguru import logger # type: ignore[import-not-found] # noqa: F401
+ except Exception: # pragma: no cover - fallback logger
+     import logging
+
+     logger = logging.getLogger("ufish")
ufish/utils/spot_calling.py ADDED
@@ -0,0 +1,115 @@
+ """
+ Spots calling from the enhanced images.
+ """
+ import typing as T
+
+ import numpy as np
+ import pandas as pd
+ from skimage.filters import threshold_otsu, laplace
+ from skimage.measure import label, regionprops
+ from scipy import ndimage as ndi
+ from skimage.feature import peak_local_max
+ from skimage.morphology import local_maxima
+ from skimage.segmentation import watershed
+
+
+ def watershed_center(binary_image: np.ndarray) -> list:
+     """Segment the dense regions using watershed algorithm.
+     and return the centroids of the regions."""
+     distance = ndi.distance_transform_edt(binary_image)
+     ndim = binary_image.ndim
+     if ndim == 2:
+         coords = peak_local_max(
+             distance,
+             footprint=np.ones((4, 4)),
+             labels=binary_image)
+         mask = np.zeros(distance.shape, dtype=bool)
+         mask[tuple(coords.T)] = True
+     elif ndim == 3:
+         mask = local_maxima(distance)
+     else:
+         raise ValueError('Only 2D and 3D images are supported.')
+     markers, _ = ndi.label(mask)
+     labels = watershed(-distance, markers, mask=binary_image)
+     regions = regionprops(label(labels))
+     centroids = [r.centroid for r in regions]
+     return centroids
+
+
+ def call_spots_cc_center(
+         image: np.ndarray,
+         binary_threshold: T.Union[str, float] = 'otsu',
+         cc_size_threshold: int = 20,
+         output_dense_mark: bool = False,
+         ) -> pd.DataFrame:
+     """Call spots from the connected components' centroids.
+
+     Args:
+         image: The input image.
+         binary_threshold: The threshold for binarizing the image.
+         cc_size_threshold: The threshold for connected components' size.
+         output_dense_mark: Whether to output a column indicating whether
+             the spot is from a dense region.
+     """
+     ndim = image.ndim
+     if binary_threshold == 'otsu':
+         thresh = threshold_otsu(image)
+     else:
+         thresh = binary_threshold
+     binary_image = image > thresh
+     regions = regionprops(label(binary_image))
+     centroids_sparse = []
+     dense_regions = np.zeros_like(binary_image, dtype=np.uint8)
+     for region in regions:
+         if region.area > cc_size_threshold:
+             coords = region.coords
+             if ndim == 2:
+                 dense_regions[coords[:, 0], coords[:, 1]] = 1
+             elif ndim == 3:
+                 dense_regions[coords[:, 0], coords[:, 1], coords[:, 2]] = 1
+             else:
+                 raise ValueError('Only 2D and 3D images are supported.')
+         else:
+             centroids_sparse.append(region.centroid)
+     centroids_dense = watershed_center(dense_regions)
+     all_centroids = centroids_sparse + centroids_dense
+     all_centroids = np.array(all_centroids) # type: ignore
+     columns = [f'axis-{i}' for i in range(ndim)]
+     if all_centroids.shape[0] == 0:
+         df = pd.DataFrame(columns=columns)
+     else:
+         df = pd.DataFrame(
+             all_centroids,
+             columns=columns
+         )
+     if output_dense_mark:
+         dense_mark = np.zeros(len(all_centroids), dtype=bool)
+         dense_mark[len(centroids_sparse):] = True
+         df['is_dense'] = dense_mark
+     return df
+
+
+ def call_spots_local_maxima(
+         enhanced_img: np.ndarray,
+         connectivity: int = 2,
+         intensity_threshold: float = 0.5,
+         laplace_process: bool = True,
+         ) -> pd.DataFrame:
+     """Call spots by finding the local maxima.
+
+     Args:
+         enhanced_img: The enhanced image.
+         connectivity: The connectivity for the local maxima.
+         intensity_threshold: The threshold for the intensity.
+
+     Returns:
+         A pandas dataframe containing the spots.
+     """
+     if laplace_process:
+         enhanced_img = laplace(enhanced_img)
+     mask = local_maxima(enhanced_img, connectivity=connectivity)
+     mask = mask & (enhanced_img > intensity_threshold)
+     peaks = np.array(np.where(mask)).T
+     df = pd.DataFrame(
+         peaks, columns=[f'axis-{i}' for i in range(mask.ndim)])
+     return df
senoquant/tabs/spots/models/rmp/details.json DELETED
@@ -1,61 +0,0 @@
- {
-   "name": "rmp",
-   "description": "Placeholder details for the rmp spot detector",
-   "version": "0.1.0",
-   "order": 2,
-   "settings": [
-     {
-       "key": "denoising_kernel_length",
-       "label": "Denoising kernel length",
-       "type": "int",
-       "min": 2,
-       "max": 9999,
-       "default": 2,
-       "enabled_by": "enable_denoising"
-     },
-     {
-       "key": "extraction_kernel_length",
-       "label": "Extraction kernel length",
-       "type": "int",
-       "min": 1,
-       "max": 9999,
-       "default": 10
-     },
-     {
-       "key": "angle_spacing",
-       "label": "Angle spacing",
-       "type": "int",
-       "min": 1,
-       "max": 10,
-       "default": 5
-     },
-     {
-       "key": "manual_threshold",
-       "label": "Manual threshold",
-       "type": "float",
-       "decimals": 2,
-       "min": 0.0,
-       "max": 1.0,
-       "default": 0.05,
-       "disabled_by": "auto_threshold"
-     },
-     {
-       "key": "auto_threshold",
-       "label": "Auto threshold",
-       "type": "bool",
-       "default": true
-     },
-     {
-       "key": "enable_denoising",
-       "label": "Enable denoising",
-       "type": "bool",
-       "default": true
-     },
-     {
-       "key": "use_3d",
-       "label": "3D",
-       "type": "bool",
-       "default": false
-     }
-   ]
- }