spacr 0.9.26__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/app_annotate.py +4 -0
- spacr/core.py +30 -44
- spacr/gui_core.py +62 -6
- spacr/gui_elements.py +79 -10
- spacr/gui_utils.py +20 -16
- spacr/io.py +45 -1
- spacr/measure.py +6 -75
- spacr/plot.py +49 -2
- spacr/settings.py +30 -10
- spacr/sp_stats.py +0 -1
- spacr/spacr_cellpose.py +59 -0
- spacr/utils.py +102 -3
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/METADATA +22 -6
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/RECORD +18 -18
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/LICENSE +0 -0
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/WHEEL +0 -0
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/entry_points.txt +0 -0
- {spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/top_level.txt +0 -0
spacr/app_annotate.py
CHANGED
@@ -30,6 +30,10 @@ def initiate_annotation_app(parent_frame):
  settings['percentiles'] = list(map(convert_to_number, settings['percentiles'].split(','))) if settings['percentiles'] else [2, 98]
  settings['normalize'] = settings['normalize'].lower() == 'true'
  settings['normalize_channels'] = settings['normalize_channels'].split(',')
+ settings['outline'] = settings['outline'].split(',') if settings['outline'] else None
+ settings['outline_threshold_factor'] = float(settings['outline_threshold_factor']) if settings['outline_threshold_factor'] else 1.0
+ settings['outline_sigma'] = float(settings['outline_threshold_factor']) if settings['outline_threshold_factor'] else 1.0
+
  try:
  settings['measurement'] = settings['measurement'].split(',') if settings['measurement'] else None
  settings['threshold'] = None if settings['threshold'].lower() == 'none' else int(settings['threshold'])
spacr/core.py
CHANGED
@@ -196,11 +196,12 @@ def preprocess_generate_masks(settings):

  def generate_cellpose_masks(src, settings, object_type):

- from .utils import _masks_to_masks_stack, _filter_cp_masks, _get_cellpose_batch_size, _get_cellpose_channels, _choose_model,
+ from .utils import _masks_to_masks_stack, _filter_cp_masks, _get_cellpose_batch_size, _get_cellpose_channels, _choose_model, all_elements_match, prepare_batch_for_segmentation
  from .io import _create_database, _save_object_counts_to_database, _check_masks, _get_avg_object_size
  from .timelapse import _npz_to_movie, _btrack_track_cells, _trackpy_track_cells
- from .plot import
+ from .plot import plot_cellpose4_output
  from .settings import set_default_settings_preprocess_generate_masks, _get_object_settings
+ from .spacr_cellpose import parse_cellpose4_output

  gc.collect()
  if not torch.cuda.is_available():
@@ -239,9 +240,12 @@ def generate_cellpose_masks(src, settings, object_type):
  cellpose_channels = _get_cellpose_channels(src, settings['nucleus_channel'], settings['pathogen_channel'], settings['cell_channel'])
  if settings['verbose']:
  print(cellpose_channels)
-
+
+ if object_type not in cellpose_channels:
+ raise ValueError(f"Error: No channels were specified for object_type '{object_type}'. Check your settings.")
  channels = cellpose_channels[object_type]
-
+
+ #cellpose_batch_size = _get_cellpose_batch_size()
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

  if object_type == 'pathogen' and not settings['pathogen_model'] is None:
@@ -249,7 +253,8 @@ def generate_cellpose_masks(src, settings, object_type):

  model = _choose_model(model_name, device, object_type=object_type, restore_type=None, object_settings=object_settings)

- chans = [2, 1] if model_name == 'cyto2' else [0,0] if model_name == 'nucleus' else [2,0] if model_name == 'cyto' else [2, 0] if model_name == 'cyto3' else [2, 0]
+ #chans = [2, 1] if model_name == 'cyto2' else [0,0] if model_name == 'nucleus' else [2,0] if model_name == 'cyto' else [2, 0] if model_name == 'cyto3' else [2, 0]
+
  paths = [os.path.join(src, file) for file in os.listdir(src) if file.endswith('.npz')]

  count_loc = os.path.dirname(src)+'/measurements/measurements.db'
@@ -257,6 +262,7 @@ def generate_cellpose_masks(src, settings, object_type):
  _create_database(count_loc)

  average_sizes = []
+ average_count = []
  time_ls = []

  for file_index, path in enumerate(paths):
@@ -310,49 +316,30 @@ def generate_cellpose_masks(src, settings, object_type):
  continue

  batch = prepare_batch_for_segmentation(batch)
-
-
- #if settings['denoise']:
- # if object_type == 'cell':
- # model_type = "denoise_cyto3"
- # elif object_type == 'nucleus':
- # model_type = "denoise_nucleus"
- # else:
- # raise ValueError(f"No denoise model for object_type: {object_type}")
- # dn = denoise.DenoiseModel(model_type=model_type, gpu=device)
- # batch = dn.eval(imgs=batch, channels=chans, diameter=object_settings['diameter'])
+ batch_list = [batch[i] for i in range(batch.shape[0])]

  if timelapse:
  movie_path = os.path.join(os.path.dirname(src), 'movies')
  os.makedirs(movie_path, exist_ok=True)
  save_path = os.path.join(movie_path, f'timelapse_{object_type}_{name}.mp4')
  _npz_to_movie(batch, batch_filenames, save_path, fps=2)
-
- output = model.eval(x=
- batch_size=
+
+ output = model.eval(x=batch_list,
+ batch_size=batch_size,
  normalize=False,
-
-
+ channel_axis=-1,
+ channels=channels,
  diameter=object_settings['diameter'],
  flow_threshold=flow_threshold,
  cellprob_threshold=cellprob_threshold,
  rescale=None,
  resample=object_settings['resample'])
-
-
- masks, flows, _, _ = output
- elif len(output) == 3:
- masks, flows, _ = output
- else:
- raise ValueError(f"Unexpected number of return values from model.eval(). Expected 3 or 4, got {len(output)}")
+
+ masks, flows, _, _, _ = parse_cellpose4_output(output)

  if timelapse:
  if settings['plot']:
-
- if idx == 0:
- num_objects = mask_object_count(mask)
- print(f'Number of objects: {num_objects}')
- plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+ plot_cellpose4_output(batch_list, masks, flows, cmap='inferno', figuresize=figuresize, nr=1, print_object_number=True)

  _save_object_counts_to_database(masks, object_type, batch_filenames, count_loc, added_string='_timelapse')
  if object_type in timelapse_objects:
@@ -430,25 +417,24 @@ def generate_cellpose_masks(src, settings, object_type):
  else:
  mask_stack = _masks_to_masks_stack(masks)

- if settings['plot']:
-
- if idx == 0:
- num_objects = mask_object_count(mask)
- print(f'Number of objects, : {num_objects}')
- plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+ #if settings['plot']:
+ # plot_cellpose4_output(batch_list, masks, flows, cmap='inferno', figuresize=figuresize, nr=1, print_object_number=True)

  if not np.any(mask_stack):
- average_obj_size = 0
+ avg_num_objects_per_image, average_obj_size = 0, 0
  else:
- average_obj_size = _get_avg_object_size(mask_stack)
-
+ avg_num_objects_per_image, average_obj_size = _get_avg_object_size(mask_stack)
+
+ average_count.append(avg_num_objects_per_image)
  average_sizes.append(average_obj_size)
  overall_average_size = np.mean(average_sizes) if len(average_sizes) > 0 else 0
-
+ overall_average_count = np.mean(average_count) if len(average_count) > 0 else 0
+ print(f'Found {overall_average_count} {object_type}/FOV. average size: {overall_average_size:.3f} px2')

  if not timelapse:
  if settings['plot']:
-
+ plot_cellpose4_output(batch_list, masks, flows, cmap='inferno', figuresize=figuresize, nr=batch_size)
+
  if settings['save']:
  for mask_index, mask in enumerate(mask_stack):
  output_filename = os.path.join(output_folder, batch_filenames[mask_index])
spacr/gui_core.py
CHANGED
@@ -103,6 +103,11 @@ def display_figure(fig):
  new_canvas = FigureCanvasTkAgg(fig, master=canvas_widget.master)
  new_canvas.draw()
  new_canvas.get_tk_widget().grid(row=0, column=0, sticky="nsew")
+
+ # Store existing text labels on each axis for zoom visibility control (new feature)
+ for ax in fig.get_axes():
+ texts = ax.texts
+ ax._label_annotations = texts

  # Update the global canvas and canvas_widget references
  canvas = new_canvas
@@ -169,14 +174,65 @@ def display_figure(fig):
  else:
  #flash_feedback("right")
  show_next_figure()
+
+ def zoom(event):
+ zoom_in_factor = 1 / 1.2
+ zoom_out_factor = 1.2

-
-
-
-
-
+ if event.num == 4 or (hasattr(event, 'delta') and event.delta > 0):
+ factor = zoom_in_factor
+ elif event.num == 5 or (hasattr(event, 'delta') and event.delta < 0):
+ factor = zoom_out_factor
+ else:
+ return
+
+ # Find the axis under the cursor
+ ref_ax = None
+ for ax in canvas.figure.get_axes():
+ if ax.get_window_extent().contains(event.x, event.y):
+ ref_ax = ax
+ break
+
+ if ref_ax is None:
+ return
+
+ try:
+ # Convert mouse position to data coords in reference axis
+ data_x, data_y = ref_ax.transData.inverted().transform((event.x, event.y))
+ except ValueError:
+ return
+
+ # Get current limits
+ xlim = ref_ax.get_xlim()
+ ylim = ref_ax.get_ylim()
+
+ # Compute new limits for the reference axis
+ new_xlim = [
+ data_x - (data_x - xlim[0]) * factor,
+ data_x + (xlim[1] - data_x) * factor
+ ]
+ new_ylim = [
+ data_y - (data_y - ylim[0]) * factor,
+ data_y + (ylim[1] - data_y) * factor
+ ]
+
+ # Apply the same limits to all axes
+ for ax in canvas.figure.get_axes():
+ ax.set_xlim(new_xlim)
+ ax.set_ylim(new_ylim)
+
+ for label in ax.texts:
+ label.set_clip_on(True)
+
+ if hasattr(ax, '_label_annotations'):
+ for label in ax._label_annotations:
+ x, y = label.get_position()
+ visible = new_xlim[0] <= x <= new_xlim[1] and new_ylim[0] <= y <= new_ylim[1]
+ label.set_visible(visible)
+
+ canvas.draw_idle()

- def
+ def zoom_v1(event):
  # Fixed zoom factors (adjust these if you want faster or slower zoom)
  zoom_in_factor = 0.9  # When zooming in, ranges shrink by 10%
  zoom_out_factor = 1.1  # When zooming out, ranges increase by 10%
spacr/gui_elements.py
CHANGED
@@ -10,18 +10,23 @@ import numpy as np
  import pandas as pd
  from PIL import Image, ImageOps, ImageTk, ImageDraw, ImageFont, ImageEnhance
  from concurrent.futures import ThreadPoolExecutor
- from skimage.exposure import rescale_intensity
  from IPython.display import display, HTML
  import imageio.v2 as imageio
  from collections import deque
+ from skimage.filters import threshold_otsu
+ from skimage.exposure import rescale_intensity
  from skimage.draw import polygon, line
  from skimage.transform import resize
- from
+ from skimage.morphology import dilation, disk
+ from skimage.segmentation import find_boundaries
+ from skimage.util import img_as_ubyte
+ from scipy.ndimage import binary_fill_holes, label, gaussian_filter
  from tkinter import ttk, scrolledtext
  from sklearn.model_selection import train_test_split
  from xgboost import XGBClassifier
  from sklearn.metrics import classification_report, confusion_matrix

+
  fig = None

  def restart_gui_app(root):
@@ -2209,7 +2214,7 @@ class ModifyMaskApp:
  self.update_display()

  class AnnotateApp:
- def __init__(self, root, db_path, src, image_type=None, channels=None, image_size=200, annotation_column='annotate', normalize=False, percentiles=(1, 99), measurement=None, threshold=None, normalize_channels=None):
+ def __init__(self, root, db_path, src, image_type=None, channels=None, image_size=200, annotation_column='annotate', normalize=False, percentiles=(1, 99), measurement=None, threshold=None, normalize_channels=None, outline=None, outline_threshold_factor=1, outline_sigma=1):
  self.root = root
  self.db_path = db_path
  self.src = src
@@ -2237,7 +2242,10 @@ class AnnotateApp:
  self.measurement = measurement
  self.threshold = threshold
  self.normalize_channels = normalize_channels
-
+ self.outline = outline #([s.strip().lower() for s in outline.split(',') if s.strip()]if isinstance(outline, str) and outline else None)
+ self.outline_threshold_factor = outline_threshold_factor
+ self.outline_sigma = outline_sigma
+
  style_out = set_dark_style(ttk.Style())
  self.font_loader = style_out['font_loader']
  self.font_size = style_out['font_size']
@@ -2337,7 +2345,12 @@ class AnnotateApp:
  'percentiles': ','.join(map(str, self.percentiles)),
  'measurement': ','.join(self.measurement) if self.measurement else '',
  'threshold': str(self.threshold) if self.threshold is not None else '',
- 'normalize_channels': ','.join(self.normalize_channels) if self.normalize_channels else ''
+ 'normalize_channels': ','.join(self.normalize_channels) if self.normalize_channels else '',
+ 'outline': ','.join(self.outline) if self.outline else '',
+ 'outline_threshold_factor': str(self.outline_threshold_factor) if hasattr(self, 'outline_threshold_factor') else '1.0',
+ 'outline_sigma': str(self.outline_sigma) if hasattr(self, 'outline_sigma') else '1.0',
+ 'src': self.src,
+ 'db_path': self.db_path,
  }

  for key, data in vars_dict.items():
@@ -2354,7 +2367,10 @@ class AnnotateApp:
  settings['percentiles'] = list(map(convert_to_number, settings['percentiles'].split(','))) if settings['percentiles'] else [1, 99]
  settings['normalize'] = settings['normalize'].lower() == 'true'
  settings['normalize_channels'] = settings['normalize_channels'].split(',') if settings['normalize_channels'] else None
-
+ settings['outline'] = settings['outline'].split(',') if settings['outline'] else None
+ settings['outline_threshold_factor'] = float(settings['outline_threshold_factor'].replace(',', '.')) if settings['outline_threshold_factor'] else 1.0
+ settings['outline_sigma'] = float(settings['outline_sigma'].replace(',', '.')) if settings['outline_sigma'] else 1.0
+
  try:
  settings['measurement'] = settings['measurement'].split(',') if settings['measurement'] else None
  settings['threshold'] = None if settings['threshold'].lower() == 'none' else int(settings['threshold'])
@@ -2379,7 +2395,12 @@ class AnnotateApp:
  'percentiles': settings.get('percentiles'),
  'measurement': settings.get('measurement'),
  'threshold': settings.get('threshold'),
- 'normalize_channels': settings.get('normalize_channels')
+ 'normalize_channels': settings.get('normalize_channels'),
+ 'outline': settings.get('outline'),
+ 'outline_threshold_factor': settings.get('outline_threshold_factor'),
+ 'outline_sigma': settings.get('outline_sigma'),
+ 'src': self.src,
+ 'db_path': self.db_path
  })

  settings_window.destroy()
@@ -2389,22 +2410,32 @@ class AnnotateApp:

  def update_settings(self, **kwargs):
  allowed_attributes = {
- 'image_type', 'channels', 'image_size', 'annotation_column',
- 'normalize', 'percentiles', 'measurement', 'threshold', 'normalize_channels'
+ 'image_type', 'channels', 'image_size', 'annotation_column', 'src', 'db_path',
+ 'normalize', 'percentiles', 'measurement', 'threshold', 'normalize_channels', 'outline', 'outline_threshold_factor', 'outline_sigma'
  }

  updated = False
-
+
  for attr, value in kwargs.items():
  if attr in allowed_attributes and value is not None:
+ if attr == 'outline':
+ if isinstance(value, str):
+ value = [s.strip().lower() for s in value.split(',') if s.strip()]
+ elif attr == 'outline_threshold_factor':
+ value = float(value)
+ elif attr == 'outline_sigma':
+ value = float(value)
  setattr(self, attr, value)
  updated = True

+
  if 'image_size' in kwargs:
  if isinstance(self.image_size, list):
  self.image_size = (int(self.image_size[0]), int(self.image_size[0]))
  elif isinstance(self.image_size, int):
  self.image_size = (self.image_size, self.image_size)
+ elif isinstance(self.image_size, tuple) and len(self.image_size) == 2:
+ self.image_size = tuple(map(int, self.image_size))
  else:
  raise ValueError("Invalid image size")

@@ -2599,9 +2630,47 @@ class AnnotateApp:
  img = self.normalize_image(img, self.normalize, self.percentiles, self.normalize_channels)
  img = img.convert('RGB')
  img = self.filter_channels(img)
+
+ if self.outline:
+ img = self.outline_image(img, self.outline_sigma)
+
  img = img.resize(self.image_size)
  return img, annotation

+ def outline_image(self, img, edge_sigma=1, edge_thickness=1):
+ """
+ For each selected channel, compute a continuous outline from the intensity landscape
+ using Otsu threshold scaled by a correction factor. Replace only that channel.
+ """
+ arr = np.asarray(img)
+ if arr.ndim != 3 or arr.shape[2] != 3:
+ return img  # not RGB
+
+ out_img = arr.copy()
+ channel_map = {'r': 0, 'g': 1, 'b': 2}
+ factor = getattr(self, 'outline_threshold_factor', 1.0)
+
+ for ch in self.outline:
+ if ch not in channel_map:
+ continue
+ idx = channel_map[ch]
+ channel_data = arr[:, :, idx]
+
+ try:
+ channel_data = gaussian_filter(channel_data, sigma=edge_sigma)
+ otsu_thresh = threshold_otsu(channel_data)
+ corrected_thresh = min(255, otsu_thresh * factor)
+ fg_mask = channel_data > corrected_thresh
+ except Exception:
+ continue
+
+ edge = find_boundaries(fg_mask, mode='inner')
+ thick_edge = dilation(edge, disk(edge_thickness))
+
+ out_img[:, :, idx] = (thick_edge * 255).astype(np.uint8)
+
+ return Image.fromarray(out_img)
+
  @staticmethod
  def normalize_image(img, normalize=False, percentiles=(1, 99), normalize_channels=None):
  """
spacr/gui_utils.py
CHANGED
@@ -242,13 +242,6 @@ def annotate(settings):
  threshold=settings['threshold'],
  normalize_channels=settings['normalize_channels'])

- #next_button = tk.Button(root, text="Next", command=app.next_page)
- #next_button.grid(row=app.grid_rows, column=app.grid_cols - 1)
- #back_button = tk.Button(root, text="Back", command=app.previous_page)
- #back_button.grid(row=app.grid_rows, column=app.grid_cols - 2)
- #exit_button = tk.Button(root, text="Exit", command=app.shutdown)
- #exit_button.grid(row=app.grid_rows, column=app.grid_cols - 3)
-
  app.load_images()
  root.mainloop()

@@ -271,14 +264,27 @@ def generate_annotate_fields(frame):

  # Arrange input fields and labels
  for row, (name, data) in enumerate(vars_dict.items()):
- tk.Label(
-
-
-
+ tk.Label(
+ frame,
+ text=f"{name.replace('_', ' ').capitalize()}:",
+ bg=style_out['bg_color'],
+ fg=style_out['fg_color'],
+ font=font_loader.get_font(size=font_size)
+ ).grid(row=row, column=0)
+
+ value = data['value']
+ if isinstance(value, list):
+ string_value = ','.join(map(str, value))
+ elif isinstance(value, (int, float, bool)):
+ string_value = str(value)
+ elif value is None:
+ string_value = ''
  else:
-
+ string_value = value
+
+ data['entry'].insert(0, string_value)
  data['entry'].grid(row=row, column=1)
-
+
  return vars_dict

  def run_annotate_app(vars_dict, parent_frame):
@@ -349,7 +355,7 @@ def annotate_with_image_refs(settings, root, shutdown_callback):
  screen_height = root.winfo_screenheight()
  root.geometry(f"{screen_width}x{screen_height}")

- app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'], normalize_channels=settings['normalize_channels'])
+ app = AnnotateApp(root, db, src, image_type=settings['image_type'], channels=settings['channels'], image_size=settings['img_size'], annotation_column=settings['annotation_column'], normalize=settings['normalize'], percentiles=settings['percentiles'], measurement=settings['measurement'], threshold=settings['threshold'], normalize_channels=settings['normalize_channels'], outline=settings['outline'], outline_threshold_factor=settings['outline_threshold_factor'], outline_sigma=settings['outline_sigma'])

  # Set the canvas background to black
  root.configure(bg='black')
@@ -454,8 +460,6 @@ def function_gui_wrapper(function=None, settings={}, q=None, fig_queue=None, imp
  # Restore the original plt.show function
  plt.show = original_show

-
-
  def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):

  from .core import generate_image_umap, preprocess_generate_masks
spacr/io.py
CHANGED
@@ -1587,7 +1587,7 @@ def preprocess_img_data(settings):
  print(f"Found {extension_counts[most_common_extension]} {most_common_extension} files")

  else:
- print(f"Could not find any {valid_ext} files in {src}
+ print(f"Could not find any {valid_ext} files in {src}")
  print(f"{files} in {src}")
  print(f"Please check the folder and try again")

@@ -1699,6 +1699,50 @@ def _check_masks(batch, batch_filenames, output_folder):
  return np.array(filtered_batch), filtered_filenames

  def _get_avg_object_size(masks):
+ """
+ Calculate:
+ - average number of objects per image
+ - average object size over all objects
+
+ Parameters:
+ masks (list): A list of 2D or 3D masks with labeled objects.
+
+ Returns:
+ tuple:
+ avg_num_objects_per_image (float)
+ avg_object_size (float)
+ """
+ per_image_counts = []
+ all_areas = []
+
+ for idx, mask in enumerate(masks):
+ if mask.ndim in [2, 3] and np.any(mask):
+ props = measure.regionprops(mask)
+ areas = [prop.area for prop in props]
+ per_image_counts.append(len(areas))
+ all_areas.extend(areas)
+ else:
+ per_image_counts.append(0)
+ if not np.any(mask):
+ print(f"Warning: Mask {idx} is empty.")
+ elif mask.ndim not in [2, 3]:
+ print(f"Warning: Mask {idx} has invalid dimension: {mask.ndim}")
+
+ # Average number of objects per image
+ if per_image_counts:
+ avg_num_objects_per_image = sum(per_image_counts) / len(per_image_counts)
+ else:
+ avg_num_objects_per_image = 0
+
+ # Average object size over all objects
+ if all_areas:
+ avg_object_size = sum(all_areas) / len(all_areas)
+ else:
+ avg_object_size = 0
+
+ return avg_num_objects_per_image, avg_object_size
+
+ def _get_avg_object_size_v1(masks):
  """
  Calculate the average size of objects in a list of masks.

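
The rewritten `_get_avg_object_size` now returns two values: the mean object count per image and the mean object area over all objects. A small illustration of that aggregation on toy labeled masks (the arrays below are made up for the example):

    import numpy as np
    from skimage import measure

    mask_a = np.zeros((8, 8), dtype=int)
    mask_a[1:3, 1:3] = 1                  # object 1: 4 px
    mask_a[5:8, 5:8] = 2                  # object 2: 9 px
    mask_b = np.zeros((8, 8), dtype=int)  # empty mask

    counts = [len(measure.regionprops(m)) for m in (mask_a, mask_b)]
    areas = [p.area for m in (mask_a, mask_b) for p in measure.regionprops(m)]
    print(sum(counts) / len(counts))   # 1.0 objects per image
    print(sum(areas) / len(areas))     # 6.5 px mean object size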
spacr/measure.py
CHANGED
@@ -331,7 +331,7 @@ def _extended_regionprops_table(labels, image, intensity_props):
  df['frac_low10'] = frac_low10
  df['entropy_intensity'] = entropy_intensity

- percentiles = [5, 10, 25,
+ percentiles = [5, 10, 25, 75, 85, 95]
  for p in percentiles:
  df[f'percentile_{p}'] = [
  np.percentile(region.intensity_image[region.image], p)
@@ -339,78 +339,6 @@ def _extended_regionprops_table(labels, image, intensity_props):
  ]
  return df

- def _extended_regionprops_table_v2(labels, image, intensity_props):
- """
- Calculate extended region properties table, adding integrated intensity,
- skewness, kurtosis, std, and median intensity per region.
- """
- # regionprops_table gives you vectorized props, but not everything you want
- props = regionprops_table(labels, image, properties=intensity_props)
- df = pd.DataFrame(props)
-
- # Compute extra features region-by-region
- regions = regionprops(labels, intensity_image=image)
- integrated_intensity = []
- std_intensity = []
- median_intensity = []
- skew_intensity = []
- kurtosis_intensity = []
- for region in regions:
- intens = region.intensity_image[region.image]
- # Handle empty region edge-case (shouldn't happen)
- if intens.size == 0:
- integrated_intensity.append(np.nan)
- std_intensity.append(np.nan)
- median_intensity.append(np.nan)
- skew_intensity.append(np.nan)
- kurtosis_intensity.append(np.nan)
- else:
- integrated_intensity.append(np.sum(intens))
- std_intensity.append(np.std(intens))
- median_intensity.append(np.median(intens))
- # Only valid for >2 pixels
- skew_intensity.append(skew(intens) if intens.size > 2 else np.nan)
- kurtosis_intensity.append(kurtosis(intens) if intens.size > 3 else np.nan)
-
- df['integrated_intensity'] = integrated_intensity
- df['std_intensity'] = std_intensity
- df['median_intensity'] = median_intensity
- df['skew_intensity'] = skew_intensity
- df['kurtosis_intensity'] = kurtosis_intensity
-
- # You can add other features here if desired
-
- # Percentiles (your existing code—optional if you want to keep)
- percentiles = [5, 10, 25, 50, 75, 85, 95]
- for p in percentiles:
- df[f'percentile_{p}'] = [
- np.percentile(region.intensity_image[region.image], p)
- for region in regions
- ]
- return df
-
- def _extended_regionprops_table_v1(labels, image, intensity_props):
- """
- Calculate extended region properties table.
-
- Args:
- labels (ndarray): Labeled image.
- image (ndarray): Input image.
- intensity_props (list): List of intensity properties to calculate.
-
- Returns:
- DataFrame: Extended region properties table.
-
- """
- regions = regionprops(labels, image)
- props = regionprops_table(labels, image, properties=intensity_props)
- percentiles = [5, 10, 25, 50, 75, 85, 95]
- for p in percentiles:
- props[f'percentile_{p}'] = [
- np.percentile(region.intensity_image.flatten()[~np.isnan(region.intensity_image.flatten())], p)
- for region in regions]
- return pd.DataFrame(props)
-
  def _calculate_homogeneity(label, channel, distances=[2,4,8,16,32,64]):
  """
  Calculate the homogeneity values for each region in the label mask.
@@ -767,8 +695,11 @@ def _intensity_measurements(cell_mask, nucleus_mask, pathogen_mask, cytoplasm_ma
  df.append(mask_intensity_df)

  if isinstance(settings['distance_gaussian_sigma'], int):
-
-
+ if settings['distance_gaussian_sigma'] != 0:
+ if settings['cell_mask_dim'] != None:
+ if settings['nucleus_mask_dim'] != None or settings['pathogen_mask_dim'] != None:
+ intensity_distance_df = _measure_intensity_distance(cell_mask, nucleus_mask, pathogen_mask, channel_arrays, settings)
+ cell_dfs.append(intensity_distance_df)

  if radial_dist:
  if np.max(nucleus_mask) != 0:
spacr/plot.py
CHANGED
@@ -34,7 +34,6 @@ from collections import defaultdict
  from matplotlib.gridspec import GridSpec
  from matplotlib_venn import venn2

- #filter_dict={'cell':[(0,100000), (0, 65000)],'nucleus':[(3000,100000), (1500, 65000)],'pathogen':[(500,100000), (0, 65000)]}
  def plot_image_mask_overlay(
  file,
  channels,
@@ -367,6 +366,54 @@ def plot_image_mask_overlay(

  return fig

+ def plot_cellpose4_output(batch, masks, flows, cmap='inferno', figuresize=10, nr=1, print_object_number=True):
+ """
+ Plot the masks and flows for a given batch of images.
+
+ Args:
+ batch (numpy.ndarray): The batch of images.
+ masks (list or numpy.ndarray): The masks corresponding to the images.
+ flows (list or numpy.ndarray): The flows corresponding to the images.
+ cmap (str, optional): The colormap to use for displaying the images. Defaults to 'inferno'.
+ figuresize (int, optional): The size of the figure. Defaults to 20.
+ nr (int, optional): The maximum number of images to plot. Defaults to 1.
+ file_type (str, optional): The file type of the flows. Defaults to '.npz'.
+ print_object_number (bool, optional): Whether to print the object number on the mask. Defaults to True.
+
+ Returns:
+ None
+ """
+
+ from .utils import _generate_mask_random_cmap, mask_object_count
+
+ font = figuresize/2
+ index = 0
+
+ for image, mask, flow in zip(batch, masks, flows):
+ #if print_object_number:
+ # num_objects = mask_object_count(mask)
+ # print(f'Number of objects: {num_objects}')
+ random_cmap = _generate_mask_random_cmap(mask)
+
+ if index < nr:
+ index += 1
+ chans = image.shape[-1]
+ fig, ax = plt.subplots(1, image.shape[-1] + 2, figsize=(4 * figuresize, figuresize))
+ for v in range(0, image.shape[-1]):
+ ax[v].imshow(image[..., v], cmap=cmap, interpolation='nearest')
+ ax[v].set_title('Image - Channel'+str(v))
+ ax[chans].imshow(mask, cmap=random_cmap, interpolation='nearest')
+ ax[chans].set_title('Mask')
+ if print_object_number:
+ unique_objects = np.unique(mask)[1:]
+ for obj in unique_objects:
+ cy, cx = ndi.center_of_mass(mask == obj)
+ ax[chans].text(cx, cy, str(obj), color='white', fontsize=font, ha='center', va='center')
+ ax[chans+1].imshow(flow, cmap='viridis', interpolation='nearest')
+ ax[chans+1].set_title('Flow')
+ plt.show()
+ return
+
  def plot_masks(batch, masks, flows, cmap='inferno', figuresize=10, nr=1, file_type='.npz', print_object_number=True):
  """
  Plot the masks and flows for a given batch of images.
@@ -1154,7 +1201,7 @@ def _plot_cropped_arrays(stack, filename, figuresize=10, cmap='inferno', thresho
  for channel in range(num_channels):
  plot_single_array(stack[:, :, channel], axs[channel], f'C. {channel}', plt.get_cmap(cmap))
  fig.tight_layout()
- print(f'{filename}')
+ #print(f'{filename}')
  return fig

  def _visualize_and_save_timelapse_stack_with_tracks(masks, tracks_df, save, src, name, plot, filenames, object_type, mode='btrack', interactive=False):
spacr/settings.py
CHANGED
@@ -64,9 +64,9 @@ def set_default_settings_preprocess_generate_masks(settings={}):
  settings.setdefault('nucleus_background', 100)
  settings.setdefault('nucleus_Signal_to_noise', 10)
  settings.setdefault('nucleus_CP_prob', 0)
- settings.setdefault('nucleus_FT',
- settings.setdefault('cell_FT',
- settings.setdefault('pathogen_FT',
+ settings.setdefault('nucleus_FT', 1.0)
+ settings.setdefault('cell_FT', 1.0)
+ settings.setdefault('pathogen_FT', 1.0)

  # Plot settings
  settings.setdefault('plot', False)
@@ -97,6 +97,10 @@ def set_default_settings_preprocess_generate_masks(settings={}):
  settings.setdefault('upscale', False)
  settings.setdefault('upscale_factor', 2.0)
  settings.setdefault('adjust_cells', False)
+ settings.setdefault('use_sam_cell', False)
+ settings.setdefault('use_sam_nucleus', False)
+ settings.setdefault('use_sam_pathogen', False)
+
  return settings

  def set_default_plot_data_from_db(settings):
@@ -173,6 +177,8 @@ def _get_object_settings(object_type, settings):
  object_settings['maximum_size'] = (object_settings['diameter']**2)*10
  else:
  print(f'Cell diameter must be an integer or float, got {settings["cell_diamiter"]}')
+ if settings['use_sam_cell']:
+ object_settings['model_name'] = 'sam'

  elif object_type == 'nucleus':
  object_settings['model_name'] = 'nuclei'
@@ -187,6 +193,8 @@ def _get_object_settings(object_type, settings):
  object_settings['maximum_size'] = (object_settings['diameter']**2)*10
  else:
  print(f'Nucleus diameter must be an integer or float, got {settings["nucleus_diamiter"]}')
+ if settings['use_sam_nucleus']:
+ object_settings['model_name'] = 'sam'

  elif object_type == 'pathogen':
  object_settings['model_name'] = 'cyto'
@@ -203,10 +211,13 @@ def _get_object_settings(object_type, settings):
  object_settings['maximum_size'] = (object_settings['diameter']**2)*10
  else:
  print(f'Pathogen diameter must be an integer or float, got {settings["pathogen_diamiter"]}')
+
+ if settings['use_sam_pathogen']:
+ object_settings['model_name'] = 'sam'

  else:
  print(f'Object type: {object_type} not supported. Supported object types are : cell, nucleus and pathogen')
-
+
  if settings['verbose']:
  print(object_settings)

@@ -314,7 +325,7 @@ def get_measure_crop_settings(settings={}):
  settings.setdefault('cytoplasm_min_size',0)
  settings.setdefault('merge_edge_pathogen_cells', True)

- settings.setdefault('distance_gaussian_sigma',
+ settings.setdefault('distance_gaussian_sigma', 10)

  if settings['test_mode']:
  settings['verbose'] = True
@@ -712,7 +723,7 @@ expected_types = {
  "nucleus_background": int,
  "nucleus_Signal_to_noise": float,
  "nucleus_CP_prob": float,
- "nucleus_FT": float,
+ "nucleus_FT": (int, float),
  "cell_channel": (int, type(None)),
  "cell_background": (int, float),
  "cell_Signal_to_noise": (int, float),
@@ -1003,11 +1014,14 @@ expected_types = {
  "nucleus_diamiter":int,
  "pathogen_diamiter":int,
  "consolidate":bool,
-
+ 'use_sam_cell':bool,
+ 'use_sam_nucleus':bool,
+ 'use_sam_pathogen':bool,
+ "distance_gaussian_sigma": (int, type(None))
  }

  categories = {"Paths":[ "src", "grna", "barcodes", "custom_model_path", "dataset","model_path","grna_csv","row_csv","column_csv", "metadata_files", "score_data","count_data"],
- "General": ["cell_mask_dim", "cytoplasm", "cell_chann_dim", "cell_channel", "nucleus_chann_dim", "nucleus_channel", "nucleus_mask_dim", "pathogen_mask_dim", "pathogen_chann_dim", "pathogen_channel",
+ "General": ["cell_mask_dim", "cytoplasm", "cell_chann_dim", "cell_channel", "nucleus_chann_dim", "nucleus_channel", "nucleus_mask_dim", "pathogen_mask_dim", "pathogen_chann_dim", "pathogen_channel", "test_mode", "plot", "metadata_type", "custom_regex", "experiment", "channels", "magnification", "channel_dims", "apply_model_to_dataset", "generate_training_dataset", "train_DL_model", "delete_intermediate", "uninfected", ],
  "Cellpose":["denoise","fill_in","from_scratch", "n_epochs", "width_height", "model_name", "custom_model", "resample", "rescale", "CP_prob", "flow_threshold", "percentiles", "invert", "diameter", "grayscale", "Signal_to_noise", "resize", "target_height", "target_width"],
  "Cell": ["cell_diamiter","cell_intensity_range", "cell_size_range", "cell_background", "cell_Signal_to_noise", "cell_CP_prob", "cell_FT", "remove_background_cell", "cell_min_size", "cytoplasm_min_size", "adjust_cells", "cells", "cell_loc"],
  "Nucleus": ["nucleus_diamiter","nucleus_intensity_range", "nucleus_size_range", "nucleus_background", "nucleus_Signal_to_noise", "nucleus_CP_prob", "nucleus_FT", "remove_background_nucleus", "nucleus_min_size", "nucleus_loc"],
@@ -1025,7 +1039,7 @@ categories = {"Paths":[ "src", "grna", "barcodes", "custom_model_path", "dataset
  "Plot": ["split_axis_lims", "x_lim","log_x","log_y", "plot_control", "plot_nr", "examples_to_plot", "normalize_plots", "cmap", "figuresize", "plot_cluster_grids", "img_zoom", "row_limit", "color_by", "plot_images", "smooth_lines", "plot_points", "plot_outlines", "black_background", "plot_by_cluster", "heatmap_feature","grouping","min_max","cmap","save_figure"],
  "Timelapse": ["timelapse", "fps", "timelapse_displacement", "timelapse_memory", "timelapse_frame_limits", "timelapse_remove_transient", "timelapse_mode", "timelapse_objects", "compartments"],
  "Advanced": ["merge_edge_pathogen_cells", "test_images", "random_test", "test_nr", "test", "test_split", "normalize", "target_unique_count","threshold_multiplier", "threshold_method", "min_n","shuffle", "target_intensity_min", "cells_per_well", "nuclei_limit", "pathogen_limit", "background", "backgrounds", "schedule", "test_size","exclude","n_repeats","top_features", "model_type_ml", "model_type","minimum_cell_count","n_estimators","preprocess", "remove_background", "normalize", "lower_percentile", "merge_pathogens", "batch_size", "filter", "save", "masks", "verbose", "randomize", "n_jobs"],
- "Beta": ["all_to_mip", "upscale", "upscale_factor", "consolidate", "distance_gaussian_sigma"]
+ "Beta": ["all_to_mip", "upscale", "upscale_factor", "consolidate", "distance_gaussian_sigma","use_sam_pathogen","use_sam_nucleus", "use_sam_cell"]
  }


@@ -1053,7 +1067,7 @@ def check_settings(vars_dict, expected_types, q=None):
  expected_type = expected_types.get(key, str)

  try:
- if key in ["cell_plate_metadata", "timelapse_frame_limits", "png_size", "png_dims", "pathogen_plate_metadata", "treatment_plate_metadata", "timelapse_objects", "class_metadata", "crop_mode"]:
+ if key in ["cell_plate_metadata", "timelapse_frame_limits", "png_size", "png_dims", "pathogen_plate_metadata", "treatment_plate_metadata", "timelapse_objects", "class_metadata", "crop_mode", "dialate_png_ratios"]:
  if value is None:
  parsed_value = None
  else:
@@ -1415,6 +1429,9 @@ def generate_fields(variables, scrollable_frame):
  "overlay": "(bool) - Overlay activation maps on the images.",
  "shuffle": "(bool) - Shuffle the dataset bufore generating the activation maps",
  "correlation": "(bool) - Calculate correlation between image channels and activation maps. Data is saved to .db.",
+ "use_sam_cell": "(bool) - Whether to use SAM for cell segmentation.",
+ "use_sam_nucleus": "(bool) - Whether to use SAM for nucleus segmentation.",
+ "use_sam_pathogen": "(bool) - Whether to use SAM for pathogen segmentation.",
  "normalize_input": "(bool) - Normalize the input images before passing them to the model.",
  "normalize_plots": "(bool) - Normalize images before plotting.",
  }
@@ -1467,6 +1484,9 @@ def set_annotate_default_settings(settings):
  settings.setdefault('annotation_column', 'test')
  settings.setdefault('normalize', 'False')
  settings.setdefault('normalize_channels', "r,g,b")
+ settings.setdefault('outline', None)
+ settings.setdefault('outline_threshold_factor', 1)
+ settings.setdefault('outline_sigma', 1)
  settings.setdefault('percentiles', [2, 98])
  settings.setdefault('measurement', '') #'cytoplasm_channel_3_mean_intensity,pathogen_channel_3_mean_intensity')
  settings.setdefault('threshold', '') #'2')
spacr/sp_stats.py
CHANGED
@@ -7,7 +7,6 @@ from scipy.stats import chi2_contingency, fisher_exact
  import itertools
  from statsmodels.stats.multitest import multipletests

-
  def choose_p_adjust_method(num_groups, num_data_points):
  """
  Selects the most appropriate p-value adjustment method based on data characteristics.
spacr/spacr_cellpose.py
CHANGED
@@ -7,6 +7,65 @@ from multiprocessing import Pool
  from skimage.transform import resize as resizescikit
  from scipy.ndimage import binary_fill_holes

+ def parse_cellpose4_output(output):
+ """
+ General parser for Cellpose eval output.
+ Handles:
+ - batched format (list of 4 arrays)
+ - per-image list of flows
+ Returns:
+ masks, flows0, flows1, flows2, flows3
+ """
+
+ masks = output[0]
+ flows = output[1]
+
+ if not isinstance(flows, (list, tuple)):
+ raise ValueError(f"Unrecognized Cellpose flows type: {type(flows)}")
+
+ # Determine number of images
+ try:
+ num_images = len(masks)
+ except TypeError:
+ raise ValueError(f"Cannot determine number of images in masks (type={type(masks)})")
+
+ # Case A: batched format (4 arrays stacked over batch)
+ if len(flows) == 4 and all(isinstance(f, np.ndarray) for f in flows):
+ flow0_array, flow1_array, flow2_array, flow3_array = flows
+
+ flows0 = [flow0_array[i] for i in range(num_images)]
+ flows1 = [flow1_array[:, i] for i in range(num_images)]
+ flows2 = [flow2_array[i] for i in range(num_images)]
+ flows3 = [flow3_array[i] for i in range(num_images)]
+
+ return masks, flows0, flows1, flows2, flows3
+
+ # Case B: per-image format
+ elif len(flows) == num_images:
+ flows0, flows1, flows2, flows3 = [], [], [], []
+
+ for item in flows:
+ if isinstance(item, (list, tuple)):
+ n = len(item)
+ f0 = item[0] if n > 0 else None
+ f1 = item[1] if n > 1 else None
+ f2 = item[2] if n > 2 else None
+ f3 = item[3] if n > 3 else None
+ elif isinstance(item, np.ndarray):
+ f0, f1, f2, f3 = item, None, None, None
+ else:
+ f0 = f1 = f2 = f3 = None
+
+ flows0.append(f0)
+ flows1.append(f1)
+ flows2.append(f2)
+ flows3.append(f3)
+
+ return masks, flows0, flows1, flows2, flows3
+
+ # Unrecognized structure
+ raise ValueError(f"Unrecognized Cellpose flows format: type={type(flows)}, len={len(flows) if hasattr(flows,'__len__') else 'unknown'}")
+
  def identify_masks_finetune(settings):

  from .plot import print_mask_and_flows
spacr/utils.py
CHANGED
@@ -42,6 +42,7 @@ from torchvision.utils import make_grid
  import seaborn as sns
  import matplotlib.pyplot as plt
  from matplotlib.offsetbox import OffsetImage, AnnotationBbox
+ import matplotlib as mpl

  from scipy import stats
  import scipy.ndimage as ndi
@@ -69,6 +70,24 @@ from spacr import __file__ as spacr_path
  import umap.umap_ as umap
  #import umap

+ def _generate_mask_random_cmap(mask):
+ """
+ Generate a random colormap based on the unique labels in the given mask.
+
+ Parameters:
+ mask (ndarray): The mask array containing unique labels.
+
+ Returns:
+ ListedColormap: A random colormap generated based on the unique labels in the mask.
+ """
+ unique_labels = np.unique(mask)
+ num_objects = len(unique_labels[unique_labels != 0])
+ random_colors = np.random.rand(num_objects+1, 4)
+ random_colors[:, 3] = 1
+ random_colors[0, :] = [0, 0, 0, 1]
+ random_cmap = mpl.colors.ListedColormap(random_colors)
+ return random_cmap
+
  def filepaths_to_database(img_paths, settings, source_folder, crop_mode):

  png_df = pd.DataFrame(img_paths, columns=['png_path'])
@@ -1265,6 +1284,34 @@ def _pivot_counts_table(db_path):
  conn.close()

  def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel):
+ cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
+ nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
+ pathogen_mask_path = os.path.join(src, 'masks', 'pathogen_mask_stack')
+
+ if any(os.path.exists(p) for p in [cell_mask_path, nucleus_mask_path, pathogen_mask_path]):
+ if any(c is None for c in [nucleus_channel, pathogen_channel, cell_channel]):
+ print('Warning: Cellpose masks already exist. Unexpected behaviour if any channel is None while masks exist.')
+
+ cellpose_channels = {}
+
+ # Nucleus: always duplicated single channel
+ if nucleus_channel is not None:
+ cellpose_channels['nucleus'] = [nucleus_channel, nucleus_channel]
+
+ # Pathogen: always duplicated single channel
+ if pathogen_channel is not None:
+ cellpose_channels['pathogen'] = [pathogen_channel, pathogen_channel]
+
+ # Cell: prefer nucleus as second if available
+ if cell_channel is not None:
+ if nucleus_channel is not None:
+ cellpose_channels['cell'] = [nucleus_channel, cell_channel]
+ else:
+ cellpose_channels['cell'] = [cell_channel, cell_channel]
+
+ return cellpose_channels
+
+ def _get_cellpose_channels_v1(src, nucleus_channel, pathogen_channel, cell_channel):

  cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
  nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
@@ -3150,6 +3197,58 @@ def _run_test_mode(src, regex, timelapse=False, test_images=10, random_test=True
  return test_folder_path

  def _choose_model(model_name, device, object_type='cell', restore_type=None, object_settings={}):
+ if object_type == 'pathogen':
+ if model_name == 'toxo_pv_lumen':
+ diameter = object_settings['diameter']
+ current_dir = os.path.dirname(__file__)
+ model_path = os.path.join(current_dir, 'models', 'cp', 'toxo_pv_lumen.CP_model')
+ print(model_path)
+ model = cp_models.CellposeModel(
+ gpu=torch.cuda.is_available(),
+ model_type=None,
+ pretrained_model=model_path,
+ diam_mean=diameter,
+ device=device
+ )
+ print('Using Toxoplasma PV lumen model to generate pathogen masks')
+ return model
+
+ restore_list = ['denoise', 'deblur', 'upsample', None]
+ if restore_type not in restore_list:
+ print(f"Invalid restore type. Choose from {restore_list}, defaulting to None")
+ restore_type = None
+
+ if restore_type is None:
+ if model_name == 'sam':
+ model = cp_models.CellposeModel(gpu=torch.cuda.is_available(), device=device, pretrained_model='cpsam',)
+ return model
+ if model_name in ['cyto', 'cyto2', 'cyto3', 'nuclei']:
+ model = cp_models.CellposeModel(gpu=torch.cuda.is_available(), model_type=model_name, device=device)
+ return model
+ else:
+ if object_type == 'nucleus':
+ restore = f'{restore_type}_nuclei'
+ model = denoise.CellposeDenoiseModel(
+ gpu=torch.cuda.is_available(),
+ model_type="nuclei",
+ restore_type=restore,
+ chan2_restore=False,
+ device=device
+ )
+ return model
+ else:
+ restore = f'{restore_type}_cyto3'
+ chan2_restore = (model_name == 'cyto2')
+ model = denoise.CellposeDenoiseModel(
+ gpu=torch.cuda.is_available(),
+ model_type="cyto3",
+ restore_type=restore,
+ chan2_restore=chan2_restore,
+ device=device
+ )
+ return model
+
+ def _choose_model_v1(model_name, device, object_type='cell', restore_type=None, object_settings={}):

  if object_type == 'pathogen':
  if model_name == 'toxo_pv_lumen':
@@ -3168,16 +3267,16 @@ def _choose_model(model_name, device, object_type='cell', restore_type=None, obj

  if restore_type == None:
  if model_name in ['cyto', 'cyto2', 'cyto3', 'nuclei']:
- model = cp_models.
+ model = cp_models.CellposeModel(gpu=torch.cuda.is_available(), model_type=model_name, device=device)
  return model
  else:
  if object_type == 'nucleus':
- restore = f'{
+ restore = f'{restore_type}_nuclei'
  model = denoise.CellposeDenoiseModel(gpu=torch.cuda.is_available(), model_type="nuclei",restore_type=restore, chan2_restore=False, device=device)
  return model

  else:
- restore = f'{
+ restore = f'{restore_type}_cyto3'
  if model_name =='cyto2':
  chan2_restore = True
  if model_name =='cyto':
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 0.9.26
+ Version: 1.0.1
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
@@ -13,7 +13,8 @@ License-File: LICENSE
  Requires-Dist: numpy <2.0,>=1.26.4
  Requires-Dist: pandas <3.0,>=2.2.1
  Requires-Dist: scipy <2.0,>=1.12.0
- Requires-Dist: cellpose <
+ Requires-Dist: cellpose <5.0,>=4.0
+ Requires-Dist: segment-anything
  Requires-Dist: scikit-image <1.0,>=0.22.0
  Requires-Dist: scikit-learn <2.0,>=1.4.1
  Requires-Dist: scikit-posthocs <0.20,>=0.10.0
@@ -90,12 +91,15 @@ Requires-Dist: opencv-python-headless ; extra == 'headless'
  :target: https://github.com/EinarOlafsson/spacr/blob/main/LICENSE
  .. |repo size| image:: https://img.shields.io/github/repo-size/EinarOlafsson/spacr
  :target: https://github.com/EinarOlafsson/spacr/
+ .. |Tutorial| image:: https://img.shields.io/badge/Tutorial-Click%20Here-brightgreen
+ :target: https://einarolafsson.github.io/spacr/tutorial/
+

  .. _docs: https://einarolafsson.github.io/spacr/index.html

  Badges
  ------
- |Docs| |PyPI version| |Python version| |Licence: MIT| |repo size|
+ |Docs| |PyPI version| |Python version| |Licence: MIT| |repo size| |Tutorial|

  SpaCr
  =====
@@ -122,7 +126,6 @@ Features
  :alt: SpaCr workflow
  :align: center

-
  **Overview and data organization of spaCR.**

  **a.** Schematic workflow of the spaCR pipeline for pooled image-based CRISPR screens. Microscopy images (TIFF, LIF, CZI, NDI) and sequencing reads (FASTQ) are used as inputs (black). The main modules (teal) are: (1) Mask: generates object masks for cells, nuclei, pathogens, and cytoplasm; (2) Measure: extracts object-level features and crops object images, storing quantitative data in an SQL database; (3) Classify—applies machine learning (ML, e.g., XGBoost) or deep learning (DL, e.g., PyTorch) models to classify objects, summarizing results as well-level classification scores; (4) Map Barcodes: extracts and maps row, column, and gRNA barcodes from sequencing data to corresponding wells; (5) Regression: estimates gRNA effect sizes and gene scores via multiple linear regression using well-level summary statistics.
@@ -140,8 +143,7 @@ If using Windows, switch to Linux—it's free, open-source, and better.

  ::

- brew install libomp
- brew install hdf5
+ brew install libomp hdf5 cmake openssl

  **Linux GUI requirement:**
  SpaCr GUI requires Tkinter.
@@ -195,6 +197,13 @@ The following example Jupyter notebooks illustrate common workflows using spaCR.
  - `Finetune cellpose models <https://github.com/EinarOlafsson/spacr/blob/main/Notebooks/5_spacr_train_cellpose.ipynb>`_
  *Finetune Cellpose models using your own annotated training data for improved segmentation accuracy.*

+ Interactive Tutorial (under construction)
+ -----------------------------------------
+
+ Click below to explore the step-by-step GUI and Notebook tutorials for spaCR:
+
+ |Tutorial|
+
  License
  -------
  spaCR is distributed under the terms of the MIT License.
@@ -205,3 +214,10 @@ How to Cite
  If you use spaCR in your research, please cite:
  Olafsson EB, et al. SpaCr: Spatial phenotype analysis of CRISPR-Cas9 screens. *Manuscript in preparation*.

+ Papers Using spaCR
+ -------------------
+ Below are selected publications that have used or cited spaCR:
+
+ - Olafsson EB, et al. *SpaCr: Spatial phenotype analysis of CRISPR-Cas9 screens.* Manuscript in preparation.
+ - `IRE1α promotes phagosomal calcium flux to enhance macrophage fungicidal activity <https://doi.org/10.1016/j.celrep.2025.115694>`_
+ - `Metabolic adaptability and nutrient scavenging in Toxoplasma gondii: insights from ingestion pathway-deficient mutants <https://doi.org/10.1128/msphere.01011-24>`_
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/RECORD
CHANGED
@@ -1,6 +1,6 @@
  spacr/__init__.py,sha256=EoGInYks0M4foZElYNhksrQK6aEO1au7cncWexWNhRw,1376
  spacr/__main__.py,sha256=H4MjaMF9ohZL6xfl1kTxVn1Nt_vEhhZArENMMBv8f4E,77
- spacr/app_annotate.py,sha256=
+ spacr/app_annotate.py,sha256=p0OyvgFycIug7RcLfejFmc4HWB7yQskCBxxy3Sdq_Y0,2905
  spacr/app_classify.py,sha256=urTP_wlZ58hSyM5a19slYlBxN0PdC-9-ga0hvq8CGWc,165
  spacr/app_make_masks.py,sha256=pqDhRpluiHZz-kPX2Zh_KbYe4TsU43qYBa_7f-rsjpw,1694
  spacr/app_mask.py,sha256=l-dBY8ftzCMdDe6-pXc2Nh_u-idNL9G7UOARiLJBtds,153
@@ -8,28 +8,28 @@ spacr/app_measure.py,sha256=_K7APYIeOKpV6e_LcqabBjvEi7mfq9Fch8175x1x0k8,162
  spacr/app_sequencing.py,sha256=DjG26jy4cpddnV8WOOAIiExtOe9MleVMY4MFa5uTo5w,157
  spacr/app_umap.py,sha256=ZWAmf_OsIKbYvolYuWPMYhdlVe-n2CADoJulAizMiEo,153
  spacr/chat_bot.py,sha256=n3Fhqg3qofVXHmh3H9sUcmfYy9MmgRnr48663MVdY9E,1244
- spacr/core.py,sha256=
+ spacr/core.py,sha256=A1PCJHtlYjJBMdIY0QH1VcrmjsAZy-JYVHQcBXQ5za8,46640
  spacr/deep_spacr.py,sha256=055tIo3WP3elGFiIuSZaLURgu2XyUDxAdbw5ezASEqM,54526
  spacr/gui.py,sha256=NhMh96KoArrSAaJBV6PhDQpIC1cQpxgb6SclhRbYG8s,8122
- spacr/gui_core.py,sha256=
- spacr/gui_elements.py,sha256=
- spacr/gui_utils.py,sha256=
- spacr/io.py,sha256=
+ spacr/gui_core.py,sha256=kgOXpQiBp4QXp08KViWinDVB6pVM7Gfn26bKeees6tM,54561
+ spacr/gui_elements.py,sha256=OTU7aeLrPiMUTnyCT-J7ygng3beI9tdA0MmypOavEkw,156123
+ spacr/gui_utils.py,sha256=1mWns46yK-CQpbhtERSTPx7KXBYPUIQz2UnFFxNxOrU,40749
+ spacr/io.py,sha256=g6vybQeGLdTXrAqEjM6X1aoB6lyZVUq6pTI0ASppX4g,159257
  spacr/logger.py,sha256=lJhTqt-_wfAunCPl93xE65Wr9Y1oIHJWaZMjunHUeIw,1538
- spacr/measure.py,sha256=
+ spacr/measure.py,sha256=nYvrfVfCIqD1AUk4QBE2jtpeSFtLdfUcnkhkqf9G4xQ,60877
  spacr/mediar.py,sha256=p0F515eFbm6_rePSnChsgqrgH-H5Sr_3zWrghtOnAUg,14863
  spacr/ml.py,sha256=XCRZeX7UkbMctQICIoskeWVx8CCmmCoHNauUOAkfFq0,91692
  spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
- spacr/plot.py,sha256=
+ spacr/plot.py,sha256=76E1CZpsmNeNtbnkXJtgcVOesq4voL7XkaUnD74RDMk,169418
  spacr/sequencing.py,sha256=EY12RdW5QRKpHDRQCw1QoAlxCq8FK2v6WoVa5uuDBXQ,26745
- spacr/settings.py,sha256=
+ spacr/settings.py,sha256=38MClzEv-eP4Kfo_UJ_jlIzj0Vos3TY5-scPlBpolYI,88520
  spacr/sim.py,sha256=1xKhXimNU3ukzIw-3l9cF3Znc_brW8h20yv8fSTzvss,71173
- spacr/sp_stats.py,sha256=
- spacr/spacr_cellpose.py,sha256=
+ spacr/sp_stats.py,sha256=C93Xe5fphQOKthw4Tmj8pHx-Nb1houIL-YYVIfmnQPg,9535
+ spacr/spacr_cellpose.py,sha256=AvnyD2qoj-lUqhICeTpfhyk9T2hCjZrpBXn2iKh1EYE,16785
  spacr/submodules.py,sha256=Z2i4kv_rWdxqoXsOKCF7BaSXtvaCZB69Ow8_FQBnZsY,83093
  spacr/timelapse.py,sha256=-5ZupTsCCpbenIQ2zsUmnwXh45B82fO-gPrSXOxu2s8,42980
  spacr/toxo.py,sha256=GoNfgyH-NJx3WOzNQPgzODir7Jp65fs7UM46XpzcrUo,26056
- spacr/utils.py,sha256=
+ spacr/utils.py,sha256=PulmDqENBFYKgN2fjcVSB39B_9CwwGDiBi9FOI55zQs,238470
  spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
  spacr/resources/data/lopit.csv,sha256=ERI5f9W8RdJGiSx_khoaylD374f8kmvLia1xjhD_mII,4421709
  spacr/resources/data/toxoplasma_metadata.csv,sha256=9TXx0VlClDHAxQmaLhoklE8NuETduXaGHZjhR_6lZfs,2969409
@@ -103,9 +103,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
  spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
  spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
  spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
- spacr-0.
- spacr-0.
- spacr-0.
- spacr-0.
- spacr-0.
- spacr-0.
+ spacr-1.0.1.dist-info/LICENSE,sha256=t0Pov6pnK8thLteoF4xZGmdCwe5mhNwl3OXxLYTGD9U,1081
+ spacr-1.0.1.dist-info/METADATA,sha256=C_PPoW7NPT7RH3d3iJ2yfX3GR8cy-uQ8tx1RGmqq854,10963
+ spacr-1.0.1.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ spacr-1.0.1.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+ spacr-1.0.1.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+ spacr-1.0.1.dist-info/RECORD,,
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/LICENSE
File without changes
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/WHEEL
File without changes
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/entry_points.txt
File without changes
{spacr-0.9.26.dist-info → spacr-1.0.1.dist-info}/top_level.txt
File without changes