spacr 1.0.0__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/__init__.py CHANGED
@@ -1,4 +1,13 @@
- import logging, os
+ import logging, os, builtins
+
+ def silent_print(*args, **kwargs):
+ if "Welcome to CellposeSAM" in str(args[0]):
+ return # skip the banner
+ return _original_print(*args, **kwargs)
+
+ _original_print = builtins.print
+ builtins.print = silent_print
+
 
  from . import core
  from . import io
@@ -22,7 +31,6 @@ from . import app_classify
  from . import app_sequencing
  from . import app_umap
  from . import submodules
- from . import openai
  from . import ml
  from . import toxo
  from . import spacr_cellpose
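The new __init__.py silences the "Welcome to CellposeSAM" banner by replacing builtins.print with a filtering wrapper before the submodules are imported. A minimal, self-contained sketch of the same pattern (standalone code, not spacr's; it also guards against print() being called with no arguments):

    import builtins

    _original_print = builtins.print  # keep a handle so normal output still works

    def silent_print(*args, **kwargs):
        # Drop only the unwanted banner; pass everything else through unchanged.
        if args and "Welcome to CellposeSAM" in str(args[0]):
            return
        return _original_print(*args, **kwargs)

    builtins.print = silent_print

    print("Welcome to CellposeSAM!")  # suppressed
    print("masks generated")          # still printed

    builtins.print = _original_print  # restore if the patch should be temporary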
spacr/core.py CHANGED
@@ -145,7 +145,7 @@ def preprocess_generate_masks(settings):
  parasite_folder = os.path.join(mask_src, 'pathogen_mask_stack')
  #organelle_folder = os.path.join(mask_src, 'organelle_mask_stack')
  print(f'Adjusting cell masks with nuclei and pathogen masks')
- adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30)
+ adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30, n_jobs=settings['n_jobs'])
  stop = time.time()
  adjust_time = (stop-start)/60
  print(f'Cell mask adjustment: {adjust_time} min.')
@@ -243,6 +243,7 @@ def generate_cellpose_masks(src, settings, object_type):
 
  if object_type not in cellpose_channels:
  raise ValueError(f"Error: No channels were specified for object_type '{object_type}'. Check your settings.")
+
  channels = cellpose_channels[object_type]
 
  #cellpose_batch_size = _get_cellpose_batch_size()
@@ -417,8 +418,8 @@ def generate_cellpose_masks(src, settings, object_type):
  else:
  mask_stack = _masks_to_masks_stack(masks)
 
- if settings['plot']:
- plot_cellpose4_output(batch_list, masks, flows, cmap='inferno', figuresize=figuresize, nr=1, print_object_number=True)
+ #if settings['plot']:
+ # plot_cellpose4_output(batch_list, masks, flows, cmap='inferno', figuresize=figuresize, nr=1, print_object_number=True)
 
  if not np.any(mask_stack):
  avg_num_objects_per_image, average_obj_size = 0, 0
spacr/deep_spacr.py CHANGED
@@ -940,7 +940,7 @@ def deep_spacr(settings={}):
 
  def model_knowledge_transfer(teacher_paths, student_save_path, data_loader, device='cpu', student_model_name='maxvit_t', pretrained=True, dropout_rate=None, use_checkpoint=False, alpha=0.5, temperature=2.0, lr=1e-4, epochs=10):
 
- from spacr.utils import TorchModel # Adapt if needed
+ from .utils import TorchModel
 
  # Adjust filename to reflect knowledge-distillation if desired
  if student_save_path.endswith('.pth'):
@@ -1044,7 +1044,7 @@ def model_knowledge_transfer(teacher_paths, student_save_path, data_loader, devi
 
  def model_fusion(model_paths,save_path,device='cpu',model_name='maxvit_t',pretrained=True,dropout_rate=None,use_checkpoint=False,aggregator='mean'):
 
- from spacr.utils import TorchModel
+ from .utils import TorchModel
 
  if save_path.endswith('.pth'):
  save_path_part1, ext = os.path.splitext(save_path)
spacr/gui_core.py CHANGED
@@ -176,7 +176,6 @@ def display_figure(fig):
  show_next_figure()
 
  def zoom(event):
- # Define zoom factors
  zoom_in_factor = 1 / 1.2
  zoom_out_factor = 1.2
 
@@ -187,83 +186,49 @@ def display_figure(fig):
  else:
  return
 
+ # Find the axis under the cursor
+ ref_ax = None
  for ax in canvas.figure.get_axes():
- # Convert canvas pixel (event.x, event.y) to axis data coordinates
- # EVEN IF the mouse is over a different axis, we use the same pixel to data mapping for each
- inv = ax.transData.inverted()
- try:
- data_x, data_y = inv.transform((event.x, event.y))
- except ValueError:
- continue # e.g. axis has no data
-
- xlim = ax.get_xlim()
- ylim = ax.get_ylim()
-
- # Zoom around (data_x, data_y) in *that axis's* data space
- new_xlim = [data_x - (data_x - xlim[0]) * factor,
- data_x + (xlim[1] - data_x) * factor]
- new_ylim = [data_y - (data_y - ylim[0]) * factor,
- data_y + (ylim[1] - data_y) * factor]
-
- ax.set_xlim(new_xlim)
- ax.set_ylim(new_ylim)
-
- # Clip text labels
- for label in ax.texts:
- label.set_clip_on(True)
-
- # Update label visibility
- if hasattr(ax, '_label_annotations'):
- for label in ax._label_annotations:
- x, y = label.get_position()
- is_visible = (new_xlim[0] <= x <= new_xlim[1]) and (new_ylim[0] <= y <= new_ylim[1])
- label.set_visible(is_visible)
+ if ax.get_window_extent().contains(event.x, event.y):
+ ref_ax = ax
+ break
 
- canvas.draw_idle()
-
- def zoom_v2(event):
- # Define zoom factors
- zoom_in_factor = 1 / 1.2
- zoom_out_factor = 1.2
+ if ref_ax is None:
+ return
 
- if event.num == 4 or (hasattr(event, 'delta') and event.delta > 0):
- factor = zoom_in_factor
- elif event.num == 5 or (hasattr(event, 'delta') and event.delta < 0):
- factor = zoom_out_factor
- else:
+ try:
+ # Convert mouse position to data coords in reference axis
+ data_x, data_y = ref_ax.transData.inverted().transform((event.x, event.y))
+ except ValueError:
  return
 
+ # Get current limits
+ xlim = ref_ax.get_xlim()
+ ylim = ref_ax.get_ylim()
+
+ # Compute new limits for the reference axis
+ new_xlim = [
+ data_x - (data_x - xlim[0]) * factor,
+ data_x + (xlim[1] - data_x) * factor
+ ]
+ new_ylim = [
+ data_y - (data_y - ylim[0]) * factor,
+ data_y + (ylim[1] - data_y) * factor
+ ]
+
+ # Apply the same limits to all axes
  for ax in canvas.figure.get_axes():
- # Translate mouse position to figure coordinates
- mouse_x, mouse_y = event.x, event.y
- inv = ax.transData.inverted()
- data_x, data_y = inv.transform((mouse_x, mouse_y))
-
- xlim = ax.get_xlim()
- ylim = ax.get_ylim()
-
- # Compute new limits centered on the mouse data position
- new_width = (xlim[1] - xlim[0]) * factor
- new_height = (ylim[1] - ylim[0]) * factor
-
- new_xlim = [data_x - (data_x - xlim[0]) * factor,
- data_x + (xlim[1] - data_x) * factor]
- new_ylim = [data_y - (data_y - ylim[0]) * factor,
- data_y + (ylim[1] - data_y) * factor]
-
  ax.set_xlim(new_xlim)
  ax.set_ylim(new_ylim)
 
- # Clip all text labels to the axes area
  for label in ax.texts:
  label.set_clip_on(True)
 
- # Update label visibility based on new limits
  if hasattr(ax, '_label_annotations'):
  for label in ax._label_annotations:
  x, y = label.get_position()
- is_visible = (new_xlim[0] <= x <= new_xlim[1]) and (new_ylim[0] <= y <= new_ylim[1])
- label.set_visible(is_visible)
+ visible = new_xlim[0] <= x <= new_xlim[1] and new_ylim[0] <= y <= new_ylim[1]
+ label.set_visible(visible)
 
  canvas.draw_idle()
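The rewritten zoom handler first locates the axis under the cursor, converts the pointer position to that axis's data coordinates, rescales the limits around that point, and then applies the same limits to every axis so all panels stay in sync. A small sketch of just the limit arithmetic (function and variable names are illustrative, not taken from gui_core.py):

    def zoomed_limits(center, lim, factor):
        """Rescale an axis range lim = (lo, hi) around center; factor < 1 zooms in."""
        lo, hi = lim
        return (center - (center - lo) * factor,
                center + (hi - center) * factor)

    # Zooming in by 1/1.2 around x = 5 on an axis spanning 0..10
    print(zoomed_limits(5.0, (0.0, 10.0), 1 / 1.2))  # approximately (0.833, 9.167)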
 
@@ -584,16 +549,19 @@ def setup_settings_panel(vertical_container, settings_type='mask'):
  from .gui_elements import set_element_size
 
  size_dict = set_element_size()
+
  settings_width = size_dict['settings_width']
-
+
  # Create a PanedWindow for the settings panel
- settings_paned_window = tk.PanedWindow(vertical_container, orient=tk.HORIZONTAL, width=size_dict['settings_width'])
+ settings_paned_window = tk.PanedWindow(vertical_container, orient=tk.HORIZONTAL, width=settings_width)
  vertical_container.add(settings_paned_window, stretch="always")
 
  settings_frame = tk.Frame(settings_paned_window, width=settings_width)
- settings_frame.pack_propagate(False) # Prevent the frame from resizing based on its children
+ settings_frame.pack_propagate(False)
 
  settings_paned_window.add(settings_frame)
+ #settings_paned_window.add(settings_frame, width=settings_width)
+
 
  scrollable_frame = spacrFrame(settings_frame)
  scrollable_frame.grid(row=1, column=0, sticky="nsew")
@@ -1244,7 +1212,7 @@ def cleanup_previous_instance():
  canvas = None
 
  print("Previous instance cleaned up successfully.")
-
+
  def initiate_root(parent, settings_type='mask'):
  """
  Initializes the root window and sets up the GUI components based on the specified settings type.
@@ -1258,6 +1226,7 @@ def initiate_root(parent, settings_type='mask'):
  """
 
  global q, fig_queue, thread_control, parent_frame, scrollable_frame, button_frame, vars_dict, canvas, canvas_widget, button_scrollable_frame, progress_bar, uppdate_frequency, figures, figure_index, index_control, usage_bars
+ from .gui_elements import set_element_size
 
  # Clean up any previous instance
  cleanup_previous_instance()
@@ -1270,8 +1239,6 @@ def initiate_root(parent, settings_type='mask'):
  uppdate_frequency = 500
  num_cores = os.cpu_count()
 
- #chatbot = Chatbot(api_key="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")
-
  # Start tracemalloc and initialize global variables
  tracemalloc.start()
 
spacr/gui_elements.py CHANGED
@@ -39,7 +39,7 @@ def restart_gui_app(root):
  root.destroy()
 
  # Import and launch a new instance of the application
- from spacr.gui import gui_app
+ from .gui import gui_app
  new_root = tk.Tk() # Create a fresh Tkinter root instance
  gui_app()
  except Exception as e:
@@ -81,7 +81,7 @@ def create_menu_bar(root):
  # Add a separator and an exit option
  app_menu.add_separator()
  #app_menu.add_command(label="Home",command=lambda: restart_gui_app(root))
- app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://spacr.readthedocs.io/en/latest/?badge=latest"))
+ app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://einarolafsson.github.io/spacr/index.html"))
  app_menu.add_command(label="Exit", command=root.quit)
 
  # Configure the menu for the root window
@@ -2851,7 +2851,7 @@ class AnnotateApp:
  # Optionally, update your GUI status label
  self.update_gui_text("Merging data...")
 
- from spacr.io import _read_and_merge_data # Adapt to your actual import
+ from .io import _read_and_merge_data
 
  # (1) Merge data
  merged_df, obj_df_ls = _read_and_merge_data(
spacr/gui_utils.py CHANGED
@@ -242,45 +242,9 @@ def annotate(settings):
  threshold=settings['threshold'],
  normalize_channels=settings['normalize_channels'])
 
- #next_button = tk.Button(root, text="Next", command=app.next_page)
- #next_button.grid(row=app.grid_rows, column=app.grid_cols - 1)
- #back_button = tk.Button(root, text="Back", command=app.previous_page)
- #back_button.grid(row=app.grid_rows, column=app.grid_cols - 2)
- #exit_button = tk.Button(root, text="Exit", command=app.shutdown)
- #exit_button.grid(row=app.grid_rows, column=app.grid_cols - 3)
-
  app.load_images()
  root.mainloop()
 
- def generate_annotate_fields_v1(frame):
- from .settings import set_annotate_default_settings
- from .gui_elements import set_dark_style
-
- style_out = set_dark_style(ttk.Style())
- font_loader = style_out['font_loader']
- font_size = style_out['font_size'] - 2
-
- vars_dict = {}
- settings = set_annotate_default_settings(settings={})
-
- for setting in settings:
- vars_dict[setting] = {
- 'entry': ttk.Entry(frame),
- 'value': settings[setting]
- }
-
- # Arrange input fields and labels
- for row, (name, data) in enumerate(vars_dict.items()):
- tk.Label(frame, text=f"{name.replace('_', ' ').capitalize()}:", bg=style_out['bg_color'], fg=style_out['fg_color'], font=font_loader.get_font(size=font_size)).grid(row=row, column=0)
- if isinstance(data['value'], list):
- # Convert lists to comma-separated strings
- data['entry'].insert(0, ','.join(map(str, data['value'])))
- else:
- data['entry'].insert(0, data['value'])
- data['entry'].grid(row=row, column=1)
-
- return vars_dict
-
  def generate_annotate_fields(frame):
  from .settings import set_annotate_default_settings
  from .gui_elements import set_dark_style
@@ -496,8 +460,6 @@ def function_gui_wrapper(function=None, settings={}, q=None, fig_queue=None, imp
  # Restore the original plt.show function
  plt.show = original_show
 
-
-
  def run_function_gui(settings_type, settings, q, fig_queue, stop_requested):
 
  from .core import generate_image_umap, preprocess_generate_masks
@@ -632,6 +594,12 @@ def setup_frame(parent_frame):
  tk.Label(settings_container, text="Settings Container", bg=style_out['bg_color']).pack(fill=tk.BOTH, expand=True)
 
  set_dark_style(style, parent_frame, [settings_container, vertical_container, horizontal_container, main_paned])
+
+ # Set initial sash position for main_paned (left/right split)
+ parent_frame.update_idletasks()
+ screen_width = parent_frame.winfo_screenwidth()
+ target_width = int(screen_width / 4)
+ main_paned.sash_place(0, target_width, 0)
 
  return parent_frame, vertical_container, horizontal_container, settings_container
 
spacr/io.py CHANGED
@@ -1,10 +1,9 @@
- import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime, traceback
+ import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime
  import numpy as np
  import pandas as pd
  from PIL import Image, ImageOps
  from collections import defaultdict, Counter
  from pathlib import Path
- from functools import partial
  from matplotlib.animation import FuncAnimation
  from IPython.display import display
  from skimage.util import img_as_uint
@@ -298,26 +297,16 @@ def _load_normalized_images_and_labels(image_files, label_files, channels=None,
  return normalized_images, labels, image_names, label_names, orig_dims
 
  class CombineLoaders:
-
  """
  A class that combines multiple data loaders into a single iterator.
 
  Args:
  train_loaders (list): A list of data loaders.
 
- Attributes:
- train_loaders (list): A list of data loaders.
- loader_iters (list): A list of iterator objects for each data loader.
-
- Methods:
- __iter__(): Returns the iterator object itself.
- __next__(): Returns the next batch from one of the data loaders.
-
  Raises:
  StopIteration: If all data loaders have been exhausted.
-
  """
-
+
  def __init__(self, train_loaders):
  self.train_loaders = train_loaders
  self.loader_iters = [iter(loader) for loader in train_loaders]
@@ -327,7 +316,7 @@ class CombineLoaders:
 
  def __next__(self):
  while self.loader_iters:
- random.shuffle(self.loader_iters) # Shuffle the loader_iters list
+ random.shuffle(self.loader_iters)
  for i, loader_iter in enumerate(self.loader_iters):
  try:
  batch = next(loader_iter)
@@ -377,14 +366,6 @@ class NoClassDataset(Dataset):
  transform (callable, optional): A function/transform to apply to the image data. Default is None.
  shuffle (bool, optional): Whether to shuffle the dataset. Default is True.
  load_to_memory (bool, optional): Whether to load all images into memory. Default is False.
-
- Attributes:
- data_dir (str): The directory path where the image files are located.
- transform (callable): A function/transform to apply to the image data.
- shuffle (bool): Whether to shuffle the dataset.
- load_to_memory (bool): Whether to load all images into memory.
- filenames (list): A list of file paths for the image files.
- images (list): A list of loaded images (if load_to_memory is True).
  """
 
  def __init__(self, data_dir, transform=None, shuffle=True, load_to_memory=False):
@@ -392,13 +373,16 @@ class NoClassDataset(Dataset):
  self.transform = transform
  self.shuffle = shuffle
  self.load_to_memory = load_to_memory
- self.filenames = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if os.path.isfile(os.path.join(data_dir, f))]
+ self.filenames = [
+ os.path.join(data_dir, f)
+ for f in os.listdir(data_dir)
+ if os.path.isfile(os.path.join(data_dir, f))
+ ]
  if self.shuffle:
  self.shuffle_dataset()
  if self.load_to_memory:
  self.images = [self.load_image(f) for f in self.filenames]
 
- #@lru_cache(maxsize=None)
  def load_image(self, img_path):
  """
  Load an image from the given file path.
@@ -446,9 +430,9 @@ class NoClassDataset(Dataset):
  img = self.transform(img)
  else:
  img = ToTensor()(img)
- # Return both the image and its filename
  return img, self.filenames[index]
 
+
  class spacrDataset(Dataset):
  def __init__(self, data_dir, loader_classes, transform=None, shuffle=True, pin_memory=False, specific_files=None, specific_labels=None):
  self.data_dir = data_dir
@@ -579,7 +563,7 @@ class spacrDataLoader(DataLoader):
  def __del__(self):
  self.cleanup()
 
- class NoClassDataset(Dataset):
+ class NoClassDataset_v1(Dataset):
  def __init__(self, data_dir, transform=None, shuffle=True, load_to_memory=False):
  self.data_dir = data_dir
  self.transform = transform
@@ -614,7 +598,6 @@ class NoClassDataset(Dataset):
  img = ToTensor()(img)
  return img, self.filenames[index]
 
-
  class TarImageDataset(Dataset):
  def __init__(self, tar_path, transform=None):
  self.tar_path = tar_path
spacr/ml.py CHANGED
@@ -14,21 +14,18 @@ from IPython.display import display
  import scipy.stats as st
  import statsmodels.api as sm
  import statsmodels.formula.api as smf
- from statsmodels.tools import add_constant
  from statsmodels.regression.mixed_linear_model import MixedLM
- from statsmodels.tools.sm_exceptions import PerfectSeparationError
  from statsmodels.stats.outliers_influence import variance_inflation_factor
  from statsmodels.genmod.families import Binomial
  from statsmodels.genmod.families.links import logit
  from statsmodels.othermod.betareg import BetaModel
- from scipy.optimize import minimize
  from scipy.special import gammaln, psi, expit
  from sklearn.linear_model import Lasso, Ridge
  from sklearn.preprocessing import FunctionTransformer
  from patsy import dmatrices
 
- from sklearn.metrics import classification_report, accuracy_score
- from sklearn.model_selection import StratifiedKFold, cross_val_score
+ from sklearn.metrics import classification_report
+ from sklearn.model_selection import StratifiedKFold
  from sklearn.feature_selection import SelectKBest, f_classif
  from sklearn.model_selection import train_test_split
  from sklearn.ensemble import RandomForestClassifier, HistGradientBoostingClassifier
@@ -37,7 +34,7 @@ from sklearn.inspection import permutation_importance
  from sklearn.metrics import classification_report, precision_recall_curve
  from sklearn.preprocessing import StandardScaler
  from sklearn.preprocessing import MinMaxScaler
- from scipy.spatial.distance import cosine, euclidean, mahalanobis, cityblock, minkowski, chebyshev, hamming, jaccard, braycurtis
+ from scipy.spatial.distance import cosine, euclidean, mahalanobis, cityblock, minkowski, chebyshev, braycurtis
  from xgboost import XGBClassifier
 
  import numpy as np
@@ -120,7 +117,7 @@ def scale_variables(X, y):
 
  return X_scaled, y_scaled
 
- def process_model_coefficients(model, regression_type, X, y, nc, pc, controls):
+ def process_model_coefficients_v1(model, regression_type, X, y, nc, pc, controls):
  """Return DataFrame of model coefficients and p-values."""
  if regression_type in ['ols', 'gls', 'wls', 'rlm', 'glm', 'mixed', 'quantile', 'logit', 'probit', 'poisson']:
  coefs = model.params
@@ -159,7 +156,7 @@ def process_model_coefficients(model, regression_type, X, y, nc, pc, controls):
  coef_df['condition'] = coef_df.apply(lambda row: 'nc' if nc in row['feature'] else 'pc' if pc in row['feature'] else ('control' if row['grna'] in controls else 'other'),axis=1)
  return coef_df[~coef_df['feature'].str.contains('row|column')]
 
- def check_distribution(y):
+ def check_distribution_v1(y):
  """Check the type of distribution to recommend a model."""
  if np.all((y == 0) | (y == 1)):
  print("Detected binary data.")
@@ -307,7 +304,7 @@ def check_and_clean_data(df, dependent_variable):
  print("Data is ready for model fitting.")
  return df_cleaned
 
- def check_normality(y, variable_name):
+ def check_normality_v1(y, variable_name):
  """Check if the data is normally distributed using the Shapiro-Wilk test."""
  from scipy.stats import shapiro
 
@@ -326,7 +323,7 @@ def minimum_cell_simulation(settings, num_repeats=10, sample_size=100, tolerance
  Detect and mark the elbow point (inflection) with smoothing and tolerance control.
  """
 
- from spacr.utils import correct_metadata_column_names
+ from .utils import correct_metadata_column_names
 
  # Load and process data
  if isinstance(settings['score_data'], str):
spacr/plot.py CHANGED
@@ -1,4 +1,4 @@
- import os, random, cv2, glob, math, torch, skimage
+ import os, random, cv2, glob, math, torch, itertools
 
  import numpy as np
  import pandas as pd
@@ -11,27 +11,19 @@ import scipy.stats as stats
  import statsmodels.api as sm
  import imageio.v2 as imageio
  from IPython.display import display
- from skimage.segmentation import find_boundaries
  from skimage import measure
  from skimage.measure import find_contours, label, regionprops
- from skimage.segmentation import mark_boundaries
  from skimage.transform import resize as sk_resize
  import scikit_posthocs as sp
  from scipy.stats import chi2_contingency
  import tifffile as tiff
 
- from scipy.stats import normaltest, ttest_ind, mannwhitneyu, f_oneway, kruskal
+ from scipy.stats import normaltest, ttest_ind, mannwhitneyu, f_oneway, kruskal, levene, shapiro
  from statsmodels.stats.multicomp import pairwise_tukeyhsd
- from scipy.stats import ttest_ind, mannwhitneyu, levene, wilcoxon, kruskal, normaltest, shapiro
- import itertools
  import pingouin as pg
 
  from ipywidgets import IntSlider, interact
  from IPython.display import Image as ipyimage
-
- import matplotlib.patches as patches
- from collections import defaultdict
- from matplotlib.gridspec import GridSpec
  from matplotlib_venn import venn2
 
  def plot_image_mask_overlay(
spacr/sim.py CHANGED
@@ -65,7 +65,7 @@ def gini_coefficient(x):
  diffsum = np.sum(np.abs(np.subtract.outer(x, x)))
  return diffsum / (2 * len(x) ** 2 * np.mean(x))
 
- def gini(x):
+ def gini_V1(x):
  """
  Calculate the Gini coefficient for a given array of values.
 
spacr/sp_stats.py CHANGED
@@ -1,7 +1,6 @@
  from scipy.stats import shapiro, normaltest, levene, ttest_ind, mannwhitneyu, kruskal, f_oneway
  from statsmodels.stats.multicomp import pairwise_tukeyhsd
  import scikit_posthocs as sp
- import numpy as np
  import pandas as pd
  from scipy.stats import chi2_contingency, fisher_exact
  import itertools
spacr/submodules.py CHANGED
@@ -20,8 +20,7 @@ from IPython.display import display
  from sklearn.ensemble import RandomForestClassifier
  from sklearn.inspection import permutation_importance
  from math import pi
- from scipy.stats import chi2_contingency, pearsonr
- from scipy.spatial.distance import cosine
+ from scipy.stats import chi2_contingency
 
  from sklearn.metrics import mean_absolute_error
  from skimage.measure import regionprops, label as sklabel
@@ -691,7 +690,8 @@ def analyze_plaques(settings):
  from .spacr_cellpose import identify_masks_finetune
  from .settings import get_analyze_plaque_settings
  from .utils import save_settings, download_models
- from spacr import __file__ as spacr_path
+ #from spacr import __file__ as spacr_path
+ spacr_path = os.path.join(os.path.dirname(__file__), '__init__.py')
 
  download_models()
  package_dir = os.path.dirname(spacr_path)
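Both submodules.py and utils.py now derive the package location from the module's own __file__ instead of importing spacr from inside the package, which avoids a circular import while the package is still initializing. A sketch of the idea (assuming the module lives inside the spacr package directory; the resource path is only an example):

    import os

    # Previously: from spacr import __file__ as spacr_path
    spacr_path = os.path.join(os.path.dirname(__file__), '__init__.py')
    package_dir = os.path.dirname(spacr_path)  # same as os.path.dirname(__file__)
    data_dir = os.path.join(package_dir, 'resources', 'data')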
spacr/timelapse.py CHANGED
@@ -7,12 +7,18 @@ from IPython.display import display
  from IPython.display import Image as ipyimage
  import trackpy as tp
  from btrack import datasets as btrack_datasets
- from skimage.measure import regionprops, regionprops_table
+ from skimage.measure import regionprops_table
  from scipy.signal import find_peaks
  from scipy.optimize import curve_fit, linear_sum_assignment
- from scipy.integrate import trapz
+
+ try:
+ from numpy import trapz
+ except ImportError:
+ from scipy.integrate import trapz
+
  import matplotlib.pyplot as plt
 
+
  def _npz_to_movie(arrays, filenames, save_path, fps=10):
  """
  Convert a list of numpy arrays to a movie file.
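timelapse.py now takes trapz from NumPy and falls back to scipy.integrate only if that import fails, since newer SciPy releases drop the trapz alias in favour of trapezoid. A version-tolerant sketch of the same shim (the exact releases that rename each alias are not pinned here):

    try:
        from numpy import trapezoid as trapz               # newer NumPy name
    except ImportError:
        try:
            from numpy import trapz                        # older NumPy
        except ImportError:
            from scipy.integrate import trapezoid as trapz  # SciPy fallback

    print(trapz([0.0, 1.0, 2.0], dx=1.0))  # trapezoidal rule: 2.0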
spacr/toxo.py CHANGED
@@ -5,23 +5,7 @@ import numpy as np
  from adjustText import adjust_text
  import pandas as pd
  from scipy.stats import fisher_exact
- from IPython.display import display
- from matplotlib.legend import Legend
- from matplotlib.transforms import Bbox
- from brokenaxes import brokenaxes
-
- import os
- import pandas as pd
- import seaborn as sns
- import matplotlib.pyplot as plt
- from scipy.spatial.distance import cosine
- from scipy.stats import pearsonr
- import pandas as pd
- import matplotlib.pyplot as plt
- import seaborn as sns
  from sklearn.metrics import mean_absolute_error
-
-
  from matplotlib.gridspec import GridSpec
 
  def custom_volcano_plot(data_path, metadata_path, metadata_column='tagm_location',point_size=50, figsize=20, threshold=0,save_path=None, x_lim=[-0.5, 0.5], y_lims=[[0, 6], [9, 20]]):
spacr/utils.py CHANGED
@@ -1,8 +1,11 @@
+
  import os, re, sqlite3, torch, torchvision, random, string, shutil, cv2, tarfile, glob, psutil, platform, gzip, subprocess, time, requests, ast, traceback
+
  import numpy as np
  import pandas as pd
  from cellpose import models as cp_models
  from cellpose import denoise
+ from functools import partial
 
  from skimage import morphology
  from skimage.measure import label, regionprops_table, regionprops
@@ -37,7 +40,6 @@ from torchvision import models
  from torchvision.models.resnet import ResNet18_Weights, ResNet34_Weights, ResNet50_Weights, ResNet101_Weights, ResNet152_Weights
  import torchvision.transforms as transforms
  from torchvision.models import resnet50
- from torchvision.utils import make_grid
 
  import seaborn as sns
  import matplotlib.pyplot as plt
@@ -65,10 +67,11 @@ from sklearn.decomposition import PCA
  from sklearn.ensemble import RandomForestClassifier
 
  from huggingface_hub import list_repo_files
- from spacr import __file__ as spacr_path
+
+ #from spacr import __file__ as spacr_path
+ spacr_path = os.path.join(os.path.dirname(__file__), '__init__.py')
 
  import umap.umap_ as umap
- #import umap
 
  def _generate_mask_random_cmap(mask):
  """
@@ -1283,7 +1286,7 @@ def _pivot_counts_table(db_path):
  pivoted_df.to_sql('pivoted_counts', conn, if_exists='replace', index=False)
  conn.close()
 
- def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel):
+ def _get_cellpose_channels_v2(src, nucleus_channel, pathogen_channel, cell_channel):
  cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
  nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
  pathogen_mask_path = os.path.join(src, 'masks', 'pathogen_mask_stack')
@@ -1311,7 +1314,7 @@ def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel)
 
  return cellpose_channels
 
- def _get_cellpose_channels_v1(src, nucleus_channel, pathogen_channel, cell_channel):
+ def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel):
 
  cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
  nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
@@ -1340,6 +1343,7 @@ def _get_cellpose_channels_v1(src, nucleus_channel, pathogen_channel, cell_chann
  cellpose_channels['cell'] = [0,1]
  else:
  cellpose_channels['cell'] = [0,0]
+
  return cellpose_channels
 
  def annotate_conditions(df, cells=None, cell_loc=None, pathogens=None, pathogen_loc=None, treatments=None, treatment_loc=None):
@@ -1541,9 +1545,8 @@ class Cache:
  """
  A class representing a cache with a maximum size.
 
- Attributes:
+ Args:
  max_size (int): The maximum size of the cache.
- cache (OrderedDict): The cache data structure.
  """
 
  def __init__(self, max_size):
@@ -1562,23 +1565,15 @@ class Cache:
  self.cache.popitem(last=False)
  self.cache[key] = value
 
- class ScaledDotProductAttention(nn.Module):
+ class ScaledDotProductAttention_v1(nn.Module):
  """
  Scaled Dot-Product Attention module.
 
  Args:
  d_k (int): The dimension of the key and query vectors.
-
- Attributes:
- d_k (int): The dimension of the key and query vectors.
-
- Methods:
- forward(Q, K, V): Performs the forward pass of the attention mechanism.
-
  """
-
  def __init__(self, d_k):
- super(ScaledDotProductAttention, self).__init__()
+ super(ScaledDotProductAttention_v1, self).__init__()
  self.d_k = d_k
 
  def forward(self, Q, K, V):
@@ -1592,24 +1587,23 @@ class ScaledDotProductAttention(nn.Module):
 
  Returns:
  torch.Tensor: The output tensor of shape (batch_size, seq_len_q, d_k).
-
  """
  scores = torch.matmul(Q, K.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.d_k, dtype=torch.float32))
  attention_probs = F.softmax(scores, dim=-1)
  output = torch.matmul(attention_probs, V)
  return output
 
- class SelfAttention(nn.Module):
+ class SelfAttention_v1(nn.Module):
  """
  Self-Attention module that applies scaled dot-product attention mechanism.
-
+
  Args:
  in_channels (int): Number of input channels.
  d_k (int): Dimensionality of the key and query vectors.
  """
 
  def __init__(self, in_channels, d_k):
- super(SelfAttention, self).__init__()
+ super(SelfAttention_v1, self).__init__()
  self.W_q = nn.Linear(in_channels, d_k)
  self.W_k = nn.Linear(in_channels, d_k)
  self.W_v = nn.Linear(in_channels, d_k)
@@ -1618,10 +1612,10 @@ class SelfAttention(nn.Module):
  def forward(self, x):
  """
  Forward pass of the SelfAttention module.
-
+
  Args:
  x (torch.Tensor): Input tensor of shape (batch_size, in_channels).
-
+
  Returns:
  torch.Tensor: Output tensor of shape (batch_size, d_k).
  """
@@ -3298,7 +3292,7 @@ class SelectChannels:
  img[2, :, :] = 0 # Zero out the blue channel
  return img
 
- def preprocess_image(image_path, image_size=224, channels=[1,2,3], normalize=True):
+ def preprocess_image_v1(image_path, image_size=224, channels=[1,2,3], normalize=True):
 
  if normalize:
  transform = transforms.Compose([
@@ -3739,7 +3733,7 @@ def merge_dataframes(df, image_paths_df, verbose):
  display(df)
  return df
 
- def remove_highly_correlated_columns(df, threshold):
+ def remove_highly_correlated_columns_v1(df, threshold):
  corr_matrix = df.corr().abs()
  upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
  to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > threshold)]
@@ -4678,7 +4672,54 @@ def _merge_cells_without_nucleus(adj_cell_mask: np.ndarray, nuclei_mask: np.ndar
 
  return out.astype(np.uint16)
 
- def adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30):
+ def process_mask_file_adjust_cell(file_name, parasite_folder, cell_folder, nuclei_folder, overlap_threshold, perimeter_threshold):
+ start = time.perf_counter()
+
+ parasite_path = os.path.join(parasite_folder, file_name)
+ cell_path = os.path.join(cell_folder, file_name)
+ nuclei_path = os.path.join(nuclei_folder, file_name)
+
+ if not (os.path.exists(cell_path) and os.path.exists(nuclei_path)):
+ raise ValueError(f"Corresponding cell or nuclei mask file for {file_name} not found.")
+
+ parasite_mask = np.load(parasite_path, allow_pickle=True)
+ cell_mask = np.load(cell_path, allow_pickle=True)
+ nuclei_mask = np.load(nuclei_path, allow_pickle=True)
+
+ merged_cell_mask = _merge_cells_based_on_parasite_overlap(parasite_mask, cell_mask, nuclei_mask, overlap_threshold, perimeter_threshold)
+ #merged_cell_mask = _merge_cells_without_nucleus(merged_cell_mask, nuclei_mask)
+
+ np.save(cell_path, merged_cell_mask)
+
+ end = time.perf_counter()
+ return end - start
+
+ def adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30, n_jobs=None):
+
+ parasite_files = sorted([f for f in os.listdir(parasite_folder) if f.endswith('.npy')])
+ cell_files = sorted([f for f in os.listdir(cell_folder) if f.endswith('.npy')])
+ nuclei_files = sorted([f for f in os.listdir(nuclei_folder) if f.endswith('.npy')])
+
+ if not (len(parasite_files) == len(cell_files) == len(nuclei_files)):
+ raise ValueError("The number of files in the folders do not match.")
+
+ n_jobs = n_jobs or max(1, cpu_count() - 2)
+
+ time_ls = []
+ files_to_process = len(parasite_files)
+ process_fn = partial(process_mask_file_adjust_cell,
+ parasite_folder=parasite_folder,
+ cell_folder=cell_folder,
+ nuclei_folder=nuclei_folder,
+ overlap_threshold=overlap_threshold,
+ perimeter_threshold=perimeter_threshold)
+
+ with Pool(n_jobs) as pool:
+ for i, duration in enumerate(pool.imap_unordered(process_fn, parasite_files), 1):
+ time_ls.append(duration)
+ print_progress(i, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=None, operation_type='adjust_cell_masks')
+
+ def adjust_cell_masks_v1(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30):
 
  """
  Process all npy files in the given folders. Merge and relabel cells in cell masks
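adjust_cell_masks is now parallelized: a per-file worker (process_mask_file_adjust_cell) is bound with functools.partial and fanned out over a multiprocessing Pool via imap_unordered, with n_jobs defaulting to cpu_count() - 2. A minimal, self-contained sketch of that pattern (the worker and file list below are placeholders, not spacr code):

    import os
    from functools import partial
    from multiprocessing import Pool, cpu_count

    def process_one(file_name, folder, scale=1.0):
        # Placeholder worker: spacr's version loads the .npy masks, merges them, and saves the result.
        return f"{os.path.join(folder, file_name)} (scale={scale})"

    def process_all(files, folder, n_jobs=None):
        n_jobs = n_jobs or max(1, cpu_count() - 2)
        worker = partial(process_one, folder=folder, scale=2.0)
        with Pool(n_jobs) as pool:
            # imap_unordered yields results as workers finish, which suits progress reporting.
            for i, result in enumerate(pool.imap_unordered(worker, files), 1):
                print(f"[{i}/{len(files)}] {result}")

    if __name__ == "__main__":
        process_all(["a.npy", "b.npy", "c.npy"], folder="masks")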
spacr-1.0.0.dist-info/METADATA → spacr-1.0.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 1.0.0
+ Version: 1.0.3
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
@@ -75,7 +75,7 @@ Requires-Dist: ipywidgets <9.0,>=8.1.2
  Requires-Dist: brokenaxes <1.0,>=0.6.2
  Requires-Dist: huggingface-hub <0.25,>=0.24.0
  Provides-Extra: dev
- Requires-Dist: pytest <3.11,>=3.9 ; extra == 'dev'
+ Requires-Dist: pytest <3.12,>=3.9 ; extra == 'dev'
  Provides-Extra: full
  Requires-Dist: opencv-python ; extra == 'full'
  Provides-Extra: headless
@@ -91,12 +91,15 @@ Requires-Dist: opencv-python-headless ; extra == 'headless'
  :target: https://github.com/EinarOlafsson/spacr/blob/main/LICENSE
  .. |repo size| image:: https://img.shields.io/github/repo-size/EinarOlafsson/spacr
  :target: https://github.com/EinarOlafsson/spacr/
+ .. |Tutorial| image:: https://img.shields.io/badge/Tutorial-Click%20Here-brightgreen
+ :target: https://einarolafsson.github.io/spacr/tutorial/
+
 
  .. _docs: https://einarolafsson.github.io/spacr/index.html
 
  Badges
  ------
- |Docs| |PyPI version| |Python version| |Licence: MIT| |repo size|
+ |Docs| |PyPI version| |Python version| |Licence: MIT| |repo size| |Tutorial|
 
  SpaCr
  =====
@@ -123,7 +126,6 @@ Features
  :alt: SpaCr workflow
  :align: center
 
-
  **Overview and data organization of spaCR.**
 
  **a.** Schematic workflow of the spaCR pipeline for pooled image-based CRISPR screens. Microscopy images (TIFF, LIF, CZI, NDI) and sequencing reads (FASTQ) are used as inputs (black). The main modules (teal) are: (1) Mask: generates object masks for cells, nuclei, pathogens, and cytoplasm; (2) Measure: extracts object-level features and crops object images, storing quantitative data in an SQL database; (3) Classify—applies machine learning (ML, e.g., XGBoost) or deep learning (DL, e.g., PyTorch) models to classify objects, summarizing results as well-level classification scores; (4) Map Barcodes: extracts and maps row, column, and gRNA barcodes from sequencing data to corresponding wells; (5) Regression: estimates gRNA effect sizes and gene scores via multiple linear regression using well-level summary statistics.
@@ -141,8 +143,7 @@ If using Windows, switch to Linux—it's free, open-source, and better.
 
  ::
 
- brew install libomp
- brew install hdf5
+ brew install libomp hdf5 cmake openssl
 
  **Linux GUI requirement:**
  SpaCr GUI requires Tkinter.
@@ -163,16 +164,6 @@ SpaCr GUI requires Tkinter.
 
  spacr
 
- Data Availability
- -----------------
-
- - **Raw sequencing data** are available from NCBI BioProject `PRJNA1261935 <https://www.ncbi.nlm.nih.gov/bioproject/PRJNA1261935>`_ and SRA accessions: `SRR33531217 <https://www.ncbi.nlm.nih.gov/sra/SRR33531217>`_, `SRR33531218 <https://www.ncbi.nlm.nih.gov/sra/SRR33531218>`_, `SRR33531219 <https://www.ncbi.nlm.nih.gov/sra/SRR33531219>`_, `SRR33531220 <https://www.ncbi.nlm.nih.gov/sra/SRR33531220>`_
-
- - **Image data** is deposited at EBI BioStudies under accession:
- `S-BIAD2076 <https://www.ebi.ac.uk/biostudies/studies/S-BIAD2076>`_
- *(If the link redirects to the main BioStudies portal, copy and paste it directly into your browser.)*
-
-
  Example Notebooks
  -----------------
 
@@ -201,7 +192,26 @@ Interactive Tutorial (under construction)
 
  Click below to explore the step-by-step GUI and Notebook tutorials for spaCR:
 
- `spaCR Tutorial Page <https://einarolafsson.github.io/spacr/tutorial/>`_
+ |Tutorial|
+
+ spaCRPower
+ ----------
+
+ Power analysis of pooled perturbation spaCR screens.
+
+ `spaCRPower <https://github.com/maomlab/spaCRPower>`_
+
+ Data Availability
+ -----------------
+
+ - **Full microscopy image dataset:**
+ `EMBL-EBI BioStudies S-BIAD2135 <https://doi.org/10.6019/S-BIAD2135>`_
+
+ - **Testing dataset:**
+ `Hugging Face toxo_mito <https://huggingface.co/datasets/einarolafsson/toxo_mito>`_
+
+ - **Sequencing data:**
+ `NCBI BioProject PRJNA1261935 <https://www.ncbi.nlm.nih.gov/bioproject/?term=PRJNA1261935>`_
 
  License
  -------
@@ -213,3 +223,10 @@ How to Cite
  If you use spaCR in your research, please cite:
  Olafsson EB, et al. SpaCr: Spatial phenotype analysis of CRISPR-Cas9 screens. *Manuscript in preparation*.
 
+ Papers Using spaCR
+ -------------------
+ Below are selected publications that have used or cited spaCR:
+
+ - Olafsson EB, et al. *SpaCr: Spatial phenotype analysis of CRISPR-Cas9 screens.* Manuscript in preparation.
+ - `IRE1α promotes phagosomal calcium flux to enhance macrophage fungicidal activity <https://doi.org/10.1016/j.celrep.2025.115694>`_
+ - `Metabolic adaptability and nutrient scavenging in Toxoplasma gondii: insights from ingestion pathway-deficient mutants <https://doi.org/10.1128/msphere.01011-24>`_
spacr-1.0.0.dist-info/RECORD → spacr-1.0.3.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- spacr/__init__.py,sha256=EoGInYks0M4foZElYNhksrQK6aEO1au7cncWexWNhRw,1376
+ spacr/__init__.py,sha256=dB3aFOfOe5m5x81CTENDDP3FToXQwZwdBPtLBTuT4Js,1601
  spacr/__main__.py,sha256=H4MjaMF9ohZL6xfl1kTxVn1Nt_vEhhZArENMMBv8f4E,77
  spacr/app_annotate.py,sha256=p0OyvgFycIug7RcLfejFmc4HWB7yQskCBxxy3Sdq_Y0,2905
  spacr/app_classify.py,sha256=urTP_wlZ58hSyM5a19slYlBxN0PdC-9-ga0hvq8CGWc,165
@@ -8,28 +8,28 @@ spacr/app_measure.py,sha256=_K7APYIeOKpV6e_LcqabBjvEi7mfq9Fch8175x1x0k8,162
  spacr/app_sequencing.py,sha256=DjG26jy4cpddnV8WOOAIiExtOe9MleVMY4MFa5uTo5w,157
  spacr/app_umap.py,sha256=ZWAmf_OsIKbYvolYuWPMYhdlVe-n2CADoJulAizMiEo,153
  spacr/chat_bot.py,sha256=n3Fhqg3qofVXHmh3H9sUcmfYy9MmgRnr48663MVdY9E,1244
- spacr/core.py,sha256=SDsJsgarbMHDw2i4OOMCKtMtVx9t1tjsGCVs6iHPj7s,46638
- spacr/deep_spacr.py,sha256=055tIo3WP3elGFiIuSZaLURgu2XyUDxAdbw5ezASEqM,54526
+ spacr/core.py,sha256=GZWrKVKH_3EghGGcvAtia9OFTQ6hkZtwQRw9dpv2MBM,46672
+ spacr/deep_spacr.py,sha256=x3_102k-p9IN3gE3XYdsEnXGCOyGqQh_ny5A-SO9HkA,54497
  spacr/gui.py,sha256=NhMh96KoArrSAaJBV6PhDQpIC1cQpxgb6SclhRbYG8s,8122
- spacr/gui_core.py,sha256=pXTswrqPMTb2mgF_mvnCzBgmXEaf9w7wDnSD6uMA67w,56228
- spacr/gui_elements.py,sha256=OTU7aeLrPiMUTnyCT-J7ygng3beI9tdA0MmypOavEkw,156123
- spacr/gui_utils.py,sha256=F6KfNY3OqNkvfkOP1rxwBha5IOdLVyBgqZYPw3xPLes,42293
- spacr/io.py,sha256=g6vybQeGLdTXrAqEjM6X1aoB6lyZVUq6pTI0ASppX4g,159257
+ spacr/gui_core.py,sha256=IWkTEHI8imt_Pk9s8IUKbK_zLXtQNrbvDVl36Ft-n5k,54429
+ spacr/gui_elements.py,sha256=9hEcIJ5ROQmBfHIROukgFawF3go37w5B-3OEHedNk3A,156078
+ spacr/gui_utils.py,sha256=OJT1LX3X1U95joUY30b5Vn9fBrC0V65K4VBvdCckeNI,40995
+ spacr/io.py,sha256=RKvy8DjyWbGnjV6zcbx8YHm2AdrPiTQBpHKEVrpA6dc,158412
  spacr/logger.py,sha256=lJhTqt-_wfAunCPl93xE65Wr9Y1oIHJWaZMjunHUeIw,1538
  spacr/measure.py,sha256=nYvrfVfCIqD1AUk4QBE2jtpeSFtLdfUcnkhkqf9G4xQ,60877
  spacr/mediar.py,sha256=p0F515eFbm6_rePSnChsgqrgH-H5Sr_3zWrghtOnAUg,14863
- spacr/ml.py,sha256=XCRZeX7UkbMctQICIoskeWVx8CCmmCoHNauUOAkfFq0,91692
+ spacr/ml.py,sha256=iE7vI9Q1rjN_gHukqe8IXiWFCRaFj1lj3poFjEZY6Kw,91499
  spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
- spacr/plot.py,sha256=76E1CZpsmNeNtbnkXJtgcVOesq4voL7XkaUnD74RDMk,169418
+ spacr/plot.py,sha256=OgMr1ur1RFcFoZC_K8f-Qra48i-YmwgyyE_JNi5mw28,169111
  spacr/sequencing.py,sha256=EY12RdW5QRKpHDRQCw1QoAlxCq8FK2v6WoVa5uuDBXQ,26745
  spacr/settings.py,sha256=38MClzEv-eP4Kfo_UJ_jlIzj0Vos3TY5-scPlBpolYI,88520
- spacr/sim.py,sha256=1xKhXimNU3ukzIw-3l9cF3Znc_brW8h20yv8fSTzvss,71173
- spacr/sp_stats.py,sha256=C93Xe5fphQOKthw4Tmj8pHx-Nb1houIL-YYVIfmnQPg,9535
+ spacr/sim.py,sha256=EiCXNCarU6TAFwIKKSr7LOPKtNrxWK6_3XSBMaEpg20,71176
+ spacr/sp_stats.py,sha256=KoQe5F0jfPBrZQ38MG_1b3AnF4XFNupR8a9crXiyi64,9516
  spacr/spacr_cellpose.py,sha256=AvnyD2qoj-lUqhICeTpfhyk9T2hCjZrpBXn2iKh1EYE,16785
- spacr/submodules.py,sha256=Z2i4kv_rWdxqoXsOKCF7BaSXtvaCZB69Ow8_FQBnZsY,83093
- spacr/timelapse.py,sha256=-5ZupTsCCpbenIQ2zsUmnwXh45B82fO-gPrSXOxu2s8,42980
- spacr/toxo.py,sha256=GoNfgyH-NJx3WOzNQPgzODir7Jp65fs7UM46XpzcrUo,26056
- spacr/utils.py,sha256=PulmDqENBFYKgN2fjcVSB39B_9CwwGDiBi9FOI55zQs,238470
+ spacr/submodules.py,sha256=kyX_oX0llXBrU5YwLLq0DssCczG3J1BAa2QeKI9P7oE,83114
+ spacr/timelapse.py,sha256=WKz21Qy4ZJAMHUeYZNHnuw8SATA-q2CfnKgEUETDGMM,43031
+ spacr/toxo.py,sha256=7to5GHfHt0EPL9qaxAQnA-_Io1t2NQ1nBxXlfZc3_oI,25674
+ spacr/utils.py,sha256=L1-F13dcBxRgBIX4toe_zYqL4FyIbtz3tYwtiQwyShY,240591
  spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
  spacr/resources/data/lopit.csv,sha256=ERI5f9W8RdJGiSx_khoaylD374f8kmvLia1xjhD_mII,4421709
  spacr/resources/data/toxoplasma_metadata.csv,sha256=9TXx0VlClDHAxQmaLhoklE8NuETduXaGHZjhR_6lZfs,2969409
@@ -103,9 +103,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
  spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
  spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
  spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
- spacr-1.0.0.dist-info/LICENSE,sha256=t0Pov6pnK8thLteoF4xZGmdCwe5mhNwl3OXxLYTGD9U,1081
- spacr-1.0.0.dist-info/METADATA,sha256=FAPU_0wMd3rz1-fbRo0TS568-_O7_NIWgx1jXPM4fuE,10356
- spacr-1.0.0.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
- spacr-1.0.0.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
- spacr-1.0.0.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
- spacr-1.0.0.dist-info/RECORD,,
+ spacr-1.0.3.dist-info/LICENSE,sha256=t0Pov6pnK8thLteoF4xZGmdCwe5mhNwl3OXxLYTGD9U,1081
+ spacr-1.0.3.dist-info/METADATA,sha256=mHQ3iqlrD2kQfkGHtW4LURYbI1VOuMjzCpDLQJFW3NY,10792
+ spacr-1.0.3.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ spacr-1.0.3.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+ spacr-1.0.3.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+ spacr-1.0.3.dist-info/RECORD,,
File without changes
File without changes