spacr 0.0.17__py3-none-any.whl → 0.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
spacr/utils.py CHANGED
@@ -1,6 +1,8 @@
  import os, re, sqlite3, gc, torch, torchvision, time, random, string, shutil, cv2, tarfile, glob

  import numpy as np
+ from cellpose import models as cp_models
+ from cellpose import denoise
  from skimage import morphology
  from skimage.measure import label, regionprops_table, regionprops
  import skimage.measure as measure
@@ -18,6 +20,8 @@ from functools import reduce
  from IPython.display import display, clear_output
  from multiprocessing import Pool, cpu_count
  from skimage.transform import resize as resizescikit
+ from skimage.morphology import dilation, square
+ from skimage.measure import find_contours
  import torch.nn as nn
  import torch.nn.functional as F
  #from torchsummary import summary
@@ -29,6 +33,7 @@ from skimage.segmentation import clear_border
  import seaborn as sns
  import matplotlib.pyplot as plt
  import scipy.ndimage as ndi
+ from scipy.spatial import distance
  from scipy.stats import fisher_exact
  from scipy.ndimage import binary_erosion, binary_dilation
  from skimage.exposure import rescale_intensity
@@ -36,6 +41,7 @@ from sklearn.metrics import auc, precision_recall_curve
  from sklearn.model_selection import train_test_split
  from sklearn.linear_model import Lasso, Ridge
  from sklearn.preprocessing import OneHotEncoder
+ from sklearn.cluster import KMeans
  from torchvision.models.resnet import ResNet18_Weights, ResNet34_Weights, ResNet50_Weights, ResNet101_Weights, ResNet152_Weights

  from .logger import log_function_call
@@ -45,6 +51,53 @@ from .logger import log_function_call
  #from .plot import _plot_images_on_grid, plot_masks, _plot_histograms_and_stats, plot_resize, _plot_plates, _reg_v_plot, plot_masks
  #from .core import identify_masks

+
+ def _gen_rgb_image(image, channels):
+     rgb_image = np.take(image, channels, axis=-1)
+     rgb_image = rgb_image.astype(float)
+     rgb_image -= rgb_image.min()
+     rgb_image /= rgb_image.max()
+     return rgb_image
+
+ def _outline_and_overlay(image, rgb_image, mask_dims, outline_colors, outline_thickness):
+     from concurrent.futures import ThreadPoolExecutor
+     import cv2
+
+     outlines = []
+     overlayed_image = rgb_image.copy()
+
+     def process_dim(mask_dim):
+         mask = np.take(image, mask_dim, axis=-1)
+         outline = np.zeros_like(mask, dtype=np.uint8)  # uint8 for efficient contour drawing
+
+         # Find and draw the contours of every labelled object
+         for j in np.unique(mask)[1:]:
+             contours = find_contours(mask == j, 0.5)
+             # Convert contours to OpenCV (x, y) int32 points and draw them directly
+             cv_contours = [np.flip(contour.astype(np.int32), axis=1) for contour in contours]
+             cv2.drawContours(outline, cv_contours, -1, color=int(j), thickness=outline_thickness)
+
+         return dilation(outline, square(outline_thickness))
+
+     # Process the mask dimensions in parallel
+     with ThreadPoolExecutor() as executor:
+         outlines = list(executor.map(process_dim, mask_dims))
+
+     # Overlay each outline onto the RGB image with its respective color
+     for i, outline in enumerate(outlines):
+         color = outline_colors[i % len(outline_colors)]
+         for j in np.unique(outline)[1:]:
+             mask = outline == j
+             overlayed_image[mask] = color  # direct assignment with broadcasting
+
+     # Remove mask_dims from image
+     channels_to_keep = [i for i in range(image.shape[-1]) if i not in mask_dims]
+     image = np.take(image, channels_to_keep, axis=-1)
+
+     return overlayed_image, outlines, image
+
  def _convert_cq1_well_id(well_id):
      """
      Converts a well ID to the CQ1 well format.
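A minimal usage sketch for the two helpers added above (`_gen_rgb_image`, `_outline_and_overlay`). The synthetic stack, the channel/mask indices, the outline colors and the thickness are illustrative assumptions, not values taken from spacr:

```python
import numpy as np

# Synthetic (H, W, C) stack: channels 0-2 are intensity channels, 3-4 are label masks.
stack = np.zeros((64, 64, 5), dtype=np.uint16)
stack[..., :3] = np.random.randint(0, 4096, (64, 64, 3))
stack[16:32, 16:32, 3] = 1   # one "nucleus" object
stack[8:40, 8:40, 4] = 1     # one "cell" object

rgb = _gen_rgb_image(stack, channels=[0, 1, 2])          # rescaled to the [0, 1] range
overlay, outlines, remaining = _outline_and_overlay(
    stack, rgb, mask_dims=[3, 4],
    outline_colors=[(1, 0, 0), (0, 1, 0)],               # red and green outlines
    outline_thickness=3,
)
# overlay:   RGB image with colored object outlines drawn on top
# outlines:  one dilated outline array per mask dimension
# remaining: the stack with the two mask channels removed
```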
@@ -114,7 +167,7 @@ def _extract_filename_metadata(filenames, src, images_by_key, regular_expression
      if metadata_type =='cq1':
          orig_wellID = wellID
          wellID = _convert_cq1_well_id(wellID)
-         clear_output(wait=True)
+         #clear_output(wait=True)
          print(f'Converted Well ID: {orig_wellID} to {wellID}', end='\r', flush=True)

      if pick_slice:
@@ -673,9 +726,6 @@ def _crop_center(img, cell_mask, new_width, new_height, normalize=(2,98)):
      img = img[start_y:end_y, start_x:end_x, :]
      return img

-
-
-
  def _masks_to_masks_stack(masks):
      """
      Convert a list of masks into a stack of masks.
@@ -692,53 +742,50 @@ def _masks_to_masks_stack(masks):
      return mask_stack

  def _get_diam(mag, obj):
-     if obj == 'cell':
-         if mag == 20:
-             scale = 6
-         if mag == 40:
-             scale = 4.5
-         if mag == 60:
-             scale = 3
-     elif obj == 'nucleus':
-         if mag == 20:
-             scale = 3
-         if mag == 40:
-             scale = 2
-         if mag == 60:
-             scale = 1.5
-     elif obj == 'pathogen':
-         if mag == 20:
-             scale = 1.5
-         if mag == 40:
-             scale = 1
-         if mag == 60:
-             scale = 1.25
-     elif obj == 'pathogen_nucleus':
-         if mag == 20:
-             scale = 0.25
-         if mag == 40:
-             scale = 0.2
-         if mag == 60:
-             scale = 0.2
+
+     if mag == 20:
+         if obj == 'cell':
+             diamiter = 120
+         elif obj == 'nucleus':
+             diamiter = 60
+         elif obj == 'pathogen':
+             diamiter = 30
+         else:
+             raise ValueError("Invalid object type: use cell, nucleus or pathogen")
+
+     elif mag == 40:
+         if obj == 'cell':
+             diamiter = 160
+         elif obj == 'nucleus':
+             diamiter = 80
+         elif obj == 'pathogen':
+             diamiter = 40
+         else:
+             raise ValueError("Invalid object type: use cell, nucleus or pathogen")
+
+     elif mag == 60:
+         if obj == 'cell':
+             diamiter = 200
+         elif obj == 'nucleus':
+             diamiter = 90
+         elif obj == 'pathogen':
+             diamiter = 75
+         else:
+             raise ValueError("Invalid object type: use cell, nucleus or pathogen")
      else:
-         raise ValueError("Invalid object type")
-     diamiter = mag*scale
+         raise ValueError("Invalid magnification: Use 20, 40 or 60")
+
      return diamiter

  def _get_object_settings(object_type, settings):
-
      object_settings = {}
-     object_settings['refine_masks'] = False
-     object_settings['filter_size'] = False
-     object_settings['filter_dimm'] = False
-     print(object_type)
+
      object_settings['diameter'] = _get_diam(settings['magnification'], obj=object_type)
-     object_settings['remove_border_objects'] = False
-     object_settings['minimum_size'] = (object_settings['diameter']**2)/10
-     object_settings['maximum_size'] = object_settings['minimum_size']*50
+     object_settings['minimum_size'] = (object_settings['diameter']**2)/5
+     object_settings['maximum_size'] = (object_settings['diameter']**2)*3
      object_settings['merge'] = False
-     object_settings['net_avg'] = True
      object_settings['resample'] = True
+     object_settings['remove_border_objects'] = False
      object_settings['model_name'] = 'cyto'

      if object_type == 'cell':
@@ -746,20 +793,27 @@ def _get_object_settings(object_type, settings):
              object_settings['model_name'] = 'cyto'
          else:
              object_settings['model_name'] = 'cyto2'
-
+         object_settings['filter_size'] = True
+         object_settings['filter_intensity'] = True
+         object_settings['restore_type'] = settings.get('cell_restore_type', None)
+
      elif object_type == 'nucleus':
          object_settings['model_name'] = 'nuclei'
+         object_settings['filter_size'] = True
+         object_settings['filter_intensity'] = True
+         object_settings['restore_type'] = settings.get('nucleus_restore_type', None)

      elif object_type == 'pathogen':
-         object_settings['model_name'] = 'cyto3'
-
-     elif object_type == 'pathogen_nucleus':
-         object_settings['filter_size'] = True
          object_settings['model_name'] = 'cyto'
+         object_settings['filter_size'] = True
+         object_settings['filter_intensity'] = True
+         object_settings['restore_type'] = settings.get('pathogen_restore_type', None)

      else:
          print(f'Object type: {object_type} not supported. Supported object types are : cell, nucleus and pathogen')
-         print(f'using settings: {object_settings}')
+
+     if settings['verbose']:
+         print(object_settings)

      return object_settings

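For reference, the rewritten `_get_diam` now returns a fixed pixel diameter per magnification/object pair instead of `mag * scale`, and `_get_object_settings` derives its size limits from that diameter. A short, hedged sanity check using only values visible in the hunks above; the `settings` dict is a minimal assumption containing just the keys these functions read:

```python
# _get_diam: values come straight from the new lookup table.
assert _get_diam(20, obj='cell') == 120
assert _get_diam(20, obj='nucleus') == 60
assert _get_diam(40, obj='pathogen') == 40
assert _get_diam(60, obj='cell') == 200

# _get_object_settings for the nucleus branch.
settings = {'magnification': 40, 'verbose': False, 'nucleus_restore_type': None}
nucleus_settings = _get_object_settings('nucleus', settings)
# diameter     -> _get_diam(40, 'nucleus') == 80
# minimum_size -> 80**2 / 5 == 1280.0
# maximum_size -> 80**2 * 3 == 19200
# model_name   -> 'nuclei'; filter_size and filter_intensity -> True; restore_type -> None
```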
@@ -786,6 +840,7 @@ def _pivot_counts_table(db_path):
          return df

      def _pivot_dataframe(df):
+
          """
          Pivot the DataFrame.

@@ -812,44 +867,17 @@ def _pivot_counts_table(db_path):
      pivoted_df.to_sql('pivoted_counts', conn, if_exists='replace', index=False)
      conn.close()

- def _get_cellpose_channels_v1(mask_channels, nucleus_chann_dim, pathogen_chann_dim, cell_chann_dim):
-     cellpose_channels = {}
-     if nucleus_chann_dim in mask_channels:
-         cellpose_channels['nucleus'] = [0, mask_channels.index(nucleus_chann_dim)]
-     if pathogen_chann_dim in mask_channels:
-         cellpose_channels['pathogen'] = [0, mask_channels.index(pathogen_chann_dim)]
-     if cell_chann_dim in mask_channels:
-         cellpose_channels['cell'] = [0, mask_channels.index(cell_chann_dim)]
-     return cellpose_channels
+ def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel):

- def _get_cellpose_channels_v1(cell_channel, nucleus_channel, pathogen_channel):
-     # Initialize a dictionary to hold the new indices for the specified channels
-     cellpose_channels = {}
+     cell_mask_path = os.path.join(src, 'norm_channel_stack', 'cell_mask_stack')
+     nucleus_mask_path = os.path.join(src, 'norm_channel_stack', 'nucleus_mask_stack')
+     pathogen_mask_path = os.path.join(src, 'norm_channel_stack', 'pathogen_mask_stack')

-     # Initialize a list to keep track of the channels in their new order
-     new_channel_order = []
-
-     # Add each channel to the new order list if it is not None
-     if cell_channel is not None:
-         new_channel_order.append(('cell', cell_channel))
-     if nucleus_channel is not None:
-         new_channel_order.append(('nucleus', nucleus_channel))
-     if pathogen_channel is not None:
-         new_channel_order.append(('pathogen', pathogen_channel))
-
-     # Sort the list based on the original channel indices to maintain the original order
-     new_channel_order.sort(key=lambda x: x[1])
-     print(new_channel_order)
-     # Assign new indices based on the sorted order
-     for new_index, (channel_name, _) in enumerate(new_channel_order):
-         cellpose_channels[channel_name] = [new_index, 0]
-
-     if cell_channel is not None and nucleus_channel is not None:
-         cellpose_channels['cell'][1] = cellpose_channels['nucleus'][0]
-
-     return cellpose_channels

- def _get_cellpose_channels(nucleus_channel, pathogen_channel, cell_channel):
+     if os.path.exists(cell_mask_path) or os.path.exists(nucleus_mask_path) or os.path.exists(pathogen_mask_path):
+         if nucleus_channel is None or pathogen_channel is None or cell_channel is None:
+             print('Warning: Cellpose masks already exist. Unexpected behaviour when setting any object dimension to None when the object masks have been created.')
+
      cellpose_channels = {}
      if not nucleus_channel is None:
          cellpose_channels['nucleus'] = [0,0]
@@ -1027,9 +1055,6 @@ def _group_by_well(df):
      # Apply mean function to numeric columns and first to non-numeric
      df_grouped = df.groupby(['plate', 'row', 'col']).agg({**{col: np.mean for col in numeric_cols}, **{col: 'first' for col in non_numeric_cols}})
      return df_grouped
-
-
-

  ###################################################
  # Classify
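The context line above aggregates per well by taking the mean of numeric columns and the first value of everything else. A small, self-contained sketch of that pattern; the column names and values are made up for illustration:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'plate': ['p1', 'p1', 'p1'],
    'row':   ['A', 'A', 'B'],
    'col':   [1, 1, 2],
    'intensity': [0.2, 0.4, 0.9],           # numeric -> averaged per well
    'condition': ['ctrl', 'ctrl', 'drug'],  # non-numeric -> first value kept
})

group_keys = ['plate', 'row', 'col']
numeric_cols = df.select_dtypes(include=np.number).columns.difference(group_keys)
non_numeric_cols = df.columns.difference(list(numeric_cols) + group_keys)

df_grouped = df.groupby(group_keys).agg(
    {**{col: 'mean' for col in numeric_cols}, **{col: 'first' for col in non_numeric_cols}}
)
print(df_grouped)   # one row per (plate, row, col) well
```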
@@ -2612,24 +2637,21 @@ def _filter_object(mask, min_value):
      mask[np.isin(mask, to_remove)] = 0
      return mask

- def _filter_cp_masks(masks, flows, filter_size, minimum_size, maximum_size, remove_border_objects, merge, filter_dimm, batch, moving_avg_q1, moving_avg_q3, moving_count, plot, figuresize):
+ def _filter_cp_masks(masks, flows, filter_size, filter_intensity, minimum_size, maximum_size, remove_border_objects, merge, batch, plot, figuresize):
+
      """
      Filter the masks based on various criteria such as size, border objects, merging, and intensity.

      Args:
          masks (list): List of masks.
          flows (list): List of flows.
-         refine_masks (bool): Flag indicating whether to refine masks.
          filter_size (bool): Flag indicating whether to filter based on size.
+         filter_intensity (bool): Flag indicating whether to filter based on intensity.
          minimum_size (int): Minimum size of objects to keep.
          maximum_size (int): Maximum size of objects to keep.
          remove_border_objects (bool): Flag indicating whether to remove border objects.
          merge (bool): Flag indicating whether to merge adjacent objects.
-         filter_dimm (bool): Flag indicating whether to filter based on intensity.
          batch (ndarray): Batch of images.
-         moving_avg_q1 (float): Moving average of the first quartile of object intensities.
-         moving_avg_q3 (float): Moving average of the third quartile of object intensities.
-         moving_count (int): Count of moving averages.
          plot (bool): Flag indicating whether to plot the masks.
          figuresize (tuple): Size of the figure.

@@ -2641,51 +2663,66 @@ def _filter_cp_masks(masks, flows, filter_size, minimum_size, maximum_size, remo

      mask_stack = []
      for idx, (mask, flow, image) in enumerate(zip(masks, flows[0], batch)):
+
          if plot and idx == 0:
              num_objects = mask_object_count(mask)
              print(f'Number of objects before filtration: {num_objects}')
              plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)

          if filter_size:
-             props = measure.regionprops_table(mask, properties=['label', 'area']) # Measure properties of labeled image regions.
-             valid_labels = props['label'][np.logical_and(props['area'] > minimum_size, props['area'] < maximum_size)] # Select labels of valid size.
-             masks[idx] = np.isin(mask, valid_labels) * mask # Keep only valid objects.
+             props = measure.regionprops_table(mask, properties=['label', 'area'])
+             valid_labels = props['label'][np.logical_and(props['area'] > minimum_size, props['area'] < maximum_size)]
+             mask = np.isin(mask, valid_labels) * mask
              if plot and idx == 0:
                  num_objects = mask_object_count(mask)
                  print(f'Number of objects after size filtration >{minimum_size} and <{maximum_size} : {num_objects}')
                  plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+
+         if filter_intensity:
+             intensity_image = image[:, :, 1]
+             props = measure.regionprops_table(mask, intensity_image=intensity_image, properties=['label', 'mean_intensity'])
+             mean_intensities = np.array(props['mean_intensity']).reshape(-1, 1)
+
+             if mean_intensities.shape[0] >= 2:
+                 kmeans = KMeans(n_clusters=2, random_state=0).fit(mean_intensities)
+                 centroids = kmeans.cluster_centers_
+
+                 # Calculate the Euclidean distance between the two centroids
+                 dist_between_centroids = distance.euclidean(centroids[0], centroids[1])
+
+                 # Set a threshold for the minimum distance to consider clusters distinct
+                 distance_threshold = 0.25
+
+                 if dist_between_centroids > distance_threshold:
+                     high_intensity_cluster = np.argmax(centroids)
+                     valid_labels = np.array(props['label'])[kmeans.labels_ == high_intensity_cluster]
+                     mask = np.isin(mask, valid_labels) * mask
+
+                 if plot and idx == 0:
+                     num_objects = mask_object_count(mask)
+                     props_after = measure.regionprops_table(mask, intensity_image=intensity_image, properties=['label', 'mean_intensity'])
+                     mean_intensities_after = np.mean(np.array(props_after['mean_intensity']))
+                     average_intensity_before = np.mean(mean_intensities)
+                     print(f'Number of objects after potential intensity clustering: {num_objects}. Mean intensity before:{average_intensity_before:.4f}. After:{mean_intensities_after:.4f}.')
+                     plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+
+
          if remove_border_objects:
              mask = clear_border(mask)
              if plot and idx == 0:
                  num_objects = mask_object_count(mask)
                  print(f'Number of objects after removing border objects, : {num_objects}')
                  plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+
          if merge:
              mask = merge_touching_objects(mask, threshold=0.25)
              if plot and idx == 0:
                  num_objects = mask_object_count(mask)
                  print(f'Number of objects after merging adjacent objects, : {num_objects}')
                  plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
-         if filter_dimm:
-             unique_labels = np.unique(mask)
-             if len(unique_labels) == 1 and unique_labels[0] == 0:
-                 continue
-             object_intensities = [np.mean(batch[idx, :, :, 1][mask == label]) for label in unique_labels if label != 0]
-             object_q1s = [np.percentile(intensities, 25) for intensities in object_intensities if intensities.size > 0]
-             object_q3s = [np.percentile(intensities, 75) for intensities in object_intensities if intensities.size > 0]
-             if object_q1s:
-                 object_q1_mean = np.mean(object_q1s)
-                 object_q3_mean = np.mean(object_q3s)
-                 moving_avg_q1 = (moving_avg_q1 * moving_count + object_q1_mean) / (moving_count + 1)
-                 moving_avg_q3 = (moving_avg_q3 * moving_count + object_q3_mean) / (moving_count + 1)
-                 moving_count += 1
-             mask = remove_intensity_objects(batch[idx, :, :, 1], mask, intensity_threshold=moving_avg_q1, mode='low')
-             mask = remove_intensity_objects(batch[idx, :, :, 1], mask, intensity_threshold=moving_avg_q3, mode='high')
-             if plot and idx == 0:
-                 num_objects = mask_object_count(mask)
-                 print(f'Objects after intensity filtration > {moving_avg_q1} and <{moving_avg_q3}: {num_objects}')
-                 plot_masks(batch=image, masks=mask, flows=flow, cmap='inferno', figuresize=figuresize, nr=1, file_type='.npz', print_object_number=True)
+
          mask_stack.append(mask)
+
      return mask_stack

  def _object_filter(df, object_type, size_range, intensity_range, mask_chans, mask_chan):
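The new `filter_intensity` branch above clusters per-object mean intensities into two groups with KMeans and keeps the brighter group only when the two centroids are clearly separated. A self-contained sketch of that idea using synthetic labels and intensities; the 0.25 threshold is the value hard-coded in the hunk:

```python
import numpy as np
from scipy.spatial import distance
from skimage import measure
from sklearn.cluster import KMeans

# Synthetic labeled mask with two dim and two bright objects.
mask = np.zeros((100, 100), dtype=np.int32)
mask[10:20, 10:20] = 1
mask[10:20, 40:50] = 2
mask[60:70, 10:20] = 3
mask[60:70, 40:50] = 4
intensity = np.zeros(mask.shape, dtype=float)
intensity[mask == 1] = 0.05
intensity[mask == 2] = 0.10
intensity[mask == 3] = 0.90
intensity[mask == 4] = 0.95

props = measure.regionprops_table(mask, intensity_image=intensity,
                                  properties=['label', 'mean_intensity'])
mean_intensities = np.array(props['mean_intensity']).reshape(-1, 1)

kmeans = KMeans(n_clusters=2, n_init=10, random_state=0).fit(mean_intensities)
centroids = kmeans.cluster_centers_
if distance.euclidean(centroids[0], centroids[1]) > 0.25:   # same threshold as above
    bright = np.argmax(centroids)
    keep = np.array(props['label'])[kmeans.labels_ == bright]
    mask = np.isin(mask, keep) * mask   # labels 3 and 4 survive, 1 and 2 are zeroed
```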
@@ -2721,6 +2758,70 @@ def _object_filter(df, object_type, size_range, intensity_range, mask_chans, mas
          print(f'After {object_type} maximum mean intensity filter: {len(df)}')
      return df

- ###################################################
- # Classify
- ###################################################
+ def _run_test_mode(src, regex, timelapse=False):
+     if timelapse:
+         test_images = 1  # Use only 1 set for timelapse to ensure full sequence inclusion
+     else:
+         test_images = 10  # Use 10 sets for non-timelapse scenarios
+
+     test_folder_path = os.path.join(src, 'test')
+     os.makedirs(test_folder_path, exist_ok=True)
+     regular_expression = re.compile(regex)
+
+     all_filenames = [filename for filename in os.listdir(src) if regular_expression.match(filename)]
+     print(f'Found {len(all_filenames)} files')
+     images_by_set = defaultdict(list)
+
+     for filename in all_filenames:
+         match = regular_expression.match(filename)
+         if match:
+             plate = match.group('plateID') if 'plateID' in match.groupdict() else os.path.basename(src)
+             well = match.group('wellID')
+             field = match.group('fieldID')
+             # For timelapse experiments, group images by plate, well, and field only
+             if timelapse:
+                 set_identifier = (plate, well, field)
+             else:
+                 # For non-timelapse data the same (plate, well, field) grouping is used for simplicity
+                 set_identifier = (plate, well, field)
+             images_by_set[set_identifier].append(filename)
+
+     # Prepare for random selection
+     set_identifiers = list(images_by_set.keys())
+     random.shuffle(set_identifiers)  # Randomize the order
+
+     # Select a subset based on the test_images count
+     selected_sets = set_identifiers[:test_images]
+
+     # Print information about the number of sets used
+     print(f'Using {test_images} random image set(s) for test model')
+
+     # Copy files for selected sets to the test folder
+     for set_identifier in selected_sets:
+         for filename in images_by_set[set_identifier]:
+             shutil.copy(os.path.join(src, filename), test_folder_path)
+
+     return test_folder_path
+
+ def _choose_model(model_name, device, object_type='cell', restore_type=None):
+     restore_list = ['denoise', 'deblur', 'upsample', None]
+     if restore_type not in restore_list:
+         print(f"Invalid restore type. Choose from {restore_list}, defaulting to None")
+         restore_type = None
+
+     if restore_type is None:
+         model = cp_models.Cellpose(gpu=True, model_type=model_name, device=device)
+     else:
+         if object_type == 'nucleus':
+             restore = f'{restore_type}_nuclei'
+             model = denoise.CellposeDenoiseModel(gpu=True, model_type="nuclei", restore_type=restore, chan2_restore=False, device=device)
+         else:
+             restore = f'{restore_type}_cyto3'
+             if model_name == 'cyto2':
+                 chan2_restore = True
+             if model_name == 'cyto':
+                 chan2_restore = False
+             model = denoise.CellposeDenoiseModel(gpu=True, model_type="cyto3", restore_type=restore, chan2_restore=chan2_restore, device=device)
+
+     return model
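A hedged usage sketch for the new `_choose_model` helper. The device handling and the 'denoise' restore type are assumptions chosen for illustration; the helper itself hard-codes `gpu=True` and returns either a plain Cellpose model or a CellposeDenoiseModel:

```python
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Plain Cellpose model, no image restoration.
cell_model = _choose_model('cyto2', device, object_type='cell', restore_type=None)

# Denoising variants: non-nucleus objects map to the 'cyto3' denoise model
# (restore_type='denoise_cyto3', chan2_restore=True for 'cyto2'), while nuclei
# map to the 'nuclei' denoise model (restore_type='denoise_nuclei').
cell_denoise = _choose_model('cyto2', device, object_type='cell', restore_type='denoise')
nuc_denoise = _choose_model('nuclei', device, object_type='nucleus', restore_type='denoise')
```

An unrecognized restore type falls back to None with a printed warning, per the check at the top of the helper.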
{spacr-0.0.17.dist-info → spacr-0.0.20.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: spacr
- Version: 0.0.17
+ Version: 0.0.20
  Summary: Spatial phenotype analysis of crisp screens (SpaCr)
  Home-page: https://github.com/EinarOlafsson/spacr
  Author: Einar Birnir Olafsson
@@ -9,29 +9,29 @@ Classifier: Programming Language :: Python :: 3
  Classifier: License :: OSI Approved :: MIT License
  Classifier: Operating System :: OS Independent
  License-File: LICENSE
- Requires-Dist: torch
- Requires-Dist: torchvision
- Requires-Dist: torch-geometric
- Requires-Dist: numpy
- Requires-Dist: pandas
- Requires-Dist: statsmodels
- Requires-Dist: scikit-image
- Requires-Dist: scikit-learn
- Requires-Dist: seaborn
- Requires-Dist: matplotlib
- Requires-Dist: pillow
- Requires-Dist: imageio
- Requires-Dist: scipy
- Requires-Dist: ipywidgets
- Requires-Dist: mahotas
- Requires-Dist: btrack
- Requires-Dist: trackpy
- Requires-Dist: cellpose
- Requires-Dist: IPython
- Requires-Dist: opencv-python-headless
- Requires-Dist: umap
- Requires-Dist: ttkthemes
- Requires-Dist: lxml
+ Requires-Dist: torch >=2.2.1
+ Requires-Dist: torchvision >=0.17.1
+ Requires-Dist: torch-geometric >=2.5.1
+ Requires-Dist: numpy >=1.26.4
+ Requires-Dist: pandas >=2.2.1
+ Requires-Dist: statsmodels >=0.14.1
+ Requires-Dist: scikit-image >=0.22.0
+ Requires-Dist: scikit-learn >=1.4.1
+ Requires-Dist: seaborn >=0.13.2
+ Requires-Dist: matplotlib >=3.8.3
+ Requires-Dist: pillow >=10.2.0
+ Requires-Dist: imageio >=2.34.0
+ Requires-Dist: scipy >=1.12.0
+ Requires-Dist: ipywidgets >=8.1.2
+ Requires-Dist: mahotas >=1.4.13
+ Requires-Dist: btrack >=0.6.5
+ Requires-Dist: trackpy >=0.6.2
+ Requires-Dist: cellpose >=3.0.6
+ Requires-Dist: IPython >=8.18.1
+ Requires-Dist: opencv-python-headless >=4.9.0.80
+ Requires-Dist: umap >=0.1.1
+ Requires-Dist: ttkthemes >=3.2.2
+ Requires-Dist: lxml >=5.1.0
  Provides-Extra: dev
  Requires-Dist: pytest >=3.9 ; extra == 'dev'
  Provides-Extra: full
@@ -68,16 +68,18 @@ Spatial phenotype analysis of crisp screens (SpaCr). A collection of functions f

  - **Crop Images:** Objects (e.g. cells) can be saved as PNGs from the object area or bounding box area of each object. Object paths are saved in an SQL database that can be annotated and used to train CNNs/Transformer models for classification tasks.

- - **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing,
+ - **Train CNNs or Transformers:** Train Torch Convolutional Neural Networks (CNNs) or Transformers to classify single object images. Train Torch models with IRM/ERM, checkpointing.

  - **Manual Annotation:** Supports manual annotation of single cell images and segmentation to refine training datasets for training CNNs/Transformers or cellpose, respectively.

  - **Finetune Cellpose Models:** Adjust pre-existing Cellpose models to your specific dataset for improved performance.

- - **Timelapse Data Support:** Includes support for analyzing timelapse data.
+ - **Timelapse Data Support:** Track objects in timelapse image data.

  - **Simulations:** Simulate spatial phenotype screens.

+ - **Misc:** Analyze Ca oscillation, recruitment, infection rate, plaque size/count.
+
  ## Installation

  spacr requires Tkinter for its graphical user interface features.
{spacr-0.0.17.dist-info → spacr-0.0.20.dist-info}/RECORD RENAMED
@@ -0,0 +1,31 @@
+ spacr/__init__.py,sha256=mDi-Qu5r1vZnqIbUBV1JAoSq-mxmMEOmni1JSG2e4Wo,879
+ spacr/__main__.py,sha256=_qRkhbFrH_cXr7AZs6KHL8Hh4VApqNdpNCtiKn2ePTo,285
+ spacr/alpha.py,sha256=Q1vnqO0hvU1G7QP26amFwJY2RjZ68zIc3jYoqQSBMrw,462
+ spacr/annotate_app.py,sha256=IPgZfS4TrSqbJr81P1FWUNOgCPPcS6EdQjUsXRwY-4E,19932
+ spacr/cli.py,sha256=507jfOOEV8BoL4eeUcblvH-iiDHdBrEVJLu1ghAAPSc,1800
+ spacr/core.py,sha256=VTk81S80PKq5pGeUEdduubjbsAyPWKDyRu11EWDO2ms,120307
+ spacr/graph_learning.py,sha256=sD4eOC7Q16rr7WO20mCi_E16_LqioGUUgPamAHIIeNI,12568
+ spacr/graph_learning_lap.py,sha256=MyNRLb63gsjBlui-ByZ0anHugYulL6M-OsGm8rnGBmE,3385
+ spacr/gui_classify_app.py,sha256=-I06tVoA3U0jaAoTs32H1Y5ACMz6QBaEM1NEfg5w-9c,7965
+ spacr/gui_mask_app.py,sha256=xpQ_kh-8lTb9xnyKNrZGtf9JnKqutBFu2-LdYzQShh0,8079
+ spacr/gui_measure_app.py,sha256=9mAw3Tiuq61uKTzMVslr0MgD8m1Lv5PNI0K4-gQiuXE,8061
+ spacr/gui_sim_app.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ spacr/gui_utils.py,sha256=dpbrsDVebjm8ZmkYYOmIZCbkQYyc6JyMIPA7C0r4Xxw,29631
+ spacr/io.py,sha256=bCe40kli7jx7hKQyDNuZYCpZ44-Brj4TZVl9r02cFHo,102042
+ spacr/logger.py,sha256=7Zqr3TuuOQLWT32gYr2q1qvv7x0a2JhLANmZcnBXAW8,670
+ spacr/mask_app.py,sha256=B6-zYXVFg-cc58gLcz-Ry6LClO2jxLitL6B2ACb0HTw,39278
+ spacr/measure.py,sha256=LvF6D-TydhXPwKkeAKuIvzHn14qVlqsn6h7ENnoKn5s,50795
+ spacr/old_code.py,sha256=KxljHpKNsV5EfX9ifN2xJTnUeqAhyabZyfDWd5THOOc,11226
+ spacr/plot.py,sha256=qSM0NzVQYqMYioRc_BPCkUipLRZH7_GaHKGpLXGL8oI,55040
+ spacr/sim.py,sha256=tl40lgTMeeJSyBq_c-Rn54C9Ri0FJ2zLkLLLPLSjz3o,51534
+ spacr/timelapse.py,sha256=wAEMv7oPyusLph3RPmF4F6UGmLfMZmrupYfsuaeJ9vI,34003
+ spacr/train.py,sha256=r77zLvLFMzx6MJxXG3JjynD8qTWYM9pNgrChEXYQhtY,25631
+ spacr/umap.py,sha256=4QSrQ16Og-Ijq-SwguMQT2f20UWz1LE5HQeSLmzSl8c,29370
+ spacr/utils.py,sha256=kEQxucklUdogxjOSQxKdA1R_NU5qYj6dJPiQKIB8un4,120992
+ spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
+ spacr-0.0.20.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
+ spacr-0.0.20.dist-info/METADATA,sha256=S8bOdo2ZqIj-ScjQRBxQnBQNXXFV-HKfidMs7MRIsIc,4381
+ spacr-0.0.20.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ spacr-0.0.20.dist-info/entry_points.txt,sha256=5uyJaAxWCbjWYwP15InAKU1yFxTwyuvCGtIGceso1es,290
+ spacr-0.0.20.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+ spacr-0.0.20.dist-info/RECORD,,
{spacr-0.0.17.dist-info → spacr-0.0.20.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.42.0)
+ Generator: bdist_wheel (0.43.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
