spacr 1.0.1__py3-none-any.whl → 1.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/__init__.py +10 -2
- spacr/core.py +2 -1
- spacr/deep_spacr.py +2 -2
- spacr/gui_core.py +8 -6
- spacr/gui_elements.py +3 -3
- spacr/gui_utils.py +6 -0
- spacr/io.py +10 -27
- spacr/ml.py +7 -10
- spacr/plot.py +2 -10
- spacr/sim.py +1 -1
- spacr/sp_stats.py +0 -1
- spacr/submodules.py +3 -3
- spacr/timelapse.py +8 -2
- spacr/toxo.py +0 -16
- spacr/utils.py +67 -26
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/METADATA +21 -12
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/RECORD +21 -21
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/LICENSE +0 -0
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/WHEEL +0 -0
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/entry_points.txt +0 -0
- {spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -1,4 +1,13 @@
-import logging, os
+import logging, os, builtins
+
+def silent_print(*args, **kwargs):
+    if "Welcome to CellposeSAM" in str(args[0]):
+        return  # skip the banner
+    return _original_print(*args, **kwargs)
+
+_original_print = builtins.print
+builtins.print = silent_print
+
 
 from . import core
 from . import io
@@ -22,7 +31,6 @@ from . import app_classify
 from . import app_sequencing
 from . import app_umap
 from . import submodules
-from . import openai
 from . import ml
 from . import toxo
 from . import spacr_cellpose
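The new hook swaps builtins.print for a filter that silently drops the CellposeSAM welcome banner. A minimal, self-contained sketch of the same pattern, including how to restore the original print (the `args and` guard is a small defensive addition, not part of the spacr code):

import builtins

_original_print = builtins.print

def silent_print(*args, **kwargs):
    # Drop any message containing the banner text; forward everything else.
    if args and "Welcome to CellposeSAM" in str(args[0]):
        return
    return _original_print(*args, **kwargs)

builtins.print = silent_print          # install the filter
print("Welcome to CellposeSAM v1")     # suppressed
print("regular output")                # printed as usual
builtins.print = _original_print       # restore when the filter is no longer wanted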
spacr/core.py
CHANGED
@@ -145,7 +145,7 @@ def preprocess_generate_masks(settings):
     parasite_folder = os.path.join(mask_src, 'pathogen_mask_stack')
     #organelle_folder = os.path.join(mask_src, 'organelle_mask_stack')
     print(f'Adjusting cell masks with nuclei and pathogen masks')
-    adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30)
+    adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30, n_jobs=settings['n_jobs'])
     stop = time.time()
     adjust_time = (stop-start)/60
     print(f'Cell mask adjustment: {adjust_time} min.')
@@ -243,6 +243,7 @@ def generate_cellpose_masks(src, settings, object_type):
 
     if object_type not in cellpose_channels:
         raise ValueError(f"Error: No channels were specified for object_type '{object_type}'. Check your settings.")
+
     channels = cellpose_channels[object_type]
 
     #cellpose_batch_size = _get_cellpose_batch_size()
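The only functional change in preprocess_generate_masks is that the worker count for the mask-adjustment step now comes from the run settings. A hedged sketch of how such a value might be resolved; the helper name is illustrative, and the cpu_count()-2 fallback mirrors the default introduced in spacr/utils.py:

import os

def resolve_n_jobs(settings):
    # Illustrative helper: honour settings['n_jobs'] when given,
    # otherwise leave two cores free (assumed convention).
    return settings.get('n_jobs') or max(1, (os.cpu_count() or 1) - 2)

print(resolve_n_jobs({'n_jobs': 4}))     # -> 4
print(resolve_n_jobs({'n_jobs': None}))  # -> cpu_count() - 2 on this machine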
spacr/deep_spacr.py
CHANGED
@@ -940,7 +940,7 @@ def deep_spacr(settings={}):
 
 def model_knowledge_transfer(teacher_paths, student_save_path, data_loader, device='cpu', student_model_name='maxvit_t', pretrained=True, dropout_rate=None, use_checkpoint=False, alpha=0.5, temperature=2.0, lr=1e-4, epochs=10):
 
-    from
+    from .utils import TorchModel
 
     # Adjust filename to reflect knowledge-distillation if desired
     if student_save_path.endswith('.pth'):
@@ -1044,7 +1044,7 @@ def model_knowledge_transfer(teacher_paths, student_save_path, data_loader, devi
 
 def model_fusion(model_paths,save_path,device='cpu',model_name='maxvit_t',pretrained=True,dropout_rate=None,use_checkpoint=False,aggregator='mean'):
 
-    from
+    from .utils import TorchModel
 
     if save_path.endswith('.pth'):
         save_path_part1, ext = os.path.splitext(save_path)
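Both hunks complete a function-local import of TorchModel from spacr.utils. A small stand-alone illustration of the deferred-import pattern, using a standard-library module as a stand-in; the point is that the dependency is only loaded when the function runs, which also sidesteps circular imports between modules:

import sys

def fuse_scores(values):
    # Deferred import: 'statistics' is only loaded the first time this runs.
    from statistics import mean
    return mean(values)

print("statistics" in sys.modules)   # typically False in a fresh interpreter
print(fuse_scores([1.0, 2.0, 3.0]))  # 2.0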
spacr/gui_core.py
CHANGED
@@ -549,16 +549,19 @@ def setup_settings_panel(vertical_container, settings_type='mask'):
     from .gui_elements import set_element_size
 
     size_dict = set_element_size()
+
     settings_width = size_dict['settings_width']
-
+
     # Create a PanedWindow for the settings panel
-    settings_paned_window = tk.PanedWindow(vertical_container, orient=tk.HORIZONTAL, width=
+    settings_paned_window = tk.PanedWindow(vertical_container, orient=tk.HORIZONTAL, width=settings_width)
     vertical_container.add(settings_paned_window, stretch="always")
 
     settings_frame = tk.Frame(settings_paned_window, width=settings_width)
-    settings_frame.pack_propagate(False)
+    settings_frame.pack_propagate(False)
 
     settings_paned_window.add(settings_frame)
+    #settings_paned_window.add(settings_frame, width=settings_width)
+
 
     scrollable_frame = spacrFrame(settings_frame)
     scrollable_frame.grid(row=1, column=0, sticky="nsew")
@@ -1209,7 +1212,7 @@ def cleanup_previous_instance():
     canvas = None
 
     print("Previous instance cleaned up successfully.")
-
+
 def initiate_root(parent, settings_type='mask'):
     """
     Initializes the root window and sets up the GUI components based on the specified settings type.
@@ -1223,6 +1226,7 @@ def initiate_root(parent, settings_type='mask'):
     """
 
     global q, fig_queue, thread_control, parent_frame, scrollable_frame, button_frame, vars_dict, canvas, canvas_widget, button_scrollable_frame, progress_bar, uppdate_frequency, figures, figure_index, index_control, usage_bars
+    from .gui_elements import set_element_size
 
     # Clean up any previous instance
     cleanup_previous_instance()
@@ -1235,8 +1239,6 @@ def initiate_root(parent, settings_type='mask'):
     uppdate_frequency = 500
     num_cores = os.cpu_count()
 
-    #chatbot = Chatbot(api_key="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")
-
     # Start tracemalloc and initialize global variables
     tracemalloc.start()
 
spacr/gui_elements.py
CHANGED
@@ -39,7 +39,7 @@ def restart_gui_app(root):
         root.destroy()
 
         # Import and launch a new instance of the application
-        from
+        from .gui import gui_app
         new_root = tk.Tk()  # Create a fresh Tkinter root instance
         gui_app()
     except Exception as e:
@@ -81,7 +81,7 @@ def create_menu_bar(root):
     # Add a separator and an exit option
     app_menu.add_separator()
     #app_menu.add_command(label="Home",command=lambda: restart_gui_app(root))
-    app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://
+    app_menu.add_command(label="Help", command=lambda: webbrowser.open("https://einarolafsson.github.io/spacr/index.html"))
     app_menu.add_command(label="Exit", command=root.quit)
 
     # Configure the menu for the root window
@@ -2851,7 +2851,7 @@ class AnnotateApp:
         # Optionally, update your GUI status label
         self.update_gui_text("Merging data...")
 
-        from
+        from .io import _read_and_merge_data
 
         # (1) Merge data
         merged_df, obj_df_ls = _read_and_merge_data(
spacr/gui_utils.py
CHANGED
@@ -594,6 +594,12 @@ def setup_frame(parent_frame):
     tk.Label(settings_container, text="Settings Container", bg=style_out['bg_color']).pack(fill=tk.BOTH, expand=True)
 
     set_dark_style(style, parent_frame, [settings_container, vertical_container, horizontal_container, main_paned])
+
+    # Set initial sash position for main_paned (left/right split)
+    parent_frame.update_idletasks()
+    screen_width = parent_frame.winfo_screenwidth()
+    target_width = int(screen_width / 4)
+    main_paned.sash_place(0, target_width, 0)
 
     return parent_frame, vertical_container, horizontal_container, settings_container
 
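The added block pins the initial left/right split of the main paned window to a quarter of the screen width. A minimal stand-alone tkinter sketch of the same idea; the widget names are illustrative rather than spacr's, it needs a display to run, and sash_place may only take effect once the window has been drawn:

import tkinter as tk

root = tk.Tk()
paned = tk.PanedWindow(root, orient=tk.HORIZONTAL)
paned.pack(fill=tk.BOTH, expand=True)
paned.add(tk.Frame(paned, bg="grey20"))
paned.add(tk.Frame(paned, bg="grey40"))

root.update_idletasks()                      # make sure geometry has been computed
target = int(root.winfo_screenwidth() / 4)   # quarter of the screen width
paned.sash_place(0, target, 0)               # move sash 0 to x=target, y=0

root.mainloop()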
spacr/io.py
CHANGED
@@ -1,10 +1,9 @@
-import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime
+import os, re, sqlite3, gc, torch, time, random, shutil, cv2, tarfile, cellpose, glob, queue, tifffile, czifile, atexit, datetime
 import numpy as np
 import pandas as pd
 from PIL import Image, ImageOps
 from collections import defaultdict, Counter
 from pathlib import Path
-from functools import partial
 from matplotlib.animation import FuncAnimation
 from IPython.display import display
 from skimage.util import img_as_uint
@@ -298,26 +297,16 @@ def _load_normalized_images_and_labels(image_files, label_files, channels=None,
     return normalized_images, labels, image_names, label_names, orig_dims
 
 class CombineLoaders:
-
     """
     A class that combines multiple data loaders into a single iterator.
 
     Args:
         train_loaders (list): A list of data loaders.
 
-    Attributes:
-        train_loaders (list): A list of data loaders.
-        loader_iters (list): A list of iterator objects for each data loader.
-
-    Methods:
-        __iter__(): Returns the iterator object itself.
-        __next__(): Returns the next batch from one of the data loaders.
-
     Raises:
         StopIteration: If all data loaders have been exhausted.
-
     """
-
+
     def __init__(self, train_loaders):
         self.train_loaders = train_loaders
         self.loader_iters = [iter(loader) for loader in train_loaders]
@@ -327,7 +316,7 @@ class CombineLoaders:
 
     def __next__(self):
         while self.loader_iters:
-            random.shuffle(self.loader_iters)
+            random.shuffle(self.loader_iters)
             for i, loader_iter in enumerate(self.loader_iters):
                 try:
                     batch = next(loader_iter)
@@ -377,14 +366,6 @@ class NoClassDataset(Dataset):
         transform (callable, optional): A function/transform to apply to the image data. Default is None.
         shuffle (bool, optional): Whether to shuffle the dataset. Default is True.
         load_to_memory (bool, optional): Whether to load all images into memory. Default is False.
-
-    Attributes:
-        data_dir (str): The directory path where the image files are located.
-        transform (callable): A function/transform to apply to the image data.
-        shuffle (bool): Whether to shuffle the dataset.
-        load_to_memory (bool): Whether to load all images into memory.
-        filenames (list): A list of file paths for the image files.
-        images (list): A list of loaded images (if load_to_memory is True).
     """
 
     def __init__(self, data_dir, transform=None, shuffle=True, load_to_memory=False):
@@ -392,13 +373,16 @@ class NoClassDataset(Dataset):
         self.transform = transform
         self.shuffle = shuffle
         self.load_to_memory = load_to_memory
-        self.filenames = [
+        self.filenames = [
+            os.path.join(data_dir, f)
+            for f in os.listdir(data_dir)
+            if os.path.isfile(os.path.join(data_dir, f))
+        ]
         if self.shuffle:
             self.shuffle_dataset()
         if self.load_to_memory:
             self.images = [self.load_image(f) for f in self.filenames]
 
-    #@lru_cache(maxsize=None)
     def load_image(self, img_path):
         """
         Load an image from the given file path.
@@ -446,9 +430,9 @@ class NoClassDataset(Dataset):
             img = self.transform(img)
         else:
             img = ToTensor()(img)
-        # Return both the image and its filename
         return img, self.filenames[index]
 
+
 class spacrDataset(Dataset):
     def __init__(self, data_dir, loader_classes, transform=None, shuffle=True, pin_memory=False, specific_files=None, specific_labels=None):
         self.data_dir = data_dir
@@ -579,7 +563,7 @@ class spacrDataLoader(DataLoader):
     def __del__(self):
         self.cleanup()
 
-class
+class NoClassDataset_v1(Dataset):
     def __init__(self, data_dir, transform=None, shuffle=True, load_to_memory=False):
         self.data_dir = data_dir
         self.transform = transform
@@ -614,7 +598,6 @@ class NoClassDataset(Dataset):
         img = ToTensor()(img)
         return img, self.filenames[index]
 
-
 class TarImageDataset(Dataset):
     def __init__(self, tar_path, transform=None):
         self.tar_path = tar_path
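The reworked NoClassDataset now lists only regular files and returns each image together with its path. A minimal stand-in (not spacr's class) showing the same filename-returning Dataset pattern; the directory is assumed to contain images readable by PIL:

import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import ToTensor

class FilenameDataset(Dataset):
    """Label-free image dataset that also returns the file path."""
    def __init__(self, data_dir, transform=None):
        self.transform = transform or ToTensor()
        self.filenames = [
            os.path.join(data_dir, f)
            for f in os.listdir(data_dir)
            if os.path.isfile(os.path.join(data_dir, f))
        ]

    def __len__(self):
        return len(self.filenames)

    def __getitem__(self, index):
        img = Image.open(self.filenames[index]).convert("RGB")
        return self.transform(img), self.filenames[index]

# usage: ds = FilenameDataset("/path/to/images"); tensor, path = ds[0]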
spacr/ml.py
CHANGED
@@ -14,21 +14,18 @@ from IPython.display import display
 import scipy.stats as st
 import statsmodels.api as sm
 import statsmodels.formula.api as smf
-from statsmodels.tools import add_constant
 from statsmodels.regression.mixed_linear_model import MixedLM
-from statsmodels.tools.sm_exceptions import PerfectSeparationError
 from statsmodels.stats.outliers_influence import variance_inflation_factor
 from statsmodels.genmod.families import Binomial
 from statsmodels.genmod.families.links import logit
 from statsmodels.othermod.betareg import BetaModel
-from scipy.optimize import minimize
 from scipy.special import gammaln, psi, expit
 from sklearn.linear_model import Lasso, Ridge
 from sklearn.preprocessing import FunctionTransformer
 from patsy import dmatrices
 
-from sklearn.metrics import classification_report
-from sklearn.model_selection import StratifiedKFold
+from sklearn.metrics import classification_report
+from sklearn.model_selection import StratifiedKFold
 from sklearn.feature_selection import SelectKBest, f_classif
 from sklearn.model_selection import train_test_split
 from sklearn.ensemble import RandomForestClassifier, HistGradientBoostingClassifier
@@ -37,7 +34,7 @@ from sklearn.inspection import permutation_importance
 from sklearn.metrics import classification_report, precision_recall_curve
 from sklearn.preprocessing import StandardScaler
 from sklearn.preprocessing import MinMaxScaler
-from scipy.spatial.distance import cosine, euclidean, mahalanobis, cityblock, minkowski, chebyshev,
+from scipy.spatial.distance import cosine, euclidean, mahalanobis, cityblock, minkowski, chebyshev, braycurtis
 from xgboost import XGBClassifier
 
 import numpy as np
@@ -120,7 +117,7 @@ def scale_variables(X, y):
 
     return X_scaled, y_scaled
 
-def
+def process_model_coefficients_v1(model, regression_type, X, y, nc, pc, controls):
     """Return DataFrame of model coefficients and p-values."""
     if regression_type in ['ols', 'gls', 'wls', 'rlm', 'glm', 'mixed', 'quantile', 'logit', 'probit', 'poisson']:
         coefs = model.params
@@ -159,7 +156,7 @@ def process_model_coefficients(model, regression_type, X, y, nc, pc, controls):
     coef_df['condition'] = coef_df.apply(lambda row: 'nc' if nc in row['feature'] else 'pc' if pc in row['feature'] else ('control' if row['grna'] in controls else 'other'),axis=1)
     return coef_df[~coef_df['feature'].str.contains('row|column')]
 
-def
+def check_distribution_v1(y):
     """Check the type of distribution to recommend a model."""
     if np.all((y == 0) | (y == 1)):
         print("Detected binary data.")
@@ -307,7 +304,7 @@ def check_and_clean_data(df, dependent_variable):
     print("Data is ready for model fitting.")
     return df_cleaned
 
-def
+def check_normality_v1(y, variable_name):
     """Check if the data is normally distributed using the Shapiro-Wilk test."""
     from scipy.stats import shapiro
 
@@ -326,7 +323,7 @@ def minimum_cell_simulation(settings, num_repeats=10, sample_size=100, tolerance
     Detect and mark the elbow point (inflection) with smoothing and tolerance control.
     """
 
-    from
+    from .utils import correct_metadata_column_names
 
     # Load and process data
     if isinstance(settings['score_data'], str):
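Several helpers gain explicit _v1 names here; check_distribution_v1, for example, inspects the response variable to recommend a model family. A rough, self-contained sketch of that kind of check; the proportion branch is an assumption suggested by the Beta/Binomial imports above, not a copy of spacr's logic:

import numpy as np

def suggest_model_family(y):
    y = np.asarray(y, dtype=float)
    if np.all((y == 0) | (y == 1)):
        return "binary -> logit/probit"
    if np.all((y >= 0) & (y <= 1)):
        return "proportion in [0, 1] -> beta regression"
    return "continuous -> OLS / GLM"

print(suggest_model_family([0, 1, 1, 0]))        # binary
print(suggest_model_family([0.2, 0.5, 0.8]))     # proportion
print(suggest_model_family([1.2, 3.4, -0.5]))    # continuous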
spacr/plot.py
CHANGED
@@ -1,4 +1,4 @@
-import os, random, cv2, glob, math, torch,
+import os, random, cv2, glob, math, torch, itertools
 
 import numpy as np
 import pandas as pd
@@ -11,27 +11,19 @@ import scipy.stats as stats
 import statsmodels.api as sm
 import imageio.v2 as imageio
 from IPython.display import display
-from skimage.segmentation import find_boundaries
 from skimage import measure
 from skimage.measure import find_contours, label, regionprops
-from skimage.segmentation import mark_boundaries
 from skimage.transform import resize as sk_resize
 import scikit_posthocs as sp
 from scipy.stats import chi2_contingency
 import tifffile as tiff
 
-from scipy.stats import normaltest, ttest_ind, mannwhitneyu, f_oneway, kruskal
+from scipy.stats import normaltest, ttest_ind, mannwhitneyu, f_oneway, kruskal, levene, shapiro
 from statsmodels.stats.multicomp import pairwise_tukeyhsd
-from scipy.stats import ttest_ind, mannwhitneyu, levene, wilcoxon, kruskal, normaltest, shapiro
-import itertools
 import pingouin as pg
 
 from ipywidgets import IntSlider, interact
 from IPython.display import Image as ipyimage
-
-import matplotlib.patches as patches
-from collections import defaultdict
-from matplotlib.gridspec import GridSpec
 from matplotlib_venn import venn2
 
 def plot_image_mask_overlay(
spacr/sim.py
CHANGED
spacr/sp_stats.py
CHANGED
@@ -1,7 +1,6 @@
 from scipy.stats import shapiro, normaltest, levene, ttest_ind, mannwhitneyu, kruskal, f_oneway
 from statsmodels.stats.multicomp import pairwise_tukeyhsd
 import scikit_posthocs as sp
-import numpy as np
 import pandas as pd
 from scipy.stats import chi2_contingency, fisher_exact
 import itertools
spacr/submodules.py
CHANGED
@@ -20,8 +20,7 @@ from IPython.display import display
 from sklearn.ensemble import RandomForestClassifier
 from sklearn.inspection import permutation_importance
 from math import pi
-from scipy.stats import chi2_contingency
-from scipy.spatial.distance import cosine
+from scipy.stats import chi2_contingency
 
 from sklearn.metrics import mean_absolute_error
 from skimage.measure import regionprops, label as sklabel
@@ -691,7 +690,8 @@ def analyze_plaques(settings):
     from .spacr_cellpose import identify_masks_finetune
     from .settings import get_analyze_plaque_settings
     from .utils import save_settings, download_models
-    from spacr import __file__ as spacr_path
+    #from spacr import __file__ as spacr_path
+    spacr_path = os.path.join(os.path.dirname(__file__), '__init__.py')
 
     download_models()
     package_dir = os.path.dirname(spacr_path)
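Both spacr/submodules.py and spacr/utils.py stop importing spacr just to read its __file__ and instead build the path from the module's own __file__. A small illustrative sketch of the idea; the "resources" layout shown is only an example:

import os

# Resolve the directory this module lives in, then point at a sibling resource;
# no package-level import is needed for this.
package_dir = os.path.dirname(os.path.abspath(__file__))
resources_dir = os.path.join(package_dir, "resources")  # example layout
print(package_dir, resources_dir)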
spacr/timelapse.py
CHANGED
@@ -7,12 +7,18 @@ from IPython.display import display
 from IPython.display import Image as ipyimage
 import trackpy as tp
 from btrack import datasets as btrack_datasets
-from skimage.measure import
+from skimage.measure import regionprops_table
 from scipy.signal import find_peaks
 from scipy.optimize import curve_fit, linear_sum_assignment
-
+
+try:
+    from numpy import trapz
+except ImportError:
+    from scipy.integrate import trapz
+
 import matplotlib.pyplot as plt
 
+
 def _npz_to_movie(arrays, filenames, save_path, fps=10):
     """
     Convert a list of numpy arrays to a movie file.
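The try/except import keeps trapz available across NumPy versions: numpy.trapz is gone from NumPy 2.0's top-level namespace, where numpy.trapezoid replaces it. A hedged sketch of an equivalent shim, slightly broader than the one in the diff, followed by a quick use:

import numpy as np

try:
    from numpy import trapz                             # NumPy < 2.0
except ImportError:
    try:
        from numpy import trapezoid as trapz            # NumPy >= 2.0
    except ImportError:
        from scipy.integrate import trapezoid as trapz  # SciPy fallback

y = np.array([0.0, 1.0, 2.0, 3.0])
print(trapz(y))  # trapezoidal area under the curve: 4.5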
spacr/toxo.py
CHANGED
@@ -5,23 +5,7 @@ import numpy as np
 from adjustText import adjust_text
 import pandas as pd
 from scipy.stats import fisher_exact
-from IPython.display import display
-from matplotlib.legend import Legend
-from matplotlib.transforms import Bbox
-from brokenaxes import brokenaxes
-
-import os
-import pandas as pd
-import seaborn as sns
-import matplotlib.pyplot as plt
-from scipy.spatial.distance import cosine
-from scipy.stats import pearsonr
-import pandas as pd
-import matplotlib.pyplot as plt
-import seaborn as sns
 from sklearn.metrics import mean_absolute_error
-
-
 from matplotlib.gridspec import GridSpec
 
 def custom_volcano_plot(data_path, metadata_path, metadata_column='tagm_location',point_size=50, figsize=20, threshold=0,save_path=None, x_lim=[-0.5, 0.5], y_lims=[[0, 6], [9, 20]]):
spacr/utils.py
CHANGED
@@ -1,8 +1,11 @@
+
 import os, re, sqlite3, torch, torchvision, random, string, shutil, cv2, tarfile, glob, psutil, platform, gzip, subprocess, time, requests, ast, traceback
+
 import numpy as np
 import pandas as pd
 from cellpose import models as cp_models
 from cellpose import denoise
+from functools import partial
 
 from skimage import morphology
 from skimage.measure import label, regionprops_table, regionprops
@@ -37,7 +40,6 @@ from torchvision import models
 from torchvision.models.resnet import ResNet18_Weights, ResNet34_Weights, ResNet50_Weights, ResNet101_Weights, ResNet152_Weights
 import torchvision.transforms as transforms
 from torchvision.models import resnet50
-from torchvision.utils import make_grid
 
 import seaborn as sns
 import matplotlib.pyplot as plt
@@ -65,10 +67,11 @@ from sklearn.decomposition import PCA
 from sklearn.ensemble import RandomForestClassifier
 
 from huggingface_hub import list_repo_files
-
+
+#from spacr import __file__ as spacr_path
+spacr_path = os.path.join(os.path.dirname(__file__), '__init__.py')
 
 import umap.umap_ as umap
-#import umap
 
 def _generate_mask_random_cmap(mask):
     """
@@ -1283,7 +1286,7 @@ def _pivot_counts_table(db_path):
     pivoted_df.to_sql('pivoted_counts', conn, if_exists='replace', index=False)
     conn.close()
 
-def
+def _get_cellpose_channels_v2(src, nucleus_channel, pathogen_channel, cell_channel):
     cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
     nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
     pathogen_mask_path = os.path.join(src, 'masks', 'pathogen_mask_stack')
@@ -1311,7 +1314,7 @@ def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel)
 
     return cellpose_channels
 
-def
+def _get_cellpose_channels(src, nucleus_channel, pathogen_channel, cell_channel):
 
     cell_mask_path = os.path.join(src, 'masks', 'cell_mask_stack')
     nucleus_mask_path = os.path.join(src, 'masks', 'nucleus_mask_stack')
@@ -1340,6 +1343,7 @@ def _get_cellpose_channels_v1(src, nucleus_channel, pathogen_channel, cell_chann
         cellpose_channels['cell'] = [0,1]
     else:
         cellpose_channels['cell'] = [0,0]
+
     return cellpose_channels
 
 def annotate_conditions(df, cells=None, cell_loc=None, pathogens=None, pathogen_loc=None, treatments=None, treatment_loc=None):
@@ -1541,9 +1545,8 @@ class Cache:
     """
    A class representing a cache with a maximum size.
 
-
+    Args:
         max_size (int): The maximum size of the cache.
-        cache (OrderedDict): The cache data structure.
     """
 
     def __init__(self, max_size):
@@ -1562,23 +1565,15 @@ class Cache:
         self.cache.popitem(last=False)
         self.cache[key] = value
 
-class
+class ScaledDotProductAttention_v1(nn.Module):
     """
     Scaled Dot-Product Attention module.
 
     Args:
         d_k (int): The dimension of the key and query vectors.
-
-    Attributes:
-        d_k (int): The dimension of the key and query vectors.
-
-    Methods:
-        forward(Q, K, V): Performs the forward pass of the attention mechanism.
-
     """
-
     def __init__(self, d_k):
-        super(
+        super(ScaledDotProductAttention_v1, self).__init__()
         self.d_k = d_k
 
     def forward(self, Q, K, V):
@@ -1592,24 +1587,23 @@ class ScaledDotProductAttention(nn.Module):
 
     Returns:
         torch.Tensor: The output tensor of shape (batch_size, seq_len_q, d_k).
-
     """
     scores = torch.matmul(Q, K.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.d_k, dtype=torch.float32))
     attention_probs = F.softmax(scores, dim=-1)
     output = torch.matmul(attention_probs, V)
     return output
 
-class
+class SelfAttention_v1(nn.Module):
     """
     Self-Attention module that applies scaled dot-product attention mechanism.
-
+
     Args:
         in_channels (int): Number of input channels.
         d_k (int): Dimensionality of the key and query vectors.
     """
 
     def __init__(self, in_channels, d_k):
-        super(
+        super(SelfAttention_v1, self).__init__()
         self.W_q = nn.Linear(in_channels, d_k)
         self.W_k = nn.Linear(in_channels, d_k)
         self.W_v = nn.Linear(in_channels, d_k)
@@ -1618,10 +1612,10 @@ class SelfAttention(nn.Module):
     def forward(self, x):
         """
         Forward pass of the SelfAttention module.
-
+
         Args:
             x (torch.Tensor): Input tensor of shape (batch_size, in_channels).
-
+
         Returns:
             torch.Tensor: Output tensor of shape (batch_size, d_k).
         """
@@ -3298,7 +3292,7 @@ class SelectChannels:
         img[2, :, :] = 0  # Zero out the blue channel
         return img
 
-def
+def preprocess_image_v1(image_path, image_size=224, channels=[1,2,3], normalize=True):
 
     if normalize:
         transform = transforms.Compose([
@@ -3739,7 +3733,7 @@ def merge_dataframes(df, image_paths_df, verbose):
     display(df)
     return df
 
-def
+def remove_highly_correlated_columns_v1(df, threshold):
     corr_matrix = df.corr().abs()
     upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
     to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > threshold)]
@@ -4678,7 +4672,54 @@ def _merge_cells_without_nucleus(adj_cell_mask: np.ndarray, nuclei_mask: np.ndar
 
     return out.astype(np.uint16)
 
-def
+def process_mask_file_adjust_cell(file_name, parasite_folder, cell_folder, nuclei_folder, overlap_threshold, perimeter_threshold):
+    start = time.perf_counter()
+
+    parasite_path = os.path.join(parasite_folder, file_name)
+    cell_path = os.path.join(cell_folder, file_name)
+    nuclei_path = os.path.join(nuclei_folder, file_name)
+
+    if not (os.path.exists(cell_path) and os.path.exists(nuclei_path)):
+        raise ValueError(f"Corresponding cell or nuclei mask file for {file_name} not found.")
+
+    parasite_mask = np.load(parasite_path, allow_pickle=True)
+    cell_mask = np.load(cell_path, allow_pickle=True)
+    nuclei_mask = np.load(nuclei_path, allow_pickle=True)
+
+    merged_cell_mask = _merge_cells_based_on_parasite_overlap(parasite_mask, cell_mask, nuclei_mask, overlap_threshold, perimeter_threshold)
+    #merged_cell_mask = _merge_cells_without_nucleus(merged_cell_mask, nuclei_mask)
+
+    np.save(cell_path, merged_cell_mask)
+
+    end = time.perf_counter()
+    return end - start
+
+def adjust_cell_masks(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30, n_jobs=None):
+
+    parasite_files = sorted([f for f in os.listdir(parasite_folder) if f.endswith('.npy')])
+    cell_files = sorted([f for f in os.listdir(cell_folder) if f.endswith('.npy')])
+    nuclei_files = sorted([f for f in os.listdir(nuclei_folder) if f.endswith('.npy')])
+
+    if not (len(parasite_files) == len(cell_files) == len(nuclei_files)):
+        raise ValueError("The number of files in the folders do not match.")
+
+    n_jobs = n_jobs or max(1, cpu_count() - 2)
+
+    time_ls = []
+    files_to_process = len(parasite_files)
+    process_fn = partial(process_mask_file_adjust_cell,
+                         parasite_folder=parasite_folder,
+                         cell_folder=cell_folder,
+                         nuclei_folder=nuclei_folder,
+                         overlap_threshold=overlap_threshold,
+                         perimeter_threshold=perimeter_threshold)
+
+    with Pool(n_jobs) as pool:
+        for i, duration in enumerate(pool.imap_unordered(process_fn, parasite_files), 1):
+            time_ls.append(duration)
+            print_progress(i, files_to_process, n_jobs=n_jobs, time_ls=time_ls, batch_size=None, operation_type='adjust_cell_masks')
+
+def adjust_cell_masks_v1(parasite_folder, cell_folder, nuclei_folder, overlap_threshold=5, perimeter_threshold=30):
 
     """
     Process all npy files in the given folders. Merge and relabel cells in cell masks
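The new adjust_cell_masks fans per-file work out over a multiprocessing.Pool, binding the constant arguments with functools.partial and consuming results with imap_unordered so progress can be reported as workers finish. A stripped-down, self-contained sketch of that pattern; the worker and file names are placeholders, not spacr functions:

import time
from functools import partial
from multiprocessing import Pool, cpu_count

def process_one(file_name, scale=2):
    # Placeholder worker: pretend to process one mask file and return its runtime.
    start = time.perf_counter()
    _ = len(file_name) * scale
    return time.perf_counter() - start

if __name__ == "__main__":
    files = [f"mask_{i:03d}.npy" for i in range(20)]
    n_jobs = max(1, cpu_count() - 2)
    worker = partial(process_one, scale=3)

    durations = []
    with Pool(n_jobs) as pool:
        # imap_unordered yields results as they complete, not in submission order.
        for i, dt in enumerate(pool.imap_unordered(worker, files), 1):
            durations.append(dt)
            print(f"{i}/{len(files)} done, mean {sum(durations) / len(durations):.6f}s")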
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: spacr
-Version: 1.0.
+Version: 1.0.3
 Summary: Spatial phenotype analysis of crisp screens (SpaCr)
 Home-page: https://github.com/EinarOlafsson/spacr
 Author: Einar Birnir Olafsson
@@ -75,7 +75,7 @@ Requires-Dist: ipywidgets <9.0,>=8.1.2
 Requires-Dist: brokenaxes <1.0,>=0.6.2
 Requires-Dist: huggingface-hub <0.25,>=0.24.0
 Provides-Extra: dev
-Requires-Dist: pytest <3.
+Requires-Dist: pytest <3.12,>=3.9 ; extra == 'dev'
 Provides-Extra: full
 Requires-Dist: opencv-python ; extra == 'full'
 Provides-Extra: headless
@@ -164,16 +164,6 @@ SpaCr GUI requires Tkinter.
 
 spacr
 
-Data Availability
------------------
-
-- **Raw sequencing data** are available from NCBI BioProject `PRJNA1261935 <https://www.ncbi.nlm.nih.gov/bioproject/PRJNA1261935>`_ and SRA accessions: `SRR33531217 <https://www.ncbi.nlm.nih.gov/sra/SRR33531217>`_, `SRR33531218 <https://www.ncbi.nlm.nih.gov/sra/SRR33531218>`_, `SRR33531219 <https://www.ncbi.nlm.nih.gov/sra/SRR33531219>`_, `SRR33531220 <https://www.ncbi.nlm.nih.gov/sra/SRR33531220>`_
-
-- **Image data** is deposited at EBI BioStudies under accession:
-  `S-BIAD2076 <https://www.ebi.ac.uk/biostudies/studies/S-BIAD2076>`_
-  *(If the link redirects to the main BioStudies portal, copy and paste it directly into your browser.)*
-
-
 Example Notebooks
 -----------------
 
@@ -204,6 +194,25 @@ Click below to explore the step-by-step GUI and Notebook tutorials for spaCR:
 
 |Tutorial|
 
+spaCRPower
+----------
+
+Power analysis of pooled perturbation spaCR screens.
+
+`spaCRPower <https://github.com/maomlab/spaCRPower>`_
+
+Data Availability
+-----------------
+
+- **Full microscopy image dataset:**
+  `EMBL-EBI BioStudies S-BIAD2135 <https://doi.org/10.6019/S-BIAD2135>`_
+
+- **Testing dataset:**
+  `Hugging Face toxo_mito <https://huggingface.co/datasets/einarolafsson/toxo_mito>`_
+
+- **Sequencing data:**
+  `NCBI BioProject PRJNA1261935 <https://www.ncbi.nlm.nih.gov/bioproject/?term=PRJNA1261935>`_
+
 License
 -------
 spaCR is distributed under the terms of the MIT License.
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-spacr/__init__.py,sha256=
+spacr/__init__.py,sha256=dB3aFOfOe5m5x81CTENDDP3FToXQwZwdBPtLBTuT4Js,1601
 spacr/__main__.py,sha256=H4MjaMF9ohZL6xfl1kTxVn1Nt_vEhhZArENMMBv8f4E,77
 spacr/app_annotate.py,sha256=p0OyvgFycIug7RcLfejFmc4HWB7yQskCBxxy3Sdq_Y0,2905
 spacr/app_classify.py,sha256=urTP_wlZ58hSyM5a19slYlBxN0PdC-9-ga0hvq8CGWc,165
@@ -8,28 +8,28 @@ spacr/app_measure.py,sha256=_K7APYIeOKpV6e_LcqabBjvEi7mfq9Fch8175x1x0k8,162
 spacr/app_sequencing.py,sha256=DjG26jy4cpddnV8WOOAIiExtOe9MleVMY4MFa5uTo5w,157
 spacr/app_umap.py,sha256=ZWAmf_OsIKbYvolYuWPMYhdlVe-n2CADoJulAizMiEo,153
 spacr/chat_bot.py,sha256=n3Fhqg3qofVXHmh3H9sUcmfYy9MmgRnr48663MVdY9E,1244
-spacr/core.py,sha256=
-spacr/deep_spacr.py,sha256=
+spacr/core.py,sha256=GZWrKVKH_3EghGGcvAtia9OFTQ6hkZtwQRw9dpv2MBM,46672
+spacr/deep_spacr.py,sha256=x3_102k-p9IN3gE3XYdsEnXGCOyGqQh_ny5A-SO9HkA,54497
 spacr/gui.py,sha256=NhMh96KoArrSAaJBV6PhDQpIC1cQpxgb6SclhRbYG8s,8122
-spacr/gui_core.py,sha256=
-spacr/gui_elements.py,sha256=
-spacr/gui_utils.py,sha256=
-spacr/io.py,sha256=
+spacr/gui_core.py,sha256=IWkTEHI8imt_Pk9s8IUKbK_zLXtQNrbvDVl36Ft-n5k,54429
+spacr/gui_elements.py,sha256=9hEcIJ5ROQmBfHIROukgFawF3go37w5B-3OEHedNk3A,156078
+spacr/gui_utils.py,sha256=OJT1LX3X1U95joUY30b5Vn9fBrC0V65K4VBvdCckeNI,40995
+spacr/io.py,sha256=RKvy8DjyWbGnjV6zcbx8YHm2AdrPiTQBpHKEVrpA6dc,158412
 spacr/logger.py,sha256=lJhTqt-_wfAunCPl93xE65Wr9Y1oIHJWaZMjunHUeIw,1538
 spacr/measure.py,sha256=nYvrfVfCIqD1AUk4QBE2jtpeSFtLdfUcnkhkqf9G4xQ,60877
 spacr/mediar.py,sha256=p0F515eFbm6_rePSnChsgqrgH-H5Sr_3zWrghtOnAUg,14863
-spacr/ml.py,sha256=
+spacr/ml.py,sha256=iE7vI9Q1rjN_gHukqe8IXiWFCRaFj1lj3poFjEZY6Kw,91499
 spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
-spacr/plot.py,sha256=
+spacr/plot.py,sha256=OgMr1ur1RFcFoZC_K8f-Qra48i-YmwgyyE_JNi5mw28,169111
 spacr/sequencing.py,sha256=EY12RdW5QRKpHDRQCw1QoAlxCq8FK2v6WoVa5uuDBXQ,26745
 spacr/settings.py,sha256=38MClzEv-eP4Kfo_UJ_jlIzj0Vos3TY5-scPlBpolYI,88520
-spacr/sim.py,sha256=
-spacr/sp_stats.py,sha256=
+spacr/sim.py,sha256=EiCXNCarU6TAFwIKKSr7LOPKtNrxWK6_3XSBMaEpg20,71176
+spacr/sp_stats.py,sha256=KoQe5F0jfPBrZQ38MG_1b3AnF4XFNupR8a9crXiyi64,9516
 spacr/spacr_cellpose.py,sha256=AvnyD2qoj-lUqhICeTpfhyk9T2hCjZrpBXn2iKh1EYE,16785
-spacr/submodules.py,sha256=
-spacr/timelapse.py,sha256
-spacr/toxo.py,sha256=
-spacr/utils.py,sha256=
+spacr/submodules.py,sha256=kyX_oX0llXBrU5YwLLq0DssCczG3J1BAa2QeKI9P7oE,83114
+spacr/timelapse.py,sha256=WKz21Qy4ZJAMHUeYZNHnuw8SATA-q2CfnKgEUETDGMM,43031
+spacr/toxo.py,sha256=7to5GHfHt0EPL9qaxAQnA-_Io1t2NQ1nBxXlfZc3_oI,25674
+spacr/utils.py,sha256=L1-F13dcBxRgBIX4toe_zYqL4FyIbtz3tYwtiQwyShY,240591
 spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
 spacr/resources/data/lopit.csv,sha256=ERI5f9W8RdJGiSx_khoaylD374f8kmvLia1xjhD_mII,4421709
 spacr/resources/data/toxoplasma_metadata.csv,sha256=9TXx0VlClDHAxQmaLhoklE8NuETduXaGHZjhR_6lZfs,2969409
@@ -103,9 +103,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
 spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
-spacr-1.0.
-spacr-1.0.
-spacr-1.0.
-spacr-1.0.
-spacr-1.0.
-spacr-1.0.
+spacr-1.0.3.dist-info/LICENSE,sha256=t0Pov6pnK8thLteoF4xZGmdCwe5mhNwl3OXxLYTGD9U,1081
+spacr-1.0.3.dist-info/METADATA,sha256=mHQ3iqlrD2kQfkGHtW4LURYbI1VOuMjzCpDLQJFW3NY,10792
+spacr-1.0.3.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+spacr-1.0.3.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+spacr-1.0.3.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+spacr-1.0.3.dist-info/RECORD,,
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/LICENSE
File without changes
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/WHEEL
File without changes
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/entry_points.txt
File without changes
{spacr-1.0.1.dist-info → spacr-1.0.3.dist-info}/top_level.txt
File without changes