spacr 0.4.0__py3-none-any.whl → 0.4.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- spacr/__init__.py +2 -2
- spacr/deep_spacr.py +2 -95
- spacr/plot.py +2 -2
- spacr/settings.py +1 -1
- spacr/sp_stats.py +221 -0
- spacr/submodules.py +1 -1
- spacr/utils.py +16 -16
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/METADATA +1 -1
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/RECORD +13 -12
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/LICENSE +0 -0
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/WHEEL +0 -0
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/top_level.txt +0 -0
spacr/__init__.py
CHANGED
@@ -27,7 +27,7 @@ from . import openai
 from . import ml
 from . import toxo
 from . import cellpose
-from . import
+from . import sp_stats
 from . import logger

 __all__ = [
@@ -58,7 +58,7 @@ __all__ = [
     "ml",
     "toxo",
     "cellpose",
-    "
+    "sp_stats",
     "logger"
 ]

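With sp_stats imported in __init__.py and listed in __all__, the new module is reachable directly from the package namespace. A trivial check, assuming spacr 0.4.11 and its dependencies are installed:

import spacr

print(hasattr(spacr, 'sp_stats'))                   # True as of 0.4.11
from spacr.sp_stats import choose_p_adjust_method   # also importable directly
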
spacr/deep_spacr.py
CHANGED
@@ -938,67 +938,8 @@ def deep_spacr(settings={}):
     if os.path.exists(settings['model_path']):
         apply_model_to_tar(settings)

-def model_knowledge_transfer(
-    teacher_paths,
-    student_save_path,
-    data_loader,  # A DataLoader with (images, labels)
-    device='cpu',
-    student_model_name='maxvit_t',
-    pretrained=True,
-    dropout_rate=None,
-    use_checkpoint=False,
-    alpha=0.5,
-    temperature=2.0,
-    lr=1e-4,
-    epochs=10
-):
-    """
-    Performs multi-teacher knowledge distillation on a new labeled dataset,
-    producing a single student TorchModel that combines (distills) the
-    teachers' knowledge plus the labeled data.
-
-    Usage:
-        student = model_knowledge_transfer(
-            teacher_paths=[
-                'teacherA.pth',
-                'teacherB.pth',
-                ...
-            ],
-            student_save_path='distilled_student.pth',
-            data_loader=my_data_loader,
-            device='cuda',
-            student_model_name='maxvit_t',
-            alpha=0.5,
-            temperature=2.0,
-            lr=1e-4,
-            epochs=10
-        )
-
-    Then load it via:
-        fused_student = torch.load('distilled_student.pth')
-        # fused_student is a TorchModel instance, ready for inference.
-
-    Args:
-        teacher_paths (list[str]): List of paths to teacher models (TorchModel
-            or dict with 'model' in it). They must have the same architecture
-            or at least produce the same dimension of output.
-        student_save_path (str): Destination path to save the final student
-            TorchModel.
-        data_loader (DataLoader): Yields (images, labels) from the new dataset.
-        device (str): 'cpu' or 'cuda'.
-        student_model_name (str): Architecture name for the student TorchModel.
-        pretrained (bool): If the student should be initialized as pretrained.
-        dropout_rate (float): If needed by your TorchModel init.
-        use_checkpoint (bool): If needed by your TorchModel init.
-        alpha (float): Weight balancing real-label CE vs. distillation loss
-            (0..1).
-        temperature (float): Distillation temperature (>1 typically).
-        lr (float): Learning rate for the student.
-        epochs (int): Number of training epochs.
+def model_knowledge_transfer(teacher_paths, student_save_path, data_loader, device='cpu', student_model_name='maxvit_t', pretrained=True, dropout_rate=None, use_checkpoint=False, alpha=0.5, temperature=2.0, lr=1e-4, epochs=10):

-    Returns:
-        TorchModel: The final, trained student model.
-    """
     from spacr.utils import TorchModel  # Adapt if needed

     # Adjust filename to reflect knowledge-distillation if desired
@@ -1101,42 +1042,8 @@ def model_knowledge_transfer(

     return student_model

-def model_fusion(model_paths,
-                 save_path,
-                 device='cpu',
-                 model_name='maxvit_t',
-                 pretrained=True,
-                 dropout_rate=None,
-                 use_checkpoint=False,
-                 aggregator='mean'):
-    """
-    Fuses an arbitrary number of TorchModel instances by combining their weights
-    (using mean, geomean, median, sum, max, or min) and saves the entire fused
-    model object.
-
-    You can later load the fused model with:
-        model = torch.load('fused_model.pth')
-
-    which returns a ready-to-use TorchModel instance.
-
-    Parameters:
-        model_paths (list of str): Paths to the model checkpoints to fuse.
-            Each checkpoint can be:
-            - A dict with keys ['model', 'model_name', ...]
-            - A TorchModel instance
-        save_path (str): Destination path to save the fused model.
-        device (str): 'cpu' or 'cuda' for loading weights and final model device.
-        model_name (str): Default model name (used if not in checkpoint).
-        pretrained (bool): Default if not in checkpoint.
-        dropout_rate (float): Default if not in checkpoint.
-        use_checkpoint (bool): Default if not in checkpoint.
-        aggregator (str): How to combine weights across models:
-            'mean', 'geomean', 'median', 'sum', 'max', or 'min'.
+def model_fusion(model_paths,save_path,device='cpu',model_name='maxvit_t',pretrained=True,dropout_rate=None,use_checkpoint=False,aggregator='mean'):

-    Returns:
-        fused_model (TorchModel): The final fused TorchModel instance
-            with combined weights.
-    """
     from spacr.utils import TorchModel

     if save_path.endswith('.pth'):
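The two hunks above collapse the multi-line signatures of model_knowledge_transfer and model_fusion into single lines and drop their docstrings; the parameters themselves are unchanged. A minimal usage sketch reconstructed from the removed docstrings, where the checkpoint paths and my_data_loader are placeholders rather than files shipped with the package:

from spacr.deep_spacr import model_knowledge_transfer, model_fusion

# Distill several teacher checkpoints into a single student TorchModel.
student = model_knowledge_transfer(
    teacher_paths=['teacherA.pth', 'teacherB.pth'],   # placeholder checkpoint paths
    student_save_path='distilled_student.pth',
    data_loader=my_data_loader,                       # placeholder DataLoader yielding (images, labels)
    device='cuda',
    alpha=0.5,                                        # balance between label loss and distillation loss
    temperature=2.0,
    lr=1e-4,
    epochs=10,
)

# Alternatively, fuse checkpoints weight-wise instead of distilling them.
fused = model_fusion(
    model_paths=['teacherA.pth', 'teacherB.pth'],
    save_path='fused_model.pth',
    aggregator='mean',                                # or 'geomean', 'median', 'sum', 'max', 'min'
)
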
spacr/plot.py
CHANGED
@@ -2705,7 +2705,7 @@ class spacrGraph:
     def perform_posthoc_tests(self, is_normal, unique_groups):
         """Perform post-hoc tests for multiple groups based on all_to_all flag."""

-        from .
+        from .sp_stats import choose_p_adjust_method

         posthoc_results = []
         if is_normal and len(unique_groups) > 2 and self.all_to_all:
@@ -3815,7 +3815,7 @@ def plot_proportion_stacked_bars(settings, df, group_column, bin_column, prc_col
     - pairwise_results (list): Pairwise test results from `chi_pairwise`.
     """

-    from .
+    from .sp_stats import chi_pairwise

     # Calculate contingency table for overall chi-squared test
     raw_counts = df.groupby([group_column, bin_column]).size().unstack(fill_value=0)
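Both hunks simply re-point imports at the new sp_stats module. For orientation, a short sketch of the contingency table chi_pairwise consumes, mirroring the raw_counts construction shown in the second hunk; 'condition' and 'class' are invented column names:

import pandas as pd
from spacr.sp_stats import chi_pairwise

df = pd.DataFrame({
    'condition': ['ctrl', 'ctrl', 'ctrl', 'drug', 'drug', 'drug'],
    'class':     ['a',    'b',    'a',    'a',    'b',    'b'],
})

# Group-by-bin contingency table, as built in plot_proportion_stacked_bars.
raw_counts = df.groupby(['condition', 'class']).size().unstack(fill_value=0)

pairwise_df = chi_pairwise(raw_counts, verbose=True)  # adds 'p-value_adj' and 'adj' columns
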
spacr/settings.py
CHANGED
@@ -89,7 +89,7 @@ def set_default_settings_preprocess_generate_masks(settings={}):
     settings.setdefault('timelapse_frame_limits', None)
     settings.setdefault('timelapse_remove_transient', False)
     settings.setdefault('timelapse_mode', 'trackpy')
-    settings.setdefault('timelapse_objects',
+    settings.setdefault('timelapse_objects', None)

     # Misc settings
     settings.setdefault('all_to_mip', False)
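The timelapse_objects default is now None. Because setdefault only fills in missing keys, values supplied by the caller still win; a minimal sketch, assuming the function mutates the dict it receives (as the setdefault calls above suggest) and using 'cell' purely as an illustrative value:

from spacr.settings import set_default_settings_preprocess_generate_masks

settings = {}
set_default_settings_preprocess_generate_masks(settings)
print(settings['timelapse_objects'])   # None as of 0.4.11

settings = {'timelapse_objects': ['cell']}
set_default_settings_preprocess_generate_masks(settings)
print(settings['timelapse_objects'])   # ['cell'], setdefault never overwrites caller values
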
spacr/sp_stats.py
ADDED
@@ -0,0 +1,221 @@
+from scipy.stats import shapiro, normaltest, levene, ttest_ind, mannwhitneyu, kruskal, f_oneway
+from statsmodels.stats.multicomp import pairwise_tukeyhsd
+import scikit_posthocs as sp
+import numpy as np
+import pandas as pd
+from scipy.stats import chi2_contingency, fisher_exact
+import itertools
+from statsmodels.stats.multitest import multipletests
+
+
+def choose_p_adjust_method(num_groups, num_data_points):
+    """
+    Selects the most appropriate p-value adjustment method based on data characteristics.
+
+    Parameters:
+    - num_groups: Number of unique groups being compared
+    - num_data_points: Number of data points per group (assuming balanced groups)
+
+    Returns:
+    - A string representing the recommended p-adjustment method
+    """
+    num_comparisons = (num_groups * (num_groups - 1)) // 2  # Number of pairwise comparisons
+
+    # Decision logic for choosing the adjustment method
+    if num_comparisons <= 10 and num_data_points > 5:
+        return 'holm'  # Balanced between power and Type I error control
+    elif num_comparisons > 10 and num_data_points <= 5:
+        return 'fdr_bh'  # FDR control for large number of comparisons and small sample size
+    elif num_comparisons <= 10:
+        return 'sidak'  # Less conservative than Bonferroni, good for independent comparisons
+    else:
+        return 'bonferroni'  # Very conservative, use for strict control of Type I errors
+
+def perform_normality_tests(df, grouping_column, data_columns):
+    """Perform normality tests for each group and data column."""
+    unique_groups = df[grouping_column].unique()
+    normality_results = []
+
+    for column in data_columns:
+        for group in unique_groups:
+            data = df.loc[df[grouping_column] == group, column].dropna()
+            n_samples = len(data)
+
+            if n_samples < 3:
+                # Skip test if there aren't enough data points
+                print(f"Skipping normality test for group '{group}' on column '{column}' - Not enough data.")
+                normality_results.append({
+                    'Comparison': f'Normality test for {group} on {column}',
+                    'Test Statistic': None,
+                    'p-value': None,
+                    'Test Name': 'Skipped',
+                    'Column': column,
+                    'n': n_samples
+                })
+                continue
+
+            # Choose the appropriate normality test based on the sample size
+            if n_samples >= 8:
+                stat, p_value = normaltest(data)
+                test_name = "D'Agostino-Pearson test"
+            else:
+                stat, p_value = shapiro(data)
+                test_name = "Shapiro-Wilk test"
+
+            normality_results.append({
+                'Comparison': f'Normality test for {group} on {column}',
+                'Test Statistic': stat,
+                'p-value': p_value,
+                'Test Name': test_name,
+                'Column': column,
+                'n': n_samples
+            })
+
+        # Check if all groups are normally distributed (p > 0.05)
+        normal_p_values = [result['p-value'] for result in normality_results if result['Column'] == column and result['p-value'] is not None]
+        is_normal = all(p > 0.05 for p in normal_p_values)
+
+    return is_normal, normality_results
+
+
+def perform_levene_test(df, grouping_column, data_column):
+    """Perform Levene's test for equal variance."""
+    unique_groups = df[grouping_column].unique()
+    grouped_data = [df.loc[df[grouping_column] == group, data_column].dropna() for group in unique_groups]
+    stat, p_value = levene(*grouped_data)
+    return stat, p_value
+
+def perform_statistical_tests(df, grouping_column, data_columns, paired=False):
+    """Perform statistical tests for each data column."""
+    unique_groups = df[grouping_column].unique()
+    test_results = []
+
+    for column in data_columns:
+        grouped_data = [df.loc[df[grouping_column] == group, column].dropna() for group in unique_groups]
+        if len(unique_groups) == 2:  # For two groups
+            if paired:
+                print("Performing paired tests (not implemented in this template).")
+                continue  # Extend as needed
+            else:
+                # Check normality for two groups
+                is_normal, _ = perform_normality_tests(df, grouping_column, [column])
+                if is_normal:
+                    stat, p = ttest_ind(grouped_data[0], grouped_data[1])
+                    test_name = 'T-test'
+                else:
+                    stat, p = mannwhitneyu(grouped_data[0], grouped_data[1])
+                    test_name = 'Mann-Whitney U test'
+        else:
+            # Check normality for multiple groups
+            is_normal, _ = perform_normality_tests(df, grouping_column, [column])
+            if is_normal:
+                stat, p = f_oneway(*grouped_data)
+                test_name = 'One-way ANOVA'
+            else:
+                stat, p = kruskal(*grouped_data)
+                test_name = 'Kruskal-Wallis test'
+
+        test_results.append({
+            'Column': column,
+            'Test Name': test_name,
+            'Test Statistic': stat,
+            'p-value': p,
+            'Groups': len(unique_groups)
+        })
+
+    return test_results
+
+
+def perform_posthoc_tests(df, grouping_column, data_column, is_normal):
+    """Perform post-hoc tests for multiple groups with both original and adjusted p-values."""
+    unique_groups = df[grouping_column].unique()
+    posthoc_results = []
+
+    if len(unique_groups) > 2:
+        num_groups = len(unique_groups)
+        num_data_points = len(df[data_column].dropna()) // num_groups  # Assuming roughly equal data points per group
+        p_adjust_method = choose_p_adjust_method(num_groups, num_data_points)
+
+        if is_normal:
+            # Tukey's HSD automatically adjusts p-values
+            tukey_result = pairwise_tukeyhsd(df[data_column], df[grouping_column], alpha=0.05)
+            for comparison, p_value in zip(tukey_result._results_table.data[1:], tukey_result.pvalues):
+                posthoc_results.append({
+                    'Comparison': f"{comparison[0]} vs {comparison[1]}",
+                    'Original p-value': None,  # Tukey HSD does not provide raw p-values
+                    'Adjusted p-value': p_value,
+                    'Adjusted Method': 'Tukey HSD',
+                    'Test Name': 'Tukey HSD'
+                })
+        else:
+            # Dunn's test with p-value adjustment
+            raw_dunn_result = sp.posthoc_dunn(df, val_col=data_column, group_col=grouping_column, p_adjust=None)
+            adjusted_dunn_result = sp.posthoc_dunn(df, val_col=data_column, group_col=grouping_column, p_adjust=p_adjust_method)
+            for i, group_a in enumerate(adjusted_dunn_result.index):
+                for j, group_b in enumerate(adjusted_dunn_result.columns):
+                    if i < j:  # Only consider unique pairs
+                        posthoc_results.append({
+                            'Comparison': f"{group_a} vs {group_b}",
+                            'Original p-value': raw_dunn_result.iloc[i, j],
+                            'Adjusted p-value': adjusted_dunn_result.iloc[i, j],
+                            'Adjusted Method': p_adjust_method,
+                            'Test Name': "Dunn's Post-hoc"
+                        })
+
+    return posthoc_results
+
+def chi_pairwise(raw_counts, verbose=False):
+    """
+    Perform pairwise chi-square or Fisher's exact tests between all unique group pairs
+    and apply p-value correction.
+
+    Parameters:
+    - raw_counts (DataFrame): Contingency table with group-wise counts.
+    - verbose (bool): Whether to print results for each pair.
+
+    Returns:
+    - pairwise_df (DataFrame): DataFrame with pairwise test results, including corrected p-values.
+    """
+    pairwise_results = []
+    groups = raw_counts.index.unique()  # Use index from raw_counts for group pairs
+    raw_p_values = []  # Store raw p-values for correction later
+
+    # Calculate the number of groups and average number of data points per group
+    num_groups = len(groups)
+    num_data_points = raw_counts.sum(axis=1).mean()  # Average total data points per group
+    p_adjust_method = choose_p_adjust_method(num_groups, num_data_points)
+
+    for group1, group2 in itertools.combinations(groups, 2):
+        contingency_table = raw_counts.loc[[group1, group2]].values
+        if contingency_table.shape[1] == 2:  # Fisher's Exact Test for 2x2 tables
+            oddsratio, p_value = fisher_exact(contingency_table)
+            test_name = "Fisher's Exact Test"
+        else:  # Chi-Square Test for larger tables
+            chi2_stat, p_value, _, _ = chi2_contingency(contingency_table)
+            test_name = 'Pairwise Chi-Square Test'
+
+        pairwise_results.append({
+            'Group 1': group1,
+            'Group 2': group2,
+            'Test Name': test_name,
+            'p-value': p_value
+        })
+        raw_p_values.append(p_value)
+
+    # Apply p-value correction
+    corrected_p_values = multipletests(raw_p_values, method=p_adjust_method)[1]
+
+    # Add corrected p-values to results
+    for i, result in enumerate(pairwise_results):
+        result['p-value_adj'] = corrected_p_values[i]
+
+    pairwise_df = pd.DataFrame(pairwise_results)
+
+    pairwise_df['adj'] = p_adjust_method
+
+    if verbose:
+        # Print pairwise results
+        print("\nPairwise Frequency Analysis Results:")
+        print(pairwise_df.to_string(index=False))
+
+    return pairwise_df
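The new module picks a test based on group count and normality, then adjusts p-values with a method chosen from the comparison count and sample size. A toy end-to-end sketch; the 'group' and 'intensity' columns are invented for illustration:

import numpy as np
import pandas as pd
from spacr.sp_stats import (choose_p_adjust_method, perform_normality_tests,
                            perform_levene_test, perform_statistical_tests,
                            perform_posthoc_tests)

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'group': np.repeat(['a', 'b', 'c'], 20),
    'intensity': np.concatenate([rng.normal(1.0, 0.2, 20),
                                 rng.normal(1.2, 0.2, 20),
                                 rng.normal(1.6, 0.2, 20)]),
})

is_normal, normality = perform_normality_tests(df, 'group', ['intensity'])
levene_stat, levene_p = perform_levene_test(df, 'group', 'intensity')
tests = perform_statistical_tests(df, 'group', ['intensity'])           # one-way ANOVA or Kruskal-Wallis
posthoc = perform_posthoc_tests(df, 'group', 'intensity', is_normal)    # Tukey HSD or Dunn's test
print(choose_p_adjust_method(num_groups=3, num_data_points=20))         # 'holm' for 3 comparisons, n=20
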
spacr/submodules.py
CHANGED
@@ -1041,7 +1041,7 @@ def analyze_class_proportion(settings):
     from .io import _read_and_merge_data
     from .settings import set_analyze_class_proportion_defaults
     from .plot import plot_plates, plot_proportion_stacked_bars
-    from .
+    from .sp_stats import perform_normality_tests, perform_levene_test, perform_statistical_tests, perform_posthoc_tests

     settings = set_analyze_class_proportion_defaults(settings)
     save_settings(settings, name='analyze_class_proportion', show=True)
spacr/utils.py
CHANGED
@@ -5011,22 +5011,22 @@ def generate_cytoplasm_mask(nucleus_mask, cell_mask):
     return cytoplasm_mask

 def add_column_to_database(settings):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    """
+    Adds a new column to the database table by matching on a common column from the DataFrame.
+    If the column already exists in the database, it adds the column with a suffix.
+    NaN values will remain as NULL in the database.
+
+    Parameters:
+        settings (dict): A dictionary containing the following keys:
+            csv_path (str): Path to the CSV file with the data to be added.
+            db_path (str): Path to the SQLite database (or connection string for other databases).
+            table_name (str): The name of the table in the database.
+            update_column (str): The name of the new column in the DataFrame to add to the database.
+            match_column (str): The common column used to match rows.
+
+    Returns:
+        None
+    """

     # Read the DataFrame from the provided CSV path
     df = pd.read_csv(settings['csv_path'])
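The new docstring spells out the keys add_column_to_database expects in its settings dict. A hedged sketch of a call; every path and column name below is a placeholder chosen for illustration:

from spacr.utils import add_column_to_database

settings = {
    'csv_path': 'new_values.csv',     # CSV holding the column to add
    'db_path': 'measurements.db',     # SQLite database to update
    'table_name': 'my_table',         # table that receives the new column
    'update_column': 'new_metric',    # column from the CSV to add to the table
    'match_column': 'object_id',      # shared key used to align rows
}
add_column_to_database(settings)      # NaN values are stored as NULL
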
{spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-spacr/__init__.py,sha256=
+spacr/__init__.py,sha256=iPlE-WRc1CjSNGPMbVvEKlJsTQlKjq29VjzvNUCpcOM,1401
 spacr/__main__.py,sha256=bkAJJD2kjIqOP-u1kLvct9jQQCeUXzlEjdgitwi1Lm8,75
 spacr/app_annotate.py,sha256=W9eLPa_LZIvXsXx_-0iDFEU938LBDvRy6prXo0qF4KQ,2533
 spacr/app_classify.py,sha256=urTP_wlZ58hSyM5a19slYlBxN0PdC-9-ga0hvq8CGWc,165
@@ -10,7 +10,7 @@ spacr/app_umap.py,sha256=ZWAmf_OsIKbYvolYuWPMYhdlVe-n2CADoJulAizMiEo,153
 spacr/cellpose.py,sha256=RBHMs2vwXcfkj0xqAULpALyzJYXddSRycgZSzmwI7v0,14755
 spacr/chat_bot.py,sha256=n3Fhqg3qofVXHmh3H9sUcmfYy9MmgRnr48663MVdY9E,1244
 spacr/core.py,sha256=lKeqmsVrGQ8cPU_WkoNGNBWrk-gtR1RkRkwDdnJ0u64,48829
-spacr/deep_spacr.py,sha256=
+spacr/deep_spacr.py,sha256=WN64EaQqF87JZg3Uan46t5Y28xsAGD2KMjr2ht6CyDs,54563
 spacr/gui.py,sha256=ARyn9Q_g8HoP-cXh1nzMLVFCKqthY4v2u9yORyaQqQE,8230
 spacr/gui_core.py,sha256=U0A7waKgWq_Es9fMwcZbXUZYGzCqt2bgfY3HbxiFXnw,47466
 spacr/gui_elements.py,sha256=HmITDncklKwtdFhxLhtYXOwndsRfgwWIPVi83VlXHB4,146419
@@ -21,15 +21,16 @@ spacr/measure.py,sha256=jmOnLBudq3TuY723Cfo1EJBn67P6rlEvL6I-2FSkUgI,55315
 spacr/mediar.py,sha256=FwLvbLQW5LQzPgvJZG8Lw7GniA2vbZx6Jv6vIKu7I5c,14743
 spacr/ml.py,sha256=MrIAtUUxMOibWVL1SjCUnYlizawCp3l3SeY4Y9yEsPw,97251
 spacr/openai.py,sha256=5vBZ3Jl2llYcW3oaTEXgdyCB2aJujMUIO5K038z7w_A,1246
-spacr/plot.py,sha256=
+spacr/plot.py,sha256=Q5TbsR2NUWhA7z4HyF_2_FAEBFSNMU-G3UNDbRzW6mM,169485
 spacr/sequencing.py,sha256=ClUfwPPK6rNUbUuiEkzcwakzVyDKKUMv9ricrxT8qQY,25227
-spacr/settings.py,sha256=
+spacr/settings.py,sha256=fEk-9LSSvV1wGsn6xTaJWY7wF7_u8Fc-S1DaDHqZU3I,83997
 spacr/sim.py,sha256=1xKhXimNU3ukzIw-3l9cF3Znc_brW8h20yv8fSTzvss,71173
+spacr/sp_stats.py,sha256=mbhwsyIqt5upsSD346qGjdCw7CFBa0tIS7zHU9e0jNI,9536
 spacr/stats.py,sha256=mbhwsyIqt5upsSD346qGjdCw7CFBa0tIS7zHU9e0jNI,9536
-spacr/submodules.py,sha256=
+spacr/submodules.py,sha256=mb2g0igUTws7y6xW1zIJw1E7eQyxsjEj5mk2Z-Qd8uw,67629
 spacr/timelapse.py,sha256=KGfG4L4-QnFfgbF7L6C5wL_3gd_rqr05Foje6RsoTBg,39603
 spacr/toxo.py,sha256=TmuhejSIPLBvsgeblsUgSvBFCR1gOkApyTKidooJ5Us,26044
-spacr/utils.py,sha256=
+spacr/utils.py,sha256=of2t5Tq_RKdJ1QRDo4nJ3oEVev_6s2Oko3-lBxl4ScU,226293
 spacr/version.py,sha256=axH5tnGwtgSnJHb5IDhiu4Zjk5GhLyAEDRe-rnaoFOA,409
 spacr/resources/MEDIAR/.gitignore,sha256=Ff1q9Nme14JUd-4Q3jZ65aeQ5X4uttptssVDgBVHYo8,152
 spacr/resources/MEDIAR/LICENSE,sha256=yEj_TRDLUfDpHDNM0StALXIt6mLqSgaV2hcCwa6_TcY,1065
@@ -152,9 +153,9 @@ spacr/resources/icons/umap.png,sha256=dOLF3DeLYy9k0nkUybiZMe1wzHQwLJFRmgccppw-8b
 spacr/resources/images/plate1_E01_T0001F001L01A01Z01C02.tif,sha256=Tl0ZUfZ_AYAbu0up_nO0tPRtF1BxXhWQ3T3pURBCCRo,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A02Z01C01.tif,sha256=m8N-V71rA1TT4dFlENNg8s0Q0YEXXs8slIn7yObmZJQ,7958528
 spacr/resources/images/plate1_E01_T0001F001L01A03Z01C03.tif,sha256=Pbhk7xn-KUP6RSIhJsxQcrHFImBm3GEpLkzx7WOc-5M,7958528
-spacr-0.4.
-spacr-0.4.
-spacr-0.4.
-spacr-0.4.
-spacr-0.4.
-spacr-0.4.
+spacr-0.4.11.dist-info/LICENSE,sha256=SR-2MeGc6SCM1UORJYyarSWY_A-JaOMFDj7ReSs9tRM,1083
+spacr-0.4.11.dist-info/METADATA,sha256=xQABPDmILj46nsesjIjNZ7SoVliz-PzHg0AHprY85-4,6073
+spacr-0.4.11.dist-info/WHEEL,sha256=HiCZjzuy6Dw0hdX5R3LCFPDmFS4BWl8H-8W39XfmgX4,91
+spacr-0.4.11.dist-info/entry_points.txt,sha256=BMC0ql9aNNpv8lUZ8sgDLQMsqaVnX5L535gEhKUP5ho,296
+spacr-0.4.11.dist-info/top_level.txt,sha256=GJPU8FgwRXGzKeut6JopsSRY2R8T3i9lDgya42tLInY,6
+spacr-0.4.11.dist-info/RECORD,,
Files without changes: {spacr-0.4.0.dist-info → spacr-0.4.11.dist-info}/LICENSE, WHEEL, entry_points.txt, top_level.txt.