spacr 0.4.60__py3-none-any.whl → 0.9.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- spacr/__init__.py +2 -4
- spacr/__main__.py +3 -3
- spacr/core.py +13 -107
- spacr/gui.py +0 -1
- spacr/gui_core.py +2 -2
- spacr/gui_utils.py +5 -14
- spacr/io.py +189 -200
- spacr/mediar.py +12 -8
- spacr/plot.py +50 -13
- spacr/settings.py +71 -14
- spacr/submodules.py +21 -14
- spacr/timelapse.py +192 -6
- spacr/utils.py +180 -56
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/METADATA +64 -62
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/RECORD +20 -72
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/WHEEL +1 -1
- spacr/resources/MEDIAR/.gitignore +0 -18
- spacr/resources/MEDIAR/LICENSE +0 -21
- spacr/resources/MEDIAR/README.md +0 -189
- spacr/resources/MEDIAR/SetupDict.py +0 -39
- spacr/resources/MEDIAR/config/baseline.json +0 -60
- spacr/resources/MEDIAR/config/mediar_example.json +0 -72
- spacr/resources/MEDIAR/config/pred/pred_mediar.json +0 -17
- spacr/resources/MEDIAR/config/step1_pretraining/phase1.json +0 -55
- spacr/resources/MEDIAR/config/step1_pretraining/phase2.json +0 -58
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning1.json +0 -66
- spacr/resources/MEDIAR/config/step2_finetuning/finetuning2.json +0 -66
- spacr/resources/MEDIAR/config/step3_prediction/base_prediction.json +0 -16
- spacr/resources/MEDIAR/config/step3_prediction/ensemble_tta.json +0 -23
- spacr/resources/MEDIAR/core/BasePredictor.py +0 -120
- spacr/resources/MEDIAR/core/BaseTrainer.py +0 -240
- spacr/resources/MEDIAR/core/Baseline/Predictor.py +0 -59
- spacr/resources/MEDIAR/core/Baseline/Trainer.py +0 -113
- spacr/resources/MEDIAR/core/Baseline/__init__.py +0 -2
- spacr/resources/MEDIAR/core/Baseline/utils.py +0 -80
- spacr/resources/MEDIAR/core/MEDIAR/EnsemblePredictor.py +0 -105
- spacr/resources/MEDIAR/core/MEDIAR/Predictor.py +0 -234
- spacr/resources/MEDIAR/core/MEDIAR/Trainer.py +0 -172
- spacr/resources/MEDIAR/core/MEDIAR/__init__.py +0 -3
- spacr/resources/MEDIAR/core/MEDIAR/utils.py +0 -429
- spacr/resources/MEDIAR/core/__init__.py +0 -2
- spacr/resources/MEDIAR/core/utils.py +0 -40
- spacr/resources/MEDIAR/evaluate.py +0 -71
- spacr/resources/MEDIAR/generate_mapping.py +0 -121
- spacr/resources/MEDIAR/image/examples/img1.tiff +0 -0
- spacr/resources/MEDIAR/image/examples/img2.tif +0 -0
- spacr/resources/MEDIAR/image/failure_cases.png +0 -0
- spacr/resources/MEDIAR/image/mediar_framework.png +0 -0
- spacr/resources/MEDIAR/image/mediar_model.PNG +0 -0
- spacr/resources/MEDIAR/image/mediar_results.png +0 -0
- spacr/resources/MEDIAR/main.py +0 -125
- spacr/resources/MEDIAR/predict.py +0 -70
- spacr/resources/MEDIAR/requirements.txt +0 -14
- spacr/resources/MEDIAR/train_tools/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/data_utils/custom/CellAware.py +0 -88
- spacr/resources/MEDIAR/train_tools/data_utils/custom/LoadImage.py +0 -161
- spacr/resources/MEDIAR/train_tools/data_utils/custom/NormalizeImage.py +0 -77
- spacr/resources/MEDIAR/train_tools/data_utils/custom/__init__.py +0 -3
- spacr/resources/MEDIAR/train_tools/data_utils/custom/modalities.pkl +0 -0
- spacr/resources/MEDIAR/train_tools/data_utils/datasetter.py +0 -208
- spacr/resources/MEDIAR/train_tools/data_utils/transforms.py +0 -148
- spacr/resources/MEDIAR/train_tools/data_utils/utils.py +0 -84
- spacr/resources/MEDIAR/train_tools/measures.py +0 -200
- spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py +0 -102
- spacr/resources/MEDIAR/train_tools/models/__init__.py +0 -1
- spacr/resources/MEDIAR/train_tools/utils.py +0 -70
- spacr/stats.py +0 -221
- /spacr/{cellpose.py → spacr_cellpose.py} +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/LICENSE +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/entry_points.txt +0 -0
- {spacr-0.4.60.dist-info → spacr-0.9.0.dist-info}/top_level.txt +0 -0
spacr/resources/MEDIAR/train_tools/models/MEDIARFormer.py
DELETED
@@ -1,102 +0,0 @@
-import torch
-import torch.nn as nn
-
-from segmentation_models_pytorch import MAnet
-from segmentation_models_pytorch.base.modules import Activation
-
-__all__ = ["MEDIARFormer"]
-
-
-class MEDIARFormer(MAnet):
-    """MEDIAR-Former Model"""
-
-    def __init__(
-        self,
-        encoder_name="mit_b5",  # Default encoder
-        encoder_weights="imagenet",  # Pre-trained weights
-        decoder_channels=(1024, 512, 256, 128, 64),  # Decoder configuration
-        decoder_pab_channels=256,  # Decoder Pyramid Attention Block channels
-        in_channels=3,  # Number of input channels
-        classes=3,  # Number of output classes
-    ):
-        # Initialize the MAnet model with provided parameters
-        super().__init__(
-            encoder_name=encoder_name,
-            encoder_weights=encoder_weights,
-            decoder_channels=decoder_channels,
-            decoder_pab_channels=decoder_pab_channels,
-            in_channels=in_channels,
-            classes=classes,
-        )
-
-        # Remove the default segmentation head as it's not used in this architecture
-        self.segmentation_head = None
-
-        # Modify all activation functions in the encoder and decoder from ReLU to Mish
-        _convert_activations(self.encoder, nn.ReLU, nn.Mish(inplace=True))
-        _convert_activations(self.decoder, nn.ReLU, nn.Mish(inplace=True))
-
-        # Add custom segmentation heads for different segmentation tasks
-        self.cellprob_head = DeepSegmentationHead(
-            in_channels=decoder_channels[-1], out_channels=1
-        )
-        self.gradflow_head = DeepSegmentationHead(
-            in_channels=decoder_channels[-1], out_channels=2
-        )
-
-    def forward(self, x):
-        """Forward pass through the network"""
-        # Ensure the input shape is correct
-        self.check_input_shape(x)
-
-        # Encode the input and then decode it
-        features = self.encoder(x)
-        decoder_output = self.decoder(*features)
-
-        # Generate masks for cell probability and gradient flows
-        cellprob_mask = self.cellprob_head(decoder_output)
-        gradflow_mask = self.gradflow_head(decoder_output)
-
-        # Concatenate the masks for output
-        masks = torch.cat([gradflow_mask, cellprob_mask], dim=1)
-
-        return masks
-
-
-class DeepSegmentationHead(nn.Sequential):
-    """Custom segmentation head for generating specific masks"""
-
-    def __init__(
-        self, in_channels, out_channels, kernel_size=3, activation=None, upsampling=1
-    ):
-        # Define a sequence of layers for the segmentation head
-        layers = [
-            nn.Conv2d(
-                in_channels,
-                in_channels // 2,
-                kernel_size=kernel_size,
-                padding=kernel_size // 2,
-            ),
-            nn.Mish(inplace=True),
-            nn.BatchNorm2d(in_channels // 2),
-            nn.Conv2d(
-                in_channels // 2,
-                out_channels,
-                kernel_size=kernel_size,
-                padding=kernel_size // 2,
-            ),
-            nn.UpsamplingBilinear2d(scale_factor=upsampling)
-            if upsampling > 1
-            else nn.Identity(),
-            Activation(activation) if activation else nn.Identity(),
-        ]
-        super().__init__(*layers)
-
-
-def _convert_activations(module, from_activation, to_activation):
-    """Recursively convert activation functions in a module"""
-    for name, child in module.named_children():
-        if isinstance(child, from_activation):
-            setattr(module, name, to_activation)
-        else:
-            _convert_activations(child, from_activation, to_activation)
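Note: for orientation, a minimal sketch of how the removed MEDIARFormer class could be exercised on a dummy input. This is illustrative only: the import path assumes the deleted MEDIAR tree is on sys.path, and encoder_weights=None is passed to avoid downloading pretrained encoder weights.

import torch
from train_tools.models import MEDIARFormer  # assumed import path for the deleted module

model = MEDIARFormer(encoder_weights=None)  # defaults otherwise: mit_b5 encoder, 5 decoder stages
model.eval()
with torch.no_grad():
    masks = model(torch.randn(1, 3, 512, 512))  # 3-channel input, side length divisible by 32
# Per the forward() above, masks[:, :2] are the gradient-flow channels and
# masks[:, 2:] is the cell-probability channel, concatenated along dim=1.
print(masks.shape)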
spacr/resources/MEDIAR/train_tools/models/__init__.py
DELETED
@@ -1 +0,0 @@
-from .MEDIARFormer import *
spacr/resources/MEDIAR/train_tools/utils.py
DELETED
@@ -1,70 +0,0 @@
-import torch
-import numpy as np
-import os, json, random
-from pprint import pprint
-
-__all__ = ["ConfLoader", "directory_setter", "random_seeder", "pprint_config"]
-
-
-class ConfLoader:
-    """
-    Load json config file using DictWithAttributeAccess object_hook.
-    ConfLoader(conf_name).opt attribute is the result of loading json config file.
-    """
-
-    class DictWithAttributeAccess(dict):
-        """
-        This inner class makes dict to be accessed same as class attribute.
-        For example, you can use opt.key instead of the opt['key'].
-        """
-
-        def __getattr__(self, key):
-            return self[key]
-
-        def __setattr__(self, key, value):
-            self[key] = value
-
-    def __init__(self, conf_name):
-        self.conf_name = conf_name
-        self.opt = self.__get_opt()
-
-    def __load_conf(self):
-        with open(self.conf_name, "r") as conf:
-            opt = json.load(
-                conf, object_hook=lambda dict: self.DictWithAttributeAccess(dict)
-            )
-        return opt
-
-    def __get_opt(self):
-        opt = self.__load_conf()
-        opt = self.DictWithAttributeAccess(opt)
-
-        return opt
-
-
-def directory_setter(path="./results", make_dir=False):
-    """
-    Make dictionary if not exists.
-    """
-    if not os.path.exists(path) and make_dir:
-        os.makedirs(path)  # make dir if not exist
-        print("directory %s is created" % path)
-
-    if not os.path.isdir(path):
-        raise NotADirectoryError(
-            "%s is not valid. set make_dir=True to make dir." % path
-        )
-
-
-def random_seeder(seed):
-    """Fix randomness."""
-    torch.manual_seed(seed)
-    np.random.seed(seed)
-    random.seed(seed)
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
-
-def pprint_config(opt):
-    print("\n" + "=" * 50 + " Configuration " + "=" * 50)
-    pprint(opt, compact=True)
-    print("=" * 115 + "\n")
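Note: a minimal sketch of how the removed helpers fit together. Illustrative only: the import path assumes the deleted train_tools package is importable, and the "train_setups"/"seed" keys are made up for the example.

import json, tempfile
from train_tools.utils import ConfLoader, random_seeder  # assumed import path

# Write a throwaway JSON config, then load it with attribute-style access.
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump({"train_setups": {"seed": 42}}, f)  # example keys, not a real MEDIAR config

opt = ConfLoader(f.name).opt
random_seeder(opt.train_setups.seed)  # seeds torch, numpy and random; makes cuDNN deterministic
print(opt.train_setups.seed)  # -> 42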
spacr/stats.py
DELETED
@@ -1,221 +0,0 @@
-from scipy.stats import shapiro, normaltest, levene, ttest_ind, mannwhitneyu, kruskal, f_oneway
-from statsmodels.stats.multicomp import pairwise_tukeyhsd
-import scikit_posthocs as sp
-import numpy as np
-import pandas as pd
-from scipy.stats import chi2_contingency, fisher_exact
-import itertools
-from statsmodels.stats.multitest import multipletests
-
-
-def choose_p_adjust_method(num_groups, num_data_points):
-    """
-    Selects the most appropriate p-value adjustment method based on data characteristics.
-
-    Parameters:
-    - num_groups: Number of unique groups being compared
-    - num_data_points: Number of data points per group (assuming balanced groups)
-
-    Returns:
-    - A string representing the recommended p-adjustment method
-    """
-    num_comparisons = (num_groups * (num_groups - 1)) // 2  # Number of pairwise comparisons
-
-    # Decision logic for choosing the adjustment method
-    if num_comparisons <= 10 and num_data_points > 5:
-        return 'holm'  # Balanced between power and Type I error control
-    elif num_comparisons > 10 and num_data_points <= 5:
-        return 'fdr_bh'  # FDR control for large number of comparisons and small sample size
-    elif num_comparisons <= 10:
-        return 'sidak'  # Less conservative than Bonferroni, good for independent comparisons
-    else:
-        return 'bonferroni'  # Very conservative, use for strict control of Type I errors
-
-def perform_normality_tests(df, grouping_column, data_columns):
-    """Perform normality tests for each group and data column."""
-    unique_groups = df[grouping_column].unique()
-    normality_results = []
-
-    for column in data_columns:
-        for group in unique_groups:
-            data = df.loc[df[grouping_column] == group, column].dropna()
-            n_samples = len(data)
-
-            if n_samples < 3:
-                # Skip test if there aren't enough data points
-                print(f"Skipping normality test for group '{group}' on column '{column}' - Not enough data.")
-                normality_results.append({
-                    'Comparison': f'Normality test for {group} on {column}',
-                    'Test Statistic': None,
-                    'p-value': None,
-                    'Test Name': 'Skipped',
-                    'Column': column,
-                    'n': n_samples
-                })
-                continue
-
-            # Choose the appropriate normality test based on the sample size
-            if n_samples >= 8:
-                stat, p_value = normaltest(data)
-                test_name = "D'Agostino-Pearson test"
-            else:
-                stat, p_value = shapiro(data)
-                test_name = "Shapiro-Wilk test"
-
-            normality_results.append({
-                'Comparison': f'Normality test for {group} on {column}',
-                'Test Statistic': stat,
-                'p-value': p_value,
-                'Test Name': test_name,
-                'Column': column,
-                'n': n_samples
-            })
-
-        # Check if all groups are normally distributed (p > 0.05)
-        normal_p_values = [result['p-value'] for result in normality_results if result['Column'] == column and result['p-value'] is not None]
-        is_normal = all(p > 0.05 for p in normal_p_values)
-
-    return is_normal, normality_results
-
-
-def perform_levene_test(df, grouping_column, data_column):
-    """Perform Levene's test for equal variance."""
-    unique_groups = df[grouping_column].unique()
-    grouped_data = [df.loc[df[grouping_column] == group, data_column].dropna() for group in unique_groups]
-    stat, p_value = levene(*grouped_data)
-    return stat, p_value
-
-def perform_statistical_tests(df, grouping_column, data_columns, paired=False):
-    """Perform statistical tests for each data column."""
-    unique_groups = df[grouping_column].unique()
-    test_results = []
-
-    for column in data_columns:
-        grouped_data = [df.loc[df[grouping_column] == group, column].dropna() for group in unique_groups]
-        if len(unique_groups) == 2:  # For two groups
-            if paired:
-                print("Performing paired tests (not implemented in this template).")
-                continue  # Extend as needed
-            else:
-                # Check normality for two groups
-                is_normal, _ = perform_normality_tests(df, grouping_column, [column])
-                if is_normal:
-                    stat, p = ttest_ind(grouped_data[0], grouped_data[1])
-                    test_name = 'T-test'
-                else:
-                    stat, p = mannwhitneyu(grouped_data[0], grouped_data[1])
-                    test_name = 'Mann-Whitney U test'
-        else:
-            # Check normality for multiple groups
-            is_normal, _ = perform_normality_tests(df, grouping_column, [column])
-            if is_normal:
-                stat, p = f_oneway(*grouped_data)
-                test_name = 'One-way ANOVA'
-            else:
-                stat, p = kruskal(*grouped_data)
-                test_name = 'Kruskal-Wallis test'
-
-        test_results.append({
-            'Column': column,
-            'Test Name': test_name,
-            'Test Statistic': stat,
-            'p-value': p,
-            'Groups': len(unique_groups)
-        })
-
-    return test_results
-
-
-def perform_posthoc_tests(df, grouping_column, data_column, is_normal):
-    """Perform post-hoc tests for multiple groups with both original and adjusted p-values."""
-    unique_groups = df[grouping_column].unique()
-    posthoc_results = []
-
-    if len(unique_groups) > 2:
-        num_groups = len(unique_groups)
-        num_data_points = len(df[data_column].dropna()) // num_groups  # Assuming roughly equal data points per group
-        p_adjust_method = choose_p_adjust_method(num_groups, num_data_points)
-
-        if is_normal:
-            # Tukey's HSD automatically adjusts p-values
-            tukey_result = pairwise_tukeyhsd(df[data_column], df[grouping_column], alpha=0.05)
-            for comparison, p_value in zip(tukey_result._results_table.data[1:], tukey_result.pvalues):
-                posthoc_results.append({
-                    'Comparison': f"{comparison[0]} vs {comparison[1]}",
-                    'Original p-value': None,  # Tukey HSD does not provide raw p-values
-                    'Adjusted p-value': p_value,
-                    'Adjusted Method': 'Tukey HSD',
-                    'Test Name': 'Tukey HSD'
-                })
-        else:
-            # Dunn's test with p-value adjustment
-            raw_dunn_result = sp.posthoc_dunn(df, val_col=data_column, group_col=grouping_column, p_adjust=None)
-            adjusted_dunn_result = sp.posthoc_dunn(df, val_col=data_column, group_col=grouping_column, p_adjust=p_adjust_method)
-            for i, group_a in enumerate(adjusted_dunn_result.index):
-                for j, group_b in enumerate(adjusted_dunn_result.columns):
-                    if i < j:  # Only consider unique pairs
-                        posthoc_results.append({
-                            'Comparison': f"{group_a} vs {group_b}",
-                            'Original p-value': raw_dunn_result.iloc[i, j],
-                            'Adjusted p-value': adjusted_dunn_result.iloc[i, j],
-                            'Adjusted Method': p_adjust_method,
-                            'Test Name': "Dunn's Post-hoc"
-                        })
-
-    return posthoc_results
-
-def chi_pairwise(raw_counts, verbose=False):
-    """
-    Perform pairwise chi-square or Fisher's exact tests between all unique group pairs
-    and apply p-value correction.
-
-    Parameters:
-    - raw_counts (DataFrame): Contingency table with group-wise counts.
-    - verbose (bool): Whether to print results for each pair.
-
-    Returns:
-    - pairwise_df (DataFrame): DataFrame with pairwise test results, including corrected p-values.
-    """
-    pairwise_results = []
-    groups = raw_counts.index.unique()  # Use index from raw_counts for group pairs
-    raw_p_values = []  # Store raw p-values for correction later
-
-    # Calculate the number of groups and average number of data points per group
-    num_groups = len(groups)
-    num_data_points = raw_counts.sum(axis=1).mean()  # Average total data points per group
-    p_adjust_method = choose_p_adjust_method(num_groups, num_data_points)
-
-    for group1, group2 in itertools.combinations(groups, 2):
-        contingency_table = raw_counts.loc[[group1, group2]].values
-        if contingency_table.shape[1] == 2:  # Fisher's Exact Test for 2x2 tables
-            oddsratio, p_value = fisher_exact(contingency_table)
-            test_name = "Fisher's Exact Test"
-        else:  # Chi-Square Test for larger tables
-            chi2_stat, p_value, _, _ = chi2_contingency(contingency_table)
-            test_name = 'Pairwise Chi-Square Test'
-
-        pairwise_results.append({
-            'Group 1': group1,
-            'Group 2': group2,
-            'Test Name': test_name,
-            'p-value': p_value
-        })
-        raw_p_values.append(p_value)
-
-    # Apply p-value correction
-    corrected_p_values = multipletests(raw_p_values, method=p_adjust_method)[1]
-
-    # Add corrected p-values to results
-    for i, result in enumerate(pairwise_results):
-        result['p-value_adj'] = corrected_p_values[i]
-
-    pairwise_df = pd.DataFrame(pairwise_results)
-
-    pairwise_df['adj'] = p_adjust_method
-
-    if verbose:
-        # Print pairwise results
-        print("\nPairwise Frequency Analysis Results:")
-        print(pairwise_df.to_string(index=False))
-
-    return pairwise_df
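Note: in 0.4.60 these helpers were importable from spacr.stats and chained together roughly as follows. Illustrative sketch with made-up data; the "treatment" and "intensity" column names are arbitrary.

import numpy as np
import pandas as pd
from spacr.stats import perform_normality_tests, perform_statistical_tests, perform_posthoc_tests

rng = np.random.default_rng(0)
# Long-format table: one grouping column, one measurement column.
df = pd.DataFrame({
    "treatment": np.repeat(["ctrl", "drug_a", "drug_b"], 20),
    "intensity": np.concatenate([
        rng.normal(1.0, 0.2, 20),
        rng.normal(1.4, 0.2, 20),
        rng.normal(0.8, 0.2, 20),
    ]),
})

is_normal, _ = perform_normality_tests(df, "treatment", ["intensity"])
print(perform_statistical_tests(df, "treatment", ["intensity"]))       # one-way ANOVA or Kruskal-Wallis
print(perform_posthoc_tests(df, "treatment", "intensity", is_normal))  # Tukey HSD or Dunn's post-hoc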