scitex 2.15.1__py3-none-any.whl → 2.15.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- scitex/__init__.py +68 -61
- scitex/_mcp_tools/introspect.py +42 -23
- scitex/_mcp_tools/template.py +24 -0
- scitex/ai/classification/timeseries/_TimeSeriesSlidingWindowSplit.py +30 -1550
- scitex/ai/classification/timeseries/_sliding_window_core.py +467 -0
- scitex/ai/classification/timeseries/_sliding_window_plotting.py +369 -0
- scitex/audio/__init__.py +2 -2
- scitex/audio/_tts.py +18 -10
- scitex/audio/engines/base.py +17 -10
- scitex/audio/engines/elevenlabs_engine.py +1 -1
- scitex/canvas/editor/flask_editor/_core/__init__.py +27 -0
- scitex/canvas/editor/flask_editor/_core/_bbox_extraction.py +200 -0
- scitex/canvas/editor/flask_editor/_core/_editor.py +173 -0
- scitex/canvas/editor/flask_editor/_core/_export_helpers.py +353 -0
- scitex/canvas/editor/flask_editor/_core/_routes_basic.py +190 -0
- scitex/canvas/editor/flask_editor/_core/_routes_export.py +332 -0
- scitex/canvas/editor/flask_editor/_core/_routes_panels.py +252 -0
- scitex/canvas/editor/flask_editor/_core/_routes_save.py +218 -0
- scitex/canvas/editor/flask_editor/_core.py +25 -1684
- scitex/cli/introspect.py +112 -74
- scitex/cli/main.py +2 -0
- scitex/cli/plt.py +357 -0
- scitex/cli/repro.py +15 -8
- scitex/cli/resource.py +15 -8
- scitex/cli/scholar/__init__.py +15 -8
- scitex/cli/social.py +6 -6
- scitex/cli/stats.py +15 -8
- scitex/cli/template.py +129 -12
- scitex/cli/tex.py +15 -8
- scitex/cli/writer.py +15 -8
- scitex/cloud/__init__.py +41 -2
- scitex/config/_env_registry.py +84 -19
- scitex/context/__init__.py +22 -0
- scitex/dev/__init__.py +20 -1
- scitex/gen/__init__.py +50 -14
- scitex/gen/_list_packages.py +4 -4
- scitex/introspect/__init__.py +16 -9
- scitex/introspect/_core.py +7 -8
- scitex/{gen/_inspect_module.py → introspect/_list_api.py} +43 -54
- scitex/introspect/_mcp/__init__.py +10 -6
- scitex/introspect/_mcp/handlers.py +37 -12
- scitex/introspect/_members.py +7 -3
- scitex/introspect/_signature.py +3 -3
- scitex/introspect/_source.py +2 -2
- scitex/io/_save.py +1 -2
- scitex/logging/_formatters.py +19 -9
- scitex/mcp_server.py +1 -1
- scitex/os/__init__.py +4 -0
- scitex/{gen → os}/_check_host.py +4 -5
- scitex/plt/__init__.py +11 -14
- scitex/session/__init__.py +26 -7
- scitex/session/_decorator.py +1 -1
- scitex/sh/__init__.py +7 -4
- scitex/social/__init__.py +10 -8
- scitex/stats/_mcp/_handlers/__init__.py +31 -0
- scitex/stats/_mcp/_handlers/_corrections.py +113 -0
- scitex/stats/_mcp/_handlers/_descriptive.py +78 -0
- scitex/stats/_mcp/_handlers/_effect_size.py +106 -0
- scitex/stats/_mcp/_handlers/_format.py +94 -0
- scitex/stats/_mcp/_handlers/_normality.py +110 -0
- scitex/stats/_mcp/_handlers/_posthoc.py +224 -0
- scitex/stats/_mcp/_handlers/_power.py +247 -0
- scitex/stats/_mcp/_handlers/_recommend.py +102 -0
- scitex/stats/_mcp/_handlers/_run_test.py +279 -0
- scitex/stats/_mcp/_handlers/_stars.py +48 -0
- scitex/stats/_mcp/handlers.py +19 -1171
- scitex/stats/auto/_stat_style.py +175 -0
- scitex/stats/auto/_style_definitions.py +411 -0
- scitex/stats/auto/_styles.py +22 -620
- scitex/stats/descriptive/__init__.py +11 -8
- scitex/stats/descriptive/_ci.py +39 -0
- scitex/stats/power/_power.py +15 -4
- scitex/str/__init__.py +2 -1
- scitex/str/_title_case.py +63 -0
- scitex/template/__init__.py +25 -10
- scitex/template/_code_templates.py +147 -0
- scitex/template/_mcp/handlers.py +81 -0
- scitex/template/_mcp/tool_schemas.py +55 -0
- scitex/template/_templates/__init__.py +51 -0
- scitex/template/_templates/audio.py +233 -0
- scitex/template/_templates/canvas.py +312 -0
- scitex/template/_templates/capture.py +268 -0
- scitex/template/_templates/config.py +43 -0
- scitex/template/_templates/diagram.py +294 -0
- scitex/template/_templates/io.py +107 -0
- scitex/template/_templates/module.py +53 -0
- scitex/template/_templates/plt.py +202 -0
- scitex/template/_templates/scholar.py +267 -0
- scitex/template/_templates/session.py +130 -0
- scitex/template/_templates/session_minimal.py +43 -0
- scitex/template/_templates/session_plot.py +67 -0
- scitex/template/_templates/session_stats.py +77 -0
- scitex/template/_templates/stats.py +323 -0
- scitex/template/_templates/writer.py +296 -0
- scitex/ui/_backends/_email.py +10 -2
- scitex/ui/_backends/_webhook.py +5 -1
- scitex/web/_search_pubmed.py +10 -6
- {scitex-2.15.1.dist-info → scitex-2.15.2.dist-info}/METADATA +1 -1
- {scitex-2.15.1.dist-info → scitex-2.15.2.dist-info}/RECORD +105 -64
- scitex/gen/_ci.py +0 -12
- scitex/gen/_title_case.py +0 -89
- /scitex/{gen → context}/_detect_environment.py +0 -0
- /scitex/{gen → context}/_get_notebook_path.py +0 -0
- /scitex/{gen/_shell.py → sh/_shell_legacy.py} +0 -0
- {scitex-2.15.1.dist-info → scitex-2.15.2.dist-info}/WHEEL +0 -0
- {scitex-2.15.1.dist-info → scitex-2.15.2.dist-info}/entry_points.txt +0 -0
- {scitex-2.15.1.dist-info → scitex-2.15.2.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Timestamp: 2026-01-25
|
|
3
|
+
# File: src/scitex/stats/_mcp/_handlers/_recommend.py
|
|
4
|
+
|
|
5
|
+
"""Test recommendation handler."""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
|
|
12
|
+
__all__ = ["recommend_tests_handler"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _get_test_rationale(test_name: str) -> str:
    """Return a one-sentence justification for recommending *test_name*.

    Unknown test names fall back to a generic applicability message.
    """
    fallback = "Applicable to the given context"
    known_rationales = {
        "brunner_munzel": "Robust nonparametric test - no normality/equal variance assumptions",
        "ttest_ind": "Classic parametric test for comparing two independent groups",
        "ttest_paired": "Parametric test for paired/matched samples",
        "ttest_1samp": "One-sample t-test for comparing to a population mean",
        "mannwhitneyu": "Nonparametric alternative to independent t-test",
        "wilcoxon": "Nonparametric alternative to paired t-test",
        "anova": "Parametric test for comparing 3+ groups",
        "kruskal": "Nonparametric alternative to one-way ANOVA",
        "chi2": "Test for independence in contingency tables",
        "fisher_exact": "Exact test for small sample contingency tables",
        "pearson": "Parametric correlation coefficient",
        "spearman": "Nonparametric rank correlation",
        "kendall": "Robust nonparametric correlation for ordinal data",
    }
    return known_rationales.get(test_name, fallback)
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
async def recommend_tests_handler(
|
|
36
|
+
n_groups: int = 2,
|
|
37
|
+
sample_sizes: list[int] | None = None,
|
|
38
|
+
outcome_type: str = "continuous",
|
|
39
|
+
design: str = "between",
|
|
40
|
+
paired: bool = False,
|
|
41
|
+
has_control_group: bool = False,
|
|
42
|
+
top_k: int = 3,
|
|
43
|
+
) -> dict:
|
|
44
|
+
"""Recommend appropriate statistical tests based on data characteristics."""
|
|
45
|
+
try:
|
|
46
|
+
from scitex.stats.auto import StatContext, recommend_tests
|
|
47
|
+
|
|
48
|
+
loop = asyncio.get_event_loop()
|
|
49
|
+
|
|
50
|
+
def do_recommend():
|
|
51
|
+
ctx = StatContext(
|
|
52
|
+
n_groups=n_groups,
|
|
53
|
+
sample_sizes=sample_sizes or [30] * n_groups,
|
|
54
|
+
outcome_type=outcome_type,
|
|
55
|
+
design=design,
|
|
56
|
+
paired=paired,
|
|
57
|
+
has_control_group=has_control_group,
|
|
58
|
+
n_factors=1,
|
|
59
|
+
)
|
|
60
|
+
tests = recommend_tests(ctx, top_k=top_k)
|
|
61
|
+
|
|
62
|
+
# Get details about each recommended test
|
|
63
|
+
from scitex.stats.auto._rules import TEST_RULES
|
|
64
|
+
|
|
65
|
+
recommendations = []
|
|
66
|
+
for test_name in tests:
|
|
67
|
+
rule = TEST_RULES.get(test_name)
|
|
68
|
+
if rule:
|
|
69
|
+
recommendations.append(
|
|
70
|
+
{
|
|
71
|
+
"name": test_name,
|
|
72
|
+
"family": rule.family,
|
|
73
|
+
"priority": rule.priority,
|
|
74
|
+
"needs_normality": rule.needs_normality,
|
|
75
|
+
"needs_equal_variance": rule.needs_equal_variance,
|
|
76
|
+
"rationale": _get_test_rationale(test_name),
|
|
77
|
+
}
|
|
78
|
+
)
|
|
79
|
+
|
|
80
|
+
return recommendations
|
|
81
|
+
|
|
82
|
+
recommendations = await loop.run_in_executor(None, do_recommend)
|
|
83
|
+
|
|
84
|
+
return {
|
|
85
|
+
"success": True,
|
|
86
|
+
"context": {
|
|
87
|
+
"n_groups": n_groups,
|
|
88
|
+
"sample_sizes": sample_sizes,
|
|
89
|
+
"outcome_type": outcome_type,
|
|
90
|
+
"design": design,
|
|
91
|
+
"paired": paired,
|
|
92
|
+
"has_control_group": has_control_group,
|
|
93
|
+
},
|
|
94
|
+
"recommendations": recommendations,
|
|
95
|
+
"timestamp": datetime.now().isoformat(),
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
except Exception as e:
|
|
99
|
+
return {"success": False, "error": str(e)}
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
# EOF
|
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Timestamp: 2026-01-25
|
|
3
|
+
# File: src/scitex/stats/_mcp/_handlers/_run_test.py
|
|
4
|
+
|
|
5
|
+
"""Statistical test execution handler."""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import asyncio
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
|
|
12
|
+
import numpy as np
|
|
13
|
+
|
|
14
|
+
__all__ = ["run_test_handler"]
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
async def run_test_handler(
    test_name: str,
    data: list[list[float]],
    alternative: str = "two-sided",
) -> dict:
    """Execute a statistical test on provided data.

    Parameters
    ----------
    test_name : str
        One of: ttest_ind, ttest_paired, ttest_1samp, brunner_munzel,
        mannwhitneyu, wilcoxon, anova, kruskal, chi2, fisher_exact,
        pearson, spearman, kendall.
    data : list[list[float]]
        Groups of observations; for chi2/fisher_exact this is the
        contingency table itself.
    alternative : str
        Alternative hypothesis, forwarded to scipy where supported.

    Returns
    -------
    dict
        Test results plus ``significant`` (at alpha=0.05) and a
        timestamp on success; ``{"success": False, "error": ...}`` on
        any failure, including an unknown ``test_name``.
    """
    try:
        from scipy import stats as scipy_stats

        # FIX: asyncio.get_event_loop() is deprecated when called inside a
        # running coroutine (Python 3.10+); get_running_loop() is correct.
        loop = asyncio.get_running_loop()

        def do_test():
            # Convert data to numpy arrays (the contingency-table tests
            # receive the raw nested lists instead).
            groups = [np.array(g, dtype=float) for g in data]

            # Dispatch to the matching test runner.
            if test_name == "ttest_ind":
                result = _run_ttest_ind(groups, alternative, scipy_stats)
            elif test_name == "ttest_paired":
                result = _run_ttest_paired(groups, alternative, scipy_stats)
            elif test_name == "ttest_1samp":
                result = _run_ttest_1samp(groups, alternative, scipy_stats)
            elif test_name == "brunner_munzel":
                result = _run_brunner_munzel(groups, alternative, scipy_stats)
            elif test_name == "mannwhitneyu":
                result = _run_mannwhitneyu(groups, alternative, scipy_stats)
            elif test_name == "wilcoxon":
                result = _run_wilcoxon(groups, alternative, scipy_stats)
            elif test_name == "anova":
                result = _run_anova(groups, scipy_stats)
            elif test_name == "kruskal":
                result = _run_kruskal(groups, scipy_stats)
            elif test_name == "chi2":
                result = _run_chi2(data, scipy_stats)
            elif test_name == "fisher_exact":
                result = _run_fisher_exact(data, alternative, scipy_stats)
            elif test_name == "pearson":
                result = _run_pearson(groups, scipy_stats)
            elif test_name == "spearman":
                result = _run_spearman(groups, scipy_stats)
            elif test_name == "kendall":
                result = _run_kendall(groups, scipy_stats)
            else:
                raise ValueError(f"Unknown test: {test_name}")

            # Attach effect sizes for the two-sample location tests.
            if test_name in [
                "ttest_ind",
                "ttest_paired",
                "brunner_munzel",
                "mannwhitneyu",
            ]:
                result = _add_effect_size(result, groups)

            # Significance at the conventional 0.05 level.
            alpha = 0.05
            result["significant"] = result["p_value"] < alpha
            result["alpha"] = alpha

            return result

        result = await loop.run_in_executor(None, do_test)

        return {
            "success": True,
            "test_name": test_name,
            "alternative": alternative,
            **result,
            "timestamp": datetime.now().isoformat(),
        }

    except Exception as e:
        return {"success": False, "error": str(e)}
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def _run_ttest_ind(groups, alternative, scipy_stats):
    """Run an independent two-sample t-test and return a result dict."""
    if len(groups) != 2:
        raise ValueError("t-test requires exactly 2 groups")
    sample_a, sample_b = groups
    outcome = scipy_stats.ttest_ind(sample_a, sample_b, alternative=alternative)
    return {
        "test": "Independent t-test",
        "statistic": float(outcome[0]),
        "statistic_name": "t",
        "p_value": float(outcome[1]),
        # Degrees of freedom for the pooled (equal-variance) test.
        "df": len(sample_a) + len(sample_b) - 2,
    }
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _run_ttest_paired(groups, alternative, scipy_stats):
    """Run a paired-sample t-test on two matched groups."""
    if len(groups) != 2:
        raise ValueError("Paired t-test requires exactly 2 groups")
    first, second = groups
    t_stat, p = scipy_stats.ttest_rel(first, second, alternative=alternative)
    # df is n - 1 for the paired differences.
    degrees = len(first) - 1
    return {
        "test": "Paired t-test",
        "statistic": float(t_stat),
        "statistic_name": "t",
        "p_value": float(p),
        "df": degrees,
    }
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _run_ttest_1samp(groups, alternative, scipy_stats, popmean=0.0):
    """Run a one-sample t-test against a population mean.

    Parameters
    ----------
    groups : list
        A single group of observations wrapped in a list.
    alternative : str
        Alternative hypothesis forwarded to scipy.
    scipy_stats : module
        The ``scipy.stats`` module.
    popmean : float
        Population mean to test against. Defaults to 0.0, preserving the
        previous hard-coded behavior (generalized here so callers can
        test against any reference value).

    Returns
    -------
    dict with test name, t statistic, p-value, and df (n - 1).
    """
    if len(groups) != 1:
        raise ValueError("One-sample t-test requires exactly 1 group")
    stat, p_value = scipy_stats.ttest_1samp(groups[0], popmean, alternative=alternative)
    df = len(groups[0]) - 1
    return {
        "test": "One-sample t-test",
        "statistic": float(stat),
        "statistic_name": "t",
        "p_value": float(p_value),
        "df": df,
    }
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
def _run_brunner_munzel(groups, alternative, scipy_stats):
    """Run the Brunner-Munzel test for two independent samples."""
    if len(groups) != 2:
        raise ValueError("Brunner-Munzel requires exactly 2 groups")
    sample_a, sample_b = groups
    outcome = scipy_stats.brunnermunzel(sample_a, sample_b, alternative=alternative)
    stat_value = float(outcome.statistic)
    p = float(outcome.pvalue)
    return {
        "test": "Brunner-Munzel test",
        "statistic": stat_value,
        "statistic_name": "BM",
        "p_value": p,
    }
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def _run_mannwhitneyu(groups, alternative, scipy_stats):
    """Run the Mann-Whitney U test for two independent samples."""
    if len(groups) != 2:
        raise ValueError("Mann-Whitney U requires exactly 2 groups")
    x, y = groups
    u_stat, p = scipy_stats.mannwhitneyu(x, y, alternative=alternative)
    return {
        "test": "Mann-Whitney U test",
        "statistic": float(u_stat),
        "statistic_name": "U",
        "p_value": float(p),
    }
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def _run_wilcoxon(groups, alternative, scipy_stats):
    """Run the Wilcoxon signed-rank test on two paired groups."""
    if len(groups) != 2:
        raise ValueError("Wilcoxon requires exactly 2 paired groups")
    before, after = groups
    w_stat, p = scipy_stats.wilcoxon(before, after, alternative=alternative)
    return {
        "test": "Wilcoxon signed-rank test",
        "statistic": float(w_stat),
        "statistic_name": "W",
        "p_value": float(p),
    }
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def _run_anova(groups, scipy_stats):
    """Run a one-way ANOVA across two or more groups."""
    if len(groups) < 2:
        raise ValueError("ANOVA requires at least 2 groups)".replace(")", ""))
    f_stat, p = scipy_stats.f_oneway(*groups)
    k = len(groups)
    total_n = sum(len(g) for g in groups)
    return {
        "test": "One-way ANOVA",
        "statistic": float(f_stat),
        "statistic_name": "F",
        "p_value": float(p),
        "df_between": k - 1,
        "df_within": total_n - k,
    }
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def _run_kruskal(groups, scipy_stats):
    """Run the Kruskal-Wallis H test across two or more groups."""
    if len(groups) < 2:
        raise ValueError("Kruskal-Wallis requires at least 2 groups")
    h_stat, p = scipy_stats.kruskal(*groups)
    degrees = len(groups) - 1
    return {
        "test": "Kruskal-Wallis H test",
        "statistic": float(h_stat),
        "statistic_name": "H",
        "p_value": float(p),
        "df": degrees,
    }
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def _run_chi2(data, scipy_stats):
    """Run a chi-square test of independence on a contingency table."""
    observed = np.array(data)
    stat, p, degrees, expected = scipy_stats.chi2_contingency(observed)
    return {
        "test": "Chi-square test of independence",
        "statistic": float(stat),
        "statistic_name": "chi2",
        "p_value": float(p),
        "df": int(degrees),
        "expected_frequencies": expected.tolist(),
    }
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
def _run_fisher_exact(data, alternative, scipy_stats):
    """Run Fisher's exact test on a 2x2 contingency table."""
    contingency = np.array(data)
    if contingency.shape != (2, 2):
        raise ValueError("Fisher's exact test requires a 2x2 table")
    ratio, p = scipy_stats.fisher_exact(contingency, alternative=alternative)
    return {
        "test": "Fisher's exact test",
        "statistic": float(ratio),
        "statistic_name": "odds_ratio",
        "p_value": float(p),
    }
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
def _run_pearson(groups, scipy_stats):
    """Compute the Pearson correlation between two variables."""
    if len(groups) != 2:
        raise ValueError("Pearson correlation requires exactly 2 variables")
    x_vals, y_vals = groups
    corr, p = scipy_stats.pearsonr(x_vals, y_vals)
    return {
        "test": "Pearson correlation",
        "statistic": float(corr),
        "statistic_name": "r",
        "p_value": float(p),
    }
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
def _run_spearman(groups, scipy_stats):
    """Compute the Spearman rank correlation between two variables."""
    if len(groups) != 2:
        raise ValueError("Spearman correlation requires exactly 2 variables")
    x_vals, y_vals = groups
    rho, p = scipy_stats.spearmanr(x_vals, y_vals)
    return {
        "test": "Spearman correlation",
        "statistic": float(rho),
        "statistic_name": "rho",
        "p_value": float(p),
    }
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def _run_kendall(groups, scipy_stats):
    """Compute Kendall's tau rank correlation between two variables."""
    if len(groups) != 2:
        raise ValueError("Kendall correlation requires exactly 2 variables")
    x_vals, y_vals = groups
    tau_value, p = scipy_stats.kendalltau(x_vals, y_vals)
    return {
        "test": "Kendall tau correlation",
        "statistic": float(tau_value),
        "statistic_name": "tau",
        "p_value": float(p),
    }
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
def _add_effect_size(result, groups):
    """Attach Cohen's d and Cliff's delta for two-group comparisons.

    Mutates *result* in place (and also returns it). Groups counts other
    than two are left untouched.
    """
    from scitex.stats.effect_sizes import cliffs_delta, cohens_d

    if len(groups) == 2:
        first, second = groups
        result["effect_size"] = {
            "cohens_d": float(cohens_d(first, second)),
            "cliffs_delta": float(cliffs_delta(first, second)),
        }
    return result
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
# EOF
|
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
# Timestamp: 2026-01-25
|
|
3
|
+
# File: src/scitex/stats/_mcp/_handlers/_stars.py
|
|
4
|
+
|
|
5
|
+
"""P-value to stars conversion handler."""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
|
|
11
|
+
__all__ = ["p_to_stars_handler"]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
async def p_to_stars_handler(
|
|
15
|
+
p_value: float,
|
|
16
|
+
thresholds: list[float] | None = None,
|
|
17
|
+
) -> dict:
|
|
18
|
+
"""Convert p-value to significance stars."""
|
|
19
|
+
try:
|
|
20
|
+
thresh = thresholds or [0.001, 0.01, 0.05]
|
|
21
|
+
|
|
22
|
+
if p_value < thresh[0]:
|
|
23
|
+
stars = "***"
|
|
24
|
+
significance = f"p < {thresh[0]}"
|
|
25
|
+
elif p_value < thresh[1]:
|
|
26
|
+
stars = "**"
|
|
27
|
+
significance = f"p < {thresh[1]}"
|
|
28
|
+
elif p_value < thresh[2]:
|
|
29
|
+
stars = "*"
|
|
30
|
+
significance = f"p < {thresh[2]}"
|
|
31
|
+
else:
|
|
32
|
+
stars = "ns"
|
|
33
|
+
significance = f"p >= {thresh[2]} (not significant)"
|
|
34
|
+
|
|
35
|
+
return {
|
|
36
|
+
"success": True,
|
|
37
|
+
"p_value": p_value,
|
|
38
|
+
"stars": stars,
|
|
39
|
+
"significance": significance,
|
|
40
|
+
"thresholds": thresh,
|
|
41
|
+
"timestamp": datetime.now().isoformat(),
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
except Exception as e:
|
|
45
|
+
return {"success": False, "error": str(e)}
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
# EOF
|