critical_es_value-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- critical_es_value/__init__.py +13 -0
- critical_es_value/corrtest.py +72 -0
- critical_es_value/linreg.py +97 -0
- critical_es_value/ttest.py +275 -0
- critical_es_value/utils.py +58 -0
- critical_es_value-0.1.0.dist-info/METADATA +112 -0
- critical_es_value-0.1.0.dist-info/RECORD +9 -0
- critical_es_value-0.1.0.dist-info/WHEEL +4 -0
- critical_es_value-0.1.0.dist-info/licenses/LICENSE +21 -0

critical_es_value/__init__.py
@@ -0,0 +1,13 @@
+from .corrtest import critical_for_correlation_test
+from .linreg import (
+    critical_for_linear_regression,
+    critical_for_linear_regression_se_coefficients,
+)
+from .ttest import (
+    critical_for_one_sample_ttest,
+    critical_for_two_sample_ttest,
+)
+from .utils import (
+    get_alpha,
+    get_bias_correction_factor_J,
+)

critical_es_value/corrtest.py
@@ -0,0 +1,72 @@
+import numpy as np
+import pandas as pd
+import pingouin
+from numpy.typing import ArrayLike
+from scipy import stats
+
+from critical_es_value import utils
+
+
+def critical_for_correlation_test(
+    x: ArrayLike,
+    y: ArrayLike,
+    confidence: float = 0.95,
+    alternative: str = "two-sided",
+    variant: str = "ttest",
+) -> pd.DataFrame:
+    """Calculate critical effect size values for a Pearson correlation test.
+
+    Returns a DataFrame with the following columns:
+    - r: Pearson correlation coefficient
+    - n: Sample size
+    - dof: Degrees of freedom
+    - r_critical: Critical value for the correlation coefficient
+    - rz_critical: Critical value for Fisher's z-transformed correlation coefficient (only for "ztest" variant)
+    - se_r: Standard error of the correlation coefficient
+    - se_r_critical: Standard error of the critical correlation coefficient
+    - se_rz_critical: Standard error of the critical Fisher's z-transformed correlation coefficient (only for "ztest" variant)
+
+    Args:
+        x (ArrayLike): Sample data for group 1.
+        y (ArrayLike): Sample data for group 2.
+        confidence (float): Confidence level between 0 and 1 (exclusive). Default is 0.95.
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less". Default is "two-sided".
+        variant (str): The statistical test variant. Either "ttest" or "ztest". Default is "ttest".
+
+    Returns:
+        pd.DataFrame: A DataFrame containing critical effect size values.
+    """
+
+    if variant not in ["ttest", "ztest"]:
+        raise ValueError("variant must be one of 'ttest' or 'ztest'")
+
+    corr_test_result = pingouin.corr(
+        x, y, alternative=alternative, method="pearson"
+    ).iloc[0]
+
+    r = corr_test_result["r"]
+    n = corr_test_result["n"]
+
+    alpha = utils.get_alpha(confidence, alternative)
+    dof = n - 2
+
+    if variant == "ttest":
+        tc = np.abs(stats.t.ppf(alpha, dof))
+        rc = np.sqrt(tc**2 / (tc**2 + dof))
+    else:
+        zc = np.abs(stats.norm.ppf(alpha))
+        rc = np.tanh(zc / np.sqrt(n - 3))
+
+    result = {
+        "n": n,
+        "r": r,
+        "dof": dof,
+        "r_critical": rc,
+        "se_r": np.sqrt((1 - r**2) / dof),
+        "se_r_critical": np.sqrt((1 - rc**2) / dof),
+    }
+    if variant == "ztest":
+        result["rz_critical"] = np.atanh(rc)
+        result["se_rz_critical"] = 1 / np.sqrt(n - 3)
+
+    return pd.DataFrame([result], index=["critical"])
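
A quick illustrative cross-check of the `r_critical` formula in `corrtest.py` above (editorial sketch, not part of the packaged code; it uses only `numpy` and `scipy` and reuses the n = 30, 95% two-sided setting from the README example further down in this diff):

```python
import numpy as np
from scipy import stats

# For a Pearson correlation, t = r * sqrt(dof) / sqrt(1 - r**2) with dof = n - 2.
# At r = r_critical = sqrt(tc**2 / (tc**2 + dof)) that t statistic equals the
# critical t value, i.e. the test sits exactly on the significance boundary.
n, confidence = 30, 0.95
dof = n - 2
alpha = (1 - confidence) / 2              # two-sided tail, as in utils.get_alpha
tc = np.abs(stats.t.ppf(alpha, dof))      # critical t value
rc = np.sqrt(tc**2 / (tc**2 + dof))       # critical r, as computed in corrtest.py

t_at_rc = rc * np.sqrt(dof) / np.sqrt(1 - rc**2)
print(round(rc, 6), np.isclose(t_at_rc, tc))  # ~0.361007, True
```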

critical_es_value/linreg.py
@@ -0,0 +1,97 @@
+import numpy as np
+import pandas as pd
+import pingouin
+from numpy.typing import ArrayLike
+from scipy import stats
+
+from critical_es_value import utils
+
+
+def critical_for_linear_regression_se_coefficients(
+    se_coefficients: ArrayLike,
+    dof: int,
+    confidence: float,
+    alternative: str,
+    variant: str = "ttest",
+) -> np.ndarray:
+    """Calculate critical effect size values for linear regression coefficients.
+
+    Args:
+        se_coefficients (ArrayLike): Standard errors of the regression coefficients.
+        dof (int): Degrees of freedom of the model residuals.
+        confidence (float): Confidence level between 0 and 1 (exclusive).
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less".
+        variant (str): The statistical test variant. Either "ttest" or "ztest". Default is "ttest".
+
+    Returns:
+        np.ndarray: An array containing critical effect size values for each coefficient.
+
+    Raises:
+        ValueError: If variant is not one of "ttest" or "ztest".
+    """
+    if variant not in ["ttest", "ztest"]:
+        raise ValueError("variant must be one of 'ttest' or 'ztest'")
+
+    alpha = utils.get_alpha(confidence, alternative)
+
+    if variant == "ttest":
+        qc = np.abs(stats.t.ppf(alpha, dof))
+    else:
+        qc = np.abs(stats.norm.ppf(alpha))
+
+    return qc * np.array(se_coefficients)
+
+
+def critical_for_linear_regression(
+    X: pd.DataFrame,
+    y: pd.Series,
+    alternative: str = "two-sided",
+    confidence: float = 0.95,
+    variant: str = "ttest",
+    **kwargs,
+) -> pd.DataFrame:
+    """Calculate critical effect size values for linear regression coefficients.
+
+    Returns a DataFrame with the following columns:
+    - names: Names of the regression coefficients
+    - coef: Estimated regression coefficients
+    - coef_critical: Critical value for the regression coefficients
+
+    Args:
+        X (pd.DataFrame): DataFrame containing the independent variables.
+        y (pd.Series): Series containing the dependent variable.
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less". Default is "two-sided".
+        confidence (float): Confidence level between 0 and 1 (exclusive). Default is 0.95.
+        variant (str): The statistical test variant. Either "ttest" or "ztest". Default is "ttest".
+        **kwargs: Additional keyword arguments to pass to pingouin.linear_regression.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing critical effect size values.
+
+    Raises:
+        ValueError: If variant is not one of "ttest" or "ztest".
+    """
+    if variant not in ["ttest", "ztest"]:
+        raise ValueError("variant must be one of 'ttest' or 'ztest'")
+
+    alpha = utils.get_alpha(confidence, alternative)
+
+    model = pingouin.linear_regression(X=X, y=y, alpha=alpha, **kwargs)
+    coef = model["coef"].values
+
+    coef_critical = critical_for_linear_regression_se_coefficients(
+        se_coefficients=model["se"].values,
+        dof=model.df_resid_,
+        confidence=confidence,
+        alternative=alternative,
+        variant=variant,
+    )
+
+    return pd.DataFrame(
+        {
+            "names": model["names"].values,
+            "coef": coef,
+            "coef_critical": coef_critical,
+        },
+        index=list(range(len(coef))),
+    )
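
As an editorial aside (not part of the package): the critical coefficient computed above is simply the critical t (or z) value scaled by the coefficient's standard error. A minimal sketch, reusing the standard error of the `X` coefficient and the residual degrees of freedom (30 observations, intercept plus two predictors, so 27) from the README example further down:

```python
import numpy as np
from scipy import stats

# coef_critical = |t_crit(alpha, df_resid)| * se, the "ttest" variant above.
se, df_resid, confidence = 0.126736, 27, 0.95
tc = np.abs(stats.t.ppf((1 - confidence) / 2, df_resid))
print(round(tc * se, 6))  # ~0.26004, matching coef_critical for X in the README
                          # (0.260042) up to the rounding of se
```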

critical_es_value/ttest.py
@@ -0,0 +1,275 @@
+from typing import Union
+
+import numpy as np
+import pandas as pd
+import pingouin
+from numpy.typing import ArrayLike
+from scipy import stats
+
+from critical_es_value import utils
+
+
+def determine_welch_correction(correction: Union[bool, str], n1: int, n2: int) -> bool:
+    """Determine whether to apply Welch's correction for unequal variances.
+
+    Args:
+        correction (bool or str): If True, always apply Welch's correction.
+            If False, never apply Welch's correction.
+            If "auto", apply Welch's correction only if sample sizes are unequal.
+        n1 (int): Sample size of group 1.
+        n2 (int): Sample size of group 2.
+
+    Returns:
+        bool: Whether to apply Welch's correction.
+
+    Raises:
+        ValueError: If `correction` is not one of True, False, or "auto".
+    """
+    if correction not in (True, False, "auto"):
+        raise ValueError("correction must be one of True, False, or 'auto'")
+    if correction is True or (correction == "auto" and n1 != n2):
+        return True
+    return False
+
+
+def critical_for_one_sample_ttest(
+    x: ArrayLike,
+    alternative: str = "two-sided",
+    confidence: float = 0.95,
+) -> pd.DataFrame:
+    """Calculate critical effect size values for a one-sample t-test.
+
+    Returns a DataFrame with the following columns:
+    - T: t-value of the test statistic
+    - dof: Degrees of freedom
+    - T_critical: Critical t-value
+    - d: Cohen's d
+    - d_critical: Critical value for Cohen's d
+    - b_critical: Critical value for the raw mean difference
+    - g: Hedges' g
+    - g_critical: Critical value for Hedges' g
+
+    Args:
+        x (ArrayLike): Sample data.
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less". Default is "two-sided".
+        confidence (float): Confidence level between 0 and 1 (exclusive). Default is 0.95.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing critical effect size values.
+    """
+
+    t_test_result = pingouin.ttest(
+        x=x,
+        y=0,
+        paired=False,
+        alternative=alternative,
+        correction=False,
+        confidence=confidence,
+    ).iloc[0]
+
+    alpha = utils.get_alpha(confidence, alternative)
+    dof = t_test_result.dof
+
+    n = len(x)
+    factor = np.sqrt(1 / n)
+
+    t = t_test_result["T"]
+    d = t * factor
+
+    tc = np.abs(stats.t.ppf(alpha, dof))
+    dc = tc * factor
+
+    j = utils.get_bias_correction_factor_J(dof)
+
+    return pd.DataFrame(
+        [
+            {
+                "T": t,
+                "dof": dof,
+                "T_critical": tc,
+                "d": d,
+                "d_critical": dc,
+                "b_critical": tc * np.std(x, ddof=1) / np.sqrt(n),
+                "g": d * j,
+                "g_critical": dc * j,
+            }
+        ],
+        index=["critical"],
+    )
+
+
+def _critical_for_two_sample_ttest_paired(
+    x: ArrayLike,
+    y: ArrayLike,
+    alternative: str = "two-sided",
+    confidence: float = 0.95,
+) -> pd.DataFrame:
+    """Calculate critical effect size values for a PAIRED two-sample t-test.
+
+    Returns a DataFrame with the following columns:
+    - T: t-value of the test statistic
+    - dof: Degrees of freedom
+    - T_critical: Critical t-value
+    - d: Cohen's d
+    - d_critical: Critical value for Cohen's d
+    - b_critical: Critical value for the raw mean difference
+    - g: Hedges' g
+    - g_critical: Critical value for Hedges' g
+    - dz: Cohen's dz
+    - dz_critical: Critical value for Cohen's dz
+    - gz: Hedges' gz
+    - gz_critical: Critical value for Hedges' gz
+
+    Args:
+        x (ArrayLike): Sample data for group 1.
+        y (ArrayLike): Sample data for group 2.
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less". Default is "two-sided".
+        confidence (float): Confidence level between 0 and 1 (exclusive). Default is 0.95.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing critical effect size values.
+    """
+
+    if len(x) != len(y):
+        raise ValueError("For paired tests, x and y must have the same length.")
+
+    t_test_result = pingouin.ttest(
+        x=x,
+        y=y,
+        paired=True,
+        alternative=alternative,
+        correction=False,
+        confidence=confidence,
+    ).iloc[0]
+
+    alpha = utils.get_alpha(confidence, alternative)
+    dof = t_test_result.dof
+    n = len(x)
+
+    r12 = np.corrcoef(x, y)[0, 1]
+    factor1 = np.sqrt(1 / n)
+    factor2 = np.sqrt(2 * (1 - r12))
+
+    t = t_test_result["T"]
+    dz = t * factor1
+    d = dz * factor2
+
+    tc = np.abs(stats.t.ppf(alpha, dof))
+    dzc = tc * factor1
+    dc = dzc * factor2
+
+    j = utils.get_bias_correction_factor_J(dof)
+
+    return pd.DataFrame(
+        [
+            {
+                "T": t,
+                "dof": dof,
+                "T_critical": tc,
+                "d": d,
+                "d_critical": dc,
+                "b_critical": tc * np.std(np.asarray(x) - np.asarray(y), ddof=1) / np.sqrt(n),
+                "g": d * j,
+                "g_critical": dc * j,
+                "dz": dz,
+                "dz_critical": dzc,
+                "gz": dz * j,
+                "gz_critical": dzc * j,
+            }
+        ],
+        index=["critical"],
+    )
+
+
+def critical_for_two_sample_ttest(
+    x: ArrayLike,
+    y: ArrayLike,
+    paired: bool = False,
+    correction: Union[bool, str] = "auto",
+    alternative: str = "two-sided",
+    confidence: float = 0.95,
+) -> pd.DataFrame:
+    """Calculate critical effect size values for a paired or an unpaired two-sample t-test.
+
+    Returns a DataFrame with the following columns:
+    - T: t-value of the test statistic
+    - dof: Degrees of freedom
+    - T_critical: Critical t-value
+    - d: Cohen's d
+    - d_critical: Critical value for Cohen's d
+    - b_critical: Critical value for the raw mean difference
+    - g: Hedges' g
+    - g_critical: Critical value for Hedges' g
+
+    Args:
+        x (ArrayLike): Sample data for group 1.
+        y (ArrayLike): Sample data for group 2.
+        paired (bool): Whether the samples are paired. Default is False.
+        correction (bool or str): For unpaired two-sample t-tests, whether to correct for unequal variances
+            using Welch's separate-variances t-test. If "auto", Welch's t-test is used automatically when the
+            sample sizes are unequal. For paired t-tests, this parameter is ignored and no correction is
+            performed. Default is "auto".
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less". Default is "two-sided".
+        confidence (float): Confidence level between 0 and 1 (exclusive). Default is 0.95.
+
+    Returns:
+        pd.DataFrame: A DataFrame containing critical effect size values.
+    """
+
+    if paired:
+        return _critical_for_two_sample_ttest_paired(
+            x=x,
+            y=y,
+            alternative=alternative,
+            confidence=confidence,
+        )
+
+    n1 = len(x)
+    n2 = len(y)
+    correction = determine_welch_correction(correction, n1=n1, n2=n2)
+
+    t_test_result = pingouin.ttest(
+        x=x,
+        y=y,
+        paired=paired,
+        alternative=alternative,
+        correction=correction,
+        confidence=confidence,
+    ).iloc[0]
+
+    alpha = utils.get_alpha(confidence, alternative)
+    dof = t_test_result.dof
+
+    factor = np.sqrt(1 / n1 + 1 / n2)
+
+    t = t_test_result["T"]
+    d = t * factor
+
+    tc = np.abs(stats.t.ppf(alpha, dof))
+    dc = tc * factor
+
+    s1 = np.std(x, ddof=1)
+    s2 = np.std(y, ddof=1)
+
+    if correction:
+        se = np.sqrt((s1**2 / n1) + (s2**2 / n2))
+    else:
+        se = np.sqrt((s1**2 * (n1 - 1) + s2**2 * (n2 - 1)) / (n1 + n2 - 2)) * factor
+
+    j = utils.get_bias_correction_factor_J(dof)
+
+    return pd.DataFrame(
+        [
+            {
+                "T": t,
+                "dof": dof,
+                "T_critical": tc,
+                "d": d,
+                "d_critical": dc,
+                "b_critical": tc * se,
+                "g": d * j,
+                "g_critical": dc * j,
+            }
+        ],
+        index=["critical"],
+    )
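
Editorial cross-check of the unpaired branch above (not part of the package): with equal group sizes the "auto" setting leaves Welch's correction off, and the critical Cohen's d is the critical t value times sqrt(1/n1 + 1/n2). A minimal sketch reusing the n1 = n2 = 30, 95% two-sided setting from the README example further down:

```python
import numpy as np
from scipy import stats

n1 = n2 = 30
dof = n1 + n2 - 2                     # pooled-variance (Student) t-test dof
alpha = (1 - 0.95) / 2                # two-sided tail, as in utils.get_alpha
tc = np.abs(stats.t.ppf(alpha, dof))  # critical t value
dc = tc * np.sqrt(1 / n1 + 1 / n2)    # critical Cohen's d
print(round(tc, 5), round(dc, 6))     # ~2.00172 and ~0.516841, as in the README
```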

critical_es_value/utils.py
@@ -0,0 +1,58 @@
+import numpy as np
+from scipy import special as scipy_special
+
+
+def get_alpha(confidence: float, alternative: str) -> float:
+    """Calculate the significance level (alpha) corresponding to a given confidence level.
+
+    Args:
+        confidence (float): Confidence level between 0 and 1 (exclusive).
+        alternative (str): The alternative hypothesis. Either "two-sided", "greater", or "less".
+
+    Returns:
+        float: The significance level (alpha).
+
+    Raises:
+        ValueError: If `confidence` is not in (0, 1).
+        ValueError: If `alternative` is not one of "two-sided", "greater", or "less".
+
+    Examples:
+        >>> get_alpha(0.95, "two-sided")
+        0.025
+        >>> get_alpha(0.95, "less")
+        0.05
+    """
+
+    if confidence <= 0 or confidence >= 1:
+        raise ValueError("confidence must be in (0, 1)")
+    if alternative not in ("two-sided", "greater", "less"):
+        raise ValueError("alternative must be one of 'two-sided', 'greater', or 'less'")
+
+    alpha = 1 - confidence
+
+    if alternative == "two-sided":
+        return alpha / 2
+    return alpha
+
+
+def get_bias_correction_factor_J(dof: int) -> np.float64:
+    """Calculate the bias correction factor J for Hedges' g.
+
+    Args:
+        dof (int): Degrees of freedom.
+
+    Returns:
+        np.float64: The bias correction factor J.
+
+    Examples:
+        >>> get_bias_correction_factor_J(10)
+        0.92274560805
+        >>> get_bias_correction_factor_J(20)
+        0.96194453374
+    """
+    if dof <= 1:
+        raise ValueError("dof must be greater than 1.")
+
+    num = scipy_special.loggamma(dof / 2)
+    denom = np.log(np.sqrt(dof / 2)) + scipy_special.loggamma((dof - 1) / 2)
+    return np.exp(num - denom)
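
For reference (editorial note, not part of the package): `get_bias_correction_factor_J` evaluates the exact Hedges' correction J = Γ(dof/2) / (√(dof/2) · Γ((dof − 1)/2)) in log space, which stays numerically stable when the gamma function itself would overflow at large dof. A minimal sketch reproducing the docstring examples (the helper name `exact_J` is ours, chosen for illustration):

```python
import numpy as np
from scipy import special

def exact_J(dof: float) -> float:
    # J = Gamma(dof/2) / (sqrt(dof/2) * Gamma((dof - 1)/2)), computed via loggamma
    return float(np.exp(special.loggamma(dof / 2)
                        - np.log(np.sqrt(dof / 2))
                        - special.loggamma((dof - 1) / 2)))

print(round(exact_J(10), 7), round(exact_J(20), 7))  # ~0.9227456 and ~0.9619445
```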

critical_es_value-0.1.0.dist-info/METADATA
@@ -0,0 +1,112 @@
+Metadata-Version: 2.4
+Name: critical-es-value
+Version: 0.1.0
+Summary: Calculate critical effect size values.
+Author-email: Florian Rohrer <semicolonator@gmail.com>
+License-File: LICENSE
+Requires-Python: >=3.13
+Requires-Dist: mpmath>=1.3.0
+Requires-Dist: pandas>=2.3.3
+Requires-Dist: pingouin>=0.5.5
+Description-Content-Type: text/markdown
+
+# critical_es_value
+
+Calculate critical effect size values for t-Tests, correlation tests and linear regression coefficients.
+
+## Usage
+
+```python
+import numpy as np
+import pingouin as pg
+
+from critical_es_value import (
+    critical_for_one_sample_ttest,
+    critical_for_two_sample_ttest,
+    critical_for_correlation_test,
+    critical_for_linear_regression,
+)
+
+np.random.seed(123)
+mean, cov, n = [4, 5], [(1, .6), (.6, 1)], 30
+x, y = np.random.multivariate_normal(mean, cov, n).T
+```
+
+### t-Test
+
+
+```python
+pg.ttest(x, 0)
+critical_for_one_sample_ttest(x)
+```
+
+| | T | dof | alternative | p-val | CI95% | cohen-d | BF10 | power |
+|:-------|--------:|------:|:--------------|------------:|:------------|----------:|----------:|--------:|
+| T-test | 16.0765 | 29 | two-sided | 5.54732e-16 | [3.37 4.35] | 2.93515 | 1.031e+13 | nan |
+
+| | T | dof | T_critical | d | d_critical | b_critical | g | g_critical |
+|:---------|--------:|------:|-------------:|--------:|-------------:|-------------:|--------:|-------------:|
+| critical | 16.0765 | 29 | 2.04523 | 2.93515 | 0.373406 | 0.491162 | 2.85847 | 0.363651 |
+
+```python
+pg.ttest(x, y)
+critical_for_two_sample_ttest(x, y)
+```
+
+| | T | dof | alternative | p-val | CI95% | cohen-d | BF10 | power |
+|:-------|---------:|------:|:--------------|----------:|:--------------|----------:|-------:|---------:|
+| T-test | -3.40071 | 58 | two-sided | 0.0012224 | [-1.68 -0.43] | 0.878059 | 26.155 | 0.916807 |
+
+
+| | T | dof | T_critical | d | d_critical | b_critical | g | g_critical |
+|:---------|---------:|------:|-------------:|----------:|-------------:|-------------:|----------:|-------------:|
+| critical | -3.40071 | 58 | 2.00172 | -0.878059 | 0.516841 | 0.62077 | -0.866647 | 0.510124 |
+
+
+### Correlation Test
+
+```python
+pg.corr(x, y)
+critical_for_correlation_test(x, y)
+```
+
+| | n | r | CI95% | p-val | BF10 | power |
+|:--------|----:|---------:|:------------|-----------:|-------:|---------:|
+| pearson | 30 | 0.594785 | [0.3 0.79] | 0.00052736 | 69.723 | 0.950373 |
+
+| | n | r | dof | r_critical | se_r | se_r_critical |
+|:---------|----:|---------:|------:|-------------:|--------:|----------------:|
+| critical | 30 | 0.594785 | 28 | 0.361007 | 0.15192 | 0.176238 |
+
+
+### Linear Regression
+
+```python
+import pandas as pd
+
+np.random.seed(123)
+data = pd.DataFrame({"X": x, "Y": y, "Z": np.random.normal(5, 1, 30)})
+
+pg.linear_regression(data[["X", "Z"]], data["Y"])
+critical_for_linear_regression(data[["X", "Z"]], data["Y"])
+```
+
+| | names | coef | se | T | pval | r2 | adj_r2 | CI[2.5%] | CI[97.5%] |
+|---:|:----------|-----------:|---------:|----------:|------------:|---------:|---------:|-----------:|------------:|
+| 0 | Intercept | 3.15799 | 0.844129 | 3.74112 | 0.000874245 | 0.354522 | 0.306709 | 1.42598 | 4.88999 |
+| 1 | X | 0.487772 | 0.126736 | 3.84871 | 0.000659501 | 0.354522 | 0.306709 | 0.22773 | 0.747814 |
+| 2 | Z | -0.0249309 | 0.140417 | -0.177548 | 0.860403 | 0.354522 | 0.306709 | -0.313044 | 0.263182 |
+
+| | names | coef | coef_critical |
+|---:|:----------|-----------:|----------------:|
+| 0 | Intercept | 3.15799 | 1.73201 |
+| 1 | X | 0.487772 | 0.260042 |
+| 2 | Z | -0.0249309 | 0.288113 |
+
+
+## Resources
+
+* [R package](https://psicostat.github.io/criticalESvalue/index.html)
+* [Original paper](https://journals.sagepub.com/doi/10.1177/25152459251335298?icid=int.sj-full-text.similar-articles.5)
+> Perugini, A., Gambarota, F., Toffalini, E., Lakens, D., Pastore, M., Finos, L., ... & Altoè, G. (2025). The Benefits of Reporting Critical-Effect-Size Values. Advances in Methods and Practices in Psychological Science, 8(2), 25152459251335298.
+

critical_es_value-0.1.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+critical_es_value/__init__.py,sha256=PK8lNkZdPVOc6QU17x72Cwe0QyVqRA8s82dbXl2i5og,329
+critical_es_value/corrtest.py,sha256=YDOFh93RBaLgt09hIkbQm0nEWEhbMSUPiydEqH_cRaU,2462
+critical_es_value/linreg.py,sha256=YF_PQTLUfqm_yJTKplqDMlom8xCqKZUUhh9_9L0Ui-k,3301
+critical_es_value/ttest.py,sha256=AeHmqVwwjZTrszFSLLxXRlBpNpqtBtd4ulzAdexjiRA,8128
+critical_es_value/utils.py,sha256=mtpc_nQIVG1NQWrzlZbb7Z-CtktqQsdLvcXtIK5AuZc,1710
+critical_es_value-0.1.0.dist-info/METADATA,sha256=IHpPNvc9AgnCSYb1zix9toLstKru5ia_RKd1AxXCaKw,4414
+critical_es_value-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+critical_es_value-0.1.0.dist-info/licenses/LICENSE,sha256=4t8Z4NCsOSXi1k1chjuKznC1BHPZp2bTXHH_qCDF7no,1071
+critical_es_value-0.1.0.dist-info/RECORD,,

critical_es_value-0.1.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 Florian Rohrer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.