deepbox 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +344 -0
- package/dist/CSRMatrix-CwGwQRea.d.cts +219 -0
- package/dist/CSRMatrix-KzNt6QpS.d.ts +219 -0
- package/dist/Tensor-BQLk1ltW.d.cts +147 -0
- package/dist/Tensor-g8mUClel.d.ts +147 -0
- package/dist/chunk-4S73VUBD.js +677 -0
- package/dist/chunk-4S73VUBD.js.map +1 -0
- package/dist/chunk-5R4S63PF.js +2925 -0
- package/dist/chunk-5R4S63PF.js.map +1 -0
- package/dist/chunk-6AE5FKKQ.cjs +9264 -0
- package/dist/chunk-6AE5FKKQ.cjs.map +1 -0
- package/dist/chunk-AD436M45.js +3854 -0
- package/dist/chunk-AD436M45.js.map +1 -0
- package/dist/chunk-ALS7ETWZ.cjs +4263 -0
- package/dist/chunk-ALS7ETWZ.cjs.map +1 -0
- package/dist/chunk-AU7XHGKJ.js +2092 -0
- package/dist/chunk-AU7XHGKJ.js.map +1 -0
- package/dist/chunk-B5TNKUEY.js +1481 -0
- package/dist/chunk-B5TNKUEY.js.map +1 -0
- package/dist/chunk-BCR7G3A6.js +9136 -0
- package/dist/chunk-BCR7G3A6.js.map +1 -0
- package/dist/chunk-C4PKXY74.cjs +1917 -0
- package/dist/chunk-C4PKXY74.cjs.map +1 -0
- package/dist/chunk-DWZY6PIP.cjs +6400 -0
- package/dist/chunk-DWZY6PIP.cjs.map +1 -0
- package/dist/chunk-E3EU5FZO.cjs +2113 -0
- package/dist/chunk-E3EU5FZO.cjs.map +1 -0
- package/dist/chunk-F3JWBINJ.js +1054 -0
- package/dist/chunk-F3JWBINJ.js.map +1 -0
- package/dist/chunk-FJYLIGJX.js +1940 -0
- package/dist/chunk-FJYLIGJX.js.map +1 -0
- package/dist/chunk-JSCDE774.cjs +729 -0
- package/dist/chunk-JSCDE774.cjs.map +1 -0
- package/dist/chunk-LWECRCW2.cjs +2412 -0
- package/dist/chunk-LWECRCW2.cjs.map +1 -0
- package/dist/chunk-MLBMYKCG.js +6379 -0
- package/dist/chunk-MLBMYKCG.js.map +1 -0
- package/dist/chunk-OX6QXFMV.cjs +3874 -0
- package/dist/chunk-OX6QXFMV.cjs.map +1 -0
- package/dist/chunk-PHV2DKRS.cjs +1072 -0
- package/dist/chunk-PHV2DKRS.cjs.map +1 -0
- package/dist/chunk-PL7TAYKI.js +4056 -0
- package/dist/chunk-PL7TAYKI.js.map +1 -0
- package/dist/chunk-PR647I7R.js +1898 -0
- package/dist/chunk-PR647I7R.js.map +1 -0
- package/dist/chunk-QERHVCHC.cjs +2960 -0
- package/dist/chunk-QERHVCHC.cjs.map +1 -0
- package/dist/chunk-XEG44RF6.cjs +1514 -0
- package/dist/chunk-XEG44RF6.cjs.map +1 -0
- package/dist/chunk-XMWVME2W.js +2377 -0
- package/dist/chunk-XMWVME2W.js.map +1 -0
- package/dist/chunk-ZB75FESB.cjs +1979 -0
- package/dist/chunk-ZB75FESB.cjs.map +1 -0
- package/dist/chunk-ZLW62TJG.cjs +4061 -0
- package/dist/chunk-ZLW62TJG.cjs.map +1 -0
- package/dist/chunk-ZXKBDFP3.js +4235 -0
- package/dist/chunk-ZXKBDFP3.js.map +1 -0
- package/dist/core/index.cjs +204 -0
- package/dist/core/index.cjs.map +1 -0
- package/dist/core/index.d.cts +2 -0
- package/dist/core/index.d.ts +2 -0
- package/dist/core/index.js +3 -0
- package/dist/core/index.js.map +1 -0
- package/dist/dataframe/index.cjs +22 -0
- package/dist/dataframe/index.cjs.map +1 -0
- package/dist/dataframe/index.d.cts +3 -0
- package/dist/dataframe/index.d.ts +3 -0
- package/dist/dataframe/index.js +5 -0
- package/dist/dataframe/index.js.map +1 -0
- package/dist/datasets/index.cjs +134 -0
- package/dist/datasets/index.cjs.map +1 -0
- package/dist/datasets/index.d.cts +3 -0
- package/dist/datasets/index.d.ts +3 -0
- package/dist/datasets/index.js +5 -0
- package/dist/datasets/index.js.map +1 -0
- package/dist/index-74AB8Cyh.d.cts +1126 -0
- package/dist/index-9oQx1HgV.d.cts +1180 -0
- package/dist/index-BJY2SI4i.d.ts +483 -0
- package/dist/index-BWGhrDlr.d.ts +733 -0
- package/dist/index-B_DK4FKY.d.cts +242 -0
- package/dist/index-BbA2Gxfl.d.ts +456 -0
- package/dist/index-BgHYAoSS.d.cts +837 -0
- package/dist/index-BndMbqsM.d.ts +1439 -0
- package/dist/index-C1mfVYoo.d.ts +2517 -0
- package/dist/index-CCvlwAmL.d.cts +809 -0
- package/dist/index-CDw5CnOU.d.ts +785 -0
- package/dist/index-Cn3SdB0O.d.ts +1126 -0
- package/dist/index-CrqLlS-a.d.ts +776 -0
- package/dist/index-D61yaSMY.d.cts +483 -0
- package/dist/index-D9Loo1_A.d.cts +2517 -0
- package/dist/index-DIT_OO9C.d.cts +785 -0
- package/dist/index-DIp_RrRt.d.ts +242 -0
- package/dist/index-DbultU6X.d.cts +1427 -0
- package/dist/index-DmEg_LCm.d.cts +776 -0
- package/dist/index-DoPWVxPo.d.cts +1439 -0
- package/dist/index-DuCxd-8d.d.ts +837 -0
- package/dist/index-Dx42TZaY.d.ts +809 -0
- package/dist/index-DyZ4QQf5.d.cts +456 -0
- package/dist/index-GFAVyOWO.d.ts +1427 -0
- package/dist/index-WHQLn0e8.d.cts +733 -0
- package/dist/index-ZtI1Iy4L.d.ts +1180 -0
- package/dist/index-eJgeni9c.d.cts +1911 -0
- package/dist/index-tk4lSYod.d.ts +1911 -0
- package/dist/index.cjs +72 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +17 -0
- package/dist/index.d.ts +17 -0
- package/dist/index.js +15 -0
- package/dist/index.js.map +1 -0
- package/dist/linalg/index.cjs +86 -0
- package/dist/linalg/index.cjs.map +1 -0
- package/dist/linalg/index.d.cts +3 -0
- package/dist/linalg/index.d.ts +3 -0
- package/dist/linalg/index.js +5 -0
- package/dist/linalg/index.js.map +1 -0
- package/dist/metrics/index.cjs +158 -0
- package/dist/metrics/index.cjs.map +1 -0
- package/dist/metrics/index.d.cts +3 -0
- package/dist/metrics/index.d.ts +3 -0
- package/dist/metrics/index.js +5 -0
- package/dist/metrics/index.js.map +1 -0
- package/dist/ml/index.cjs +87 -0
- package/dist/ml/index.cjs.map +1 -0
- package/dist/ml/index.d.cts +3 -0
- package/dist/ml/index.d.ts +3 -0
- package/dist/ml/index.js +6 -0
- package/dist/ml/index.js.map +1 -0
- package/dist/ndarray/index.cjs +501 -0
- package/dist/ndarray/index.cjs.map +1 -0
- package/dist/ndarray/index.d.cts +5 -0
- package/dist/ndarray/index.d.ts +5 -0
- package/dist/ndarray/index.js +4 -0
- package/dist/ndarray/index.js.map +1 -0
- package/dist/nn/index.cjs +142 -0
- package/dist/nn/index.cjs.map +1 -0
- package/dist/nn/index.d.cts +6 -0
- package/dist/nn/index.d.ts +6 -0
- package/dist/nn/index.js +5 -0
- package/dist/nn/index.js.map +1 -0
- package/dist/optim/index.cjs +77 -0
- package/dist/optim/index.cjs.map +1 -0
- package/dist/optim/index.d.cts +4 -0
- package/dist/optim/index.d.ts +4 -0
- package/dist/optim/index.js +4 -0
- package/dist/optim/index.js.map +1 -0
- package/dist/plot/index.cjs +114 -0
- package/dist/plot/index.cjs.map +1 -0
- package/dist/plot/index.d.cts +6 -0
- package/dist/plot/index.d.ts +6 -0
- package/dist/plot/index.js +5 -0
- package/dist/plot/index.js.map +1 -0
- package/dist/preprocess/index.cjs +82 -0
- package/dist/preprocess/index.cjs.map +1 -0
- package/dist/preprocess/index.d.cts +4 -0
- package/dist/preprocess/index.d.ts +4 -0
- package/dist/preprocess/index.js +5 -0
- package/dist/preprocess/index.js.map +1 -0
- package/dist/random/index.cjs +74 -0
- package/dist/random/index.cjs.map +1 -0
- package/dist/random/index.d.cts +3 -0
- package/dist/random/index.d.ts +3 -0
- package/dist/random/index.js +5 -0
- package/dist/random/index.js.map +1 -0
- package/dist/stats/index.cjs +142 -0
- package/dist/stats/index.cjs.map +1 -0
- package/dist/stats/index.d.cts +3 -0
- package/dist/stats/index.d.ts +3 -0
- package/dist/stats/index.js +5 -0
- package/dist/stats/index.js.map +1 -0
- package/dist/tensor-B96jjJLQ.d.cts +205 -0
- package/dist/tensor-B96jjJLQ.d.ts +205 -0
- package/package.json +226 -0
@@ -0,0 +1,733 @@
+import { T as Tensor } from './Tensor-g8mUClel.js';
+import { A as Axis } from './tensor-B96jjJLQ.js';
+
+/**
+ * Pearson correlation coefficient.
+ *
+ * Measures linear correlation between two variables.
+ *
+ * @param x - First tensor
+ * @param y - Second tensor (must have same size as x)
+ * @returns Tuple of [correlation coefficient in [-1, 1], two-tailed p-value]
+ * @throws {InvalidParameterError} If tensors have different sizes, < 2 samples, or constant input
+ *
+ * @example
+ * ```ts
+ * const x = tensor([1, 2, 3, 4, 5]);
+ * const y = tensor([2, 4, 6, 8, 10]);
+ * const [r, p] = pearsonr(x, y); // r = 1.0 (perfect linear)
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN correlation
+ * - Infinity inputs result in NaN correlation
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html | SciPy stats.pearsonr}
+ */
+declare function pearsonr(x: Tensor, y: Tensor): [number, number];
+/**
+ * Computes Spearman's rank correlation coefficient.
+ *
+ * Non-parametric measure of monotonic relationship between two variables.
+ * Computed as Pearson correlation of rank values.
+ * - ρ = 1: Perfect monotonic increasing relationship
+ * - ρ = 0: No monotonic relationship
+ * - ρ = -1: Perfect monotonic decreasing relationship
+ *
+ * @param x - First tensor
+ * @param y - Second tensor (must have same size as x)
+ * @returns Tuple of [correlation coefficient, p-value]
+ * @throws {InvalidParameterError} If tensors have different sizes, < 2 samples, or constant input
+ *
+ * @example
+ * ```ts
+ * const x = tensor([1, 2, 3, 4, 5]);
+ * const y = tensor([2, 4, 6, 8, 10]);
+ * const [rho, p] = spearmanr(x, y); // rho = 1.0 (perfect monotonic)
+ * ```
+ *
+ * @remarks
+ * Ties are assigned average ranks.
+ * NaN values are ranked according to JavaScript sort behavior.
+ * Infinity values are sorted naturally (±Infinity at extremes).
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html | SciPy stats.spearmanr}
+ */
+declare function spearmanr(x: Tensor, y: Tensor): [number, number];
+/**
+ * Computes Kendall's tau correlation coefficient.
+ *
+ * Non-parametric measure of ordinal association based on concordant/discordant pairs.
+ * More robust to outliers than Spearman, but computationally more expensive.
+ * - τ = 1: All pairs concordant (perfect agreement)
+ * - τ = 0: Equal concordant and discordant pairs
+ * - τ = -1: All pairs discordant (perfect disagreement)
+ *
+ * @param x - First tensor
+ * @param y - Second tensor (must have same size as x)
+ * @returns Tuple of [tau coefficient, p-value]
+ * @throws {InvalidParameterError} If tensors have different sizes or < 2 samples
+ *
+ * @example
+ * ```ts
+ * const x = tensor([1, 2, 3, 4, 5]);
+ * const y = tensor([1, 3, 2, 4, 5]);
+ * const [tau, p] = kendalltau(x, y); // Mostly concordant
+ * ```
+ *
+ * @remarks
+ * This implementation uses the tau-b variant with tie correction.
+ * Ties are excluded from concordant/discordant counts and reduce the denominator.
+ * The p-value uses a normal approximation with tie-corrected variance.
+ *
+ * @complexity O(n²) - suitable for moderate sample sizes (n < 10,000)
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kendalltau.html | SciPy stats.kendalltau}
+ */
+declare function kendalltau(x: Tensor, y: Tensor): [number, number];
+/**
+ * Computes the Pearson correlation coefficient matrix.
+ *
+ * For two variables, returns 2x2 correlation matrix.
+ * For a 2D tensor, treats each column as a variable and computes pairwise correlations.
+ *
+ * @param x - Input tensor (1D or 2D)
+ * @param y - Optional second tensor (if provided, computes correlation between x and y)
+ * @returns Correlation matrix (symmetric with 1s on diagonal)
+ * @throws {InvalidParameterError} If < 2 observations or size mismatch
+ * @throws {ShapeError} If tensor is not 1D or 2D
+ *
+ * @example
+ * ```ts
+ * const x = tensor([1, 2, 3, 4, 5]);
+ * const y = tensor([2, 4, 5, 4, 5]);
+ * corrcoef(x, y); // Returns [[1.0, 0.8], [0.8, 1.0]]
+ *
+ * const data = tensor([[1, 2], [3, 4], [5, 6]]);
+ * corrcoef(data); // Returns 2x2 correlation matrix for 2 variables
+ * ```
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.corrcoef.html | NumPy corrcoef}
+ */
+declare function corrcoef(x: Tensor, y?: Tensor): Tensor;
+/**
+ * Computes the covariance matrix.
+ *
+ * Covariance measures how two variables change together.
+ * For two variables, returns 2x2 covariance matrix.
+ * For a 2D tensor, treats each column as a variable.
+ *
+ * @param x - Input tensor (1D or 2D)
+ * @param y - Optional second tensor (if provided, computes covariance between x and y)
+ * @param ddof - Delta degrees of freedom (0 = population, 1 = sample, default: 1)
+ * @returns Covariance matrix (symmetric)
+ * @throws {InvalidParameterError} If tensor is empty, ddof < 0, ddof >= sample size, or size mismatch
+ * @throws {ShapeError} If tensor is not 1D or 2D
+ *
+ * @example
+ * ```ts
+ * const x = tensor([1, 2, 3, 4, 5]);
+ * const y = tensor([2, 4, 5, 4, 5]);
+ * cov(x, y); // Returns 2x2 covariance matrix
+ *
+ * const data = tensor([[1, 2], [3, 4], [5, 6]]);
+ * cov(data); // Returns 2x2 covariance matrix for 2 variables
+ * ```
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.cov.html | NumPy cov}
+ */
+declare function cov(x: Tensor, y?: Tensor, ddof?: number): Tensor;
+
+/**
+ * Internal utilities for the stats package.
+ *
+ * This module contains internal helper functions used by the stats package.
+ * Functions are exported for use by other stats modules but are not part
+ * of the stable public API exported from `src/stats/index.ts`.
+ *
+ * Some functions (particularly CDFs and special functions like `normalCdf`,
+ * `studentTCdf`, `logGamma`, etc.) may be promoted to the public API in
+ * future versions if there is user demand.
+ *
+ * @internal
+ * @module stats/_internal
+ */
+
+/**
+ * Type representing axis specification for reduction operations.
+ * Can be a single axis number/alias or an array of axis numbers/aliases.
+ *
+ * @example
+ * ```ts
+ * const axis1: AxisLike = 0; // Single axis
+ * const axis2: AxisLike = [0, 1]; // Multiple axes
+ * const axis3: AxisLike = "rows"; // Alias
+ * ```
+ */
+type AxisLike = Axis | readonly Axis[];
+
+/**
+ * Computes the arithmetic mean along specified axes.
+ *
+ * The mean is the sum of all values divided by the count.
+ * Supports axis-wise reduction with optional dimension preservation.
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute the mean (undefined = all axes)
+ * @param _keepdims - If true, reduced axes are retained with size 1 (default: false)
+ * @returns Tensor containing mean values
+ * @throws {InvalidParameterError} If tensor is empty or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([[1, 2, 3], [4, 5, 6]]);
+ * mean(t); // Returns tensor([3.5]) - mean of all elements
+ * mean(t, 0); // Returns tensor([2.5, 3.5, 4.5]) - column means
+ * mean(t, 1); // Returns tensor([2, 5]) - row means
+ * mean(t, 1, true); // Returns tensor([[2], [5]]) - keepdims
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN output
+ * - Infinity is handled according to standard arithmetic rules
+ * - Mixed Infinity values (Infinity + -Infinity) result in NaN
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.mean.html | NumPy mean}
+ */
+declare function mean(t: Tensor, axis?: AxisLike, _keepdims?: boolean): Tensor;
+/**
+ * Computes the median (50th percentile) along specified axes.
+ *
+ * The median is the middle value when data is sorted. For even-sized arrays,
+ * it's the average of the two middle values. More robust to outliers than mean.
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute the median (undefined = all axes)
+ * @param _keepdims - If true, reduced axes are retained with size 1 (default: false)
+ * @returns Tensor containing median values
+ * @throws {InvalidParameterError} If tensor is empty or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * median(t); // Returns tensor([3])
+ *
+ * const t2 = tensor([1, 2, 3, 4]);
+ * median(t2); // Returns tensor([2.5]) - average of 2 and 3
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs result in NaN output (NaN sorts to end)
+ * - Infinity values are sorted naturally (±Infinity at extremes)
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.median.html | NumPy median}
+ */
+declare function median(t: Tensor, axis?: AxisLike, _keepdims?: boolean): Tensor;
+/**
+ * Computes the mode (most frequent value) along specified axis.
+ *
+ * The mode is the value that appears most frequently in the dataset.
+ * If multiple values have the same maximum frequency, returns the smallest value.
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute the mode (undefined = all axes)
+ * @returns Tensor containing mode values
+ * @throws {InvalidParameterError} If tensor is empty
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 2, 3, 3, 3]);
+ * mode(t); // Returns tensor([3]) - most frequent value
+ *
+ * const t2 = tensor([[1, 2, 2], [3, 3, 4]]);
+ * mode(t2, 1); // Returns tensor([2, 3]) - mode of each row
+ * ```
+ *
+ * @remarks
+ * NaN inputs propagate to NaN output.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mode.html | SciPy stats.mode}
+ */
+declare function mode(t: Tensor, axis?: AxisLike): Tensor;
+/**
+ * Computes the standard deviation along specified axes.
+ *
+ * Standard deviation is the square root of variance, measuring spread of data.
+ * Uses Welford's algorithm for numerical stability via the variance function.
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute std (undefined = all axes)
+ * @param _keepdims - If true, reduced axes are retained with size 1 (default: false)
+ * @param ddof - Delta degrees of freedom (0 = population, 1 = sample, default: 0)
+ * @returns Tensor containing standard deviation values
+ * @throws {InvalidParameterError} If tensor is empty, ddof < 0, ddof >= sample size, or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ * @throws {DTypeError} If tensor has string dtype
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * std(t); // Population std (ddof=0)
+ * std(t, 0, false, 1); // Sample std (ddof=1)
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN output
+ * - Infinity inputs result in NaN (infinite standard deviation)
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.std.html | NumPy std}
+ */
+declare function std(t: Tensor, axis?: AxisLike, _keepdims?: boolean, ddof?: number): Tensor;
+/**
+ * Computes the variance along specified axes.
+ *
+ * Variance measures the average squared deviation from the mean.
+ * Uses Welford's online algorithm for numerical stability.
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute variance (undefined = all axes)
+ * @param _keepdims - If true, reduced axes are retained with size 1 (default: false)
+ * @param ddof - Delta degrees of freedom (0 = population, 1 = sample, default: 0)
+ * @returns Tensor containing variance values
+ * @throws {InvalidParameterError} If tensor is empty, ddof < 0, ddof >= sample size, or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ * @throws {DTypeError} If tensor has string dtype
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * variance(t); // Population variance: 2.0
+ * variance(t, 0, false, 1); // Sample variance: 2.5
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN output
+ * - Infinity inputs result in NaN (infinite variance)
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.var.html | NumPy var}
+ */
+declare function variance(t: Tensor, axis?: AxisLike, _keepdims?: boolean, ddof?: number): Tensor;
+/**
+ * Computes the skewness (third standardized moment) along specified axis.
+ *
+ * Skewness measures the asymmetry of the probability distribution.
+ * - Negative skew: left tail is longer (mean < median)
+ * - Zero skew: symmetric distribution (normal distribution)
+ * - Positive skew: right tail is longer (mean > median)
+ *
+ * Uses Fisher's moment coefficient: E[(X - μ)³] / σ³
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute skewness (undefined = all axes)
+ * @param bias - If false, applies the unbiased Fisher-Pearson correction (default: true)
+ * @returns Tensor containing skewness values
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * skewness(t); // Returns ~0 (symmetric)
+ *
+ * const t2 = tensor([1, 2, 2, 3, 3, 3, 4, 4, 4, 4]);
+ * skewness(t2); // Positive skew (right-tailed)
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN output
+ * - Returns NaN for constant input (zero variance)
+ * - Unbiased correction requires at least 3 samples; otherwise returns NaN
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html | SciPy stats.skew}
+ */
+declare function skewness(t: Tensor, axis?: AxisLike, bias?: boolean): Tensor;
+/**
+ * Computes the kurtosis (fourth standardized moment) along specified axis.
+ *
+ * Kurtosis measures the "tailedness" of the probability distribution.
+ * - Negative excess kurtosis: lighter tails than normal (platykurtic)
+ * - Zero excess kurtosis: same tails as normal distribution (mesokurtic)
+ * - Positive excess kurtosis: heavier tails than normal (leptokurtic)
+ *
+ * Uses Fisher's definition: E[(X - μ)⁴] / σ⁴ - 3 (excess kurtosis)
+ *
+ * @param t - Input tensor
+ * @param axis - Axis or axes along which to compute kurtosis (undefined = all axes)
+ * @param fisher - If true, returns excess kurtosis (subtract 3, default: true)
+ * @param bias - If false, applies bias correction (requires at least 4 samples, default: true)
+ * @returns Tensor containing kurtosis values
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * kurtosis(t, undefined, true); // Excess kurtosis (Fisher)
+ * kurtosis(t, undefined, false); // Raw kurtosis (Pearson)
+ * ```
+ *
+ * @remarks
+ * This function follows IEEE 754 semantics for special values:
+ * - NaN inputs propagate to NaN output
+ * - Returns NaN for constant input (zero variance)
+ * - Unbiased correction requires at least 4 samples; otherwise returns NaN
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kurtosis.html | SciPy stats.kurtosis}
+ */
+declare function kurtosis(t: Tensor, axis?: AxisLike, fisher?: boolean, bias?: boolean): Tensor;
+/**
+ * Computes quantiles along specified axes.
+ *
+ * Quantiles are cut points dividing the range of a probability distribution.
+ * Uses linear interpolation between data points.
+ *
+ * @param t - Input tensor
+ * @param q - Quantile(s) to compute, in range [0, 1] (0.5 = median)
+ * @param axis - Axis or axes along which to compute quantiles (undefined = all axes)
+ * @returns Tensor containing quantile values
+ * @throws {InvalidParameterError} If q is not in [0, 1], tensor is empty, or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * quantile(t, 0.5); // Returns tensor([3]) - median
+ * quantile(t, [0.25, 0.75]); // Returns tensor([2, 4]) - quartiles
+ * quantile(t, 0.95); // Returns tensor([4.8]) - 95th percentile
+ * ```
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.quantile.html | NumPy quantile}
+ */
+declare function quantile(t: Tensor, q: number | number[], axis?: AxisLike): Tensor;
+/**
+ * Computes percentiles along specified axes.
+ *
+ * Percentiles are quantiles expressed as percentages (0-100 instead of 0-1).
+ * This is a convenience wrapper around quantile().
+ *
+ * @param t - Input tensor
+ * @param q - Percentile(s) to compute, in range [0, 100] (50 = median)
+ * @param axis - Axis or axes along which to compute percentiles (undefined = all axes)
+ * @returns Tensor containing percentile values
+ * @throws {InvalidParameterError} If q is not in [0, 100], tensor is empty, or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * percentile(t, 50); // Returns tensor([3]) - median
+ * percentile(t, [25, 75]); // Returns tensor([2, 4]) - quartiles
+ * percentile(t, 95); // Returns tensor([4.8]) - 95th percentile
+ * ```
+ *
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.percentile.html | NumPy percentile}
+ */
+declare function percentile(t: Tensor, q: number | number[], axis?: AxisLike): Tensor;
+/**
+ * Computes the n-th central moment about the mean.
+ *
+ * The n-th moment is defined as: E[(X - μ)ⁿ]
+ * - n=1: Always 0 (by definition of mean)
+ * - n=2: Variance
+ * - n=3: Related to skewness
+ * - n=4: Related to kurtosis
+ *
+ * @param t - Input tensor
+ * @param n - Order of the moment (must be non-negative integer)
+ * @param axis - Axis or axes along which to compute moment (undefined = all axes)
+ * @returns Tensor containing moment values
+ * @throws {InvalidParameterError} If n is not a non-negative integer
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5]);
+ * moment(t, 1); // Returns ~0 (first moment about mean)
+ * moment(t, 2); // Returns variance
+ * moment(t, 3); // Returns third moment (related to skewness)
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.moment.html | SciPy stats.moment}
+ */
+declare function moment(t: Tensor, n: number, axis?: AxisLike): Tensor;
+/**
+ * Computes the geometric mean along specified axis.
+ *
+ * The geometric mean is the n-th root of the product of n values.
+ * Computed as: exp(mean(log(x))) for numerical stability.
+ * Useful for averaging ratios, growth rates, and multiplicative processes.
+ *
+ * @param t - Input tensor (all values must be > 0)
+ * @param axis - Axis or axes along which to compute geometric mean (undefined = all axes)
+ * @returns Tensor containing geometric mean values
+ * @throws {InvalidParameterError} If any value is <= 0
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 4, 8]);
+ * geometricMean(t); // Returns ~2.83 (⁴√(1*2*4*8))
+ *
+ * // Growth rates: 10% and 20% growth
+ * const growth = tensor([1.1, 1.2]);
+ * geometricMean(growth); // Returns ~1.149 (average growth rate)
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.gmean.html | SciPy stats.gmean}
+ */
+declare function geometricMean(t: Tensor, axis?: AxisLike): Tensor;
+/**
+ * Computes the harmonic mean along specified axis.
+ *
+ * The harmonic mean is the reciprocal of the arithmetic mean of reciprocals.
+ * Computed as: n / sum(1/x)
+ * Useful for averaging rates and ratios (e.g., speeds, densities).
+ *
+ * @param t - Input tensor (all values must be > 0)
+ * @param axis - Axis or axes along which to compute harmonic mean (undefined = all axes)
+ * @returns Tensor containing harmonic mean values
+ * @throws {InvalidParameterError} If any value is <= 0
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 4]);
+ * harmonicMean(t); // Returns ~1.71 (3 / (1/1 + 1/2 + 1/4))
+ *
+ * // Average speed: 60 mph for half distance, 40 mph for other half
+ * const speeds = tensor([60, 40]);
+ * harmonicMean(speeds); // Returns 48 mph (correct average)
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.hmean.html | SciPy stats.hmean}
+ */
+declare function harmonicMean(t: Tensor, axis?: AxisLike): Tensor;
+/**
+ * Computes the trimmed mean (mean after removing outliers from both tails).
+ *
+ * Removes a specified proportion of extreme values from both ends before computing mean.
+ * More robust to outliers than regular mean, less extreme than median.
+ *
+ * @param t - Input tensor
+ * @param proportiontocut - Fraction to cut from each tail, in range [0, 0.5)
+ * @param axis - Axis or axes along which to compute trimmed mean (undefined = all axes)
+ * @returns Tensor containing trimmed mean values
+ * @throws {InvalidParameterError} If proportiontocut is not in [0, 0.5), tensor is empty, or reduction over empty axis
+ * @throws {IndexError} If axis is out of bounds
+ *
+ * @example
+ * ```ts
+ * const t = tensor([1, 2, 3, 4, 5, 100]); // 100 is outlier
+ * mean(t); // Returns ~19.17 (affected by outlier)
+ * trimMean(t, 0.2); // Returns 3.5 (removes 1 and 100)
+ * trimMean(t, 0.1); // Returns ~22.8 (removes only 100)
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.trim_mean.html | SciPy stats.trim_mean}
+ */
+declare function trimMean(t: Tensor, proportiontocut: number, axis?: AxisLike): Tensor;
+
+type TestResult = {
+    statistic: number;
+    pvalue: number;
+};
+/**
+ * One-sample t-test.
+ *
+ * Tests whether mean of sample differs from population mean.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html | SciPy stats.ttest_1samp}
+ */
+declare function ttest_1samp(a: Tensor, popmean: number): TestResult;
+/**
+ * Independent two-sample t-test.
+ *
+ * Tests whether means of two independent samples are equal.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html | SciPy stats.ttest_ind}
+ */
+declare function ttest_ind(a: Tensor, b: Tensor, equalVar?: boolean): TestResult;
+/**
+ * Paired-sample t-test.
+ *
+ * Tests whether means of two related samples are equal.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_rel.html | SciPy stats.ttest_rel}
+ */
+declare function ttest_rel(a: Tensor, b: Tensor): TestResult;
+/**
+ * Chi-square goodness of fit test.
+ *
+ * Observed and expected frequencies must be non-negative and sum to the same total
+ * (within floating-point tolerance).
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.chisquare.html | SciPy stats.chisquare}
+ */
+declare function chisquare(f_obs: Tensor, f_exp?: Tensor): TestResult;
+/**
+ * Kolmogorov-Smirnov test for goodness of fit.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kstest.html | SciPy stats.kstest}
+ */
+declare function kstest(data: Tensor, cdf: string | ((x: number) => number)): TestResult;
+/**
+ * Test for normality.
+ *
+ * Uses D'Agostino-Pearson's omnibus test combining skewness and kurtosis.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.normaltest.html | SciPy stats.normaltest}
+ */
+declare function normaltest(a: Tensor): TestResult;
+/**
+ * Shapiro-Wilk test for normality.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.shapiro.html | SciPy stats.shapiro}
+ */
+declare function shapiro(x: Tensor): TestResult;
+/**
+ * Anderson-Darling test for normality.
+ *
+ * Uses sample standard deviation and size-adjusted critical values.
+ * For very small samples (n < 10), uses an IQR-based scale estimate to stabilize
+ * the statistic.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.anderson.html | SciPy stats.anderson}
+ */
+declare function anderson(x: Tensor): {
+    statistic: number;
+    critical_values: number[];
+    significance_level: number[];
+};
+/**
+ * Mann-Whitney U test (non-parametric).
+ *
+ * Tests whether two independent samples come from same distribution.
+ *
+ * Note: Uses normal approximation for the p-value with tie correction and
+ * continuity correction. No exact method selection is available.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html | SciPy stats.mannwhitneyu}
+ */
+declare function mannwhitneyu(x: Tensor, y: Tensor): TestResult;
+/**
+ * Wilcoxon signed-rank test (non-parametric paired test).
+ *
+ * Note: Uses normal approximation for the p-value with tie correction and
+ * continuity correction. No exact method selection is available.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.wilcoxon.html | SciPy stats.wilcoxon}
+ */
+declare function wilcoxon(x: Tensor, y?: Tensor): TestResult;
+/**
+ * Kruskal-Wallis H-test (non-parametric version of ANOVA).
+ *
+ * Note: Uses chi-square approximation for the p-value with tie correction.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kruskal.html | SciPy stats.kruskal}
+ */
+declare function kruskal(...samples: Tensor[]): TestResult;
+/**
+ * Friedman test (non-parametric repeated measures ANOVA).
+ *
+ * Note: Uses chi-square approximation for the p-value with tie correction.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.friedmanchisquare.html | SciPy stats.friedmanchisquare}
+ */
+declare function friedmanchisquare(...samples: Tensor[]): TestResult;
+/**
+ * Levene's test for equality of variances.
+ *
+ * Tests whether two or more groups have equal variances.
+ * More robust than Bartlett's test for non-normal data.
+ *
+ * @param center - Method to use for centering: 'median' (default, most robust),
+ *   'mean' (traditional), or 'trimmed' (10% trimmed mean)
+ * @param samples - Two or more sample tensors to compare
+ * @returns Test result with statistic and p-value
+ *
+ * @example
+ * ```ts
+ * import { levene, tensor } from 'deepbox';
+ *
+ * const group1 = tensor([1, 2, 3, 4, 5]);
+ * const group2 = tensor([2, 4, 6, 8, 10]);
+ * const result = levene('median', group1, group2);
+ * console.log(result.pvalue); // p-value for equal variances
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.levene.html | SciPy stats.levene}
+ */
+declare function levene(center: "mean" | "median" | "trimmed", ...samples: Tensor[]): TestResult;
+/**
+ * Bartlett's test for equality of variances.
+ *
+ * Tests whether two or more groups have equal variances.
+ * Assumes data is normally distributed; use Levene's test for non-normal data.
+ *
+ * @param samples - Two or more sample tensors to compare
+ * @returns Test result with statistic and p-value
+ *
+ * @example
+ * ```ts
+ * import { bartlett, tensor } from 'deepbox';
+ *
+ * const group1 = tensor([1, 2, 3, 4, 5]);
+ * const group2 = tensor([2, 4, 6, 8, 10]);
+ * const result = bartlett(group1, group2);
+ * console.log(result.pvalue); // p-value for equal variances
+ * ```
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bartlett.html | SciPy stats.bartlett}
+ */
+declare function bartlett(...samples: Tensor[]): TestResult;
+/**
+ * One-way ANOVA.
+ *
+ * @see {@link https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.f_oneway.html | SciPy stats.f_oneway}
+ */
+declare function f_oneway(...samples: Tensor[]): TestResult;
+
+type index_TestResult = TestResult;
+declare const index_anderson: typeof anderson;
+declare const index_bartlett: typeof bartlett;
+declare const index_chisquare: typeof chisquare;
+declare const index_corrcoef: typeof corrcoef;
+declare const index_cov: typeof cov;
+declare const index_f_oneway: typeof f_oneway;
+declare const index_friedmanchisquare: typeof friedmanchisquare;
+declare const index_geometricMean: typeof geometricMean;
+declare const index_harmonicMean: typeof harmonicMean;
+declare const index_kendalltau: typeof kendalltau;
+declare const index_kruskal: typeof kruskal;
+declare const index_kstest: typeof kstest;
+declare const index_kurtosis: typeof kurtosis;
+declare const index_levene: typeof levene;
+declare const index_mannwhitneyu: typeof mannwhitneyu;
+declare const index_mean: typeof mean;
+declare const index_median: typeof median;
+declare const index_mode: typeof mode;
+declare const index_moment: typeof moment;
+declare const index_normaltest: typeof normaltest;
+declare const index_pearsonr: typeof pearsonr;
+declare const index_percentile: typeof percentile;
+declare const index_quantile: typeof quantile;
+declare const index_shapiro: typeof shapiro;
+declare const index_skewness: typeof skewness;
+declare const index_spearmanr: typeof spearmanr;
+declare const index_std: typeof std;
+declare const index_trimMean: typeof trimMean;
+declare const index_ttest_1samp: typeof ttest_1samp;
+declare const index_ttest_ind: typeof ttest_ind;
+declare const index_ttest_rel: typeof ttest_rel;
+declare const index_variance: typeof variance;
+declare const index_wilcoxon: typeof wilcoxon;
+declare namespace index {
+    export { type index_TestResult as TestResult, index_anderson as anderson, index_bartlett as bartlett, index_chisquare as chisquare, index_corrcoef as corrcoef, index_cov as cov, index_f_oneway as f_oneway, index_friedmanchisquare as friedmanchisquare, index_geometricMean as geometricMean, index_harmonicMean as harmonicMean, index_kendalltau as kendalltau, index_kruskal as kruskal, index_kstest as kstest, index_kurtosis as kurtosis, index_levene as levene, index_mannwhitneyu as mannwhitneyu, index_mean as mean, index_median as median, index_mode as mode, index_moment as moment, index_normaltest as normaltest, index_pearsonr as pearsonr, index_percentile as percentile, index_quantile as quantile, index_shapiro as shapiro, index_skewness as skewness, index_spearmanr as spearmanr, index_std as std, index_trimMean as trimMean, index_ttest_1samp as ttest_1samp, index_ttest_ind as ttest_ind, index_ttest_rel as ttest_rel, index_variance as variance, index_wilcoxon as wilcoxon };
+}
+
+export { levene as A, mannwhitneyu as B, normaltest as C, shapiro as D, ttest_1samp as E, ttest_ind as F, ttest_rel as G, wilcoxon as H, type TestResult as T, cov as a, kurtosis as b, corrcoef as c, median as d, mode as e, moment as f, geometricMean as g, harmonicMean as h, index as i, percentile as j, kendalltau as k, skewness as l, mean as m, std as n, anderson as o, pearsonr as p, quantile as q, bartlett as r, spearmanr as s, trimMean as t, chisquare as u, variance as v, f_oneway as w, friedmanchisquare as x, kruskal as y, kstest as z };
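For reference, here is a minimal usage sketch of the descriptive-statistics and correlation declarations added in the diff above. The `deepbox` import specifier and the `tensor` constructor are taken from the TSDoc examples embedded in this file; treat them as assumptions about the published runtime exports rather than verified API.

```ts
// Illustrative sketch only: the import path and runtime exports are assumed
// from the TSDoc examples in the declaration file above.
import { tensor, mean, std, quantile, pearsonr } from 'deepbox';

const x = tensor([1, 2, 3, 4, 5]);
const y = tensor([2, 4, 6, 8, 10]);

// Reductions follow the (t, axis?, keepdims?, ddof?) signatures declared above;
// ddof = 1 switches from the population to the sample standard deviation.
const xMean = mean(x);
const xStd = std(x, undefined, false, 1);

// quantile takes q in [0, 1] and accepts several cut points at once.
const quartiles = quantile(x, [0.25, 0.5, 0.75]);

// pearsonr returns a [r, two-tailed p-value] tuple.
const [r, p] = pearsonr(x, y);
console.log(xMean, xStd, quartiles, r, p);
```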
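The hypothesis-test declarations all return a `TestResult` of `{ statistic, pvalue }`; `levene` takes its centering method as the first argument followed by the group tensors as rest parameters, and `kruskal` is likewise variadic. A hedged sketch under the same import assumption as above:

```ts
// Illustrative sketch; the 'deepbox' root import mirrors the TSDoc examples above.
import { tensor, ttest_ind, levene, kruskal } from 'deepbox';

const a = tensor([5.1, 4.9, 5.4, 5.0, 5.2]);
const b = tensor([5.8, 6.1, 5.9, 6.2, 6.0]);
const c = tensor([4.2, 4.5, 4.1, 4.4, 4.3]);

// Independent two-sample t-test; TestResult = { statistic, pvalue }.
// equalVar = false presumably selects the unequal-variance (Welch-style) variant.
const t = ttest_ind(a, b, true);
console.log(t.statistic, t.pvalue);

// Levene's test: centering method ('median' | 'mean' | 'trimmed') first,
// then two or more sample tensors.
const lev = levene('median', a, b, c);

// Kruskal-Wallis H-test over the same groups (non-parametric, variadic).
const kw = kruskal(a, b, c);
console.log(lev.pvalue, kw.pvalue);
```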