deepbox 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +344 -0
  3. package/dist/CSRMatrix-CwGwQRea.d.cts +219 -0
  4. package/dist/CSRMatrix-KzNt6QpS.d.ts +219 -0
  5. package/dist/Tensor-BQLk1ltW.d.cts +147 -0
  6. package/dist/Tensor-g8mUClel.d.ts +147 -0
  7. package/dist/chunk-4S73VUBD.js +677 -0
  8. package/dist/chunk-4S73VUBD.js.map +1 -0
  9. package/dist/chunk-5R4S63PF.js +2925 -0
  10. package/dist/chunk-5R4S63PF.js.map +1 -0
  11. package/dist/chunk-6AE5FKKQ.cjs +9264 -0
  12. package/dist/chunk-6AE5FKKQ.cjs.map +1 -0
  13. package/dist/chunk-AD436M45.js +3854 -0
  14. package/dist/chunk-AD436M45.js.map +1 -0
  15. package/dist/chunk-ALS7ETWZ.cjs +4263 -0
  16. package/dist/chunk-ALS7ETWZ.cjs.map +1 -0
  17. package/dist/chunk-AU7XHGKJ.js +2092 -0
  18. package/dist/chunk-AU7XHGKJ.js.map +1 -0
  19. package/dist/chunk-B5TNKUEY.js +1481 -0
  20. package/dist/chunk-B5TNKUEY.js.map +1 -0
  21. package/dist/chunk-BCR7G3A6.js +9136 -0
  22. package/dist/chunk-BCR7G3A6.js.map +1 -0
  23. package/dist/chunk-C4PKXY74.cjs +1917 -0
  24. package/dist/chunk-C4PKXY74.cjs.map +1 -0
  25. package/dist/chunk-DWZY6PIP.cjs +6400 -0
  26. package/dist/chunk-DWZY6PIP.cjs.map +1 -0
  27. package/dist/chunk-E3EU5FZO.cjs +2113 -0
  28. package/dist/chunk-E3EU5FZO.cjs.map +1 -0
  29. package/dist/chunk-F3JWBINJ.js +1054 -0
  30. package/dist/chunk-F3JWBINJ.js.map +1 -0
  31. package/dist/chunk-FJYLIGJX.js +1940 -0
  32. package/dist/chunk-FJYLIGJX.js.map +1 -0
  33. package/dist/chunk-JSCDE774.cjs +729 -0
  34. package/dist/chunk-JSCDE774.cjs.map +1 -0
  35. package/dist/chunk-LWECRCW2.cjs +2412 -0
  36. package/dist/chunk-LWECRCW2.cjs.map +1 -0
  37. package/dist/chunk-MLBMYKCG.js +6379 -0
  38. package/dist/chunk-MLBMYKCG.js.map +1 -0
  39. package/dist/chunk-OX6QXFMV.cjs +3874 -0
  40. package/dist/chunk-OX6QXFMV.cjs.map +1 -0
  41. package/dist/chunk-PHV2DKRS.cjs +1072 -0
  42. package/dist/chunk-PHV2DKRS.cjs.map +1 -0
  43. package/dist/chunk-PL7TAYKI.js +4056 -0
  44. package/dist/chunk-PL7TAYKI.js.map +1 -0
  45. package/dist/chunk-PR647I7R.js +1898 -0
  46. package/dist/chunk-PR647I7R.js.map +1 -0
  47. package/dist/chunk-QERHVCHC.cjs +2960 -0
  48. package/dist/chunk-QERHVCHC.cjs.map +1 -0
  49. package/dist/chunk-XEG44RF6.cjs +1514 -0
  50. package/dist/chunk-XEG44RF6.cjs.map +1 -0
  51. package/dist/chunk-XMWVME2W.js +2377 -0
  52. package/dist/chunk-XMWVME2W.js.map +1 -0
  53. package/dist/chunk-ZB75FESB.cjs +1979 -0
  54. package/dist/chunk-ZB75FESB.cjs.map +1 -0
  55. package/dist/chunk-ZLW62TJG.cjs +4061 -0
  56. package/dist/chunk-ZLW62TJG.cjs.map +1 -0
  57. package/dist/chunk-ZXKBDFP3.js +4235 -0
  58. package/dist/chunk-ZXKBDFP3.js.map +1 -0
  59. package/dist/core/index.cjs +204 -0
  60. package/dist/core/index.cjs.map +1 -0
  61. package/dist/core/index.d.cts +2 -0
  62. package/dist/core/index.d.ts +2 -0
  63. package/dist/core/index.js +3 -0
  64. package/dist/core/index.js.map +1 -0
  65. package/dist/dataframe/index.cjs +22 -0
  66. package/dist/dataframe/index.cjs.map +1 -0
  67. package/dist/dataframe/index.d.cts +3 -0
  68. package/dist/dataframe/index.d.ts +3 -0
  69. package/dist/dataframe/index.js +5 -0
  70. package/dist/dataframe/index.js.map +1 -0
  71. package/dist/datasets/index.cjs +134 -0
  72. package/dist/datasets/index.cjs.map +1 -0
  73. package/dist/datasets/index.d.cts +3 -0
  74. package/dist/datasets/index.d.ts +3 -0
  75. package/dist/datasets/index.js +5 -0
  76. package/dist/datasets/index.js.map +1 -0
  77. package/dist/index-74AB8Cyh.d.cts +1126 -0
  78. package/dist/index-9oQx1HgV.d.cts +1180 -0
  79. package/dist/index-BJY2SI4i.d.ts +483 -0
  80. package/dist/index-BWGhrDlr.d.ts +733 -0
  81. package/dist/index-B_DK4FKY.d.cts +242 -0
  82. package/dist/index-BbA2Gxfl.d.ts +456 -0
  83. package/dist/index-BgHYAoSS.d.cts +837 -0
  84. package/dist/index-BndMbqsM.d.ts +1439 -0
  85. package/dist/index-C1mfVYoo.d.ts +2517 -0
  86. package/dist/index-CCvlwAmL.d.cts +809 -0
  87. package/dist/index-CDw5CnOU.d.ts +785 -0
  88. package/dist/index-Cn3SdB0O.d.ts +1126 -0
  89. package/dist/index-CrqLlS-a.d.ts +776 -0
  90. package/dist/index-D61yaSMY.d.cts +483 -0
  91. package/dist/index-D9Loo1_A.d.cts +2517 -0
  92. package/dist/index-DIT_OO9C.d.cts +785 -0
  93. package/dist/index-DIp_RrRt.d.ts +242 -0
  94. package/dist/index-DbultU6X.d.cts +1427 -0
  95. package/dist/index-DmEg_LCm.d.cts +776 -0
  96. package/dist/index-DoPWVxPo.d.cts +1439 -0
  97. package/dist/index-DuCxd-8d.d.ts +837 -0
  98. package/dist/index-Dx42TZaY.d.ts +809 -0
  99. package/dist/index-DyZ4QQf5.d.cts +456 -0
  100. package/dist/index-GFAVyOWO.d.ts +1427 -0
  101. package/dist/index-WHQLn0e8.d.cts +733 -0
  102. package/dist/index-ZtI1Iy4L.d.ts +1180 -0
  103. package/dist/index-eJgeni9c.d.cts +1911 -0
  104. package/dist/index-tk4lSYod.d.ts +1911 -0
  105. package/dist/index.cjs +72 -0
  106. package/dist/index.cjs.map +1 -0
  107. package/dist/index.d.cts +17 -0
  108. package/dist/index.d.ts +17 -0
  109. package/dist/index.js +15 -0
  110. package/dist/index.js.map +1 -0
  111. package/dist/linalg/index.cjs +86 -0
  112. package/dist/linalg/index.cjs.map +1 -0
  113. package/dist/linalg/index.d.cts +3 -0
  114. package/dist/linalg/index.d.ts +3 -0
  115. package/dist/linalg/index.js +5 -0
  116. package/dist/linalg/index.js.map +1 -0
  117. package/dist/metrics/index.cjs +158 -0
  118. package/dist/metrics/index.cjs.map +1 -0
  119. package/dist/metrics/index.d.cts +3 -0
  120. package/dist/metrics/index.d.ts +3 -0
  121. package/dist/metrics/index.js +5 -0
  122. package/dist/metrics/index.js.map +1 -0
  123. package/dist/ml/index.cjs +87 -0
  124. package/dist/ml/index.cjs.map +1 -0
  125. package/dist/ml/index.d.cts +3 -0
  126. package/dist/ml/index.d.ts +3 -0
  127. package/dist/ml/index.js +6 -0
  128. package/dist/ml/index.js.map +1 -0
  129. package/dist/ndarray/index.cjs +501 -0
  130. package/dist/ndarray/index.cjs.map +1 -0
  131. package/dist/ndarray/index.d.cts +5 -0
  132. package/dist/ndarray/index.d.ts +5 -0
  133. package/dist/ndarray/index.js +4 -0
  134. package/dist/ndarray/index.js.map +1 -0
  135. package/dist/nn/index.cjs +142 -0
  136. package/dist/nn/index.cjs.map +1 -0
  137. package/dist/nn/index.d.cts +6 -0
  138. package/dist/nn/index.d.ts +6 -0
  139. package/dist/nn/index.js +5 -0
  140. package/dist/nn/index.js.map +1 -0
  141. package/dist/optim/index.cjs +77 -0
  142. package/dist/optim/index.cjs.map +1 -0
  143. package/dist/optim/index.d.cts +4 -0
  144. package/dist/optim/index.d.ts +4 -0
  145. package/dist/optim/index.js +4 -0
  146. package/dist/optim/index.js.map +1 -0
  147. package/dist/plot/index.cjs +114 -0
  148. package/dist/plot/index.cjs.map +1 -0
  149. package/dist/plot/index.d.cts +6 -0
  150. package/dist/plot/index.d.ts +6 -0
  151. package/dist/plot/index.js +5 -0
  152. package/dist/plot/index.js.map +1 -0
  153. package/dist/preprocess/index.cjs +82 -0
  154. package/dist/preprocess/index.cjs.map +1 -0
  155. package/dist/preprocess/index.d.cts +4 -0
  156. package/dist/preprocess/index.d.ts +4 -0
  157. package/dist/preprocess/index.js +5 -0
  158. package/dist/preprocess/index.js.map +1 -0
  159. package/dist/random/index.cjs +74 -0
  160. package/dist/random/index.cjs.map +1 -0
  161. package/dist/random/index.d.cts +3 -0
  162. package/dist/random/index.d.ts +3 -0
  163. package/dist/random/index.js +5 -0
  164. package/dist/random/index.js.map +1 -0
  165. package/dist/stats/index.cjs +142 -0
  166. package/dist/stats/index.cjs.map +1 -0
  167. package/dist/stats/index.d.cts +3 -0
  168. package/dist/stats/index.d.ts +3 -0
  169. package/dist/stats/index.js +5 -0
  170. package/dist/stats/index.js.map +1 -0
  171. package/dist/tensor-B96jjJLQ.d.cts +205 -0
  172. package/dist/tensor-B96jjJLQ.d.ts +205 -0
  173. package/package.json +226 -0
@@ -0,0 +1,1911 @@
1
+ import { D as DType, a as Device, S as Shape, b as TypedArray, A as Axis, T as TensorLike } from './tensor-B96jjJLQ.cjs';
2
+ import { G as GradTensor, a as GradTensorOptions, S as SliceRange, d as dropout, g as gather, i as im2col$1, l as logSoftmax$1, n as noGrad, p as parameter, s as slice, b as softmax$1, v as variance$1 } from './index-B_DK4FKY.cjs';
3
+ import { T as Tensor, a as TensorOptions } from './Tensor-BQLk1ltW.cjs';
4
+ import { C as CSRMatrix, a as CSRMatrixInit } from './CSRMatrix-CwGwQRea.cjs';
5
+
6
+ /**
7
+ * Recursive type for nested number arrays.
8
+ *
9
+ * Used to represent multi-dimensional data in JavaScript arrays.
10
+ *
11
+ * @example
12
+ * ```ts
13
+ * const scalar: NestedArray = 5;
14
+ * const vector: NestedArray = [1, 2, 3];
15
+ * const matrix: NestedArray = [[1, 2], [3, 4]];
16
+ * const tensor3d: NestedArray = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]];
17
+ * ```
18
+ */
19
+ type NestedArray = number | NestedArray[];
20
+ type StringNestedArray = string | StringNestedArray[];
21
+ type TensorCreateOptions = {
22
+ readonly dtype?: DType;
23
+ readonly device?: Device;
24
+ };
25
+ /**
26
+ * Create a tensor from nested arrays or TypedArray.
27
+ *
28
+ * This is the primary function for creating tensors. It accepts:
29
+ * - Nested JavaScript arrays (e.g., [[1, 2], [3, 4]])
30
+ * - TypedArrays (e.g., Float32Array)
31
+ * - Scalars (single numbers)
32
+ *
33
+ * Time complexity: O(n) where n is total number of elements.
34
+ * Space complexity: O(n) for data storage.
35
+ *
36
+ * @param data - Input data as nested array or TypedArray
37
+ * @param opts - Creation options (dtype, device)
38
+ * @returns New tensor
39
+ *
40
+ * @throws {TypeError} If data has inconsistent shape (ragged arrays)
41
+ * @throws {DTypeError} If dtype is incompatible with data
42
+ *
43
+ * @example
44
+ * ```ts
45
+ * import { tensor } from 'deepbox/ndarray';
46
+ *
47
+ * // From nested arrays
48
+ * const t1 = tensor([[1, 2, 3], [4, 5, 6]]);
49
+ *
50
+ * // Specify dtype
51
+ * const t2 = tensor([1, 2, 3], { dtype: 'int32' });
52
+ *
53
+ * // From TypedArray
54
+ * const data = new Float32Array([1, 2, 3, 4]);
55
+ * const t3 = tensor(data);
56
+ *
57
+ * // Scalar
58
+ * const t4 = tensor(42);
59
+ * ```
60
+ *
61
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.array.html | NumPy array}
62
+ */
63
+ declare function tensor(data: NestedArray | StringNestedArray | TypedArray, opts?: TensorCreateOptions): Tensor;
64
+ /**
65
+ * All zeros.
66
+ */
67
+ declare function zeros(shape: Shape, opts?: TensorCreateOptions): Tensor;
68
+ /**
69
+ * All ones.
70
+ */
71
+ declare function ones(shape: Shape, opts?: TensorCreateOptions): Tensor;
72
+ /**
73
+ * Fill with a scalar value.
74
+ */
75
+ declare function empty(shape: Shape, opts?: TensorCreateOptions): Tensor;
76
+ declare function full(shape: Shape, value: number | string, opts?: TensorCreateOptions): Tensor;
77
+ /**
78
+ * Range.
79
+ */
80
+ declare function arange(start: number, stop?: number, step?: number, opts?: TensorCreateOptions): Tensor;
81
+ /**
82
+ * Evenly spaced numbers over a specified interval.
83
+ *
84
+ * Returns `num` evenly spaced samples, calculated over the interval [start, stop].
85
+ *
86
+ * **Algorithm**: Linear interpolation
87
+ *
88
+ * **Parameters**:
89
+ * @param start - Starting value of the sequence
90
+ * @param stop - End value of the sequence
91
+ * @param num - Number of samples to generate (default: 50)
92
+ * @param endpoint - If true, stop is the last sample. Otherwise, it is not included (default: true)
93
+ * @param opts - Tensor options (dtype, device)
94
+ *
95
+ * **Returns**: Tensor of shape (num,)
96
+ *
97
+ * @example
98
+ * ```ts
99
+ * import { linspace } from 'deepbox/ndarray';
100
+ *
101
+ * const x = linspace(0, 10, 5);
102
+ * // [0, 2.5, 5, 7.5, 10]
103
+ *
104
+ * const y = linspace(0, 10, 5, false);
105
+ * // [0, 2, 4, 6, 8]
106
+ * ```
107
+ *
108
+ * @throws {RangeError} If num < 0
109
+ *
110
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.linspace.html | NumPy linspace}
111
+ */
112
+ declare function linspace(start: number, stop: number, num?: number, endpoint?: boolean, opts?: TensorCreateOptions): Tensor;
113
+ /**
114
+ * Numbers spaced evenly on a log scale.
115
+ *
116
+ * In linear space, the sequence starts at base^start and ends with base^stop.
117
+ *
118
+ * **Parameters**:
119
+ * @param start - base^start is the starting value
120
+ * @param stop - base^stop is the final value
121
+ * @param num - Number of samples to generate (default: 50)
122
+ * @param base - The base of the log space (default: 10)
123
+ * @param endpoint - If true, stop is the last sample (default: true)
124
+ * @param opts - Tensor options
125
+ *
126
+ * **Returns**: Tensor of shape (num,)
127
+ *
128
+ * @example
129
+ * ```ts
130
+ * import { logspace } from 'deepbox/ndarray';
131
+ *
132
+ * const x = logspace(0, 3, 4);
133
+ * // [1, 10, 100, 1000]
134
+ * ```
135
+ *
136
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.logspace.html | NumPy logspace}
137
+ */
138
+ declare function logspace(start: number, stop: number, num?: number, base?: number, endpoint?: boolean, opts?: TensorCreateOptions): Tensor;
139
+ /**
140
+ * Numbers spaced evenly on a log scale (geometric progression).
141
+ *
142
+ * Each output value is a constant multiple of the previous.
143
+ *
144
+ * **Parameters**:
145
+ * @param start - Starting value of the sequence
146
+ * @param stop - Final value of the sequence
147
+ * @param num - Number of samples (default: 50)
148
+ * @param endpoint - If true, stop is the last sample (default: true)
149
+ * @param opts - Tensor options
150
+ *
151
+ * **Returns**: Tensor of shape (num,)
152
+ *
153
+ * @example
154
+ * ```ts
155
+ * import { geomspace } from 'deepbox/ndarray';
156
+ *
157
+ * const x = geomspace(1, 1000, 4);
158
+ * // [1, 10, 100, 1000]
159
+ * ```
160
+ *
161
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.geomspace.html | NumPy geomspace}
162
+ */
163
+ declare function geomspace(start: number, stop: number, num?: number, endpoint?: boolean, opts?: TensorCreateOptions): Tensor;
164
+ /**
165
+ * Identity matrix.
166
+ *
167
+ * Returns a 2D tensor with ones on the diagonal and zeros elsewhere.
168
+ *
169
+ * **Parameters**:
170
+ * @param n - Number of rows
171
+ * @param m - Number of columns (default: n, making it square)
172
+ * @param k - Index of the diagonal (default: 0, main diagonal)
173
+ * @param opts - Tensor options
174
+ *
175
+ * **Returns**: Tensor of shape (n, m)
176
+ *
177
+ * @example
178
+ * ```ts
179
+ * import { eye } from 'deepbox/ndarray';
180
+ *
181
+ * const I = eye(3);
182
+ * // [[1, 0, 0],
183
+ * // [0, 1, 0],
184
+ * // [0, 0, 1]]
185
+ *
186
+ * const A = eye(3, 4, 1);
187
+ * // [[0, 1, 0, 0],
188
+ * // [0, 0, 1, 0],
189
+ * // [0, 0, 0, 1]]
190
+ * ```
191
+ *
192
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.eye.html | NumPy eye}
193
+ */
194
+ declare function eye(n: number, m?: number, k?: number, opts?: TensorCreateOptions): Tensor;
195
+ /**
196
+ * Return a tensor filled with random samples from a standard normal distribution.
197
+ *
198
+ * @param shape - Shape of the output tensor
199
+ * @param opts - Additional tensor options
200
+ *
201
+ * @example
202
+ * ```ts
203
+ * import { randn } from 'deepbox/ndarray';
204
+ *
205
+ * const x = randn([2, 3]);
206
+ * // Random values from N(0, 1)
207
+ * ```
208
+ *
209
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.randn.html | NumPy randn}
210
+ */
211
+ declare function randn(shape: Shape, opts?: TensorCreateOptions): Tensor;
212
+
213
+ /**
214
+ * Change shape (view) without copying.
215
+ *
216
+ * Notes:
217
+ * - Currently only supports contiguous tensors.
218
+ * - In the future, reshape should support more view cases using strides.
219
+ */
220
+ declare function reshape(t: Tensor, newShape: Shape): Tensor;
221
+ /**
222
+ * Flatten to 1D.
223
+ */
224
+ declare function flatten(t: Tensor): Tensor;
225
+ /**
226
+ * Transpose tensor dimensions.
227
+ *
228
+ * Reverses or permutes the axes of a tensor.
229
+ *
230
+ * @param t - Input tensor
231
+ * @param axes - Permutation of axes. If undefined, reverses all axes
232
+ * @returns Transposed tensor
233
+ *
234
+ * @example
235
+ * ```ts
236
+ * import { transpose, tensor } from 'deepbox/ndarray';
237
+ *
238
+ * const x = tensor([[1, 2], [3, 4]]); // shape: (2, 2)
239
+ * const y = transpose(x); // shape: (2, 2), values: [[1, 3], [2, 4]]
240
+ *
241
+ * const z = tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]); // shape: (2, 2, 2)
242
+ * const w = transpose(z, [2, 0, 1]); // shape: (2, 2, 2), axes permuted
243
+ * ```
244
+ *
245
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.transpose.html | NumPy transpose}
246
+ */
247
+ declare function transpose(t: Tensor, axes?: readonly number[]): Tensor;
248
+
249
+ /**
250
+ * Compute dot product or matrix multiplication.
251
+ *
252
+ * Supported cases:
253
+ * - Both 1-D (vector, vector): inner product, returns a scalar tensor
254
+ * - Both 2-D (matrix, matrix): standard matrix multiplication (m,k) x (k,n) -> (m,n)
255
+ * - 2-D x 1-D (matrix, vector): matrix-vector product (m,k) x (k,) -> (m,)
256
+ * - Both 3-D: batch matrix multiplication (b,m,k) x (b,k,n) -> (b,m,n)
257
+ *
258
+ * Other combinations (e.g., 1-D x 2-D, mixed dimensionalities above 3-D)
259
+ * are not yet implemented and will throw a ShapeError.
260
+ *
261
+ * @param a - First tensor
262
+ * @param b - Second tensor
263
+ * @returns Dot product result
264
+ */
265
+ declare function dot(a: Tensor, b: Tensor): Tensor;
266
+
267
+ /**
268
+ * Sigmoid activation function.
269
+ *
270
+ * Applies element-wise: sigmoid(x) = 1 / (1 + exp(-x))
271
+ *
272
+ * **Properties**:
273
+ * - Output range: (0, 1)
274
+ * - Smooth gradient
275
+ * - Can suffer from vanishing gradients
276
+ *
277
+ * @example
278
+ * ```ts
279
+ * import { sigmoid, tensor } from 'deepbox/ndarray';
280
+ *
281
+ * const x = tensor([-1, 0, 1]);
282
+ * const result = sigmoid(x); // [0.268..., 0.5, 0.731...]
283
+ * ```
284
+ *
285
+ * @see {@link https://en.wikipedia.org/wiki/Sigmoid_function | Wikipedia: Sigmoid}
286
+ */
287
+ declare function sigmoid(t: Tensor): Tensor;
288
+ /**
289
+ * Rectified Linear Unit activation.
290
+ *
291
+ * Applies element-wise: relu(x) = max(0, x)
292
+ *
293
+ * **Properties**:
294
+ * - Output range: [0, ∞)
295
+ * - Non-linear but simple
296
+ * - Can suffer from dying ReLU problem
297
+ *
298
+ * @example
299
+ * ```ts
300
+ * import { relu, tensor } from 'deepbox/ndarray';
301
+ *
302
+ * const x = tensor([-1, 0, 1]);
303
+ * const result = relu(x); // [0, 0, 1]
304
+ * ```
305
+ *
306
+ * @see {@link https://en.wikipedia.org/wiki/Rectifier_(neural_networks) | Wikipedia: ReLU}
307
+ */
308
+ declare function relu(t: Tensor): Tensor;
309
+ /**
310
+ * Leaky ReLU activation.
311
+ *
312
+ * Applies element-wise: leaky_relu(x) = max(alpha * x, x)
313
+ *
314
+ * **Parameters**:
315
+ * @param t - Input tensor
316
+ * @param alpha - Slope for negative values (default: 0.01)
317
+ *
318
+ * @example
319
+ * ```ts
320
+ * import { leakyRelu, tensor } from 'deepbox/ndarray';
321
+ *
322
+ * const x = tensor([-1, 0, 1]);
323
+ * const result = leakyRelu(x, 0.1); // [-0.1, 0, 1]
324
+ * ```
325
+ */
326
+ declare function leakyRelu(t: Tensor, alpha?: number): Tensor;
327
+ /**
328
+ * Exponential Linear Unit activation.
329
+ *
330
+ * Applies element-wise:
331
+ * - elu(x) = x if x > 0
332
+ * - elu(x) = alpha * (exp(x) - 1) if x <= 0
333
+ *
334
+ * **Parameters**:
335
+ * @param t - Input tensor
336
+ * @param alpha - Scale for negative values (default: 1.0)
337
+ *
338
+ * @example
339
+ * ```ts
340
+ * import { elu, tensor } from 'deepbox/ndarray';
341
+ *
342
+ * const x = tensor([-1, 0, 1]);
343
+ * const result = elu(x);
344
+ * ```
345
+ */
346
+ declare function elu(t: Tensor, alpha?: number): Tensor;
347
+ /**
348
+ * Gaussian Error Linear Unit activation.
349
+ *
350
+ * Applies element-wise: gelu(x) = x * Φ(x)
351
+ * where Φ(x) is the cumulative distribution function of the standard normal distribution.
352
+ *
353
+ * **Algorithm**: Can use tanh approximation or erf function
354
+ *
355
+ * @example
356
+ * ```ts
357
+ * import { gelu, tensor } from 'deepbox/ndarray';
358
+ *
359
+ * const x = tensor([-1, 0, 1]);
360
+ * const result = gelu(x);
361
+ * ```
362
+ *
363
+ * @see Hendrycks & Gimpel (2016) "Gaussian Error Linear Units (GELUs)"
364
+ */
365
+ declare function gelu(t: Tensor): Tensor;
366
+ /**
367
+ * Softmax activation function.
368
+ *
369
+ * Normalizes input to probability distribution over classes.
370
+ * Applies along specified axis: softmax(x)_i = exp(x_i - max(x)) / sum(exp(x - max(x)))
371
+ *
372
+ * Uses numerically stable computation by subtracting the maximum value
373
+ * along the axis before exponentiating.
374
+ *
375
+ * **Parameters**:
376
+ * @param t - Input tensor of any dimensionality
377
+ * @param axis - Axis along which to compute softmax (default: -1, i.e., last axis)
378
+ *
379
+ * **Properties**:
380
+ * - Output sums to 1 along the specified axis
381
+ * - Output values in (0, 1)
382
+ * - Supports tensors of any dimensionality
383
+ *
384
+ * Output dtype:
385
+ * - `float64`
386
+ *
387
+ * @example
388
+ * ```ts
389
+ * import { softmax, tensor } from 'deepbox/ndarray';
390
+ *
391
+ * const x = tensor([[1, 2, 3], [1, 2, 3]]);
392
+ * const result = softmax(x, 1); // Each row sums to 1
393
+ *
394
+ * const x3d = tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
395
+ * const result3d = softmax(x3d, -1); // Softmax along last axis
396
+ * ```
397
+ *
398
+ * @see {@link https://en.wikipedia.org/wiki/Softmax_function | Wikipedia: Softmax}
399
+ */
400
+ declare function softmax(t: Tensor, axis?: Axis): Tensor;
401
+ /**
402
+ * Log-Softmax activation function.
403
+ *
404
+ * Computes log(softmax(x)) in a numerically stable way.
405
+ * Uses the identity: log_softmax(x) = x - max(x) - log(sum(exp(x - max(x))))
406
+ *
407
+ * **Parameters**:
408
+ * @param t - Input tensor of any dimensionality
409
+ * @param axis - Axis along which to compute log-softmax (default: -1, i.e., last axis)
410
+ *
411
+ * **Properties**:
412
+ * - More numerically stable than computing log(softmax(x)) directly
413
+ * - Output values are log probabilities (negative values, sum to 0 when exp'd)
414
+ * - Supports tensors of any dimensionality
415
+ *
416
+ * Output dtype:
417
+ * - `float64`
418
+ *
419
+ * **Performance**:
420
+ * - Time complexity: O(n) where n is the tensor size
421
+ * - Space complexity: O(axisSize) for temporary buffer (reused across slices)
422
+ * - More efficient than computing log(softmax(x)) separately
423
+ *
424
+ * @example
425
+ * ```ts
426
+ * import { logSoftmax, tensor } from 'deepbox/ndarray';
427
+ *
428
+ * const x = tensor([[1, 2, 3]]);
429
+ * const result = logSoftmax(x, 1);
430
+ *
431
+ * const x3d = tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]);
432
+ * const result3d = logSoftmax(x3d, -1); // Log-softmax along last axis
433
+ * ```
434
+ */
435
+ declare function logSoftmax(t: Tensor, axis?: Axis): Tensor;
436
+ /**
437
+ * Swish activation function (also known as SiLU).
438
+ *
439
+ * Applies element-wise: swish(x) = x * sigmoid(x)
440
+ *
441
+ * @example
442
+ * ```ts
443
+ * import { swish, tensor } from 'deepbox/ndarray';
444
+ *
445
+ * const x = tensor([-1, 0, 1]);
446
+ * const result = swish(x);
447
+ * ```
448
+ *
449
+ * @see Ramachandran et al. (2017) "Searching for Activation Functions"
450
+ */
451
+ declare function swish(t: Tensor): Tensor;
452
+ /**
453
+ * Mish activation function.
454
+ *
455
+ * Applies element-wise: mish(x) = x * tanh(softplus(x))
456
+ * where softplus(x) = log(1 + exp(x))
457
+ *
458
+ * @example
459
+ * ```ts
460
+ * import { mish, tensor } from 'deepbox/ndarray';
461
+ *
462
+ * const x = tensor([-1, 0, 1]);
463
+ * const result = mish(x);
464
+ * ```
465
+ *
466
+ * @see Misra (2019) "Mish: A Self Regularized Non-Monotonic Activation Function"
467
+ */
468
+ declare function mish(t: Tensor): Tensor;
469
+ /**
470
+ * Softplus activation function.
471
+ *
472
+ * Smooth approximation of ReLU: softplus(x) = log(1 + exp(x))
473
+ *
474
+ * @example
475
+ * ```ts
476
+ * import { softplus, tensor } from 'deepbox/ndarray';
477
+ *
478
+ * const x = tensor([-1, 0, 1]);
479
+ * const result = softplus(x);
480
+ * ```
481
+ */
482
+ declare function softplus(t: Tensor): Tensor;
483
+
484
+ /**
485
+ * Image to Column operation (im2col).
486
+ *
487
+ * Rearranges image blocks into columns.
488
+ *
489
+ * @param input - Input tensor of shape (batch, channels, height, width)
490
+ * @param kernelSize - Size of the kernel [kH, kW]
491
+ * @param stride - Stride [sH, sW]
492
+ * @param padding - Padding [pH, pW]
493
+ * @returns Output tensor of shape (batch, outH * outW, channels * kH * kW)
494
+ */
495
+ declare function im2col(input: Tensor, kernelSize: [number, number], stride: [number, number], padding: [number, number]): Tensor;
496
+ /**
497
+ * Column to Image operation (col2im).
498
+ *
499
+ * Rearranges columns back into image blocks (accumulating values).
500
+ * Used for gradient computation.
501
+ *
502
+ * @param cols - Column tensor of shape (batch, outH * outW, channels * kH * kW)
503
+ * @param inputShape - Shape of the original image (batch, channels, height, width)
504
+ * @param kernelSize - Size of the kernel [kH, kW]
505
+ * @param stride - Stride [sH, sW]
506
+ * @param padding - Padding [pH, pW]
507
+ * @returns Gradient tensor of shape inputShape
508
+ */
509
+ declare function col2im(cols: Tensor, inputShape: Shape, kernelSize: [number, number], stride: [number, number], padding: [number, number]): Tensor;
510
+
511
+ /**
512
+ * Element-wise addition.
513
+ *
514
+ * Supported:
515
+ * - Standard broadcasting semantics (including scalar and dimension-1 broadcast)
516
+ */
517
+ declare function add(a: Tensor, b: Tensor): Tensor;
518
+ /**
519
+ * Element-wise subtraction.
520
+ *
521
+ * Computes a - b element by element.
522
+ *
523
+ * Broadcasting: supports standard numpy-style broadcasting.
524
+ *
525
+ * @param a - First tensor
526
+ * @param b - Second tensor
527
+ * @returns Tensor containing element-wise difference
528
+ *
529
+ * @example
530
+ * ```ts
531
+ * import { sub, tensor } from 'deepbox/ndarray';
532
+ *
533
+ * const a = tensor([5, 6, 7]);
534
+ * const b = tensor([1, 2, 3]);
535
+ * const result = sub(a, b); // [4, 4, 4]
536
+ * ```
537
+ *
538
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.subtract.html | NumPy subtract}
539
+ */
540
+ declare function sub(a: Tensor, b: Tensor): Tensor;
541
+ /**
542
+ * Element-wise multiplication.
543
+ *
544
+ * Computes a * b element by element.
545
+ *
546
+ * Broadcasting: supports standard numpy-style broadcasting.
547
+ *
548
+ * @param a - First tensor
549
+ * @param b - Second tensor
550
+ * @returns Tensor containing element-wise product
551
+ *
552
+ * @example
553
+ * ```ts
554
+ * import { mul, tensor } from 'deepbox/ndarray';
555
+ *
556
+ * const a = tensor([2, 3, 4]);
557
+ * const b = tensor([5, 6, 7]);
558
+ * const result = mul(a, b); // [10, 18, 28]
559
+ * ```
560
+ *
561
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.multiply.html | NumPy multiply}
562
+ */
563
+ declare function mul(a: Tensor, b: Tensor): Tensor;
564
+ /**
565
+ * Element-wise division.
566
+ *
567
+ * Computes a / b element by element.
568
+ *
569
+ * Broadcasting: supports standard numpy-style broadcasting.
570
+ *
571
+ * @param a - Numerator tensor
572
+ * @param b - Denominator tensor
573
+ * @returns Tensor containing element-wise quotient
574
+ *
575
+ * @example
576
+ * ```ts
577
+ * import { div, tensor } from 'deepbox/ndarray';
578
+ *
579
+ * const a = tensor([10, 20, 30]);
580
+ * const b = tensor([2, 4, 5]);
581
+ * const result = div(a, b); // [5, 5, 6]
582
+ * ```
583
+ *
584
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.divide.html | NumPy divide}
585
+ */
586
+ declare function div(a: Tensor, b: Tensor): Tensor;
587
+ /**
588
+ * Add a scalar value to all elements of a tensor.
589
+ *
590
+ * @param t - Input tensor
591
+ * @param s - Scalar value to add
592
+ * @returns New tensor with scalar added to all elements
593
+ *
594
+ * @example
595
+ * ```ts
596
+ * import { addScalar, tensor } from 'deepbox/ndarray';
597
+ *
598
+ * const x = tensor([1, 2, 3]);
599
+ * const result = addScalar(x, 10); // [11, 12, 13]
600
+ * ```
601
+ */
602
+ declare function addScalar(t: Tensor, s: number): Tensor;
603
+ /**
604
+ * Multiply all elements of a tensor by a scalar value.
605
+ *
606
+ * @param t - Input tensor
607
+ * @param s - Scalar multiplier
608
+ * @returns New tensor with all elements multiplied by scalar
609
+ *
610
+ * @example
611
+ * ```ts
612
+ * import { mulScalar, tensor } from 'deepbox/ndarray';
613
+ *
614
+ * const x = tensor([1, 2, 3]);
615
+ * const result = mulScalar(x, 10); // [10, 20, 30]
616
+ * ```
617
+ */
618
+ declare function mulScalar(t: Tensor, s: number): Tensor;
619
+ /**
620
+ * Element-wise floor division.
621
+ *
622
+ * Computes the largest integer less than or equal to the quotient.
623
+ *
624
+ * @example
625
+ * ```ts
626
+ * import { floorDiv, tensor } from 'deepbox/ndarray';
627
+ *
628
+ * const a = tensor([7, 8, 9]);
629
+ * const b = tensor([3, 3, 3]);
630
+ * const result = floorDiv(a, b); // [2, 2, 3]
631
+ * ```
632
+ *
633
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.floor_divide.html | NumPy floor_divide}
634
+ */
635
+ declare function floorDiv(a: Tensor, b: Tensor): Tensor;
636
+ /**
637
+ * Element-wise modulo operation.
638
+ *
639
+ * Returns remainder of division.
640
+ *
641
+ * @example
642
+ * ```ts
643
+ * import { mod, tensor } from 'deepbox/ndarray';
644
+ *
645
+ * const a = tensor([7, 8, 9]);
646
+ * const b = tensor([3, 3, 3]);
647
+ * const result = mod(a, b); // [1, 2, 0]
648
+ * ```
649
+ *
650
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.mod.html | NumPy mod}
651
+ */
652
+ declare function mod(a: Tensor, b: Tensor): Tensor;
653
+ /**
654
+ * Element-wise power.
655
+ *
656
+ * Raises elements of first tensor to powers from second tensor.
657
+ *
658
+ * @example
659
+ * ```ts
660
+ * import { pow, tensor } from 'deepbox/ndarray';
661
+ *
662
+ * const a = tensor([2, 3, 4]);
663
+ * const b = tensor([2, 3, 2]);
664
+ * const result = pow(a, b); // [4, 27, 16]
665
+ * ```
666
+ *
667
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.power.html | NumPy power}
668
+ */
669
+ declare function pow(a: Tensor, b: Tensor): Tensor;
670
+ /**
671
+ * Element-wise negation.
672
+ *
673
+ * Returns -x for each element.
674
+ *
675
+ * @example
676
+ * ```ts
677
+ * import { neg, tensor } from 'deepbox/ndarray';
678
+ *
679
+ * const x = tensor([1, -2, 3]);
680
+ * const result = neg(x); // [-1, 2, -3]
681
+ * ```
682
+ */
683
+ declare function neg(t: Tensor): Tensor;
684
+ /**
685
+ * Element-wise absolute value.
686
+ *
687
+ * Returns |x| for each element.
688
+ *
689
+ * @example
690
+ * ```ts
691
+ * import { abs, tensor } from 'deepbox/ndarray';
692
+ *
693
+ * const x = tensor([-1, 2, -3]);
694
+ * const result = abs(x); // [1, 2, 3]
695
+ * ```
696
+ *
697
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.absolute.html | NumPy absolute}
698
+ */
699
+ declare function abs(t: Tensor): Tensor;
700
+ /**
701
+ * Element-wise sign function.
702
+ *
703
+ * Returns -1, 0, or 1 depending on sign of each element.
704
+ *
705
+ * @example
706
+ * ```ts
707
+ * import { sign, tensor } from 'deepbox/ndarray';
708
+ *
709
+ * const x = tensor([-5, 0, 3]);
710
+ * const result = sign(x); // [-1, 0, 1]
711
+ * ```
712
+ *
713
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.sign.html | NumPy sign}
714
+ */
715
+ declare function sign(t: Tensor): Tensor;
716
+ /**
717
+ * Element-wise reciprocal.
718
+ *
719
+ * Returns 1/x for each element.
720
+ *
721
+ * @example
722
+ * ```ts
723
+ * import { reciprocal, tensor } from 'deepbox/ndarray';
724
+ *
725
+ * const x = tensor([2, 4, 8]);
726
+ * const result = reciprocal(x); // [0.5, 0.25, 0.125]
727
+ * ```
728
+ */
729
+ declare function reciprocal(t: Tensor): Tensor;
730
+ /**
731
+ * Element-wise maximum of two tensors.
732
+ *
733
+ * @example
734
+ * ```ts
735
+ * import { maximum, tensor } from 'deepbox/ndarray';
736
+ *
737
+ * const a = tensor([1, 5, 3]);
738
+ * const b = tensor([4, 2, 6]);
739
+ * const result = maximum(a, b); // [4, 5, 6]
740
+ * ```
741
+ *
742
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.maximum.html | NumPy maximum}
743
+ */
744
+ declare function maximum(a: Tensor, b: Tensor): Tensor;
745
+ /**
746
+ * Element-wise minimum of two tensors.
747
+ *
748
+ * @example
749
+ * ```ts
750
+ * import { minimum, tensor } from 'deepbox/ndarray';
751
+ *
752
+ * const a = tensor([1, 5, 3]);
753
+ * const b = tensor([4, 2, 6]);
754
+ * const result = minimum(a, b); // [1, 2, 3]
755
+ * ```
756
+ *
757
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.minimum.html | NumPy minimum}
758
+ */
759
+ declare function minimum(a: Tensor, b: Tensor): Tensor;
760
+ /**
761
+ * Clip (limit) values in tensor.
762
+ *
763
+ * Given an interval, values outside the interval are clipped to interval edges.
764
+ *
765
+ * **Parameters**:
766
+ * @param t - Input tensor
767
+ * @param min - Minimum value. If undefined, no lower clipping
768
+ * @param max - Maximum value. If undefined, no upper clipping
769
+ *
770
+ * @example
771
+ * ```ts
772
+ * import { clip, tensor } from 'deepbox/ndarray';
773
+ *
774
+ * const x = tensor([1, 2, 3, 4, 5]);
775
+ * const result = clip(x, 2, 4); // [2, 2, 3, 4, 4]
776
+ * ```
777
+ *
778
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.clip.html | NumPy clip}
779
+ */
780
+ declare function clip(t: Tensor, min?: number, max?: number): Tensor;
781
+
782
+ /**
783
+ * Element-wise equality.
784
+ *
785
+ * Output dtype:
786
+ * - `bool`
787
+ */
788
+ declare function equal(a: Tensor, b: Tensor): Tensor;
789
+ /**
790
+ * Element-wise inequality (not equal) comparison.
791
+ *
792
+ * Returns a boolean tensor where each element is true (1) if the
793
+ * corresponding elements in a and b are not equal, false (0) otherwise.
794
+ *
795
+ * @param a - First input tensor
796
+ * @param b - Second input tensor
797
+ * @returns Boolean tensor of same shape as inputs
798
+ */
799
+ declare function notEqual(a: Tensor, b: Tensor): Tensor;
800
+ /**
801
+ * Element-wise greater than comparison (a > b).
802
+ *
803
+ * Returns a boolean tensor where each element is true (1) if the
804
+ * corresponding element in a is greater than the element in b.
805
+ *
806
+ * @param a - First input tensor
807
+ * @param b - Second input tensor
808
+ * @returns Boolean tensor with comparison results
809
+ */
810
+ declare function greater(a: Tensor, b: Tensor): Tensor;
811
+ /**
812
+ * Element-wise greater than or equal comparison (a >= b).
813
+ *
814
+ * @param a - First input tensor
815
+ * @param b - Second input tensor
816
+ * @returns Boolean tensor with comparison results
817
+ */
818
+ declare function greaterEqual(a: Tensor, b: Tensor): Tensor;
819
+ /**
820
+ * Element-wise less than comparison (a < b).
821
+ *
822
+ * @param a - First input tensor
823
+ * @param b - Second input tensor
824
+ * @returns Boolean tensor with comparison results
825
+ */
826
+ declare function less(a: Tensor, b: Tensor): Tensor;
827
+ /**
828
+ * Element-wise less than or equal comparison (a <= b).
829
+ *
830
+ * @param a - First input tensor
831
+ * @param b - Second input tensor
832
+ * @returns Boolean tensor with comparison results
833
+ */
834
+ declare function lessEqual(a: Tensor, b: Tensor): Tensor;
835
+ /**
836
+ * Element-wise test for approximate equality within tolerance.
837
+ *
838
+ * Returns true where: |a - b| <= (atol + rtol * |b|)
839
+ *
840
+ * Useful for floating-point comparisons where exact equality is unreliable.
841
+ *
842
+ * @param a - First input tensor
843
+ * @param b - Second input tensor
844
+ * @param rtol - Relative tolerance (default: 1e-5)
845
+ * @param atol - Absolute tolerance (default: 1e-8)
846
+ * @returns Boolean tensor with closeness test results
847
+ */
848
+ declare function isclose(a: Tensor, b: Tensor, rtol?: number, atol?: number): Tensor;
849
+ /**
850
+ * Test whether all corresponding elements are close within tolerance.
851
+ *
852
+ * Returns a single boolean (not a tensor) indicating if ALL elements pass
853
+ * the closeness test.
854
+ *
855
+ * @param a - First input tensor
856
+ * @param b - Second input tensor
857
+ * @param rtol - Relative tolerance (default: 1e-5)
858
+ * @param atol - Absolute tolerance (default: 1e-8)
859
+ * @returns True if all elements are close, false otherwise
860
+ */
861
+ declare function allclose(a: Tensor, b: Tensor, rtol?: number, atol?: number): boolean;
862
+ /**
863
+ * Test for exact array equality (shape, dtype, and all values).
864
+ *
865
+ * Returns a single boolean indicating if tensors are identical.
866
+ *
867
+ * @param a - First input tensor
868
+ * @param b - Second input tensor
869
+ * @returns True if arrays are exactly equal, false otherwise
870
+ */
871
+ declare function arrayEqual(a: Tensor, b: Tensor): boolean;
872
+ /**
873
+ * Element-wise test for NaN (Not a Number) values.
874
+ *
875
+ * Returns true (1) for NaN elements, false (0) otherwise.
876
+ *
877
+ * @param t - Input tensor
878
+ * @returns Boolean tensor with NaN test results
879
+ */
880
+ declare function isnan(t: Tensor): Tensor;
881
+ /**
882
+ * Element-wise test for infinity (+Inf or -Inf).
883
+ *
884
+ * Returns true (1) for infinite elements, false (0) otherwise.
885
+ * Note: NaN is NOT considered infinite.
886
+ *
887
+ * @param t - Input tensor
888
+ * @returns Boolean tensor with infinity test results
889
+ */
890
+ declare function isinf(t: Tensor): Tensor;
891
+ /**
892
+ * Element-wise test for finite values (not NaN, not Inf).
893
+ *
894
+ * Returns true (1) for finite elements, false (0) for NaN or Inf.
895
+ *
896
+ * @param t - Input tensor
897
+ * @returns Boolean tensor with finite test results
898
+ */
899
+ declare function isfinite(t: Tensor): Tensor;
900
+
901
+ /**
902
+ * Element-wise logical AND.
903
+ *
904
+ * Returns true (1) where both inputs are non-zero (truthy), false (0) otherwise.
905
+ *
906
+ * @param a - First input tensor
907
+ * @param b - Second input tensor
908
+ * @returns Boolean tensor with AND results
909
+ *
910
+ * @example
911
+ * ```ts
912
+ * const a = tensor([1, 0, 1]);
913
+ * const b = tensor([1, 1, 0]);
914
+ * logicalAnd(a, b); // tensor([1, 0, 0])
915
+ * ```
916
+ *
917
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.logical_and.html | NumPy logical_and}
918
+ */
919
+ declare function logicalAnd(a: Tensor, b: Tensor): Tensor;
920
+ /**
921
+ * Element-wise logical OR.
922
+ *
923
+ * Returns true (1) where at least one input is non-zero (truthy).
924
+ *
925
+ * @param a - First input tensor
926
+ * @param b - Second input tensor
927
+ * @returns Boolean tensor with OR results
928
+ *
929
+ * @example
930
+ * ```ts
931
+ * const a = tensor([1, 0, 0]);
932
+ * const b = tensor([1, 1, 0]);
933
+ * logicalOr(a, b); // tensor([1, 1, 0])
934
+ * ```
935
+ *
936
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.logical_or.html | NumPy logical_or}
937
+ */
938
+ declare function logicalOr(a: Tensor, b: Tensor): Tensor;
939
+ /**
940
+ * Element-wise logical XOR (exclusive OR).
941
+ *
942
+ * Returns true (1) where exactly one input is non-zero (true but not both).
943
+ *
944
+ * @param a - First input tensor
945
+ * @param b - Second input tensor
946
+ * @returns Boolean tensor with XOR results
947
+ *
948
+ * @example
949
+ * ```ts
950
+ * const a = tensor([1, 0, 1, 0]);
951
+ * const b = tensor([1, 1, 0, 0]);
952
+ * logicalXor(a, b); // tensor([0, 1, 1, 0])
953
+ * ```
954
+ *
955
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.logical_xor.html | NumPy logical_xor}
956
+ */
957
+ declare function logicalXor(a: Tensor, b: Tensor): Tensor;
958
+ /**
959
+ * Element-wise logical NOT.
960
+ *
961
+ * Returns true (1) for zero elements, false (0) for non-zero elements.
962
+ * Inverts the truthiness of each element.
963
+ *
964
+ * @param t - Input tensor
965
+ * @returns Boolean tensor with NOT results
966
+ *
967
+ * @example
968
+ * ```ts
969
+ * const t = tensor([1, 0, 5, 0]);
970
+ * logicalNot(t); // tensor([0, 1, 0, 1])
971
+ * ```
972
+ *
973
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.logical_not.html | NumPy logical_not}
974
+ */
975
+ declare function logicalNot(t: Tensor): Tensor;
976
+
977
+ /**
978
+ * Tensor manipulation operations.
979
+ *
980
+ * This module provides functions for manipulating tensor structure and content:
981
+ * - concatenate: Join tensors along an axis
982
+ * - stack: Stack tensors along a new axis
983
+ * - split: Split tensor into multiple sub-tensors
984
+ * - tile: Repeat tensor along axes
985
+ * - repeat: Repeat elements along an axis
986
+ * - pad: Add padding to tensor
987
+ * - flip: Reverse tensor along axes
988
+ *
989
+ * All operations maintain type safety and proper error handling.
990
+ */
991
+
992
+ /**
993
+ * Concatenate tensors along an existing axis.
994
+ *
995
+ * All tensors must have the same shape except in the concatenation dimension.
996
+ * The output dtype is determined by the first tensor.
997
+ *
998
+ * **Complexity**: O(n) where n is total number of elements
999
+ *
1000
+ * @param tensors - Array of tensors to concatenate
1001
+ * @param axis - Axis along which to concatenate (default: 0)
1002
+ * @returns Concatenated tensor
1003
+ *
1004
+ * @example
1005
+ * ```ts
1006
+ * const a = tensor([[1, 2], [3, 4]]);
1007
+ * const b = tensor([[5, 6]]);
1008
+ * const c = concatenate([a, b], 0); // [[1, 2], [3, 4], [5, 6]]
1009
+ * ```
1010
+ *
1011
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.concatenate.html | NumPy concatenate}
1012
+ */
1013
+ declare function concatenate(tensors: Tensor[], axis?: Axis): Tensor;
1014
+ /**
1015
+ * Stack tensors along a new axis.
1016
+ *
1017
+ * All tensors must have exactly the same shape.
1018
+ * Creates a new dimension at the specified axis.
1019
+ *
1020
+ * **Complexity**: O(n) where n is total number of elements
1021
+ *
1022
+ * @param tensors - Array of tensors to stack
1023
+ * @param axis - Axis along which to stack (default: 0)
1024
+ * @returns Stacked tensor
1025
+ *
1026
+ * @example
1027
+ * ```ts
1028
+ * const a = tensor([1, 2, 3]);
1029
+ * const b = tensor([4, 5, 6]);
1030
+ * const c = stack([a, b], 0); // [[1, 2, 3], [4, 5, 6]]
1031
+ * const d = stack([a, b], 1); // [[1, 4], [2, 5], [3, 6]]
1032
+ * ```
1033
+ *
1034
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.stack.html | NumPy stack}
1035
+ */
1036
+ declare function stack(tensors: Tensor[], axis?: Axis): Tensor;
1037
+ /**
1038
+ * Split tensor into multiple sub-tensors along an axis.
1039
+ *
1040
+ * If indices_or_sections is an integer, the tensor is split into that many
1041
+ * equal parts (axis dimension must be divisible).
1042
+ * If it's an array, it specifies the indices where to split.
1043
+ *
1044
+ * **Complexity**: O(n) where n is total number of elements
1045
+ *
1046
+ * @param t - Input tensor
1047
+ * @param indices_or_sections - Number of sections or array of split indices
1048
+ * @param axis - Axis along which to split (default: 0)
1049
+ * @returns Array of sub-tensors
1050
+ *
1051
+ * @example
1052
+ * ```ts
1053
+ * const t = tensor([1, 2, 3, 4, 5, 6]);
1054
+ * const parts = split(t, 3); // [tensor([1, 2]), tensor([3, 4]), tensor([5, 6])]
1055
+ * const parts2 = split(t, [2, 4]); // [tensor([1, 2]), tensor([3, 4]), tensor([5, 6])]
1056
+ * ```
1057
+ *
1058
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.split.html | NumPy split}
1059
+ */
1060
+ declare function split(t: Tensor, indices_or_sections: number | number[], axis?: Axis): Tensor[];
1061
+ /**
1062
+ * Repeat tensor along axes by tiling.
1063
+ *
1064
+ * Constructs a tensor by repeating the input tensor the specified number
1065
+ * of times along each axis.
1066
+ *
1067
+ * **Complexity**: O(n * product(reps)) where n is input size
1068
+ *
1069
+ * @param t - Input tensor
1070
+ * @param reps - Number of repetitions along each axis
1071
+ * @returns Tiled tensor
1072
+ *
1073
+ * @example
1074
+ * ```ts
1075
+ * const t = tensor([[1, 2], [3, 4]]);
1076
+ * const tiled = tile(t, [2, 3]);
1077
+ * // [[1, 2, 1, 2, 1, 2],
1078
+ * // [3, 4, 3, 4, 3, 4],
1079
+ * // [1, 2, 1, 2, 1, 2],
1080
+ * // [3, 4, 3, 4, 3, 4]]
1081
+ * ```
1082
+ *
1083
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.tile.html | NumPy tile}
1084
+ */
1085
+ declare function tile(t: Tensor, reps: number[]): Tensor;
1086
+ /**
1087
+ * Repeat elements of a tensor along an axis.
1088
+ *
1089
+ * Each element is repeated the specified number of times.
1090
+ *
1091
+ * **Complexity**: O(n * repeats) where n is input size
1092
+ *
1093
+ * @param t - Input tensor
1094
+ * @param repeats - Number of times to repeat each element
1095
+ * @param axis - Axis along which to repeat (default: flatten first)
1096
+ * @returns Tensor with repeated elements
1097
+ *
1098
+ * @example
1099
+ * ```ts
1100
+ * const t = tensor([1, 2, 3]);
1101
+ * const r = repeat(t, 2); // [1, 1, 2, 2, 3, 3]
1102
+ * ```
1103
+ *
1104
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.repeat.html | NumPy repeat}
1105
+ */
1106
+ declare function repeat(t: Tensor, repeats: number, axis?: Axis): Tensor;
1107
+
1108
+ /**
1109
+ * Element-wise exponential.
1110
+ *
1111
+ * Output dtype:
1112
+ * - Always `float64` for now.
1113
+ */
1114
+ declare function exp(t: Tensor): Tensor;
1115
+ /**
1116
+ * Element-wise natural logarithm.
1117
+ *
1118
+ * @param t - Input tensor
1119
+ * @returns Tensor with log(x) for each element
1120
+ *
1121
+ * @example
1122
+ * ```ts
1123
+ * import { log, tensor } from 'deepbox/ndarray';
1124
+ *
1125
+ * const x = tensor([1, 2.71828, 7.389]);
1126
+ * const result = log(x); // [0, 1, 2]
1127
+ * ```
1128
+ */
1129
+ declare function log(t: Tensor): Tensor;
1130
+ /**
1131
+ * Element-wise square root.
1132
+ *
1133
+ * @param t - Input tensor
1134
+ * @returns Tensor with sqrt(x) for each element
1135
+ *
1136
+ * @example
1137
+ * ```ts
1138
+ * import { sqrt, tensor } from 'deepbox/ndarray';
1139
+ *
1140
+ * const x = tensor([4, 9, 16]);
1141
+ * const result = sqrt(x); // [2, 3, 4]
1142
+ * ```
1143
+ */
1144
+ declare function sqrt(t: Tensor): Tensor;
1145
+ /**
1146
+ * Element-wise square.
1147
+ *
1148
+ * @example
1149
+ * ```ts
1150
+ * import { square, tensor } from 'deepbox/ndarray';
1151
+ *
1152
+ * const x = tensor([1, 2, 3]);
1153
+ * const result = square(x); // [1, 4, 9]
1154
+ * ```
1155
+ */
1156
+ declare function square(t: Tensor): Tensor;
1157
+ /**
1158
+ * Element-wise reciprocal square root.
1159
+ *
1160
+ * Returns 1/sqrt(x) for each element.
1161
+ *
1162
+ * @example
1163
+ * ```ts
1164
+ * import { rsqrt, tensor } from 'deepbox/ndarray';
1165
+ *
1166
+ * const x = tensor([4, 9, 16]);
1167
+ * const result = rsqrt(x); // [0.5, 0.333..., 0.25]
1168
+ * ```
1169
+ */
1170
+ declare function rsqrt(t: Tensor): Tensor;
1171
+ /**
1172
+ * Element-wise cube root.
1173
+ *
1174
+ * @example
1175
+ * ```ts
1176
+ * import { cbrt, tensor } from 'deepbox/ndarray';
1177
+ *
1178
+ * const x = tensor([8, 27, 64]);
1179
+ * const result = cbrt(x); // [2, 3, 4]
1180
+ * ```
1181
+ */
1182
+ declare function cbrt(t: Tensor): Tensor;
1183
+ /**
1184
+ * Element-wise exp(x) - 1.
1185
+ *
1186
+ * More accurate than computing exp(x) and then subtracting 1 for small x.
1187
+ *
1188
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.expm1.html | NumPy expm1}
1189
+ */
1190
+ declare function expm1(t: Tensor): Tensor;
1191
+ /**
1192
+ * Element-wise base-2 exponential.
1193
+ *
1194
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.exp2.html | NumPy exp2}
1195
+ */
1196
+ declare function exp2(t: Tensor): Tensor;
1197
+ /**
1198
+ * Element-wise log(1 + x).
1199
+ *
1200
+ * More accurate than computing log(1 + x) directly for small x.
1201
+ *
1202
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.log1p.html | NumPy log1p}
1203
+ */
1204
+ declare function log1p(t: Tensor): Tensor;
1205
+ /**
1206
+ * Element-wise base-2 logarithm.
1207
+ *
1208
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.log2.html | NumPy log2}
1209
+ */
1210
+ declare function log2(t: Tensor): Tensor;
1211
+ /**
1212
+ * Element-wise base-10 logarithm.
1213
+ *
1214
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.log10.html | NumPy log10}
1215
+ */
1216
+ declare function log10(t: Tensor): Tensor;
1217
+ /**
1218
+ * Element-wise floor (round down).
1219
+ *
1220
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.floor.html | NumPy floor}
1221
+ */
1222
+ declare function floor(t: Tensor): Tensor;
1223
+ /**
1224
+ * Element-wise ceil (round up).
1225
+ *
1226
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.ceil.html | NumPy ceil}
1227
+ */
1228
+ declare function ceil(t: Tensor): Tensor;
1229
+ /**
1230
+ * Element-wise round to nearest integer.
1231
+ *
1232
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.round.html | NumPy round}
1233
+ */
1234
+ declare function round(t: Tensor): Tensor;
1235
+ /**
1236
+ * Element-wise truncate (round toward zero).
1237
+ *
1238
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.trunc.html | NumPy trunc}
1239
+ */
1240
+ declare function trunc(t: Tensor): Tensor;
1241
+
1242
+ type NumericDType = Exclude<DType, "string">;
1243
+ /**
1244
+ * Generate a tensor of Bernoulli samples scaled by a constant.
1245
+ *
1246
+ * Each element is independently drawn: it equals `scale` with probability
1247
+ * `(1 - p)` and `0` with probability `p`. This is the mask used by inverted
1248
+ * dropout: non-dropped elements are pre-scaled by `1 / (1 - p)`.
1249
+ *
1250
+ * @param shape - Output tensor shape
1251
+ * @param p - Probability of an element being zero (drop probability)
1252
+ * @param scale - Value assigned to kept elements (typically `1 / (1 - p)`)
1253
+ * @param dtype - Numeric dtype for the output tensor
1254
+ * @param device - Target device
1255
+ * @returns Tensor of the given shape filled with `0` or `scale`
1256
+ *
1257
+ * @internal
1258
+ */
1259
+ declare function dropoutMask(shape: Shape, p: number, scale: number, dtype: NumericDType, device: Device): Tensor;
1260
+
1261
+ /**
1262
+ * Sum reduction.
1263
+ *
1264
+ * Supported (initial foundation):
1265
+ * - `axis` omitted: sum all elements to a scalar tensor
1266
+ * - `axis` provided: supports any axis
1267
+ *
1268
+ * Output dtype:
1269
+ * - `int64` stays `int64`
1270
+ * - `float32` and `float64` promote to `float64`
1271
+ * - `int32`/`uint8`/`bool` promote to `int32`
1272
+ * - `string` is unsupported
1273
+ */
1274
+ declare function sum(t: Tensor, axis?: Axis, keepdims?: boolean): Tensor;
1275
+ /**
1276
+ * Compute the arithmetic mean along the specified axis.
1277
+ *
1278
+ * Returns NaN for empty reductions (when the number of elements along the
1279
+ * reduction axis is 0), matching NumPy's behavior of returning NaN with a
1280
+ * "mean of empty slice" warning.
1281
+ *
1282
+ * @param t - Input tensor
1283
+ * @param axis - Axis along which to compute mean. If undefined, compute over all elements
1284
+ * @param keepdims - If true, keep reduced dimensions as size 1
1285
+ * @returns Tensor containing mean values. NaN for empty reductions.
1286
+ *
1287
+ * @example
1288
+ * ```ts
1289
+ * import { mean, tensor } from 'deepbox/ndarray';
1290
+ *
1291
+ * const x = tensor([[1, 2], [3, 4]]);
1292
+ * const m1 = mean(x); // 2.5
1293
+ * const m2 = mean(x, 0); // [2, 3]
1294
+ * const m3 = mean(x, 1); // [1.5, 3.5]
1295
+ * ```
1296
+ *
1297
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.mean.html | NumPy mean}
1298
+ */
1299
+ declare function mean(t: Tensor, axis?: Axis, keepdims?: boolean): Tensor;
1300
+ /**
1301
+ * Product of array elements along axis.
1302
+ *
1303
+ * Multiplies all elements together. Returns 1 for empty arrays.
1304
+ * Supports full reduction and reduction over one or more axes.
1305
+ *
1306
+ * **Complexity**: O(n) where n is the number of elements
1307
+ *
1308
+ * Output dtype:
1309
+ * - Same as input dtype (int32, float64, int64)
1310
+ * - Potential for overflow with large products
1311
+ *
1312
+ * @param t - Input tensor
1313
+ * @param axis - Axis or axes along which to compute the product
1314
+ * @param keepdims - If true, keep reduced dimensions as size 1
1315
+ * @returns Scalar tensor containing the product
1316
+ *
1317
+ * @example
1318
+ * ```ts
1319
+ * import { tensor, prod } from 'deepbox/ndarray';
1320
+ *
1321
+ * const t = tensor([1, 2, 3, 4]);
1322
+ * prod(t); // tensor(24) - 1*2*3*4
1323
+ *
1324
+ * const t2 = tensor([2.5, 4.0]);
1325
+ * prod(t2); // tensor(10.0)
1326
+ * ```
1327
+ *
1328
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.prod.html | NumPy prod}
1329
+ */
1330
+ declare function prod(t: Tensor, axis?: Axis | Axis[], keepdims?: boolean): Tensor;
1331
+ /**
1332
+ * Standard deviation along axis.
1333
+ *
1334
+ * Computes the standard deviation, a measure of the spread of a distribution.
1335
+ * Uses the formula: sqrt(variance)
1336
+ *
1337
+ * **Complexity**: O(n) where n is the number of elements (requires 2 passes)
1338
+ *
1339
+ * @param t - Input tensor
1340
+ * @param axis - Axis or axes along which to compute std. If undefined, compute over all elements
1341
+ * @param keepdims - If true, keep reduced dimensions as size 1
1342
+ * @param ddof - Delta degrees of freedom (default 0 for population, 1 for sample)
1343
+ * @returns Tensor containing the standard deviation values
1344
+ *
1345
+ * @example
1346
+ * ```ts
1347
+ * import { tensor, std } from 'deepbox/ndarray';
1348
+ *
1349
+ * const t = tensor([1, 2, 3, 4, 5]);
1350
+ * std(t); // tensor(1.414...) - population std
1351
+ * std(t, undefined, false, 1); // sample std
1352
+ *
1353
+ * const t2 = tensor([[1, 2], [3, 4]]);
1354
+ * std(t2, 0); // [1, 1] - std along rows
1355
+ * std(t2, 1); // [0.5, 0.5] - std along columns
1356
+ * ```
1357
+ *
1358
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.std.html | NumPy std}
1359
+ */
1360
+ declare function std(t: Tensor, axis?: Axis, keepdims?: boolean, ddof?: number): Tensor;
1361
+ /**
1362
+ * Variance along axis.
1363
+ *
1364
+ * Computes the variance, the average of squared deviations from the mean.
1365
+ * Formula: Σ(x - mean)² / (N - ddof)
1366
+ *
1367
+ * **Complexity**: O(n) where n is the number of elements (requires 2 passes: one for mean, one for variance)
1368
+ *
1369
+ * @param t - Input tensor
1370
+ * @param axis - Axis along which to compute variance. If undefined, compute over all elements
1371
+ * @param keepdims - If true, keep reduced dimensions as size 1
1372
+ * @param ddof - Delta degrees of freedom (default 0 for population, 1 for sample)
1373
+ * @returns Tensor containing the variance values
1374
+ *
1375
+ * @example
1376
+ * ```ts
1377
+ * import { tensor, variance } from 'deepbox/ndarray';
1378
+ *
1379
+ * const t = tensor([1, 2, 3, 4, 5]);
1380
+ * variance(t); // tensor(2.0) - population variance
1381
+ * variance(t, undefined, false, 1); // tensor(2.5) - sample variance
1382
+ *
1383
+ * const t2 = tensor([[1, 2], [3, 4]]);
1384
+ * variance(t2, 0); // [1, 1] - variance along rows
1385
+ * variance(t2, 1); // [0.25, 0.25] - variance along columns
1386
+ * ```
1387
+ *
1388
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.var.html | NumPy var}
1389
+ */
1390
+ declare function variance(t: Tensor, axis?: Axis, keepdims?: boolean, ddof?: number): Tensor;
1391
+ /**
1392
+ * Minimum value along axis.
1393
+ *
1394
+ * Finds the smallest element in the tensor, optionally along one or more axes.
1395
+ *
1396
+ * **Complexity**: O(n) where n is the number of elements
1397
+ *
1398
+ * @param t - Input tensor
1399
+ * @param axis - Axis or axes along which to compute the minimum
1400
+ * @param keepdims - If true, keep reduced dimensions as size 1
1401
+ * @returns Scalar tensor containing the minimum value
1402
+ *
1403
+ * @example
1404
+ * ```ts
1405
+ * import { tensor, min } from 'deepbox/ndarray';
1406
+ *
1407
+ * const t = tensor([3, 1, 4, 1, 5]);
1408
+ * min(t); // tensor(1)
1409
+ * ```
1410
+ *
1411
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.amin.html | NumPy amin}
1412
+ */
1413
+ declare function min(t: Tensor, axis?: Axis | Axis[], keepdims?: boolean): Tensor;
1414
+ /**
1415
+ * Maximum value along axis.
1416
+ *
1417
+ * Finds the largest element in the tensor, optionally along one or more axes.
1418
+ *
1419
+ * **Complexity**: O(n) where n is the number of elements
1420
+ *
1421
+ * @param t - Input tensor
1422
+ * @param axis - Axis or axes along which to compute the maximum
1423
+ * @param keepdims - If true, keep reduced dimensions as size 1
1424
+ * @returns Scalar tensor containing the maximum value
1425
+ *
1426
+ * @example
1427
+ * ```ts
1428
+ * import { tensor, max } from 'deepbox/ndarray';
1429
+ *
1430
+ * const t = tensor([3, 1, 4, 1, 5]);
1431
+ * max(t); // tensor(5)
1432
+ * ```
1433
+ *
1434
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.amax.html | NumPy amax}
1435
+ */
1436
+ declare function max(t: Tensor, axis?: Axis | Axis[], keepdims?: boolean): Tensor;
1437
+ /**
1438
+ * Median value along axis.
1439
+ *
1440
+ * Computes the median (middle value) of the data.
1441
+ * For even-sized arrays, returns the average of the two middle values.
1442
+ *
1443
+ * **Complexity**: O(n log n) due to sorting per output element
1444
+ *
1445
+ * @param t - Input tensor
1446
+ * @param axis - Axis along which to compute median. If undefined, compute over all elements
1447
+ * @param keepdims - If true, keep reduced dimensions as size 1
1448
+ * @returns Tensor containing the median values
1449
+ *
1450
+ * @example
1451
+ * ```ts
1452
+ * import { tensor, median } from 'deepbox/ndarray';
1453
+ *
1454
+ * const t = tensor([1, 3, 5, 7, 9]);
1455
+ * median(t); // tensor(5) - middle value
1456
+ *
1457
+ * const t2 = tensor([1, 2, 3, 4]);
1458
+ * median(t2); // tensor(2.5) - average of 2 and 3
1459
+ *
1460
+ * const t3 = tensor([[1, 3], [2, 4]]);
1461
+ * median(t3, 0); // [1.5, 3.5] - median along rows
1462
+ * median(t3, 1); // [2, 3] - median along columns
1463
+ * ```
1464
+ *
1465
+ * Performance:
1466
+ * - This implementation copies and sorts values (O(n log n) time, O(n) memory).
1467
+ *
1468
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.median.html | NumPy median}
1469
+ */
1470
+ declare function median(t: Tensor, axis?: Axis, keepdims?: boolean): Tensor;
1471
+ /**
1472
+ * Cumulative sum along axis.
1473
+ *
1474
+ * Returns an array of the same shape where each element is the sum of all
1475
+ * previous elements (inclusive) along the specified axis.
1476
+ *
1477
+ * **Complexity**: O(n) where n is the number of elements
1478
+ *
1479
+ * @param t - Input tensor
1480
+ * @param axis - Axis along which to compute cumulative sum. If undefined, operates on the flattened array.
1481
+ * @returns Tensor of same shape with cumulative sums
1482
+ *
1483
+ * @example
1484
+ * ```ts
1485
+ * import { tensor, cumsum } from 'deepbox/ndarray';
1486
+ *
1487
+ * const t = tensor([1, 2, 3, 4]);
1488
+ * cumsum(t); // tensor([1, 3, 6, 10])
1489
+ * ```
1490
+ *
1491
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.cumsum.html | NumPy cumsum}
1492
+ */
1493
+ declare function cumsum(t: Tensor, axis?: Axis): Tensor;
1494
+ /**
1495
+ * Cumulative product along axis.
1496
+ *
1497
+ * Returns an array of the same shape where each element is the product of all
1498
+ * previous elements (inclusive) along the specified axis.
1499
+ *
1500
+ * **Complexity**: O(n) where n is the number of elements
1501
+ *
1502
+ * @param t - Input tensor
1503
+ * @param axis - Axis along which to compute cumulative product. If undefined, operates on the flattened array.
1504
+ * @returns Tensor of same shape with cumulative products
1505
+ *
1506
+ * @example
1507
+ * ```ts
1508
+ * import { tensor, cumprod } from 'deepbox/ndarray';
1509
+ *
1510
+ * const t = tensor([1, 2, 3, 4]);
1511
+ * cumprod(t); // tensor([1, 2, 6, 24])
1512
+ * ```
1513
+ *
1514
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.cumprod.html | NumPy cumprod}
1515
+ */
1516
+ declare function cumprod(t: Tensor, axis?: Axis): Tensor;
1517
+ /**
1518
+ * Calculate differences along axis.
1519
+ *
1520
+ * Computes the n-th discrete difference along the given axis.
1521
+ * The first difference is given by out[i] = a[i+1] - a[i] along the flattened array.
1522
+ * Computes differences along the specified axis (default: last axis).
1523
+ *
1524
+ * **Complexity**: O(n) where n is the number of elements
1525
+ *
1526
+ * @param t - Input tensor
1527
+ * @param n - Number of times to take difference (default 1)
1528
+ * @param axis - Axis along which to compute differences (default: last axis)
1529
+ * @returns Tensor with differences (size reduced by n along the given axis)
1530
+ *
1531
+ * @example
1532
+ * ```ts
1533
+ * import { tensor, diff } from 'deepbox/ndarray';
1534
+ *
1535
+ * const t = tensor([1, 3, 6, 10]);
1536
+ * diff(t); // tensor([2, 3, 4]) - differences between consecutive elements
1537
+ * ```
1538
+ *
1539
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.diff.html | NumPy diff}
1540
+ */
1541
+ declare function diff(t: Tensor, n?: number, axis?: number): Tensor;
1542
+ /**
1543
+ * Test whether any array element evaluates to True (non-zero).
1544
+ *
1545
+ * Returns true if at least one element is non-zero, false if all are zero.
1546
+ * Supports reduction over one or more axes.
1547
+ *
1548
+ * **Complexity**: O(n) - checks each element until finding non-zero or reaching end
1549
+ *
1550
+ * @param t - Input tensor
1551
+ * @param axis - Axis or axes along which to compute the reduction
1552
+ * @param keepdims - If true, keep reduced dimensions as size 1
1553
+ * @returns Scalar tensor (true=1, false=0)
1554
+ *
1555
+ * @example
1556
+ * ```ts
1557
+ * const t = tensor([0, 0, 1, 0]);
1558
+ * any(t); // tensor(1) - at least one non-zero
1559
+ *
1560
+ * const t2 = tensor([0, 0, 0]);
1561
+ * any(t2); // tensor(0) - all zeros
1562
+ * ```
1563
+ *
1564
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.any.html | NumPy any}
1565
+ */
1566
+ declare function any(t: Tensor, axis?: number | number[], keepdims?: boolean): Tensor;
1567
+ /**
1568
+ * Test whether all array elements evaluate to True (non-zero).
1569
+ *
1570
+ * Returns true only if all elements are non-zero, false if any are zero.
1571
+ * Supports reduction over one or more axes.
1572
+ *
1573
+ * **Complexity**: O(n) - checks each element until finding zero or reaching end
1574
+ *
1575
+ * @param t - Input tensor
1576
+ * @param axis - Axis or axes along which to compute the reduction
1577
+ * @param keepdims - If true, keep reduced dimensions as size 1
1578
+ * @returns Tensor of 0/1 values (true=1, false=0); scalar when axis is omitted, otherwise reduced along the given axes
1579
+ *
1580
+ * @example
1581
+ * ```ts
1582
+ * const t = tensor([1, 2, 3]);
1583
+ * all(t); // tensor(1) - all non-zero
1584
+ *
1585
+ * const t2 = tensor([1, 0, 3]);
1586
+ * all(t2); // tensor(0) - has a zero
1587
+ * ```
1588
+ *
1589
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.all.html | NumPy all}
1590
+ */
1591
+ declare function all(t: Tensor, axis?: number | number[], keepdims?: boolean): Tensor;
1592
+
1593
+ /**
1594
+ * Sort values along a given axis.
1595
+ *
1596
+ * Supports tensors of any dimensionality. Default axis is -1 (last).
1597
+ *
1598
+ * Performance:
1599
+ * - O(N log N) where N is the total number of elements.
1600
+ */
1601
+ declare function sort(t: Tensor, axis?: Axis | undefined, descending?: boolean): Tensor;
1602
+ /**
1603
+ * Return indices that would sort the tensor along a given axis.
1604
+ *
1605
+ * Supports tensors of any dimensionality. Default axis is -1 (last).
1606
+ *
1607
+ * Performance:
1608
+ * - O(N log N) where N is the total number of elements.
1609
+ */
1610
+ declare function argsort(t: Tensor, axis?: Axis | undefined, descending?: boolean): Tensor;
1611
+
1612
+ /**
1613
+ * Element-wise sine.
1614
+ *
1615
+ * Output dtype:
1616
+ * - Always `float64` for now.
1617
+ *
1618
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.sin.html | NumPy sin}
1619
+ */
1620
+ declare function sin(t: Tensor): Tensor;
1621
+ /**
1622
+ * Element-wise cosine.
1623
+ *
1624
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.cos.html | NumPy cos}
1625
+ */
1626
+ declare function cos(t: Tensor): Tensor;
1627
+ /**
1628
+ * Element-wise tangent.
1629
+ *
1630
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.tan.html | NumPy tan}
1631
+ */
1632
+ declare function tan(t: Tensor): Tensor;
1633
+ /**
1634
+ * Element-wise inverse sine.
1635
+ *
1636
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arcsin.html | NumPy arcsin}
1637
+ */
1638
+ declare function asin(t: Tensor): Tensor;
1639
+ /**
1640
+ * Element-wise inverse cosine.
1641
+ *
1642
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arccos.html | NumPy arccos}
1643
+ */
1644
+ declare function acos(t: Tensor): Tensor;
1645
+ /**
1646
+ * Element-wise inverse tangent.
1647
+ *
1648
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arctan.html | NumPy arctan}
1649
+ */
1650
+ declare function atan(t: Tensor): Tensor;
1651
+ /**
1652
+ * Element-wise arctangent of y/x with correct quadrant.
1653
+ *
1654
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arctan2.html | NumPy arctan2}
1655
+ */
1656
+ declare function atan2(y: Tensor, x: Tensor): Tensor;
1657
+ /**
1658
+ * Element-wise hyperbolic sine.
1659
+ *
1660
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.sinh.html | NumPy sinh}
1661
+ */
1662
+ declare function sinh(t: Tensor): Tensor;
1663
+ /**
1664
+ * Element-wise hyperbolic cosine.
1665
+ *
1666
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.cosh.html | NumPy cosh}
1667
+ */
1668
+ declare function cosh(t: Tensor): Tensor;
1669
+ /**
1670
+ * Element-wise hyperbolic tangent.
1671
+ *
1672
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.tanh.html | NumPy tanh}
1673
+ */
1674
+ declare function tanh(t: Tensor): Tensor;
1675
+ /**
1676
+ * Element-wise inverse hyperbolic sine.
1677
+ *
1678
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arcsinh.html | NumPy arcsinh}
1679
+ */
1680
+ declare function asinh(t: Tensor): Tensor;
1681
+ /**
1682
+ * Element-wise inverse hyperbolic cosine.
1683
+ *
1684
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arccosh.html | NumPy arccosh}
1685
+ */
1686
+ declare function acosh(t: Tensor): Tensor;
1687
+ /**
1688
+ * Element-wise inverse hyperbolic tangent.
1689
+ *
1690
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.arctanh.html | NumPy arctanh}
1691
+ */
1692
+ declare function atanh(t: Tensor): Tensor;
1693
+
1694
+ /**
1695
+ * Remove single-dimensional entries from the shape.
1696
+ *
1697
+ * Returns a view of the tensor with all dimensions of size 1 removed.
1698
+ * If axis is specified, only removes dimensions at those positions.
1699
+ *
1700
+ * **Complexity**: O(ndim) - only manipulates shape metadata, no data copy
1701
+ *
1702
+ * **Parameters**:
1703
+ * @param t - Input tensor
1704
+ * @param axis - Axis to squeeze. If undefined, squeeze all axes of size 1
1705
+ *
1706
+ * **Returns**: Tensor with squeezed dimensions (view, no copy)
1707
+ *
1708
+ * @example
1709
+ * ```ts
1710
+ * import { squeeze, tensor } from 'deepbox/ndarray';
1711
+ *
1712
+ * const x = tensor([[[1], [2], [3]]]);
1713
+ * // shape: (1, 3, 1)
1714
+ * const y = squeeze(x); // shape: (3,)
1715
+ * const z = squeeze(x, 2); // shape: (1, 3)
1716
+ * ```
1717
+ *
1718
+ * @throws {Error} If axis is specified and dimension is not 1
1719
+ *
1720
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.squeeze.html | NumPy squeeze}
1721
+ */
1722
+ declare function squeeze(t: Tensor, axis?: Axis | Axis[]): Tensor;
1723
+ /**
1724
+ * Expand the shape by inserting a new axis.
1725
+ *
1726
+ * Returns a view of the tensor with a new dimension of size 1 inserted
1727
+ * at the specified position.
1728
+ *
1729
+ * **Complexity**: O(ndim) - only manipulates shape metadata, no data copy
1730
+ *
1731
+ * **Parameters**:
1732
+ * @param t - Input tensor
1733
+ * @param axis - Position where new axis is placed (can be negative)
1734
+ *
1735
+ * **Returns**: Tensor with expanded dimensions (view, no copy)
1736
+ *
1737
+ * @example
1738
+ * ```ts
1739
+ * import { unsqueeze, tensor } from 'deepbox/ndarray';
1740
+ *
1741
+ * const x = tensor([1, 2, 3]); // shape: (3,)
1742
+ * const y = unsqueeze(x, 0); // shape: (1, 3)
1743
+ * const z = unsqueeze(x, 1); // shape: (3, 1)
1744
+ * const w = unsqueeze(x, -1); // shape: (3, 1)
1745
+ * ```
1746
+ *
1747
+ * @see {@link https://numpy.org/doc/stable/reference/generated/numpy.expand_dims.html | NumPy expand_dims}
1748
+ */
1749
+ declare function unsqueeze(t: Tensor, axis: number): Tensor;
1750
+ /**
1751
+ * Alias for unsqueeze.
1752
+ */
1753
+ declare const expandDims: typeof unsqueeze;
1754
+
1755
+ /**
1756
+ * Union type representing either a Tensor or GradTensor.
1757
+ *
1758
+ * This type enables functions to accept both regular tensors and
1759
+ * differentiable tensors interchangeably, improving API flexibility.
1760
+ *
1761
+ * Use this type when a function should work with either tensor type:
1762
+ * - `Tensor`: For pure numerical operations without gradient tracking
1763
+ * - `GradTensor`: For operations that need automatic differentiation
1764
+ *
1765
+ * @example
1766
+ * ```ts
1767
+ * import type { AnyTensor } from 'deepbox/ndarray';
1768
+ *
1769
+ * function processData(input: AnyTensor): void {
1770
+ * console.log(input.shape); // Works with both Tensor and GradTensor
1771
+ * console.log(input.dtype);
1772
+ * }
1773
+ * ```
1774
+ */
1775
+ type AnyTensor = Tensor | GradTensor;
1776
+
1777
+ type index_AnyTensor = AnyTensor;
1778
+ declare const index_CSRMatrix: typeof CSRMatrix;
1779
+ declare const index_CSRMatrixInit: typeof CSRMatrixInit;
1780
+ declare const index_DType: typeof DType;
1781
+ declare const index_Device: typeof Device;
1782
+ declare const index_GradTensor: typeof GradTensor;
1783
+ declare const index_GradTensorOptions: typeof GradTensorOptions;
1784
+ type index_NestedArray = NestedArray;
1785
+ declare const index_Shape: typeof Shape;
1786
+ declare const index_SliceRange: typeof SliceRange;
1787
+ declare const index_Tensor: typeof Tensor;
1788
+ type index_TensorCreateOptions = TensorCreateOptions;
1789
+ declare const index_TensorLike: typeof TensorLike;
1790
+ declare const index_TensorOptions: typeof TensorOptions;
1791
+ declare const index_TypedArray: typeof TypedArray;
1792
+ declare const index_abs: typeof abs;
1793
+ declare const index_acos: typeof acos;
1794
+ declare const index_acosh: typeof acosh;
1795
+ declare const index_add: typeof add;
1796
+ declare const index_addScalar: typeof addScalar;
1797
+ declare const index_all: typeof all;
1798
+ declare const index_allclose: typeof allclose;
1799
+ declare const index_any: typeof any;
1800
+ declare const index_arange: typeof arange;
1801
+ declare const index_argsort: typeof argsort;
1802
+ declare const index_arrayEqual: typeof arrayEqual;
1803
+ declare const index_asin: typeof asin;
1804
+ declare const index_asinh: typeof asinh;
1805
+ declare const index_atan: typeof atan;
1806
+ declare const index_atan2: typeof atan2;
1807
+ declare const index_atanh: typeof atanh;
1808
+ declare const index_cbrt: typeof cbrt;
1809
+ declare const index_ceil: typeof ceil;
1810
+ declare const index_clip: typeof clip;
1811
+ declare const index_col2im: typeof col2im;
1812
+ declare const index_concatenate: typeof concatenate;
1813
+ declare const index_cos: typeof cos;
1814
+ declare const index_cosh: typeof cosh;
1815
+ declare const index_cumprod: typeof cumprod;
1816
+ declare const index_cumsum: typeof cumsum;
1817
+ declare const index_diff: typeof diff;
1818
+ declare const index_div: typeof div;
1819
+ declare const index_dot: typeof dot;
1820
+ declare const index_dropoutMask: typeof dropoutMask;
1821
+ declare const index_elu: typeof elu;
1822
+ declare const index_empty: typeof empty;
1823
+ declare const index_equal: typeof equal;
1824
+ declare const index_exp: typeof exp;
1825
+ declare const index_exp2: typeof exp2;
1826
+ declare const index_expandDims: typeof expandDims;
1827
+ declare const index_expm1: typeof expm1;
1828
+ declare const index_eye: typeof eye;
1829
+ declare const index_flatten: typeof flatten;
1830
+ declare const index_floor: typeof floor;
1831
+ declare const index_floorDiv: typeof floorDiv;
1832
+ declare const index_full: typeof full;
1833
+ declare const index_gather: typeof gather;
1834
+ declare const index_gelu: typeof gelu;
1835
+ declare const index_geomspace: typeof geomspace;
1836
+ declare const index_greater: typeof greater;
1837
+ declare const index_greaterEqual: typeof greaterEqual;
1838
+ declare const index_im2col: typeof im2col;
1839
+ declare const index_isclose: typeof isclose;
1840
+ declare const index_isfinite: typeof isfinite;
1841
+ declare const index_isinf: typeof isinf;
1842
+ declare const index_isnan: typeof isnan;
1843
+ declare const index_leakyRelu: typeof leakyRelu;
1844
+ declare const index_less: typeof less;
1845
+ declare const index_lessEqual: typeof lessEqual;
1846
+ declare const index_linspace: typeof linspace;
1847
+ declare const index_log: typeof log;
1848
+ declare const index_log10: typeof log10;
1849
+ declare const index_log1p: typeof log1p;
1850
+ declare const index_log2: typeof log2;
1851
+ declare const index_logSoftmax: typeof logSoftmax;
1852
+ declare const index_logicalAnd: typeof logicalAnd;
1853
+ declare const index_logicalNot: typeof logicalNot;
1854
+ declare const index_logicalOr: typeof logicalOr;
1855
+ declare const index_logicalXor: typeof logicalXor;
1856
+ declare const index_logspace: typeof logspace;
1857
+ declare const index_max: typeof max;
1858
+ declare const index_maximum: typeof maximum;
1859
+ declare const index_mean: typeof mean;
1860
+ declare const index_median: typeof median;
1861
+ declare const index_min: typeof min;
1862
+ declare const index_minimum: typeof minimum;
1863
+ declare const index_mish: typeof mish;
1864
+ declare const index_mod: typeof mod;
1865
+ declare const index_mul: typeof mul;
1866
+ declare const index_mulScalar: typeof mulScalar;
1867
+ declare const index_neg: typeof neg;
1868
+ declare const index_noGrad: typeof noGrad;
1869
+ declare const index_notEqual: typeof notEqual;
1870
+ declare const index_ones: typeof ones;
1871
+ declare const index_parameter: typeof parameter;
1872
+ declare const index_pow: typeof pow;
1873
+ declare const index_prod: typeof prod;
1874
+ declare const index_randn: typeof randn;
1875
+ declare const index_reciprocal: typeof reciprocal;
1876
+ declare const index_relu: typeof relu;
1877
+ declare const index_repeat: typeof repeat;
1878
+ declare const index_reshape: typeof reshape;
1879
+ declare const index_round: typeof round;
1880
+ declare const index_rsqrt: typeof rsqrt;
1881
+ declare const index_sigmoid: typeof sigmoid;
1882
+ declare const index_sign: typeof sign;
1883
+ declare const index_sin: typeof sin;
1884
+ declare const index_sinh: typeof sinh;
1885
+ declare const index_slice: typeof slice;
1886
+ declare const index_softmax: typeof softmax;
1887
+ declare const index_softplus: typeof softplus;
1888
+ declare const index_sort: typeof sort;
1889
+ declare const index_split: typeof split;
1890
+ declare const index_sqrt: typeof sqrt;
1891
+ declare const index_square: typeof square;
1892
+ declare const index_squeeze: typeof squeeze;
1893
+ declare const index_stack: typeof stack;
1894
+ declare const index_std: typeof std;
1895
+ declare const index_sub: typeof sub;
1896
+ declare const index_sum: typeof sum;
1897
+ declare const index_swish: typeof swish;
1898
+ declare const index_tan: typeof tan;
1899
+ declare const index_tanh: typeof tanh;
1900
+ declare const index_tensor: typeof tensor;
1901
+ declare const index_tile: typeof tile;
1902
+ declare const index_transpose: typeof transpose;
1903
+ declare const index_trunc: typeof trunc;
1904
+ declare const index_unsqueeze: typeof unsqueeze;
1905
+ declare const index_variance: typeof variance;
1906
+ declare const index_zeros: typeof zeros;
1907
+ declare namespace index {
1908
+ export { type index_AnyTensor as AnyTensor, index_CSRMatrix as CSRMatrix, index_CSRMatrixInit as CSRMatrixInit, index_DType as DType, index_Device as Device, index_GradTensor as GradTensor, index_GradTensorOptions as GradTensorOptions, type index_NestedArray as NestedArray, index_Shape as Shape, index_SliceRange as SliceRange, index_Tensor as Tensor, type index_TensorCreateOptions as TensorCreateOptions, index_TensorLike as TensorLike, index_TensorOptions as TensorOptions, index_TypedArray as TypedArray, index_abs as abs, index_acos as acos, index_acosh as acosh, index_add as add, index_addScalar as addScalar, index_all as all, index_allclose as allclose, index_any as any, index_arange as arange, index_argsort as argsort, index_arrayEqual as arrayEqual, index_asin as asin, index_asinh as asinh, index_atan as atan, index_atan2 as atan2, index_atanh as atanh, index_cbrt as cbrt, index_ceil as ceil, index_clip as clip, index_col2im as col2im, index_concatenate as concatenate, index_cos as cos, index_cosh as cosh, index_cumprod as cumprod, index_cumsum as cumsum, index_diff as diff, index_div as div, index_dot as dot, dropout as dropoutGrad, index_dropoutMask as dropoutMask, index_elu as elu, index_empty as empty, index_equal as equal, index_exp as exp, index_exp2 as exp2, index_expandDims as expandDims, index_expm1 as expm1, index_eye as eye, index_flatten as flatten, index_floor as floor, index_floorDiv as floorDiv, index_full as full, index_gather as gather, index_gelu as gelu, index_geomspace as geomspace, index_greater as greater, index_greaterEqual as greaterEqual, index_im2col as im2col, im2col$1 as im2colGrad, index_isclose as isclose, index_isfinite as isfinite, index_isinf as isinf, index_isnan as isnan, index_leakyRelu as leakyRelu, index_less as less, index_lessEqual as lessEqual, index_linspace as linspace, index_log as log, index_log10 as log10, index_log1p as log1p, index_log2 as log2, index_logSoftmax as logSoftmax, logSoftmax$1 as logSoftmaxGrad, 
index_logicalAnd as logicalAnd, index_logicalNot as logicalNot, index_logicalOr as logicalOr, index_logicalXor as logicalXor, index_logspace as logspace, index_max as max, index_maximum as maximum, index_mean as mean, index_median as median, index_min as min, index_minimum as minimum, index_mish as mish, index_mod as mod, index_mul as mul, index_mulScalar as mulScalar, index_neg as neg, index_noGrad as noGrad, index_notEqual as notEqual, index_ones as ones, index_parameter as parameter, index_pow as pow, index_prod as prod, index_randn as randn, index_reciprocal as reciprocal, index_relu as relu, index_repeat as repeat, index_reshape as reshape, index_round as round, index_rsqrt as rsqrt, index_sigmoid as sigmoid, index_sign as sign, index_sin as sin, index_sinh as sinh, index_slice as slice, index_softmax as softmax, softmax$1 as softmaxGrad, index_softplus as softplus, index_sort as sort, index_split as split, index_sqrt as sqrt, index_square as square, index_squeeze as squeeze, index_stack as stack, index_std as std, index_sub as sub, index_sum as sum, index_swish as swish, index_tan as tan, index_tanh as tanh, index_tensor as tensor, index_tile as tile, index_transpose as transpose, index_trunc as trunc, index_unsqueeze as unsqueeze, index_variance as variance, variance$1 as varianceGrad, index_zeros as zeros };
1909
+ }
1910
+
1911
+ export { lessEqual as $, type AnyTensor as A, atan as B, atan2 as C, atanh as D, cbrt as E, ceil as F, clip as G, concatenate as H, cos as I, cosh as J, cumprod as K, cumsum as L, diff as M, div as N, equal as O, exp as P, exp2 as Q, expm1 as R, floor as S, floorDiv as T, greater as U, greaterEqual as V, isclose as W, isfinite as X, isinf as Y, isnan as Z, less as _, logSoftmax as a, log as a0, log1p as a1, log2 as a2, log10 as a3, logicalAnd as a4, logicalNot as a5, logicalOr as a6, logicalXor as a7, max as a8, maximum as a9, tan as aA, tanh as aB, tile as aC, trunc as aD, variance as aE, dropoutMask as aF, type NestedArray as aG, type TensorCreateOptions as aH, arange as aI, empty as aJ, eye as aK, flatten as aL, full as aM, geomspace as aN, linspace as aO, logspace as aP, ones as aQ, randn as aR, reshape as aS, tensor as aT, transpose as aU, zeros as aV, expandDims as aW, squeeze as aX, unsqueeze as aY, mean as aa, median as ab, min as ac, minimum as ad, mod as ae, mul as af, mulScalar as ag, neg as ah, notEqual as ai, pow as aj, prod as ak, reciprocal as al, repeat as am, round as an, rsqrt as ao, sign as ap, sin as aq, sinh as ar, sort as as, split as at, sqrt as au, square as av, stack as aw, std as ax, sub as ay, sum as az, softmax as b, softplus as c, dot as d, elu as e, swish as f, gelu as g, col2im as h, index as i, im2col as j, abs as k, leakyRelu as l, mish as m, acos as n, acosh as o, add as p, addScalar as q, relu as r, sigmoid as s, all as t, allclose as u, any as v, argsort as w, arrayEqual as x, asin as y, asinh as z };