deepbox 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (173)
  1. package/LICENSE +21 -0
  2. package/README.md +344 -0
  3. package/dist/CSRMatrix-CwGwQRea.d.cts +219 -0
  4. package/dist/CSRMatrix-KzNt6QpS.d.ts +219 -0
  5. package/dist/Tensor-BQLk1ltW.d.cts +147 -0
  6. package/dist/Tensor-g8mUClel.d.ts +147 -0
  7. package/dist/chunk-4S73VUBD.js +677 -0
  8. package/dist/chunk-4S73VUBD.js.map +1 -0
  9. package/dist/chunk-5R4S63PF.js +2925 -0
  10. package/dist/chunk-5R4S63PF.js.map +1 -0
  11. package/dist/chunk-6AE5FKKQ.cjs +9264 -0
  12. package/dist/chunk-6AE5FKKQ.cjs.map +1 -0
  13. package/dist/chunk-AD436M45.js +3854 -0
  14. package/dist/chunk-AD436M45.js.map +1 -0
  15. package/dist/chunk-ALS7ETWZ.cjs +4263 -0
  16. package/dist/chunk-ALS7ETWZ.cjs.map +1 -0
  17. package/dist/chunk-AU7XHGKJ.js +2092 -0
  18. package/dist/chunk-AU7XHGKJ.js.map +1 -0
  19. package/dist/chunk-B5TNKUEY.js +1481 -0
  20. package/dist/chunk-B5TNKUEY.js.map +1 -0
  21. package/dist/chunk-BCR7G3A6.js +9136 -0
  22. package/dist/chunk-BCR7G3A6.js.map +1 -0
  23. package/dist/chunk-C4PKXY74.cjs +1917 -0
  24. package/dist/chunk-C4PKXY74.cjs.map +1 -0
  25. package/dist/chunk-DWZY6PIP.cjs +6400 -0
  26. package/dist/chunk-DWZY6PIP.cjs.map +1 -0
  27. package/dist/chunk-E3EU5FZO.cjs +2113 -0
  28. package/dist/chunk-E3EU5FZO.cjs.map +1 -0
  29. package/dist/chunk-F3JWBINJ.js +1054 -0
  30. package/dist/chunk-F3JWBINJ.js.map +1 -0
  31. package/dist/chunk-FJYLIGJX.js +1940 -0
  32. package/dist/chunk-FJYLIGJX.js.map +1 -0
  33. package/dist/chunk-JSCDE774.cjs +729 -0
  34. package/dist/chunk-JSCDE774.cjs.map +1 -0
  35. package/dist/chunk-LWECRCW2.cjs +2412 -0
  36. package/dist/chunk-LWECRCW2.cjs.map +1 -0
  37. package/dist/chunk-MLBMYKCG.js +6379 -0
  38. package/dist/chunk-MLBMYKCG.js.map +1 -0
  39. package/dist/chunk-OX6QXFMV.cjs +3874 -0
  40. package/dist/chunk-OX6QXFMV.cjs.map +1 -0
  41. package/dist/chunk-PHV2DKRS.cjs +1072 -0
  42. package/dist/chunk-PHV2DKRS.cjs.map +1 -0
  43. package/dist/chunk-PL7TAYKI.js +4056 -0
  44. package/dist/chunk-PL7TAYKI.js.map +1 -0
  45. package/dist/chunk-PR647I7R.js +1898 -0
  46. package/dist/chunk-PR647I7R.js.map +1 -0
  47. package/dist/chunk-QERHVCHC.cjs +2960 -0
  48. package/dist/chunk-QERHVCHC.cjs.map +1 -0
  49. package/dist/chunk-XEG44RF6.cjs +1514 -0
  50. package/dist/chunk-XEG44RF6.cjs.map +1 -0
  51. package/dist/chunk-XMWVME2W.js +2377 -0
  52. package/dist/chunk-XMWVME2W.js.map +1 -0
  53. package/dist/chunk-ZB75FESB.cjs +1979 -0
  54. package/dist/chunk-ZB75FESB.cjs.map +1 -0
  55. package/dist/chunk-ZLW62TJG.cjs +4061 -0
  56. package/dist/chunk-ZLW62TJG.cjs.map +1 -0
  57. package/dist/chunk-ZXKBDFP3.js +4235 -0
  58. package/dist/chunk-ZXKBDFP3.js.map +1 -0
  59. package/dist/core/index.cjs +204 -0
  60. package/dist/core/index.cjs.map +1 -0
  61. package/dist/core/index.d.cts +2 -0
  62. package/dist/core/index.d.ts +2 -0
  63. package/dist/core/index.js +3 -0
  64. package/dist/core/index.js.map +1 -0
  65. package/dist/dataframe/index.cjs +22 -0
  66. package/dist/dataframe/index.cjs.map +1 -0
  67. package/dist/dataframe/index.d.cts +3 -0
  68. package/dist/dataframe/index.d.ts +3 -0
  69. package/dist/dataframe/index.js +5 -0
  70. package/dist/dataframe/index.js.map +1 -0
  71. package/dist/datasets/index.cjs +134 -0
  72. package/dist/datasets/index.cjs.map +1 -0
  73. package/dist/datasets/index.d.cts +3 -0
  74. package/dist/datasets/index.d.ts +3 -0
  75. package/dist/datasets/index.js +5 -0
  76. package/dist/datasets/index.js.map +1 -0
  77. package/dist/index-74AB8Cyh.d.cts +1126 -0
  78. package/dist/index-9oQx1HgV.d.cts +1180 -0
  79. package/dist/index-BJY2SI4i.d.ts +483 -0
  80. package/dist/index-BWGhrDlr.d.ts +733 -0
  81. package/dist/index-B_DK4FKY.d.cts +242 -0
  82. package/dist/index-BbA2Gxfl.d.ts +456 -0
  83. package/dist/index-BgHYAoSS.d.cts +837 -0
  84. package/dist/index-BndMbqsM.d.ts +1439 -0
  85. package/dist/index-C1mfVYoo.d.ts +2517 -0
  86. package/dist/index-CCvlwAmL.d.cts +809 -0
  87. package/dist/index-CDw5CnOU.d.ts +785 -0
  88. package/dist/index-Cn3SdB0O.d.ts +1126 -0
  89. package/dist/index-CrqLlS-a.d.ts +776 -0
  90. package/dist/index-D61yaSMY.d.cts +483 -0
  91. package/dist/index-D9Loo1_A.d.cts +2517 -0
  92. package/dist/index-DIT_OO9C.d.cts +785 -0
  93. package/dist/index-DIp_RrRt.d.ts +242 -0
  94. package/dist/index-DbultU6X.d.cts +1427 -0
  95. package/dist/index-DmEg_LCm.d.cts +776 -0
  96. package/dist/index-DoPWVxPo.d.cts +1439 -0
  97. package/dist/index-DuCxd-8d.d.ts +837 -0
  98. package/dist/index-Dx42TZaY.d.ts +809 -0
  99. package/dist/index-DyZ4QQf5.d.cts +456 -0
  100. package/dist/index-GFAVyOWO.d.ts +1427 -0
  101. package/dist/index-WHQLn0e8.d.cts +733 -0
  102. package/dist/index-ZtI1Iy4L.d.ts +1180 -0
  103. package/dist/index-eJgeni9c.d.cts +1911 -0
  104. package/dist/index-tk4lSYod.d.ts +1911 -0
  105. package/dist/index.cjs +72 -0
  106. package/dist/index.cjs.map +1 -0
  107. package/dist/index.d.cts +17 -0
  108. package/dist/index.d.ts +17 -0
  109. package/dist/index.js +15 -0
  110. package/dist/index.js.map +1 -0
  111. package/dist/linalg/index.cjs +86 -0
  112. package/dist/linalg/index.cjs.map +1 -0
  113. package/dist/linalg/index.d.cts +3 -0
  114. package/dist/linalg/index.d.ts +3 -0
  115. package/dist/linalg/index.js +5 -0
  116. package/dist/linalg/index.js.map +1 -0
  117. package/dist/metrics/index.cjs +158 -0
  118. package/dist/metrics/index.cjs.map +1 -0
  119. package/dist/metrics/index.d.cts +3 -0
  120. package/dist/metrics/index.d.ts +3 -0
  121. package/dist/metrics/index.js +5 -0
  122. package/dist/metrics/index.js.map +1 -0
  123. package/dist/ml/index.cjs +87 -0
  124. package/dist/ml/index.cjs.map +1 -0
  125. package/dist/ml/index.d.cts +3 -0
  126. package/dist/ml/index.d.ts +3 -0
  127. package/dist/ml/index.js +6 -0
  128. package/dist/ml/index.js.map +1 -0
  129. package/dist/ndarray/index.cjs +501 -0
  130. package/dist/ndarray/index.cjs.map +1 -0
  131. package/dist/ndarray/index.d.cts +5 -0
  132. package/dist/ndarray/index.d.ts +5 -0
  133. package/dist/ndarray/index.js +4 -0
  134. package/dist/ndarray/index.js.map +1 -0
  135. package/dist/nn/index.cjs +142 -0
  136. package/dist/nn/index.cjs.map +1 -0
  137. package/dist/nn/index.d.cts +6 -0
  138. package/dist/nn/index.d.ts +6 -0
  139. package/dist/nn/index.js +5 -0
  140. package/dist/nn/index.js.map +1 -0
  141. package/dist/optim/index.cjs +77 -0
  142. package/dist/optim/index.cjs.map +1 -0
  143. package/dist/optim/index.d.cts +4 -0
  144. package/dist/optim/index.d.ts +4 -0
  145. package/dist/optim/index.js +4 -0
  146. package/dist/optim/index.js.map +1 -0
  147. package/dist/plot/index.cjs +114 -0
  148. package/dist/plot/index.cjs.map +1 -0
  149. package/dist/plot/index.d.cts +6 -0
  150. package/dist/plot/index.d.ts +6 -0
  151. package/dist/plot/index.js +5 -0
  152. package/dist/plot/index.js.map +1 -0
  153. package/dist/preprocess/index.cjs +82 -0
  154. package/dist/preprocess/index.cjs.map +1 -0
  155. package/dist/preprocess/index.d.cts +4 -0
  156. package/dist/preprocess/index.d.ts +4 -0
  157. package/dist/preprocess/index.js +5 -0
  158. package/dist/preprocess/index.js.map +1 -0
  159. package/dist/random/index.cjs +74 -0
  160. package/dist/random/index.cjs.map +1 -0
  161. package/dist/random/index.d.cts +3 -0
  162. package/dist/random/index.d.ts +3 -0
  163. package/dist/random/index.js +5 -0
  164. package/dist/random/index.js.map +1 -0
  165. package/dist/stats/index.cjs +142 -0
  166. package/dist/stats/index.cjs.map +1 -0
  167. package/dist/stats/index.d.cts +3 -0
  168. package/dist/stats/index.d.ts +3 -0
  169. package/dist/stats/index.js +5 -0
  170. package/dist/stats/index.js.map +1 -0
  171. package/dist/tensor-B96jjJLQ.d.cts +205 -0
  172. package/dist/tensor-B96jjJLQ.d.ts +205 -0
  173. package/package.json +226 -0
@@ -0,0 +1,242 @@
1
+ import { A as Axis, D as DType, S as Shape, b as TypedArray } from './tensor-B96jjJLQ.cjs';
2
+ import { T as Tensor } from './Tensor-BQLk1ltW.cjs';
3
+
4
+ type SliceRange = number | {
5
+ readonly start?: number;
6
+ readonly end?: number;
7
+ readonly step?: number;
8
+ };
9
+ /**
10
+ * Slice a tensor.
11
+ *
12
+ * Examples:
13
+ * - `slice(t, { start: 0, end: 2 })` on a 1D tensor keeps the first 2 elements.
14
+ * - `slice(t, 0, { start: 1 })` on a 2D tensor selects row 0 and columns from index 1 onward.
15
+ */
16
+ declare function slice(t: Tensor, ...ranges: SliceRange[]): Tensor;
17
+ /**
18
+ * Gather values along an axis specified by indices.
19
+ *
20
+ * @param t - Input tensor
21
+ * @param indices - Indices to gather
22
+ * @param axis - Axis along which to gather
23
+ * @returns Gathered tensor
24
+ *
25
+ * @example
26
+ * ```ts
27
+ * const t = tensor([[1, 2], [3, 4], [5, 6]]);
28
+ * const indices = tensor([0, 2]);
29
+ * const result = gather(t, indices, 0); // [[1, 2], [5, 6]]
30
+ * ```
31
+ */
32
+ declare function gather(t: Tensor, indices: Tensor, axis: Axis): Tensor;
33
+
34
+ /**
35
+ * Autograd module for automatic differentiation.
36
+ *
37
+ * Implements reverse-mode automatic differentiation (backpropagation)
38
+ * for `Tensor` operations.
39
+ *
40
+ * ## Gradient state
41
+ *
42
+ * A **module-level singleton** `gradEnabled` controls whether new
43
+ * operations record their backward graph. Use {@link noGrad} to
44
+ * temporarily disable gradient tracking (e.g. during inference).
45
+ * `noGrad` only accepts **synchronous** callbacks — passing an async
46
+ * function will throw, because the flag would be restored before the
47
+ * async work completes.
48
+ *
49
+ * ## max / min backward — tie-breaking
50
+ *
51
+ * When multiple elements share the maximum (or minimum) value along the
52
+ * reduced axis, **all** tied positions receive gradient. This means the
53
+ * gradient is *not* divided among ties — each tied element gets the full
54
+ * upstream gradient. This matches PyTorch's behaviour and avoids the
55
+ * cost of counting ties, but callers should be aware that the
56
+ * "effective" gradient magnitude is multiplied by the tie count.
57
+ */
58
+
59
+ type GradTensorOptions = {
60
+ readonly requiresGrad?: boolean;
61
+ readonly dtype?: Exclude<DType, "string">;
62
+ };
63
+ type BackwardFn = () => void;
64
+ /**
65
+ * Tensor wrapper that records a computation graph for reverse-mode autodiff.
66
+ */
67
+ declare class GradTensor {
68
+ readonly tensor: Tensor;
69
+ requiresGrad: boolean;
70
+ private _grad;
71
+ private readonly _prev;
72
+ private readonly _backward;
73
+ private constructor();
74
+ static create(args: {
75
+ readonly tensor: Tensor;
76
+ readonly requiresGrad: boolean;
77
+ readonly prev: readonly GradTensor[];
78
+ readonly backward: BackwardFn;
79
+ }): GradTensor;
80
+ static fromTensor(t: Tensor, options?: GradTensorOptions): GradTensor;
81
+ static scalar(value: number, options?: GradTensorOptions): GradTensor;
82
+ /**
83
+ * Get the shape of the underlying tensor.
84
+ * Implements TensorLike interface for compatibility with Tensor.
85
+ */
86
+ get shape(): Shape;
87
+ /**
88
+ * Get the total number of elements.
89
+ * Implements TensorLike interface for compatibility with Tensor.
90
+ */
91
+ get size(): number;
92
+ /**
93
+ * Get the number of dimensions.
94
+ * Implements TensorLike interface for compatibility with Tensor.
95
+ */
96
+ get ndim(): number;
97
+ /**
98
+ * Get the data type of the underlying tensor.
99
+ * Implements TensorLike interface for compatibility with Tensor.
100
+ */
101
+ get dtype(): DType;
102
+ /**
103
+ * Get the device where the tensor resides.
104
+ * Implements TensorLike interface for compatibility with Tensor.
105
+ */
106
+ get device(): Tensor["device"];
107
+ /**
108
+ * Get the memory strides of the underlying tensor.
109
+ * Implements TensorLike interface for compatibility with Tensor.
110
+ */
111
+ get strides(): readonly number[];
112
+ /**
113
+ * Get the offset into the underlying data buffer.
114
+ * Implements TensorLike interface for compatibility with Tensor.
115
+ */
116
+ get offset(): number;
117
+ /**
118
+ * Get the underlying data buffer.
119
+ * Implements TensorLike interface for compatibility with Tensor.
120
+ */
121
+ get data(): TypedArray;
122
+ /**
123
+ * Get the accumulated gradient for this tensor.
124
+ * Returns null if no gradient has been computed yet.
125
+ */
126
+ get grad(): Tensor | null;
127
+ setGrad(grad: Tensor): void;
128
+ zeroGrad(): void;
129
+ detach(): GradTensor;
130
+ setRequiresGrad(value: boolean): void;
131
+ hasGrad(): boolean;
132
+ /** @internal */
133
+ accumulateGrad(grad: Tensor): void;
134
+ /**
135
+ * Backpropagate gradients from this node through the recorded graph.
136
+ */
137
+ backward(grad?: Tensor): void;
138
+ add(other: GradTensor): GradTensor;
139
+ sub(other: GradTensor): GradTensor;
140
+ mul(other: GradTensor): GradTensor;
141
+ neg(): GradTensor;
142
+ sum(axis?: Axis, keepdims?: boolean): GradTensor;
143
+ div(other: GradTensor): GradTensor;
144
+ pow(exponent: number): GradTensor;
145
+ sqrt(): GradTensor;
146
+ matmul(other: GradTensor): GradTensor;
147
+ relu(): GradTensor;
148
+ sigmoid(): GradTensor;
149
+ square(): GradTensor;
150
+ exp(): GradTensor;
151
+ log(): GradTensor;
152
+ tanh(): GradTensor;
153
+ slice(...args: SliceRange[]): GradTensor;
154
+ gather(indices: GradTensor, axis: Axis): GradTensor;
155
+ mean(axis?: Axis, keepdims?: boolean): GradTensor;
156
+ max(axis?: Axis, keepdims?: boolean): GradTensor;
157
+ /**
158
+ * Reshape the GradTensor to a new shape without copying data.
159
+ *
160
+ * Returns a new GradTensor with the specified shape. The underlying tensor
161
+ * is reshaped, and gradient computation is preserved through the reshape operation.
162
+ *
163
+ * @param newShape - The desired shape for the tensor
164
+ * @returns A new GradTensor with the specified shape
165
+ * @throws {ShapeError} If the new shape is incompatible with the tensor's size
166
+ *
167
+ * @example
168
+ * ```ts
169
+ * const t = parameter([1, 2, 3, 4, 5, 6]);
170
+ * const reshaped = t.reshape([2, 3]);
171
+ * console.log(reshaped.shape); // [2, 3]
172
+ * ```
173
+ */
174
+ reshape(newShape: Shape): GradTensor;
175
+ /**
176
+ * Flatten the GradTensor to a 1-dimensional array.
177
+ *
178
+ * Returns a new 1D GradTensor containing all elements.
179
+ *
180
+ * @returns A 1D GradTensor with shape [size]
181
+ *
182
+ * @example
183
+ * ```ts
184
+ * const matrix = parameter([[1, 2, 3], [4, 5, 6]]);
185
+ * const flat = matrix.flatten();
186
+ * console.log(flat.shape); // [6]
187
+ * ```
188
+ */
189
+ flatten(): GradTensor;
190
+ /**
191
+ * Create a view of the GradTensor with a different shape.
192
+ *
193
+ * Similar to reshape but uses the underlying tensor's view method.
194
+ *
195
+ * @param shape - The desired shape for the view
196
+ * @param strides - Optional custom strides
197
+ * @param offset - Optional offset into the data buffer
198
+ * @returns A new GradTensor view with the specified shape
199
+ */
200
+ view(shape: Shape, strides?: readonly number[], offset?: number): GradTensor;
201
+ transpose(axes?: readonly number[]): GradTensor;
202
+ min(axis?: Axis, keepdims?: boolean): GradTensor;
203
+ abs(): GradTensor;
204
+ clip(minVal: number, maxVal: number): GradTensor;
205
+ leakyRelu(negativeSlope?: number): GradTensor;
206
+ elu(alpha?: number): GradTensor;
207
+ gelu(): GradTensor;
208
+ /**
209
+ * Return a human-readable string representation of this GradTensor.
210
+ *
211
+ * Delegates to the underlying {@link Tensor.toString} and appends
212
+ * gradient metadata.
213
+ *
214
+ * @param maxElements - Maximum elements per dimension before summarizing (default: 6).
215
+ * @returns Formatted string representation
216
+ */
217
+ toString(maxElements?: number): string;
218
+ }
219
+ /**
220
+ * Create a GradTensor with requiresGrad=true.
221
+ */
222
+ declare function parameter(data: number | number[] | number[][] | number[][][] | Tensor, options?: GradTensorOptions): GradTensor;
223
+ /**
224
+ * Context manager to disable gradient calculation.
225
+ *
226
+ * **Important:** The callback must be synchronous. Passing an async function
227
+ * will cause `gradEnabled` to be restored before the awaited work finishes,
228
+ * silently breaking gradient tracking inside the async continuation.
229
+ *
230
+ * @throws {DeepboxError} If the callback returns a Promise (async function detected)
231
+ */
232
+ declare function noGrad<T>(fn: () => T): T;
233
+ /**
234
+ * Image to Column operation for GradTensor.
235
+ */
236
+ declare function im2col(input: GradTensor, kernelSize: [number, number], stride: [number, number], padding: [number, number]): GradTensor;
237
+ declare function softmax(input: GradTensor, axis?: number): GradTensor;
238
+ declare function logSoftmax(input: GradTensor, axis?: number): GradTensor;
239
+ declare function variance(input: GradTensor, axis?: number, correction?: number): GradTensor;
240
+ declare function dropout(input: GradTensor, p?: number, training?: boolean): GradTensor;
241
+
242
+ export { GradTensor as G, type SliceRange as S, type GradTensorOptions as a, softmax as b, dropout as d, gather as g, im2col as i, logSoftmax as l, noGrad as n, parameter as p, slice as s, variance as v };
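
To make the autograd semantics documented above concrete, here is a minimal usage sketch assembled from these declarations. The import path is an assumption (the declarations live in a bundled chunk, and the subpath that re-exports `parameter`/`noGrad` is not shown in this diff), and the gradient values in the comments follow from the documented behaviour rather than from executing the package.

```ts
// Assumed import path: adjust to whichever subpath re-exports the autograd API.
import { parameter, noGrad } from "deepbox";

// y = sum(w * x)  =>  dy/dw = x, dy/dx = w
const w = parameter([1, 2, 3]);
const x = parameter([4, 5, 6]);
const y = w.mul(x).sum();
y.backward();
console.log(w.grad?.toString()); // expected ~[4, 5, 6] by the product rule

// Tie-breaking: every tied maximum receives the full upstream gradient,
// so for [3, 1, 3] both index 0 and index 2 receive gradient 1.
const t = parameter([3, 1, 3]);
t.max().backward();

// noGrad requires a synchronous callback; no backward graph is recorded inside it.
const loss = noGrad(() => w.mul(x).sum());
```
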
@@ -0,0 +1,456 @@
1
+ import { D as DType, a as Device, S as Shape } from './tensor-B96jjJLQ.js';
2
+ import { T as Tensor } from './Tensor-g8mUClel.js';
3
+
4
+ type RandomOptions = {
5
+ readonly dtype?: DType;
6
+ readonly device?: Device;
7
+ };
8
+ /**
9
+ * Set global random seed.
10
+ *
11
+ * @param seed - Random seed value (any finite number). The seed is coerced to a uint64
12
+ * internally, so the same seed always produces the same sequence.
13
+ *
14
+ * @throws {InvalidParameterError} When seed is not finite (NaN or ±Infinity)
15
+ *
16
+ * @remarks
17
+ * - Setting a seed makes all random operations deterministic and reproducible.
18
+ * - The seed is truncated to uint64 range (0 to 2^64-1) for internal state.
19
+ * - Use {@link getSeed} to retrieve the currently set seed.
20
+ * - When no seed is set, random sampling uses a cryptographically secure RNG.
21
+ * Seeded mode is deterministic and **not** intended for cryptographic use.
22
+ *
23
+ * @example
24
+ * ```js
25
+ * import { setSeed, rand } from 'deepbox/random';
26
+ *
27
+ * setSeed(42);
28
+ * const a = rand([5]);
29
+ * setSeed(42);
30
+ * const b = rand([5]);
31
+ * // a and b contain identical values
32
+ * ```
33
+ */
34
+ declare function setSeed(seed: number): void;
35
+ /**
36
+ * Get current random seed.
37
+ *
38
+ * @returns Current seed value or undefined if not set
39
+ *
40
+ * @example
41
+ * ```js
42
+ * import { setSeed, getSeed } from 'deepbox/random';
43
+ *
44
+ * setSeed(12345);
45
+ * console.log(getSeed()); // 12345
46
+ * ```
47
+ */
48
+ declare function getSeed(): number | undefined;
49
+ /**
50
+ * Clear the current random seed and revert to cryptographically secure randomness.
51
+ *
52
+ * @remarks
53
+ * - After calling this, random sampling uses `crypto.getRandomValues`.
54
+ * - Use this to leave deterministic mode after {@link setSeed}.
55
+ *
56
+ * @example
57
+ * ```js
58
+ * import { clearSeed, rand } from 'deepbox/random';
59
+ *
60
+ * clearSeed();
61
+ * const x = rand([3]); // cryptographically secure randomness
62
+ * ```
63
+ */
64
+ declare function clearSeed(): void;
65
+ /**
66
+ * Random values in half-open interval [0, 1).
67
+ *
68
+ * @param shape - Output shape
69
+ * @param opts - Options (dtype, device)
70
+ *
71
+ * @remarks
72
+ * - Values are uniformly distributed in [0, 1) (inclusive lower, exclusive upper bound).
73
+ * - Uses deterministic PRNG when seed is set via {@link setSeed}.
74
+ * - Default dtype is float32; use float64 for higher precision.
75
+ * - Only float32 and float64 dtypes are supported.
76
+ *
77
+ * @example
78
+ * ```js
79
+ * import { rand, setSeed } from 'deepbox/random';
80
+ *
81
+ * const x = rand([2, 3]); // 2x3 matrix of random values
82
+ *
83
+ * // Deterministic generation
84
+ * setSeed(42);
85
+ * const a = rand([5]);
86
+ * setSeed(42);
87
+ * const b = rand([5]);
88
+ * // a and b are identical
89
+ * ```
90
+ *
91
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.rand.html | NumPy random.rand}
92
+ */
93
+ declare function rand(shape: Shape, opts?: RandomOptions): Tensor;
94
+ /**
95
+ * Random samples from standard normal distribution.
96
+ *
97
+ * @param shape - Output shape
98
+ * @param opts - Options (dtype, device)
99
+ *
100
+ * @remarks
101
+ * - Uses Box-Muller transform to generate normally distributed values.
102
+ * - Mean = 0, standard deviation = 1.
103
+ * - All values are finite (no infinities from tail behavior).
104
+ * - Deterministic when seed is set via {@link setSeed}.
105
+ * - Only float32 and float64 dtypes are supported.
106
+ *
107
+ * @example
108
+ * ```js
109
+ * import { randn } from 'deepbox/random';
110
+ *
111
+ * const x = randn([2, 3]); // 2x3 matrix of normal random values
112
+ * ```
113
+ *
114
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.randn.html | NumPy random.randn}
115
+ */
116
+ declare function randn(shape: Shape, opts?: RandomOptions): Tensor;
117
+ /**
118
+ * Random integers in half-open interval [low, high).
119
+ *
120
+ * @param low - Lowest integer (inclusive)
121
+ * @param high - Highest integer (exclusive)
122
+ * @param shape - Output shape
123
+ * @param opts - Options (dtype, device)
124
+ *
125
+ * @throws {InvalidParameterError} When low or high is not finite
126
+ * @throws {InvalidParameterError} When low or high is not an integer
127
+ * @throws {InvalidParameterError} When high <= low
128
+ *
129
+ * @remarks
130
+ * - Generates integers uniformly in [low, high) range.
131
+ * - Both low and high must be safe integers (within ±2^53-1).
132
+ * - dtype must be int32 or int64; int32 output requires bounds within int32 range.
133
+ * - Deterministic when seed is set via {@link setSeed}.
134
+ *
135
+ * @example
136
+ * ```js
137
+ * import { randint } from 'deepbox/random';
138
+ *
139
+ * const x = randint(0, 10, [5]); // 5 random integers from 0 to 9
140
+ * ```
141
+ *
142
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.randint.html | NumPy random.randint}
143
+ */
144
+ declare function randint(low: number, high: number, shape: Shape, opts?: RandomOptions): Tensor;
145
+ /**
146
+ * Random samples from continuous uniform distribution.
147
+ *
148
+ * @param low - Lower boundary (default: 0)
149
+ * @param high - Upper boundary (default: 1)
150
+ * @param shape - Output shape
151
+ * @param opts - Options
152
+ *
153
+ * @throws {InvalidParameterError} When low or high is not finite
154
+ * @throws {InvalidParameterError} When high < low
155
+ *
156
+ * @remarks
157
+ * - Values are uniformly distributed in [low, high).
158
+ * - For very large ranges, floating-point precision may affect uniformity.
159
+ * - Deterministic when seed is set via {@link setSeed}.
160
+ * - Only float32 and float64 dtypes are supported.
161
+ *
162
+ * @example
163
+ * ```js
164
+ * import { uniform } from 'deepbox/random';
165
+ *
166
+ * const x = uniform(-1, 1, [3, 3]); // Values between -1 and 1
167
+ * ```
168
+ *
169
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.uniform.html | NumPy random.uniform}
170
+ */
171
+ declare function uniform(low?: number, high?: number, shape?: Shape, opts?: RandomOptions): Tensor;
172
+ /**
173
+ * Random samples from normal (Gaussian) distribution.
174
+ *
175
+ * @param mean - Mean of distribution (default: 0)
176
+ * @param std - Standard deviation (default: 1)
177
+ * @param shape - Output shape
178
+ * @param opts - Options
179
+ *
180
+ * @throws {InvalidParameterError} When mean or std is not finite
181
+ * @throws {InvalidParameterError} When std < 0
182
+ *
183
+ * @remarks
184
+ * - Uses Box-Muller transform internally.
185
+ * - All values are finite due to RNG resolution (no infinities from log(0)).
186
+ * - std=0 produces constant values equal to mean.
187
+ * - Deterministic when seed is set via {@link setSeed}.
188
+ * - Only float32 and float64 dtypes are supported.
189
+ *
190
+ * @example
191
+ * ```js
192
+ * import { normal } from 'deepbox/random';
193
+ *
194
+ * const x = normal(0, 2, [100]); // Mean 0, std 2
195
+ * ```
196
+ *
197
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.normal.html | NumPy random.normal}
198
+ */
199
+ declare function normal(mean?: number, std?: number, shape?: Shape, opts?: RandomOptions): Tensor;
200
+ /**
201
+ * Random samples from binomial distribution.
202
+ *
203
+ * @param n - Number of trials (non-negative integer)
204
+ * @param p - Probability of success (in [0, 1])
205
+ * @param shape - Output shape
206
+ * @param opts - Options
207
+ *
208
+ * @throws {InvalidParameterError} When n is not finite, not an integer, or < 0
209
+ * @throws {InvalidParameterError} When p is not finite or not in [0, 1]
210
+ *
211
+ * @remarks
212
+ * - Generates number of successes in n independent Bernoulli trials.
213
+ * - Uses an exact geometric waiting-time method for small means and
214
+ * a mode-centered chop-down inversion for larger means.
215
+ * - Results are in range [0, n].
216
+ * - Deterministic when seed is set via {@link setSeed}.
217
+ * - Only int32 and int64 dtypes are supported.
218
+ *
219
+ * @example
220
+ * ```js
221
+ * import { binomial } from 'deepbox/random';
222
+ *
223
+ * const x = binomial(10, 0.5, [100]); // 10 coin flips, 100 times
224
+ * ```
225
+ *
226
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.binomial.html | NumPy random.binomial}
227
+ */
228
+ declare function binomial(n: number, p: number, shape?: Shape, opts?: RandomOptions): Tensor;
229
+ /**
230
+ * Random samples from Poisson distribution.
231
+ *
232
+ * @param lambda - Expected number of events (rate, must be >= 0)
233
+ * @param shape - Output shape
234
+ * @param opts - Options
235
+ *
236
+ * @throws {InvalidParameterError} When lambda is not finite or < 0
237
+ *
238
+ * @remarks
239
+ * - Uses Knuth's method for lambda < 30, transformed rejection for lambda >= 30.
240
+ * - Stable and efficient for all lambda values (tested up to lambda=1000+).
241
+ * - lambda=0 always produces 0.
242
+ * - Deterministic when seed is set via {@link setSeed}.
243
+ * - Only int32 and int64 dtypes are supported.
244
+ *
245
+ * @example
246
+ * ```js
247
+ * import { poisson } from 'deepbox/random';
248
+ *
249
+ * const x = poisson(5, [100]); // Rate = 5 events
250
+ * ```
251
+ *
252
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.poisson.html | NumPy random.poisson}
253
+ */
254
+ declare function poisson(lambda: number, shape?: Shape, opts?: RandomOptions): Tensor;
255
+ /**
256
+ * Random samples from exponential distribution.
257
+ *
258
+ * @param scale - Scale parameter (1/lambda, default: 1, must be > 0)
259
+ * @param shape - Output shape
260
+ * @param opts - Options
261
+ *
262
+ * @throws {InvalidParameterError} When scale is not finite or <= 0
263
+ *
264
+ * @remarks
265
+ * - Uses inverse transform sampling: -scale * log(U).
266
+ * - All values are positive (u=0 is avoided to prevent infinities).
267
+ * - Mean = scale, variance = scale^2.
268
+ * - Deterministic when seed is set via {@link setSeed}.
269
+ * - Only float32 and float64 dtypes are supported.
270
+ *
271
+ * @example
272
+ * ```js
273
+ * import { exponential } from 'deepbox/random';
274
+ *
275
+ * const x = exponential(2, [100]);
276
+ * ```
277
+ *
278
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.exponential.html | NumPy random.exponential}
279
+ */
280
+ declare function exponential(scale?: number, shape?: Shape, opts?: RandomOptions): Tensor;
281
+ /**
282
+ * Random samples from gamma distribution.
283
+ *
284
+ * @param shape_param - Shape parameter (k, must be > 0)
285
+ * @param scale - Scale parameter (theta, default: 1, must be > 0)
286
+ * @param shape - Output shape
287
+ * @param opts - Options
288
+ *
289
+ * @throws {InvalidParameterError} When shape_param is not finite or <= 0
290
+ * @throws {InvalidParameterError} When scale is not finite or <= 0
291
+ *
292
+ * @remarks
293
+ * - Uses Marsaglia and Tsang's method (2000) for efficient sampling.
294
+ * - All values are positive.
295
+ * - Mean = shape_param * scale, variance = shape_param * scale^2.
296
+ * - For shape_param < 1, uses a transformation to handle the case.
297
+ * - Deterministic when seed is set via {@link setSeed}.
298
+ * - Only float32 and float64 dtypes are supported.
299
+ *
300
+ * @example
301
+ * ```js
302
+ * import { gamma } from 'deepbox/random';
303
+ *
304
+ * const x = gamma(2, 2, [100]);
305
+ * ```
306
+ *
307
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.gamma.html | NumPy random.gamma}
308
+ */
309
+ declare function gamma(shape_param: number, scale?: number, shape?: Shape, opts?: RandomOptions): Tensor;
310
+ /**
311
+ * Random samples from beta distribution.
312
+ *
313
+ * @param alpha - Alpha parameter (must be > 0)
314
+ * @param beta_param - Beta parameter (must be > 0)
315
+ * @param shape - Output shape
316
+ * @param opts - Options
317
+ *
318
+ * @throws {InvalidParameterError} When alpha is not finite or <= 0
319
+ * @throws {InvalidParameterError} When beta_param is not finite or <= 0
320
+ *
321
+ * @remarks
322
+ * - Uses ratio of two gamma distributions: X / (X + Y).
323
+ * - All values are in the open interval (0, 1) up to floating-point rounding.
324
+ * - Mean = alpha / (alpha + beta), useful for modeling proportions.
325
+ * - Deterministic when seed is set via {@link setSeed}.
326
+ * - Only float32 and float64 dtypes are supported.
327
+ *
328
+ * @example
329
+ * ```js
330
+ * import { beta } from 'deepbox/random';
331
+ *
332
+ * const x = beta(2, 5, [100]);
333
+ * ```
334
+ *
335
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.beta.html | NumPy random.beta}
336
+ */
337
+ declare function beta(alpha: number, beta_param: number, shape?: Shape, opts?: RandomOptions): Tensor;
338
+ /**
339
+ * Random sample from array.
340
+ *
341
+ * @param a - Input array or integer (if integer, sample from arange(a))
342
+ * @param size - Number of samples or output shape
343
+ * @param replace - Whether to sample with replacement (default: true)
344
+ * @param p - Optional probability weights for weighted sampling
345
+ *
346
+ * @throws {InvalidParameterError} When population size is invalid (not finite, not integer, or < 0)
347
+ * @throws {InvalidParameterError} When size > population and replace is false
348
+ * @throws {InvalidParameterError} When tensor is not contiguous (offset !== 0 or non-standard strides)
349
+ * @throws {DTypeError} When input tensor has string dtype
350
+ *
351
+ * @remarks
352
+ * - Input tensor must be contiguous (no slicing/striding).
353
+ * - With replacement: can sample more elements than population size.
354
+ * - Without replacement: size must be <= population size.
355
+ * - Does NOT modify the input tensor (returns a new tensor).
356
+ * - Deterministic when seed is set via {@link setSeed}.
357
+ * - If `a` is a number, the population is `0..a-1` and output dtype is int32.
358
+ * - Numeric populations are limited to `a <= 2^31` for int32 output.
359
+ *
360
+ * @example
361
+ * ```js
362
+ * import { choice, tensor } from 'deepbox/random';
363
+ *
364
+ * const x = tensor([1, 2, 3, 4, 5]);
365
+ * const sample = choice(x, 3); // Pick 3 elements with replacement
366
+ *
367
+ * // Without replacement
368
+ * const unique = choice(x, 3, false); // All different elements
369
+ * ```
370
+ *
371
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.choice.html | NumPy random.choice}
372
+ */
373
+ declare function choice(a: Tensor | number, size?: number | Shape, replace?: boolean, p?: Tensor): Tensor;
374
+ /**
375
+ * Randomly shuffle array in-place.
376
+ *
377
+ * @param x - Input tensor (**MODIFIED IN-PLACE**)
378
+ *
379
+ * @throws {InvalidParameterError} When tensor is not contiguous (offset !== 0 or non-standard strides)
380
+ * @throws {DTypeError} When input tensor has string dtype
381
+ *
382
+ * @remarks
383
+ * - **WARNING: This function mutates the input tensor directly.**
384
+ * - Uses Fisher-Yates shuffle algorithm (O(n) time, optimal).
385
+ * - Input tensor must be contiguous (no slicing/striding).
386
+ * - All elements are preserved, only their order changes.
387
+ * - Deterministic when seed is set via {@link setSeed}.
388
+ * - If you need a shuffled copy without mutation, use {@link permutation} instead.
389
+ *
390
+ * @example
391
+ * ```js
392
+ * import { shuffle, tensor } from 'deepbox/random';
393
+ *
394
+ * const x = tensor([1, 2, 3, 4, 5]);
395
+ * shuffle(x); // x is now shuffled IN-PLACE
396
+ * console.log(x); // e.g., [3, 1, 5, 2, 4]
397
+ * ```
398
+ *
399
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.shuffle.html | NumPy random.shuffle}
400
+ */
401
+ declare function shuffle(x: Tensor): void;
402
+ /**
403
+ * Return random permutation of array.
404
+ *
405
+ * @param x - Input tensor or integer
406
+ *
407
+ * @throws {DTypeError} When input tensor has string dtype
408
+ *
409
+ * @remarks
410
+ * - Returns a NEW tensor (does NOT modify input).
411
+ * - If x is an integer, returns permutation of arange(x).
412
+ * - If x is a tensor, returns a shuffled copy with the same shape.
413
+ * - Tensor inputs must be contiguous (no slicing/striding).
414
+ * - Uses Fisher-Yates shuffle algorithm internally.
415
+ * - Deterministic when seed is set via {@link setSeed}.
416
+ * - Numeric input is limited to `x <= 2^31` for int32 output.
417
+ *
418
+ * @example
419
+ * ```js
420
+ * import { permutation, tensor } from 'deepbox/random';
421
+ *
422
+ * // Permutation of integers
423
+ * const x = permutation(10); // Random permutation of [0...9]
424
+ *
425
+ * // Permutation of tensor (does not modify original)
426
+ * const original = tensor([1, 2, 3, 4, 5]);
427
+ * const shuffled = permutation(original);
428
+ * // original is unchanged
429
+ * ```
430
+ *
431
+ * @see {@link https://numpy.org/doc/stable/reference/random/generated/numpy.random.permutation.html | NumPy random.permutation}
432
+ */
433
+ declare function permutation(x: Tensor | number): Tensor;
434
+
435
+ type index_RandomOptions = RandomOptions;
436
+ declare const index_beta: typeof beta;
437
+ declare const index_binomial: typeof binomial;
438
+ declare const index_choice: typeof choice;
439
+ declare const index_clearSeed: typeof clearSeed;
440
+ declare const index_exponential: typeof exponential;
441
+ declare const index_gamma: typeof gamma;
442
+ declare const index_getSeed: typeof getSeed;
443
+ declare const index_normal: typeof normal;
444
+ declare const index_permutation: typeof permutation;
445
+ declare const index_poisson: typeof poisson;
446
+ declare const index_rand: typeof rand;
447
+ declare const index_randint: typeof randint;
448
+ declare const index_randn: typeof randn;
449
+ declare const index_setSeed: typeof setSeed;
450
+ declare const index_shuffle: typeof shuffle;
451
+ declare const index_uniform: typeof uniform;
452
+ declare namespace index {
453
+ export { type index_RandomOptions as RandomOptions, index_beta as beta, index_binomial as binomial, index_choice as choice, index_clearSeed as clearSeed, index_exponential as exponential, index_gamma as gamma, index_getSeed as getSeed, index_normal as normal, index_permutation as permutation, index_poisson as poisson, index_rand as rand, index_randint as randint, index_randn as randn, index_setSeed as setSeed, index_shuffle as shuffle, index_uniform as uniform };
454
+ }
455
+
456
+ export { type RandomOptions as R, randn as a, randint as b, clearSeed as c, binomial as d, exponential as e, gamma as f, getSeed as g, beta as h, index as i, choice as j, shuffle as k, permutation as l, normal as n, poisson as p, rand as r, setSeed as s, uniform as u };
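
The declarations above describe a seed-then-sample workflow; the sketch below strings the pieces together. The `deepbox/random` import path matches the examples in the doc comments, and the comments restate documented behaviour (determinism, ranges, defaults) rather than observed output.

```ts
import { setSeed, clearSeed, rand, randint, normal, choice, permutation } from "deepbox/random";

setSeed(42);                        // enter deterministic mode
const a = rand([2, 3]);             // uniform values in [0, 1), float32 by default
const dice = randint(1, 7, [10]);   // integers in [1, 7), i.e. 1..6
const noise = normal(0, 2, [100]);  // mean 0, standard deviation 2

setSeed(42);
const b = rand([2, 3]);             // identical to `a`: same seed, same sequence

const picks = choice(10, 3, false); // 3 distinct values drawn from 0..9
const order = permutation(5);       // shuffled copy of [0, 1, 2, 3, 4]

clearSeed();                        // back to cryptographically secure sampling
```
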