@thi.ng/tensors 0.1.0 → 0.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +45 -1
- package/README.md +93 -34
- package/abs.d.ts +1 -29
- package/abs.js +2 -11
- package/add.d.ts +4 -35
- package/add.js +2 -11
- package/addn.d.ts +1 -33
- package/addn.js +2 -11
- package/api.d.ts +138 -15
- package/broadcast.d.ts +24 -0
- package/broadcast.js +54 -0
- package/clamp.d.ts +4 -38
- package/clamp.js +2 -11
- package/clampn.d.ts +1 -37
- package/clampn.js +2 -11
- package/cos.d.ts +1 -29
- package/cos.js +2 -11
- package/defopn.d.ts +4 -7
- package/defopn.js +17 -7
- package/defoprt.d.ts +5 -7
- package/defoprt.js +21 -11
- package/defoprtt.d.ts +7 -7
- package/defoprtt.js +30 -26
- package/defopt.d.ts +4 -7
- package/defopt.js +17 -20
- package/defoptn.d.ts +4 -7
- package/defoptn.js +17 -20
- package/defoptnn.d.ts +4 -7
- package/defoptnn.js +20 -16
- package/defoptt.d.ts +5 -6
- package/defoptt.js +36 -27
- package/defopttt.d.ts +5 -7
- package/defopttt.js +43 -37
- package/diagonal.d.ts +16 -0
- package/diagonal.js +18 -0
- package/div.d.ts +4 -35
- package/div.js +2 -11
- package/divn.d.ts +1 -33
- package/divn.js +2 -11
- package/dot.d.ts +4 -26
- package/dot.js +3 -12
- package/errors.d.ts +2 -0
- package/errors.js +3 -0
- package/exp.d.ts +1 -29
- package/exp.js +2 -11
- package/exp2.d.ts +1 -29
- package/exp2.js +2 -11
- package/filtered-indices.d.ts +34 -0
- package/filtered-indices.js +17 -0
- package/identity.d.ts +7 -0
- package/identity.js +3 -2
- package/index.d.ts +6 -0
- package/index.js +6 -0
- package/log.d.ts +1 -29
- package/log.js +2 -11
- package/log2.d.ts +1 -29
- package/log2.js +2 -11
- package/mag.d.ts +5 -0
- package/magsq.d.ts +1 -25
- package/magsq.js +3 -12
- package/max.d.ts +5 -30
- package/max.js +2 -11
- package/maxn.d.ts +1 -33
- package/maxn.js +2 -11
- package/min.d.ts +5 -30
- package/min.js +2 -11
- package/minn.d.ts +1 -33
- package/minn.js +2 -11
- package/mul.d.ts +4 -35
- package/mul.js +2 -11
- package/mulm.d.ts +1 -1
- package/mulm.js +21 -20
- package/muln.d.ts +1 -33
- package/muln.js +2 -11
- package/mulv.d.ts +1 -1
- package/mulv.js +16 -15
- package/normalize.d.ts +9 -1
- package/package.json +48 -4
- package/pow.d.ts +5 -30
- package/pow.js +2 -11
- package/pown.d.ts +1 -33
- package/pown.js +2 -11
- package/product.d.ts +1 -25
- package/product.js +5 -12
- package/rand-distrib.d.ts +13 -10
- package/rand-distrib.js +55 -23
- package/range.d.ts +20 -0
- package/range.js +28 -0
- package/relu.d.ts +1 -29
- package/relu.js +2 -11
- package/relun.d.ts +1 -33
- package/relun.js +2 -11
- package/select.d.ts +6 -6
- package/select.js +26 -23
- package/set.d.ts +1 -6
- package/set.js +3 -11
- package/setn.d.ts +1 -6
- package/setn.js +3 -11
- package/sigmoid.d.ts +1 -29
- package/sigmoid.js +2 -11
- package/sin.d.ts +1 -29
- package/sin.js +2 -11
- package/softmax.d.ts +1 -1
- package/softplus.d.ts +1 -33
- package/softplus.js +2 -11
- package/sqrt.d.ts +1 -29
- package/sqrt.js +2 -11
- package/step.d.ts +1 -33
- package/step.js +2 -11
- package/sub.d.ts +4 -35
- package/sub.js +2 -11
- package/subn.d.ts +1 -33
- package/subn.js +2 -11
- package/sum.d.ts +1 -25
- package/sum.js +5 -12
- package/svd.d.ts +33 -0
- package/svd.js +246 -0
- package/swap.d.ts +26 -0
- package/swap.js +15 -0
- package/tan.d.ts +1 -29
- package/tan.js +2 -11
- package/tanh.d.ts +1 -29
- package/tanh.js +2 -11
- package/tensor.d.ts +6 -1
- package/tensor.js +58 -21
- package/top.d.ts +2 -6
package/CHANGELOG.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Change Log
|
|
2
2
|
|
|
3
|
-
- **Last updated**: 2025-
|
|
3
|
+
- **Last updated**: 2025-05-08T15:13:02Z
|
|
4
4
|
- **Generator**: [thi.ng/monopub](https://thi.ng/monopub)
|
|
5
5
|
|
|
6
6
|
All notable changes to this project will be documented in this file.
|
|
@@ -11,6 +11,50 @@ See [Conventional Commits](https://conventionalcommits.org/) for commit guidelin
|
|
|
11
11
|
**Note:** Unlisted _patch_ versions only involve non-code or otherwise excluded changes
|
|
12
12
|
and/or version bumps of transitive dependencies.
|
|
13
13
|
|
|
14
|
+
## [0.3.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.3.0) (2025-05-08)
|
|
15
|
+
|
|
16
|
+
#### 🚀 Features
|
|
17
|
+
|
|
18
|
+
- add swap(), update defOpRT/TT() fns ([5d2dd2a](https://github.com/thi-ng/umbrella/commit/5d2dd2a))
|
|
19
|
+
- add range() tensor factory ([a8a6365](https://github.com/thi-ng/umbrella/commit/a8a6365))
|
|
20
|
+
- add ITensor.position() & impls ([595764d](https://github.com/thi-ng/umbrella/commit/595764d))
|
|
21
|
+
- add filteredIndices() & presets ([edd8983](https://github.com/thi-ng/umbrella/commit/edd8983))
|
|
22
|
+
- add nonZeroIndices()
|
|
23
|
+
- add negativeIndices(), positiveIndices()
|
|
24
|
+
|
|
25
|
+
#### 🩹 Bug fixes
|
|
26
|
+
|
|
27
|
+
- update ITensor.broadcast() return type ([f7b2e7a](https://github.com/thi-ng/umbrella/commit/f7b2e7a))
|
|
28
|
+
|
|
29
|
+
#### ♻️ Refactoring
|
|
30
|
+
|
|
31
|
+
- update range() impl (w/o dependencies) ([ce89d71](https://github.com/thi-ng/umbrella/commit/ce89d71))
|
|
32
|
+
|
|
33
|
+
## [0.2.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.2.0) (2025-05-02)
|
|
34
|
+
|
|
35
|
+
#### 🚀 Features
|
|
36
|
+
|
|
37
|
+
- add `zeroes()`/`ones()` ([eb6f82d](https://github.com/thi-ng/umbrella/commit/eb6f82d))
|
|
38
|
+
- add singular value decomp `svd()` ([8902157](https://github.com/thi-ng/umbrella/commit/8902157))
|
|
39
|
+
- add `diagonal()` and `trace()` fns ([47a0e73](https://github.com/thi-ng/umbrella/commit/47a0e73))
|
|
40
|
+
- add broadcast(), add ITensor.broadcast() ([b9b2dfc](https://github.com/thi-ng/umbrella/commit/b9b2dfc))
|
|
41
|
+
- update all tensor ops & op generators ([47b3e4e](https://github.com/thi-ng/umbrella/commit/47b3e4e))
|
|
42
|
+
- add broadcasting support where possible
|
|
43
|
+
- update all `defOpXXX` HOF generators
|
|
44
|
+
- replace & remove various `TensorOpXXX` function types
|
|
45
|
+
- update all tensor ops, only keep one version per op
|
|
46
|
+
- update doc strings
|
|
47
|
+
|
|
48
|
+
#### 🩹 Bug fixes
|
|
49
|
+
|
|
50
|
+
- add missing `randDistrib4()` ([eb2ded2](https://github.com/thi-ng/umbrella/commit/eb2ded2))
|
|
51
|
+
|
|
52
|
+
#### ♻️ Refactoring
|
|
53
|
+
|
|
54
|
+
- update internal local var handling in various tensor ops ([c02c2a6](https://github.com/thi-ng/umbrella/commit/c02c2a6))
|
|
55
|
+
- internal updates dot(), identity(), select() ([c0ed334](https://github.com/thi-ng/umbrella/commit/c0ed334))
|
|
56
|
+
- update top() return type ([dcab223](https://github.com/thi-ng/umbrella/commit/dcab223))
|
|
57
|
+
|
|
14
58
|
## [0.1.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.1.0) (2025-04-30)
|
|
15
59
|
|
|
16
60
|
#### 🚀 Features
|
package/README.md
CHANGED
|
@@ -16,6 +16,7 @@
|
|
|
16
16
|
|
|
17
17
|
- [About](#about)
|
|
18
18
|
- [Built-in tensor operations](#built-in-tensor-operations)
|
|
19
|
+
- [Broadcasting support](#broadcasting-support)
|
|
19
20
|
- [Status](#status)
|
|
20
21
|
- [Installation](#installation)
|
|
21
22
|
- [Dependencies](#dependencies)
|
|
@@ -43,51 +44,109 @@ listed below are also based on this approach. The function signatures and naming
|
|
|
43
44
|
conventions are closely aligned to the ones used by the
|
|
44
45
|
[thi.ng/vectors](https://thi.ng/vectors) package.
|
|
45
46
|
|
|
46
|
-
- [abs](https://docs.thi.ng/umbrella/tensors/variables/abs.html)
|
|
47
|
-
- [add](https://docs.thi.ng/umbrella/tensors/variables/add.html)
|
|
48
|
-
- [addN](https://docs.thi.ng/umbrella/tensors/variables/addN.html)
|
|
49
|
-
- [argMax](https://docs.thi.ng/umbrella/tensors/variables/argMax.html)
|
|
50
|
-
- [argMin](https://docs.thi.ng/umbrella/tensors/variables/argMin.html)
|
|
51
|
-
- [clamp](https://docs.thi.ng/umbrella/tensors/variables/clamp.html)
|
|
52
|
-
- [clampN](https://docs.thi.ng/umbrella/tensors/variables/clampN.html)
|
|
53
|
-
- [cos](https://docs.thi.ng/umbrella/tensors/variables/cos.html)
|
|
54
|
-
- [
|
|
55
|
-
- [
|
|
56
|
-
- [
|
|
57
|
-
- [
|
|
58
|
-
- [
|
|
59
|
-
- [
|
|
60
|
-
- [
|
|
61
|
-
- [
|
|
62
|
-
- [
|
|
63
|
-
- [
|
|
64
|
-
- [
|
|
65
|
-
- [
|
|
66
|
-
- [
|
|
67
|
-
- [
|
|
68
|
-
- [
|
|
47
|
+
- [abs](https://docs.thi.ng/umbrella/tensors/variables/abs.html): componentwise `Math.abs`
|
|
48
|
+
- [add](https://docs.thi.ng/umbrella/tensors/variables/add.html): tensor-tensor addition
|
|
49
|
+
- [addN](https://docs.thi.ng/umbrella/tensors/variables/addN.html): tensor-scalar addition
|
|
50
|
+
- [argMax](https://docs.thi.ng/umbrella/tensors/variables/argMax.html): maximum component index/value
|
|
51
|
+
- [argMin](https://docs.thi.ng/umbrella/tensors/variables/argMin.html): minimum component index/value
|
|
52
|
+
- [clamp](https://docs.thi.ng/umbrella/tensors/variables/clamp.html): tensor-tensor interval clamping
|
|
53
|
+
- [clampN](https://docs.thi.ng/umbrella/tensors/variables/clampN.html): tensor-scalar interval clamping
|
|
54
|
+
- [cos](https://docs.thi.ng/umbrella/tensors/variables/cos.html): componentwise `Math.cos`
|
|
55
|
+
- [diagonal](https://docs.thi.ng/umbrella/tensors/variables/diagonal.html): diagonal extraction
|
|
56
|
+
- [div](https://docs.thi.ng/umbrella/tensors/variables/div.html): tensor-tensor division
|
|
57
|
+
- [divN](https://docs.thi.ng/umbrella/tensors/variables/divN.html): tensor-scalar division
|
|
58
|
+
- [dot](https://docs.thi.ng/umbrella/tensors/variables/dot.html): dot product
|
|
59
|
+
- [exp](https://docs.thi.ng/umbrella/tensors/variables/exp.html): componentwise `Math.exp`
|
|
60
|
+
- [exp2](https://docs.thi.ng/umbrella/tensors/variables/exp2.html): componentwise `2^x`
|
|
61
|
+
- [log](https://docs.thi.ng/umbrella/tensors/variables/log.html): componentwise `Math.log`
|
|
62
|
+
- [log2](https://docs.thi.ng/umbrella/tensors/variables/log2.html): componentwise `Math.log2`
|
|
63
|
+
- [mag](https://docs.thi.ng/umbrella/tensors/variables/mag.html): tensor magnitude
|
|
64
|
+
- [magSq](https://docs.thi.ng/umbrella/tensors/variables/magSq.html): tensor squared magnitude
|
|
65
|
+
- [max](https://docs.thi.ng/umbrella/tensors/variables/max.html): tensor-tensor maximum
|
|
66
|
+
- [maxN](https://docs.thi.ng/umbrella/tensors/variables/maxN.html): tensor-scalar maximum
|
|
67
|
+
- [min](https://docs.thi.ng/umbrella/tensors/variables/min.html): tensor-tensor minimum
|
|
68
|
+
- [minN](https://docs.thi.ng/umbrella/tensors/variables/minN.html): tensor-scalar minimum
|
|
69
|
+
- [mul](https://docs.thi.ng/umbrella/tensors/variables/mul.html): tensor-tensor multiplication
|
|
70
|
+
- [mulN](https://docs.thi.ng/umbrella/tensors/variables/mulN.html): tensor-scalar multiplication
|
|
69
71
|
- [mulM](https://docs.thi.ng/umbrella/tensors/variables/mulM.html): matrix-matrix product
|
|
70
72
|
- [mulV](https://docs.thi.ng/umbrella/tensors/variables/mulV.html): matrix-vector product
|
|
71
|
-
- [normalize](https://docs.thi.ng/umbrella/tensors/variables/normalize.html)
|
|
72
|
-
- [pow](https://docs.thi.ng/umbrella/tensors/variables/pow.html)
|
|
73
|
-
- [powN](https://docs.thi.ng/umbrella/tensors/variables/powN.html)
|
|
73
|
+
- [normalize](https://docs.thi.ng/umbrella/tensors/variables/normalize.html): tensor normalization (w/ optional length)
|
|
74
|
+
- [pow](https://docs.thi.ng/umbrella/tensors/variables/pow.html): tensor-tensor `Math.pow`
|
|
75
|
+
- [powN](https://docs.thi.ng/umbrella/tensors/variables/powN.html): tensor-scalar `Math.pow`
|
|
74
76
|
- [product](https://docs.thi.ng/umbrella/tensors/variables/product.html): component product
|
|
75
|
-
- [randDistrib](https://docs.thi.ng/umbrella/tensors/variables/randDistrib.html)
|
|
77
|
+
- [randDistrib](https://docs.thi.ng/umbrella/tensors/variables/randDistrib.html): fill with random data from distribution fn
|
|
76
78
|
- [relu](https://docs.thi.ng/umbrella/tensors/variables/relu.html): ReLU activation
|
|
77
79
|
- [reluN](https://docs.thi.ng/umbrella/tensors/variables/reluN.html): leaky ReLU activation
|
|
78
80
|
- [select](https://docs.thi.ng/umbrella/tensors/variables/select.html): generalization of argMin/Max
|
|
79
81
|
- [set](https://docs.thi.ng/umbrella/tensors/variables/set.html): tensor setter
|
|
80
82
|
- [setN](https://docs.thi.ng/umbrella/tensors/variables/setN.html): tensor setter w/ uniform scalar
|
|
81
83
|
- [sigmoid](https://docs.thi.ng/umbrella/tensors/variables/sigmoid.html): Sigmoid activation
|
|
82
|
-
- [sin](https://docs.thi.ng/umbrella/tensors/variables/sin.html)
|
|
84
|
+
- [sin](https://docs.thi.ng/umbrella/tensors/variables/sin.html): componentwise `Math.sin`
|
|
83
85
|
- [softMax](https://docs.thi.ng/umbrella/tensors/variables/softMax.html): Soft Max activation
|
|
84
|
-
- [sqrt](https://docs.thi.ng/umbrella/tensors/variables/sqrt.html)
|
|
86
|
+
- [sqrt](https://docs.thi.ng/umbrella/tensors/variables/sqrt.html): componentwise `Math.sqrt`
|
|
85
87
|
- [step](https://docs.thi.ng/umbrella/tensors/variables/step.html): Threshold function (same as GLSL `step()`)
|
|
86
|
-
- [sub](https://docs.thi.ng/umbrella/tensors/variables/sub.html)
|
|
87
|
-
- [subN](https://docs.thi.ng/umbrella/tensors/variables/subN.html)
|
|
88
|
+
- [sub](https://docs.thi.ng/umbrella/tensors/variables/sub.html): tensor-tensor subtraction
|
|
89
|
+
- [subN](https://docs.thi.ng/umbrella/tensors/variables/subN.html): tensor-scalar subtraction
|
|
88
90
|
- [sum](https://docs.thi.ng/umbrella/tensors/variables/sum.html): component sum
|
|
89
|
-
- [
|
|
90
|
-
- [
|
|
91
|
+
- [svd](https://docs.thi.ng/umbrella/tensors/variables/svd.html): singular value decomposition
|
|
92
|
+
- [tan](https://docs.thi.ng/umbrella/tensors/variables/tan.html): componentwise `Math.tan`
|
|
93
|
+
- [tanh](https://docs.thi.ng/umbrella/tensors/variables/tanh.html): componentwise `Math.tanh`
|
|
94
|
+
- [trace](https://docs.thi.ng/umbrella/tensors/variables/trace.html): matrix trace (diagonal component sum)
|
|
95
|
+
|
|
96
|
+
### Broadcasting support
|
|
97
|
+
|
|
98
|
+
Most of the built-in functions taking two or more tensors as input
|
|
99
|
+
support broadcasting, i.e. the shapes of the individual arguments only need
|
|
100
|
+
to be compatible, not identical. The operators attempt to adjust the tensor
|
|
101
|
+
shape & stride configurations to be compatible, applying the steps and rules
|
|
102
|
+
below:
|
|
103
|
+
|
|
104
|
+
- If the dimensions are unequal, the smaller tensor's dimensions will be
|
|
105
|
+
increased as needed. The size of each added dimension will be set to 1 and its
|
|
106
|
+
stride set to zero.
|
|
107
|
+
- The size of each dimension will be compared and only the following cases are
|
|
108
|
+
accepted (otherwise will throw an error): sizes are equal or one side is 1
|
|
109
|
+
- Any of the tensors requiring shape adjustments will be shallow copied with
|
|
110
|
+
new shape/stride config applied.
|
|
111
|
+
|
|
112
|
+
Some examples:
|
|
113
|
+
|
|
114
|
+
```ts tangle:export/readme-broadcast.ts
|
|
115
|
+
import { add, sub, print, tensor } from "@thi.ng/tensors";
|
|
116
|
+
|
|
117
|
+
// 2D + 1D
|
|
118
|
+
print(add(null, tensor([[1,2], [3,4]]), tensor([10, 20])));
|
|
119
|
+
// 11.0000 22.0000
|
|
120
|
+
// 13.0000 24.0000
|
|
121
|
+
|
|
122
|
+
// 2D + 1D (as column vector)
|
|
123
|
+
print(add(null, tensor([[1, 2], [3, 4]]), tensor([10, 20]).reshape([2,1])));
|
|
124
|
+
// 11.0000 12.0000
|
|
125
|
+
// 23.0000 24.0000
|
|
126
|
+
|
|
127
|
+
// 1D - 2D
|
|
128
|
+
print(sub(null, tensor([10, 20]), tensor([[1,2], [3,4]])));
|
|
129
|
+
// 9.0000 18.0000
|
|
130
|
+
// 7.0000 16.0000
|
|
131
|
+
|
|
132
|
+
// 1D + 3D
|
|
133
|
+
print(add(null, tensor([10, 20]), tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])));
|
|
134
|
+
// --- 0: ---
|
|
135
|
+
// 11.0000 22.0000
|
|
136
|
+
// 13.0000 24.0000
|
|
137
|
+
// --- 1: ---
|
|
138
|
+
// 15.0000 26.0000
|
|
139
|
+
// 17.0000 28.0000
|
|
140
|
+
|
|
141
|
+
// 2D + 3D
|
|
142
|
+
print(add(null, tensor([[10, 20], [100, 200]]), tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])));
|
|
143
|
+
// --- 0: ---
|
|
144
|
+
// 11.0000 22.0000
|
|
145
|
+
// 103.0000 204.0000
|
|
146
|
+
// --- 1: ---
|
|
147
|
+
// 15.0000 26.0000
|
|
148
|
+
// 107.0000 208.0000
|
|
149
|
+
```
|
|
91
150
|
|
|
92
151
|
## Status
|
|
93
152
|
|
|
@@ -121,7 +180,7 @@ For Node.js REPL:
|
|
|
121
180
|
const ten = await import("@thi.ng/tensors");
|
|
122
181
|
```
|
|
123
182
|
|
|
124
|
-
Package sizes (brotli'd, pre-treeshake): ESM:
|
|
183
|
+
Package sizes (brotli'd, pre-treeshake): ESM: 8.06 KB
|
|
125
184
|
|
|
126
185
|
## Dependencies
|
|
127
186
|
|
package/abs.d.ts
CHANGED
|
@@ -5,33 +5,5 @@
|
|
|
5
5
|
* @param out - output tensor
|
|
6
6
|
* @param a - input tensor
|
|
7
7
|
*/
|
|
8
|
-
export declare const abs: import("./api.js").
|
|
9
|
-
/**
|
|
10
|
-
* Same as {@link abs} for 1D tensors.
|
|
11
|
-
*
|
|
12
|
-
* @param out - output tensor
|
|
13
|
-
* @param a - input tensor
|
|
14
|
-
*/
|
|
15
|
-
export declare const abs1: import("./api.js").TensorOpT<number, number, import("./tensor.js").Tensor1<number>, import("./tensor.js").Tensor1<number>>;
|
|
16
|
-
/**
|
|
17
|
-
* Same as {@link abs} for 2D tensors.
|
|
18
|
-
*
|
|
19
|
-
* @param out - output tensor
|
|
20
|
-
* @param a - input tensor
|
|
21
|
-
*/
|
|
22
|
-
export declare const abs2: import("./api.js").TensorOpT<number, number, import("./tensor.js").Tensor2<number>, import("./tensor.js").Tensor2<number>>;
|
|
23
|
-
/**
|
|
24
|
-
* Same as {@link abs} for 3D tensors.
|
|
25
|
-
*
|
|
26
|
-
* @param out - output tensor
|
|
27
|
-
* @param a - input tensor
|
|
28
|
-
*/
|
|
29
|
-
export declare const abs3: import("./api.js").TensorOpT<number, number, import("./tensor.js").Tensor3<number>, import("./tensor.js").Tensor3<number>>;
|
|
30
|
-
/**
|
|
31
|
-
* Same as {@link abs} for 4D tensors.
|
|
32
|
-
*
|
|
33
|
-
* @param out - output tensor
|
|
34
|
-
* @param a - input tensor
|
|
35
|
-
*/
|
|
36
|
-
export declare const abs4: import("./api.js").TensorOpT<number, number, import("./tensor.js").Tensor4<number>, import("./tensor.js").Tensor4<number>>;
|
|
8
|
+
export declare const abs: import("./api.js").MultiTensorOpImpl<import("./api.js").TensorOpT<number>>;
|
|
37
9
|
//# sourceMappingURL=abs.d.ts.map
|
package/abs.js
CHANGED
|
@@ -1,14 +1,5 @@
|
|
|
1
1
|
import { defOpT } from "./defopt.js";
|
|
2
|
-
const
|
|
3
|
-
const abs = a;
|
|
4
|
-
const abs1 = b;
|
|
5
|
-
const abs2 = c;
|
|
6
|
-
const abs3 = d;
|
|
7
|
-
const abs4 = e;
|
|
2
|
+
const abs = defOpT(Math.abs);
|
|
8
3
|
export {
|
|
9
|
-
abs
|
|
10
|
-
abs1,
|
|
11
|
-
abs2,
|
|
12
|
-
abs3,
|
|
13
|
-
abs4
|
|
4
|
+
abs
|
|
14
5
|
};
|
package/add.d.ts
CHANGED
|
@@ -1,42 +1,11 @@
|
|
|
1
1
|
/**
|
|
2
2
|
* Componentwise nD tensor addition. Writes result to `out`. If `out` is null,
|
|
3
|
-
*
|
|
3
|
+
* creates a new tensor using `a`'s type and storage provider and shape as
|
|
4
|
+
* determined by broadcasting rules (see {@link broadcast} for details).
|
|
4
5
|
*
|
|
5
6
|
* @param out - output tensor
|
|
6
7
|
* @param a - input tensor
|
|
7
|
-
* @param
|
|
8
|
+
* @param b - input tensor
|
|
8
9
|
*/
|
|
9
|
-
export declare const add: import("./api.js").
|
|
10
|
-
/**
|
|
11
|
-
* Same as {@link add} for 1D tensors.
|
|
12
|
-
*
|
|
13
|
-
* @param out - output tensor
|
|
14
|
-
* @param a - input tensor
|
|
15
|
-
* @param n - scalar
|
|
16
|
-
*/
|
|
17
|
-
export declare const add1: import("./api.js").TensorOpTT<number, number, import("./tensor.js").Tensor1<number>, import("./tensor.js").Tensor1<number>>;
|
|
18
|
-
/**
|
|
19
|
-
* Same as {@link add} for 2D tensors.
|
|
20
|
-
*
|
|
21
|
-
* @param out - output tensor
|
|
22
|
-
* @param a - input tensor
|
|
23
|
-
* @param n - scalar
|
|
24
|
-
*/
|
|
25
|
-
export declare const add2: import("./api.js").TensorOpTT<number, number, import("./tensor.js").Tensor2<number>, import("./tensor.js").Tensor2<number>>;
|
|
26
|
-
/**
|
|
27
|
-
* Same as {@link add} for 3D tensors.
|
|
28
|
-
*
|
|
29
|
-
* @param out - output tensor
|
|
30
|
-
* @param a - input tensor
|
|
31
|
-
* @param n - scalar
|
|
32
|
-
*/
|
|
33
|
-
export declare const add3: import("./api.js").TensorOpTT<number, number, import("./tensor.js").Tensor3<number>, import("./tensor.js").Tensor3<number>>;
|
|
34
|
-
/**
|
|
35
|
-
* Same as {@link add} for 4D tensors.
|
|
36
|
-
*
|
|
37
|
-
* @param out - output tensor
|
|
38
|
-
* @param a - input tensor
|
|
39
|
-
* @param n - scalar
|
|
40
|
-
*/
|
|
41
|
-
export declare const add4: import("./api.js").TensorOpTT<number, number, import("./tensor.js").Tensor4<number>, import("./tensor.js").Tensor4<number>>;
|
|
10
|
+
export declare const add: import("./api.js").TensorOpTT<number>;
|
|
42
11
|
//# sourceMappingURL=add.d.ts.map
|
package/add.js
CHANGED
|
@@ -1,15 +1,6 @@
|
|
|
1
1
|
import { $add } from "@thi.ng/vectors/ops";
|
|
2
2
|
import { defOpTT } from "./defoptt.js";
|
|
3
|
-
const
|
|
4
|
-
const add = a;
|
|
5
|
-
const add1 = b;
|
|
6
|
-
const add2 = c;
|
|
7
|
-
const add3 = d;
|
|
8
|
-
const add4 = e;
|
|
3
|
+
const add = defOpTT($add);
|
|
9
4
|
export {
|
|
10
|
-
add
|
|
11
|
-
add1,
|
|
12
|
-
add2,
|
|
13
|
-
add3,
|
|
14
|
-
add4
|
|
5
|
+
add
|
|
15
6
|
};
|
package/addn.d.ts
CHANGED
|
@@ -6,37 +6,5 @@
|
|
|
6
6
|
* @param a - input tensor
|
|
7
7
|
* @param n - scalar
|
|
8
8
|
*/
|
|
9
|
-
export declare const addN: import("./api.js").
|
|
10
|
-
/**
|
|
11
|
-
* Same as {@link addN} for 1D tensors.
|
|
12
|
-
*
|
|
13
|
-
* @param out - output tensor
|
|
14
|
-
* @param a - input tensor
|
|
15
|
-
* @param n - scalar
|
|
16
|
-
*/
|
|
17
|
-
export declare const addN1: import("./api.js").TensorOpTN<number, number, import("./tensor.js").Tensor1<number>, import("./tensor.js").Tensor1<number>>;
|
|
18
|
-
/**
|
|
19
|
-
* Same as {@link addN} for 2D tensors.
|
|
20
|
-
*
|
|
21
|
-
* @param out - output tensor
|
|
22
|
-
* @param a - input tensor
|
|
23
|
-
* @param n - scalar
|
|
24
|
-
*/
|
|
25
|
-
export declare const addN2: import("./api.js").TensorOpTN<number, number, import("./tensor.js").Tensor2<number>, import("./tensor.js").Tensor2<number>>;
|
|
26
|
-
/**
|
|
27
|
-
* Same as {@link addN} for 3D tensors.
|
|
28
|
-
*
|
|
29
|
-
* @param out - output tensor
|
|
30
|
-
* @param a - input tensor
|
|
31
|
-
* @param n - scalar
|
|
32
|
-
*/
|
|
33
|
-
export declare const addN3: import("./api.js").TensorOpTN<number, number, import("./tensor.js").Tensor3<number>, import("./tensor.js").Tensor3<number>>;
|
|
34
|
-
/**
|
|
35
|
-
* Same as {@link addN} for 4D tensors.
|
|
36
|
-
*
|
|
37
|
-
* @param out - output tensor
|
|
38
|
-
* @param a - input tensor
|
|
39
|
-
* @param n - scalar
|
|
40
|
-
*/
|
|
41
|
-
export declare const addN4: import("./api.js").TensorOpTN<number, number, import("./tensor.js").Tensor4<number>, import("./tensor.js").Tensor4<number>>;
|
|
9
|
+
export declare const addN: import("./api.js").MultiTensorOpImpl<import("./api.js").TensorOpTN<number>>;
|
|
42
10
|
//# sourceMappingURL=addn.d.ts.map
|
package/addn.js
CHANGED
|
@@ -1,15 +1,6 @@
|
|
|
1
1
|
import { $add } from "@thi.ng/vectors/ops";
|
|
2
2
|
import { defOpTN } from "./defoptn.js";
|
|
3
|
-
const
|
|
4
|
-
const addN = a;
|
|
5
|
-
const addN1 = b;
|
|
6
|
-
const addN2 = c;
|
|
7
|
-
const addN3 = d;
|
|
8
|
-
const addN4 = e;
|
|
3
|
+
const addN = defOpTN($add);
|
|
9
4
|
export {
|
|
10
|
-
addN
|
|
11
|
-
addN1,
|
|
12
|
-
addN2,
|
|
13
|
-
addN3,
|
|
14
|
-
addN4
|
|
5
|
+
addN
|
|
15
6
|
};
|
package/api.d.ts
CHANGED
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
import type { Type as $Type,
|
|
1
|
+
import type { Type as $Type, ICopy, IEqualsDelta, IEquiv, ILength, Maybe, NumericArray } from "@thi.ng/api";
|
|
2
2
|
import type { Tensor1, Tensor2, Tensor3, Tensor4 } from "./tensor.js";
|
|
3
3
|
export interface TensorData<T = number> extends Iterable<T>, ILength {
|
|
4
4
|
[id: number]: T;
|
|
@@ -71,8 +71,37 @@ export interface ITensor<T = number> extends ICopy<ITensor<T>>, IEquiv, IEqualsD
|
|
|
71
71
|
orderedShape: number[];
|
|
72
72
|
orderedStride: number[];
|
|
73
73
|
[Symbol.iterator](): IterableIterator<T>;
|
|
74
|
+
/**
|
|
75
|
+
* Internal use only. Creates a shallow view used for broadcasting
|
|
76
|
+
* operators. See {@link broadcast} for details.
|
|
77
|
+
*
|
|
78
|
+
* @param shape
|
|
79
|
+
* @param stride
|
|
80
|
+
*
|
|
81
|
+
* @internal
|
|
82
|
+
*/
|
|
83
|
+
broadcast<S extends Shape>(shape: S, stride: S): ShapeTensor<S, T>;
|
|
74
84
|
empty(storage?: ITensorStorage<T>): this;
|
|
85
|
+
/**
|
|
86
|
+
* Computes linear array index from given grid position. Reverse-op of
|
|
87
|
+
* {@link ITensor.position}.
|
|
88
|
+
*
|
|
89
|
+
* @param pos
|
|
90
|
+
*/
|
|
75
91
|
index(pos: NumericArray): number;
|
|
92
|
+
/**
|
|
93
|
+
* Computes nD grid position for given linear array index. Reverse-op of
|
|
94
|
+
* {@link ITensor.index}.
|
|
95
|
+
*
|
|
96
|
+
* @remarks
|
|
97
|
+
* **CAUTION:** Currently only supports tensors with positive strides,
|
|
98
|
+
* otherwise will yield incorrect results! Tensors with negative strides
|
|
99
|
+
* (aka flipped axes in reverse order) need to be first packed via
|
|
100
|
+
* {@link ITensor.pack}.
|
|
101
|
+
*
|
|
102
|
+
* @param index
|
|
103
|
+
*/
|
|
104
|
+
position(index: number): number[];
|
|
76
105
|
get(pos: NumericArray): T;
|
|
77
106
|
set(pos: NumericArray, value: T): this;
|
|
78
107
|
lo(pos: NumericArray): this;
|
|
@@ -94,12 +123,114 @@ export interface ITensorStorage<T> {
|
|
|
94
123
|
release(buf: TensorData<T>): boolean;
|
|
95
124
|
}
|
|
96
125
|
export type StorageRegistry = Record<Type, ITensorStorage<any>>;
|
|
97
|
-
export
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
126
|
+
export interface TensorOpT<T = number> {
|
|
127
|
+
(out: Tensor1<T> | null, a: Tensor1<T>): Tensor1<T>;
|
|
128
|
+
(out: Tensor2<T> | null, a: Tensor2<T>): Tensor2<T>;
|
|
129
|
+
(out: Tensor3<T> | null, a: Tensor3<T>): Tensor3<T>;
|
|
130
|
+
(out: Tensor4<T> | null, a: Tensor4<T>): Tensor4<T>;
|
|
131
|
+
}
|
|
132
|
+
export interface TensorOpTT<T = number> {
|
|
133
|
+
(out: Tensor1<T> | null, a: Tensor1<T>, b: Tensor1<T>): Tensor1<T>;
|
|
134
|
+
(out: Tensor2<T> | null, a: Tensor1<T>, b: Tensor2<T>): Tensor2<T>;
|
|
135
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor3<T>): Tensor3<T>;
|
|
136
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor4<T>): Tensor4<T>;
|
|
137
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor1<T>): Tensor2<T>;
|
|
138
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor2<T>): Tensor2<T>;
|
|
139
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor3<T>): Tensor3<T>;
|
|
140
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor4<T>): Tensor4<T>;
|
|
141
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor1<T>): Tensor3<T>;
|
|
142
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor2<T>): Tensor3<T>;
|
|
143
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor3<T>): Tensor3<T>;
|
|
144
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor4<T>): Tensor4<T>;
|
|
145
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor1<T>): Tensor4<T>;
|
|
146
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor2<T>): Tensor4<T>;
|
|
147
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor3<T>): Tensor4<T>;
|
|
148
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor4<T>): Tensor4<T>;
|
|
149
|
+
}
|
|
150
|
+
export interface TensorOpTTT<T = number> {
|
|
151
|
+
(out: Tensor1<T> | null, a: Tensor1<T>, b: Tensor1<T>, c: Tensor1<T>): Tensor1<T>;
|
|
152
|
+
(out: Tensor2<T> | null, a: Tensor1<T>, b: Tensor1<T>, c: Tensor2<T>): Tensor2<T>;
|
|
153
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor1<T>, c: Tensor3<T>): Tensor3<T>;
|
|
154
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor1<T>, c: Tensor4<T>): Tensor4<T>;
|
|
155
|
+
(out: Tensor2<T> | null, a: Tensor1<T>, b: Tensor2<T>, c: Tensor1<T>): Tensor2<T>;
|
|
156
|
+
(out: Tensor2<T> | null, a: Tensor1<T>, b: Tensor2<T>, c: Tensor2<T>): Tensor2<T>;
|
|
157
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor2<T>, c: Tensor3<T>): Tensor3<T>;
|
|
158
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor2<T>, c: Tensor4<T>): Tensor4<T>;
|
|
159
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor3<T>, c: Tensor1<T>): Tensor3<T>;
|
|
160
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor3<T>, c: Tensor2<T>): Tensor3<T>;
|
|
161
|
+
(out: Tensor3<T> | null, a: Tensor1<T>, b: Tensor3<T>, c: Tensor3<T>): Tensor3<T>;
|
|
162
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor3<T>, c: Tensor4<T>): Tensor4<T>;
|
|
163
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor4<T>, c: Tensor1<T>): Tensor4<T>;
|
|
164
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor4<T>, c: Tensor2<T>): Tensor4<T>;
|
|
165
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor4<T>, c: Tensor3<T>): Tensor4<T>;
|
|
166
|
+
(out: Tensor4<T> | null, a: Tensor1<T>, b: Tensor4<T>, c: Tensor4<T>): Tensor4<T>;
|
|
167
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor1<T>, c: Tensor1<T>): Tensor2<T>;
|
|
168
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor1<T>, c: Tensor2<T>): Tensor2<T>;
|
|
169
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor1<T>, c: Tensor3<T>): Tensor3<T>;
|
|
170
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor1<T>, c: Tensor4<T>): Tensor4<T>;
|
|
171
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor2<T>, c: Tensor1<T>): Tensor2<T>;
|
|
172
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, b: Tensor2<T>, c: Tensor2<T>): Tensor2<T>;
|
|
173
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor2<T>, c: Tensor3<T>): Tensor3<T>;
|
|
174
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor2<T>, c: Tensor4<T>): Tensor4<T>;
|
|
175
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor3<T>, c: Tensor1<T>): Tensor3<T>;
|
|
176
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor3<T>, c: Tensor2<T>): Tensor3<T>;
|
|
177
|
+
(out: Tensor3<T> | null, a: Tensor2<T>, b: Tensor3<T>, c: Tensor3<T>): Tensor3<T>;
|
|
178
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor3<T>, c: Tensor4<T>): Tensor4<T>;
|
|
179
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor4<T>, c: Tensor1<T>): Tensor4<T>;
|
|
180
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor4<T>, c: Tensor2<T>): Tensor4<T>;
|
|
181
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor4<T>, c: Tensor3<T>): Tensor4<T>;
|
|
182
|
+
(out: Tensor4<T> | null, a: Tensor2<T>, b: Tensor4<T>, c: Tensor4<T>): Tensor4<T>;
|
|
183
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor1<T>, c: Tensor1<T>): Tensor3<T>;
|
|
184
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor1<T>, c: Tensor2<T>): Tensor3<T>;
|
|
185
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor1<T>, c: Tensor3<T>): Tensor3<T>;
|
|
186
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor1<T>, c: Tensor4<T>): Tensor4<T>;
|
|
187
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor2<T>, c: Tensor1<T>): Tensor3<T>;
|
|
188
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor2<T>, c: Tensor2<T>): Tensor3<T>;
|
|
189
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor2<T>, c: Tensor3<T>): Tensor3<T>;
|
|
190
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor2<T>, c: Tensor4<T>): Tensor4<T>;
|
|
191
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor3<T>, c: Tensor1<T>): Tensor3<T>;
|
|
192
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor3<T>, c: Tensor2<T>): Tensor3<T>;
|
|
193
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, b: Tensor3<T>, c: Tensor3<T>): Tensor3<T>;
|
|
194
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor3<T>, c: Tensor4<T>): Tensor4<T>;
|
|
195
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor4<T>, c: Tensor1<T>): Tensor4<T>;
|
|
196
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor4<T>, c: Tensor2<T>): Tensor4<T>;
|
|
197
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor4<T>, c: Tensor3<T>): Tensor4<T>;
|
|
198
|
+
(out: Tensor4<T> | null, a: Tensor3<T>, b: Tensor4<T>, c: Tensor4<T>): Tensor4<T>;
|
|
199
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor1<T>, c: Tensor1<T>): Tensor4<T>;
|
|
200
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor1<T>, c: Tensor2<T>): Tensor4<T>;
|
|
201
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor1<T>, c: Tensor3<T>): Tensor4<T>;
|
|
202
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor1<T>, c: Tensor4<T>): Tensor4<T>;
|
|
203
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor2<T>, c: Tensor1<T>): Tensor4<T>;
|
|
204
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor2<T>, c: Tensor2<T>): Tensor4<T>;
|
|
205
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor2<T>, c: Tensor3<T>): Tensor4<T>;
|
|
206
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor2<T>, c: Tensor4<T>): Tensor4<T>;
|
|
207
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor3<T>, c: Tensor1<T>): Tensor4<T>;
|
|
208
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor3<T>, c: Tensor2<T>): Tensor4<T>;
|
|
209
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor3<T>, c: Tensor3<T>): Tensor4<T>;
|
|
210
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor3<T>, c: Tensor4<T>): Tensor4<T>;
|
|
211
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor4<T>, c: Tensor1<T>): Tensor4<T>;
|
|
212
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor4<T>, c: Tensor2<T>): Tensor4<T>;
|
|
213
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor4<T>, c: Tensor3<T>): Tensor4<T>;
|
|
214
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, b: Tensor4<T>, c: Tensor4<T>): Tensor4<T>;
|
|
215
|
+
}
|
|
216
|
+
export interface TensorOpN<A = number, B = A> {
|
|
217
|
+
(out: Tensor1<B>, n: A): Tensor1<B>;
|
|
218
|
+
(out: Tensor2<B>, n: A): Tensor2<B>;
|
|
219
|
+
(out: Tensor3<B>, n: A): Tensor3<B>;
|
|
220
|
+
(out: Tensor4<B>, n: A): Tensor4<B>;
|
|
221
|
+
}
|
|
222
|
+
export interface TensorOpTN<T = number> {
|
|
223
|
+
(out: Tensor1<T> | null, a: Tensor1<T>, n: T): Tensor1<T>;
|
|
224
|
+
(out: Tensor2<T> | null, a: Tensor2<T>, n: T): Tensor2<T>;
|
|
225
|
+
(out: Tensor3<T> | null, a: Tensor3<T>, n: T): Tensor3<T>;
|
|
226
|
+
(out: Tensor4<T> | null, a: Tensor4<T>, n: T): Tensor4<T>;
|
|
227
|
+
}
|
|
228
|
+
export interface TensorOpTNN<T = number> {
|
|
229
|
+
(out: Tensor1<T>, a: Tensor1<T>, n: T, m: T): Tensor1<T>;
|
|
230
|
+
(out: Tensor2<T>, a: Tensor2<T>, n: T, m: T): Tensor2<T>;
|
|
231
|
+
(out: Tensor3<T>, a: Tensor3<T>, n: T, m: T): Tensor3<T>;
|
|
232
|
+
(out: Tensor4<T>, a: Tensor4<T>, n: T, m: T): Tensor4<T>;
|
|
233
|
+
}
|
|
103
234
|
export type TensorOpRT<A, B, TA extends ITensor<A> = ITensor<A>> = (a: TA) => B;
|
|
104
235
|
export type TensorOpRTT<A, B, TA extends ITensor<A> = ITensor<A>> = (a: TA, b: TA) => B;
|
|
105
236
|
export interface MultiTensorOp<TOP> {
|
|
@@ -126,12 +257,4 @@ export interface MultiTensorOp<TOP> {
|
|
|
126
257
|
impl(dim?: number): Maybe<TOP>;
|
|
127
258
|
}
|
|
128
259
|
export type MultiTensorOpImpl<T> = T & MultiTensorOp<T>;
|
|
129
|
-
export type MultiTensorOpN<A = number, B = A> = MultiTensorOpImpl<TensorOpN<A, B>>;
|
|
130
|
-
export type MultiTensorOpT<A = number, B = A> = MultiTensorOpImpl<TensorOpT<A, B>>;
|
|
131
|
-
export type MultiTensorOpTN<A = number, B = A> = MultiTensorOpImpl<TensorOpTN<A, B>>;
|
|
132
|
-
export type MultiTensorOpTNN<A = number, B = A> = MultiTensorOpImpl<TensorOpTNN<A, B>>;
|
|
133
|
-
export type MultiTensorOpTT<A = number, B = A> = MultiTensorOpImpl<TensorOpTT<A, B>>;
|
|
134
|
-
export type MultiTensorOpTTT<A = number, B = A> = MultiTensorOpImpl<TensorOpTTT<A, B>>;
|
|
135
|
-
export type MultiTensorOpRT<A = number, B = A> = MultiTensorOpImpl<TensorOpRT<A, B>>;
|
|
136
|
-
export type MultiTensorOpRTT<A = number, B = A> = MultiTensorOpImpl<TensorOpRTT<A, B>>;
|
|
137
260
|
//# sourceMappingURL=api.d.ts.map
|
package/broadcast.d.ts
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
import type { ITensor, Shape } from "./api.js";
|
|
2
|
+
/**
|
|
3
|
+
* Helper function for various tensor operators which support broadcasting.
|
|
4
|
+
* Takes two tensors and attempts to adjust their shape & stride configurations
|
|
5
|
+
* to be compatible, applying the steps and rules below. Returns an object of
|
|
6
|
+
* adjusted shape and possibly adjusted shallow copies of both tensors.
|
|
7
|
+
*
|
|
8
|
+
* - if the dimensions are unequal, the smaller tensor's dimensions will be
|
|
9
|
+
* increased as needed. The size of each added dimension will be set to 1 and
|
|
10
|
+
* its stride to zero.
|
|
11
|
+
* - Size of each dimension will be compared and only the following cases are
|
|
12
|
+
* accepted (otherwise will throw an error): sizes are equal or one side is 1
|
|
13
|
+
* - Any of the tensors requiring shape adjustments will be shallow copied with
|
|
14
|
+
* new shape/stride config applied (via {@link ITensor.broadcast}).
|
|
15
|
+
*
|
|
16
|
+
* @param a
|
|
17
|
+
* @param b
|
|
18
|
+
*/
|
|
19
|
+
export declare const broadcast: <T = number>(a: ITensor<T>, b: ITensor<T>) => {
|
|
20
|
+
shape: Shape;
|
|
21
|
+
a: ITensor<T> | import("./tensor.js").Tensor4<T> | import("./tensor.js").Tensor3<T> | import("./tensor.js").Tensor2<T> | import("./tensor.js").Tensor1<T>;
|
|
22
|
+
b: ITensor<T> | import("./tensor.js").Tensor4<T> | import("./tensor.js").Tensor3<T> | import("./tensor.js").Tensor2<T> | import("./tensor.js").Tensor1<T>;
|
|
23
|
+
};
|
|
24
|
+
//# sourceMappingURL=broadcast.d.ts.map
|