@thi.ng/tensors 0.4.1 → 0.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +23 -1
- package/README.md +39 -2
- package/api.d.ts +38 -0
- package/apply-kernel.d.ts +28 -0
- package/apply-kernel.js +163 -0
- package/convert.d.ts +23 -0
- package/convert.js +22 -0
- package/convolve.d.ts +20 -0
- package/convolve.js +140 -0
- package/defoprt.d.ts +5 -5
- package/defoprt.js +6 -5
- package/defoprtt.d.ts +7 -6
- package/defoprtt.js +6 -5
- package/errors.d.ts +1 -1
- package/index.d.ts +5 -0
- package/index.js +5 -0
- package/kernels.d.ts +77 -0
- package/kernels.js +117 -0
- package/mean.d.ts +8 -0
- package/mean.js +5 -0
- package/normalize.d.ts +1 -1
- package/package.json +37 -13
- package/swap.js +1 -0
- package/tensor.d.ts +1 -0
- package/tensor.js +4 -2
package/CHANGELOG.md
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# Change Log
|
|
2
2
|
|
|
3
|
-
- **Last updated**: 2025-
|
|
3
|
+
- **Last updated**: 2025-06-05T12:59:44Z
|
|
4
4
|
- **Generator**: [thi.ng/monopub](https://thi.ng/monopub)
|
|
5
5
|
|
|
6
6
|
All notable changes to this project will be documented in this file.
|
|
@@ -11,6 +11,28 @@ See [Conventional Commits](https://conventionalcommits.org/) for commit guidelin
|
|
|
11
11
|
**Note:** Unlisted _patch_ versions only involve non-code or otherwise excluded changes
|
|
12
12
|
and/or version bumps of transitive dependencies.
|
|
13
13
|
|
|
14
|
+
## [0.6.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.6.0) (2025-06-05)
|
|
15
|
+
|
|
16
|
+
#### 🚀 Features
|
|
17
|
+
|
|
18
|
+
- update defOpRT()/defOpRTT() ([8099abc](https://github.com/thi-ng/umbrella/commit/8099abc))
|
|
19
|
+
- add optional `complete` fn to produce/transform final result (reducer like)
|
|
20
|
+
- add mean() tensor op ([2f15d21](https://github.com/thi-ng/umbrella/commit/2f15d21))
|
|
21
|
+
|
|
22
|
+
## [0.5.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.5.0) (2025-05-28)
|
|
23
|
+
|
|
24
|
+
#### 🚀 Features
|
|
25
|
+
|
|
26
|
+
- add convolve() (1D/2D/3D versions) ([bc9a6eb](https://github.com/thi-ng/umbrella/commit/bc9a6eb))
|
|
27
|
+
- add `constant()`, refactor `ones()` ([8cdd68c](https://github.com/thi-ng/umbrella/commit/8cdd68c))
|
|
28
|
+
- add convolution kernel presets ([e553f06](https://github.com/thi-ng/umbrella/commit/e553f06))
|
|
29
|
+
- add 1D/2D/3D sobel kernels
|
|
30
|
+
- add 2D box blur
|
|
31
|
+
- add 2D gaussian blur
|
|
32
|
+
- add/update/rename convolution kernels ([025c7ba](https://github.com/thi-ng/umbrella/commit/025c7ba))
|
|
33
|
+
- add `applyKernel()`, add/update kernel presets ([b8c2a7a](https://github.com/thi-ng/umbrella/commit/b8c2a7a))
|
|
34
|
+
- add `fromFloatBuffer()` conversion ([d78a0d3](https://github.com/thi-ng/umbrella/commit/d78a0d3))
|
|
35
|
+
|
|
14
36
|
## [0.4.0](https://github.com/thi-ng/umbrella/tree/@thi.ng/tensors@0.4.0) (2025-05-12)
|
|
15
37
|
|
|
16
38
|
#### 🚀 Features
|
package/README.md
CHANGED
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
[](https://mastodon.thi.ng/@toxi)
|
|
8
8
|
|
|
9
9
|
> [!NOTE]
|
|
10
|
-
> This is one of
|
|
10
|
+
> This is one of 208 standalone projects, maintained as part
|
|
11
11
|
> of the [@thi.ng/umbrella](https://github.com/thi-ng/umbrella/) monorepo
|
|
12
12
|
> and anti-framework.
|
|
13
13
|
>
|
|
@@ -17,6 +17,8 @@
|
|
|
17
17
|
- [About](#about)
|
|
18
18
|
- [Built-in tensor operations](#built-in-tensor-operations)
|
|
19
19
|
- [Broadcasting support](#broadcasting-support)
|
|
20
|
+
- [Convolution support](#convolution-support)
|
|
21
|
+
- [Conversions](#conversions)
|
|
20
22
|
- [Status](#status)
|
|
21
23
|
- [Installation](#installation)
|
|
22
24
|
- [Dependencies](#dependencies)
|
|
@@ -51,6 +53,7 @@ conventions are closely aligned to the ones used by the
|
|
|
51
53
|
- [argMin](https://docs.thi.ng/umbrella/tensors/variables/argMin.html): Minimum component index/value
|
|
52
54
|
- [clamp](https://docs.thi.ng/umbrella/tensors/variables/clamp.html): Tensor-tensor interval clamping
|
|
53
55
|
- [clampN](https://docs.thi.ng/umbrella/tensors/variables/clampN.html): Tensor-scalar interval clamping
|
|
56
|
+
- [convolve](https://docs.thi.ng/umbrella/tensors/variables/convolve.html): Tensor convolution (1D/2D/3D only)
|
|
54
57
|
- [cos](https://docs.thi.ng/umbrella/tensors/variables/cos.html): Componentwise `Math.cos`
|
|
55
58
|
- [diagonal](https://docs.thi.ng/umbrella/tensors/variables/diagonal.html): Diagonal extraction
|
|
56
59
|
- [div](https://docs.thi.ng/umbrella/tensors/variables/div.html): Tensor-tensor division
|
|
@@ -149,6 +152,40 @@ print(add(null, tensor([[10, 20], [100, 200]]), tensor([[[1, 2], [3, 4]], [[5, 6
|
|
|
149
152
|
// 107.0000 208.0000
|
|
150
153
|
```
|
|
151
154
|
|
|
155
|
+
### Convolution support
|
|
156
|
+
|
|
157
|
+
Tensor convolution is only possible if both the domain tensor and the kernel
|
|
158
|
+
tensor have same dimensionality. No broadcasting support.
|
|
159
|
+
|
|
160
|
+
The following kernel presets and tensor factories are included and can be used
|
|
161
|
+
with
|
|
162
|
+
[`convolve()`](https://docs.thi.ng/umbrella/tensors/variables/convolve.html):
|
|
163
|
+
|
|
164
|
+
- `BOX_BLUR2(radius)`: Box blur kernel factory
|
|
165
|
+
- `GAUSSION2(radius)`: Gaussian blur kernel factory
|
|
166
|
+
- `EDGE2(radius)`: Edge/ridge detection kernel factory
|
|
167
|
+
- `SOBEL1`: 1D Sobel kernel
|
|
168
|
+
- `SOBEL2`: 2D Sobel kernel
|
|
169
|
+
- `SOBEL3`: 3D Sobel kernel
|
|
170
|
+
|
|
171
|
+
For more generalized convolution-like functionality, the following kernel
|
|
172
|
+
factories can be used with
|
|
173
|
+
[`applyKernel()`](https://docs.thi.ng/umbrella/tensors/variables/applyKernel.html):
|
|
174
|
+
|
|
175
|
+
- `MAX2_POOL(width,height?)`: max pooling
|
|
176
|
+
- `MIN2_POOL(width,height?)`: min pooling
|
|
177
|
+
- `MAXIMA2(radius)`: local maxima detection
|
|
178
|
+
- `MINIMA2(radius)`: local minima detection
|
|
179
|
+
|
|
180
|
+
### Conversions
|
|
181
|
+
|
|
182
|
+
The following functions can be used to convert/coerce other data structures into
|
|
183
|
+
tensors:
|
|
184
|
+
|
|
185
|
+
- [`fromFloatBuffer()`](https://docs.thi.ng/umbrella/tensors/functions/fromFloatBuffer.html):
|
|
186
|
+
Coerce [thi.ng/pixel] float buffer/image (or compatible data structures) into
|
|
187
|
+
a 2D/3D tensor
|
|
188
|
+
|
|
152
189
|
## Status
|
|
153
190
|
|
|
154
191
|
**ALPHA** - bleeding edge / work-in-progress
|
|
@@ -181,7 +218,7 @@ For Node.js REPL:
|
|
|
181
218
|
const ten = await import("@thi.ng/tensors");
|
|
182
219
|
```
|
|
183
220
|
|
|
184
|
-
Package sizes (brotli'd, pre-treeshake): ESM:
|
|
221
|
+
Package sizes (brotli'd, pre-treeshake): ESM: 9.40 KB
|
|
185
222
|
|
|
186
223
|
## Dependencies
|
|
187
224
|
|
package/api.d.ts
CHANGED
|
@@ -275,4 +275,42 @@ export interface MultiTensorOp<TOP> {
|
|
|
275
275
|
impl(dim?: number): Maybe<TOP>;
|
|
276
276
|
}
|
|
277
277
|
export type MultiTensorOpImpl<T> = T & MultiTensorOp<T>;
|
|
278
|
+
/**
|
|
279
|
+
* Convolution kernel spec for use with {@link applyKernel}.
|
|
280
|
+
*
|
|
281
|
+
* @remarks
|
|
282
|
+
* Provided implementations:
|
|
283
|
+
*
|
|
284
|
+
* - {@link MAX2_POOL}
|
|
285
|
+
* - {@link MIN2_POOL}
|
|
286
|
+
* - {@link MAXIMA2}
|
|
287
|
+
* - {@link MINIMA2}
|
|
288
|
+
*/
|
|
289
|
+
export interface KernelSpec<T = any> {
|
|
290
|
+
/**
|
|
291
|
+
* Kernel shape/size
|
|
292
|
+
*/
|
|
293
|
+
shape: Shape;
|
|
294
|
+
/**
|
|
295
|
+
* Windowed initialization. Returns initial accumulator for each new kernel
|
|
296
|
+
* window.
|
|
297
|
+
*/
|
|
298
|
+
init: () => T;
|
|
299
|
+
/**
|
|
300
|
+
* Windowed reduction function. Receives current accumulator, domain value
|
|
301
|
+
* and kernel-local coordinates. Returns updated accumulator.
|
|
302
|
+
*
|
|
303
|
+
* @param acc
|
|
304
|
+
* @param value
|
|
305
|
+
* @param coords
|
|
306
|
+
*/
|
|
307
|
+
reduce: (acc: T, value: number, ...coords: number[]) => T;
|
|
308
|
+
/**
|
|
309
|
+
* Windowed reducer result function. Produces final result from current
|
|
310
|
+
* accumulator.
|
|
311
|
+
*
|
|
312
|
+
* @param acc
|
|
313
|
+
*/
|
|
314
|
+
complete: (acc: T) => number;
|
|
315
|
+
}
|
|
278
316
|
//# sourceMappingURL=api.d.ts.map
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
import type { KernelSpec, ITensor } from "./api.js";
|
|
2
|
+
/**
|
|
3
|
+
* Generalized convolution using a set of kernel functions instead of static
|
|
4
|
+
* kernel tensor (as with {@link convolve}). The kernel function is applied as a
|
|
5
|
+
* windowed reducer. For each window (size defined by the kernel), the
|
|
6
|
+
* {@link KernelSpec.init} function initializes an accumulator value. Then
|
|
7
|
+
* {@link KernelSpec.reduce} is called with the current accumulator, a domain
|
|
8
|
+
* value from tensor `a` and the current kernel coordinates. The final window
|
|
9
|
+
* result is produced by the kernel's {@link KernelSpec.complete} function.
|
|
10
|
+
*
|
|
11
|
+
* @remarks
|
|
12
|
+
* The output tensor has the same shape as domain `a`. If `pad = true`
|
|
13
|
+
* (default), edge values in `a` will be repeated, otherwise padded with given
|
|
14
|
+
* value. If `out` is null, a new tensor will be created using `a`'s storage
|
|
15
|
+
* backend.
|
|
16
|
+
*
|
|
17
|
+
* References:
|
|
18
|
+
*
|
|
19
|
+
* - https://en.wikipedia.org/wiki/Convolution
|
|
20
|
+
* - https://en.wikipedia.org/wiki/Kernel_(image_processing)#Convolution
|
|
21
|
+
*
|
|
22
|
+
* @param out - output tensor
|
|
23
|
+
* @param a - input tensor
|
|
24
|
+
* @param kernel - kernel spec
|
|
25
|
+
* @param pad - padding
|
|
26
|
+
*/
|
|
27
|
+
export declare const applyKernel: import("./api.js").MultiTensorOpImpl<(<T extends ITensor>(out: T | null, a: T, kernel: KernelSpec, pad?: true | number) => T)>;
|
|
28
|
+
//# sourceMappingURL=apply-kernel.d.ts.map
|
package/apply-kernel.js
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
import { top } from "./top.js";
|
|
2
|
+
const applyKernel1 = (out, a, { init, reduce, complete, shape: [sxk] }, pad = true) => {
|
|
3
|
+
!out && (out = a.empty());
|
|
4
|
+
const {
|
|
5
|
+
data: odata,
|
|
6
|
+
offset: oo,
|
|
7
|
+
stride: [txo]
|
|
8
|
+
} = out;
|
|
9
|
+
const {
|
|
10
|
+
data: adata,
|
|
11
|
+
offset: oa,
|
|
12
|
+
shape: [sxa],
|
|
13
|
+
stride: [txa]
|
|
14
|
+
} = a;
|
|
15
|
+
const sxk2 = sxk >> 1;
|
|
16
|
+
const maxx = sxa - 1;
|
|
17
|
+
const repeat = pad === true;
|
|
18
|
+
let x, xx, i, acc, maskx;
|
|
19
|
+
for (x = 0; x < sxa; x++) {
|
|
20
|
+
for (acc = init(), i = 0; i < sxk; i++) {
|
|
21
|
+
xx = x + i - sxk2;
|
|
22
|
+
if (xx < 0) {
|
|
23
|
+
maskx = false;
|
|
24
|
+
xx = 0;
|
|
25
|
+
} else if (xx > maxx) {
|
|
26
|
+
maskx = false;
|
|
27
|
+
xx = maxx;
|
|
28
|
+
} else maskx = true;
|
|
29
|
+
acc = reduce(acc, maskx || repeat ? adata[oa + xx * txa] : pad, i);
|
|
30
|
+
}
|
|
31
|
+
odata[oo + x * txo] = complete(acc);
|
|
32
|
+
}
|
|
33
|
+
return out;
|
|
34
|
+
};
|
|
35
|
+
const applyKernel2 = (out, a, { init, reduce, complete, shape: [sxk, syk] }, pad = true) => {
|
|
36
|
+
!out && (out = a.empty());
|
|
37
|
+
const {
|
|
38
|
+
data: odata,
|
|
39
|
+
offset: oo,
|
|
40
|
+
stride: [txo, tyo]
|
|
41
|
+
} = out;
|
|
42
|
+
const {
|
|
43
|
+
data: adata,
|
|
44
|
+
offset: oa,
|
|
45
|
+
shape: [sxa, sya],
|
|
46
|
+
stride: [txa, tya]
|
|
47
|
+
} = a;
|
|
48
|
+
const sxk2 = sxk >> 1;
|
|
49
|
+
const syk2 = syk >> 1;
|
|
50
|
+
const maxx = sxa - 1;
|
|
51
|
+
const maxy = sya - 1;
|
|
52
|
+
const repeat = pad === true;
|
|
53
|
+
let x, xx, y, yy, oox, oax, i, j, acc, maskx, masky;
|
|
54
|
+
for (x = 0; x < sxa; x++) {
|
|
55
|
+
oox = oo + x * txo;
|
|
56
|
+
for (y = 0; y < sya; y++) {
|
|
57
|
+
for (acc = init(), i = 0; i < sxk; i++) {
|
|
58
|
+
xx = x + i - sxk2;
|
|
59
|
+
if (xx < 0) {
|
|
60
|
+
maskx = false;
|
|
61
|
+
xx = 0;
|
|
62
|
+
} else if (xx > maxx) {
|
|
63
|
+
maskx = false;
|
|
64
|
+
xx = maxx;
|
|
65
|
+
} else maskx = true;
|
|
66
|
+
oax = oa + xx * txa;
|
|
67
|
+
for (j = 0; j < syk; j++) {
|
|
68
|
+
yy = y + j - syk2;
|
|
69
|
+
if (yy < 0) {
|
|
70
|
+
masky = false;
|
|
71
|
+
yy = 0;
|
|
72
|
+
} else if (yy > maxy) {
|
|
73
|
+
masky = false;
|
|
74
|
+
yy = maxy;
|
|
75
|
+
} else masky = maskx;
|
|
76
|
+
acc = reduce(
|
|
77
|
+
acc,
|
|
78
|
+
masky || repeat ? adata[oax + yy * tya] : pad,
|
|
79
|
+
i,
|
|
80
|
+
j
|
|
81
|
+
);
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
odata[oox + y * tyo] = complete(acc);
|
|
85
|
+
}
|
|
86
|
+
}
|
|
87
|
+
return out;
|
|
88
|
+
};
|
|
89
|
+
const applyKernel3 = (out, a, { init, reduce, complete, shape: [sxk, syk, szk] }, pad = true) => {
|
|
90
|
+
!out && (out = a.empty());
|
|
91
|
+
const {
|
|
92
|
+
data: odata,
|
|
93
|
+
offset: oo,
|
|
94
|
+
stride: [txo, tyo, tzo]
|
|
95
|
+
} = out;
|
|
96
|
+
const {
|
|
97
|
+
data: adata,
|
|
98
|
+
offset: oa,
|
|
99
|
+
shape: [sxa, sya, sza],
|
|
100
|
+
stride: [txa, tya, tza]
|
|
101
|
+
} = a;
|
|
102
|
+
const sxk2 = sxk >> 1;
|
|
103
|
+
const syk2 = syk >> 1;
|
|
104
|
+
const szk2 = szk >> 1;
|
|
105
|
+
const maxx = sxa - 1;
|
|
106
|
+
const maxy = sya - 1;
|
|
107
|
+
const maxz = sza - 1;
|
|
108
|
+
const repeat = pad === true;
|
|
109
|
+
let x, xx, y, yy, z, zz, oox, oax, ooy, oay, i, j, k, acc, maskx, masky, maskz;
|
|
110
|
+
for (x = 0; x < sxa; x++) {
|
|
111
|
+
oox = oo + x * txo;
|
|
112
|
+
for (y = 0; y < sya; y++) {
|
|
113
|
+
ooy = oox + y * tyo;
|
|
114
|
+
for (z = 0; z < sza; z++) {
|
|
115
|
+
for (acc = init(), i = 0; i < sxk; i++) {
|
|
116
|
+
xx = x + i - sxk2;
|
|
117
|
+
if (xx < 0) {
|
|
118
|
+
maskx = false;
|
|
119
|
+
xx = 0;
|
|
120
|
+
} else if (xx > maxx) {
|
|
121
|
+
maskx = false;
|
|
122
|
+
xx = maxx;
|
|
123
|
+
} else maskx = true;
|
|
124
|
+
oax = oa + xx * txa;
|
|
125
|
+
for (j = 0; j < syk; j++) {
|
|
126
|
+
yy = y + j - syk2;
|
|
127
|
+
if (yy < 0) {
|
|
128
|
+
masky = false;
|
|
129
|
+
yy = 0;
|
|
130
|
+
} else if (yy > maxy) {
|
|
131
|
+
masky = false;
|
|
132
|
+
yy = maxy;
|
|
133
|
+
} else masky = maskx;
|
|
134
|
+
oay = oax + yy * tya;
|
|
135
|
+
for (k = 0; k < szk; k++) {
|
|
136
|
+
zz = z + k - szk2;
|
|
137
|
+
if (zz < 0) {
|
|
138
|
+
maskz = false;
|
|
139
|
+
zz = 0;
|
|
140
|
+
} else if (zz > maxz) {
|
|
141
|
+
maskz = false;
|
|
142
|
+
zz = maxz;
|
|
143
|
+
} else maskz = masky;
|
|
144
|
+
acc = reduce(
|
|
145
|
+
acc,
|
|
146
|
+
maskz || repeat ? adata[oay + zz * tza] : pad,
|
|
147
|
+
i,
|
|
148
|
+
j,
|
|
149
|
+
k
|
|
150
|
+
);
|
|
151
|
+
}
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
odata[ooy + z * tzo] = complete(acc);
|
|
155
|
+
}
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
return out;
|
|
159
|
+
};
|
|
160
|
+
const applyKernel = top(1, void 0, applyKernel1, applyKernel2, applyKernel3);
|
|
161
|
+
export {
|
|
162
|
+
applyKernel
|
|
163
|
+
};
|
package/convert.d.ts
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
import type { NumericArray } from "@thi.ng/api";
|
|
2
|
+
/**
|
|
3
|
+
* Simplified interface of thi.ng/pixel `FloatBuffer`, only defining parts
|
|
4
|
+
* relevant to the conversion.
|
|
5
|
+
*/
|
|
6
|
+
export interface FloatBufferLike {
|
|
7
|
+
size: [number, number];
|
|
8
|
+
stride: [number, number];
|
|
9
|
+
data: NumericArray;
|
|
10
|
+
format: {
|
|
11
|
+
size: number;
|
|
12
|
+
};
|
|
13
|
+
}
|
|
14
|
+
/**
|
|
15
|
+
* Helper function to coerce a thi.ng/pixel float buffer (or compatible data
|
|
16
|
+
* structures) into a tensor. Single-channel (i.e. grayscale) buffers will
|
|
17
|
+
* result in 2D tensors, otherwise 3D (with the innermost dimension representing
|
|
18
|
+
* different color channels). In all cases, this is a zero-copy operation.
|
|
19
|
+
*
|
|
20
|
+
* @param buffer
|
|
21
|
+
*/
|
|
22
|
+
export declare const fromFloatBuffer: ({ size: [sx, sy], stride: [tx, ty], data, format: { size }, }: FloatBufferLike) => import("./tensor.js").Tensor3<number> | import("./tensor.js").Tensor2<number>;
|
|
23
|
+
//# sourceMappingURL=convert.d.ts.map
|
package/convert.js
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
import { typedArrayType } from "@thi.ng/api/typedarray";
|
|
2
|
+
import { tensor } from "./tensor.js";
|
|
3
|
+
const fromFloatBuffer = ({
|
|
4
|
+
size: [sx, sy],
|
|
5
|
+
stride: [tx, ty],
|
|
6
|
+
data,
|
|
7
|
+
format: { size }
|
|
8
|
+
}) => {
|
|
9
|
+
const type = typedArrayType(data);
|
|
10
|
+
return size > 1 ? tensor(type, [sy, sx, size], {
|
|
11
|
+
stride: [ty, tx, 1],
|
|
12
|
+
copy: false,
|
|
13
|
+
data
|
|
14
|
+
}) : tensor(type, [sy, sx], {
|
|
15
|
+
stride: [ty, tx],
|
|
16
|
+
copy: false,
|
|
17
|
+
data
|
|
18
|
+
});
|
|
19
|
+
};
|
|
20
|
+
export {
|
|
21
|
+
fromFloatBuffer
|
|
22
|
+
};
|
package/convolve.d.ts
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
import type { ITensor } from "./api.js";
|
|
2
|
+
/**
|
|
3
|
+
* Tensor convolution (without broadcasting support).
|
|
4
|
+
*
|
|
5
|
+
* @remarks
|
|
6
|
+
* The output tensor has the same shape as domain `a`. Edge values in `a` will
|
|
7
|
+
* be repeated. If `out` is null, a new tensor will be created using `a`'s
|
|
8
|
+
* storage backend.
|
|
9
|
+
*
|
|
10
|
+
* References:
|
|
11
|
+
*
|
|
12
|
+
* - https://en.wikipedia.org/wiki/Convolution
|
|
13
|
+
* - https://en.wikipedia.org/wiki/Kernel_(image_processing)#Convolution
|
|
14
|
+
*
|
|
15
|
+
* @param out - output tensor
|
|
16
|
+
* @param a - input tensor
|
|
17
|
+
* @param k - kernel tensor
|
|
18
|
+
*/
|
|
19
|
+
export declare const convolve: import("./api.js").MultiTensorOpImpl<(<T extends ITensor>(out: T | null, a: T, k: T) => T)>;
|
|
20
|
+
//# sourceMappingURL=convolve.d.ts.map
|
package/convolve.js
ADDED
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
import { top } from "./top.js";
|
|
2
|
+
const convolve1 = (out, a, k) => {
|
|
3
|
+
!out && (out = a.empty());
|
|
4
|
+
const {
|
|
5
|
+
data: odata,
|
|
6
|
+
offset: oo,
|
|
7
|
+
stride: [txo]
|
|
8
|
+
} = out;
|
|
9
|
+
const {
|
|
10
|
+
data: adata,
|
|
11
|
+
offset: oa,
|
|
12
|
+
shape: [sxa],
|
|
13
|
+
stride: [txa]
|
|
14
|
+
} = a;
|
|
15
|
+
const {
|
|
16
|
+
data: kdata,
|
|
17
|
+
offset: ok,
|
|
18
|
+
shape: [sxk],
|
|
19
|
+
stride: [txk]
|
|
20
|
+
} = k;
|
|
21
|
+
const sxk2 = sxk >> 1;
|
|
22
|
+
const mx = sxa - 1;
|
|
23
|
+
let x, xx, i, sum;
|
|
24
|
+
for (x = 0; x < sxa; x++) {
|
|
25
|
+
for (sum = 0, i = 0; i < sxk; i++) {
|
|
26
|
+
xx = x + i - sxk2;
|
|
27
|
+
if (xx < 0) xx = 0;
|
|
28
|
+
else if (xx > mx) xx = mx;
|
|
29
|
+
sum += adata[oa + xx * txa] * kdata[ok + i * txk];
|
|
30
|
+
}
|
|
31
|
+
odata[oo + x * txo] = sum;
|
|
32
|
+
}
|
|
33
|
+
return out;
|
|
34
|
+
};
|
|
35
|
+
const convolve2 = (out, a, k) => {
|
|
36
|
+
!out && (out = a.empty());
|
|
37
|
+
const {
|
|
38
|
+
data: odata,
|
|
39
|
+
offset: oo,
|
|
40
|
+
stride: [txo, tyo]
|
|
41
|
+
} = out;
|
|
42
|
+
const {
|
|
43
|
+
data: adata,
|
|
44
|
+
offset: oa,
|
|
45
|
+
shape: [sxa, sya],
|
|
46
|
+
stride: [txa, tya]
|
|
47
|
+
} = a;
|
|
48
|
+
const {
|
|
49
|
+
data: kdata,
|
|
50
|
+
offset: ok,
|
|
51
|
+
shape: [sxk, syk],
|
|
52
|
+
stride: [txk, tyk]
|
|
53
|
+
} = k;
|
|
54
|
+
const sxk2 = sxk >> 1;
|
|
55
|
+
const syk2 = syk >> 1;
|
|
56
|
+
const mx = sxa - 1;
|
|
57
|
+
const my = sya - 1;
|
|
58
|
+
let x, xx, y, yy, oox, oax, okx, i, j, sum;
|
|
59
|
+
for (x = 0; x < sxa; x++) {
|
|
60
|
+
oox = oo + x * txo;
|
|
61
|
+
for (y = 0; y < sya; y++) {
|
|
62
|
+
for (sum = 0, i = 0; i < sxk; i++) {
|
|
63
|
+
xx = x + i - sxk2;
|
|
64
|
+
if (xx < 0) xx = 0;
|
|
65
|
+
else if (xx > mx) xx = mx;
|
|
66
|
+
oax = oa + xx * txa;
|
|
67
|
+
okx = ok + i * txk;
|
|
68
|
+
for (j = 0; j < syk; j++) {
|
|
69
|
+
yy = y + j - syk2;
|
|
70
|
+
if (yy < 0) yy = 0;
|
|
71
|
+
else if (yy > my) yy = my;
|
|
72
|
+
sum += adata[oax + yy * tya] * kdata[okx + j * tyk];
|
|
73
|
+
}
|
|
74
|
+
}
|
|
75
|
+
odata[oox + y * tyo] = sum;
|
|
76
|
+
}
|
|
77
|
+
}
|
|
78
|
+
return out;
|
|
79
|
+
};
|
|
80
|
+
const convolve3 = (out, a, k) => {
|
|
81
|
+
!out && (out = a.empty());
|
|
82
|
+
const {
|
|
83
|
+
data: odata,
|
|
84
|
+
offset: oo,
|
|
85
|
+
stride: [txo, tyo, tzo]
|
|
86
|
+
} = out;
|
|
87
|
+
const {
|
|
88
|
+
data: adata,
|
|
89
|
+
offset: oa,
|
|
90
|
+
shape: [sxa, sya, sza],
|
|
91
|
+
stride: [txa, tya, tza]
|
|
92
|
+
} = a;
|
|
93
|
+
const {
|
|
94
|
+
data: kdata,
|
|
95
|
+
offset: ok,
|
|
96
|
+
shape: [sxk, syk, szk],
|
|
97
|
+
stride: [txk, tyk, tzk]
|
|
98
|
+
} = k;
|
|
99
|
+
const sxk2 = sxk >> 1;
|
|
100
|
+
const syk2 = syk >> 1;
|
|
101
|
+
const szk2 = szk >> 1;
|
|
102
|
+
const mx = sxa - 1;
|
|
103
|
+
const my = sya - 1;
|
|
104
|
+
const mz = sza - 1;
|
|
105
|
+
let x, xx, y, yy, z, zz, oox, oax, okx, ooy, oay, oky, i, j, l, sum;
|
|
106
|
+
for (x = 0; x < sxa; x++) {
|
|
107
|
+
oox = oo + x * txo;
|
|
108
|
+
for (y = 0; y < sya; y++) {
|
|
109
|
+
ooy = oox + y * tyo;
|
|
110
|
+
for (z = 0; z < sza; z++) {
|
|
111
|
+
for (sum = 0, i = 0; i < sxk; i++) {
|
|
112
|
+
xx = x + i - sxk2;
|
|
113
|
+
if (xx < 0) xx = 0;
|
|
114
|
+
else if (xx > mx) xx = mx;
|
|
115
|
+
oax = oa + xx * txa;
|
|
116
|
+
okx = ok + i * txk;
|
|
117
|
+
for (j = 0; j < syk; j++) {
|
|
118
|
+
yy = y + j - syk2;
|
|
119
|
+
if (yy < 0) yy = 0;
|
|
120
|
+
else if (yy > my) yy = my;
|
|
121
|
+
oay = oax + yy * tya;
|
|
122
|
+
oky = okx + j * tyk;
|
|
123
|
+
for (l = 0; l < szk; l++) {
|
|
124
|
+
zz = z + l - szk2;
|
|
125
|
+
if (zz < 0) zz = 0;
|
|
126
|
+
else if (zz > mz) zz = mz;
|
|
127
|
+
sum += adata[oay + zz * tza] * kdata[oky + l * tzk];
|
|
128
|
+
}
|
|
129
|
+
}
|
|
130
|
+
}
|
|
131
|
+
odata[ooy + z * tzo] = sum;
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
return out;
|
|
136
|
+
};
|
|
137
|
+
const convolve = top(1, void 0, convolve1, convolve2, convolve3);
|
|
138
|
+
export {
|
|
139
|
+
convolve
|
|
140
|
+
};
|
package/defoprt.d.ts
CHANGED
|
@@ -1,12 +1,12 @@
|
|
|
1
|
-
import type {
|
|
2
|
-
import type { TensorData, TensorOpRT } from "./api.js";
|
|
1
|
+
import type { ITensor, TensorData, TensorOpRT } from "./api.js";
|
|
3
2
|
/**
|
|
4
3
|
* Higher order tensor reduction op factory. Takes given reduction `rfn` and
|
|
5
|
-
* `init` function to produce an initial result
|
|
6
|
-
* applying the given
|
|
4
|
+
* `init` function to produce an initial result and optional `complete` to
|
|
5
|
+
* produce the final result. Returns a {@link TensorOpRT} applying the given
|
|
6
|
+
* function component-wise.
|
|
7
7
|
*
|
|
8
8
|
* @param rfn
|
|
9
9
|
* @param init
|
|
10
10
|
*/
|
|
11
|
-
export declare const defOpRT: <A = number, B = A>(rfn: (acc: B, data: TensorData<A>, i: number) => B, init:
|
|
11
|
+
export declare const defOpRT: <A = number, B = A>(rfn: (acc: B, data: TensorData<A>, i: number) => B, init: () => B, complete?: (acc: B, a: ITensor<A>) => B) => import("./api.js").MultiTensorOpImpl<TensorOpRT<A, B>>;
|
|
12
12
|
//# sourceMappingURL=defoprt.d.ts.map
|
package/defoprt.js
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
|
+
import { identity } from "@thi.ng/api/fn";
|
|
1
2
|
import { top } from "./top.js";
|
|
2
|
-
const defOpRT = (rfn, init) => {
|
|
3
|
+
const defOpRT = (rfn, init, complete = identity) => {
|
|
3
4
|
const f1 = (a) => {
|
|
4
5
|
const {
|
|
5
6
|
data,
|
|
@@ -11,7 +12,7 @@ const defOpRT = (rfn, init) => {
|
|
|
11
12
|
for (let x = 0; x < sx; x++) {
|
|
12
13
|
res = rfn(res, data, offset + x * tx);
|
|
13
14
|
}
|
|
14
|
-
return res;
|
|
15
|
+
return complete(res, a);
|
|
15
16
|
};
|
|
16
17
|
const f2 = (a) => {
|
|
17
18
|
const {
|
|
@@ -28,7 +29,7 @@ const defOpRT = (rfn, init) => {
|
|
|
28
29
|
res = rfn(res, data, ox + y * ty);
|
|
29
30
|
}
|
|
30
31
|
}
|
|
31
|
-
return res;
|
|
32
|
+
return complete(res, a);
|
|
32
33
|
};
|
|
33
34
|
const f3 = (a) => {
|
|
34
35
|
const {
|
|
@@ -48,7 +49,7 @@ const defOpRT = (rfn, init) => {
|
|
|
48
49
|
}
|
|
49
50
|
}
|
|
50
51
|
}
|
|
51
|
-
return res;
|
|
52
|
+
return complete(res, a);
|
|
52
53
|
};
|
|
53
54
|
const f4 = (a) => {
|
|
54
55
|
const {
|
|
@@ -71,7 +72,7 @@ const defOpRT = (rfn, init) => {
|
|
|
71
72
|
}
|
|
72
73
|
}
|
|
73
74
|
}
|
|
74
|
-
return res;
|
|
75
|
+
return complete(res, a);
|
|
75
76
|
};
|
|
76
77
|
return top(
|
|
77
78
|
0,
|
package/defoprtt.d.ts
CHANGED
|
@@ -1,14 +1,15 @@
|
|
|
1
|
-
import type {
|
|
2
|
-
import type { TensorData, TensorOpRTT } from "./api.js";
|
|
1
|
+
import type { ITensor, TensorData, TensorOpRTT } from "./api.js";
|
|
3
2
|
/**
|
|
4
3
|
* Higher order tensor reduction op factory. Takes given reduction `rfn` and
|
|
5
|
-
* `init` function to produce an initial result
|
|
6
|
-
*
|
|
7
|
-
*
|
|
4
|
+
* `init` function to produce an initial result and optional `complete` to
|
|
5
|
+
* produce the final result. Returns a {@link TensorOpRTT} applying the given
|
|
6
|
+
* function componentwise, by default with broadcasting rules (see
|
|
7
|
+
* {@link broadcast} for details).
|
|
8
8
|
*
|
|
9
9
|
* @param rfn
|
|
10
10
|
* @param init
|
|
11
|
+
* @param complete
|
|
11
12
|
* @param useBroadcast
|
|
12
13
|
*/
|
|
13
|
-
export declare const defOpRTT: <A = number, B = A>(rfn: (acc: B, adata: TensorData<A>, bdata: TensorData<A>, ia: number, ib: number) => B, init:
|
|
14
|
+
export declare const defOpRTT: <A = number, B = A>(rfn: (acc: B, adata: TensorData<A>, bdata: TensorData<A>, ia: number, ib: number) => B, init: () => B, complete?: (acc: B, a: ITensor<A>, b: ITensor<A>) => B, useBroadcast?: boolean) => TensorOpRTT<A, B>;
|
|
14
15
|
//# sourceMappingURL=defoprtt.d.ts.map
|
package/defoprtt.js
CHANGED
|
@@ -1,5 +1,6 @@
|
|
|
1
|
+
import { identity } from "@thi.ng/api/fn";
|
|
1
2
|
import { broadcast } from "./broadcast.js";
|
|
2
|
-
const defOpRTT = (rfn, init, useBroadcast = true) => {
|
|
3
|
+
const defOpRTT = (rfn, init, complete = identity, useBroadcast = true) => {
|
|
3
4
|
const f1 = (a, b) => {
|
|
4
5
|
const {
|
|
5
6
|
data: adata,
|
|
@@ -16,7 +17,7 @@ const defOpRTT = (rfn, init, useBroadcast = true) => {
|
|
|
16
17
|
for (let x = 0; x < sx; x++) {
|
|
17
18
|
res = rfn(res, adata, bdata, oa + x * txa, ob + x * txb);
|
|
18
19
|
}
|
|
19
|
-
return res;
|
|
20
|
+
return complete(res, a, b);
|
|
20
21
|
};
|
|
21
22
|
const f2 = (a, b) => {
|
|
22
23
|
const {
|
|
@@ -39,7 +40,7 @@ const defOpRTT = (rfn, init, useBroadcast = true) => {
|
|
|
39
40
|
res = rfn(res, adata, bdata, oax + y * tya, obx + y * tyb);
|
|
40
41
|
}
|
|
41
42
|
}
|
|
42
|
-
return res;
|
|
43
|
+
return complete(res, a, b);
|
|
43
44
|
};
|
|
44
45
|
const f3 = (a, b) => {
|
|
45
46
|
const {
|
|
@@ -66,7 +67,7 @@ const defOpRTT = (rfn, init, useBroadcast = true) => {
|
|
|
66
67
|
}
|
|
67
68
|
}
|
|
68
69
|
}
|
|
69
|
-
return res;
|
|
70
|
+
return complete(res, a, b);
|
|
70
71
|
};
|
|
71
72
|
const f4 = (a, b) => {
|
|
72
73
|
const {
|
|
@@ -103,7 +104,7 @@ const defOpRTT = (rfn, init, useBroadcast = true) => {
|
|
|
103
104
|
}
|
|
104
105
|
}
|
|
105
106
|
}
|
|
106
|
-
return res;
|
|
107
|
+
return complete(res, a, b);
|
|
107
108
|
};
|
|
108
109
|
const impls = [, f1, f2, f3, f4];
|
|
109
110
|
const wrapper = useBroadcast ? (a, b) => {
|
package/errors.d.ts
CHANGED
|
@@ -9,7 +9,7 @@ export declare const IllegalShapeError: {
|
|
|
9
9
|
cause?: unknown;
|
|
10
10
|
};
|
|
11
11
|
captureStackTrace(targetObject: object, constructorOpt?: Function): void;
|
|
12
|
-
prepareStackTrace
|
|
12
|
+
prepareStackTrace(err: Error, stackTraces: NodeJS.CallSite[]): any;
|
|
13
13
|
stackTraceLimit: number;
|
|
14
14
|
};
|
|
15
15
|
export declare const illegalShape: (shape: NumericArray) => never;
|
package/index.d.ts
CHANGED
|
@@ -2,9 +2,12 @@ export * from "./abs.js";
|
|
|
2
2
|
export * from "./add.js";
|
|
3
3
|
export * from "./addn.js";
|
|
4
4
|
export * from "./api.js";
|
|
5
|
+
export * from "./apply-kernel.js";
|
|
5
6
|
export * from "./broadcast.js";
|
|
6
7
|
export * from "./clamp.js";
|
|
7
8
|
export * from "./clampn.js";
|
|
9
|
+
export * from "./convert.js";
|
|
10
|
+
export * from "./convolve.js";
|
|
8
11
|
export * from "./cos.js";
|
|
9
12
|
export * from "./defopn.js";
|
|
10
13
|
export * from "./defoprt.js";
|
|
@@ -24,12 +27,14 @@ export * from "./exp2.js";
|
|
|
24
27
|
export * from "./filtered-indices.js";
|
|
25
28
|
export * from "./format.js";
|
|
26
29
|
export * from "./identity.js";
|
|
30
|
+
export * from "./kernels.js";
|
|
27
31
|
export * from "./log.js";
|
|
28
32
|
export * from "./log2.js";
|
|
29
33
|
export * from "./mag.js";
|
|
30
34
|
export * from "./magsq.js";
|
|
31
35
|
export * from "./max.js";
|
|
32
36
|
export * from "./maxn.js";
|
|
37
|
+
export * from "./mean.js";
|
|
33
38
|
export * from "./min.js";
|
|
34
39
|
export * from "./minn.js";
|
|
35
40
|
export * from "./mul.js";
|
package/index.js
CHANGED
|
@@ -2,9 +2,12 @@ export * from "./abs.js";
|
|
|
2
2
|
export * from "./add.js";
|
|
3
3
|
export * from "./addn.js";
|
|
4
4
|
export * from "./api.js";
|
|
5
|
+
export * from "./apply-kernel.js";
|
|
5
6
|
export * from "./broadcast.js";
|
|
6
7
|
export * from "./clamp.js";
|
|
7
8
|
export * from "./clampn.js";
|
|
9
|
+
export * from "./convert.js";
|
|
10
|
+
export * from "./convolve.js";
|
|
8
11
|
export * from "./cos.js";
|
|
9
12
|
export * from "./defopn.js";
|
|
10
13
|
export * from "./defoprt.js";
|
|
@@ -24,12 +27,14 @@ export * from "./exp2.js";
|
|
|
24
27
|
export * from "./filtered-indices.js";
|
|
25
28
|
export * from "./format.js";
|
|
26
29
|
export * from "./identity.js";
|
|
30
|
+
export * from "./kernels.js";
|
|
27
31
|
export * from "./log.js";
|
|
28
32
|
export * from "./log2.js";
|
|
29
33
|
export * from "./mag.js";
|
|
30
34
|
export * from "./magsq.js";
|
|
31
35
|
export * from "./max.js";
|
|
32
36
|
export * from "./maxn.js";
|
|
37
|
+
export * from "./mean.js";
|
|
33
38
|
export * from "./min.js";
|
|
34
39
|
export * from "./minn.js";
|
|
35
40
|
export * from "./mul.js";
|
package/kernels.d.ts
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
import type { KernelSpec } from "./api.js";
|
|
2
|
+
/**
|
|
3
|
+
* 1D Sobel convolution kernel.
|
|
4
|
+
*/
|
|
5
|
+
export declare const SOBEL1: import("./tensor.js").Tensor1<number>;
|
|
6
|
+
/**
|
|
7
|
+
* 2D Sobel convolution kernel (along outer axis).
|
|
8
|
+
*
|
|
9
|
+
* @remarks
|
|
10
|
+
* Use `SOBEL2.transpose([1, 0])` for inner axis.
|
|
11
|
+
*/
|
|
12
|
+
export declare const SOBEL2: import("./tensor.js").Tensor2<number>;
|
|
13
|
+
/**
|
|
14
|
+
* 3D Sobel convolution kernel (along outer axis).
|
|
15
|
+
*
|
|
16
|
+
* @remarks
|
|
17
|
+
* Use `SOBEL3.transpose()` for other axes.
|
|
18
|
+
*/
|
|
19
|
+
export declare const SOBEL3: import("./tensor.js").Tensor3<number>;
|
|
20
|
+
/**
|
|
21
|
+
* 2D edge detection convolution kernel factory for given integer radius `r`.
|
|
22
|
+
* Returns 2D tensor of size `2*r+1`.
|
|
23
|
+
*
|
|
24
|
+
* @param r
|
|
25
|
+
*/
|
|
26
|
+
export declare const EDGE2: (r: number) => import("./tensor.js").Tensor2<number>;
|
|
27
|
+
/**
|
|
28
|
+
* 2D sharpen 3x3 kernel preset.
|
|
29
|
+
*/
|
|
30
|
+
export declare const SHARPEN2_3x3: import("./tensor.js").Tensor2<number>;
|
|
31
|
+
/**
|
|
32
|
+
* 2D box blur convolution kernel factory for given integer radius `r`. Returns
|
|
33
|
+
* 2D tensor of size `2*r+1`.
|
|
34
|
+
*
|
|
35
|
+
* @param r
|
|
36
|
+
*/
|
|
37
|
+
export declare const BOX_BLUR2: (r: number) => import("./tensor.js").Tensor2<number>;
|
|
38
|
+
/**
|
|
39
|
+
* 2D Gaussian blur kernel factory for given integer radius `r`. Returns 2D
|
|
40
|
+
* tensor of size `2*r+1`.
|
|
41
|
+
*
|
|
42
|
+
* @param r -
|
|
43
|
+
*/
|
|
44
|
+
export declare const GAUSSIAN_BLUR2: (r: number) => import("./tensor.js").Tensor2<number>;
|
|
45
|
+
/**
|
|
46
|
+
* Max. pool kernel factory for given window size and use with
|
|
47
|
+
* {@link applyKernel}. The kernel produces the maximum value in its window.
|
|
48
|
+
*
|
|
49
|
+
* @param w
|
|
50
|
+
* @param h
|
|
51
|
+
*/
|
|
52
|
+
export declare const MAX2_POOL: (w: number, h?: number) => KernelSpec<number>;
|
|
53
|
+
/**
|
|
54
|
+
* Min. pool kernel factory for given window size and use with
|
|
55
|
+
* {@link applyKernel}. The kernel produces the minimum value in its window.
|
|
56
|
+
*
|
|
57
|
+
* @param w
|
|
58
|
+
* @param h
|
|
59
|
+
*/
|
|
60
|
+
export declare const MIN2_POOL: (w: number, h?: number) => KernelSpec<number>;
|
|
61
|
+
/**
|
|
62
|
+
* Kernel factory for given integer radius `r` and use with {@link applyKernel}.
|
|
63
|
+
* The kernel marks local maxima within its window and produces only 0 or 1
|
|
64
|
+
* result values (where 1 is used to mark the maxima).
|
|
65
|
+
*
|
|
66
|
+
* @param r
|
|
67
|
+
*/
|
|
68
|
+
export declare const MAXIMA2: (r: number) => KernelSpec<[number, number]>;
|
|
69
|
+
/**
|
|
70
|
+
* Kernel factory for given integer radius `r` and use with {@link applyKernel}.
|
|
71
|
+
* The kernel marks local minima within its window and produces only 0 or 1
|
|
72
|
+
* result values (where 1 is used to mark the minima).
|
|
73
|
+
*
|
|
74
|
+
* @param r
|
|
75
|
+
*/
|
|
76
|
+
export declare const MINIMA2: (r: number) => KernelSpec<[number, number]>;
|
|
77
|
+
//# sourceMappingURL=kernels.d.ts.map
|
package/kernels.js
ADDED
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
import { identity } from "@thi.ng/api/fn";
|
|
2
|
+
import { constant, tensor } from "./tensor.js";
|
|
3
|
+
const SOBEL1 = tensor([-1, 0, 1]);
|
|
4
|
+
const SOBEL2 = tensor([
|
|
5
|
+
[-1, -2, -1],
|
|
6
|
+
[0, 0, 0],
|
|
7
|
+
[1, 2, 1]
|
|
8
|
+
]);
|
|
9
|
+
const SOBEL3 = tensor([
|
|
10
|
+
[
|
|
11
|
+
[-1, -2, -1],
|
|
12
|
+
[-2, -4, -2],
|
|
13
|
+
[-1, -2, -1]
|
|
14
|
+
],
|
|
15
|
+
[
|
|
16
|
+
[0, 0, 0],
|
|
17
|
+
[0, 0, 0],
|
|
18
|
+
[0, 0, 0]
|
|
19
|
+
],
|
|
20
|
+
[
|
|
21
|
+
[1, 2, 1],
|
|
22
|
+
[2, 4, 2],
|
|
23
|
+
[1, 2, 1]
|
|
24
|
+
]
|
|
25
|
+
]);
|
|
26
|
+
const EDGE2 = (r) => {
|
|
27
|
+
r |= 0;
|
|
28
|
+
const w = 2 * r + 1;
|
|
29
|
+
const data = new Array(w * w).fill(-1);
|
|
30
|
+
data[data.length >> 1] = w * w - 1;
|
|
31
|
+
return tensor("num", [w, w], { data, copy: false });
|
|
32
|
+
};
|
|
33
|
+
const SHARPEN2_3x3 = tensor([
|
|
34
|
+
[0, -1, 0],
|
|
35
|
+
[-1, 5, -1],
|
|
36
|
+
[0, -1, 0]
|
|
37
|
+
]);
|
|
38
|
+
const BOX_BLUR2 = (r) => {
|
|
39
|
+
r = 2 * r + 1;
|
|
40
|
+
return constant([r, r], 1 / (r * r), "num");
|
|
41
|
+
};
|
|
42
|
+
const GAUSSIAN_BLUR2 = (r) => {
|
|
43
|
+
r |= 0;
|
|
44
|
+
const sigma = -1 / (2 * (Math.SQRT2 * r / 3) ** 2);
|
|
45
|
+
const res = [];
|
|
46
|
+
let sum = 0;
|
|
47
|
+
for (let y = -r; y <= r; y++) {
|
|
48
|
+
for (let x = -r; x <= r; x++) {
|
|
49
|
+
const g = Math.exp((x * x + y * y) * sigma);
|
|
50
|
+
res.push(g);
|
|
51
|
+
sum += g;
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
const size = r * 2 + 1;
|
|
55
|
+
return tensor("num", [size, size], {
|
|
56
|
+
data: res.map((x) => x / sum),
|
|
57
|
+
copy: false
|
|
58
|
+
});
|
|
59
|
+
};
|
|
60
|
+
const { max, min } = Math;
|
|
61
|
+
const MAX2_POOL = (w, h = w) => {
|
|
62
|
+
return {
|
|
63
|
+
shape: [w, h],
|
|
64
|
+
init: () => -Infinity,
|
|
65
|
+
reduce: (acc, val) => max(acc, val),
|
|
66
|
+
complete: identity
|
|
67
|
+
};
|
|
68
|
+
};
|
|
69
|
+
const MIN2_POOL = (w, h = w) => {
|
|
70
|
+
return {
|
|
71
|
+
shape: [w, h],
|
|
72
|
+
init: () => Infinity,
|
|
73
|
+
reduce: (acc, val) => min(acc, val),
|
|
74
|
+
complete: identity
|
|
75
|
+
};
|
|
76
|
+
};
|
|
77
|
+
const MAXIMA2 = (r) => {
|
|
78
|
+
r |= 0;
|
|
79
|
+
const w = 2 * r + 1;
|
|
80
|
+
return {
|
|
81
|
+
shape: [w, w],
|
|
82
|
+
init: () => [-Infinity, 0],
|
|
83
|
+
reduce: (acc, val, i, j) => {
|
|
84
|
+
if (i === r && j === r) acc[1] = val;
|
|
85
|
+
else acc[0] = max(acc[0], val);
|
|
86
|
+
return acc;
|
|
87
|
+
},
|
|
88
|
+
complete: (acc) => acc[1] > acc[0] ? 1 : 0
|
|
89
|
+
};
|
|
90
|
+
};
|
|
91
|
+
const MINIMA2 = (r) => {
|
|
92
|
+
r |= 0;
|
|
93
|
+
const w = 2 * r + 1;
|
|
94
|
+
return {
|
|
95
|
+
shape: [w, w],
|
|
96
|
+
init: () => [Infinity, 0],
|
|
97
|
+
reduce: (acc, val, i, j) => {
|
|
98
|
+
if (i === r && j === r) acc[1] = val;
|
|
99
|
+
else acc[0] = min(acc[0], val);
|
|
100
|
+
return acc;
|
|
101
|
+
},
|
|
102
|
+
complete: (acc) => acc[1] < acc[0] ? 1 : 0
|
|
103
|
+
};
|
|
104
|
+
};
|
|
105
|
+
export {
|
|
106
|
+
BOX_BLUR2,
|
|
107
|
+
EDGE2,
|
|
108
|
+
GAUSSIAN_BLUR2,
|
|
109
|
+
MAX2_POOL,
|
|
110
|
+
MAXIMA2,
|
|
111
|
+
MIN2_POOL,
|
|
112
|
+
MINIMA2,
|
|
113
|
+
SHARPEN2_3x3,
|
|
114
|
+
SOBEL1,
|
|
115
|
+
SOBEL2,
|
|
116
|
+
SOBEL3
|
|
117
|
+
};
|
package/mean.d.ts
ADDED
package/mean.js
ADDED
package/normalize.d.ts
CHANGED
|
@@ -7,5 +7,5 @@ import type { ITensor } from "./api.js";
|
|
|
7
7
|
* @param a
|
|
8
8
|
* @param n
|
|
9
9
|
*/
|
|
10
|
-
export declare const normalize: (out: ITensor | null, a: ITensor, n?: number) => import("./tensor.js").Tensor1<any
|
|
10
|
+
export declare const normalize: (out: ITensor | null, a: ITensor, n?: number) => ITensor<number> | import("./tensor.js").Tensor1<any>;
|
|
11
11
|
//# sourceMappingURL=normalize.d.ts.map
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@thi.ng/tensors",
|
|
3
|
-
"version": "0.
|
|
3
|
+
"version": "0.6.0",
|
|
4
4
|
"description": "1D/2D/3D/4D tensors with extensible polymorphic operations and customizable storage",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"module": "./index.js",
|
|
@@ -39,19 +39,19 @@
|
|
|
39
39
|
"tool:tangle": "../../node_modules/.bin/tangle src/**/*.ts"
|
|
40
40
|
},
|
|
41
41
|
"dependencies": {
|
|
42
|
-
"@thi.ng/api": "^8.11.
|
|
43
|
-
"@thi.ng/arrays": "^2.11.
|
|
44
|
-
"@thi.ng/checks": "^3.7.
|
|
45
|
-
"@thi.ng/equiv": "^2.1.
|
|
46
|
-
"@thi.ng/errors": "^2.5.
|
|
47
|
-
"@thi.ng/math": "^5.11.
|
|
48
|
-
"@thi.ng/random": "^4.1.
|
|
49
|
-
"@thi.ng/strings": "^3.9.
|
|
50
|
-
"@thi.ng/vectors": "^8.2.
|
|
42
|
+
"@thi.ng/api": "^8.11.28",
|
|
43
|
+
"@thi.ng/arrays": "^2.11.1",
|
|
44
|
+
"@thi.ng/checks": "^3.7.8",
|
|
45
|
+
"@thi.ng/equiv": "^2.1.84",
|
|
46
|
+
"@thi.ng/errors": "^2.5.34",
|
|
47
|
+
"@thi.ng/math": "^5.11.28",
|
|
48
|
+
"@thi.ng/random": "^4.1.19",
|
|
49
|
+
"@thi.ng/strings": "^3.9.13",
|
|
50
|
+
"@thi.ng/vectors": "^8.2.1"
|
|
51
51
|
},
|
|
52
52
|
"devDependencies": {
|
|
53
|
-
"esbuild": "^0.25.
|
|
54
|
-
"typedoc": "^0.28.
|
|
53
|
+
"esbuild": "^0.25.5",
|
|
54
|
+
"typedoc": "^0.28.5",
|
|
55
55
|
"typescript": "^5.8.3"
|
|
56
56
|
},
|
|
57
57
|
"keywords": [
|
|
@@ -62,19 +62,28 @@
|
|
|
62
62
|
"activation",
|
|
63
63
|
"algebra",
|
|
64
64
|
"array",
|
|
65
|
+
"blur",
|
|
65
66
|
"broadcast",
|
|
67
|
+
"convolution",
|
|
66
68
|
"data-oriented",
|
|
67
69
|
"datastructure",
|
|
68
70
|
"decomposition",
|
|
71
|
+
"detect",
|
|
69
72
|
"diagonal",
|
|
70
73
|
"dotproduct",
|
|
74
|
+
"edge",
|
|
71
75
|
"equality",
|
|
76
|
+
"filter",
|
|
77
|
+
"gaussian",
|
|
78
|
+
"gradient",
|
|
72
79
|
"interval",
|
|
73
80
|
"math",
|
|
74
81
|
"matrix",
|
|
75
82
|
"memory-mapped",
|
|
76
83
|
"nd",
|
|
77
84
|
"polymorphic",
|
|
85
|
+
"pool",
|
|
86
|
+
"presets",
|
|
78
87
|
"random",
|
|
79
88
|
"step",
|
|
80
89
|
"svd",
|
|
@@ -113,6 +122,9 @@
|
|
|
113
122
|
"./api": {
|
|
114
123
|
"default": "./api.js"
|
|
115
124
|
},
|
|
125
|
+
"./apply-kernel": {
|
|
126
|
+
"default": "./apply-kernel.js"
|
|
127
|
+
},
|
|
116
128
|
"./broadcast": {
|
|
117
129
|
"default": "./broadcast.js"
|
|
118
130
|
},
|
|
@@ -122,6 +134,12 @@
|
|
|
122
134
|
"./clampn": {
|
|
123
135
|
"default": "./clampn.js"
|
|
124
136
|
},
|
|
137
|
+
"./convert": {
|
|
138
|
+
"default": "./convert.js"
|
|
139
|
+
},
|
|
140
|
+
"./convolve": {
|
|
141
|
+
"default": "./convolve.js"
|
|
142
|
+
},
|
|
125
143
|
"./cos": {
|
|
126
144
|
"default": "./cos.js"
|
|
127
145
|
},
|
|
@@ -179,6 +197,9 @@
|
|
|
179
197
|
"./identity": {
|
|
180
198
|
"default": "./identity.js"
|
|
181
199
|
},
|
|
200
|
+
"./kernels": {
|
|
201
|
+
"default": "./kernels.js"
|
|
202
|
+
},
|
|
182
203
|
"./log": {
|
|
183
204
|
"default": "./log.js"
|
|
184
205
|
},
|
|
@@ -197,6 +218,9 @@
|
|
|
197
218
|
"./maxn": {
|
|
198
219
|
"default": "./maxn.js"
|
|
199
220
|
},
|
|
221
|
+
"./mean": {
|
|
222
|
+
"default": "./mean.js"
|
|
223
|
+
},
|
|
200
224
|
"./min": {
|
|
201
225
|
"default": "./min.js"
|
|
202
226
|
},
|
|
@@ -301,5 +325,5 @@
|
|
|
301
325
|
"status": "alpha",
|
|
302
326
|
"year": 2018
|
|
303
327
|
},
|
|
304
|
-
"gitHead": "
|
|
328
|
+
"gitHead": "5a1224bbd501c0dad203fa231749f21740350db7\n"
|
|
305
329
|
}
|
package/swap.js
CHANGED
package/tensor.d.ts
CHANGED
|
@@ -131,6 +131,7 @@ export declare function tensorFromArray<T extends NumType, N extends Nested<numb
|
|
|
131
131
|
export declare function tensorFromArray<N extends Nested<string>>(data: N, opts?: TensorFromArrayOpts<"str", string>): NestedTensor<N, string>;
|
|
132
132
|
export declare const zeroes: <S extends Shape>(shape: S, type?: NumType, storage?: ITensorStorage<number>) => ShapeTensor<S, number>;
|
|
133
133
|
export declare const ones: <S extends Shape>(shape: S, type?: NumType, storage?: ITensorStorage<number>) => ShapeTensor<S, number>;
|
|
134
|
+
export declare const constant: <T extends Type, S extends Shape>(shape: S, value: TypeMap[T], type: T, storage?: ITensorStorage<TypeMap[T]>) => ShapeTensor<S, TypeMap[T]>;
|
|
134
135
|
export declare const shapeToStride: (shape: number[]) => any[];
|
|
135
136
|
export declare const strideOrder: (strides: number[]) => number[];
|
|
136
137
|
//# sourceMappingURL=tensor.d.ts.map
|
package/tensor.js
CHANGED
|
@@ -481,9 +481,10 @@ function tensorFromArray(data, opts) {
|
|
|
481
481
|
});
|
|
482
482
|
}
|
|
483
483
|
const zeroes = (shape, type = "num", storage) => tensor(type, shape, { storage });
|
|
484
|
-
const ones = (shape, type = "num", storage) =>
|
|
484
|
+
const ones = (shape, type = "num", storage) => constant(shape, 1, type, storage);
|
|
485
|
+
const constant = (shape, value, type, storage) => {
|
|
485
486
|
const res = tensor(type, shape, { storage });
|
|
486
|
-
res.data.fill(
|
|
487
|
+
res.data.fill(value);
|
|
487
488
|
return res;
|
|
488
489
|
};
|
|
489
490
|
const shapeToStride = (shape) => {
|
|
@@ -551,6 +552,7 @@ export {
|
|
|
551
552
|
Tensor2,
|
|
552
553
|
Tensor3,
|
|
553
554
|
Tensor4,
|
|
555
|
+
constant,
|
|
554
556
|
ones,
|
|
555
557
|
shapeToStride,
|
|
556
558
|
strideOrder,
|