@datagrok/eda 1.4.4 → 1.4.6
This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- package/CHANGELOG.md +11 -0
- package/dist/111.js +2 -0
- package/dist/111.js.map +1 -0
- package/dist/128.js +2 -0
- package/dist/128.js.map +1 -0
- package/dist/153.js +2 -0
- package/dist/153.js.map +1 -0
- package/dist/23.js +2 -0
- package/dist/23.js.map +1 -0
- package/dist/234.js +2 -0
- package/dist/234.js.map +1 -0
- package/dist/242.js +2 -0
- package/dist/242.js.map +1 -0
- package/dist/260.js +2 -0
- package/dist/260.js.map +1 -0
- package/dist/33.js +2 -0
- package/dist/33.js.map +1 -0
- package/dist/348.js +2 -0
- package/dist/348.js.map +1 -0
- package/dist/377.js +2 -0
- package/dist/377.js.map +1 -0
- package/dist/412.js +2 -0
- package/dist/412.js.map +1 -0
- package/dist/415.js +2 -0
- package/dist/415.js.map +1 -0
- package/dist/501.js +2 -0
- package/dist/501.js.map +1 -0
- package/dist/531.js +2 -0
- package/dist/531.js.map +1 -0
- package/dist/583.js +2 -0
- package/dist/583.js.map +1 -0
- package/dist/589.js +2 -0
- package/dist/589.js.map +1 -0
- package/dist/603.js +2 -0
- package/dist/603.js.map +1 -0
- package/dist/656.js +2 -0
- package/dist/656.js.map +1 -0
- package/dist/682.js +2 -0
- package/dist/682.js.map +1 -0
- package/dist/705.js +2 -0
- package/dist/705.js.map +1 -0
- package/dist/727.js +2 -0
- package/dist/727.js.map +1 -0
- package/dist/731.js +2 -0
- package/dist/731.js.map +1 -0
- package/dist/738.js +3 -0
- package/dist/738.js.LICENSE.txt +51 -0
- package/dist/738.js.map +1 -0
- package/dist/763.js +2 -0
- package/dist/763.js.map +1 -0
- package/dist/778.js +2 -0
- package/dist/778.js.map +1 -0
- package/dist/783.js +2 -0
- package/dist/783.js.map +1 -0
- package/dist/793.js +2 -0
- package/dist/793.js.map +1 -0
- package/dist/801.js +2 -0
- package/dist/801.js.map +1 -0
- package/dist/810.js +2 -0
- package/dist/810.js.map +1 -0
- package/dist/860.js +2 -0
- package/dist/860.js.map +1 -0
- package/dist/907.js +2 -0
- package/dist/907.js.map +1 -0
- package/dist/950.js +2 -0
- package/dist/950.js.map +1 -0
- package/dist/980.js +2 -0
- package/dist/980.js.map +1 -0
- package/dist/990.js +2 -0
- package/dist/990.js.map +1 -0
- package/dist/package-test.js +1 -26140
- package/dist/package-test.js.map +1 -1
- package/dist/package.js +1 -30337
- package/dist/package.js.map +1 -1
- package/package.json +2 -2
- package/src/anova/anova-ui.ts +39 -24
- package/src/package-api.ts +4 -0
- package/src/package.g.ts +33 -32
- package/src/package.ts +2 -2
- package/test-console-output-1.log +72 -93
- package/test-record-1.mp4 +0 -0
- package/webpack.config.js +1 -1
- package/dist/_d4c0.js +0 -279
- package/dist/_d4c0.js.map +0 -1
- package/dist/node_modules_datagrok-libraries_math_src_dbscan_wasm_clustering-worker_js.js +0 -279
- package/dist/node_modules_datagrok-libraries_math_src_dbscan_wasm_clustering-worker_js.js.map +0 -1
- package/dist/node_modules_datagrok-libraries_ml_src_MCL_mcl-sparse-matrix-mult-worker_js.js +0 -59
- package/dist/node_modules_datagrok-libraries_ml_src_MCL_mcl-sparse-matrix-mult-worker_js.js.map +0 -1
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_distance-matrix-worker_js.js +0 -284
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_distance-matrix-worker_js.js.map +0 -1
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_single-value-knn-worker_js.js +0 -265
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_single-value-knn-worker_js.js.map +0 -1
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_sparse-matrix-worker_js.js +0 -287
- package/dist/node_modules_datagrok-libraries_ml_src_distance-matrix_sparse-matrix-worker_js.js.map +0 -1
- package/dist/src_workers_softmax-worker_ts.js +0 -154
- package/dist/src_workers_softmax-worker_ts.js.map +0 -1
- package/dist/src_workers_tsne-worker_ts.js +0 -244
- package/dist/src_workers_tsne-worker_ts.js.map +0 -1
- package/dist/src_workers_umap-worker_ts.js +0 -252
- package/dist/src_workers_umap-worker_ts.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_math_src_dbscan_wasm_dbscan_js.js +0 -1253
- package/dist/vendors-node_modules_datagrok-libraries_math_src_dbscan_wasm_dbscan_js.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_math_src_hierarchical-clustering_wasm_clustering-worker_js.js +0 -942
- package/dist/vendors-node_modules_datagrok-libraries_math_src_hierarchical-clustering_wasm_clustering-worker_js.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_math_src_webGPU_sparse-matrix_webGPU-sparse-matrix_js-07693f.js +0 -1525
- package/dist/vendors-node_modules_datagrok-libraries_math_src_webGPU_sparse-matrix_webGPU-sparse-matrix_js-07693f.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_MCL_mcl-worker_js-node_modules_datagrok-librar-e4203d.js +0 -2244
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_MCL_mcl-worker_js-node_modules_datagrok-librar-e4203d.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_knn-threshold-worker_js.js +0 -286
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_knn-threshold-worker_js.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_knn-worker_js.js +0 -280
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_knn-worker_js.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_sparse-matrix-threshold-worker_js.js +0 -282
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_sparse-matrix-threshold-worker_js.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_utils_js-node_modules_datagrok-72c7b2.js +0 -1821
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_distance-matrix_utils_js-node_modules_datagrok-72c7b2.js.map +0 -1
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_multi-column-dimensionality-reduction_mulit-co-3800a0.js +0 -7776
- package/dist/vendors-node_modules_datagrok-libraries_ml_src_multi-column-dimensionality-reduction_mulit-co-3800a0.js.map +0 -1
- package/dist/vendors-node_modules_keckelt_tsne_lib_index_js.js +0 -379
- package/dist/vendors-node_modules_keckelt_tsne_lib_index_js.js.map +0 -1
- package/dist/vendors-node_modules_ml-matrix_matrix_mjs.js +0 -5946
- package/dist/vendors-node_modules_ml-matrix_matrix_mjs.js.map +0 -1
- package/dist/vendors-node_modules_umap-js_dist_index_js.js +0 -2284
- package/dist/vendors-node_modules_umap-js_dist_index_js.js.map +0 -1
- package/dist/wasm_EDAForWebWorker_js-wasm_callWasmForWebWorker_js.js +0 -779
- package/dist/wasm_EDAForWebWorker_js-wasm_callWasmForWebWorker_js.js.map +0 -1
- package/dist/wasm_workers_errorWorker_js.js +0 -267
- package/dist/wasm_workers_errorWorker_js.js.map +0 -1
- package/dist/wasm_workers_fitLinearRegressionParamsWithDataNormalizingWorker_js.js +0 -267
- package/dist/wasm_workers_fitLinearRegressionParamsWithDataNormalizingWorker_js.js.map +0 -1
- package/dist/wasm_workers_fitLinearRegressionParamsWorker_js.js +0 -267
- package/dist/wasm_workers_fitLinearRegressionParamsWorker_js.js.map +0 -1
- package/dist/wasm_workers_fitSoftmaxWorker_js.js +0 -267
- package/dist/wasm_workers_fitSoftmaxWorker_js.js.map +0 -1
- package/dist/wasm_workers_generateDatasetWorker_js.js +0 -267
- package/dist/wasm_workers_generateDatasetWorker_js.js.map +0 -1
- package/dist/wasm_workers_normalizeDatasetWorker_js.js +0 -267
- package/dist/wasm_workers_normalizeDatasetWorker_js.js.map +0 -1
- package/dist/wasm_workers_partialLeastSquareRegressionWorker_js.js +0 -267
- package/dist/wasm_workers_partialLeastSquareRegressionWorker_js.js.map +0 -1
- package/dist/wasm_workers_predictByLSSVMWorker_js.js +0 -267
- package/dist/wasm_workers_predictByLSSVMWorker_js.js.map +0 -1
- package/dist/wasm_workers_principalComponentAnalysisNipalsWorker_js.js +0 -267
- package/dist/wasm_workers_principalComponentAnalysisNipalsWorker_js.js.map +0 -1
- package/dist/wasm_workers_principalComponentAnalysisWorkerUpd_js.js +0 -271
- package/dist/wasm_workers_principalComponentAnalysisWorkerUpd_js.js.map +0 -1
- package/dist/wasm_workers_trainAndAnalyzeLSSVMWorker_js.js +0 -267
- package/dist/wasm_workers_trainAndAnalyzeLSSVMWorker_js.js.map +0 -1
- package/dist/wasm_workers_trainLSSVMWorker_js.js +0 -267
- package/dist/wasm_workers_trainLSSVMWorker_js.js.map +0 -1
- package/dist/wasm_workers_xgboostWorker_js.js +0 -279
- package/dist/wasm_workers_xgboostWorker_js.js.map +0 -1

package/dist/vendors-node_modules_keckelt_tsne_lib_index_js.js
@@ -1,379 +0,0 @@
-"use strict";
-(self["webpackChunkeda"] = self["webpackChunkeda"] || []).push([["vendors-node_modules_keckelt_tsne_lib_index_js"],{
-
-/***/ "./node_modules/@keckelt/tsne/lib/index.js":
-/*!*************************************************!*\
-  !*** ./node_modules/@keckelt/tsne/lib/index.js ***!
-  \*************************************************/
-/***/ ((__unused_webpack_module, exports, __webpack_require__) => {
-
-
-Object.defineProperty(exports, "__esModule", ({ value: true }));
-exports.TSNE = void 0;
-var tsne_1 = __webpack_require__(/*! ./tsne */ "./node_modules/@keckelt/tsne/lib/tsne.js");
-Object.defineProperty(exports, "TSNE", ({ enumerable: true, get: function () { return tsne_1.TSNE; } }));
-
-
-/***/ }),
-
-/***/ "./node_modules/@keckelt/tsne/lib/tsne.js":
-/*!************************************************!*\
-  !*** ./node_modules/@keckelt/tsne/lib/tsne.js ***!
-  \************************************************/
-/***/ ((__unused_webpack_module, exports) => {
-
-
-Object.defineProperty(exports, "__esModule", ({ value: true }));
-exports.TSNE = void 0;
-class TSNE {
-    constructor(opt) {
-        // return 0 mean unit standard deviation random number
-        this.returnV = false;
-        this.vValue = 0.0;
-        this.iter = 0;
-        opt = opt || {};
-        this.perplexity = this.getopt(opt, 'perplexity', 30); // effective number of nearest neighbors
-        this.dim = this.getopt(opt, 'dim', 2); // by default 2-D tSNE
-        this.epsilon = this.getopt(opt, 'epsilon', 10); // learning rate
-    }
-    assert(condition, message) {
-        if (!condition) {
-            throw message || 'Assertion failed';
-        }
-    }
-    // syntax sugar
-    getopt(opt, field, defaultval) {
-        if (opt.hasOwnProperty(field)) {
-            return opt[field];
-        }
-        else {
-            return defaultval;
-        }
-    }
-    gaussRandom() {
-        if (this.returnV) {
-            this.returnV = false;
-            return this.vValue;
-        }
-        const u = 2 * Math.random() - 1;
-        const v = 2 * Math.random() - 1;
-        const r = u * u + v * v;
-        if (r === 0 || r > 1) {
-            return this.gaussRandom();
-        }
-        const c = Math.sqrt(-2 * Math.log(r) / r);
-        this.vValue = v * c; // cache this for next function call for efficiency
-        this.returnV = true;
-        return u * c;
-    }
-    // return random normal number
-    randn(mu, std) { return mu + this.gaussRandom() * std; }
-    // utilitity that creates contiguous vector of zeros of size n
-    zeros(n) {
-        if (typeof (n) === 'undefined' || isNaN(n)) {
-            return [];
-        }
-        if (typeof ArrayBuffer === 'undefined') {
-            // lacking browser support
-            const arr = new Array(n);
-            for (let i = 0; i < n; i++) {
-                arr[i] = 0;
-            }
-            return arr;
-        }
-        else {
-            return new Float64Array(n); // typed arrays are faster
-        }
-    }
-    // utility that returns 2d array filled with random numbers
-    // or with value s, if provided
-    randn2d(n, d, s) {
-        const uses = typeof s !== 'undefined';
-        const x = [];
-        for (let i = 0; i < n; i++) {
-            const xhere = [];
-            for (let j = 0; j < d; j++) {
-                if (uses) {
-                    xhere.push(s);
-                }
-                else {
-                    xhere.push(this.randn(0.0, 1e-4));
-                }
-            }
-            x.push(xhere);
-        }
-        return x;
-    }
-    // compute L2 distance between two vectors
-    L2(x1, x2) {
-        const D = x1.length;
-        let d = 0;
-        for (let i = 0; i < D; i++) {
-            const x1i = x1[i];
-            const x2i = x2[i];
-            d += (x1i - x2i) * (x1i - x2i);
-        }
-        return d;
-    }
-    // compute pairwise distance in all vectors in X
-    xtod(X) {
-        const N = X.length;
-        const dist = this.zeros(N * N); // allocate contiguous array
-        for (let i = 0; i < N; i++) {
-            for (let j = i + 1; j < N; j++) {
-                const d = this.L2(X[i], X[j]);
-                dist[i * N + j] = d;
-                dist[j * N + i] = d;
-            }
-        }
-        return dist;
-    }
-    // compute (p_{i|j} + p_{j|i})/(2n)
-    d2p(D, perplexity, tol) {
-        const nf = Math.sqrt(D.length); // this better be an integer
-        const n = Math.floor(nf);
-        this.assert(n === nf, 'D should have square number of elements.');
-        const hTarget = Math.log(perplexity); // target entropy of distribution
-        const P = this.zeros(n * n); // temporary probability matrix
-        const prow = this.zeros(n); // a temporary storage compartment
-        for (let i = 0; i < n; i++) {
-            let betamin = -Infinity;
-            let betamax = Infinity;
-            let beta = 1; // initial value of precision
-            let done = false;
-            const maxtries = 50;
-            // perform binary search to find a suitable precision beta
-            // so that the entropy of the distribution is appropriate
-            let num = 0;
-            while (!done) {
-                //debugger;
-                // compute entropy and kernel row with beta precision
-                let psum = 0.0;
-                for (let j = 0; j < n; j++) {
-                    let pj = Math.exp(-D[i * n + j] * beta);
-                    if (i === j) {
-                        pj = 0;
-                    } // we dont care about diagonals
-                    prow[j] = pj;
-                    psum += pj;
-                }
-                // normalize p and compute entropy
-                let nHere = 0.0;
-                for (let j = 0; j < n; j++) {
-                    let pj;
-                    if (psum === 0) {
-                        pj = 0;
-                    }
-                    else {
-                        pj = prow[j] / psum;
-                    }
-                    prow[j] = pj;
-                    if (pj > 1e-7) {
-                        nHere -= pj * Math.log(pj);
-                    }
-                }
-                // adjust beta based on result
-                if (nHere > hTarget) {
-                    // entropy was too high (distribution too diffuse)
-                    // so we need to increase the precision for more peaky distribution
-                    betamin = beta; // move up the bounds
-                    if (betamax === Infinity) {
-                        beta = beta * 2;
-                    }
-                    else {
-                        beta = (beta + betamax) / 2;
-                    }
-                }
-                else {
-                    // converse case. make distrubtion less peaky
-                    betamax = beta;
-                    if (betamin === -Infinity) {
-                        beta = beta / 2;
-                    }
-                    else {
-                        beta = (beta + betamin) / 2;
-                    }
-                }
-                // stopping conditions: too many tries or got a good precision
-                num++;
-                if (Math.abs(nHere - hTarget) < tol) {
-                    done = true;
-                }
-                if (num >= maxtries) {
-                    done = true;
-                }
-            }
-            // console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');
-            // copy over the final prow to P at row i
-            for (let j = 0; j < n; j++) {
-                P[i * n + j] = prow[j];
-            }
-        } // end loop over examples i
-        // symmetrize P and normalize it to sum to 1 over all ij
-        const pOut = this.zeros(n * n);
-        const N2 = n * 2;
-        for (let i = 0; i < n; i++) {
-            for (let j = 0; j < n; j++) {
-                pOut[i * n + j] = Math.max((P[i * n + j] + P[j * n + i]) / N2, 1e-100);
-            }
-        }
-        return pOut;
-    }
-    // helper function
-    sign(x) { return x > 0 ? 1 : x < 0 ? -1 : 0; }
-    // this function takes a set of high-dimensional points
-    // and creates matrix P from them using gaussian kernel
-    initDataRaw(X) {
-        const N = X.length;
-        const D = X[0].length;
-        this.assert(N > 0, ' X is empty? You must have some data!');
-        this.assert(D > 0, ' X[0] is empty? Where is the data?');
-        const dists = this.xtod(X); // convert X to distances using gaussian kernel
-        this.P = this.d2p(dists, this.perplexity, 1e-4); // attach to object
-        this.N = N; // back up the size of the dataset
-        this.initSolution(); // refresh this
-    }
-    // this function takes a given distance matrix and creates
-    // matrix P from them.
-    // D is assumed to be provided as a list of lists, and should be symmetric
-    initDataDist(D) {
-        const N = D.length;
-        this.assert(N > 0, ' X is empty? You must have some data!');
-        // convert D to a (fast) typed array version
-        const dists = this.zeros(N * N); // allocate contiguous array
-        for (let i = 0; i < N; i++) {
-            for (let j = i + 1; j < N; j++) {
-                const d = D[i][j];
-                dists[i * N + j] = d;
-                dists[j * N + i] = d;
-            }
-        }
-        this.P = this.d2p(dists, this.perplexity, 1e-4);
-        this.N = N;
-        this.initSolution(); // refresh this
-    }
-    // (re)initializes the solution to random
-    initSolution() {
-        // generate random solution to t-SNE
-        this.Y = this.randn2d(this.N, this.dim); // the solution
-        this.gains = this.randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions
-        this.ystep = this.randn2d(this.N, this.dim, 0.0); // momentum accumulator
-        this.iter = 0;
-    }
-    // return pointer to current solution
-    getSolution() {
-        return this.Y;
-    }
-    // perform a single step of optimization to improve the embedding
-    step() {
-        this.iter += 1;
-        const N = this.N;
-        const cg = this.costGrad(this.Y); // evaluate gradient
-        const cost = cg.cost;
-        const grad = cg.grad;
-        // perform gradient step
-        const ymean = this.zeros(this.dim);
-        for (let i = 0; i < N; i++) {
-            for (let d = 0; d < this.dim; d++) {
-                const gid = grad[i][d];
-                const sid = this.ystep[i][d];
-                const gainid = this.gains[i][d];
-                // compute gain update
-                let newgain = this.sign(gid) === this.sign(sid) ? gainid * 0.8 : gainid + 0.2;
-                if (newgain < 0.01) {
-                    newgain = 0.01;
-                } // clamp
-                this.gains[i][d] = newgain; // store for next turn
-                // compute momentum step direction
-                const momval = this.iter < 250 ? 0.5 : 0.8;
-                const newsid = momval * sid - this.epsilon * newgain * grad[i][d];
-                this.ystep[i][d] = newsid; // remember the step we took
-                // step!
-                this.Y[i][d] += newsid;
-                ymean[d] += this.Y[i][d]; // accumulate mean so that we can center later
-            }
-        }
-        // reproject Y to be zero mean
-        for (let i = 0; i < N; i++) {
-            for (let d = 0; d < this.dim; d++) {
-                this.Y[i][d] -= ymean[d] / N;
-            }
-        }
-        //if(this.iter%100===0) console.log('iter ' + this.iter + ', cost: ' + cost);
-        return cost; // return current cost
-    }
-    // for debugging: gradient check
-    debugGrad() {
-        const N = this.N;
-        const cg = this.costGrad(this.Y); // evaluate gradient
-        const cost = cg.cost;
-        const grad = cg.grad;
-        const e = 1e-5;
-        for (let i = 0; i < N; i++) {
-            for (let d = 0; d < this.dim; d++) {
-                const yold = this.Y[i][d];
-                this.Y[i][d] = yold + e;
-                const cg0 = this.costGrad(this.Y);
-                this.Y[i][d] = yold - e;
-                const cg1 = this.costGrad(this.Y);
-                const analytic = grad[i][d];
-                const numerical = (cg0.cost - cg1.cost) / (2 * e);
-                console.log(i + ',' + d + ': gradcheck analytic: ' + analytic + ' vs. numerical: ' + numerical);
-                this.Y[i][d] = yold;
-            }
-        }
-    }
-    // return cost and gradient, given an arrangement
-    costGrad(Y) {
-        const N = this.N;
-        const dim = this.dim; // dim of output space
-        const P = this.P;
-        const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima
-        // compute current Q distribution, unnormalized first
-        const quArr = this.zeros(N * N);
-        let qsum = 0.0;
-        for (let i = 0; i < N; i++) {
-            for (let j = i + 1; j < N; j++) {
-                let dsum = 0.0;
-                for (let d = 0; d < dim; d++) {
-                    const dhere = Y[i][d] - Y[j][d];
-                    dsum += dhere * dhere;
-                }
-                const qu = 1.0 / (1.0 + dsum); // Student t-distribution
-                quArr[i * N + j] = qu;
-                quArr[j * N + i] = qu;
-                qsum += 2 * qu;
-            }
-        }
-        // normalize Q distribution to sum to 1
-        const NN = N * N;
-        const Q = this.zeros(NN);
-        for (let q = 0; q < NN; q++) {
-            Q[q] = Math.max(quArr[q] / qsum, 1e-100);
-        }
-        let cost = 0.0;
-        const grad = [];
-        for (let i = 0; i < N; i++) {
-            const gsum = new Array(dim); // init grad for point i
-            for (let d = 0; d < dim; d++) {
-                gsum[d] = 0.0;
-            }
-            for (let j = 0; j < N; j++) {
-                cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)
-                const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * quArr[i * N + j];
-                for (let d = 0; d < dim; d++) {
-                    gsum[d] += premult * (Y[i][d] - Y[j][d]);
-                }
-            }
-            grad.push(gsum);
-        }
-        return { cost, grad };
-    }
-}
-exports.TSNE = TSNE;
-
-
-/***/ })
-
-}]);
-//# sourceMappingURL=vendors-node_modules_keckelt_tsne_lib_index_js.js.map

package/dist/vendors-node_modules_keckelt_tsne_lib_index_js.js.map
@@ -1 +0,0 @@
{"version":3,"file":"vendors-node_modules_keckelt_tsne_lib_index_js.js","mappings":";;;;;;;;;AAAa;AACb,8CAA6C,EAAE,aAAa,EAAC;AAC7D,YAAY;AACZ,aAAa,mBAAO,CAAC,wDAAQ;AAC7B,wCAAuC,EAAE,qCAAqC,uBAAuB,EAAC;;;;;;;;;;;ACJzF;AACb,8CAA6C,EAAE,aAAa,EAAC;AAC7D,YAAY;AACZ;AACA;AACA;AACA;AACA;AACA;AACA;AACA,8DAA8D;AAC9D,+CAA+C;AAC/C,wDAAwD;AACxD;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,6BAA6B;AAC7B;AACA;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,4BAA4B,OAAO;AACnC;AACA;AACA;AACA;AACA;AACA,wCAAwC;AACxC;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B;AACA,4BAA4B,OAAO;AACnC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wCAAwC;AACxC,wBAAwB,OAAO;AAC/B,gCAAgC,OAAO;AACvC;AACA;AACA;AACA;AACA;AACA;AACA;AACA,mBAAmB,KAAK,KAAK,IAAI;AACjC;AACA,wCAAwC;AACxC;AACA;AACA,8CAA8C;AAC9C,qCAAqC;AACrC,oCAAoC;AACpC,wBAAwB,OAAO;AAC/B;AACA;AACA,0BAA0B;AAC1B;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,gCAAgC,OAAO;AACvC;AACA;AACA;AACA,sBAAsB;AACtB;AACA;AACA;AACA;AACA;AACA,gCAAgC,OAAO;AACvC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oCAAoC;AACpC;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,4BAA4B,OAAO;AACnC;AACA;AACA,UAAU;AACV;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,4BAA4B,OAAO;AACnC;AACA;AACA;AACA;AACA;AACA;AACA,cAAc;AACd;AACA;AACA;AACA;AACA;AACA;AACA;AACA,oCAAoC;AACpC,yDAAyD;AACzD,oBAAoB;AACpB,6BAA6B;AAC7B;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,yCAAyC;AACzC,wBAAwB,OAAO;AAC/B,gCAAgC,OAAO;AACvC;AACA;AACA;AACA;AACA;AACA;AACA;AACA,6BAA6B;AAC7B;AACA;AACA;AACA;AACA,iDAAiD;AACjD,0DAA0D;AAC1D,0DAA0D;AAC1D;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,0CAA0C;AAC1C;AACA;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,4BAA4B,cAAc;AAC1C;AACA;AACA;AACA;AACA;AACA;AACA;AACA,kBAAkB;AAClB,4CAA4C;AAC5C;AACA;AACA;AACA,2CAA2C;AAC3C;AACA;AACA,0CAA0C;AAC1C;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,4BAA4B,cAAc;AAC1C;AACA;AACA;AACA;AACA,qBAAqB;AACrB;AACA;AACA;AACA;AACA,0CAA0C;AAC1C;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,4BAA4B,cAAc;AAC1C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,8BAA8B;AAC9B;AACA,8CAA8C;AAC9C;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,gCAAgC,OAAO;AACvC;AACA,gCAAgC,SAAS;AACzC;AACA;AACA;AACA,+CAA+C;AAC/C;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,wBAAwB,QAAQ;AAChC;AACA;AACA;AACA;AACA,wBAAwB,OAAO;AAC/B,yCAAyC;AACzC,4BAA4B,SAAS;AACrC;AACA;AACA,4BAA4B,OAAO;AACnC,gEAAgE;AAChE;AACA,gCAAgC,SAAS;AACzC;AACA;AACA;AACA;AACA;AACA,iBAAiB;AACjB;AACA;AACA,YAAY","sources":["webpack://eda/./node_modules/@keckelt/tsne/lib/index.js","webpack://eda/./node_modules/@keckelt/tsne/lib/tsne.js"],"sourcesContent":["\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nexports.TSNE = void 0;\nvar tsne_1 = require(\"./tsne\");\nObject.defineProperty(exports, \"TSNE\", { enumerable: true, get: function () { return tsne_1.TSNE; } });\n","\"use strict\";\nObject.defineProperty(exports, \"__esModule\", { value: true });\nexports.TSNE = void 0;\nclass TSNE {\n constructor(opt) {\n // return 0 mean unit standard deviation random number\n this.returnV = false;\n this.vValue = 0.0;\n this.iter = 0;\n opt = opt || {};\n this.perplexity = this.getopt(opt, 'perplexity', 30); // effective number of nearest neighbors\n this.dim = this.getopt(opt, 'dim', 2); // by default 2-D tSNE\n this.epsilon = this.getopt(opt, 
'epsilon', 10); // learning rate\n }\n assert(condition, message) {\n if (!condition) {\n throw message || 'Assertion failed';\n }\n }\n // syntax sugar\n getopt(opt, field, defaultval) {\n if (opt.hasOwnProperty(field)) {\n return opt[field];\n }\n else {\n return defaultval;\n }\n }\n gaussRandom() {\n if (this.returnV) {\n this.returnV = false;\n return this.vValue;\n }\n const u = 2 * Math.random() - 1;\n const v = 2 * Math.random() - 1;\n const r = u * u + v * v;\n if (r === 0 || r > 1) {\n return this.gaussRandom();\n }\n const c = Math.sqrt(-2 * Math.log(r) / r);\n this.vValue = v * c; // cache this for next function call for efficiency\n this.returnV = true;\n return u * c;\n }\n // return random normal number\n randn(mu, std) { return mu + this.gaussRandom() * std; }\n // utilitity that creates contiguous vector of zeros of size n\n zeros(n) {\n if (typeof (n) === 'undefined' || isNaN(n)) {\n return [];\n }\n if (typeof ArrayBuffer === 'undefined') {\n // lacking browser support\n const arr = new Array(n);\n for (let i = 0; i < n; i++) {\n arr[i] = 0;\n }\n return arr;\n }\n else {\n return new Float64Array(n); // typed arrays are faster\n }\n }\n // utility that returns 2d array filled with random numbers\n // or with value s, if provided\n randn2d(n, d, s) {\n const uses = typeof s !== 'undefined';\n const x = [];\n for (let i = 0; i < n; i++) {\n const xhere = [];\n for (let j = 0; j < d; j++) {\n if (uses) {\n xhere.push(s);\n }\n else {\n xhere.push(this.randn(0.0, 1e-4));\n }\n }\n x.push(xhere);\n }\n return x;\n }\n // compute L2 distance between two vectors\n L2(x1, x2) {\n const D = x1.length;\n let d = 0;\n for (let i = 0; i < D; i++) {\n const x1i = x1[i];\n const x2i = x2[i];\n d += (x1i - x2i) * (x1i - x2i);\n }\n return d;\n }\n // compute pairwise distance in all vectors in X\n xtod(X) {\n const N = X.length;\n const dist = this.zeros(N * N); // allocate contiguous array\n for (let i = 0; i < N; i++) {\n for (let j = i + 1; j < N; j++) {\n const d = this.L2(X[i], X[j]);\n dist[i * N + j] = d;\n dist[j * N + i] = d;\n }\n }\n return dist;\n }\n // compute (p_{i|j} + p_{j|i})/(2n)\n d2p(D, perplexity, tol) {\n const nf = Math.sqrt(D.length); // this better be an integer\n const n = Math.floor(nf);\n this.assert(n === nf, 'D should have square number of elements.');\n const hTarget = Math.log(perplexity); // target entropy of distribution\n const P = this.zeros(n * n); // temporary probability matrix\n const prow = this.zeros(n); // a temporary storage compartment\n for (let i = 0; i < n; i++) {\n let betamin = -Infinity;\n let betamax = Infinity;\n let beta = 1; // initial value of precision\n let done = false;\n const maxtries = 50;\n // perform binary search to find a suitable precision beta\n // so that the entropy of the distribution is appropriate\n let num = 0;\n while (!done) {\n //debugger;\n // compute entropy and kernel row with beta precision\n let psum = 0.0;\n for (let j = 0; j < n; j++) {\n let pj = Math.exp(-D[i * n + j] * beta);\n if (i === j) {\n pj = 0;\n } // we dont care about diagonals\n prow[j] = pj;\n psum += pj;\n }\n // normalize p and compute entropy\n let nHere = 0.0;\n for (let j = 0; j < n; j++) {\n let pj;\n if (psum === 0) {\n pj = 0;\n }\n else {\n pj = prow[j] / psum;\n }\n prow[j] = pj;\n if (pj > 1e-7) {\n nHere -= pj * Math.log(pj);\n }\n }\n // adjust beta based on result\n if (nHere > hTarget) {\n // entropy was too high (distribution too diffuse)\n // so we need to increase the precision for more peaky distribution\n betamin = 
beta; // move up the bounds\n if (betamax === Infinity) {\n beta = beta * 2;\n }\n else {\n beta = (beta + betamax) / 2;\n }\n }\n else {\n // converse case. make distrubtion less peaky\n betamax = beta;\n if (betamin === -Infinity) {\n beta = beta / 2;\n }\n else {\n beta = (beta + betamin) / 2;\n }\n }\n // stopping conditions: too many tries or got a good precision\n num++;\n if (Math.abs(nHere - hTarget) < tol) {\n done = true;\n }\n if (num >= maxtries) {\n done = true;\n }\n }\n // console.log('data point ' + i + ' gets precision ' + beta + ' after ' + num + ' binary search steps.');\n // copy over the final prow to P at row i\n for (let j = 0; j < n; j++) {\n P[i * n + j] = prow[j];\n }\n } // end loop over examples i\n // symmetrize P and normalize it to sum to 1 over all ij\n const pOut = this.zeros(n * n);\n const N2 = n * 2;\n for (let i = 0; i < n; i++) {\n for (let j = 0; j < n; j++) {\n pOut[i * n + j] = Math.max((P[i * n + j] + P[j * n + i]) / N2, 1e-100);\n }\n }\n return pOut;\n }\n // helper function\n sign(x) { return x > 0 ? 1 : x < 0 ? -1 : 0; }\n // this function takes a set of high-dimensional points\n // and creates matrix P from them using gaussian kernel\n initDataRaw(X) {\n const N = X.length;\n const D = X[0].length;\n this.assert(N > 0, ' X is empty? You must have some data!');\n this.assert(D > 0, ' X[0] is empty? Where is the data?');\n const dists = this.xtod(X); // convert X to distances using gaussian kernel\n this.P = this.d2p(dists, this.perplexity, 1e-4); // attach to object\n this.N = N; // back up the size of the dataset\n this.initSolution(); // refresh this\n }\n // this function takes a given distance matrix and creates\n // matrix P from them.\n // D is assumed to be provided as a list of lists, and should be symmetric\n initDataDist(D) {\n const N = D.length;\n this.assert(N > 0, ' X is empty? You must have some data!');\n // convert D to a (fast) typed array version\n const dists = this.zeros(N * N); // allocate contiguous array\n for (let i = 0; i < N; i++) {\n for (let j = i + 1; j < N; j++) {\n const d = D[i][j];\n dists[i * N + j] = d;\n dists[j * N + i] = d;\n }\n }\n this.P = this.d2p(dists, this.perplexity, 1e-4);\n this.N = N;\n this.initSolution(); // refresh this\n }\n // (re)initializes the solution to random\n initSolution() {\n // generate random solution to t-SNE\n this.Y = this.randn2d(this.N, this.dim); // the solution\n this.gains = this.randn2d(this.N, this.dim, 1.0); // step gains to accelerate progress in unchanging directions\n this.ystep = this.randn2d(this.N, this.dim, 0.0); // momentum accumulator\n this.iter = 0;\n }\n // return pointer to current solution\n getSolution() {\n return this.Y;\n }\n // perform a single step of optimization to improve the embedding\n step() {\n this.iter += 1;\n const N = this.N;\n const cg = this.costGrad(this.Y); // evaluate gradient\n const cost = cg.cost;\n const grad = cg.grad;\n // perform gradient step\n const ymean = this.zeros(this.dim);\n for (let i = 0; i < N; i++) {\n for (let d = 0; d < this.dim; d++) {\n const gid = grad[i][d];\n const sid = this.ystep[i][d];\n const gainid = this.gains[i][d];\n // compute gain update\n let newgain = this.sign(gid) === this.sign(sid) ? gainid * 0.8 : gainid + 0.2;\n if (newgain < 0.01) {\n newgain = 0.01;\n } // clamp\n this.gains[i][d] = newgain; // store for next turn\n // compute momentum step direction\n const momval = this.iter < 250 ? 
0.5 : 0.8;\n const newsid = momval * sid - this.epsilon * newgain * grad[i][d];\n this.ystep[i][d] = newsid; // remember the step we took\n // step!\n this.Y[i][d] += newsid;\n ymean[d] += this.Y[i][d]; // accumulate mean so that we can center later\n }\n }\n // reproject Y to be zero mean\n for (let i = 0; i < N; i++) {\n for (let d = 0; d < this.dim; d++) {\n this.Y[i][d] -= ymean[d] / N;\n }\n }\n //if(this.iter%100===0) console.log('iter ' + this.iter + ', cost: ' + cost);\n return cost; // return current cost\n }\n // for debugging: gradient check\n debugGrad() {\n const N = this.N;\n const cg = this.costGrad(this.Y); // evaluate gradient\n const cost = cg.cost;\n const grad = cg.grad;\n const e = 1e-5;\n for (let i = 0; i < N; i++) {\n for (let d = 0; d < this.dim; d++) {\n const yold = this.Y[i][d];\n this.Y[i][d] = yold + e;\n const cg0 = this.costGrad(this.Y);\n this.Y[i][d] = yold - e;\n const cg1 = this.costGrad(this.Y);\n const analytic = grad[i][d];\n const numerical = (cg0.cost - cg1.cost) / (2 * e);\n console.log(i + ',' + d + ': gradcheck analytic: ' + analytic + ' vs. numerical: ' + numerical);\n this.Y[i][d] = yold;\n }\n }\n }\n // return cost and gradient, given an arrangement\n costGrad(Y) {\n const N = this.N;\n const dim = this.dim; // dim of output space\n const P = this.P;\n const pmul = this.iter < 100 ? 4 : 1; // trick that helps with local optima\n // compute current Q distribution, unnormalized first\n const quArr = this.zeros(N * N);\n let qsum = 0.0;\n for (let i = 0; i < N; i++) {\n for (let j = i + 1; j < N; j++) {\n let dsum = 0.0;\n for (let d = 0; d < dim; d++) {\n const dhere = Y[i][d] - Y[j][d];\n dsum += dhere * dhere;\n }\n const qu = 1.0 / (1.0 + dsum); // Student t-distribution\n quArr[i * N + j] = qu;\n quArr[j * N + i] = qu;\n qsum += 2 * qu;\n }\n }\n // normalize Q distribution to sum to 1\n const NN = N * N;\n const Q = this.zeros(NN);\n for (let q = 0; q < NN; q++) {\n Q[q] = Math.max(quArr[q] / qsum, 1e-100);\n }\n let cost = 0.0;\n const grad = [];\n for (let i = 0; i < N; i++) {\n const gsum = new Array(dim); // init grad for point i\n for (let d = 0; d < dim; d++) {\n gsum[d] = 0.0;\n }\n for (let j = 0; j < N; j++) {\n cost += -P[i * N + j] * Math.log(Q[i * N + j]); // accumulate cost (the non-constant portion at least...)\n const premult = 4 * (pmul * P[i * N + j] - Q[i * N + j]) * quArr[i * N + j];\n for (let d = 0; d < dim; d++) {\n gsum[d] += premult * (Y[i][d] - Y[j][d]);\n }\n }\n grad.push(gsum);\n }\n return { cost, grad };\n }\n}\nexports.TSNE = TSNE;\n"],"names":[],"sourceRoot":""}
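
Note: the removed chunk above is the webpack-bundled copy of the @keckelt/tsne t-SNE implementation, which version 1.4.6 appears to ship instead under the numerically named chunks added in the file list. For orientation only, here is a minimal sketch of how the TSNE class shown in that chunk is typically driven; the option names and methods (perplexity, dim, epsilon, initDataRaw, step, getSolution) come from the deleted code, while the import path and the toy data are illustrative assumptions, not taken from this diff.

import {TSNE} from '@keckelt/tsne'; // assumed entry point; lib/index.js re-exports TSNE from ./tsne

// Options visible in the deleted bundle (defaults: perplexity 30, dim 2, epsilon 10).
const tsne = new TSNE({perplexity: 10, dim: 2, epsilon: 10});

// Toy input: a few 3-D points, for illustration only.
const X = [
  [1.0, 0.0, 0.0],
  [0.9, 0.1, 0.0],
  [0.0, 1.0, 0.1],
  [0.0, 0.9, 0.2],
];

tsne.initDataRaw(X);          // builds the pairwise-affinity matrix P from raw vectors

for (let k = 0; k < 500; k++)
  tsne.step();                // one gradient-descent iteration per call

const Y = tsne.getSolution(); // N x dim array holding the current 2-D embedding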