tfjs-evolution 0.0.4 → 0.0.5
- package/README.md +6 -1
- package/esm2022/lib/components/display-panel/display-panel.component.mjs +32 -5
- package/esm2022/lib/models/custom-mobilenet.mjs +182 -0
- package/esm2022/lib/models/teachable-evolution.mjs +424 -0
- package/esm2022/lib/utils/class.mjs +2 -0
- package/esm2022/lib/utils/tf.mjs +29 -0
- package/esm2022/lib/utils/util.mjs +165 -0
- package/fesm2022/tfjs-evolution.mjs +824 -4
- package/fesm2022/tfjs-evolution.mjs.map +1 -1
- package/lib/components/display-panel/display-panel.component.d.ts +4 -0
- package/lib/models/custom-mobilenet.d.ts +63 -0
- package/lib/models/teachable-evolution.d.ts +101 -0
- package/lib/utils/class.d.ts +5 -0
- package/lib/utils/tf.d.ts +9 -0
- package/lib/utils/util.d.ts +43 -0
- package/package.json +1 -1
@@ -2,6 +2,8 @@ import * as i0 from '@angular/core';
 import { Injectable, Component } from '@angular/core';
 import * as i1 from '@angular/common';
 import { CommonModule } from '@angular/common';
+import * as tf from '@tensorflow/tfjs';
+import * as tfvis from '@tensorflow/tfjs-vis';
 
 class TfjsEvolutionService {
     constructor() { }
@@ -32,12 +34,805 @@ i0.ɵɵngDeclareClassMetadata({ minVersion: "12.0.0", version: "17.3.4", ngImpor
 ` }]
         }] });
 
+const IMAGE_SIZE = 224;
+const DEFAULT_MOBILENET_VERSION = 2;
+const DEFAULT_TRAINING_LAYER_V1 = 'conv_pw_13_relu';
+const DEFAULT_TRAINING_LAYER_V2 = "out_relu";
+const DEFAULT_ALPHA_V1_v2 = 0.35;
+const DEFAULT_ALPHA_V1 = 0.25; // 256 features
+const DEFAULT_ALPHA_V2 = 0.5; // 512 features
+const DEFAULT_ALPHA_V3 = 0.75; // 768 features
+const DEFAULT_ALPHA_V4 = 1; // 1024 features
+const DEFAULT_ALPHA = 1; // 1024 features
+// v2: 0.35, 0.50, 0.75 or 1.00.
+const isAlphaValid = (version, alpha) => {
+    if (version === 1) {
+        if (alpha !== 0.25 && alpha !== 0.5 && alpha !== 0.75 && alpha !== 1) {
+            console.warn("Invalid alpha. Options are: 0.25, 0.50, 0.75 or 1.00.");
+            console.log("Loading model with alpha: ", DEFAULT_ALPHA_V1.toFixed(2));
+            return DEFAULT_ALPHA_V1;
+        }
+    }
+    else {
+        if (alpha !== 0.35 && alpha !== 0.5 && alpha !== 0.75 && alpha !== 1) {
+            console.warn("Invalid alpha. Options are: 0.35, 0.50, 0.75 or 1.00.");
+            console.log("Loading model with alpha: ", DEFAULT_ALPHA_V2.toFixed(2));
+            return DEFAULT_ALPHA_V2;
+        }
+    }
+    return alpha;
+};
+const parseModelOptions = (options) => {
+    options = options || {};
+    if (options.checkpointUrl && options.trainingLayer) {
+        if (options.alpha || options.version) {
+            console.warn("Checkpoint URL passed to modelOptions, alpha options are ignored");
+        }
+        return [options.checkpointUrl, options.trainingLayer];
+    }
+    else {
+        options.version = options.version || DEFAULT_MOBILENET_VERSION;
+        if (options.version === 1) {
+            options.alpha = options.alpha || DEFAULT_ALPHA_V4;
+            options.alpha = isAlphaValid(options.version, options.alpha);
+            console.log(`Loading mobilenet ${options.version} and alpha ${options.alpha}`);
+            // exception: an alpha of 1 can only be written as "1.0" in the URL
+            let alphaString = options.alpha.toFixed(2);
+            if (alphaString === "1.00") {
+                alphaString = "1.0";
+            }
+            console.log("Using the model: ");
+            return [
+                // tslint:disable-next-line:max-line-length
+                // They are loading MobileNet_v1
+                `https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_${alphaString}_${IMAGE_SIZE}/model.json`,
+                DEFAULT_TRAINING_LAYER_V1
+            ];
+        }
+        else if (options.version === 2) {
+            options.alpha = options.alpha || DEFAULT_ALPHA_V4;
+            options.alpha = isAlphaValid(options.version, options.alpha);
+            console.log(`Loading mobilenet ${options.version} and alpha ${options.alpha}`);
+            console.log(`Loading mobilenet ${options.version} and alpha ${options.alpha}`);
+            return [
+                // tslint:disable-next-line:max-line-length
+                `https://storage.googleapis.com/teachable-machine-models/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_${options.alpha.toFixed(2)}_${IMAGE_SIZE}_no_top/model.json`,
+                DEFAULT_TRAINING_LAYER_V2
+            ];
+        }
+        else {
+            throw new Error(`MobileNet V${options.version} doesn't exist`);
+        }
+    }
+};
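For reference, a minimal sketch of what the defaults above resolve to (derived from the code: version 2, alpha 1, image size 224):

```ts
// With no options, parseModelOptions falls through to MobileNet v2 / alpha 1:
const alphaString = (1).toFixed(2); // "1.00"
const url = `https://storage.googleapis.com/teachable-machine-models/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_${alphaString}_224_no_top/model.json`;
// -> ..._1.00_224_no_top/model.json, paired with training layer "out_relu"
```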
+/**
+ * load the base mobilenet model
+ * @param modelOptions options determining what model to load
+ */
+async function loadTruncatedMobileNet(modelOptions) {
+    const [checkpointUrl, trainingLayer] = parseModelOptions(modelOptions);
+    const mobilenet = await tf.loadLayersModel(checkpointUrl);
+    if (modelOptions && modelOptions.version === 1) {
+        const layer = mobilenet.getLayer(trainingLayer);
+        const truncatedModel = tf.model({ inputs: mobilenet.inputs, outputs: layer.output });
+        console.log("Feature model loaded, memory: ", tf.memory().numBytes);
+        const model = tf.sequential();
+        model.add(truncatedModel);
+        model.add(tf.layers.flatten());
+        return model;
+    }
+    else {
+        const layer = mobilenet.getLayer(trainingLayer);
+        const truncatedModel = tf.model({ inputs: mobilenet.inputs, outputs: layer.output });
+        console.log("Feature model loaded, memory: ", tf.memory().numBytes);
+        const model = tf.sequential();
+        model.add(truncatedModel);
+        model.add(tf.layers.globalAveragePooling2d({})); // go from shape [7, 7, 1280] to [1280]
+        return model;
+    }
+}
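For orientation, a usage sketch of the loader above (the alpha value 0.35 and the zero-filled dummy input are illustrative assumptions; the URL template and the "out_relu" layer come straight from the code):

```ts
import * as tf from '@tensorflow/tfjs';

// Build the v2 feature extractor the same way loadTruncatedMobileNet does,
// then run a dummy 224x224 RGB batch through it to confirm the feature length.
async function demoFeatureExtractor() {
  const url = 'https://storage.googleapis.com/teachable-machine-models/' +
    'mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.35_224_no_top/model.json';
  const mobilenet = await tf.loadLayersModel(url);
  const truncated = tf.model({ inputs: mobilenet.inputs, outputs: mobilenet.getLayer('out_relu').output });
  const model = tf.sequential();
  model.add(truncated);
  model.add(tf.layers.globalAveragePooling2d({})); // [1, 7, 7, 1280] -> [1, 1280]
  const features = model.predict(tf.zeros([1, 224, 224, 3])) as tf.Tensor;
  console.log(features.shape); // [1, 1280]
}
```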
+class CustomMobileNet {
+    static getinputShape() {
+        /** truncatedModel is the base model, the model used to apply transfer learning */
+        const inputShape = this.truncatedModel.outputs[0].shape.slice(1); // [7 x 7 x 1280] (not sure about those dimensions)
+        // console.log("Input Shape(complete): ", this.truncatedModel.outputs[0].shape);
+        // console.log("Input Shape: ", inputShape);
+        const inputSize = tf.util.sizeFromShape(inputShape);
+        // console.log("Input Size: ", inputSize);
+        return inputSize;
+    }
+    static get EXPECTED_IMAGE_SIZE() {
+        return IMAGE_SIZE;
+    }
+    getMetadata() {
+        return this._metadata;
+    }
+    constructor() {
+        // this._metadata = fillMetadata(metadata);
+        // Loading the truncated model
+        // loadTruncatedMobileNet();
+        // this.loadFeatureModel();
+    }
+    static async loadFeatureModel() {
+        this.truncatedModel = await loadTruncatedMobileNet();
+    }
+    /**
+     * get the total number of classes existing within the model
+     */
+    // getTotalClasses() {
+    //     const output = this.model.output as SymbolicTensor;
+    //     const totalClasses = output.shape[1];
+    //     return totalClasses;
+    // }
+    /**
+     * get the model labels
+     */
+    getClassLabels() {
+        return this._metadata.labels;
+    }
+    /**
+     * Given an image element, makes a prediction through mobilenet returning the
+     * probabilities of the top K classes.
+     * @param image the image to classify
+     * @param maxPredictions the maximum number of classification predictions
+     */
+    // async predictTopK(image: ClassifierInputSource, maxPredictions = 10, flipped = false) {
+    //     const croppedImage = cropTo(image, this._metadata.imageSize, flipped);
+    //     const logits = tf.tidy(() => {
+    //         const captured = capture(croppedImage, this._metadata.grayscale);
+    //         return this.model.predict(captured);
+    //     });
+    //     // Convert logits to probabilities and class names.
+    //     const classes = await getTopKClasses(this._metadata.labels, logits as tf.Tensor<tf.Rank>, maxPredictions);
+    //     dispose(logits);
+    //     return classes;
+    // }
+    /**
+     * Given an image element, makes a prediction through mobilenet returning the
+     * probabilities for ALL classes.
+     * @param image the image to classify
+     * @param flipped whether to flip the image on X
+     */
+    // async predict(image: ClassifierInputSource, flipped = false) {
+    //     const croppedImage = cropTo(image, this._metadata.imageSize, flipped);
+    //     const logits = tf.tidy(() => {
+    //         const captured = capture(croppedImage, this._metadata.grayscale);
+    //         return this.model.predict(captured);
+    //     });
+    //     const values = await (logits as tf.Tensor<tf.Rank>).data();
+    //     const classes = [];
+    //     for (let i = 0; i < values.length; i++) {
+    //         classes.push({
+    //             className: this._metadata.labels[i],
+    //             probability: values[i]
+    //         });
+    //     }
+    //     dispose(logits);
+    //     return classes;
+    // }
+    dispose() {
+        this.truncatedModel.dispose();
+    }
+} // end of CustomMobileNet
+
+/**
+ * Receives an image and normalizes it between -1 and 1.
+ * Returns a batched image (1-element batch) of shape [1, w, h, c]
+ * @param rasterElement the element with pixels to convert to a Tensor
+ * @param grayscale optional flag that changes the crop to [1, w, h, 1]
+ */
+function capture(rasterElement, grayscale) {
+    return tf.tidy(() => {
+        // console.log("Not a tensor....")
+        const pixels = tf.browser.fromPixels(rasterElement);
+        // crop the image so we're using the center square
+        const cropped = cropTensor(pixels);
+        // Expand the outermost dimension so we have a batch size of 1
+        const batchedImage = cropped.expandDims(0);
+        // Normalize the image between -1 and 1. The image comes in between 0-255,
+        // so we divide by 127 and subtract 1.
+        return batchedImage.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
+    });
+}
+function cropTensor(img) {
+    const size = Math.min(img.shape[0], img.shape[1]);
+    const centerHeight = img.shape[0] / 2;
+    const beginHeight = centerHeight - (size / 2);
+    const centerWidth = img.shape[1] / 2;
+    const beginWidth = centerWidth - (size / 2);
+    return img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
+}
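A quick worked check of the normalization used by capture() (values are illustrative; note the top end slightly overshoots 1, since 255 / 127 − 1 ≈ 1.008):

```ts
import * as tf from '@tensorflow/tfjs';

// x / 127 - 1 maps [0, 255] pixels to roughly [-1, 1]:
const pixels = tf.tensor([0, 127, 255]);
const normalized = pixels.div(tf.scalar(127)).sub(tf.scalar(1));
normalized.print(); // [-1, 0, 1.0078741]
```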
+
+class Util {
+    /**
+     * Receives an image and normalizes it between -1 and 1.
+     * Returns a batched image (1-element batch) of shape [1, w, h, c]
+     * @param rasterElement the element with pixels to convert to a Tensor
+     * @param grayscale optional flag that changes the crop to [1, w, h, 1]
+     */
+    capture(rasterElement, grayscale) {
+        return tf.tidy(() => {
+            const pixels = tf.browser.fromPixels(rasterElement);
+            // crop the image so we're using the center square
+            const cropped = this.cropTensor(pixels, grayscale);
+            // Expand the outermost dimension so we have a batch size of 1
+            const batchedImage = cropped.expandDims(0);
+            // Normalize the image between -1 and 1. The image comes in between 0-255,
+            // so we divide by 127 and subtract 1.
+            return batchedImage.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
+        });
+    }
+    cropTensor(img, grayscaleModel, grayscaleInput) {
+        const size = Math.min(img.shape[0], img.shape[1]);
+        const centerHeight = img.shape[0] / 2;
+        const beginHeight = centerHeight - (size / 2);
+        const centerWidth = img.shape[1] / 2;
+        const beginWidth = centerWidth - (size / 2);
+        if (grayscaleModel && !grayscaleInput) {
+            // cropped rgb data
+            let grayscale_cropped = img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
+            grayscale_cropped = grayscale_cropped.reshape([size * size, 1, 3]);
+            const rgb_weights = [0.2989, 0.5870, 0.1140];
+            grayscale_cropped = tf.mul(grayscale_cropped, rgb_weights);
+            grayscale_cropped = grayscale_cropped.reshape([size, size, 3]);
+            grayscale_cropped = tf.sum(grayscale_cropped, -1);
+            grayscale_cropped = tf.expandDims(grayscale_cropped, -1);
+            return grayscale_cropped;
+        }
+        return img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
+    }
+    /**
+     * This function will make a copy of a model at the weight level.
+     * This is an attempt to avoid influencing the new model when the old one
+     * is eliminated.
+     *
+     * @param originalModel - the model to be copied
+     * @param recipient - the new model
+     */
+    async copyModel_v3(originalModel, recipient) {
+        originalModel.layers.forEach((layer, index) => {
+            recipient.layers[index].setWeights(layer.getWeights());
+        });
+        // originalModel.dispose();
+    }
+    /**
+     * This function will make a copy of a TFJS model, so that it is possible
+     * to erase the original.
+     * @param model - model to be copied
+     * @returns - copy of the model
+     */
+    async copyModel_v2(originalModel) {
+        // Serialize the original model
+        const modelTopology = originalModel.toJSON();
+        // Load the serialized model into a new model
+        const copiedModel = await tf.loadLayersModel(tf.io.fromMemory(modelTopology, undefined, undefined));
+        // Compile the copied model with the same settings as the original
+        copiedModel.compile({
+            loss: originalModel.loss,
+            optimizer: originalModel.optimizer
+        });
+        return copiedModel;
+    }
+    /**
+     * This function will make a copy of a TFJS model, so that it is possible
+     * to erase the original.
+     * @param model - model to be copied
+     * @returns - copy of the model
+     */
+    copyModel(model) {
+        const copy = tf.sequential();
+        `
+    `;
+        model.layers.forEach(layer => {
+            const aux = layer;
+            // layer.dispose();
+            copy.add(aux);
+        });
+        copy.compile({
+            loss: model.loss,
+            optimizer: model.optimizer
+        });
+        return copy;
+    }
+    removeElementByIndex(arr, index) {
+        // Check if the index is within bounds
+        if (index >= 0 && index < arr.length) {
+            // Remove the element at the specified index
+            arr.splice(index, 1);
+        }
+        return arr;
+    }
+    removeElement(arr, element) {
+        // Remove all occurrences of the specified element from the array
+        return arr.filter((item) => item !== element);
+    }
+    clean_array_of_tensors(tensors) {
+        tensors.forEach((elem, index) => {
+            // if(!index_selection.includes(index))
+            elem.dispose();
+        });
+    }
+    getClassNameBySignature(classes, signature) {
+        const class_name = classes.find(p => {
+            let match = true;
+            p.signature?.forEach((elem, index) => {
+                if (elem !== signature[index])
+                    match = false;
+            });
+            return match;
+        });
+        return class_name ? class_name.name : "not found";
+    }
+    identityMatrix(n) {
+        return Array.from({ length: n }, (_, i) => Array.from({ length: n }, (_, j) => (i === j ? 1 : 0)));
+    }
+    indexOfMax(arr) {
+        if (arr.length === 0) {
+            return -1; // Return -1 for an empty array
+        }
+        let max = arr[0];
+        let maxIndex = 0;
+        for (let i = 1; i < arr.length; i++) {
+            if (arr[i] > max) {
+                maxIndex = i;
+                max = arr[i];
+            }
+        }
+        return maxIndex;
+    }
+    suffle(array1, array2) {
+        // Shuffle the order of elements
+        for (let i = array1.length - 1; i > 0; i--) {
+            const j = Math.floor(Math.random() * (i + 1));
+            // Swap elements in both arrays
+            [array1[i], array1[j]] = [array1[j], array1[i]];
+            [array2[i], array2[j]] = [array2[j], array2[i]];
+        }
+    }
+    sortByValuePreservingIndex(arr1, arr2) {
+        // console.log("Vector for organizing: ", arr1)
+        // arr2[0].summary()
+        // Create an array of objects with value, index from arr1, and original index
+        const pairingArray = arr1.map((value, index) => ({
+            value,
+            index,
+            originalIndex: index,
+            elementFromArr2: arr2[index], // Preserve the corresponding element from arr2
+        }));
+        // Sort the pairing array by value (largest to smallest)
+        pairingArray.sort((a, b) => b.value - a.value);
+        // Extract the sorted elements from arr2 based on the original index
+        const sortedElementsFromArr2 = pairingArray.map(pair => pair.elementFromArr2);
+        return sortedElementsFromArr2;
+    }
+}
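A minimal sketch of the layer-by-layer weight transfer that copyModel_v3 performs (the function name here is hypothetical; both models must share the same architecture):

```ts
import * as tf from '@tensorflow/tfjs';

// setWeights() copies the values into the target's variables, so the source
// model can be disposed afterwards without affecting the copy.
function copyWeights(source: tf.LayersModel, target: tf.LayersModel): void {
  source.layers.forEach((layer, i) => {
    target.layers[i].setWeights(layer.getWeights());
  });
}
```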
+
+const VALIDATION_FRACTION = 0.15;
+/**
+ * Receives a Metadata object and fills in the optional fields such as timeStamp
+ * @param data a Metadata object
+ */
+const fillMetadata = (data) => {
+    // util.assert(typeof data.tfjsVersion === 'string', () => `metadata.tfjsVersion is invalid`);
+    // data.packageVersion = data.packageVersion || version;
+    data.packageName = data.packageName || '@teachablemachine/image';
+    data.timeStamp = data.timeStamp || new Date().toISOString();
+    data.userMetadata = data.userMetadata || {};
+    data.modelName = data.modelName || 'untitled';
+    data.labels = data.labels || [];
+    // data.imageSize = data.imageSize || IMAGE_SIZE;
+    return data;
+};
+class TeachableMobileNet extends CustomMobileNet {
+    // Array of all the examples collected.
+    /**
+     * It is static since all the instances will share the same features, saving memory and time.
+     * The idea is to avoid storing the features individually and having to recalculate them for every new
+     * individual.
+     */
+    static { this.examples = []; }
+    // Number of total samples
+    static { this.totalSamples = 0; }
+    static { this.classes_names = []; }
+    constructor() {
+        super();
+        this.classes = [];
+        this.createHead();
+    }
+    /**
+     * This method will return the head: the trainable part, the part under evolution.
+     */
+    getHead() {
+        return this.trainingModel;
+    }
+    /**
+     * Create the head for transfer learning.
+     * This is the trainable section of the transfer learning.
+     */
+    createHead() {
+        const inputSize = TeachableMobileNet.getinputShape();
+        this.trainingModel = tf.sequential({
+            layers: [
+                tf.layers.dense({
+                    inputShape: [inputSize],
+                    units: 100,
+                    activation: 'relu',
+                    useBias: true
+                }),
+                tf.layers.dense({
+                    useBias: false,
+                    activation: 'softmax',
+                    units: TeachableMobileNet.classes_names.length
+                })
+            ]
+        });
+        const optimizer = tf.train.adam();
+        // const optimizer = tf.train.rmsprop(params.learningRate);
+        this.trainingModel.compile({
+            optimizer,
+            // loss: 'binaryCrossentropy',
+            loss: 'categoricalCrossentropy',
+            metrics: ['accuracy']
+        });
+    }
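As a quick sanity check, the head above is an ordinary two-layer dense classifier; a minimal sketch (the 1280-feature input and 4 classes are illustrative assumptions) shows its size:

```ts
import * as tf from '@tensorflow/tfjs';

// Sketch of the trainable head: 1280 global-average-pooled MobileNet v2
// features in, 4 classes out (both assumed for the example).
const head = tf.sequential({
  layers: [
    tf.layers.dense({ inputShape: [1280], units: 100, activation: 'relu', useBias: true }),
    tf.layers.dense({ units: 4, activation: 'softmax', useBias: false }),
  ],
});
head.compile({ optimizer: tf.train.adam(), loss: 'categoricalCrossentropy', metrics: ['accuracy'] });
head.summary(); // ~128.5k trainable parameters: 1280*100 + 100 biases + 100*4
```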
+    async train() {
+        const trainingSurface = { name: 'Loss and MSE', tab: 'Training' };
+        const dataset = TeachableMobileNet.convertToTfDataset();
+        // Saving a copy of the validation dataset, for later
+        TeachableMobileNet.validationDataset = dataset.validationDataset;
+        // console.log("Dataset for training: ", dataset.trainDataset);
+        const trainData = dataset.trainDataset.batch(30);
+        const validationData = dataset.validationDataset.batch(10);
+        // this.createHead();
+        const callbacks = [
+            // Show on a tfjs-vis visor the loss and accuracy values at the end of each epoch.
+            tfvis.show.fitCallbacks(trainingSurface, ['loss', 'acc', "val_loss", "val_acc"], {
+                callbacks: ['onEpochEnd'],
+            }),
+            {},
+        ];
+        const history = await this.trainingModel.fitDataset(trainData, {
+            epochs: 100,
+            validationData,
+            callbacks
+        }).then((info) => {
+            console.log('Final accuracy', info.history.val_acc[info.history.acc.length - 1]);
+        });
+        // await this.accuracy_per_class();
+        // console.log("History: ", history.history.acc);
+        // await this.trainingModel.fit(this.featureX, this.target, {})
+    }
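A sketch of the tfjs-vis wiring used in train(), in isolation (surface name and metric list taken from the code above):

```ts
import * as tfvis from '@tensorflow/tfjs-vis';

// fitCallbacks() returns a callbacks object that plots the named metrics on
// the visor at each epoch end; pass it to model.fit() / model.fitDataset().
const surface = { name: 'Loss and MSE', tab: 'Training' };
const callbacks = tfvis.show.fitCallbacks(surface, ['loss', 'acc', 'val_loss', 'val_acc'], {
  callbacks: ['onEpochEnd'],
});
```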
+    async accuracy_per_class(confusion_matrix_recipient) {
+        /** Calculating accuracy per class */
+        const accuracyperclass = await this.calculateAccuracyPerClass(TeachableMobileNet.validationDataset);
+        // console.log("Accuracy per class: ", accuracyperclass);
+        // Confusion matrix
+        // Calling tf.confusionMatrix() method
+        const output = tf.math.confusionMatrix(accuracyperclass.reference, accuracyperclass.predictions, TeachableMobileNet.classes_names.length);
+        // Printing output
+        output.print();
+        const confusion_matrix = output.dataSync();
+        // console.log(confusion_matrix);
+        // console.log(confusion_matrix[TeachableMobileNet.classes_names.length + TeachableMobileNet.classes_names.length]);
+        const accuracy = [];
+        for (let i = 0; i < TeachableMobileNet.classes_names.length; i++) {
+            accuracy.push(confusion_matrix[TeachableMobileNet.classes_names.length * i + i] / TeachableMobileNet.numValidation);
+        }
+        console.log("Accuracy per class: ", accuracy);
+        for (let i = 0; i < TeachableMobileNet.classes_names.length; i++) {
+            confusion_matrix_recipient.push([]);
+            for (let j = 0; j < TeachableMobileNet.classes_names.length; j++) {
+                confusion_matrix_recipient[i].push([]);
+                confusion_matrix_recipient[i][j] = confusion_matrix[TeachableMobileNet.classes_names.length * i + j] / TeachableMobileNet.numValidation;
+                confusion_matrix_recipient[i][j] = (confusion_matrix_recipient[i][j].toFixed(2)) * 100;
+            }
+            // accuracy.push(confusion_matrix[TeachableMobileNet.classes_names.length*i + i]/TeachableMobileNet.numValidation)
+        }
+        console.log("Confusion matrix as a matrix");
+        console.log(confusion_matrix_recipient);
+        return accuracy.map((elem) => elem.toFixed(2) * 100);
+    }
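The flat indexing above follows the row-major layout of tf.math.confusionMatrix: entry (true class i, predicted class j) sits at index `numClasses * i + j`, and the diagonal counts correct hits. A small self-contained check (label values fabricated for the example):

```ts
import * as tf from '@tensorflow/tfjs';

const labels = tf.tensor1d([0, 1, 2, 2, 1], 'int32');      // ground truth
const predictions = tf.tensor1d([0, 2, 2, 2, 1], 'int32'); // model output
const cm = tf.math.confusionMatrix(labels, predictions, 3);
cm.print();
// [[1, 0, 0],
//  [0, 1, 1],
//  [0, 0, 2]]
```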
+    async loadImages(number_of_species, classes_names, options) {
+        TeachableMobileNet.classes_names = classes_names;
+        await this.add_species(number_of_species, options);
+    }
+    async add_species(number_of_species, options) {
+        // Loading feature model, used to create features from images
+        // await this.loadFeatureModel();
+        for (let i = 0; i < TeachableMobileNet.classes_names.length; i++) {
+            // this.add_images(this.classes_names[i], number_of_species, options);
+        }
+    }
+    /**
+     *
+     * @param name - name of the class receiving an example
+     * @param number_of_species - how many images to add
+     * @param options - details on the location of the images
+     */
+    async add_images(name, number_of_species, options) {
+        const class_add = [];
+        for (let i = 0; i < number_of_species; i++) {
+            // class_add.push(`${options.base}/${name}/${options.file_name} ${i}.${options.file_extension}`);
+            // Uploading images
+            const cake = new Image();
+            // cake.src = `${options.base}/${name}/${options.file_name} ${i}.${options.file_extension}`;
+            cake.height = 224;
+            cake.width = 224;
+            cake.src = "./assets/dataset/Can%C3%A1rio-da-Terra/image%200.jpeg";
+            // console.log("Image location: ", cake.src)
+            await new Promise((resolve, reject) => {
+                cake.onload = () => {
+                    // Finding the corresponding index of the class with the given name
+                    const index = TeachableMobileNet.classes_names.findIndex((elem) => elem === name);
+                    // this.addExample(index, cake);
+                    resolve();
+                };
+                cake.onerror = (error) => {
+                    // Handle error if the image fails to load
+                    reject(error);
+                };
+            });
+        }
+        // this.classes.push({name: name, images: class_add})
+    }
+    /**
+     * This method will transform images into tensors
+     * @param number_of_classes - number of classes
+     * @param classes_names - name of each class
+     */
+    async createTensors(number_of_classes, classes_names) {
+        let output = [];
+        /** There is a function on TensorFlow.js that also does that */
+        const signatures = new Util().identityMatrix(number_of_classes);
+        for (let i = 0; i < number_of_classes; i++) {
+            this.classes[i].signature = signatures[i];
+            this.classes[i].name = classes_names[i];
+            for (let j = 0; j < this.classes[i].images.length; j++) {
+            }
+        }
+    }
+    /**
+     * Add a sample of data under the provided className
+     * @param className the classification this example belongs to
+     * @param sample the image / tensor that belongs in this classification
+     */
+    // public async addExample(className: number, sample: HTMLCanvasElement | tf.Tensor) {
+    static async addExample(className, name, sample) {
+        // console.log("Adding a new example...")
+        const cap = isTensor(sample) ? sample : capture(sample);
+        // Getting the features
+        const example = this.truncatedModel.predict(cap);
+        // console.log("Shape after feature extraction: ", example.shape)
+        const activation = example.dataSync();
+        // Very important to clean up the memory afterwards; it makes the difference
+        cap.dispose();
+        example.dispose();
+        // Accessing the class-level variables, not the local ones
+        // save samples of each class separately
+        if (!TeachableMobileNet.examples[className])
+            // add an empty array; make sure there are no empty elements,
+            // as they would create issues when converting to tensors
+            TeachableMobileNet.examples[className] = [];
+        if (!TeachableMobileNet.classes_names[className])
+            // Saving the label when it first appears
+            TeachableMobileNet.classes_names[className] = name;
+        TeachableMobileNet.examples[className].push(activation);
+        // increase our sample counter
+        TeachableMobileNet.totalSamples++;
+    }
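A minimal usage sketch of addExample (the element id 'sample-0' and class index 0 are hypothetical; the image must be same-origin or CORS-enabled so that tf.browser.fromPixels can read its pixels):

```ts
// Assumes the shared truncated feature model has been loaded once beforehand.
async function demoAddExample() {
  await CustomMobileNet.loadFeatureModel();
  const img = document.getElementById('sample-0') as HTMLImageElement;
  await TeachableMobileNet.addExample(0, 'first-class', img); // stores one feature vector for class 0
}
```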
+    /**
+     * process the current examples provided to calculate labels and format
+     * into proper tf.data.Dataset
+     */
+    static prepare() {
+        for (const classes in TeachableMobileNet.examples) {
+            if (classes.length === 0) {
+                throw new Error('Add some examples before training');
+            }
+        }
+        const datasets = this.convertToTfDataset();
+        this.trainDataset = datasets.trainDataset;
+        this.validationDataset = datasets.validationDataset;
+    }
+    prepareDataset() {
+        for (let i = 0; i < TeachableMobileNet.numClasses; i++) {
+            // Different from the original implementation of TM, mine keeps the examples static.
+            // The goal is saving memory by using a single instance of the variable
+            TeachableMobileNet.examples[i] = [];
+        }
+    }
+    /**
+     * Process the examples by first shuffling randomly per class, then adding
+     * one-hot labels, then splitting into training/validation datasets, and finally
+     * sorting one last time
+     */
+    static convertToTfDataset() {
+        // first shuffle each class individually
+        // TODO: we could basically replicate this by inserting randomly
+        for (let i = 0; i < TeachableMobileNet.examples.length; i++) {
+            TeachableMobileNet.examples[i] = fisherYates(TeachableMobileNet.examples[i], this.seed);
+        }
+        // then break into validation and test datasets
+        let trainDataset = [];
+        let validationDataset = [];
+        // for each class, add samples to train and validation dataset
+        for (let i = 0; i < TeachableMobileNet.examples.length; i++) {
+            // console.log("Number of classes: ", TeachableMobileNet.classes_names.length);
+            const y = flatOneHot(i, TeachableMobileNet.classes_names.length);
+            const classLength = TeachableMobileNet.examples[i].length;
+            // console.log("Number of elements per class: ", classLength);
+            const numValidation = Math.ceil(VALIDATION_FRACTION * classLength);
+            const numTrain = classLength - numValidation;
+            this.numValidation = numValidation;
+            /** It is visiting per class; thus, it is possible to fix y, the target label */
+            const classTrain = this.examples[i].slice(0, numTrain).map((dataArray) => {
+                return { data: dataArray, label: y };
+            });
+            const classValidation = this.examples[i].slice(numTrain).map((dataArray) => {
+                return { data: dataArray, label: y };
+            });
+            trainDataset = trainDataset.concat(classTrain);
+            validationDataset = validationDataset.concat(classValidation);
+        }
+        // console.log("Training element: ", trainDataset[trainDataset.length-1])
+        // console.log("Training length: ", trainDataset.length)
+        // console.log("validation length: ", validationDataset.length);
+        // finally shuffle both train and validation datasets
+        trainDataset = fisherYates(trainDataset, this.seed);
+        validationDataset = fisherYates(validationDataset, this.seed);
+        const trainX = tf.data.array(trainDataset.map(sample => sample.data));
+        const validationX = tf.data.array(validationDataset.map(sample => sample.data));
+        const trainY = tf.data.array(trainDataset.map(sample => sample.label));
+        const validationY = tf.data.array(validationDataset.map(sample => sample.label));
+        // return tf.data dataset objects
+        return {
+            trainDataset: tf.data.zip({ xs: trainX, ys: trainY }),
+            validationDataset: tf.data.zip({ xs: validationX, ys: validationY })
+        };
+    }
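A sketch of the dataset plumbing above in isolation: per-sample feature vectors and one-hot labels are zipped into {xs, ys} records, which is the shape fitDataset() expects (the feature and label values here are fabricated placeholders):

```ts
import * as tf from '@tensorflow/tfjs';

async function demoPipeline() {
  const xs = tf.data.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]); // fake features
  const ys = tf.data.array([[1, 0], [0, 1], [1, 0]]);             // one-hot labels
  const dataset = tf.data.zip({ xs, ys }).shuffle(3).batch(2);
  await dataset.forEachAsync((batch) => console.log(batch));
}
```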
+    datasetForEvaluation() {
+    }
+    async evaluate() {
+        if (!TeachableMobileNet.feature_aux) {
+            const features = [];
+            const targets = [];
+            for (let i = 0; i < TeachableMobileNet.examples.length; i++) {
+                const y = flatOneHot(i, TeachableMobileNet.classes_names.length);
+                // For class i, push all the examples.
+                TeachableMobileNet.examples[i].forEach((elemn) => {
+                    // Pushing the target signature
+                    targets.push(y);
+                    // Pushing features
+                    features.push(elemn);
+                });
+            }
+            TeachableMobileNet.feature_aux = tf.tensor(features);
+            TeachableMobileNet.target_aux = tf.tensor(targets);
+        }
+        const aux = this.trainingModel.evaluate(TeachableMobileNet.feature_aux, TeachableMobileNet.target_aux);
+        return aux[1].dataSync()[0];
+    }
+    // async evaluate(){
+    //     const features: any=[];
+    //     const targets: any=[];
+    //     for (let i = 0; i < TeachableMobileNet.examples.length; i++) {
+    //         const y = flatOneHot(i, TeachableMobileNet.classes_names.length);
+    //         //For class i, push all the examples.
+    //         TeachableMobileNet.examples[i].forEach((elemn)=>{
+    //             //Pushing the target signature
+    //             targets.push(y);
+    //             //Pushing features
+    //             features.push(elemn)
+    //         })
+    //     }
+    //     const aux_features= tf.tensor(features);
+    //     const aux_target= tf.tensor(targets);
+    //     // console.log("Tensor stack for evaluation: ", aux_features.shape)
+    //     const aux: any = this.trainingModel.evaluate(aux_features, aux_target);
+    //     return aux[1].dataSync()[0];
+    // }
+    /*** Final statistics */
+    /*
+     * Calculate each class accuracy using the validation dataset
+     */
+    async calculateAccuracyPerClass(validationData) {
+        const validationXs = TeachableMobileNet.validationDataset.mapAsync(async (dataset) => {
+            return dataset.xs;
+        });
+        const validationYs = TeachableMobileNet.validationDataset.mapAsync(async (dataset) => {
+            return dataset.ys;
+        });
+        // console.log("validation dataset: ", validationXs);
+        // console.log("For calculating batch size: ", validationYs);
+        // we need to split our validation data into batches in case it is too large to fit in memory
+        const batchSize = Math.min(validationYs.size, 32);
+        // const batchSize = 1;
+        const iterations = Math.ceil(validationYs.size / batchSize);
+        // console.log("Batch size: ", batchSize);
+        const batchesX = validationXs.batch(batchSize);
+        const batchesY = validationYs.batch(batchSize);
+        const itX = await batchesX.iterator();
+        const itY = await batchesY.iterator();
+        const allX = [];
+        const allY = [];
+        for (let i = 0; i < iterations; i++) {
+            // 1. get the prediction values in batches
+            const batchedXTensor = await itX.next();
+            // console.log("Batch size on accuracy per class: ", batchedXTensor.value.shape);
+            const batchedXPredictionTensor = this.trainingModel.predict(batchedXTensor.value);
+            const argMaxX = batchedXPredictionTensor.argMax(1); // Returns the indices of the max values along an axis
+            allX.push(argMaxX);
+            // 2. get the ground truth label values in batches
+            const batchedYTensor = await itY.next();
+            const argMaxY = batchedYTensor.value.argMax(1); // Returns the indices of the max values along an axis
+            allY.push(argMaxY);
+            // 3. dispose of all our tensors
+            batchedXTensor.value.dispose();
+            batchedXPredictionTensor.dispose();
+            batchedYTensor.value.dispose();
+        }
+        // concatenate all the results of the batches
+        const reference = tf.concat(allY); // this is the ground truth
+        const predictions = tf.concat(allX); // this is the prediction our model is guessing
+        // console.log("this is the ground truth: ", reference.dataSync())
+        // console.log("This is the prediction our model is guessing: ", predictions.dataSync())
+        // only if we concatenated more than one tensor for prediction and reference
+        if (iterations !== 1) {
+            for (let i = 0; i < allX.length; i++) {
+                allX[i].dispose();
+                allY[i].dispose();
+            }
+        }
+        // console.log("Length: ", await reference.dataSync().length)
+        // const accuracyperclass=[];
+        // const reference_aux= await reference.dataSync();
+        // const prediction_aux= await predictions.dataSync();
+        // console.log( predictions.dataSync());
+        // reference_aux.forEach((element, index) => {
+        //     if()
+        // });
+        return { reference, predictions };
+    }
+} // end of class
+/*** Support methods (helpers) */
+const isTensor = (c) => typeof c.dataId === 'object' && typeof c.shape === 'object';
+/**
+ * Converts an integer into its one-hot representation and returns
+ * the data as a JS Array.
+ */
+function flatOneHot(label, numClasses) {
+    const labelOneHot = new Array(numClasses).fill(0);
+    labelOneHot[label] = 1;
+    return labelOneHot;
+}
+/**
+ * Shuffle an array of Float32Array or Samples using the Fisher-Yates algorithm.
+ * Takes an optional seed value to make shuffling predictable.
+ */
+function fisherYates(array, seed) {
+    const length = array.length;
+    // need to clone the array or we'd be editing the original as we go
+    const shuffled = array.slice();
+    for (let i = (length - 1); i > 0; i -= 1) {
+        let randomIndex;
+        if (seed) {
+            randomIndex = Math.floor(seed() * (i + 1));
+        }
+        else {
+            randomIndex = Math.floor(Math.random() * (i + 1));
+        }
+        [shuffled[i], shuffled[randomIndex]] = [shuffled[randomIndex], shuffled[i]];
+    }
+    return shuffled;
+}
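A quick sanity check of the two helpers (within this module's scope; the constant RNG is just an illustration, since any `() => number` in [0, 1) works as `seed`):

```ts
flatOneHot(2, 4);                    // [0, 0, 1, 0]
fisherYates([1, 2, 3, 4, 5]);        // e.g. [3, 1, 5, 2, 4] (random order)
fisherYates([1, 2, 3], () => 0.42);  // deterministic given a fixed seed function
```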
+
 class DisplayPanelComponent {
     constructor() {
         this.classes = [];
     }
     loadImages(number_of_species, classes_names, options) {
         this.classes_names = classes_names;
+        this.number_of_samples_per_class = number_of_species;
         this.add_species(number_of_species, options);
     }
     add_species(number_of_species, options) {
@@ -47,17 +842,42 @@ class DisplayPanelComponent {
     }
     add_images(name, number_of_species, options) {
         const class_add = [];
-        for (let i =
-            class_add.push(`${options.base}/${name}/${options.file_name} ${i}.${options.file_extension}`);
+        for (let i = 0; i < number_of_species; i++) {
+            class_add.push(`${options.base}/${name}/${options.file_name} ${i + 1}.${options.file_extension}`);
         }
         this.classes.push({ name: name, images: class_add });
     }
+    async addexamples() {
+        // This is needed to make sure it gives time for the images to upload.
+        // The images upload very fast, which makes this method execute before the images are in the HTML.
+        // It can be removed if somehow this method is only called after the images are available.
+        // console.log("Loading examples as tensors....")
+        await this.delay(0);
+        for (let i = 0; i < this.classes_names.length; i++) {
+            await this.add_example(this.classes_names[i], this.number_of_samples_per_class);
+        }
+    }
+    async add_example(name, number_of_species) {
+        const class_add = [];
+        // console.log(name)
+        for (let i = 0; i < number_of_species; i++) {
+            // Collecting the images from HTML
+            const aux = document.getElementById(`class-${name}-${i}`);
+            // Adding the example
+            const index = this.classes_names.findIndex((elem) => elem === name);
+            await TeachableMobileNet.addExample(index, name, aux);
+        }
+        // this.classes.push({name: name, images: class_add})
+    }
+    delay(ms) {
+        return new Promise((resolve) => setTimeout(resolve, ms));
+    }
     static { this.ɵfac = i0.ɵɵngDeclareFactory({ minVersion: "12.0.0", version: "17.3.4", ngImport: i0, type: DisplayPanelComponent, deps: [], target: i0.ɵɵFactoryTarget.Component }); }
-    static { this.ɵcmp = i0.ɵɵngDeclareComponent({ minVersion: "14.0.0", version: "17.3.4", type: DisplayPanelComponent, isStandalone: true, selector: "neuroevolution-display-panel", ngImport: i0, template: "<div *ngFor=\"let class of classes; index as i\">\r\n    <h1>{{class.name}}</h1>\r\n    <img *ngFor=\"let item of class.images; index as i\" [src]=\"item\" width=\"
+    static { this.ɵcmp = i0.ɵɵngDeclareComponent({ minVersion: "14.0.0", version: "17.3.4", type: DisplayPanelComponent, isStandalone: true, selector: "neuroevolution-display-panel", ngImport: i0, template: "<div *ngFor=\"let class of classes; index as i\">\r\n    <h1>{{class.name}}</h1>\r\n    <img *ngFor=\"let item of class.images; index as i\" [src]=\"item\" width=\"224\" height=\"224\" [id]=\"'class-' + class.name + '-' + i\" crossorigin=\"anonymous\" >\r\n</div>\r\n", styles: [""], dependencies: [{ kind: "ngmodule", type: CommonModule }, { kind: "directive", type: i1.NgForOf, selector: "[ngFor][ngForOf]", inputs: ["ngForOf", "ngForTrackBy", "ngForTemplate"] }] }); }
 }
 i0.ɵɵngDeclareClassMetadata({ minVersion: "12.0.0", version: "17.3.4", ngImport: i0, type: DisplayPanelComponent, decorators: [{
             type: Component,
-            args: [{ selector: 'neuroevolution-display-panel', standalone: true, imports: [CommonModule], template: "<div *ngFor=\"let class of classes; index as i\">\r\n    <h1>{{class.name}}</h1>\r\n    <img *ngFor=\"let item of class.images; index as i\" [src]=\"item\" width=\"
+            args: [{ selector: 'neuroevolution-display-panel', standalone: true, imports: [CommonModule], template: "<div *ngFor=\"let class of classes; index as i\">\r\n    <h1>{{class.name}}</h1>\r\n    <img *ngFor=\"let item of class.images; index as i\" [src]=\"item\" width=\"224\" height=\"224\" [id]=\"'class-' + class.name + '-' + i\" crossorigin=\"anonymous\" >\r\n</div>\r\n" }]
 }] });
 
 /*