learning_model 1.0.51 → 1.0.53
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/Makefile +7 -1
- package/dist/learning/mobilenet.d.ts +0 -1
- package/dist/learning/mobilenet.js +3 -3
- package/dist/lib/index.js +10 -0
- package/dist/lib/learning/base.js +2 -0
- package/dist/lib/learning/data_model.js +215 -0
- package/dist/lib/learning/data_model.test.js +56 -0
- package/dist/lib/learning/mobilenet.d.ts +0 -1
- package/dist/lib/learning/mobilenet.js +386 -0
- package/dist/lib/learning/mobilenet.test.js +89 -0
- package/dist/lib/utils/canvas.js +46 -0
- package/dist/lib/utils/data_manager.d.ts +2 -3
- package/dist/lib/utils/data_manager.js +72 -0
- package/dist/lib/utils/dataset.d.ts +1 -1
- package/dist/lib/utils/dataset.js +20 -0
- package/dist/lib/utils/tf.d.ts +1 -1
- package/dist/lib/utils/tf.js +135 -0
- package/lib/learning/mobilenet.ts +3 -3
- package/package.json +2 -3
- package/tsconfig.json +1 -0
- package/dist/index.bundle.js +0 -2
- package/dist/index.bundle.js.LICENSE.txt +0 -352
- package/dist/index.html +0 -1
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
|
3
|
+
if (k2 === undefined) k2 = k;
|
|
4
|
+
var desc = Object.getOwnPropertyDescriptor(m, k);
|
|
5
|
+
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
|
6
|
+
desc = { enumerable: true, get: function() { return m[k]; } };
|
|
7
|
+
}
|
|
8
|
+
Object.defineProperty(o, k2, desc);
|
|
9
|
+
}) : (function(o, m, k, k2) {
|
|
10
|
+
if (k2 === undefined) k2 = k;
|
|
11
|
+
o[k2] = m[k];
|
|
12
|
+
}));
|
|
13
|
+
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
|
14
|
+
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
|
15
|
+
}) : function(o, v) {
|
|
16
|
+
o["default"] = v;
|
|
17
|
+
});
|
|
18
|
+
var __importStar = (this && this.__importStar) || (function () {
|
|
19
|
+
var ownKeys = function(o) {
|
|
20
|
+
ownKeys = Object.getOwnPropertyNames || function (o) {
|
|
21
|
+
var ar = [];
|
|
22
|
+
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
|
|
23
|
+
return ar;
|
|
24
|
+
};
|
|
25
|
+
return ownKeys(o);
|
|
26
|
+
};
|
|
27
|
+
return function (mod) {
|
|
28
|
+
if (mod && mod.__esModule) return mod;
|
|
29
|
+
var result = {};
|
|
30
|
+
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
|
|
31
|
+
__setModuleDefault(result, mod);
|
|
32
|
+
return result;
|
|
33
|
+
};
|
|
34
|
+
})();
|
|
35
|
+
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
|
|
36
|
+
function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
|
|
37
|
+
return new (P || (P = Promise))(function (resolve, reject) {
|
|
38
|
+
function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
|
|
39
|
+
function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
|
|
40
|
+
function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
|
|
41
|
+
step((generator = generator.apply(thisArg, _arguments || [])).next());
|
|
42
|
+
});
|
|
43
|
+
};
|
|
44
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
45
|
+
exports.isTensor = isTensor;
|
|
46
|
+
exports.loadModel = loadModel;
|
|
47
|
+
exports.mobileNetURL = mobileNetURL;
|
|
48
|
+
exports.imageToTensor = imageToTensor;
|
|
49
|
+
exports.capture = capture;
|
|
50
|
+
exports.cropTensor = cropTensor;
|
|
51
|
+
const tf = __importStar(require("@tensorflow/tfjs"));
|
|
52
|
+
/**
 * Structural (duck-typing) check for a tf.Tensor-like value: real tensors
 * carry an object `dataId` handle and an object (array) `shape`.
 *
 * Fix: `typeof null === 'object'`, so the previous check misclassified
 * `{dataId: null, shape: null}` as a tensor and threw on a null/undefined
 * argument. Null inputs now simply return false.
 *
 * @param {*} c - candidate value (tensor or raster image source).
 * @returns {boolean} true when `c` looks like a tf.Tensor.
 */
function isTensor(c) {
    if (c == null) {
        return false; // null/undefined can never be a tensor
    }
    return c.dataId != null && typeof c.dataId === 'object'
        && c.shape != null && typeof c.shape === 'object';
}
|
|
55
|
+
/**
 * Loads a hosted MobileNet backbone, truncates it at its last feature layer,
 * and appends a head that flattens the features into a single vector, making
 * the result usable as a transfer-learning feature extractor.
 *
 * Fixes/improvements: the v1 and v2 branches were near-duplicates — only the
 * layer name and the head differed — so they are deduplicated; `var` and `==`
 * replaced with `const` and `===`; the hard-coded version is now a defaulted
 * parameter (backward compatible: callers with no argument still get v2).
 *
 * @param {number} [mobileNetVersion=2] - 1 for MobileNet v1; anything else is treated as v2.
 * @returns {Promise<tf.Sequential>} truncated model producing a flat feature vector.
 */
function loadModel(mobileNetVersion = 2) {
    return __awaiter(this, void 0, void 0, function* () {
        // Last activation layer kept for each MobileNet version.
        const trainLayer = mobileNetVersion === 1 ? 'conv_pw_13_relu' : 'out_relu';
        const baseModel = yield tf.loadLayersModel(mobileNetURL(mobileNetVersion));
        // Truncate the graph at the chosen feature layer.
        const truncatedModel = tf.model({
            inputs: baseModel.inputs,
            outputs: baseModel.getLayer(trainLayer).output,
        });
        const model = tf.sequential();
        model.add(truncatedModel);
        if (mobileNetVersion === 1) {
            model.add(tf.layers.flatten());
        }
        else {
            model.add(tf.layers.globalAveragePooling2d({})); // go from shape [7, 7, 1280] to [1280]
        }
        return model;
    });
}
|
|
86
|
+
/**
 * Returns the hosted model.json manifest URL for the requested MobileNet
 * version.
 *
 * @param {number} version - 1 for MobileNet v1; any other value yields the
 *   v2 "no top" weights hosted for Teachable Machine.
 * @returns {string} absolute URL of the model manifest.
 */
function mobileNetURL(version) {
    // Strict comparison: versions are numeric (loadModel passes a number).
    if (version === 1) {
        return "https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_1.0_224/model.json";
    }
    return "https://storage.googleapis.com/teachable-machine-models/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224_no_top/model.json";
}
|
|
92
|
+
/**
 * Normalizes an input to a tf.Tensor: tensors pass straight through,
 * anything else (img/canvas/video pixel source) is rasterized with
 * tf.browser.fromPixels.
 *
 * @param {tf.Tensor|*} data - tensor or browser pixel source.
 * @returns {tf.Tensor} the tensor form of `data`.
 */
function imageToTensor(data) {
    return data instanceof tf.Tensor
        ? data
        : tf.browser.fromPixels(data);
}
|
|
103
|
+
/**
 * Grabs one frame from a raster source (img/canvas/video), center-crops it
 * to a square, adds a batch dimension, and rescales pixel values from
 * [0, 255] to [-1, 1]. All intermediates are disposed via tf.tidy.
 *
 * @param {*} rasterElement - image/canvas/video element to read pixels from.
 * @param {boolean} grayscale - forwarded to cropTensor as its grayscale-model flag.
 * @returns {Promise<tf.Tensor>} float tensor of shape [1, size, size, channels] in [-1, 1].
 */
function capture(rasterElement, grayscale) {
    return __awaiter(this, void 0, void 0, function* () {
        return tf.tidy(() => {
            const raw = tf.browser.fromPixels(rasterElement);
            // Use the center square of the frame.
            const squared = cropTensor(raw, grayscale);
            // Batch dimension of 1 for model input.
            const batched = squared.expandDims(0);
            // Pixels arrive in [0, 255]; divide by 127 and subtract 1 => [-1, 1].
            const scaled = batched.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
            return scaled;
        });
    });
}
|
|
117
|
+
/**
 * Center-crops an HxWx3 image tensor to a square of side min(H, W). When the
 * model is grayscale but the input is RGB, the crop is also converted to a
 * single luminance channel (ITU-R BT.601 weights), yielding [size, size, 1].
 *
 * Fix: the begin offsets were computed as `center - size/2`, which is
 * fractional whenever a dimension's leftover (dim - size) is odd, and
 * tf slice requires integer coordinates — offsets are now floored.
 *
 * @param {tf.Tensor3D} img - input image tensor, shape [H, W, 3].
 * @param {boolean} grayscaleModel - model expects a single channel.
 * @param {boolean} [grayscaleInput] - input is already grayscale (skip conversion).
 * @returns {tf.Tensor3D} [size, size, 3] crop, or [size, size, 1] luminance crop.
 */
function cropTensor(img, grayscaleModel, grayscaleInput) {
    const size = Math.min(img.shape[0], img.shape[1]);
    // (dim - size) / 2 === center - size/2; floor keeps slice coords integral.
    const beginHeight = Math.floor((img.shape[0] - size) / 2);
    const beginWidth = Math.floor((img.shape[1] - size) / 2);
    if (grayscaleModel && !grayscaleInput) {
        // Crop the RGB data, then collapse to luminance: weight each channel,
        // sum over the channel axis, and restore a trailing channel dim.
        let grayscale_cropped = img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
        grayscale_cropped = grayscale_cropped.reshape([size * size, 1, 3]);
        const rgb_weights = [0.2989, 0.5870, 0.1140];
        grayscale_cropped = tf.mul(grayscale_cropped, rgb_weights);
        grayscale_cropped = grayscale_cropped.reshape([size, size, 3]);
        grayscale_cropped = tf.sum(grayscale_cropped, -1);
        grayscale_cropped = tf.expandDims(grayscale_cropped, -1);
        return grayscale_cropped;
    }
    return img.slice([beginHeight, beginWidth, 0], [size, size, 3]);
}
|
|
@@ -5,7 +5,7 @@
|
|
|
5
5
|
///////////////////////////////////////////////////////////////////////////
|
|
6
6
|
|
|
7
7
|
import * as tf from '@tensorflow/tfjs';
|
|
8
|
-
import '@tensorflow/tfjs-backend-wasm';
|
|
8
|
+
import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm';
|
|
9
9
|
import { dispose } from '@tensorflow/tfjs';
|
|
10
10
|
import { io } from '@tensorflow/tfjs-core';
|
|
11
11
|
import LearningInterface from './base';
|
|
@@ -201,8 +201,6 @@ class LearningMobilenet implements LearningInterface {
|
|
|
201
201
|
private async setupBackend() {
|
|
202
202
|
const isWasmSupported = await this.checkWasmSupport();
|
|
203
203
|
if (isWasmSupported) {
|
|
204
|
-
await tf.setBackend('wasm');
|
|
205
|
-
await tf.ready();
|
|
206
204
|
console.log('Backend is set to WebAssembly');
|
|
207
205
|
} else {
|
|
208
206
|
await tf.setBackend('cpu');
|
|
@@ -213,6 +211,8 @@ class LearningMobilenet implements LearningInterface {
|
|
|
213
211
|
|
|
214
212
|
private async checkWasmSupport(): Promise<boolean> {
|
|
215
213
|
try {
|
|
214
|
+
const wasmVersion = (tf.version as Record<string, string>)['tfjs-backend-wasm'] || tf.version['tfjs-core'];
|
|
215
|
+
setWasmPaths(`https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-wasm@${wasmVersion}/dist/`);
|
|
216
216
|
await tf.setBackend('wasm');
|
|
217
217
|
await tf.ready();
|
|
218
218
|
return true;
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "learning_model",
|
|
3
|
-
"version": "1.0.
|
|
3
|
+
"version": "1.0.53",
|
|
4
4
|
"description": "learning model develop",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"types": "dist/index.d.ts",
|
|
@@ -21,8 +21,7 @@
|
|
|
21
21
|
"@tensorflow/tfjs": "^4.6.0",
|
|
22
22
|
"@tensorflow/tfjs-layers": "^4.6.0",
|
|
23
23
|
"@tensorflow/tfjs-backend-wasm": "^4.20.0",
|
|
24
|
-
"canvas": "^2.11.2"
|
|
25
|
-
"learning_model": "^1.0.0"
|
|
24
|
+
"canvas": "^2.11.2"
|
|
26
25
|
},
|
|
27
26
|
"devDependencies": {
|
|
28
27
|
"@babel/core": "^7.15.0",
|