learning_model 1.0.4 → 1.0.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35) hide show
  1. package/README.md +15 -1
  2. package/build/index.js +10 -0
  3. package/build/learning/base.js +2 -0
  4. package/build/learning/image.js +252 -0
  5. package/build/learning/mobilenet_image.js +256 -0
  6. package/dist/index.bundle.js +2 -0
  7. package/dist/index.bundle.js.LICENSE.txt +335 -0
  8. package/dist/index.html +1 -0
  9. package/dist/types/index.d.ts +3 -0
  10. package/dist/types/learning/base.d.ts +19 -0
  11. package/dist/types/learning/image.d.ts +40 -0
  12. package/dist/types/learning/mobilenet_image.d.ts +42 -0
  13. package/dist/types/learning/mobilenet_image.test.d.ts +1 -0
  14. package/dist/types/public/index.d.ts +1 -0
  15. package/dist/types/src/index.d.ts +3 -0
  16. package/dist/types/src/learning/base.d.ts +21 -0
  17. package/dist/types/src/learning/image.d.ts +37 -0
  18. package/dist/types/src/learning/mobilenet_image.d.ts +39 -0
  19. package/package.json +5 -2
  20. package/public/index.ts +4 -4
  21. package/src/learning/base.ts +15 -11
  22. package/src/learning/image.ts +18 -9
  23. package/src/learning/mobilenet_image.test.ts +63 -0
  24. package/src/learning/mobilenet_image.ts +18 -11
  25. package/tsconfig.json +3 -2
  26. package/types/index.d.ts +3 -0
  27. package/types/learning/base.d.ts +19 -0
  28. package/types/learning/image.d.ts +40 -0
  29. package/types/learning/mobilenet_image.d.ts +42 -0
  30. package/types/learning/mobilenet_image.test.d.ts +1 -0
  31. package/types/public/index.d.ts +1 -0
  32. package/types/src/index.d.ts +3 -0
  33. package/types/src/learning/base.d.ts +19 -0
  34. package/types/src/learning/image.d.ts +40 -0
  35. package/types/src/learning/mobilenet_image.d.ts +42 -0
package/README.md CHANGED
@@ -1,3 +1,17 @@
1
1
  ## learning 클래스
2
- * image: 기본 CNN을 가지고 이미지 분류 작업
2
+ * image: 기본 CNN을 가지고 이미지 분류
3
3
  * mobilenet : 모바일넷 모델을 가지고 마지막 softmax층만 빼서 전이학습을 한 방법
4
+
5
+ ## 테스트 방법
6
+ ```
7
+ yarn
8
+ yarn start
9
+ ```
10
+
11
+ ## npm 배포
12
+ package.json 버전을 올린다.
13
+ ```
14
+ yarn build
15
+ npm login
16
+ npm publish
17
+ ```
package/build/index.js ADDED
@@ -0,0 +1,10 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.LearningMobilenetImage = exports.LearningImage = void 0;
7
+ const image_1 = __importDefault(require("./learning/image"));
8
+ exports.LearningImage = image_1.default;
9
+ const mobilenet_image_1 = __importDefault(require("./learning/mobilenet_image"));
10
+ exports.LearningMobilenetImage = mobilenet_image_1.default;
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,252 @@
1
+ "use strict";
2
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
3
+ if (k2 === undefined) k2 = k;
4
+ var desc = Object.getOwnPropertyDescriptor(m, k);
5
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
6
+ desc = { enumerable: true, get: function() { return m[k]; } };
7
+ }
8
+ Object.defineProperty(o, k2, desc);
9
+ }) : (function(o, m, k, k2) {
10
+ if (k2 === undefined) k2 = k;
11
+ o[k2] = m[k];
12
+ }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
18
+ var __importStar = (this && this.__importStar) || function (mod) {
19
+ if (mod && mod.__esModule) return mod;
20
+ var result = {};
21
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
22
+ __setModuleDefault(result, mod);
23
+ return result;
24
+ };
25
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
26
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
27
+ return new (P || (P = Promise))(function (resolve, reject) {
28
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
29
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
30
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
31
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
32
+ });
33
+ };
34
+ Object.defineProperty(exports, "__esModule", { value: true });
35
+ const tf = __importStar(require("@tensorflow/tfjs"));
36
/**
 * Image classifier that trains a small CNN from scratch.
 *
 * Workflow: call addData(label, pixels) once per sample, then train(),
 * then infer(pixels), which resolves to a Map of label -> accumulated
 * probability.
 */
class LearningImage {
    /**
     * @param {Object} [options]
     * @param {number} [options.epochs=10]    - number of training epochs.
     * @param {number} [options.batchSize=16] - mini-batch size passed to model.fit.
     * @param {number} [options.limitSize=2]  - minimum number of samples required
     *                                          before training is allowed.
     */
    constructor({ epochs = 10, batchSize = 16, limitSize = 2 } = {}) {
        this.trainImages = [];
        // Input geometry shared with the MobileNet-based sibling class.
        this.MOBILE_NET_INPUT_WIDTH = 224;
        this.MOBILE_NET_INPUT_HEIGHT = 224;
        this.MOBILE_NET_INPUT_CHANNEL = 3;
        this.IMAGE_NORMALIZATION_FACTOR = 255.0;
        // Event hooks — consumers overwrite these no-op defaults.
        this.onProgress = () => { };
        this.onLoss = () => { };
        this.onTrainBegin = () => { };
        this.onTrainEnd = () => { };
        this.model = null;
        this.epochs = epochs;
        this.batchSize = batchSize;
        this.labels = [];
        this.isRunning = false;
        this.isReady = false;
        // FIX: was hard-coded `this.limitSize = 2`, silently discarding the
        // constructor option (the mobilenet variant honors it).
        this.limitSize = limitSize;
    }
    /**
     * Registers a single training sample.
     * @param {string} label - class label for this sample.
     * @param {*} data - any pixel source accepted by tf.browser.fromPixels.
     * @throws re-throws any tensor-conversion failure.
     */
    addData(label, data) {
        try {
            const tensor = tf.browser.fromPixels(data);
            console.log('addData', tensor);
            this.trainImages.push(tensor);
            this.labels.push(label);
            if (this.labels.length >= this.limitSize) {
                this.isReady = true;
            }
        }
        catch (error) {
            // FIX: message previously read 'Model training failed' here,
            // which misattributed the failure.
            console.error('Failed to add training data', error);
            throw error;
        }
    }
    /**
     * Builds the CNN and fits it on all registered samples.
     * @returns {Promise<Object>} the tf.js fit() history.
     * @throws if training is already running or too few samples were added.
     */
    async train() {
        if (this.isRunning) {
            throw new Error('Training is already in progress.');
        }
        // FIX: validate BEFORE flipping isRunning — the old code set the flag
        // first and then returned a rejection that skipped the catch block,
        // leaving the instance permanently stuck in the "running" state.
        if (this.labels.length < this.limitSize) {
            throw new Error(`Please add at least ${this.limitSize} training samples before calling train().`);
        }
        // Bridge tf.js fit() lifecycle events to the instance hooks.
        const customCallback = {
            onTrainBegin: (log) => {
                this.onTrainBegin(log);
                console.log('Training has started.');
            },
            onTrainEnd: (log) => {
                this.onTrainEnd(log);
                console.log('Training has ended.');
                this.isRunning = false;
            },
            onBatchBegin: (batch, logs) => {
                console.log(`Batch ${batch} is starting.`);
            },
            onBatchEnd: (batch, logs) => {
                console.log(`Batch ${batch} has ended.`);
            },
            onEpochBegin: (epoch, logs) => {
                // Reported as a 1-based epoch count, not a percentage.
                this.onProgress(epoch + 1);
                console.log(`Epoch ${epoch + 1} is starting.`);
            },
            onEpochEnd: (epoch, logs) => {
                console.log(`Epoch ${epoch + 1} has ended.`);
                this.onLoss(logs.loss);
                console.log('Loss:', logs.loss);
            }
        };
        try {
            this.isRunning = true;
            this.model = await this._createModel(this.labels.length);
            const inputData = this._preprocessedInputData(this.model);
            const targetData = this._preprocessedTargetData();
            const history = await this.model.fit(inputData, targetData, {
                epochs: this.epochs,
                batchSize: this.batchSize,
                callbacks: customCallback
            });
            console.log('Model training completed', history);
            return history;
        }
        catch (error) {
            this.isRunning = false;
            console.error('Model training failed', error);
            throw error;
        }
    }
    /**
     * Runs inference on one image.
     * @param {*} data - pixel source accepted by tf.browser.fromPixels.
     * @returns {Promise<Map<string, number>>} label -> summed probability.
     * @throws if train() has not produced a model yet.
     */
    async infer(data) {
        if (this.model === null) {
            throw new Error('Model is null');
        }
        const tensor = tf.browser.fromPixels(data);
        const resizedTensor = tf.image.resizeBilinear(tensor, [this.MOBILE_NET_INPUT_WIDTH, this.MOBILE_NET_INPUT_HEIGHT]);
        // Add a batch dimension of 1 -> the rank-4 tensor the model expects.
        const reshapedTensor = resizedTensor.expandDims(0);
        const predictions = this.model.predict(reshapedTensor);
        const predictionsData = await predictions.data();
        // Labels may repeat (one entry per SAMPLE), so probabilities for the
        // same label are accumulated.
        const classProbabilities = new Map();
        for (let i = 0; i < predictionsData.length; i++) {
            const className = this.labels[i];
            const probability = predictionsData[i];
            const previous = classProbabilities.get(className);
            classProbabilities.set(className, previous === undefined ? probability : previous + probability);
        }
        console.log('Class Probabilities:', classProbabilities);
        return classProbabilities;
    }
    // Persisting the model is not implemented yet; this only logs.
    saveModel() {
        console.log('saved model');
    }
    /** @returns {boolean} whether a training run is currently in progress. */
    running() {
        return this.isRunning;
    }
    /** @returns {boolean} whether enough samples have been added to train. */
    ready() {
        return this.isReady;
    }
    /**
     * One-hot encodes the label list into the training target tensor.
     * NOTE(review): the one-hot depth is labels.length (one slot per SAMPLE,
     * not per unique class); duplicate labels map to the index of their first
     * occurrence. This matches the output width chosen in train(), but wastes
     * output units when labels repeat — confirm intent before changing.
     */
    _preprocessedTargetData() {
        console.log('uniqueLabels.length', this.labels, this.labels.length);
        const labelIndices = this.labels.map((label) => this.labels.indexOf(label));
        console.log('labelIndices', labelIndices);
        const oneHotEncode = tf.oneHot(tf.tensor1d(labelIndices, 'int32'), this.labels.length);
        console.log('oneHotEncode', oneHotEncode);
        return oneHotEncode;
    }
    /**
     * Stacks every stored training image into a single batch tensor shaped to
     * the model's input, e.g. [N, 224, 224, 3].
     * @param {*} model - the compiled model whose input shape to match.
     */
    _preprocessedInputData(model) {
        const inputShape = model.inputs[0].shape;
        console.log('inputShape', inputShape);
        // Drop the batch dimension: [null, 224, 224, 3] -> [224, 224, 3].
        const inputShapeArray = inputShape.slice(1);
        console.log('inputShapeArray', inputShapeArray);
        const inputBatch = tf.stack(this.trainImages.map((image) => {
            const xs = this._preprocessData(image);
            return tf.reshape(xs, inputShapeArray);
        }));
        return inputBatch;
    }
    /**
     * Resizes one image tensor to the model input size and normalizes pixel
     * values into [0, 1].
     */
    _preprocessData(tensor) {
        try {
            const resizedImage = tf.image.resizeBilinear(tensor, [this.MOBILE_NET_INPUT_WIDTH, this.MOBILE_NET_INPUT_HEIGHT]);
            const normalizedImage = resizedImage.div(this.IMAGE_NORMALIZATION_FACTOR);
            // The batch dimension added here is immediately flattened away by
            // the reshape in _preprocessedInputData.
            return normalizedImage.expandDims(0);
        }
        catch (error) {
            console.error('Failed to _preprocessData data', error);
            throw error;
        }
    }
    /**
     * Builds and compiles a small conv -> pool -> conv -> pool -> dense CNN.
     * @param {number} numClasses - width of the softmax output layer.
     */
    async _createModel(numClasses) {
        try {
            const inputShape = [this.MOBILE_NET_INPUT_WIDTH, this.MOBILE_NET_INPUT_HEIGHT, this.MOBILE_NET_INPUT_CHANNEL];
            const model = tf.sequential();
            model.add(tf.layers.conv2d({
                inputShape,
                filters: 32,
                kernelSize: 3,
                activation: 'relu'
            }));
            model.add(tf.layers.maxPooling2d({ poolSize: 2 }));
            model.add(tf.layers.conv2d({
                filters: 64,
                kernelSize: 3,
                activation: 'relu'
            }));
            model.add(tf.layers.maxPooling2d({ poolSize: 2 }));
            model.add(tf.layers.flatten());
            model.add(tf.layers.dense({
                units: numClasses,
                activation: 'softmax'
            }));
            model.compile({
                // Binary cross-entropy for exactly two classes, categorical otherwise.
                loss: (numClasses === 2) ? 'binaryCrossentropy' : 'categoricalCrossentropy',
                optimizer: tf.train.adam(),
                metrics: ['accuracy']
            });
            model.summary();
            return model;
        }
        catch (error) {
            console.error('Failed to load model', error);
            throw error;
        }
    }
}
252
+ exports.default = LearningImage;
@@ -0,0 +1,256 @@
1
+ "use strict";
2
+ ///////////////////////////////////////////////////////////////////////////
3
+ ///////////////////////////////////////////////////////////////////////////
4
+ ///////////////////////////////////////////////////////////////////////////
5
+ // mobilenet 모델을 이용한 전이학습 방법
6
+ ///////////////////////////////////////////////////////////////////////////
7
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
8
+ if (k2 === undefined) k2 = k;
9
+ var desc = Object.getOwnPropertyDescriptor(m, k);
10
+ if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
11
+ desc = { enumerable: true, get: function() { return m[k]; } };
12
+ }
13
+ Object.defineProperty(o, k2, desc);
14
+ }) : (function(o, m, k, k2) {
15
+ if (k2 === undefined) k2 = k;
16
+ o[k2] = m[k];
17
+ }));
18
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
19
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
20
+ }) : function(o, v) {
21
+ o["default"] = v;
22
+ });
23
+ var __importStar = (this && this.__importStar) || function (mod) {
24
+ if (mod && mod.__esModule) return mod;
25
+ var result = {};
26
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
27
+ __setModuleDefault(result, mod);
28
+ return result;
29
+ };
30
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
31
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
32
+ return new (P || (P = Promise))(function (resolve, reject) {
33
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
34
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
35
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
36
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
37
+ });
38
+ };
39
+ Object.defineProperty(exports, "__esModule", { value: true });
40
+ const tf = __importStar(require("@tensorflow/tfjs"));
41
/**
 * Image classifier using transfer learning: a pre-trained MobileNet feature
 * extractor (all layers frozen) with a fresh softmax head trained on the
 * registered samples.
 *
 * Workflow: call addData(label, pixels) once per sample, then train(),
 * then infer(pixels), which resolves to a Map of label -> accumulated
 * probability.
 */
class LearningMobilenetImage {
    /**
     * @param {Object} [options]
     * @param {string} [options.modelURL]     - URL of the MobileNet layers-model
     *                                          JSON (defaults to mobilenet_v1_0.25_224).
     * @param {number} [options.epochs=10]    - number of training epochs.
     * @param {number} [options.batchSize=16] - mini-batch size passed to model.fit.
     * @param {number} [options.limitSize=2]  - minimum number of samples required
     *                                          before training is allowed.
     */
    constructor({ modelURL = 'https://storage.googleapis.com/tfjs-models/tfjs/mobilenet_v1_0.25_224/model.json', // default MobileNet weights
    epochs = 10, batchSize = 16, limitSize = 2 } = {}) {
        this.trainImages = [];
        // MobileNet's fixed input geometry.
        this.MOBILE_NET_INPUT_WIDTH = 224;
        this.MOBILE_NET_INPUT_HEIGHT = 224;
        this.MOBILE_NET_INPUT_CHANNEL = 3;
        this.IMAGE_NORMALIZATION_FACTOR = 255.0;
        // Event hooks — consumers overwrite these no-op defaults.
        this.onProgress = () => { };
        this.onLoss = () => { };
        this.onTrainBegin = () => { };
        this.onTrainEnd = () => { };
        this.model = null;
        this.epochs = epochs;
        this.batchSize = batchSize;
        this.labels = [];
        this.modelURL = modelURL;
        this.isRunning = false;
        this.isReady = false;
        this.limitSize = limitSize;
    }
    /**
     * Registers a single training sample.
     * @param {string} label - class label for this sample.
     * @param {*} data - any pixel source accepted by tf.browser.fromPixels.
     * @throws re-throws any tensor-conversion failure.
     */
    addData(label, data) {
        try {
            const tensor = tf.browser.fromPixels(data);
            console.log('addData', tensor);
            this.trainImages.push(tensor);
            this.labels.push(label);
            if (this.labels.length >= this.limitSize) {
                this.isReady = true;
            }
        }
        catch (error) {
            // FIX: message previously read 'Model training failed' here,
            // which misattributed the failure.
            console.error('Failed to add training data', error);
            throw error;
        }
    }
    /**
     * Loads MobileNet, attaches a new classification head, and fits it on all
     * registered samples.
     * @returns {Promise<Object>} the tf.js fit() history.
     * @throws if training is already running or too few samples were added.
     */
    async train() {
        if (this.isRunning) {
            throw new Error('Training is already in progress.');
        }
        // FIX: validate BEFORE flipping isRunning — the old code set the flag
        // first and then returned a rejection that skipped the catch block,
        // leaving the instance permanently stuck in the "running" state.
        if (this.labels.length < this.limitSize) {
            throw new Error(`Please add at least ${this.limitSize} training samples before calling train().`);
        }
        // Bridge tf.js fit() lifecycle events to the instance hooks.
        const customCallback = {
            onTrainBegin: (log) => {
                this.onTrainBegin(log);
                console.log('Training has started.');
            },
            onTrainEnd: (log) => {
                this.onTrainEnd(log);
                console.log('Training has ended.');
                this.isRunning = false;
            },
            onBatchBegin: (batch, logs) => {
                console.log(`Batch ${batch} is starting.`);
            },
            onBatchEnd: (batch, logs) => {
                console.log(`Batch ${batch} has ended.`);
            },
            onEpochBegin: (epoch, logs) => {
                // Reported as a 1-based epoch count, not a percentage.
                this.onProgress(epoch + 1);
                console.log(`Epoch ${epoch + 1} is starting.`);
            },
            onEpochEnd: (epoch, logs) => {
                console.log(`Epoch ${epoch + 1} has ended.`);
                // FIX: forward the loss to the registered hook — it was assigned
                // in the constructor but never invoked, unlike the sibling
                // LearningImage class.
                this.onLoss(logs.loss);
                console.log('Loss:', logs.loss);
            }
        };
        try {
            this.isRunning = true;
            this.model = await this._createModel(this.labels.length);
            const inputData = this._preprocessedInputData(this.model);
            const targetData = this._preprocessedTargetData();
            const history = await this.model.fit(inputData, targetData, {
                epochs: this.epochs,
                batchSize: this.batchSize,
                callbacks: customCallback
            });
            console.log('Model training completed', history);
            return history;
        }
        catch (error) {
            this.isRunning = false;
            console.error('Model training failed', error);
            throw error;
        }
    }
    /**
     * Runs inference on one image.
     * @param {*} data - pixel source accepted by tf.browser.fromPixels.
     * @returns {Promise<Map<string, number>>} label -> summed probability.
     * @throws if train() has not produced a model yet.
     */
    async infer(data) {
        if (this.model === null) {
            throw new Error('Model is null');
        }
        const tensor = tf.browser.fromPixels(data);
        const resizedTensor = tf.image.resizeBilinear(tensor, [this.MOBILE_NET_INPUT_WIDTH, this.MOBILE_NET_INPUT_HEIGHT]);
        // Add a batch dimension of 1 -> the rank-4 tensor the model expects.
        const reshapedTensor = resizedTensor.expandDims(0);
        const predictions = this.model.predict(reshapedTensor);
        const predictionsData = await predictions.data();
        // Labels may repeat (one entry per SAMPLE), so probabilities for the
        // same label are accumulated.
        const classProbabilities = new Map();
        for (let i = 0; i < predictionsData.length; i++) {
            const className = this.labels[i];
            const probability = predictionsData[i];
            const previous = classProbabilities.get(className);
            classProbabilities.set(className, previous === undefined ? probability : previous + probability);
        }
        console.log('Class Probabilities:', classProbabilities);
        return classProbabilities;
    }
    // Persisting the model is not implemented yet; this only logs.
    saveModel() {
        console.log('saved model');
    }
    /** @returns {boolean} whether a training run is currently in progress. */
    running() {
        return this.isRunning;
    }
    /** @returns {boolean} whether enough samples have been added to train. */
    ready() {
        return this.isReady;
    }
    /**
     * One-hot encodes the label list into the training target tensor.
     * NOTE(review): the one-hot depth is labels.length (one slot per SAMPLE,
     * not per unique class); duplicate labels map to the index of their first
     * occurrence. This matches the output width chosen in train(), but wastes
     * output units when labels repeat — confirm intent before changing.
     */
    _preprocessedTargetData() {
        console.log('uniqueLabels.length', this.labels, this.labels.length);
        const labelIndices = this.labels.map((label) => this.labels.indexOf(label));
        console.log('labelIndices', labelIndices);
        const oneHotEncode = tf.oneHot(tf.tensor1d(labelIndices, 'int32'), this.labels.length);
        console.log('oneHotEncode', oneHotEncode);
        return oneHotEncode;
    }
    /**
     * Stacks every stored training image into a single batch tensor shaped to
     * the model's input, e.g. [N, 224, 224, 3].
     * @param {*} model - the compiled model whose input shape to match.
     */
    _preprocessedInputData(model) {
        const inputShape = model.inputs[0].shape;
        console.log('inputShape', inputShape);
        // Drop the batch dimension: [null, 224, 224, 3] -> [224, 224, 3].
        const inputShapeArray = inputShape.slice(1);
        console.log('inputShapeArray', inputShapeArray);
        const inputBatch = tf.stack(this.trainImages.map((image) => {
            const xs = this._preprocessData(image);
            return tf.reshape(xs, inputShapeArray);
        }));
        return inputBatch;
    }
    /**
     * Resizes one image tensor to MobileNet's input size and normalizes pixel
     * values into [0, 1].
     */
    _preprocessData(tensor) {
        try {
            // MobileNet's input layer is fixed at [null, 224, 224, 3].
            const resizedImage = tf.image.resizeBilinear(tensor, [this.MOBILE_NET_INPUT_WIDTH, this.MOBILE_NET_INPUT_HEIGHT]);
            const normalizedImage = resizedImage.div(this.IMAGE_NORMALIZATION_FACTOR);
            // The batch dimension added here is immediately flattened away by
            // the reshape in _preprocessedInputData.
            return normalizedImage.expandDims(0);
        }
        catch (error) {
            console.error('Failed to _preprocessData data', error);
            throw error;
        }
    }
    /**
     * Loads MobileNet from modelURL, truncates its final (softmax) layer,
     * freezes the remaining layers, and appends a fresh flatten + dense head.
     * @param {number} numClasses - width of the new softmax output layer.
     */
    async _createModel(numClasses) {
        try {
            const load_model = await tf.loadLayersModel(this.modelURL);
            // Drop MobileNet's last layer; keep everything up to the features.
            const truncatedModel = tf.model({
                inputs: load_model.inputs,
                outputs: load_model.layers[load_model.layers.length - 2].output
            });
            // Freeze all pre-trained layers so only the new head is trained.
            for (let layer of truncatedModel.layers) {
                layer.trainable = false;
            }
            const model = tf.sequential();
            model.add(truncatedModel);
            model.add(tf.layers.flatten());
            model.add(tf.layers.dense({
                units: numClasses,
                activation: 'softmax'
            }));
            model.compile({
                // Binary cross-entropy for exactly two classes, categorical otherwise.
                loss: (numClasses === 2) ? 'binaryCrossentropy' : 'categoricalCrossentropy',
                optimizer: tf.train.adam(),
                metrics: ['accuracy']
            });
            model.summary();
            return model;
        }
        catch (error) {
            console.error('Failed to load model', error);
            throw error;
        }
    }
}
256
+ exports.default = LearningMobilenetImage;