learning_model 1.0.48 → 1.0.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -138,8 +138,10 @@ class LearningMobilenet {
138
138
  return __awaiter(this, void 0, void 0, function* () {
139
139
  try {
140
140
  if (this.mobilenetModule !== null) {
141
- const cap = (0, tf_1.isTensor)(data) ? data : (0, tf_1.capture)(data, false);
142
- const predict = this.mobilenetModule.predict(cap);
141
+ const cap = (0, tf_1.isTensor)(data) ? data : yield (0, tf_1.capture)(data, false);
142
+ const predict = tf.tidy(() => {
143
+ return this.mobilenetModule.predict(cap);
144
+ });
143
145
  const activation = yield predict.data();
144
146
  const classIndex = this.registerClassNumber(label);
145
147
  if (!this.imageExamples[classIndex]) {
@@ -149,6 +151,12 @@ class LearningMobilenet {
149
151
  if (this.classNumber.length >= this.limitSize) {
150
152
  this.isReady = true;
151
153
  }
154
+ // Dispose of the prediction tensor to free memory
155
+ predict.dispose();
156
+ // If data was already a tensor, cap is caller-owned — don't dispose it. Otherwise capture() created cap here, so dispose it.
157
+ if (!(0, tf_1.isTensor)(data)) {
158
+ cap.dispose();
159
+ }
152
160
  }
153
161
  else {
154
162
  throw new Error('mobilenetModule is null');
@@ -246,8 +254,8 @@ class LearningMobilenet {
246
254
  try {
247
255
  const classProbabilities = new Map();
248
256
  const croppedImage = (0, canvas_1.cropTo)(data, 224, false);
257
+ const captured = yield (0, tf_1.capture)(croppedImage, false);
249
258
  const logits = tf.tidy(() => {
250
- const captured = (0, tf_1.capture)(croppedImage, false);
251
259
  return this.model.predict(captured);
252
260
  });
253
261
  const values = yield logits.data();
@@ -56,22 +56,24 @@ function ImagePathToTensor(imagePath) {
56
56
  }
57
57
  describe('LearningMobilenetImage', () => {
58
58
  const learning = new mobilenet_1.default({});
59
+ const image1Path = path.join(__dirname, '../../public/images/image1.jpeg');
60
+ const image2Path = path.join(__dirname, '../../public/images/image2.jpeg');
61
+ console.log('Resolved path for image1:', image1Path);
59
62
  beforeAll(() => __awaiter(void 0, void 0, void 0, function* () {
60
- const image1Path = path.join(__dirname, '../../public/images/image1.jpeg');
61
- const image2Path = path.join(__dirname, '../../public/images/image2.jpeg');
63
+ learning.init();
62
64
  imageTensor1 = yield ImagePathToTensor(image1Path);
63
65
  imageTensor2 = yield ImagePathToTensor(image2Path);
64
66
  }));
65
- test('loads an image and converts it to a tensor', () => {
67
+ it('loads an image and converts it to a tensor', () => {
66
68
  expect(imageTensor1).toBeDefined();
67
69
  expect(imageTensor1 instanceof tf.Tensor).toBe(true);
68
70
  expect(imageTensor2).toBeDefined();
69
71
  expect(imageTensor2 instanceof tf.Tensor).toBe(true);
70
72
  });
71
- test('mobilenet add data', () => {
72
- learning.addData("라벨1", imageTensor1);
73
- learning.addData("라벨1", imageTensor1);
74
- learning.addData("라벨2", imageTensor2);
75
- learning.addData("라벨2", imageTensor2);
76
- });
73
+ // test('mobilenet add data', () => {
74
+ // learning.addData("라벨1", imageTensor1);
75
+ // learning.addData("라벨1", imageTensor1);
76
+ // learning.addData("라벨2", imageTensor2);
77
+ // learning.addData("라벨2", imageTensor2);
78
+ // });
77
79
  });
@@ -3,5 +3,5 @@ export declare function isTensor(c: any): c is tf.Tensor;
3
3
  export declare function loadModel(): Promise<tf.LayersModel>;
4
4
  export declare function mobileNetURL(version: number): string;
5
5
  export declare function imageToTensor(data: any): tf.Tensor3D;
6
- export declare function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean): tf.Tensor<tf.Rank>;
6
+ export declare function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean): Promise<tf.Tensor<tf.Rank>>;
7
7
  export declare function cropTensor(img: tf.Tensor3D, grayscaleModel?: boolean, grayscaleInput?: boolean): tf.Tensor3D;
@@ -3,5 +3,5 @@ export declare function isTensor(c: any): c is tf.Tensor;
3
3
  export declare function loadModel(): Promise<tf.LayersModel>;
4
4
  export declare function mobileNetURL(version: number): string;
5
5
  export declare function imageToTensor(data: any): tf.Tensor3D;
6
- export declare function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean): tf.Tensor<tf.Rank>;
6
+ export declare function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean): Promise<tf.Tensor<tf.Rank>>;
7
7
  export declare function cropTensor(img: tf.Tensor3D, grayscaleModel?: boolean, grayscaleInput?: boolean): tf.Tensor3D;
package/dist/utils/tf.js CHANGED
@@ -90,15 +90,17 @@ function imageToTensor(data) {
90
90
  }
91
91
  exports.imageToTensor = imageToTensor;
92
92
  function capture(rasterElement, grayscale) {
93
- return tf.tidy(() => {
94
- const pixels = tf.browser.fromPixels(rasterElement);
95
- // crop the image so we're using the center square
96
- const cropped = cropTensor(pixels, grayscale);
97
- // Expand the outer most dimension so we have a batch size of 1
98
- const batchedImage = cropped.expandDims(0);
99
- // Normalize the image between -1 and a1. The image comes in between 0-255
100
- // so we divide by 127 and subtract 1.
101
- return batchedImage.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
93
+ return __awaiter(this, void 0, void 0, function* () {
94
+ return tf.tidy(() => {
95
+ const pixels = tf.browser.fromPixels(rasterElement);
96
+ // crop the image so we're using the center square
97
+ const cropped = cropTensor(pixels, grayscale);
98
+ // Expand the outer most dimension so we have a batch size of 1
99
+ const batchedImage = cropped.expandDims(0);
100
+ // Normalize the image between -1 and 1. The image comes in between 0-255
101
+ // so we divide by 127 and subtract 1.
102
+ return batchedImage.toFloat().div(tf.scalar(127)).sub(tf.scalar(1));
103
+ });
102
104
  });
103
105
  }
104
106
  exports.capture = capture;
package/jest.config.js CHANGED
@@ -2,7 +2,7 @@ module.exports = {
2
2
  preset: 'ts-jest',
3
3
  testEnvironment: 'node',
4
4
  // 기타 Jest 구성 옵션
5
- testMatch: ['**/?(*.)+(spec|test).[jt]s?(x)'],
6
- //testMatch: ['/lib/learning/data_model_test.ts'],
5
+ //testMatch: ['**/?(*.)+(spec|test).[jt]s?(x)'],
6
+ testMatch: ['**/mobile(*.)+(spec|test).[jt]s?(x)'],
7
7
  testPathIgnorePatterns: ['/node_modules/'],
8
8
  };
@@ -20,25 +20,29 @@ async function ImagePathToTensor(imagePath: string): Promise<tf.Tensor3D> {
20
20
 
21
21
  describe('LearningMobilenetImage', () => {
22
22
  const learning = new LearningMobilenetImage({});
23
-
23
+
24
+ const image1Path = path.join(__dirname, '../../public/images/image1.jpeg');
25
+ const image2Path = path.join(__dirname, '../../public/images/image2.jpeg');
26
+ console.log('Resolved path for image1:', image1Path);
24
27
  beforeAll(async () => {
25
- const image1Path = path.join(__dirname, '../../public/images/image1.jpeg');
26
- const image2Path = path.join(__dirname, '../../public/images/image2.jpeg');
28
+ learning.init();
27
29
  imageTensor1 = await ImagePathToTensor(image1Path);
28
30
  imageTensor2 = await ImagePathToTensor(image2Path);
29
31
  });
30
32
 
31
- test('loads an image and converts it to a tensor', () => {
33
+
34
+ it('loads an image and converts it to a tensor', () => {
32
35
  expect(imageTensor1).toBeDefined();
33
36
  expect(imageTensor1 instanceof tf.Tensor).toBe(true);
34
37
  expect(imageTensor2).toBeDefined();
35
38
  expect(imageTensor2 instanceof tf.Tensor).toBe(true);
36
39
  });
37
40
 
38
- test('mobilenet add data', () => {
39
- learning.addData("라벨1", imageTensor1);
40
- learning.addData("라벨1", imageTensor1);
41
- learning.addData("라벨2", imageTensor2);
42
- learning.addData("라벨2", imageTensor2);
43
- });
41
+ // test('mobilenet add data', () => {
42
+ // learning.addData("라벨1", imageTensor1);
43
+ // learning.addData("라벨1", imageTensor1);
44
+ // learning.addData("라벨2", imageTensor2);
45
+ // learning.addData("라벨2", imageTensor2);
46
+ // });
44
47
  });
48
+
@@ -150,17 +150,32 @@ class LearningMobilenet implements LearningInterface {
150
150
  public async addData(label: string, data: any): Promise<void> {
151
151
  try {
152
152
  if (this.mobilenetModule !== null) {
153
- const cap = isTensor(data) ? data : capture(data, false);
154
- const predict = this.mobilenetModule.predict(cap) as tf.Tensor;
153
+ const cap = isTensor(data) ? data : await capture(data, false);
154
+
155
+ const predict = tf.tidy(() => {
156
+ return this.mobilenetModule!.predict(cap) as tf.Tensor;
157
+ });
158
+
155
159
  const activation = await predict.data() as Float32Array;
160
+
156
161
  const classIndex = this.registerClassNumber(label);
157
162
  if (!this.imageExamples[classIndex]) {
158
163
  this.imageExamples[classIndex] = [];
159
164
  }
160
165
  this.imageExamples[classIndex].push(activation);
166
+
161
167
  if(this.classNumber.length >= this.limitSize) {
162
168
  this.isReady = true;
163
169
  }
170
+
171
+ // Dispose of the prediction tensor to free memory
172
+ predict.dispose();
173
+
174
+ // If data was already a tensor, cap is caller-owned — don't dispose it. Otherwise capture() created cap here, so dispose it.
175
+ if (!isTensor(data)) {
176
+ cap.dispose();
177
+ }
178
+
164
179
  } else {
165
180
  throw new Error('mobilenetModule is null');
166
181
  }
@@ -227,7 +242,9 @@ class LearningMobilenet implements LearningInterface {
227
242
  const trainData = datasets.trainDataset.batch(this.batchSize);
228
243
  const validationData = datasets.validationDataset.batch(this.batchSize);
229
244
  const optimizer = tf.train.adam(this.learningRate);
245
+
230
246
  const trainModel = await this._createModel(optimizer);
247
+
231
248
  const jointModel = tf.sequential();
232
249
  jointModel.add(this.mobilenetModule!);
233
250
  jointModel.add(trainModel);
@@ -257,16 +274,19 @@ class LearningMobilenet implements LearningInterface {
257
274
  try {
258
275
  const classProbabilities = new Map<string, number>();
259
276
  const croppedImage = cropTo(data, 224, false);
277
+ const captured = await capture(croppedImage, false);
260
278
 
261
279
  const logits = tf.tidy(() => {
262
- const captured = capture(croppedImage, false);
263
280
  return this.model!.predict(captured);
264
281
  });
282
+
265
283
  const values = await (logits as tf.Tensor<tf.Rank>).data();
266
284
  const EPSILON = 1e-6; // 매우 작은 값을 표현하기 위한 엡실론
285
+
267
286
  for (let i = 0; i < values.length; i++) {
268
287
  let probability = Math.max(0, Math.min(1, values[i])); // 확률 값을 0과 1 사이로 조정
269
288
  probability = probability < EPSILON ? 0 : probability; // 매우 작은 확률 값을 0으로 간주
289
+
270
290
  const className = this.classNumber[i]; // 클래스 이름
271
291
  const existingProbability = classProbabilities.get(className);
272
292
  if (existingProbability !== undefined) {
@@ -275,6 +295,7 @@ class LearningMobilenet implements LearningInterface {
275
295
  classProbabilities.set(className, probability);
276
296
  }
277
297
  }
298
+
278
299
  console.log('classProbabilities', classProbabilities);
279
300
  dispose(logits);
280
301
  return classProbabilities;
package/lib/utils/tf.ts CHANGED
@@ -54,7 +54,7 @@ export function imageToTensor(data: any): tf.Tensor3D {
54
54
  return tensor;
55
55
  }
56
56
 
57
- export function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean) {
57
+ export async function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTMLCanvasElement, grayscale?: boolean) {
58
58
  return tf.tidy(() => {
59
59
  const pixels = tf.browser.fromPixels(rasterElement);
60
60
 
@@ -70,6 +70,7 @@ export function capture(rasterElement: HTMLImageElement | HTMLVideoElement | HTM
70
70
  });
71
71
  }
72
72
 
73
+
73
74
  export function cropTensor( img: tf.Tensor3D, grayscaleModel?: boolean, grayscaleInput?: boolean ) : tf.Tensor3D {
74
75
  const size = Math.min(img.shape[0], img.shape[1]);
75
76
  const centerHeight = img.shape[0] / 2;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "learning_model",
3
- "version": "1.0.48",
3
+ "version": "1.0.50",
4
4
  "description": "learning model develop",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",