ruby-dnn 0.4.4 → 0.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: a74b3daebe0908a73f01f96bc9a0ed43ece1a6fdeefeac9eb4b4083234f0dc34
-   data.tar.gz: db3c9347d78d0cf3df7558b5cf728cfe05ee6c3bb074737ead4e1d81f9ee0411
+   metadata.gz: 4644c4f76c548fe20e899150fbbed0d445334afa428c8119458d4c90e0bf6afc
+   data.tar.gz: c0ea6b81a390bc06925695663d01f0b235a4e6915a4d4c4e901f22c51217d43a
  SHA512:
-   metadata.gz: f52aa63135790f941d0193a31f120f3b2356132e9c1b1936de418e0a37abd91331f59a4d0d5c1c6918310f90ad30b0d24d7d7e873deb48576abf57db0c0055a2
-   data.tar.gz: fbd915978ac62794b572127caeadba92beac7d8752b6fbe97d55b8f767d5c4b996eac7c7e5da0d50e160cc7c860f4bf14e172e62723c685fdfcaff0f31324017
+   metadata.gz: 282164d11e647a23b86a775cb563a746995cbf4dcc66e10e10ce50706c14ae91e6c6e5579e8ace064554a57e749d1ebc5f617091316e9cd11e05fdc3667e2851
+   data.tar.gz: 2fdd20a6bd776c84ce2bd6d5a49c12003ae20c64218db3b1ba504106d34d08caa55ece05253b32140dd79e2d89f68e33d12348e6c3c2efede7a1d1e1b2ce17c2
data/API-Reference.ja.md CHANGED
@@ -2,7 +2,7 @@
  This is the API reference for ruby-dnn. It documents only the classes and methods required to use the API.
  For implementation details, please refer to the source code.
 
- Last updated for version: 0.4.2
+ Last updated for version: 0.5.0
 
  # module DNN
  The module that forms the ruby-dnn namespace.
@@ -121,7 +121,7 @@ epoch_proc
  ### return
  None.
 
- ## def train_on_batch
+ ## def train_on_batch(x, y, batch_size, &batch_proc)
  Performs a single training step on the given batch of data.
  ### arguments
  * SFloat x
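
A hedged usage sketch of the new explicit signature (the `model` object and the data shapes below are assumptions for illustration, not taken from the gem):

```ruby
# Sketch only: assumes `model` is an already-compiled DNN::Model and that
# inputs are Numo::SFloat arrays whose first dimension is the batch size.
x_batch = Numo::SFloat.new(32, 784).rand
y_batch = Numo::SFloat.zeros(32, 10)

# One forward/backward pass over this batch; returns the batch loss.
loss = model.train_on_batch(x_batch, y_batch, 32)
```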
@@ -485,7 +485,7 @@ Float alpha
  The slope used when the output value is negative.
 
 
- # class IdentityWithLoss < OutputLayer
+ # class IdentityMSE < OutputLayer
  An output layer that combines the identity function with a mean squared error loss.
 
 
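The rename only changes the class name; as a rough illustration of the math an identity output pairs with (the 1/2 factor and batch averaging are assumptions, not copied from the gem source):

```ruby
# Hypothetical standalone sketch of an identity-output MSE loss.
require "numo/narray"

def identity_mse_loss(out, y, batch_size)
  # Squared error summed over all elements, halved, averaged per sample.
  0.5 * ((out - y)**2).sum / batch_size
end

out = Numo::SFloat[[0.8, 0.2]]
y   = Numo::SFloat[[1.0, 0.0]]
identity_mse_loss(out, y, 1) # => 0.04
```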
data/lib/dnn.rb CHANGED
@@ -11,6 +11,7 @@ require "dnn/core/error"
  require "dnn/core/model"
  require "dnn/core/initializers"
  require "dnn/core/layers"
+ require "dnn/core/cnn_layers"
  require "dnn/core/activations"
  require "dnn/core/optimizers"
  require "dnn/core/util"
@@ -75,7 +75,7 @@ module DNN
    end
 
 
-   class IdentityWithLoss < OutputLayer
+   class IdentityMSE < OutputLayer
      def forward(x)
        @out = x
      end
data/lib/dnn/core/cnn_layers.rb ADDED
@@ -0,0 +1,263 @@
+ module DNN
+   module Layers
+     # This module provides the im2col/col2im helpers used for convolution.
+     module Conv2DModule
+       private
+
+       # Rearranges image patches into columns so convolution becomes a matrix product.
+       def im2col(img, out_h, out_w, fil_h, fil_w, strides)
+         bsize = img.shape[0]
+         ch = img.shape[3]
+         col = SFloat.zeros(bsize, ch, fil_h, fil_w, out_h, out_w)
+         img = img.transpose(0, 3, 1, 2)
+         (0...fil_h).each do |i|
+           i_range = (i...(i + strides[0] * out_h)).step(strides[0]).to_a
+           (0...fil_w).each do |j|
+             j_range = (j...(j + strides[1] * out_w)).step(strides[1]).to_a
+             col[true, true, i, j, true, true] = img[true, true, i_range, j_range]
+           end
+         end
+         col.transpose(0, 4, 5, 2, 3, 1).reshape(bsize * out_h * out_w, fil_h * fil_w * ch)
+       end
+
+       # Inverse of im2col: scatters column gradients back onto the image grid.
+       def col2im(col, img_shape, out_h, out_w, fil_h, fil_w, strides)
+         bsize, img_h, img_w, ch = img_shape
+         col = col.reshape(bsize, out_h, out_w, fil_h, fil_w, ch).transpose(0, 5, 3, 4, 1, 2)
+         img = SFloat.zeros(bsize, ch, img_h, img_w)
+         (0...fil_h).each do |i|
+           i_range = (i...(i + strides[0] * out_h)).step(strides[0]).to_a
+           (0...fil_w).each do |j|
+             j_range = (j...(j + strides[1] * out_w)).step(strides[1]).to_a
+             img[true, true, i_range, j_range] += col[true, true, i, j, true, true]
+           end
+         end
+         img.transpose(0, 2, 3, 1)
+       end
+
+       # Zero-pads the spatial dimensions by pad[0] rows and pad[1] columns.
+       def padding(img, pad)
+         bsize, img_h, img_w, ch = img.shape
+         img2 = SFloat.zeros(bsize, img_h + pad[0], img_w + pad[1], ch)
+         i_begin = pad[0] / 2
+         i_end = i_begin + img_h
+         j_begin = pad[1] / 2
+         j_end = j_begin + img_w
+         img2[true, i_begin...i_end, j_begin...j_end, true] = img
+         img2
+       end
+
+       # Crops the padding back off during the backward pass.
+       def back_padding(img, pad)
+         i_begin = pad[0] / 2
+         i_end = img.shape[1] - (pad[0] / 2.0).round
+         j_begin = pad[1] / 2
+         j_end = img.shape[2] - (pad[1] / 2.0).round
+         img[true, i_begin...i_end, j_begin...j_end, true]
+       end
+
+       # Output size of a valid convolution: (input - filter) / stride + 1.
+       def out_size(prev_h, prev_w, fil_h, fil_w, strides)
+         out_h = (prev_h - fil_h) / strides[0] + 1
+         out_w = (prev_w - fil_w) / strides[1] + 1
+         [out_h, out_w]
+       end
+     end
+
+
+     class Conv2D < HasParamLayer
+       include Initializers
+       include Conv2DModule
+
+       attr_reader :num_filters
+       attr_reader :filter_size
+       attr_reader :strides
+       attr_reader :weight_decay
+
+       def initialize(num_filters, filter_size,
+                      weight_initializer: nil,
+                      bias_initializer: nil,
+                      strides: 1,
+                      padding: false,
+                      weight_decay: 0)
+         super()
+         @num_filters = num_filters
+         @filter_size = filter_size.is_a?(Integer) ? [filter_size, filter_size] : filter_size
+         @weight_initializer = (weight_initializer || RandomNormal.new)
+         @bias_initializer = (bias_initializer || Zeros.new)
+         @strides = strides.is_a?(Integer) ? [strides, strides] : strides
+         @padding = padding
+         @weight_decay = weight_decay
+       end
+
+       def self.load_hash(hash)
+         Conv2D.new(hash[:num_filters], hash[:filter_size],
+                    weight_initializer: Util.load_hash(hash[:weight_initializer]),
+                    bias_initializer: Util.load_hash(hash[:bias_initializer]),
+                    strides: hash[:strides],
+                    padding: hash[:padding],
+                    weight_decay: hash[:weight_decay])
+       end
+
+       def build(model)
+         super
+         prev_h, prev_w = prev_layer.shape[0..1]
+         @out_size = out_size(prev_h, prev_w, *@filter_size, @strides)
+         out_h, out_w = @out_size
+         if @padding
+           @pad = [prev_h - out_h, prev_w - out_w]
+           @out_size = [prev_h, prev_w]
+         end
+       end
+
+       def forward(x)
+         x = padding(x, @pad) if @padding
+         @x_shape = x.shape
+         @col = im2col(x, *@out_size, *@filter_size, @strides)
+         # The bias is broadcast across all rows of the im2col matrix.
+         out = @col.dot(@params[:weight]) + @params[:bias]
+         out.reshape(x.shape[0], *@out_size, @num_filters)
+       end
+
+       def backward(dout)
+         dout = dout.reshape(dout.shape[0..2].reduce(:*), dout.shape[3])
+         @grads[:weight] = @col.transpose.dot(dout)
+         if @weight_decay > 0
+           dridge = @weight_decay * @params[:weight]
+           @grads[:weight] += dridge
+         end
+         @grads[:bias] = dout.sum(0)
+         dcol = dout.dot(@params[:weight].transpose)
+         dx = col2im(dcol, @x_shape, *@out_size, *@filter_size, @strides)
+         @padding ? back_padding(dx, @pad) : dx
+       end
+
+       def shape
+         [*@out_size, @num_filters]
+       end
+
+       def to_hash
+         super({num_filters: @num_filters,
+                filter_size: @filter_size,
+                weight_initializer: @weight_initializer.to_hash,
+                bias_initializer: @bias_initializer.to_hash,
+                strides: @strides,
+                padding: @padding,
+                weight_decay: @weight_decay})
+       end
+
+       private
+
+       def init_params
+         num_prev_filter = prev_layer.shape[2]
+         @params[:weight] = SFloat.new(num_prev_filter * @filter_size.reduce(:*), @num_filters)
+         @params[:bias] = SFloat.new(@num_filters)
+         @weight_initializer.init_param(self, :weight)
+         @bias_initializer.init_param(self, :bias)
+       end
+     end
+
+
+     class MaxPool2D < Layer
+       include Conv2DModule
+
+       attr_reader :pool_size
+       attr_reader :strides
+
+       def self.load_hash(hash)
+         MaxPool2D.new(hash[:pool_size], strides: hash[:strides], padding: hash[:padding])
+       end
+
+       def initialize(pool_size, strides: nil, padding: false)
+         super()
+         @pool_size = pool_size.is_a?(Integer) ? [pool_size, pool_size] : pool_size
+         # Strides default to the pool size, i.e. non-overlapping windows.
+         @strides = if strides
+           strides.is_a?(Integer) ? [strides, strides] : strides
+         else
+           @pool_size.clone
+         end
+         @padding = padding
+       end
+
+       def build(model)
+         super
+         prev_h, prev_w = prev_layer.shape[0..1]
+         @num_channel = prev_layer.shape[2]
+         @out_size = out_size(prev_h, prev_w, *@pool_size, @strides)
+         out_h, out_w = @out_size
+         if @padding
+           @pad = [prev_h - out_h, prev_w - out_w]
+           @out_size = [prev_h, prev_w]
+         end
+       end
+
+       def forward(x)
+         x = padding(x, @pad) if @padding
+         @x_shape = x.shape
+         col = im2col(x, *@out_size, *@pool_size, @strides)
+         col = col.reshape(x.shape[0] * @out_size.reduce(:*) * x.shape[3], @pool_size.reduce(:*))
+         # Remember where each window's maximum sat, for routing gradients back.
+         @max_index = col.max_index(1)
+         col.max(1).reshape(x.shape[0], *@out_size, x.shape[3])
+       end
+
+       def backward(dout)
+         dmax = SFloat.zeros(dout.size * @pool_size.reduce(:*))
+         dmax[@max_index] = dout.flatten
+         dcol = dmax.reshape(dout.shape[0..2].reduce(:*), dout.shape[3] * @pool_size.reduce(:*))
+         dx = col2im(dcol, @x_shape, *@out_size, *@pool_size, @strides)
+         @padding ? back_padding(dx, @pad) : dx
+       end
+
+       def shape
+         [*@out_size, @num_channel]
+       end
+
+       def to_hash
+         # Serialize pool_size as a single key so load_hash can read it back.
+         super({pool_size: @pool_size,
+                strides: @strides,
+                padding: @padding})
+       end
+     end
+
+
+     class UnPool2D < Layer
+       attr_reader :unpool_size
+
+       def initialize(unpool_size)
+         super()
+         @unpool_size = unpool_size.is_a?(Integer) ? [unpool_size, unpool_size] : unpool_size
+       end
+
+       def self.load_hash(hash)
+         UnPool2D.new(hash[:unpool_size])
+       end
+
+       def build(model)
+         super
+         prev_h, prev_w = prev_layer.shape[0..1]
+         unpool_h, unpool_w = @unpool_size
+         out_h = prev_h * unpool_h
+         out_w = prev_w * unpool_w
+         @out_size = [out_h, out_w]
+         @num_channel = prev_layer.shape[2]
+       end
+
+       def forward(x)
+         @x_shape = x.shape
+         unpool_h, unpool_w = @unpool_size
+         # Each input pixel lands in the top-left corner of its unpooled block.
+         x2 = SFloat.zeros(x.shape[0], x.shape[1], unpool_h, x.shape[2], unpool_w, @num_channel)
+         x2[true, true, 0, true, 0, true] = x
+         x2.reshape(x.shape[0], *@out_size, x.shape[3])
+       end
+
+       def backward(dout)
+         unpool_h, unpool_w = @unpool_size
+         dout = dout.reshape(dout.shape[0], @x_shape[1], unpool_h, @x_shape[2], unpool_w, @num_channel)
+         dout[true, true, 0, true, 0, true].clone
+       end
+
+       def shape
+         [*@out_size, @num_channel]
+       end
+
+       def to_hash
+         super({unpool_size: @unpool_size})
+       end
+     end
+   end
+ end
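
For a valid (unpadded) convolution, `out_size` gives `out_h = (prev_h - fil_h) / strides[0] + 1`, so a 28×28 input with a 5×5 filter at stride 1 yields 24×24. Below is a hedged sketch of composing these layers into a model; `InputLayer`, `Dense`, `Flatten`, the `<<` method, and the `IdentityMSE` namespace are assumed from the wider ruby-dnn API and are not shown in this hunk:

```ruby
# Sketch under assumed API: only Conv2D and MaxPool2D are defined above;
# everything else comes from elsewhere in ruby-dnn.
include DNN::Layers

model = DNN::Model.new
model << InputLayer.new([28, 28, 1]) # HWC layout, as the code above indexes it
model << Conv2D.new(16, 5)           # 16 filters of 5x5 -> output 24x24x16
model << MaxPool2D.new(2)            # 2x2 max pooling   -> output 12x12x16
model << Flatten.new                 # -> 2304 features
model << Dense.new(10)
model << IdentityMSE.new             # output layer renamed in this release
```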
data/lib/dnn/core/error.rb CHANGED
@@ -4,10 +4,4 @@ module DNN
    class DNN_TypeError < DNN_Error; end
 
    class DNN_SharpError < DNN_Error; end
-
-   class DNN_GradUnfairError < DNN_Error
-     def initialize(grad, n_grad)
-       super("gradient is #{grad}, but numerical gradient is #{n_grad}")
-     end
-   end
  end
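
The removed `DNN_GradUnfairError` was raised when an analytic gradient disagreed with a numerically estimated one. For background, a minimal central-difference estimator of the kind such a check compares against (plain Ruby, independent of ruby-dnn's internals):

```ruby
# Central difference: f'(x) ≈ (f(x + h) - f(x - h)) / (2h) for small h.
def numerical_gradient(f, x, h = 1e-4)
  (f.call(x + h) - f.call(x - h)) / (2 * h)
end

# Example: d/dx x^2 at x = 3.0 is approximately 6.0.
numerical_gradient(->(x) { x * x }, 3.0) # => ~6.0
```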
data/lib/dnn/core/layers.rb CHANGED
@@ -158,267 +158,6 @@ module DNN
      end
    end
 
- # (261 lines removed: the Conv2DModule, Conv2D, MaxPool2D, and UnPool2D
- #  definitions deleted here are identical to the code added in
- #  data/lib/dnn/core/cnn_layers.rb above.)
 
    class Flatten < Layer
      def forward(x)
data/lib/dnn/core/model.rb CHANGED
@@ -8,13 +8,6 @@ module DNN
    attr_accessor :layers
    attr_reader :optimizer
    attr_reader :batch_size
-
-   def initialize
-     @layers = []
-     @optimizer = nil
-     @batch_size = nil
-     @compiled = false
-   end
 
    def self.load(file_name)
      Marshal.load(File.binread(file_name))
@@ -27,6 +20,14 @@ module DNN
      model.compile(Util.load_hash(hash[:optimizer]))
      model
    end
+
+   def initialize
+     @layers = []
+     @optimizer = nil
+     @batch_size = nil
+     @training = false
+     @compiled = false
+   end
 
    def load_json_params(json_str)
      has_param_layers_params = JSON.parse(json_str, symbolize_names: true)
@@ -128,6 +129,47 @@ module DNN
      epoch_proc.call(epoch) if epoch_proc
    end
  end
+
+ def train(x, y, epochs,
+           batch_size: 1,
+           test: nil,
+           verbose: true,
+           batch_proc: nil,
+           &epoch_proc)
+   @batch_size = batch_size
+   num_train_data = x.shape[0]
+   (1..epochs).each do |epoch|
+     puts "【 epoch #{epoch}/#{epochs} 】" if verbose
+     (num_train_data.to_f / @batch_size).ceil.times do |index|
+       x_batch, y_batch = Util.get_minibatch(x, y, @batch_size)
+       loss = train_on_batch(x_batch, y_batch, @batch_size, &batch_proc)
+       if loss.nan?
+         puts "\nloss is nan" if verbose
+         return
+       end
+       num_trained_data = (index + 1) * batch_size
+       num_trained_data = num_trained_data > num_train_data ? num_train_data : num_trained_data
+       # Render a 40-column progress bar followed by the running loss.
+       log = "\r"
+       40.times do |i|
+         if i < num_trained_data * 40 / num_train_data
+           log << "="
+         elsif i == num_trained_data * 40 / num_train_data
+           log << ">"
+         else
+           log << "_"
+         end
+       end
+       log << " #{num_trained_data}/#{num_train_data} loss: #{sprintf('%.8f', loss)}"
+       print log if verbose
+     end
+     if verbose && test
+       acc = accurate(test[0], test[1], batch_size, &batch_proc)
+       print " accurate: #{acc}"
+     end
+     puts "" if verbose
+     epoch_proc.call(epoch) if epoch_proc
+   end
+ end
 
  def train_on_batch(x, y, batch_size, &batch_proc)
    @batch_size = batch_size
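
A hedged sketch of driving the new `train` loop (dataset names and shapes below are illustrative; `model` is assumed to be compiled already):

```ruby
# Sketch only: x_train/y_train and x_test/y_test are Numo::SFloat arrays
# whose first dimension counts samples.
model.train(x_train, y_train, 10,
            batch_size: 32,
            test: [x_test, y_test],
            verbose: true) do |epoch|
  # The block is the epoch_proc: it runs once at the end of each epoch.
  puts "finished epoch #{epoch}"
end
```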
data/lib/dnn/version.rb CHANGED
@@ -1,3 +1,3 @@
  module DNN
-   VERSION = "0.4.4"
+   VERSION = "0.5.0"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
-   version: 0.4.4
+   version: 0.5.0
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-07-25 00:00:00.000000000 Z
+ date: 2018-07-29 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: numo-narray
@@ -106,6 +106,7 @@ files:
  - examples/xor_example.rb
  - lib/dnn.rb
  - lib/dnn/core/activations.rb
+ - lib/dnn/core/cnn_layers.rb
  - lib/dnn/core/error.rb
  - lib/dnn/core/initializers.rb
  - lib/dnn/core/layers.rb