ruby-dnn 1.1.1 → 1.1.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 97822807b84847cb2ad475bef5dd65329ae0c699847c577eb8b30a2be0425ec0
- data.tar.gz: 6871849ea256e466f4d10e354c5e7698247be8328c17e540e5d4b35adf46ee27
+ metadata.gz: 41b7dde05615d65bbc09fec893f67c0874a44aaff4dff8243c9ccdf97f5c9d7a
+ data.tar.gz: fea5cb29b787d4cbbba18f34ab2f3f4e6275f8018ebda6c335c9f3564194b109
  SHA512:
- metadata.gz: aa8fb779d8cec6e4acd1ce63a952f626eceae801ee7e16045a890412d8b9189e95d3798c9d840ab07de4afe517ebeac9230391e193b24cedb316927bda4726c9
- data.tar.gz: 16aa82f8e009eca027e693adb3fb543046376f11539b881a65ef29dd484b8225b34c17ee3171d72a6a9b2a3712b8463630deac778f1a851830df6ed53b7feca8
+ metadata.gz: c18e653840184c9590966b18f36a98a843cf2d1d6ced1159c071ca21839476e4d6f0b6b36bb5319414a50c430cef8a1dd66bedc0ab87d16602896dff6006bc2e
+ data.tar.gz: ec7844e9a5bae664e8a5dd88d5684731c2a2bd3149c8131b8cbe606a9e10c30a68f7f8c4904644f931d2616cdc19dc345254934b792df4eb94117ab31c9b06f2
data/README.md CHANGED
@@ -95,6 +95,8 @@ If you want to know more detailed information, please refer to the source code.
  | Losses | MeanSquaredError, MeanAbsoluteError, Hinge, HuberLoss, SoftmaxCrossEntropy, SigmoidCrossEntropy |
 
  ## Datasets
+ By setting the environment variable "RUBY_DNN_DOWNLOADS_PATH", you can specify the path from which datasets are read.
+
  ● Iris
  ● MNIST
  ● Fashion-MNIST
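A minimal usage sketch of the new setting (the directory below is illustrative, not part of the gem): set the variable before loading a dataset and ruby-dnn will read dataset files from, and presumably download them into, that directory.

```ruby
# Illustrative only: point ruby-dnn's dataset loaders at a custom directory.
ENV["RUBY_DNN_DOWNLOADS_PATH"] = File.expand_path("~/ruby-dnn-datasets")
```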
@@ -2,8 +2,6 @@ include DNN::Models
  include DNN::Layers
 
  class Generator < Model
- attr_reader :generate_images
-
  def initialize(input_shape)
  super()
  @input_shape = input_shape
@@ -27,7 +25,6 @@ class Generator < Model
  @bn7 = BatchNormalization.new
  @bn8 = BatchNormalization.new
  @bn9 = BatchNormalization.new
- @generate_images = nil
  end
 
  def forward(x)
@@ -75,7 +72,6 @@ class Generator < Model
 
  x = @l11.(x)
  x = Tanh.(x)
- @generate_images = x.data
  x
  end
  end
@@ -162,9 +158,9 @@ class DCGAN < Model
  end
 
  def forward(input)
- x = @gen.(input)
+ images = @gen.(input)
  @dis.disable_training
- x = @dis.([input, x])
- x
+ out = @dis.([input, images])
+ [images, out]
  end
  end
@@ -17,28 +17,35 @@ def load_dataset
  [x_in, x_out]
  end
 
+ initial_epoch = 1
+
  epochs = 20
  batch_size = 128
 
- gen = Generator.new([32, 32, 1])
- dis = Discriminator.new([32, 32, 1], [32, 32, 3])
- dcgan = DCGAN.new(gen, dis)
-
- gen.setup(Adam.new(alpha: 0.0002, beta1: 0.5), MeanAbsoluteError.new)
- dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
- dcgan.setup(Adam.new(alpha: 0.0002, beta1: 0.5), SigmoidCrossEntropy.new)
+ if initial_epoch == 1
+ gen = Generator.new([32, 32, 1])
+ dis = Discriminator.new([32, 32, 1], [32, 32, 3])
+ dcgan = DCGAN.new(gen, dis)
+ gen.setup(Adam.new(alpha: 0.0002, beta1: 0.5), MeanAbsoluteError.new)
+ dis.setup(Adam.new(alpha: 0.00001, beta1: 0.1), SigmoidCrossEntropy.new)
+ dcgan.setup(Adam.new(alpha: 0.0002, beta1: 0.5),
+ [MeanAbsoluteError.new, SigmoidCrossEntropy.new], loss_weights: [10, 1])
+ else
+ dcgan = DCGAN.load("trained/dcgan_model_epoch#{initial_epoch - 1}.marshal")
+ gen = dcgan.gen
+ dis = dcgan.dis
+ end
 
  x_in, x_out = load_dataset
 
  iter1 = DNN::Iterator.new(x_in, x_out)
  iter2 = DNN::Iterator.new(x_in, x_out)
  num_batchs = x_in.shape[0] / batch_size
- (1..epochs).each do |epoch|
+ (initial_epoch..epochs).each do |epoch|
  num_batchs.times do |index|
  x_in, x_out = iter1.next_batch(batch_size)
- gen_loss = gen.train_on_batch(x_in, x_out)
 
- images = gen.generate_images
+ images = gen.predict(x_in)
  y_real = Numo::SFloat.ones(batch_size, 1)
  y_fake = Numo::SFloat.zeros(batch_size, 1)
  dis.enable_training
@@ -46,9 +53,9 @@ num_batchs = x_in.shape[0] / batch_size
  dis_loss += dis.train_on_batch([x_in, images], y_fake)
 
  x_in, x_out = iter2.next_batch(batch_size)
- dcgan_loss = dcgan.train_on_batch(x_in, y_real)
+ dcgan_loss = dcgan.train_on_batch(x_in, [x_out, y_real])
 
- puts "epoch: #{epoch}, index: #{index}, gen_loss: #{gen_loss}, dis_loss: #{dis_loss}, dcgan_loss: #{dcgan_loss}"
+ puts "epoch: #{epoch}, index: #{index}, dis_loss: #{dis_loss}, dcgan_loss: #{dcgan_loss}"
  end
  iter1.reset
  iter2.reset
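With this change the generator is no longer pre-trained on its own; its reconstruction objective enters through the composite model's weighted MeanAbsoluteError. Resuming an interrupted run then comes down to raising initial_epoch, a sketch (it assumes checkpoints with the file naming used in the load branch above have already been written elsewhere in the example):

```ruby
# Illustrative: with initial_epoch = 11 the fresh-setup branch is skipped and the
# models are restored from "trained/dcgan_model_epoch10.marshal" before training continues.
initial_epoch = 11
```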
@@ -19,11 +19,12 @@ module DNN
  forward(y, t)
  end
 
- def loss(y, t, layers = nil)
+ def loss(y, t, layers: nil, loss_weight: nil)
  unless y.shape == t.shape
  raise DNNShapeError, "The shape of y does not match the t shape. y shape is #{y.shape}, but t shape is #{t.shape}."
  end
  loss = call(y, t)
+ loss *= loss_weight if loss_weight
  loss = regularizers_forward(loss, layers) if layers
  loss
  end
@@ -109,6 +109,7 @@ module DNN
  # This class deals with the model of the network.
  class Model < Chain
  attr_accessor :optimizer
+ attr_accessor :loss_weights
  attr_reader :last_log
 
  # Load marshal model.
@@ -126,6 +127,7 @@ module DNN
  @optimizer = nil
  @loss_func = nil
  @built = false
+ @loss_weights = nil
  @callbacks = []
  @last_log = {}
  end
@@ -139,7 +141,8 @@ module DNN
  # Set optimizer and loss_func to model.
  # @param [DNN::Optimizers::Optimizer] optimizer Optimizer to use for learning.
  # @param [DNN::Losses::Loss] loss_func Loss function to use for learning.
- def setup(optimizer, loss_func)
+ # @param [Array | NilClass] loss_weights Setting loss weights contribution.
+ def setup(optimizer, loss_func, loss_weights: nil)
  unless optimizer.is_a?(Optimizers::Optimizer)
  raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
  end
@@ -148,6 +151,7 @@ module DNN
  end
  @optimizer = optimizer
  self.loss_func = loss_func
+ @loss_weights = loss_weights
  end
 
  def loss_func
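For a single-output model setup is unchanged. For a multi-output model one loss per output can be passed together with the new loss_weights keyword; each loss is multiplied by its weight before backpropagation. A minimal sketch (the model variable and the concrete weights are illustrative, mirroring the DCGAN example above):

```ruby
# The first output's loss counts ten times as much as the second's.
model.setup(DNN::Optimizers::Adam.new(alpha: 0.0002, beta1: 0.5),
            [DNN::Losses::MeanAbsoluteError.new, DNN::Losses::SigmoidCrossEntropy.new],
            loss_weights: [10, 1])
```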
@@ -178,18 +182,21 @@ module DNN
  # @param [Array | NilClass] test If you to test the model for every 1 epoch,
  # specify [x_test, y_test]. Don't test to the model, specify nil.
  # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
+ # @param [Boolean] accuracy Set true to compute the accuracy.
  def train(x, y, epochs,
  batch_size: 1,
  initial_epoch: 1,
  test: nil,
- verbose: true)
+ verbose: true,
+ accuracy: true)
  check_xy_type(x, y)
  train_iterator = Iterator.new(x, y)
  train_by_iterator(train_iterator, epochs,
  batch_size: batch_size,
  initial_epoch: initial_epoch,
  test: test,
- verbose: verbose)
+ verbose: verbose,
+ accuracy: accuracy)
  end
 
  alias fit train
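A sketch of the new keyword on the public training API (model and data variables are placeholders): with accuracy: false the per-epoch test pass computes only the test loss, which is what the log then reports.

```ruby
model.train(x_train, y_train, 20,
            batch_size: 128,
            test: [x_test, y_test],
            accuracy: false)  # epoch log shows test_loss only
```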
@@ -203,11 +210,13 @@ module DNN
  # @param [Array | NilClass] test If you to test the model for every 1 epoch,
  # specify [x_test, y_test]. Don't test to the model, specify nil.
  # @param [Boolean] verbose Set true to display the log. If false is set, the log is not displayed.
+ # @param [Boolean] accuracy Set true to compute the accuracy.
  def train_by_iterator(train_iterator, epochs,
  batch_size: 1,
  initial_epoch: 1,
  test: nil,
- verbose: true)
+ verbose: true,
+ accuracy: true)
  raise DNNError, "The model is not optimizer setup complete." unless @optimizer
  raise DNNError, "The model is not loss_func setup complete." unless @loss_func
 
@@ -242,11 +251,18 @@ module DNN
 
  if test
  acc, loss = if test.is_a?(Array)
- evaluate(test[0], test[1], batch_size: batch_size)
+ evaluate(test[0], test[1], batch_size: batch_size, accuracy: accuracy)
  else
- evaluate_by_iterator(test, batch_size: batch_size)
+ evaluate_by_iterator(test, batch_size: batch_size, accuracy: accuracy)
  end
- print " " + metrics_to_str({ accuracy: acc, test_loss: loss }) if verbose
+ if verbose
+ metrics = if accuracy
+ { accuracy: acc, test_loss: loss }
+ else
+ { test_loss: loss }
+ end
+ print " " + metrics_to_str(metrics)
+ end
  end
  puts "" if verbose
  call_callbacks(:after_epoch)
@@ -285,17 +301,16 @@ module DNN
  if output_tensors.is_a?(Array)
  loss_data = []
  output_tensors.each.with_index do |out, i|
- loss = if i == 0
- @loss_func[i].loss(out, Tensor.convert(y[i]), layers)
- else
- @loss_func[i].loss(out, Tensor.convert(y[i]))
- end
+ loss_opt = {}
+ loss_opt[:layers] = layers if i == 0
+ loss_opt[:loss_weight] = @loss_weights[i] if @loss_weights
+ loss = @loss_func[i].loss(out, Tensor.convert(y[i]), **loss_opt)
  loss_data << loss.data.to_f
  loss.link.backward(Xumo::SFloat.ones(y[i][0...1, false].shape[0], 1))
  end
  else
  out = output_tensors
- loss = @loss_func.loss(out, Tensor.convert(y), layers)
+ loss = @loss_func.loss(out, Tensor.convert(y), layers: layers)
  loss_data = loss.data.to_f
  loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
  end
@@ -310,16 +325,18 @@ module DNN
  # @param [Numo::SFloat] y Output test data.
  # @param [Integer] batch_size Batch size used for one test.
  # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
- def evaluate(x, y, batch_size: 100)
+ # If accuracy is not needed returns in the form [nil, mean_loss].
+ def evaluate(x, y, batch_size: 100, accuracy: true)
  check_xy_type(x, y)
- evaluate_by_iterator(Iterator.new(x, y, random: false), batch_size: batch_size)
+ evaluate_by_iterator(Iterator.new(x, y, random: false), batch_size: batch_size, accuracy: accuracy)
  end
 
  # Evaluate model by iterator.
  # @param [DNN::Iterator] test_iterator Iterator used for testing.
  # @param [Integer] batch_size Batch size used for one test.
  # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
- def evaluate_by_iterator(test_iterator, batch_size: 100)
+ # If accuracy is not needed returns in the form [nil, mean_loss].
+ def evaluate_by_iterator(test_iterator, batch_size: 100, accuracy: true)
  num_test_datas = test_iterator.num_datas
  batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
  if @loss_func.is_a?(Array)
@@ -331,27 +348,28 @@ module DNN
  end
  max_steps = (num_test_datas.to_f / batch_size).ceil
  test_iterator.foreach(batch_size) do |x_batch, y_batch|
- correct, loss_value = test_on_batch(x_batch, y_batch)
+ correct, loss_value = test_on_batch(x_batch, y_batch, accuracy: accuracy)
  if @loss_func.is_a?(Array)
  @loss_func.each_index do |i|
- total_correct[i] += correct[i]
+ total_correct[i] += correct[i] if accuracy
  sum_loss[i] += loss_value[i]
  end
  else
- total_correct += correct
+ total_correct += correct if accuracy
  sum_loss += loss_value
  end
  end
+ acc = nil
  if @loss_func.is_a?(Array)
  mean_loss = Array.new(@loss_func.length, 0)
- acc = Array.new(@loss_func.length, 0)
+ acc = Array.new(@loss_func.length, 0) if accuracy
  @loss_func.each_index do |i|
  mean_loss[i] += sum_loss[i] / max_steps
- acc[i] += total_correct[i].to_f / num_test_datas
+ acc[i] += total_correct[i].to_f / num_test_datas if accuracy
  end
  else
  mean_loss = sum_loss / max_steps
- acc = total_correct.to_f / num_test_datas
+ acc = total_correct.to_f / num_test_datas if accuracy
  end
  @last_log[:test_loss] = mean_loss
  @last_log[:test_accuracy] = acc
@@ -361,22 +379,24 @@ module DNN
  # Evaluate once.
  # @param [Numo::SFloat | Array] x Input test data.
  # @param [Numo::SFloat | Array] y Output test data.
- # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
- def test_on_batch(x, y)
+ # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, loss].
+ # If accuracy is not needed returns in the form [nil, loss].
+ def test_on_batch(x, y, accuracy: true)
  call_callbacks(:before_test_on_batch)
  DNN.learning_phase = false
  output_tensors = call(Tensor.convert(x))
+ correct = nil
  if output_tensors.is_a?(Array)
- correct = []
+ correct = [] if accuracy
  loss_data = []
  output_tensors.each.with_index do |out, i|
- correct << accuracy(out.data, y[i])
+ correct << accuracy(out.data, y[i]) if accuracy
  loss = @loss_func[i].(out, Tensor.convert(y[i]))
  loss_data << loss.data.to_f
  end
  else
  out = output_tensors
- correct = accuracy(out.data, y)
+ correct = accuracy(out.data, y) if accuracy
  loss = @loss_func.(out, Tensor.convert(y))
  loss_data = loss.data.to_f
  end
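From the caller's side the new return shape looks like this (model and data are placeholders): with accuracy: false the first element is simply nil, which suits regression-style models where accuracy is meaningless.

```ruby
acc, mean_loss = model.evaluate(x_test, y_test, batch_size: 100, accuracy: false)
acc        #=> nil
mean_loss  #=> Float (or an Array of Floats when the model has multiple losses)
```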
@@ -27,36 +27,51 @@ end
 
  class DNNKerasModelConvertError < DNN::DNNError; end
 
+ class DNNKerasLayerNotConvertSupportError < DNNKerasModelConvertError; end
+
  class KerasModelConvertor
  pyfrom :"keras.models", import: :load_model
 
- def self.k_load_model(k_model_name, k_weights_name)
- model = load_model(k_model_name)
- model.load_weights(k_weights_name) if k_weights_name
- model
+ def self.load(k_model_name, k_weights_name = nil)
+ k_model = load_model(k_model_name)
+ k_model.load_weights(k_weights_name) if k_weights_name
+ self.new(k_model)
  end
 
- def initialize(k_model_name, k_weights_name = nil)
- @k_model = KerasModelConvertor.k_load_model(k_model_name, k_weights_name)
+ def initialize(k_model)
+ @k_model = k_model
  end
 
- def convert
+ def convert(not_support_to_nil: false, debug_message: false)
  unless @k_model.__class__.__name__ == "Sequential"
  raise DNNKerasModelConvertError.new("#{@k_model.__class__.__name__} models do not support convert.")
  end
- layers = convert_layers(@k_model.layers)
- input_shape = @k_model.layers[0].input_shape.to_a[1..-1]
- input_layer = DNN::Layers::InputLayer.new(input_shape)
- input_layer.build(input_shape)
- layers.unshift(input_layer)
+ layers = convert_layers(not_support_to_nil: not_support_to_nil, debug_message: debug_message)
  dnn_model = DNN::Models::Sequential.new(layers)
  dnn_model
  end
 
- def convert_layers(k_layers)
- k_layers.map do |k_layer|
- layer_convert(k_layer)
+ def convert_layers(not_support_to_nil: false, debug_message: false)
+ layers = []
+ @k_model.layers.each do |k_layer|
+ layer = if not_support_to_nil
+ begin
+ layer_convert(k_layer)
+ rescue DNNKerasLayerNotConvertSupportError => e
+ nil
+ end
+ else
+ layer_convert(k_layer)
+ end
+ if layer.is_a?(Array)
+ layer.each { |l| puts "Converted #{l.class.name} layer" } if debug_message
+ layers += layer
+ else
+ puts "Converted #{layer.class.name} layer" if debug_message
+ layers << layer
+ end
  end
+ layers
  end
 
  private
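A usage sketch of the reworked entry point (file names are illustrative; Keras and pycall must be available): the class method load wraps keras.models.load_model plus optional weight loading, and the new convert options turn unsupported layers into nil instead of raising, with optional per-layer progress output.

```ruby
convertor = KerasModelConvertor.load("keras_model.h5", "keras_weights.h5")
dnn_model = convertor.convert(not_support_to_nil: true, debug_message: true)
```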
@@ -67,7 +82,7 @@ class KerasModelConvertor
  if respond_to?(method_name, true)
  send(method_name, k_layer)
  else
- raise DNNKerasModelConvertError.new("#{k_layer_name} layer do not support convert.")
+ raise DNNKerasLayerNotConvertSupportError.new("#{k_layer_name} layer do not support convert.")
  end
  end
 
@@ -82,18 +97,34 @@ class KerasModelConvertor
  dnn_layer.build(input_shape)
  end
 
+ def convert_InputLayer(k_input_layer)
+ input_shape, output_shape = get_k_layer_shape(k_input_layer)
+ input_layer = DNN::Layers::InputLayer.new(input_shape)
+ input_layer.build(input_shape)
+ input_layer
+ end
+
  def convert_Dense(k_dense)
  input_shape, output_shape = get_k_layer_shape(k_dense)
  dense = DNN::Layers::Dense.new(output_shape[0])
  dense.build(input_shape)
  dense.weight.data = Numpy.to_na(k_dense.get_weights[0])
  dense.bias.data = Numpy.to_na(k_dense.get_weights[1])
- dense
+ returns = [dense]
+ unless k_dense.get_config[:activation] == "linear"
+ returns << activation_to_dnn_layer(k_dense.get_config[:activation], output_shape)
+ end
+ returns
  end
 
  def convert_Activation(k_activation)
+ input_shape, output_shape = get_k_layer_shape(k_activation)
  activation_name = k_activation.get_config[:activation].to_s
- activation = case k_activation.get_config[:activation].to_s
+ activation_to_dnn_layer(activation_name, input_shape)
+ end
+
+ def activation_to_dnn_layer(activation_name, shape)
+ activation = case activation_name
  when "sigmoid"
  DNN::Layers::Sigmoid.new
  when "tanh"
@@ -105,7 +136,7 @@ class KerasModelConvertor
  else
  raise DNNKerasModelConvertError.new("#{activation_name} activation do not support convert.")
  end
- build_dnn_layer(k_activation, activation)
+ activation.build(shape)
  activation
  end
 
@@ -136,19 +167,29 @@ class KerasModelConvertor
  build_dnn_layer(k_conv2d, conv2d)
  conv2d.filters = Numpy.to_na(k_conv2d.get_weights[0])
  conv2d.bias.data = Numpy.to_na(k_conv2d.get_weights[1])
- conv2d
- end
-
- def convert_Conv2DTranspose(k_conv2d)
- padding = k_conv2d.get_config[:padding].to_s == "same" ? true : false
- filter_size = k_conv2d.get_config[:kernel_size].to_a
- strides = k_conv2d.get_config[:strides].to_a
- num_filters = k_conv2d.get_config[:filters]
- conv2d = DNN::Layers::Conv2DTranspose.new(num_filters, filter_size, padding: padding, strides: strides)
- build_dnn_layer(k_conv2d, conv2d)
- conv2d.filters = Numpy.to_na(k_conv2d.get_weights[0])
- conv2d.bias.data = Numpy.to_na(k_conv2d.get_weights[1])
- conv2d
+ returns = [conv2d]
+ unless k_conv2d.get_config[:activation] == "linear"
+ input_shape, output_shape = get_k_layer_shape(k_conv2d)
+ returns << activation_to_dnn_layer(k_conv2d.get_config[:activation], output_shape)
+ end
+ returns
+ end
+
+ def convert_Conv2DTranspose(k_conv2d_t)
+ padding = k_conv2d_t.get_config[:padding].to_s == "same" ? true : false
+ filter_size = k_conv2d_t.get_config[:kernel_size].to_a
+ strides = k_conv2d_t.get_config[:strides].to_a
+ num_filters = k_conv2d_t.get_config[:filters]
+ conv2d_t = DNN::Layers::Conv2DTranspose.new(num_filters, filter_size, padding: padding, strides: strides)
+ build_dnn_layer(k_conv2d_t, conv2d_t)
+ conv2d_t.filters = Numpy.to_na(k_conv2d_t.get_weights[0])
+ conv2d_t.bias.data = Numpy.to_na(k_conv2d_t.get_weights[1])
+ returns = [conv2d_t]
+ unless conv2d_t.get_config[:activation] == "linear"
+ input_shape, output_shape = get_k_layer_shape(k_conv2d)
+ returns << activation_to_dnn_layer(conv2d_t.get_config[:activation], output_shape)
+ end
+ returns
  end
 
  def convert_MaxPooling2D(k_max_pool2d)
@@ -160,6 +201,24 @@ class KerasModelConvertor
  max_pool2d
  end
 
+ def convert_AveragePooling2D(k_avg_pool2d)
+ padding = k_avg_pool2d.get_config[:padding].to_s == "same" ? true : false
+ pool_size = k_avg_pool2d.get_config[:pool_size].to_a
+ strides = k_avg_pool2d.get_config[:strides].to_a
+ avg_pool2d = DNN::Layers::AvgPool2D.new(pool_size, padding: padding, strides: strides)
+ build_dnn_layer(k_avg_pool2d, avg_pool2d)
+ avg_pool2d
+ end
+
+ def convert_GlobalAveragePooling2D(k_glb_avg_pool2d)
+ padding = k_glb_avg_pool2d.get_config[:padding].to_s == "same" ? true : false
+ pool_size = k_glb_avg_pool2d.get_config[:pool_size].to_a
+ strides = k_glb_avg_pool2d.get_config[:strides].to_a
+ glb_avg_pool2d = DNN::Layers::GlobalAvgPool2D.new
+ build_dnn_layer(k_glb_avg_pool2d, glb_avg_pool2d)
+ glb_avg_pool2d
+ end
+
  def convert_UpSampling2D(k_upsampling2d)
  input_shape, output_shape = get_k_layer_shape(k_upsampling2d)
  unpool_size = k_upsampling2d.get_config[:size].to_a
@@ -1,3 +1,3 @@
  module DNN
- VERSION = "1.1.1"
+ VERSION = "1.1.2"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
- version: 1.1.1
+ version: 1.1.2
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-02-09 00:00:00.000000000 Z
+ date: 2020-02-15 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: numo-narray