ruby-dnn 1.0.0 → 1.1.0

This diff shows the changes between publicly released versions of the package as they appear in their public registry; it is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 0db9ac3047ba8c15d903ace901f5e4e332835d11dffca2f441664ae843049d1d
-   data.tar.gz: f1b4bf61da8a48b8ad483eb806ab443bb40f1b0d88573c2d901ae45299abf86d
+   metadata.gz: 62482330aab914fc53313fa7a1170a53ea3644e72dfe1cecb2fffca1160f6f7e
+   data.tar.gz: e21d0e28ba2c603179ece6fe9cb2c4722ea8bbaa85c72cc357f8b5fa691e33d0
  SHA512:
-   metadata.gz: 880fe0688bb5b15c016fdddb15b18f5e0b3ba2a45ae36292182adf8def20d93ca3ae176747dbf3d1369ea28cdaaf23e7cd9e96d0a0c6c4bb92db27131d8f4d93
-   data.tar.gz: 4d00dc6831f0c82e0dc1b4128d98dc391456410cbf26e0d0192b19092333664774eccabd2db0a609b739c1c40664e148dcd69152cdc45394690e21d785e1acc0
+   metadata.gz: 5f7a34efbba1465aa4e245b83928bbbe764d67708e9e6c3faef7030b13873dc27f775adbf8b32b0f3015280f1b3bbb600a1e4c4bff9c1a5be40387217efc2e26
+   data.tar.gz: 9bba32d0b4142df90ab07b75191bb7590fa000f38470f9d9149f4cd95c346f5ba90fbf4549d714dee5374d1024184cf45eb61bcd1ba3e10e1ab0aa5eda398011
data/Rakefile CHANGED
@@ -1,5 +1,7 @@
  require "bundler/gem_tasks"
  require "rake/testtask"
+ require "yard"
+ require "yard/rake/yardoc_task"
 
  Rake::TestTask.new(:test) do |t|
    t.libs << "test"
@@ -18,10 +20,12 @@ end
 
  task :default => [:test, :build_rb_stb_image]
 
- task :doc do
-   src_list = Dir["lib/dnn.rb"]
-   src_list += Dir["lib/dnn/core/*.rb"]
-   src_list += Dir["lib/dnn/core/layers/*.rb"]
-   src_list += Dir["lib/dnn/*.rb"]
-   sh "yardoc #{src_list.join(' ')}"
+ YARD::Rake::YardocTask.new do |t|
+   t.files = [
+     "lib/dnn.rb",
+     "lib/dnn/core/*.rb",
+     "lib/dnn/core/layers/*.rb",
+     "lib/dnn/*.rb",
+     "lib/dnn/datasets/*.rb",
+   ]
  end
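
Usage note: YARD::Rake::YardocTask registers a task named `yard` by default, so the documentation build presumably moves from `rake doc` to `rake yard`.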
data/lib/dnn/core/layers/basic_layers.rb CHANGED
@@ -7,6 +7,7 @@ module DNN
    prev = (input.is_a?(Tensor) ? input.link : input)
    y = forward_node(x)
    link = Link.new(prev, self)
+   prev.next = link if prev.is_a?(Link)
    Tensor.new(y, link)
  end
 
@@ -77,6 +78,10 @@ module DNN
    @input_shape
  end
 
+ def <<(tensor)
+   self.(tensor)
+ end
+
  # Layer to a hash.
  def to_hash(merge_hash = nil)
    hash = { class: self.class.name }
@@ -154,20 +159,6 @@ module DNN
    method(:call).to_proc
  end
 
- def >>(layer)
-   if RUBY_VERSION < "2.6.0"
-     raise DNNError, "Function composition is not supported before ruby version 2.6.0."
-   end
-   to_proc >> layer
- end
-
- def <<(layer)
-   if RUBY_VERSION < "2.6.0"
-     raise DNNError, "Function composition is not supported before ruby version 2.6.0."
-   end
-   to_proc << layer
- end
-
  def to_hash
    super(input_shape: @input_shape)
  end
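
The Proc-based `>>`/`<<` composition operators are removed in favor of direct tensor application: the new `Layer#<<` simply calls the layer on a tensor, and `Tensor#>>` (added further down) covers left-to-right chaining. A minimal sketch of the new style; the layer and input here are hypothetical:

    x = DNN::Tensor.convert(Numo::SFloat.new(8, 64).rand)  # hypothetical input batch
    dense = DNN::Layers::Dense.new(32)
    y = dense << x   # same as dense.(x); returns a DNN::Tensor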
data/lib/dnn/core/layers/embedding.rb CHANGED
@@ -8,6 +8,7 @@ module DNN
  attr_reader :weight
  attr_reader :weight_initializer
  attr_reader :weight_regularizer
+ attr_reader :mask_zero
 
  # @param [Integer | Array] input_dim_or_shape Set input data dimension or shape.
  # @param [Integer] input_length Set the time series length of input data.
@@ -15,13 +16,15 @@ module DNN
  # @param [DNN::Regularizers::Regularizer | NilClass] weight_regularizer Weight regularizer.
  def initialize(input_dim_or_shape, input_length,
                 weight_initializer: Initializers::RandomUniform.new,
-                weight_regularizer: nil)
+                weight_regularizer: nil,
+                mask_zero: false)
    super()
    @input_shape = input_dim_or_shape.is_a?(Array) ? input_dim_or_shape : [input_dim_or_shape]
    @input_length = input_length
    @weight_initializer = weight_initializer
    @weight_regularizer = weight_regularizer
    @weight = Param.new(nil, Xumo::SFloat[0])
+   @mask_zero = mask_zero
  end
 
  def build(input_shape)
@@ -35,7 +38,14 @@ module DNN
    @x = x
    y = Xumo::SFloat.zeros(*x.shape)
    x.shape[0].times do |i|
-     y[i, false] = @weight.data[x[i, false]]
+     if @mask_zero
+       x.shape[1].times do |j|
+         index = x[i, j]
+         y[i, j] = index == 0 ? 0 : @weight.data[index]
+       end
+     else
+       y[i, false] = @weight.data[x[i, false]]
+     end
    end
    y
  end
@@ -44,7 +54,12 @@ module DNN
    @weight.grad += Xumo::SFloat.zeros(*@weight.data.shape)
    @x.shape[0].times do |i|
      @x.shape[1].times do |j|
-       @weight.grad[@x[i, j]] += dy[i, j]
+       index = @x[i, j]
+       if @mask_zero
+         @weight.grad[index] += dy[i, j] unless index == 0
+       else
+         @weight.grad[index] += dy[i, j]
+       end
      end
    end
    nil
@@ -56,13 +71,15 @@ module DNN
 
  def to_hash
    super(input_shape: @input_shape, input_length: @input_length,
-         weight_initializer: @weight_initializer.to_hash, weight_regularizer: @weight_regularizer&.to_hash)
+         weight_initializer: @weight_initializer.to_hash, weight_regularizer: @weight_regularizer&.to_hash,
+         mask_zero: @mask_zero)
  end
 
  def load_hash(hash)
    initialize(hash[:input_shape], hash[:input_length],
               weight_initializer: Initializers::Initializer.from_hash(hash[:weight_initializer]),
-              weight_regularizer: Regularizers::Regularizer.from_hash(hash[:weight_regularizer]))
+              weight_regularizer: Regularizers::Regularizer.from_hash(hash[:weight_regularizer]),
+              mask_zero: hash[:mask_zero])
  end
 
  def get_params
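
In use, the new keyword reserves id 0 as padding: its lookup returns 0 and its weight row receives no gradient. A brief sketch (the constructor arguments are illustrative):

    embed = DNN::Layers::Embedding.new(10, 5, mask_zero: true)  # id 0 masked out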
data/lib/dnn/core/link.rb CHANGED
@@ -1,14 +1,21 @@
  module DNN
    class Link
      attr_accessor :prev
+     attr_accessor :next
      attr_accessor :layer_node
 
      def initialize(prev = nil, layer_node = nil)
        @prev = prev
        @layer_node = layer_node
+       @next = nil
      end
 
-     def backward(dy = Numo::SFloat[1])
+     def forward(x)
+       x = @layer_node.(x)
+       @next ? @next.forward(x) : x
+     end
+
+     def backward(dy = Xumo::SFloat[1])
        dy = @layer_node.backward_node(dy)
        @prev&.backward(dy)
      end
@@ -17,15 +24,26 @@ module DNN
    class TwoInputLink
      attr_accessor :prev1
      attr_accessor :prev2
+     attr_accessor :next
      attr_accessor :layer_node
 
      def initialize(prev1 = nil, prev2 = nil, layer_node = nil)
        @prev1 = prev1
        @prev2 = prev2
        @layer_node = layer_node
+       @next = nil
+       @hold = []
+     end
+
+     def forward(x)
+       @hold << x
+       return if @hold.length < 2
+       x = @layer_node.(*@hold)
+       @hold = []
+       @next ? @next.forward(x) : x
      end
 
-     def backward(dy = Numo::SFloat[1])
+     def backward(dy = Xumo::SFloat[1])
        dys = @layer_node.backward_node(dy)
        if dys.is_a?(Array)
          dy1, dy2 = *dys
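
With the new `next` pointers the recorded graph is traversable in both directions: `backward` still walks `prev` links, while `forward` replays the graph from the input side, with TwoInputLink buffering operands in `@hold` until both arrive. Schematically, for a recorded chain input_link → dense_link (names hypothetical):

    y = input_link.forward(x)   # applies each layer in turn, roughly dense.(input_layer.(x))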
data/lib/dnn/core/models.rb CHANGED
@@ -72,12 +72,43 @@ module DNN
      end
      @layers_cache = layers_array
    end
+
+   def to_hash
+     layers_hash = { class: self.class.name }
+     instance_variables.sort.each do |ivar|
+       obj = instance_variable_get(ivar)
+       if obj.is_a?(Layers::Layer) || obj.is_a?(Chain)
+         layers_hash[ivar] = obj.to_hash
+       elsif obj.is_a?(LayersList)
+         layers_hash[ivar] = obj.to_hash_list
+       end
+     end
+     layers_hash
+   end
+
+   def load_hash(layers_hash)
+     instance_variables.sort.each do |ivar|
+       hash_or_array = layers_hash[ivar]
+       if hash_or_array.is_a?(Array)
+         instance_variable_set(ivar, LayersList.from_hash_list(hash_or_array))
+       elsif hash_or_array.is_a?(Hash)
+         obj_class = DNN.const_get(hash_or_array[:class])
+         obj = obj_class.allocate
+         if obj.is_a?(Chain)
+           obj = obj_class.new
+           obj.load_hash(hash_or_array)
+           instance_variable_set(ivar, obj)
+         else
+           instance_variable_set(ivar, Layers::Layer.from_hash(hash_or_array))
+         end
+       end
+     end
+   end
  end
 
  # This class deals with the model of the network.
  class Model < Chain
    attr_accessor :optimizer
-   attr_accessor :loss_func
    attr_reader :last_log
 
    # Load marshal model.
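
Chain#to_hash and #load_hash serialize a chain structurally by walking its instance variables, so a hand-written chain round-trips through a plain Hash. A minimal sketch; the class is hypothetical:

    class MyChain < DNN::Models::Chain
      def initialize
        super
        @dense = DNN::Layers::Dense.new(10)
      end

      def forward(x)
        @dense.(x)
      end
    end

    hash = MyChain.new.to_hash   # { class: "MyChain", :@dense => { class: "DNN::Layers::Dense", ... } }
    restored = MyChain.new
    restored.load_hash(hash)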
@@ -100,9 +131,9 @@ module DNN
    end
 
    def call(input_tensors)
-     output_tensor = forward(input_tensors)
+     output_tensors = forward(input_tensors)
      @built = true unless @built
-     output_tensor
+     output_tensors
    end
 
    # Set optimizer and loss_func to model.
@@ -112,11 +143,29 @@ module DNN
      unless optimizer.is_a?(Optimizers::Optimizer)
        raise TypeError, "optimizer:#{optimizer.class} is not an instance of DNN::Optimizers::Optimizer class."
      end
-     unless loss_func.is_a?(Losses::Loss)
-       raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss class."
+     unless loss_func.is_a?(Losses::Loss) || loss_func.is_a?(Array)
+       raise TypeError, "loss_func:#{loss_func.class} is not an instance of DNN::Losses::Loss or Array class."
      end
      @optimizer = optimizer
-     @loss_func = loss_func
+     self.loss_func = loss_func
+   end
+
+   def loss_func
+     @loss_func
+   end
+
+   def loss_func=(lfs)
+     if lfs.is_a?(Array)
+       @loss_func = []
+       lfs.each.with_index do |lf, i|
+         unless lf.is_a?(Losses::Loss)
+           raise TypeError, "loss_func[#{i}]:#{lf.class} is not an instance of DNN::Losses::Loss class."
+         end
+         @loss_func << lf
+       end
+     else
+       @loss_func = lfs
+     end
    end
 
    # Start training.
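
setup now accepts either a single loss or one loss per model output. An illustrative multi-output configuration (the particular losses are arbitrary):

    model.setup(DNN::Optimizers::Adam.new,
                [DNN::Losses::SoftmaxCrossEntropy.new, DNN::Losses::MeanSquaredError.new])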
@@ -232,13 +281,28 @@ module DNN
      check_xy_type(x, y)
      call_callbacks(:before_train_on_batch)
      DNN.learning_phase = true
-     out = call(Tensor.convert(x))
-     loss = @loss_func.loss(out, Tensor.convert(y), layers)
-     loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
+     output_tensors = call(Tensor.convert(x))
+     if output_tensors.is_a?(Array)
+       loss_data = []
+       output_tensors.each.with_index do |out, i|
+         loss = if i == 0
+           @loss_func[i].loss(out, Tensor.convert(y[i]), layers)
+         else
+           @loss_func[i].loss(out, Tensor.convert(y[i]))
+         end
+         loss_data << loss.data.to_f
+         loss.link.backward(Xumo::SFloat.ones(y[i][0...1, false].shape[0], 1))
+       end
+     else
+       out = output_tensors
+       loss = @loss_func.loss(out, Tensor.convert(y), layers)
+       loss_data = loss.data.to_f
+       loss.link.backward(Xumo::SFloat.ones(y[0...1, false].shape[0], 1))
+     end
      @optimizer.update(get_all_trainable_params)
-     @last_log[:train_loss] = loss.data
+     @last_log[:train_loss] = loss_data
      call_callbacks(:after_train_on_batch)
-     loss.data
+     loss_data
    end
 
    # Evaluate model and get accuracy and loss of test data.
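
For a multi-output model, y becomes an array of targets and the returned training loss is an array with one entry per output. A hedged example; the batch shapes are made up:

    x = Numo::SFloat.new(32, 64).rand                            # hypothetical input batch
    y = [Numo::SFloat.new(32, 10).rand, Numo::SFloat.new(32, 1).rand]
    model.train_on_batch(x, y)                                   # => e.g. [2.3026, 0.2513]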
@@ -258,33 +322,66 @@ module DNN
    def evaluate_by_iterator(test_iterator, batch_size: 100)
      num_test_datas = test_iterator.num_datas
      batch_size = batch_size >= num_test_datas ? num_test_datas : batch_size
-     total_correct = 0
-     sum_loss = 0
+     if @loss_func.is_a?(Array)
+       total_correct = Array.new(@loss_func.length, 0)
+       sum_loss = Array.new(@loss_func.length, 0)
+     else
+       total_correct = 0
+       sum_loss = 0
+     end
      max_steps = (num_test_datas.to_f / batch_size).ceil
      test_iterator.foreach(batch_size) do |x_batch, y_batch|
        correct, loss_value = test_on_batch(x_batch, y_batch)
-       total_correct += correct
-       sum_loss += loss_value
+       if @loss_func.is_a?(Array)
+         @loss_func.each_index do |i|
+           total_correct[i] += correct[i]
+           sum_loss[i] += loss_value[i]
+         end
+       else
+         total_correct += correct
+         sum_loss += loss_value
+       end
+     end
+     if @loss_func.is_a?(Array)
+       mean_loss = Array.new(@loss_func.length, 0)
+       acc = Array.new(@loss_func.length, 0)
+       @loss_func.each_index do |i|
+         mean_loss[i] += sum_loss[i] / max_steps
+         acc[i] += total_correct[i].to_f / num_test_datas
+       end
+     else
+       mean_loss = sum_loss / max_steps
+       acc = total_correct.to_f / num_test_datas
      end
-     mean_loss = sum_loss / max_steps
-     acc = total_correct.to_f / num_test_datas
      @last_log[:test_loss] = mean_loss
      @last_log[:test_accuracy] = acc
      [acc, mean_loss]
    end
 
    # Evaluate once.
-   # @param [Numo::SFloat] x Input test data.
-   # @param [Numo::SFloat] y Output test data.
+   # @param [Numo::SFloat | Array] x Input test data.
+   # @param [Numo::SFloat | Array] y Output test data.
    # @return [Array] Returns the test data accuracy and mean loss in the form [accuracy, mean_loss].
    def test_on_batch(x, y)
      call_callbacks(:before_test_on_batch)
      DNN.learning_phase = false
-     out = call(Tensor.convert(x))
-     correct = accuracy(out.data, y)
-     loss = @loss_func.(out, Tensor.convert(y))
+     output_tensors = call(Tensor.convert(x))
+     if output_tensors.is_a?(Array)
+       correct = []
+       loss_data = []
+       output_tensors.each.with_index do |out, i|
+         correct << accuracy(out.data, y[i])
+         loss = @loss_func[i].(out, Tensor.convert(y[i]))
+         loss_data << loss.data.to_f
+       end
+     else
+       out = output_tensors
+       correct = accuracy(out.data, y)
+       loss = @loss_func.(out, Tensor.convert(y))
+       loss_data = loss.data.to_f
+     end
      call_callbacks(:after_test_on_batch)
-     [correct, loss.data]
+     [correct, loss_data]
    end
 
    # Implement the process to accuracy this model.
@@ -313,12 +410,23 @@ module DNN
    def predict(x, use_loss_activation: true)
      check_xy_type(x)
      DNN.learning_phase = false
-     out = call(Tensor.convert(x))
-     y = out.data
-     if use_loss_activation && @loss_func.class.respond_to?(:activation)
-       y = @loss_func.class.activation(y)
+     output_tensors = call(Tensor.convert(x))
+     if output_tensors.is_a?(Array)
+       lfs = @loss_func
+       ary_output_tensors = output_tensors
+     else
+       lfs = [@loss_func]
+       ary_output_tensors = [output_tensors]
+     end
+     ys = []
+     ary_output_tensors.each.with_index do |out, i|
+       y = out.data
+       if use_loss_activation && lfs[i].class.respond_to?(:activation)
+         y = lfs[i].class.activation(y)
+       end
+       ys << y
      end
-     y
+     output_tensors.is_a?(Array) ? ys : ys.first
    end
 
    # Predict one data.
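
predict follows the same convention: multi-output models now return an array of Numo arrays, while single-output models keep the 1.0.0 return type:

    ys = model.predict(x)   # single output: Numo::SFloat; multiple outputs: [Numo::SFloat, ...]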
@@ -433,16 +541,38 @@ module DNN
    end
 
    def metrics_to_str(metrics)
-     metrics.map { |key, num| "#{key}: #{sprintf('%.4f', num)}" }.join(", ")
+     metrics.map { |key, values|
+       str_values = if values.is_a?(Array)
+         values_fmt = values.map { |v| sprintf('%.4f', v) }
+         "[#{values_fmt.join(", ")}]"
+       else
+         sprintf('%.4f', values)
+       end
+       "#{key}: #{str_values}"
+     }.join(", ")
    end
 
    def check_xy_type(x, y = nil)
      if !x.is_a?(Xumo::SFloat) && !x.is_a?(Array)
        raise TypeError, "x:#{x.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class."
      end
+     if x.is_a?(Array)
+       x.each.with_index do |v, i|
+         unless v.is_a?(Xumo::SFloat)
+           raise TypeError, "x[#{i}]:#{v.class.name} is not an instance of #{Xumo::SFloat.name} class."
+         end
+       end
+     end
      if y && !y.is_a?(Xumo::SFloat) && !y.is_a?(Array)
        raise TypeError, "y:#{y.class.name} is not an instance of #{Xumo::SFloat.name} class or Array class."
      end
+     if y.is_a?(Array)
+       y.each.with_index do |v, i|
+         unless v.is_a?(Xumo::SFloat)
+           raise TypeError, "y[#{i}]:#{v.class.name} is not an instance of #{Xumo::SFloat.name} class."
+         end
+       end
+     end
    end
  end
 
@@ -459,14 +589,14 @@ module DNN
    end
 
    # Add layer to the model.
-   # @param [DNN::Layers::Layer] layer Layer to add to the model.
+   # @param [DNN::Layers::Layer | DNN::Models::Chain] layer Layer or Chain to add to the model.
    # @return [DNN::Models::Model] Return self.
    def add(layer)
      if layer.is_a?(Layers::MergeLayer)
        raise TypeError, "layer: #{layer.class.name} should not be a DNN::Layers::MergeLayer class."
      end
-     unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
-       raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class."
+     unless layer.is_a?(Layers::Layer) || layer.is_a?(Chain)
+       raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Chain class."
      end
      @stack << layer
      self
@@ -475,20 +605,20 @@ module DNN
    alias << add
 
    # Insert layer to the model by index position.
-   # @param [DNN::Layers::Layer] layer Layer to add to the model.
+   # @param [DNN::Layers::Layer | DNN::Models::Chain] layer Layer or Chain to add to the model.
    # @return [DNN::Models::Model] Return self.
    def insert(index, layer)
      if layer.is_a?(Layers::MergeLayer)
        raise TypeError, "layer: #{layer.class.name} should not be a DNN::Layers::MergeLayer class."
      end
-     unless layer.is_a?(Layers::Layer) || layer.is_a?(Model)
-       raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Model class."
+     unless layer.is_a?(Layers::Layer) || layer.is_a?(Chain)
+       raise TypeError, "layer: #{layer.class.name} is not an instance of the DNN::Layers::Layer class or DNN::Models::Chain class."
      end
      @stack.insert(index, layer)
    end
 
    # Remove layer to the model.
-   # @param [DNN::Layers::Layer] layer Layer to remove to the model.
+   # @param [DNN::Layers::Layer | DNN::Models::Chain] layer Layer to remove to the model.
    # @return [Boolean] Return true if success for remove layer.
    def remove(layer)
      @stack.delete(layer) ? true : false
@@ -502,5 +632,40 @@ module DNN
      end
    end
 
+   class FixedModel < Model
+     attr_reader :layers
+
+     def initialize(output_tensor, layers)
+       super()
+       @input_link = get_input_link(output_tensor.link)
+       @layers = layers
+     end
+
+     def forward(input_tensors)
+       if input_tensors.is_a?(Array)
+         input_tensors.each do |tensor|
+           @input_link.forward(tensor)
+         end
+       else
+         @input_link.forward(input_tensors)
+       end
+     end
+
+     private
+
+     def get_input_link(last_link)
+       get_input_link = -> link do
+         if link.is_a?(Link)
+           return link unless link.prev
+           get_input_link.(link.prev)
+         else
+           return link unless link.prev1
+           get_input_link.(link.prev1)
+         end
+       end
+       get_input_link.(last_link)
+     end
+   end
+
  end
end
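
FixedModel ties the new link machinery together: it walks back from an output tensor's link to the input link, then replays the recorded graph with Link#forward instead of re-running user forward code. A hedged sketch of the intent (inputs hypothetical):

    out = model.(DNN::Tensor.convert(x))                     # record the graph once
    fixed = DNN::Models::FixedModel.new(out, model.layers)
    fixed.(DNN::Tensor.convert(x2))                          # replays the recorded links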
data/lib/dnn/core/savers.rb CHANGED
@@ -97,7 +97,7 @@ module DNN
    if @include_model
      @model.clean_layers
      data = {
-       version: VERSION, class: @model.class.name, input_shape: @model.layers.first.input_shape,
+       version: VERSION, class: @model.class.name,
        params: params_data, model: @model
      }
    else
data/lib/dnn/core/tensor.rb CHANGED
@@ -16,6 +16,10 @@ module DNN
      @link = link
    end
 
+   def >>(layer)
+     layer.(self)
+   end
+
    def shape
      @data.shape
    end
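
Tensor#>> applies a layer to a tensor, so a forward pass can be written as a left-to-right pipeline. A small sketch with hypothetical layers, where x is a DNN::Tensor:

    y = x >> DNN::Layers::Dense.new(64) >> DNN::Layers::ReLU.new >> DNN::Layers::Dense.new(10)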
data/lib/dnn/keras-model-convertor.rb ADDED
@@ -0,0 +1,170 @@
+ # This library is not yet complete.
+
+ # This library converts keras models to ruby-dnn models.
+ # Use of the library requires the installation of PyCall.
+
+ require "pycall/import"
+ require "numpy"
+ require_relative "numo2numpy"
+
+ include PyCall::Import
+
+ pyimport :numpy, as: :np
+ pyimport :keras
+ pyfrom :"keras.models", import: :Sequential
+ pyfrom :"keras.layers", import: [:Dense, :Dropout, :Conv2D, :Activation, :MaxPooling2D, :Flatten]
+ pyfrom :"keras.layers.normalization", import: :BatchNormalization
+
+ class DNNKerasModelConvertError < DNN::DNNError; end
+
+ class KerasModelConvertor
+   pyfrom :"keras.models", import: :load_model
+
+   def self.k_load_model(k_model_name, k_weights_name)
+     model = load_model(k_model_name)
+     model.load_weights(k_weights_name) if k_weights_name
+     model
+   end
+
+   def initialize(k_model_name, k_weights_name = nil)
+     @k_model = KerasModelConvertor.k_load_model(k_model_name, k_weights_name)
+   end
+
+   def convert
+     unless @k_model.__class__.__name__ == "Sequential"
+       raise DNNKerasModelConvertError.new("#{@k_model.__class__.__name__} models do not support convert.")
+     end
+     dnn_model = DNN::Models::Sequential.new
+     @k_model.layers.each do |k_layer|
+       dnn_layer = layer_convert(k_layer)
+       dnn_model << dnn_layer if dnn_layer
+     end
+     input_shape = @k_model.layers[0].input_shape.to_a[1..-1]
+     input_layer = DNN::Layers::InputLayer.new(input_shape)
+     input_layer.build(input_shape)
+     dnn_model.insert(0, input_layer)
+     dnn_model
+   end
+
+   private
+
+   def layer_convert(k_layer)
+     k_layer_name = k_layer.__class__.__name__
+     method_name = "convert_" + k_layer_name
+     if respond_to?(method_name, true)
+       send(method_name, k_layer)
+     else
+       raise DNNKerasModelConvertError.new("#{k_layer_name} layer do not support convert.")
+     end
+   end
+
+   def get_k_layer_shape(k_layer)
+     input_shape = k_layer.input_shape.to_a[1..-1]
+     output_shape = k_layer.output_shape.to_a[1..-1]
+     [input_shape, output_shape]
+   end
+
+   def build_dnn_layer(k_layer, dnn_layer)
+     input_shape, output_shape = get_k_layer_shape(k_layer)
+     dnn_layer.build(input_shape)
+   end
+
+   def convert_Dense(k_dense)
+     input_shape, output_shape = get_k_layer_shape(k_dense)
+     dense = DNN::Layers::Dense.new(output_shape[0])
+     dense.build(input_shape)
+     dense.weight.data = Numpy.to_na(k_dense.get_weights[0])
+     dense.bias.data = Numpy.to_na(k_dense.get_weights[1])
+     dense
+   end
+
+   def convert_Activation(k_activation)
+     activation_name = k_activation.get_config[:activation].to_s
+     case k_activation.get_config[:activation].to_s
+     when "sigmoid"
+       activation = DNN::Layers::Sigmoid.new
+     when "tanh"
+       activation = DNN::Layers::Tanh.new
+     when "relu"
+       activation = DNN::Layers::ReLU.new
+     when "softmax"
+       return nil
+     else
+       raise DNNKerasModelConvertError.new("#{activation_name} activation do not support convert.")
+     end
+     build_dnn_layer(k_activation, activation)
+     activation
+   end
+
+   def convert_Dropout(k_dropout)
+     dropout_ratio = k_dropout.get_config[:rate]
+     dropout = DNN::Layers::Dropout.new(dropout_ratio, use_scale: false)
+     build_dnn_layer(k_dropout, dropout)
+     dropout
+   end
+
+   def convert_BatchNormalization(k_batch_norm)
+     momentum = k_batch_norm.get_config[:momentum]
+     batch_norm = DNN::Layers::BatchNormalization.new(momentum: momentum)
+     build_dnn_layer(k_batch_norm, batch_norm)
+     batch_norm.gamma.data = Numpy.to_na(k_batch_norm.get_weights[0])
+     batch_norm.beta.data = Numpy.to_na(k_batch_norm.get_weights[1])
+     batch_norm.running_mean.data = Numpy.to_na(k_batch_norm.get_weights[2])
+     batch_norm.running_var.data = Numpy.to_na(k_batch_norm.get_weights[3])
+     batch_norm
+   end
+
+   def convert_Conv2D(k_conv2d)
+     padding = k_conv2d.get_config[:padding].to_s == "same" ? true : false
+     filter_size = k_conv2d.get_config[:kernel_size].to_a
+     strides = k_conv2d.get_config[:strides].to_a
+     num_filters = k_conv2d.get_config[:filters]
+     conv2d = DNN::Layers::Conv2D.new(num_filters, filter_size, padding: padding, strides: strides)
+     build_dnn_layer(k_conv2d, conv2d)
+     conv2d.filters = Numpy.to_na(k_conv2d.get_weights[0])
+     conv2d.bias.data = Numpy.to_na(k_conv2d.get_weights[1])
+     conv2d
+   end
+
+   def convert_Conv2DTranspose(k_conv2d)
+     padding = k_conv2d.get_config[:padding].to_s == "same" ? true : false
+     filter_size = k_conv2d.get_config[:kernel_size].to_a
+     strides = k_conv2d.get_config[:strides].to_a
+     num_filters = k_conv2d.get_config[:filters]
+     conv2d = DNN::Layers::Conv2DTranspose.new(num_filters, filter_size, padding: padding, strides: strides)
+     build_dnn_layer(k_conv2d, conv2d)
+     conv2d.filters = Numpy.to_na(k_conv2d.get_weights[0])
+     conv2d.bias.data = Numpy.to_na(k_conv2d.get_weights[1])
+     conv2d
+   end
+
+   def convert_MaxPooling2D(k_max_pool2d)
+     padding = k_max_pool2d.get_config[:padding].to_s == "same" ? true : false
+     pool_size = k_max_pool2d.get_config[:pool_size].to_a
+     strides = k_max_pool2d.get_config[:strides].to_a
+     max_pool2d = DNN::Layers::MaxPool2D.new(pool_size, padding: padding, strides: strides)
+     build_dnn_layer(k_max_pool2d, max_pool2d)
+     max_pool2d
+   end
+
+   def convert_UpSampling2D(k_upsampling2d)
+     input_shape, output_shape = get_k_layer_shape(k_upsampling2d)
+     unpool_size = k_upsampling2d.get_config[:size].to_a
+     unpool2d = DNN::Layers::UnPool2D.new(unpool_size)
+     build_dnn_layer(k_upsampling2d, unpool2d)
+     unpool2d
+   end
+
+   def convert_Flatten(k_flatten)
+     flatten = DNN::Layers::Flatten.new
+     build_dnn_layer(k_flatten, flatten)
+     flatten
+   end
+
+   def convert_Reshape(k_reshape)
+     input_shape, output_shape = get_k_layer_shape(k_reshape)
+     reshape = DNN::Layers::Reshape.new(output_shape)
+     build_dnn_layer(k_reshape, reshape)
+     reshape
+   end
+ end
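
A usage sketch for the convertor, assuming PyCall, numpy, and Keras are installed; the model file name is hypothetical:

    require "dnn"
    require "dnn/keras-model-convertor"

    # Convert a saved Keras Sequential model into a DNN::Models::Sequential.
    model = KerasModelConvertor.new("mnist_cnn.h5").convert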
data/lib/dnn/numo2numpy.rb ADDED
@@ -0,0 +1,72 @@
+ # This library performs mutual conversion between Numo and Numpy.
+ # You need to install PyCall to use this library.
+ # [Usage]
+ # numpy to numo: Numpy.to_na(np_array)
+ # numo to numpy: Numpy.from_na(narray)
+
+ require "pycall/import"
+ require "numpy"
+
+ include PyCall::Import
+
+ class NumpyToNumoError < StandardError; end
+
+ module Numpy
+   def self.from_na(narray)
+     bin = narray.to_binary
+     bin.force_encoding("ASCII-8BIT")
+     case
+     when narray.is_a?(Numo::Int8)
+       Numpy.frombuffer(bin, dtype: "int8").reshape(*narray.shape)
+     when narray.is_a?(Numo::UInt8)
+       Numpy.frombuffer(bin, dtype: "uint8").reshape(*narray.shape)
+     when narray.is_a?(Numo::Int16)
+       Numpy.frombuffer(bin, dtype: "int16").reshape(*narray.shape)
+     when narray.is_a?(Numo::UInt16)
+       Numpy.frombuffer(bin, dtype: "uint16").reshape(*narray.shape)
+     when narray.is_a?(Numo::Int32)
+       Numpy.frombuffer(bin, dtype: "int32").reshape(*narray.shape)
+     when narray.is_a?(Numo::UInt32)
+       Numpy.frombuffer(bin, dtype: "uint32").reshape(*narray.shape)
+     when narray.is_a?(Numo::Int64)
+       Numpy.frombuffer(bin, dtype: "int64").reshape(*narray.shape)
+     when narray.is_a?(Numo::UInt64)
+       Numpy.frombuffer(bin, dtype: "uint64").reshape(*narray.shape)
+     when narray.is_a?(Numo::SFloat)
+       Numpy.frombuffer(bin, dtype: "float32").reshape(*narray.shape)
+     when narray.is_a?(Numo::DFloat)
+       Numpy.frombuffer(bin, dtype: "float64").reshape(*narray.shape)
+     else
+       raise NumpyToNumoError.new("#{narray.class.name} is not support convert.")
+     end
+   end
+
+   def self.to_na(ndarray)
+     shape = ndarray.shape
+     bin = ndarray.flatten.tobytes
+     case ndarray.dtype.to_s
+     when "int8"
+       Numo::Int8.from_binary(bin).reshape(*shape)
+     when "uint8"
+       Numo::UInt8.from_binary(bin).reshape(*shape)
+     when "int16"
+       Numo::Int16.from_binary(bin).reshape(*shape)
+     when "uint16"
+       Numo::UInt16.from_binary(bin).reshape(*shape)
+     when "int32"
+       Numo::Int32.from_binary(bin).reshape(*shape)
+     when "uint32"
+       Numo::UInt32.from_binary(bin).reshape(*shape)
+     when "int64"
+       Numo::Int64.from_binary(bin).reshape(*shape)
+     when "uint64"
+       Numo::UInt64.from_binary(bin).reshape(*shape)
+     when "float32"
+       Numo::SFloat.from_binary(bin).reshape(*shape)
+     when "float64"
+       Numo::DFloat.from_binary(bin).reshape(*shape)
+     else
+       raise NumpyToNumoError.new("#{ndarray.dtype} is not support convert.")
+     end
+   end
+ end
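
A round-trip sketch of the two helpers, assuming PyCall and numpy are available:

    require "dnn/numo2numpy"

    na  = Numo::SFloat.new(2, 3).seq   # Numo array, float32
    np  = Numpy.from_na(na)            # -> numpy.ndarray, dtype float32
    na2 = Numpy.to_na(np)              # back to Numo::SFloat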
data/lib/dnn/version.rb CHANGED
@@ -1,3 +1,3 @@
  module DNN
-   VERSION = "1.0.0"
+   VERSION = "1.1.0"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
-   version: 1.0.0
+   version: 1.1.0
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2020-01-13 00:00:00.000000000 Z
+ date: 2020-02-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: numo-narray
@@ -150,6 +150,8 @@ files:
  - lib/dnn/datasets/mnist.rb
  - lib/dnn/datasets/stl-10.rb
  - lib/dnn/image.rb
+ - lib/dnn/keras-model-convertor.rb
+ - lib/dnn/numo2numpy.rb
  - lib/dnn/version.rb
  - ruby-dnn.gemspec
  - third_party/stb_image.h