ruby-dnn 0.14.0 → 0.14.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: '08960a5015b086d49e27dc44ea1076cb56c6548cf2945698dd81ae30a1f738e7'
-   data.tar.gz: 2b78bfa3f91b224226fa2f268f0fb0cc94c2d711bc1d7324dddf32d7e50be409
+   metadata.gz: a07d117f60cfce7327a63514df0ffb2bf2228d4dfaa531d5b1c849ad581c5aa3
+   data.tar.gz: 460158875978df670f99a501dc1a5878c392517d2c5123f13c3c8c96718f3a5e
  SHA512:
-   metadata.gz: ccdc09a73a918fd8eb206283d9d2db396a528f737d9981d49ecb90affcfa1d417429c3fb0a7070d57563a3168c3283a6aa762e04271df58d9b57bb2232a96545
-   data.tar.gz: 5e01577078cfee549f2f5b963fa3565a59ea1fc805eca9bebea5a368b95e32f5c248db0e42d28c883eb71b138ce70372abb7f799acc3e2fde72550b3db0996f3
+   metadata.gz: e8cbb2fc0992b458f5af4176d2757f9e1222aba1cf1a980db5f8c4e64c20f8442bb1c6c103f78adbe5855bfdd7a8f01b33ffe3978b369b20b52efbdd9de7af4c
+   data.tar.gz: a67ced6f09ea4d1fd5c81a7e13a1ac1b35d46512b54e06fa3773025036fed3cf5200b5cefb985defe4debd8fc2b8a669fa9a48a2ea21ee6721ff539c89c1d944
data/examples/api-examples/early_stopping_example.rb ADDED
@@ -0,0 +1,58 @@
+ require "dnn"
+ require "dnn/datasets/mnist"
+ # If you use numo/linalg, uncomment the following line.
+ # require "numo/linalg/autoloader"
+
+ include DNN::Models
+ include DNN::Layers
+ include DNN::Optimizers
+ include DNN::Losses
+ include DNN::Callbacks
+ MNIST = DNN::MNIST
+
+ EPOCHS = 3
+ BATCH_SIZE = 128
+
+ x_train, y_train = MNIST.load_train
+ x_test, y_test = MNIST.load_test
+
+ x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
+ x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+
+ x_train /= 255
+ x_test /= 255
+
+ y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+ class MLP < Model
+   def initialize
+     super
+     @l1 = Dense.new(256)
+     @l2 = Dense.new(256)
+     @l3 = Dense.new(10)
+     @bn1 = BatchNormalization.new
+     @bn2 = BatchNormalization.new
+   end
+
+   def call(x)
+     x = InputLayer.(x)
+     x = @l1.(x)
+     x = @bn1.(x)
+     x = ReLU.(x)
+     x = @l2.(x)
+     x = @bn2.(x)
+     x = ReLU.(x)
+     x = @l3.(x)
+     x
+   end
+ end
+
+ model = MLP.new
+ model.setup(Adam.new, SoftmaxCrossEntropy.new)
+
+ # Add an EarlyStopping callback to the model.
+ # This callback stops training once test accuracy reaches 0.9 or higher.
+ model.add_callback(EarlyStopping.new(:test_accuracy, 0.9))
+
+ model.train(x_train, y_train, EPOCHS, batch_size: BATCH_SIZE, test: [x_test, y_test])
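EarlyStopping also supports loss-based triggers, as the callback changes further down show; a minimal sketch with illustrative tolerance values:

    # Stop once the training loss drops to 0.1 or below.
    model.add_callback(EarlyStopping.new(:train_loss, 0.1))
    # Stop once the test loss drops to 0.1 or below.
    model.add_callback(EarlyStopping.new(:test_loss, 0.1))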
data/examples/api-examples/initializer_example.rb ADDED
@@ -0,0 +1,54 @@
+ require "dnn"
+ require "dnn/datasets/mnist"
+ # If you use numo/linalg, uncomment the following line.
+ # require "numo/linalg/autoloader"
+
+ include DNN::Models
+ include DNN::Layers
+ include DNN::Initializers
+ include DNN::Optimizers
+ include DNN::Losses
+ MNIST = DNN::MNIST
+
+ EPOCHS = 3
+ BATCH_SIZE = 128
+
+ x_train, y_train = MNIST.load_train
+ x_test, y_test = MNIST.load_test
+
+ x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
+ x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+
+ x_train /= 255
+ x_test /= 255
+
+ y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+ class MLP < Model
+   def initialize
+     super
+     # Initialize the weights and biases with He initialization.
+     @l1 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+     @l2 = Dense.new(256, weight_initializer: He.new, bias_initializer: He.new)
+     @l3 = Dense.new(10, weight_initializer: He.new, bias_initializer: He.new)
+     @bn1 = BatchNormalization.new
+     @bn2 = BatchNormalization.new
+   end
+
+   def call(x)
+     x = InputLayer.(x)
+     x = @l1.(x)
+     x = @bn1.(x)
+     x = ReLU.(x)
+     x = @l2.(x)
+     x = @bn2.(x)
+     x = ReLU.(x)
+     x = @l3.(x)
+     x
+   end
+ end
+
+ model = MLP.new
+ model.setup(Adam.new, SoftmaxCrossEntropy.new)
+ model.train(x_train, y_train, EPOCHS, batch_size: BATCH_SIZE, test: [x_test, y_test])
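He initialization draws each parameter from a zero-mean normal distribution with standard deviation sqrt(2 / fan_in), which keeps activation variance stable under ReLU. A minimal Numo sketch of the idea (independent of ruby-dnn internals; the shapes mirror the first Dense layer above):

    fan_in = 784
    w = Numo::SFloat.new(fan_in, 256).rand_norm(0, Math.sqrt(2.0 / fan_in))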
data/examples/api-examples/regularizer_example.rb ADDED
@@ -0,0 +1,55 @@
+ require "dnn"
+ require "dnn/datasets/mnist"
+ # If you use numo/linalg, uncomment the following line.
+ # require "numo/linalg/autoloader"
+
+ include DNN::Models
+ include DNN::Layers
+ include DNN::Regularizers
+ include DNN::Optimizers
+ include DNN::Losses
+ MNIST = DNN::MNIST
+
+ EPOCHS = 3
+ BATCH_SIZE = 128
+ L2_LAMBDA = 0.01
+
+ x_train, y_train = MNIST.load_train
+ x_test, y_test = MNIST.load_test
+
+ x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
+ x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+
+ x_train /= 255
+ x_test /= 255
+
+ y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+ class MLP < Model
+   def initialize
+     super
+     # Apply an L2 regularizer (weight decay) to the weights and biases.
+     @l1 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @l2 = Dense.new(256, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @l3 = Dense.new(10, weight_regularizer: L2.new(L2_LAMBDA), bias_regularizer: L2.new(L2_LAMBDA))
+     @bn1 = BatchNormalization.new
+     @bn2 = BatchNormalization.new
+   end
+
+   def call(x)
+     x = InputLayer.(x)
+     x = @l1.(x)
+     x = @bn1.(x)
+     x = ReLU.(x)
+     x = @l2.(x)
+     x = @bn2.(x)
+     x = ReLU.(x)
+     x = @l3.(x)
+     x
+   end
+ end
+
+ model = MLP.new
+ model.setup(Adam.new, SoftmaxCrossEntropy.new)
+ model.train(x_train, y_train, EPOCHS, batch_size: BATCH_SIZE, test: [x_test, y_test])
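An L2 regularizer adds a penalty proportional to the sum of squared parameter values to the loss, shrinking weights toward zero (hence "weight decay"). A minimal sketch of the penalty term with a stand-in weight matrix (the 0.5 factor is a common convention; this diff does not show ruby-dnn's exact scaling):

    w = Numo::SFloat.new(784, 256).rand_norm   # stand-in weight matrix
    penalty = 0.5 * L2_LAMBDA * (w ** 2).sum   # term added to the loss each step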
data/examples/api-examples/save_example.rb ADDED
@@ -0,0 +1,85 @@
+ require "dnn"
+ require "dnn/datasets/mnist"
+ # If you use numo/linalg, uncomment the following line.
+ # require "numo/linalg/autoloader"
+
+ include DNN::Models
+ include DNN::Layers
+ include DNN::Optimizers
+ include DNN::Losses
+ include DNN::Savers
+ include DNN::Loaders
+ MNIST = DNN::MNIST
+
+ USE_MARSHAL = 0
+ USE_JSON = 1
+
+ EPOCHS = 3
+ BATCH_SIZE = 128
+
+ # Select the save format: USE_MARSHAL or USE_JSON.
+ SAVE_STYLE = USE_MARSHAL
+
+ # When true, the saved data includes the optimizer state.
+ # This setting only takes effect when SAVE_STYLE is USE_MARSHAL.
+ INCLUDE_OPTIMIZER = false
+
+ x_train, y_train = MNIST.load_train
+ x_test, y_test = MNIST.load_test
+
+ x_train = Numo::SFloat.cast(x_train).reshape(x_train.shape[0], 784)
+ x_test = Numo::SFloat.cast(x_test).reshape(x_test.shape[0], 784)
+
+ x_train /= 255
+ x_test /= 255
+
+ y_train = DNN::Utils.to_categorical(y_train, 10, Numo::SFloat)
+ y_test = DNN::Utils.to_categorical(y_test, 10, Numo::SFloat)
+
+ class MLP < Model
+   def initialize
+     super
+     @l1 = Dense.new(256)
+     @l2 = Dense.new(256)
+     @l3 = Dense.new(10)
+     @bn1 = BatchNormalization.new
+     @bn2 = BatchNormalization.new
+   end
+
+   def call(x)
+     x = InputLayer.(x)
+     x = @l1.(x)
+     x = @bn1.(x)
+     x = ReLU.(x)
+     x = @l2.(x)
+     x = @bn2.(x)
+     x = ReLU.(x)
+     x = @l3.(x)
+     x
+   end
+ end
+
+ model = MLP.new
+ model.setup(Adam.new, SoftmaxCrossEntropy.new)
+ model.train(x_train, y_train, EPOCHS, batch_size: BATCH_SIZE, test: [x_test, y_test])
+
+ if SAVE_STYLE == USE_MARSHAL
+   saver = MarshalSaver.new(model, include_optimizer: INCLUDE_OPTIMIZER)
+   saver.save("trained_mnist.marshal")
+   # model.save("trained_mnist.marshal") # This is equivalent to the two lines above.
+ elsif SAVE_STYLE == USE_JSON
+   saver = JSONSaver.new(model)
+   saver.save("trained_mnist.json")
+ end
+
+ model2 = MLP.new
+ if SAVE_STYLE == USE_MARSHAL
+   loader = MarshalLoader.new(model2)
+   loader.load("trained_mnist.marshal")
+   # MLP.load("trained_mnist.marshal") # This is equivalent to the two lines above.
+ elsif SAVE_STYLE == USE_JSON
+   loader = JSONLoader.new(model2)
+   loader.load("trained_mnist.json")
+ end
+
+ puts model2.accuracy(x_test, y_test)
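As the inline comments note, the explicit saver/loader pairs have Model-level shortcuts; a minimal sketch (assuming `MLP.load` returns the loaded model instance, per the comment above):

    model.save("trained_mnist.marshal")         # MarshalSaver under the hood
    model2 = MLP.load("trained_mnist.marshal")  # MarshalLoader under the hood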
@@ -65,7 +65,7 @@ module DNN
      def judge_early_stopping_train
        case @trigger
        when :train_loss
-         return true if model.last_log[@trigger].mean <= @tolerance
+         return true if model.last_log[@trigger] <= @tolerance
        end
        false
      end
@@ -73,7 +73,7 @@ module DNN
      def judge_early_stopping_test
        case @trigger
        when :test_loss
-         return true if model.last_log[@trigger].mean <= @tolerance
+         return true if model.last_log[@trigger] <= @tolerance
        when :test_accuracy
          return true if model.last_log[@trigger] >= @tolerance
        end
@@ -97,8 +97,9 @@ module DNN
    end

    class InputLayer < Layer
-     def self.call(input_tensor)
-       new(input_tensor.data.shape[1..-1]).(input_tensor)
+     def self.call(input)
+       shape = input.is_a?(Tensor) ? input.data.shape : input.shape
+       new(shape[1..-1]).(input)
      end

      # @param [Array] input_dim_or_shape Setting the shape or dimension of the input data.
@@ -107,9 +108,16 @@ module DNN
        @input_shape = input_dim_or_shape.is_a?(Array) ? input_dim_or_shape : [input_dim_or_shape]
      end

-     def call(input_tensor)
+     def call(input)
        build unless built?
-       Tensor.new(forward(input_tensor.data), Link.new(input_tensor&.link, self))
+       if input.is_a?(Tensor)
+         x = input.data
+         prev_link = input&.link
+       else
+         x = input
+         prev_link = nil
+       end
+       Tensor.new(forward(x), Link.new(prev_link, self))
      end

      def build
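With this change `InputLayer.()` accepts either a `Tensor` or a raw Numo array; a minimal sketch (batch and feature sizes are illustrative):

    x = Numo::SFloat.new(32, 784).rand  # raw mini-batch, no Tensor wrapper
    t = InputLayer.(x)                  # input shape [784] inferred from x.shape[1..-1]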
@@ -333,7 +341,7 @@ module DNN
      def forward(x)
        if DNN.learning_phase
          Xumo::SFloat.srand(@rnd.rand(1 << 31))
-         @mask = Xumo::SFloat.ones(*x.shape).rand < @dropout_ratio
+         @mask = Xumo::SFloat.new(*x.shape).rand < @dropout_ratio
          x[@mask] = 0
        elsif @use_scale
          x *= (1 - @dropout_ratio)
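Numo's `#rand` fills the receiver in place with uniform values in [0, 1), so the earlier `ones(*x.shape)` only performed a fill that `rand` immediately overwrote; `new` allocates without initializing. A minimal sketch of the mask construction:

    mask = Numo::SFloat.new(3, 3).rand < 0.5  # Numo::Bit mask, roughly half true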
@@ -17,7 +17,7 @@ module DNN
      end
      loss_value = forward(y, t)
      loss_value += regularizers_forward(layers) if layers
-     loss_value.is_a?(Float) ? Xumo::SFloat[loss_value] : loss_value
+     loss_value.is_a?(Float) ? loss_value : loss_value.sum
    end

    def forward(y, t)
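The loss is now returned as a plain scalar: a Float passes through unchanged, and an NArray is collapsed with `.sum` (a full reduction in Numo yields a plain Ruby numeric). This is also why the `.mean` calls disappear from the EarlyStopping checks above and from `train_step` and the evaluation loop below. A small illustration:

    Numo::SFloat[0.1, 0.2, 0.3].sum  # => ~0.6, a scalar usable directly in logs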
@@ -135,7 +135,7 @@ module DNN
    # @return [Hash] Hash of contents to be output to log.
    private def train_step(x, y)
      loss_value = train_on_batch(x, y)
-     { loss: loss_value.mean }
+     { loss: loss_value }
    end

    # Implement the test process to be performed.
@@ -185,7 +185,7 @@ module DNN
      iter.foreach(batch_size) do |x_batch, y_batch|
        correct, loss_value = test_on_batch(x_batch, y_batch)
        total_correct += correct
-       sum_loss += loss_value.mean
+       sum_loss += loss_value
      end
      mean_loss = sum_loss / max_steps
      acc = total_correct.to_f / num_test_datas
@@ -259,8 +259,9 @@ module DNN

    # Save the model in marshal format.
    # @param [String] file_name Name to save model.
-   def save(file_name)
-     saver = Savers::MarshalSaver.new(self)
+   # @param [Boolean] include_optimizer Set true to include the optimizer state in the saved data.
+   def save(file_name, include_optimizer: true)
+     saver = Savers::MarshalSaver.new(self, include_optimizer: include_optimizer)
      saver.save(file_name)
    end
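A minimal usage sketch of the extended signature (file names illustrative):

    model.save("mnist.marshal")                                   # optimizer state included (default)
    model.save("mnist_no_opt.marshal", include_optimizer: false)  # parameters only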
 
@@ -312,7 +313,12 @@ module DNN
      def forward(x, learning_phase)
        DNN.learning_phase = learning_phase
        @layers_cache = nil
-       output_tensor = call(Tensor.new(x, nil))
+       inputs = if x.is_a?(Array)
+         x.map { |a| Tensor.new(a, nil) }
+       else
+         Tensor.new(x, nil)
+       end
+       output_tensor = call(inputs)
        @last_link = output_tensor.link
        unless @built
          @built = true
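With this change, `forward` wraps each element of an Array input in its own Tensor, so a model's `call` can take multiple inputs. A hypothetical two-input sketch (the `+` merge assumes tensor arithmetic is supported; substitute the library's merge layer otherwise):

    class TwoInputNet < Model
      def initialize
        super
        @out = Dense.new(10)
      end

      def call(inputs)
        x1, x2 = inputs  # each element arrives wrapped in a Tensor
        @out.(x1 + x2)   # hypothetical merge via addition
      end
    end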
data/lib/dnn/image.rb CHANGED
@@ -18,9 +18,9 @@ module DNN
    # @param [String] file_name File name to read.
    # @param [Integer] channel_type Specify channel type of image.
    def self.read(file_name, channel_type = RGB)
-     raise ImageReadError.new("#{file_name} is not found.") unless File.exist?(file_name)
+     raise ImageReadError, "#{file_name} is not found." unless File.exist?(file_name)
      bin, w, h, n = Stb.stbi_load(file_name, channel_type)
-     raise ImageReadError.new("#{file_name} load failed.") if bin == ""
+     raise ImageReadError, "#{file_name} load failed." if bin == ""
      img = Numo::UInt8.from_binary(bin)
      img.reshape(h, w, channel_type)
    end
@@ -38,18 +38,26 @@ module DNN
      end
      h, w, ch = img.shape
      bin = img.to_binary
-     case file_name
-     when /\.png$/i
+     match_data = file_name.match(/\.(\w+)$/i)
+     if match_data
+       ext = match_data[1]
+     else
+       raise ImageWriteError, "File name has no extension."
+     end
+     case ext
+     when "png"
        stride_in_bytes = w * ch
        res = Stb.stbi_write_png(file_name, w, h, ch, bin, stride_in_bytes)
-     when /\.bmp$/i
+     when "bmp"
        res = Stb.stbi_write_bmp(file_name, w, h, ch, bin)
-     when /\.jpg$/i, /\.jpeg/i
+     when "jpg", "jpeg"
        raise TypeError, "quality:#{quality.class} is not an instance of Integer class." unless quality.is_a?(Integer)
        raise ArgumentError, "quality should be between 1 and 100." unless quality.between?(1, 100)
        res = Stb.stbi_write_jpg(file_name, w, h, ch, bin, quality)
+     else
+       raise ImageWriteError, "Extension '#{ext}' is not supported."
      end
-     raise ImageWriteError.new("Image write failed.") if res == 0
+     raise ImageWriteError, "Image write failed." if res == 0
    end

    # Resize the image.
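A minimal usage sketch of the rewritten extension dispatch (the `write(file_name, img, quality: 100)` signature is an assumption; this hunk only shows the method body):

    img = Numo::UInt8.new(32, 32, 3).rand(256)     # random RGB image
    DNN::Image.write("out.jpg", img, quality: 90)  # "jpg" branch, quality validated
    DNN::Image.write("out.tiff", img)              # raises ImageWriteError: unsupported extension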
@@ -90,11 +98,11 @@ module DNN
    end

    private_class_method def self.img_check(img)
-     raise TypeError.new("img: #{img.class} is not an instance of the Numo::UInt8 class.") unless img.is_a?(Numo::UInt8)
+     raise TypeError, "img: #{img.class} is not an instance of the Numo::UInt8 class." unless img.is_a?(Numo::UInt8)
      if img.shape.length != 3
-       raise ImageShapeError.new("img shape is #{img.shape}. But img shape must be 3 dimensional.")
+       raise ImageShapeError, "img shape is #{img.shape}, but img must be 3-dimensional."
      elsif !img.shape[2].between?(1, 4)
-       raise ImageShapeError.new("img channel is #{img.shape[2]}. But img channel must be 1 or 2 or 3 or 4.")
+       raise ImageShapeError, "img channel is #{img.shape[2]}, but the channel count must be 1, 2, 3, or 4."
      end
    end
  end
data/lib/dnn/version.rb CHANGED
@@ -1,3 +1,3 @@
  module DNN
-   VERSION = "0.14.0"
+   VERSION = "0.14.1"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: ruby-dnn
  version: !ruby/object:Gem::Version
-   version: 0.14.0
+   version: 0.14.1
  platform: ruby
  authors:
  - unagiootoro
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2019-10-19 00:00:00.000000000 Z
+ date: 2019-10-21 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: numo-narray
@@ -98,6 +98,10 @@ files:
  - Rakefile
  - bin/console
  - bin/setup
+ - examples/api-examples/early_stopping_example.rb
+ - examples/api-examples/initializer_example.rb
+ - examples/api-examples/regularizer_example.rb
+ - examples/api-examples/save_example.rb
  - examples/cifar100_example.rb
  - examples/cifar10_example.rb
  - examples/dcgan/dcgan.rb